Compare commits
181 Commits
Commit SHA1s:

9323c57f49, 85e3c73525, a74bc2e58f, 0680638933, 46d31ee5f7, e794b397d3, d41350050b, 4cd5b06b7f, cd768439d2, 9e5fd2d576,
ecb46f591c, d62d53aa8e, 2c515ab13c, 83d556ff0c, 678d313836, 705d840ea3, 7dff8c01dd, 5860679624, 4628e4519d, b884fd20a1,
67c657003d, 580c1bbc7d, 2b6383d243, f27455a26f, 1d4f900e48, c5ca588a6f, 06888251e3, 1a6e4cf4e4, 9f86196a9d, 1e31043fb3,
85adcf1ae5, 9abb4d2873, 235ff44736, 9c2d741749, 37cc0c34cf, 5633b6ac94, 175f2aeace, feefe69094, 46df3ee7cd, bb945ad01b,
de86aa671e, e38771bbbd, a3f9a8d7dc, 4b6bc6ef66, 455a23361f, 1a8ec04733, 4e60df7a08, 219a9d9f5e, 48baf723a4, 6530904883,
d15d24f4ff, 8d992d637e, 6ebc83c3b7, b32f4451ee, 99142c7552, db710bb931, a9e9a397d8, d46a6ac687, 1eb5495802, 7cf8809d77,
043aa27aa3, 9824d94a1c, e8ef76b8f9, be1ddb4203, caddf21fca, 5379329ef7, 6faaeaae66, 3fed323385, 58a928547d, 558410c5bd,
0dc0decaa7, d11d663c5c, 771233176f, ed70b07d81, e25fc7083d, fa364c3f2c, b5f9fe4d3b, 013d4c28b2, 63acc8619b, ec920b5756,
95caaf2a40, 7099f8bee8, b41a0d840c, c577ade90e, 257b143df1, 34ee326ce9, 090104ce1b, 3305d5dc92, 296063e135, b9daa59e5d,
5bdcfe128d, 1842a796fb, ce99e5c583, 0c96c2d305, 5796b6b554, c7ab27c86f, 8c03746a67, 8746d36845, 448e6ac917, 729c9cff41,
22b9c80007, ab4355cfed, 948dc82228, bc74fd23e7, 37776241be, feba41ec88, 6a8f42da8a, 670d8cb83a, 2f7fbde789, c698bca2b9,
0b6a003a8b, c64560016e, 978be0b4a9, b58bff1178, 2f3e18caa9, 6a291040bd, dbc082dc75, 32a0dd09bf, f847c6e225, 99da5fbebb,
6a0d024c69, b24929a243, 9a47821642, d69968313b, 3c377d97dc, ea15218197, 0eee907c88, c877583979, 844cf70345, a0d92a167c,
d7b0d6f9f5, 4c3b328aca, 260ffee093, c59cfe3371, 0822c0c128, 57a88f0a1b, 87393409f9, 062f5e4712, aaba1e8368, ff2684dfee,
6b5fa201aa, 7167e443ca, 175d647e47, 4c324e1160, 0365b7c6a4, 19889187a5, 9571277c44, a202da9e23, e5a77a477d, c05dc50f53,
3bbdbb832c, d9684bef6b, db0c45c172, ad4393e3f7, f83a8a36d1, 0e9eba8c8b, d5c760960a, 2c6ef2bc68, 7032ae5587, eba22c2d94,
11cc9ae0c0, fb648db47d, 959283d333, 385c2227e7, 6d9f03e84b, 6a972e4b19, 171b174ce9, 93b7ded1e6, 29c6b145ca, a7a479623c,
83dff9ae6e, 6b2cc5a3ee, 5247e0d773, 05b308b8b4, 9621278fca, 570d6c8bf9, ad48e9ed0f, f724addf9a, aa20974703, a846f6c610,
c218c34812
.gitignore

```diff
@@ -3,6 +3,7 @@ target
 
 # Data folder
 data
+.env
 
 # IDE files
 .vscode
@@ -10,5 +11,15 @@ data
 *.iml
 
 # Documentation
+.github
 *.md
+*.txt
+*.yml
+*.yaml
+
+# Docker folders
+hooks
+tools
+
+# Web vault
+web-vault
```
.env.template (109 lines changed)

```diff
@@ -1,14 +1,28 @@
 ## Bitwarden_RS Configuration File
 ## Uncomment any of the following lines to change the defaults
+##
+## Be aware that most of these settings will be overridden if they were changed
+## in the admin interface. Those overrides are stored within DATA_FOLDER/config.json .
 
 ## Main data folder
 # DATA_FOLDER=data
 
 ## Database URL
 ## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
-## When using MySQL, this it is the URL to the DB, including username and password:
-## Format: mysql://[user[:password]@]host/database_name
 # DATABASE_URL=data/db.sqlite3
+## When using MySQL, specify an appropriate connection URI.
+## Details: https://docs.diesel.rs/diesel/mysql/struct.MysqlConnection.html
+# DATABASE_URL=mysql://user:password@host[:port]/database_name
+## When using PostgreSQL, specify an appropriate connection URI (recommended)
+## or keyword/value connection string.
+## Details:
+## - https://docs.diesel.rs/diesel/pg/struct.PgConnection.html
+## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
+# DATABASE_URL=postgresql://user:password@host[:port]/database_name
+
+## Database max connections
+## Define the size of the connection pool used for connecting to the database.
+# DATABASE_MAX_CONNS=10
 
 ## Individual folders, these override %DATA_FOLDER%
 # RSA_KEY_FILENAME=data/rsa_key
@@ -60,7 +74,7 @@
 ## Log level
 ## Change the verbosity of the log output
 ## Valid values are "trace", "debug", "info", "warn", "error" and "off"
 ## Setting it to "trace" or "debug" would also show logs for mounted
 ## routes and static file, websocket and alive requests
 # LOG_LEVEL=Info
 
@@ -72,6 +86,10 @@
 ## cause performance degradation or might render the service unable to start.
 # ENABLE_DB_WAL=true
 
+## Database connection retries
+## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
+# DB_CONNECTION_RETRIES=15
+
 ## Disable icon downloading
 ## Set to true to disable icon downloading, this would still serve icons from $ICON_CACHE_FOLDER,
 ## but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
@@ -86,10 +104,11 @@
 ## Icon blacklist Regex
 ## Any domains or IPs that match this regex won't be fetched by the icon service.
 ## Useful to hide other servers in the local network. Check the WIKI for more details
-# ICON_BLACKLIST_REGEX=192\.168\.1\.[0-9].*^
+## NOTE: Always enclose this regex withing single quotes!
+# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
 
 ## Any IP which is not defined as a global IP will be blacklisted.
-## Usefull to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
+## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
 # ICON_BLACKLIST_NON_GLOBAL_IPS=true
 
 ## Disable 2FA remember
@@ -97,6 +116,18 @@
 ## Note that the checkbox would still be present, but ignored.
 # DISABLE_2FA_REMEMBER=false
 
+## Maximum attempts before an email token is reset and a new email will need to be sent.
+# EMAIL_ATTEMPTS_LIMIT=3
+
+## Token expiration time
+## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
+# EMAIL_EXPIRATION_TIME=600
+
+## Email token size
+## Number of digits in an email token (min: 6, max: 19).
+## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
+# EMAIL_TOKEN_SIZE=6
+
 ## Controls if new users can register
 # SIGNUPS_ALLOWED=true
 
@@ -118,6 +149,14 @@
 ## even if SIGNUPS_ALLOWED is set to false
 # SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
 
+## Controls which users can create new orgs.
+## Blank or 'all' means all users can create orgs (this is the default):
+# ORG_CREATION_USERS=
+## 'none' means no users can create orgs:
+# ORG_CREATION_USERS=none
+## A comma-separated list means only those users can create orgs:
+# ORG_CREATION_USERS=admin1@example.com,admin2@example.com
+
 ## Token for the admin interface, preferably use a long random string
 ## One option is to use 'openssl rand -base64 48'
 ## If not set, the admin panel is disabled
@@ -129,6 +168,16 @@
 
 ## Invitations org admins to invite users, even when signups are disabled
 # INVITATIONS_ALLOWED=true
+## Name shown in the invitation emails that don't come from a specific organization
+# INVITATION_ORG_NAME=Bitwarden_RS
+
+## Per-organization attachment limit (KB)
+## Limit in kilobytes for an organization attachments, once the limit is exceeded it won't be possible to upload more
+# ORG_ATTACHMENT_LIMIT=
+## Per-user attachment limit (KB).
+## Limit in kilobytes for a users attachments, once the limit is exceeded it won't be possible to upload more
+# USER_ATTACHMENT_LIMIT=
+
 
 ## Controls the PBBKDF password iterations to apply on the server
 ## The change only applies when the password is changed
@@ -144,6 +193,13 @@
 ## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
 # DOMAIN=https://bw.domain.tld:8443
 
+## Allowed iframe ancestors (Know the risks!)
+## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
+## Allows other domains to embed the web vault into an iframe, useful for embedding into secure intranets
+## This adds the configured value to the 'Content-Security-Policy' headers 'frame-ancestors' value.
+## Multiple values must be separated with a whitespace.
+# ALLOWED_IFRAME_ANCESTORS=
+
 ## Yubico (Yubikey) Settings
 ## Set your Client ID and Secret Key for Yubikey OTP
 ## You can generate it here: https://upgrade.yubico.com/getapikey/
@@ -166,7 +222,7 @@
 ## Authenticator Settings
 ## Disable authenticator time drifted codes to be valid.
 ## TOTP codes of the previous and next 30 seconds will be invalid
 ##
 ## According to the RFC6238 (https://tools.ietf.org/html/rfc6238),
 ## we allow by default the TOTP code which was valid one step back and one in the future.
 ## This can however allow attackers to be a bit more lucky with there attempts because there are 3 valid codes.
@@ -187,12 +243,45 @@
 # SMTP_HOST=smtp.domain.tld
 # SMTP_FROM=bitwarden-rs@domain.tld
 # SMTP_FROM_NAME=Bitwarden_RS
-# SMTP_PORT=587
-# SMTP_SSL=true
-# SMTP_EXPLICIT_TLS=true # N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851)
+# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 is outdated and used with Implicit TLS.
+# SMTP_SSL=true # (Explicit) - This variable by default configures Explicit STARTTLS, it will upgrade an insecure connection to a secure one. Unless SMTP_EXPLICIT_TLS is set to true. Either port 587 or 25 are default.
+# SMTP_EXPLICIT_TLS=true # (Implicit) - N.B. This variable configures Implicit TLS. It's currently mislabelled (see bug #851) - SMTP_SSL Needs to be set to true for this option to work. Usually port 465 is used here.
 # SMTP_USERNAME=username
 # SMTP_PASSWORD=password
-# SMTP_AUTH_MECHANISM="Plain"
 # SMTP_TIMEOUT=15
 
+## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
+## Possible values: ["Plain", "Login", "Xoauth2"].
+## Multiple options need to be separated by a comma ','.
+# SMTP_AUTH_MECHANISM="Plain"
+
+## Server name sent during the SMTP HELO
+## By default this value should be is on the machine's hostname,
+## but might need to be changed in case it trips some anti-spam filters
+# HELO_NAME=
+
+## SMTP debugging
+## When set to true this will output very detailed SMTP messages.
+## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
+# SMTP_DEBUG=false
+
+## Accept Invalid Hostnames
+## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
+## Only use this as a last resort if you are not able to use a valid certificate.
+# SMTP_ACCEPT_INVALID_HOSTNAMES=false
+
+## Accept Invalid Certificates
+## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
+## Only use this as a last resort if you are not able to use a valid certificate.
+## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
+# SMTP_ACCEPT_INVALID_CERTS=false
+
+## Require new device emails. When a user logs in an email is required to be sent.
+## If sending the email fails the login attempt will fail!!
+# REQUIRE_DEVICE_EMAIL=false
+
+## HIBP Api Key
+## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
+# HIBP_API_KEY=
+
 # vim: syntax=ini
```
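Read together, these additions describe complete configurations for non-SQLite databases and authenticated SMTP. Below is a hypothetical `.env` assembled from the new options; every host, credential, and value is a placeholder, not something from the repository:

```ini
## Hypothetical example values; adjust hosts, credentials and limits to your setup.
# Use PostgreSQL instead of the SQLite default, with a modest connection pool.
DATABASE_URL=postgresql://bitwarden:secret@db.example.com:5432/bitwarden
DATABASE_MAX_CONNS=10

# Implicit TLS on port 465: SMTP_SSL must stay true, and SMTP_EXPLICIT_TLS
# selects the (mislabelled, see bug #851) Implicit TLS mode.
SMTP_HOST=smtp.example.com
SMTP_PORT=465
SMTP_SSL=true
SMTP_EXPLICIT_TLS=true

# Keep the icon fetcher away from private address ranges; note the single quotes.
ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
ICON_BLACKLIST_NON_GLOBAL_IPS=true
```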
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 23 lines changed)

```diff
@@ -6,27 +6,36 @@ labels: ''
 assignees: ''
 
 ---
+<!--
+# ###
+NOTE: Please update to the latest version of bitwarden_rs before reporting an issue!
+This saves you and us a lot of time and troubleshooting.
+See: https://github.com/dani-garcia/bitwarden_rs/issues/1180
+# ###
+-->
+
 
 <!--
 Please fill out the following template to make solving your problem easier and faster for us.
-This is only a guideline. If you think that parts are unneccessary for your issue, feel free to remove them.
+This is only a guideline. If you think that parts are unnecessary for your issue, feel free to remove them.
 
 Remember to hide/obfuscate personal and confidential information,
-such as names, global IP/DNS adresses and especially passwords, if neccessary.
+such as names, global IP/DNS addresses and especially passwords, if necessary.
 -->
 
 ### Subject of the issue
 <!-- Describe your issue here.-->
 
 ### Your environment
-<!-- The version number, obtained from the logs or the admin page -->
-* Bitwarden_rs version:
+<!-- The version number, obtained from the logs or the admin diagnostics page -->
+<!-- Remember to check your issue on the latest version first! -->
+* Bitwarden_rs version:
 <!-- How the server was installed: Docker image / package / built from source -->
 * Install method:
 * Clients used: <!-- if applicable -->
 * Reverse proxy and version: <!-- if applicable -->
 * Version of mysql/postgresql: <!-- if applicable -->
 * Other relevant information:
 
 ### Steps to reproduce
 <!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
```
.github/workflows/build.yml (vendored, new file, 125 lines)

```diff
@@ -0,0 +1,125 @@
+name: Build
+
+on:
+  push:
+    # Ignore when there are only changes done too one of these paths
+    paths-ignore:
+      - "**.md"
+      - "**.txt"
+      - "azure-pipelines.yml"
+      - "docker/**"
+      - "hooks/**"
+      - "tools/**"
+
+jobs:
+  build:
+    strategy:
+      fail-fast: false
+      matrix:
+        channel:
+          - nightly
+          # - stable
+        target-triple:
+          - x86_64-unknown-linux-gnu
+          # - x86_64-unknown-linux-musl
+        include:
+          - target-triple: x86_64-unknown-linux-gnu
+            host-triple: x86_64-unknown-linux-gnu
+            features: "sqlite,mysql,postgresql"
+            channel: nightly
+            os: ubuntu-18.04
+            ext:
+          # - target-triple: x86_64-unknown-linux-gnu
+          #   host-triple: x86_64-unknown-linux-gnu
+          #   features: "sqlite,mysql,postgresql"
+          #   channel: stable
+          #   os: ubuntu-18.04
+          #   ext:
+          # - target-triple: x86_64-unknown-linux-musl
+          #   host-triple: x86_64-unknown-linux-gnu
+          #   features: "sqlite,postgresql"
+          #   channel: nightly
+          #   os: ubuntu-18.04
+          #   ext:
+          # - target-triple: x86_64-unknown-linux-musl
+          #   host-triple: x86_64-unknown-linux-gnu
+          #   features: "sqlite,postgresql"
+          #   channel: stable
+          #   os: ubuntu-18.04
+          #   ext:
+
+    name: Building ${{ matrix.channel }}-${{ matrix.target-triple }}
+    runs-on: ${{ matrix.os }}
+    steps:
+      # Checkout the repo
+      - name: Checkout
+        uses: actions/checkout@v2
+      # End Checkout the repo
+
+
+      # Install musl-tools when needed
+      - name: Install musl tools
+        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends musl-dev musl-tools cmake
+        if: matrix.target-triple == 'x86_64-unknown-linux-musl'
+      # End Install musl-tools when needed
+
+
+      # Install dependencies
+      - name: Install dependencies Ubuntu
+        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl sqlite build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
+        if: startsWith( matrix.os, 'ubuntu' )
+      # End Install dependencies
+
+
+      # Enable Rust Caching
+      - uses: Swatinem/rust-cache@v1
+      # End Enable Rust Caching
+
+
+      # Uses the rust-toolchain file to determine version
+      - name: 'Install ${{ matrix.channel }}-${{ matrix.host-triple }} for target: ${{ matrix.target-triple }}'
+        uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          target: ${{ matrix.target-triple }}
+      # End Uses the rust-toolchain file to determine version
+
+
+      # Run cargo tests (In release mode to speed up cargo build afterwards)
+      - name: '`cargo test --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
+        uses: actions-rs/cargo@v1
+        with:
+          command: test
+          args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
+      # End Run cargo tests
+
+
+      # Build the binary
+      - name: '`cargo build --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}`'
+        uses: actions-rs/cargo@v1
+        with:
+          command: build
+          args: --release --features ${{ matrix.features }} --target ${{ matrix.target-triple }}
+      # End Build the binary
+
+
+      # Upload artifact to Github Actions
+      - name: Upload artifact
+        uses: actions/upload-artifact@v2
+        with:
+          name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
+          path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
+      # End Upload artifact to Github Actions
+
+
+      ## This is not used at the moment
+      ## We could start using this when we can build static binaries
+      # Upload to github actions release
+      # - name: Release
+      #   uses: Shopify/upload-to-release@1
+      #   if: startsWith(github.ref, 'refs/tags/')
+      #   with:
+      #     name: bitwarden_rs-${{ matrix.target-triple }}${{ matrix.ext }}
+      #     path: target/${{ matrix.target-triple }}/release/bitwarden_rs${{ matrix.ext }}
+      #     repo-token: ${{ secrets.GITHUB_TOKEN }}
+      # End Upload to github actions release
```
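Since the workflow wraps plain cargo invocations, the single active matrix entry can be reproduced locally. A sketch, assuming the nightly toolchain from the repository's rust-toolchain file and the apt packages listed in the install step:

```bash
# Mirror the CI steps for the nightly / x86_64-unknown-linux-gnu matrix entry.
rustup target add x86_64-unknown-linux-gnu
cargo test  --release --features sqlite,mysql,postgresql --target x86_64-unknown-linux-gnu
cargo build --release --features sqlite,mysql,postgresql --target x86_64-unknown-linux-gnu
# The binary lands where the upload-artifact step expects it:
ls target/x86_64-unknown-linux-gnu/release/bitwarden_rs
```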
.github/workflows/hadolint.yml (vendored, new file, 34 lines)

```diff
@@ -0,0 +1,34 @@
+name: Hadolint
+
+on:
+  pull_request:
+    # Ignore when there are only changes done too one of these paths
+    paths:
+      - "docker/**"
+
+jobs:
+  hadolint:
+    name: Validate Dockerfile syntax
+    runs-on: ubuntu-20.04
+    steps:
+      # Checkout the repo
+      - name: Checkout
+        uses: actions/checkout@v2
+      # End Checkout the repo
+
+
+      # Download hadolint
+      - name: Download hadolint
+        shell: bash
+        run: |
+          sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
+          sudo chmod +x /usr/local/bin/hadolint
+        env:
+          HADOLINT_VERSION: 1.19.0
+      # End Download hadolint
+
+      # Test Dockerfiles
+      - name: Run hadolint
+        shell: bash
+        run: git ls-files --exclude='docker/*/Dockerfile*' --ignored | xargs hadolint
+      # End Test Dockerfiles
```
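The same check can be run locally before opening a pull request. A sketch mirroring the workflow's own commands, using the hadolint version it pins:

```bash
# Fetch the pinned hadolint release and lint the repository's Dockerfiles,
# exactly as the workflow's run steps do.
HADOLINT_VERSION=1.19.0
sudo curl -L "https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m)" -o /usr/local/bin/hadolint
sudo chmod +x /usr/local/bin/hadolint
git ls-files --exclude='docker/*/Dockerfile*' --ignored | xargs hadolint
```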
.github/workflows/workspace.yml (vendored, deleted, 148 lines)

```diff
@@ -1,148 +0,0 @@
-name: Workflow
-
-on:
-  push:
-    paths-ignore:
-      - "**.md"
-  #pull_request:
-  #  paths-ignore:
-  #    - "**.md"
-
-jobs:
-  build:
-    name: Build
-    strategy:
-      fail-fast: false
-      matrix:
-        db-backend: [sqlite, mysql, postgresql]
-        target:
-          - x86_64-unknown-linux-gnu
-          # - x86_64-unknown-linux-musl
-          # - x86_64-apple-darwin
-          # - x86_64-pc-windows-msvc
-        include:
-          - target: x86_64-unknown-linux-gnu
-            os: ubuntu-latest
-            ext:
-          # - target: x86_64-unknown-linux-musl
-          #   os: ubuntu-latest
-          #   ext:
-          # - target: x86_64-apple-darwin
-          #   os: macOS-latest
-          #   ext:
-          # - target: x86_64-pc-windows-msvc
-          #   os: windows-latest
-          #   ext: .exe
-    runs-on: ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v1
-
-      # - name: Cache choco cache
-      #   uses: actions/cache@v1.0.3
-      #   if: matrix.os == 'windows-latest'
-      #   with:
-      #     path: ~\AppData\Local\Temp\chocolatey
-      #     key: ${{ runner.os }}-choco-cache-${{ matrix.db-backend }}
-
-      - name: Cache vcpkg installed
-        uses: actions/cache@v1.0.3
-        if: matrix.os == 'windows-latest'
-        with:
-          path: $VCPKG_ROOT/installed
-          key: ${{ runner.os }}-vcpkg-cache-${{ matrix.db-backend }}
-        env:
-          VCPKG_ROOT: 'C:\vcpkg'
-
-      - name: Cache vcpkg downloads
-        uses: actions/cache@v1.0.3
-        if: matrix.os == 'windows-latest'
-        with:
-          path: $VCPKG_ROOT/downloads
-          key: ${{ runner.os }}-vcpkg-cache-${{ matrix.db-backend }}
-        env:
-          VCPKG_ROOT: 'C:\vcpkg'
-
-      # - name: Cache homebrew
-      #   uses: actions/cache@v1.0.3
-      #   if: matrix.os == 'macOS-latest'
-      #   with:
-      #     path: ~/Library/Caches/Homebrew
-      #     key: ${{ runner.os }}-brew-cache
-
-      # - name: Cache apt
-      #   uses: actions/cache@v1.0.3
-      #   if: matrix.os == 'ubuntu-latest'
-      #   with:
-      #     path: /var/cache/apt/archives
-      #     key: ${{ runner.os }}-apt-cache
-
-      # Install dependencies
-      - name: Install dependencies macOS
-        run: brew update; brew install openssl sqlite libpq mysql
-        if: matrix.os == 'macOS-latest'
-
-      - name: Install dependencies Ubuntu
-        run: sudo apt-get update && sudo apt-get install --no-install-recommends openssl sqlite libpq-dev libmysql++-dev
-        if: matrix.os == 'ubuntu-latest'
-
-      - name: Install dependencies Windows
-        run: vcpkg integrate install; vcpkg install sqlite3:x64-windows openssl:x64-windows libpq:x64-windows libmysql:x64-windows
-        if: matrix.os == 'windows-latest'
-        env:
-          VCPKG_ROOT: 'C:\vcpkg'
-      # End Install dependencies
-
-      # Install rust nightly toolchain
-      - name: Cache cargo registry
-        uses: actions/cache@v1.0.3
-        with:
-          path: ~/.cargo/registry
-          key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
-      - name: Cache cargo index
-        uses: actions/cache@v1.0.3
-        with:
-          path: ~/.cargo/git
-          key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
-      - name: Cache cargo build
-        uses: actions/cache@v1.0.3
-        with:
-          path: target
-          key: ${{ runner.os }}-${{matrix.db-backend}}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }}
-
-      - name: Install latest nightly
-        uses: actions-rs/toolchain@v1.0.5
-        with:
-          # Uses rust-toolchain to determine version
-          profile: minimal
-          target: ${{ matrix.target }}
-
-      # Build
-      - name: Build Win
-        if: matrix.os == 'windows-latest'
-        run: cargo.exe build --features ${{ matrix.db-backend }} --release --target ${{ matrix.target }}
-        env:
-          RUSTFLAGS: -Ctarget-feature=+crt-static
-          VCPKG_ROOT: 'C:\vcpkg'
-
-      - name: Build macOS / Ubuntu
-        if: matrix.os == 'macOS-latest' || matrix.os == 'ubuntu-latest'
-        run: cargo build --verbose --features ${{ matrix.db-backend }} --release --target ${{ matrix.target }}
-
-      # Test
-      - name: Run tests
-        run: cargo test --features ${{ matrix.db-backend }}
-
-      # Upload & Release
-      - name: Upload artifact
-        uses: actions/upload-artifact@v1.0.0
-        with:
-          name: bitwarden_rs-${{ matrix.db-backend }}-${{ matrix.target }}${{ matrix.ext }}
-          path: target/${{ matrix.target }}/release/bitwarden_rs${{ matrix.ext }}
-
-      - name: Release
-        uses: Shopify/upload-to-release@1.0.0
-        if: startsWith(github.ref, 'refs/tags/')
-        with:
-          name: bitwarden_rs-${{ matrix.db-backend }}-${{ matrix.target }}${{ matrix.ext }}
-          path: target/${{ matrix.target }}/release/bitwarden_rs${{ matrix.ext }}
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
```
.travis.yml (deleted, 21 lines)

```diff
@@ -1,21 +0,0 @@
-dist: xenial
-
-env:
-  global:
-    - HADOLINT_VERSION=1.17.1
-
-language: rust
-rust: nightly
-cache: cargo
-
-before_install:
-  - sudo curl -L https://github.com/hadolint/hadolint/releases/download/v$HADOLINT_VERSION/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint
-  - sudo chmod +rx /usr/local/bin/hadolint
-  - rustup set profile minimal
-
-# Nothing to install
-install: true
-script:
-  - git ls-files --exclude='Dockerfile*' --ignored | xargs --max-lines=1 hadolint
-  - cargo test --features "sqlite"
-  - cargo test --features "mysql"
```
Cargo.lock (generated): 1402 lines changed (diff collapsed).
Cargo.toml (67 lines changed)

```diff
@@ -16,9 +16,11 @@ enable_syslog = []
 mysql = ["diesel/mysql", "diesel_migrations/mysql"]
 postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
 sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"]
+# Enable to use a vendored and statically linked openssl
+vendored_openssl = ["openssl/vendored"]
 
 # Enable unstable features, requires nightly
 # Currently only used to enable rusts official ip support
 unstable = []
 
 [target."cfg(not(windows))".dependencies]
@@ -30,27 +32,26 @@ rocket = { version = "0.5.0-dev", features = ["tls"], default-features = false }
 rocket_contrib = "0.5.0-dev"
 
 # HTTP client
-reqwest = { version = "0.10.6", features = ["blocking", "json"] }
+reqwest = { version = "0.11.0", features = ["blocking", "json"] }
 
 # multipart/form-data support
-multipart = { version = "0.17.0", features = ["server"], default-features = false }
+multipart = { version = "0.17.1", features = ["server"], default-features = false }
 
 # WebSockets library
-ws = "0.9.1"
+ws = { version = "0.10.0", package = "parity-ws" }
 
 # MessagePack library
-rmpv = "0.4.4"
+rmpv = "0.4.7"
 
 # Concurrent hashmap implementation
 chashmap = "2.2.2"
 
 # A generic serialization/deserialization framework
-serde = "1.0.114"
-serde_derive = "1.0.114"
-serde_json = "1.0.56"
+serde = { version = "1.0.123", features = ["derive"] }
+serde_json = "1.0.62"
 
 # Logging
-log = "0.4.11"
+log = "0.4.14"
 fern = { version = "0.6.0", features = ["syslog-4"] }
 
 # A safe, extensible ORM and Query builder
@@ -60,22 +61,23 @@ diesel_migrations = "1.4.0"
 # Bundled SQLite
 libsqlite3-sys = { version = "0.18.0", features = ["bundled"], optional = true }
 
-# Crypto library
-ring = "0.16.15"
+# Crypto-related libraries
+rand = "0.8.3"
+ring = "0.16.20"
 
 # UUID generation
-uuid = { version = "0.8.1", features = ["v4"] }
+uuid = { version = "0.8.2", features = ["v4"] }
 
 # Date and time libraries
-chrono = "0.4.13"
-chrono-tz = "0.5.2"
-time = "0.2.16"
+chrono = "0.4.19"
+chrono-tz = "0.5.3"
+time = "0.2.25"
 
 # TOTP library
 oath = "0.10.2"
 
 # Data encoding library
-data-encoding = "2.2.1"
+data-encoding = "2.3.2"
 
 # JWT library
 jsonwebtoken = "7.2.0"
@@ -84,48 +86,51 @@ jsonwebtoken = "7.2.0"
 u2f = "0.2.0"
 
 # Yubico Library
-yubico = { version = "0.9.1", features = ["online-tokio"], default-features = false }
+yubico = { version = "0.9.2", features = ["online-tokio"], default-features = false }
 
 # A `dotenv` implementation for Rust
 dotenv = { version = "0.15.0", default-features = false }
 
 # Lazy initialization
-once_cell = "1.4.0"
+once_cell = "1.5.2"
 
 # Numerical libraries
-num-traits = "0.2.12"
-num-derive = "0.3.0"
+num-traits = "0.2.14"
+num-derive = "0.3.3"
 
 # Email libraries
-lettre = { version = "0.10.0-alpha.1", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname"], default-features = false }
-native-tls = "0.2.4"
+lettre = { version = "0.10.0-alpha.5", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
+newline-converter = "0.1.0"
 
 # Template library
-handlebars = { version = "3.3.0", features = ["dir_source"] }
+handlebars = { version = "3.5.2", features = ["dir_source"] }
 
 # For favicon extraction from main website
 soup = "0.5.0"
-regex = "1.3.9"
+regex = { version = "1.4.3", features = ["std", "perf"], default-features = false }
 data-url = "0.1.0"
 
 # Used by U2F, JWT and Postgres
-openssl = "0.10.30"
+openssl = "0.10.32"
 
 # URL encoding library
 percent-encoding = "2.1.0"
 # Punycode conversion
-idna = "0.2.0"
+idna = "0.2.1"
 
 # CLI argument parsing
-structopt = "0.3.15"
+structopt = "0.3.21"
 
 # Logging panics to logfile instead stderr only
-backtrace = "0.3.50"
+backtrace = "0.3.56"
 
+# Macro ident concatenation
+paste = "1.0.4"
+
 [patch.crates-io]
 # Use newest ring
-rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '1010f6a2a88fac899dec0cd2f642156908038a53' }
-rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '1010f6a2a88fac899dec0cd2f642156908038a53' }
+rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
+rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
 
 # For favicon extraction from main website
-data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '7f1bd6ce1c2fde599a757302a843a60e714c5f72' }
+data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = '540ede02d0771824c0c80ff9f57fe8eff38b1291' }
```
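The new `vendored_openssl` feature simply forwards to `openssl/vendored`, so it composes with any database feature at build time. A sketch of a build that statically links OpenSSL instead of using the system library:

```bash
# Build with SQLite support and a vendored, statically linked OpenSSL.
cargo build --release --features sqlite,vendored_openssl
```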
Dockerfile (symlink)

```diff
@@ -1 +1 @@
-docker/amd64/sqlite/Dockerfile
+docker/amd64/Dockerfile
```
README.md

```diff
@@ -21,7 +21,6 @@ Image is based on [Rust implementation of Bitwarden API](https://github.com/dani
 
 Basically full implementation of Bitwarden API is provided including:
 
-* Single user functionality
 * Organizations support
 * Attachments
 * Vault API support
@@ -59,3 +58,4 @@ If you prefer to chat, we're usually hanging around at [#bitwarden_rs:matrix.org
 Thanks for your contribution to the project!
 
 - [@ChonoN](https://github.com/ChonoN)
+- [@themightychris](https://github.com/themightychris)
```
azure-pipelines.yml

```diff
@@ -1,5 +1,5 @@
 pool:
-  vmImage: 'Ubuntu-16.04'
+  vmImage: 'Ubuntu-18.04'
 
 steps:
 - script: |
@@ -10,16 +10,13 @@ steps:
 
 - script: |
     sudo apt-get update
-    sudo apt-get install -y libmysql++-dev
-  displayName: Install libmysql
+    sudo apt-get install -y --no-install-recommends build-essential libmariadb-dev-compat libpq-dev libssl-dev pkgconf
+  displayName: 'Install build libraries.'
 
 - script: |
     rustc -Vv
     cargo -V
   displayName: Query rust and cargo versions
 
-- script : cargo test --features "sqlite"
-  displayName: 'Test project with sqlite backend'
-
-- script : cargo test --features "mysql"
-  displayName: 'Test project with mysql backend'
+- script : cargo test --features "sqlite,mysql,postgresql"
+  displayName: 'Test project with sqlite, mysql and postgresql backends'
```
build.rs (15 lines changed)

```diff
@@ -1,13 +1,14 @@
 use std::process::Command;
 use std::env;
 
 fn main() {
-    #[cfg(all(feature = "sqlite", feature = "mysql"))]
-    compile_error!("Can't enable both sqlite and mysql at the same time");
-    #[cfg(all(feature = "sqlite", feature = "postgresql"))]
-    compile_error!("Can't enable both sqlite and postgresql at the same time");
-    #[cfg(all(feature = "mysql", feature = "postgresql"))]
-    compile_error!("Can't enable both mysql and postgresql at the same time");
+    // This allow using #[cfg(sqlite)] instead of #[cfg(feature = "sqlite")], which helps when trying to add them through macros
+    #[cfg(feature = "sqlite")]
+    println!("cargo:rustc-cfg=sqlite");
+    #[cfg(feature = "mysql")]
+    println!("cargo:rustc-cfg=mysql");
+    #[cfg(feature = "postgresql")]
+    println!("cargo:rustc-cfg=postgresql");
 
     #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
     compile_error!("You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite");
```
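The rewritten build script re-exports each enabled database feature as a bare `cfg` flag, so downstream code can write `#[cfg(sqlite)]` instead of `#[cfg(feature = "sqlite")]`. A minimal sketch of what that enables; the functions are illustrative, not taken from the repository:

```rust
// Sketch: build.rs emits `cargo:rustc-cfg=sqlite` when the "sqlite" feature
// is enabled, so code can be gated on the short cfg name. The short form is
// much easier to construct inside macro-generated code, where concatenating
// the full `feature = "..."` attribute syntax is awkward.
#[cfg(sqlite)]
fn db_backend() -> &'static str {
    "sqlite"
}

#[cfg(not(sqlite))]
fn db_backend() -> &'static str {
    "other"
}

fn main() {
    println!("compiled against: {}", db_backend());
}
```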
docker/Dockerfile.buildx (new file, 33 lines)

```diff
@@ -0,0 +1,33 @@
+# The cross-built images have the build arch (`amd64`) embedded in the image
+# manifest, rather than the target arch. For example:
+#
+#   $ docker inspect bitwardenrs/server:latest-armv7 | jq -r '.[]|.Architecture'
+#   amd64
+#
+# Recent versions of Docker have started printing a warning when the image's
+# claimed arch doesn't match the host arch. For example:
+#
+#   WARNING: The requested image's platform (linux/amd64) does not match the
+#   detected host platform (linux/arm/v7) and no specific platform was requested
+#
+# The image still works fine, but the spurious warning creates confusion.
+#
+# Docker doesn't seem to provide a way to directly set the arch of an image
+# at build time. To resolve the build vs. target arch discrepancy, we use
+# Docker Buildx to build a new set of images with the correct target arch.
+#
+# Docker Buildx uses this Dockerfile to build an image for each requested
+# platform. Since the Dockerfile basically consists of a single `FROM`
+# instruction, we're effectively telling Buildx to build a platform-specific
+# image by simply copying the existing cross-built image and setting the
+# correct target arch as a side effect.
+#
+# References:
+#
+# - https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
+# - https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
+# - https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact
+#
+ARG LOCAL_REPO
+ARG DOCKER_TAG
+FROM ${LOCAL_REPO}:${DOCKER_TAG}-${TARGETARCH}${TARGETVARIANT}
```
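Nothing in this comparison shows how the file is invoked, but a Buildx build against it would look roughly like the following sketch; the repository name, tag, and platform are placeholders. With `--platform linux/arm/v7`, Buildx sets `TARGETARCH=arm` and `TARGETVARIANT=v7`, so the `FROM` resolves to the existing `latest-armv7` cross-built image:

```bash
# Hypothetical invocation: re-wrap an existing cross-built image so its
# manifest carries the correct target architecture.
docker buildx build \
  --platform linux/arm/v7 \
  --build-arg LOCAL_REPO=bitwardenrs/server \
  --build-arg DOCKER_TAG=latest \
  --tag bitwardenrs/server:latest \
  --load \
  -f docker/Dockerfile.buildx .
```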
@@ -1,68 +1,89 @@
|
|||||||
# This file was generated using a Jinja2 template.
|
# This file was generated using a Jinja2 template.
|
||||||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
|
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
|
||||||
|
|
||||||
{% set build_stage_base_image = "rust:1.40" %}
|
{% set build_stage_base_image = "rust:1.48" %}
|
||||||
{% if "alpine" in target_file %}
|
{% if "alpine" in target_file %}
|
||||||
{% set build_stage_base_image = "clux/muslrust:nightly-2020-03-09" %}
|
{% if "amd64" in target_file %}
|
||||||
{% set runtime_stage_base_image = "alpine:3.11" %}
|
{% set build_stage_base_image = "clux/muslrust:nightly-2021-01-25" %}
|
||||||
{% set package_arch_name = "" %}
|
{% set runtime_stage_base_image = "alpine:3.12" %}
|
||||||
|
{% set package_arch_target = "x86_64-unknown-linux-musl" %}
|
||||||
|
{% elif "armv7" in target_file %}
|
||||||
|
{% set build_stage_base_image = "messense/rust-musl-cross:armv7-musleabihf" %}
|
||||||
|
{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.12" %}
|
||||||
|
{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
|
||||||
|
{% endif %}
|
||||||
{% elif "amd64" in target_file %}
|
{% elif "amd64" in target_file %}
|
||||||
{% set runtime_stage_base_image = "debian:buster-slim" %}
|
{% set runtime_stage_base_image = "debian:buster-slim" %}
|
||||||
{% set package_arch_name = "" %}
|
{% elif "arm64" in target_file %}
|
||||||
{% elif "arm64v8" in target_file %}
|
|
||||||
{% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %}
|
{% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %}
|
||||||
{% set package_arch_name = "arm64" %}
|
{% set package_arch_name = "arm64" %}
|
||||||
{% elif "arm32v6" in target_file %}
|
{% set package_arch_target = "aarch64-unknown-linux-gnu" %}
|
||||||
|
{% set package_cross_compiler = "aarch64-linux-gnu" %}
|
||||||
|
{% elif "armv6" in target_file %}
|
||||||
{% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %}
|
{% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %}
|
||||||
{% set package_arch_name = "armel" %}
|
{% set package_arch_name = "armel" %}
|
||||||
{% elif "arm32v7" in target_file %}
|
{% set package_arch_target = "arm-unknown-linux-gnueabi" %}
|
||||||
|
{% set package_cross_compiler = "arm-linux-gnueabi" %}
|
||||||
|
{% elif "armv7" in target_file %}
|
||||||
{% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %}
|
{% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %}
|
||||||
{% set package_arch_name = "armhf" %}
|
{% set package_arch_name = "armhf" %}
|
||||||
|
{% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
|
||||||
|
{% set package_cross_compiler = "arm-linux-gnueabihf" %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% set package_arch_prefix = ":" + package_arch_name %}
|
{% if package_arch_name is defined %}
|
||||||
{% if package_arch_name == "" %}
|
{% set package_arch_prefix = ":" + package_arch_name %}
|
||||||
|
{% else %}
|
||||||
{% set package_arch_prefix = "" %}
|
{% set package_arch_prefix = "" %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
{% if package_arch_target is defined %}
|
||||||
|
{% set package_arch_target_param = " --target=" + package_arch_target %}
|
||||||
|
{% else %}
|
||||||
|
{% set package_arch_target_param = "" %}
|
||||||
|
{% endif %}
|
||||||
# Using multistage build:
|
# Using multistage build:
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
# https://docs.docker.com/develop/develop-images/multistage-build/
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
# https://whitfin.io/speeding-up-rust-docker-builds/
|
||||||
####################### VAULT BUILD IMAGE #######################
|
####################### VAULT BUILD IMAGE #######################
|
||||||
{% set vault_image_hash = "sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c" %}
|
{% set vault_version = "2.18.1b" %}
|
||||||
{% raw %}
|
{% set vault_image_digest = "sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb" %}
|
||||||
# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
|
# The web-vault digest specifies a particular web-vault build on Docker Hub.
|
||||||
# It can be viewed in multiple ways:
|
# Using the digest instead of the tag name provides better security,
|
||||||
# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
|
# as the digest of an image is immutable, whereas a tag name can later
|
||||||
# - From the console, with the following commands:
|
# be changed to point to a malicious image.
|
||||||
# docker pull bitwardenrs/web-vault:v2.15.1
|
|
||||||
# docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
|
|
||||||
#
|
#
|
||||||
# - To do the opposite, and get the tag from the hash, you can do:
|
# To verify the current digest for a given tag name:
|
||||||
# docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
|
# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
|
||||||
{% endraw %}
|
# click the tag name to view the digest of the image it currently points to.
|
||||||
FROM bitwardenrs/web-vault@{{ vault_image_hash }} as vault
|
# - From the command line:
|
||||||
|
# $ docker pull bitwardenrs/web-vault:v{{ vault_version }}
|
||||||
|
# $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" bitwardenrs/web-vault:v{{ vault_version }}
|
||||||
|
# [bitwardenrs/web-vault@{{ vault_image_digest }}]
|
||||||
|
#
|
||||||
|
# - Conversely, to get the tag name from the digest:
|
||||||
|
# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" bitwardenrs/web-vault@{{ vault_image_digest }}
|
||||||
|
# [bitwardenrs/web-vault:v{{ vault_version }}]
|
||||||
|
#
|
||||||
|
FROM bitwardenrs/web-vault@{{ vault_image_digest }} as vault
|
||||||
|
|
||||||
########################## BUILD IMAGE ##########################
|
########################## BUILD IMAGE ##########################
|
||||||
{% if "musl" in build_stage_base_image %}
|
|
||||||
# Musl build image for statically compiled binary
|
|
||||||
{% else %}
|
|
||||||
# We need to use the Rust build image, because
|
|
||||||
# we need the Rust compiler and Cargo tooling
|
|
||||||
{% endif %}
|
|
||||||
FROM {{ build_stage_base_image }} as build
|
FROM {{ build_stage_base_image }} as build
|
||||||
|
|
||||||
{% if "sqlite" in target_file %}
|
{% if "alpine" in target_file %}
|
||||||
# set sqlite as default for DB ARG for backward compatibility
|
{% if "amd64" in target_file %}
|
||||||
|
# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
|
||||||
|
ARG DB=sqlite,postgresql
|
||||||
|
{% set features = "sqlite,postgresql" %}
|
||||||
|
{% else %}
|
||||||
|
# Alpine-based ARM (musl) only supports sqlite during compile time.
|
||||||
ARG DB=sqlite
|
ARG DB=sqlite
|
||||||
|
{% set features = "sqlite" %}
|
||||||
{% elif "mysql" in target_file %}
|
{% endif %}
|
||||||
# set mysql backend
|
{% else %}
|
||||||
ARG DB=mysql
|
# Debian-based builds support multidb
|
||||||
|
ARG DB=sqlite,mysql,postgresql
|
||||||
{% elif "postgresql" in target_file %}
|
{% set features = "sqlite,mysql,postgresql" %}
|
||||||
# set postgresql backend
|
|
||||||
ARG DB=postgresql
|
|
||||||
|
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
# Build time options to avoid dpkg warnings and help with reproducible builds.
|
||||||
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
|
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
|
||||||
|
|
||||||
@@ -72,9 +93,9 @@ RUN rustup set profile minimal
|
|||||||
{% if "alpine" in target_file %}
|
{% if "alpine" in target_file %}
|
||||||
ENV USER "root"
|
ENV USER "root"
|
||||||
ENV RUSTFLAGS='-C link-arg=-s'
|
ENV RUSTFLAGS='-C link-arg=-s'
|
||||||
|
{% elif "arm" in target_file %}
|
||||||
{% elif "arm32" in target_file or "arm64" in target_file %}
|
|
||||||
# Install required build libs for {{ package_arch_name }} architecture.
|
# Install required build libs for {{ package_arch_name }} architecture.
|
||||||
|
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
|
||||||
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
|
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
|
||||||
/etc/apt/sources.list.d/deb-src.list \
|
/etc/apt/sources.list.d/deb-src.list \
|
||||||
&& dpkg --add-architecture {{ package_arch_name }} \
|
&& dpkg --add-architecture {{ package_arch_name }} \
|
||||||
@@ -82,65 +103,34 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
     && apt-get install -y \
         --no-install-recommends \
         libssl-dev{{ package_arch_prefix }} \
-        libc6-dev{{ package_arch_prefix }}
+        libc6-dev{{ package_arch_prefix }} \
+        libpq5{{ package_arch_prefix }} \
+        libpq-dev \
+        libmariadb-dev{{ package_arch_prefix }} \
+        libmariadb-dev-compat{{ package_arch_prefix }}
+
+RUN apt-get update \
+    && apt-get install -y \
+        --no-install-recommends \
+        gcc-{{ package_cross_compiler }} \
+    && mkdir -p ~/.cargo \
+    && echo '[target.{{ package_arch_target }}]' >> ~/.cargo/config \
+    && echo 'linker = "{{ package_cross_compiler }}-gcc"' >> ~/.cargo/config \
+    && echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> ~/.cargo/config
+
+ENV CARGO_HOME "/root/.cargo"
+ENV USER "root"
 {% endif -%}
-{% if "arm64v8" in target_file %}
-RUN apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        gcc-aarch64-linux-gnu \
-    && mkdir -p ~/.cargo \
-    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
-    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
-
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
-
-{% elif "arm32v6" in target_file %}
-RUN apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        gcc-arm-linux-gnueabi \
-    && mkdir -p ~/.cargo \
-    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
-    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config
-
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
-
-{% elif "arm32v7" in target_file %}
-RUN apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        gcc-arm-linux-gnueabihf \
-    && mkdir -p ~/.cargo \
-    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
-    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config
-
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
-
-{% endif %}
-{% if "mysql" in target_file %}
-# Install MySQL package
+{% if "amd64" in target_file and "alpine" not in target_file %}
+# Install DB packages
 RUN apt-get update && apt-get install -y \
     --no-install-recommends \
-{% if "musl" in build_stage_base_image %}
-        libmysqlclient-dev{{ package_arch_prefix }} \
-{% else %}
         libmariadb-dev{{ package_arch_prefix }} \
-{% endif %}
-    && rm -rf /var/lib/apt/lists/*
-
-{% elif "postgresql" in target_file %}
-# Install PostgreSQL package
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
         libpq-dev{{ package_arch_prefix }} \
     && rm -rf /var/lib/apt/lists/*
 
 {% endif %}
 
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app
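
Rendered for a concrete target, the three `echo` lines above amount to a small per-target Cargo configuration; with the arm64v8 values that appear in the arm64 Dockerfile diff further down, `/root/.cargo/config` would come out roughly as:

    [target.aarch64-unknown-linux-gnu]
    linker = "aarch64-linux-gnu-gcc"
    rustflags = ["-L/usr/lib/aarch64-linux-gnu"]

Cargo only consults this section when building for that target triple, so native host builds are unaffected.
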
@@ -150,39 +140,38 @@ COPY ./Cargo.* ./
 COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs
 
-{% if "arm64v8" in target_file %}
-ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
+{% if "alpine" not in target_file %}
+{% if "arm" in target_file %}
+# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
+# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
+# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
+# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the {{ package_arch_prefix }} version.
+# What we can do is a force install, because nothing important is overlapping each other.
+RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
+    apt-get download libmariadb-dev-compat:amd64 && \
+    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
+    rm -rvf ./libmariadb-dev-compat*.deb
+
+# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
+# The libpq5{{ package_arch_prefix }} package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
+# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
+# Without this specific file the ld command will fail and compilation fails with it.
+RUN ln -sfnr /usr/lib/{{ package_cross_compiler }}/libpq.so.5 /usr/lib/{{ package_cross_compiler }}/libpq.so
+
+ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc"
 ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
-ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
-{% elif "arm32v6" in target_file %}
-ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
-ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
-{% elif "arm32v7" in target_file %}
-ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
-ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
-{% endif -%}
-
-{% if "alpine" in target_file %}
-RUN rustup target add x86_64-unknown-linux-musl
-
-{% elif "arm64v8" in target_file %}
-RUN rustup target add aarch64-unknown-linux-gnu
-
-{% elif "arm32v6" in target_file %}
-RUN rustup target add arm-unknown-linux-gnueabi
-
-{% elif "arm32v7" in target_file %}
-RUN rustup target add armv7-unknown-linux-gnueabihf
+ENV OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}"
+ENV OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
+{% endif -%}
 {% endif %}
+
+{% if package_arch_target is defined %}
+RUN rustup target add {{ package_arch_target }}
+{% endif %}
 
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
+RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
 RUN find . -not -path "./target*" -delete
 
 # Copies the complete project
@@ -194,14 +183,11 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-{% if "amd64" in target_file %}
-RUN cargo build --features ${DB} --release
-{% elif "arm64v8" in target_file %}
-RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
-{% elif "arm32v6" in target_file %}
-RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
-{% elif "arm32v7" in target_file %}
-RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
+RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
+{% if "alpine" in target_file %}
+{% if "armv7" in target_file %}
+RUN musl-strip target/{{ package_arch_target }}/release/bitwarden_rs
+{% endif %}
 {% endif %}
 
 ######################## RUNTIME IMAGE ########################
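
The new `{{ package_arch_target_param }}` variable presumably renders to a leading-space ` --target=<triple>` argument when a cross-compile target is defined and to an empty string otherwise, so a single template line now covers every arch. The generated Dockerfiles below bear this out:

    RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu   # arm64v8 build
    RUN cargo build --features ${DB} --release                                      # plain amd64 build
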
@@ -225,11 +211,14 @@ RUN [ "cross-build-start" ]
 RUN apk add --no-cache \
         openssl \
         curl \
-{% if "sqlite" in target_file %}
+        dumb-init \
+{% if "sqlite" in features %}
         sqlite \
-{% elif "mysql" in target_file %}
+{% endif %}
+{% if "mysql" in features %}
         mariadb-connector-c \
-{% elif "postgresql" in target_file %}
+{% endif %}
+{% if "postgresql" in features %}
         postgresql-libs \
 {% endif %}
         ca-certificates
@@ -239,13 +228,10 @@ RUN apt-get update && apt-get install -y \
     openssl \
     ca-certificates \
     curl \
-{% if "sqlite" in target_file %}
+    dumb-init \
     sqlite3 \
-{% elif "mysql" in target_file %}
-    libmariadbclient-dev \
-{% elif "postgresql" in target_file %}
+    libmariadb-dev-compat \
     libpq5 \
-{% endif %}
     && rm -rf /var/lib/apt/lists/*
 {% endif %}
 
@@ -263,16 +249,10 @@ EXPOSE 3012
 # and the binary from the "build" stage to the current stage
 COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
-{% if "alpine" in target_file %}
-COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
-{% elif "arm64v8" in target_file %}
-COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
-{% elif "arm32v6" in target_file %}
-COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
-{% elif "arm32v7" in target_file %}
-COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
+{% if package_arch_target is defined %}
+COPY --from=build /app/target/{{ package_arch_target }}/release/bitwarden_rs .
 {% else %}
-COPY --from=build app/target/release/bitwarden_rs .
+COPY --from=build /app/target/release/bitwarden_rs .
 {% endif %}
 
 COPY docker/healthcheck.sh /healthcheck.sh
@@ -282,4 +262,5 @@ HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
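
A note on the new ENTRYPOINT: Docker concatenates `ENTRYPOINT` and `CMD`, so with the lines above every generated image now effectively starts as

    /usr/bin/dumb-init -- /start.sh

dumb-init then runs as PID 1, forwarding signals to the start script and reaping orphaned child processes, which a shell script alone does not reliably do; this matches the `dumb-init` package being added to both the apk and apt runtime package lists.
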
@@ -1,29 +1,34 @@
 # This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
 # Using multistage build:
 #   https://docs.docker.com/develop/develop-images/multistage-build/
 #   https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
-# It can be viewed in multiple ways:
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
-# - From the console, with the following commands:
-#     docker pull bitwardenrs/web-vault:v2.15.1
-#     docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
 #
-# - To do the opposite, and get the tag from the hash, you can do:
-#     docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull bitwardenrs/web-vault:v2.18.1b
+#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.18.1b
+#     [bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb
+#     [bitwardenrs/web-vault:v2.18.1b]
+#
+FROM bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb as vault
 
 ########################## BUILD IMAGE ##########################
-# We need to use the Rust build image, because
-# we need the Rust compiler and Cargo tooling
-FROM rust:1.40 as build
+FROM rust:1.48 as build
 
-# set postgresql backend
-ARG DB=postgresql
+# Debian-based builds support multidb
+ARG DB=sqlite,mysql,postgresql
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -31,9 +36,10 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
 # Don't download rust docs
 RUN rustup set profile minimal
 
-# Install PostgreSQL package
+# Install DB packages
 RUN apt-get update && apt-get install -y \
     --no-install-recommends \
+    libmariadb-dev \
     libpq-dev \
     && rm -rf /var/lib/apt/lists/*
 
@@ -46,6 +52,7 @@ COPY ./Cargo.* ./
 COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs
 
+
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
@@ -78,6 +85,9 @@ RUN apt-get update && apt-get install -y \
     openssl \
     ca-certificates \
     curl \
+    dumb-init \
+    sqlite3 \
+    libmariadb-dev-compat \
     libpq5 \
     && rm -rf /var/lib/apt/lists/*
 
@@ -90,7 +100,7 @@ EXPOSE 3012
 # and the binary from the "build" stage to the current stage
 COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build app/target/release/bitwarden_rs .
+COPY --from=build /app/target/release/bitwarden_rs .
 
 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh
@@ -99,4 +109,5 @@ HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
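
Since `ARG DB` feeds straight into `--features ${DB}` and Cargo accepts a comma-separated feature list, the Debian-based amd64 image is now compiled with all three backends in a single pass, equivalent to:

    cargo build --features sqlite,mysql,postgresql --release
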
@@ -1,28 +1,34 @@
 # This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
 # Using multistage build:
 #   https://docs.docker.com/develop/develop-images/multistage-build/
 #   https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
-# It can be viewed in multiple ways:
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
-# - From the console, with the following commands:
-#     docker pull bitwardenrs/web-vault:v2.15.1
-#     docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
 #
-# - To do the opposite, and get the tag from the hash, you can do:
-#     docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull bitwardenrs/web-vault:v2.18.1b
+#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.18.1b
+#     [bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb
+#     [bitwardenrs/web-vault:v2.18.1b]
+#
+FROM bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb as vault
 
 ########################## BUILD IMAGE ##########################
-# Musl build image for statically compiled binary
-FROM clux/muslrust:nightly-2020-03-09 as build
+FROM clux/muslrust:nightly-2021-01-25 as build
 
-# set postgresql backend
-ARG DB=postgresql
+# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
+ARG DB=sqlite,postgresql
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -33,12 +39,6 @@ RUN rustup set profile minimal
 ENV USER "root"
 ENV RUSTFLAGS='-C link-arg=-s'
 
-# Install PostgreSQL package
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    libpq-dev \
-    && rm -rf /var/lib/apt/lists/*
-
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app
@@ -53,7 +53,7 @@ RUN rustup target add x86_64-unknown-linux-musl
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
+RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
 RUN find . -not -path "./target*" -delete
 
 # Copies the complete project
@@ -65,12 +65,12 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-RUN cargo build --features ${DB} --release
+RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
 
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM alpine:3.11
+FROM alpine:3.12
 
 ENV ROCKET_ENV "staging"
 ENV ROCKET_PORT=80
@@ -81,6 +81,8 @@ ENV SSL_CERT_DIR=/etc/ssl/certs
 RUN apk add --no-cache \
         openssl \
         curl \
+        dumb-init \
+        sqlite \
         postgresql-libs \
         ca-certificates
 
@@ -102,4 +104,5 @@ HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
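
Because this variant builds with `clux/muslrust` for `x86_64-unknown-linux-musl`, the resulting binary is statically linked against musl, which is why the runtime stage can stay a bare Alpine image plus runtime data libraries (`sqlite`, `postgresql-libs`) and certificates. A quick way to confirm the result, assuming the `file` utility is available where you inspect the binary:

    file bitwarden_rs   # should report a statically linked x86-64 ELF executable
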
@@ -1,102 +0,0 @@
-# This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
-
-# Using multistage build:
-#   https://docs.docker.com/develop/develop-images/multistage-build/
-#   https://whitfin.io/speeding-up-rust-docker-builds/
-####################### VAULT BUILD IMAGE #######################
-
-# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
-# It can be viewed in multiple ways:
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
-# - From the console, with the following commands:
-#     docker pull bitwardenrs/web-vault:v2.15.1
-#     docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
-#
-# - To do the opposite, and get the tag from the hash, you can do:
-#     docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
-
-########################## BUILD IMAGE ##########################
-# We need to use the Rust build image, because
-# we need the Rust compiler and Cargo tooling
-FROM rust:1.40 as build
-
-# set mysql backend
-ARG DB=mysql
-
-# Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
-
-# Don't download rust docs
-RUN rustup set profile minimal
-
-# Install MySQL package
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    libmariadb-dev \
-    && rm -rf /var/lib/apt/lists/*
-
-# Creates a dummy project used to grab dependencies
-RUN USER=root cargo new --bin /app
-WORKDIR /app
-
-# Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain ./rust-toolchain
-COPY ./build.rs ./build.rs
-
-# Builds your dependencies and removes the
-# dummy project, except the target folder
-# This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
-RUN find . -not -path "./target*" -delete
-
-# Copies the complete project
-# To avoid copying unneeded files, use .dockerignore
-COPY . .
-
-# Make sure that we actually build the project
-RUN touch src/main.rs
-
-# Builds again, this time it'll just be
-# your actual source files being built
-RUN cargo build --features ${DB} --release
-
-######################## RUNTIME IMAGE ########################
-# Create a new stage with a minimal image
-# because we already have a binary built
-FROM debian:buster-slim
-
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    openssl \
-    ca-certificates \
-    curl \
-    libmariadbclient-dev \
-    && rm -rf /var/lib/apt/lists/*
-
-RUN mkdir /data
-VOLUME /data
-EXPOSE 80
-EXPOSE 3012
-
-# Copies the files from the context (Rocket.toml file and web-vault)
-# and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
-COPY --from=vault /web-vault ./web-vault
-COPY --from=build app/target/release/bitwarden_rs .
-
-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
-
-HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
-
-# Configures the startup!
-WORKDIR /
-CMD ["/start.sh"]
@@ -1,105 +0,0 @@
-# This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
-
-# Using multistage build:
-#   https://docs.docker.com/develop/develop-images/multistage-build/
-#   https://whitfin.io/speeding-up-rust-docker-builds/
-####################### VAULT BUILD IMAGE #######################
-
-# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
-# It can be viewed in multiple ways:
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
-# - From the console, with the following commands:
-#     docker pull bitwardenrs/web-vault:v2.15.1
-#     docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
-#
-# - To do the opposite, and get the tag from the hash, you can do:
-#     docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
-
-########################## BUILD IMAGE ##########################
-# Musl build image for statically compiled binary
-FROM clux/muslrust:nightly-2020-03-09 as build
-
-# set mysql backend
-ARG DB=mysql
-
-# Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
-
-# Don't download rust docs
-RUN rustup set profile minimal
-
-ENV USER "root"
-ENV RUSTFLAGS='-C link-arg=-s'
-
-# Install MySQL package
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    libmysqlclient-dev \
-    && rm -rf /var/lib/apt/lists/*
-
-# Creates a dummy project used to grab dependencies
-RUN USER=root cargo new --bin /app
-WORKDIR /app
-
-# Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain ./rust-toolchain
-COPY ./build.rs ./build.rs
-
-RUN rustup target add x86_64-unknown-linux-musl
-
-# Builds your dependencies and removes the
-# dummy project, except the target folder
-# This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
-RUN find . -not -path "./target*" -delete
-
-# Copies the complete project
-# To avoid copying unneeded files, use .dockerignore
-COPY . .
-
-# Make sure that we actually build the project
-RUN touch src/main.rs
-
-# Builds again, this time it'll just be
-# your actual source files being built
-RUN cargo build --features ${DB} --release
-
-######################## RUNTIME IMAGE ########################
-# Create a new stage with a minimal image
-# because we already have a binary built
-FROM alpine:3.11
-
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-ENV SSL_CERT_DIR=/etc/ssl/certs
-
-# Install needed libraries
-RUN apk add --no-cache \
-        openssl \
-        curl \
-        mariadb-connector-c \
-        ca-certificates
-
-RUN mkdir /data
-VOLUME /data
-EXPOSE 80
-EXPOSE 3012
-
-# Copies the files from the context (Rocket.toml file and web-vault)
-# and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
-COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
-
-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
-
-HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
-
-# Configures the startup!
-WORKDIR /
-CMD ["/start.sh"]
@@ -1,96 +0,0 @@
-# This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
-
-# Using multistage build:
-#   https://docs.docker.com/develop/develop-images/multistage-build/
-#   https://whitfin.io/speeding-up-rust-docker-builds/
-####################### VAULT BUILD IMAGE #######################
-
-# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
-# It can be viewed in multiple ways:
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
-# - From the console, with the following commands:
-#     docker pull bitwardenrs/web-vault:v2.15.1
-#     docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
-#
-# - To do the opposite, and get the tag from the hash, you can do:
-#     docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
-
-########################## BUILD IMAGE ##########################
-# We need to use the Rust build image, because
-# we need the Rust compiler and Cargo tooling
-FROM rust:1.40 as build
-
-# set sqlite as default for DB ARG for backward compatibility
-ARG DB=sqlite
-
-# Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
-
-# Don't download rust docs
-RUN rustup set profile minimal
-
-# Creates a dummy project used to grab dependencies
-RUN USER=root cargo new --bin /app
-WORKDIR /app
-
-# Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain ./rust-toolchain
-COPY ./build.rs ./build.rs
-
-# Builds your dependencies and removes the
-# dummy project, except the target folder
-# This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
-RUN find . -not -path "./target*" -delete
-
-# Copies the complete project
-# To avoid copying unneeded files, use .dockerignore
-COPY . .
-
-# Make sure that we actually build the project
-RUN touch src/main.rs
-
-# Builds again, this time it'll just be
-# your actual source files being built
-RUN cargo build --features ${DB} --release
-
-######################## RUNTIME IMAGE ########################
-# Create a new stage with a minimal image
-# because we already have a binary built
-FROM debian:buster-slim
-
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    openssl \
-    ca-certificates \
-    curl \
-    sqlite3 \
-    && rm -rf /var/lib/apt/lists/*
-
-RUN mkdir /data
-VOLUME /data
-EXPOSE 80
-EXPOSE 3012
-
-# Copies the files from the context (Rocket.toml file and web-vault)
-# and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
-COPY --from=vault /web-vault ./web-vault
-COPY --from=build app/target/release/bitwarden_rs .
-
-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
-
-HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
-
-# Configures the startup!
-WORKDIR /
-CMD ["/start.sh"]
@@ -1,134 +0,0 @@
-# This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
-
-# Using multistage build:
-#   https://docs.docker.com/develop/develop-images/multistage-build/
-#   https://whitfin.io/speeding-up-rust-docker-builds/
-####################### VAULT BUILD IMAGE #######################
-
-# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
-# It can be viewed in multiple ways:
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
-# - From the console, with the following commands:
-#     docker pull bitwardenrs/web-vault:v2.15.1
-#     docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
-#
-# - To do the opposite, and get the tag from the hash, you can do:
-#     docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
-
-########################## BUILD IMAGE ##########################
-# We need to use the Rust build image, because
-# we need the Rust compiler and Cargo tooling
-FROM rust:1.40 as build
-
-# set mysql backend
-ARG DB=mysql
-
-# Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
-
-# Don't download rust docs
-RUN rustup set profile minimal
-
-# Install required build libs for armel architecture.
-RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
-        /etc/apt/sources.list.d/deb-src.list \
-    && dpkg --add-architecture armel \
-    && apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        libssl-dev:armel \
-        libc6-dev:armel
-
-RUN apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        gcc-arm-linux-gnueabi \
-    && mkdir -p ~/.cargo \
-    && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
-    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config
-
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
-
-# Install MySQL package
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    libmariadb-dev:armel \
-    && rm -rf /var/lib/apt/lists/*
-
-# Creates a dummy project used to grab dependencies
-RUN USER=root cargo new --bin /app
-WORKDIR /app
-
-# Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain ./rust-toolchain
-COPY ./build.rs ./build.rs
-
-ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
-ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
-RUN rustup target add arm-unknown-linux-gnueabi
-
-# Builds your dependencies and removes the
-# dummy project, except the target folder
-# This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
-RUN find . -not -path "./target*" -delete
-
-# Copies the complete project
-# To avoid copying unneeded files, use .dockerignore
-COPY . .
-
-# Make sure that we actually build the project
-RUN touch src/main.rs
-
-# Builds again, this time it'll just be
-# your actual source files being built
-RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
-
-######################## RUNTIME IMAGE ########################
-# Create a new stage with a minimal image
-# because we already have a binary built
-FROM balenalib/rpi-debian:buster
-
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-
-RUN [ "cross-build-start" ]
-
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    openssl \
-    ca-certificates \
-    curl \
-    libmariadbclient-dev \
-    && rm -rf /var/lib/apt/lists/*
-
-RUN mkdir /data
-
-RUN [ "cross-build-end" ]
-
-VOLUME /data
-EXPOSE 80
-EXPOSE 3012
-
-# Copies the files from the context (Rocket.toml file and web-vault)
-# and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
-COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/bitwarden_rs .
-
-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
-
-HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
-
-# Configures the startup!
-WORKDIR /
-CMD ["/start.sh"]
@@ -1,133 +0,0 @@
-# This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
-
-# Using multistage build:
-#   https://docs.docker.com/develop/develop-images/multistage-build/
-#   https://whitfin.io/speeding-up-rust-docker-builds/
-####################### VAULT BUILD IMAGE #######################
-
-# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
-# It can be viewed in multiple ways:
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
-# - From the console, with the following commands:
-#     docker pull bitwardenrs/web-vault:v2.15.1
-#     docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
-#
-# - To do the opposite, and get the tag from the hash, you can do:
-#     docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
-
-########################## BUILD IMAGE ##########################
-# We need to use the Rust build image, because
-# we need the Rust compiler and Cargo tooling
-FROM rust:1.40 as build
-
-# set mysql backend
-ARG DB=mysql
-
-# Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
-
-# Don't download rust docs
-RUN rustup set profile minimal
-
-# Install required build libs for armhf architecture.
-RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
-        /etc/apt/sources.list.d/deb-src.list \
-    && dpkg --add-architecture armhf \
-    && apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        libssl-dev:armhf \
-        libc6-dev:armhf
-
-RUN apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        gcc-arm-linux-gnueabihf \
-    && mkdir -p ~/.cargo \
-    && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
-    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config
-
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
-
-# Install MySQL package
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    libmariadb-dev:armhf \
-    && rm -rf /var/lib/apt/lists/*
-
-# Creates a dummy project used to grab dependencies
-RUN USER=root cargo new --bin /app
-WORKDIR /app
-
-# Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain ./rust-toolchain
-COPY ./build.rs ./build.rs
-
-ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
-ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
-RUN rustup target add armv7-unknown-linux-gnueabihf
-# Builds your dependencies and removes the
-# dummy project, except the target folder
-# This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
-RUN find . -not -path "./target*" -delete
-
-# Copies the complete project
-# To avoid copying unneeded files, use .dockerignore
-COPY . .
-
-# Make sure that we actually build the project
-RUN touch src/main.rs
-
-# Builds again, this time it'll just be
-# your actual source files being built
-RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
-
-######################## RUNTIME IMAGE ########################
-# Create a new stage with a minimal image
-# because we already have a binary built
-FROM balenalib/armv7hf-debian:buster
-
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-
-RUN [ "cross-build-start" ]
-
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    openssl \
-    ca-certificates \
-    curl \
-    libmariadbclient-dev \
-    && rm -rf /var/lib/apt/lists/*
-
-RUN mkdir /data
-
-RUN [ "cross-build-end" ]
-
-VOLUME /data
-EXPOSE 80
-EXPOSE 3012
-
-# Copies the files from the context (Rocket.toml file and web-vault)
-# and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
-COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/bitwarden_rs .
-
-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
-
-HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
-
-# Configures the startup!
-WORKDIR /
-CMD ["/start.sh"]
@@ -1,29 +1,34 @@
 # This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
 # Using multistage build:
 #   https://docs.docker.com/develop/develop-images/multistage-build/
 #   https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
-# It can be viewed in multiple ways:
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
-# - From the console, with the following commands:
-#     docker pull bitwardenrs/web-vault:v2.15.1
-#     docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
 #
-# - To do the opposite, and get the tag from the hash, you can do:
-#     docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull bitwardenrs/web-vault:v2.18.1b
+#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.18.1b
+#     [bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb
+#     [bitwardenrs/web-vault:v2.18.1b]
+#
+FROM bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb as vault
 
 ########################## BUILD IMAGE ##########################
-# We need to use the Rust build image, because
-# we need the Rust compiler and Cargo tooling
-FROM rust:1.40 as build
+FROM rust:1.48 as build
 
-# set sqlite as default for DB ARG for backward compatibility
-ARG DB=sqlite
+# Debian-based builds support multidb
+ARG DB=sqlite,mysql,postgresql
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -32,6 +37,7 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
 RUN rustup set profile minimal
 
 # Install required build libs for arm64 architecture.
+# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
 RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
         /etc/apt/sources.list.d/deb-src.list \
     && dpkg --add-architecture arm64 \
@@ -39,7 +45,11 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
     && apt-get install -y \
         --no-install-recommends \
         libssl-dev:arm64 \
-        libc6-dev:arm64
+        libc6-dev:arm64 \
+        libpq5:arm64 \
+        libpq-dev \
+        libmariadb-dev:arm64 \
+        libmariadb-dev-compat:arm64
 
 RUN apt-get update \
     && apt-get install -y \
@@ -47,7 +57,8 @@ RUN apt-get update \
         gcc-aarch64-linux-gnu \
     && mkdir -p ~/.cargo \
     && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
-    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
+    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config \
+    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> ~/.cargo/config
 
 ENV CARGO_HOME "/root/.cargo"
 ENV USER "root"
@@ -61,6 +72,22 @@ COPY ./Cargo.* ./
 COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs
 
+# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
+# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
+# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
+# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :arm64 version.
+# What we can do is a force install, because nothing important is overlapping each other.
+RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
+    apt-get download libmariadb-dev-compat:amd64 && \
+    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
+    rm -rvf ./libmariadb-dev-compat*.deb
+
+# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
+# The libpq5:arm64 package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
+# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
+# Without this specific file the ld command will fail and compilation fails with it.
+RUN ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so
+
 ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
 ENV CROSS_COMPILE="1"
 ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
@@ -70,7 +97,7 @@ RUN rustup target add aarch64-unknown-linux-gnu
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
+RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
 RUN find . -not -path "./target*" -delete
 
 # Copies the complete project
@@ -101,7 +128,10 @@ RUN apt-get update && apt-get install -y \
     openssl \
     ca-certificates \
     curl \
+    dumb-init \
     sqlite3 \
+    libmariadb-dev-compat \
+    libpq5 \
     && rm -rf /var/lib/apt/lists/*
 
 RUN mkdir /data
@@ -125,4 +155,5 @@ HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
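
Why the `ln -sfnr` step above is needed, in short: the linker resolves `-lpq` by searching for a file literally named `libpq.so`, but the `libpq5:arm64` package only ships the versioned soname, so without the symlink the link step fails; the added `rustflags = ["-L/usr/lib/aarch64-linux-gnu"]` then puts that directory on the linker search path. A sketch of the resulting state inside the build stage:

    /usr/lib/aarch64-linux-gnu/libpq.so.5   # shipped by libpq5:arm64
    /usr/lib/aarch64-linux-gnu/libpq.so     # created by: ln -sfnr libpq.so.5 libpq.so
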
@@ -1,134 +0,0 @@
-# This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
-
-# Using multistage build:
-#   https://docs.docker.com/develop/develop-images/multistage-build/
-#   https://whitfin.io/speeding-up-rust-docker-builds/
-####################### VAULT BUILD IMAGE #######################
-
-# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
-# It can be viewed in multiple ways:
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
-# - From the console, with the following commands:
-#     docker pull bitwardenrs/web-vault:v2.15.1
-#     docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
-#
-# - To do the opposite, and get the tag from the hash, you can do:
-#     docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
-
-########################## BUILD IMAGE ##########################
-# We need to use the Rust build image, because
-# we need the Rust compiler and Cargo tooling
-FROM rust:1.40 as build
-
-# set mysql backend
-ARG DB=mysql
-
-# Build time options to avoid dpkg warnings and help with reproducible builds.
-ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
-
-# Don't download rust docs
-RUN rustup set profile minimal
-
-# Install required build libs for arm64 architecture.
-RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
-        /etc/apt/sources.list.d/deb-src.list \
-    && dpkg --add-architecture arm64 \
-    && apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        libssl-dev:arm64 \
-        libc6-dev:arm64
-
-RUN apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        gcc-aarch64-linux-gnu \
-    && mkdir -p ~/.cargo \
-    && echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
-    && echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config
-
-ENV CARGO_HOME "/root/.cargo"
-ENV USER "root"
-
-# Install MySQL package
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    libmariadb-dev:arm64 \
-    && rm -rf /var/lib/apt/lists/*
-
-# Creates a dummy project used to grab dependencies
-RUN USER=root cargo new --bin /app
-WORKDIR /app
-
-# Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain ./rust-toolchain
-COPY ./build.rs ./build.rs
-
-ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
-ENV CROSS_COMPILE="1"
-ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
-ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
-RUN rustup target add aarch64-unknown-linux-gnu
-
-# Builds your dependencies and removes the
-# dummy project, except the target folder
-# This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
-RUN find . -not -path "./target*" -delete
-
-# Copies the complete project
-# To avoid copying unneeded files, use .dockerignore
-COPY . .
-
-# Make sure that we actually build the project
-RUN touch src/main.rs
-
-# Builds again, this time it'll just be
-# your actual source files being built
-RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
-
-######################## RUNTIME IMAGE ########################
-# Create a new stage with a minimal image
-# because we already have a binary built
-FROM balenalib/aarch64-debian:buster
-
-ENV ROCKET_ENV "staging"
-ENV ROCKET_PORT=80
-ENV ROCKET_WORKERS=10
-
-RUN [ "cross-build-start" ]
-
-# Install needed libraries
-RUN apt-get update && apt-get install -y \
-    --no-install-recommends \
-    openssl \
-    ca-certificates \
-    curl \
-    libmariadbclient-dev \
-    && rm -rf /var/lib/apt/lists/*
-
-RUN mkdir /data
-
-RUN [ "cross-build-end" ]
-
-VOLUME /data
-EXPOSE 80
-EXPOSE 3012
-
-# Copies the files from the context (Rocket.toml file and web-vault)
-# and the binary from the "build" stage to the current stage
-COPY Rocket.toml .
-COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/bitwarden_rs .
-
-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
-
-HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
-
-# Configures the startup!
-WORKDIR /
-CMD ["/start.sh"]
@@ -1,29 +1,34 @@
 # This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
 # Using multistage build:
 #   https://docs.docker.com/develop/develop-images/multistage-build/
 #   https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
-# It can be viewed in multiple ways:
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
-# - From the console, with the following commands:
-#     docker pull bitwardenrs/web-vault:v2.15.1
-#     docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
 #
-# - To do the opposite, and get the tag from the hash, you can do:
-#     docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull bitwardenrs/web-vault:v2.18.1b
+#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.18.1b
+#     [bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb
+#     [bitwardenrs/web-vault:v2.18.1b]
+#
+FROM bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb as vault
 
 ########################## BUILD IMAGE ##########################
-# We need to use the Rust build image, because
-# we need the Rust compiler and Cargo tooling
-FROM rust:1.40 as build
+FROM rust:1.48 as build
 
-# set sqlite as default for DB ARG for backward compatibility
-ARG DB=sqlite
+# Debian-based builds support multidb
+ARG DB=sqlite,mysql,postgresql
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -32,6 +37,7 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
 RUN rustup set profile minimal
 
 # Install required build libs for armel architecture.
+# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
 RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
         /etc/apt/sources.list.d/deb-src.list \
     && dpkg --add-architecture armel \
@@ -39,7 +45,11 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
     && apt-get install -y \
         --no-install-recommends \
         libssl-dev:armel \
-        libc6-dev:armel
+        libc6-dev:armel \
+        libpq5:armel \
+        libpq-dev \
+        libmariadb-dev:armel \
+        libmariadb-dev-compat:armel
 
 RUN apt-get update \
     && apt-get install -y \
@@ -47,7 +57,8 @@ RUN apt-get update \
         gcc-arm-linux-gnueabi \
     && mkdir -p ~/.cargo \
     && echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
-    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config
+    && echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config \
+    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> ~/.cargo/config
 
 ENV CARGO_HOME "/root/.cargo"
 ENV USER "root"
@@ -61,6 +72,22 @@ COPY ./Cargo.* ./
 COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs
 
+# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
+# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
+# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
+# We also need libmariadb-dev-compat:amd64, but it cannot be installed together with the :armel version.
+# What we can do is a force install, because nothing important overlaps.
+RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
+    apt-get download libmariadb-dev-compat:amd64 && \
+    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
+    rm -rvf ./libmariadb-dev-compat*.deb
+
+# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
+# The libpq5:armel package does not seem to provide a symlink to libpq.so.5 with the name libpq.so.
+# That symlink is only provided by the libpq-dev package, which can't be installed for both architectures at the same time.
+# Without this specific file the ld command will fail, and compilation fails with it.
+RUN ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so
+
 ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
 ENV CROSS_COMPILE="1"
 ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
@@ -70,7 +97,7 @@ RUN rustup target add arm-unknown-linux-gnueabi
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
+RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
 RUN find . -not -path "./target*" -delete
 
 # Copies the complete project
@@ -101,7 +128,10 @@ RUN apt-get update && apt-get install -y \
     openssl \
     ca-certificates \
    curl \
+    dumb-init \
     sqlite3 \
+    libmariadb-dev-compat \
+    libpq5 \
     && rm -rf /var/lib/apt/lists/*
 
 RUN mkdir /data
@@ -125,4 +155,5 @@ HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
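The digest/tag translation described in the new header comments is scriptable. A small sketch built from the commands shown in those comments plus a Go-template index; the tag name is the one from the comments:

# Pin a mutable tag to its immutable digest, suitable for a FROM line.
tag="bitwardenrs/web-vault:v2.18.1b"
docker pull "${tag}"
digest="$(docker image inspect --format '{{index .RepoDigests 0}}' "${tag}")"
echo "FROM ${digest} as vault"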
@@ -1,29 +1,34 @@
 # This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
 # Using multistage build:
 #   https://docs.docker.com/develop/develop-images/multistage-build/
 #   https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
-# It can be viewed in multiple ways:
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
-# - From the console, with the following commands:
-#     docker pull bitwardenrs/web-vault:v2.15.1
-#     docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
 #
-# - To do the opposite, and get the tag from the hash, you can do:
-#     docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull bitwardenrs/web-vault:v2.18.1b
+#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.18.1b
+#     [bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb
+#     [bitwardenrs/web-vault:v2.18.1b]
+#
+FROM bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb as vault
 
 ########################## BUILD IMAGE ##########################
-# We need to use the Rust build image, because
-# we need the Rust compiler and Cargo tooling
-FROM rust:1.40 as build
+FROM rust:1.48 as build
 
-# set sqlite as default for DB ARG for backward compatibility
-ARG DB=sqlite
+# Debian-based builds support multidb
+ARG DB=sqlite,mysql,postgresql
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
 ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
@@ -32,6 +37,7 @@ ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
 RUN rustup set profile minimal
 
 # Install required build libs for armhf architecture.
+# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
 RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
         /etc/apt/sources.list.d/deb-src.list \
     && dpkg --add-architecture armhf \
@@ -39,7 +45,11 @@ RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
     && apt-get install -y \
         --no-install-recommends \
         libssl-dev:armhf \
-        libc6-dev:armhf
+        libc6-dev:armhf \
+        libpq5:armhf \
+        libpq-dev \
+        libmariadb-dev:armhf \
+        libmariadb-dev-compat:armhf
 
 RUN apt-get update \
     && apt-get install -y \
@@ -47,7 +57,8 @@ RUN apt-get update \
        gcc-arm-linux-gnueabihf \
     && mkdir -p ~/.cargo \
     && echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
-    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config
+    && echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config \
+    && echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> ~/.cargo/config
 
 ENV CARGO_HOME "/root/.cargo"
 ENV USER "root"
@@ -61,15 +72,32 @@ COPY ./Cargo.* ./
 COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs
 
+# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
+# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
+# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
+# We also need libmariadb-dev-compat:amd64, but it cannot be installed together with the :armhf version.
+# What we can do is a force install, because nothing important overlaps.
+RUN apt-get install -y --no-install-recommends libmariadb3:amd64 && \
+    apt-get download libmariadb-dev-compat:amd64 && \
+    dpkg --force-all -i ./libmariadb-dev-compat*.deb && \
+    rm -rvf ./libmariadb-dev-compat*.deb
+
+# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
+# The libpq5:armhf package does not seem to provide a symlink to libpq.so.5 with the name libpq.so.
+# That symlink is only provided by the libpq-dev package, which can't be installed for both architectures at the same time.
+# Without this specific file the ld command will fail, and compilation fails with it.
+RUN ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so
+
 ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
 ENV CROSS_COMPILE="1"
 ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
 ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
 RUN rustup target add armv7-unknown-linux-gnueabihf
 
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
+RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
 RUN find . -not -path "./target*" -delete
 
 # Copies the complete project
@@ -100,7 +128,10 @@ RUN apt-get update && apt-get install -y \
     openssl \
     ca-certificates \
     curl \
+    dumb-init \
     sqlite3 \
+    libmariadb-dev-compat \
+    libpq5 \
     && rm -rf /var/lib/apt/lists/*
 
 RUN mkdir /data
@@ -124,4 +155,5 @@ HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
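The chained `echo` calls above assemble the Cargo cross-compilation config one line at a time. Written out at once, the same `~/.cargo/config` could be produced with a heredoc; this is an equivalent sketch, not a proposed change to the Dockerfile:

mkdir -p ~/.cargo
cat >> ~/.cargo/config <<'EOF'
[target.armv7-unknown-linux-gnueabihf]
linker = "arm-linux-gnueabihf-gcc"
rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]
EOF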
@@ -1,27 +1,33 @@
 # This file was generated using a Jinja2 template.
-# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfile's.
+# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
 
 # Using multistage build:
 #   https://docs.docker.com/develop/develop-images/multistage-build/
 #   https://whitfin.io/speeding-up-rust-docker-builds/
 ####################### VAULT BUILD IMAGE #######################
-# This hash is extracted from the docker web-vault builds and it's prefered over a simple tag because it's immutable.
-# It can be viewed in multiple ways:
-# - From the https://hub.docker.com/repository/docker/bitwardenrs/web-vault/tags page, click the tag name and the digest should be there.
-# - From the console, with the following commands:
-#     docker pull bitwardenrs/web-vault:v2.15.1
-#     docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.15.1
+# The web-vault digest specifies a particular web-vault build on Docker Hub.
+# Using the digest instead of the tag name provides better security,
+# as the digest of an image is immutable, whereas a tag name can later
+# be changed to point to a malicious image.
 #
-# - To do the opposite, and get the tag from the hash, you can do:
-#     docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c
-FROM bitwardenrs/web-vault@sha256:afba1e3bded09dc0a6a0dbacb3363ac33b6f122b4b26d3682cafb9115bdf785c as vault
+# To verify the current digest for a given tag name:
+# - From https://hub.docker.com/r/bitwardenrs/web-vault/tags,
+#   click the tag name to view the digest of the image it currently points to.
+# - From the command line:
+#     $ docker pull bitwardenrs/web-vault:v2.18.1b
+#     $ docker image inspect --format "{{.RepoDigests}}" bitwardenrs/web-vault:v2.18.1b
+#     [bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb]
+#
+# - Conversely, to get the tag name from the digest:
+#     $ docker image inspect --format "{{.RepoTags}}" bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb
+#     [bitwardenrs/web-vault:v2.18.1b]
+#
+FROM bitwardenrs/web-vault@sha256:345a509dd5482343458b672dcd69203836ffac2e5181a1c99826d9695b9cb1eb as vault
 
 ########################## BUILD IMAGE ##########################
-# Musl build image for statically compiled binary
-FROM clux/muslrust:nightly-2020-03-09 as build
+FROM messense/rust-musl-cross:armv7-musleabihf as build
 
-# set sqlite as default for DB ARG for backward compatibility
+# Alpine-based ARM (musl) only supports sqlite during compile time.
 ARG DB=sqlite
 
 # Build time options to avoid dpkg warnings and help with reproducible builds.
@@ -42,12 +48,12 @@ COPY ./Cargo.* ./
 COPY ./rust-toolchain ./rust-toolchain
 COPY ./build.rs ./build.rs
 
-RUN rustup target add x86_64-unknown-linux-musl
+RUN rustup target add armv7-unknown-linux-musleabihf
 
 # Builds your dependencies and removes the
 # dummy project, except the target folder
 # This folder contains the compiled dependencies
-RUN cargo build --features ${DB} --release
+RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
 RUN find . -not -path "./target*" -delete
 
 # Copies the complete project
@@ -59,26 +65,33 @@ RUN touch src/main.rs
 
 # Builds again, this time it'll just be
 # your actual source files being built
-RUN cargo build --features ${DB} --release
+RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
+RUN musl-strip target/armv7-unknown-linux-musleabihf/release/bitwarden_rs
 
 ######################## RUNTIME IMAGE ########################
 # Create a new stage with a minimal image
 # because we already have a binary built
-FROM alpine:3.11
+FROM balenalib/armv7hf-alpine:3.12
 
 ENV ROCKET_ENV "staging"
 ENV ROCKET_PORT=80
 ENV ROCKET_WORKERS=10
 ENV SSL_CERT_DIR=/etc/ssl/certs
 
+RUN [ "cross-build-start" ]
+
 # Install needed libraries
 RUN apk add --no-cache \
     openssl \
     curl \
+    dumb-init \
     sqlite \
     ca-certificates
 
 RUN mkdir /data
 
+RUN [ "cross-build-end" ]
+
 VOLUME /data
 EXPOSE 80
 EXPOSE 3012
@@ -87,7 +100,7 @@ EXPOSE 3012
 # and the binary from the "build" stage to the current stage
 COPY Rocket.toml .
 COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/x86_64-unknown-linux-musl/release/bitwarden_rs .
+COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/bitwarden_rs .
 
 COPY docker/healthcheck.sh /healthcheck.sh
 COPY docker/start.sh /start.sh
@@ -96,4 +109,5 @@ HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 
 # Configures the startup!
 WORKDIR /
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
 CMD ["/start.sh"]
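Because the Alpine variant now cross-builds a musl binary and strips it with `musl-strip`, the artifact should be fully static. A quick check, with the path taken from the COPY line above; exact `file` output varies by version:

bin=/app/target/armv7-unknown-linux-musleabihf/release/bitwarden_rs
file "${bin}"                        # expect "... statically linked" (or "static-pie")
readelf -d "${bin}" | grep NEEDED || echo "no dynamic library dependencies"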
@@ -10,7 +10,7 @@ Docker Hub hooks provide these predefined [environment variables](https://docs.d
 * `DOCKER_TAG`: the Docker repository tag being built.
 * `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)
 
-The current multi-arch image build relies on the original bitwarden_rs Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/database/OS combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.
+The current multi-arch image build relies on the original bitwarden_rs Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.
 
 ## References
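For contrast with the cross-compilation approach kept here, a QEMU-based multi-arch build driven entirely by `docker buildx` would look roughly like the sketch below. The repository name is illustrative, and this is not how these images are currently built:

# Register QEMU binfmt handlers, then let buildx emulate each target CPU.
docker run --privileged --rm tonistiigi/binfmt --install all
docker buildx create --use
docker buildx build \
    --platform linux/amd64,linux/arm/v7,linux/arm64 \
    --tag example/bitwarden_rs:testing \
    --push .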
@@ -1,30 +1,16 @@
-# The default Debian-based SQLite images support these arches.
-#
-# Other images (Alpine-based, or with other database backends) currently
-# support only a subset of these.
+# The default Debian-based images support these arches for all database backends.
 arches=(
     amd64
-    arm32v6
-    arm32v7
-    arm64v8
+    armv6
+    armv7
+    arm64
 )
 
-case "${DOCKER_REPO}" in
-    *-mysql)
-        db=mysql
-        arches=(amd64)
-        ;;
-    *-postgresql)
-        db=postgresql
-        arches=(amd64)
-        ;;
-    *)
-        db=sqlite
-        ;;
-esac
-
 if [[ "${DOCKER_TAG}" == *alpine ]]; then
-    # The Alpine build currently only works for amd64.
-    os_suffix=.alpine
-    arches=(amd64)
+    # The Alpine image build currently only works for certain arches.
+    distro_suffix=.alpine
+    arches=(
+        amd64
+        armv7
+    )
 fi
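Since this arch list is sourced by the other hooks (as `./hooks/arches.sh`), its behavior is easy to probe in isolation. A sketch; the tag value is illustrative:

# The arch list now depends only on DOCKER_TAG; DOCKER_REPO no longer matters.
DOCKER_TAG="1.19.0-alpine"
source ./hooks/arches.sh
echo "${arches[@]}"         # -> amd64 armv7
echo "${distro_suffix}"     # -> .alpine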
hooks/build

@@ -4,11 +4,42 @@ echo ">>> Building images..."
 
 source ./hooks/arches.sh
 
+if [[ -z "${SOURCE_COMMIT}" ]]; then
+    # This var is typically predefined by Docker Hub, but it won't be
+    # when testing locally.
+    SOURCE_COMMIT="$(git rev-parse HEAD)"
+fi
+
+# Construct a version string in the style of `build.rs`.
+GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
+if [[ -n "${GIT_EXACT_TAG}" ]]; then
+    SOURCE_VERSION="${GIT_EXACT_TAG}"
+else
+    GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
+    SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
+fi
+
+LABELS=(
+    # https://github.com/opencontainers/image-spec/blob/master/annotations.md
+    org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
+    org.opencontainers.image.documentation="https://github.com/dani-garcia/bitwarden_rs/wiki"
+    org.opencontainers.image.licenses="GPL-3.0-only"
+    org.opencontainers.image.revision="${SOURCE_COMMIT}"
+    org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}"
+    org.opencontainers.image.url="https://hub.docker.com/r/${DOCKER_REPO#*/}"
+    org.opencontainers.image.version="${SOURCE_VERSION}"
+)
+LABEL_ARGS=()
+for label in "${LABELS[@]}"; do
+    LABEL_ARGS+=(--label "${label}")
+done
+
 set -ex
 
 for arch in "${arches[@]}"; do
     docker build \
+        "${LABEL_ARGS[@]}" \
         -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
-        -f docker/${arch}/${db}/Dockerfile${os_suffix} \
+        -f docker/${arch}/Dockerfile${distro_suffix} \
         .
 done
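The OCI labels attached by the loop above end up on every per-arch image and can be read back with `docker image inspect`. A sketch; the image name is illustrative and `jq` is optional pretty-printing:

docker image inspect \
    --format '{{json .Config.Labels}}' \
    example/bitwarden_rs:testing-amd64 | jq .
# Expect keys such as org.opencontainers.image.revision and org.opencontainers.image.version.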
hooks/pre_build (new executable file)

@@ -0,0 +1,28 @@
+#!/bin/bash
+
+set -ex
+
+# If requested, print some environment info for troubleshooting.
+if [[ -n "${DOCKER_HUB_DEBUG}" ]]; then
+    id
+    pwd
+    df -h
+    env
+    docker info
+    docker version
+fi
+
+# Install build dependencies.
+deps=(
+    jq
+)
+apt-get update
+apt-get install -y "${deps[@]}"
+
+# Docker Hub uses a shallow clone and doesn't fetch tags, which breaks some
+# Git operations that we perform later, so fetch the complete history and
+# tags first. Note that if the build is cached, the clone may have been
+# unshallowed already; if so, unshallowing will fail, so skip it.
+if [[ -f .git/shallow ]]; then
+    git fetch --unshallow --tags
+fi
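The shallow-clone guard above keys off the `.git/shallow` marker file; Git can also report this state directly (flag available since Git 2.15), which makes the hook's behavior easy to test locally:

git rev-parse --is-shallow-repository    # prints "true" in Docker Hub's initial clone
if [[ -f .git/shallow ]]; then
    git fetch --unshallow --tags
fi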
hooks/push

@@ -1,102 +1,138 @@
 #!/bin/bash
 
-echo ">>> Pushing images..."
-
-export DOCKER_CLI_EXPERIMENTAL=enabled
-
-declare -A annotations=(
-    [amd64]="--os linux --arch amd64"
-    [arm32v6]="--os linux --arch arm --variant v6"
-    [arm32v7]="--os linux --arch arm --variant v7"
-    [arm64v8]="--os linux --arch arm64 --variant v8"
-)
-
 source ./hooks/arches.sh
 
+export DOCKER_CLI_EXPERIMENTAL=enabled
+
+# Join a list of args with a single char.
+# Ref: https://stackoverflow.com/a/17841619
+join() { local IFS="$1"; shift; echo "$*"; }
+
 set -ex
 
-declare -A images
+echo ">>> Starting local Docker registry..."
+
+# Docker Buildx's `docker-container` driver is needed for multi-platform
+# builds, but it can't access existing images on the Docker host (like the
+# cross-compiled ones we just built). Those images first need to be pushed to
+# a registry -- Docker Hub could be used, but since it's not trivial to clean
+# up those intermediate images on Docker Hub, it's easier to just run a local
+# Docker registry, which gets cleaned up automatically once the build job ends.
+#
+# https://docs.docker.com/registry/deploying/
+# https://hub.docker.com/_/registry
+#
+# Use host networking so the buildx container can access the registry via
+# localhost.
+#
+docker run -d --name registry --network host registry:2  # defaults to port 5000
+
+# Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
+# Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
+LOCAL_REGISTRY="localhost:5000"
+REPO="${DOCKER_REPO#*/}"
+LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"
+
+echo ">>> Pushing images to local registry..."
+
 for arch in ${arches[@]}; do
-    images[$arch]="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
+    docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
+    local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
+    docker tag "${docker_image}" "${local_image}"
+    docker push "${local_image}"
 done
 
-# Push the images that were just built; manifest list creation fails if the
-# images (manifests) referenced don't already exist in the Docker registry.
-for image in "${images[@]}"; do
-    docker push "${image}"
-done
-
-manifest_lists=("${DOCKER_REPO}:${DOCKER_TAG}")
+echo ">>> Setting up Docker Buildx..."
+
+# Same as earlier, use host networking so the buildx container can access the
+# registry via localhost.
+#
+# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
+#
+docker buildx create --name builder --use --driver-opt network=host
+
+echo ">>> Running Docker Buildx..."
+
+tags=("${DOCKER_REPO}:${DOCKER_TAG}")
 
-# If the Docker tag starts with a version number, assume the latest release is
-# being pushed. Add an extra manifest (`latest` or `alpine`, as appropriate)
+# If the Docker tag starts with a version number, assume the latest release
+# is being pushed. Add an extra tag (`latest` or `alpine`, as appropriate)
 # to make it easier for users to track the latest release.
 if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
     if [[ "${DOCKER_TAG}" == *alpine ]]; then
-        manifest_lists+=(${DOCKER_REPO}:alpine)
+        tags+=(${DOCKER_REPO}:alpine)
     else
-        manifest_lists+=(${DOCKER_REPO}:latest)
+        tags+=(${DOCKER_REPO}:latest)
     fi
 fi
 
-for manifest_list in "${manifest_lists[@]}"; do
-    # Create the (multi-arch) manifest list of arch-specific images.
-    docker manifest create ${manifest_list} ${images[@]}
-
-    # Make sure each image manifest is annotated with the correct arch info.
-    # Docker does not auto-detect the arch of each cross-compiled image, so
-    # everything would appear as `linux/amd64` otherwise.
-    for arch in "${arches[@]}"; do
-        docker manifest annotate ${annotations[$arch]} ${manifest_list} ${images[$arch]}
-    done
-
-    # Push the manifest list.
-    docker manifest push --purge ${manifest_list}
+tag_args=()
+for tag in "${tags[@]}"; do
+    tag_args+=(--tag "${tag}")
 done
 
-# Avoid logging credentials and tokens.
-set +ex
-
-# Delete the arch-specific tags, if credentials for doing so are available.
-# Note that `DOCKER_PASSWORD` must be the actual user password. Passing a JWT
-# obtained using a personal access token results in a 403 error with
-# {"detail": "access to the resource is forbidden with personal access token"}
-if [[ -z "${DOCKER_USERNAME}" || -z "${DOCKER_PASSWORD}" ]]; then
-    exit 0
-fi
-
-# Given a JSON input on stdin, extract the string value associated with the
-# specified key. This avoids an extra dependency on a tool like `jq`.
-extract() {
-    local key="$1"
-    # Extract "<key>":"<val>" (assumes key/val won't contain double quotes).
-    # The colon may have whitespace on either side.
-    grep -o "\"${key}\"[[:space:]]*:[[:space:]]*\"[^\"]\+\"" |
-    # Extract just <val> by deleting the last '"', and then greedily deleting
-    # everything up to '"'.
-    sed -e 's/"$//' -e 's/.*"//'
-}
-
-echo ">>> Getting API token..."
-jwt=$(curl -sS -X POST \
-    -H "Content-Type: application/json" \
-    -d "{\"username\":\"${DOCKER_USERNAME}\",\"password\": \"${DOCKER_PASSWORD}\"}" \
-    "https://hub.docker.com/v2/users/login" |
-    extract 'token')
-
-# Strip the registry portion from `index.docker.io/user/repo`.
-repo="${DOCKER_REPO#*/}"
-
+# Docker Buildx takes a list of target platforms (OS/arch/variant), so map
+# the arch list to a platform list (assuming the OS is always `linux`).
+declare -A arch_to_platform=(
+    [amd64]="linux/amd64"
+    [armv6]="linux/arm/v6"
+    [armv7]="linux/arm/v7"
+    [arm64]="linux/arm64"
+)
+platforms=()
 for arch in ${arches[@]}; do
-    # Don't delete the `arm32v6` tag; Docker can't seem to properly
-    # auto-select that image on Armv6 platforms like Raspberry Pi 1 and Zero
-    # (https://github.com/moby/moby/issues/41017).
-    if [[ ${arch} == 'arm32v6' ]]; then
-        continue
-    fi
-    tag="${DOCKER_TAG}-${arch}"
-    echo ">>> Deleting '${repo}:${tag}'..."
-    curl -sS -X DELETE \
-        -H "Authorization: Bearer ${jwt}" \
-        "https://hub.docker.com/v2/repositories/${repo}/tags/${tag}/"
+    platforms+=("${arch_to_platform[$arch]}")
 done
+platforms="$(join "," "${platforms[@]}")"
+
+# Run the build, pushing the resulting images and multi-arch manifest list to
+# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
+# context, which isn't needed here since the actual cross-compiled images
+# have already been built.
+docker buildx build \
+    --network host \
+    --build-arg LOCAL_REPO="${LOCAL_REPO}" \
+    --build-arg DOCKER_TAG="${DOCKER_TAG}" \
+    --platform "${platforms}" \
+    "${tag_args[@]}" \
+    --push \
+    - < ./docker/Dockerfile.buildx
+
+# Add an extra arch-specific tag for `arm32v6`; Docker can't seem to properly
+# auto-select that image on ARMv6 platforms like Raspberry Pi 1 and Zero
+# (https://github.com/moby/moby/issues/41017).
+#
+# Note that we use `arm32v6` instead of `armv6` to be consistent with the
+# existing bitwarden_rs tags, which adhere to the naming conventions of the
+# Docker per-architecture repos (e.g., https://hub.docker.com/u/arm32v6).
+# Unfortunately, these per-arch repo names aren't always consistent with the
+# corresponding platform (OS/arch/variant) IDs, particularly in the case of
+# 32-bit ARM arches (e.g., `linux/arm/v6` is used, not `linux/arm32/v6`).
+#
+# TODO: It looks like this issue should be fixed starting in Docker 20.10.0,
+# so this step can be removed once fixed versions are in wider distribution.
+#
+# Tags:
+#
+# testing => testing-arm32v6
+# testing-alpine => <ignored>
+# x.y.z => x.y.z-arm32v6, latest-arm32v6
+# x.y.z-alpine => <ignored>
+#
+if [[ "${DOCKER_TAG}" != *alpine ]]; then
+    image="${DOCKER_REPO}":"${DOCKER_TAG}"
+
+    # Fetch the multi-arch manifest list and find the digest of the armv6 image.
+    filter='.manifests|.[]|select(.platform.architecture=="arm" and .platform.variant=="v6")|.digest'
+    digest="$(docker manifest inspect "${image}" | jq -r "${filter}")"
+
+    # Pull the armv6 image by digest, retag it, and repush it.
+    docker pull "${DOCKER_REPO}"@"${digest}"
+    docker tag "${DOCKER_REPO}"@"${digest}" "${image}"-arm32v6
+    docker push "${image}"-arm32v6
+
+    if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
+        docker tag "${image}"-arm32v6 "${DOCKER_REPO}:latest"-arm32v6
+        docker push "${DOCKER_REPO}:latest"-arm32v6
+    fi
+fi
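After this hook runs, the intermediate per-arch images sit in the throwaway local registry and the final manifest list is on Docker Hub; both can be inspected with standard tooling. A sketch, with `REPO`, `DOCKER_REPO`, and `DOCKER_TAG` as defined in the script; the `/v2/` paths are part of the Docker Registry HTTP API:

# What landed in the local registry started by the hook?
curl -s http://localhost:5000/v2/_catalog | jq .
curl -s "http://localhost:5000/v2/${REPO}/tags/list" | jq .
# Which platforms does the pushed manifest list cover?
docker manifest inspect "${DOCKER_REPO}:${DOCKER_TAG}" | jq '.manifests[].platform'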
@@ -0,0 +1,13 @@
+ALTER TABLE ciphers
+ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT FALSE;
+
+-- Transfer favorite status for user-owned ciphers.
+UPDATE ciphers
+SET favorite = TRUE
+WHERE EXISTS (
+    SELECT * FROM favorites
+    WHERE favorites.user_uuid = ciphers.user_uuid
+      AND favorites.cipher_uuid = ciphers.uuid
+);
+
+DROP TABLE favorites;

@@ -0,0 +1,16 @@
+CREATE TABLE favorites (
+    user_uuid   CHAR(36) NOT NULL REFERENCES users(uuid),
+    cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers(uuid),
+
+    PRIMARY KEY (user_uuid, cipher_uuid)
+);
+
+-- Transfer favorite status for user-owned ciphers.
+INSERT INTO favorites(user_uuid, cipher_uuid)
+SELECT user_uuid, uuid
+FROM ciphers
+WHERE favorite = TRUE
+  AND user_uuid IS NOT NULL;
+
+ALTER TABLE ciphers
+DROP COLUMN favorite;

@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT 1;

@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;

@@ -0,0 +1,13 @@
+ALTER TABLE ciphers
+ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT FALSE;
+
+-- Transfer favorite status for user-owned ciphers.
+UPDATE ciphers
+SET favorite = TRUE
+WHERE EXISTS (
+    SELECT * FROM favorites
+    WHERE favorites.user_uuid = ciphers.user_uuid
+      AND favorites.cipher_uuid = ciphers.uuid
+);
+
+DROP TABLE favorites;

@@ -0,0 +1,16 @@
+CREATE TABLE favorites (
+    user_uuid   VARCHAR(40) NOT NULL REFERENCES users(uuid),
+    cipher_uuid VARCHAR(40) NOT NULL REFERENCES ciphers(uuid),
+
+    PRIMARY KEY (user_uuid, cipher_uuid)
+);
+
+-- Transfer favorite status for user-owned ciphers.
+INSERT INTO favorites(user_uuid, cipher_uuid)
+SELECT user_uuid, uuid
+FROM ciphers
+WHERE favorite = TRUE
+  AND user_uuid IS NOT NULL;
+
+ALTER TABLE ciphers
+DROP COLUMN favorite;

@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT true;

@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;

@@ -0,0 +1,13 @@
+ALTER TABLE ciphers
+ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT 0; -- FALSE
+
+-- Transfer favorite status for user-owned ciphers.
+UPDATE ciphers
+SET favorite = 1
+WHERE EXISTS (
+    SELECT * FROM favorites
+    WHERE favorites.user_uuid = ciphers.user_uuid
+      AND favorites.cipher_uuid = ciphers.uuid
+);
+
+DROP TABLE favorites;

@@ -0,0 +1,71 @@
+CREATE TABLE favorites (
+    user_uuid   TEXT NOT NULL REFERENCES users(uuid),
+    cipher_uuid TEXT NOT NULL REFERENCES ciphers(uuid),
+
+    PRIMARY KEY (user_uuid, cipher_uuid)
+);
+
+-- Transfer favorite status for user-owned ciphers.
+INSERT INTO favorites(user_uuid, cipher_uuid)
+SELECT user_uuid, uuid
+FROM ciphers
+WHERE favorite = 1
+  AND user_uuid IS NOT NULL;
+
+-- Drop the `favorite` column from the `ciphers` table, using the 12-step
+-- procedure from <https://www.sqlite.org/lang_altertable.html#altertabrename>.
+-- Note that some steps aren't applicable and are omitted.
+
+-- 1. If foreign key constraints are enabled, disable them using PRAGMA foreign_keys=OFF.
+--
+-- Diesel runs each migration in its own transaction. `PRAGMA foreign_keys`
+-- is a no-op within a transaction, so this step must be done outside of this
+-- file, before starting the Diesel migrations.
+
+-- 2. Start a transaction.
+--
+-- Diesel already runs each migration in its own transaction.
+
+-- 4. Use CREATE TABLE to construct a new table "new_X" that is in the
+-- desired revised format of table X. Make sure that the name "new_X" does
+-- not collide with any existing table name, of course.
+
+CREATE TABLE new_ciphers(
+    uuid              TEXT NOT NULL PRIMARY KEY,
+    created_at        DATETIME NOT NULL,
+    updated_at        DATETIME NOT NULL,
+    user_uuid         TEXT REFERENCES users(uuid),
+    organization_uuid TEXT REFERENCES organizations(uuid),
+    atype             INTEGER NOT NULL,
+    name              TEXT NOT NULL,
+    notes             TEXT,
+    fields            TEXT,
+    data              TEXT NOT NULL,
+    password_history  TEXT,
+    deleted_at        DATETIME
+);
+
+-- 5. Transfer content from X into new_X using a statement like:
+--    INSERT INTO new_X SELECT ... FROM X.
+
+INSERT INTO new_ciphers(uuid, created_at, updated_at, user_uuid, organization_uuid, atype,
+                        name, notes, fields, data, password_history, deleted_at)
+SELECT uuid, created_at, updated_at, user_uuid, organization_uuid, atype,
+       name, notes, fields, data, password_history, deleted_at
+FROM ciphers;
+
+-- 6. Drop the old table X: DROP TABLE X.
+
+DROP TABLE ciphers;
+
+-- 7. Change the name of new_X to X using: ALTER TABLE new_X RENAME TO X.
+
+ALTER TABLE new_ciphers RENAME TO ciphers;
+
+-- 11. Commit the transaction started in step 2.
+
+-- 12. If foreign keys constraints were originally enabled, reenable them now.
+--
+-- `PRAGMA foreign_keys` is scoped to a database connection, and Diesel
+-- migrations are run in a separate database connection that is closed once
+-- the migrations finish.

@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT 1;

@@ -0,0 +1 @@
+ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;

@@ -1 +1 @@
-nightly-2020-07-11
+nightly-2021-01-25
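The SQLite migration above leans on a subtle rule: `PRAGMA foreign_keys` is a no-op inside a transaction, and Diesel wraps every migration in one. The sqlite3 shell demonstrates this directly; a sketch:

sqlite3 /tmp/pragma-demo.db <<'EOF'
PRAGMA foreign_keys = ON;      -- takes effect: no transaction is open yet
BEGIN;
PRAGMA foreign_keys = OFF;     -- silently ignored inside the transaction
PRAGMA foreign_keys;           -- still prints 1
COMMIT;
EOF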
264
src/api/admin.rs
@@ -1,24 +1,25 @@
|
|||||||
use once_cell::sync::Lazy;
|
use once_cell::sync::Lazy;
|
||||||
use serde::de::DeserializeOwned;
|
use serde::de::DeserializeOwned;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
use std::process::Command;
|
use std::{env, process::Command, time::Duration};
|
||||||
|
|
||||||
|
use reqwest::{blocking::Client, header::USER_AGENT};
|
||||||
use rocket::{
|
use rocket::{
|
||||||
http::{Cookie, Cookies, SameSite},
|
http::{Cookie, Cookies, SameSite},
|
||||||
request::{self, FlashMessage, Form, FromRequest, Request, Outcome},
|
request::{self, FlashMessage, Form, FromRequest, Outcome, Request},
|
||||||
response::{content::Html, Flash, Redirect},
|
response::{content::Html, Flash, Redirect},
|
||||||
Route,
|
Route,
|
||||||
};
|
};
|
||||||
use rocket_contrib::json::Json;
|
use rocket_contrib::json::Json;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{ApiResult, EmptyResult, JsonResult},
|
api::{ApiResult, EmptyResult, JsonResult, NumberOrString},
|
||||||
auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
|
auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
|
||||||
config::ConfigBuilder,
|
config::ConfigBuilder,
|
||||||
db::{backup_database, models::*, DbConn},
|
db::{backup_database, models::*, DbConn, DbConnType},
|
||||||
error::{Error, MapResult},
|
error::{Error, MapResult},
|
||||||
mail,
|
mail,
|
||||||
util::get_display_size,
|
util::{format_naive_datetime_local, get_display_size},
|
||||||
CONFIG,
|
CONFIG,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -36,7 +37,10 @@ pub fn routes() -> Vec<Route> {
|
|||||||
logout,
|
logout,
|
||||||
delete_user,
|
delete_user,
|
||||||
deauth_user,
|
deauth_user,
|
||||||
|
disable_user,
|
||||||
|
enable_user,
|
||||||
remove_2fa,
|
remove_2fa,
|
||||||
|
update_user_org_type,
|
||||||
update_revision_users,
|
update_revision_users,
|
||||||
post_config,
|
post_config,
|
||||||
delete_config,
|
delete_config,
|
||||||
@@ -44,12 +48,28 @@ pub fn routes() -> Vec<Route> {
|
|||||||
test_smtp,
|
test_smtp,
|
||||||
users_overview,
|
users_overview,
|
||||||
organizations_overview,
|
organizations_overview,
|
||||||
|
delete_organization,
|
||||||
diagnostics,
|
diagnostics,
|
||||||
|
get_diagnostics_config
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
static CAN_BACKUP: Lazy<bool> =
|
static DB_TYPE: Lazy<&str> = Lazy::new(|| {
|
||||||
Lazy::new(|| cfg!(feature = "sqlite") && Command::new("sqlite3").arg("-version").status().is_ok());
|
DbConnType::from_url(&CONFIG.database_url())
|
||||||
|
.map(|t| match t {
|
||||||
|
DbConnType::sqlite => "SQLite",
|
||||||
|
DbConnType::mysql => "MySQL",
|
||||||
|
DbConnType::postgresql => "PostgreSQL",
|
||||||
|
})
|
||||||
|
.unwrap_or("Unknown")
|
||||||
|
});
|
||||||
|
|
||||||
|
static CAN_BACKUP: Lazy<bool> = Lazy::new(|| {
|
||||||
|
DbConnType::from_url(&CONFIG.database_url())
|
||||||
|
.map(|t| t == DbConnType::sqlite)
|
||||||
|
.unwrap_or(false)
|
||||||
|
&& Command::new("sqlite3").arg("-version").status().is_ok()
|
||||||
|
});
|
||||||
|
|
||||||
#[get("/")]
|
#[get("/")]
|
||||||
fn admin_disabled() -> &'static str {
|
fn admin_disabled() -> &'static str {
|
||||||
@@ -66,12 +86,35 @@ fn admin_path() -> String {
     format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
 }

+struct Referer(Option<String>);
+
+impl<'a, 'r> FromRequest<'a, 'r> for Referer {
+    type Error = ();
+
+    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
+        Outcome::Success(Referer(request.headers().get_one("Referer").map(str::to_string)))
+    }
+}
+
 /// Used for `Location` response headers, which must specify an absolute URI
 /// (see https://tools.ietf.org/html/rfc2616#section-14.30).
-fn admin_url() -> String {
-    // Don't use CONFIG.domain() directly, since the user may want to keep a
-    // trailing slash there, particularly when running under a subpath.
-    format!("{}{}{}", CONFIG.domain_origin(), CONFIG.domain_path(), ADMIN_PATH)
+fn admin_url(referer: Referer) -> String {
+    // If we get a referer, use that to make it work when DOMAIN is not set
+    if let Some(mut referer) = referer.0 {
+        if let Some(start_index) = referer.find(ADMIN_PATH) {
+            referer.truncate(start_index + ADMIN_PATH.len());
+            return referer;
+        }
+    }
+
+    if CONFIG.domain_set() {
+        // Don't use CONFIG.domain() directly, since the user may want to keep a
+        // trailing slash there, particularly when running under a subpath.
+        format!("{}{}{}", CONFIG.domain_origin(), CONFIG.domain_path(), ADMIN_PATH)
+    } else {
+        // Last case, when no referer or domain is set; technically invalid, but better than nothing
+        ADMIN_PATH.to_string()
+    }
 }

 #[get("/", rank = 2)]
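
The `Referer` type above is an instance of Rocket 0.4's request-guard pattern: any handler parameter whose type implements `FromRequest` is extracted from the incoming request before the handler runs. A compilable sketch of that pattern under the same Rocket 0.4 assumptions (nightly toolchain; the `UserAgent` guard here is illustrative, not from the patch):

```rust
// Minimal Rocket 0.4 request guard reading an optional header.
#![feature(proc_macro_hygiene, decl_macro)]
#[macro_use]
extern crate rocket;

use rocket::request::{self, FromRequest, Request};
use rocket::Outcome;

struct UserAgent(Option<String>);

impl<'a, 'r> FromRequest<'a, 'r> for UserAgent {
    type Error = ();

    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
        // This guard never fails: a missing header simply yields None,
        // mirroring how the patch's Referer guard behaves.
        Outcome::Success(UserAgent(request.headers().get_one("User-Agent").map(str::to_string)))
    }
}

#[get("/")]
fn index(ua: UserAgent) -> String {
    format!("User-Agent header: {:?}", ua.0)
}

fn main() {
    rocket::ignite().mount("/", routes![index]).launch();
}
```
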
@@ -91,14 +134,19 @@ struct LoginForm {
 }

 #[post("/", data = "<data>")]
-fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -> Result<Redirect, Flash<Redirect>> {
+fn post_admin_login(
+    data: Form<LoginForm>,
+    mut cookies: Cookies,
+    ip: ClientIp,
+    referer: Referer,
+) -> Result<Redirect, Flash<Redirect>> {
     let data = data.into_inner();

     // If the token is invalid, redirect to login page
     if !_validate_token(&data.token) {
         error!("Invalid admin token. IP: {}", ip.ip);
         Err(Flash::error(
-            Redirect::to(admin_url()),
+            Redirect::to(admin_url(referer)),
             "Invalid admin token, please try again.",
         ))
     } else {
@@ -114,7 +162,7 @@ fn post_admin_login(data: Form<LoginForm>, mut cookies: Cookies, ip: ClientIp) -
             .finish();

         cookies.add(cookie);
-        Ok(Redirect::to(admin_url()))
+        Ok(Redirect::to(admin_url(referer)))
     }
 }

@@ -243,9 +291,9 @@ fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
 }

 #[get("/logout")]
-fn logout(mut cookies: Cookies) -> Result<Redirect, ()> {
+fn logout(mut cookies: Cookies, referer: Referer) -> Result<Redirect, ()> {
     cookies.remove(Cookie::named(COOKIE_NAME));
-    Ok(Redirect::to(admin_url()))
+    Ok(Redirect::to(admin_url(referer)))
 }

 #[get("/users")]
@@ -259,14 +307,22 @@ fn get_users_json(_token: AdminToken, conn: DbConn) -> JsonResult {
 #[get("/users/overview")]
 fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
     let users = User::get_all(&conn);
+    let dt_fmt = "%Y-%m-%d %H:%M:%S %Z";
     let users_json: Vec<Value> = users.iter()
         .map(|u| {
             let mut usr = u.to_json(&conn);
             usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn));
             usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn));
             usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn) as i32));
-            usr
-        }).collect();
+            usr["user_enabled"] = json!(u.enabled);
+            usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, dt_fmt));
+            usr["last_active"] = match u.last_active(&conn) {
+                Some(dt) => json!(format_naive_datetime_local(&dt, dt_fmt)),
+                None => json!("Never")
+            };
+            usr
+        })
+        .collect();

     let text = AdminTemplateData::users(users_json).render()?;
     Ok(Html(text))
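
`format_naive_datetime_local` is a project helper that is imported above but not shown in this diff. A plausible sketch of what such a helper does — treat the stored naive timestamp as UTC and render it in the server's local offset with the given strftime format. This is an assumption for illustration, not the project's actual implementation:

```rust
// Hedged sketch of a format_naive_datetime_local-style helper.
use chrono::{Local, NaiveDateTime, TimeZone, Utc};

fn format_naive_datetime_local(dt: &NaiveDateTime, fmt: &str) -> String {
    // Interpret the naive value as UTC, then convert to the local offset.
    Local.from_utc_datetime(dt).format(fmt).to_string()
}

fn main() {
    let created_at: NaiveDateTime = Utc::now().naive_utc();
    // "%Z" renders the timezone abbreviation/offset, matching the admin page format string.
    println!("{}", format_naive_datetime_local(&created_at, "%Y-%m-%d %H:%M:%S %Z"));
}
```
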
@@ -287,6 +343,24 @@ fn deauth_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
     user.save(&conn)
 }

+#[post("/users/<uuid>/disable")]
+fn disable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
+    Device::delete_all_by_user(&user.uuid, &conn)?;
+    user.reset_security_stamp();
+    user.enabled = false;
+
+    user.save(&conn)
+}
+
+#[post("/users/<uuid>/enable")]
+fn enable_user(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
+    user.enabled = true;
+
+    user.save(&conn)
+}
+
 #[post("/users/<uuid>/remove-2fa")]
 fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
     let mut user = User::find_by_uuid(&uuid, &conn).map_res("User doesn't exist")?;
@@ -295,6 +369,41 @@ fn remove_2fa(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
     user.save(&conn)
 }

+#[derive(Deserialize, Debug)]
+struct UserOrgTypeData {
+    user_type: NumberOrString,
+    user_uuid: String,
+    org_uuid: String,
+}
+
+#[post("/users/org_type", data = "<data>")]
+fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let data: UserOrgTypeData = data.into_inner();
+
+    let mut user_to_edit = match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn) {
+        Some(user) => user,
+        None => err!("The specified user isn't a member of the organization"),
+    };
+
+    let new_type = match UserOrgType::from_str(&data.user_type.into_string()) {
+        Some(new_type) => new_type as i32,
+        None => err!("Invalid type"),
+    };
+
+    if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
+        // Removing owner permission; check that at least one other owner remains
+        let num_owners = UserOrganization::find_by_org_and_type(&data.org_uuid, UserOrgType::Owner as i32, &conn).len();
+
+        if num_owners <= 1 {
+            err!("Can't change the type of the last owner")
+        }
+    }
+
+    user_to_edit.atype = new_type;
+    user_to_edit.save(&conn)
+}
+
+
 #[post("/users/update_revision")]
 fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
     User::update_all_revisions(&conn)
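
The newly imported `NumberOrString` papers over Bitwarden clients that send the org type either as a JSON number or as a string. A self-contained sketch of the usual serde technique for this (an untagged enum); the real type lives elsewhere in the codebase and may differ in detail:

```rust
// Sketch of the serde pattern behind a NumberOrString-style type.
use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum NumberOrString {
    Number(i32),
    String(String),
}

impl NumberOrString {
    fn into_string(self) -> String {
        match self {
            NumberOrString::Number(n) => n.to_string(),
            NumberOrString::String(s) => s,
        }
    }
}

fn main() {
    // Both representations deserialize to the same logical value.
    let a: NumberOrString = serde_json::from_str("2").unwrap();
    let b: NumberOrString = serde_json::from_str("\"2\"").unwrap();
    assert_eq!(a.into_string(), b.into_string());
}
```
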
@@ -303,19 +412,27 @@ fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult {
 #[get("/organizations/overview")]
 fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult<Html<String>> {
     let organizations = Organization::get_all(&conn);
-    let organizations_json: Vec<Value> = organizations.iter().map(|o| {
-        let mut org = o.to_json();
-        org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn));
-        org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn));
-        org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn));
-        org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn) as i32));
-        org
-    }).collect();
+    let organizations_json: Vec<Value> = organizations.iter()
+        .map(|o| {
+            let mut org = o.to_json();
+            org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &conn));
+            org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn));
+            org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn));
+            org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn) as i32));
+            org
+        })
+        .collect();

     let text = AdminTemplateData::organizations(organizations_json).render()?;
     Ok(Html(text))
 }

+#[post("/organizations/<uuid>/delete")]
+fn delete_organization(uuid: String, _token: AdminToken, conn: DbConn) -> EmptyResult {
+    let org = Organization::find_by_uuid(&uuid, &conn).map_res("Organization doesn't exist")?;
+    org.delete(&conn)
+}
+
 #[derive(Deserialize)]
 struct WebVaultVersion {
     version: String,
@@ -332,77 +449,110 @@ struct GitCommit {
 }

 fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
-    use reqwest::{blocking::Client, header::USER_AGENT};
-    use std::time::Duration;
     let github_api = Client::builder().build()?;

-    Ok(
-        github_api.get(url)
+    Ok(github_api
+        .get(url)
         .timeout(Duration::from_secs(10))
         .header(USER_AGENT, "Bitwarden_RS")
         .send()?
         .error_for_status()?
-        .json::<T>()?
-    )
+        .json::<T>()?)
+}
+
+fn has_http_access() -> bool {
+    let http_access = Client::builder().build().unwrap();
+
+    match http_access
+        .head("https://github.com/dani-garcia/bitwarden_rs")
+        .timeout(Duration::from_secs(10))
+        .header(USER_AGENT, "Bitwarden_RS")
+        .send()
+    {
+        Ok(r) => r.status().is_success(),
+        _ => false,
+    }
 }

 #[get("/diagnostics")]
 fn diagnostics(_token: AdminToken, _conn: DbConn) -> ApiResult<Html<String>> {
-    use std::net::ToSocketAddrs;
-    use chrono::prelude::*;
     use crate::util::read_file_string;
+    use chrono::prelude::*;
+    use std::net::ToSocketAddrs;
+
+    // Get current running versions
     let vault_version_path = format!("{}/{}", CONFIG.web_vault_folder(), "version.json");
     let vault_version_str = read_file_string(&vault_version_path)?;
     let web_vault_version: WebVaultVersion = serde_json::from_str(&vault_version_str)?;

-    let github_ips = ("github.com", 0).to_socket_addrs().map(|mut i| i.next());
-    let (dns_resolved, dns_ok) = match github_ips {
-        Ok(Some(a)) => (a.ip().to_string(), true),
-        _ => ("Could not resolve domain name.".to_string(), false),
+    // Execute some environment checks
+    let running_within_docker = std::path::Path::new("/.dockerenv").exists() || std::path::Path::new("/run/.containerenv").exists();
+    let has_http_access = has_http_access();
+    let uses_proxy = env::var_os("HTTP_PROXY").is_some()
+        || env::var_os("http_proxy").is_some()
+        || env::var_os("HTTPS_PROXY").is_some()
+        || env::var_os("https_proxy").is_some();
+
+    // Check if we are able to resolve DNS entries
+    let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
+        Ok(Some(a)) => a.ip().to_string(),
+        _ => "Could not resolve domain name.".to_string(),
     };

-    // If the DNS Check failed, do not even attempt to check for new versions since we were not able to resolve github.com
-    let (latest_release, latest_commit, latest_web_build) = if dns_ok {
+    // If the HTTP check failed, do not even attempt to check for new versions, since we were not able to connect with github.com anyway.
+    // TODO: Maybe we need to cache this using a LazyStatic or something. GitHub only allows 60 requests per hour, and we use 3 here already.
+    let (latest_release, latest_commit, latest_web_build) = if has_http_access {
         (
             match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bitwarden_rs/releases/latest") {
                 Ok(r) => r.tag_name,
-                _ => "-".to_string()
+                _ => "-".to_string(),
             },
             match get_github_api::<GitCommit>("https://api.github.com/repos/dani-garcia/bitwarden_rs/commits/master") {
                 Ok(mut c) => {
                     c.sha.truncate(8);
                     c.sha
-                },
-                _ => "-".to_string()
+                }
+                _ => "-".to_string(),
             },
-            match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest") {
-                Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
-                _ => "-".to_string()
+            // Do not fetch the web-vault version when running within Docker.
+            // The web-vault version is embedded within the container itself, and should not be updated manually
+            if running_within_docker {
+                "-".to_string()
+            } else {
+                match get_github_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest") {
+                    Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
+                    _ => "-".to_string(),
+                }
             },
         )
     } else {
         ("-".to_string(), "-".to_string(), "-".to_string())
     };

-    // Run the date check as the last item right before filling the json.
-    // This should ensure that the time difference between the browser and the server is as minimal as possible.
-    let dt = Utc::now();
-    let server_time = dt.format("%Y-%m-%d %H:%M:%S").to_string();

     let diagnostics_json = json!({
         "dns_resolved": dns_resolved,
-        "server_time": server_time,
         "web_vault_version": web_vault_version.version,
         "latest_release": latest_release,
         "latest_commit": latest_commit,
         "latest_web_build": latest_web_build,
+        "running_within_docker": running_within_docker,
+        "has_http_access": has_http_access,
+        "uses_proxy": uses_proxy,
+        "db_type": *DB_TYPE,
+        "admin_url": format!("{}/diagnostics", admin_url(Referer(None))),
+        "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the date/time check as the last item to minimize the difference
    });

     let text = AdminTemplateData::diagnostics(diagnostics_json).render()?;
     Ok(Html(text))
 }

+#[get("/diagnostics/config")]
+fn get_diagnostics_config(_token: AdminToken) -> JsonResult {
+    let support_json = CONFIG.get_support_json();
+    Ok(Json(support_json))
+}
+
 #[post("/config", data = "<data>")]
 fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
     let data: ConfigBuilder = data.into_inner();
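
The connectivity probe added above maps any transport failure to `false` and bounds the check with a 10-second timeout. The same pattern extracted into a runnable sketch — with one deviation from the patch: the `unwrap()` on the client builder is replaced by a graceful fallback:

```rust
// Standalone version of the has_http_access probe (reqwest "blocking" feature).
use std::time::Duration;

use reqwest::blocking::Client;
use reqwest::header::USER_AGENT;

fn has_http_access() -> bool {
    let client = match Client::builder().build() {
        Ok(c) => c,
        Err(_) => return false, // builder failure also means "no access"
    };
    match client
        .head("https://github.com/dani-garcia/bitwarden_rs")
        .timeout(Duration::from_secs(10))
        .header(USER_AGENT, "Bitwarden_RS")
        .send()
    {
        Ok(r) => r.status().is_success(),
        _ => false,
    }
}

fn main() {
    println!("outbound HTTPS available: {}", has_http_access());
}
```
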
@@ -32,6 +32,7 @@ pub fn routes() -> Vec<rocket::Route> {
         revision_date,
         password_hint,
         prelogin,
+        verify_password,
     ]
 }

@@ -114,7 +115,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
             user.client_kdf_type = client_kdf_type;
         }

-        user.set_password(&data.MasterPasswordHash);
+        user.set_password(&data.MasterPasswordHash, None);
         user.akey = data.Key;

         // Add extra fields if present
@@ -231,7 +232,7 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
         err!("Invalid password")
     }

-    user.set_password(&data.NewMasterPasswordHash);
+    user.set_password(&data.NewMasterPasswordHash, Some("post_rotatekey"));
     user.akey = data.Key;
     user.save(&conn)
 }
@@ -258,7 +259,7 @@ fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, conn: DbConn) ->

     user.client_kdf_iter = data.KdfIterations;
     user.client_kdf_type = data.Kdf;
-    user.set_password(&data.NewMasterPasswordHash);
+    user.set_password(&data.NewMasterPasswordHash, None);
     user.akey = data.Key;
     user.save(&conn)
 }
@@ -337,6 +338,7 @@ fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, conn: DbConn, nt:
     user.akey = data.Key;
     user.private_key = Some(data.PrivateKey);
     user.reset_security_stamp();
+    user.reset_stamp_exception();

     user.save(&conn)
 }
@@ -444,7 +446,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
     user.email_new = None;
     user.email_new_token = None;

-    user.set_password(&data.NewMasterPasswordHash);
+    user.set_password(&data.NewMasterPasswordHash, None);
     user.akey = data.Key;

     user.save(&conn)
@@ -459,7 +461,7 @@ fn post_verify_email(headers: Headers, _conn: DbConn) -> EmptyResult {
     }

     if let Err(e) = mail::send_verify_email(&user.email, &user.uuid) {
-        error!("Error sending delete account email: {:#?}", e);
+        error!("Error sending verify_email email: {:#?}", e);
     }

     Ok(())
@@ -623,3 +625,20 @@ fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> JsonResult {
         "KdfIterations": kdf_iter
     })))
 }
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct VerifyPasswordData {
+    MasterPasswordHash: String,
+}
+
+#[post("/accounts/verify-password", data = "<data>")]
+fn verify_password(data: JsonUpcase<VerifyPasswordData>, headers: Headers, _conn: DbConn) -> EmptyResult {
+    let data: VerifyPasswordData = data.into_inner().data;
+    let user = headers.user;
+
+    if !user.check_valid_password(&data.MasterPasswordHash) {
+        err!("Invalid password")
+    }
+
+    Ok(())
+}
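
The new `VerifyPasswordData` follows the upstream-API convention used throughout these files: Bitwarden clients send PascalCase JSON keys, so the struct fields keep those names verbatim and silence the snake_case lint. A minimal standalone demonstration of that convention:

```rust
// PascalCase JSON keys map directly onto same-named fields; no serde renames needed.
use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct VerifyPasswordData {
    MasterPasswordHash: String,
}

fn main() {
    let body = r#"{ "MasterPasswordHash": "base64hash==" }"#;
    let data: VerifyPasswordData = serde_json::from_str(body).unwrap();
    println!("hash field: {}", data.MasterPasswordHash);
}
```
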
@@ -1,6 +1,7 @@
 use std::collections::{HashMap, HashSet};
 use std::path::Path;

+use chrono::{NaiveDateTime, Utc};
 use rocket::{http::ContentType, request::Form, Data, Route};
 use rocket_contrib::json::Json;
 use serde_json::Value;
@@ -17,6 +18,16 @@ use crate::{
 };

 pub fn routes() -> Vec<Route> {
+    // Note that many routes have an `admin` variant; this seems to be
+    // because the stored procedure that upstream Bitwarden uses to determine
+    // whether the user can edit a cipher doesn't take into account whether
+    // the user is an org owner/admin. The `admin` variant first checks
+    // whether the user is an owner/admin of the relevant org, and if so,
+    // allows the operation unconditionally.
+    //
+    // bitwarden_rs factors in the org owner/admin status as part of
+    // determining the write accessibility of a cipher, so most
+    // admin/non-admin implementations can be shared.
     routes![
         sync,
         get_ciphers,
@@ -38,7 +49,7 @@ pub fn routes() -> Vec<Route> {
         post_cipher_admin,
         post_cipher_share,
         put_cipher_share,
-        put_cipher_share_seleted,
+        put_cipher_share_selected,
         post_cipher,
         put_cipher,
         delete_cipher_post,
@@ -50,6 +61,9 @@ pub fn routes() -> Vec<Route> {
         delete_cipher_selected,
         delete_cipher_selected_post,
         delete_cipher_selected_put,
+        delete_cipher_selected_admin,
+        delete_cipher_selected_post_admin,
+        delete_cipher_selected_put_admin,
         restore_cipher_put,
         restore_cipher_put_admin,
         restore_cipher_selected,
@@ -77,12 +91,14 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {
     let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

     let collections = Collection::find_by_user_uuid(&headers.user.uuid, &conn);
-    let collections_json: Vec<Value> = collections.iter().map(Collection::to_json).collect();
+    let collections_json: Vec<Value> = collections.iter()
+        .map(|c| c.to_json_details(&headers.user.uuid, &conn))
+        .collect();

     let policies = OrgPolicy::find_by_user(&headers.user.uuid, &conn);
     let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();

-    let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
+    let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);
     let ciphers_json: Vec<Value> = ciphers
         .iter()
         .map(|c| c.to_json(&headers.host, &headers.user.uuid, &conn))
@@ -107,7 +123,7 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> JsonResult {

 #[get("/ciphers")]
 fn get_ciphers(headers: Headers, conn: DbConn) -> JsonResult {
-    let ciphers = Cipher::find_by_user(&headers.user.uuid, &conn);
+    let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);

     let ciphers_json: Vec<Value> = ciphers
         .iter()
@@ -181,6 +197,14 @@ pub struct CipherData {
     #[serde(rename = "Attachments")]
     _Attachments: Option<Value>, // Unused, contains map of {id: filename}
     Attachments2: Option<HashMap<String, Attachments2Data>>,
+
+    // The revision datetime (in ISO 8601 format) of the client's local copy
+    // of the cipher. This is used to prevent a client from updating a cipher
+    // when it doesn't have the latest version, as that can result in data
+    // loss. It's not an error when no value is provided; this can happen
+    // when using older client versions, or if the operation doesn't involve
+    // updating an existing cipher.
+    LastKnownRevisionDate: Option<String>,
 }

 #[derive(Deserialize, Debug)]
@@ -190,22 +214,46 @@ pub struct Attachments2Data {
     Key: String,
 }

+/// Called when an org admin clones an org cipher.
 #[post("/ciphers/admin", data = "<data>")]
 fn post_ciphers_admin(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    let data: ShareCipherData = data.into_inner().data;
+    post_ciphers_create(data, headers, conn, nt)
+}
+
+/// Called when creating a new org-owned cipher, or cloning a cipher (whether
+/// user- or org-owned). When cloning a cipher to a user-owned cipher,
+/// `organizationId` is null.
+#[post("/ciphers/create", data = "<data>")]
+fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+    let mut data: ShareCipherData = data.into_inner().data;
+
+    // Check if there are one or more collections selected when this cipher is part of an organization.
+    // Err if this is not the case before creating an empty cipher.
+    if data.Cipher.OrganizationId.is_some() && data.CollectionIds.is_empty() {
+        err!("You must select at least one collection.");
+    }
+
+    // This check is usually only needed in update_cipher_from_data(), but we
+    // need it here as well to avoid creating an empty cipher in the call to
+    // cipher.save() below.
+    enforce_personal_ownership_policy(&data.Cipher, &headers, &conn)?;

     let mut cipher = Cipher::new(data.Cipher.Type, data.Cipher.Name.clone());
     cipher.user_uuid = Some(headers.user.uuid.clone());
     cipher.save(&conn)?;

+    // When cloning a cipher, the Bitwarden clients seem to set this field
+    // based on the cipher being cloned (when creating a new cipher, it's set
+    // to null as expected). However, `cipher.created_at` is initialized to
+    // the current time, so the stale data check will end up failing down the
+    // line. Since this function only creates new ciphers (whether by cloning
+    // or otherwise), we can just ignore this field entirely.
+    data.Cipher.LastKnownRevisionDate = None;
+
     share_cipher_by_uuid(&cipher.uuid, data, &headers, &conn, &nt)
 }

-#[post("/ciphers/create", data = "<data>")]
-fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
-    post_ciphers_admin(data, headers, conn, nt)
-}
-
+/// Called when creating a new user-owned cipher.
 #[post("/ciphers", data = "<data>")]
 fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
     let data: CipherData = data.into_inner().data;
@@ -216,6 +264,38 @@ fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
 }

+/// Enforces the personal ownership policy on user-owned ciphers, if applicable.
+/// A non-owner/admin user belonging to an org with the personal ownership policy
+/// enabled isn't allowed to create new user-owned ciphers or modify existing ones
+/// (that were created before the policy was applicable to the user). The user is
+/// allowed to delete or share such ciphers to an org, however.
+///
+/// Ref: https://bitwarden.com/help/article/policies/#personal-ownership
+fn enforce_personal_ownership_policy(
+    data: &CipherData,
+    headers: &Headers,
+    conn: &DbConn
+) -> EmptyResult {
+    if data.OrganizationId.is_none() {
+        let user_uuid = &headers.user.uuid;
+        for policy in OrgPolicy::find_by_user(user_uuid, conn) {
+            if policy.enabled && policy.has_type(OrgPolicyType::PersonalOwnership) {
+                let org_uuid = &policy.org_uuid;
+                match UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn) {
+                    Some(user) =>
+                        if user.atype < UserOrgType::Admin &&
+                            user.has_status(UserOrgStatus::Confirmed) {
+                            err!("Due to an Enterprise Policy, you are restricted \
+                                from saving items to your personal vault.")
+                        },
+                    None => err!("Error looking up user type"),
+                }
+            }
+        }
+    }
+    Ok(())
+}
+
 pub fn update_cipher_from_data(
     cipher: &mut Cipher,
     data: CipherData,
@@ -225,6 +305,19 @@ pub fn update_cipher_from_data(
     nt: &Notify,
     ut: UpdateType,
 ) -> EmptyResult {
+    enforce_personal_ownership_policy(&data, headers, conn)?;
+
+    // Check that the client isn't updating an existing cipher with stale data.
+    if let Some(dt) = data.LastKnownRevisionDate {
+        match NaiveDateTime::parse_from_str(&dt, "%+") { // ISO 8601 format
+            Err(err) =>
+                warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
+            Ok(dt) if cipher.updated_at.signed_duration_since(dt).num_seconds() > 1 =>
+                err!("The client copy of this cipher is out of date. Resync the client and try again."),
+            Ok(_) => (),
+        }
+    }
+
     if cipher.organization_uuid.is_some() && cipher.organization_uuid != data.OrganizationId {
         err!("Organization mismatch. Please resync the client before updating the cipher")
     }
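
The stale-data check above parses the client's ISO 8601 timestamp with chrono's `%+` specifier (the offset is parsed but ignored for a `NaiveDateTime`) and tolerates up to one second of skew before rejecting the update. A runnable sketch of just that comparison; the helper name and sample values are illustrative:

```rust
// Standalone version of the LastKnownRevisionDate staleness test.
use chrono::NaiveDateTime;

fn is_stale(server_updated_at: NaiveDateTime, client_dt: &str) -> bool {
    match NaiveDateTime::parse_from_str(client_dt, "%+") {
        // More than 1 second older than the server copy => stale.
        Ok(dt) => server_updated_at.signed_duration_since(dt).num_seconds() > 1,
        // The patch only warns on unparsable dates; it does not reject them.
        Err(_) => false,
    }
}

fn main() {
    let server = NaiveDateTime::parse_from_str("2020-12-01T10:00:05.000Z", "%+").unwrap();
    assert!(is_stale(server, "2020-12-01T10:00:00.000Z"));
    assert!(!is_stale(server, "2020-12-01T10:00:05.000Z"));
}
```
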
@@ -238,6 +331,11 @@ pub fn update_cipher_from_data(
             || cipher.is_write_accessible_to_user(&headers.user.uuid, &conn)
         {
             cipher.organization_uuid = Some(org_id);
+            // After some discussion in PR #1329, re-added the user_uuid = None again.
+            // TODO: Audit/Check the whole save/update cipher chain.
+            // Upstream uses the user_uuid to allow a cipher added by a user to an org to still allow the user to view/edit the cipher
+            // even when the user has hide-passwords configured as their policy.
+            // Removing the line below would fix that, but we have to check which effect this would have on the rest of the code.
             cipher.user_uuid = None;
         } else {
             err!("You don't have permission to add cipher directly to organization")
@@ -281,6 +379,23 @@ pub fn update_cipher_from_data(
         }
     }

+    // Cleanup cipher data, like removing the 'Response' key.
+    // This key is generated somewhere in the JavaScript client, so there is no way for us to fix this at the source.
+    // Also, upstream only retrieves the keys they actually want to store, and thus skips the 'Response' key.
+    // We do not mind which data is in it; this keeps our model more flexible when there are upstream changes.
+    // But we at least know we do not need to store and return this specific key.
+    fn _clean_cipher_data(mut json_data: Value) -> Value {
+        if json_data.is_array() {
+            json_data.as_array_mut()
+                .unwrap()
+                .iter_mut()
+                .for_each(|ref mut f| {
+                    f.as_object_mut().unwrap().remove("Response");
+                });
+        };
+        json_data
+    }
+
     let type_data_opt = match data.Type {
         1 => data.Login,
         2 => data.SecureNote,
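
`_clean_cipher_data` strips the junk `Response` key from every object in a JSON array before the data is persisted. The same idea as a standalone, panic-free sketch (the patch itself uses `unwrap()` on the array and object accessors, which is safe there only because of the `is_array()` guard):

```rust
// Remove a known junk key from each object in a serde_json array.
use serde_json::{json, Value};

fn clean(mut v: Value) -> Value {
    if let Some(arr) = v.as_array_mut() {
        for item in arr.iter_mut() {
            if let Some(obj) = item.as_object_mut() {
                obj.remove("Response");
            }
        }
    }
    v
}

fn main() {
    let uris = json!([{ "Uri": "https://example.com", "Response": null }]);
    assert_eq!(clean(uris), json!([{ "Uri": "https://example.com" }]));
}
```
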
@@ -289,29 +404,28 @@ pub fn update_cipher_from_data(
         _ => err!("Invalid type"),
     };

-    let mut type_data = match type_data_opt {
-        Some(data) => data,
+    let type_data = match type_data_opt {
+        Some(mut data) => {
+            // Remove the 'Response' key from the base object.
+            data.as_object_mut().unwrap().remove("Response");
+            // Remove the 'Response' key from every Uri.
+            if data["Uris"].is_array() {
+                data["Uris"] = _clean_cipher_data(data["Uris"].clone());
+            }
+            data
+        },
         None => err!("Data missing"),
     };

-    // TODO: ******* Backwards compat start **********
-    // To remove backwards compatibility, just delete this code,
-    // and remove the compat code from cipher::to_json
-    type_data["Name"] = Value::String(data.Name.clone());
-    type_data["Notes"] = data.Notes.clone().map(Value::String).unwrap_or(Value::Null);
-    type_data["Fields"] = data.Fields.clone().unwrap_or(Value::Null);
-    type_data["PasswordHistory"] = data.PasswordHistory.clone().unwrap_or(Value::Null);
-    // TODO: ******* Backwards compat end **********

-    cipher.favorite = data.Favorite.unwrap_or(false);
     cipher.name = data.Name;
     cipher.notes = data.Notes;
-    cipher.fields = data.Fields.map(|f| f.to_string());
+    cipher.fields = data.Fields.map(|f| _clean_cipher_data(f).to_string());
     cipher.data = type_data.to_string();
     cipher.password_history = data.PasswordHistory.map(|f| f.to_string());

     cipher.save(&conn)?;
     cipher.move_to_folder(data.FolderId, &headers.user.uuid, &conn)?;
+    cipher.set_favorite(data.Favorite, &headers.user.uuid, &conn)?;

     if ut != UpdateType::None {
         nt.send_cipher_update(ut, &cipher, &cipher.update_users_revision(&conn));
@@ -374,6 +488,7 @@ fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbC
     Ok(())
 }

+/// Called when an org admin modifies an existing org cipher.
 #[put("/ciphers/<uuid>/admin", data = "<data>")]
 fn put_cipher_admin(
     uuid: String,
@@ -410,6 +525,11 @@ fn put_cipher(uuid: String, data: JsonUpcase<CipherData>, headers: Headers, conn
         None => err!("Cipher doesn't exist"),
     };

+    // TODO: Check if only the folder ID or favorite status is being changed.
+    // These are per-user properties that technically aren't part of the
+    // cipher itself, so the user shouldn't need write access to change these.
+    // Interestingly, upstream Bitwarden doesn't properly handle this either.
+
     if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn) {
         err!("Cipher is not write accessible")
     }
@@ -543,7 +663,7 @@ struct ShareSelectedCipherData {
 }

 #[put("/ciphers/share", data = "<data>")]
-fn put_cipher_share_seleted(
+fn put_cipher_share_selected(
     data: JsonUpcase<ShareSelectedCipherData>,
     headers: Headers,
     conn: DbConn,
@@ -857,22 +977,37 @@ fn delete_cipher_selected_post(data: JsonUpcase<Value>, headers: Headers, conn:

 #[put("/ciphers/delete", data = "<data>")]
 fn delete_cipher_selected_put(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    _delete_multiple_ciphers(data, headers, conn, true, nt)
+    _delete_multiple_ciphers(data, headers, conn, true, nt) // soft delete
+}
+
+#[delete("/ciphers/admin", data = "<data>")]
+fn delete_cipher_selected_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+    delete_cipher_selected(data, headers, conn, nt)
+}
+
+#[post("/ciphers/delete-admin", data = "<data>")]
+fn delete_cipher_selected_post_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+    delete_cipher_selected_post(data, headers, conn, nt)
+}
+
+#[put("/ciphers/delete-admin", data = "<data>")]
+fn delete_cipher_selected_put_admin(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+    delete_cipher_selected_put(data, headers, conn, nt)
 }

 #[put("/ciphers/<uuid>/restore")]
-fn restore_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+fn restore_cipher_put(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
     _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
 }

 #[put("/ciphers/<uuid>/restore-admin")]
-fn restore_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+fn restore_cipher_put_admin(uuid: String, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
     _restore_cipher_by_uuid(&uuid, &headers, &conn, &nt)
 }

 #[put("/ciphers/restore", data = "<data>")]
-fn restore_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
-    _restore_multiple_ciphers(data, headers, conn, nt)
+fn restore_cipher_selected(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> JsonResult {
+    _restore_multiple_ciphers(data, &headers, &conn, &nt)
 }

 #[derive(Deserialize)]
@@ -958,7 +1093,6 @@ fn delete_all(
         Some(user_org) => {
             if user_org.atype == UserOrgType::Owner {
                 Cipher::delete_all_by_organization(&org_data.org_id, &conn)?;
-                Collection::delete_all_by_organization(&org_data.org_id, &conn)?;
                 nt.send_user_update(UpdateType::Vault, &user);
                 Ok(())
             } else {
@@ -997,13 +1131,14 @@ fn _delete_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, soft_del
     }

     if soft_delete {
-        cipher.deleted_at = Some(chrono::Utc::now().naive_utc());
+        cipher.deleted_at = Some(Utc::now().naive_utc());
         cipher.save(&conn)?;
+        nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
     } else {
         cipher.delete(&conn)?;
+        nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(&conn));
     }

-    nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(&conn));
     Ok(())
 }

@@ -1027,7 +1162,7 @@ fn _delete_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: DbC
     Ok(())
 }

-fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> EmptyResult {
+fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
     let mut cipher = match Cipher::find_by_uuid(&uuid, &conn) {
         Some(cipher) => cipher,
         None => err!("Cipher doesn't exist"),
@@ -1041,10 +1176,10 @@ fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &DbConn, nt: &No
     cipher.save(&conn)?;

     nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&conn));
-    Ok(())
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, &conn)))
 }

-fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
+fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: &Headers, conn: &DbConn, nt: &Notify) -> JsonResult {
     let data: Value = data.into_inner().data;

     let uuids = match data.get("Ids") {
@@ -1055,13 +1190,19 @@ fn _restore_multiple_ciphers(data: JsonUpcase<Value>, headers: Headers, conn: Db
         None => err!("Request missing ids field"),
     };

+    let mut ciphers: Vec<Value> = Vec::new();
     for uuid in uuids {
-        if let error @ Err(_) = _restore_cipher_by_uuid(uuid, &headers, &conn, &nt) {
-            return error;
-        };
+        match _restore_cipher_by_uuid(uuid, headers, conn, nt) {
+            Ok(json) => ciphers.push(json.into_inner()),
+            err => return err
+        }
     }

-    Ok(())
+    Ok(Json(json!({
+        "Data": ciphers,
+        "Object": "list",
+        "ContinuationToken": null
+    })))
 }

 fn _delete_cipher_attachment_by_id(
|||||||
@@ -172,7 +172,7 @@ fn hibp_breach(username: String) -> JsonResult {
|
|||||||
"Domain": "haveibeenpwned.com",
|
"Domain": "haveibeenpwned.com",
|
||||||
"BreachDate": "2019-08-18T00:00:00Z",
|
"BreachDate": "2019-08-18T00:00:00Z",
|
||||||
"AddedDate": "2019-08-18T00:00:00Z",
|
"AddedDate": "2019-08-18T00:00:00Z",
|
||||||
"Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noopener\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
|
"Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
|
||||||
"LogoPath": "bwrs_static/hibp.png",
|
"LogoPath": "bwrs_static/hibp.png",
|
||||||
"PwnCount": 0,
|
"PwnCount": 0,
|
||||||
"DataClasses": [
|
"DataClasses": [
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ use serde_json::Value;

 use crate::{
     api::{EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, Notify, NumberOrString, PasswordData, UpdateType},
-    auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders},
+    auth::{decode_invite, AdminHeaders, Headers, OwnerHeaders, ManagerHeaders, ManagerHeadersLoose},
     db::{models::*, DbConn},
     mail, CONFIG,
 };
@@ -47,6 +47,10 @@ pub fn routes() -> Vec<Route> {
         list_policies_token,
         get_policy,
         put_policy,
+        get_organization_tax,
+        get_plans,
+        get_plans_tax_rates,
+        import,
     ]
 }

@@ -76,6 +80,10 @@ struct NewCollectionData {

 #[post("/organizations", data = "<data>")]
 fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn) -> JsonResult {
+    if !CONFIG.is_org_creation_allowed(&headers.user.email) {
+        err!("User not allowed to create organizations")
+    }
+
     let data: OrgData = data.into_inner().data;

     let org = Organization::new(data.Name, data.BillingEmail);
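
`CONFIG.is_org_creation_allowed` is defined outside this diff. A hedged sketch of how such an allowlist check is commonly implemented — an empty list means unrestricted, otherwise the email must appear (case-insensitively) in a comma-separated list. This is an assumption about, not a copy of, the project's logic:

```rust
// Illustrative allowlist check for org creation.
fn is_org_creation_allowed(allowlist: &str, email: &str) -> bool {
    if allowlist.trim().is_empty() {
        return true; // no restriction configured
    }
    allowlist
        .split(',')
        .map(str::trim)
        .any(|allowed| allowed.eq_ignore_ascii_case(email))
}

fn main() {
    assert!(is_org_creation_allowed("", "anyone@example.com"));
    assert!(is_org_creation_allowed("admin@example.com, ops@example.com", "Admin@Example.com"));
    assert!(!is_org_creation_allowed("admin@example.com", "user@example.com"));
}
```
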
@@ -212,7 +220,7 @@ fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) ->
 #[post("/organizations/<org_id>/collections", data = "<data>")]
 fn post_organization_collections(
     org_id: String,
-    _headers: AdminHeaders,
+    headers: ManagerHeadersLoose,
     data: JsonUpcase<NewCollectionData>,
     conn: DbConn,
 ) -> JsonResult {
@@ -223,9 +231,22 @@ fn post_organization_collections(
         None => err!("Can't find organization details"),
     };

+    // Get the user_organization record so that we can check if the user has access to all collections.
+    let user_org = match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
+        Some(u) => u,
+        None => err!("User is not part of organization"),
+    };
+
     let collection = Collection::new(org.uuid, data.Name);
     collection.save(&conn)?;

+    // If the user doesn't have access to all collections, which is only the case for a Manager,
+    // then we need to save the creating user's uuid (the Manager) to the users_collection table.
+    // Otherwise the user would not have access to the collection they just created.
+    if !user_org.access_all {
+        CollectionUser::save(&headers.user.uuid, &collection.uuid, false, false, &conn)?;
+    }
+
     Ok(Json(collection.to_json()))
 }

@@ -233,7 +254,7 @@ fn post_organization_collections(
 fn put_organization_collection_update(
     org_id: String,
     col_id: String,
-    headers: AdminHeaders,
+    headers: ManagerHeaders,
     data: JsonUpcase<NewCollectionData>,
     conn: DbConn,
 ) -> JsonResult {
@@ -244,7 +265,7 @@ fn put_organization_collection_update(
 fn post_organization_collection_update(
     org_id: String,
     col_id: String,
-    _headers: AdminHeaders,
+    _headers: ManagerHeaders,
     data: JsonUpcase<NewCollectionData>,
     conn: DbConn,
 ) -> JsonResult {
@@ -312,7 +333,7 @@ fn post_organization_collection_delete_user(
 }

 #[delete("/organizations/<org_id>/collections/<col_id>")]
-fn delete_organization_collection(org_id: String, col_id: String, _headers: AdminHeaders, conn: DbConn) -> EmptyResult {
+fn delete_organization_collection(org_id: String, col_id: String, _headers: ManagerHeaders, conn: DbConn) -> EmptyResult {
     match Collection::find_by_uuid(&col_id, &conn) {
         None => err!("Collection not found"),
         Some(collection) => {
@@ -336,7 +357,7 @@ struct DeleteCollectionData {
 fn post_organization_collection_delete(
     org_id: String,
     col_id: String,
-    headers: AdminHeaders,
+    headers: ManagerHeaders,
     _data: JsonUpcase<DeleteCollectionData>,
     conn: DbConn,
 ) -> EmptyResult {
@@ -344,7 +365,7 @@ fn post_organization_collection_delete(
 }

 #[get("/organizations/<org_id>/collections/<coll_id>/details")]
-fn get_org_collection_detail(org_id: String, coll_id: String, headers: AdminHeaders, conn: DbConn) -> JsonResult {
+fn get_org_collection_detail(org_id: String, coll_id: String, headers: ManagerHeaders, conn: DbConn) -> JsonResult {
     match Collection::find_by_uuid_and_user(&coll_id, &headers.user.uuid, &conn) {
         None => err!("Collection not found"),
         Some(collection) => {
@@ -358,7 +379,7 @@ fn get_org_collection_detail(org_id: String, coll_id: String, headers: AdminHead
 }

 #[get("/organizations/<org_id>/collections/<coll_id>/users")]
-fn get_collection_users(org_id: String, coll_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
+fn get_collection_users(org_id: String, coll_id: String, _headers: ManagerHeaders, conn: DbConn) -> JsonResult {
     // Get org and collection, check that collection is from org
     let collection = match Collection::find_by_uuid_and_org(&coll_id, &org_id, &conn) {
         None => err!("Collection not found in Organization"),
@@ -383,7 +404,7 @@ fn put_collection_users(
     org_id: String,
     coll_id: String,
     data: JsonUpcaseVec<CollectionData>,
-    _headers: AdminHeaders,
+    _headers: ManagerHeaders,
     conn: DbConn,
 ) -> EmptyResult {
     // Get org and collection, check that collection is from org
@@ -435,7 +456,7 @@ fn get_org_details(data: Form<OrgIdData>, headers: Headers, conn: DbConn) -> Jso
 }

 #[get("/organizations/<org_id>/users")]
-fn get_org_users(org_id: String, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
+fn get_org_users(org_id: String, _headers: ManagerHeadersLoose, conn: DbConn) -> JsonResult {
     let users = UserOrganization::find_by_org(&org_id, &conn);
     let users_json: Vec<Value> = users.iter().map(|c| c.to_json_user_details(&conn)).collect();

@@ -948,7 +969,7 @@ fn list_policies_token(org_id: String, token: String, conn: DbConn) -> JsonResul
|
|||||||
fn get_policy(org_id: String, pol_type: i32, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
|
fn get_policy(org_id: String, pol_type: i32, _headers: AdminHeaders, conn: DbConn) -> JsonResult {
|
||||||
let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
|
let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
|
||||||
Some(pt) => pt,
|
Some(pt) => pt,
|
||||||
None => err!("Invalid policy type"),
|
None => err!("Invalid or unsupported policy type"),
|
||||||
};
|
};
|
||||||
|
|
||||||
let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
|
let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type, &conn) {
|
||||||
@@ -987,3 +1008,169 @@ fn put_policy(org_id: String, pol_type: i32, data: Json<PolicyData>, _headers: A
|
|||||||
|
|
||||||
Ok(Json(policy.to_json()))
|
Ok(Json(policy.to_json()))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[allow(unused_variables)]
|
||||||
|
#[get("/organizations/<org_id>/tax")]
|
||||||
|
fn get_organization_tax(org_id: String, _headers: Headers, _conn: DbConn) -> EmptyResult {
|
||||||
|
// Prevent a 404 error, which also causes Javascript errors.
|
||||||
|
err!("Only allowed when not self hosted.")
|
||||||
|
}
|
||||||
|
|
||||||
|
#[get("/plans")]
|
||||||
|
fn get_plans(_headers: Headers, _conn: DbConn) -> JsonResult {
|
||||||
|
Ok(Json(json!({
|
||||||
|
"Object": "list",
|
||||||
|
"Data": [
|
||||||
|
{
|
||||||
|
"Object": "plan",
|
||||||
|
"Type": 0,
|
||||||
|
"Product": 0,
|
||||||
|
"Name": "Free",
|
||||||
|
"IsAnnual": false,
|
||||||
|
"NameLocalizationKey": "planNameFree",
|
||||||
|
"DescriptionLocalizationKey": "planDescFree",
|
||||||
|
"CanBeUsedByBusiness": false,
|
||||||
|
"BaseSeats": 2,
|
||||||
|
"BaseStorageGb": null,
|
||||||
|
"MaxCollections": 2,
|
||||||
|
"MaxUsers": 2,
|
||||||
|
"HasAdditionalSeatsOption": false,
|
||||||
|
"MaxAdditionalSeats": null,
|
||||||
|
"HasAdditionalStorageOption": false,
|
||||||
|
"MaxAdditionalStorage": null,
|
||||||
|
"HasPremiumAccessOption": false,
|
||||||
|
"TrialPeriodDays": null,
|
||||||
|
"HasSelfHost": false,
|
||||||
|
"HasPolicies": false,
|
||||||
|
"HasGroups": false,
|
||||||
|
"HasDirectory": false,
|
||||||
|
"HasEvents": false,
|
||||||
|
"HasTotp": false,
|
||||||
|
"Has2fa": false,
|
||||||
|
"HasApi": false,
|
||||||
|
"HasSso": false,
|
||||||
|
"UsersGetPremium": false,
|
||||||
|
"UpgradeSortOrder": -1,
|
||||||
|
"DisplaySortOrder": -1,
|
||||||
|
"LegacyYear": null,
|
||||||
|
"Disabled": false,
|
||||||
|
"StripePlanId": null,
|
||||||
|
"StripeSeatPlanId": null,
|
||||||
|
"StripeStoragePlanId": null,
|
||||||
|
"StripePremiumAccessPlanId": null,
|
||||||
|
"BasePrice": 0.0,
|
||||||
|
"SeatPrice": 0.0,
|
||||||
|
"AdditionalStoragePricePerGb": 0.0,
|
||||||
|
"PremiumAccessOptionPrice": 0.0
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"ContinuationToken": null
|
||||||
|
})))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[get("/plans/sales-tax-rates")]
|
||||||
|
fn get_plans_tax_rates(_headers: Headers, _conn: DbConn) -> JsonResult {
|
||||||
|
// Prevent a 404 error, which also causes Javascript errors.
|
||||||
|
Ok(Json(json!({
|
||||||
|
"Object": "list",
|
||||||
|
"Data": [],
|
||||||
|
"ContinuationToken": null
|
||||||
|
})))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Debug)]
|
||||||
|
#[allow(non_snake_case)]
|
||||||
|
struct OrgImportGroupData {
|
||||||
|
Name: String, // "GroupName"
|
||||||
|
ExternalId: String, // "cn=GroupName,ou=Groups,dc=example,dc=com"
|
||||||
|
Users: Vec<String>, // ["uid=user,ou=People,dc=example,dc=com"]
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Debug)]
|
||||||
|
#[allow(non_snake_case)]
|
||||||
|
struct OrgImportUserData {
|
||||||
|
Email: String, // "user@maildomain.net"
|
||||||
|
ExternalId: String, // "uid=user,ou=People,dc=example,dc=com"
|
||||||
|
Deleted: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Debug)]
|
||||||
|
#[allow(non_snake_case)]
|
||||||
|
struct OrgImportData {
|
||||||
|
Groups: Vec<OrgImportGroupData>,
|
||||||
|
OverwriteExisting: bool,
|
||||||
|
Users: Vec<OrgImportUserData>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[post("/organizations/<org_id>/import", data = "<data>")]
|
||||||
|
fn import(org_id: String, data: JsonUpcase<OrgImportData>, headers: Headers, conn: DbConn) -> EmptyResult {
|
||||||
|
let data = data.into_inner().data;
|
||||||
|
|
||||||
|
// TODO: Currently we aren't storing the externalId's anywhere, so we also don't have a way
|
||||||
|
// to differentiate between auto-imported users and manually added ones.
|
||||||
|
// This means that this endpoint can end up removing users that were added manually by an admin,
|
||||||
|
// as opposed to upstream which only removes auto-imported users.
|
||||||
|
|
||||||
|
// User needs to be admin or owner to use the Directry Connector
|
||||||
|
match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &conn) {
|
||||||
|
Some(user_org) if user_org.atype >= UserOrgType::Admin => { /* Okay, nothing to do */ }
|
||||||
|
Some(_) => err!("User has insufficient permissions to use Directory Connector"),
|
||||||
|
None => err!("User not part of organization"),
|
||||||
|
};
|
||||||
|
|
||||||
|
for user_data in &data.Users {
|
||||||
|
if user_data.Deleted {
|
||||||
|
// If user is marked for deletion and it exists, delete it
|
||||||
|
if let Some(user_org) = UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn) {
|
||||||
|
user_org.delete(&conn)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If user is not part of the organization, but it exists
|
||||||
|
} else if UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &conn).is_none() {
|
||||||
|
if let Some (user) = User::find_by_mail(&user_data.Email, &conn) {
|
||||||
|
|
||||||
|
let user_org_status = if CONFIG.mail_enabled() {
|
||||||
|
UserOrgStatus::Invited as i32
|
||||||
|
} else {
|
||||||
|
UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
|
||||||
|
new_org_user.access_all = false;
|
||||||
|
new_org_user.atype = UserOrgType::User as i32;
|
||||||
|
new_org_user.status = user_org_status;
|
||||||
|
|
||||||
|
new_org_user.save(&conn)?;
|
||||||
|
|
||||||
|
if CONFIG.mail_enabled() {
|
||||||
|
let org_name = match Organization::find_by_uuid(&org_id, &conn) {
|
||||||
|
Some(org) => org.name,
|
||||||
|
None => err!("Error looking up organization"),
|
||||||
|
};
|
||||||
|
|
||||||
|
mail::send_invite(
|
||||||
|
&user_data.Email,
|
||||||
|
&user.uuid,
|
||||||
|
Some(org_id.clone()),
|
||||||
|
Some(new_org_user.uuid),
|
||||||
|
&org_name,
|
||||||
|
Some(headers.user.email.clone()),
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true)
|
||||||
|
if data.OverwriteExisting {
|
||||||
|
for user_org in UserOrganization::find_by_org_and_type(&org_id, UserOrgType::User as i32, &conn) {
|
||||||
|
if let Some (user_email) = User::find_by_uuid(&user_org.user_uuid, &conn).map(|u| u.email) {
|
||||||
|
if !data.Users.iter().any(|u| u.Email == user_email) {
|
||||||
|
user_org.delete(&conn)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|||||||
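
A quick sketch of the request body the new `import` endpoint expects. This is a hypothetical Directory Connector payload for `JsonUpcase<OrgImportData>` (all values invented for illustration); `JsonUpcase` upcases the first letter of incoming keys, so they land on the upcased struct fields:

    {
        "Groups": [
            {
                "Name": "Developers",
                "ExternalId": "cn=Developers,ou=Groups,dc=example,dc=com",
                "Users": ["uid=alice,ou=People,dc=example,dc=com"]
            }
        ],
        "OverwriteExisting": false,
        "Users": [
            {
                "Email": "alice@example.com",
                "ExternalId": "uid=alice,ou=People,dc=example,dc=com",
                "Deleted": false
            }
        ]
    }

With `OverwriteExisting: true`, any organization member of type User whose email is missing from `Users` is deleted by the final loop; with `false`, existing members are kept unless explicitly marked `Deleted`.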
258  src/api/icons.rs
@@ -1,13 +1,15 @@
 use std::{
+    collections::HashMap,
     fs::{create_dir_all, remove_file, symlink_metadata, File},
     io::prelude::*,
     net::{IpAddr, ToSocketAddrs},
+    sync::RwLock,
     time::{Duration, SystemTime},
 };
 
 use once_cell::sync::Lazy;
 use regex::Regex;
-use reqwest::{blocking::Client, blocking::Response, header::HeaderMap, Url};
+use reqwest::{blocking::Client, blocking::Response, header, Url};
 use rocket::{http::ContentType, http::Cookie, response::Content, Route};
 use soup::prelude::*;
 
@@ -17,33 +19,67 @@ pub fn routes() -> Vec<Route> {
     routes![icon]
 }
 
-const FALLBACK_ICON: &[u8; 344] = include_bytes!("../static/fallback-icon.png");
-
 const ALLOWED_CHARS: &str = "_-.";
 
 static CLIENT: Lazy<Client> = Lazy::new(|| {
+    // Generate the default headers
+    let mut default_headers = header::HeaderMap::new();
+    default_headers.insert(header::USER_AGENT, header::HeaderValue::from_static("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Safari/605.1.15"));
+    default_headers.insert(header::ACCEPT_LANGUAGE, header::HeaderValue::from_static("en-US,en;q=0.8"));
+    default_headers.insert(header::CACHE_CONTROL, header::HeaderValue::from_static("no-cache"));
+    default_headers.insert(header::PRAGMA, header::HeaderValue::from_static("no-cache"));
+    default_headers.insert(header::ACCEPT, header::HeaderValue::from_static("text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,image/apng,*/*;q=0.8"));
+
     // Reuse the client between requests
     Client::builder()
         .timeout(Duration::from_secs(CONFIG.icon_download_timeout()))
-        .default_headers(_header_map())
+        .default_headers(default_headers)
         .build()
         .unwrap()
 });
 
-static ICON_REL_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"icon$|apple.*icon").unwrap());
-static ICON_HREF_REGEX: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"(?i)\w+\.(jpg|jpeg|png|ico)(\?.*)?$|^data:image.*base64").unwrap());
+// Build Regex only once since this takes a lot of time.
+static ICON_REL_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?i)icon$|apple.*icon").unwrap());
 static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());
+
+// Special HashMap which holds the user-defined Regex to speed up matching the regex.
+static ICON_BLACKLIST_REGEX: Lazy<RwLock<HashMap<String, Regex>>> = Lazy::new(|| RwLock::new(HashMap::new()));
+
+#[get("/<domain>/icon.png")]
+fn icon(domain: String) -> Option<Cached<Content<Vec<u8>>>> {
+    if !is_valid_domain(&domain) {
+        warn!("Invalid domain: {}", domain);
+        return None;
+    }
+
+    get_icon(&domain).map(|icon| Cached::long(Content(ContentType::new("image", "x-icon"), icon)))
+}
+
+/// Returns if the domain provided is valid or not.
+///
+/// This does some manual checks and makes use of Url to do some basic checking.
+/// Domains can't be larger than 63 characters (not counting multiple subdomains) according to the RFCs, but we limit the total size to 255.
 fn is_valid_domain(domain: &str) -> bool {
-    // Don't allow empty or too big domains or path traversal
-    if domain.is_empty() || domain.len() > 255 || domain.contains("..") {
+    // If parsing the domain fails using Url, it will not work with reqwest.
+    if let Err(parse_error) = Url::parse(format!("https://{}", domain).as_str()) {
+        debug!("Domain parse error: '{}' - {:?}", domain, parse_error);
+        return false;
+    } else if domain.is_empty()
+        || domain.contains("..")
+        || domain.starts_with('.')
+        || domain.starts_with('-')
+        || domain.ends_with('-')
+    {
+        debug!("Domain validation error: '{}' is either empty, contains '..', starts with a '.', or starts or ends with a '-'", domain);
+        return false;
+    } else if domain.len() > 255 {
+        debug!("Domain validation error: '{}' exceeds 255 characters", domain);
         return false;
     }
 
-    // Only alphanumeric or specific characters
     for c in domain.chars() {
         if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) {
+            debug!("Domain validation error: '{}' contains an invalid character '{}'", domain, c);
             return false;
         }
     }
@@ -51,21 +87,10 @@ fn is_valid_domain(domain: &str) -> bool {
     true
 }
 
-#[get("/<domain>/icon.png")]
-fn icon(domain: String) -> Cached<Content<Vec<u8>>> {
-    let icon_type = ContentType::new("image", "x-icon");
-
-    if !is_valid_domain(&domain) {
-        warn!("Invalid domain: {:#?}", domain);
-        return Cached::long(Content(icon_type, FALLBACK_ICON.to_vec()));
-    }
-
-    Cached::long(Content(icon_type, get_icon(&domain)))
-}
-
 /// TODO: This is extracted from IpAddr::is_global, which is unstable:
 /// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
 /// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
+#[allow(clippy::nonminimal_bool)]
 #[cfg(not(feature = "unstable"))]
 fn is_global(ip: IpAddr) -> bool {
     match ip {
@@ -161,7 +186,7 @@ mod tests {
     }
 }
 
-fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
+fn is_domain_blacklisted(domain: &str) -> bool {
     let mut is_blacklisted = CONFIG.icon_blacklist_non_global_ips()
         && (domain, 0)
             .to_socket_addrs()
@@ -179,7 +204,31 @@ fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
     // Skip the regex check if the previous one is true already
     if !is_blacklisted {
        if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
-            let regex = Regex::new(&blacklist).expect("Valid Regex");
+            let mut regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap();
+
+            // Use the pre-generated Regex stored in a Lazy HashMap if there's one, else generate it.
+            let regex = if let Some(regex) = regex_hashmap.get(&blacklist) {
+                regex
+            } else {
+                drop(regex_hashmap);
+
+                let mut regex_hashmap_write = ICON_BLACKLIST_REGEX.write().unwrap();
+                // Clear the current list if the previous key doesn't exist,
+                // to prevent growing of the HashMap after someone has changed it via the admin interface.
+                if regex_hashmap_write.len() >= 1 {
+                    regex_hashmap_write.clear();
+                }
+
+                // Generate the regex to store into the Lazy Static HashMap.
+                let blacklist_regex = Regex::new(&blacklist).unwrap();
+                regex_hashmap_write.insert(blacklist.to_string(), blacklist_regex);
+                drop(regex_hashmap_write);
+
+                regex_hashmap = ICON_BLACKLIST_REGEX.read().unwrap();
+                regex_hashmap.get(&blacklist).unwrap()
+            };
+
+            // Use the pre-generated Regex stored in a Lazy HashMap.
             if regex.is_match(&domain) {
                 warn!("Blacklisted domain: {:#?} matched {:#?}", domain, blacklist);
                 is_blacklisted = true;
@@ -190,39 +239,38 @@ fn check_icon_domain_is_blacklisted(domain: &str) -> bool {
     is_blacklisted
 }
 
-fn get_icon(domain: &str) -> Vec<u8> {
+fn get_icon(domain: &str) -> Option<Vec<u8>> {
     let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);
 
+    // Check for expiration of negatively cached copy
+    if icon_is_negcached(&path) {
+        return None;
+    }
+
     if let Some(icon) = get_cached_icon(&path) {
-        return icon;
+        return Some(icon);
    }
 
     if CONFIG.disable_icon_download() {
-        return FALLBACK_ICON.to_vec();
+        return None;
     }
 
-    // Get the icon, or fallback in case of error
+    // Get the icon, or None in case of error
     match download_icon(&domain) {
         Ok(icon) => {
             save_icon(&path, &icon);
-            icon
+            Some(icon)
         }
         Err(e) => {
             error!("Error downloading icon: {:?}", e);
             let miss_indicator = path + ".miss";
-            let empty_icon = Vec::new();
-            save_icon(&miss_indicator, &empty_icon);
-            FALLBACK_ICON.to_vec()
+            save_icon(&miss_indicator, &[]);
+            None
         }
     }
 }
 
 fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
-    // Check for expiration of negatively cached copy
-    if icon_is_negcached(path) {
-        return Some(FALLBACK_ICON.to_vec());
-    }
-
     // Check for expiration of successfully cached copy
     if icon_is_expired(path) {
         return None;
@@ -284,6 +332,12 @@ impl Icon {
     }
 }
 
+struct IconUrlResult {
+    iconlist: Vec<Icon>,
+    cookies: String,
+    referer: String,
+}
+
 /// Returns a Result/Tuple which holds a Vector IconList and a string which holds the cookies from the last response.
 /// There will always be a result with a string which will contain https://example.com/favicon.ico and an empty string for the cookies.
 /// This does not mean that that location does exist, but it is the default location browsers use.
@@ -296,24 +350,65 @@ impl Icon {
 /// let (mut iconlist, cookie_str) = get_icon_url("github.com")?;
 /// let (mut iconlist, cookie_str) = get_icon_url("gitlab.com")?;
 /// ```
-fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
+fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
     // Default URL with secure and insecure schemes
     let ssldomain = format!("https://{}", domain);
     let httpdomain = format!("http://{}", domain);
 
+    // First check the domain as given during the request for both HTTPS and HTTP.
+    let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)) {
+        Ok(c) => Ok(c),
+        Err(e) => {
+            let mut sub_resp = Err(e);
+
+            // When the domain is not an IP, and has more than one dot, remove all subdomains.
+            let is_ip = domain.parse::<IpAddr>();
+            if is_ip.is_err() && domain.matches('.').count() > 1 {
+                let mut domain_parts = domain.split('.');
+                let base_domain = format!(
+                    "{base}.{tld}",
+                    tld = domain_parts.next_back().unwrap(),
+                    base = domain_parts.next_back().unwrap()
+                );
+                if is_valid_domain(&base_domain) {
+                    let sslbase = format!("https://{}", base_domain);
+                    let httpbase = format!("http://{}", base_domain);
+                    debug!("[get_icon_url]: Trying without subdomains '{}'", base_domain);
+
+                    sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase));
+                }
+
+            // When the domain is not an IP, and has fewer than 2 dots, try to add www. in front of it.
+            } else if is_ip.is_err() && domain.matches('.').count() < 2 {
+                let www_domain = format!("www.{}", domain);
+                if is_valid_domain(&www_domain) {
+                    let sslwww = format!("https://{}", www_domain);
+                    let httpwww = format!("http://{}", www_domain);
+                    debug!("[get_icon_url]: Trying with www. prefix '{}'", www_domain);
+
+                    sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww));
+                }
+            }
+
+            sub_resp
+        }
+    };
+
     // Create the iconlist
     let mut iconlist: Vec<Icon> = Vec::new();
 
     // Create the cookie_str to fill it all the cookies from the response
     // These cookies can be used to request/download the favicon image.
     // Some sites have extra security in place with for example XSRF Tokens.
-    let mut cookie_str = String::new();
+    let mut cookie_str = "".to_string();
+    let mut referer = "".to_string();
 
-    let resp = get_page(&ssldomain).or_else(|_| get_page(&httpdomain));
     if let Ok(content) = resp {
         // Extract the URL from the response in case redirects occurred (like @ gitlab.com)
         let url = content.url().clone();
+
+        // Get all the cookies and pass them on to the next function.
+        // Needed for XSRF Cookies for example (like @ mijn.ing.nl)
         let raw_cookies = content.headers().get_all("set-cookie");
         cookie_str = raw_cookies
             .iter()
@@ -327,6 +422,10 @@ fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
             })
             .collect::<String>();
 
+        // Set the referer to be used on the final request, some sites check this.
+        // Mostly used to prevent direct linking and for other security reasons.
+        referer = url.as_str().to_string();
+
         // Add the default favicon.ico to the list with the domain the content responded from.
         iconlist.push(Icon::new(35, url.join("/favicon.ico").unwrap().into_string()));
 
@@ -339,14 +438,18 @@ fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
         let favicons = soup
             .tag("link")
             .attr("rel", ICON_REL_REGEX.clone()) // Only use icon rels
-            .attr("href", ICON_HREF_REGEX.clone()) // Only allow specific extensions
+            .attr_name("href") // Make sure there is a href
             .find_all();
 
         // Loop through all the found icons and determine their priority
         for favicon in favicons {
             let sizes = favicon.get("sizes");
-            let href = favicon.get("href").expect("Missing href");
-            let full_href = url.join(&href).unwrap().into_string();
+            let href = favicon.get("href").unwrap();
+            // Skip invalid urls
+            let full_href = match url.join(&href) {
+                Ok(h) => h.into_string(),
+                _ => continue,
+            };
 
             let priority = get_icon_priority(&full_href, sizes);
 
@@ -362,28 +465,33 @@ fn get_icon_url(domain: &str) -> Result<(Vec<Icon>, String), Error> {
     iconlist.sort_by_key(|x| x.priority);
 
     // There always is an icon in the list, so no need to check if it exists, and just return the first one
-    Ok((iconlist, cookie_str))
+    Ok(IconUrlResult {
+        iconlist,
+        cookies: cookie_str,
+        referer,
+    })
 }
 
 fn get_page(url: &str) -> Result<Response, Error> {
-    get_page_with_cookies(url, "")
+    get_page_with_cookies(url, "", "")
 }
 
-fn get_page_with_cookies(url: &str, cookie_str: &str) -> Result<Response, Error> {
-    if check_icon_domain_is_blacklisted(Url::parse(url).unwrap().host_str().unwrap_or_default()) {
-        err!("Favicon rel linked to a non blacklisted domain!");
+fn get_page_with_cookies(url: &str, cookie_str: &str, referer: &str) -> Result<Response, Error> {
+    if is_domain_blacklisted(Url::parse(url).unwrap().host_str().unwrap_or_default()) {
+        err!("Favicon rel linked to a blacklisted domain!");
     }
 
-    if cookie_str.is_empty() {
-        CLIENT.get(url).send()?.error_for_status().map_err(Into::into)
-    } else {
-        CLIENT
-            .get(url)
-            .header("cookie", cookie_str)
-            .send()?
-            .error_for_status()
-            .map_err(Into::into)
+    let mut client = CLIENT.get(url);
+    if !cookie_str.is_empty() {
+        client = client.header("Cookie", cookie_str)
     }
+    if !referer.is_empty() {
+        client = client.header("Referer", referer)
+    }
+
+    client.send()?
+        .error_for_status()
+        .map_err(Into::into)
 }
 
 /// Returns an Integer with the priority of the type of the icon which to prefer.
@@ -411,7 +519,7 @@ fn get_icon_priority(href: &str, sizes: Option<String>) -> u8 {
         1
     } else if width == 64 {
         2
-    } else if width >= 24 && width <= 128 {
+    } else if (24..=128).contains(&width) {
         3
     } else if width == 16 {
         4
@@ -466,17 +574,17 @@ fn parse_sizes(sizes: Option<String>) -> (u16, u16) {
 }
 
 fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
-    if check_icon_domain_is_blacklisted(domain) {
+    if is_domain_blacklisted(domain) {
         err!("Domain is blacklisted", domain)
     }
 
-    let (iconlist, cookie_str) = get_icon_url(&domain)?;
+    let icon_result = get_icon_url(&domain)?;
 
     let mut buffer = Vec::new();
 
     use data_url::DataUrl;
 
-    for icon in iconlist.iter().take(5) {
+    for icon in icon_result.iconlist.iter().take(5) {
         if icon.href.starts_with("data:image") {
             let datauri = DataUrl::process(&icon.href).unwrap();
             // Check if we are able to decode the data uri
@@ -491,13 +599,13 @@ fn download_icon(domain: &str) -> Result<Vec<u8>, Error> {
                 _ => warn!("data uri is invalid"),
             };
         } else {
-            match get_page_with_cookies(&icon.href, &cookie_str) {
+            match get_page_with_cookies(&icon.href, &icon_result.cookies, &icon_result.referer) {
                 Ok(mut res) => {
                     info!("Downloaded icon from {}", icon.href);
                     res.copy_to(&mut buffer)?;
                     break;
-                }
-                Err(_) => info!("Download failed for {}", icon.href),
+                },
+                _ => warn!("Download failed for {}", icon.href),
             };
         }
     }
@@ -522,25 +630,3 @@ fn save_icon(path: &str, icon: &[u8]) {
         }
     }
 }
-
-fn _header_map() -> HeaderMap {
-    // Set some default headers for the request.
-    // Use a browser like user-agent to make sure most websites will return there correct website.
-    use reqwest::header::*;
-
-    macro_rules! headers {
-        ($( $name:ident : $value:literal),+ $(,)? ) => {
-            let mut headers = HeaderMap::new();
-            $( headers.insert($name, HeaderValue::from_static($value)); )+
-            headers
-        };
-    }
-
-    headers! {
-        USER_AGENT: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299",
-        ACCEPT_LANGUAGE: "en-US,en;q=0.8",
-        CACHE_CONTROL: "no-cache",
-        PRAGMA: "no-cache",
-        ACCEPT: "text/html,application/xhtml+xml,application/xml; q=0.9,image/webp,image/apng,*/*;q=0.8",
-    }
-}
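
The blacklist check above caches the compiled `Regex` behind a `RwLock<HashMap<..>>`: take a read lock for the fast path, and only on a miss drop it, take the write lock, compile and store, then re-acquire the read lock. A minimal self-contained sketch of that pattern (names are illustrative, not from this patch):

    use std::{collections::HashMap, sync::RwLock};

    use once_cell::sync::Lazy;
    use regex::Regex;

    static CACHE: Lazy<RwLock<HashMap<String, Regex>>> = Lazy::new(|| RwLock::new(HashMap::new()));

    fn is_match_cached(pattern: &str, haystack: &str) -> bool {
        // Fast path: the pattern was compiled before; read lock only.
        if let Some(re) = CACHE.read().unwrap().get(pattern) {
            return re.is_match(haystack);
        }

        // Slow path: compile once under the write lock and keep it for next time.
        // Clearing first mirrors the patch: if the configured pattern changed,
        // the stale entry is evicted so the map never grows past one entry.
        let mut cache = CACHE.write().unwrap();
        cache.clear();
        cache.insert(pattern.to_string(), Regex::new(pattern).unwrap());
        cache.get(pattern).unwrap().is_match(haystack)
    }

Note that the read guard must be released before taking the write lock, otherwise the same thread can deadlock itself; that is why the patch calls `drop(regex_hashmap)` explicitly.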

@@ -68,6 +68,11 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
         "refresh_token": device.refresh_token,
         "Key": user.akey,
         "PrivateKey": user.private_key,
+
+        "Kdf": user.client_kdf_type,
+        "KdfIterations": user.client_kdf_iter,
+        "ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
+        "scope": "api offline_access"
     })))
 }
 
@@ -97,6 +102,14 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
         )
     }
 
+    // Check if the user is disabled
+    if !user.enabled {
+        err!(
+            "This user has been disabled",
+            format!("IP: {}. Username: {}.", ip.ip, username)
+        )
+    }
+
     let now = Local::now();
 
     if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() {
@@ -142,7 +155,6 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
     }
 
     // Common
-    let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();
     let orgs = UserOrganization::find_by_user(&user.uuid, &conn);
 
     let (access_token, expires_in) = device.refresh_tokens(&user, orgs);
@@ -156,6 +168,11 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
         "Key": user.akey,
         "PrivateKey": user.private_key,
         //"TwoFactorToken": "11122233333444555666777888999"
+
+        "Kdf": user.client_kdf_type,
+        "KdfIterations": user.client_kdf_iter,
+        "ResetMasterPassword": false, // TODO: Same as above
+        "scope": "api offline_access"
     });
 
     if let Some(token) = twofactor_token {
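
For context on the `Kdf` and `KdfIterations` fields added to both login responses: clients re-derive the master key locally, so the server has to echo back the KDF parameters stored for the account (`client_kdf_type`, `client_kdf_iter`). A rough sketch of the resulting shape, with illustrative values only (0 is the PBKDF2-SHA256 type in the upstream enum):

    // Illustrative response body, not a verbatim capture:
    json!({
        "access_token": "<jwt>",
        "expires_in": 3600,
        "token_type": "Bearer",
        "Key": "<protected symmetric key>",
        "PrivateKey": "<protected private key>",
        "Kdf": 0,               // 0 => PBKDF2-SHA256
        "KdfIterations": 100000,
        "ResetMasterPassword": false,
        "scope": "api offline_access"
    })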

@@ -19,11 +19,12 @@ static SHOW_WEBSOCKETS_MSG: AtomicBool = AtomicBool::new(true);
 
 #[get("/hub")]
 fn websockets_err() -> EmptyResult {
-    if CONFIG.websocket_enabled() && SHOW_WEBSOCKETS_MSG.compare_and_swap(true, false, Ordering::Relaxed) {
-        err!("###########################################################
+    if CONFIG.websocket_enabled() && SHOW_WEBSOCKETS_MSG.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed).is_ok() {
+        err!("
+###########################################################
 '/notifications/hub' should be proxied to the websocket server or notifications won't work.
 Go to the Wiki for more info, or disable WebSockets setting WEBSOCKET_ENABLED=false.
-###########################################################################################")
+###########################################################################################\n")
     } else {
         Err(Error::empty())
     }
@@ -137,7 +138,6 @@ struct InitialMessage {
 const PING_MS: u64 = 15_000;
 const PING: Token = Token(1);
 
-const ID_KEY: &str = "id=";
 const ACCESS_TOKEN_KEY: &str = "access_token=";
 
 impl WSHandler {
@@ -148,6 +148,32 @@ impl WSHandler {
         let io_error = io::Error::from(io::ErrorKind::InvalidData);
         Err(ws::Error::new(ws::ErrorKind::Io(io_error), msg))
     }
+
+    fn get_request_token(&self, hs: Handshake) -> Option<String> {
+        use std::str::from_utf8;
+
+        // Verify we have a token header
+        if let Some(header_value) = hs.request.header("Authorization") {
+            if let Ok(converted) = from_utf8(header_value) {
+                if let Some(token_part) = converted.split("Bearer ").nth(1) {
+                    return Some(token_part.into());
+                }
+            }
+        };
+
+        // Otherwise verify the query parameter value
+        let path = hs.request.resource();
+        if let Some(params) = path.split('?').nth(1) {
+            let params_iter = params.split('&').take(1);
+            for val in params_iter {
+                if val.starts_with(ACCESS_TOKEN_KEY) {
+                    return Some(val[ACCESS_TOKEN_KEY.len()..].into());
+                }
+            }
+        };
+
+        None
+    }
 }
 
 impl Handler for WSHandler {
@@ -156,35 +182,16 @@ impl Handler for WSHandler {
         //
         // We don't use `id`, and as of around 2020-03-25, the official clients
         // no longer seem to pass `id` (only `access_token`).
-        let path = hs.request.resource();
-
-        let (_id, access_token) = match path.split('?').nth(1) {
-            Some(params) => {
-                let params_iter = params.split('&').take(2);
-
-                let mut id = None;
-                let mut access_token = None;
-
-                for val in params_iter {
-                    if val.starts_with(ID_KEY) {
-                        id = Some(&val[ID_KEY.len()..]);
-                    } else if val.starts_with(ACCESS_TOKEN_KEY) {
-                        access_token = Some(&val[ACCESS_TOKEN_KEY.len()..]);
-                    }
-                }
-
-                match (id, access_token) {
-                    (Some(a), Some(b)) => (a, b),
-                    (None, Some(b)) => ("", b), // Ignore missing `id`.
-                    _ => return self.err("Missing access token"),
-                }
-            }
-            None => return self.err("Missing query parameters"),
+        // Get user token from header or query parameter
+        let access_token = match self.get_request_token(hs) {
+            Some(token) => token,
+            _ => return self.err("Missing access token"),
         };
 
         // Validate the user
         use crate::auth;
-        let claims = match auth::decode_login(access_token) {
+        let claims = match auth::decode_login(access_token.as_str()) {
             Ok(claims) => claims,
             Err(_) => return self.err("Invalid access token provided"),
         };
@@ -335,7 +342,7 @@ impl WebSocketUsers {
     /* Message Structure
     [
         1, // MessageType.Invocation
-        {}, // Headers
+        {}, // Headers (map)
         null, // InvocationId
         "ReceiveMessage", // Target
         [ // Arguments
@@ -352,7 +359,7 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType) -> Vec<u8> {
 
     let value = V::Array(vec![
         1.into(),
-        V::Array(vec![]),
+        V::Map(vec![]),
         V::Nil,
         "ReceiveMessage".into(),
         V::Array(vec![V::Map(vec![
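
The `compare_and_swap` to `compare_exchange` switch in `websockets_err` is the stock migration for the deprecated atomic API: `compare_exchange(current, new, success, failure)` returns a `Result`, and `.is_ok()` is true exactly when the previous value matched and the swap happened. A minimal sketch of the show-once flag semantics used above:

    use std::sync::atomic::{AtomicBool, Ordering};

    static SHOW_ONCE: AtomicBool = AtomicBool::new(true);

    fn first_caller() -> bool {
        // Old: SHOW_ONCE.compare_and_swap(true, false, Ordering::Relaxed)
        //      returned the previous value, so `== true` meant "we flipped it".
        // New: Ok(_) is returned only for the one caller that observes `true`.
        SHOW_ONCE.compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed).is_ok()
    }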

@@ -78,9 +78,12 @@ fn static_files(filename: String) -> Result<Content<&'static [u8]>, Error> {
         "hibp.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/hibp.png"))),
 
         "bootstrap.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
-        "bootstrap-native-v4.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native-v4.js"))),
+        "bootstrap-native.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap-native.js"))),
         "md5.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/md5.js"))),
         "identicon.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/identicon.js"))),
+        "datatables.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
+        "datatables.css" => Ok(Content(ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
+        "jquery-3.5.1.slim.js" => Ok(Content(ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.5.1.slim.js"))),
         _ => err!(format!("Static file not found: {}", filename)),
     }
 }
157  src/auth.rs
@@ -215,12 +215,10 @@ pub fn generate_admin_claims() -> AdminJWTClaims {
 //
 // Bearer token authentication
 //
-use rocket::{
-    request::{FromRequest, Request, Outcome},
-};
+use rocket::request::{FromRequest, Outcome, Request};
 
 use crate::db::{
-    models::{Device, User, UserOrgStatus, UserOrgType, UserOrganization},
+    models::{CollectionUser, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException},
     DbConn,
 };
 
@@ -298,7 +296,25 @@ impl<'a, 'r> FromRequest<'a, 'r> for Headers {
         };
 
         if user.security_stamp != claims.sstamp {
-            err_handler!("Invalid security stamp")
+            if let Some(stamp_exception) = user
+                .stamp_exception
+                .as_deref()
+                .and_then(|s| serde_json::from_str::<UserStampException>(s).ok())
+            {
+                let current_route = match request.route().and_then(|r| r.name) {
+                    Some(name) => name,
+                    _ => err_handler!("Error getting current route for stamp exception"),
+                };
+
+                // Check if both match, if not this route is not allowed with the current security stamp.
+                if stamp_exception.route != current_route {
+                    err_handler!("Invalid security stamp: Current route and exception route do not match")
+                } else if stamp_exception.security_stamp != claims.sstamp {
+                    err_handler!("Invalid security stamp for matched stamp exception")
+                }
+            } else {
+                err_handler!("Invalid security stamp")
+            }
         }
 
         Outcome::Success(Headers { host, device, user })
@@ -310,11 +326,13 @@ pub struct OrgHeaders {
     pub device: Device,
     pub user: User,
     pub org_user_type: UserOrgType,
+    pub org_user: UserOrganization,
+    pub org_id: String,
 }
 
-// org_id is usually the second param ("/organizations/<org_id>")
-// But there are cases where it is located in a query value.
-// First check the param, if this is not a valid uuid, we will try the query value.
+// org_id is usually the second path param ("/organizations/<org_id>"),
+// but there are cases where it is a query value.
+// First check the path, if this is not a valid uuid, try the query values.
 fn get_org_id(request: &Request) -> Option<String> {
     if let Some(Ok(org_id)) = request.get_param::<String>(1) {
         if uuid::Uuid::parse_str(&org_id).is_ok() {
@@ -370,6 +388,8 @@ impl<'a, 'r> FromRequest<'a, 'r> for OrgHeaders {
                         err_handler!("Unknown user type in the database")
                     }
                 },
+                org_user,
+                org_id,
             })
         }
         _ => err_handler!("Error getting the organization id"),
@@ -419,6 +439,127 @@ impl Into<Headers> for AdminHeaders {
     }
 }
 
+// col_id is usually the fourth path param ("/organizations/<org_id>/collections/<col_id>"),
+// but there could be cases where it is a query value.
+// First check the path, if this is not a valid uuid, try the query values.
+fn get_col_id(request: &Request) -> Option<String> {
+    if let Some(Ok(col_id)) = request.get_param::<String>(3) {
+        if uuid::Uuid::parse_str(&col_id).is_ok() {
+            return Some(col_id);
+        }
+    }
+
+    if let Some(Ok(col_id)) = request.get_query_value::<String>("collectionId") {
+        if uuid::Uuid::parse_str(&col_id).is_ok() {
+            return Some(col_id);
+        }
+    }
+
+    None
+}
+
+/// The ManagerHeaders are used to check if you are at least a Manager
+/// and have access to the specific collection provided via the <col_id>/collections/collectionId.
+/// This does strict checking on the collection_id, ManagerHeadersLoose does not.
+pub struct ManagerHeaders {
+    pub host: String,
+    pub device: Device,
+    pub user: User,
+    pub org_user_type: UserOrgType,
+}
+
+impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeaders {
+    type Error = &'static str;
+
+    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
+        match request.guard::<OrgHeaders>() {
+            Outcome::Forward(_) => Outcome::Forward(()),
+            Outcome::Failure(f) => Outcome::Failure(f),
+            Outcome::Success(headers) => {
+                if headers.org_user_type >= UserOrgType::Manager {
+                    match get_col_id(request) {
+                        Some(col_id) => {
+                            let conn = match request.guard::<DbConn>() {
+                                Outcome::Success(conn) => conn,
+                                _ => err_handler!("Error getting DB"),
+                            };
+
+                            if !headers.org_user.has_full_access() {
+                                match CollectionUser::find_by_collection_and_user(&col_id, &headers.org_user.user_uuid, &conn) {
+                                    Some(_) => (),
+                                    None => err_handler!("The current user isn't a manager for this collection"),
+                                }
+                            }
+                        }
+                        _ => err_handler!("Error getting the collection id"),
+                    }
+
+                    Outcome::Success(Self {
+                        host: headers.host,
+                        device: headers.device,
+                        user: headers.user,
+                        org_user_type: headers.org_user_type,
+                    })
+                } else {
+                    err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
+                }
+            }
+        }
+    }
+}
+
+impl Into<Headers> for ManagerHeaders {
+    fn into(self) -> Headers {
+        Headers {
+            host: self.host,
+            device: self.device,
+            user: self.user,
+        }
+    }
+}
+
+/// The ManagerHeadersLoose is used when you at least need to be a Manager,
+/// but there is no collection_id sent with the request (either in the path or as form data).
+pub struct ManagerHeadersLoose {
+    pub host: String,
+    pub device: Device,
+    pub user: User,
+    pub org_user_type: UserOrgType,
+}
+
+impl<'a, 'r> FromRequest<'a, 'r> for ManagerHeadersLoose {
+    type Error = &'static str;
+
+    fn from_request(request: &'a Request<'r>) -> Outcome<Self, Self::Error> {
+        match request.guard::<OrgHeaders>() {
+            Outcome::Forward(_) => Outcome::Forward(()),
+            Outcome::Failure(f) => Outcome::Failure(f),
+            Outcome::Success(headers) => {
+                if headers.org_user_type >= UserOrgType::Manager {
+                    Outcome::Success(Self {
+                        host: headers.host,
+                        device: headers.device,
+                        user: headers.user,
+                        org_user_type: headers.org_user_type,
+                    })
+                } else {
+                    err_handler!("You need to be a Manager, Admin or Owner to call this endpoint")
+                }
+            }
+        }
+    }
+}
+
+impl Into<Headers> for ManagerHeadersLoose {
+    fn into(self) -> Headers {
+        Headers {
+            host: self.host,
+            device: self.device,
+            user: self.user,
+        }
+    }
+}
+
 pub struct OwnerHeaders {
     pub host: String,
     pub device: Device,
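
The stamp-exception branch added to the `Headers` guard above deserializes `user.stamp_exception` into a `UserStampException` that pins one route to one alternate stamp. The definition lives in `db::models` and is not part of this hunk; a sketch of what the guard's usage implies (field set assumed from the accesses above):

    // Assumed mirror of db::models::UserStampException, based on how the guard uses it:
    #[derive(Debug, Serialize, Deserialize)]
    pub struct UserStampException {
        pub route: String,          // Rocket route name the exception is scoped to
        pub security_stamp: String, // the stamp the client's JWT must still carry
    }

The effect: after an operation that rotates the security stamp, a client still holding the old token can reach exactly the one excepted route, and nothing else, until it refreshes.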
203  src/config.rs
@@ -2,9 +2,11 @@ use std::process::exit;
 use std::sync::RwLock;
 
 use once_cell::sync::Lazy;
+use regex::Regex;
 use reqwest::Url;
 
 use crate::{
+    db::DbConnType,
     error::Error,
     util::{get_env, get_env_bool},
 };
@@ -21,6 +23,21 @@ pub static CONFIG: Lazy<Config> = Lazy::new(|| {
     })
 });
 
+static PRIVACY_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"[\w]").unwrap());
+const PRIVACY_CONFIG: &[&str] = &[
+    "allowed_iframe_ancestors",
+    "database_url",
+    "domain_origin",
+    "domain_path",
+    "domain",
+    "helo_name",
+    "org_creation_users",
+    "signups_domains_whitelist",
+    "smtp_from",
+    "smtp_host",
+    "smtp_username",
+];
+
 pub type Pass = String;
 
 macro_rules! make_config {
@@ -51,8 +68,34 @@ macro_rules! make_config {
     }
 
     impl ConfigBuilder {
+        #[allow(clippy::field_reassign_with_default)]
         fn from_env() -> Self {
-            dotenv::from_path(".env").ok();
+            match dotenv::from_path(".env") {
+                Ok(_) => (),
+                Err(e) => match e {
+                    dotenv::Error::LineParse(msg, pos) => {
+                        panic!("Error loading the .env file:\nNear {:?} on position {}\nPlease fix and restart!\n", msg, pos);
+                    },
+                    dotenv::Error::Io(ioerr) => match ioerr.kind() {
+                        std::io::ErrorKind::NotFound => {
+                            println!("[INFO] No .env file found.\n");
+                        },
+                        std::io::ErrorKind::PermissionDenied => {
+                            println!("[WARNING] Permission Denied while trying to read the .env file!\n");
+                        },
+                        _ => {
+                            println!("[WARNING] Reading the .env file failed:\n{:?}\n", ioerr);
+                        }
+                    },
+                    _ => {
+                        println!("[WARNING] Reading the .env file failed:\n{:?}\n", e);
+                    }
+                }
+            };
+
             let mut builder = ConfigBuilder::default();
             $($(
@@ -115,6 +158,7 @@ macro_rules! make_config {
             config.domain_set = _domain_set;
 
             config.signups_domains_whitelist = config.signups_domains_whitelist.trim().to_lowercase();
+            config.org_creation_users = config.org_creation_users.trim().to_lowercase();
 
             config
         }
@@ -169,9 +213,38 @@ macro_rules! make_config {
                 }, )+
             ]}, )+ ])
         }
+
+        pub fn get_support_json(&self) -> serde_json::Value {
+            let cfg = {
+                let inner = &self.inner.read().unwrap();
+                inner.config.clone()
+            };
+
+            json!({ $($(
+                stringify!($name): make_config!{ @supportstr $name, cfg.$name, $ty, $none_action },
+            )+)+ })
+        }
     }
     };
 
+    // Support string print
+    ( @supportstr $name:ident, $value:expr, Pass, option ) => { $value.as_ref().map(|_| String::from("***")) }; // Optional pass, we map to an Option<String> with "***"
+    ( @supportstr $name:ident, $value:expr, Pass, $none_action:ident ) => { String::from("***") }; // Required pass, we return "***"
+    ( @supportstr $name:ident, $value:expr, $ty:ty, option ) => { // Optional other value, we return as is or convert to string to apply the privacy config
+        if PRIVACY_CONFIG.contains(&stringify!($name)) {
+            json!($value.as_ref().map(|x| PRIVACY_REGEX.replace_all(&x.to_string(), "${1}*").to_string()))
+        } else {
+            json!($value)
+        }
+    };
+    ( @supportstr $name:ident, $value:expr, $ty:ty, $none_action:ident ) => { // Required other value, we return as is or convert to string to apply the privacy config
+        if PRIVACY_CONFIG.contains(&stringify!($name)) {
+            json!(PRIVACY_REGEX.replace_all(&$value.to_string(), "${1}*").to_string())
+        } else {
+            json!($value)
+        }
+    };
+
     // Group or empty string
     ( @show ) => { "" };
     ( @show $lit:literal ) => { $lit };
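
One subtlety in the `@supportstr` arms above: `PRIVACY_REGEX` is `[\w]` and the replacement is `"${1}*"`. The pattern has no capture group 1, and the regex crate expands an unknown group to the empty string, so the net effect is that every word character becomes `*` while separators such as dots survive. A runnable sketch:

    use regex::Regex;

    fn main() {
        let privacy_regex = Regex::new(r"[\w]").unwrap();
        let masked = privacy_regex.replace_all("smtp.example.com", "${1}*");
        assert_eq!(masked, "****.*******.***"); // letters masked, dots kept
    }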
```diff
@@ -220,6 +293,8 @@ make_config! {
     data_folder: String, false, def, "data".to_string();
     /// Database URL
     database_url: String, false, auto, |c| format!("{}/{}", c.data_folder, "db.sqlite3");
+    /// Database connection pool size
+    database_max_conns: u32, false, def, 10;
     /// Icon cache folder
     icon_cache_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "icon_cache");
     /// Attachments folder
@@ -276,6 +351,9 @@ make_config! {
     signups_verify_resend_limit: u32, true, def, 6;
     /// Email domain whitelist |> Allow signups only from this list of comma-separated domains, even when signups are otherwise disabled
     signups_domains_whitelist: String, true, def, "".to_string();
+    /// Org creation users |> Allow org creation only by this list of comma-separated user emails.
+    /// Blank or 'all' means all users can create orgs; 'none' means no users can create orgs.
+    org_creation_users: String, true, def, "".to_string();
     /// Allow invitations |> Controls whether users can be invited by organization admins, even when signups are otherwise disabled
     invitations_allowed: bool, true, def, true;
     /// Password iterations |> Number of server-side passwords hashing iterations.
@@ -342,6 +420,9 @@ make_config! {
     /// that do not support WAL. Please make sure you read project wiki on the topic before changing this setting.
     enable_db_wal: bool, false, def, true;

+    /// Max database connection retries |> Number of times to retry the database connection during startup, with 1 second between each retry, set to 0 to retry indefinitely
+    db_connection_retries: u32, false, def, 15;
+
     /// Bypass admin page security (Know the risks!) |> Disables the Admin Token for the admin page so you may use your own auth in-front
     disable_admin_token: bool, true, def, false;

@@ -378,36 +459,42 @@ make_config! {
     /// SMTP Email Settings
     smtp: _enable_smtp {
         /// Enabled
         _enable_smtp: bool, true, def, true;
         /// Host
         smtp_host: String, true, option;
-        /// Enable SSL
+        /// Enable Secure SMTP |> (Explicit) - Enabling this by default would use STARTTLS (Standard ports 587 or 25)
         smtp_ssl: bool, true, def, true;
-        /// Use explicit TLS |> Enabling this would force the use of an explicit TLS connection, instead of upgrading an insecure one with STARTTLS
+        /// Force TLS |> (Implicit) - Enabling this would force the use of an SSL/TLS connection, instead of upgrading an insecure one with STARTTLS (Standard port 465)
         smtp_explicit_tls: bool, true, def, false;
         /// Port
         smtp_port: u16, true, auto, |c| if c.smtp_explicit_tls {465} else if c.smtp_ssl {587} else {25};
         /// From Address
         smtp_from: String, true, def, String::new();
         /// From Name
         smtp_from_name: String, true, def, "Bitwarden_RS".to_string();
         /// Username
         smtp_username: String, true, option;
         /// Password
         smtp_password: Pass, true, option;
-        /// Json form auth mechanism |> Defaults for ssl is "Plain" and "Login" and nothing for non-ssl connections. Possible values: ["Plain", "Login", "Xoauth2"]
+        /// SMTP Auth mechanism |> Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections. Possible values: ["Plain", "Login", "Xoauth2"]. Multiple options need to be separated by a comma ','.
         smtp_auth_mechanism: String, true, option;
         /// SMTP connection timeout |> Number of seconds when to stop trying to connect to the SMTP server
         smtp_timeout: u64, true, def, 15;
         /// Server name sent during HELO |> By default this value should be is on the machine's hostname, but might need to be changed in case it trips some anti-spam filters
         helo_name: String, true, option;
+        /// Enable SMTP debugging (Know the risks!) |> DANGEROUS: Enabling this will output very detailed SMTP messages. This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
+        smtp_debug: bool, true, def, false;
+        /// Accept Invalid Certs (Know the risks!) |> DANGEROUS: Allow invalid certificates. This option introduces significant vulnerabilities to man-in-the-middle attacks!
+        smtp_accept_invalid_certs: bool, true, def, false;
+        /// Accept Invalid Hostnames (Know the risks!) |> DANGEROUS: Allow invalid hostnames. This option introduces significant vulnerabilities to man-in-the-middle attacks!
+        smtp_accept_invalid_hostnames: bool, true, def, false;
     },

     /// Email 2FA Settings
     email_2fa: _enable_email_2fa {
         /// Enabled |> Disabling will prevent users from setting up new email 2FA and using existing email 2FA configured
         _enable_email_2fa: bool, true, auto, |c| c._enable_smtp && c.smtp_host.is_some();
-        /// Token number length |> Length of the numbers in an email token. Minimum of 6. Maximum is 19.
+        /// Email token size |> Number of digits in an email token (min: 6, max: 19). Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting.
         email_token_size: u32, true, def, 6;
         /// Token expiration time |> Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
         email_expiration_time: u64, true, def, 600;
```
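The `smtp_port` line above derives the default port from the two TLS flags. The same decision table, pulled out as a plain function for clarity (a sketch; the function name is ours):

```rust
// Mirrors the smtp_port `auto` closure: the default port follows from the
// two TLS flags whenever SMTP_PORT is not set explicitly.
fn default_smtp_port(smtp_explicit_tls: bool, smtp_ssl: bool) -> u16 {
    if smtp_explicit_tls {
        465 // implicit SSL/TLS from the first byte
    } else if smtp_ssl {
        587 // STARTTLS upgrade on the submission port
    } else {
        25 // plain SMTP
    }
}

fn main() {
    assert_eq!(default_smtp_port(true, true), 465);
    assert_eq!(default_smtp_port(false, true), 587);
    assert_eq!(default_smtp_port(false, false), 25);
}
```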
```diff
@@ -417,24 +504,22 @@ make_config! {
 }

 fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
-    let db_url = cfg.database_url.to_lowercase();
-    if cfg!(feature = "sqlite")
-        && (db_url.starts_with("mysql:") || db_url.starts_with("postgresql:") || db_url.starts_with("postgres:"))
-    {
-        err!("`DATABASE_URL` is meant for MySQL or Postgres, while this server is meant for SQLite")
-    }
-
-    if cfg!(feature = "mysql") && !db_url.starts_with("mysql:") {
-        err!("`DATABASE_URL` should start with mysql: when using the MySQL server")
-    }
-
-    if cfg!(feature = "postgresql") && !(db_url.starts_with("postgresql:") || db_url.starts_with("postgres:")) {
-        err!("`DATABASE_URL` should start with postgresql: when using the PostgreSQL server")
-    }
+    // Validate connection URL is valid and DB feature is enabled
+    DbConnType::from_url(&cfg.database_url)?;
+
+    let limit = 256;
+    if cfg.database_max_conns < 1 || cfg.database_max_conns > limit {
+        err!(format!(
+            "`DATABASE_MAX_CONNS` contains an invalid value. Ensure it is between 1 and {}.",
+            limit,
+        ));
+    }

     let dom = cfg.domain.to_lowercase();
     if !dom.starts_with("http://") && !dom.starts_with("https://") {
-        err!("DOMAIN variable needs to contain the protocol (http, https). Use 'http[s]://bw.example.com' instead of 'bw.example.com'");
+        err!(
+            "DOMAIN variable needs to contain the protocol (http, https). Use 'http[s]://bw.example.com' instead of 'bw.example.com'"
+        );
     }

     let whitelist = &cfg.signups_domains_whitelist;
@@ -442,6 +527,13 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
         err!("`SIGNUPS_DOMAINS_WHITELIST` contains empty tokens");
     }

+    let org_creation_users = cfg.org_creation_users.trim().to_lowercase();
+    if !(org_creation_users.is_empty() || org_creation_users == "all" || org_creation_users == "none")
+        && org_creation_users.split(',').any(|u| !u.contains('@'))
+    {
+        err!("`ORG_CREATION_USERS` contains invalid email addresses");
+    }
+
     if let Some(ref token) = cfg.admin_token {
         if token.trim().is_empty() && !cfg.disable_admin_token {
             println!("[WARNING] `ADMIN_TOKEN` is enabled but has an empty value, so the admin page will be disabled.");
@@ -465,6 +557,10 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
         err!("Both `SMTP_HOST` and `SMTP_FROM` need to be set for email support")
     }

+    if cfg.smtp_host.is_some() && !cfg.smtp_from.contains('@') {
+        err!("SMTP_FROM does not contain a mandatory @ sign")
+    }
+
     if cfg.smtp_username.is_some() != cfg.smtp_password.is_some() {
         err!("Both `SMTP_USERNAME` and `SMTP_PASSWORD` need to be set to enable email authentication")
     }
@@ -482,6 +578,15 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
         }
     }

+    // Check if the icon blacklist regex is valid
+    if let Some(ref r) = cfg.icon_blacklist_regex {
+        let validate_regex = Regex::new(&r);
+        match validate_regex {
+            Ok(_) => (),
+            Err(e) => err!(format!("`ICON_BLACKLIST_REGEX` is invalid: {:#?}", e)),
+        }
+    }
+
     Ok(())
 }
```
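The new `ORG_CREATION_USERS` check rejects any list entry without an `@` unless the whole value is blank, `all`, or `none`. The same rule as a standalone predicate (a sketch; the real code reports the failure through `err!` rather than returning a bool):

```rust
fn org_creation_users_valid(raw: &str) -> bool {
    let users = raw.trim().to_lowercase();
    users.is_empty()
        || users == "all"
        || users == "none"
        // Otherwise every comma-separated entry must look like an email.
        || users.split(',').all(|u| u.contains('@'))
}

fn main() {
    assert!(org_creation_users_valid(""));
    assert!(org_creation_users_valid("all"));
    assert!(org_creation_users_valid("admin@example.com,boss@example.com"));
    assert!(!org_creation_users_valid("admin@example.com,not-an-email"));
}
```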
```diff
@@ -522,7 +627,12 @@ impl Config {
         validate_config(&config)?;

         Ok(Config {
-            inner: RwLock::new(Inner { templates: load_templates(&config.templates_folder), config, _env, _usr }),
+            inner: RwLock::new(Inner {
+                templates: load_templates(&config.templates_folder),
+                config,
+                _env,
+                _usr,
+            }),
         })
     }
@@ -592,6 +702,19 @@ impl Config {
         }
     }

+    /// Tests whether the specified user is allowed to create an organization.
+    pub fn is_org_creation_allowed(&self, email: &str) -> bool {
+        let users = self.org_creation_users();
+        if users.is_empty() || users == "all" {
+            true
+        } else if users == "none" {
+            false
+        } else {
+            let email = email.to_lowercase();
+            users.split(',').any(|u| u.trim() == email)
+        }
+    }
+
     pub fn delete_user_config(&self) -> Result<(), Error> {
         crate::util::delete_file(&CONFIG_FILE)?;
```
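`is_org_creation_allowed` is the read side of that setting. A self-contained version of the same logic, to make the three cases explicit (sketch; the real method reads the list from the live config):

```rust
fn is_org_creation_allowed(org_creation_users: &str, email: &str) -> bool {
    let users = org_creation_users.trim().to_lowercase();
    if users.is_empty() || users == "all" {
        true // blank or 'all': anyone may create orgs
    } else if users == "none" {
        false // nobody may create orgs
    } else {
        // Explicit allow-list, matched case-insensitively.
        let email = email.to_lowercase();
        users.split(',').any(|u| u.trim() == email)
    }
}

fn main() {
    assert!(is_org_creation_allowed("", "anyone@example.com"));
    assert!(is_org_creation_allowed("all", "anyone@example.com"));
    assert!(!is_org_creation_allowed("none", "anyone@example.com"));
    assert!(is_org_creation_allowed("boss@example.com", "BOSS@example.com"));
}
```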
```diff
@@ -636,8 +759,10 @@ impl Config {
         let akey_s = data_encoding::BASE64.encode(&akey);

         // Save the new value
-        let mut builder = ConfigBuilder::default();
-        builder._duo_akey = Some(akey_s.clone());
+        let builder = ConfigBuilder {
+            _duo_akey: Some(akey_s.clone()),
+            ..Default::default()
+        };
         self.update_config_partial(builder).ok();

         akey_s
@@ -751,14 +876,20 @@ fn js_escape_helper<'reg, 'rc>(
         .param(0)
         .ok_or_else(|| RenderError::new("Param not found for helper \"js_escape\""))?;

+    let no_quote = h
+        .param(1)
+        .is_some();
+
     let value = param
         .value()
         .as_str()
         .ok_or_else(|| RenderError::new("Param for helper \"js_escape\" is not a String"))?;

-    let escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
-    let quoted_value = format!("&quot;{}&quot;", escaped_value);
+    let mut escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27");
+    if ! no_quote {
+        escaped_value = format!("&quot;{}&quot;", escaped_value);
+    }

-    out.write(&quoted_value)?;
+    out.write(&escaped_value)?;
     Ok(())
 }
```
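The helper now takes an optional second parameter that suppresses the surrounding `&quot;...&quot;` wrapper. The escaping itself, isolated into a plain function (a sketch mirroring the replacements above, including their `\x22`/`\x27` choice):

```rust
fn js_escape(value: &str, no_quote: bool) -> String {
    // Strip backslashes, then replace quotes with hex escapes, exactly as
    // the helper does.
    let mut escaped = value
        .replace('\\', "")
        .replace('\'', "\\x22")
        .replace('"', "\\x27");
    if !no_quote {
        // Without the second parameter, the value is wrapped in HTML-entity
        // double quotes for embedding into the rendered page.
        escaped = format!("&quot;{}&quot;", escaped);
    }
    escaped
}

fn main() {
    assert_eq!(js_escape("a'b", true), "a\\x22b");
    assert_eq!(js_escape("a'b", false), "&quot;a\\x22b&quot;");
}
```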
src/crypto.rs:

```diff
@@ -55,17 +55,21 @@ pub fn get_random(mut array: Vec<u8>) -> Vec<u8> {
 }

 pub fn generate_token(token_size: u32) -> Result<String, Error> {
+    // A u64 can represent all whole numbers up to 19 digits long.
     if token_size > 19 {
-        err!("Generating token failed")
+        err!("Token size is limited to 19 digits")
     }

-    // 8 bytes to create an u64 for up to 19 token digits
-    let bytes = get_random(vec![0; 8]);
-    let mut bytes_array = [0u8; 8];
-    bytes_array.copy_from_slice(&bytes);
-
-    let number = u64::from_be_bytes(bytes_array) % 10u64.pow(token_size);
+    let low: u64 = 0;
+    let high: u64 = 10u64.pow(token_size);
+
+    // Generate a random number in the range [low, high), then format it as a
+    // token of fixed width, left-padding with 0 as needed.
+    use rand::{thread_rng, Rng};
+    let mut rng = thread_rng();
+    let number: u64 = rng.gen_range(low..high);
     let token = format!("{:0size$}", number, size = token_size as usize);

     Ok(token)
 }
```
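The old implementation reduced eight random bytes modulo 10^n; since 10^n never divides 2^64, that mapping is very slightly non-uniform, whereas `Rng::gen_range` rejection-samples and is exact. A condensed version of the new approach (a sketch, assuming the rand 0.8 API the diff uses):

```rust
use rand::{thread_rng, Rng};

fn generate_token(token_size: u32) -> Option<String> {
    if token_size > 19 {
        return None; // 10^20 would overflow a u64
    }
    // Uniform in [0, 10^token_size) via rejection sampling inside gen_range.
    let number: u64 = thread_rng().gen_range(0..10u64.pow(token_size));
    // Left-pad with zeros so e.g. 42 becomes "000042" for a 6-digit token.
    Some(format!("{:0size$}", number, size = token_size as usize))
}

fn main() {
    let token = generate_token(6).unwrap();
    assert_eq!(token.len(), 6);
    assert!(token.chars().all(|c| c.is_ascii_digit()));
}
```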
src/db/mod.rs (312 changed lines):

```diff
@@ -1,51 +1,207 @@
 use std::process::Command;

 use chrono::prelude::*;
-use diesel::{r2d2, r2d2::ConnectionManager, Connection as DieselConnection, ConnectionError};
+use diesel::r2d2::{ConnectionManager, Pool, PooledConnection};
 use rocket::{
     http::Status,
     request::{FromRequest, Outcome},
     Request, State,
 };

-use crate::{error::Error, CONFIG};
+use crate::{
+    error::{Error, MapResult},
+    CONFIG,
+};

-/// An alias to the database connection used
-#[cfg(feature = "sqlite")]
-type Connection = diesel::sqlite::SqliteConnection;
-#[cfg(feature = "mysql")]
-type Connection = diesel::mysql::MysqlConnection;
-#[cfg(feature = "postgresql")]
-type Connection = diesel::pg::PgConnection;
-
-/// An alias to the type for a pool of Diesel connections.
-type Pool = r2d2::Pool<ConnectionManager<Connection>>;
-
-/// Connection request guard type: a wrapper around an r2d2 pooled connection.
-pub struct DbConn(pub r2d2::PooledConnection<ConnectionManager<Connection>>);
-
-pub mod models;
-#[cfg(feature = "sqlite")]
+#[cfg(sqlite)]
 #[path = "schemas/sqlite/schema.rs"]
-pub mod schema;
-#[cfg(feature = "mysql")]
+pub mod __sqlite_schema;
+
+#[cfg(mysql)]
 #[path = "schemas/mysql/schema.rs"]
-pub mod schema;
-#[cfg(feature = "postgresql")]
+pub mod __mysql_schema;
+
+#[cfg(postgresql)]
 #[path = "schemas/postgresql/schema.rs"]
-pub mod schema;
+pub mod __postgresql_schema;

-/// Initializes a database pool.
-pub fn init_pool() -> Pool {
-    let manager = ConnectionManager::new(CONFIG.database_url());
-
-    r2d2::Pool::builder().build(manager).expect("Failed to create pool")
-}
+// This is used to generate the main DbConn and DbPool enums, which contain one variant for each database supported
+macro_rules! generate_connections {
+    ( $( $name:ident: $ty:ty ),+ ) => {
+        #[allow(non_camel_case_types, dead_code)]
+        #[derive(Eq, PartialEq)]
+        pub enum DbConnType { $( $name, )+ }
+
+        #[allow(non_camel_case_types)]
+        pub enum DbConn { $( #[cfg($name)] $name(PooledConnection<ConnectionManager< $ty >>), )+ }
+
+        #[allow(non_camel_case_types)]
+        pub enum DbPool { $( #[cfg($name)] $name(Pool<ConnectionManager< $ty >>), )+ }
+
+        impl DbPool {
+            // For the given database URL, guess it's type, run migrations create pool and return it
+            pub fn from_config() -> Result<Self, Error> {
+                let url = CONFIG.database_url();
+                let conn_type = DbConnType::from_url(&url)?;
+
+                match conn_type { $(
+                    DbConnType::$name => {
+                        #[cfg($name)]
+                        {
+                            paste::paste!{ [< $name _migrations >]::run_migrations()?; }
+                            let manager = ConnectionManager::new(&url);
+                            let pool = Pool::builder()
+                                .max_size(CONFIG.database_max_conns())
+                                .build(manager)
+                                .map_res("Failed to create pool")?;
+                            return Ok(Self::$name(pool));
+                        }
+                        #[cfg(not($name))]
+                        #[allow(unreachable_code)]
+                        return unreachable!("Trying to use a DB backend when it's feature is disabled");
+                    },
+                )+ }
+            }
+            // Get a connection from the pool
+            pub fn get(&self) -> Result<DbConn, Error> {
+                match self { $(
+                    #[cfg($name)]
+                    Self::$name(p) => Ok(DbConn::$name(p.get().map_res("Error retrieving connection from pool")?)),
+                )+ }
+            }
+        }
+    };
+}

-pub fn get_connection() -> Result<Connection, ConnectionError> {
-    Connection::establish(&CONFIG.database_url())
-}
+generate_connections! {
+    sqlite: diesel::sqlite::SqliteConnection,
+    mysql: diesel::mysql::MysqlConnection,
+    postgresql: diesel::pg::PgConnection
+}
+
+impl DbConnType {
+    pub fn from_url(url: &str) -> Result<DbConnType, Error> {
+        // Mysql
+        if url.starts_with("mysql:") {
+            #[cfg(mysql)]
+            return Ok(DbConnType::mysql);
+
+            #[cfg(not(mysql))]
+            err!("`DATABASE_URL` is a MySQL URL, but the 'mysql' feature is not enabled")
+
+        // Postgres
+        } else if url.starts_with("postgresql:") || url.starts_with("postgres:") {
+            #[cfg(postgresql)]
+            return Ok(DbConnType::postgresql);
+
+            #[cfg(not(postgresql))]
+            err!("`DATABASE_URL` is a PostgreSQL URL, but the 'postgresql' feature is not enabled")
+
+        //Sqlite
+        } else {
+            #[cfg(sqlite)]
+            return Ok(DbConnType::sqlite);
+
+            #[cfg(not(sqlite))]
+            err!("`DATABASE_URL` looks like a SQLite URL, but 'sqlite' feature is not enabled")
+        }
+    }
+}
+
+#[macro_export]
+macro_rules! db_run {
+    // Same for all dbs
+    ( $conn:ident: $body:block ) => {
+        db_run! { $conn: sqlite, mysql, postgresql $body }
+    };
+
+    // Different code for each db
+    ( $conn:ident: $( $($db:ident),+ $body:block )+ ) => {
+        #[allow(unused)] use diesel::prelude::*;
+        match $conn {
+            $($(
+                #[cfg($db)]
+                crate::db::DbConn::$db(ref $conn) => {
+                    paste::paste! {
+                        #[allow(unused)] use crate::db::[<__ $db _schema>]::{self as schema, *};
+                        #[allow(unused)] use [<__ $db _model>]::*;
+                        #[allow(unused)] use crate::db::FromDb;
+                    }
+                    $body
+                },
+            )+)+
+        }
+    };
+}
+
+pub trait FromDb {
+    type Output;
+    #[allow(clippy::wrong_self_convention)]
+    fn from_db(self) -> Self::Output;
+}
+
+// For each struct eg. Cipher, we create a CipherDb inside a module named __$db_model (where $db is sqlite, mysql or postgresql),
+// to implement the Diesel traits. We also provide methods to convert between them and the basic structs. Later, that module will be auto imported when using db_run!
+#[macro_export]
+macro_rules! db_object {
+    ( $(
+        $( #[$attr:meta] )*
+        pub struct $name:ident {
+            $( $( #[$field_attr:meta] )* $vis:vis $field:ident : $typ:ty ),+
+            $(,)?
+        }
+    )+ ) => {
+        // Create the normal struct, without attributes
+        $( pub struct $name { $( /*$( #[$field_attr] )**/ $vis $field : $typ, )+ } )+
+
+        #[cfg(sqlite)]
+        pub mod __sqlite_model { $( db_object! { @db sqlite | $( #[$attr] )* | $name | $( $( #[$field_attr] )* $field : $typ ),+ } )+ }
+        #[cfg(mysql)]
+        pub mod __mysql_model { $( db_object! { @db mysql | $( #[$attr] )* | $name | $( $( #[$field_attr] )* $field : $typ ),+ } )+ }
+        #[cfg(postgresql)]
+        pub mod __postgresql_model { $( db_object! { @db postgresql | $( #[$attr] )* | $name | $( $( #[$field_attr] )* $field : $typ ),+ } )+ }
+    };
+
+    ( @db $db:ident | $( #[$attr:meta] )* | $name:ident | $( $( #[$field_attr:meta] )* $vis:vis $field:ident : $typ:ty),+) => {
+        paste::paste! {
+            #[allow(unused)] use super::*;
+            #[allow(unused)] use diesel::prelude::*;
+            #[allow(unused)] use crate::db::[<__ $db _schema>]::*;
+
+            $( #[$attr] )*
+            pub struct [<$name Db>] { $(
+                $( #[$field_attr] )* $vis $field : $typ,
+            )+ }
+
+            impl [<$name Db>] {
+                #[allow(clippy::wrong_self_convention)]
+                #[inline(always)] pub fn to_db(x: &super::$name) -> Self { Self { $( $field: x.$field.clone(), )+ } }
+            }
+
+            impl crate::db::FromDb for [<$name Db>] {
+                type Output = super::$name;
+                #[inline(always)] fn from_db(self) -> Self::Output { super::$name { $( $field: self.$field, )+ } }
+            }
+
+            impl crate::db::FromDb for Vec<[<$name Db>]> {
+                type Output = Vec<super::$name>;
+                #[inline(always)] fn from_db(self) -> Self::Output { self.into_iter().map(crate::db::FromDb::from_db).collect() }
+            }
+
+            impl crate::db::FromDb for Option<[<$name Db>]> {
+                type Output = Option<super::$name>;
+                #[inline(always)] fn from_db(self) -> Self::Output { self.map(crate::db::FromDb::from_db) }
+            }
+        }
+    };
+}
+
+// Reexport the models, needs to be after the macros are defined so it can access them
+pub mod models;

 /// Creates a back-up of the database using sqlite3
 pub fn backup_database() -> Result<(), Error> {
     use std::path::Path;
```
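Taken together, `db_object!` and `db_run!` are what let one model file serve all three backends: the first generates a per-backend `XxxDb` mirror struct carrying the Diesel derives, the second dispatches a query body to the right connection variant and imports the matching schema and model modules. A sketch of how they compose, using a hypothetical `Device` model inside this crate (the real models changed in this diff follow exactly this shape; a `devices` table would have to exist in each generated schema module):

```rust
db_object! {
    #[derive(Identifiable, Queryable, Insertable)]
    #[table_name = "devices"]
    #[primary_key(uuid)]
    pub struct Device {
        pub uuid: String,
        pub name: String,
    }
}

impl Device {
    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            // `devices` comes from the per-backend schema module that
            // db_run! imports; DeviceDb and from_db() are generated by
            // db_object! above.
            devices::table
                .filter(devices::uuid.eq(uuid))
                .first::<DeviceDb>(conn)
                .ok()
                .from_db()
        }}
    }
}
```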
```diff
@@ -73,18 +229,102 @@ impl<'a, 'r> FromRequest<'a, 'r> for DbConn {

     fn from_request(request: &'a Request<'r>) -> Outcome<DbConn, ()> {
         // https://github.com/SergioBenitez/Rocket/commit/e3c1a4ad3ab9b840482ec6de4200d30df43e357c
-        let pool = try_outcome!(request.guard::<State<Pool>>());
+        let pool = try_outcome!(request.guard::<State<DbPool>>());
         match pool.get() {
-            Ok(conn) => Outcome::Success(DbConn(conn)),
+            Ok(conn) => Outcome::Success(conn),
             Err(_) => Outcome::Failure((Status::ServiceUnavailable, ())),
         }
     }
 }

-// For the convenience of using an &DbConn as a &Database.
-impl std::ops::Deref for DbConn {
-    type Target = Connection;
-    fn deref(&self) -> &Self::Target {
-        &self.0
+// Embed the migrations from the migrations folder into the application
+// This way, the program automatically migrates the database to the latest version
+// https://docs.rs/diesel_migrations/*/diesel_migrations/macro.embed_migrations.html
+#[cfg(sqlite)]
+mod sqlite_migrations {
+    #[allow(unused_imports)]
+    embed_migrations!("migrations/sqlite");
+
+    pub fn run_migrations() -> Result<(), super::Error> {
+        // Make sure the directory exists
+        let url = crate::CONFIG.database_url();
+        let path = std::path::Path::new(&url);
+
+        if let Some(parent) = path.parent() {
+            if std::fs::create_dir_all(parent).is_err() {
+                error!("Error creating database directory");
+                std::process::exit(1);
+            }
+        }
+
+        use diesel::{Connection, RunQueryDsl};
+        // Make sure the database is up to date (create if it doesn't exist, or run the migrations)
+        let connection = diesel::sqlite::SqliteConnection::establish(&crate::CONFIG.database_url())?;
+        // Disable Foreign Key Checks during migration
+
+        // Scoped to a connection.
+        diesel::sql_query("PRAGMA foreign_keys = OFF")
+            .execute(&connection)
+            .expect("Failed to disable Foreign Key Checks during migrations");
+
+        // Turn on WAL in SQLite
+        if crate::CONFIG.enable_db_wal() {
+            diesel::sql_query("PRAGMA journal_mode=wal")
+                .execute(&connection)
+                .expect("Failed to turn on WAL");
+        }
+
+        embedded_migrations::run_with_output(&connection, &mut std::io::stdout())?;
+        Ok(())
+    }
+}
+
+#[cfg(mysql)]
+mod mysql_migrations {
+    #[allow(unused_imports)]
+    embed_migrations!("migrations/mysql");
+
+    pub fn run_migrations() -> Result<(), super::Error> {
+        use diesel::{Connection, RunQueryDsl};
+        // Make sure the database is up to date (create if it doesn't exist, or run the migrations)
+        let connection = diesel::mysql::MysqlConnection::establish(&crate::CONFIG.database_url())?;
+        // Disable Foreign Key Checks during migration
+
+        // Scoped to a connection/session.
+        diesel::sql_query("SET FOREIGN_KEY_CHECKS = 0")
+            .execute(&connection)
+            .expect("Failed to disable Foreign Key Checks during migrations");
+
+        embedded_migrations::run_with_output(&connection, &mut std::io::stdout())?;
+        Ok(())
+    }
+}
+
+#[cfg(postgresql)]
+mod postgresql_migrations {
+    #[allow(unused_imports)]
+    embed_migrations!("migrations/postgresql");
+
+    pub fn run_migrations() -> Result<(), super::Error> {
+        use diesel::{Connection, RunQueryDsl};
+        // Make sure the database is up to date (create if it doesn't exist, or run the migrations)
+        let connection = diesel::pg::PgConnection::establish(&crate::CONFIG.database_url())?;
+        // Disable Foreign Key Checks during migration
+
+        // FIXME: Per https://www.postgresql.org/docs/12/sql-set-constraints.html,
+        // "SET CONSTRAINTS sets the behavior of constraint checking within the
+        // current transaction", so this setting probably won't take effect for
+        // any of the migrations since it's being run outside of a transaction.
+        // Migrations that need to disable foreign key checks should run this
+        // from within the migration script itself.
+        diesel::sql_query("SET CONSTRAINTS ALL DEFERRED")
+            .execute(&connection)
+            .expect("Failed to disable Foreign Key Checks during migrations");
+
+        embedded_migrations::run_with_output(&connection, &mut std::io::stdout())?;
+        Ok(())
     }
 }
```
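With migrations embedded per backend, startup only needs one call. A hypothetical wiring (the real call site is in main.rs, outside these hunks), using the Rocket 0.4 API this codebase targets:

```rust
// Build the pool once; from_config() picks the backend from DATABASE_URL
// and runs the matching embedded migrations, then the pool is handed to
// Rocket as managed state so the DbConn request guard can draw from it.
fn init_rocket() -> rocket::Rocket {
    let pool = crate::db::DbPool::from_config().unwrap_or_else(|e| {
        eprintln!("Error creating database pool: {:#?}", e);
        std::process::exit(1);
    });
    rocket::ignite().manage(pool)
}
```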
src/db/models/attachment.rs:

```diff
@@ -3,17 +3,19 @@ use serde_json::Value;
 use super::Cipher;
 use crate::CONFIG;

+db_object! {
     #[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
     #[table_name = "attachments"]
     #[changeset_options(treat_none_as_null="true")]
-    #[belongs_to(Cipher, foreign_key = "cipher_uuid")]
+    #[belongs_to(super::Cipher, foreign_key = "cipher_uuid")]
     #[primary_key(id)]
     pub struct Attachment {
         pub id: String,
         pub cipher_uuid: String,
         pub file_name: String,
         pub file_size: i32,
         pub akey: Option<String>,
     }
+}

 /// Local methods
@@ -50,43 +52,57 @@ impl Attachment {
     }
 }

-use crate::db::schema::{attachments, ciphers};
 use crate::db::DbConn;
-use diesel::prelude::*;

 use crate::api::EmptyResult;
 use crate::error::MapResult;

 /// Database methods
 impl Attachment {
-    #[cfg(feature = "postgresql")]
-    pub fn save(&self, conn: &DbConn) -> EmptyResult {
-        diesel::insert_into(attachments::table)
-            .values(self)
-            .on_conflict(attachments::id)
-            .do_update()
-            .set(self)
-            .execute(&**conn)
-            .map_res("Error saving attachment")
-    }
-
-    #[cfg(not(feature = "postgresql"))]
     pub fn save(&self, conn: &DbConn) -> EmptyResult {
-        diesel::replace_into(attachments::table)
-            .values(self)
-            .execute(&**conn)
-            .map_res("Error saving attachment")
+        db_run! { conn:
+            sqlite, mysql {
+                match diesel::replace_into(attachments::table)
+                    .values(AttachmentDb::to_db(self))
+                    .execute(conn)
+                {
+                    Ok(_) => Ok(()),
+                    // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
+                    Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
+                        diesel::update(attachments::table)
+                            .filter(attachments::id.eq(&self.id))
+                            .set(AttachmentDb::to_db(self))
+                            .execute(conn)
+                            .map_res("Error saving attachment")
+                    }
+                    Err(e) => Err(e.into()),
+                }.map_res("Error saving attachment")
+            }
+            postgresql {
+                let value = AttachmentDb::to_db(self);
+                diesel::insert_into(attachments::table)
+                    .values(&value)
+                    .on_conflict(attachments::id)
+                    .do_update()
+                    .set(&value)
+                    .execute(conn)
+                    .map_res("Error saving attachment")
+            }
+        }
     }

     pub fn delete(self, conn: &DbConn) -> EmptyResult {
-        crate::util::retry(
-            || diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(&**conn),
-            10,
-        )
-        .map_res("Error deleting attachment")?;
-
-        crate::util::delete_file(&self.get_file_path())?;
-        Ok(())
+        db_run! { conn: {
+            crate::util::retry(
+                || diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn),
+                10,
+            )
+            .map_res("Error deleting attachment")?;
+
+            crate::util::delete_file(&self.get_file_path())?;
+            Ok(())
+        }}
     }

     pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
@@ -97,67 +113,78 @@ impl Attachment {
     }

     pub fn find_by_id(id: &str, conn: &DbConn) -> Option<Self> {
-        let id = id.to_lowercase();
-
-        attachments::table
-            .filter(attachments::id.eq(id))
-            .first::<Self>(&**conn)
-            .ok()
+        db_run! { conn: {
+            attachments::table
+                .filter(attachments::id.eq(id.to_lowercase()))
+                .first::<AttachmentDb>(conn)
+                .ok()
+                .from_db()
+        }}
     }

     pub fn find_by_cipher(cipher_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        attachments::table
-            .filter(attachments::cipher_uuid.eq(cipher_uuid))
-            .load::<Self>(&**conn)
-            .expect("Error loading attachments")
+        db_run! { conn: {
+            attachments::table
+                .filter(attachments::cipher_uuid.eq(cipher_uuid))
+                .load::<AttachmentDb>(conn)
+                .expect("Error loading attachments")
+                .from_db()
+        }}
     }

     pub fn find_by_ciphers(cipher_uuids: Vec<String>, conn: &DbConn) -> Vec<Self> {
-        attachments::table
-            .filter(attachments::cipher_uuid.eq_any(cipher_uuids))
-            .load::<Self>(&**conn)
-            .expect("Error loading attachments")
+        db_run! { conn: {
+            attachments::table
+                .filter(attachments::cipher_uuid.eq_any(cipher_uuids))
+                .load::<AttachmentDb>(conn)
+                .expect("Error loading attachments")
+                .from_db()
+        }}
     }

     pub fn size_by_user(user_uuid: &str, conn: &DbConn) -> i64 {
-        let result: Option<i64> = attachments::table
-            .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
-            .filter(ciphers::user_uuid.eq(user_uuid))
-            .select(diesel::dsl::sum(attachments::file_size))
-            .first(&**conn)
-            .expect("Error loading user attachment total size");
-
-        result.unwrap_or(0)
+        db_run! { conn: {
+            let result: Option<i64> = attachments::table
+                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
+                .filter(ciphers::user_uuid.eq(user_uuid))
+                .select(diesel::dsl::sum(attachments::file_size))
+                .first(conn)
+                .expect("Error loading user attachment total size");
+            result.unwrap_or(0)
+        }}
     }

     pub fn count_by_user(user_uuid: &str, conn: &DbConn) -> i64 {
-        attachments::table
-            .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
-            .filter(ciphers::user_uuid.eq(user_uuid))
-            .count()
-            .first::<i64>(&**conn)
-            .ok()
-            .unwrap_or(0)
+        db_run! { conn: {
+            attachments::table
+                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
+                .filter(ciphers::user_uuid.eq(user_uuid))
+                .count()
+                .first(conn)
+                .unwrap_or(0)
+        }}
     }

     pub fn size_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
-        let result: Option<i64> = attachments::table
-            .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
-            .filter(ciphers::organization_uuid.eq(org_uuid))
-            .select(diesel::dsl::sum(attachments::file_size))
-            .first(&**conn)
-            .expect("Error loading user attachment total size");
-
-        result.unwrap_or(0)
+        db_run! { conn: {
+            let result: Option<i64> = attachments::table
+                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
+                .filter(ciphers::organization_uuid.eq(org_uuid))
+                .select(diesel::dsl::sum(attachments::file_size))
+                .first(conn)
+                .expect("Error loading user attachment total size");
+            result.unwrap_or(0)
+        }}
     }

     pub fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
-        attachments::table
-            .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
-            .filter(ciphers::organization_uuid.eq(org_uuid))
-            .count()
-            .first(&**conn)
-            .ok()
-            .unwrap_or(0)
+        db_run! { conn: {
+            attachments::table
+                .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
+                .filter(ciphers::organization_uuid.eq(org_uuid))
+                .count()
+                .first(conn)
+                .unwrap_or(0)
+        }}
     }
 }
```
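The SQLite/MySQL branch of `save()` above works around `replace_into()` being DELETE-then-INSERT under the hood: if another row still references this one via a foreign key, the delete is rejected, and the code falls back to a plain UPDATE so the row (and the references to it) stays in place. The error classification at the heart of that fallback, in isolation (a sketch, not tied to a real table):

```rust
use diesel::result::{DatabaseErrorKind, Error as DieselError};

// Decide how an upsert-via-replace_into failure should be handled.
fn classify(e: &DieselError) -> &'static str {
    match e {
        DieselError::DatabaseError(DatabaseErrorKind::ForeignKeyViolation, _) => {
            "row is referenced elsewhere: fall back to UPDATE"
        }
        _ => "unrelated failure: propagate the error",
    }
}
```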
src/db/models/cipher.rs:

```diff
@@ -2,39 +2,48 @@ use chrono::{NaiveDateTime, Utc};
 use serde_json::Value;

 use super::{
-    Attachment, CollectionCipher, FolderCipher, Organization, User, UserOrgStatus, UserOrgType, UserOrganization,
+    Attachment,
+    CollectionCipher,
+    Favorite,
+    FolderCipher,
+    Organization,
+    User,
+    UserOrgStatus,
+    UserOrgType,
+    UserOrganization,
 };

+db_object! {
     #[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
     #[table_name = "ciphers"]
     #[changeset_options(treat_none_as_null="true")]
     #[belongs_to(User, foreign_key = "user_uuid")]
     #[belongs_to(Organization, foreign_key = "organization_uuid")]
     #[primary_key(uuid)]
     pub struct Cipher {
         pub uuid: String,
         pub created_at: NaiveDateTime,
         pub updated_at: NaiveDateTime,

         pub user_uuid: Option<String>,
         pub organization_uuid: Option<String>,

         /*
         Login = 1,
         SecureNote = 2,
         Card = 3,
         Identity = 4
         */
         pub atype: i32,
         pub name: String,
         pub notes: Option<String>,
         pub fields: Option<String>,

         pub data: String,

-        pub favorite: bool,
         pub password_history: Option<String>,
         pub deleted_at: Option<NaiveDateTime>,
     }
+}

 /// Local methods
@@ -51,7 +60,6 @@ impl Cipher {
             organization_uuid: None,

             atype,
-            favorite: false,
             name,

             notes: None,
@@ -64,9 +72,7 @@ impl Cipher {
         }
     }

-use crate::db::schema::*;
 use crate::db::DbConn;
-use diesel::prelude::*;

 use crate::api::EmptyResult;
 use crate::error::MapResult;
```
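Note that `favorite` is no longer a column on `Cipher`; favorites become per-user rows in a separate table, so the same org cipher can be a favorite for one user and not another. The `Favorite` model itself is not part of these hunks; judging from the call sites in this diff, its API is roughly the following (signatures are our inference, bodies elided):

```rust
use crate::api::EmptyResult;
use crate::db::DbConn;

pub struct Favorite;

impl Favorite {
    // Returns whether the given user marked the given cipher as favorite.
    pub fn is_favorite(_cipher_uuid: &str, _user_uuid: &str, _conn: &DbConn) -> bool {
        unimplemented!("inferred signature; see the real favorite.rs model")
    }

    // Inserts or removes the (user, cipher) row to match the requested state.
    pub fn set_favorite(_favorite: bool, _cipher_uuid: &str, _user_uuid: &str, _conn: &DbConn) -> EmptyResult {
        unimplemented!("inferred signature")
    }

    // Deletes all favorite rows for a cipher, called from Cipher::delete().
    pub fn delete_all_by_cipher(_cipher_uuid: &str, _conn: &DbConn) -> EmptyResult {
        unimplemented!("inferred signature")
    }
}
```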
```diff
@@ -77,13 +83,18 @@ impl Cipher {
         use crate::util::format_date;

         let attachments = Attachment::find_by_cipher(&self.uuid, conn);
-        let attachments_json: Vec<Value> = attachments.iter().map(|c| c.to_json(host)).collect();
+        // When there are no attachments use null instead of an empty array
+        let attachments_json = if attachments.is_empty() {
+            Value::Null
+        } else {
+            attachments.iter().map(|c| c.to_json(host)).collect()
+        };

         let fields_json = self.fields.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
         let password_history_json = self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);

         let (read_only, hide_passwords) =
-            match self.get_access_restrictions(&user_uuid, &conn) {
+            match self.get_access_restrictions(&user_uuid, conn) {
                 Some((ro, hp)) => (ro, hp),
                 None => {
                     error!("Cipher ownership assertion failure");
@@ -91,28 +102,31 @@ impl Cipher {
                 },
             };

-        // Get the data or a default empty value to avoid issues with the mobile apps
-        let mut data_json: Value = serde_json::from_str(&self.data).unwrap_or_else(|_| json!({
-            "Fields":null,
-            "Name": self.name,
-            "Notes":null,
-            "Password":null,
-            "PasswordHistory":null,
-            "PasswordRevisionDate":null,
-            "Response":null,
-            "Totp":null,
-            "Uris":null,
-            "Username":null
-        }));
+        // Get the type_data or a default to an empty json object '{}'.
+        // If not passing an empty object, mobile clients will crash.
+        let mut type_data_json: Value = serde_json::from_str(&self.data).unwrap_or(json!({}));

-        // TODO: ******* Backwards compat start **********
-        // To remove backwards compatibility, just remove this entire section
-        // and remove the compat code from ciphers::update_cipher_from_data
-        if self.atype == 1 && data_json["Uris"].is_array() {
-            let uri = data_json["Uris"][0]["Uri"].clone();
-            data_json["Uri"] = uri;
+        // NOTE: This was marked as *Backwards Compatibilty Code*, but as of January 2021 this is still being used by upstream
+        // Set the first element of the Uris array as Uri, this is needed several (mobile) clients.
+        if self.atype == 1 {
+            if type_data_json["Uris"].is_array() {
+                let uri = type_data_json["Uris"][0]["Uri"].clone();
+                type_data_json["Uri"] = uri;
+            } else {
+                // Upstream always has an Uri key/value
+                type_data_json["Uri"] = Value::Null;
+            }
         }
-        // TODO: ******* Backwards compat end **********
+
+        // Clone the type_data and add some default value.
+        let mut data_json = type_data_json.clone();
+
+        // NOTE: This was marked as *Backwards Compatibilty Code*, but as of January 2021 this is still being used by upstream
+        // data_json should always contain the following keys with every atype
+        data_json["Fields"] = json!(fields_json);
+        data_json["Name"] = json!(self.name);
+        data_json["Notes"] = json!(self.notes);
+        data_json["PasswordHistory"] = json!(password_history_json);

         // There are three types of cipher response models in upstream
         // Bitwarden: "cipherMini", "cipher", and "cipherDetails" (in order
@@ -127,14 +141,16 @@ impl Cipher {
             "Type": self.atype,
             "RevisionDate": format_date(&self.updated_at),
             "DeletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
-            "FolderId": self.get_folder_uuid(&user_uuid, &conn),
-            "Favorite": self.favorite,
+            "FolderId": self.get_folder_uuid(&user_uuid, conn),
+            "Favorite": self.is_favorite(&user_uuid, conn),
             "OrganizationId": self.organization_uuid,
             "Attachments": attachments_json,
+            // We have UseTotp set to true by default within the Organization model.
+            // This variable together with UsersGetPremium is used to show or hide the TOTP counter.
             "OrganizationUseTotp": true,

             // This field is specific to the cipherDetails type.
-            "CollectionIds": self.get_collections(user_uuid, &conn),
+            "CollectionIds": self.get_collections(user_uuid, conn),

             "Name": self.name,
             "Notes": self.notes,
@@ -149,6 +165,12 @@ impl Cipher {
             "ViewPassword": !hide_passwords,

             "PasswordHistory": password_history_json,
+
+            // All Cipher types are included by default as null, but only the matching one will be populated
+            "Login": null,
+            "SecureNote": null,
+            "Card": null,
+            "Identity": null,
         });

         let key = match self.atype {
@@ -159,7 +181,7 @@ impl Cipher {
             _ => panic!("Wrong type"),
         };

-        json_object[key] = data_json;
+        json_object[key] = type_data_json;
         json_object
     }
```
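The reworked `to_json` thus builds two layers: `type_data_json`, the raw decrypted payload plus a guaranteed `Uri` key for logins, and `data_json`, the same object with the always-present keys layered on top. A small sketch of the resulting shape for an empty login cipher (using only serde_json, with placeholder values):

```rust
use serde_json::{json, Value};

fn main() {
    // Empty payload parsed from self.data, as in the diff above.
    let mut type_data: Value = json!({});
    // atype == 1 (Login) with no Uris array: Uri is forced to null.
    type_data["Uri"] = Value::Null;

    // data_json = type_data + the keys every atype must carry.
    let mut data = type_data.clone();
    data["Fields"] = Value::Null;
    data["Name"] = json!("My login"); // placeholder name
    data["Notes"] = Value::Null;
    data["PasswordHistory"] = Value::Null;

    assert_eq!(data["Uri"], Value::Null);
    assert!(data.get("Name").is_some());
}
```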
```diff
@@ -185,41 +207,54 @@ impl Cipher {
         user_uuids
     }

-    #[cfg(feature = "postgresql")]
     pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
         self.update_users_revision(conn);
         self.updated_at = Utc::now().naive_utc();

-        diesel::insert_into(ciphers::table)
-            .values(&*self)
-            .on_conflict(ciphers::uuid)
-            .do_update()
-            .set(&*self)
-            .execute(&**conn)
-            .map_res("Error saving cipher")
-    }
-
-    #[cfg(not(feature = "postgresql"))]
-    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
-        self.update_users_revision(conn);
-        self.updated_at = Utc::now().naive_utc();
-
-        diesel::replace_into(ciphers::table)
-            .values(&*self)
-            .execute(&**conn)
-            .map_res("Error saving cipher")
+        db_run! { conn:
+            sqlite, mysql {
+                match diesel::replace_into(ciphers::table)
+                    .values(CipherDb::to_db(self))
+                    .execute(conn)
+                {
+                    Ok(_) => Ok(()),
+                    // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
+                    Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
+                        diesel::update(ciphers::table)
+                            .filter(ciphers::uuid.eq(&self.uuid))
+                            .set(CipherDb::to_db(self))
+                            .execute(conn)
+                            .map_res("Error saving cipher")
+                    }
+                    Err(e) => Err(e.into()),
+                }.map_res("Error saving cipher")
+            }
+            postgresql {
+                let value = CipherDb::to_db(self);
+                diesel::insert_into(ciphers::table)
+                    .values(&value)
+                    .on_conflict(ciphers::uuid)
+                    .do_update()
+                    .set(&value)
+                    .execute(conn)
+                    .map_res("Error saving cipher")
+            }
+        }
     }

     pub fn delete(&self, conn: &DbConn) -> EmptyResult {
         self.update_users_revision(conn);

-        FolderCipher::delete_all_by_cipher(&self.uuid, &conn)?;
-        CollectionCipher::delete_all_by_cipher(&self.uuid, &conn)?;
-        Attachment::delete_all_by_cipher(&self.uuid, &conn)?;
+        FolderCipher::delete_all_by_cipher(&self.uuid, conn)?;
+        CollectionCipher::delete_all_by_cipher(&self.uuid, conn)?;
+        Attachment::delete_all_by_cipher(&self.uuid, conn)?;
+        Favorite::delete_all_by_cipher(&self.uuid, conn)?;

-        diesel::delete(ciphers::table.filter(ciphers::uuid.eq(&self.uuid)))
-            .execute(&**conn)
-            .map_res("Error deleting cipher")
+        db_run! { conn: {
+            diesel::delete(ciphers::table.filter(ciphers::uuid.eq(&self.uuid)))
+                .execute(conn)
+                .map_res("Error deleting cipher")
+        }}
     }

     pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
```
```diff
@@ -237,28 +272,28 @@ impl Cipher {
     }

     pub fn move_to_folder(&self, folder_uuid: Option<String>, user_uuid: &str, conn: &DbConn) -> EmptyResult {
-        User::update_uuid_revision(user_uuid, &conn);
+        User::update_uuid_revision(user_uuid, conn);

-        match (self.get_folder_uuid(&user_uuid, &conn), folder_uuid) {
+        match (self.get_folder_uuid(&user_uuid, conn), folder_uuid) {
             // No changes
             (None, None) => Ok(()),
             (Some(ref old), Some(ref new)) if old == new => Ok(()),

             // Add to folder
-            (None, Some(new)) => FolderCipher::new(&new, &self.uuid).save(&conn),
+            (None, Some(new)) => FolderCipher::new(&new, &self.uuid).save(conn),

             // Remove from folder
-            (Some(old), None) => match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, &conn) {
-                Some(old) => old.delete(&conn),
+            (Some(old), None) => match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn) {
+                Some(old) => old.delete(conn),
                 None => err!("Couldn't move from previous folder"),
             },

             // Move to another folder
             (Some(old), Some(new)) => {
-                if let Some(old) = FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, &conn) {
-                    old.delete(&conn)?;
+                if let Some(old) = FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn) {
+                    old.delete(conn)?;
                 }
-                FolderCipher::new(&new, &self.uuid).save(&conn)
+                FolderCipher::new(&new, &self.uuid).save(conn)
             }
         }
     }
@@ -271,7 +306,7 @@ impl Cipher {
     /// Returns whether this cipher is owned by an org in which the user has full access.
     pub fn is_in_full_access_org(&self, user_uuid: &str, conn: &DbConn) -> bool {
         if let Some(ref org_uuid) = self.organization_uuid {
-            if let Some(user_org) = UserOrganization::find_by_user_and_org(&user_uuid, &org_uuid, &conn) {
+            if let Some(user_org) = UserOrganization::find_by_user_and_org(&user_uuid, &org_uuid, conn) {
                 return user_org.has_full_access();
             }
         }
```
@@ -292,38 +327,40 @@ impl Cipher {
             return Some((false, false));
         }

+        db_run! {conn: {
             // Check whether this cipher is in any collections accessible to the
             // user. If so, retrieve the access flags for each collection.
             let query = ciphers::table
                 .filter(ciphers::uuid.eq(&self.uuid))
                 .inner_join(ciphers_collections::table.on(
                     ciphers::uuid.eq(ciphers_collections::cipher_uuid)))
                 .inner_join(users_collections::table.on(
                     ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
                         .and(users_collections::user_uuid.eq(user_uuid))))
                 .select((users_collections::read_only, users_collections::hide_passwords));

             // There's an edge case where a cipher can be in multiple collections
             // with inconsistent access flags. For example, a cipher could be in
             // one collection where the user has read-only access, but also in
             // another collection where the user has read/write access. To handle
             // this, we do a boolean OR of all values in each of the `read_only`
             // and `hide_passwords` columns. This could ideally be done as part
             // of the query, but Diesel doesn't support a max() or bool_or()
             // function on booleans and this behavior isn't portable anyway.
-            if let Some(vec) = query.load::<(bool, bool)>(&**conn).ok() {
+            if let Ok(vec) = query.load::<(bool, bool)>(conn) {
                 let mut read_only = false;
                 let mut hide_passwords = false;
                 for (ro, hp) in vec.iter() {
                     read_only |= ro;
                     hide_passwords |= hp;
                 }

                 Some((read_only, hide_passwords))
             } else {
                 // This cipher isn't in any collections accessible to the user.
                 None
             }
+        }}
     }

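Aside: the boolean-OR handling described in the comments above is easy to check in isolation. A minimal, self-contained sketch of the same fold (plain Rust, detached from Diesel; the row values below are invented for illustration):

// Fold per-collection (read_only, hide_passwords) rows into one pair of
// flags by OR-ing each column, mirroring the loop in the hunk above.
fn fold_access_flags(rows: &[(bool, bool)]) -> (bool, bool) {
    let mut read_only = false;
    let mut hide_passwords = false;
    for &(ro, hp) in rows {
        read_only |= ro;
        hide_passwords |= hp;
    }
    (read_only, hide_passwords)
}

fn main() {
    // Cipher in one read-only collection and one read/write collection:
    // the OR keeps the restrictive read_only flag, exactly as the model code does.
    assert_eq!(fold_access_flags(&[(true, false), (false, false)]), (true, false));
}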
     pub fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
@@ -337,113 +374,167 @@ impl Cipher {
         self.get_access_restrictions(&user_uuid, &conn).is_some()
     }

+    // Returns whether this cipher is a favorite of the specified user.
+    pub fn is_favorite(&self, user_uuid: &str, conn: &DbConn) -> bool {
+        Favorite::is_favorite(&self.uuid, user_uuid, conn)
+    }
+
+    // Sets whether this cipher is a favorite of the specified user.
+    pub fn set_favorite(&self, favorite: Option<bool>, user_uuid: &str, conn: &DbConn) -> EmptyResult {
+        match favorite {
+            None => Ok(()), // No change requested.
+            Some(status) => Favorite::set_favorite(status, &self.uuid, user_uuid, conn),
+        }
+    }
+
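A hypothetical call site for the new accessor, just to show the Option<bool> contract (None means "leave the flag as it is", Some(..) writes through to the favorites table):

// Hypothetical helper; assumes a Cipher, a user UUID and a DbConn are in scope.
fn favorite_example(cipher: &Cipher, user_uuid: &str, conn: &DbConn) -> EmptyResult {
    cipher.set_favorite(Some(true), user_uuid, conn)?; // mark as favorite
    cipher.set_favorite(None, user_uuid, conn)         // no change requested: a no-op
}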
     pub fn get_folder_uuid(&self, user_uuid: &str, conn: &DbConn) -> Option<String> {
+        db_run! {conn: {
             folders_ciphers::table
                 .inner_join(folders::table)
                 .filter(folders::user_uuid.eq(&user_uuid))
                 .filter(folders_ciphers::cipher_uuid.eq(&self.uuid))
                 .select(folders_ciphers::folder_uuid)
-                .first::<String>(&**conn)
+                .first::<String>(conn)
                 .ok()
+        }}
     }

     pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
+        db_run! {conn: {
             ciphers::table
                 .filter(ciphers::uuid.eq(uuid))
-                .first::<Self>(&**conn)
+                .first::<CipherDb>(conn)
                 .ok()
+                .from_db()
+        }}
     }

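The CipherDb/.from_db() pair above comes from the new db_object!/db_run! macros introduced elsewhere in this changeset: db_object! emits a per-backend twin of each model struct carrying the Diesel derives, and from_db() converts the loaded twin back into the shared model type. A toy, self-contained sketch of the conversion half, with invented two-field structs (the real generated code is more involved):

// Invented minimal structs; CipherDb stands in for the generated
// backend-specific twin of the Cipher model.
struct Cipher { uuid: String, name: String }
struct CipherDb { uuid: String, name: String }

trait FromDb {
    type Output;
    fn from_db(self) -> Self::Output;
}

impl FromDb for CipherDb {
    type Output = Cipher;
    fn from_db(self) -> Cipher {
        Cipher { uuid: self.uuid, name: self.name }
    }
}

// Helpers so query results can end in `.ok().from_db()` or
// `.load::<CipherDb>(..)...from_db()`, as in the hunks above.
impl FromDb for Option<CipherDb> {
    type Output = Option<Cipher>;
    fn from_db(self) -> Self::Output { self.map(FromDb::from_db) }
}

impl FromDb for Vec<CipherDb> {
    type Output = Vec<Cipher>;
    fn from_db(self) -> Self::Output { self.into_iter().map(FromDb::from_db).collect() }
}

fn main() {
    let loaded = Some(CipherDb { uuid: "abc".into(), name: "demo".into() });
    let _model: Option<Cipher> = loaded.from_db();
}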
-    // Find all ciphers accessible to user
-    pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        ciphers::table
-            .left_join(users_organizations::table.on(
-                ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable()).and(
-                    users_organizations::user_uuid.eq(user_uuid).and(
-                        users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
-                    )
-                )
-            ))
-            .left_join(ciphers_collections::table.on(
-                ciphers::uuid.eq(ciphers_collections::cipher_uuid)
-            ))
-            .left_join(users_collections::table.on(
-                ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
-            ))
-            .filter(ciphers::user_uuid.eq(user_uuid).or( // Cipher owner
-                users_organizations::access_all.eq(true).or( // access_all in Organization
-                    users_organizations::atype.le(UserOrgType::Admin as i32).or( // Org admin or owner
-                        users_collections::user_uuid.eq(user_uuid).and( // Access to Collection
-                            users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
-                        )
-                    )
-                )
-            ))
-            .select(ciphers::all_columns)
-            .distinct()
-            .load::<Self>(&**conn).expect("Error loading ciphers")
+    // Find all ciphers accessible or visible to the specified user.
+    //
+    // "Accessible" means the user has read access to the cipher, either via
+    // direct ownership or via collection access.
+    //
+    // "Visible" usually means the same as accessible, except when an org
+    // owner/admin sets their account to have access to only selected
+    // collections in the org (presumably because they aren't interested in
+    // the other collections in the org). In this case, if `visible_only` is
+    // true, then the non-interesting ciphers will not be returned. As a
+    // result, those ciphers will not appear in "My Vault" for the org
+    // owner/admin, but they can still be accessed via the org vault view.
+    pub fn find_by_user(user_uuid: &str, visible_only: bool, conn: &DbConn) -> Vec<Self> {
+        db_run! {conn: {
+            let mut query = ciphers::table
+                .left_join(ciphers_collections::table.on(
+                    ciphers::uuid.eq(ciphers_collections::cipher_uuid)
+                ))
+                .left_join(users_organizations::table.on(
+                    ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())
+                        .and(users_organizations::user_uuid.eq(user_uuid))
+                        .and(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
+                ))
+                .left_join(users_collections::table.on(
+                    ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
+                        // Ensure that users_collections::user_uuid is NULL for unconfirmed users.
+                        .and(users_organizations::user_uuid.eq(users_collections::user_uuid))
+                ))
+                .filter(ciphers::user_uuid.eq(user_uuid)) // Cipher owner
+                .or_filter(users_organizations::access_all.eq(true)) // access_all in org
+                .or_filter(users_collections::user_uuid.eq(user_uuid)) // Access to collection
+                .into_boxed();
+
+            if !visible_only {
+                query = query.or_filter(
+                    users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin/owner
+                );
+            }
+
+            query
+                .select(ciphers::all_columns)
+                .distinct()
+                .load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
+        }}
     }

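A hypothetical caller, to make the visible_only flag concrete (find_by_user_visible is defined in the next hunk):

// "My Vault" listing vs. the full accessible set for an org owner/admin.
fn vault_listing_example(user_uuid: &str, conn: &DbConn) {
    // Only ciphers the user has chosen to see in their personal vault:
    let visible = Cipher::find_by_user_visible(user_uuid, conn);
    // Also include org ciphers reachable purely via the admin/owner role:
    let accessible = Cipher::find_by_user(user_uuid, false, conn);
    assert!(visible.len() <= accessible.len());
}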
-    // Find all ciphers directly owned by user
+    // Find all ciphers visible to the specified user.
+    pub fn find_by_user_visible(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        Self::find_by_user(user_uuid, true, conn)
+    }
+
+    // Find all ciphers directly owned by the specified user.
     pub fn find_owned_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        ciphers::table
-            .filter(ciphers::user_uuid.eq(user_uuid))
-            .load::<Self>(&**conn).expect("Error loading ciphers")
+        db_run! {conn: {
+            ciphers::table
+                .filter(
+                    ciphers::user_uuid.eq(user_uuid)
+                        .and(ciphers::organization_uuid.is_null())
+                )
+                .load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
+        }}
     }

     pub fn count_owned_by_user(user_uuid: &str, conn: &DbConn) -> i64 {
+        db_run! {conn: {
             ciphers::table
                 .filter(ciphers::user_uuid.eq(user_uuid))
                 .count()
-                .first::<i64>(&**conn)
+                .first::<i64>(conn)
                 .ok()
                 .unwrap_or(0)
+        }}
     }

     pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        db_run! {conn: {
             ciphers::table
                 .filter(ciphers::organization_uuid.eq(org_uuid))
-                .load::<Self>(&**conn).expect("Error loading ciphers")
+                .load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
+        }}
     }

     pub fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
+        db_run! {conn: {
             ciphers::table
                 .filter(ciphers::organization_uuid.eq(org_uuid))
                 .count()
-                .first::<i64>(&**conn)
+                .first::<i64>(conn)
                 .ok()
                 .unwrap_or(0)
+        }}
     }

     pub fn find_by_folder(folder_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        db_run! {conn: {
             folders_ciphers::table.inner_join(ciphers::table)
                 .filter(folders_ciphers::folder_uuid.eq(folder_uuid))
                 .select(ciphers::all_columns)
-                .load::<Self>(&**conn).expect("Error loading ciphers")
+                .load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
+        }}
     }

     pub fn get_collections(&self, user_id: &str, conn: &DbConn) -> Vec<String> {
+        db_run! {conn: {
             ciphers_collections::table
                 .inner_join(collections::table.on(
                     collections::uuid.eq(ciphers_collections::collection_uuid)
                 ))
                 .inner_join(users_organizations::table.on(
                     users_organizations::org_uuid.eq(collections::org_uuid).and(
                         users_organizations::user_uuid.eq(user_id)
                     )
                 ))
                 .left_join(users_collections::table.on(
                     users_collections::collection_uuid.eq(ciphers_collections::collection_uuid).and(
                         users_collections::user_uuid.eq(user_id)
                     )
                 ))
                 .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
                 .filter(users_collections::user_uuid.eq(user_id).or( // User has access to collection
                     users_organizations::access_all.eq(true).or( // User has access all
                         users_organizations::atype.le(UserOrgType::Admin as i32) // User is admin or owner
                     )
                 ))
                 .select(ciphers_collections::collection_uuid)
-                .load::<String>(&**conn).unwrap_or_default()
+                .load::<String>(conn).unwrap_or_default()
+        }}
     }
 }

src/db/models/collection.rs
@@ -1,15 +1,39 @@
 use serde_json::Value;

-use super::{Organization, UserOrgStatus, UserOrgType, UserOrganization};
+use super::{Organization, UserOrgStatus, UserOrgType, UserOrganization, User, Cipher};

+db_object! {
     #[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
     #[table_name = "collections"]
     #[belongs_to(Organization, foreign_key = "org_uuid")]
     #[primary_key(uuid)]
     pub struct Collection {
         pub uuid: String,
         pub org_uuid: String,
         pub name: String,
+    }
+
+    #[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
+    #[table_name = "users_collections"]
+    #[belongs_to(User, foreign_key = "user_uuid")]
+    #[belongs_to(Collection, foreign_key = "collection_uuid")]
+    #[primary_key(user_uuid, collection_uuid)]
+    pub struct CollectionUser {
+        pub user_uuid: String,
+        pub collection_uuid: String,
+        pub read_only: bool,
+        pub hide_passwords: bool,
+    }
+
+    #[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
+    #[table_name = "ciphers_collections"]
+    #[belongs_to(Cipher, foreign_key = "cipher_uuid")]
+    #[belongs_to(Collection, foreign_key = "collection_uuid")]
+    #[primary_key(cipher_uuid, collection_uuid)]
+    pub struct CollectionCipher {
+        pub cipher_uuid: String,
+        pub collection_uuid: String,
+    }
 }

 /// Local methods
@@ -25,44 +49,62 @@ impl Collection {

     pub fn to_json(&self) -> Value {
         json!({
+            "ExternalId": null, // Not support by us
             "Id": self.uuid,
             "OrganizationId": self.org_uuid,
             "Name": self.name,
             "Object": "collection",
         })
     }

+    pub fn to_json_details(&self, user_uuid: &str, conn: &DbConn) -> Value {
+        let mut json_object = self.to_json();
+        json_object["Object"] = json!("collectionDetails");
+        json_object["ReadOnly"] = json!(!self.is_writable_by_user(user_uuid, conn));
+        json_object["HidePasswords"] = json!(self.hide_passwords_for_user(user_uuid, conn));
+        json_object
+    }
 }

-use crate::db::schema::*;
 use crate::db::DbConn;
-use diesel::prelude::*;

 use crate::api::EmptyResult;
 use crate::error::MapResult;

 /// Database methods
 impl Collection {
-    #[cfg(feature = "postgresql")]
     pub fn save(&self, conn: &DbConn) -> EmptyResult {
         self.update_users_revision(conn);

-        diesel::insert_into(collections::table)
-            .values(self)
-            .on_conflict(collections::uuid)
-            .do_update()
-            .set(self)
-            .execute(&**conn)
-            .map_res("Error saving collection")
-    }
-
-    #[cfg(not(feature = "postgresql"))]
-    pub fn save(&self, conn: &DbConn) -> EmptyResult {
-        self.update_users_revision(conn);
-
-        diesel::replace_into(collections::table)
-            .values(self)
-            .execute(&**conn)
-            .map_res("Error saving collection")
+        db_run! { conn:
+            sqlite, mysql {
+                match diesel::replace_into(collections::table)
+                    .values(CollectionDb::to_db(self))
+                    .execute(conn)
+                {
+                    Ok(_) => Ok(()),
+                    // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
+                    Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
+                        diesel::update(collections::table)
+                            .filter(collections::uuid.eq(&self.uuid))
+                            .set(CollectionDb::to_db(self))
+                            .execute(conn)
+                            .map_res("Error saving collection")
+                    }
+                    Err(e) => Err(e.into()),
+                }.map_res("Error saving collection")
+            }
+            postgresql {
+                let value = CollectionDb::to_db(self);
+                diesel::insert_into(collections::table)
+                    .values(&value)
+                    .on_conflict(collections::uuid)
+                    .do_update()
+                    .set(&value)
+                    .execute(conn)
+                    .map_res("Error saving collection")
+            }
+        }
     }

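Why the match instead of a plain replace_into: on SQLite/MySQL, REPLACE INTO is implemented as DELETE + INSERT, so if another table holds a foreign key to the row being saved, the delete step fails with a foreign-key violation even though the data itself is fine. The new code catches exactly that error and retries as an UPDATE. The decision logic in isolation (a sketch; upsert_outcome is an invented name):

use diesel::result::{DatabaseErrorKind, Error};

// Classify the first attempt of the sqlite/mysql upsert path above.
fn upsert_outcome(first_try: Result<usize, Error>) -> &'static str {
    match first_try {
        Ok(_) => "row inserted or replaced",
        Err(Error::DatabaseError(DatabaseErrorKind::ForeignKeyViolation, _)) =>
            "row is referenced elsewhere; fall back to a plain UPDATE",
        Err(_) => "propagate any other error",
    }
}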
     pub fn delete(self, conn: &DbConn) -> EmptyResult {
@@ -70,9 +112,11 @@ impl Collection {
         CollectionCipher::delete_all_by_collection(&self.uuid, &conn)?;
         CollectionUser::delete_all_by_collection(&self.uuid, &conn)?;

+        db_run! { conn: {
             diesel::delete(collections::table.filter(collections::uuid.eq(self.uuid)))
-                .execute(&**conn)
+                .execute(conn)
                 .map_res("Error deleting collection")
+        }}
     }

     pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
@@ -91,33 +135,38 @@ impl Collection {
     }

     pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
+        db_run! { conn: {
             collections::table
                 .filter(collections::uuid.eq(uuid))
-                .first::<Self>(&**conn)
+                .first::<CollectionDb>(conn)
                 .ok()
+                .from_db()
+        }}
     }

     pub fn find_by_user_uuid(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        db_run! { conn: {
             collections::table
                 .left_join(users_collections::table.on(
                     users_collections::collection_uuid.eq(collections::uuid).and(
                         users_collections::user_uuid.eq(user_uuid)
                     )
                 ))
                 .left_join(users_organizations::table.on(
                     collections::org_uuid.eq(users_organizations::org_uuid).and(
                         users_organizations::user_uuid.eq(user_uuid)
                     )
                 ))
                 .filter(
                     users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
                 )
                 .filter(
                     users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
                         users_organizations::access_all.eq(true) // access_all in Organization
                     )
                 ).select(collections::all_columns)
-                .load::<Self>(&**conn).expect("Error loading collections")
+                .load::<CollectionDb>(conn).expect("Error loading collections").from_db()
+        }}
     }

     pub fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &DbConn) -> Vec<Self> {
@@ -128,155 +177,200 @@ impl Collection {
     }

     pub fn find_by_organization(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        db_run! { conn: {
             collections::table
                 .filter(collections::org_uuid.eq(org_uuid))
-                .load::<Self>(&**conn)
+                .load::<CollectionDb>(conn)
                 .expect("Error loading collections")
+                .from_db()
+        }}
     }

     pub fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &DbConn) -> Option<Self> {
+        db_run! { conn: {
             collections::table
                 .filter(collections::uuid.eq(uuid))
                 .filter(collections::org_uuid.eq(org_uuid))
                 .select(collections::all_columns)
-                .first::<Self>(&**conn)
+                .first::<CollectionDb>(conn)
                 .ok()
+                .from_db()
+        }}
     }

     pub fn find_by_uuid_and_user(uuid: &str, user_uuid: &str, conn: &DbConn) -> Option<Self> {
+        db_run! { conn: {
             collections::table
                 .left_join(users_collections::table.on(
                     users_collections::collection_uuid.eq(collections::uuid).and(
                         users_collections::user_uuid.eq(user_uuid)
                     )
                 ))
                 .left_join(users_organizations::table.on(
                     collections::org_uuid.eq(users_organizations::org_uuid).and(
                         users_organizations::user_uuid.eq(user_uuid)
                     )
                 ))
                 .filter(collections::uuid.eq(uuid))
                 .filter(
                     users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
                         users_organizations::access_all.eq(true).or( // access_all in Organization
                             users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
                         )
                     )
                 ).select(collections::all_columns)
-                .first::<Self>(&**conn).ok()
+                .first::<CollectionDb>(conn).ok()
+                .from_db()
+        }}
     }

     pub fn is_writable_by_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
         match UserOrganization::find_by_user_and_org(&user_uuid, &self.org_uuid, &conn) {
             None => false, // Not in Org
             Some(user_org) => {
-                if user_org.access_all {
-                    true
-                } else {
-                    users_collections::table
-                        .inner_join(collections::table)
-                        .filter(users_collections::collection_uuid.eq(&self.uuid))
-                        .filter(users_collections::user_uuid.eq(&user_uuid))
-                        .filter(users_collections::read_only.eq(false))
-                        .select(collections::all_columns)
-                        .first::<Self>(&**conn)
-                        .ok()
-                        .is_some() // Read only or no access to collection
-                }
+                if user_org.has_full_access() {
+                    return true;
+                }
+
+                db_run! { conn: {
+                    users_collections::table
+                        .filter(users_collections::collection_uuid.eq(&self.uuid))
+                        .filter(users_collections::user_uuid.eq(user_uuid))
+                        .filter(users_collections::read_only.eq(false))
+                        .count()
+                        .first::<i64>(conn)
+                        .ok()
+                        .unwrap_or(0) != 0
+                }}
             }
         }
     }
+
+    pub fn hide_passwords_for_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
+        match UserOrganization::find_by_user_and_org(&user_uuid, &self.org_uuid, &conn) {
+            None => true, // Not in Org
+            Some(user_org) => {
+                if user_org.has_full_access() {
+                    return false;
+                }
+
+                db_run! { conn: {
+                    users_collections::table
+                        .filter(users_collections::collection_uuid.eq(&self.uuid))
+                        .filter(users_collections::user_uuid.eq(user_uuid))
+                        .filter(users_collections::hide_passwords.eq(true))
+                        .count()
+                        .first::<i64>(conn)
+                        .ok()
+                        .unwrap_or(0) != 0
+                }}
+            }
+        }
+    }
 }

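The switch from user_org.access_all to user_org.has_full_access() broadens the short-circuit. Judging by the queries in this changeset, which treat atype <= Admin as the privileged roles, the helper on UserOrganization is roughly equivalent to the sketch below. This is an inference from the surrounding code, not a quote of its definition:

// Rough shape of has_full_access(), inferred from the diff; lower atype
// values (Owner, Admin) are the more privileged roles.
impl UserOrganization {
    pub fn has_full_access(&self) -> bool {
        self.access_all || self.atype <= UserOrgType::Admin as i32
    }
}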
-use super::User;
-
-#[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
-#[table_name = "users_collections"]
-#[belongs_to(User, foreign_key = "user_uuid")]
-#[belongs_to(Collection, foreign_key = "collection_uuid")]
-#[primary_key(user_uuid, collection_uuid)]
-pub struct CollectionUser {
-    pub user_uuid: String,
-    pub collection_uuid: String,
-    pub read_only: bool,
-    pub hide_passwords: bool,
-}

 /// Database methods
 impl CollectionUser {
     pub fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        db_run! { conn: {
             users_collections::table
                 .filter(users_collections::user_uuid.eq(user_uuid))
                 .inner_join(collections::table.on(collections::uuid.eq(users_collections::collection_uuid)))
                 .filter(collections::org_uuid.eq(org_uuid))
                 .select(users_collections::all_columns)
-                .load::<Self>(&**conn)
+                .load::<CollectionUserDb>(conn)
                 .expect("Error loading users_collections")
+                .from_db()
+        }}
     }

#[cfg(feature = "postgresql")]
|
|
||||||
pub fn save(user_uuid: &str, collection_uuid: &str, read_only: bool, hide_passwords: bool, conn: &DbConn) -> EmptyResult {
|
pub fn save(user_uuid: &str, collection_uuid: &str, read_only: bool, hide_passwords: bool, conn: &DbConn) -> EmptyResult {
|
||||||
User::update_uuid_revision(&user_uuid, conn);
|
User::update_uuid_revision(&user_uuid, conn);
|
||||||
|
|
||||||
diesel::insert_into(users_collections::table)
|
db_run! { conn:
|
||||||
.values((
|
sqlite, mysql {
|
||||||
users_collections::user_uuid.eq(user_uuid),
|
match diesel::replace_into(users_collections::table)
|
||||||
users_collections::collection_uuid.eq(collection_uuid),
|
.values((
|
||||||
users_collections::read_only.eq(read_only),
|
users_collections::user_uuid.eq(user_uuid),
|
||||||
users_collections::hide_passwords.eq(hide_passwords),
|
users_collections::collection_uuid.eq(collection_uuid),
|
||||||
))
|
users_collections::read_only.eq(read_only),
|
||||||
.on_conflict((users_collections::user_uuid, users_collections::collection_uuid))
|
users_collections::hide_passwords.eq(hide_passwords),
|
||||||
.do_update()
|
))
|
||||||
.set((
|
.execute(conn)
|
||||||
users_collections::read_only.eq(read_only),
|
{
|
||||||
users_collections::hide_passwords.eq(hide_passwords),
|
Ok(_) => Ok(()),
|
||||||
))
|
// Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
|
||||||
.execute(&**conn)
|
Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
|
||||||
.map_res("Error adding user to collection")
|
diesel::update(users_collections::table)
|
||||||
}
|
.filter(users_collections::user_uuid.eq(user_uuid))
|
||||||
|
.filter(users_collections::collection_uuid.eq(collection_uuid))
|
||||||
#[cfg(not(feature = "postgresql"))]
|
.set((
|
||||||
pub fn save(user_uuid: &str, collection_uuid: &str, read_only: bool, hide_passwords: bool, conn: &DbConn) -> EmptyResult {
|
users_collections::user_uuid.eq(user_uuid),
|
||||||
User::update_uuid_revision(&user_uuid, conn);
|
users_collections::collection_uuid.eq(collection_uuid),
|
||||||
|
users_collections::read_only.eq(read_only),
|
||||||
diesel::replace_into(users_collections::table)
|
users_collections::hide_passwords.eq(hide_passwords),
|
||||||
.values((
|
))
|
||||||
users_collections::user_uuid.eq(user_uuid),
|
.execute(conn)
|
||||||
users_collections::collection_uuid.eq(collection_uuid),
|
.map_res("Error adding user to collection")
|
||||||
users_collections::read_only.eq(read_only),
|
}
|
||||||
users_collections::hide_passwords.eq(hide_passwords),
|
Err(e) => Err(e.into()),
|
||||||
))
|
}.map_res("Error adding user to collection")
|
||||||
.execute(&**conn)
|
}
|
||||||
.map_res("Error adding user to collection")
|
postgresql {
|
||||||
|
diesel::insert_into(users_collections::table)
|
||||||
|
.values((
|
||||||
|
users_collections::user_uuid.eq(user_uuid),
|
||||||
|
users_collections::collection_uuid.eq(collection_uuid),
|
||||||
|
users_collections::read_only.eq(read_only),
|
||||||
|
users_collections::hide_passwords.eq(hide_passwords),
|
||||||
|
))
|
||||||
|
.on_conflict((users_collections::user_uuid, users_collections::collection_uuid))
|
||||||
|
.do_update()
|
||||||
|
.set((
|
||||||
|
users_collections::read_only.eq(read_only),
|
||||||
|
users_collections::hide_passwords.eq(hide_passwords),
|
||||||
|
))
|
||||||
|
.execute(conn)
|
||||||
|
.map_res("Error adding user to collection")
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
     pub fn delete(self, conn: &DbConn) -> EmptyResult {
         User::update_uuid_revision(&self.user_uuid, conn);

+        db_run! { conn: {
             diesel::delete(
                 users_collections::table
                     .filter(users_collections::user_uuid.eq(&self.user_uuid))
                     .filter(users_collections::collection_uuid.eq(&self.collection_uuid)),
             )
-            .execute(&**conn)
+            .execute(conn)
             .map_res("Error removing user from collection")
+        }}
     }

     pub fn find_by_collection(collection_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        db_run! { conn: {
             users_collections::table
                 .filter(users_collections::collection_uuid.eq(collection_uuid))
                 .select(users_collections::all_columns)
-                .load::<Self>(&**conn)
+                .load::<CollectionUserDb>(conn)
                 .expect("Error loading users_collections")
+                .from_db()
+        }}
     }

     pub fn find_by_collection_and_user(collection_uuid: &str, user_uuid: &str, conn: &DbConn) -> Option<Self> {
+        db_run! { conn: {
             users_collections::table
                 .filter(users_collections::collection_uuid.eq(collection_uuid))
                 .filter(users_collections::user_uuid.eq(user_uuid))
                 .select(users_collections::all_columns)
-                .first::<Self>(&**conn)
+                .first::<CollectionUserDb>(conn)
                 .ok()
+                .from_db()
+        }}
     }

     pub fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult {
@@ -286,81 +380,90 @@ impl CollectionUser {
             User::update_uuid_revision(&collection.user_uuid, conn);
         });

+        db_run! { conn: {
             diesel::delete(users_collections::table.filter(users_collections::collection_uuid.eq(collection_uuid)))
-                .execute(&**conn)
+                .execute(conn)
                 .map_res("Error deleting users from collection")
+        }}
     }

-    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
-        User::update_uuid_revision(&user_uuid, conn);
-
-        diesel::delete(users_collections::table.filter(users_collections::user_uuid.eq(user_uuid)))
-            .execute(&**conn)
-            .map_res("Error removing user from collections")
+    pub fn delete_all_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &DbConn) -> EmptyResult {
+        let collectionusers = Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn);
+
+        db_run! { conn: {
+            for user in collectionusers {
+                diesel::delete(users_collections::table.filter(
+                    users_collections::user_uuid.eq(user_uuid)
+                        .and(users_collections::collection_uuid.eq(user.collection_uuid))
+                ))
+                .execute(conn)
+                .map_res("Error removing user from collections")?;
+            }
+            Ok(())
+        }}
     }
 }

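The rename from delete_all_by_user to delete_all_by_user_and_org also narrows the behavior: instead of dropping every collection link the user has anywhere, it first collects the user's memberships in the one org and deletes only those rows. The equivalent set logic without Diesel (invented tuple shape, for illustration only):

// Which (user, collection) links should go when the user leaves one org.
fn links_to_delete<'a>(
    memberships: &'a [(String, String)], // (collection_uuid, org_uuid)
    org_uuid: &str,
) -> Vec<&'a str> {
    memberships
        .iter()
        .filter(|(_, org)| org.as_str() == org_uuid)
        .map(|(collection, _)| collection.as_str())
        .collect()
}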

-use super::Cipher;
-
-#[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
-#[table_name = "ciphers_collections"]
-#[belongs_to(Cipher, foreign_key = "cipher_uuid")]
-#[belongs_to(Collection, foreign_key = "collection_uuid")]
-#[primary_key(cipher_uuid, collection_uuid)]
-pub struct CollectionCipher {
-    pub cipher_uuid: String,
-    pub collection_uuid: String,
-}

 /// Database methods
 impl CollectionCipher {
-    #[cfg(feature = "postgresql")]
     pub fn save(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
         Self::update_users_revision(&collection_uuid, conn);
-        diesel::insert_into(ciphers_collections::table)
-            .values((
-                ciphers_collections::cipher_uuid.eq(cipher_uuid),
-                ciphers_collections::collection_uuid.eq(collection_uuid),
-            ))
-            .on_conflict((ciphers_collections::cipher_uuid, ciphers_collections::collection_uuid))
-            .do_nothing()
-            .execute(&**conn)
-            .map_res("Error adding cipher to collection")
-    }
-
-    #[cfg(not(feature = "postgresql"))]
-    pub fn save(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
-        Self::update_users_revision(&collection_uuid, conn);
-        diesel::replace_into(ciphers_collections::table)
-            .values((
-                ciphers_collections::cipher_uuid.eq(cipher_uuid),
-                ciphers_collections::collection_uuid.eq(collection_uuid),
-            ))
-            .execute(&**conn)
-            .map_res("Error adding cipher to collection")
+
+        db_run! { conn:
+            sqlite, mysql {
+                // Not checking for ForeignKey Constraints here.
+                // Table ciphers_collections does not have ForeignKey Constraints which would cause conflicts.
+                // This table has no constraints pointing to itself, but only to others.
+                diesel::replace_into(ciphers_collections::table)
+                    .values((
+                        ciphers_collections::cipher_uuid.eq(cipher_uuid),
+                        ciphers_collections::collection_uuid.eq(collection_uuid),
+                    ))
+                    .execute(conn)
+                    .map_res("Error adding cipher to collection")
+            }
+            postgresql {
+                diesel::insert_into(ciphers_collections::table)
+                    .values((
+                        ciphers_collections::cipher_uuid.eq(cipher_uuid),
+                        ciphers_collections::collection_uuid.eq(collection_uuid),
+                    ))
+                    .on_conflict((ciphers_collections::cipher_uuid, ciphers_collections::collection_uuid))
+                    .do_nothing()
+                    .execute(conn)
+                    .map_res("Error adding cipher to collection")
+            }
+        }
     }

     pub fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
         Self::update_users_revision(&collection_uuid, conn);
+
+        db_run! { conn: {
             diesel::delete(
                 ciphers_collections::table
                     .filter(ciphers_collections::cipher_uuid.eq(cipher_uuid))
                     .filter(ciphers_collections::collection_uuid.eq(collection_uuid)),
             )
-            .execute(&**conn)
+            .execute(conn)
             .map_res("Error deleting cipher from collection")
+        }}
     }

     pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
+        db_run! { conn: {
             diesel::delete(ciphers_collections::table.filter(ciphers_collections::cipher_uuid.eq(cipher_uuid)))
-                .execute(&**conn)
+                .execute(conn)
                 .map_res("Error removing cipher from collections")
+        }}
     }

     pub fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult {
+        db_run! { conn: {
             diesel::delete(ciphers_collections::table.filter(ciphers_collections::collection_uuid.eq(collection_uuid)))
-                .execute(&**conn)
+                .execute(conn)
                 .map_res("Error removing ciphers from collection")
+        }}
     }

     pub fn update_users_revision(collection_uuid: &str, conn: &DbConn) {

src/db/models/device.rs
@@ -3,26 +3,28 @@ use chrono::{NaiveDateTime, Utc};
 use super::User;
 use crate::CONFIG;

+db_object! {
     #[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
     #[table_name = "devices"]
     #[changeset_options(treat_none_as_null="true")]
     #[belongs_to(User, foreign_key = "user_uuid")]
     #[primary_key(uuid)]
     pub struct Device {
         pub uuid: String,
         pub created_at: NaiveDateTime,
         pub updated_at: NaiveDateTime,

         pub user_uuid: String,

         pub name: String,
-        /// https://github.com/bitwarden/core/tree/master/src/Core/Enums
+        // https://github.com/bitwarden/core/tree/master/src/Core/Enums
         pub atype: i32,
         pub push_token: Option<String>,

         pub refresh_token: String,

         pub twofactor_remember: Option<String>,
+    }
 }

 /// Local methods
@@ -105,41 +107,39 @@ impl Device {
     }
 }

-use crate::db::schema::devices;
 use crate::db::DbConn;
-use diesel::prelude::*;

 use crate::api::EmptyResult;
 use crate::error::MapResult;

 /// Database methods
 impl Device {
-    #[cfg(feature = "postgresql")]
     pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
         self.updated_at = Utc::now().naive_utc();

-        crate::util::retry(
-            || diesel::insert_into(devices::table).values(&*self).on_conflict(devices::uuid).do_update().set(&*self).execute(&**conn),
-            10,
-        )
-        .map_res("Error saving device")
-    }
-
-    #[cfg(not(feature = "postgresql"))]
-    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
-        self.updated_at = Utc::now().naive_utc();
-
-        crate::util::retry(
-            || diesel::replace_into(devices::table).values(&*self).execute(&**conn),
-            10,
-        )
-        .map_res("Error saving device")
+        db_run! { conn:
+            sqlite, mysql {
+                crate::util::retry(
+                    || diesel::replace_into(devices::table).values(DeviceDb::to_db(self)).execute(conn),
+                    10,
+                ).map_res("Error saving device")
+            }
+            postgresql {
+                let value = DeviceDb::to_db(self);
+                crate::util::retry(
+                    || diesel::insert_into(devices::table).values(&value).on_conflict(devices::uuid).do_update().set(&value).execute(conn),
+                    10,
+                ).map_res("Error saving device")
+            }
+        }
     }

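Device saves stay wrapped in crate::util::retry, now once per backend branch. A simplified sketch of what such a wrapper does (the real helper in this codebase also decides which errors are worth retrying, e.g. transient "database is locked" conditions on SQLite, so treat this as an approximation):

// Re-run a fallible operation up to `attempts` times, keeping the first Ok
// or returning the last Err. Simplified: no error filtering, no backoff.
fn retry<T, E>(mut op: impl FnMut() -> Result<T, E>, attempts: u32) -> Result<T, E> {
    let mut last = op();
    for _ in 1..attempts {
        if last.is_ok() {
            break;
        }
        last = op();
    }
    last
}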
     pub fn delete(self, conn: &DbConn) -> EmptyResult {
+        db_run! { conn: {
             diesel::delete(devices::table.filter(devices::uuid.eq(self.uuid)))
-                .execute(&**conn)
+                .execute(conn)
                 .map_res("Error removing device")
+        }}
     }

     pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
@@ -150,23 +150,43 @@ impl Device {
     }

     pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
+        db_run! { conn: {
             devices::table
                 .filter(devices::uuid.eq(uuid))
-                .first::<Self>(&**conn)
+                .first::<DeviceDb>(conn)
                 .ok()
+                .from_db()
+        }}
     }

     pub fn find_by_refresh_token(refresh_token: &str, conn: &DbConn) -> Option<Self> {
+        db_run! { conn: {
             devices::table
                 .filter(devices::refresh_token.eq(refresh_token))
-                .first::<Self>(&**conn)
+                .first::<DeviceDb>(conn)
                 .ok()
+                .from_db()
+        }}
     }

     pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        db_run! { conn: {
             devices::table
                 .filter(devices::user_uuid.eq(user_uuid))
-                .load::<Self>(&**conn)
+                .load::<DeviceDb>(conn)
                 .expect("Error loading devices")
+                .from_db()
+        }}
     }
+
+    pub fn find_latest_active_by_user(user_uuid: &str, conn: &DbConn) -> Option<Self> {
+        db_run! { conn: {
+            devices::table
+                .filter(devices::user_uuid.eq(user_uuid))
+                .order(devices::updated_at.desc())
+                .first::<DeviceDb>(conn)
+                .ok()
+                .from_db()
+        }}
+    }
 }

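A hypothetical use of the new query: the most recently updated device is a cheap proxy for the user's last activity.

fn last_active_example(user_uuid: &str, conn: &DbConn) {
    if let Some(device) = Device::find_latest_active_by_user(user_uuid, conn) {
        println!("last active device: {} ({})", device.name, device.updated_at);
    }
}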

src/db/models/favorite.rs (new file, 83 lines)
@@ -0,0 +1,83 @@
+use super::{Cipher, User};
+
+db_object! {
+    #[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
+    #[table_name = "favorites"]
+    #[belongs_to(User, foreign_key = "user_uuid")]
+    #[belongs_to(Cipher, foreign_key = "cipher_uuid")]
+    #[primary_key(user_uuid, cipher_uuid)]
+    pub struct Favorite {
+        pub user_uuid: String,
+        pub cipher_uuid: String,
+    }
+}
+
+use crate::db::DbConn;
+
+use crate::api::EmptyResult;
+use crate::error::MapResult;
+
+impl Favorite {
+    // Returns whether the specified cipher is a favorite of the specified user.
+    pub fn is_favorite(cipher_uuid: &str, user_uuid: &str, conn: &DbConn) -> bool {
+        db_run!{ conn: {
+            let query = favorites::table
+                .filter(favorites::cipher_uuid.eq(cipher_uuid))
+                .filter(favorites::user_uuid.eq(user_uuid))
+                .count();
+
+            query.first::<i64>(conn).ok().unwrap_or(0) != 0
+        }}
+    }
+
+    // Sets whether the specified cipher is a favorite of the specified user.
+    pub fn set_favorite(favorite: bool, cipher_uuid: &str, user_uuid: &str, conn: &DbConn) -> EmptyResult {
+        let (old, new) = (Self::is_favorite(cipher_uuid, user_uuid, &conn), favorite);
+        match (old, new) {
+            (false, true) => {
+                User::update_uuid_revision(user_uuid, &conn);
+                db_run!{ conn: {
+                    diesel::insert_into(favorites::table)
+                        .values((
+                            favorites::user_uuid.eq(user_uuid),
+                            favorites::cipher_uuid.eq(cipher_uuid),
+                        ))
+                        .execute(conn)
+                        .map_res("Error adding favorite")
+                }}
+            }
+            (true, false) => {
+                User::update_uuid_revision(user_uuid, &conn);
+                db_run!{ conn: {
+                    diesel::delete(
+                        favorites::table
+                            .filter(favorites::user_uuid.eq(user_uuid))
+                            .filter(favorites::cipher_uuid.eq(cipher_uuid))
+                    )
+                    .execute(conn)
+                    .map_res("Error removing favorite")
+                }}
+            }
+            // Otherwise, the favorite status is already what it should be.
+            _ => Ok(())
+        }
+    }
+
+    // Delete all favorite entries associated with the specified cipher.
+    pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
+        db_run! { conn: {
+            diesel::delete(favorites::table.filter(favorites::cipher_uuid.eq(cipher_uuid)))
+                .execute(conn)
+                .map_res("Error removing favorites by cipher")
+        }}
+    }
+
+    // Delete all favorite entries associated with the specified user.
+    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
+        db_run! { conn: {
+            diesel::delete(favorites::table.filter(favorites::user_uuid.eq(user_uuid)))
+                .execute(conn)
+                .map_res("Error removing favorites by user")
+        }}
+    }
+}
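set_favorite is effectively a two-state machine keyed on (current, requested): only the two real transitions touch the database and bump the user's revision, so repeated identical requests are free. A hypothetical round trip:

// Assumes a cipher UUID, user UUID and DbConn are in scope.
fn favorite_roundtrip(cipher_uuid: &str, user_uuid: &str, conn: &DbConn) -> EmptyResult {
    Favorite::set_favorite(true, cipher_uuid, user_uuid, conn)?;  // (false, true): insert + revision bump
    Favorite::set_favorite(true, cipher_uuid, user_uuid, conn)?;  // (true, true): no-op
    Favorite::set_favorite(false, cipher_uuid, user_uuid, conn)   // (true, false): delete
}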
src/db/models/folder.rs
@@ -3,26 +3,28 @@ use serde_json::Value;

 use super::{Cipher, User};

+db_object! {
     #[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
     #[table_name = "folders"]
     #[belongs_to(User, foreign_key = "user_uuid")]
     #[primary_key(uuid)]
     pub struct Folder {
         pub uuid: String,
         pub created_at: NaiveDateTime,
         pub updated_at: NaiveDateTime,
         pub user_uuid: String,
         pub name: String,
     }

     #[derive(Debug, Identifiable, Queryable, Insertable, Associations)]
     #[table_name = "folders_ciphers"]
     #[belongs_to(Cipher, foreign_key = "cipher_uuid")]
     #[belongs_to(Folder, foreign_key = "folder_uuid")]
     #[primary_key(cipher_uuid, folder_uuid)]
     pub struct FolderCipher {
         pub cipher_uuid: String,
         pub folder_uuid: String,
+    }
 }

 /// Local methods
@@ -61,47 +63,58 @@ impl FolderCipher {
     }
 }

-use crate::db::schema::{folders, folders_ciphers};
 use crate::db::DbConn;
-use diesel::prelude::*;

 use crate::api::EmptyResult;
 use crate::error::MapResult;

 /// Database methods
 impl Folder {
-    #[cfg(feature = "postgresql")]
     pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
         User::update_uuid_revision(&self.user_uuid, conn);
         self.updated_at = Utc::now().naive_utc();

-        diesel::insert_into(folders::table)
-            .values(&*self)
-            .on_conflict(folders::uuid)
-            .do_update()
-            .set(&*self)
-            .execute(&**conn)
-            .map_res("Error saving folder")
-    }
-
-    #[cfg(not(feature = "postgresql"))]
-    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
-        User::update_uuid_revision(&self.user_uuid, conn);
-        self.updated_at = Utc::now().naive_utc();
-
-        diesel::replace_into(folders::table)
-            .values(&*self)
-            .execute(&**conn)
-            .map_res("Error saving folder")
+        db_run! { conn:
+            sqlite, mysql {
+                match diesel::replace_into(folders::table)
+                    .values(FolderDb::to_db(self))
+                    .execute(conn)
+                {
+                    Ok(_) => Ok(()),
+                    // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
+                    Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
+                        diesel::update(folders::table)
+                            .filter(folders::uuid.eq(&self.uuid))
+                            .set(FolderDb::to_db(self))
+                            .execute(conn)
+                            .map_res("Error saving folder")
+                    }
+                    Err(e) => Err(e.into()),
+                }.map_res("Error saving folder")
+            }
+            postgresql {
+                let value = FolderDb::to_db(self);
+                diesel::insert_into(folders::table)
+                    .values(&value)
+                    .on_conflict(folders::uuid)
+                    .do_update()
+                    .set(&value)
+                    .execute(conn)
+                    .map_res("Error saving folder")
+            }
+        }
     }

     pub fn delete(&self, conn: &DbConn) -> EmptyResult {
         User::update_uuid_revision(&self.user_uuid, conn);
         FolderCipher::delete_all_by_folder(&self.uuid, &conn)?;

+        db_run! { conn: {
             diesel::delete(folders::table.filter(folders::uuid.eq(&self.uuid)))
-                .execute(&**conn)
+                .execute(conn)
                 .map_res("Error deleting folder")
+        }}
     }

     pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
@@ -112,73 +125,95 @@ impl Folder {
     }

     pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
+        db_run! { conn: {
             folders::table
                 .filter(folders::uuid.eq(uuid))
-                .first::<Self>(&**conn)
+                .first::<FolderDb>(conn)
                 .ok()
+                .from_db()
+        }}
     }

     pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
+        db_run! { conn: {
             folders::table
                 .filter(folders::user_uuid.eq(user_uuid))
-                .load::<Self>(&**conn)
+                .load::<FolderDb>(conn)
                 .expect("Error loading folders")
+                .from_db()
+        }}
     }
 }

 impl FolderCipher {
-    #[cfg(feature = "postgresql")]
     pub fn save(&self, conn: &DbConn) -> EmptyResult {
-        diesel::insert_into(folders_ciphers::table)
-            .values(&*self)
-            .on_conflict((folders_ciphers::cipher_uuid, folders_ciphers::folder_uuid))
-            .do_nothing()
-            .execute(&**conn)
-            .map_res("Error adding cipher to folder")
-    }
-
-    #[cfg(not(feature = "postgresql"))]
-    pub fn save(&self, conn: &DbConn) -> EmptyResult {
-        diesel::replace_into(folders_ciphers::table)
-            .values(&*self)
-            .execute(&**conn)
-            .map_res("Error adding cipher to folder")
+        db_run! { conn:
+            sqlite, mysql {
+                // Not checking for ForeignKey Constraints here.
+                // Table folders_ciphers does not have ForeignKey Constraints which would cause conflicts.
+                // This table has no constraints pointing to itself, but only to others.
+                diesel::replace_into(folders_ciphers::table)
+                    .values(FolderCipherDb::to_db(self))
+                    .execute(conn)
+                    .map_res("Error adding cipher to folder")
+            }
+            postgresql {
+                diesel::insert_into(folders_ciphers::table)
+                    .values(FolderCipherDb::to_db(self))
+                    .on_conflict((folders_ciphers::cipher_uuid, folders_ciphers::folder_uuid))
+                    .do_nothing()
+                    .execute(conn)
+                    .map_res("Error adding cipher to folder")
+            }
+        }
     }

     pub fn delete(self, conn: &DbConn) -> EmptyResult {
+        db_run! { conn: {
             diesel::delete(
                 folders_ciphers::table
                     .filter(folders_ciphers::cipher_uuid.eq(self.cipher_uuid))
                     .filter(folders_ciphers::folder_uuid.eq(self.folder_uuid)),
             )
-            .execute(&**conn)
+            .execute(conn)
             .map_res("Error removing cipher from folder")
+        }}
     }

pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
|
pub fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
|
||||||
diesel::delete(folders_ciphers::table.filter(folders_ciphers::cipher_uuid.eq(cipher_uuid)))
|
db_run! { conn: {
|
||||||
.execute(&**conn)
|
diesel::delete(folders_ciphers::table.filter(folders_ciphers::cipher_uuid.eq(cipher_uuid)))
|
||||||
.map_res("Error removing cipher from folders")
|
.execute(conn)
|
||||||
|
.map_res("Error removing cipher from folders")
|
||||||
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn delete_all_by_folder(folder_uuid: &str, conn: &DbConn) -> EmptyResult {
|
pub fn delete_all_by_folder(folder_uuid: &str, conn: &DbConn) -> EmptyResult {
|
||||||
diesel::delete(folders_ciphers::table.filter(folders_ciphers::folder_uuid.eq(folder_uuid)))
|
db_run! { conn: {
|
||||||
.execute(&**conn)
|
diesel::delete(folders_ciphers::table.filter(folders_ciphers::folder_uuid.eq(folder_uuid)))
|
||||||
.map_res("Error removing ciphers from folder")
|
.execute(conn)
|
||||||
|
.map_res("Error removing ciphers from folder")
|
||||||
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn find_by_folder_and_cipher(folder_uuid: &str, cipher_uuid: &str, conn: &DbConn) -> Option<Self> {
|
pub fn find_by_folder_and_cipher(folder_uuid: &str, cipher_uuid: &str, conn: &DbConn) -> Option<Self> {
|
||||||
folders_ciphers::table
|
db_run! { conn: {
|
||||||
.filter(folders_ciphers::folder_uuid.eq(folder_uuid))
|
folders_ciphers::table
|
||||||
.filter(folders_ciphers::cipher_uuid.eq(cipher_uuid))
|
.filter(folders_ciphers::folder_uuid.eq(folder_uuid))
|
||||||
.first::<Self>(&**conn)
|
.filter(folders_ciphers::cipher_uuid.eq(cipher_uuid))
|
||||||
.ok()
|
.first::<FolderCipherDb>(conn)
|
||||||
|
.ok()
|
||||||
|
.from_db()
|
||||||
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn find_by_folder(folder_uuid: &str, conn: &DbConn) -> Vec<Self> {
|
pub fn find_by_folder(folder_uuid: &str, conn: &DbConn) -> Vec<Self> {
|
||||||
folders_ciphers::table
|
db_run! { conn: {
|
||||||
.filter(folders_ciphers::folder_uuid.eq(folder_uuid))
|
folders_ciphers::table
|
||||||
.load::<Self>(&**conn)
|
.filter(folders_ciphers::folder_uuid.eq(folder_uuid))
|
||||||
.expect("Error loading folders")
|
.load::<FolderCipherDb>(conn)
|
||||||
|
.expect("Error loading folders")
|
||||||
|
.from_db()
|
||||||
|
}}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
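Every hunk in this compare boils down to the same refactor: the per-backend `#[cfg(feature = ...)]` duplication is dropped, and each Diesel query is instead wrapped in a db_run! block that selects the connection for whichever backend is in use and rebinds `conn` inside the block. The project's actual macro also handles per-backend schema imports and the distinct `sqlite, mysql { }` / `postgresql { }` arms seen above; the snippet below is only a minimal, self-contained sketch of the dispatch idea, using an illustrative `Backend` enum that does not exist in this repository:

    // Minimal sketch of the db_run! dispatch idea (illustrative types only).
    #[allow(dead_code)]
    enum Backend {
        Sqlite(String),
        Mysql(String),
        Postgresql(String),
    }

    macro_rules! db_run {
        ($conn:ident: $body:block) => {
            // Rebind `conn` to the backend-specific value, so the same
            // query body can be written once at every call site.
            match $conn {
                Backend::Sqlite(ref $conn) => $body,
                Backend::Mysql(ref $conn) => $body,
                Backend::Postgresql(ref $conn) => $body,
            }
        };
    }

    fn main() {
        let conn = Backend::Sqlite("file:demo.db".to_string());
        let label = db_run! { conn: {
            format!("running against {conn}")
        }};
        println!("{label}");
    }

The next hunk switches to the models module index, which re-sorts the module list and adds the new favorite module along with the Favorite and UserStampException re-exports.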
@@ -1,21 +1,21 @@
 mod attachment;
 mod cipher;
-mod device;
-mod folder;
-mod user;
-
 mod collection;
+mod device;
+mod favorite;
+mod folder;
+mod org_policy;
 mod organization;
 mod two_factor;
-mod org_policy;
+mod user;

 pub use self::attachment::Attachment;
 pub use self::cipher::Cipher;
 pub use self::collection::{Collection, CollectionCipher, CollectionUser};
 pub use self::device::Device;
+pub use self::favorite::Favorite;
 pub use self::folder::{Folder, FolderCipher};
-pub use self::organization::Organization;
-pub use self::organization::{UserOrgStatus, UserOrgType, UserOrganization};
+pub use self::org_policy::{OrgPolicy, OrgPolicyType};
+pub use self::organization::{Organization, UserOrgStatus, UserOrgType, UserOrganization};
 pub use self::two_factor::{TwoFactor, TwoFactorType};
-pub use self::user::{Invitation, User};
-pub use self::org_policy::{OrgPolicy, OrgPolicyType};
+pub use self::user::{Invitation, User, UserStampException};
@@ -1,23 +1,23 @@
-use diesel::prelude::*;
 use serde_json::Value;

 use crate::api::EmptyResult;
-use crate::db::schema::org_policies;
 use crate::db::DbConn;
 use crate::error::MapResult;

-use super::Organization;
+use super::{Organization, UserOrgStatus};

-#[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
-#[table_name = "org_policies"]
-#[belongs_to(Organization, foreign_key = "org_uuid")]
-#[primary_key(uuid)]
-pub struct OrgPolicy {
-    pub uuid: String,
-    pub org_uuid: String,
-    pub atype: i32,
-    pub enabled: bool,
-    pub data: String,
+db_object! {
+    #[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[table_name = "org_policies"]
+    #[belongs_to(Organization, foreign_key = "org_uuid")]
+    #[primary_key(uuid)]
+    pub struct OrgPolicy {
+        pub uuid: String,
+        pub org_uuid: String,
+        pub atype: i32,
+        pub enabled: bool,
+        pub data: String,
+    }
 }

 #[allow(dead_code)]
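The db_object! wrapper above is the companion piece that makes the `OrgPolicyDb`, `FolderDb`, `UserDb`, etc. names and the `to_db()` / `.from_db()` calls throughout this compare work: it emits a backend-specific mirror of the struct carrying the Diesel derives, plus conversions back to the plain model type, and `.from_db()` is evidently also available on `Option` and `Vec` results, given the `.ok().from_db()` and `.expect(...).from_db()` chains below. A hand-written, self-contained sketch of the generated shape (not the real macro output):

    // The plain model type used by the rest of the application.
    #[derive(Clone, Debug, PartialEq)]
    pub struct OrgPolicy {
        pub uuid: String,
        pub atype: i32,
    }

    // What db_object! conceptually generates: a mirror struct that would
    // carry the Diesel derives, plus to_db/from_db conversions.
    pub struct OrgPolicyDb {
        pub uuid: String,
        pub atype: i32,
    }

    impl OrgPolicyDb {
        pub fn to_db(model: &OrgPolicy) -> Self {
            Self { uuid: model.uuid.clone(), atype: model.atype }
        }
        pub fn from_db(self) -> OrgPolicy {
            OrgPolicy { uuid: self.uuid, atype: self.atype }
        }
    }

    fn main() {
        let policy = OrgPolicy { uuid: "example-uuid".into(), atype: 0 };
        let row = OrgPolicyDb::to_db(&policy); // plays the role of the .values(...) input
        assert_eq!(row.from_db(), policy);     // and of .first::<OrgPolicyDb>(..).from_db()
        println!("round-trip ok");
    }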
@@ -26,6 +26,9 @@ pub enum OrgPolicyType {
     TwoFactorAuthentication = 0,
     MasterPassword = 1,
     PasswordGenerator = 2,
+    // SingleOrg = 3, // Not currently supported.
+    // RequireSso = 4, // Not currently supported.
+    PersonalOwnership = 5,
 }

 /// Local methods
@@ -40,6 +43,10 @@ impl OrgPolicy {
         }
     }

+    pub fn has_type(&self, policy_type: OrgPolicyType) -> bool {
+        self.atype == policy_type as i32
+    }
+
     pub fn to_json(&self) -> Value {
         let data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);
         json!({
@@ -55,87 +62,119 @@ impl OrgPolicy {

 /// Database methods
 impl OrgPolicy {
-    #[cfg(feature = "postgresql")]
     pub fn save(&self, conn: &DbConn) -> EmptyResult {
-        // We need to make sure we're not going to violate the unique constraint on org_uuid and atype.
-        // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
-        // not support multiple constraints on ON CONFLICT clauses.
-        diesel::delete(
-            org_policies::table
-                .filter(org_policies::org_uuid.eq(&self.org_uuid))
-                .filter(org_policies::atype.eq(&self.atype)),
-        )
-        .execute(&**conn)
-        .map_res("Error deleting org_policy for insert")?;
-
-        diesel::insert_into(org_policies::table)
-            .values(self)
-            .on_conflict(org_policies::uuid)
-            .do_update()
-            .set(self)
-            .execute(&**conn)
-            .map_res("Error saving org_policy")
-    }
-
-    #[cfg(not(feature = "postgresql"))]
-    pub fn save(&self, conn: &DbConn) -> EmptyResult {
-        diesel::replace_into(org_policies::table)
-            .values(&*self)
-            .execute(&**conn)
-            .map_res("Error saving org_policy")
+        db_run! { conn:
+            sqlite, mysql {
+                match diesel::replace_into(org_policies::table)
+                    .values(OrgPolicyDb::to_db(self))
+                    .execute(conn)
+                {
+                    Ok(_) => Ok(()),
+                    // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
+                    Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
+                        diesel::update(org_policies::table)
+                            .filter(org_policies::uuid.eq(&self.uuid))
+                            .set(OrgPolicyDb::to_db(self))
+                            .execute(conn)
+                            .map_res("Error saving org_policy")
+                    }
+                    Err(e) => Err(e.into()),
+                }.map_res("Error saving org_policy")
+            }
+            postgresql {
+                let value = OrgPolicyDb::to_db(self);
+                // We need to make sure we're not going to violate the unique constraint on org_uuid and atype.
+                // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
+                // not support multiple constraints on ON CONFLICT clauses.
+                diesel::delete(
+                    org_policies::table
+                        .filter(org_policies::org_uuid.eq(&self.org_uuid))
+                        .filter(org_policies::atype.eq(&self.atype)),
+                )
+                .execute(conn)
+                .map_res("Error deleting org_policy for insert")?;

+                diesel::insert_into(org_policies::table)
+                    .values(&value)
+                    .on_conflict(org_policies::uuid)
+                    .do_update()
+                    .set(&value)
+                    .execute(conn)
+                    .map_res("Error saving org_policy")
+            }
+        }
     }

     pub fn delete(self, conn: &DbConn) -> EmptyResult {
-        diesel::delete(org_policies::table.filter(org_policies::uuid.eq(self.uuid)))
-            .execute(&**conn)
-            .map_res("Error deleting org_policy")
+        db_run! { conn: {
+            diesel::delete(org_policies::table.filter(org_policies::uuid.eq(self.uuid)))
+                .execute(conn)
+                .map_res("Error deleting org_policy")
+        }}
     }

     pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
-        org_policies::table
-            .filter(org_policies::uuid.eq(uuid))
-            .first::<Self>(&**conn)
-            .ok()
+        db_run! { conn: {
+            org_policies::table
+                .filter(org_policies::uuid.eq(uuid))
+                .first::<OrgPolicyDb>(conn)
+                .ok()
+                .from_db()
+        }}
     }

     pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        org_policies::table
-            .filter(org_policies::org_uuid.eq(org_uuid))
-            .load::<Self>(&**conn)
-            .expect("Error loading org_policy")
+        db_run! { conn: {
+            org_policies::table
+                .filter(org_policies::org_uuid.eq(org_uuid))
+                .load::<OrgPolicyDb>(conn)
+                .expect("Error loading org_policy")
+                .from_db()
+        }}
     }

     pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        use crate::db::schema::users_organizations;
-
-        org_policies::table
-            .left_join(
-                users_organizations::table.on(
-                    users_organizations::org_uuid.eq(org_policies::org_uuid)
-                        .and(users_organizations::user_uuid.eq(user_uuid)))
-            )
-            .select(org_policies::all_columns)
-            .load::<Self>(&**conn)
-            .expect("Error loading org_policy")
+        db_run! { conn: {
+            org_policies::table
+                .inner_join(
+                    users_organizations::table.on(
+                        users_organizations::org_uuid.eq(org_policies::org_uuid)
+                            .and(users_organizations::user_uuid.eq(user_uuid)))
+                )
+                .filter(
+                    users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
+                )
+                .select(org_policies::all_columns)
+                .load::<OrgPolicyDb>(conn)
+                .expect("Error loading org_policy")
+                .from_db()
+        }}
     }

     pub fn find_by_org_and_type(org_uuid: &str, atype: i32, conn: &DbConn) -> Option<Self> {
-        org_policies::table
-            .filter(org_policies::org_uuid.eq(org_uuid))
-            .filter(org_policies::atype.eq(atype))
-            .first::<Self>(&**conn)
-            .ok()
+        db_run! { conn: {
+            org_policies::table
+                .filter(org_policies::org_uuid.eq(org_uuid))
+                .filter(org_policies::atype.eq(atype))
+                .first::<OrgPolicyDb>(conn)
+                .ok()
+                .from_db()
+        }}
     }

     pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
-        diesel::delete(org_policies::table.filter(org_policies::org_uuid.eq(org_uuid)))
-            .execute(&**conn)
-            .map_res("Error deleting org_policy")
+        db_run! { conn: {
+            diesel::delete(org_policies::table.filter(org_policies::org_uuid.eq(org_uuid)))
+                .execute(conn)
+                .map_res("Error deleting org_policy")
+        }}
     }

     /*pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
-        diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid)))
-            .execute(&**conn)
-            .map_res("Error deleting twofactors")
+        db_run! { conn: {
+            diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid)))
+                .execute(conn)
+                .map_res("Error deleting twofactors")
+        }}
     }*/
 }
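The save() rewrite above deserves a closer look. On SQLite and MySQL, `REPLACE INTO` implements upsert as a hidden DELETE followed by an INSERT, and that DELETE trips foreign-key checks whenever other rows still reference the record, so the new code catches `ForeignKeyViolation` and retries as a plain UPDATE; PostgreSQL instead deletes any row with the same (org_uuid, atype) pair first, because `ON CONFLICT` cannot target two constraints at once. A self-contained simulation of the sqlite/mysql control flow, with stand-in functions replacing the Diesel calls:

    #[allow(dead_code)]
    #[derive(Debug)]
    enum DbError {
        ForeignKeyViolation,
        Other(String),
    }

    // Stand-in for diesel::replace_into(...).execute(conn).
    fn replace_into(existing_row_is_referenced: bool) -> Result<usize, DbError> {
        if existing_row_is_referenced {
            // REPLACE INTO = DELETE + INSERT, and the DELETE trips the FK check.
            Err(DbError::ForeignKeyViolation)
        } else {
            Ok(1)
        }
    }

    // Stand-in for the diesel::update(...).set(...).execute(conn) fallback.
    fn update_in_place() -> Result<usize, DbError> {
        Ok(1)
    }

    fn save(existing_row_is_referenced: bool) -> Result<(), String> {
        match replace_into(existing_row_is_referenced) {
            Ok(_) => Ok(()),
            // Row exists and is referenced elsewhere: update instead of delete+insert.
            Err(DbError::ForeignKeyViolation) => update_in_place()
                .map(|_| ())
                .map_err(|e| format!("Error saving org_policy: {e:?}")),
            Err(e) => Err(format!("Error saving org_policy: {e:?}")),
        }
    }

    fn main() {
        assert!(save(false).is_ok()); // fresh row: REPLACE INTO succeeds
        assert!(save(true).is_ok());  // referenced row: falls back to UPDATE
        println!("both paths ok");
    }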
@@ -4,27 +4,29 @@ use num_traits::FromPrimitive;

 use super::{CollectionUser, User, OrgPolicy};

-#[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset)]
-#[table_name = "organizations"]
-#[primary_key(uuid)]
-pub struct Organization {
-    pub uuid: String,
-    pub name: String,
-    pub billing_email: String,
-}
+db_object! {
+    #[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset)]
+    #[table_name = "organizations"]
+    #[primary_key(uuid)]
+    pub struct Organization {
+        pub uuid: String,
+        pub name: String,
+        pub billing_email: String,
+    }

     #[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset)]
     #[table_name = "users_organizations"]
     #[primary_key(uuid)]
     pub struct UserOrganization {
         pub uuid: String,
         pub user_uuid: String,
         pub org_uuid: String,

         pub access_all: bool,
         pub akey: String,
         pub status: i32,
         pub atype: i32,
+    }
 }

 pub enum UserOrgStatus {
@@ -42,24 +44,28 @@ pub enum UserOrgType {
     Manager = 3,
 }

+impl UserOrgType {
+    pub fn from_str(s: &str) -> Option<Self> {
+        match s {
+            "0" | "Owner" => Some(UserOrgType::Owner),
+            "1" | "Admin" => Some(UserOrgType::Admin),
+            "2" | "User" => Some(UserOrgType::User),
+            "3" | "Manager" => Some(UserOrgType::Manager),
+            _ => None,
+        }
+    }
+}
+
 impl Ord for UserOrgType {
     fn cmp(&self, other: &UserOrgType) -> Ordering {
-        if self == other {
-            Ordering::Equal
-        } else {
-            match self {
-                UserOrgType::Owner => Ordering::Greater,
-                UserOrgType::Admin => match other {
-                    UserOrgType::Owner => Ordering::Less,
-                    _ => Ordering::Greater,
-                },
-                UserOrgType::Manager => match other {
-                    UserOrgType::Owner | UserOrgType::Admin => Ordering::Less,
-                    _ => Ordering::Greater,
-                },
-                UserOrgType::User => Ordering::Less,
-            }
-        }
+        // For easy comparison, map each variant to an access level (where 0 is lowest).
+        static ACCESS_LEVEL: [i32; 4] = [
+            3, // Owner
+            2, // Admin
+            0, // User
+            1, // Manager
+        ];
+        ACCESS_LEVEL[*self as usize].cmp(&ACCESS_LEVEL[*other as usize])
     }
 }
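The Ord rewrite above collapses the nested match into a lookup table: the enum discriminants (Owner = 0, Admin = 1, User = 2, Manager = 3) index into ACCESS_LEVEL, and comparing the looked-up levels yields Owner > Admin > Manager > User in a single line, exactly the ordering pinned by the test module added at the end of this file. A standalone rendering of the same idea:

    use std::cmp::Ordering;

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum UserOrgType {
        Owner = 0,
        Admin = 1,
        User = 2,
        Manager = 3,
    }

    impl UserOrgType {
        // For easy comparison, map each variant to an access level (where 0 is lowest).
        fn access_level(self) -> i32 {
            const ACCESS_LEVEL: [i32; 4] = [
                3, // Owner
                2, // Admin
                0, // User
                1, // Manager
            ];
            ACCESS_LEVEL[self as usize]
        }

        fn cmp_by_level(self, other: Self) -> Ordering {
            self.access_level().cmp(&other.access_level())
        }
    }

    fn main() {
        use UserOrgType::*;
        assert_eq!(Owner.cmp_by_level(Admin), Ordering::Greater);
        assert_eq!(Admin.cmp_by_level(Manager), Ordering::Greater);
        assert_eq!(Manager.cmp_by_level(User), Ordering::Greater);
        println!("ordering matches the test expectations in this diff");
    }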
@@ -127,18 +133,6 @@ impl PartialOrd<UserOrgType> for i32 {
     }
 }

-impl UserOrgType {
-    pub fn from_str(s: &str) -> Option<Self> {
-        match s {
-            "0" | "Owner" => Some(UserOrgType::Owner),
-            "1" | "Admin" => Some(UserOrgType::Admin),
-            "2" | "User" => Some(UserOrgType::User),
-            "3" | "Manager" => Some(UserOrgType::Manager),
-            _ => None,
-        }
-    }
-}
-
 /// Local methods
 impl Organization {
     pub fn new(name: String, billing_email: String) -> Self {
@@ -153,9 +147,10 @@ impl Organization {
     pub fn to_json(&self) -> Value {
         json!({
             "Id": self.uuid,
+            "Identifier": null, // not supported by us
             "Name": self.name,
-            "Seats": 10,
-            "MaxCollections": 10,
+            "Seats": 10, // The value doesn't matter, we don't check server-side
+            "MaxCollections": 10, // The value doesn't matter, we don't check server-side
             "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
             "Use2fa": true,
             "UseDirectory": false,
@@ -163,6 +158,9 @@ impl Organization {
             "UseGroups": false,
             "UseTotp": true,
             "UsePolicies": true,
+            "UseSso": false, // We do not support SSO
+            "SelfHost": true,
+            "UseApi": false, // not supported by us

             "BusinessName": null,
             "BusinessAddress1": null,
@@ -196,16 +194,13 @@ impl UserOrganization {
     }
 }

-use crate::db::schema::{ciphers_collections, organizations, users_collections, users_organizations};
 use crate::db::DbConn;
-use diesel::prelude::*;

 use crate::api::EmptyResult;
 use crate::error::MapResult;

 /// Database methods
 impl Organization {
-    #[cfg(feature = "postgresql")]
     pub fn save(&self, conn: &DbConn) -> EmptyResult {
         UserOrganization::find_by_org(&self.uuid, conn)
             .iter()
@@ -213,27 +208,36 @@ impl Organization {
             User::update_uuid_revision(&user_org.user_uuid, conn);
         });

-        diesel::insert_into(organizations::table)
-            .values(self)
-            .on_conflict(organizations::uuid)
-            .do_update()
-            .set(self)
-            .execute(&**conn)
-            .map_res("Error saving organization")
-    }
-
-    #[cfg(not(feature = "postgresql"))]
-    pub fn save(&self, conn: &DbConn) -> EmptyResult {
-        UserOrganization::find_by_org(&self.uuid, conn)
-            .iter()
-            .for_each(|user_org| {
-                User::update_uuid_revision(&user_org.user_uuid, conn);
-            });
-
-        diesel::replace_into(organizations::table)
-            .values(self)
-            .execute(&**conn)
-            .map_res("Error saving organization")
+        db_run! { conn:
+            sqlite, mysql {
+                match diesel::replace_into(organizations::table)
+                    .values(OrganizationDb::to_db(self))
+                    .execute(conn)
+                {
+                    Ok(_) => Ok(()),
+                    // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
+                    Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
+                        diesel::update(organizations::table)
+                            .filter(organizations::uuid.eq(&self.uuid))
+                            .set(OrganizationDb::to_db(self))
+                            .execute(conn)
+                            .map_res("Error saving organization")
+                    }
+                    Err(e) => Err(e.into()),
+                }.map_res("Error saving organization")
+            }
+            postgresql {
+                let value = OrganizationDb::to_db(self);
+                diesel::insert_into(organizations::table)
+                    .values(&value)
+                    .on_conflict(organizations::uuid)
+                    .do_update()
+                    .set(&value)
+                    .execute(conn)
+                    .map_res("Error saving organization")
+            }
+        }
     }

     pub fn delete(self, conn: &DbConn) -> EmptyResult {
@@ -244,32 +248,40 @@ impl Organization {
         UserOrganization::delete_all_by_organization(&self.uuid, &conn)?;
         OrgPolicy::delete_all_by_organization(&self.uuid, &conn)?;

-        diesel::delete(organizations::table.filter(organizations::uuid.eq(self.uuid)))
-            .execute(&**conn)
-            .map_res("Error saving organization")
+        db_run! { conn: {
+            diesel::delete(organizations::table.filter(organizations::uuid.eq(self.uuid)))
+                .execute(conn)
+                .map_res("Error saving organization")
+        }}
     }

     pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
-        organizations::table
-            .filter(organizations::uuid.eq(uuid))
-            .first::<Self>(&**conn)
-            .ok()
+        db_run! { conn: {
+            organizations::table
+                .filter(organizations::uuid.eq(uuid))
+                .first::<OrganizationDb>(conn)
+                .ok().from_db()
+        }}
     }

     pub fn get_all(conn: &DbConn) -> Vec<Self> {
-        organizations::table.load::<Self>(&**conn).expect("Error loading organizations")
+        db_run! { conn: {
+            organizations::table.load::<OrganizationDb>(conn).expect("Error loading organizations").from_db()
+        }}
     }
 }

 impl UserOrganization {
     pub fn to_json(&self, conn: &DbConn) -> Value {
         let org = Organization::find_by_uuid(&self.org_uuid, conn).unwrap();

         json!({
             "Id": self.org_uuid,
+            "Identifier": null, // not supported by us
             "Name": org.name,
-            "Seats": 10,
-            "MaxCollections": 10,
+            "Seats": 10, // The value doesn't matter, we don't check server-side
+            "MaxCollections": 10, // The value doesn't matter, we don't check server-side
             "UsersGetPremium": true,

             "Use2fa": true,
@@ -278,8 +290,30 @@ impl UserOrganization {
             "UseGroups": false,
             "UseTotp": true,
             "UsePolicies": true,
-            "UseApi": false,
+            "UseApi": false, // not supported by us
             "SelfHost": true,
+            "SsoBound": false, // We do not support SSO
+            "UseSso": false, // We do not support SSO
+            // TODO: Add support for Business Portal
+            // Upstream is moving Policies and SSO management outside of the web-vault to /portal
+            // For now they still have that code also in the web-vault, but they will remove it at some point.
+            // https://github.com/bitwarden/server/tree/master/bitwarden_license/src/
+            "UseBusinessPortal": false, // Disable BusinessPortal Button
+
+            // TODO: Add support for Custom User Roles
+            // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
+            // "Permissions": {
+            //     "AccessBusinessPortal": false,
+            //     "AccessEventLogs": false,
+            //     "AccessImportExport": false,
+            //     "AccessReports": false,
+            //     "ManageAllCollections": false,
+            //     "ManageAssignedCollections": false,
+            //     "ManageGroups": false,
+            //     "ManagePolicies": false,
+            //     "ManageSso": false,
+            //     "ManageUsers": false
+            // },

             "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side

@@ -345,38 +379,50 @@ impl UserOrganization {
             "Object": "organizationUserDetails",
         })
     }

-    #[cfg(feature = "postgresql")]
     pub fn save(&self, conn: &DbConn) -> EmptyResult {
         User::update_uuid_revision(&self.user_uuid, conn);

-        diesel::insert_into(users_organizations::table)
-            .values(self)
-            .on_conflict(users_organizations::uuid)
-            .do_update()
-            .set(self)
-            .execute(&**conn)
-            .map_res("Error adding user to organization")
-    }
-
-    #[cfg(not(feature = "postgresql"))]
-    pub fn save(&self, conn: &DbConn) -> EmptyResult {
-        User::update_uuid_revision(&self.user_uuid, conn);
-
-        diesel::replace_into(users_organizations::table)
-            .values(self)
-            .execute(&**conn)
-            .map_res("Error adding user to organization")
+        db_run! { conn:
+            sqlite, mysql {
+                match diesel::replace_into(users_organizations::table)
+                    .values(UserOrganizationDb::to_db(self))
+                    .execute(conn)
+                {
+                    Ok(_) => Ok(()),
+                    // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
+                    Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
+                        diesel::update(users_organizations::table)
+                            .filter(users_organizations::uuid.eq(&self.uuid))
+                            .set(UserOrganizationDb::to_db(self))
+                            .execute(conn)
+                            .map_res("Error adding user to organization")
+                    }
+                    Err(e) => Err(e.into()),
+                }.map_res("Error adding user to organization")
+            }
+            postgresql {
+                let value = UserOrganizationDb::to_db(self);
+                diesel::insert_into(users_organizations::table)
+                    .values(&value)
+                    .on_conflict(users_organizations::uuid)
+                    .do_update()
+                    .set(&value)
+                    .execute(conn)
+                    .map_res("Error adding user to organization")
+            }
+        }
     }

     pub fn delete(self, conn: &DbConn) -> EmptyResult {
         User::update_uuid_revision(&self.user_uuid, conn);

-        CollectionUser::delete_all_by_user(&self.user_uuid, &conn)?;
+        CollectionUser::delete_all_by_user_and_org(&self.user_uuid, &self.org_uuid, &conn)?;

-        diesel::delete(users_organizations::table.filter(users_organizations::uuid.eq(self.uuid)))
-            .execute(&**conn)
-            .map_res("Error removing user from organization")
+        db_run! { conn: {
+            diesel::delete(users_organizations::table.filter(users_organizations::uuid.eq(self.uuid)))
+                .execute(conn)
+                .map_res("Error removing user from organization")
+        }}
     }

     pub fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
@@ -393,117 +439,166 @@ impl UserOrganization {
         Ok(())
     }

-    pub fn has_status(self, status: UserOrgStatus) -> bool {
+    pub fn find_by_email_and_org(email: &str, org_id: &str, conn: &DbConn) -> Option<UserOrganization> {
+        if let Some(user) = super::User::find_by_mail(email, conn) {
+            if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, org_id, &conn) {
+                return Some(user_org);
+            }
+        }
+
+        None
+    }
+
+    pub fn has_status(&self, status: UserOrgStatus) -> bool {
         self.status == status as i32
     }

-    pub fn has_full_access(self) -> bool {
+    pub fn has_type(&self, user_type: UserOrgType) -> bool {
+        self.atype == user_type as i32
+    }
+
+    pub fn has_full_access(&self) -> bool {
         (self.access_all || self.atype >= UserOrgType::Admin) &&
             self.has_status(UserOrgStatus::Confirmed)
     }

     pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
-        users_organizations::table
-            .filter(users_organizations::uuid.eq(uuid))
-            .first::<Self>(&**conn)
-            .ok()
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::uuid.eq(uuid))
+                .first::<UserOrganizationDb>(conn)
+                .ok().from_db()
+        }}
     }

     pub fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &DbConn) -> Option<Self> {
-        users_organizations::table
-            .filter(users_organizations::uuid.eq(uuid))
-            .filter(users_organizations::org_uuid.eq(org_uuid))
-            .first::<Self>(&**conn)
-            .ok()
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::uuid.eq(uuid))
+                .filter(users_organizations::org_uuid.eq(org_uuid))
+                .first::<UserOrganizationDb>(conn)
+                .ok().from_db()
+        }}
     }

     pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        users_organizations::table
-            .filter(users_organizations::user_uuid.eq(user_uuid))
-            .filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
-            .load::<Self>(&**conn)
-            .unwrap_or_default()
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::user_uuid.eq(user_uuid))
+                .filter(users_organizations::status.eq(UserOrgStatus::Confirmed as i32))
+                .load::<UserOrganizationDb>(conn)
+                .unwrap_or_default().from_db()
+        }}
     }

     pub fn find_invited_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        users_organizations::table
-            .filter(users_organizations::user_uuid.eq(user_uuid))
-            .filter(users_organizations::status.eq(UserOrgStatus::Invited as i32))
-            .load::<Self>(&**conn)
-            .unwrap_or_default()
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::user_uuid.eq(user_uuid))
+                .filter(users_organizations::status.eq(UserOrgStatus::Invited as i32))
+                .load::<UserOrganizationDb>(conn)
+                .unwrap_or_default().from_db()
+        }}
     }

     pub fn find_any_state_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        users_organizations::table
-            .filter(users_organizations::user_uuid.eq(user_uuid))
-            .load::<Self>(&**conn)
-            .unwrap_or_default()
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::user_uuid.eq(user_uuid))
+                .load::<UserOrganizationDb>(conn)
+                .unwrap_or_default().from_db()
+        }}
     }

     pub fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        users_organizations::table
-            .filter(users_organizations::org_uuid.eq(org_uuid))
-            .load::<Self>(&**conn)
-            .expect("Error loading user organizations")
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::org_uuid.eq(org_uuid))
+                .load::<UserOrganizationDb>(conn)
+                .expect("Error loading user organizations").from_db()
+        }}
     }

     pub fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
-        users_organizations::table
-            .filter(users_organizations::org_uuid.eq(org_uuid))
-            .count()
-            .first::<i64>(&**conn)
-            .ok()
-            .unwrap_or(0)
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::org_uuid.eq(org_uuid))
+                .count()
+                .first::<i64>(conn)
+                .ok()
+                .unwrap_or(0)
+        }}
     }

     pub fn find_by_org_and_type(org_uuid: &str, atype: i32, conn: &DbConn) -> Vec<Self> {
-        users_organizations::table
-            .filter(users_organizations::org_uuid.eq(org_uuid))
-            .filter(users_organizations::atype.eq(atype))
-            .load::<Self>(&**conn)
-            .expect("Error loading user organizations")
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::org_uuid.eq(org_uuid))
+                .filter(users_organizations::atype.eq(atype))
+                .load::<UserOrganizationDb>(conn)
+                .expect("Error loading user organizations").from_db()
+        }}
     }

     pub fn find_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &DbConn) -> Option<Self> {
-        users_organizations::table
-            .filter(users_organizations::user_uuid.eq(user_uuid))
-            .filter(users_organizations::org_uuid.eq(org_uuid))
-            .first::<Self>(&**conn)
-            .ok()
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::user_uuid.eq(user_uuid))
+                .filter(users_organizations::org_uuid.eq(org_uuid))
+                .first::<UserOrganizationDb>(conn)
+                .ok().from_db()
+        }}
     }

     pub fn find_by_cipher_and_org(cipher_uuid: &str, org_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        users_organizations::table
-            .filter(users_organizations::org_uuid.eq(org_uuid))
-            .left_join(users_collections::table.on(
-                users_collections::user_uuid.eq(users_organizations::user_uuid)
-            ))
-            .left_join(ciphers_collections::table.on(
-                ciphers_collections::collection_uuid.eq(users_collections::collection_uuid).and(
-                    ciphers_collections::cipher_uuid.eq(&cipher_uuid)
-                )
-            ))
-            .filter(
-                users_organizations::access_all.eq(true).or( // AccessAll..
-                    ciphers_collections::cipher_uuid.eq(&cipher_uuid) // ..or access to collection with cipher
-                )
-            )
-            .select(users_organizations::all_columns)
-            .load::<Self>(&**conn).expect("Error loading user organizations")
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::org_uuid.eq(org_uuid))
+                .left_join(users_collections::table.on(
+                    users_collections::user_uuid.eq(users_organizations::user_uuid)
+                ))
+                .left_join(ciphers_collections::table.on(
+                    ciphers_collections::collection_uuid.eq(users_collections::collection_uuid).and(
+                        ciphers_collections::cipher_uuid.eq(&cipher_uuid)
+                    )
+                ))
+                .filter(
+                    users_organizations::access_all.eq(true).or( // AccessAll..
+                        ciphers_collections::cipher_uuid.eq(&cipher_uuid) // ..or access to collection with cipher
+                    )
+                )
+                .select(users_organizations::all_columns)
+                .load::<UserOrganizationDb>(conn).expect("Error loading user organizations").from_db()
+        }}
     }

     pub fn find_by_collection_and_org(collection_uuid: &str, org_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        users_organizations::table
-            .filter(users_organizations::org_uuid.eq(org_uuid))
-            .left_join(users_collections::table.on(
-                users_collections::user_uuid.eq(users_organizations::user_uuid)
-            ))
-            .filter(
-                users_organizations::access_all.eq(true).or( // AccessAll..
-                    users_collections::collection_uuid.eq(&collection_uuid) // ..or access to collection with cipher
-                )
-            )
-            .select(users_organizations::all_columns)
-            .load::<Self>(&**conn).expect("Error loading user organizations")
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::org_uuid.eq(org_uuid))
+                .left_join(users_collections::table.on(
+                    users_collections::user_uuid.eq(users_organizations::user_uuid)
+                ))
+                .filter(
+                    users_organizations::access_all.eq(true).or( // AccessAll..
+                        users_collections::collection_uuid.eq(&collection_uuid) // ..or access to collection with cipher
+                    )
+                )
+                .select(users_organizations::all_columns)
+                .load::<UserOrganizationDb>(conn).expect("Error loading user organizations").from_db()
+        }}
     }
 }

+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    #[allow(non_snake_case)]
+    fn partial_cmp_UserOrgType() {
+        assert!(UserOrgType::Owner > UserOrgType::Admin);
+        assert!(UserOrgType::Admin > UserOrgType::Manager);
+        assert!(UserOrgType::Manager > UserOrgType::User);
+    }
+}
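One detail worth noting from the hunks above: `has_full_access()` compares a raw `i32` column against an enum variant (`self.atype >= UserOrgType::Admin`), which only compiles because of the `impl PartialOrd<UserOrgType> for i32` named in one of the hunk headers. Those impl bodies sit outside this excerpt, so the following is a guess at the technique rather than the project's exact code:

    use std::cmp::Ordering;

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum UserOrgType {
        Owner = 0,
        Admin = 1,
        User = 2,
        Manager = 3,
    }

    impl UserOrgType {
        fn access_level(self) -> i32 {
            [3, 2, 0, 1][self as usize] // Owner, Admin, User, Manager
        }
    }

    // Allow `some_i32 >= UserOrgType::Admin` on raw database columns.
    impl PartialEq<UserOrgType> for i32 {
        fn eq(&self, other: &UserOrgType) -> bool {
            *self == *other as i32
        }
    }

    impl PartialOrd<UserOrgType> for i32 {
        fn partial_cmp(&self, other: &UserOrgType) -> Option<Ordering> {
            // Unknown discriminants compare as None; known ones by access level.
            let lhs = match *self {
                0 => UserOrgType::Owner,
                1 => UserOrgType::Admin,
                2 => UserOrgType::User,
                3 => UserOrgType::Manager,
                _ => return None,
            };
            lhs.access_level().partial_cmp(&other.access_level())
        }
    }

    fn main() {
        let atype: i32 = 1; // Admin, as stored in users_organizations.atype
        assert!(atype >= UserOrgType::Admin);
        assert!(!(2 >= UserOrgType::Admin)); // a plain User does not reach Admin
        println!("cross-type comparison ok");
    }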
@@ -1,24 +1,24 @@
-use diesel::prelude::*;
 use serde_json::Value;

 use crate::api::EmptyResult;
-use crate::db::schema::twofactor;
 use crate::db::DbConn;
 use crate::error::MapResult;

 use super::User;

-#[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
-#[table_name = "twofactor"]
-#[belongs_to(User, foreign_key = "user_uuid")]
-#[primary_key(uuid)]
-pub struct TwoFactor {
-    pub uuid: String,
-    pub user_uuid: String,
-    pub atype: i32,
-    pub enabled: bool,
-    pub data: String,
-    pub last_used: i32,
+db_object! {
+    #[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
+    #[table_name = "twofactor"]
+    #[belongs_to(User, foreign_key = "user_uuid")]
+    #[primary_key(uuid)]
+    pub struct TwoFactor {
+        pub uuid: String,
+        pub user_uuid: String,
+        pub atype: i32,
+        pub enabled: bool,
+        pub data: String,
+        pub last_used: i32,
+    }
 }

 #[allow(dead_code)]
@@ -70,57 +70,80 @@ impl TwoFactor {

 /// Database methods
 impl TwoFactor {
-    #[cfg(feature = "postgresql")]
     pub fn save(&self, conn: &DbConn) -> EmptyResult {
-        // We need to make sure we're not going to violate the unique constraint on user_uuid and atype.
-        // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
-        // not support multiple constraints on ON CONFLICT clauses.
-        diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(&self.user_uuid)).filter(twofactor::atype.eq(&self.atype)))
-            .execute(&**conn)
-            .map_res("Error deleting twofactor for insert")?;
+        db_run! { conn:
+            sqlite, mysql {
+                match diesel::replace_into(twofactor::table)
+                    .values(TwoFactorDb::to_db(self))
+                    .execute(conn)
+                {
+                    Ok(_) => Ok(()),
+                    // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
+                    Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
+                        diesel::update(twofactor::table)
+                            .filter(twofactor::uuid.eq(&self.uuid))
+                            .set(TwoFactorDb::to_db(self))
+                            .execute(conn)
+                            .map_res("Error saving twofactor")
+                    }
+                    Err(e) => Err(e.into()),
+                }.map_res("Error saving twofactor")
+            }
+            postgresql {
+                let value = TwoFactorDb::to_db(self);
+                // We need to make sure we're not going to violate the unique constraint on user_uuid and atype.
+                // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
+                // not support multiple constraints on ON CONFLICT clauses.
+                diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(&self.user_uuid)).filter(twofactor::atype.eq(&self.atype)))
+                    .execute(conn)
+                    .map_res("Error deleting twofactor for insert")?;

-        diesel::insert_into(twofactor::table)
-            .values(self)
-            .on_conflict(twofactor::uuid)
-            .do_update()
-            .set(self)
-            .execute(&**conn)
-            .map_res("Error saving twofactor")
-    }
-
-    #[cfg(not(feature = "postgresql"))]
-    pub fn save(&self, conn: &DbConn) -> EmptyResult {
-        diesel::replace_into(twofactor::table)
-            .values(self)
-            .execute(&**conn)
-            .map_res("Error saving twofactor")
+                diesel::insert_into(twofactor::table)
+                    .values(&value)
+                    .on_conflict(twofactor::uuid)
+                    .do_update()
+                    .set(&value)
+                    .execute(conn)
+                    .map_res("Error saving twofactor")
+            }
+        }
     }

     pub fn delete(self, conn: &DbConn) -> EmptyResult {
-        diesel::delete(twofactor::table.filter(twofactor::uuid.eq(self.uuid)))
-            .execute(&**conn)
-            .map_res("Error deleting twofactor")
+        db_run! { conn: {
+            diesel::delete(twofactor::table.filter(twofactor::uuid.eq(self.uuid)))
+                .execute(conn)
+                .map_res("Error deleting twofactor")
+        }}
     }

     pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
-        twofactor::table
-            .filter(twofactor::user_uuid.eq(user_uuid))
-            .filter(twofactor::atype.lt(1000)) // Filter implementation types
-            .load::<Self>(&**conn)
-            .expect("Error loading twofactor")
+        db_run! { conn: {
+            twofactor::table
+                .filter(twofactor::user_uuid.eq(user_uuid))
+                .filter(twofactor::atype.lt(1000)) // Filter implementation types
+                .load::<TwoFactorDb>(conn)
+                .expect("Error loading twofactor")
+                .from_db()
+        }}
     }

     pub fn find_by_user_and_type(user_uuid: &str, atype: i32, conn: &DbConn) -> Option<Self> {
-        twofactor::table
-            .filter(twofactor::user_uuid.eq(user_uuid))
-            .filter(twofactor::atype.eq(atype))
-            .first::<Self>(&**conn)
-            .ok()
+        db_run! { conn: {
+            twofactor::table
+                .filter(twofactor::user_uuid.eq(user_uuid))
+                .filter(twofactor::atype.eq(atype))
+                .first::<TwoFactorDb>(conn)
+                .ok()
+                .from_db()
+        }}
    }

     pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
-        diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid)))
-            .execute(&**conn)
-            .map_res("Error deleting twofactors")
+        db_run! { conn: {
+            diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid)))
+                .execute(conn)
+                .map_res("Error deleting twofactors")
+        }}
     }
 }
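TwoFactor::save() keeps the delete-then-insert step on PostgreSQL because this table's logical uniqueness is the (user_uuid, atype) pair while the `ON CONFLICT` clause above targets only the uuid key, so any composite-key duplicate has to be cleared manually before the upsert, as the retained comment explains. A small in-memory simulation of why that first step matters (illustrative code, not the project's):

    use std::collections::HashMap;

    // Simulate a table whose logical unique key is (user_uuid, atype),
    // while rows are addressed by a separate uuid primary key.
    struct TwoFactorRow {
        uuid: String,
        user_uuid: String,
        atype: i32,
        data: String,
    }

    fn save_postgres_style(table: &mut HashMap<String, TwoFactorRow>, row: TwoFactorRow) {
        // Step 1: DELETE FROM twofactor WHERE user_uuid = .. AND atype = ..
        // (ON CONFLICT can only target the uuid key, so the composite-key
        // duplicate must be removed by hand.)
        table.retain(|_, r| !(r.user_uuid == row.user_uuid && r.atype == row.atype));
        // Step 2: INSERT ... ON CONFLICT (uuid) DO UPDATE.
        table.insert(row.uuid.clone(), row);
    }

    fn main() {
        let mut table = HashMap::new();
        let first = TwoFactorRow {
            uuid: "a".into(), user_uuid: "u1".into(), atype: 0, data: "old".into(),
        };
        let replacement = TwoFactorRow {
            uuid: "b".into(), user_uuid: "u1".into(), atype: 0, data: "new".into(),
        };
        save_postgres_style(&mut table, first);
        save_postgres_style(&mut table, replacement);
        // The (user_uuid, atype) pair stays unique even though the uuid changed.
        assert_eq!(table.len(), 1);
        assert_eq!(table["b"].data, "new");
        println!("composite-key upsert ok");
    }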
@@ -4,43 +4,55 @@ use serde_json::Value;
 use crate::crypto;
 use crate::CONFIG;

-#[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset)]
-#[table_name = "users"]
-#[changeset_options(treat_none_as_null="true")]
-#[primary_key(uuid)]
-pub struct User {
-    pub uuid: String,
+db_object! {
+    #[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset)]
+    #[table_name = "users"]
+    #[changeset_options(treat_none_as_null="true")]
+    #[primary_key(uuid)]
+    pub struct User {
+        pub uuid: String,
+        pub enabled: bool,
     pub created_at: NaiveDateTime,
     pub updated_at: NaiveDateTime,
     pub verified_at: Option<NaiveDateTime>,
     pub last_verifying_at: Option<NaiveDateTime>,
     pub login_verify_count: i32,

     pub email: String,
     pub email_new: Option<String>,
     pub email_new_token: Option<String>,
     pub name: String,

     pub password_hash: Vec<u8>,
     pub salt: Vec<u8>,
     pub password_iterations: i32,
     pub password_hint: Option<String>,

     pub akey: String,
     pub private_key: Option<String>,
     pub public_key: Option<String>,

-    #[column_name = "totp_secret"]
+    #[column_name = "totp_secret"] // Note, this is only added to the UserDb structs, not to User
     _totp_secret: Option<String>,
     pub totp_recover: Option<String>,

     pub security_stamp: String,
+    pub stamp_exception: Option<String>,

     pub equivalent_domains: String,
     pub excluded_globals: String,

     pub client_kdf_type: i32,
     pub client_kdf_iter: i32,
+    }
+
+    #[derive(Debug, Identifiable, Queryable, Insertable)]
+    #[table_name = "invitations"]
+    #[primary_key(email)]
+    pub struct Invitation {
+        pub email: String,
+    }
 }

 enum UserStatus {
@@ -49,6 +61,12 @@ enum UserStatus {
     _Disabled = 2,
 }

+#[derive(Serialize, Deserialize)]
+pub struct UserStampException {
+    pub route: String,
+    pub security_stamp: String
+}
+
 /// Local methods
 impl User {
     pub const CLIENT_KDF_TYPE_DEFAULT: i32 = 0; // PBKDF2: 0
@@ -60,6 +78,7 @@ impl User {

         Self {
             uuid: crate::util::get_uuid(),
+            enabled: true,
             created_at: now,
             updated_at: now,
             verified_at: None,
@@ -76,6 +95,7 @@ impl User {
             password_iterations: CONFIG.password_iterations(),

             security_stamp: crate::util::get_uuid(),
+            stamp_exception: None,

             password_hint: None,
             private_key: None,
@@ -109,19 +129,56 @@ impl User {
         }
     }

-    pub fn set_password(&mut self, password: &str) {
+    /// Set the password hash generated
+    /// And resets the security_stamp. Based upon the allow_next_route the security_stamp will be different.
+    ///
+    /// # Arguments
+    ///
+    /// * `password` - A str which contains a hashed version of the users master password.
+    /// * `allow_next_route` - A Option<&str> with the function name of the next allowed (rocket) route.
+    ///
+    pub fn set_password(&mut self, password: &str, allow_next_route: Option<&str>) {
         self.password_hash = crypto::hash_password(password.as_bytes(), &self.salt, self.password_iterations as u32);
+
+        if let Some(route) = allow_next_route {
+            self.set_stamp_exception(route);
+        }
+
+        self.reset_security_stamp()
     }

     pub fn reset_security_stamp(&mut self) {
         self.security_stamp = crate::util::get_uuid();
     }

+    /// Set the stamp_exception to only allow a subsequent request matching a specific route using the current security-stamp.
+    ///
+    /// # Arguments
+    /// * `route_exception` - A str with the function name of the next allowed (rocket) route.
+    ///
+    /// ### Future
+    /// In the future it could be posible that we need more of these exception routes.
+    /// In that case we could use an Vec<UserStampException> and add multiple exceptions.
+    pub fn set_stamp_exception(&mut self, route_exception: &str) {
+        let stamp_exception = UserStampException {
+            route: route_exception.to_string(),
+            security_stamp: self.security_stamp.to_string()
+        };
+        self.stamp_exception = Some(serde_json::to_string(&stamp_exception).unwrap_or_default());
+    }
+
+    /// Resets the stamp_exception to prevent re-use of the previous security-stamp
+    ///
+    /// ### Future
+    /// In the future it could be posible that we need more of these exception routes.
+    /// In that case we could use an Vec<UserStampException> and add multiple exceptions.
+    pub fn reset_stamp_exception(&mut self) {
+        self.stamp_exception = None;
+    }
 }

-use super::{Cipher, Device, Folder, TwoFactor, UserOrgType, UserOrganization};
-use crate::db::schema::{invitations, users};
+use super::{Cipher, Device, Favorite, Folder, TwoFactor, UserOrgType, UserOrganization};
 use crate::db::DbConn;
-use diesel::prelude::*;

 use crate::api::EmptyResult;
 use crate::error::MapResult;
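The stamp-exception additions above let a password change rotate the security stamp, which normally invalidates every session token, while still honoring one follow-up request to a named Rocket route under the old stamp; the exception is serialized as JSON into the new stamp_exception column and cleared via reset_stamp_exception(). A self-contained sketch of that life cycle, assuming the serde and serde_json crates as the project does (the route name is illustrative, and the real code hashes the password where the comment stands):

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    pub struct UserStampException {
        pub route: String,
        pub security_stamp: String,
    }

    struct User {
        security_stamp: String,
        stamp_exception: Option<String>,
    }

    impl User {
        fn set_stamp_exception(&mut self, route_exception: &str) {
            let ex = UserStampException {
                route: route_exception.to_string(),
                security_stamp: self.security_stamp.clone(),
            };
            self.stamp_exception = Some(serde_json::to_string(&ex).unwrap_or_default());
        }

        fn set_password(&mut self, allow_next_route: Option<&str>) {
            // (password hashing elided) remember the old stamp for one named route...
            if let Some(route) = allow_next_route {
                self.set_stamp_exception(route);
            }
            // ...then rotate the stamp, logging every other session out.
            self.security_stamp = "new-stamp".to_string();
        }
    }

    fn main() {
        let mut user = User {
            security_stamp: "old-stamp".to_string(),
            stamp_exception: None,
        };
        user.set_password(Some("post_rotatekey")); // illustrative route name
        let ex: UserStampException =
            serde_json::from_str(user.stamp_exception.as_deref().unwrap()).unwrap();
        // A request hitting that route may still present the old stamp once.
        assert_eq!(ex.route, "post_rotatekey");
        assert_eq!(ex.security_stamp, "old-stamp");
        assert_eq!(user.security_stamp, "new-stamp");
        println!("stamp exception round-trip ok");
    }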
@@ -158,7 +215,6 @@ impl User {
         })
     }

-    #[cfg(feature = "postgresql")]
     pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
         if self.email.trim().is_empty() {
             err!("User email can't be empty")
@@ -166,49 +222,60 @@ impl User {

         self.updated_at = Utc::now().naive_utc();

-        diesel::insert_into(users::table) // Insert or update
-            .values(&*self)
-            .on_conflict(users::uuid)
-            .do_update()
-            .set(&*self)
-            .execute(&**conn)
-            .map_res("Error saving user")
-    }
-
-    #[cfg(not(feature = "postgresql"))]
-    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
-        if self.email.trim().is_empty() {
-            err!("User email can't be empty")
-        }
-
-        self.updated_at = Utc::now().naive_utc();
-
-        diesel::replace_into(users::table) // Insert or update
-            .values(&*self)
-            .execute(&**conn)
-            .map_res("Error saving user")
+        db_run! {conn:
+            sqlite, mysql {
+                match diesel::replace_into(users::table)
+                    .values(UserDb::to_db(self))
+                    .execute(conn)
+                {
+                    Ok(_) => Ok(()),
+                    // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
+                    Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
+                        diesel::update(users::table)
+                            .filter(users::uuid.eq(&self.uuid))
+                            .set(UserDb::to_db(self))
+                            .execute(conn)
+                            .map_res("Error saving user")
+                    }
+                    Err(e) => Err(e.into()),
+                }.map_res("Error saving user")
+            }
+            postgresql {
+                let value = UserDb::to_db(self);
+                diesel::insert_into(users::table) // Insert or update
+                    .values(&value)
+                    .on_conflict(users::uuid)
+                    .do_update()
+                    .set(&value)
+                    .execute(conn)
+                    .map_res("Error saving user")
+            }
+        }
     }

     pub fn delete(self, conn: &DbConn) -> EmptyResult {
-        for user_org in UserOrganization::find_by_user(&self.uuid, &*conn) {
+        for user_org in UserOrganization::find_by_user(&self.uuid, conn) {
             if user_org.atype == UserOrgType::Owner {
                 let owner_type = UserOrgType::Owner as i32;
-                if UserOrganization::find_by_org_and_type(&user_org.org_uuid, owner_type, &conn).len() <= 1 {
+                if UserOrganization::find_by_org_and_type(&user_org.org_uuid, owner_type, conn).len() <= 1 {
                     err!("Can't delete last owner")
                 }
             }
         }

-        UserOrganization::delete_all_by_user(&self.uuid, &*conn)?;
-        Cipher::delete_all_by_user(&self.uuid, &*conn)?;
-        Folder::delete_all_by_user(&self.uuid, &*conn)?;
-        Device::delete_all_by_user(&self.uuid, &*conn)?;
-        TwoFactor::delete_all_by_user(&self.uuid, &*conn)?;
-        Invitation::take(&self.email, &*conn); // Delete invitation if any
+        UserOrganization::delete_all_by_user(&self.uuid, conn)?;
+        Cipher::delete_all_by_user(&self.uuid, conn)?;
+        Favorite::delete_all_by_user(&self.uuid, conn)?;
+        Folder::delete_all_by_user(&self.uuid, conn)?;
+        Device::delete_all_by_user(&self.uuid, conn)?;
+        TwoFactor::delete_all_by_user(&self.uuid, conn)?;
+        Invitation::take(&self.email, conn); // Delete invitation if any

-        diesel::delete(users::table.filter(users::uuid.eq(self.uuid)))
-            .execute(&**conn)
-            .map_res("Error deleting user")
+        db_run! {conn: {
+            diesel::delete(users::table.filter(users::uuid.eq(self.uuid)))
+                .execute(conn)
+                .map_res("Error deleting user")
+        }}
     }

     pub fn update_uuid_revision(uuid: &str, conn: &DbConn) {
@@ -220,15 +287,14 @@ impl User {
     pub fn update_all_revisions(conn: &DbConn) -> EmptyResult {
         let updated_at = Utc::now().naive_utc();

-        crate::util::retry(
-            || {
-                diesel::update(users::table)
-                    .set(users::updated_at.eq(updated_at))
-                    .execute(&**conn)
-            },
-            10,
-        )
-        .map_res("Error updating revision date for all users")
+        db_run! {conn: {
+            crate::util::retry(|| {
+                diesel::update(users::table)
+                    .set(users::updated_at.eq(updated_at))
+                    .execute(conn)
+            }, 10)
+            .map_res("Error updating revision date for all users")
+        }}
     }

     pub fn update_revision(&mut self, conn: &DbConn) -> EmptyResult {
@@ -238,39 +304,45 @@ impl User {
     }

     fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &DbConn) -> EmptyResult {
-        crate::util::retry(
-            || {
-                diesel::update(users::table.filter(users::uuid.eq(uuid)))
-                    .set(users::updated_at.eq(date))
-                    .execute(&**conn)
-            },
-            10,
-        )
-        .map_res("Error updating user revision")
+        db_run! {conn: {
+            crate::util::retry(|| {
+                diesel::update(users::table.filter(users::uuid.eq(uuid)))
+                    .set(users::updated_at.eq(date))
+                    .execute(conn)
+            }, 10)
+            .map_res("Error updating user revision")
+        }}
     }

     pub fn find_by_mail(mail: &str, conn: &DbConn) -> Option<Self> {
         let lower_mail = mail.to_lowercase();
-        users::table
-            .filter(users::email.eq(lower_mail))
-            .first::<Self>(&**conn)
-            .ok()
+        db_run! {conn: {
+            users::table
+                .filter(users::email.eq(lower_mail))
+                .first::<UserDb>(conn)
+                .ok()
+                .from_db()
+        }}
     }

     pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
-        users::table.filter(users::uuid.eq(uuid)).first::<Self>(&**conn).ok()
+        db_run! {conn: {
+            users::table.filter(users::uuid.eq(uuid)).first::<UserDb>(conn).ok().from_db()
+        }}
     }

     pub fn get_all(conn: &DbConn) -> Vec<Self> {
-        users::table.load::<Self>(&**conn).expect("Error loading users")
+        db_run! {conn: {
+            users::table.load::<UserDb>(conn).expect("Error loading users").from_db()
+        }}
     }
-}

-#[derive(Debug, Identifiable, Queryable, Insertable)]
-#[table_name = "invitations"]
-#[primary_key(email)]
+    pub fn last_active(&self, conn: &DbConn) -> Option<NaiveDateTime> {
+        match Device::find_latest_active_by_user(&self.uuid, conn) {
+            Some(device) => Some(device.updated_at),
|
||||||
pub struct Invitation {
|
None => None
|
||||||
pub email: String,
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Invitation {
|
impl Invitation {
|
||||||
@@ -278,44 +350,48 @@ impl Invitation {
|
|||||||
Self { email }
|
Self { email }
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(feature = "postgresql")]
|
|
||||||
pub fn save(&self, conn: &DbConn) -> EmptyResult {
|
pub fn save(&self, conn: &DbConn) -> EmptyResult {
|
||||||
if self.email.trim().is_empty() {
|
if self.email.trim().is_empty() {
|
||||||
err!("Invitation email can't be empty")
|
err!("Invitation email can't be empty")
|
||||||
}
|
}
|
||||||
|
|
||||||
diesel::insert_into(invitations::table)
|
db_run! {conn:
|
||||||
.values(self)
|
sqlite, mysql {
|
||||||
.on_conflict(invitations::email)
|
// Not checking for ForeignKey Constraints here
|
||||||
.do_nothing()
|
// Table invitations does not have any ForeignKey Constraints.
|
||||||
.execute(&**conn)
|
diesel::replace_into(invitations::table)
|
||||||
.map_res("Error saving invitation")
|
.values(InvitationDb::to_db(self))
|
||||||
}
|
.execute(conn)
|
||||||
|
.map_res("Error saving invitation")
|
||||||
#[cfg(not(feature = "postgresql"))]
|
}
|
||||||
pub fn save(&self, conn: &DbConn) -> EmptyResult {
|
postgresql {
|
||||||
if self.email.trim().is_empty() {
|
diesel::insert_into(invitations::table)
|
||||||
err!("Invitation email can't be empty")
|
.values(InvitationDb::to_db(self))
|
||||||
|
.on_conflict(invitations::email)
|
||||||
|
.do_nothing()
|
||||||
|
.execute(conn)
|
||||||
|
.map_res("Error saving invitation")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
diesel::replace_into(invitations::table)
|
|
||||||
.values(self)
|
|
||||||
.execute(&**conn)
|
|
||||||
.map_res("Error saving invitation")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn delete(self, conn: &DbConn) -> EmptyResult {
|
pub fn delete(self, conn: &DbConn) -> EmptyResult {
|
||||||
diesel::delete(invitations::table.filter(invitations::email.eq(self.email)))
|
db_run! {conn: {
|
||||||
.execute(&**conn)
|
diesel::delete(invitations::table.filter(invitations::email.eq(self.email)))
|
||||||
.map_res("Error deleting invitation")
|
.execute(conn)
|
||||||
|
.map_res("Error deleting invitation")
|
||||||
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn find_by_mail(mail: &str, conn: &DbConn) -> Option<Self> {
|
pub fn find_by_mail(mail: &str, conn: &DbConn) -> Option<Self> {
|
||||||
let lower_mail = mail.to_lowercase();
|
let lower_mail = mail.to_lowercase();
|
||||||
invitations::table
|
db_run! {conn: {
|
||||||
.filter(invitations::email.eq(lower_mail))
|
invitations::table
|
||||||
.first::<Self>(&**conn)
|
.filter(invitations::email.eq(lower_mail))
|
||||||
.ok()
|
.first::<InvitationDb>(conn)
|
||||||
|
.ok()
|
||||||
|
.from_db()
|
||||||
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn take(mail: &str, conn: &DbConn) -> bool {
|
pub fn take(mail: &str, conn: &DbConn) -> bool {
|
||||||
|
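Note on the `crate::util::retry(|| { ... }, 10)` calls in the revision-update hunks above: they wrap a diesel UPDATE so that transient failures (presumably SQLite's "database is locked" under concurrent writes) are retried before giving up. The helper itself is not part of this diff; the following is only a minimal sketch of a function with the same shape, and the 500 ms pause is an assumption, not the project's actual value.

use std::thread::sleep;
use std::time::Duration;

// Sketch: run `func` up to `max_tries` times, pausing briefly between
// attempts, and return the last error if every attempt fails.
fn retry<F, T, E>(mut func: F, max_tries: u32) -> Result<T, E>
where
    F: FnMut() -> Result<T, E>,
{
    let mut tries = 0;
    loop {
        match func() {
            Ok(v) => return Ok(v),
            Err(e) => {
                tries += 1;
                if tries >= max_tries {
                    return Err(e);
                }
                sleep(Duration::from_millis(500)); // back-off interval is an assumption
            }
        }
    }
}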
@@ -1,7 +1,7 @@
 table! {
     attachments (id) {
-        id -> Varchar,
-        cipher_uuid -> Varchar,
+        id -> Text,
+        cipher_uuid -> Text,
         file_name -> Text,
         file_size -> Integer,
         akey -> Nullable<Text>,
@@ -10,17 +10,16 @@ table! {

 table! {
     ciphers (uuid) {
-        uuid -> Varchar,
+        uuid -> Text,
         created_at -> Datetime,
         updated_at -> Datetime,
-        user_uuid -> Nullable<Varchar>,
-        organization_uuid -> Nullable<Varchar>,
+        user_uuid -> Nullable<Text>,
+        organization_uuid -> Nullable<Text>,
         atype -> Integer,
         name -> Text,
         notes -> Nullable<Text>,
         fields -> Nullable<Text>,
         data -> Text,
-        favorite -> Bool,
         password_history -> Nullable<Text>,
         deleted_at -> Nullable<Datetime>,
     }
@@ -28,25 +27,25 @@ table! {

 table! {
     ciphers_collections (cipher_uuid, collection_uuid) {
-        cipher_uuid -> Varchar,
-        collection_uuid -> Varchar,
+        cipher_uuid -> Text,
+        collection_uuid -> Text,
     }
 }

 table! {
     collections (uuid) {
-        uuid -> Varchar,
-        org_uuid -> Varchar,
+        uuid -> Text,
+        org_uuid -> Text,
         name -> Text,
     }
 }

 table! {
     devices (uuid) {
-        uuid -> Varchar,
+        uuid -> Text,
         created_at -> Datetime,
         updated_at -> Datetime,
-        user_uuid -> Varchar,
+        user_uuid -> Text,
         name -> Text,
         atype -> Integer,
         push_token -> Nullable<Text>,
@@ -55,33 +54,40 @@ table! {
     }
 }

+table! {
+    favorites (user_uuid, cipher_uuid) {
+        user_uuid -> Text,
+        cipher_uuid -> Text,
+    }
+}
+
 table! {
     folders (uuid) {
-        uuid -> Varchar,
+        uuid -> Text,
         created_at -> Datetime,
         updated_at -> Datetime,
-        user_uuid -> Varchar,
+        user_uuid -> Text,
         name -> Text,
     }
 }

 table! {
     folders_ciphers (cipher_uuid, folder_uuid) {
-        cipher_uuid -> Varchar,
-        folder_uuid -> Varchar,
+        cipher_uuid -> Text,
+        folder_uuid -> Text,
     }
 }

 table! {
     invitations (email) {
-        email -> Varchar,
+        email -> Text,
     }
 }

 table! {
     org_policies (uuid) {
-        uuid -> Varchar,
-        org_uuid -> Varchar,
+        uuid -> Text,
+        org_uuid -> Text,
         atype -> Integer,
         enabled -> Bool,
         data -> Text,
@@ -90,7 +96,7 @@ table! {

 table! {
     organizations (uuid) {
-        uuid -> Varchar,
+        uuid -> Text,
         name -> Text,
         billing_email -> Text,
     }
@@ -98,8 +104,8 @@ table! {

 table! {
     twofactor (uuid) {
-        uuid -> Varchar,
-        user_uuid -> Varchar,
+        uuid -> Text,
+        user_uuid -> Text,
         atype -> Integer,
         enabled -> Bool,
         data -> Text,
@@ -109,18 +115,19 @@ table! {

 table! {
     users (uuid) {
-        uuid -> Varchar,
+        uuid -> Text,
+        enabled -> Bool,
         created_at -> Datetime,
         updated_at -> Datetime,
         verified_at -> Nullable<Datetime>,
         last_verifying_at -> Nullable<Datetime>,
         login_verify_count -> Integer,
-        email -> Varchar,
-        email_new -> Nullable<Varchar>,
-        email_new_token -> Nullable<Varchar>,
+        email -> Text,
+        email_new -> Nullable<Text>,
+        email_new_token -> Nullable<Text>,
         name -> Text,
-        password_hash -> Blob,
-        salt -> Blob,
+        password_hash -> Binary,
+        salt -> Binary,
         password_iterations -> Integer,
         password_hint -> Nullable<Text>,
         akey -> Text,
@@ -129,6 +136,7 @@ table! {
         totp_secret -> Nullable<Text>,
         totp_recover -> Nullable<Text>,
         security_stamp -> Text,
+        stamp_exception -> Nullable<Text>,
         equivalent_domains -> Text,
         excluded_globals -> Text,
         client_kdf_type -> Integer,
@@ -138,8 +146,8 @@ table! {

 table! {
     users_collections (user_uuid, collection_uuid) {
-        user_uuid -> Varchar,
-        collection_uuid -> Varchar,
+        user_uuid -> Text,
+        collection_uuid -> Text,
         read_only -> Bool,
         hide_passwords -> Bool,
     }
@@ -147,9 +155,9 @@ table! {

 table! {
     users_organizations (uuid) {
-        uuid -> Varchar,
-        user_uuid -> Varchar,
-        org_uuid -> Varchar,
+        uuid -> Text,
+        user_uuid -> Text,
+        org_uuid -> Text,
         access_all -> Bool,
         akey -> Text,
         status -> Integer,
@@ -20,7 +20,6 @@ table! {
         notes -> Nullable<Text>,
         fields -> Nullable<Text>,
         data -> Text,
-        favorite -> Bool,
         password_history -> Nullable<Text>,
         deleted_at -> Nullable<Timestamp>,
     }
@@ -55,6 +54,13 @@ table! {
     }
 }

+table! {
+    favorites (user_uuid, cipher_uuid) {
+        user_uuid -> Text,
+        cipher_uuid -> Text,
+    }
+}
+
 table! {
     folders (uuid) {
         uuid -> Text,
@@ -110,6 +116,7 @@ table! {
 table! {
     users (uuid) {
         uuid -> Text,
+        enabled -> Bool,
         created_at -> Timestamp,
         updated_at -> Timestamp,
         verified_at -> Nullable<Timestamp>,
@@ -129,6 +136,7 @@ table! {
         totp_secret -> Nullable<Text>,
         totp_recover -> Nullable<Text>,
         security_stamp -> Text,
+        stamp_exception -> Nullable<Text>,
         equivalent_domains -> Text,
         excluded_globals -> Text,
         client_kdf_type -> Integer,
@@ -20,7 +20,6 @@ table! {
         notes -> Nullable<Text>,
         fields -> Nullable<Text>,
         data -> Text,
-        favorite -> Bool,
         password_history -> Nullable<Text>,
         deleted_at -> Nullable<Timestamp>,
     }
@@ -55,6 +54,13 @@ table! {
     }
 }

+table! {
+    favorites (user_uuid, cipher_uuid) {
+        user_uuid -> Text,
+        cipher_uuid -> Text,
+    }
+}
+
 table! {
     folders (uuid) {
         uuid -> Text,
@@ -110,6 +116,7 @@ table! {
 table! {
     users (uuid) {
         uuid -> Text,
+        enabled -> Bool,
         created_at -> Timestamp,
         updated_at -> Timestamp,
         verified_at -> Nullable<Timestamp>,
@@ -129,6 +136,7 @@ table! {
         totp_secret -> Nullable<Text>,
         totp_recover -> Nullable<Text>,
         security_stamp -> Text,
+        stamp_exception -> Nullable<Text>,
         equivalent_domains -> Text,
         excluded_globals -> Text,
         client_kdf_type -> Integer,
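All three schema files above drop the `favorite -> Bool` column from `ciphers` and introduce a composite-key `favorites` table, turning the flag into a per-user relation. A hedged sketch of what a query against the new table looks like: the `table!` invocation is copied from the diff, while `is_favorite` is a hypothetical helper for illustration, not code from this change.

#[macro_use]
extern crate diesel;

use diesel::dsl::exists;
use diesel::prelude::*;
use diesel::sqlite::SqliteConnection;

table! {
    favorites (user_uuid, cipher_uuid) {
        user_uuid -> Text,
        cipher_uuid -> Text,
    }
}

// A favorite is now a (user_uuid, cipher_uuid) row instead of a Bool column
// on ciphers, so "is this cipher a favorite for this user" becomes an
// EXISTS query against the composite primary key.
fn is_favorite(user: &str, cipher: &str, conn: &SqliteConnection) -> QueryResult<bool> {
    use self::favorites::dsl::*;
    diesel::select(exists(
        favorites.filter(user_uuid.eq(user)).filter(cipher_uuid.eq(cipher)),
    ))
    .get_result::<bool>(conn)
}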
src/error.rs (14 changes)
@@ -34,6 +34,9 @@ macro_rules! make_error {
 }

 use diesel::result::Error as DieselErr;
+use diesel::ConnectionError as DieselConErr;
+use diesel_migrations::RunMigrationsError as DieselMigErr;
+use diesel::r2d2::PoolError as R2d2Err;
 use handlebars::RenderError as HbErr;
 use jsonwebtoken::errors::Error as JWTErr;
 use regex::Error as RegexErr;
@@ -48,7 +51,7 @@ use yubico::yubicoerror::YubicoError as YubiErr;
 use lettre::address::AddressError as AddrErr;
 use lettre::error::Error as LettreErr;
 use lettre::message::mime::FromStrError as FromStrErr;
-use lettre::transport::smtp::error::Error as SmtpErr;
+use lettre::transport::smtp::Error as SmtpErr;

 #[derive(Serialize)]
 pub struct Empty {}
@@ -66,6 +69,7 @@ make_error! {
     // Used for special return values, like 2FA errors
     JsonError(Value): _no_source, _serialize,
     DbError(DieselErr): _has_source, _api_error,
+    R2d2Error(R2d2Err): _has_source, _api_error,
     U2fError(U2fErr): _has_source, _api_error,
     SerdeError(SerdeErr): _has_source, _api_error,
     JWTError(JWTErr): _has_source, _api_error,
@@ -77,10 +81,13 @@ make_error! {
     RegexError(RegexErr): _has_source, _api_error,
     YubiError(YubiErr): _has_source, _api_error,

-    LetreError(LettreErr): _has_source, _api_error,
+    LettreError(LettreErr): _has_source, _api_error,
     AddressError(AddrErr): _has_source, _api_error,
     SmtpError(SmtpErr): _has_source, _api_error,
     FromStrError(FromStrErr): _has_source, _api_error,
+
+    DieselConError(DieselConErr): _has_source, _api_error,
+    DieselMigError(DieselMigErr): _has_source, _api_error,
 }

 impl std::fmt::Debug for Error {
@@ -184,6 +191,7 @@ impl<'r> Responder<'r> for Error {
     fn respond_to(self, _: &Request) -> response::Result<'r> {
         match self.error {
             ErrorKind::EmptyError(_) => {} // Don't print the error in this situation
+            ErrorKind::SimpleError(_) => {} // Don't print the error in this situation
             _ => error!(target: "error", "{:#?}", self),
         };

@@ -203,9 +211,11 @@ impl<'r> Responder<'r> for Error {
 #[macro_export]
 macro_rules! err {
     ($msg:expr) => {{
+        error!("{}", $msg);
         return Err(crate::error::Error::new($msg, $msg));
     }};
     ($usr_msg:expr, $log_value:expr) => {{
+        error!("{}. {}", $usr_msg, $log_value);
         return Err(crate::error::Error::new($usr_msg, $log_value));
     }};
 }
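For context on the two `error!` lines added to `err!` above: every early return now logs its message before constructing the `Error`, so a failure leaves a trace even when the caller discards the result. A small hypothetical caller (not part of the diff) shows the effect:

// `validate_email` is illustrative only; `EmptyResult` and `err!` come
// from the crate as shown in the diff.
fn validate_email(email: &str) -> EmptyResult {
    if email.trim().is_empty() {
        // expands to: error!("{}", "Email can't be empty");
        //             return Err(crate::error::Error::new(...));
        err!("Email can't be empty")
    }
    Ok(())
}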
src/mail.rs (145 changes)
@@ -1,15 +1,14 @@
-use std::{env, str::FromStr};
+use std::{str::FromStr};

 use chrono::{DateTime, Local};
-use chrono_tz::Tz;
-use native_tls::{Protocol, TlsConnector};
 use percent_encoding::{percent_encode, NON_ALPHANUMERIC};

 use lettre::{
     message::{header, Mailbox, Message, MultiPart, SinglePart},
     transport::smtp::authentication::{Credentials, Mechanism as SmtpAuthMechanism},
+    transport::smtp::client::{Tls, TlsParameters},
     transport::smtp::extension::ClientId,
-    Address, SmtpTransport, Tls, TlsParameters, Transport,
+    Address, SmtpTransport, Transport,
 };

 use crate::{
@@ -20,53 +19,67 @@ use crate::{
 };

 fn mailer() -> SmtpTransport {
+    use std::time::Duration;
     let host = CONFIG.smtp_host().unwrap();

-    let client_security = if CONFIG.smtp_ssl() {
-        let tls = TlsConnector::builder()
-            .min_protocol_version(Some(Protocol::Tlsv11))
-            .build()
-            .unwrap();
+    let smtp_client = SmtpTransport::builder_dangerous(host.as_str())
+        .port(CONFIG.smtp_port())
+        .timeout(Some(Duration::from_secs(CONFIG.smtp_timeout())));

-        let params = TlsParameters::new(host.clone(), tls);
+    // Determine security
+    let smtp_client = if CONFIG.smtp_ssl() {
+        let mut tls_parameters = TlsParameters::builder(host);
+        if CONFIG.smtp_accept_invalid_hostnames() {
+            tls_parameters.dangerous_accept_invalid_hostnames(true);
+        }
+        if CONFIG.smtp_accept_invalid_certs() {
+            tls_parameters.dangerous_accept_invalid_certs(true);
+        }
+        let tls_parameters = tls_parameters.build().unwrap();

         if CONFIG.smtp_explicit_tls() {
-            Tls::Wrapper(params)
+            smtp_client.tls(Tls::Wrapper(tls_parameters))
         } else {
-            Tls::Required(params)
+            smtp_client.tls(Tls::Required(tls_parameters))
         }
     } else {
-        Tls::None
+        smtp_client
     };

-    use std::time::Duration;
-
-    let smtp_client = SmtpTransport::builder(host).port(CONFIG.smtp_port()).tls(client_security);
-
     let smtp_client = match (CONFIG.smtp_username(), CONFIG.smtp_password()) {
         (Some(user), Some(pass)) => smtp_client.credentials(Credentials::new(user, pass)),
         _ => smtp_client,
     };

     let smtp_client = match CONFIG.helo_name() {
-        Some(helo_name) => smtp_client.hello_name(ClientId::new(helo_name)),
+        Some(helo_name) => smtp_client.hello_name(ClientId::Domain(helo_name)),
         None => smtp_client,
     };

     let smtp_client = match CONFIG.smtp_auth_mechanism() {
         Some(mechanism) => {
-            let correct_mechanism = format!("\"{}\"", crate::util::upcase_first(mechanism.trim_matches('"')));
+            let allowed_mechanisms = vec![SmtpAuthMechanism::Plain, SmtpAuthMechanism::Login, SmtpAuthMechanism::Xoauth2];
+            let mut selected_mechanisms = vec![];
+            for wanted_mechanism in mechanism.split(',') {
+                for m in &allowed_mechanisms {
+                    if m.to_string().to_lowercase() == wanted_mechanism.trim_matches(|c| c == '"' || c == '\'' || c == ' ').to_lowercase() {
+                        selected_mechanisms.push(*m);
+                    }
+                }
+            };

-            // TODO: Allow more than one mechanism
-            match serde_json::from_str::<SmtpAuthMechanism>(&correct_mechanism) {
-                Ok(auth_mechanism) => smtp_client.authentication(vec![auth_mechanism]),
-                _ => panic!("Failure to parse mechanism. Is it proper Json? Eg. `\"Plain\"` not `Plain`"),
+            if !selected_mechanisms.is_empty() {
+                smtp_client.authentication(selected_mechanisms)
+            } else {
+                // Only show a warning, and return without setting an actual authentication mechanism
+                warn!("No valid SMTP Auth mechanism found for '{}', using default values", mechanism);
+                smtp_client
             }
         }
         _ => smtp_client,
     };

-    smtp_client.timeout(Some(Duration::from_secs(CONFIG.smtp_timeout()))).build()
+    smtp_client.build()
 }

 fn get_text(template_name: &'static str, data: serde_json::Value) -> Result<(String, String, String), Error> {
@@ -84,30 +97,15 @@ fn get_template(template_name: &str, data: &serde_json::Value) -> Result<(String
         None => err!("Template doesn't contain subject"),
     };

+    use newline_converter::unix2dos;
     let body = match text_split.next() {
-        Some(s) => s.trim().to_string(),
+        Some(s) => unix2dos(s.trim()).to_string(),
         None => err!("Template doesn't contain body"),
     };

     Ok((subject, body))
 }

-pub fn format_datetime(dt: &DateTime<Local>) -> String {
-    let fmt = "%A, %B %_d, %Y at %r %Z";
-
-    // With a DateTime<Local>, `%Z` formats as the time zone's UTC offset
-    // (e.g., `+00:00`). If the `TZ` environment variable is set, try to
-    // format as a time zone abbreviation instead (e.g., `UTC`).
-    if let Ok(tz) = env::var("TZ") {
-        if let Ok(tz) = tz.parse::<Tz>() {
-            return dt.with_timezone(&tz).format(fmt).to_string();
-        }
-    }
-
-    // Otherwise, fall back to just displaying the UTC offset.
-    dt.format(fmt).to_string()
-}
-
 pub fn send_password_hint(address: &str, hint: Option<String>) -> EmptyResult {
     let template_name = if hint.is_some() {
         "email/pw_hint_some"
@@ -242,13 +240,14 @@ pub fn send_new_device_logged_in(address: &str, ip: &str, dt: &DateTime<Local>,
     use crate::util::upcase_first;
     let device = upcase_first(device);

+    let fmt = "%A, %B %_d, %Y at %r %Z";
     let (subject, body_html, body_text) = get_text(
         "email/new_device_logged_in",
         json!({
             "url": CONFIG.domain(),
             "ip": ip,
             "device": device,
-            "datetime": format_datetime(dt),
+            "datetime": crate::util::format_datetime_local(dt, fmt),
         }),
     )?;

@@ -303,35 +302,51 @@ fn send_email(address: &str, subject: &str, body_html: &str, body_text: &str) ->

     let address = format!("{}@{}", address_split[1], domain_puny);

-    let data = MultiPart::mixed()
-        .multipart(
-            MultiPart::alternative()
-                .singlepart(
-                    SinglePart::quoted_printable()
-                        .header(header::ContentType("text/plain; charset=utf-8".parse()?))
-                        .body(body_text),
-                )
-                .multipart(
-                    MultiPart::related().singlepart(
-                        SinglePart::quoted_printable()
-                            .header(header::ContentType("text/html; charset=utf-8".parse()?))
-                            .body(body_html),
-                    )
-                    // .singlepart(SinglePart::base64() -- Inline files would go here
-                ),
-        )
-        // .singlepart(SinglePart::base64() -- Attachments would go here
-    ;
+    let html = SinglePart::builder()
+        // We force Base64 encoding because in the past we had issues with different encodings.
+        .header(header::ContentTransferEncoding::Base64)
+        .header(header::ContentType("text/html; charset=utf-8".parse()?))
+        .body(String::from(body_html));

+    let text = SinglePart::builder()
+        // We force Base64 encoding because in the past we had issues with different encodings.
+        .header(header::ContentTransferEncoding::Base64)
+        .header(header::ContentType("text/plain; charset=utf-8".parse()?))
+        .body(String::from(body_text));
+
+    let smtp_from = &CONFIG.smtp_from();
     let email = Message::builder()
+        .message_id(Some(format!("<{}@{}>", crate::util::get_uuid(), smtp_from.split('@').collect::<Vec<&str>>()[1] )))
         .to(Mailbox::new(None, Address::from_str(&address)?))
         .from(Mailbox::new(
             Some(CONFIG.smtp_from_name()),
-            Address::from_str(&CONFIG.smtp_from())?,
+            Address::from_str(smtp_from)?,
         ))
         .subject(subject)
-        .multipart(data)?;
+        .multipart(
+            MultiPart::alternative()
+                .singlepart(text)
+                .singlepart(html)
+        )?;

-    let _ = mailer().send(&email)?;
-    Ok(())
+    match mailer().send(&email) {
+        Ok(_) => Ok(()),
+        // Match some common errors and make them more user friendly
+        Err(e) => match e {
+            lettre::transport::smtp::Error::Client(x) => {
+                err!(format!("SMTP Client error: {}", x));
+            },
+            lettre::transport::smtp::Error::Transient(x) => {
+                err!(format!("SMTP 4xx error: {:?}", x.message));
+            },
+            lettre::transport::smtp::Error::Permanent(x) => {
+                err!(format!("SMTP 5xx error: {:?}", x.message));
+            },
+            lettre::transport::smtp::Error::Io(x) => {
+                err!(format!("SMTP IO error: {}", x));
+            },
+            // Fallback for all other errors
+            _ => Err(e.into())
+        }
+    }
 }
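The rewritten mechanism handling above replaces the old single-value JSON parse (which panicked on bad input) with a case-insensitive match of a comma-separated config value against an allow-list. Below is a standalone, runnable sketch of that same selection logic, with lettre's `Mechanism` stood in by a local enum so the snippet needs no external crates; everything else mirrors the diff.

#[derive(Clone, Copy, Debug, PartialEq)]
enum Mechanism { Plain, Login, Xoauth2 }

impl std::fmt::Display for Mechanism {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}", self) // "Plain", "Login", "Xoauth2"
    }
}

// Accept values like `plain, "Login"`: split on commas, strip quotes and
// spaces, and keep only mechanisms from the allow-list, case-insensitively.
fn select_mechanisms(config: &str) -> Vec<Mechanism> {
    let allowed = [Mechanism::Plain, Mechanism::Login, Mechanism::Xoauth2];
    let mut selected = Vec::new();
    for wanted in config.split(',') {
        let wanted = wanted.trim_matches(|c| c == '"' || c == '\'' || c == ' ');
        for m in &allowed {
            if m.to_string().eq_ignore_ascii_case(wanted) {
                selected.push(*m);
            }
        }
    }
    selected
}

fn main() {
    assert_eq!(select_mechanisms("plain, \"Login\""), vec![Mechanism::Plain, Mechanism::Login]);
    assert!(select_mechanisms("Bogus").is_empty()); // falls back to defaults, per the diff
}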
src/main.rs (115 changes)
@@ -1,12 +1,12 @@
 #![forbid(unsafe_code)]
 #![cfg_attr(feature = "unstable", feature(ip))]
-#![recursion_limit = "256"]
+#![recursion_limit = "512"]

 extern crate openssl;
 #[macro_use]
 extern crate rocket;
 #[macro_use]
-extern crate serde_derive;
+extern crate serde;
 #[macro_use]
 extern crate serde_json;
 #[macro_use]
@@ -17,7 +17,6 @@ extern crate diesel;
 extern crate diesel_migrations;

 use std::{
-    fmt, // For panic logging
     fs::create_dir_all,
     panic,
     path::Path,
@@ -26,12 +25,15 @@ use std::{
     thread,
 };

+use structopt::StructOpt;
+
 #[macro_use]
 mod error;
 mod api;
 mod auth;
 mod config;
 mod crypto;
+#[macro_use]
 mod db;
 mod mail;
 mod util;
@@ -39,18 +41,6 @@ mod util;
 pub use config::CONFIG;
 pub use error::{Error, MapResult};

-use structopt::StructOpt;
-
-// Used for catching panics and log them to file instead of stderr
-use backtrace::Backtrace;
-struct Shim(Backtrace);
-
-impl fmt::Debug for Shim {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        write!(fmt, "\n{:?}", self.0)
-    }
-}
-
 #[derive(Debug, StructOpt)]
 #[structopt(name = "bitwarden_rs", about = "A Bitwarden API server written in Rust")]
 struct Opt {
@@ -72,10 +62,8 @@ fn main() {
         _ => false,
     };

-    check_db();
     check_rsa_keys();
     check_web_vault();
-    migrations::run_migrations();

     create_icon_cache_folder();

@@ -125,8 +113,21 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
         .level_for("launch_", log::LevelFilter::Off)
         .level_for("rocket::rocket", log::LevelFilter::Off)
         .level_for("rocket::fairing", log::LevelFilter::Off)
+        // Never show html5ever and hyper::proto logs, too noisy
+        .level_for("html5ever", log::LevelFilter::Off)
+        .level_for("hyper::proto", log::LevelFilter::Off)
         .chain(std::io::stdout());

+    // Enable smtp debug logging only specifically for smtp when need.
+    // This can contain sensitive information we do not want in the default debug/trace logging.
+    if CONFIG.smtp_debug() {
+        println!("[WARNING] SMTP Debugging is enabled (SMTP_DEBUG=true). Sensitive information could be disclosed via logs!");
+        println!("[WARNING] Only enable SMTP_DEBUG during troubleshooting!\n");
+        logger = logger.level_for("lettre::transport::smtp", log::LevelFilter::Debug)
+    } else {
+        logger = logger.level_for("lettre::transport::smtp", log::LevelFilter::Off)
+    }
+
     if CONFIG.extended_logging() {
         logger = logger.format(|out, message, record| {
             out.finish(format_args!(
@@ -156,8 +157,6 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {

     // Catch panics and log them instead of default output to StdErr
     panic::set_hook(Box::new(|info| {
-        let backtrace = Backtrace::new();
-
         let thread = thread::current();
         let thread = thread.name().unwrap_or("unnamed");

@@ -169,23 +168,25 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
             },
         };

+        let backtrace = backtrace::Backtrace::new();
+
         match info.location() {
             Some(location) => {
                 error!(
-                    target: "panic", "thread '{}' panicked at '{}': {}:{}{:?}",
+                    target: "panic", "thread '{}' panicked at '{}': {}:{}\n{:?}",
                     thread,
                     msg,
                     location.file(),
                     location.line(),
-                    Shim(backtrace)
+                    backtrace
                 );
             }
             None => error!(
                 target: "panic",
-                "thread '{}' panicked at '{}'{:?}",
+                "thread '{}' panicked at '{}'\n{:?}",
                 thread,
                 msg,
-                Shim(backtrace)
+                backtrace
             ),
         }
     }));
@@ -211,30 +212,6 @@ fn chain_syslog(logger: fern::Dispatch) -> fern::Dispatch {
     }
 }

-fn check_db() {
-    if cfg!(feature = "sqlite") {
-        let url = CONFIG.database_url();
-        let path = Path::new(&url);
-
-        if let Some(parent) = path.parent() {
-            if create_dir_all(parent).is_err() {
-                error!("Error creating database directory");
-                exit(1);
-            }
-        }
-
-        // Turn on WAL in SQLite
-        if CONFIG.enable_db_wal() {
-            use diesel::RunQueryDsl;
-            let connection = db::get_connection().expect("Can't connect to DB");
-            diesel::sql_query("PRAGMA journal_mode=wal")
-                .execute(&connection)
-                .expect("Failed to turn on WAL");
-        }
-    }
-    db::get_connection().expect("Can't connect to DB");
-}
-
 fn create_icon_cache_folder() {
     // Try to create the icon cache folder, and generate an error if it could not.
     create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache directory");
@@ -296,46 +273,22 @@ fn check_web_vault() {
     let index_path = Path::new(&CONFIG.web_vault_folder()).join("index.html");

     if !index_path.exists() {
-        error!("Web vault is not found. To install it, please follow the steps in: ");
+        error!("Web vault is not found at '{}'. To install it, please follow the steps in: ", CONFIG.web_vault_folder());
         error!("https://github.com/dani-garcia/bitwarden_rs/wiki/Building-binary#install-the-web-vault");
         error!("You can also set the environment variable 'WEB_VAULT_ENABLED=false' to disable it");
         exit(1);
     }
 }

-// Embed the migrations from the migrations folder into the application
-// This way, the program automatically migrates the database to the latest version
-// https://docs.rs/diesel_migrations/*/diesel_migrations/macro.embed_migrations.html
-#[allow(unused_imports)]
-mod migrations {
-
-    #[cfg(feature = "sqlite")]
-    embed_migrations!("migrations/sqlite");
-    #[cfg(feature = "mysql")]
-    embed_migrations!("migrations/mysql");
-    #[cfg(feature = "postgresql")]
-    embed_migrations!("migrations/postgresql");
-
-    pub fn run_migrations() {
-        // Make sure the database is up to date (create if it doesn't exist, or run the migrations)
-        let connection = crate::db::get_connection().expect("Can't connect to DB");
-
-        use std::io::stdout;
-
-        // Disable Foreign Key Checks during migration
-        use diesel::RunQueryDsl;
-        #[cfg(feature = "postgres")]
-        diesel::sql_query("SET CONSTRAINTS ALL DEFERRED").execute(&connection).expect("Failed to disable Foreign Key Checks during migrations");
-        #[cfg(feature = "mysql")]
-        diesel::sql_query("SET FOREIGN_KEY_CHECKS = 0").execute(&connection).expect("Failed to disable Foreign Key Checks during migrations");
-        #[cfg(feature = "sqlite")]
-        diesel::sql_query("PRAGMA defer_foreign_keys = ON").execute(&connection).expect("Failed to disable Foreign Key Checks during migrations");
-
-        embedded_migrations::run_with_output(&connection, &mut stdout()).expect("Can't run migrations");
-    }
-}
-
 fn launch_rocket(extra_debug: bool) {
+    let pool = match util::retry_db(db::DbPool::from_config, CONFIG.db_connection_retries()) {
+        Ok(p) => p,
+        Err(e) => {
+            error!("Error creating database pool: {:?}", e);
+            exit(1);
+        }
+    };
+
     let basepath = &CONFIG.domain_path();

     // If adding more paths here, consider also adding them to
@@ -347,7 +300,7 @@ fn launch_rocket(extra_debug: bool) {
         .mount(&[basepath, "/identity"].concat(), api::identity_routes())
         .mount(&[basepath, "/icons"].concat(), api::icons_routes())
         .mount(&[basepath, "/notifications"].concat(), api::notifications_routes())
-        .manage(db::init_pool())
+        .manage(pool)
         .manage(api::start_notification_server())
         .attach(util::AppHeaders())
         .attach(util::CORS())
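The logging hunk above gates lettre's SMTP wire logging behind SMTP_DEBUG, since that output can contain credentials and message contents. A reduced sketch of the same fern dispatch logic, with `smtp_debug` standing in for `CONFIG.smtp_debug()` (assumes the `fern` and `log` crates as dependencies):

// Build a logger that silences noisy crates unconditionally and enables
// lettre's SMTP module logging only when explicitly requested.
fn build_logger(smtp_debug: bool) -> fern::Dispatch {
    let logger = fern::Dispatch::new()
        .level(log::LevelFilter::Info)
        .level_for("html5ever", log::LevelFilter::Off)
        .level_for("hyper::proto", log::LevelFilter::Off)
        .chain(std::io::stdout());
    if smtp_debug {
        logger.level_for("lettre::transport::smtp", log::LevelFilter::Debug)
    } else {
        logger.level_for("lettre::transport::smtp", log::LevelFilter::Off)
    }
}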
(binary image removed: 344 B)

@@ -39,8 +39,7 @@
     "Type": 1,
     "Domains": [
       "apple.com",
-      "icloud.com",
-      "tv.apple.com"
+      "icloud.com"
     ],
     "Excluded": false
   },
@@ -106,6 +105,7 @@
       "passport.net",
       "windows.com",
       "microsoftonline.com",
+      "office.com",
       "office365.com",
       "microsoftstore.com",
       "xbox.com",
@@ -185,15 +185,21 @@
     "Type": 18,
     "Domains": [
       "amazon.com",
-      "amazon.co.uk",
+      "amazon.ae",
       "amazon.ca",
-      "amazon.de",
-      "amazon.fr",
-      "amazon.es",
-      "amazon.it",
+      "amazon.co.uk",
       "amazon.com.au",
-      "amazon.co.nz",
-      "amazon.in"
+      "amazon.com.br",
+      "amazon.com.mx",
+      "amazon.com.tr",
+      "amazon.de",
+      "amazon.es",
+      "amazon.fr",
+      "amazon.in",
+      "amazon.it",
+      "amazon.nl",
+      "amazon.sa",
+      "amazon.sg"
     ],
     "Excluded": false
   },
@@ -386,8 +392,7 @@
       "alibaba.com",
       "aliexpress.com",
       "aliyun.com",
-      "net.cn",
-      "www.net.cn"
+      "net.cn"
     ],
     "Excluded": false
   },
@@ -503,7 +508,8 @@
       "disneymoviesanywhere.com",
       "go.com",
       "disney.com",
-      "dadt.com"
+      "dadt.com",
+      "disneyplus.com"
     ],
     "Excluded": false
   },
@@ -583,11 +589,28 @@
     "Type": 64,
     "Domains": [
       "ebay.com",
-      "ebay.de",
+      "ebay.at",
+      "ebay.be",
       "ebay.ca",
-      "ebay.in",
+      "ebay.ch",
+      "ebay.cn",
+      "ebay.co.jp",
+      "ebay.co.th",
       "ebay.co.uk",
-      "ebay.com.au"
+      "ebay.com.au",
+      "ebay.com.hk",
+      "ebay.com.my",
+      "ebay.com.sg",
+      "ebay.com.tw",
+      "ebay.de",
+      "ebay.es",
+      "ebay.fr",
+      "ebay.ie",
+      "ebay.in",
+      "ebay.it",
+      "ebay.nl",
+      "ebay.ph",
+      "ebay.pl"
     ],
     "Excluded": false
   },
@@ -717,41 +740,27 @@
       "eventbrite.ca",
       "eventbrite.ch",
       "eventbrite.cl",
-      "eventbrite.co.id",
-      "eventbrite.co.in",
-      "eventbrite.co.kr",
+      "eventbrite.co",
       "eventbrite.co.nz",
       "eventbrite.co.uk",
-      "eventbrite.co.ve",
       "eventbrite.com",
+      "eventbrite.com.ar",
       "eventbrite.com.au",
-      "eventbrite.com.bo",
       "eventbrite.com.br",
-      "eventbrite.com.co",
-      "eventbrite.com.hk",
-      "eventbrite.com.hn",
+      "eventbrite.com.mx",
       "eventbrite.com.pe",
-      "eventbrite.com.sg",
-      "eventbrite.com.tr",
-      "eventbrite.com.tw",
-      "eventbrite.cz",
       "eventbrite.de",
       "eventbrite.dk",
+      "eventbrite.es",
       "eventbrite.fi",
       "eventbrite.fr",
-      "eventbrite.gy",
-      "eventbrite.hu",
+      "eventbrite.hk",
       "eventbrite.ie",
-      "eventbrite.is",
       "eventbrite.it",
-      "eventbrite.jp",
-      "eventbrite.mx",
       "eventbrite.nl",
-      "eventbrite.no",
-      "eventbrite.pl",
       "eventbrite.pt",
-      "eventbrite.ru",
-      "eventbrite.se"
+      "eventbrite.se",
+      "eventbrite.sg"
     ],
     "Excluded": false
   },
@@ -769,15 +778,6 @@
   },
   {
     "Type": 75,
-    "Domains": [
-      "netcup.de",
-      "netcup.eu",
-      "customercontrolpanel.de"
-    ],
-    "Excluded": false
-  },
-  {
-    "Type": 76,
     "Domains": [
       "docusign.com",
       "docusign.net"
@@ -785,7 +785,7 @@
     "Excluded": false
   },
   {
-    "Type": 77,
+    "Type": 76,
     "Domains": [
       "envato.com",
       "themeforest.net",
@@ -799,7 +799,7 @@
     "Excluded": false
   },
   {
-    "Type": 78,
+    "Type": 77,
     "Domains": [
       "x10hosting.com",
       "x10premium.com"
@@ -807,7 +807,7 @@
     "Excluded": false
   },
   {
-    "Type": 79,
+    "Type": 78,
     "Domains": [
       "dnsomatic.com",
       "opendns.com",
@@ -816,7 +816,7 @@
     "Excluded": false
   },
   {
-    "Type": 80,
+    "Type": 79,
     "Domains": [
       "cagreatamerica.com",
       "canadaswonderland.com",
@@ -835,11 +835,64 @@
     "Excluded": false
   },
   {
-    "Type": 81,
+    "Type": 80,
     "Domains": [
       "ubnt.com",
       "ui.com"
     ],
     "Excluded": false
+  },
+  {
+    "Type": 81,
+    "Domains": [
+      "discordapp.com",
+      "discord.com"
+    ],
+    "Excluded": false
+  },
+  {
+    "Type": 82,
+    "Domains": [
+      "netcup.de",
+      "netcup.eu",
+      "customercontrolpanel.de"
+    ],
+    "Excluded": false
+  },
+  {
+    "Type": 83,
+    "Domains": [
+      "yandex.com",
+      "ya.ru",
+      "yandex.az",
+      "yandex.by",
+      "yandex.co.il",
+      "yandex.com.am",
+      "yandex.com.ge",
+      "yandex.com.tr",
+      "yandex.ee",
+      "yandex.fi",
+      "yandex.fr",
+      "yandex.kg",
+      "yandex.kz",
+      "yandex.lt",
+      "yandex.lv",
+      "yandex.md",
+      "yandex.pl",
+      "yandex.ru",
+      "yandex.tj",
+      "yandex.tm",
+      "yandex.ua",
+      "yandex.uz"
+    ],
+    "Excluded": false
+  },
+  {
+    "Type": 84,
+    "Domains": [
+      "sonyentertainmentnetwork.com",
+      "sony.com"
+    ],
+    "Excluded": false
   }
 ]
(binary image changed: 9.7 KiB → 9.2 KiB)
(binary image changed: 5.8 KiB → 5.3 KiB)
(binary image changed: 1.3 KiB → 1.3 KiB)
(binary image changed: 1.9 KiB → 1.8 KiB)
src/static/scripts/bootstrap.css (83 changes, vendored)
@@ -1,8 +1,8 @@
 /*!
- * Bootstrap v4.5.0 (https://getbootstrap.com/)
+ * Bootstrap v4.5.3 (https://getbootstrap.com/)
  * Copyright 2011-2020 The Bootstrap Authors
  * Copyright 2011-2020 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
  */
 :root {
   --blue: #007bff;
@@ -163,12 +163,12 @@ a:hover {
   text-decoration: underline;
 }

-a:not([href]) {
+a:not([href]):not([class]) {
   color: inherit;
   text-decoration: none;
 }

-a:not([href]):hover {
+a:not([href]):not([class]):hover {
   color: inherit;
   text-decoration: none;
 }
@@ -216,6 +216,7 @@ caption {

 th {
   text-align: inherit;
+  text-align: -webkit-match-parent;
 }

 label {
@@ -539,39 +540,12 @@ pre code {
   overflow-y: scroll;
 }

-.container {
-  width: 100%;
-  padding-right: 15px;
-  padding-left: 15px;
-  margin-right: auto;
-  margin-left: auto;
-}
-
-@media (min-width: 576px) {
-  .container {
-    max-width: 540px;
-  }
-}
-
-@media (min-width: 768px) {
-  .container {
-    max-width: 720px;
-  }
-}
-
-@media (min-width: 992px) {
-  .container {
-    max-width: 960px;
-  }
-}
-
-@media (min-width: 1200px) {
-  .container {
-    max-width: 1140px;
-  }
-}
-
-.container-fluid, .container-sm, .container-md, .container-lg, .container-xl {
+.container,
+.container-fluid,
+.container-sm,
+.container-md,
+.container-lg,
+.container-xl {
   width: 100%;
   padding-right: 15px;
   padding-left: 15px;
@@ -640,7 +614,6 @@ pre code {
   flex-basis: 0;
   -ms-flex-positive: 1;
   flex-grow: 1;
-  min-width: 0;
   max-width: 100%;
 }

@@ -884,7 +857,6 @@ pre code {
   flex-basis: 0;
   -ms-flex-positive: 1;
   flex-grow: 1;
-  min-width: 0;
   max-width: 100%;
 }
 .row-cols-sm-1 > * {
@@ -1087,7 +1059,6 @@ pre code {
   flex-basis: 0;
   -ms-flex-positive: 1;
   flex-grow: 1;
-  min-width: 0;
   max-width: 100%;
 }
 .row-cols-md-1 > * {
@@ -1290,7 +1261,6 @@ pre code {
   flex-basis: 0;
   -ms-flex-positive: 1;
   flex-grow: 1;
-  min-width: 0;
   max-width: 100%;
 }
 .row-cols-lg-1 > * {
@@ -1493,7 +1463,6 @@ pre code {
   flex-basis: 0;
   -ms-flex-positive: 1;
   flex-grow: 1;
-  min-width: 0;
   max-width: 100%;
 }
 .row-cols-xl-1 > * {
@@ -2259,6 +2228,7 @@ textarea.form-control {
 .valid-tooltip {
   position: absolute;
   top: 100%;
+  left: 0;
   z-index: 5;
   display: none;
   max-width: 100%;
@@ -2359,6 +2329,7 @@ textarea.form-control {
 .invalid-tooltip {
   position: absolute;
   top: 100%;
+  left: 0;
   z-index: 5;
   display: none;
   max-width: 100%;
@@ -3776,9 +3747,12 @@ input[type="button"].btn-block {

 .custom-control {
   position: relative;
+  z-index: 1;
   display: block;
   min-height: 1.5rem;
   padding-left: 1.5rem;
+  -webkit-print-color-adjust: exact;
+  color-adjust: exact;
 }

 .custom-control-inline {
@@ -4312,12 +4286,14 @@ input[type="button"].btn-block {
   background-color: #007bff;
 }

+.nav-fill > .nav-link,
 .nav-fill .nav-item {
   -ms-flex: 1 1 auto;
   flex: 1 1 auto;
   text-align: center;
 }

+.nav-justified > .nav-link,
 .nav-justified .nav-item {
   -ms-flex-preferred-size: 0;
   flex-basis: 0;
@@ -4775,6 +4751,11 @@ input[type="button"].btn-block {
   border-bottom-left-radius: calc(0.25rem - 1px);
 }

+.card > .card-header + .list-group,
+.card > .list-group + .card-footer {
+  border-top: 0;
+}
+
 .card-body {
   -ms-flex: 1 1 auto;
   flex: 1 1 auto;
@@ -4814,10 +4795,6 @@ input[type="button"].btn-block {
   border-radius: calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0;
 }

-.card-header + .list-group .list-group-item:first-child {
-  border-top: 0;
-}
-
 .card-footer {
   padding: 0.75rem 1.25rem;
   background-color: rgba(0, 0, 0, 0.03);
@@ -4847,6 +4824,7 @@ input[type="button"].btn-block {
   bottom: 0;
   left: 0;
   padding: 1.25rem;
+  border-radius: calc(0.25rem - 1px);
 }

 .card-img,
@@ -4958,6 +4936,10 @@ input[type="button"].btn-block {
   }
 }

+.accordion {
+  overflow-anchor: none;
+}
+
 .accordion > .card {
   overflow: hidden;
 }
@@ -5310,6 +5292,7 @@ a.badge-dark:focus, a.badge-dark.focus {
   position: absolute;
   top: 0;
   right: 0;
+  z-index: 2;
   padding: 0.75rem 1.25rem;
   color: inherit;
 }
@@ -5876,15 +5859,14 @@ a.close.disabled {
 }

 .toast {
+  -ms-flex-preferred-size: 350px;
+  flex-basis: 350px;
   max-width: 350px;
-  overflow: hidden;
   font-size: 0.875rem;
   background-color: rgba(255, 255, 255, 0.85);
   background-clip: padding-box;
   border: 1px solid rgba(0, 0, 0, 0.1);
   box-shadow: 0 0.25rem 0.75rem rgba(0, 0, 0, 0.1);
-  -webkit-backdrop-filter: blur(10px);
-  backdrop-filter: blur(10px);
   opacity: 0;
   border-radius: 0.25rem;
 }
@@ -5916,6 +5898,8 @@ a.close.disabled {
   background-color: rgba(255, 255, 255, 0.85);
   background-clip: padding-box;
   border-bottom: 1px solid rgba(0, 0, 0, 0.05);
+  border-top-left-radius: calc(0.25rem - 1px);
+  border-top-right-radius: calc(0.25rem - 1px);
 }

 .toast-body {
@@ -10182,6 +10166,7 @@ a.text-dark:hover, a.text-dark:focus {
 }

 .text-break {
+  word-break: break-word !important;
   word-wrap: break-word !important;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
224
src/static/scripts/datatables.css
Normal file
@@ -0,0 +1,224 @@
+/*
+ * This combined file was created by the DataTables downloader builder:
+ *   https://datatables.net/download
+ *
+ * To rebuild or modify this file with the latest versions of the included
+ * software please visit:
+ *   https://datatables.net/download/#bs4/dt-1.10.23
+ *
+ * Included libraries:
+ *   DataTables 1.10.23
+ */
+
+@charset "UTF-8";
+table.dataTable {
+  clear: both;
+  margin-top: 6px !important;
+  margin-bottom: 6px !important;
+  max-width: none !important;
+  border-collapse: separate !important;
+  border-spacing: 0;
+}
+table.dataTable td,
+table.dataTable th {
+  -webkit-box-sizing: content-box;
+  box-sizing: content-box;
+}
+table.dataTable td.dataTables_empty,
+table.dataTable th.dataTables_empty {
+  text-align: center;
+}
+table.dataTable.nowrap th,
+table.dataTable.nowrap td {
+  white-space: nowrap;
+}
+
+div.dataTables_wrapper div.dataTables_length label {
+  font-weight: normal;
+  text-align: left;
+  white-space: nowrap;
+}
+div.dataTables_wrapper div.dataTables_length select {
+  width: auto;
+  display: inline-block;
+}
+div.dataTables_wrapper div.dataTables_filter {
+  text-align: right;
+}
+div.dataTables_wrapper div.dataTables_filter label {
+  font-weight: normal;
+  white-space: nowrap;
+  text-align: left;
+}
+div.dataTables_wrapper div.dataTables_filter input {
+  margin-left: 0.5em;
+  display: inline-block;
+  width: auto;
+}
+div.dataTables_wrapper div.dataTables_info {
+  padding-top: 0.85em;
+}
+div.dataTables_wrapper div.dataTables_paginate {
+  margin: 0;
+  white-space: nowrap;
+  text-align: right;
+}
+div.dataTables_wrapper div.dataTables_paginate ul.pagination {
+  margin: 2px 0;
+  white-space: nowrap;
+  justify-content: flex-end;
+}
+div.dataTables_wrapper div.dataTables_processing {
+  position: absolute;
+  top: 50%;
+  left: 50%;
+  width: 200px;
+  margin-left: -100px;
+  margin-top: -26px;
+  text-align: center;
+  padding: 1em 0;
+}
+
+table.dataTable > thead > tr > th:active,
+table.dataTable > thead > tr > td:active {
+  outline: none;
+}
+table.dataTable > thead > tr > th:not(.sorting_disabled),
+table.dataTable > thead > tr > td:not(.sorting_disabled) {
+  padding-right: 30px;
+}
+table.dataTable > thead .sorting,
+table.dataTable > thead .sorting_asc,
+table.dataTable > thead .sorting_desc,
+table.dataTable > thead .sorting_asc_disabled,
+table.dataTable > thead .sorting_desc_disabled {
+  cursor: pointer;
+  position: relative;
+}
+table.dataTable > thead .sorting:before, table.dataTable > thead .sorting:after,
+table.dataTable > thead .sorting_asc:before,
+table.dataTable > thead .sorting_asc:after,
+table.dataTable > thead .sorting_desc:before,
+table.dataTable > thead .sorting_desc:after,
+table.dataTable > thead .sorting_asc_disabled:before,
+table.dataTable > thead .sorting_asc_disabled:after,
+table.dataTable > thead .sorting_desc_disabled:before,
+table.dataTable > thead .sorting_desc_disabled:after {
+  position: absolute;
+  bottom: 0.9em;
+  display: block;
+  opacity: 0.3;
+}
+table.dataTable > thead .sorting:before,
+table.dataTable > thead .sorting_asc:before,
+table.dataTable > thead .sorting_desc:before,
+table.dataTable > thead .sorting_asc_disabled:before,
+table.dataTable > thead .sorting_desc_disabled:before {
+  right: 1em;
+  content: "↑";
+}
+table.dataTable > thead .sorting:after,
+table.dataTable > thead .sorting_asc:after,
+table.dataTable > thead .sorting_desc:after,
+table.dataTable > thead .sorting_asc_disabled:after,
+table.dataTable > thead .sorting_desc_disabled:after {
+  right: 0.5em;
+  content: "↓";
+}
+table.dataTable > thead .sorting_asc:before,
+table.dataTable > thead .sorting_desc:after {
+  opacity: 1;
+}
+table.dataTable > thead .sorting_asc_disabled:before,
+table.dataTable > thead .sorting_desc_disabled:after {
+  opacity: 0;
+}
+
+div.dataTables_scrollHead table.dataTable {
+  margin-bottom: 0 !important;
+}
+
+div.dataTables_scrollBody table {
+  border-top: none;
+  margin-top: 0 !important;
+  margin-bottom: 0 !important;
+}
+div.dataTables_scrollBody table thead .sorting:before,
+div.dataTables_scrollBody table thead .sorting_asc:before,
+div.dataTables_scrollBody table thead .sorting_desc:before,
+div.dataTables_scrollBody table thead .sorting:after,
+div.dataTables_scrollBody table thead .sorting_asc:after,
+div.dataTables_scrollBody table thead .sorting_desc:after {
+  display: none;
+}
+div.dataTables_scrollBody table tbody tr:first-child th,
+div.dataTables_scrollBody table tbody tr:first-child td {
+  border-top: none;
+}
+
+div.dataTables_scrollFoot > .dataTables_scrollFootInner {
+  box-sizing: content-box;
+}
+div.dataTables_scrollFoot > .dataTables_scrollFootInner > table {
+  margin-top: 0 !important;
+  border-top: none;
+}
+
+@media screen and (max-width: 767px) {
+  div.dataTables_wrapper div.dataTables_length,
+  div.dataTables_wrapper div.dataTables_filter,
+  div.dataTables_wrapper div.dataTables_info,
+  div.dataTables_wrapper div.dataTables_paginate {
+    text-align: center;
+  }
+  div.dataTables_wrapper div.dataTables_paginate ul.pagination {
+    justify-content: center !important;
+  }
+}
+table.dataTable.table-sm > thead > tr > th:not(.sorting_disabled) {
+  padding-right: 20px;
+}
+table.dataTable.table-sm .sorting:before,
+table.dataTable.table-sm .sorting_asc:before,
+table.dataTable.table-sm .sorting_desc:before {
+  top: 5px;
+  right: 0.85em;
+}
+table.dataTable.table-sm .sorting:after,
+table.dataTable.table-sm .sorting_asc:after,
+table.dataTable.table-sm .sorting_desc:after {
+  top: 5px;
+}
+
+table.table-bordered.dataTable {
+  border-right-width: 0;
+}
+table.table-bordered.dataTable th,
+table.table-bordered.dataTable td {
+  border-left-width: 0;
+}
+table.table-bordered.dataTable th:last-child, table.table-bordered.dataTable th:last-child,
+table.table-bordered.dataTable td:last-child,
+table.table-bordered.dataTable td:last-child {
+  border-right-width: 1px;
+}
+table.table-bordered.dataTable tbody th,
+table.table-bordered.dataTable tbody td {
+  border-bottom-width: 0;
+}
+
+div.dataTables_scrollHead table.table-bordered {
+  border-bottom-width: 0;
+}
+
+div.table-responsive > div.dataTables_wrapper > div.row {
+  margin: 0;
+}
+div.table-responsive > div.dataTables_wrapper > div.row > div[class^=col-]:first-child {
+  padding-left: 0;
+}
+div.table-responsive > div.dataTables_wrapper > div.row > div[class^=col-]:last-child {
+  padding-right: 0;
+}
+
|
|
||||||
15583
src/static/scripts/datatables.js
Normal file
8777
src/static/scripts/jquery-3.5.1.slim.js
Normal file
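DataTables 1.10 is a jQuery plugin, which is why the jQuery 3.5.1 slim build is bundled alongside it here; the template changes below wire both files in and then enhance the plain admin tables. A minimal, illustrative initialization of the same kind (the "#example" table id is a placeholder, not something from this changeset):

    // Assumes datatables.js and jquery-3.5.1.slim.js are already loaded.
    document.addEventListener("DOMContentLoaded", function () {
        // "#example" stands in for any plain <table> already present in the DOM.
        $('#example').DataTable({
            "lengthMenu": [ [-1, 10, 25], ["All", 10, 25] ],
            "pageLength": -1 // -1 picks the "All" entry, i.e. show every row by default
        });
    });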
@@ -4,6 +4,7 @@
     <meta http-equiv="content-type" content="text/html; charset=UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no" />
     <meta name="robots" content="noindex,nofollow" />
+    <link rel="icon" type="image/png" href="{{urlpath}}/bwrs_static/shield-white.png">
     <title>Bitwarden_rs Admin Panel</title>
     <link rel="stylesheet" href="{{urlpath}}/bwrs_static/bootstrap.css" />
     <style>
@@ -73,7 +74,7 @@
 
 <body class="bg-light">
     <nav class="navbar navbar-expand-md navbar-dark bg-dark mb-4 shadow fixed-top">
-        <div class="container">
+        <div class="container-xl">
             <a class="navbar-brand" href="{{urlpath}}/admin"><img class="pr-1" src="{{urlpath}}/bwrs_static/shield-white.png">Bitwarden_rs Admin</a>
             <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarCollapse"
                 aria-controls="navbarCollapse" aria-expanded="false" aria-label="Toggle navigation">
@@ -96,7 +97,7 @@
                     </li>
                     {{/if}}
                     <li class="nav-item">
-                        <a class="nav-link" href="{{urlpath}}/">Vault</a>
+                        <a class="nav-link" href="{{urlpath}}/" target="_blank" rel="noreferrer">Vault</a>
                     </li>
                 </ul>
 
@@ -122,6 +123,6 @@
     })();
     </script>
     <!-- This script needs to be at the bottom, else it will fail! -->
-    <script src="{{urlpath}}/bwrs_static/bootstrap-native-v4.js"></script>
+    <script src="{{urlpath}}/bwrs_static/bootstrap-native.js"></script>
 </body>
 </html>
@@ -1,4 +1,4 @@
-<main class="container">
+<main class="container-xl">
     <div id="diagnostics-block" class="my-3 p-3 bg-white rounded shadow">
         <h6 class="border-bottom pb-2 mb-2">Diagnostics</h6>
 
@@ -15,7 +15,7 @@
                     <span id="server-installed">{{version}}</span>
                 </dd>
                 <dt class="col-sm-5">Server Latest
-                    <span class="badge badge-danger d-none" id="server-failed" title="Unable to determine latest version.">Unknown</span>
+                    <span class="badge badge-secondary d-none" id="server-failed" title="Unable to determine latest version.">Unknown</span>
                 </dt>
                 <dd class="col-sm-7">
                     <span id="server-latest">{{diagnostics.latest_release}}<span id="server-latest-commit" class="d-none">-{{diagnostics.latest_commit}}</span></span>
@@ -27,12 +27,14 @@
                 <dd class="col-sm-7">
                     <span id="web-installed">{{diagnostics.web_vault_version}}</span>
                 </dd>
+                {{#unless diagnostics.running_within_docker}}
                 <dt class="col-sm-5">Web Latest
-                    <span class="badge badge-danger d-none" id="web-failed" title="Unable to determine latest version.">Unknown</span>
+                    <span class="badge badge-secondary d-none" id="web-failed" title="Unable to determine latest version.">Unknown</span>
                 </dt>
                 <dd class="col-sm-7">
                     <span id="web-latest">{{diagnostics.latest_web_build}}</span>
                 </dd>
+                {{/unless}}
             </dl>
         </div>
     </div>
@@ -41,6 +43,40 @@
         <div class="row">
             <div class="col-md">
                 <dl class="row">
+                    <dt class="col-sm-5">Running within Docker</dt>
+                    <dd class="col-sm-7">
+                        {{#if diagnostics.running_within_docker}}
+                        <span id="running-docker" class="d-block"><b>Yes</b></span>
+                        {{/if}}
+                        {{#unless diagnostics.running_within_docker}}
+                        <span id="running-docker" class="d-block"><b>No</b></span>
+                        {{/unless}}
+                    </dd>
+                    <dt class="col-sm-5">Uses a proxy</dt>
+                    <dd class="col-sm-7">
+                        {{#if diagnostics.uses_proxy}}
+                        <span id="running-docker" class="d-block"><b>Yes</b></span>
+                        {{/if}}
+                        {{#unless diagnostics.uses_proxy}}
+                        <span id="running-docker" class="d-block"><b>No</b></span>
+                        {{/unless}}
+                    </dd>
+                    <dt class="col-sm-5">Internet access
+                        {{#if diagnostics.has_http_access}}
+                        <span class="badge badge-success" id="internet-success" title="We have internet access!">Ok</span>
+                        {{/if}}
+                        {{#unless diagnostics.has_http_access}}
+                        <span class="badge badge-danger" id="internet-warning" title="There seems to be no internet access. Please fix.">Error</span>
+                        {{/unless}}
+                    </dt>
+                    <dd class="col-sm-7">
+                        {{#if diagnostics.has_http_access}}
+                        <span id="running-docker" class="d-block"><b>Yes</b></span>
+                        {{/if}}
+                        {{#unless diagnostics.has_http_access}}
+                        <span id="running-docker" class="d-block"><b>No</b></span>
+                        {{/unless}}
+                    </dd>
                     <dt class="col-sm-5">DNS (github.com)
                         <span class="badge badge-success d-none" id="dns-success" title="DNS Resolving works!">Ok</span>
                         <span class="badge badge-danger d-none" id="dns-warning" title="DNS Resolving failed. Please fix.">Error</span>
@@ -57,6 +93,46 @@
                         <span id="time-server" class="d-block"><b>Server:</b> <span id="time-server-string">{{diagnostics.server_time}}</span></span>
                         <span id="time-browser" class="d-block"><b>Browser:</b> <span id="time-browser-string"></span></span>
                     </dd>
+
+                    <dt class="col-sm-5">Domain configuration
+                        <span class="badge badge-success d-none" id="domain-success" title="The domain variable matches the browser location and seems to be configured correctly.">Match</span>
+                        <span class="badge badge-danger d-none" id="domain-warning" title="The domain variable does not matches the browsers location. The domain variable does not seem to be configured correctly. Some features may not work as expected!">No Match</span>
+                        <span class="badge badge-success d-none" id="https-success" title="Configurued to use HTTPS">HTTPS</span>
+                        <span class="badge badge-danger d-none" id="https-warning" title="Not configured to use HTTPS. Some features may not work as expected!">No HTTPS</span>
+                    </dt>
+                    <dd class="col-sm-7">
+                        <span id="domain-server" class="d-block"><b>Server:</b> <span id="domain-server-string">{{diagnostics.admin_url}}</span></span>
+                        <span id="domain-browser" class="d-block"><b>Browser:</b> <span id="domain-browser-string"></span></span>
+                    </dd>
+                </dl>
+            </div>
+        </div>
+
+        <h3>Support</h3>
+        <div class="row">
+            <div class="col-md">
+                <dl class="row">
+                    <dd class="col-sm-12">
+                        If you need support please check the following links first before you create a new issue:
+                        <a href="https://bitwardenrs.discourse.group/" target="_blank" rel="noreferrer">Bitwarden_RS Forum</a>
+                        | <a href="https://github.com/dani-garcia/bitwarden_rs/discussions" target="_blank" rel="noreferrer">Github Discussions</a>
+                    </dd>
+                </dl>
+                <dl class="row">
+                    <dd class="col-sm-12">
+                        You can use the button below to pre-generate a string which you can copy/paste on either the Forum or when Creating a new issue at Github.<br>
+                        We try to hide the most sensitive values from the generated support string by default, but please verify if there is nothing in there which you want to hide!<br>
+                    </dd>
+                </dl>
+                <dl class="row">
+                    <dt class="col-sm-3">
+                        <button type="button" id="gen-support" class="btn btn-primary" onclick="generateSupportString(); return false;">Generate Support String</button>
+                        <br><br>
+                        <button type="button" id="copy-support" class="btn btn-info d-none" onclick="copyToClipboard(); return false;">Copy To Clipboard</button>
+                    </dt>
+                    <dd class="col-sm-9">
+                        <pre id="support-string" class="pre-scrollable d-none" style="width: 100%; height: 16em; size: 0.6em; border: 1px solid; padding: 4px;"></pre>
+                    </dd>
                 </dl>
             </div>
         </div>
@@ -64,7 +140,13 @@
 </main>
 
 <script>
+    dnsCheck = false;
+    timeCheck = false;
+    domainCheck = false;
+    httpsCheck = false;
     (() => {
+        // ================================
+        // Date & Time Check
         const d = new Date();
         const year = d.getUTCFullYear();
         const month = String(d.getUTCMonth()+1).padStart(2, '0');
@@ -72,7 +154,7 @@
         const hour = String(d.getUTCHours()).padStart(2, '0');
         const minute = String(d.getUTCMinutes()).padStart(2, '0');
         const seconds = String(d.getUTCSeconds()).padStart(2, '0');
-        const browserUTC = year + '-' + month + '-' + day + ' ' + hour + ':' + minute + ':' + seconds;
+        const browserUTC = `${year}-${month}-${day} ${hour}:${minute}:${seconds} UTC`;
         document.getElementById("time-browser-string").innerText = browserUTC;
 
         const serverUTC = document.getElementById("time-server-string").innerText;
@@ -81,16 +163,21 @@
             document.getElementById('time-warning').classList.remove('d-none');
         } else {
             document.getElementById('time-success').classList.remove('d-none');
+            timeCheck = true;
         }
 
+        // ================================
         // Check if the output is a valid IP
         const isValidIp = value => (/^(?:(?:^|\.)(?:2(?:5[0-5]|[0-4]\d)|1?\d?\d)){4}$/.test(value) ? true : false);
        if (isValidIp(document.getElementById('dns-resolved').innerText)) {
             document.getElementById('dns-success').classList.remove('d-none');
+            dnsCheck = true;
         } else {
             document.getElementById('dns-warning').classList.remove('d-none');
         }
 
+        // ================================
+        // Version check for both bitwarden_rs and web-vault
         let serverInstalled = document.getElementById('server-installed').innerText;
         let serverLatest = document.getElementById('server-latest').innerText;
         let serverLatestCommit = document.getElementById('server-latest-commit').innerText.replace('-', '');
@@ -99,10 +186,12 @@
         }
 
         const webInstalled = document.getElementById('web-installed').innerText;
-        const webLatest = document.getElementById('web-latest').innerText;
 
         checkVersions('server', serverInstalled, serverLatest, serverLatestCommit);
 
+        {{#unless diagnostics.running_within_docker}}
+        const webLatest = document.getElementById('web-latest').innerText;
         checkVersions('web', webInstalled, webLatest);
+        {{/unless}}
 
         function checkVersions(platform, installed, latest, commit=null) {
             if (installed === '-' || latest === '-') {
@@ -146,5 +235,68 @@
             }
         }
     }
+
+    // ================================
+    // Check valid DOMAIN configuration
+    document.getElementById('domain-browser-string').innerText = location.href.toLowerCase();
+    if (document.getElementById('domain-server-string').innerText.toLowerCase() == location.href.toLowerCase()) {
+        document.getElementById('domain-success').classList.remove('d-none');
+        domainCheck = true;
+    } else {
+        document.getElementById('domain-warning').classList.remove('d-none');
+    }
+
+    // Check for HTTPS at domain-server-string
+    if (document.getElementById('domain-server-string').innerText.toLowerCase().startsWith('https://') ) {
+        document.getElementById('https-success').classList.remove('d-none');
+        httpsCheck = true;
+    } else {
+        document.getElementById('https-warning').classList.remove('d-none');
+    }
     })();
-</script>
+
+    // ================================
+    // Generate support string to be pasted on github or the forum
+    async function generateSupportString() {
+        supportString = "### Your environment (Generated via diagnostics page)\n";
+
+        supportString += "* Bitwarden_rs version: v{{ version }}\n";
+        supportString += "* Web-vault version: v{{ diagnostics.web_vault_version }}\n";
+        supportString += "* Running within Docker: {{ diagnostics.running_within_docker }}\n";
+        supportString += "* Internet access: {{ diagnostics.has_http_access }}\n";
+        supportString += "* Uses a proxy: {{ diagnostics.uses_proxy }}\n";
+        supportString += "* DNS Check: " + dnsCheck + "\n";
+        supportString += "* Time Check: " + timeCheck + "\n";
+        supportString += "* Domain Configuration Check: " + domainCheck + "\n";
+        supportString += "* HTTPS Check: " + httpsCheck + "\n";
+        supportString += "* Database type: {{ diagnostics.db_type }}\n";
+        {{#case diagnostics.db_type "MySQL" "PostgreSQL"}}
+        supportString += "* Database version: [PLEASE PROVIDE DATABASE VERSION]\n";
+        {{/case}}
+        supportString += "* Clients used: \n";
+        supportString += "* Reverse proxy and version: \n";
+        supportString += "* Other relevant information: \n";
+
+        jsonResponse = await fetch('{{urlpath}}/admin/diagnostics/config');
+        configJson = await jsonResponse.json();
+        supportString += "\n### Config (Generated via diagnostics page)\n```json\n" + JSON.stringify(configJson, undefined, 2) + "\n```\n";
+
+        document.getElementById('support-string').innerText = supportString;
+        document.getElementById('support-string').classList.remove('d-none');
+        document.getElementById('copy-support').classList.remove('d-none');
+    }
+
+    function copyToClipboard() {
+        const str = document.getElementById('support-string').innerText;
+        const el = document.createElement('textarea');
+        el.value = str;
+        el.setAttribute('readonly', '');
+        el.style.position = 'absolute';
+        el.style.left = '-9999px';
+        document.body.appendChild(el);
+        el.select();
+        document.execCommand('copy');
+        document.body.removeChild(el);
+    }
+
+</script>
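The support-string generator above relies on a custom {{#case}} Handlebars block helper, e.g. {{#case diagnostics.db_type "MySQL" "PostgreSQL"}}…{{/case}}, whose definition is not part of this diff; judging by its usage here and in the settings template, it renders its body only when the first value equals one of the listed options. A hypothetical JavaScript registration with those inferred semantics (the real helper is presumably registered server-side):

    // Sketch only: inferred semantics of the {{#case}} helper used by these templates.
    Handlebars.registerHelper('case', function (...args) {
        const options = args.pop();   // Handlebars passes the options object last
        const value = args.shift();   // first argument: the value under test
        return args.includes(value) ? options.fn(this) : '';
    });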
@@ -1,4 +1,4 @@
-<main class="container">
+<main class="container-xl">
     {{#if error}}
     <div class="align-items-center p-3 mb-3 text-white-50 bg-warning rounded shadow">
         <div>
@@ -1,27 +1,30 @@
-<main class="container">
+<main class="container-xl">
     <div id="organizations-block" class="my-3 p-3 bg-white rounded shadow">
-        <h6 class="border-bottom pb-2 mb-0">Organizations</h6>
+        <h6 class="border-bottom pb-2 mb-3">Organizations</h6>
 
         <div class="table-responsive-xl small">
-            <table class="table table-sm table-striped table-hover">
+            <table id="orgs-table" class="table table-sm table-striped table-hover">
                 <thead>
                     <tr>
-                        <th style="width: 24px;" colspan="2">Organization</th>
+                        <th>Organization</th>
                         <th>Users</th>
                         <th>Items</th>
                         <th>Attachments</th>
+                        <th style="width: 120px; min-width: 120px;">Actions</th>
                     </tr>
                 </thead>
                 <tbody>
                     {{#each organizations}}
                     <tr>
-                        <td><img class="rounded identicon" data-src="{{Id}}"></td>
                         <td>
-                            <strong>{{Name}}</strong>
-                            <span class="mr-2">({{BillingEmail}})</span>
-                            <span class="d-block">
-                                <span class="badge badge-success">{{Id}}</span>
-                            </span>
+                            <img class="mr-2 float-left rounded identicon" data-src="{{Id}}">
+                            <div class="float-left">
+                                <strong>{{Name}}</strong>
+                                <span class="mr-2">({{BillingEmail}})</span>
+                                <span class="d-block">
+                                    <span class="badge badge-success">{{Id}}</span>
+                                </span>
+                            </div>
                         </td>
                         <td>
                             <span class="d-block">{{user_count}}</span>
@@ -35,17 +38,53 @@
                             <span class="d-block"><strong>Size:</strong> {{attachment_size}}</span>
                             {{/if}}
                         </td>
+                        <td style="font-size: 90%; text-align: right; padding-right: 15px">
+                            <a class="d-block" href="#" onclick='deleteOrganization({{jsesc Id}}, {{jsesc Name}}, {{jsesc BillingEmail}})'>Delete Organization</a>
+                        </td>
                     </tr>
                     {{/each}}
                 </tbody>
             </table>
         </div>
 
     </div>
 </main>
 
+<link rel="stylesheet" href="{{urlpath}}/bwrs_static/datatables.css" />
+<script src="{{urlpath}}/bwrs_static/jquery-3.5.1.slim.js"></script>
+<script src="{{urlpath}}/bwrs_static/datatables.js"></script>
 <script>
+    function deleteOrganization(id, name, billing_email) {
+        // First make sure the user wants to delete this organization
+        var continueDelete = confirm("WARNING: All data of this organization ("+ name +") will be lost!\nMake sure you have a backup, this cannot be undone!");
+        if (continueDelete == true) {
+            var input_org_uuid = prompt("To delete the organization '" + name + " (" + billing_email +")', please type the organization uuid below.")
+            if (input_org_uuid != null) {
+                if (input_org_uuid == id) {
+                    _post("{{urlpath}}/admin/organizations/" + id + "/delete",
+                        "Organization deleted correctly",
+                        "Error deleting organization");
+                } else {
+                    alert("Wrong organization uuid, please try again")
+                }
+            }
+        }
+
+        return false;
+    }
+
     document.querySelectorAll("img.identicon").forEach(function (e, i) {
         e.src = identicon(e.dataset.src);
     });
+
+    document.addEventListener("DOMContentLoaded", function(event) {
+        $('#orgs-table').DataTable({
+            "responsive": true,
+            "lengthMenu": [ [-1, 5, 10, 25, 50], ["All", 5, 10, 25, 50] ],
+            "pageLength": -1, // Default show all
+            "columnDefs": [
+                { "targets": 4, "searchable": false, "orderable": false }
+            ]
+        });
+    });
 </script>
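deleteOrganization() above (like the user actions later in this changeset) delegates to a shared _post(url, successMsg, errMsg, data) helper that is defined elsewhere in the admin templates and is not touched by this diff. A rough sketch of what such a helper plausibly looks like; the names and exact behavior here are assumptions, not code from this changeset:

    // Hypothetical stand-in for the _post helper these templates call.
    function _post(url, successMsg, errMsg, body = null) {
        fetch(url, {
            method: 'POST',
            body: body, // a JSON string, or null for parameter-less actions
            credentials: 'same-origin',
            headers: { 'Content-Type': 'application/json' },
        })
        .then(resp => alert(resp.ok ? successMsg : errMsg))
        .catch(() => alert(errMsg));
    }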
@@ -1,4 +1,4 @@
-<main class="container">
+<main class="container-xl">
     <div id="config-block" class="align-items-center p-3 mb-3 bg-secondary rounded shadow">
         <div>
             <h6 class="text-white mb-3">Configuration</h6>
@@ -17,7 +17,7 @@
         <div id="g_{{group}}" class="card-body collapse" data-parent="#config-form">
             {{#each elements}}
             {{#if editable}}
-            <div class="form-group row" title="[{{name}}] {{doc.description}}">
+            <div class="form-group row align-items-center" title="[{{name}}] {{doc.description}}">
                 {{#case type "text" "number" "password"}}
                 <label for="input_{{name}}" class="col-sm-3 col-form-label">{{doc.name}}</label>
                 <div class="col-sm-8 input-group">
@@ -34,7 +34,7 @@
                 </div>
                 {{/case}}
                 {{#case type "checkbox"}}
-                <div class="col-sm-3">{{doc.name}}</div>
+                <div class="col-sm-3 col-form-label">{{doc.name}}</div>
                 <div class="col-sm-8">
                     <div class="form-check">
                         <input class="form-check-input conf-{{type}}" type="checkbox" id="input_{{name}}"
@@ -48,7 +48,7 @@
             {{/if}}
             {{/each}}
             {{#case group "smtp"}}
-            <div class="form-group row pt-3 border-top" title="Send a test email to given email address">
+            <div class="form-group row align-items-center pt-3 border-top" title="Send a test email to given email address">
                 <label for="smtp-test-email" class="col-sm-3 col-form-label">Test SMTP</label>
                 <div class="col-sm-8 input-group">
                     <input class="form-control" id="smtp-test-email" type="email" placeholder="Enter test email">
@@ -76,7 +76,7 @@
         {{#each config}}
         {{#each elements}}
         {{#unless editable}}
-        <div class="form-group row" title="[{{name}}] {{doc.description}}">
+        <div class="form-group row align-items-center" title="[{{name}}] {{doc.description}}">
             {{#case type "text" "number" "password"}}
             <label for="input_{{name}}" class="col-sm-3 col-form-label">{{doc.name}}</label>
             <div class="col-sm-8 input-group">
@@ -92,9 +92,9 @@
             </div>
             {{/case}}
             {{#case type "checkbox"}}
-            <div class="col-sm-3">{{doc.name}}</div>
+            <div class="col-sm-3 col-form-label">{{doc.name}}</div>
             <div class="col-sm-8">
-                <div class="form-check">
+                <div class="form-check align-middle">
                     <input disabled class="form-check-input" type="checkbox" id="input_{{name}}"
                         {{#if value}} checked {{/if}}>
 
@@ -139,6 +139,10 @@
 
 <script>
     function smtpTest() {
+        if (formHasChanges(config_form)) {
+            alert("Config has been changed but not yet saved.\nPlease save the changes first before sending a test email.");
+            return false;
+        }
         test_email = document.getElementById("smtp-test-email");
         data = JSON.stringify({ "email": test_email.value });
         _post("{{urlpath}}/admin/test/smtp/",
@@ -205,4 +209,35 @@
     // {{#each config}} {{#if grouptoggle}}
     masterCheck("input_{{grouptoggle}}", "#g_{{group}} input");
     // {{/if}} {{/each}}
+
+    // Two functions to help check if there were changes to the form fields
+    // Useful for example during the smtp test to prevent people from clicking save before testing there new settings
+    function initChangeDetection(form) {
+        const ignore_fields = ["smtp-test-email"];
+        Array.from(form).forEach((el) => {
+            if (! ignore_fields.includes(el.id)) {
+                el.dataset.origValue = el.value
+            }
+        });
+    }
+    function formHasChanges(form) {
+        return Array.from(form).some(el => 'origValue' in el.dataset && ( el.dataset.origValue !== el.value));
+    }
+
+    // Trigger Form Change Detection
+    const config_form = document.getElementById('config-form');
+    initChangeDetection(config_form);
+
+    // Colorize some settings which are high risk
+    const risk_items = document.getElementsByClassName('col-form-label');
+    function colorRiskSettings(risk_el) {
+        Array.from(risk_el).forEach((el) => {
+            if (el.innerText.toLowerCase().includes('risks') ) {
+                el.parentElement.className += ' alert-danger'
+                console.log(el)
+            }
+        });
+    }
+    colorRiskSettings(risk_items);
+
 </script>
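A note on the change-detection trick added above: assigning el.dataset.origValue writes a data-orig-value attribute onto the element itself, so the saved value travels with the DOM node and formHasChanges() can later compare it against the live value. A minimal standalone illustration of the same pattern (the element and values are made up):

    // Store the initial value on the node, then compare against the live value.
    const field = document.createElement('input');
    field.value = 'smtp.example.com';        // pretend this came from the saved config
    field.dataset.origValue = field.value;   // serialized as data-orig-value="smtp.example.com"

    field.value = 'mail.example.com';        // user edits the field
    const changed = 'origValue' in field.dataset
        && field.dataset.origValue !== field.value;
    console.log(changed);                    // -> true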
@@ -1,37 +1,49 @@
-<main class="container">
+<main class="container-xl">
     <div id="users-block" class="my-3 p-3 bg-white rounded shadow">
-        <h6 class="border-bottom pb-2 mb-0">Registered Users</h6>
+        <h6 class="border-bottom pb-2 mb-3">Registered Users</h6>
 
         <div class="table-responsive-xl small">
-            <table class="table table-sm table-striped table-hover">
+            <table id="users-table" class="table table-sm table-striped table-hover">
                 <thead>
                     <tr>
-                        <th style="width: 24px;">User</th>
-                        <th></th>
-                        <th style="width:60px; min-width: 60px;">Items</th>
+                        <th>User</th>
+                        <th style="width:65px; min-width: 65px;">Created at</th>
+                        <th style="width:70px; min-width: 65px;">Last Active</th>
+                        <th style="width:35px; min-width: 35px;">Items</th>
                         <th>Attachments</th>
-                        <th style="min-width: 140px;">Organizations</th>
-                        <th style="width: 140px; min-width: 140px;">Actions</th>
+                        <th style="min-width: 120px;">Organizations</th>
+                        <th style="width: 120px; min-width: 120px;">Actions</th>
                     </tr>
                 </thead>
                 <tbody>
                     {{#each users}}
                     <tr>
-                        <td><img class="mr-2 rounded identicon" data-src="{{Email}}"></td>
                         <td>
-                            <strong>{{Name}}</strong>
-                            <span class="d-block">{{Email}}</span>
-                            <span class="d-block">
-                            {{#if TwoFactorEnabled}}
-                                <span class="badge badge-success mr-2" title="2FA is enabled">2FA</span>
-                            {{/if}}
-                            {{#case _Status 1}}
-                                <span class="badge badge-warning mr-2" title="User is invited">Invited</span>
-                            {{/case}}
-                            {{#if EmailVerified}}
-                                <span class="badge badge-success mr-2" title="Email has been verified">Verified</span>
-                            {{/if}}
-                            </span>
+                            <img class="float-left mr-2 rounded identicon" data-src="{{Email}}">
+                            <div class="float-left">
+                                <strong>{{Name}}</strong>
+                                <span class="d-block">{{Email}}</span>
+                                <span class="d-block">
+                                {{#unless user_enabled}}
+                                    <span class="badge badge-danger mr-2" title="User is disabled">Disabled</span>
+                                {{/unless}}
+                                {{#if TwoFactorEnabled}}
+                                    <span class="badge badge-success mr-2" title="2FA is enabled">2FA</span>
+                                {{/if}}
+                                {{#case _Status 1}}
+                                    <span class="badge badge-warning mr-2" title="User is invited">Invited</span>
+                                {{/case}}
+                                {{#if EmailVerified}}
+                                    <span class="badge badge-success mr-2" title="Email has been verified">Verified</span>
+                                {{/if}}
+                                </span>
+                            </div>
+                        </td>
+                        <td>
+                            <span class="d-block">{{created_at}}</span>
+                        </td>
+                        <td>
+                            <span class="d-block">{{last_active}}</span>
                         </td>
                         <td>
                             <span class="d-block">{{cipher_count}}</span>
@@ -43,9 +55,11 @@
                             {{/if}}
                         </td>
                         <td>
+                            <div class="overflow-auto" style="max-height: 120px;">
                             {{#each Organizations}}
-                                <span class="badge badge-primary" data-orgtype="{{Type}}">{{Name}}</span>
+                                <button class="badge badge-primary" data-toggle="modal" data-target="#userOrgTypeDialog" data-orgtype="{{Type}}" data-orguuid="{{jsesc Id no_quote}}" data-orgname="{{jsesc Name no_quote}}" data-useremail="{{jsesc ../Email no_quote}}" data-useruuid="{{jsesc ../Id no_quote}}">{{Name}}</button>
                             {{/each}}
+                            </div>
                         </td>
                         <td style="font-size: 90%; text-align: right; padding-right: 15px">
                             {{#if TwoFactorEnabled}}
@@ -53,6 +67,11 @@
                             {{/if}}
                             <a class="d-block" href="#" onclick='deauthUser({{jsesc Id}})'>Deauthorize sessions</a>
                             <a class="d-block" href="#" onclick='deleteUser({{jsesc Id}}, {{jsesc Email}})'>Delete User</a>
+                            {{#if user_enabled}}
+                            <a class="d-block" href="#" onclick='disableUser({{jsesc Id}}, {{jsesc Email}})'>Disable User</a>
+                            {{else}}
+                            <a class="d-block" href="#" onclick='enableUser({{jsesc Id}}, {{jsesc Email}})'>Enable User</a>
+                            {{/if}}
                         </td>
                     </tr>
                     {{/each}}
@@ -81,8 +100,46 @@
         </form>
     </div>
 </div>
+
+<div id="userOrgTypeDialog" class="modal fade" tabindex="-1" role="dialog" aria-hidden="true">
+    <div class="modal-dialog modal-dialog-centered modal-sm">
+        <div class="modal-content">
+            <div class="modal-header">
+                <h6 class="modal-title" id="userOrgTypeDialogTitle"></h6>
+                <button type="button" class="close" data-dismiss="modal" aria-label="Close">
+                    <span aria-hidden="true">×</span>
+                </button>
+            </div>
+            <form class="form" id="userOrgTypeForm" onsubmit="updateUserOrgType(); return false;">
+                <input type="hidden" name="user_uuid" id="userOrgTypeUserUuid" value="">
+                <input type="hidden" name="org_uuid" id="userOrgTypeOrgUuid" value="">
+                <div class="modal-body">
+                    <div class="radio">
+                        <label><input type="radio" value="2" class="form-radio-input" name="user_type" id="userOrgTypeUser"> User</label>
+                    </div>
+                    <div class="radio">
+                        <label><input type="radio" value="3" class="form-radio-input" name="user_type" id="userOrgTypeManager"> Manager</label>
+                    </div>
+                    <div class="radio">
+                        <label><input type="radio" value="1" class="form-radio-input" name="user_type" id="userOrgTypeAdmin"> Admin</label>
+                    </div>
+                    <div class="radio">
+                        <label><input type="radio" value="0" class="form-radio-input" name="user_type" id="userOrgTypeOwner"> Owner</label>
+                    </div>
+                </div>
+                <div class="modal-footer">
+                    <button type="button" class="btn btn-sm btn-secondary" data-dismiss="modal">Cancel</button>
+                    <button type="submit" class="btn btn-sm btn-primary">Change Role</button>
+                </div>
+            </form>
+        </div>
+    </div>
+</div>
 </main>
 
+<link rel="stylesheet" href="{{urlpath}}/bwrs_static/datatables.css" />
+<script src="{{urlpath}}/bwrs_static/jquery-3.5.1.slim.js"></script>
+<script src="{{urlpath}}/bwrs_static/datatables.js"></script>
 <script>
     function deleteUser(id, mail) {
         var input_mail = prompt("To delete user '" + mail + "', please type the email below")
@@ -109,6 +166,24 @@
             "Error deauthorizing sessions");
         return false;
     }
+    function disableUser(id, mail) {
+        var confirmed = confirm("Are you sure you want to disable user '" + mail + "'? This will also deauthorize their sessions.")
+        if (confirmed) {
+            _post("{{urlpath}}/admin/users/" + id + "/disable",
+                "User disabled successfully",
+                "Error disabling user");
+        }
+        return false;
+    }
+    function enableUser(id, mail) {
+        var confirmed = confirm("Are you sure you want to enable user '" + mail + "'?")
+        if (confirmed) {
+            _post("{{urlpath}}/admin/users/" + id + "/enable",
+                "User enabled successfully",
+                "Error enabling user");
+        }
+        return false;
+    }
     function updateRevisions() {
         _post("{{urlpath}}/admin/users/update_revision",
             "Success, clients will sync next time they connect",
@@ -140,4 +215,77 @@
         e.style.backgroundColor = orgtype.color;
         e.title = orgtype.name;
     });
+
+    // Special sort function to sort dates in ISO format
+    jQuery.extend( jQuery.fn.dataTableExt.oSort, {
+        "date-iso-pre": function ( a ) {
+            let x;
+            let sortDate = a.replace(/(<([^>]+)>)/gi, "").trim();
+            if ( sortDate !== '' ) {
+                let dtParts = sortDate.split(' ');
+                var timeParts = (undefined != dtParts[1]) ? dtParts[1].split(':') : [00,00,00];
+                var dateParts = dtParts[0].split('-');
+                x = (dateParts[0] + dateParts[1] + dateParts[2] + timeParts[0] + timeParts[1] + ((undefined != timeParts[2]) ? timeParts[2] : 0)) * 1;
+                if ( isNaN(x) ) {
+                    x = 0;
+                }
+            } else {
+                x = Infinity;
+            }
+            return x;
+        },
+
+        "date-iso-asc": function ( a, b ) {
+            return a - b;
+        },
+
+        "date-iso-desc": function ( a, b ) {
+            return b - a;
+        }
+    });
+
+    document.addEventListener("DOMContentLoaded", function(event) {
+        $('#users-table').DataTable({
+            "responsive": true,
+            "lengthMenu": [ [-1, 5, 10, 25, 50], ["All", 5, 10, 25, 50] ],
+            "pageLength": -1, // Default show all
+            "columnDefs": [
+                { "targets": [1,2], "type": "date-iso" },
+                { "targets": 6, "searchable": false, "orderable": false }
+            ]
+        });
+    });
+
+    var userOrgTypeDialog = document.getElementById('userOrgTypeDialog');
+    // Fill the form and title
+    userOrgTypeDialog.addEventListener('show.bs.modal', function(event){
+        let userOrgType = event.relatedTarget.getAttribute("data-orgtype");
+        let userOrgTypeName = OrgTypes[userOrgType]["name"];
+        let orgName = event.relatedTarget.getAttribute("data-orgname");
+        let userEmail = event.relatedTarget.getAttribute("data-useremail");
+        let orgUuid = event.relatedTarget.getAttribute("data-orguuid");
+        let userUuid = event.relatedTarget.getAttribute("data-useruuid");
+
+        document.getElementById("userOrgTypeDialogTitle").innerHTML = "<b>Update User Type:</b><br><b>Organization:</b> " + orgName + "<br><b>User:</b> " + userEmail;
+        document.getElementById("userOrgTypeUserUuid").value = userUuid;
+        document.getElementById("userOrgTypeOrgUuid").value = orgUuid;
+        document.getElementById("userOrgType"+userOrgTypeName).checked = true;
+    }, false);
+
+    // Prevent accidental submission of the form with valid elements after the modal has been hidden.
+    userOrgTypeDialog.addEventListener('hide.bs.modal', function(event){
+        document.getElementById("userOrgTypeDialogTitle").innerHTML = '';
+        document.getElementById("userOrgTypeUserUuid").value = '';
+        document.getElementById("userOrgTypeOrgUuid").value = '';
+    }, false);
+
+    function updateUserOrgType() {
+        let orgForm = document.getElementById("userOrgTypeForm");
+        const data = JSON.stringify(Object.fromEntries(new FormData(orgForm).entries()));
+
+        _post("{{urlpath}}/admin/users/org_type",
+            "Updated organization type of the user successfully",
+            "Error updating organization type of the user", data);
+        return false;
+    }
 </script>
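To make the "Created at" and "Last Active" columns sortable, the date-iso-pre function above reduces each cell to a single number before DataTables compares rows. A small standalone walk-through of that key computation (the sample timestamp is made up):

    // Same key computation as date-iso-pre, applied to one sample cell value.
    const cell = "<span>2020-12-14 13:07:56</span>";
    const sortDate = cell.replace(/(<([^>]+)>)/gi, "").trim(); // strip tags -> "2020-12-14 13:07:56"
    const dtParts = sortDate.split(' ');                       // ["2020-12-14", "13:07:56"]
    const timeParts = dtParts[1].split(':');                   // ["13", "07", "56"]
    const dateParts = dtParts[0].split('-');                   // ["2020", "12", "14"]
    // Concatenate the pieces and coerce the string to a number with "* 1":
    const key = (dateParts[0] + dateParts[1] + dateParts[2]
               + timeParts[0] + timeParts[1] + timeParts[2]) * 1;
    console.log(key); // -> 20201214130756, which sorts correctly as a plain number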
@@ -1,6 +1,8 @@
 Your Email Change
 <!---------------->
-<html>
-<p>To finalize changing your email address enter the following code in web vault: <b>{{token}}</b></p>
-<p>If you did not try to change an email address, you can safely ignore this email.</p>
-</html>
+To finalize changing your email address enter the following code in web vault: {{token}}
+
+If you did not try to change an email address, you can safely ignore this email.
+
+===
+Github: https://github.com/dani-garcia/bitwarden_rs
@@ -1,12 +1,10 @@
 Delete Your Account
 <!---------------->
-<html>
-<p>
-click the link below to delete your account.
-<br>
-<br>
-<a href="{{url}}/#/verify-recover-delete?userId={{user_id}}&token={{token}}&email={{email}}">
-Delete Your Account</a>
-</p>
-<p>If you did not request this email to delete your account, you can safely ignore this email.</p>
-</html>
+Click the link below to delete your account.
+
+Delete Your Account: {{url}}/#/verify-recover-delete?userId={{user_id}}&token={{token}}&email={{email}}
+
+If you did not request this email to delete your account, you can safely ignore this email.
+
+===
+Github: https://github.com/dani-garcia/bitwarden_rs