Compare commits

...

194 Commits

Author SHA1 Message Date
48080344ea Merge branch 'upstream' 2024-02-05 12:25:29 +01:00
6a16176c42 Merge branch 'max' of ssh://git.xolus.net/max/ap-relay 2024-02-05 12:24:18 +01:00
69c76019cf Bump deps 2024-02-05 12:22:23 +01:00
asonix
c8250acce7 Bump version 2024-02-05 00:25:15 -06:00
asonix
b074759eb4 Update background-jobs, rework errors 2024-02-05 00:24:49 -06:00
asonix
ed399f1531 Be more accurate for reqwest errors 2024-02-04 20:51:25 -06:00
asonix
7e39acdcb0 Update config 2024-02-04 20:28:18 -06:00
asonix
894d096622 Bump version 2024-02-04 20:25:59 -06:00
asonix
05e31254ba Update rustls for actix-web, log less 2024-02-04 20:25:50 -06:00
asonix
086ca9fbf2 Support live-reloading TLS certificate 2024-01-31 16:49:23 -06:00
9433f36cc5 Fr line 2024-01-19 17:27:58 +01:00
asonix
603fcc6e57 Bump version 2024-01-18 13:35:00 -05:00
asonix
6b8f15ee08 Use stable background-jobs 2024-01-18 13:34:10 -05:00
asonix
53939f8ae8 Go back to job-server per core 2024-01-18 12:31:26 -05:00
asonix
b53b34c515 Update dependencies (minor & point) 2024-01-14 16:16:56 -05:00
asonix
6dcdf2fc87 clippy 2024-01-14 16:10:32 -05:00
asonix
83e5619eb4 Update flake.lock 2024-01-14 16:10:19 -05:00
asonix
9090bb5c62 Bump version 2024-01-14 15:59:16 -05:00
asonix
d862bf8106 Use tokio rather than actix-rt 2024-01-14 15:56:07 -05:00
asonix
417553e643 Bump version 2024-01-09 18:09:51 -06:00
asonix
a2456c3d5f Update dependencies (minor & point) 2024-01-09 18:08:10 -06:00
asonix
2b3cb8db92 clippy 2024-01-08 17:10:31 -06:00
asonix
18f1096221 Update version 2024-01-08 17:06:02 -06:00
asonix
c640567206 Update to newest background-jobs, implement Job rather than ActixJob 2024-01-08 17:00:15 -06:00
asonix
36aa9120ea Update metrics 2024-01-07 12:43:58 -06:00
asonix
e377f3988b Update minify-html, dependencies (minor & point) 2024-01-07 12:10:43 -06:00
asonix
8c811710ac Bump version 2023-11-25 21:27:05 -06:00
asonix
e4f665d75f use stable async-cpupool 2023-11-25 21:17:59 -06:00
asonix
4383357abe update flake 2023-11-25 20:27:20 -06:00
asonix
f70af22c6a clippy 2023-11-25 20:27:11 -06:00
asonix
8bce3d172f Update streem 2023-11-25 20:20:38 -06:00
asonix
8540e93469 Use async-cpupool 2023-11-25 20:18:11 -06:00
asonix
708e7da301 Update opentelemetry, ring, http-signature-normalization, tracing-log 2023-11-25 20:16:13 -06:00
8f89be0a59 Fix render breakage in template 2023-11-21 19:13:03 +01:00
dc6ec63b06 Merge remote-tracking branch 'hobbiton/max' 2023-11-21 19:11:02 +01:00
4061f96f91 Merge remote-tracking branch 'hobbiton/max' 2023-11-21 19:10:56 +01:00
b8728c9440 Merge branch 'upstream' 2023-11-21 18:44:11 +01:00
asonix
a0f9827e18 Bump version 2023-09-09 18:10:31 -04:00
asonix
9ebed87cde Update http-signature-normalization-actix 2023-09-09 18:09:24 -04:00
asonix
ae3d19a774 Bump version 2023-09-09 17:31:42 -04:00
asonix
2a5e769afb Update http-signature-normalization-actix 2023-09-09 17:30:07 -04:00
asonix
f4839d688e Update dependencies (minor & point) 2023-09-09 16:52:53 -04:00
asonix
206db2079f Remove futures-util dependency 2023-09-09 16:46:22 -04:00
asonix
6714fe48ed Bump version, enable tokio_unstable for console 2023-09-08 19:15:19 -06:00
asonix
804d22ee81 Enable different breaker failure cases for different endpoints
Additionally, don't count 4xx towards succeeding a breaker
2023-09-08 19:11:24 -06:00
asonix
5a6fbbcb77 Update tracing-opentelemetry 2023-09-08 18:41:55 -06:00
asonix
ea926f73c4 Update dependencies (minor & point) 2023-09-08 18:39:37 -06:00
asonix
53b14c3329 Bump version 2023-08-29 23:05:41 -05:00
asonix
9b1fad0e2e Update rustls 2023-08-29 22:15:41 -05:00
asonix
a8ba53fe11 Update flake 2023-08-26 12:20:36 -05:00
asonix
927fb91a5e Update flume 2023-08-17 17:11:08 -05:00
asonix
4d4093c15a Bump version 2023-08-17 17:10:24 -05:00
asonix
75df271b58 Switch from awc to reqwest, enable HTTP Proxies 2023-08-17 17:09:35 -05:00
asonix
73b429ab51 Update opentelemetry 2023-08-05 12:47:52 -05:00
asonix
2f57c855a4 Bump version 2023-08-04 19:01:05 -05:00
asonix
cdbde9519e Update dependencies (minor & point) 2023-08-04 18:57:53 -05:00
asonix
2cbe4864c3 Switch to ring for crypto 2023-08-04 18:57:53 -05:00
asonix
731a831070 Bump version 2023-07-28 17:47:51 -05:00
asonix
795d3238ad Hide nodes that failed breakers from index page 2023-07-28 17:46:23 -05:00
asonix
60abec2b96 Bump version 2023-07-27 13:48:01 -05:00
asonix
e63e1f975e Use verify spawner in routes 2023-07-27 13:39:31 -05:00
asonix
5430da58aa Update description in nix file 2023-07-27 13:14:46 -05:00
asonix
927f15c4ca Update dependencies (minor & point) 2023-07-27 13:11:00 -05:00
asonix
ef57576c57 Bump version 2023-07-27 13:10:00 -05:00
asonix
7438b0c5d0 Use verify spawner in all cases in verify path 2023-07-27 13:09:03 -05:00
asonix
f06316c6b2 Bump version 2023-07-27 12:20:41 -05:00
asonix
f86bbc95ae Pass spawner to digest middleware 2023-07-27 12:20:05 -05:00
asonix
a500824a7d Shorten thread names 2023-07-27 11:21:44 -05:00
asonix
433c981a21 Simplify < RATIO, bump version 2023-07-27 11:10:29 -05:00
asonix
f3ff8ae5f7 Split available signature threads between sign & verify 2023-07-27 11:08:20 -05:00
asonix
f24685e700 Allow naming spawner threads 2023-07-27 10:53:01 -05:00
asonix
5de244b848 Add complete to signature thread duration 2023-07-27 10:39:24 -05:00
asonix
769f7451f9 Simplify signature thread 2023-07-27 10:19:20 -05:00
asonix
fff9bf112d Bump version 2023-07-27 09:57:13 -05:00
asonix
05c266c23c Give verify & admin a different queue than deliver 2023-07-27 09:55:13 -05:00
asonix
2a7fed743f Bump version 2023-07-27 09:26:49 -05:00
asonix
240eee730c Add more metrics around spawn-blocking 2023-07-27 09:26:16 -05:00
asonix
8071c6ce3f Make signature threads configurable 2023-07-26 23:04:04 -05:00
asonix
78dcce5a08 Bump version 2023-07-26 22:52:13 -05:00
asonix
11d81683e3 Add logging around parallelism 2023-07-26 22:52:13 -05:00
asonix
5d526c60fe Clippy :( 2023-07-26 19:29:03 -05:00
asonix
73c7150f97 Use spawner for CPU-bound operations 2023-07-26 18:11:44 -05:00
asonix
7cfebd927e Bump version 2023-07-26 18:04:09 -05:00
asonix
d97cc4e5a4 Use custom threadpool for client signatures 2023-07-26 18:03:21 -05:00
asonix
8ff4961ded Bump version 2023-07-25 16:07:18 -05:00
asonix
970672a392 Make client timeout configurable 2023-07-25 16:06:56 -05:00
asonix
dfbd5c9035 Add deliver_concurrency to readme 2023-07-25 14:48:09 -05:00
asonix
d365e34f47 Bump version 2023-07-25 14:46:44 -05:00
asonix
de97adc2d6 Update dependencies (minor & point) 2023-07-25 14:45:46 -05:00
asonix
d1c6f6ff5d Make delivery concurrency configurable 2023-07-25 14:45:15 -05:00
asonix
582f311a20 Bump version 2023-07-24 13:20:09 -05:00
asonix
09436746c8 Update dependencies 2023-07-24 13:19:40 -05:00
asonix
a65ff19f6a Remove unneeded mut 2023-07-21 16:36:07 -05:00
asonix
bcdef5caa1 Don't clippy dependencies 2023-07-21 16:29:58 -05:00
asonix
4651fcc9d2 Update bcrypt, lru 2023-07-19 20:25:24 -05:00
asonix
fb6d8af1ca Update flake, dependencies 2023-07-19 20:23:44 -05:00
asonix
9779518dc1 Allow rel attribute in local & footer blurb
Patch from Jaehong Kang <sinoru@me.com>
2023-07-16 22:33:43 -05:00
1414cc518c Merge remote-tracking branch 'upstream/main' 2023-07-11 12:42:53 +02:00
asonix
7a00229508 Bump version, update docs 2023-06-23 15:15:27 -05:00
asonix
346664396c Run workers on handler threads 2023-06-23 15:08:59 -05:00
asonix
74f35faa22 Keep client in thread-local storage 2023-06-23 15:01:56 -05:00
asonix
e005adfcf8 Bump version 2023-06-23 14:32:14 -05:00
asonix
d40db33eb5 Don't drop and rebuild clients, share clients better 2023-06-23 14:27:20 -05:00
asonix
246e79b261 Bump version 2023-06-23 13:47:40 -05:00
asonix
8d565a1fbe Add ability to tweak client pool size 2023-06-23 13:46:13 -05:00
asonix
18ff2864a0 Update dependencies (minor & point) 2023-06-23 13:34:39 -05:00
asonix
4b71e56f31 Update nixpkgs 2023-06-23 13:34:21 -05:00
asonix
9b4f6b47a6 cargo update 2023-06-03 13:13:37 -05:00
asonix
5fa1d4983a Update nix 2023-06-03 13:10:19 -05:00
asonix
d69a80ebe8 Update dependencies, not rustls 2023-05-24 10:19:34 -05:00
asonix
a9a47e8ee2 Update flake 2023-04-27 19:54:15 -05:00
asonix
ab2dbfb439 Update metrics, rsa 2023-04-27 19:34:23 -05:00
8bcde3f4e2 Merge branch 'main' into max 2023-04-04 15:04:43 +02:00
asonix
73bf4d1597 Remove unneeded .into_iter() 2023-03-23 14:37:33 -05:00
asonix
2cb5ad9917 Replace Double with Fanout 2023-03-23 13:51:32 -05:00
asonix
afd4105d0f Add flake 2023-03-23 13:51:23 -05:00
c5112cb9bb Merge tag 'v0.3.82' into max 2023-03-05 17:16:33 +01:00
asonix
d644e83733 Bump version 2023-02-25 15:14:24 -06:00
asonix
ae91aa8fa7 Update bcrypt 2023-02-25 15:06:18 -06:00
asonix
73c016d418 Update deps 2023-02-25 15:04:30 -06:00
asonix
a1ea5d676c Rework misskey fetch to reuse deliver plumbing
Only count server errors towards failed breakers
2023-02-25 15:02:16 -06:00
perillamint
667d586160 Send dummy JSON when trying Misskey API endpoint
From Misskey 13, Misskey expects valid JSON (does not care its content
though) in POST body. To workaround this, send empty JSON object when
requesting Misskey API endpoint
2023-02-25 14:34:38 -06:00
perillamint
4a7775b56d Misskey metadata support
This commit implements misskey metadata support and corresponding test
for From<MskyMeta> implementation

Also, it is good to note that, Misskey does not return 404 but 200 OK
when they poked at nonexistant endpoint, so the implementation should
handle for invalid json case
2023-02-25 14:34:22 -06:00
asonix
9b809913ad Add note about JSON-LD problems 2023-02-11 18:16:06 -06:00
asonix
a952b528df Use transpose in a couple places 2023-02-05 21:09:47 -06:00
asonix
b5138fc16d Bump version 2023-01-29 13:23:11 -06:00
asonix
0e9b88a7ae Bump deps 2023-01-29 13:21:53 -06:00
asonix
f9cad61049 Add healthcheck for db, new clippy lints 2023-01-29 13:21:36 -06:00
Tealk
96547230bc update Pleroma text
Signed-off-by: Tealk <tealk@rollenspiel.monster>
2023-01-28 23:45:31 +01:00
asonix
c11ff17192 Bump version 2023-01-23 08:58:07 -06:00
asonix
e93dd2da56 Update teloxide 2023-01-23 08:57:16 -06:00
asonix
34dc1a2281 Update rsa 2023-01-23 08:56:18 -06:00
asonix
9cdebeae4c Update base64, ructe 2023-01-23 08:38:55 -06:00
asonix
662620be46 Only show open_registrations: false when restricted mode is enabled 2023-01-23 08:29:32 -06:00
b94f792a19 Merge tag 'v0.3.79' into max 2023-01-19 21:14:20 +01:00
asonix
5488acb59d Fix docker volume mount in readme 2023-01-03 15:17:56 -06:00
asonix
4998cd3a56 Bump version 2023-01-02 12:43:51 -06:00
asonix
f0a8862922 Don't prometheus exporter for relay client 2023-01-02 12:43:32 -06:00
asonix
b6a10c4e65 Apply patch from perillamint on github
Document REPOSITORY_COMMIT_BASE envvar
2023-01-01 10:29:28 -06:00
asonix
3a14242a91 Apply patch from perillamint on github
Accept REPOSITORY_COMMIT_BASE envvar to build repository url
2023-01-01 10:28:52 -06:00
asonix
f5fed2fce1 Apply patch from perillamint on github
One missing bit Debug implementation for source_url
2023-01-01 10:19:11 -06:00
asonix
5faeaf6371 Revert "Apply patch from perillamint on github"
This reverts commit f291b24269.
2023-01-01 10:01:39 -06:00
asonix
f291b24269 Apply patch from perillamint on github
Show repository URL with commit reference
2023-01-01 09:47:21 -06:00
asonix
5f5c34640f Apply patch from perillamint on github
use git hash to describe version number
2023-01-01 09:46:44 -06:00
asonix
d4e51a1afa Add scrape endpoint to .env 2022-12-26 11:20:02 -06:00
asonix
fafba69258 Add optional prometheus scrape endpoint 2022-12-26 10:57:16 -06:00
asonix
07b961c28f Bump deps 2022-12-21 16:59:19 -06:00
asonix
30dd16a889 Bump version 2022-12-21 16:58:17 -06:00
asonix
88b0383084 Keep track of when servers were last seen 2022-12-21 16:51:17 -06:00
asonix
b49eeaf822 Bump version 2022-12-19 22:25:27 -06:00
asonix
943f679a69 Allow activities without IDs, fetch actor unchecked 2022-12-19 22:24:58 -06:00
asonix
37b2afe344 Bump version 2022-12-19 21:46:27 -06:00
asonix
4e5fabce5f Also debug Kind in inbox 2022-12-19 21:45:52 -06:00
asonix
689d85befb Bump version 2022-12-19 21:07:25 -06:00
asonix
40eb12258d Record id in inbox route 2022-12-19 21:05:53 -06:00
asonix
efcec29d7b Remove unused docker-related files 2022-12-19 16:32:16 -06:00
asonix
62a886d0bf Bump version 2022-12-19 16:31:51 -06:00
asonix
163e480076 Update deps 2022-12-19 16:30:48 -06:00
asonix
675fddcfeb Support Remove activity, forward verbatim 2022-12-19 16:08:39 -06:00
asonix
359ec68aa0 Add example systemd configuration 2022-12-19 15:52:47 -06:00
asonix
565a94d756 clippy 2022-12-19 12:23:06 -06:00
asonix
815c18b899 Update version number in various places 2022-12-19 12:17:08 -06:00
asonix
fbcbf141dd Bump version 2022-12-19 11:46:49 -06:00
asonix
cf7a25f935 Consider NoSignature a BadRequest 2022-12-19 11:44:50 -06:00
asonix
b56bddccb4 Allow Signature to be missing if kind is Delete, return early without additional processing 2022-12-19 11:39:30 -06:00
asonix
886c7d0ac6 Apply patch from perallamint on github
Temporary fix: allow signing bypass for 410 gone actors
DIRTY FIX: implement sigcheck_bypass for 410'ing actors
2022-12-19 09:44:04 -06:00
asonix
178d23bcbd Bump deps 2022-12-14 20:17:14 -06:00
asonix
549eb47202 Bump version 2022-12-13 23:41:06 -06:00
asonix
5968cb8953 bump deps 2022-12-13 23:40:53 -06:00
asonix
c5e254dad6 Update deps 2022-12-13 23:37:09 -06:00
asonix
430ebec810 Improve tracing, immediately stringify spantrace, remove join macros 2022-12-13 23:36:40 -06:00
asonix
c15f591bc8 Add punctuation to readme 2022-12-13 10:56:25 -06:00
asonix
5d69eaf2ab Add note about Add activity to README 2022-12-13 10:46:58 -06:00
asonix
43b70f88a7 Apply patch from perallamint on github
clippy: unnecessary lifetime annotation on static strings

Since string literal constant already has static lifetime, it is not
necessary to explicitly annotate it with 'static.
2022-12-13 10:39:25 -06:00
asonix
a0dc2363f6 Add support for Add activity - forward verbatim 2022-12-13 10:35:16 -06:00
asonix
9d68ccd834 Update deps 2022-12-12 11:06:23 -06:00
asonix
a8b8325557 Update deps 2022-12-12 10:56:53 -06:00
asonix
6082def854 Bump version 2022-12-09 18:04:15 -06:00
asonix
31021e80e4 Bump deps 2022-12-09 18:03:36 -06:00
asonix
f4db90b699 Use sync RwLock for lru access 2022-12-09 17:47:45 -06:00
asonix
d834537300 Bump http-signature-normalization-actix 2022-12-08 21:15:43 -06:00
asonix
c18760d57f Bump version 2022-12-08 15:14:12 -06:00
asonix
8575439d88 Bump deps 2022-12-08 15:14:04 -06:00
asonix
c543e8b4eb Bump version 2022-12-06 18:53:36 -06:00
asonix
a0fbf9d236 Bump activitystreams again 2022-12-06 18:53:19 -06:00
asonix
b9dba28207 Bump activitystreams 2022-12-06 18:21:55 -06:00
asonix
b5dc3e7c08 Wrap whole main in actix_rt, fixes opentelemetry 2022-12-06 17:55:02 -06:00
4e5e257f24 Fix merge fail 2022-11-28 10:08:58 +01:00
f0ae726c9d Small translation errors 2022-11-28 10:00:05 +01:00
112ed7cedd Merge tag 'v0.3.66' into max 2022-11-28 09:54:00 +01:00
41034f7334 Mini fixes 2022-11-28 09:14:46 +01:00
e89ee20d38 Merge branch 'upstream' into max 2022-11-19 14:09:39 +01:00
1d967dc3b4 Merge branch 'cmdline-ls' into max 2022-11-11 10:47:37 +01:00
41c406d0c3 Make the info webpage french 2022-11-11 10:04:27 +01:00
68 changed files with 3789 additions and 2157 deletions


@ -1,2 +1,2 @@
[build]
# rustflags = ["--cfg", "tokio_unstable"]
rustflags = ["--cfg", "tokio_unstable"]

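For context: the `tokio_unstable` cfg flipped on here is the compile-time gate for tokio-console's task instrumentation, and it pairs with the `console = ["dep:console-subscriber"]` feature in the Cargo.toml changes below. A minimal sketch of the wiring, assuming the relay initializes it roughly like this behind its `console` feature:

```rust
// Hypothetical wiring; the relay's actual setup lives in its tracing init.
// Requires building with RUSTFLAGS="--cfg tokio_unstable" (the change above).
#[cfg(feature = "console")]
fn init_console_subscriber() {
    // Spawns the instrumentation server that the `tokio-console` CLI attaches to.
    console_subscriber::init();
}
```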

@ -22,7 +22,7 @@ steps:
pull: always
commands:
- rustup component add clippy
- cargo clippy -- -D warnings
- cargo clippy --no-deps -- -D warnings
trigger:
event:

.env (10 changed lines)

@ -4,8 +4,10 @@ HTTPS=false
DEBUG=true
RESTRICTED_MODE=true
VALIDATE_SIGNATURES=false
API_TOKEN=somesecretpassword
FOOTER_BLURB="Contact <a href=\"https://masto.asonix.dog/@asonix\">@asonix</a> for inquiries"
LOCAL_DOMAINS="masto.asonix.dog"
LOCAL_BLURB="<p>Welcome to my cool relay where I have cool relay things happening. I hope you enjoy your stay!</p>"
API_TOKEN=kjsdhfkwjenrkajhsdakjsnd
FOOTER_BLURB="Opéré par <a href=\"https://mastodon.xolus.net/@max\">@max</a>"
LOCAL_DOMAINS="xolus.net"
LOCAL_BLURB="<p>Relais ActivityPub francophone</p>"
# OPENTELEMETRY_URL=http://localhost:4317
PROMETHEUS_ADDR=127.0.0.1
PROMETHEUS_PORT=9000

.gitignore (3 changed lines)

@ -1,3 +1,6 @@
/target
/artifacts
/sled
/.direnv
/.envrc
/result

Cargo.lock (generated, 2892 changed lines)

File diff suppressed because it is too large.


@ -1,11 +1,11 @@
[package]
name = "ap-relay"
description = "A simple activitypub relay"
version = "0.3.66"
version = "0.3.108"
authors = ["asonix <asonix@asonix.dog>"]
license = "AGPL-3.0"
readme = "README.md"
repository = "https://git.asonix.dog/asonix/ap-relay"
repository = "https://git.asonix.dog/asonix/relay"
keywords = ["activitypub", "relay"]
edition = "2021"
build = "src/build.rs"
@ -15,89 +15,97 @@ name = "relay"
path = "src/main.rs"
[features]
console = ["console-subscriber"]
console = ["dep:console-subscriber"]
default = []
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
anyhow = "1.0"
actix-rt = "2.7.0"
actix-web = { version = "4.0.1", default-features = false, features = [
"rustls",
"compress-brotli",
"compress-gzip",
] }
actix-webfinger = "0.4.0"
activitystreams = "0.7.0-alpha.19"
activitystreams-ext = "0.1.0-alpha.2"
actix-web = { version = "4.4.0", default-features = false, features = ["compress-brotli", "compress-gzip", "rustls-0_22"] }
actix-webfinger = { version = "0.5.0", default-features = false }
activitystreams = "0.7.0-alpha.25"
activitystreams-ext = "0.1.0-alpha.3"
ammonia = "3.1.0"
awc = { version = "3.0.0", default-features = false, features = ["rustls"] }
bcrypt = "0.13"
base64 = "0.13"
async-cpupool = "0.2.0"
bcrypt = "0.15"
base64 = "0.21"
clap = { version = "4.0.0", features = ["derive"] }
config = "0.13.0"
console-subscriber = { version = "0.1", optional = true }
color-eyre = "0.6.2"
config = { version = "0.14.0", default-features = false, features = ["toml", "json", "yaml"] }
console-subscriber = { version = "0.2", optional = true }
dashmap = "5.1.0"
dotenv = "0.15.0"
futures-util = "0.3.17"
lru = "0.8.0"
metrics = "0.20.1"
metrics-util = "0.14.0"
flume = "0.11.0"
lru = "0.12.0"
metrics = "0.22.0"
metrics-exporter-prometheus = { version = "0.13.0", default-features = false, features = [
"http-listener",
] }
metrics-util = "0.16.0"
mime = "0.3.16"
minify-html = "0.10.0"
opentelemetry = { version = "0.18", features = ["rt-tokio"] }
opentelemetry-otlp = "0.11"
minify-html = "0.15.0"
opentelemetry = "0.21"
opentelemetry_sdk = { version = "0.21", features = ["rt-tokio"] }
opentelemetry-otlp = "0.14"
pin-project-lite = "0.2.9"
quanta = "0.10.1"
# pinned to metrics-util
quanta = "0.12.0"
rand = "0.8"
rsa = "0.7"
rsa-magic-public-key = "0.6.0"
rustls = "0.20.7"
rustls-pemfile = "1.0.1"
reqwest = { version = "0.11", default-features = false, features = ["rustls-tls", "stream"]}
reqwest-middleware = "0.2"
reqwest-tracing = "0.4.5"
ring = "0.17.5"
rsa = { version = "0.9" }
rsa-magic-public-key = "0.8.0"
rustls = "0.22.0"
rustls-channel-resolver = "0.2.0"
rustls-pemfile = "2"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
sha2 = { version = "0.10", features = ["oid"] }
signature = "1.6.4"
sled = "0.34.7"
teloxide = { version = "0.11.1", default-features = false, features = [
streem = "0.2.0"
teloxide = { version = "0.12.0", default-features = false, features = [
"ctrlc_handler",
"macros",
"rustls",
] }
thiserror = "1.0"
time = { version = "0.3.17", features = ["serde"] }
tracing = "0.1"
tracing-awc = "0.1.6"
tracing-error = "0.2"
tracing-futures = "0.2"
tracing-log = "0.1"
tracing-opentelemetry = "0.18"
tracing-log = "0.2"
tracing-opentelemetry = "0.22"
tracing-subscriber = { version = "0.3", features = [
"ansi",
"env-filter",
"fmt",
] }
tokio = { version = "1", features = ["macros", "sync"] }
tokio = { version = "1", features = ["full", "tracing"] }
uuid = { version = "1", features = ["v4", "serde"] }
[dependencies.background-jobs]
version = "0.14.0"
version = "0.18.0"
default-features = false
features = ["background-jobs-actix", "error-logging"]
features = ["error-logging", "metrics", "tokio"]
[dependencies.http-signature-normalization-actix]
version = "0.6.0"
version = "0.11.0"
default-features = false
features = ["client", "server", "sha-2"]
features = ["server", "ring"]
[dependencies.http-signature-normalization-reqwest]
version = "0.11.0"
default-features = false
features = ["middleware", "ring"]
[dependencies.tracing-actix-web]
version = "0.6.1"
version = "0.7.9"
[build-dependencies]
anyhow = "1.0"
color-eyre = "0.6.2"
dotenv = "0.15.0"
ructe = { version = "0.15.0", features = ["sass", "mime03"] }
toml = "0.5.8"
ructe = { version = "0.17.0", features = ["sass", "mime03"] }
toml = "0.8.0"
[profile.dev.package.rsa]
opt-level = 3


@ -6,11 +6,11 @@ _A simple and efficient activitypub relay_
If running docker, you can start the relay with the following command:
```
$ sudo docker run --rm -it \
-v "./:/mnt/" \
-v "$(pwd):/mnt/" \
-e ADDR=0.0.0.0 \
-e SLED_PATH=/mnt/sled/db-0.34 \
-p 8080:8080 \
asonix/relay:0.3.52
asonix/relay:0.3.85
```
This will launch the relay with the database stored in "./sled/db-0.34" and listening on port 8080
#### Cargo
@ -103,6 +103,11 @@ TLS_CERT=/path/to/cert
FOOTER_BLURB="Contact <a href=\"https://masto.asonix.dog/@asonix\">@asonix</a> for inquiries"
LOCAL_DOMAINS=masto.asonix.dog
LOCAL_BLURB="<p>Welcome to my cool relay where I have cool relay things happening. I hope you enjoy your stay!</p>"
PROMETHEUS_ADDR=0.0.0.0
PROMETHEUS_PORT=9000
CLIENT_TIMEOUT=10
DELIVER_CONCURRENCY=8
SIGNATURE_THREADS=2
```
#### Descriptions
@ -128,6 +133,8 @@ Where to store the on-disk database of connected servers. This defaults to `./sl
The log level to print. Available levels are `ERROR`, `WARN`, `INFO`, `DEBUG`, and `TRACE`. You can also specify module paths to enable some logs but not others, such as `RUST_LOG=warn,tracing_actix_web=info,relay=info`. This defaults to `warn`
##### `SOURCE_REPO`
The URL to the source code for the relay. This defaults to `https://git.asonix.dog/asonix/relay`, but should be changed if you're running a fork hosted elsewhere.
##### `REPOSITORY_COMMIT_BASE`
The base path of the repository commit hash reference. For example, `/src/commit/` for Gitea, `/tree/` for GitLab.
##### `API_TOKEN`
The Secret token used to access the admin APIs. This must be set for the commandline to function
##### `OPENTELEMETRY_URL`
@ -146,6 +153,26 @@ Optional - Add custom notes in the footer of the page
Optional - domains of mastodon servers run by the same admin as the relay
##### `LOCAL_BLURB`
Optional - description for the relay
##### `PROMETHEUS_ADDR`
Optional - Address to bind to for serving the prometheus scrape endpoint
##### `PROMETHEUS_PORT`
Optional - Port to bind to for serving the prometheus scrape endpoint
##### `CLIENT_TIMEOUT`
Optional - How long the relay will hold open a connection (in seconds) to a remote server during
fetches and deliveries. This defaults to 10
##### `DELIVER_CONCURRENCY`
Optional - How many deliver requests the relay should allow to be in-flight per thread. the default
is 8
##### `SIGNATURE_THREADS`
Optional - Override number of threads used for signing and verifying requests. Default is
`std::thread::available_parallelism()` (It tries to detect how many cores you have). If it cannot
detect the correct number of cores, it falls back to 1.
##### 'PROXY_URL'
Optional - URL of an HTTP proxy to forward outbound requests through
##### 'PROXY_USERNAME'
Optional - username to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth
##### 'PROXY_PASSWORD'
Optional - password to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth
### Subscribing
Mastodon admins can subscribe to this relay by adding the `/inbox` route to their relay settings.
@ -165,10 +192,16 @@ example, if the server is `https://relay.my.tld`, the correct URL would be
- Follow Public, become a listener of the relay
- Undo Follow {self-actor}, stop listening on the relay, an Undo Follow will be sent back
- Undo Follow Public, stop listening on the relay
- Delete {anything}, the Delete {anything} is relayed verbatim to listening servers
- Delete {anything}, the Delete {anything} is relayed verbatim to listening servers.
Note that this activity will likely be rejected by the listening servers unless it has been
signed with a JSON-LD signature
- Update {anything}, the Update {anything} is relayed verbatim to listening servers
- Update {anything}, the Update {anything} is relayed verbatim to listening servers.
Note that this activity will likely be rejected by the listening servers unless it has been
signed with a JSON-LD signature
- Add {anything}, the Add {anything} is relayed verbatim to listening servers.
Note that this activity will likely be rejected by the listening servers unless it has been
signed with a JSON-LD signature
- Remove {anything}, the Remove {anything} is relayed verbatim to listening servers.
Note that this activity will likely be rejected by the listening servers unless it has been
signed with a JSON-LD signature
@ -176,6 +209,9 @@ example, if the server is `https://relay.my.tld`, the correct URL would be
- Webfinger
- NodeInfo
### Known issues
Pleroma and Akkoma do not support validating JSON-LD signatures, meaning many activities such as Delete, Update, Add, and Remove will be rejected with a message similar to `WARN: Response from https://example.com/inbox, "Invalid HTTP Signature"`. This is normal and not an issue with the relay.
### Contributing
Feel free to open issues for anything you find an issue with. Please note that any contributed code will be licensed under the AGPLv3.

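One README entry above that benefits from being concrete is the `SIGNATURE_THREADS` fallback. A sketch of the described logic, mirroring the `Config::signature_threads` change that appears later in this diff:

```rust
// Use the configured value if present, otherwise detect the core count,
// and never go below one thread.
fn signature_threads(configured: Option<usize>) -> usize {
    configured
        .unwrap_or_else(|| {
            std::thread::available_parallelism()
                .map(usize::from)
                .unwrap_or(1) // detection failed: fall back to a single thread
        })
        .max(1)
}
```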

@ -1,41 +0,0 @@
ARG REPO_ARCH=amd64
# cross-build environment
FROM asonix/rust-builder:$REPO_ARCH-latest AS builder
ARG TAG=main
ARG BINARY=relay
ARG PROJECT=relay
ARG GIT_REPOSITORY=https://git.asonix.dog/asonix/$PROJECT
ENV \
BINARY=${BINARY}
ADD \
--chown=build:build \
$GIT_REPOSITORY/archive/$TAG.tar.gz \
/opt/build/repo.tar.gz
RUN \
tar zxf repo.tar.gz
WORKDIR /opt/build/$PROJECT
RUN \
build
# production environment
FROM asonix/rust-runner:$REPO_ARCH-latest
ARG BINARY=relay
ENV \
BINARY=${BINARY}
COPY \
--from=builder \
/opt/build/binary \
/usr/bin/${BINARY}
ENTRYPOINT ["/sbin/tini", "--"]
CMD /usr/bin/${BINARY}


@ -1,37 +0,0 @@
#!/usr/bin/env bash
function require() {
if [ "$1" = "" ]; then
echo "input '$2' required"
print_help
exit 1
fi
}
function print_help() {
echo "deploy.sh"
echo ""
echo "Usage:"
echo " deploy.sh [repo] [tag] [arch]"
echo ""
echo "Args:"
echo " repo: The docker repository to publish the image"
echo " tag: The tag applied to the docker image"
echo " arch: The architecuture of the doker image"
}
REPO=$1
TAG=$2
ARCH=$3
require "$REPO" repo
require "$TAG" tag
require "$ARCH" arch
sudo docker build \
--pull \
--build-arg TAG=$TAG \
--build-arg REPO_ARCH=$ARCH \
-t $REPO:$ARCH-$TAG \
-f Dockerfile \
.


@ -1,87 +0,0 @@
#!/usr/bin/env bash
function require() {
if [ "$1" = "" ]; then
echo "input '$2' required"
print_help
exit 1
fi
}
function print_help() {
echo "deploy.sh"
echo ""
echo "Usage:"
echo " deploy.sh [tag] [branch] [push]"
echo ""
echo "Args:"
echo " tag: The git tag to be applied to the repository and docker build"
echo " branch: The git branch to use for tagging and publishing"
echo " push: Whether or not to push the image"
echo ""
echo "Examples:"
echo " ./deploy.sh v0.3.0-alpha.13 main true"
echo " ./deploy.sh v0.3.0-alpha.13-shell-out asonix/shell-out false"
}
function build_image() {
tag=$1
arch=$2
push=$3
./build-image.sh asonix/relay $tag $arch
sudo docker tag asonix/relay:$arch-$tag asonix/relay:$arch-latest
if [ "$push" == "true" ]; then
sudo docker push asonix/relay:$arch-$tag
sudo docker push asonix/relay:$arch-latest
fi
}
# Creating the new tag
new_tag="$1"
branch="$2"
push=$3
require "$new_tag" "tag"
require "$branch" "branch"
require "$push" "push"
if ! sudo docker run --rm -it arm64v8/alpine:3.11 /bin/sh -c 'echo "docker is configured correctly"'
then
echo "docker is not configured to run on qemu-emulated architectures, fixing will require sudo"
sudo docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
fi
set -xe
git checkout $branch
# Changing the docker-compose prod
sed -i "s/asonix\/relay:.*/asonix\/relay:$new_tag/" docker-compose.yml
git add ../prod/docker-compose.yml
# The commit
git commit -m"Version $new_tag"
git tag $new_tag
# Push
git push origin $new_tag
git push
# Build for arm64v8, arm32v7 and amd64
build_image $new_tag arm64v8 $push
build_image $new_tag arm32v7 $push
build_image $new_tag amd64 $push
# Build for other archs
# TODO
if [ "$push" == "true" ]; then
./manifest.sh relay $new_tag
./manifest.sh relay latest
# pushd ../../
# cargo publish
# popd
fi


@ -2,7 +2,7 @@ version: '3.3'
services:
relay:
image: asonix/relay:v0.3.8
image: asonix/relay:0.3.85
ports:
- "8079:8079"
restart: always
@ -14,6 +14,7 @@ services:
- RESTRICTED_MODE=false
- VALIDATE_SIGNATURES=true
- HTTPS=true
- DATABASE_URL=postgres://pg_user:pg_pass@pg_host:pg_port/pg_database
- SLED_PATH=/mnt/sled/db-0.34
- PRETTY_LOG=false
- PUBLISH_BLOCKS=true
- API_TOKEN=somepasswordishtoken


@ -1,43 +0,0 @@
#!/usr/bin/env bash
function require() {
if [ "$1" = "" ]; then
echo "input '$2' required"
print_help
exit 1
fi
}
function print_help() {
echo "deploy.sh"
echo ""
echo "Usage:"
echo " manifest.sh [repo] [tag]"
echo ""
echo "Args:"
echo " repo: The docker repository to update"
echo " tag: The git tag to be applied to the image manifest"
}
REPO=$1
TAG=$2
require "$REPO" "repo"
require "$TAG" "tag"
set -xe
sudo docker manifest create asonix/$REPO:$TAG \
-a asonix/$REPO:arm64v8-$TAG \
-a asonix/$REPO:arm32v7-$TAG \
-a asonix/$REPO:amd64-$TAG
sudo docker manifest annotate asonix/$REPO:$TAG \
asonix/$REPO:arm64v8-$TAG --os linux --arch arm64 --variant v8
sudo docker manifest annotate asonix/$REPO:$TAG \
asonix/$REPO:arm32v7-$TAG --os linux --arch arm --variant v7
sudo docker manifest annotate asonix/$REPO:$TAG \
asonix/$REPO:amd64-$TAG --os linux --arch amd64
sudo docker manifest push asonix/$REPO:$TAG --purge

flake.lock (new file, 61 lines)

@ -0,0 +1,61 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1701680307,
"narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "4022d587cbbfd70fe950c1e2083a02621806a725",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1705133751,
"narHash": "sha256-rCIsyE80jgiOU78gCWN3A0wE0tR2GI5nH6MlS+HaaSQ=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9b19f5e77dd906cb52dade0b7bd280339d2a1f3d",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

flake.nix (new file, 34 lines)

@ -0,0 +1,34 @@
{
description = "relay";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
};
outputs = { self, nixpkgs, flake-utils }:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = import nixpkgs {
inherit system;
};
in
{
packages = rec {
relay = pkgs.callPackage ./relay.nix { };
default = relay;
};
apps = rec {
dev = flake-utils.lib.mkApp { drv = self.packages.${system}.pict-rs-proxy; };
default = dev;
};
devShell = with pkgs; mkShell {
nativeBuildInputs = [ cargo cargo-outdated cargo-zigbuild clippy gcc protobuf rust-analyzer rustc rustfmt ];
RUST_SRC_PATH = "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
};
});
}

relay.nix (new file, 23 lines)

@ -0,0 +1,23 @@
{ lib
, nixosTests
, rustPlatform
}:
rustPlatform.buildRustPackage {
pname = "relay";
version = "0.3.108";
src = ./.;
cargoLock.lockFile = ./Cargo.lock;
RUSTFLAGS = "--cfg tokio_unstable";
nativeBuildInputs = [ ];
passthru.tests = { inherit (nixosTests) relay; };
meta = with lib; {
description = "An ActivityPub relay";
homepage = "https://git.asonix.dog/asonix/relay";
license = with licenses; [ agpl3Plus ];
};
}


@ -231,6 +231,21 @@ footer {
}
}
.open {
color: blue;
font-weight: bold;
}
.moderated {
font-weight: bold;
color: green;
}
.closed {
font-weight: bold;
color: red;
}
@media(max-width: 700px) {
header .header-text {
padding: 24px;


@ -1,4 +1,6 @@
use activitystreams::iri_string::types::IriString;
use std::collections::{BTreeMap, BTreeSet};
use time::OffsetDateTime;
pub mod client;
pub mod routes;
@ -22,3 +24,9 @@ pub(crate) struct BlockedDomains {
pub(crate) struct ConnectedActors {
pub(crate) connected_actors: Vec<IriString>,
}
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct LastSeen {
pub(crate) last_seen: BTreeMap<OffsetDateTime, BTreeSet<String>>,
pub(crate) never: Vec<String>,
}


@ -1,14 +1,16 @@
use crate::{
admin::{AllowedDomains, BlockedDomains, ConnectedActors, Domains},
admin::{AllowedDomains, BlockedDomains, ConnectedActors, Domains, LastSeen},
collector::Snapshot,
config::{AdminUrlKind, Config},
error::{Error, ErrorKind},
extractors::XApiToken,
};
use awc::Client;
use actix_web::http::header::Header;
use reqwest_middleware::ClientWithMiddleware;
use serde::de::DeserializeOwned;
pub(crate) async fn allow(
client: &Client,
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
@ -16,7 +18,7 @@ pub(crate) async fn allow(
}
pub(crate) async fn disallow(
client: &Client,
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
@ -24,7 +26,7 @@ pub(crate) async fn disallow(
}
pub(crate) async fn block(
client: &Client,
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
@ -32,31 +34,50 @@ pub(crate) async fn block(
}
pub(crate) async fn unblock(
client: &Client,
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
post_domains(client, config, domains, AdminUrlKind::Unblock).await
}
pub(crate) async fn allowed(client: &Client, config: &Config) -> Result<AllowedDomains, Error> {
pub(crate) async fn allowed(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<AllowedDomains, Error> {
get_results(client, config, AdminUrlKind::Allowed).await
}
pub(crate) async fn blocked(client: &Client, config: &Config) -> Result<BlockedDomains, Error> {
pub(crate) async fn blocked(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<BlockedDomains, Error> {
get_results(client, config, AdminUrlKind::Blocked).await
}
pub(crate) async fn connected(client: &Client, config: &Config) -> Result<ConnectedActors, Error> {
pub(crate) async fn connected(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<ConnectedActors, Error> {
get_results(client, config, AdminUrlKind::Connected).await
}
pub(crate) async fn stats(client: &Client, config: &Config) -> Result<Snapshot, Error> {
pub(crate) async fn stats(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<Snapshot, Error> {
get_results(client, config, AdminUrlKind::Stats).await
}
pub(crate) async fn last_seen(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<LastSeen, Error> {
get_results(client, config, AdminUrlKind::LastSeen).await
}
async fn get_results<T: DeserializeOwned>(
client: &Client,
client: &ClientWithMiddleware,
config: &Config,
url_kind: AdminUrlKind,
) -> Result<T, Error> {
@ -64,9 +85,9 @@ async fn get_results<T: DeserializeOwned>(
let iri = config.generate_admin_url(url_kind);
let mut res = client
let res = client
.get(iri.as_str())
.insert_header(x_api_token)
.header(XApiToken::name(), x_api_token.to_string())
.send()
.await
.map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;
@ -84,7 +105,7 @@ async fn get_results<T: DeserializeOwned>(
}
async fn post_domains(
client: &Client,
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
url_kind: AdminUrlKind,
@ -95,8 +116,9 @@ async fn post_domains(
let res = client
.post(iri.as_str())
.insert_header(x_api_token)
.send_json(&Domains { domains })
.header(XApiToken::name(), x_api_token.to_string())
.json(&Domains { domains })
.send()
.await
.map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;

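The switch from `awc::Client` to `reqwest_middleware::ClientWithMiddleware` in this file means the admin client arrives pre-assembled with middleware. A hedged sketch of how such a client is typically built; the relay's real builder also sets a user agent, timeout, and the optional proxy:

```rust
use reqwest_middleware::{ClientBuilder, ClientWithMiddleware};
use reqwest_tracing::TracingMiddleware;

// Assumed construction, for illustration only.
fn build_admin_client() -> ClientWithMiddleware {
    let inner = reqwest::Client::builder()
        .build()
        .expect("reqwest client should build");

    ClientBuilder::new(inner)
        .with(TracingMiddleware::default()) // reqwest-tracing, per Cargo.toml
        .build()
}
```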

@ -1,5 +1,5 @@
use crate::{
admin::{AllowedDomains, BlockedDomains, ConnectedActors, Domains},
admin::{AllowedDomains, BlockedDomains, ConnectedActors, Domains, LastSeen},
collector::{MemoryCollector, Snapshot},
error::Error,
extractors::Admin,
@ -8,6 +8,8 @@ use actix_web::{
web::{Data, Json},
HttpResponse,
};
use std::collections::{BTreeMap, BTreeSet};
use time::OffsetDateTime;
pub(crate) async fn allow(
admin: Admin,
@ -69,3 +71,20 @@ pub(crate) async fn stats(
) -> Result<Json<Snapshot>, Error> {
Ok(Json(collector.snapshot()))
}
pub(crate) async fn last_seen(admin: Admin) -> Result<Json<LastSeen>, Error> {
let nodes = admin.db_ref().last_seen().await?;
let mut last_seen: BTreeMap<OffsetDateTime, BTreeSet<String>> = BTreeMap::new();
let mut never = Vec::new();
for (domain, datetime) in nodes {
if let Some(datetime) = datetime {
last_seen.entry(datetime).or_default().insert(domain);
} else {
never.push(domain);
}
}
Ok(Json(LastSeen { last_seen, never }))
}


@ -34,11 +34,13 @@ pub struct PublicKey {
#[serde(rename_all = "PascalCase")]
pub enum ValidTypes {
Accept,
Add,
Announce,
Create,
Delete,
Follow,
Reject,
Remove,
Undo,
Update,
}

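`ValidTypes` is the allowlist of activity kinds the inbox will deserialize at all, so adding `Add` and `Remove` is what backs the new verbatim forwarding. A trimmed, self-contained illustration (the real enum lists all ten accepted kinds):

```rust
// Trimmed copy of the enum to show the parse behavior.
#[derive(Debug, serde::Deserialize)]
#[serde(rename_all = "PascalCase")]
enum ValidTypes {
    Accept,
    Add,
    Remove,
    Undo,
}

fn main() {
    // An inbox payload typed "Add" now parses instead of being rejected.
    let kind: ValidTypes = serde_json::from_str(r#""Add""#).unwrap();
    println!("{kind:?}"); // Add
}
```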

@ -17,11 +17,22 @@ pub(crate) struct Args {
#[arg(short, long, help = "Get statistics from the server")]
stats: bool,
#[arg(
short,
long,
help = "List domains by when they were last succesfully contacted"
)]
contacted: bool,
}
impl Args {
pub(crate) fn any(&self) -> bool {
!self.blocks.is_empty() || !self.allowed.is_empty() || self.list || self.stats
!self.blocks.is_empty()
|| !self.allowed.is_empty()
|| self.list
|| self.stats
|| self.contacted
}
pub(crate) fn new() -> Self {
@ -47,4 +58,8 @@ impl Args {
pub(crate) fn stats(&self) -> bool {
self.stats
}
pub(crate) fn contacted(&self) -> bool {
self.contacted
}
}


@ -5,7 +5,8 @@ fn git_info() {
if let Ok(output) = Command::new("git").args(["rev-parse", "HEAD"]).output() {
if output.status.success() {
let git_hash = String::from_utf8_lossy(&output.stdout);
println!("cargo:rustc-env=GIT_HASH={}", git_hash);
println!("cargo:rustc-env=GIT_HASH={git_hash}");
println!("cargo:rustc-env=GIT_SHORT_HASH={}", &git_hash[..8])
}
}
@ -15,15 +16,15 @@ fn git_info() {
{
if output.status.success() {
let git_branch = String::from_utf8_lossy(&output.stdout);
println!("cargo:rustc-env=GIT_BRANCH={}", git_branch);
println!("cargo:rustc-env=GIT_BRANCH={git_branch}");
}
}
}
fn version_info() -> Result<(), anyhow::Error> {
fn version_info() -> color_eyre::Result<()> {
let cargo_toml = Path::new(&std::env::var("CARGO_MANIFEST_DIR")?).join("Cargo.toml");
let mut file = File::open(&cargo_toml)?;
let mut file = File::open(cargo_toml)?;
let mut cargo_data = String::new();
file.read_to_string(&mut cargo_data)?;
@ -31,17 +32,17 @@ fn version_info() -> Result<(), anyhow::Error> {
let data: toml::Value = toml::from_str(&cargo_data)?;
if let Some(version) = data["package"]["version"].as_str() {
println!("cargo:rustc-env=PKG_VERSION={}", version);
println!("cargo:rustc-env=PKG_VERSION={version}");
}
if let Some(name) = data["package"]["name"].as_str() {
println!("cargo:rustc-env=PKG_NAME={}", name);
println!("cargo:rustc-env=PKG_NAME={name}");
}
Ok(())
}
fn main() -> Result<(), anyhow::Error> {
fn main() -> color_eyre::Result<()> {
dotenv::dotenv().ok();
git_info();

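The new `GIT_SHORT_HASH` emitted by the build script is read back at compile time with `option_env!`, as the `Config::git_short_hash` addition later in this diff shows. The contract in two lines:

```rust
// build.rs sets the var; option_env! keeps the binary buildable when it is
// absent (e.g. building outside a git checkout).
fn git_short_hash() -> Option<&'static str> {
    option_env!("GIT_SHORT_HASH")
}
```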

@ -1,4 +1,4 @@
use metrics::{Key, Recorder, SetRecorderError};
use metrics::{Key, Metadata, Recorder, SetRecorderError};
use metrics_util::{
registry::{AtomicStorage, GenerationalStorage, Recency, Registry},
MetricKindMask, Summary,
@ -40,11 +40,11 @@ impl std::fmt::Display for Counter {
let labels = self
.labels
.iter()
.map(|(k, v)| format!("{}: {}", k, v))
.map(|(k, v)| format!("{k}: {v}"))
.collect::<Vec<_>>()
.join(", ");
write!(f, "{} - {}", labels, self.value)
write!(f, "{labels} - {}", self.value)
}
}
@ -59,11 +59,11 @@ impl std::fmt::Display for Gauge {
let labels = self
.labels
.iter()
.map(|(k, v)| format!("{}: {}", k, v))
.map(|(k, v)| format!("{k}: {v}"))
.collect::<Vec<_>>()
.join(", ");
write!(f, "{} - {}", labels, self.value)
write!(f, "{labels} - {}", self.value)
}
}
@ -78,7 +78,7 @@ impl std::fmt::Display for Histogram {
let labels = self
.labels
.iter()
.map(|(k, v)| format!("{}: {}", k, v))
.map(|(k, v)| format!("{k}: {v}"))
.collect::<Vec<_>>()
.join(", ");
@ -87,15 +87,15 @@ impl std::fmt::Display for Histogram {
.iter()
.map(|(k, v)| {
if let Some(v) = v {
format!("{}: {:.6}", k, v)
format!("{k}: {v:.6}")
} else {
format!("{}: None,", k)
format!("{k}: None,")
}
})
.collect::<Vec<_>>()
.join(", ");
write!(f, "{} - {}", labels, value)
write!(f, "{labels} - {value}")
}
}
@ -159,7 +159,7 @@ impl Snapshot {
let entry = merging.entry(name).or_insert_with(HashMap::new);
for counter in counters {
let mut merge_counter = entry
let merge_counter = entry
.entry(counter.labels.clone())
.or_insert_with(MergeCounter::default);
if key == *start {
@ -172,18 +172,18 @@ impl Snapshot {
continue;
}
println!("\t{}", key);
println!("\t{key}");
for counter in counters {
println!("\t\t{}", counter);
println!("\t\t{counter}");
}
}
for (key, counters) in merging {
println!("\t{}", key);
println!("\t{key}");
for (_, counter) in counters {
if let Some(counter) = counter.merge() {
println!("\t\t{}", counter);
println!("\t\t{counter}");
}
}
}
@ -192,10 +192,10 @@ impl Snapshot {
if !self.gauges.is_empty() {
println!("Gauges");
for (key, gauges) in self.gauges {
println!("\t{}", key);
println!("\t{key}");
for gauge in gauges {
println!("\t\t{}", gauge);
println!("\t\t{gauge}");
}
}
}
@ -203,10 +203,10 @@ impl Snapshot {
if !self.histograms.is_empty() {
println!("Histograms");
for (key, histograms) in self.histograms {
println!("\t{}", key);
println!("\t{key}");
for histogram in histograms {
println!("\t\t{}", histogram);
println!("\t\t{histogram}");
}
}
}
@ -216,7 +216,6 @@ impl Snapshot {
fn key_to_parts(key: &Key) -> (String, Vec<(String, String)>) {
let labels = key
.labels()
.into_iter()
.map(|label| (label.key().to_string(), label.value().to_string()))
.collect();
let name = key.name().to_string();
@ -290,7 +289,7 @@ impl Inner {
}
let mut d = self.distributions.write().unwrap();
let outer_entry = d.entry(name.clone()).or_insert_with(BTreeMap::new);
let outer_entry = d.entry(name.clone()).or_default();
let entry = outer_entry
.entry(labels)
@ -348,10 +347,6 @@ impl MemoryCollector {
}
}
pub(crate) fn install(&self) -> Result<(), SetRecorderError> {
metrics::set_boxed_recorder(Box::new(self.clone()))
}
pub(crate) fn snapshot(&self) -> Snapshot {
self.inner.snapshot()
}
@ -364,6 +359,10 @@ impl MemoryCollector {
let mut d = self.inner.descriptions.write().unwrap();
d.entry(key.as_str().to_owned()).or_insert(description);
}
pub(crate) fn install(&self) -> Result<(), SetRecorderError<Self>> {
metrics::set_global_recorder(self.clone())
}
}
impl Recorder for MemoryCollector {
@ -394,19 +393,19 @@ impl Recorder for MemoryCollector {
self.add_description_if_missing(&key, description)
}
fn register_counter(&self, key: &Key) -> metrics::Counter {
fn register_counter(&self, key: &Key, _: &Metadata<'_>) -> metrics::Counter {
self.inner
.registry
.get_or_create_counter(key, |c| c.clone().into())
}
fn register_gauge(&self, key: &Key) -> metrics::Gauge {
fn register_gauge(&self, key: &Key, _: &Metadata<'_>) -> metrics::Gauge {
self.inner
.registry
.get_or_create_gauge(key, |c| c.clone().into())
}
fn register_histogram(&self, key: &Key) -> metrics::Histogram {
fn register_histogram(&self, key: &Key, _: &Metadata<'_>) -> metrics::Histogram {
self.inner
.registry
.get_or_create_histogram(key, |c| c.clone().into())

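These collector changes track the metrics 0.21 to 0.22 API break: the `Recorder` registration methods gained a `Metadata` parameter and `set_boxed_recorder` became `set_global_recorder`. Call sites change too, since the macros now return handles; a small sketch with hypothetical metric names:

```rust
// Caller-side shape under metrics 0.22; names and labels are illustrative.
fn record_delivery(success: bool) {
    let status = if success { "ok" } else { "failed" };
    metrics::counter!("relay.deliveries", "status" => status).increment(1);
    metrics::histogram!("relay.delivery-seconds").record(0.25);
}
```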

@ -1,22 +1,22 @@
use crate::{
data::{ActorCache, State},
error::Error,
extractors::{AdminConfig, XApiToken},
middleware::MyVerify,
requests::Requests,
};
use activitystreams::{
iri,
iri_string::{
format::ToDedicatedString,
resolve::FixedBaseResolver,
types::{IriAbsoluteString, IriFragmentStr, IriRelativeStr, IriString},
},
};
use config::Environment;
use http_signature_normalization_actix::prelude::{VerifyDigest, VerifySignature};
use rustls::{Certificate, PrivateKey};
use sha2::{Digest, Sha256};
use std::{io::BufReader, net::IpAddr, path::PathBuf};
use http_signature_normalization_actix::{digest::ring::Sha256, prelude::VerifyDigest};
use rustls::sign::CertifiedKey;
use std::{
net::{IpAddr, SocketAddr},
path::PathBuf,
};
use uuid::Uuid;
#[derive(Clone, Debug, serde::Deserialize)]
@ -31,6 +31,7 @@ pub(crate) struct ParsedConfig {
publish_blocks: bool,
sled_path: PathBuf,
source_repo: IriString,
repository_commit_base: String,
opentelemetry_url: Option<IriString>,
telegram_token: Option<String>,
telegram_admin_handle: Option<String>,
@ -40,6 +41,14 @@ pub(crate) struct ParsedConfig {
footer_blurb: Option<String>,
local_domains: Option<String>,
local_blurb: Option<String>,
prometheus_addr: Option<IpAddr>,
prometheus_port: Option<u16>,
deliver_concurrency: u64,
client_timeout: u64,
proxy_url: Option<IriString>,
proxy_username: Option<String>,
proxy_password: Option<String>,
signature_threads: Option<usize>,
}
#[derive(Clone)]
@ -62,6 +71,11 @@ pub struct Config {
footer_blurb: Option<String>,
local_domains: Vec<String>,
local_blurb: Option<String>,
prometheus_config: Option<PrometheusConfig>,
deliver_concurrency: u64,
client_timeout: u64,
proxy_config: Option<ProxyConfig>,
signature_threads: Option<usize>,
}
#[derive(Clone)]
@ -70,6 +84,18 @@ struct TlsConfig {
cert: PathBuf,
}
#[derive(Clone, Debug)]
struct PrometheusConfig {
addr: IpAddr,
port: u16,
}
#[derive(Clone, Debug)]
struct ProxyConfig {
url: IriString,
auth: Option<(String, String)>,
}
#[derive(Debug)]
pub enum UrlKind {
Activity,
@ -94,6 +120,7 @@ pub enum AdminUrlKind {
Blocked,
Connected,
Stats,
LastSeen,
}
impl std::fmt::Debug for Config {
@ -121,6 +148,11 @@ impl std::fmt::Debug for Config {
.field("footer_blurb", &self.footer_blurb)
.field("local_domains", &self.local_domains)
.field("local_blurb", &self.local_blurb)
.field("prometheus_config", &self.prometheus_config)
.field("deliver_concurrency", &self.deliver_concurrency)
.field("client_timeout", &self.client_timeout)
.field("proxy_config", &self.proxy_config)
.field("signature_threads", &self.signature_threads)
.finish()
}
}
@ -138,6 +170,7 @@ impl Config {
.set_default("publish_blocks", false)?
.set_default("sled_path", "./sled/db-0-34")?
.set_default("source_repo", "https://git.asonix.dog/asonix/relay")?
.set_default("repository_commit_base", "/src/commit/")?
.set_default("opentelemetry_url", None as Option<&str>)?
.set_default("telegram_token", None as Option<&str>)?
.set_default("telegram_admin_handle", None as Option<&str>)?
@ -147,13 +180,21 @@ impl Config {
.set_default("footer_blurb", None as Option<&str>)?
.set_default("local_domains", None as Option<&str>)?
.set_default("local_blurb", None as Option<&str>)?
.set_default("prometheus_addr", None as Option<&str>)?
.set_default("prometheus_port", None as Option<u16>)?
.set_default("deliver_concurrency", 8u64)?
.set_default("client_timeout", 10u64)?
.set_default("proxy_url", None as Option<&str>)?
.set_default("proxy_username", None as Option<&str>)?
.set_default("proxy_password", None as Option<&str>)?
.set_default("signature_threads", None as Option<u64>)?
.add_source(Environment::default())
.build()?;
let config: ParsedConfig = config.try_deserialize()?;
let scheme = if config.https { "https" } else { "http" };
let base_uri = iri!(format!("{}://{}", scheme, config.hostname)).into_absolute();
let base_uri = iri!(format!("{scheme}://{}", config.hostname)).into_absolute();
let tls = match (config.tls_key, config.tls_cert) {
(Some(key), Some(cert)) => Some(TlsConfig { key, cert }),
@ -175,6 +216,49 @@ impl Config {
.map(|d| d.to_string())
.collect();
let prometheus_config = match (config.prometheus_addr, config.prometheus_port) {
(Some(addr), Some(port)) => Some(PrometheusConfig { addr, port }),
(Some(_), None) => {
tracing::warn!("PROMETHEUS_ADDR is set but PROMETHEUS_PORT is not set, not building Prometheus config");
None
}
(None, Some(_)) => {
tracing::warn!("PROMETHEUS_PORT is set but PROMETHEUS_ADDR is not set, not building Prometheus config");
None
}
(None, None) => None,
};
let proxy_config = match (config.proxy_username, config.proxy_password) {
(Some(username), Some(password)) => config.proxy_url.map(|url| ProxyConfig {
url,
auth: Some((username, password)),
}),
(Some(_), None) => {
tracing::warn!(
"PROXY_USERNAME is set but PROXY_PASSWORD is not set, not setting Proxy Auth"
);
config.proxy_url.map(|url| ProxyConfig { url, auth: None })
}
(None, Some(_)) => {
tracing::warn!(
"PROXY_PASSWORD is set but PROXY_USERNAME is not set, not setting Proxy Auth"
);
config.proxy_url.map(|url| ProxyConfig { url, auth: None })
}
(None, None) => config.proxy_url.map(|url| ProxyConfig { url, auth: None }),
};
let source_url = match Self::git_hash() {
Some(hash) => format!(
"{}{}{hash}",
config.source_repo, config.repository_commit_base
)
.parse()
.expect("constructed source URL is valid"),
None => config.source_repo.clone(),
};
Ok(Config {
hostname: config.hostname,
addr: config.addr,
@ -185,7 +269,7 @@ impl Config {
publish_blocks: config.publish_blocks,
base_uri,
sled_path: config.sled_path,
source_repo: config.source_repo,
source_repo: source_url,
opentelemetry_url: config.opentelemetry_url,
telegram_token: config.telegram_token,
telegram_admin_handle: config.telegram_admin_handle,
@ -194,52 +278,81 @@ impl Config {
footer_blurb: config.footer_blurb,
local_domains,
local_blurb: config.local_blurb,
prometheus_config,
deliver_concurrency: config.deliver_concurrency,
client_timeout: config.client_timeout,
proxy_config,
signature_threads: config.signature_threads,
})
}
pub(crate) fn open_keys(&self) -> Result<Option<(Vec<Certificate>, PrivateKey)>, Error> {
pub(crate) fn signature_threads(&self) -> usize {
self.signature_threads
.unwrap_or_else(|| {
std::thread::available_parallelism()
.map(usize::from)
.map_err(|e| tracing::warn!("Failed to get parallelism, {e}"))
.unwrap_or(1)
})
.max(1)
}
pub(crate) fn client_timeout(&self) -> u64 {
self.client_timeout
}
pub(crate) fn deliver_concurrency(&self) -> u64 {
self.deliver_concurrency
}
pub(crate) fn prometheus_bind_address(&self) -> Option<SocketAddr> {
let config = self.prometheus_config.as_ref()?;
Some((config.addr, config.port).into())
}
pub(crate) async fn open_keys(&self) -> Result<Option<CertifiedKey>, Error> {
let tls = if let Some(tls) = &self.tls {
tls
} else {
tracing::warn!("No TLS config present");
tracing::info!("No TLS config present");
return Ok(None);
};
let mut certs_reader = BufReader::new(std::fs::File::open(&tls.cert)?);
let certs = rustls_pemfile::certs(&mut certs_reader)?;
let certs_bytes = tokio::fs::read(&tls.cert).await?;
let certs =
rustls_pemfile::certs(&mut certs_bytes.as_slice()).collect::<Result<Vec<_>, _>>()?;
if certs.is_empty() {
tracing::warn!("No certs read from certificate file");
return Ok(None);
}
let mut key_reader = BufReader::new(std::fs::File::open(&tls.key)?);
let key = rustls_pemfile::read_one(&mut key_reader)?;
let certs = certs.into_iter().map(Certificate).collect();
let key = if let Some(key) = key {
match key {
rustls_pemfile::Item::RSAKey(der) => PrivateKey(der),
rustls_pemfile::Item::PKCS8Key(der) => PrivateKey(der),
rustls_pemfile::Item::ECKey(der) => PrivateKey(der),
_ => {
tracing::warn!("Unknown key format: {:?}", key);
return Ok(None);
}
}
let key_bytes = tokio::fs::read(&tls.key).await?;
let key = if let Some(key) = rustls_pemfile::private_key(&mut key_bytes.as_slice())? {
key
} else {
tracing::warn!("Failed to read private key");
return Ok(None);
};
Ok(Some((certs, key)))
let key = rustls::crypto::ring::sign::any_supported_type(&key)?;
Ok(Some(CertifiedKey::new(certs, key)))
}
pub(crate) fn footer_blurb(&self) -> Option<crate::templates::Html<String>> {
if let Some(blurb) = &self.footer_blurb {
if !blurb.is_empty() {
return Some(crate::templates::Html(ammonia::clean(blurb)));
return Some(crate::templates::Html(
ammonia::Builder::new()
.add_tag_attributes("a", &["rel"])
.add_tag_attributes("area", &["rel"])
.add_tag_attributes("link", &["rel"])
.link_rel(None)
.clean(blurb)
.to_string(),
));
}
}
@ -249,7 +362,15 @@ impl Config {
pub(crate) fn local_blurb(&self) -> Option<crate::templates::Html<String>> {
if let Some(blurb) = &self.local_blurb {
if !blurb.is_empty() {
return Some(crate::templates::Html(ammonia::clean(blurb)));
return Some(crate::templates::Html(
ammonia::Builder::new()
.add_tag_attributes("a", &["rel"])
.add_tag_attributes("area", &["rel"])
.add_tag_attributes("link", &["rel"])
.link_rel(None)
.clean(blurb)
.to_string(),
));
}
}
@ -276,19 +397,6 @@ impl Config {
}
}
pub(crate) fn signature_middleware(
&self,
requests: Requests,
actors: ActorCache,
state: State,
) -> VerifySignature<MyVerify> {
if self.validate_signatures {
VerifySignature::new(MyVerify(requests, actors, state), Default::default())
} else {
VerifySignature::new(MyVerify(requests, actors, state), Default::default()).optional()
}
}
pub(crate) fn x_api_token(&self) -> Option<XApiToken> {
self.api_token.clone().map(XApiToken::new)
}
@ -298,7 +406,7 @@ impl Config {
match AdminConfig::build(api_token) {
Ok(conf) => Some(actix_web::web::Data::new(conf)),
Err(e) => {
tracing::error!("Error creating admin config: {}", e);
tracing::error!("Error creating admin config: {e}");
None
}
}
@ -337,7 +445,7 @@ impl Config {
pub(crate) fn software_version() -> String {
if let Some(git) = Self::git_version() {
return format!("v{}-{}", Self::version(), git);
return format!("v{}-{git}", Self::version());
}
format!("v{}", Self::version())
@ -345,9 +453,9 @@ impl Config {
fn git_version() -> Option<String> {
let branch = Self::git_branch()?;
let hash = Self::git_hash()?;
let hash = Self::git_short_hash()?;
Some(format!("{}-{}", branch, hash))
Some(format!("{branch}-{hash}"))
}
fn name() -> &'static str {
@ -366,6 +474,10 @@ impl Config {
option_env!("GIT_HASH")
}
fn git_short_hash() -> Option<&'static str> {
option_env!("GIT_SHORT_HASH")
}
pub(crate) fn user_agent(&self) -> String {
format!(
"{} ({}/{}; +{})",
@ -376,6 +488,12 @@ impl Config {
)
}
pub(crate) fn proxy_config(&self) -> Option<(&IriString, Option<(&str, &str)>)> {
self.proxy_config.as_ref().map(|ProxyConfig { url, auth }| {
(url, auth.as_ref().map(|(u, p)| (u.as_str(), p.as_str())))
})
}
pub(crate) fn source_code(&self) -> &IriString {
&self.source_repo
}
@ -395,37 +513,44 @@ impl Config {
self.do_generate_url(kind).expect("Generated valid IRI")
}
#[tracing::instrument(level = "debug", skip_all, fields(base_uri = tracing::field::debug(&self.base_uri), kind = tracing::field::debug(&kind)))]
fn do_generate_url(&self, kind: UrlKind) -> Result<IriString, Error> {
let iri = match kind {
UrlKind::Activity => FixedBaseResolver::new(self.base_uri.as_ref()).try_resolve(
IriRelativeStr::new(&format!("activity/{}", Uuid::new_v4()))?.as_ref(),
)?,
UrlKind::Activity => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new(&format!("activity/{}", Uuid::new_v4()))?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Actor => FixedBaseResolver::new(self.base_uri.as_ref())
.try_resolve(IriRelativeStr::new("actor")?.as_ref())?,
.resolve(IriRelativeStr::new("actor")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Followers => FixedBaseResolver::new(self.base_uri.as_ref())
.try_resolve(IriRelativeStr::new("followers")?.as_ref())?,
.resolve(IriRelativeStr::new("followers")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Following => FixedBaseResolver::new(self.base_uri.as_ref())
.try_resolve(IriRelativeStr::new("following")?.as_ref())?,
.resolve(IriRelativeStr::new("following")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Inbox => FixedBaseResolver::new(self.base_uri.as_ref())
.try_resolve(IriRelativeStr::new("inbox")?.as_ref())?,
.resolve(IriRelativeStr::new("inbox")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Index => self.base_uri.clone().into(),
UrlKind::MainKey => {
let actor = IriRelativeStr::new("actor")?;
let fragment = IriFragmentStr::new("main-key")?;
let mut resolved =
FixedBaseResolver::new(self.base_uri.as_ref()).try_resolve(actor.as_ref())?;
let mut resolved = FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(actor.as_ref())
.try_to_dedicated_string()?;
resolved.set_fragment(Some(fragment));
resolved
}
UrlKind::Media(uuid) => FixedBaseResolver::new(self.base_uri.as_ref())
.try_resolve(IriRelativeStr::new(&format!("media/{}", uuid))?.as_ref())?,
.resolve(IriRelativeStr::new(&format!("media/{uuid}"))?.as_ref())
.try_to_dedicated_string()?,
UrlKind::NodeInfo => FixedBaseResolver::new(self.base_uri.as_ref())
.try_resolve(IriRelativeStr::new("nodeinfo/2.0.json")?.as_ref())?,
.resolve(IriRelativeStr::new("nodeinfo/2.0.json")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Outbox => FixedBaseResolver::new(self.base_uri.as_ref())
.try_resolve(IriRelativeStr::new("outbox")?.as_ref())?,
.resolve(IriRelativeStr::new("outbox")?.as_ref())
.try_to_dedicated_string()?,
};
Ok(iri)
@ -437,25 +562,22 @@ impl Config {
}
fn do_generate_admin_url(&self, kind: AdminUrlKind) -> Result<IriString, Error> {
let iri = match kind {
AdminUrlKind::Allow => FixedBaseResolver::new(self.base_uri.as_ref())
.try_resolve(IriRelativeStr::new("api/v1/admin/allow")?.as_ref())?,
AdminUrlKind::Disallow => FixedBaseResolver::new(self.base_uri.as_ref())
.try_resolve(IriRelativeStr::new("api/v1/admin/disallow")?.as_ref())?,
AdminUrlKind::Block => FixedBaseResolver::new(self.base_uri.as_ref())
.try_resolve(IriRelativeStr::new("api/v1/admin/block")?.as_ref())?,
AdminUrlKind::Unblock => FixedBaseResolver::new(self.base_uri.as_ref())
.try_resolve(IriRelativeStr::new("api/v1/admin/unblock")?.as_ref())?,
AdminUrlKind::Allowed => FixedBaseResolver::new(self.base_uri.as_ref())
.try_resolve(IriRelativeStr::new("api/v1/admin/allowed")?.as_ref())?,
AdminUrlKind::Blocked => FixedBaseResolver::new(self.base_uri.as_ref())
.try_resolve(IriRelativeStr::new("api/v1/admin/blocked")?.as_ref())?,
AdminUrlKind::Connected => FixedBaseResolver::new(self.base_uri.as_ref())
.try_resolve(IriRelativeStr::new("api/v1/admin/connected")?.as_ref())?,
AdminUrlKind::Stats => FixedBaseResolver::new(self.base_uri.as_ref())
.try_resolve(IriRelativeStr::new("api/v1/admin/stats")?.as_ref())?,
let path = match kind {
AdminUrlKind::Allow => "api/v1/admin/allow",
AdminUrlKind::Disallow => "api/v1/admin/disallow",
AdminUrlKind::Block => "api/v1/admin/block",
AdminUrlKind::Unblock => "api/v1/admin/unblock",
AdminUrlKind::Allowed => "api/v1/admin/allowed",
AdminUrlKind::Blocked => "api/v1/admin/blocked",
AdminUrlKind::Connected => "api/v1/admin/connected",
AdminUrlKind::Stats => "api/v1/admin/stats",
AdminUrlKind::LastSeen => "api/v1/admin/last_seen",
};
let iri = FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new(path)?.as_ref())
.try_to_dedicated_string()?;
Ok(iri)
}
}
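The refactor above collapses eight near-identical resolver calls into one: the match now yields only a path, and a single FixedBaseResolver call builds the IRI. A minimal sketch of the same pattern, assuming iri-string's `resolve` and `format` module layout (re-exported through activitystreams in this crate); the domain in the comment is hypothetical:

use activitystreams::iri_string::{
    format::ToDedicatedString,
    resolve::FixedBaseResolver,
    types::{IriAbsoluteStr, IriRelativeStr, IriString},
};

// Resolve a relative admin path against the relay's base URI.
fn resolve_admin(base: &IriAbsoluteStr, path: &str) -> Option<IriString> {
    let relative = IriRelativeStr::new(path).ok()?;
    FixedBaseResolver::new(base)
        .resolve(relative.as_ref())
        .try_to_dedicated_string()
        .ok()
}
// e.g. base "https://relay.example/" + "api/v1/admin/allow"
//   -> "https://relay.example/api/v1/admin/allow"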


@ -1,9 +1,11 @@
mod actor;
mod last_online;
mod media;
mod node;
mod state;
pub(crate) use actor::ActorCache;
pub(crate) use last_online::LastOnline;
pub(crate) use media::MediaCache;
pub(crate) use node::{Node, NodeCache};
pub(crate) use state::State;


@ -2,7 +2,7 @@ use crate::{
apub::AcceptedActors,
db::{Actor, Db},
error::{Error, ErrorKind},
requests::Requests,
requests::{BreakerStrategy, Requests},
};
use activitystreams::{iri_string::types::IriString, prelude::*};
use std::time::{Duration, SystemTime};
@ -37,7 +37,7 @@ impl ActorCache {
ActorCache { db }
}
#[tracing::instrument(level = "debug" name = "Get Actor", skip_all, fields(id = id.to_string().as_str(), requests))]
#[tracing::instrument(level = "debug" name = "Get Actor", skip_all, fields(id = id.to_string().as_str()))]
pub(crate) async fn get(
&self,
id: &IriString,
@ -56,12 +56,8 @@ impl ActorCache {
#[tracing::instrument(level = "debug", name = "Add Connection", skip(self))]
pub(crate) async fn add_connection(&self, actor: Actor) -> Result<(), Error> {
let add_connection = self.db.add_connection(actor.id.clone());
let save_actor = self.db.save_actor(actor);
tokio::try_join!(add_connection, save_actor)?;
Ok(())
self.db.add_connection(actor.id.clone()).await?;
self.db.save_actor(actor).await
}
#[tracing::instrument(level = "debug", name = "Remove Connection", skip(self))]
@ -69,13 +65,15 @@ impl ActorCache {
self.db.remove_connection(actor.id.clone()).await
}
#[tracing::instrument(level = "debug", name = "Fetch remote actor", skip_all, fields(id = id.to_string().as_str(), requests))]
#[tracing::instrument(level = "debug", name = "Fetch remote actor", skip_all, fields(id = id.to_string().as_str()))]
pub(crate) async fn get_no_cache(
&self,
id: &IriString,
requests: &Requests,
) -> Result<Actor, Error> {
let accepted_actor = requests.fetch::<AcceptedActors>(id.as_str()).await?;
let accepted_actor = requests
.fetch::<AcceptedActors>(id, BreakerStrategy::Require2XX)
.await?;
let input_authority = id.authority_components().ok_or(ErrorKind::MissingDomain)?;
let accepted_actor_id = accepted_actor
@ -101,6 +99,6 @@ impl ActorCache {
fn get_inbox(actor: &AcceptedActors) -> Result<&IriString, Error> {
Ok(actor
.endpoints()?
.and_then(|e| e.shared_inbox)
.and_then(|e| e.shared_inbox.as_ref())
.unwrap_or(actor.inbox()?))
}
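fetch now takes a BreakerStrategy, picked per call site: Require2XX where the response body is required (actor fetches, nodeinfo documents), Allow404AndBelow for optional lookups, and Allow401AndBelow for deliveries elsewhere in this diff. The variant names are taken from the call sites; the classification below is an illustrative guess at the semantics, not the crate's actual code:

enum BreakerStrategy {
    Require2XX,
    Allow404AndBelow,
    Allow401AndBelow,
}

impl BreakerStrategy {
    // Whether a status code should count against the circuit breaker.
    fn is_failure(&self, status: u16) -> bool {
        match self {
            BreakerStrategy::Require2XX => !(200..300).contains(&status),
            BreakerStrategy::Allow404AndBelow => status > 404,
            BreakerStrategy::Allow401AndBelow => status > 401,
        }
    }
}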

src/data/last_online.rs (new file, 28 lines)

@ -0,0 +1,28 @@
use activitystreams::iri_string::types::IriStr;
use std::{collections::HashMap, sync::Mutex};
use time::OffsetDateTime;
pub(crate) struct LastOnline {
domains: Mutex<HashMap<String, OffsetDateTime>>,
}
impl LastOnline {
pub(crate) fn mark_seen(&self, iri: &IriStr) {
if let Some(authority) = iri.authority_str() {
self.domains
.lock()
.unwrap()
.insert(authority.to_string(), OffsetDateTime::now_utc());
}
}
pub(crate) fn take(&self) -> HashMap<String, OffsetDateTime> {
std::mem::take(&mut *self.domains.lock().unwrap())
}
pub(crate) fn empty() -> Self {
Self {
domains: Mutex::new(HashMap::default()),
}
}
}
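The Mutex-plus-mem::take combination is the whole trick here: writers insert under a short lock, and the flush job swaps the map out wholesale instead of iterating under the lock. A std-only sketch of the same pattern:

use std::{collections::HashMap, sync::Mutex};

struct SeenTracker {
    inner: Mutex<HashMap<String, u64>>, // u64 stands in for OffsetDateTime
}

impl SeenTracker {
    fn mark_seen(&self, domain: &str, at: u64) {
        self.inner.lock().unwrap().insert(domain.to_owned(), at);
    }

    // Swap the accumulated map for an empty one; the lock is held only
    // for the swap, never while the caller processes the entries.
    fn take(&self) -> HashMap<String, u64> {
        std::mem::take(&mut *self.inner.lock().unwrap())
    }
}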


@ -36,11 +36,9 @@ impl NodeCache {
#[tracing::instrument(level = "debug", name = "Get nodes", skip(self))]
pub(crate) async fn nodes(&self) -> Result<Vec<Node>, Error> {
let infos = self.db.connected_info();
let instances = self.db.connected_instance();
let contacts = self.db.connected_contact();
let (infos, instances, contacts) = tokio::try_join!(infos, instances, contacts)?;
let infos = self.db.connected_info().await?;
let instances = self.db.connected_instance().await?;
let contacts = self.db.connected_contact().await?;
let vec = self
.db
@ -184,7 +182,7 @@ impl Node {
let authority = url.authority_str().ok_or(ErrorKind::MissingDomain)?;
let scheme = url.scheme_str();
let base = iri!(format!("{}://{}", scheme, authority));
let base = iri!(format!("{scheme}://{authority}"));
Ok(Node {
base,


@ -1,25 +1,28 @@
use crate::{
config::{Config, UrlKind},
data::NodeCache,
db::Db,
error::Error,
requests::{Breakers, Requests},
spawner::Spawner,
};
use activitystreams::iri_string::types::IriString;
use actix_web::web;
use lru::LruCache;
use rand::thread_rng;
use reqwest_middleware::ClientWithMiddleware;
use rsa::{RsaPrivateKey, RsaPublicKey};
use std::sync::Arc;
use tokio::sync::RwLock;
use std::sync::{Arc, RwLock};
use super::LastOnline;
#[derive(Clone)]
pub struct State {
pub(crate) requests: Requests,
pub(crate) public_key: RsaPublicKey,
private_key: RsaPrivateKey,
object_cache: Arc<RwLock<LruCache<IriString, IriString>>>,
node_cache: NodeCache,
pub(crate) node_cache: NodeCache,
breakers: Breakers,
pub(crate) last_online: Arc<LastOnline>,
pub(crate) db: Db,
}
@ -34,19 +37,6 @@ impl std::fmt::Debug for State {
}
impl State {
pub(crate) fn node_cache(&self) -> NodeCache {
self.node_cache.clone()
}
pub(crate) fn requests(&self, config: &Config) -> Requests {
Requests::new(
config.generate_url(UrlKind::MainKey).to_string(),
self.private_key.clone(),
config.user_agent(),
self.breakers.clone(),
)
}
#[tracing::instrument(
level = "debug",
name = "Get inboxes for other domains",
@ -78,16 +68,25 @@ impl State {
.collect())
}
pub(crate) async fn is_cached(&self, object_id: &IriString) -> bool {
self.object_cache.read().await.contains(object_id)
pub(crate) fn is_cached(&self, object_id: &IriString) -> bool {
self.object_cache.read().unwrap().contains(object_id)
}
pub(crate) async fn cache(&self, object_id: IriString, actor_id: IriString) {
self.object_cache.write().await.put(object_id, actor_id);
pub(crate) fn cache(&self, object_id: IriString, actor_id: IriString) {
self.object_cache.write().unwrap().put(object_id, actor_id);
}
pub(crate) fn is_connected(&self, iri: &IriString) -> bool {
self.breakers.should_try(iri)
}
#[tracing::instrument(level = "debug", name = "Building state", skip_all)]
pub(crate) async fn build(db: Db) -> Result<Self, Error> {
pub(crate) async fn build(
db: Db,
key_id: String,
spawner: Spawner,
client: ClientWithMiddleware,
) -> Result<Self, Error> {
let private_key = if let Ok(Some(key)) = db.private_key().await {
tracing::debug!("Using existing key");
key
@ -106,15 +105,28 @@ impl State {
let public_key = private_key.to_public_key();
let state = State {
public_key,
let breakers = Breakers::default();
let last_online = Arc::new(LastOnline::empty());
let requests = Requests::new(
key_id,
private_key,
breakers.clone(),
last_online.clone(),
spawner,
client,
);
let state = State {
requests,
public_key,
object_cache: Arc::new(RwLock::new(LruCache::new(
(1024 * 8).try_into().expect("nonzero"),
))),
node_cache: NodeCache::new(db.clone()),
breakers: Breakers::default(),
breakers,
db,
last_online,
};
Ok(state)
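Note the cache methods above lost their `async`: with `std::sync::RwLock` the lock is held only for a quick in-memory lookup or insert, so there is no await point to worry about. A std-only sketch of the shape, with a HashMap standing in for the LruCache:

use std::{
    collections::HashMap,
    sync::{Arc, RwLock},
};

#[derive(Clone)]
struct ObjectCache(Arc<RwLock<HashMap<String, String>>>);

impl ObjectCache {
    fn is_cached(&self, object_id: &str) -> bool {
        self.0.read().unwrap().contains_key(object_id) // no .await
    }

    fn cache(&self, object_id: String, actor_id: String) {
        self.0.write().unwrap().insert(object_id, actor_id);
    }
}

The trade-off is that a sync lock must never be held across an await, which these one-line critical sections guarantee.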

src/db.rs (164 changed lines)

@ -7,8 +7,16 @@ use rsa::{
pkcs8::{DecodePrivateKey, EncodePrivateKey},
RsaPrivateKey,
};
use sled::Tree;
use std::{collections::HashMap, sync::Arc, time::SystemTime};
use sled::{Batch, Tree};
use std::{
collections::{BTreeMap, HashMap},
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
time::SystemTime,
};
use time::OffsetDateTime;
use uuid::Uuid;
#[derive(Clone, Debug)]
@ -17,6 +25,8 @@ pub(crate) struct Db {
}
struct Inner {
healthz: Tree,
healthz_counter: Arc<AtomicU64>,
actor_id_actor: Tree,
public_key_id_actor_id: Tree,
connected_actor_ids: Tree,
@ -28,6 +38,7 @@ struct Inner {
actor_id_info: Tree,
actor_id_instance: Tree,
actor_id_contact: Tree,
last_seen: Tree,
restricted_mode: bool,
}
@ -236,6 +247,8 @@ impl Db {
fn build_inner(restricted_mode: bool, db: sled::Db) -> Result<Self, Error> {
Ok(Db {
inner: Arc::new(Inner {
healthz: db.open_tree("healthz")?,
healthz_counter: Arc::new(AtomicU64::new(0)),
actor_id_actor: db.open_tree("actor-id-actor")?,
public_key_id_actor_id: db.open_tree("public-key-id-actor-id")?,
connected_actor_ids: db.open_tree("connected-actor-ids")?,
@ -247,6 +260,7 @@ impl Db {
actor_id_info: db.open_tree("actor-id-info")?,
actor_id_instance: db.open_tree("actor-id-instance")?,
actor_id_contact: db.open_tree("actor-id-contact")?,
last_seen: db.open_tree("last-seen")?,
restricted_mode,
}),
})
@ -254,7 +268,7 @@ impl Db {
async fn unblock<T>(
&self,
f: impl Fn(&Inner) -> Result<T, Error> + Send + 'static,
f: impl FnOnce(&Inner) -> Result<T, Error> + Send + 'static,
) -> Result<T, Error>
where
T: Send + 'static,
@ -266,6 +280,63 @@ impl Db {
Ok(t)
}
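Loosening `Fn` to `FnOnce` matters for the new batch code below: a `sled::Batch` is consumed by `apply_batch`, so the closure can only run once. A minimal illustration of the difference:

fn run_once<T>(f: impl FnOnce() -> T) -> T {
    f()
}

fn main() {
    let owned = String::from("batch");
    // `owned` is moved out of the closure when it runs; this needs
    // FnOnce, and would not type-check against `impl Fn() -> T`.
    let out = run_once(move || owned);
    assert_eq!(out, "batch");
}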
pub(crate) async fn check_health(&self) -> Result<(), Error> {
let next = self.inner.healthz_counter.fetch_add(1, Ordering::Relaxed);
self.unblock(move |inner| {
inner
.healthz
.insert("healthz", &next.to_be_bytes()[..])
.map_err(Error::from)
})
.await?;
self.inner.healthz.flush_async().await?;
self.unblock(move |inner| inner.healthz.get("healthz").map_err(Error::from))
.await?;
Ok(())
}
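check_health round-trips a counter through the healthz tree: write, flush to disk, read back, so the probe fails if either path stalls. The counter itself only needs to produce a fresh value per probe, which is why Ordering::Relaxed suffices; a std-only sketch:

use std::sync::atomic::{AtomicU64, Ordering};

// A relaxed fetch_add is enough here: each probe just needs a value
// distinct from the last one, with no ordering against other memory.
static HEALTHZ_COUNTER: AtomicU64 = AtomicU64::new(0);

fn next_probe_value() -> u64 {
    HEALTHZ_COUNTER.fetch_add(1, Ordering::Relaxed)
}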
pub(crate) async fn mark_last_seen(
&self,
nodes: HashMap<String, OffsetDateTime>,
) -> Result<(), Error> {
let mut batch = Batch::default();
for (domain, datetime) in nodes {
let datetime_string = serde_json::to_vec(&datetime)?;
batch.insert(domain.as_bytes(), datetime_string);
}
self.unblock(move |inner| inner.last_seen.apply_batch(batch).map_err(Error::from))
.await
}
pub(crate) async fn last_seen(
&self,
) -> Result<BTreeMap<String, Option<OffsetDateTime>>, Error> {
self.unblock(|inner| {
let mut map = BTreeMap::new();
for iri in inner.connected() {
let Some(authority_str) = iri.authority_str() else {
continue;
};
if let Some(datetime) = inner.last_seen.get(authority_str)? {
map.insert(
authority_str.to_string(),
Some(serde_json::from_slice(&datetime)?),
);
} else {
map.insert(authority_str.to_string(), None);
}
}
Ok(map)
})
.await
}
pub(crate) async fn connected_ids(&self) -> Result<Vec<IriString>, Error> {
self.unblock(|inner| Ok(inner.connected().collect())).await
}
@ -285,12 +356,12 @@ impl Db {
pub(crate) async fn info(&self, actor_id: IriString) -> Result<Option<Info>, Error> {
self.unblock(move |inner| {
if let Some(ivec) = inner.actor_id_info.get(actor_id.as_str().as_bytes())? {
let info = serde_json::from_slice(&ivec)?;
Ok(Some(info))
} else {
Ok(None)
}
inner
.actor_id_info
.get(actor_id.as_str().as_bytes())?
.map(|ivec| serde_json::from_slice(&ivec))
.transpose()
.map_err(Error::from)
})
.await
}
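This `map` + `transpose` shape recurs through the rest of the file: `Option<Result<T, E>>` flips into `Result<Option<T>, E>`, so a missing key stays `Ok(None)` while a decode failure still propagates. A std-only example:

fn parse_opt(raw: Option<&str>) -> Result<Option<i32>, std::num::ParseIntError> {
    raw.map(str::parse::<i32>) // Option<Result<i32, _>>
        .transpose()           // Result<Option<i32>, _>
}

fn main() {
    assert_eq!(parse_opt(Some("3")), Ok(Some(3)));
    assert_eq!(parse_opt(None), Ok(None));
    assert!(parse_opt(Some("x")).is_err());
}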
@ -319,12 +390,12 @@ impl Db {
pub(crate) async fn instance(&self, actor_id: IriString) -> Result<Option<Instance>, Error> {
self.unblock(move |inner| {
if let Some(ivec) = inner.actor_id_instance.get(actor_id.as_str().as_bytes())? {
let instance = serde_json::from_slice(&ivec)?;
Ok(Some(instance))
} else {
Ok(None)
}
inner
.actor_id_instance
.get(actor_id.as_str().as_bytes())?
.map(|ivec| serde_json::from_slice(&ivec))
.transpose()
.map_err(Error::from)
})
.await
}
@ -353,12 +424,12 @@ impl Db {
pub(crate) async fn contact(&self, actor_id: IriString) -> Result<Option<Contact>, Error> {
self.unblock(move |inner| {
if let Some(ivec) = inner.actor_id_contact.get(actor_id.as_str().as_bytes())? {
let contact = serde_json::from_slice(&ivec)?;
Ok(Some(contact))
} else {
Ok(None)
}
inner
.actor_id_contact
.get(actor_id.as_str().as_bytes())?
.map(|ivec| serde_json::from_slice(&ivec))
.transpose()
.map_err(Error::from)
})
.await
}
@ -383,22 +454,20 @@ impl Db {
pub(crate) async fn media_id(&self, url: IriString) -> Result<Option<Uuid>, Error> {
self.unblock(move |inner| {
if let Some(ivec) = inner.media_url_media_id.get(url.as_str().as_bytes())? {
Ok(uuid_from_ivec(ivec))
} else {
Ok(None)
}
Ok(inner
.media_url_media_id
.get(url.as_str().as_bytes())?
.and_then(uuid_from_ivec))
})
.await
}
pub(crate) async fn media_url(&self, id: Uuid) -> Result<Option<IriString>, Error> {
self.unblock(move |inner| {
if let Some(ivec) = inner.media_id_media_url.get(id.as_bytes())? {
Ok(url_from_ivec(ivec))
} else {
Ok(None)
}
Ok(inner
.media_id_media_url
.get(id.as_bytes())?
.and_then(url_from_ivec))
})
.await
}
@ -419,7 +488,7 @@ impl Db {
pub(crate) async fn is_connected(&self, base_id: IriString) -> Result<bool, Error> {
let scheme = base_id.scheme_str();
let authority = base_id.authority_str().ok_or(ErrorKind::MissingDomain)?;
let prefix = format!("{}://{}", scheme, authority);
let prefix = format!("{scheme}://{authority}");
self.unblock(move |inner| {
let connected = inner
@ -438,26 +507,22 @@ impl Db {
public_key_id: IriString,
) -> Result<Option<IriString>, Error> {
self.unblock(move |inner| {
if let Some(ivec) = inner
Ok(inner
.public_key_id_actor_id
.get(public_key_id.as_str().as_bytes())?
{
Ok(url_from_ivec(ivec))
} else {
Ok(None)
}
.and_then(url_from_ivec))
})
.await
}
pub(crate) async fn actor(&self, actor_id: IriString) -> Result<Option<Actor>, Error> {
self.unblock(move |inner| {
if let Some(ivec) = inner.actor_id_actor.get(actor_id.as_str().as_bytes())? {
let actor = serde_json::from_slice(&ivec)?;
Ok(Some(actor))
} else {
Ok(None)
}
inner
.actor_id_actor
.get(actor_id.as_str().as_bytes())?
.map(|ivec| serde_json::from_slice(&ivec))
.transpose()
.map_err(Error::from)
})
.await
}
@ -479,7 +544,7 @@ impl Db {
}
pub(crate) async fn remove_connection(&self, actor_id: IriString) -> Result<(), Error> {
tracing::debug!("Removing Connection: {}", actor_id);
tracing::debug!("Removing Connection: {actor_id}");
self.unblock(move |inner| {
inner
.connected_actor_ids
@ -491,7 +556,7 @@ impl Db {
}
pub(crate) async fn add_connection(&self, actor_id: IriString) -> Result<(), Error> {
tracing::debug!("Adding Connection: {}", actor_id);
tracing::debug!("Adding Connection: {actor_id}");
self.unblock(move |inner| {
inner
.connected_actor_ids
@ -685,6 +750,11 @@ mod tests {
{
let db =
Db::build_inner(true, sled::Config::new().temporary(true).open().unwrap()).unwrap();
actix_rt::System::new().block_on((f)(db));
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
.block_on((f)(db));
}
}


@ -1,49 +1,85 @@
use activitystreams::checked::CheckError;
use actix_rt::task::JoinError;
use actix_web::{
error::{BlockingError, ResponseError},
http::StatusCode,
HttpResponse,
};
use http_signature_normalization_actix::PrepareSignError;
use std::{convert::Infallible, fmt::Debug, io};
use tracing_error::SpanTrace;
use background_jobs::BoxError;
use color_eyre::eyre::Error as Report;
use http_signature_normalization_reqwest::SignError;
use std::{convert::Infallible, io, sync::Arc};
use tokio::task::JoinError;
#[derive(Clone)]
struct ArcKind {
kind: Arc<ErrorKind>,
}
impl std::fmt::Debug for ArcKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.kind.fmt(f)
}
}
impl std::fmt::Display for ArcKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.kind.fmt(f)
}
}
impl std::error::Error for ArcKind {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
self.kind.source()
}
}
pub(crate) struct Error {
context: SpanTrace,
kind: ErrorKind,
kind: ArcKind,
display: Box<str>,
debug: Box<str>,
}
impl Error {
fn kind(&self) -> &ErrorKind {
&self.kind.kind
}
pub(crate) fn is_breaker(&self) -> bool {
matches!(self.kind, ErrorKind::Breaker)
matches!(self.kind(), ErrorKind::Breaker)
}
pub(crate) fn is_not_found(&self) -> bool {
matches!(self.kind, ErrorKind::Status(_, StatusCode::NOT_FOUND))
matches!(self.kind(), ErrorKind::Status(_, StatusCode::NOT_FOUND))
}
pub(crate) fn is_bad_request(&self) -> bool {
matches!(self.kind, ErrorKind::Status(_, StatusCode::BAD_REQUEST))
matches!(self.kind(), ErrorKind::Status(_, StatusCode::BAD_REQUEST))
}
pub(crate) fn is_gone(&self) -> bool {
matches!(self.kind(), ErrorKind::Status(_, StatusCode::GONE))
}
pub(crate) fn is_malformed_json(&self) -> bool {
matches!(self.kind(), ErrorKind::Json(_))
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(f, "{:?}", self.kind)
f.write_str(&self.debug)
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(f, "{}", self.kind)?;
std::fmt::Display::fmt(&self.context, f)
f.write_str(&self.display)
}
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
self.kind.source()
self.kind().source()
}
}
@ -52,59 +88,79 @@ where
ErrorKind: From<T>,
{
fn from(error: T) -> Self {
let kind = ArcKind {
kind: Arc::new(ErrorKind::from(error)),
};
let report = Report::new(kind.clone());
let display = format!("{report}");
let debug = format!("{report:?}");
Error {
context: SpanTrace::capture(),
kind: error.into(),
kind,
display: Box::from(display),
debug: Box::from(debug),
}
}
}
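Two things happen in this From impl: the kind moves behind an Arc so Error becomes cheap to clone, and the Display/Debug renderings (via color_eyre's Report) are captured once up front, so later formatting never re-walks the error chain. A std-only sketch of the shape, with a string standing in for ErrorKind:

use std::{fmt, sync::Arc};

#[derive(Clone)]
struct CheapError {
    kind: Arc<str>,    // stand-in for Arc<ErrorKind>
    display: Box<str>, // rendered once, at construction
}

impl CheapError {
    fn new(kind: &str) -> Self {
        let display = format!("error: {kind}").into_boxed_str();
        CheapError { kind: Arc::from(kind), display }
    }
}

impl fmt::Display for CheapError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.display) // no re-formatting of the source chain
    }
}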
#[derive(Debug, thiserror::Error)]
pub(crate) enum ErrorKind {
#[error("Error queueing job, {0}")]
Queue(anyhow::Error),
#[error("Error in extractor")]
Extractor(#[from] crate::extractors::ErrorKind),
#[error("Error in configuration, {0}")]
#[error("Error queueing job")]
Queue(#[from] BoxError),
#[error("Error in configuration")]
Config(#[from] config::ConfigError),
#[error("Couldn't parse key, {0}")]
#[error("Couldn't parse key")]
Pkcs8(#[from] rsa::pkcs8::Error),
#[error("Couldn't encode public key, {0}")]
#[error("Couldn't encode public key")]
Spki(#[from] rsa::pkcs8::spki::Error),
#[error("Couldn't parse IRI, {0}")]
#[error("Couldn't sign request")]
SignRequest,
#[error("Couldn't make request")]
Reqwest(#[from] reqwest::Error),
#[error("Couldn't make request")]
ReqwestMiddleware(#[from] reqwest_middleware::Error),
#[error("Couldn't parse IRI")]
ParseIri(#[from] activitystreams::iri_string::validate::Error),
#[error("Couldn't normalize IRI, {0}")]
NormalizeIri(
#[from]
activitystreams::iri_string::task::Error<activitystreams::iri_string::normalize::Error>,
),
#[error("Couldn't normalize IRI")]
NormalizeIri(#[from] std::collections::TryReserveError),
#[error("Couldn't perform IO, {0}")]
#[error("Couldn't perform IO")]
Io(#[from] io::Error),
#[error("Couldn't sign string, {0}")]
Rsa(rsa::errors::Error),
#[error("Couldn't use db, {0}")]
#[error("Couldn't use db")]
Sled(#[from] sled::Error),
#[error("Couldn't do the json thing, {0}")]
#[error("Couldn't do the json thing")]
Json(#[from] serde_json::Error),
#[error("Couldn't build signing string, {0}")]
PrepareSign(#[from] PrepareSignError),
#[error("Couldn't sign request")]
Sign(#[from] SignError),
#[error("Couldn't sign digest")]
Signature(#[from] signature::Error),
Signature(#[from] rsa::signature::Error),
#[error("Couldn't read signature")]
ReadSignature(signature::Error),
#[error("Couldn't prepare TLS private key")]
PrepareKey(#[from] rustls::Error),
#[error("Couldn't verify signature")]
VerifySignature(signature::Error),
VerifySignature,
#[error("Failed to encode key der")]
DerEncode,
#[error("Couldn't parse the signature header")]
HeaderValidation(#[from] actix_web::http::header::InvalidHeaderValue),
@ -125,15 +181,15 @@ pub(crate) enum ErrorKind {
BadActor(String, String),
#[error("Signature verification is required, but no signature was given")]
NoSignature(String),
NoSignature(Option<String>),
#[error("Wrong ActivityPub kind, {0}")]
Kind(String),
#[error("Too many CPUs, {0}")]
#[error("Too many CPUs")]
CpuCount(#[from] std::num::TryFromIntError),
#[error("{0}")]
#[error("Host mismatch")]
HostMismatch(#[from] CheckError),
#[error("Couldn't flush buffer")]
@ -187,7 +243,7 @@ pub(crate) enum ErrorKind {
impl ResponseError for Error {
fn status_code(&self) -> StatusCode {
match self.kind {
match self.kind() {
ErrorKind::NotAllowed(_) | ErrorKind::WrongActor(_) | ErrorKind::BadActor(_, _) => {
StatusCode::FORBIDDEN
}
@ -196,7 +252,8 @@ impl ResponseError for Error {
ErrorKind::Kind(_)
| ErrorKind::MissingKind
| ErrorKind::MissingId
| ErrorKind::ObjectCount => StatusCode::BAD_REQUEST,
| ErrorKind::ObjectCount
| ErrorKind::NoSignature(_) => StatusCode::BAD_REQUEST,
_ => StatusCode::INTERNAL_SERVER_ERROR,
}
}
@ -206,7 +263,7 @@ impl ResponseError for Error {
.insert_header(("Content-Type", "application/activity+json"))
.body(
serde_json::to_string(&serde_json::json!({
"error": self.kind.to_string(),
"error": self.kind().to_string(),
}))
.unwrap_or_else(|_| "{}".to_string()),
)
@ -236,3 +293,15 @@ impl From<rsa::errors::Error> for ErrorKind {
ErrorKind::Rsa(e)
}
}
impl From<http_signature_normalization_actix::Canceled> for ErrorKind {
fn from(_: http_signature_normalization_actix::Canceled) -> Self {
Self::Canceled
}
}
impl From<http_signature_normalization_reqwest::Canceled> for ErrorKind {
fn from(_: http_signature_normalization_reqwest::Canceled) -> Self {
Self::Canceled
}
}


@ -1,20 +1,15 @@
use actix_web::{
dev::Payload,
error::{BlockingError, ParseError},
http::{
header::{from_one_raw_str, Header, HeaderName, HeaderValue, TryIntoHeaderValue},
StatusCode,
},
error::ParseError,
http::header::{from_one_raw_str, Header, HeaderName, HeaderValue, TryIntoHeaderValue},
web::Data,
FromRequest, HttpMessage, HttpRequest, HttpResponse, ResponseError,
FromRequest, HttpMessage, HttpRequest,
};
use bcrypt::{BcryptError, DEFAULT_COST};
use futures_util::future::LocalBoxFuture;
use http_signature_normalization_actix::prelude::InvalidHeaderValue;
use http_signature_normalization_actix::{prelude::InvalidHeaderValue, Canceled, Spawn};
use std::{convert::Infallible, str::FromStr, time::Instant};
use tracing_error::SpanTrace;
use crate::db::Db;
use crate::{db::Db, error::Error, future::LocalBoxFuture, spawner::Spawner};
#[derive(Clone)]
pub(crate) struct AdminConfig {
@ -29,7 +24,7 @@ impl AdminConfig {
}
fn verify(&self, token: XApiToken) -> Result<bool, Error> {
bcrypt::verify(&token.0, &self.hashed_api_token).map_err(Error::bcrypt_verify)
bcrypt::verify(token.0, &self.hashed_api_token).map_err(Error::bcrypt_verify)
}
}
@ -37,10 +32,10 @@ pub(crate) struct Admin {
db: Data<Db>,
}
type PrepareTuple = (Data<Db>, Data<AdminConfig>, Data<Spawner>, XApiToken);
impl Admin {
fn prepare_verify(
req: &HttpRequest,
) -> Result<(Data<Db>, Data<AdminConfig>, XApiToken), Error> {
fn prepare_verify(req: &HttpRequest) -> Result<PrepareTuple, Error> {
let hashed_api_token = req
.app_data::<Data<AdminConfig>>()
.ok_or_else(Error::missing_config)?
@ -53,16 +48,23 @@ impl Admin {
.ok_or_else(Error::missing_db)?
.clone();
Ok((db, hashed_api_token, x_api_token))
let spawner = req
.app_data::<Data<Spawner>>()
.ok_or_else(Error::missing_spawner)?
.clone();
Ok((db, hashed_api_token, spawner, x_api_token))
}
#[tracing::instrument(level = "debug", skip_all)]
async fn verify(
hashed_api_token: Data<AdminConfig>,
spawner: Data<Spawner>,
x_api_token: XApiToken,
) -> Result<(), Error> {
let span = tracing::Span::current();
if actix_web::web::block(move || span.in_scope(|| hashed_api_token.verify(x_api_token)))
if spawner
.spawn_blocking(move || span.in_scope(|| hashed_api_token.verify(x_api_token)))
.await
.map_err(Error::canceled)??
{
@ -77,67 +79,42 @@ impl Admin {
}
}
#[derive(Debug, thiserror::Error)]
#[error("Failed authentication")]
pub(crate) struct Error {
context: SpanTrace,
#[source]
kind: ErrorKind,
}
impl Error {
fn invalid() -> Self {
Error {
context: SpanTrace::capture(),
kind: ErrorKind::Invalid,
}
Error::from(ErrorKind::Invalid)
}
fn missing_config() -> Self {
Error {
context: SpanTrace::capture(),
kind: ErrorKind::MissingConfig,
}
Error::from(ErrorKind::MissingConfig)
}
fn missing_db() -> Self {
Error {
context: SpanTrace::capture(),
kind: ErrorKind::MissingDb,
}
Error::from(ErrorKind::MissingDb)
}
fn missing_spawner() -> Self {
Error::from(ErrorKind::MissingSpawner)
}
fn bcrypt_verify(e: BcryptError) -> Self {
Error {
context: SpanTrace::capture(),
kind: ErrorKind::BCryptVerify(e),
}
Error::from(ErrorKind::BCryptVerify(e))
}
fn bcrypt_hash(e: BcryptError) -> Self {
Error {
context: SpanTrace::capture(),
kind: ErrorKind::BCryptHash(e),
}
Error::from(ErrorKind::BCryptHash(e))
}
fn parse_header(e: ParseError) -> Self {
Error {
context: SpanTrace::capture(),
kind: ErrorKind::ParseHeader(e),
}
Error::from(ErrorKind::ParseHeader(e))
}
fn canceled(_: BlockingError) -> Self {
Error {
context: SpanTrace::capture(),
kind: ErrorKind::Canceled,
}
fn canceled(_: Canceled) -> Self {
Error::from(ErrorKind::Canceled)
}
}
#[derive(Debug, thiserror::Error)]
enum ErrorKind {
pub(crate) enum ErrorKind {
#[error("Invalid API Token")]
Invalid,
@ -147,6 +124,9 @@ enum ErrorKind {
#[error("Missing Db")]
MissingDb,
#[error("Missing Spawner")]
MissingSpawner,
#[error("Panic in verify")]
Canceled,
@ -160,20 +140,6 @@ enum ErrorKind {
ParseHeader(#[source] ParseError),
}
impl ResponseError for Error {
fn status_code(&self) -> StatusCode {
match self.kind {
ErrorKind::Invalid | ErrorKind::ParseHeader(_) => StatusCode::BAD_REQUEST,
_ => StatusCode::INTERNAL_SERVER_ERROR,
}
}
fn error_response(&self) -> HttpResponse {
HttpResponse::build(self.status_code())
.json(serde_json::json!({ "msg": self.kind.to_string() }))
}
}
impl FromRequest for Admin {
type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self, Self::Error>>;
@ -182,12 +148,10 @@ impl FromRequest for Admin {
let now = Instant::now();
let res = Self::prepare_verify(req);
Box::pin(async move {
let (db, c, t) = res?;
Self::verify(c, t).await?;
metrics::histogram!(
"relay.admin.verify",
now.elapsed().as_micros() as f64 / 1_000_000_f64
);
let (db, c, s, t) = res?;
Self::verify(c, s, t).await?;
metrics::histogram!("relay.admin.verify")
.record(now.elapsed().as_micros() as f64 / 1_000_000_f64);
Ok(Admin { db })
})
}
@ -226,3 +190,9 @@ impl FromStr for XApiToken {
Ok(XApiToken(s.to_string()))
}
}
impl std::fmt::Display for XApiToken {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}

src/future.rs (new file, 4 lines)

@ -0,0 +1,4 @@
use std::{future::Future, pin::Pin};
pub(crate) type LocalBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + 'a>>;
pub(crate) type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;
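The Send bound is the entire difference between the two aliases: BoxFuture can hop threads on the Tokio runtime (which the reworked Job impls below require), while LocalBoxFuture may capture !Send types like Rc. For example:

use std::{future::Future, pin::Pin, rc::Rc};

type LocalBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + 'a>>;
type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;

fn boxed_send() -> BoxFuture<'static, u32> {
    Box::pin(async { 42 }) // captures nothing, so the future is Send
}

fn boxed_local() -> LocalBoxFuture<'static, u32> {
    let rc = Rc::new(41); // Rc is !Send
    Box::pin(async move { *rc + 1 }) // only fits the local alias
}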


@ -5,6 +5,7 @@ mod deliver_many;
mod instance;
mod nodeinfo;
mod process_listeners;
mod record_last_online;
pub(crate) use self::{
contact::QueryContact, deliver::Deliver, deliver_many::DeliverMany, instance::QueryInstance,
@ -13,16 +14,17 @@ pub(crate) use self::{
use crate::{
config::Config,
data::{ActorCache, MediaCache, NodeCache, State},
data::{ActorCache, MediaCache, State},
error::{Error, ErrorKind},
jobs::process_listeners::Listeners,
requests::Requests,
jobs::{process_listeners::Listeners, record_last_online::RecordLastOnline},
};
use background_jobs::{
memory_storage::{ActixTimer, Storage},
Job, Manager, QueueHandle, WorkerConfig,
memory_storage::{Storage, TokioTimer},
metrics::MetricsStorage,
tokio::{QueueHandle, WorkerConfig},
Job,
};
use std::{convert::TryFrom, num::NonZeroUsize, time::Duration};
use std::time::Duration;
fn debug_object(activity: &serde_json::Value) -> &serde_json::Value {
let mut object = &activity["object"]["type"];
@ -43,25 +45,28 @@ pub(crate) fn create_workers(
actors: ActorCache,
media: MediaCache,
config: Config,
) -> (Manager, JobServer) {
let parallelism = std::thread::available_parallelism()
.unwrap_or_else(|_| NonZeroUsize::try_from(1).expect("nonzero"));
) -> std::io::Result<JobServer> {
let deliver_concurrency = config.deliver_concurrency();
let shared = WorkerConfig::new_managed(Storage::new(ActixTimer), move |queue_handle| {
JobState::new(
state.clone(),
actors.clone(),
JobServer::new(queue_handle),
media.clone(),
config.clone(),
)
})
let queue_handle = WorkerConfig::new(
MetricsStorage::wrap(Storage::new(TokioTimer)),
move |queue_handle| {
JobState::new(
state.clone(),
actors.clone(),
JobServer::new(queue_handle),
media.clone(),
config.clone(),
)
},
)
.register::<Deliver>()
.register::<DeliverMany>()
.register::<QueryNodeinfo>()
.register::<QueryInstance>()
.register::<Listeners>()
.register::<QueryContact>()
.register::<RecordLastOnline>()
.register::<apub::Announce>()
.register::<apub::Follow>()
.register::<apub::Forward>()
@ -69,24 +74,21 @@ pub(crate) fn create_workers(
.register::<apub::Undo>()
.set_worker_count("maintenance", 2)
.set_worker_count("apub", 2)
.set_worker_count("deliver", 8)
.start_with_threads(parallelism);
.set_worker_count("deliver", deliver_concurrency)
.start()?;
shared.every(Duration::from_secs(60 * 5), Listeners);
queue_handle.every(Duration::from_secs(60 * 5), Listeners)?;
queue_handle.every(Duration::from_secs(60 * 10), RecordLastOnline)?;
let job_server = JobServer::new(shared.queue_handle().clone());
(shared, job_server)
Ok(JobServer::new(queue_handle))
}
#[derive(Clone, Debug)]
pub(crate) struct JobState {
requests: Requests,
state: State,
actors: ActorCache,
config: Config,
media: MediaCache,
node_cache: NodeCache,
job_server: JobServer,
}
@ -112,12 +114,10 @@ impl JobState {
config: Config,
) -> Self {
JobState {
requests: state.requests(&config),
node_cache: state.node_cache(),
state,
actors,
config,
media,
state,
job_server,
}
}


@ -36,13 +36,13 @@ async fn get_inboxes(
state.inboxes_without(&actor.inbox, &authority).await
}
fn prepare_activity<T, U, V, Kind>(
fn prepare_activity<T, U, V>(
mut t: T,
id: impl TryInto<IriString, Error = U>,
to: impl TryInto<IriString, Error = V>,
) -> Result<T, Error>
where
T: ObjectExt<Kind> + BaseExt<Kind>,
T: ObjectExt + BaseExt,
Error: From<U> + From<V>,
{
t.set_id(id.try_into()?)


@ -2,14 +2,14 @@ use crate::{
config::{Config, UrlKind},
db::Actor,
error::Error,
future::BoxFuture,
jobs::{
apub::{get_inboxes, prepare_activity},
DeliverMany, JobState,
},
};
use activitystreams::{activity::Announce as AsAnnounce, iri_string::types::IriString};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use background_jobs::Job;
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Announce {
@ -42,7 +42,7 @@ impl Announce {
.queue(DeliverMany::new(inboxes, announce)?)
.await?;
state.state.cache(self.object_id, activity_id).await;
state.state.cache(self.object_id, activity_id);
Ok(())
}
}
@ -62,14 +62,15 @@ fn generate_announce(
)
}
impl ActixJob for Announce {
impl Job for Announce {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Announce";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) })
Box::pin(self.perform(state))
}
}
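This is the migration pattern repeated for every job below: `ActixJob` becomes `Job`, the hand-rolled anyhow adapter future disappears, and because `Job` carries an associated `Error` type the `perform` future can be boxed directly. Reduced to its shape, with local stand-ins for the trait machinery:

use std::{future::Future, pin::Pin};

type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;

struct MyJob;

impl MyJob {
    async fn perform(self) -> Result<(), String> {
        Ok(())
    }
}

// Before: Box::pin(async move { self.perform(state).await.map_err(Into::into) })
// After: the error types line up, so no adapter async block is needed.
fn run(job: MyJob) -> BoxFuture<'static, Result<(), String>> {
    Box::pin(job.perform())
}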


@ -3,6 +3,7 @@ use crate::{
config::{Config, UrlKind},
db::Actor,
error::{Error, ErrorKind},
future::BoxFuture,
jobs::{apub::prepare_activity, Deliver, JobState, QueryInstance, QueryNodeinfo},
};
use activitystreams::{
@ -10,8 +11,7 @@ use activitystreams::{
iri_string::types::IriString,
prelude::*,
};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use background_jobs::Job;
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Follow {
@ -111,14 +111,15 @@ fn generate_accept_follow(
)
}
impl ActixJob for Follow {
impl Job for Follow {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Follow";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) })
Box::pin(self.perform(state))
}
}


@ -2,11 +2,11 @@ use crate::{
apub::AcceptedActivities,
db::Actor,
error::{Error, ErrorKind},
future::BoxFuture,
jobs::{apub::get_inboxes, DeliverMany, JobState},
};
use activitystreams::prelude::*;
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use background_jobs::Job;
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Forward {
@ -47,14 +47,15 @@ impl Forward {
}
}
impl ActixJob for Forward {
impl Job for Forward {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Forward";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) })
Box::pin(self.perform(state))
}
}


@ -2,10 +2,10 @@ use crate::{
config::UrlKind,
db::Actor,
error::Error,
future::BoxFuture,
jobs::{apub::generate_undo_follow, Deliver, JobState},
};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use background_jobs::Job;
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Reject(pub(crate) Actor);
@ -33,14 +33,15 @@ impl Reject {
}
}
impl ActixJob for Reject {
impl Job for Reject {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Reject";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) })
Box::pin(self.perform(state))
}
}


@ -3,11 +3,11 @@ use crate::{
config::UrlKind,
db::Actor,
error::Error,
future::BoxFuture,
jobs::{apub::generate_undo_follow, Deliver, JobState},
};
use activitystreams::prelude::BaseExt;
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use background_jobs::Job;
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Undo {
@ -48,14 +48,15 @@ impl Undo {
}
}
impl ActixJob for Undo {
impl Job for Undo {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Undo";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) })
Box::pin(self.perform(state))
}
}


@ -1,11 +1,12 @@
use crate::{
apub::AcceptedActors,
error::{Error, ErrorKind},
future::BoxFuture,
jobs::JobState,
requests::BreakerStrategy,
};
use activitystreams::{iri_string::types::IriString, object::Image, prelude::*};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use background_jobs::Job;
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct QueryContact {
@ -32,6 +33,7 @@ impl QueryContact {
async fn perform(self, state: JobState) -> Result<(), Error> {
let contact_outdated = state
.state
.node_cache
.is_contact_outdated(self.actor_id.clone())
.await;
@ -41,8 +43,9 @@ impl QueryContact {
}
let contact = match state
.state
.requests
.fetch::<AcceptedActors>(self.contact_id.as_str())
.fetch::<AcceptedActors>(&self.contact_id, BreakerStrategy::Allow404AndBelow)
.await
{
Ok(contact) => contact,
@ -57,6 +60,7 @@ impl QueryContact {
to_contact(contact).ok_or(ErrorKind::Extract("contact"))?;
state
.state
.node_cache
.set_contact(self.actor_id, username, display_name, url, avatar)
.await?;
@ -81,15 +85,16 @@ fn to_contact(contact: AcceptedActors) -> Option<(String, String, IriString, Iri
Some((username, display_name, url, avatar))
}
impl ActixJob for QueryContact {
impl Job for QueryContact {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::QueryContact";
const QUEUE: &'static str = "maintenance";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) })
Box::pin(self.perform(state))
}
}


@ -1,10 +1,11 @@
use crate::{
error::Error,
future::BoxFuture,
jobs::{debug_object, JobState},
requests::BreakerStrategy,
};
use activitystreams::iri_string::types::IriString;
use background_jobs::{ActixJob, Backoff};
use std::{future::Future, pin::Pin};
use background_jobs::{Backoff, Job};
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Deliver {
@ -34,8 +35,13 @@ impl Deliver {
}
#[tracing::instrument(name = "Deliver", skip(state))]
async fn permform(self, state: JobState) -> Result<(), Error> {
if let Err(e) = state.requests.deliver(self.to, &self.data).await {
async fn perform(self, state: JobState) -> Result<(), Error> {
if let Err(e) = state
.state
.requests
.deliver(&self.to, &self.data, BreakerStrategy::Allow401AndBelow)
.await
{
if e.is_breaker() {
tracing::debug!("Not trying due to failed breaker");
return Ok(());
@ -50,15 +56,16 @@ impl Deliver {
}
}
impl ActixJob for Deliver {
impl Job for Deliver {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::Deliver";
const QUEUE: &'static str = "deliver";
const BACKOFF: Backoff = Backoff::Exponential(8);
fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.permform(state).await.map_err(Into::into) })
Box::pin(self.perform(state))
}
}


@ -1,10 +1,10 @@
use crate::{
error::Error,
future::BoxFuture,
jobs::{debug_object, Deliver, JobState},
};
use activitystreams::iri_string::types::IriString;
use background_jobs::ActixJob;
use futures_util::future::LocalBoxFuture;
use background_jobs::Job;
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct DeliverMany {
@ -45,14 +45,15 @@ impl DeliverMany {
}
}
impl ActixJob for DeliverMany {
impl Job for DeliverMany {
type State = JobState;
type Future = LocalBoxFuture<'static, Result<(), anyhow::Error>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::DeliverMany";
const QUEUE: &'static str = "deliver";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) })
Box::pin(self.perform(state))
}
}

File diff suppressed because one or more lines are too long


@ -1,17 +1,18 @@
use crate::{
error::{Error, ErrorKind},
future::BoxFuture,
jobs::{Boolish, JobState, QueryContact},
requests::BreakerStrategy,
};
use activitystreams::{iri, iri_string::types::IriString, primitives::OneOrMany};
use background_jobs::ActixJob;
use std::{fmt::Debug, future::Future, pin::Pin};
use background_jobs::Job;
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct QueryNodeinfo {
actor_id: IriString,
}
impl Debug for QueryNodeinfo {
impl std::fmt::Debug for QueryNodeinfo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("QueryNodeinfo")
.field("actor_id", &self.actor_id.to_string())
@ -27,6 +28,7 @@ impl QueryNodeinfo {
#[tracing::instrument(name = "Query node info", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
if !state
.state
.node_cache
.is_nodeinfo_outdated(self.actor_id.clone())
.await
@ -39,11 +41,12 @@ impl QueryNodeinfo {
.authority_str()
.ok_or(ErrorKind::MissingDomain)?;
let scheme = self.actor_id.scheme_str();
let well_known_uri = iri!(format!("{}://{}/.well-known/nodeinfo", scheme, authority));
let well_known_uri = iri!(format!("{scheme}://{authority}/.well-known/nodeinfo"));
let well_known = match state
.state
.requests
.fetch_json::<WellKnown>(well_known_uri.as_str())
.fetch_json::<WellKnown>(&well_known_uri, BreakerStrategy::Allow404AndBelow)
.await
{
Ok(well_known) => well_known,
@ -55,12 +58,17 @@ impl QueryNodeinfo {
};
let href = if let Some(link) = well_known.links.into_iter().find(|l| l.rel.is_supported()) {
link.href
iri!(&link.href)
} else {
return Ok(());
};
let nodeinfo = match state.requests.fetch_json::<Nodeinfo>(&href).await {
let nodeinfo = match state
.state
.requests
.fetch_json::<Nodeinfo>(&href, BreakerStrategy::Require2XX)
.await
{
Ok(nodeinfo) => nodeinfo,
Err(e) if e.is_breaker() => {
tracing::debug!("Not retrying due to failed breaker");
@ -70,6 +78,7 @@ impl QueryNodeinfo {
};
state
.state
.node_cache
.set_info(
self.actor_id.clone(),
@ -83,7 +92,7 @@ impl QueryNodeinfo {
.metadata
.and_then(|meta| meta.into_iter().next().and_then(|meta| meta.staff_accounts))
{
if let Some(contact_id) = accounts.get(0) {
if let Some(contact_id) = accounts.first() {
state
.job_server
.queue(QueryContact::new(self.actor_id, contact_id.clone()))
@ -95,15 +104,16 @@ impl QueryNodeinfo {
}
}
impl ActixJob for QueryNodeinfo {
impl Job for QueryNodeinfo {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::QueryNodeinfo";
const QUEUE: &'static str = "maintenance";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) })
Box::pin(self.perform(state))
}
}
@ -168,7 +178,7 @@ impl<'de> serde::de::Visitor<'de> for SupportedVersionVisitor {
type Value = SupportedVersion;
fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "a string starting with '{}'", SUPPORTED_VERSIONS)
write!(f, "a string starting with '{SUPPORTED_VERSIONS}'")
}
fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
@ -187,7 +197,7 @@ impl<'de> serde::de::Visitor<'de> for SupportedNodeinfoVisitor {
type Value = SupportedNodeinfo;
fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "a string starting with '{}'", SUPPORTED_NODEINFO)
write!(f, "a string starting with '{SUPPORTED_NODEINFO}'")
}
fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>


@ -1,9 +1,9 @@
use crate::{
error::Error,
future::BoxFuture,
jobs::{instance::QueryInstance, nodeinfo::QueryNodeinfo, JobState},
};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use background_jobs::Job;
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct Listeners;
@ -23,14 +23,15 @@ impl Listeners {
}
}
impl ActixJob for Listeners {
impl Job for Listeners {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::Listeners";
const QUEUE: &'static str = "maintenance";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) })
Box::pin(self.perform(state))
}
}


@ -0,0 +1,28 @@
use crate::{error::Error, future::BoxFuture, jobs::JobState};
use background_jobs::{Backoff, Job};
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct RecordLastOnline;
impl RecordLastOnline {
#[tracing::instrument(skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
let nodes = state.state.last_online.take();
state.state.db.mark_last_seen(nodes).await
}
}
impl Job for RecordLastOnline {
type State = JobState;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::RecordLastOnline";
const QUEUE: &'static str = "maintenance";
const BACKOFF: Backoff = Backoff::Linear(1);
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))
}
}


@ -1,18 +1,27 @@
// need this for ructe
#![allow(clippy::needless_borrow)]
use std::time::Duration;
use activitystreams::iri_string::types::IriString;
use actix_web::{middleware::Compress, web, App, HttpServer};
use collector::MemoryCollector;
#[cfg(feature = "console")]
use console_subscriber::ConsoleLayer;
use opentelemetry::{sdk::Resource, KeyValue};
use error::Error;
use http_signature_normalization_actix::middleware::VerifySignature;
use metrics_exporter_prometheus::PrometheusBuilder;
use metrics_util::layers::FanoutBuilder;
use opentelemetry::KeyValue;
use opentelemetry_otlp::WithExportConfig;
use opentelemetry_sdk::Resource;
use reqwest_middleware::ClientWithMiddleware;
use rustls::ServerConfig;
use tokio::task::JoinHandle;
use tracing_actix_web::TracingLogger;
use tracing_error::ErrorLayer;
use tracing_log::LogTracer;
use tracing_subscriber::{filter::Targets, fmt::format::FmtSpan, layer::SubscriberExt, Layer};
use tracing_subscriber::{filter::Targets, layer::SubscriberExt, Layer};
mod admin;
mod apub;
@ -23,35 +32,39 @@ mod data;
mod db;
mod error;
mod extractors;
mod future;
mod jobs;
mod middleware;
mod requests;
mod routes;
mod spawner;
mod telegram;
use crate::config::UrlKind;
use self::{
args::Args,
config::Config,
data::{ActorCache, MediaCache, State},
db::Db,
jobs::create_workers,
middleware::{DebugPayload, RelayResolver, Timings},
routes::{actor, inbox, index, nodeinfo, nodeinfo_meta, statics},
middleware::{DebugPayload, MyVerify, RelayResolver, Timings},
routes::{actor, healthz, inbox, index, nodeinfo, nodeinfo_meta, statics},
spawner::Spawner,
};
fn init_subscriber(
software_name: &'static str,
opentelemetry_url: Option<&IriString>,
) -> Result<(), anyhow::Error> {
) -> color_eyre::Result<()> {
LogTracer::init()?;
color_eyre::install()?;
let targets: Targets = std::env::var("RUST_LOG")
.unwrap_or_else(|_| "warn,actix_web=debug,actix_server=debug,tracing_actix_web=info".into())
.unwrap_or_else(|_| "info".into())
.parse()?;
let format_layer = tracing_subscriber::fmt::layer()
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
.with_filter(targets.clone());
let format_layer = tracing_subscriber::fmt::layer().with_filter(targets.clone());
#[cfg(feature = "console")]
let console_layer = ConsoleLayer::builder()
@ -68,18 +81,19 @@ fn init_subscriber(
let subscriber = subscriber.with(console_layer);
if let Some(url) = opentelemetry_url {
let tracer =
opentelemetry_otlp::new_pipeline()
.tracing()
.with_trace_config(opentelemetry::sdk::trace::config().with_resource(
Resource::new(vec![KeyValue::new("service.name", software_name)]),
))
.with_exporter(
opentelemetry_otlp::new_exporter()
.tonic()
.with_endpoint(url.as_str()),
)
.install_batch(opentelemetry::runtime::Tokio)?;
let tracer = opentelemetry_otlp::new_pipeline()
.tracing()
.with_trace_config(
opentelemetry_sdk::trace::config().with_resource(Resource::new(vec![
KeyValue::new("service.name", software_name),
])),
)
.with_exporter(
opentelemetry_otlp::new_exporter()
.tonic()
.with_endpoint(url.as_str()),
)
.install_batch(opentelemetry_sdk::runtime::Tokio)?;
let otel_layer = tracing_opentelemetry::layer()
.with_tracer(tracer)
@ -94,42 +108,94 @@ fn init_subscriber(
Ok(())
}
fn main() -> Result<(), anyhow::Error> {
fn build_client(
user_agent: &str,
timeout_seconds: u64,
proxy: Option<(&IriString, Option<(&str, &str)>)>,
) -> Result<ClientWithMiddleware, Error> {
let builder = reqwest::Client::builder().user_agent(user_agent.to_string());
let builder = if let Some((url, auth)) = proxy {
let proxy = reqwest::Proxy::all(url.as_str())?;
let proxy = if let Some((username, password)) = auth {
proxy.basic_auth(username, password)
} else {
proxy
};
builder.proxy(proxy)
} else {
builder
};
let client = builder
.timeout(Duration::from_secs(timeout_seconds))
.build()?;
let client_with_middleware = reqwest_middleware::ClientBuilder::new(client)
.with(reqwest_tracing::TracingMiddleware::default())
.build();
Ok(client_with_middleware)
}
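A usage sketch of `build_client` with a proxy; the URL, user agent, and credentials are illustrative placeholders, not values from the diff:

use activitystreams::iri_string::types::IriString;

fn example_client() -> Result<reqwest_middleware::ClientWithMiddleware, Error> {
    let proxy_url: IriString = "http://proxy.internal:3128"
        .parse()
        .expect("valid IRI"); // hypothetical proxy address
    build_client(
        "example-relay/0.1.0",                      // made-up user agent
        10,                                         // timeout in seconds
        Some((&proxy_url, Some(("user", "pass")))), // basic-auth pair
    )
}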
#[tokio::main]
async fn main() -> color_eyre::Result<()> {
dotenv::dotenv().ok();
let config = Config::build()?;
init_subscriber(Config::software_name(), config.opentelemetry_url())?;
let collector = MemoryCollector::new();
collector.install()?;
let args = Args::new();
if args.any() {
return client_main(config, args);
client_main(config, args).await??;
return Ok(());
}
tracing::warn!("Opening DB");
let collector = MemoryCollector::new();
if let Some(bind_addr) = config.prometheus_bind_address() {
let (recorder, exporter) = PrometheusBuilder::new()
.with_http_listener(bind_addr)
.build()?;
tokio::spawn(exporter);
let recorder = FanoutBuilder::default()
.add_recorder(recorder)
.add_recorder(collector.clone())
.build();
metrics::set_global_recorder(recorder).map_err(|e| color_eyre::eyre::eyre!("{e}"))?;
} else {
collector.install()?;
}
tracing::info!("Opening DB");
let db = Db::build(&config)?;
tracing::warn!("Building caches");
tracing::info!("Building caches");
let actors = ActorCache::new(db.clone());
let media = MediaCache::new(db.clone());
server_main(db, actors, media, collector, config)?;
server_main(db, actors, media, collector, config).await?;
tracing::warn!("Application exit");
tracing::info!("Application exit");
Ok(())
}
#[actix_rt::main]
async fn client_main(config: Config, args: Args) -> Result<(), anyhow::Error> {
actix_rt::spawn(do_client_main(config, args)).await?
fn client_main(config: Config, args: Args) -> JoinHandle<color_eyre::Result<()>> {
tokio::spawn(do_client_main(config, args))
}
async fn do_client_main(config: Config, args: Args) -> Result<(), anyhow::Error> {
let client = requests::build_client(&config.user_agent());
async fn do_client_main(config: Config, args: Args) -> color_eyre::Result<()> {
let client = build_client(
&config.user_agent(),
config.client_timeout(),
config.proxy_config(),
)?;
if !args.blocks().is_empty() || !args.allowed().is_empty() {
if args.undo() {
@ -142,6 +208,39 @@ async fn do_client_main(config: Config, args: Args) -> Result<(), anyhow::Error>
println!("Updated lists");
}
if args.contacted() {
let last_seen = admin::client::last_seen(&client, &config).await?;
let mut report = String::from("Contacted:");
if !last_seen.never.is_empty() {
report += "\nNever seen:\n";
}
for domain in last_seen.never {
report += "\t";
report += &domain;
report += "\n";
}
if !last_seen.last_seen.is_empty() {
report += "\nSeen:\n";
}
for (datetime, domains) in last_seen.last_seen {
for domain in domains {
report += "\t";
report += &datetime.to_string();
report += " - ";
report += &domain;
report += "\n";
}
}
report += "\n";
println!("{report}");
}
if args.list() {
let (blocked, allowed, connected) = tokio::try_join!(
admin::client::blocked(&client, &config),
@ -174,49 +273,71 @@ async fn do_client_main(config: Config, args: Args) -> Result<(), anyhow::Error>
Ok(())
}
#[actix_rt::main]
const VERIFY_RATIO: usize = 7;
async fn server_main(
db: Db,
actors: ActorCache,
media: MediaCache,
collector: MemoryCollector,
config: Config,
) -> Result<(), anyhow::Error> {
actix_rt::spawn(do_server_main(db, actors, media, collector, config)).await?
}
) -> color_eyre::Result<()> {
let client = build_client(
&config.user_agent(),
config.client_timeout(),
config.proxy_config(),
)?;
async fn do_server_main(
db: Db,
actors: ActorCache,
media: MediaCache,
collector: MemoryCollector,
config: Config,
) -> Result<(), anyhow::Error> {
tracing::warn!("Creating state");
let state = State::build(db.clone()).await?;
tracing::info!("Creating state");
tracing::warn!("Creating workers");
let (manager, job_server) =
create_workers(state.clone(), actors.clone(), media.clone(), config.clone());
let (signature_threads, verify_threads) = match config.signature_threads() {
0 | 1 => (1, 1),
n if n <= VERIFY_RATIO => (n, 1),
n => {
let verify_threads = (n / VERIFY_RATIO).max(1);
let signature_threads = n.saturating_sub(verify_threads).max(VERIFY_RATIO);
(signature_threads, verify_threads)
}
};
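Roughly one verify thread is budgeted per seven signing threads. A standalone copy of the split, with the arithmetic spelled out:

const VERIFY_RATIO: usize = 7;

fn split(n: usize) -> (usize, usize) {
    match n {
        0 | 1 => (1, 1),
        n if n <= VERIFY_RATIO => (n, 1),
        n => {
            let verify = (n / VERIFY_RATIO).max(1);
            let signature = n.saturating_sub(verify).max(VERIFY_RATIO);
            (signature, verify)
        }
    }
}

fn main() {
    assert_eq!(split(1), (1, 1));   // minimum of one thread each
    assert_eq!(split(4), (4, 1));   // up to the ratio, verify gets one
    assert_eq!(split(8), (7, 1));   // 8/7 = 1 verify thread, 7 sign
    assert_eq!(split(16), (14, 2)); // 16/7 = 2 verify threads, 14 sign
}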
let verify_spawner = Spawner::build("verify-cpu", verify_threads.try_into()?)?;
let sign_spawner = Spawner::build("sign-cpu", signature_threads.try_into()?)?;
let key_id = config.generate_url(UrlKind::MainKey).to_string();
let state = State::build(db.clone(), key_id, sign_spawner.clone(), client).await?;
if let Some((token, admin_handle)) = config.telegram_info() {
tracing::warn!("Creating telegram handler");
tracing::info!("Creating telegram handler");
telegram::start(admin_handle.to_owned(), db.clone(), token);
}
let keys = config.open_keys()?;
let cert_resolver = config
.open_keys()
.await?
.map(rustls_channel_resolver::channel::<32>);
let bind_address = config.bind_address();
let sign_spawner2 = sign_spawner.clone();
let verify_spawner2 = verify_spawner.clone();
let config2 = config.clone();
let server = HttpServer::new(move || {
let job_server =
create_workers(state.clone(), actors.clone(), media.clone(), config.clone())
.expect("Failed to create job server");
let app = App::new()
.app_data(web::Data::new(db.clone()))
.app_data(web::Data::new(state.clone()))
.app_data(web::Data::new(state.requests(&config)))
.app_data(web::Data::new(
state.requests.clone().spawner(verify_spawner.clone()),
))
.app_data(web::Data::new(actors.clone()))
.app_data(web::Data::new(config.clone()))
.app_data(web::Data::new(job_server.clone()))
.app_data(web::Data::new(job_server))
.app_data(web::Data::new(media.clone()))
.app_data(web::Data::new(collector.clone()));
.app_data(web::Data::new(collector.clone()))
.app_data(web::Data::new(verify_spawner.clone()));
let app = if let Some(data) = config.admin_config() {
app.app_data(data)
@ -227,15 +348,20 @@ async fn do_server_main(
app.wrap(Compress::default())
.wrap(TracingLogger::default())
.wrap(Timings)
.route("/healthz", web::get().to(healthz))
.service(web::resource("/").route(web::get().to(index)))
.service(web::resource("/media/{path}").route(web::get().to(routes::media)))
.service(
web::resource("/inbox")
.wrap(config.digest_middleware())
.wrap(config.signature_middleware(
state.requests(&config),
actors.clone(),
state.clone(),
.wrap(config.digest_middleware().spawner(verify_spawner.clone()))
.wrap(VerifySignature::new(
MyVerify(
state.requests.clone().spawner(verify_spawner.clone()),
actors.clone(),
state.clone(),
verify_spawner.clone(),
),
http_signature_normalization_actix::Config::new(),
))
.wrap(DebugPayload(config.debug()))
.route(web::post().to(inbox)),
@ -258,33 +384,48 @@ async fn do_server_main(
.route("/allowed", web::get().to(admin::routes::allowed))
.route("/blocked", web::get().to(admin::routes::blocked))
.route("/connected", web::get().to(admin::routes::connected))
.route("/stats", web::get().to(admin::routes::stats)),
.route("/stats", web::get().to(admin::routes::stats))
.route("/last_seen", web::get().to(admin::routes::last_seen)),
),
)
});
if let Some((certs, key)) = keys {
tracing::warn!("Binding to {}:{} with TLS", bind_address.0, bind_address.1);
if let Some((cert_tx, cert_rx)) = cert_resolver {
let handle = tokio::spawn(async move {
let mut interval = tokio::time::interval(Duration::from_secs(30));
interval.tick().await;
loop {
interval.tick().await;
match config2.open_keys().await {
Ok(Some(key)) => cert_tx.update(key),
Ok(None) => tracing::warn!("Missing TLS keys"),
Err(e) => tracing::error!("Failed to read TLS keys {e}"),
}
}
});
tracing::info!("Binding to {}:{} with TLS", bind_address.0, bind_address.1);
let server_config = ServerConfig::builder()
.with_safe_default_cipher_suites()
.with_safe_default_kx_groups()
.with_safe_default_protocol_versions()?
.with_no_client_auth()
.with_single_cert(certs, key)?;
.with_cert_resolver(cert_rx);
server
.bind_rustls(bind_address, server_config)?
.bind_rustls_0_22(bind_address, server_config)?
.run()
.await?;
handle.abort();
let _ = handle.await;
} else {
tracing::warn!("Binding to {}:{}", bind_address.0, bind_address.1);
tracing::info!("Binding to {}:{}", bind_address.0, bind_address.1);
server.bind(bind_address)?.run().await?;
}
tracing::warn!("Server closed");
sign_spawner2.close().await;
verify_spawner2.close().await;
drop(manager);
tracing::warn!("Main complete");
tracing::info!("Server closed");
Ok(())
}
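The hot-reload above hinges on rustls_channel_resolver: channel::<N> returns an
update handle plus a resolver that plugs into rustls 0.22's cert-resolver slot.
A minimal sketch of the wiring, inferred from the call sites in this hunk
(initial_key and fresh_key stand in for the key material config.open_keys() yields):

    let (cert_tx, cert_rx) = rustls_channel_resolver::channel::<32>(initial_key);

    let server_config = ServerConfig::builder()
        .with_no_client_auth()
        .with_cert_resolver(cert_rx); // serves whatever key was pushed last

    cert_tx.update(fresh_key); // new handshakes pick this up without rebinding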

View File

@ -4,14 +4,11 @@ use actix_web::{
web::BytesMut,
HttpMessage,
};
use futures_util::{
future::TryFutureExt,
stream::{once, TryStreamExt},
};
use std::{
future::{ready, Ready},
task::{Context, Poll},
};
use streem::IntoStreamer;
#[derive(Clone, Debug)]
pub(crate) struct DebugPayload(pub bool);
@ -53,19 +50,23 @@ where
fn call(&self, mut req: ServiceRequest) -> Self::Future {
if self.0 && req.method() == Method::POST {
let pl = req.take_payload();
let mut pl = req.take_payload().into_streamer();
req.set_payload(Payload::Stream {
payload: Box::pin(once(
pl.try_fold(BytesMut::new(), |mut acc, bytes| async {
acc.extend(bytes);
Ok(acc)
})
.map_ok(|bytes| {
let bytes = bytes.freeze();
tracing::info!("{}", String::from_utf8_lossy(&bytes));
bytes
}),
)),
payload: Box::pin(streem::try_from_fn(|yielder| async move {
let mut buf = BytesMut::new();
while let Some(bytes) = pl.try_next().await? {
buf.extend(bytes);
}
let bytes = buf.freeze();
tracing::info!("{}", String::from_utf8_lossy(&bytes));
yielder.yield_ok(bytes).await;
Ok(())
})),
});
self.1.call(req)
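streem swaps the futures-util fold/once combinators for a generator-style
constructor: try_from_fn hands the closure a yielder and produces a TryStream.
A standalone sketch, assuming only the API the hunk above exercises:

    let stream = streem::try_from_fn(|yielder| async move {
        for chunk in ["hello", " ", "world"] {
            yielder.yield_ok(actix_web::web::Bytes::from(chunk)).await;
        }
        Ok::<_, std::io::Error>(())
    });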

View File

@ -40,7 +40,7 @@ impl Drop for LogOnDrop {
fn drop(&mut self) {
if self.arm {
let duration = self.begin.elapsed();
metrics::histogram!("relay.request.complete", duration, "path" => self.path.clone(), "method" => self.method.clone());
metrics::histogram!("relay.request.complete", "path" => self.path.clone(), "method" => self.method.clone()).record(duration);
}
}
}
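metrics 0.22 changed the macro contract this one-liner tracks: histogram! now
returns a handle and the value is recorded explicitly. A minimal sketch:

    let begin = std::time::Instant::now();
    // ... handle the request ...
    metrics::histogram!("relay.request.complete", "path" => "/inbox".to_string())
        .record(begin.elapsed()); // the handle accepts the elapsed Duration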

View File

@ -2,18 +2,17 @@ use crate::{
apub::AcceptedActors,
data::{ActorCache, State},
error::{Error, ErrorKind},
requests::Requests,
requests::{BreakerStrategy, Requests},
spawner::Spawner,
};
use activitystreams::{base::BaseExt, iri, iri_string::types::IriString};
use actix_web::web;
use http_signature_normalization_actix::{prelude::*, verify::DeprecatedAlgorithm};
use rsa::{pkcs1v15::VerifyingKey, pkcs8::DecodePublicKey, RsaPublicKey};
use sha2::{Digest, Sha256};
use signature::{DigestVerifier, Signature};
use base64::{engine::general_purpose::STANDARD, Engine};
use http_signature_normalization_actix::{prelude::*, verify::DeprecatedAlgorithm, Spawn};
use rsa::{pkcs1::EncodeRsaPublicKey, pkcs8::DecodePublicKey, RsaPublicKey};
use std::{future::Future, pin::Pin};
#[derive(Clone, Debug)]
pub(crate) struct MyVerify(pub Requests, pub ActorCache, pub State);
pub(crate) struct MyVerify(pub Requests, pub ActorCache, pub State, pub Spawner);
impl MyVerify {
#[tracing::instrument("Verify request", skip(self, signature, signing_string))]
@ -53,7 +52,13 @@ impl MyVerify {
None => (),
};
let res = do_verify(&actor.public_key, signature.clone(), signing_string.clone()).await;
let res = do_verify(
&self.3,
&actor.public_key,
signature.clone(),
signing_string.clone(),
)
.await;
if let Err(e) = res {
if !was_cached {
@ -65,11 +70,21 @@ impl MyVerify {
actor_id
} else {
self.0
.fetch::<PublicKeyResponse>(public_key_id.as_str())
.await?
.actor_id()
.ok_or(ErrorKind::MissingId)?
match self
.0
.fetch::<PublicKeyResponse>(&public_key_id, BreakerStrategy::Require2XX)
.await
{
Ok(res) => res.actor_id().ok_or(ErrorKind::MissingId),
Err(e) => {
if e.is_gone() {
tracing::warn!("Actor gone: {public_key_id}");
return Ok(false);
} else {
return Err(e);
}
}
}?
};
// Previously we verified the sig from an actor's local cache
@ -77,7 +92,7 @@ impl MyVerify {
// Now we make sure we fetch an updated actor
let actor = self.1.get_no_cache(&actor_id, &self.0).await?;
do_verify(&actor.public_key, signature, signing_string).await?;
do_verify(&self.3, &actor.public_key, signature, signing_string).await?;
Ok(true)
}
@ -108,28 +123,34 @@ impl PublicKeyResponse {
#[tracing::instrument("Verify signature")]
async fn do_verify(
spawner: &Spawner,
public_key: &str,
signature: String,
signing_string: String,
) -> Result<(), Error> {
let public_key = RsaPublicKey::from_public_key_pem(public_key.trim())?;
let public_key_der = public_key
.to_pkcs1_der()
.map_err(|_| ErrorKind::DerEncode)?;
let public_key = ring::signature::UnparsedPublicKey::new(
&ring::signature::RSA_PKCS1_2048_8192_SHA256,
public_key_der,
);
let span = tracing::Span::current();
web::block(move || {
span.in_scope(|| {
let decoded = base64::decode(signature)?;
let signature = Signature::from_bytes(&decoded).map_err(ErrorKind::ReadSignature)?;
let hashed = Sha256::new_with_prefix(signing_string.as_bytes());
spawner
.spawn_blocking(move || {
span.in_scope(|| {
let decoded = STANDARD.decode(signature)?;
let verifying_key = VerifyingKey::new_with_prefix(public_key);
verifying_key
.verify_digest(hashed, &signature)
.map_err(ErrorKind::VerifySignature)?;
public_key
.verify(signing_string.as_bytes(), decoded.as_slice())
.map_err(|_| ErrorKind::VerifySignature)?;
Ok(()) as Result<(), Error>
Ok(()) as Result<(), Error>
})
})
})
.await??;
.await??;
Ok(())
}
@ -159,20 +180,20 @@ mod tests {
use crate::apub::AcceptedActors;
use rsa::{pkcs8::DecodePublicKey, RsaPublicKey};
const ASONIX_DOG_ACTOR: &'static str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://w3id.org/security/v1",{"manuallyApprovesFollowers":"as:manuallyApprovesFollowers","toot":"http://joinmastodon.org/ns#","featured":{"@id":"toot:featured","@type":"@id"},"featuredTags":{"@id":"toot:featuredTags","@type":"@id"},"alsoKnownAs":{"@id":"as:alsoKnownAs","@type":"@id"},"movedTo":{"@id":"as:movedTo","@type":"@id"},"schema":"http://schema.org#","PropertyValue":"schema:PropertyValue","value":"schema:value","discoverable":"toot:discoverable","Device":"toot:Device","Ed25519Signature":"toot:Ed25519Signature","Ed25519Key":"toot:Ed25519Key","Curve25519Key":"toot:Curve25519Key","EncryptedMessage":"toot:EncryptedMessage","publicKeyBase64":"toot:publicKeyBase64","deviceId":"toot:deviceId","claim":{"@type":"@id","@id":"toot:claim"},"fingerprintKey":{"@type":"@id","@id":"toot:fingerprintKey"},"identityKey":{"@type":"@id","@id":"toot:identityKey"},"devices":{"@type":"@id","@id":"toot:devices"},"messageFranking":"toot:messageFranking","messageType":"toot:messageType","cipherText":"toot:cipherText","suspended":"toot:suspended"}],"id":"https://masto.asonix.dog/actor","type":"Application","inbox":"https://masto.asonix.dog/actor/inbox","outbox":"https://masto.asonix.dog/actor/outbox","preferredUsername":"masto.asonix.dog","url":"https://masto.asonix.dog/about/more?instance_actor=true","manuallyApprovesFollowers":true,"publicKey":{"id":"https://masto.asonix.dog/actor#main-key","owner":"https://masto.asonix.dog/actor","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx8zXS0QNg9YGUBsxAOBH\nJaxIn7i6t+Z4UOpSFDVa2kP0NvQgIJsq3wzRqvaiuncRWpkyFk1fTakiRGD32xnY\nt+juuAaIBlU8eswKyANFqhcLAvFHmT3rA1848M4/YM19djvlL/PR9T53tPNHU+el\nS9MlsG3o6Zsj8YaUJtCI8RgEuJoROLHUb/V9a3oMQ7CfuIoSvF3VEz3/dRT09RW6\n0wQX7yhka9WlKuayWLWmTcB9lAIX6neBk+qKc8VSEsO7mHkzB8mRgVcS2uYZl1eA\nD8/jTT+SlpeFNDZk0Oh35GNFoOxh9qjRw3NGxu7jJCVBehDODzasOv4xDxKAhONa\njQIDAQAB\n-----END PUBLIC KEY-----\n"},"endpoints":{"sharedInbox":"https://masto.asonix.dog/inbox"}}"#;
const KARJALAZET_RELAY: &'static str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://pleroma.karjalazet.se/schemas/litepub-0.1.jsonld",{"@language":"und"}],"alsoKnownAs":[],"attachment":[],"capabilities":{},"discoverable":false,"endpoints":{"oauthAuthorizationEndpoint":"https://pleroma.karjalazet.se/oauth/authorize","oauthRegistrationEndpoint":"https://pleroma.karjalazet.se/api/v1/apps","oauthTokenEndpoint":"https://pleroma.karjalazet.se/oauth/token","sharedInbox":"https://pleroma.karjalazet.se/inbox","uploadMedia":"https://pleroma.karjalazet.se/api/ap/upload_media"},"featured":"https://pleroma.karjalazet.se/relay/collections/featured","followers":"https://pleroma.karjalazet.se/relay/followers","following":"https://pleroma.karjalazet.se/relay/following","id":"https://pleroma.karjalazet.se/relay","inbox":"https://pleroma.karjalazet.se/relay/inbox","manuallyApprovesFollowers":false,"name":null,"outbox":"https://pleroma.karjalazet.se/relay/outbox","preferredUsername":"relay","publicKey":{"id":"https://pleroma.karjalazet.se/relay#main-key","owner":"https://pleroma.karjalazet.se/relay","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAucoyCht6QpEzUPdQWP/J\nJYxObSH3MCcXBnG4d0OX78QshloeAHhl78EZ5c8I0ePmIjDg2NFK3/pG0EvSrHe2\nIZHnHaN5emgCb2ifNya5W572yfQXo1tUQy+ZXtbTUA7BWbr4LuCvd+HUavMwbx72\neraSZTiQj//ObwpbXFoZO5I/+e5avGmVnfmr/y2cG95hqFDtI3438RgZyBjY5kJM\nY1MLWoY9itGSfYmBtxRj3umlC2bPuBB+hHUJi6TvP7NO6zuUZ66m4ETyuBDi8iP6\ngnUp3Q4+1/I3nDUmhjt7OXckUcX3r5M4UHD3VVUFG0aZw6WWMEAxlyFf/07fCkhR\nBwIDAQAB\n-----END PUBLIC KEY-----\n\n"},"summary":"","tag":[],"type":"Person","url":"https://pleroma.karjalazet.se/relay"}"#;
const ASONIX_DOG_KEY: &'static str = "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx8zXS0QNg9YGUBsxAOBH\nJaxIn7i6t+Z4UOpSFDVa2kP0NvQgIJsq3wzRqvaiuncRWpkyFk1fTakiRGD32xnY\nt+juuAaIBlU8eswKyANFqhcLAvFHmT3rA1848M4/YM19djvlL/PR9T53tPNHU+el\nS9MlsG3o6Zsj8YaUJtCI8RgEuJoROLHUb/V9a3oMQ7CfuIoSvF3VEz3/dRT09RW6\n0wQX7yhka9WlKuayWLWmTcB9lAIX6neBk+qKc8VSEsO7mHkzB8mRgVcS2uYZl1eA\nD8/jTT+SlpeFNDZk0Oh35GNFoOxh9qjRw3NGxu7jJCVBehDODzasOv4xDxKAhONa\njQIDAQAB\n-----END PUBLIC KEY-----\n";
const KARJALAZET_KEY: &'static str = "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAucoyCht6QpEzUPdQWP/J\nJYxObSH3MCcXBnG4d0OX78QshloeAHhl78EZ5c8I0ePmIjDg2NFK3/pG0EvSrHe2\nIZHnHaN5emgCb2ifNya5W572yfQXo1tUQy+ZXtbTUA7BWbr4LuCvd+HUavMwbx72\neraSZTiQj//ObwpbXFoZO5I/+e5avGmVnfmr/y2cG95hqFDtI3438RgZyBjY5kJM\nY1MLWoY9itGSfYmBtxRj3umlC2bPuBB+hHUJi6TvP7NO6zuUZ66m4ETyuBDi8iP6\ngnUp3Q4+1/I3nDUmhjt7OXckUcX3r5M4UHD3VVUFG0aZw6WWMEAxlyFf/07fCkhR\nBwIDAQAB\n-----END PUBLIC KEY-----\n\n";
const ASONIX_DOG_ACTOR: &str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://w3id.org/security/v1",{"manuallyApprovesFollowers":"as:manuallyApprovesFollowers","toot":"http://joinmastodon.org/ns#","featured":{"@id":"toot:featured","@type":"@id"},"featuredTags":{"@id":"toot:featuredTags","@type":"@id"},"alsoKnownAs":{"@id":"as:alsoKnownAs","@type":"@id"},"movedTo":{"@id":"as:movedTo","@type":"@id"},"schema":"http://schema.org#","PropertyValue":"schema:PropertyValue","value":"schema:value","discoverable":"toot:discoverable","Device":"toot:Device","Ed25519Signature":"toot:Ed25519Signature","Ed25519Key":"toot:Ed25519Key","Curve25519Key":"toot:Curve25519Key","EncryptedMessage":"toot:EncryptedMessage","publicKeyBase64":"toot:publicKeyBase64","deviceId":"toot:deviceId","claim":{"@type":"@id","@id":"toot:claim"},"fingerprintKey":{"@type":"@id","@id":"toot:fingerprintKey"},"identityKey":{"@type":"@id","@id":"toot:identityKey"},"devices":{"@type":"@id","@id":"toot:devices"},"messageFranking":"toot:messageFranking","messageType":"toot:messageType","cipherText":"toot:cipherText","suspended":"toot:suspended"}],"id":"https://masto.asonix.dog/actor","type":"Application","inbox":"https://masto.asonix.dog/actor/inbox","outbox":"https://masto.asonix.dog/actor/outbox","preferredUsername":"masto.asonix.dog","url":"https://masto.asonix.dog/about/more?instance_actor=true","manuallyApprovesFollowers":true,"publicKey":{"id":"https://masto.asonix.dog/actor#main-key","owner":"https://masto.asonix.dog/actor","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx8zXS0QNg9YGUBsxAOBH\nJaxIn7i6t+Z4UOpSFDVa2kP0NvQgIJsq3wzRqvaiuncRWpkyFk1fTakiRGD32xnY\nt+juuAaIBlU8eswKyANFqhcLAvFHmT3rA1848M4/YM19djvlL/PR9T53tPNHU+el\nS9MlsG3o6Zsj8YaUJtCI8RgEuJoROLHUb/V9a3oMQ7CfuIoSvF3VEz3/dRT09RW6\n0wQX7yhka9WlKuayWLWmTcB9lAIX6neBk+qKc8VSEsO7mHkzB8mRgVcS2uYZl1eA\nD8/jTT+SlpeFNDZk0Oh35GNFoOxh9qjRw3NGxu7jJCVBehDODzasOv4xDxKAhONa\njQIDAQAB\n-----END PUBLIC KEY-----\n"},"endpoints":{"sharedInbox":"https://masto.asonix.dog/inbox"}}"#;
const KARJALAZET_RELAY: &str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://pleroma.karjalazet.se/schemas/litepub-0.1.jsonld",{"@language":"und"}],"alsoKnownAs":[],"attachment":[],"capabilities":{},"discoverable":false,"endpoints":{"oauthAuthorizationEndpoint":"https://pleroma.karjalazet.se/oauth/authorize","oauthRegistrationEndpoint":"https://pleroma.karjalazet.se/api/v1/apps","oauthTokenEndpoint":"https://pleroma.karjalazet.se/oauth/token","sharedInbox":"https://pleroma.karjalazet.se/inbox","uploadMedia":"https://pleroma.karjalazet.se/api/ap/upload_media"},"featured":"https://pleroma.karjalazet.se/relay/collections/featured","followers":"https://pleroma.karjalazet.se/relay/followers","following":"https://pleroma.karjalazet.se/relay/following","id":"https://pleroma.karjalazet.se/relay","inbox":"https://pleroma.karjalazet.se/relay/inbox","manuallyApprovesFollowers":false,"name":null,"outbox":"https://pleroma.karjalazet.se/relay/outbox","preferredUsername":"relay","publicKey":{"id":"https://pleroma.karjalazet.se/relay#main-key","owner":"https://pleroma.karjalazet.se/relay","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAucoyCht6QpEzUPdQWP/J\nJYxObSH3MCcXBnG4d0OX78QshloeAHhl78EZ5c8I0ePmIjDg2NFK3/pG0EvSrHe2\nIZHnHaN5emgCb2ifNya5W572yfQXo1tUQy+ZXtbTUA7BWbr4LuCvd+HUavMwbx72\neraSZTiQj//ObwpbXFoZO5I/+e5avGmVnfmr/y2cG95hqFDtI3438RgZyBjY5kJM\nY1MLWoY9itGSfYmBtxRj3umlC2bPuBB+hHUJi6TvP7NO6zuUZ66m4ETyuBDi8iP6\ngnUp3Q4+1/I3nDUmhjt7OXckUcX3r5M4UHD3VVUFG0aZw6WWMEAxlyFf/07fCkhR\nBwIDAQAB\n-----END PUBLIC KEY-----\n\n"},"summary":"","tag":[],"type":"Person","url":"https://pleroma.karjalazet.se/relay"}"#;
const ASONIX_DOG_KEY: &str = "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx8zXS0QNg9YGUBsxAOBH\nJaxIn7i6t+Z4UOpSFDVa2kP0NvQgIJsq3wzRqvaiuncRWpkyFk1fTakiRGD32xnY\nt+juuAaIBlU8eswKyANFqhcLAvFHmT3rA1848M4/YM19djvlL/PR9T53tPNHU+el\nS9MlsG3o6Zsj8YaUJtCI8RgEuJoROLHUb/V9a3oMQ7CfuIoSvF3VEz3/dRT09RW6\n0wQX7yhka9WlKuayWLWmTcB9lAIX6neBk+qKc8VSEsO7mHkzB8mRgVcS2uYZl1eA\nD8/jTT+SlpeFNDZk0Oh35GNFoOxh9qjRw3NGxu7jJCVBehDODzasOv4xDxKAhONa\njQIDAQAB\n-----END PUBLIC KEY-----\n";
const KARJALAZET_KEY: &str = "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAucoyCht6QpEzUPdQWP/J\nJYxObSH3MCcXBnG4d0OX78QshloeAHhl78EZ5c8I0ePmIjDg2NFK3/pG0EvSrHe2\nIZHnHaN5emgCb2ifNya5W572yfQXo1tUQy+ZXtbTUA7BWbr4LuCvd+HUavMwbx72\neraSZTiQj//ObwpbXFoZO5I/+e5avGmVnfmr/y2cG95hqFDtI3438RgZyBjY5kJM\nY1MLWoY9itGSfYmBtxRj3umlC2bPuBB+hHUJi6TvP7NO6zuUZ66m4ETyuBDi8iP6\ngnUp3Q4+1/I3nDUmhjt7OXckUcX3r5M4UHD3VVUFG0aZw6WWMEAxlyFf/07fCkhR\nBwIDAQAB\n-----END PUBLIC KEY-----\n\n";
#[test]
fn handles_masto_keys() {
println!("{}", ASONIX_DOG_KEY);
println!("{ASONIX_DOG_KEY}");
let _ = RsaPublicKey::from_public_key_pem(ASONIX_DOG_KEY.trim()).unwrap();
}
#[test]
fn handles_pleromo_keys() {
println!("{}", KARJALAZET_KEY);
println!("{KARJALAZET_KEY}");
let _ = RsaPublicKey::from_public_key_pem(KARJALAZET_KEY.trim()).unwrap();
}

View File

@ -1,10 +1,10 @@
use crate::{
config::{Config, UrlKind},
data::State,
future::LocalBoxFuture,
};
use actix_web::web::Data;
use actix_webfinger::{Resolver, Webfinger};
use futures_util::future::LocalBoxFuture;
use rsa_magic_public_key::AsMagicPublicKey;
pub(crate) struct RelayResolver;

View File

@ -1,29 +1,39 @@
use crate::error::{Error, ErrorKind};
use crate::{
data::LastOnline,
error::{Error, ErrorKind},
spawner::Spawner,
};
use activitystreams::iri_string::types::IriString;
use actix_web::http::header::Date;
use awc::{error::SendRequestError, Client, ClientResponse};
use base64::{engine::general_purpose::STANDARD, Engine};
use dashmap::DashMap;
use http_signature_normalization_actix::prelude::*;
use rand::thread_rng;
use rsa::{pkcs1v15::SigningKey, RsaPrivateKey};
use sha2::{Digest, Sha256};
use signature::RandomizedSigner;
use http_signature_normalization_reqwest::{digest::ring::Sha256, prelude::*};
use reqwest_middleware::ClientWithMiddleware;
use ring::{
rand::SystemRandom,
signature::{RsaKeyPair, RSA_PKCS1_SHA256},
};
use rsa::{pkcs1::EncodeRsaPrivateKey, RsaPrivateKey};
use std::{
cell::RefCell,
rc::Rc,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
sync::Arc,
time::{Duration, SystemTime},
};
use tracing_awc::Tracing;
const ONE_SECOND: u64 = 1;
const ONE_MINUTE: u64 = 60 * ONE_SECOND;
const ONE_HOUR: u64 = 60 * ONE_MINUTE;
const ONE_DAY: u64 = 24 * ONE_HOUR;
#[derive(Debug)]
pub(crate) enum BreakerStrategy {
// Requires a successful response
Require2XX,
// Allows HTTP 2xx-401
Allow401AndBelow,
// Allows HTTP 2xx-404
Allow404AndBelow,
}
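Callers now pick a strategy per endpoint (key fetches in verify demand a 2xx,
media fetches below tolerate up to a 404). A sketch of the mapping, mirroring
check_response further down this file:

    fn allows(strategy: &BreakerStrategy, status: u16) -> bool {
        match strategy {
            BreakerStrategy::Require2XX => (200..300).contains(&status),
            BreakerStrategy::Allow401AndBelow => (200..=401).contains(&status),
            BreakerStrategy::Allow404AndBelow => (200..=404).contains(&status),
        }
    }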
#[derive(Clone)]
pub(crate) struct Breakers {
inner: Arc<DashMap<String, Breaker>>,
@ -36,7 +46,7 @@ impl std::fmt::Debug for Breakers {
}
impl Breakers {
fn should_try(&self, url: &IriString) -> bool {
pub(crate) fn should_try(&self, url: &IriString) -> bool {
if let Some(authority) = url.authority_str() {
if let Some(breaker) = self.inner.get(authority) {
breaker.should_try()
@ -54,7 +64,7 @@ impl Breakers {
if let Some(mut breaker) = self.inner.get_mut(authority) {
breaker.fail();
if !breaker.should_try() {
tracing::warn!("Failed breaker for {}", authority);
tracing::warn!("Failed breaker for {authority}");
}
false
} else {
@ -138,190 +148,202 @@ impl Default for Breaker {
#[derive(Clone)]
pub(crate) struct Requests {
client: Rc<RefCell<Client>>,
consecutive_errors: Rc<AtomicUsize>,
error_limit: usize,
client: ClientWithMiddleware,
key_id: String,
user_agent: String,
private_key: RsaPrivateKey,
config: Config,
private_key: Arc<RsaKeyPair>,
rng: SystemRandom,
config: Config<Spawner>,
breakers: Breakers,
last_online: Arc<LastOnline>,
}
impl std::fmt::Debug for Requests {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Requests")
.field("error_limit", &self.error_limit)
.field("key_id", &self.key_id)
.field("user_agent", &self.user_agent)
.field("config", &self.config)
.field("breakers", &self.breakers)
.finish()
}
}
pub(crate) fn build_client(user_agent: &str) -> Client {
Client::builder()
.wrap(Tracing)
.add_default_header(("User-Agent", user_agent.to_string()))
.timeout(Duration::from_secs(15))
.finish()
}
impl Requests {
#[allow(clippy::too_many_arguments)]
pub(crate) fn new(
key_id: String,
private_key: RsaPrivateKey,
user_agent: String,
breakers: Breakers,
last_online: Arc<LastOnline>,
spawner: Spawner,
client: ClientWithMiddleware,
) -> Self {
let private_key_der = private_key.to_pkcs1_der().expect("Can encode der");
let private_key = ring::signature::RsaKeyPair::from_der(private_key_der.as_bytes())
.expect("Key is valid");
Requests {
client: Rc::new(RefCell::new(build_client(&user_agent))),
consecutive_errors: Rc::new(AtomicUsize::new(0)),
error_limit: 3,
client,
key_id,
user_agent,
private_key,
config: Config::default().mastodon_compat(),
private_key: Arc::new(private_key),
rng: SystemRandom::new(),
config: Config::new_with_spawner(spawner).mastodon_compat(),
breakers,
last_online,
}
}
pub(crate) fn spawner(mut self, spawner: Spawner) -> Self {
self.config = self.config.set_spawner(spawner);
self
}
pub(crate) fn reset_breaker(&self, iri: &IriString) {
self.breakers.succeed(iri);
}
fn count_err(&self) {
let count = self.consecutive_errors.fetch_add(1, Ordering::Relaxed);
if count + 1 >= self.error_limit {
tracing::warn!("{} consecutive errors, rebuilding http client", count + 1);
*self.client.borrow_mut() = build_client(&self.user_agent);
self.reset_err();
}
}
fn reset_err(&self) {
self.consecutive_errors.swap(0, Ordering::Relaxed);
}
async fn check_response(
&self,
parsed_url: &IriString,
res: Result<ClientResponse, SendRequestError>,
) -> Result<ClientResponse, Error> {
strategy: BreakerStrategy,
res: Result<reqwest::Response, reqwest_middleware::Error>,
) -> Result<reqwest::Response, Error> {
if res.is_err() {
self.count_err();
self.breakers.fail(&parsed_url);
}
let mut res =
res.map_err(|e| ErrorKind::SendRequest(parsed_url.to_string(), e.to_string()))?;
let res = res?;
self.reset_err();
let status = res.status();
if !res.status().is_success() {
let success = match strategy {
BreakerStrategy::Require2XX => status.is_success(),
BreakerStrategy::Allow401AndBelow => (200..=401).contains(&status.as_u16()),
BreakerStrategy::Allow404AndBelow => (200..=404).contains(&status.as_u16()),
};
if !success {
self.breakers.fail(&parsed_url);
if let Ok(bytes) = res.body().await {
if let Ok(s) = String::from_utf8(bytes.as_ref().to_vec()) {
if !s.is_empty() {
tracing::warn!("Response from {}, {}", parsed_url, s);
}
if let Ok(s) = res.text().await {
if !s.is_empty() {
tracing::debug!("Response from {parsed_url}, {s}");
}
}
return Err(ErrorKind::Status(parsed_url.to_string(), res.status()).into());
return Err(ErrorKind::Status(parsed_url.to_string(), status).into());
}
self.breakers.succeed(&parsed_url);
// only actually succeed a breaker on 2xx response
if status.is_success() {
self.last_online.mark_seen(&parsed_url);
self.breakers.succeed(&parsed_url);
}
Ok(res)
}
#[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch_json<T>(&self, url: &str) -> Result<T, Error>
pub(crate) async fn fetch_json<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
self.do_fetch(url, "application/json").await
self.do_fetch(url, "application/json", strategy).await
}
#[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch_json_msky<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
let body = self
.do_deliver(
url,
&serde_json::json!({}),
"application/json",
"application/json",
strategy,
)
.await?
.bytes()
.await?;
Ok(serde_json::from_slice(&body)?)
}
#[tracing::instrument(name = "Fetch Activity+Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch<T>(&self, url: &str) -> Result<T, Error>
pub(crate) async fn fetch<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
self.do_fetch(url, "application/activity+json").await
self.do_fetch(url, "application/activity+json", strategy)
.await
}
async fn do_fetch<T>(&self, url: &str, accept: &str) -> Result<T, Error>
async fn do_fetch<T>(
&self,
url: &IriString,
accept: &str,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
let parsed_url = url.parse::<IriString>()?;
if !self.breakers.should_try(&parsed_url) {
return Err(ErrorKind::Breaker.into());
}
let signer = self.signer();
let span = tracing::Span::current();
let client: Client = self.client.borrow().clone();
let res = client
.get(url)
.insert_header(("Accept", accept))
.insert_header(Date(SystemTime::now().into()))
.signature(
self.config.clone(),
self.key_id.clone(),
move |signing_string| {
span.record("signing_string", signing_string);
span.in_scope(|| signer.sign(signing_string))
},
)
let body = self
.do_fetch_response(url, accept, strategy)
.await?
.send()
.await;
.bytes()
.await?;
let mut res = self.check_response(&parsed_url, res).await?;
let body = res
.body()
.await
.map_err(|e| ErrorKind::ReceiveResponse(url.to_string(), e.to_string()))?;
Ok(serde_json::from_slice(body.as_ref())?)
Ok(serde_json::from_slice(&body)?)
}
#[tracing::instrument(name = "Fetch response", skip(self), fields(signing_string))]
pub(crate) async fn fetch_response(&self, url: IriString) -> Result<ClientResponse, Error> {
if !self.breakers.should_try(&url) {
pub(crate) async fn fetch_response(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error> {
self.do_fetch_response(url, "*/*", strategy).await
}
pub(crate) async fn do_fetch_response(
&self,
url: &IriString,
accept: &str,
strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error> {
if !self.breakers.should_try(url) {
return Err(ErrorKind::Breaker.into());
}
let signer = self.signer();
let span = tracing::Span::current();
let client: Client = self.client.borrow().clone();
let res = client
let request = self
.client
.get(url.as_str())
.insert_header(("Accept", "*/*"))
.insert_header(Date(SystemTime::now().into()))
.no_decompress()
.signature(
self.config.clone(),
self.key_id.clone(),
move |signing_string| {
span.record("signing_string", signing_string);
span.in_scope(|| signer.sign(signing_string))
},
)
.await?
.send()
.await;
.header("Accept", accept)
.header("Date", Date(SystemTime::now().into()).to_string())
.signature(&self.config, self.key_id.clone(), move |signing_string| {
span.record("signing_string", signing_string);
span.in_scope(|| signer.sign(signing_string))
})
.await?;
let res = self.check_response(&url, res).await?;
let res = self.client.execute(request).await;
let res = self.check_response(url, strategy, res).await?;
Ok(res)
}
@ -331,7 +353,34 @@ impl Requests {
skip_all,
fields(inbox = inbox.to_string().as_str(), signing_string)
)]
pub(crate) async fn deliver<T>(&self, inbox: IriString, item: &T) -> Result<(), Error>
pub(crate) async fn deliver<T>(
&self,
inbox: &IriString,
item: &T,
strategy: BreakerStrategy,
) -> Result<(), Error>
where
T: serde::ser::Serialize + std::fmt::Debug,
{
self.do_deliver(
inbox,
item,
"application/activity+json",
"application/activity+json",
strategy,
)
.await?;
Ok(())
}
async fn do_deliver<T>(
&self,
inbox: &IriString,
item: &T,
content_type: &str,
accept: &str,
strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error>
where
T: serde::ser::Serialize + std::fmt::Debug,
{
@ -343,12 +392,12 @@ impl Requests {
let span = tracing::Span::current();
let item_string = serde_json::to_string(item)?;
let client: Client = self.client.borrow().clone();
let (req, body) = client
let request = self
.client
.post(inbox.as_str())
.insert_header(("Accept", "application/activity+json"))
.insert_header(("Content-Type", "application/activity+json"))
.insert_header(Date(SystemTime::now().into()))
.header("Accept", accept)
.header("Content-Type", content_type)
.header("Date", Date(SystemTime::now().into()).to_string())
.signature_with_digest(
self.config.clone(),
self.key_id.clone(),
@ -359,31 +408,41 @@ impl Requests {
span.in_scope(|| signer.sign(signing_string))
},
)
.await?
.split();
.await?;
let res = req.send_body(body).await;
let res = self.client.execute(request).await;
self.check_response(&inbox, res).await?;
let res = self.check_response(inbox, strategy, res).await?;
Ok(())
Ok(res)
}
fn signer(&self) -> Signer {
Signer {
private_key: self.private_key.clone(),
rng: self.rng.clone(),
}
}
}
struct Signer {
private_key: RsaPrivateKey,
private_key: Arc<RsaKeyPair>,
rng: SystemRandom,
}
impl Signer {
fn sign(&self, signing_string: &str) -> Result<String, Error> {
let signing_key = SigningKey::<Sha256>::new_with_prefix(self.private_key.clone());
let signature = signing_key.try_sign_with_rng(thread_rng(), signing_string.as_bytes())?;
Ok(base64::encode(signature.as_ref()))
let mut signature = vec![0; self.private_key.public().modulus_len()];
self.private_key
.sign(
&RSA_PKCS1_SHA256,
&self.rng,
signing_string.as_bytes(),
&mut signature,
)
.map_err(|_| ErrorKind::SignRequest)?;
Ok(STANDARD.encode(&signature))
}
}
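(ring requires the signature buffer to be exactly public().modulus_len() bytes,
which is why the Vec is pre-sized before sign() is called; the base64 encoding
of the result is unchanged from the old rsa-crate path.)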

View File

@ -1,4 +1,5 @@
mod actor;
mod healthz;
mod inbox;
mod index;
mod media;
@ -7,6 +8,7 @@ mod statics;
pub(crate) use self::{
actor::route as actor,
healthz::route as healthz,
inbox::route as inbox,
index::route as index,
media::route as media,

src/routes/healthz.rs (new file, +7 lines)
View File

@ -0,0 +1,7 @@
use crate::{data::State, error::Error};
use actix_web::{web, HttpResponse};
pub(crate) async fn route(state: web::Data<State>) -> Result<HttpResponse, Error> {
state.db.check_health().await?;
Ok(HttpResponse::Ok().finish())
}
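The route is wired at /healthz in main.rs above; because it awaits a real
database health check rather than returning a static 200, orchestration probes
fail when sled does, not just when the process dies.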

View File

@ -16,7 +16,8 @@ use activitystreams::{
use actix_web::{web, HttpResponse};
use http_signature_normalization_actix::prelude::{DigestVerified, SignatureVerified};
#[tracing::instrument(name = "Inbox", skip_all)]
#[tracing::instrument(name = "Inbox", skip_all, fields(id = tracing::field::debug(&input.id_unchecked()), kind = tracing::field::debug(&input.kind())))]
#[allow(clippy::too_many_arguments)]
pub(crate) async fn route(
state: web::Data<State>,
actors: web::Data<ActorCache>,
@ -24,22 +25,48 @@ pub(crate) async fn route(
client: web::Data<Requests>,
jobs: web::Data<JobServer>,
input: web::Json<AcceptedActivities>,
verified: Option<(SignatureVerified, DigestVerified)>,
digest_verified: Option<DigestVerified>,
signature_verified: Option<SignatureVerified>,
) -> Result<HttpResponse, Error> {
let input = input.into_inner();
let actor = actors
.get(
input.actor()?.as_single_id().ok_or(ErrorKind::MissingId)?,
&client,
)
.await?
.into_inner();
let kind = input.kind().ok_or(ErrorKind::MissingKind)?;
let is_allowed = state.db.is_allowed(actor.id.clone());
let is_connected = state.db.is_connected(actor.id.clone());
if digest_verified.is_some() && signature_verified.is_none() && *kind == ValidTypes::Delete {
return Ok(accepted(serde_json::json!({})));
} else if config.validate_signatures()
&& (digest_verified.is_none() || signature_verified.is_none())
{
return Err(ErrorKind::NoSignature(None).into());
}
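// note: an unsigned Delete is accepted (202) on purpose; a deleted actor's key
// can no longer be fetched to verify the signature, and dropping the activity
// here avoids endless redelivery. Compare the is_gone() handling in verify.rs.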
let (is_allowed, is_connected) = tokio::try_join!(is_allowed, is_connected)?;
let actor_id = if input.id_unchecked().is_some() {
input.actor()?.as_single_id().ok_or(ErrorKind::MissingId)?
} else {
input
.actor_unchecked()
.as_single_id()
.ok_or(ErrorKind::MissingId)?
};
let actor = actors.get(actor_id, &client).await?.into_inner();
if let Some(verified) = signature_verified {
if actor.public_key_id.as_str() != verified.key_id() {
tracing::error!("Actor signed with wrong key");
return Err(ErrorKind::BadActor(
actor.public_key_id.to_string(),
verified.key_id().to_owned(),
)
.into());
}
} else if config.validate_signatures() {
tracing::error!("This case should never be reachable, since I handle signature checks earlier in the flow. If you see this in a log it means I did it wrong");
return Err(ErrorKind::NoSignature(Some(actor.public_key_id.to_string())).into());
}
let is_allowed = state.db.is_allowed(actor.id.clone()).await?;
let is_connected = state.db.is_connected(actor.id.clone()).await?;
if !is_allowed {
return Err(ErrorKind::NotAllowed(actor.id.to_string()).into());
@ -49,29 +76,16 @@ pub(crate) async fn route(
return Err(ErrorKind::NotSubscribed(actor.id.to_string()).into());
}
if config.validate_signatures() && verified.is_none() {
return Err(ErrorKind::NoSignature(actor.public_key_id.to_string()).into());
} else if config.validate_signatures() {
if let Some((verified, _)) = verified {
if actor.public_key_id.as_str() != verified.key_id() {
tracing::error!("Bad actor, more info: {:?}", input);
return Err(ErrorKind::BadActor(
actor.public_key_id.to_string(),
verified.key_id().to_owned(),
)
.into());
}
}
}
match input.kind().ok_or(ErrorKind::MissingKind)? {
match kind {
ValidTypes::Accept => handle_accept(&config, input).await?,
ValidTypes::Reject => handle_reject(&config, &jobs, input, actor).await?,
ValidTypes::Announce | ValidTypes::Create => {
handle_announce(&state, &jobs, input, actor).await?
}
ValidTypes::Follow => handle_follow(&config, &jobs, input, actor).await?,
ValidTypes::Delete | ValidTypes::Update => handle_forward(&jobs, input, actor).await?,
ValidTypes::Add | ValidTypes::Delete | ValidTypes::Remove | ValidTypes::Update => {
handle_forward(&jobs, input, actor).await?
}
ValidTypes::Undo => handle_undo(&config, &jobs, input, actor, is_connected).await?,
};
@ -203,7 +217,7 @@ async fn handle_announce(
.as_single_id()
.ok_or(ErrorKind::MissingId)?;
if state.is_cached(object_id).await {
if state.is_cached(object_id) {
return Err(ErrorKind::Duplicate.into());
}

View File

@ -14,8 +14,12 @@ const MINIFY_CONFIG: minify_html::Cfg = minify_html::Cfg {
keep_html_and_head_opening_tags: false,
keep_spaces_between_attributes: true,
keep_comments: false,
minify_js: true,
keep_input_type_text_attr: true,
keep_ssi_comments: false,
preserve_brace_template_syntax: false,
preserve_chevron_percent_template_syntax: false,
minify_css: true,
minify_js: true,
remove_bangs: true,
remove_processing_instructions: true,
};
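(minify_html's Cfg is spelled out field by field because this is a const item,
so the crate upgrade forces the newly added fields to appear explicitly; the
values above keep the old behavior and additionally switch on CSS minification.)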
@ -33,12 +37,16 @@ pub(crate) async fn route(
state: web::Data<State>,
config: web::Data<Config>,
) -> Result<HttpResponse, Error> {
let all_nodes = state.node_cache().nodes().await?;
let all_nodes = state.node_cache.nodes().await?;
let mut nodes = Vec::new();
let mut local = Vec::new();
for node in all_nodes {
if !state.is_connected(&node.base) {
continue;
}
if node
.base
.authority_str()
@ -71,7 +79,7 @@ pub(crate) async fn route(
let mut buf = BufWriter::new(Vec::new());
crate::templates::index(&mut buf, &local, &nodes, &config)?;
crate::templates::index_html(&mut buf, &local, &nodes, &config)?;
let html = buf.into_inner().map_err(|e| {
tracing::error!("Error rendering template, {}", e.error());
ErrorKind::FlushBuffer

View File

@ -1,4 +1,8 @@
use crate::{data::MediaCache, error::Error, requests::Requests};
use crate::{
data::MediaCache,
error::Error,
requests::{BreakerStrategy, Requests},
};
use actix_web::{body::BodyStream, web, HttpResponse};
use uuid::Uuid;
@ -11,7 +15,9 @@ pub(crate) async fn route(
let uuid = uuid.into_inner();
if let Some(url) = media.get_url(uuid).await? {
let res = requests.fetch_response(url).await?;
let res = requests
.fetch_response(&url, BreakerStrategy::Allow404AndBelow)
.await?;
let mut response = HttpResponse::build(res.status());
@ -19,7 +25,7 @@ pub(crate) async fn route(
response.insert_header((name.clone(), value.clone()));
}
return Ok(response.body(BodyStream::new(res)));
return Ok(response.body(BodyStream::new(res.bytes_stream())));
}
Ok(HttpResponse::NotFound().finish())

View File

@ -24,18 +24,18 @@ struct Links {
links: Vec<Link>,
}
#[tracing::instrument(name = "NodeInfo")]
#[tracing::instrument(name = "NodeInfo", skip_all)]
pub(crate) async fn route(
config: web::Data<Config>,
state: web::Data<State>,
) -> web::Json<NodeInfo> {
let (inboxes, blocks) = tokio::join!(state.db.inboxes(), async {
if config.publish_blocks() {
Some(state.db.blocks().await.unwrap_or_default())
} else {
None
}
});
let inboxes = state.db.inboxes().await;
let blocks = if config.publish_blocks() {
Some(state.db.blocks().await.unwrap_or_default())
} else {
None
};
let peers = inboxes
.unwrap_or_default()
@ -44,6 +44,8 @@ pub(crate) async fn route(
.map(|s| s.to_owned())
.collect();
let open_registrations = !config.restricted_mode();
web::Json(NodeInfo {
version: NodeInfoVersion,
software: Software {
@ -55,7 +57,7 @@ pub(crate) async fn route(
inbound: vec![],
outbound: vec![],
},
open_registrations: false,
open_registrations,
usage: Usage {
users: Users {
total: 1,

View File

@ -5,7 +5,7 @@ use actix_web::{
};
#[allow(clippy::async_yields_async)]
#[tracing::instrument(name = "Statistics")]
#[tracing::instrument(name = "Statics")]
pub(crate) async fn route(filename: web::Path<String>) -> HttpResponse {
if let Some(data) = StaticFile::get(&filename.into_inner()) {
HttpResponse::Ok()

src/spawner.rs (new file, +92 lines)
View File

@ -0,0 +1,92 @@
use async_cpupool::CpuPool;
use http_signature_normalization_actix::{Canceled, Spawn};
use std::time::Duration;
#[derive(Clone)]
pub(crate) struct Spawner {
pool: CpuPool,
}
impl Spawner {
pub(crate) fn build(name: &'static str, threads: u16) -> color_eyre::Result<Self> {
let pool = CpuPool::configure()
.name(name)
.max_threads(threads)
.build()?;
Ok(Spawner { pool })
}
pub(crate) async fn close(self) {
self.pool.close().await;
}
}
impl std::fmt::Debug for Spawner {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Spawner").finish()
}
}
async fn timer<Fut>(fut: Fut) -> Fut::Output
where
Fut: std::future::Future,
{
let id = uuid::Uuid::new_v4();
metrics::counter!("relay.spawner.wait-timer.start").increment(1);
let mut interval = tokio::time::interval(Duration::from_secs(5));
// pass the first tick (instant)
interval.tick().await;
let mut fut = std::pin::pin!(fut);
let mut counter = 0;
loop {
tokio::select! {
out = &mut fut => {
metrics::counter!("relay.spawner.wait-timer.end").increment(1);
return out;
}
_ = interval.tick() => {
counter += 1;
metrics::counter!("relay.spawner.wait-timer.pending").increment(1);
tracing::warn!("Blocking operation {id} is taking a long time, {} seconds", counter * 5);
}
}
}
}
impl Spawn for Spawner {
type Future<T> = std::pin::Pin<Box<dyn std::future::Future<Output = Result<T, Canceled>>>>;
fn spawn_blocking<Func, Out>(&self, func: Func) -> Self::Future<Out>
where
Func: FnOnce() -> Out + Send + 'static,
Out: Send + 'static,
{
let pool = self.pool.clone();
Box::pin(async move { timer(pool.spawn(func)).await.map_err(|_| Canceled) })
}
}
impl http_signature_normalization_reqwest::Spawn for Spawner {
type Future<T> = std::pin::Pin<Box<dyn std::future::Future<Output = Result<T, http_signature_normalization_reqwest::Canceled>> + Send>> where T: Send;
fn spawn_blocking<Func, Out>(&self, func: Func) -> Self::Future<Out>
where
Func: FnOnce() -> Out + Send + 'static,
Out: Send + 'static,
{
let pool = self.pool.clone();
Box::pin(async move {
timer(pool.spawn(func))
.await
.map_err(|_| http_signature_normalization_reqwest::Canceled)
})
}
}
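A hedged usage sketch for the pool above (expensive_hash is a hypothetical
CPU-bound function):

    use http_signature_normalization_actix::Spawn;

    let spawner = Spawner::build("sign-cpu", 4)?;
    let out = spawner.spawn_blocking(|| expensive_hash(b"payload")).await;
    // out: Result<_, Canceled>; meanwhile timer() warns every 5s it stays pending
    spawner.close().await;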

View File

@ -46,7 +46,7 @@ pub(crate) fn start(admin_handle: String, db: Db, token: &str) {
let bot = Bot::new(token);
let admin_handle = Arc::new(admin_handle);
actix_rt::spawn(async move {
tokio::spawn(async move {
let command_handler = teloxide::filter_command::<Command, _>().endpoint(
move |bot: Bot, msg: Message, cmd: Command| {
let admin_handle = admin_handle.clone();
@ -89,19 +89,19 @@ async fn answer(bot: Bot, msg: Message, cmd: Command, db: Db) -> ResponseResult<
.await?;
}
Command::Block { domain } if db.add_blocks(vec![domain.clone()]).await.is_ok() => {
bot.send_message(msg.chat.id, format!("{} has been blocked", domain))
bot.send_message(msg.chat.id, format!("{domain} has been blocked"))
.await?;
}
Command::Unblock { domain } if db.remove_blocks(vec![domain.clone()]).await.is_ok() => {
bot.send_message(msg.chat.id, format!("{} has been unblocked", domain))
bot.send_message(msg.chat.id, format!("{domain} has been unblocked"))
.await?;
}
Command::Allow { domain } if db.add_allows(vec![domain.clone()]).await.is_ok() => {
bot.send_message(msg.chat.id, format!("{} has been allowed", domain))
bot.send_message(msg.chat.id, format!("{domain} has been allowed"))
.await?;
}
Command::Disallow { domain } if db.remove_allows(vec![domain.clone()]).await.is_ok() => {
bot.send_message(msg.chat.id, format!("{} has been disallowed", domain))
bot.send_message(msg.chat.id, format!("{domain} has been disallowed"))
.await?;
}
Command::ListAllowed => {

View File

@ -0,0 +1,15 @@
[Unit]
Description=Activitypub Relay
Documentation=https://git.asonix.dog/asonix/relay
Wants=network.target
After=network.target
[Install]
WantedBy=multi-user.target
[Service]
Type=simple
EnvironmentFile=/etc/systemd/system/example-relay.service.env
ExecStart=/path/to/relay
Restart=always

View File

@ -0,0 +1,19 @@
HOSTNAME='relay.example.com'
ADDR='0.0.0.0'
PORT='8080'
RESTRICTED_MODE='true'
VALIDATE_SIGNATURES='true'
HTTPS='true'
PRETTY_LOG='false'
PUBLISH_BLOCKS='true'
DEBUG='false'
SLED_PATH='/opt/sled'
TELEGRAM_ADMIN_HANDLE='myhandle'
RUST_BACKTRACE='full'
FOOTER_BLURB='Contact <a href="https://masto.example.com/@example">@example</a> for inquiries.'
LOCAL_DOMAINS='masto.example.com'
LOCAL_BLURB='<p>An ActivityPub relay for servers. Currently running somewhere. Let me know if you want to join!</p>'
OPENTELEMETRY_URL='http://otel.example.com:4317'
API_TOKEN='blahblahblahblahblahblahblah'
TELEGRAM_TOKEN='blahblahblahblahblahblahblah'

View File

@ -0,0 +1,11 @@
[Unit]
Description=Activitypub Relay Socket
Before=multi-user.target
After=network.target
[Socket]
Service=example-relay.service
ListenStream=8080
[Install]
WantedBy=sockets.target

View File

@ -1,7 +1,7 @@
@use crate::{
config::{Config, UrlKind},
data::Node,
templates::{info, instance, statics::index_css},
templates::{info_html, instance_html, statics::index_css},
};
@(local: &[Node], nodes: &[Node], config: &Config)
@ -9,29 +9,29 @@ templates::{info, instance, statics::index_css},
<!doctype html>
<html>
<head lang="en">
<head lang="fr">
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>@config.hostname() | ActivityPub Relay</title>
<title>@config.hostname() | Relais ActivityPub</title>
<link rel="stylesheet" href="/static/@index_css.name" type="text/css" />
</head>
<body>
<header>
<div class="header-text">
<h1>@Config::software_name()<span class="smaller">@Config::software_version()</span></h1>
<p>on @config.hostname()</p>
<h1>@config.hostname()</h1>
<p>@Config::software_name() <span class="smaller">@Config::software_version()</span></p>
</div>
</header>
<main>
@if !local.is_empty() || config.local_blurb().is_some() {
<article>
<h3>About</h3>
<h3>À propos</h3>
<section class="local-explainer">
@if let Some(blurb) = config.local_blurb() {
@blurb
@blurb
} else {
<p>These domains are run by the same admins as this relay.</p>
<p>Ces domaines sont administrés par la même équipe que ce relais.</p>
}
</section>
@if !local.is_empty() {
@ -39,13 +39,13 @@ templates::{info, instance, statics::index_css},
@for node in local {
@if let Some(inst) = node.instance.as_ref() {
<li>
@:instance(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(),
@:instance_html(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(),
&node.base)
</li>
} else {
@if let Some(inf) = node.info.as_ref() {
<li>
@:info(inf, &node.base)
@:info_html(inf, &node.base)
</li>
}
}
@ -55,55 +55,52 @@ templates::{info, instance, statics::index_css},
</article>
}
<article>
<h3>Joining</h3>
<a name="joining"><h3>Comment rejoindre</h3></a>
<section class="joining">
@if config.restricted_mode() {
<h4>
This relay is Restricted
Ce relais est restreint.
</h4>
<p>
This relay is currently in Restricted Mode, which means servers need to be approved ahead of time by the relay
administrator. Please contact the admin before attempting to join.
Ce relais est restreint ; les instances doivent être approuvées avant de pouvoir se connecter. Merci de
contacter l'<a href="https://mastodon.xolus.net/@@max">administrateur</a> avant de tenter d'y connecter votre instance.
</p>
} else {
<p>
If you are the admin of a server that supports activitypub relays, you can add
this relay to your server.
Ce relais est ouvert ; vous pouvez l'ajouter à la configuration de votre serveur supportant ActivityPub.
</p>
}
<h4>Mastodon</h4>
<p>
Mastodon admins can add this relay by adding
<pre>@config.generate_url(UrlKind::Inbox)</pre> in their relay settings.
Les administrateurs de Mastodon peuvent connecter ce relais en ajoutant
<pre>@config.generate_url(UrlKind::Inbox)</pre> dans la liste de leurs relais.
</p>
<h4>Pleroma</h4>
<p>
Pleroma admins can add this relay by adding
Les administrateurs de Pleroma peuvent connecter ce relais en ajoutant
<pre>@config.generate_url(UrlKind::Actor)</pre>
to their relay settings (I don't actually know how pleroma handles adding
relays, is it still a mix command?).
dans la liste de leurs relais.
</p>
<h4>Others</h4>
<h4>Autres</h4>
<p>
Consult the documentation for your server. It's likely that it follows either
Mastodon or Pleroma's relay formatting.
Vérifiez la documentation de votre installation, qui suit probablement la convention de Mastodon ou de Pleroma.
</p>
</section>
</article>
@if !nodes.is_empty() {
<article>
<h3>@nodes.len() Connected Servers</h3>
<h3>@nodes.len() instances connectées</h3>
<ul>
@for node in nodes {
@if let Some(inst) = node.instance.as_ref() {
<li>
@:instance(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(),
@:instance_html(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(),
&node.base)
</li>
} else {
@if let Some(inf) = node.info.as_ref() {
<li>
@:info(inf, &node.base)
@:info_html(inf, &node.base)
</li>
}
}
@ -117,7 +114,7 @@ templates::{info, instance, statics::index_css},
<div>@blurb</div>
}
<p>
The source code for this project can be found at
Code source de l'application disponible ici:
<a href="@config.source_code()">@config.source_code()</a>
</p>
</footer>

View File

@ -8,9 +8,9 @@
<h4 class="padded"><a href="@base">@authority</a></h4>
}
<p class="padded">
Running @info.software, version @info.version.
Utilise @info.software, version @info.version.
@if info.reg {
Registration is open
Inscriptions ouvertes
}
</p>
</section>

View File

@ -1,4 +1,4 @@
@use crate::{db::{Contact, Instance}, templates::admin};
@use crate::{db::{Contact, Instance}, templates::admin_html};
@use activitystreams::iri_string::types::IriString;
@(instance: &Instance, software: Option<&str>, contact: Option<&Contact>, base: &IriString)
@ -7,31 +7,33 @@
<h4 class="padded"><a href="@base">@instance.title</a></h4>
<p class="padded">
@if let Some(software) = software {
Running @software, version @instance.version.
Utilise @software, version @instance.version.
}
<br>
@if instance.reg {
<br>Registration is open.
@if instance.requires_approval {
Accounts must be approved by an admin.
}
} else{
Registration is closed
@if instance.requires_approval {
<span class="moderated">Inscriptions soumises à approbation.</span>
} else {
<span class="open">Inscriptions ouvertes.</span>
}
} else {
<span class="closed">Inscriptions fermées.</span>
}
</p>
@if !instance.description.trim().is_empty() || contact.is_some() {
<div class="instance-info">
@if !instance.description.trim().is_empty() {
<h5 class="instance-description">@instance.title's description:</h5>
<div class="description">
<div class="please-stay">
@Html(instance.description.trim())
</div>
<div class="instance-info">
@if !instance.description.trim().is_empty() {
<h5 class="instance-description">Description:</h5>
<div class="description">
<div class="please-stay">
@Html(instance.description.trim())
</div>
</div>
}
@if let Some(contact) = contact {
<h5 class="instance-admin">Administré par:</h5>
@:admin_html(contact, base)
</div>
}
@if let Some(contact) = contact {
<h5 class="instance-admin">@instance.title's admin:</h5>
@:admin(contact, base)
}
</div>
}
</section>