
Update to next-gen

carbotaniuman 2021-01-24 04:55:11 +00:00
parent a228f72a86
commit 0cb38c66dc
83 changed files with 4456 additions and 2899 deletions

.gitignore

@@ -104,6 +104,11 @@ gradle-app.setting
 nbproject/**
 log/**
-cache/**
+images/**
+*.db
-settings.json
+*settings.yaml
+/cache
+docker/data
+data.mv.db

.gitlab-ci.yml

@@ -28,20 +28,7 @@ publish:
   paths:
     - "*.jar"
     - "mangadex_at_home-*.zip"
-    - settings.sample.json
+    - settings.sample.yaml
-
-publish_latest:
-  image: alpine
-  stage: publish
-  before_script:
-    - apk update && apk add git
-    - export VERSION=`git describe --tags --dirty`
-  script:
-    - cp build/libs/mangadex_at_home-${VERSION}-all.jar build/libs/mangadex_at_home-latest-all.jar
-  artifacts:
-    name: "mangadex_at_home-latest"
-    paths:
-      - "build/libs/mangadex_at_home-latest-all.jar"

 publish_docker:
   image: docker:git
@@ -57,4 +44,4 @@ publish_docker:
     - mv build/libs/mangadex_at_home-${VERSION}-all.jar build/libs/mangadex_at_home.jar
     - docker build -t ${CI_REGISTRY_IMAGE}:${VERSION} -t ${CI_REGISTRY_IMAGE}:latest .
     - docker push ${CI_REGISTRY_IMAGE}:${VERSION}
     - docker push ${CI_REGISTRY_IMAGE}:latest

CHANGELOG.md

@@ -17,6 +17,32 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Security

+## [2.0.0-rc1] - 2020-01-23
+This release contains many breaking changes! Of note are the changes to the cache folders, database location, and settings format.
+### Added
+- [2020-01-23] Added `external_max_kilobits_per_second` config option [@carbotaniuman].
+- [2020-01-23] Various internal tests to ensure stability [@carbotaniuman].
+- [2020-01-23] Added `/prometheus` endpoint for Prometheus stats and eventual integration [@Tristan].
+- [2020-01-23] docker-compose for easy spinup of a Prometheus + Grafana stack [@carbotaniuman].
+### Changed
+- [2020-01-23] Changed the settings to a `settings.yaml` file [@carbotaniuman].
+- [2020-01-23] Changed from `cache` to `images` for images folder [@carbotaniuman].
+- [2020-01-23] Changed folder structure to be simpler [@carbotaniuman].
+- [2020-01-23] Coalesced DB writes to reduce DB load [@carbotaniuman].
+- [2020-01-23] Store metadata along with the image to reduce IOPS [@carbotaniuman].
+- [2020-01-23] Updated internal dependencies to improve performance [@carbotaniuman].
+### Removed
+- [2020-01-23] Unceremoniously removed the old WebUI [@carbotaniuman].
+### Fixed
+- [2020-01-23] Fixed a long-standing cache deadlock [@carbotaniuman].
+- [2020-01-23] Fixed another shutdown bug [@carbotaniuman].
+- [2020-01-23] Fixed various CPU and memory leaks [@carbotaniuman].
+- [2020-01-23] Fixed another shutdown bug [@carbotaniuman].
+- [2020-01-23] Fixed data races when storing images [@carbotaniuman].
+
 ## [1.2.4] - 2021-01-09
 ### Fixed
 - [2021-01-08] Better exception handling [@carbotaniuman].
@@ -36,7 +62,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Fixed
 - [2020-08-11] Revert WebUI changes in 1.2.1 [@carbotaniuman].
 ## [1.2.1] - 2020-08-11
 ### Added
 - [2020-08-11] New CLI for specifying database location, cache folder, and settings [@carbotaniuman].
@@ -254,7 +279,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Fixed
 - [2020-06-11] Tweaked logging configuration to reduce log file sizes by [@carbotaniuman].

-[Unreleased]: https://gitlab.com/mangadex/mangadex_at_home/-/compare/1.2.4...HEAD
+[Unreleased]: https://gitlab.com/mangadex/mangadex_at_home/-/compare/2.0.0-rc1...HEAD
+[2.0.0-rc1]: https://gitlab.com/mangadex/mangadex_at_home/-/compare/1.2.4...2.0.0-rc1
 [1.2.4]: https://gitlab.com/mangadex/mangadex_at_home/-/compare/1.2.3...1.2.4
 [1.2.3]: https://gitlab.com/mangadex/mangadex_at_home/-/compare/1.2.2...1.2.3
 [1.2.2]: https://gitlab.com/mangadex/mangadex_at_home/-/compare/1.2.1...1.2.2

Dockerfile

@@ -1,7 +1,13 @@
-FROM openjdk:15-alpine
+FROM adoptopenjdk:15
 WORKDIR /mangahome
-COPY /build/libs/mangadex_at_home.jar .
+ADD /build/libs/mangadex_at_home.jar /mangahome/mangadex_at_home.jar
-RUN apk update
-VOLUME "/mangahome/cache"
 EXPOSE 443 8080
-CMD java -Dfile-level=off -Dstdout-level=trace -jar mangadex_at_home.jar
+STOPSIGNAL 2
+CMD exec java \
+    -Dfile-level=off \
+    -Dstdout-level=info \
+    -jar mangadex_at_home.jar
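For a local test build of this image, the jar has to be renamed the same way the publish_docker CI job does before `docker build` runs; the commands below are only a sketch of that flow, and the `mangadex_at_home:dev` tag is a placeholder:

    ./gradlew build
    cp "build/libs/mangadex_at_home-$(git describe --tags --dirty)-all.jar" build/libs/mangadex_at_home.jar
    docker build -t mangadex_at_home:dev .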

README.md

@@ -11,3 +11,5 @@
 - Run `./gradlew build` in order to build the entire project
 - Find the generated jars in `build/libs`, where the `-all` jar is a fat-jar with all dependencies
+
+### Run with [Docker](docker) (& optionally Prometheus+Grafana)

build.gradle

@@ -1,16 +1,17 @@
 plugins {
+    id "jacoco"
     id "java"
-    id "org.jetbrains.kotlin.jvm" version "1.4.0"
+    id "org.jetbrains.kotlin.jvm" version "1.4.20"
     id "org.jetbrains.kotlin.kapt" version "1.4.0"
     id "application"
     id "com.github.johnrengelman.shadow" version "5.2.0"
-    id "com.diffplug.spotless" version "5.2.0"
+    id "com.diffplug.spotless" version "5.8.2"
     id "dev.afanasev.sekret" version "0.0.7"
 }

 group = "com.mangadex"
 version = "git describe --tags --dirty".execute().text.trim()
-mainClassName = "mdnet.base.Main"
+mainClassName = "mdnet.Main"

 repositories {
     mavenCentral()
@@ -18,36 +19,54 @@ repositories {
 }

 configurations {
+    runtime.exclude group: "org.jetbrains.kotlinx", module: "kotlinx-coroutines-core"
     runtime.exclude group: "com.sun.mail", module: "javax.mail"
 }

 dependencies {
     compileOnly group: "dev.afanasev", name: "sekret-annotation", version: "0.0.7"

+    implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk8"
+    implementation "org.jetbrains.kotlin:kotlin-reflect"

     implementation group: "commons-io", name: "commons-io", version: "2.7"
+    implementation group: "org.apache.commons", name: "commons-compress", version: "1.20"
+    implementation group: "ch.qos.logback", name: "logback-classic", version: "1.3.0-alpha4"
+    implementation group: "io.micrometer", name: "micrometer-registry-prometheus", version: "1.6.2"
+    implementation group: "com.maxmind.geoip2", name: "geoip2", version: "2.15.0"

     implementation group: "org.http4k", name: "http4k-core", version: "$http_4k_version"
     implementation group: "org.http4k", name: "http4k-format-jackson", version: "$http_4k_version"
-    implementation group: "com.fasterxml.jackson.datatype", name: "jackson-datatype-jsr310", version: "2.11.1"
-    implementation group: "org.http4k", name: "http4k-client-apache4", version: "$http_4k_version"
+    implementation group: "com.fasterxml.jackson.dataformat", name: "jackson-dataformat-yaml", version: "2.12.1"
+    implementation group: "com.fasterxml.jackson.datatype", name: "jackson-datatype-jsr310", version: "2.12.1"
+    implementation group: "org.http4k", name: "http4k-client-apache", version: "$http_4k_version"
+    implementation group: "org.http4k", name: "http4k-metrics-micrometer", version: "$http_4k_version"
     implementation group: "org.http4k", name: "http4k-server-netty", version: "$http_4k_version"
+    testImplementation group: "org.http4k", name: "http4k-testing-kotest", version: "$http_4k_version"
     runtimeOnly group: "io.netty", name: "netty-tcnative-boringssl-static", version: "2.0.34.Final"

-    implementation group: "ch.qos.logback", name: "logback-classic", version: "1.3.0-alpha4"
-    implementation group: "org.jetbrains.exposed", name: "exposed-core", version: "$exposed_version"
-    implementation group: "org.jetbrains.exposed", name: "exposed-dao", version: "$exposed_version"
-    implementation group: "org.jetbrains.exposed", name: "exposed-jdbc", version: "$exposed_version"
-    implementation group: "org.xerial", name: "sqlite-jdbc", version: "3.32.3.2"
+    implementation group: "com.h2database", name: "h2", version: "1.4.200"
+    implementation "org.ktorm:ktorm-core:$ktorm_version"
+    implementation "org.ktorm:ktorm-jackson:$ktorm_version"

     implementation "info.picocli:picocli:4.5.0"
     kapt "info.picocli:picocli-codegen:4.5.0"

+    testImplementation "io.kotest:kotest-runner-junit5:$kotest_version"
+    testImplementation "io.kotest:kotest-assertions-core:$kotest_version"
+    testImplementation "io.mockk:mockk:1.10.4"
 }

+test {
+    useJUnitPlatform()
+}
+
+task testDev(type: Test) {
+    group = "verification"
+    useJUnitPlatform()
+    filter {
+        excludeTestsMatching '*SlowTest'
+    }
+}

 kapt {
     arguments {
         arg("project", "${project.group}/${project.name}")
@@ -59,8 +78,13 @@ java {
     targetCompatibility = JavaVersion.VERSION_1_8
 }

+tasks.withType(org.jetbrains.kotlin.gradle.tasks.KotlinCompile).all {
+    kotlinOptions {
+        jvmTarget = "1.8"
+    }
+}

 spotless {
+    lineEndings 'UNIX'
     java {
         targetExclude("build/generated/**/*")
         eclipse()
@@ -69,7 +93,7 @@ spotless {
         endWithNewline()
     }
     kotlin {
-        ktlint()
+        ktlint("0.40.0")
         trimTrailingWhitespace()
         endWithNewline()
     }
@@ -92,7 +116,7 @@ tasks.register("depsize") {
     }
 }

-tasks.register("depsize-all-configurations") {
+tasks.register("depsizeAll") {
     description = 'Prints dependencies for all available configurations'
     doLast() {
         configurations
@@ -118,7 +142,7 @@ def listConfigurationDependencies(Configuration configuration) {
             out << "${String.format(formatStr, (it.length() / 1024))} kb\n"
         }
     } else {
-        out << 'No dependencies found';
+        out << 'No dependencies found'
     }
     println(out)
 }

docker/README.md (new file)

@@ -0,0 +1,61 @@
# Run with Docker
⚠ This is a bit more involved to set up than just running the jar ⚠
## Prerequisites
Docker Desktop for your operating system.
Once installed, you can check that it works by opening a command prompt and running
docker run -it hello-world
## Run as a standalone container
Use either a specific image, preferably the [latest image published](https://gitlab.com/mangadex-pub/mangadex_at_home/container_registry/1200259)
> While it might work, using `registry.gitlab.com/mangadex-pub/mangadex_at_home:latest` is a bad idea as we do not guarantee forward-compatibility
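For a standalone run, an invocation along these lines should work; the image tag is a placeholder, and the mounted settings file and data directory simply mirror the volume layout of the docker-compose service further down, so adjust them to your own setup:

    docker run -d --name mangadex-at-home \
      -p 443:443 \
      -v "$(pwd)/settings.json:/mangahome/settings.json:ro" \
      -v "$(pwd)/data/cache/:/mangahome/data/" \
      registry.gitlab.com/mangadex-pub/mangadex_at_home:<version>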
## Run with Prometheus and Grafana (i.e. dashboards)
![](dashboard.png)
### Quickstart
1. Install `docker-compose`. Follow the steps [here](https://docs.docker.com/compose/install/)
2. Copy the `docker` directory somewhere *on the drive you want to use as cache storage*
a. edit `docker-compose.yml` and replace `registry.gitlab.com/mangadex-pub/mangadex_at_home:<version>` with the appropriate version
3. Copy your `settings.json` inside that directory (it should be next to `docker-compose.yml`)
4. Run `docker-compose up -d` from within this directory
5. That's it. You should now check the following:
- There are 3 containers in 'Up' state when running `docker ps` (`mangadex-at-home`, `prometheus` and `grafana`)
- The test image loads at [https://localhost/data/8172a46adc798f4f4ace6663322a383e/B18.png](https://localhost/data/8172a46adc798f4f4ace6663322a383e/B18.png)
- Prometheus loads at [http://localhost:9090](http://localhost:9090)
- Grafana loads at [http://localhost:3000/dashboards](http://localhost:3000/dashboards) and you can open the dashboard
### Notes
The pre-made configuration hardcodes both the public port 443 and this directory structure:
<directory where you run 'docker-compose up'>
  Folders/files copied from the git repository
    -> prometheus/...       - pre-made config
    -> grafana/...          - pre-made config
    -> docker-compose.yml
  Your settings.json
    -> settings.json
  Created by the containers
    -> data/
       -> cache             - the client's image cache
       -> prometheus        - prometheus database files
       -> grafana           - grafana files
All of this can be changed to suit your needs, but doing so is not recommended unless you are already familiar with Docker.
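For example, moving the image cache to another drive only requires changing the corresponding volume mapping in `docker-compose.yml`; the `/mnt/storage` path in this sketch is purely illustrative:

    services:
      mangadex-at-home:
        volumes:
          - ./settings.json:/mangahome/settings.json:ro
          # image cache stored on another drive instead of ./data/cache/
          - /mnt/storage/mdah-cache/:/mangahome/data/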

docker/dashboard.png (new binary file, 415 KiB)

docker/docker-compose.yml (new file)

@@ -0,0 +1,72 @@
version: '3.8'

services:
  mangadex-at-home:
    container_name: mangadex-at-home
    image: "registry.gitlab.com/mangadex-pub/mangadex_at_home:<version>"
    ports:
      - 443:443
    volumes:
      - ./settings.json:/mangahome/settings.json:ro
      - ./data/cache/:/mangahome/data/
    environment:
      JAVA_TOOL_OPTIONS: "-Xms1G -Xmx1G -XX:+UseG1GC -Xss512K"
    command: [
      "bash",
      "-c",
      "java \
      -Dfile-level=off \
      -Dstdout-level=info \
      -jar mangadex_at_home.jar \
      --cache /mangahome/data/images \
      --database /mangahome/data/metadata"
    ]
    logging:
      driver: "json-file"
      options:
        max-size: "20m"
        max-file: "2"

  prometheus:
    container_name: prometheus
    image: prom/prometheus
    user: "root"
    group_add:
      - 0
    ports:
      - 9090:9090
    links:
      - mangadex-at-home
    volumes:
      - ./prometheus/:/etc/prometheus/:ro
      - ./data/prometheus/:/prometheus/
    logging:
      driver: "json-file"
      options:
        max-size: "20m"
        max-file: "2"

  grafana:
    container_name: grafana
    image: grafana/grafana
    user: "root"
    group_add:
      - 0
    ports:
      - 3000:3000
    links:
      - prometheus
    volumes:
      - ./grafana/:/etc/grafana/:ro
      - ./data/grafana/:/var/lib/grafana/
    environment:
      GF_INSTALL_PLUGINS: "grafana-worldmap-panel"
    logging:
      driver: "json-file"
      options:
        max-size: "20m"
        max-file: "2"

networks:
  mangadex-at-home: { }

File diff suppressed because it is too large.

docker/grafana/grafana.ini (new file)

@@ -0,0 +1,8 @@
[auth.anonymous]
enabled = true
# Organization name that should be used for unauthenticated users
org_name = Main Org.
# Role for unauthenticated users, other valid values are `Editor` and `Admin`
org_role = Admin

Grafana dashboard provisioning config (new file, under docker/grafana/)

@@ -0,0 +1,18 @@
apiVersion: 1

providers:
  # <string> an unique provider name. Required
  - name: 'MangaDex@Home dashboards provider'
    # <string> provider type. Default to 'file'
    type: file
    # <bool> disable dashboard deletion
    disableDeletion: true
    # <int> how often Grafana will scan for changed dashboards
    updateIntervalSeconds: 10
    # <bool> allow updating provisioned dashboards from the UI
    allowUiUpdates: false
    options:
      # <string, required> path to dashboard files on disk. Required when using the 'file' type
      path: /etc/grafana/dashboards
      # <bool> use folder names from filesystem to create folders in Grafana
      foldersFromFilesStructure: true

Grafana datasource provisioning config (new file, under docker/grafana/)

@@ -0,0 +1,10 @@
apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    url: http://prometheus:9090
    isDefault: true
    version: 1
    editable: false

docker/prometheus/prometheus.yml (new file)

@@ -0,0 +1,13 @@
global:
  scrape_interval: 20s
  scrape_timeout: 10s

scrape_configs:
  - job_name: 'mangadex-at-home'
    scheme: https
    tls_config:
      insecure_skip_verify: true
    metrics_path: /prometheus
    static_configs:
      - targets:
          - "mangadex-at-home:443"

gradle.properties

@@ -1,2 +1,4 @@
-http_4k_version=3.258.0
+http_4k_version=4.1.0.0
 exposed_version=0.26.2
+kotest_version=4.4.0.RC1
+ktorm_version=3.2.0

settings.gradle

@@ -1 +1,16 @@
+pluginManagement {
+    repositories {
+        gradlePluginPortal()
+        jcenter()
+        google()
+    }
+    resolutionStrategy {
+        eachPlugin {
+            if (requested.id.id == "com.squareup.sqldelight") {
+                useModule("com.squareup.sqldelight:gradle-plugin:${requested.version}")
+            }
+        }
+    }
+}
+
 rootProject.name = 'mangadex_at_home'

settings.sample.json (deleted)

@@ -1,17 +0,0 @@
{
"client_secret": "nosenpaithisisoursecret",
"client_hostname": "0.0.0.0", // "0.0.0.0" is the default and binds to everything
"client_port": 443, // 443 is recommended if possible
"client_external_port": 0, //443 is recommended; This port will be send to mdah-backend.
//You need to forward this to the client_port in your router - 0 uses `client_port`
"threads": 16,
"graceful_shutdown_wait_seconds": 60, // Time from graceful shutdown start to force quit
// This rounds down to 15-second increments
"max_cache_size_in_mebibytes": 80000,
"max_kilobits_per_second": 0, // 0 disables max brust limiting
"max_mebibytes_per_hour": 0, // 0 disables hourly bandwidth limiting
"web_settings": { //delete this block to disable webui
"ui_hostname": "127.0.0.1", // "127.0.0.1" is the default and binds to localhost only
"ui_port": 8080
}
}

settings.sample.yaml (new file)

@@ -0,0 +1,74 @@
---
# ⢸⣿⣿⣿⣿⠃⠄⢀⣴⡾⠃⠄⠄⠄⠄⠄⠈⠺⠟⠛⠛⠛⠛⠻⢿⣿⣿⣿⣿⣶⣤⡀⠄
# ⢸⣿⣿⣿⡟⢀⣴⣿⡿⠁⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⣸⣿⣿⣿⣿⣿⣿⣿⣷
# ⢸⣿⣿⠟⣴⣿⡿⡟⡼⢹⣷⢲⡶⣖⣾⣶⢄⠄⠄⠄⠄⠄⢀⣼⣿⢿⣿⣿⣿⣿⣿⣿⣿
# ⢸⣿⢫⣾⣿⡟⣾⡸⢠⡿⢳⡿⠍⣼⣿⢏⣿⣷⢄⡀⠄⢠⣾⢻⣿⣸⣿⣿⣿⣿⣿⣿⣿
# ⡿⣡⣿⣿⡟⡼⡁⠁⣰⠂⡾⠉⢨⣿⠃⣿⡿⠍⣾⣟⢤⣿⢇⣿⢇⣿⣿⢿⣿⣿⣿⣿⣿
# ⣱⣿⣿⡟⡐⣰⣧⡷⣿⣴⣧⣤⣼⣯⢸⡿⠁⣰⠟⢀⣼⠏⣲⠏⢸⣿⡟⣿⣿⣿⣿⣿⣿
# ⣿⣿⡟⠁⠄⠟⣁⠄⢡⣿⣿⣿⣿⣿⣿⣦⣼⢟⢀⡼⠃⡹⠃⡀⢸⡿⢸⣿⣿⣿⣿⣿⡟
# ⣿⣿⠃⠄⢀⣾⠋⠓⢰⣿⣿⣿⣿⣿⣿⠿⣿⣿⣾⣅⢔⣕⡇⡇⡼⢁⣿⣿⣿⣿⣿⣿⢣
# ⣿⡟⠄⠄⣾⣇⠷⣢⣿⣿⣿⣿⣿⣿⣿⣭⣀⡈⠙⢿⣿⣿⡇⡧⢁⣾⣿⣿⣿⣿⣿⢏⣾
# ⣿⡇⠄⣼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⢻⠇⠄⠄⢿⣿⡇⢡⣾⣿⣿⣿⣿⣿⣏⣼⣿
# ⣿⣷⢰⣿⣿⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⢰⣧⣀⡄⢀⠘⡿⣰⣿⣿⣿⣿⣿⣿⠟⣼⣿⣿
# ⢹⣿⢸⣿⣿⠟⠻⢿⣿⣿⣿⣿⣿⣿⣿⣶⣭⣉⣤⣿⢈⣼⣿⣿⣿⣿⣿⣿⠏⣾⣹⣿⣿
# ⢸⠇⡜⣿⡟⠄⠄⠄⠈⠙⣿⣿⣿⣿⣿⣿⣿⣿⠟⣱⣻⣿⣿⣿⣿⣿⠟⠁⢳⠃⣿⣿⣿
# ⠄⣰⡗⠹⣿⣄⠄⠄⠄⢀⣿⣿⣿⣿⣿⣿⠟⣅⣥⣿⣿⣿⣿⠿⠋⠄⠄⣾⡌⢠⣿⡿⠃
# ⠜⠋⢠⣷⢻⣿⣿⣶⣾⣿⣿⣿⣿⠿⣛⣥⣾⣿⠿⠟⠛⠉⠄⠄
#
# MangaDex@Home configuration file
# We are pleased to have you here
# May fate stay the night with you!
# The size in mebibytes of the cache
# You can use megabytes instead in a pinch,
# but just know the two are **NOT** the same.
max_cache_size_in_mebibytes: 1024
# Optional settings for fancy geoip analytics
metrics_settings:
  # whether to enable geoip metrics
  enable_geoip: false
  # if geoip metrics are enabled, a license is required
  # see https://dev.maxmind.com/geoip/geoip2/geolite2/
  geoip_license_key: none
server_settings:
  # The client secret
  # Keep this secret at all costs :P
  secret: nosenpaithisisoursecret
  # The port for the webserver to listen on
  # 443 is recommended for maximum appeal
  port: 443
  # This controls the value the server receives
  # for your upload speed
  # Keep this as 0 to use the one currently stored
  # in the server, or set this higher if needed
  # This does not affect `max_kilobits_per_second` in any way
  external_max_kilobits_per_second: 0
  # Stuff that you probably don't need to change
  # The maximum egress rate of the webserver
  # Setting this to 0 disables the limiter
  # Setting this too low can have adverse effects
  # This does not affect `external_max_kilobits_per_second` in any way
  max_kilobits_per_second: 0
  # The external port to broadcast to the backend
  # Keep this at 0 unless you know what you're doing
  # 0 means broadcast the same value as `port`
  external_port: 0
  # How long to wait for the graceful shutdown (Ctrl-C or SIGINT)
  # This is rounded to a multiple of 5 seconds
  graceful_shutdown_wait_seconds: 60
  # The external hostname to listen on
  # Keep this at 0.0.0.0 unless you know what you're doing
  hostname: 0.0.0.0
  # Maximum mebibytes per hour of images to serve
  # Setting this to 0 disables the limiter
  max_mebibytes_per_hour: 0
  # Number of threads for Netty worker pool
  # Scale this according to your CPU cores
  threads: 16
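Assuming the client picks up `settings.yaml` from its working directory, as the sample's name suggests, a bare-metal run can mirror what the Docker image does; the flags below are taken from the Dockerfile CMD and the docker-compose command, and the relative `data/` paths are only illustrative:

    java -Dfile-level=off -Dstdout-level=info -jar mangadex_at_home.jar --cache data/images --database data/metadata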

CachingInputStream.java

@@ -62,7 +62,11 @@ public class CachingInputStream extends ProxyInputStream {
 public int read() throws IOException {
     final int ch = super.read();
     if (ch != EOF) {
-        cache.write(ch);
+        try {
+            cache.write(ch);
+        } catch (IOException ignored) {
+            // don't let write failures affect the image loading
+        }
     }
     return ch;
 }
@@ -71,7 +75,11 @@ public class CachingInputStream extends ProxyInputStream {
 public int read(final byte[] bts, final int st, final int end) throws IOException {
     final int n = super.read(bts, st, end);
     if (n != EOF) {
-        cache.write(bts, st, n);
+        try {
+            cache.write(bts, st, n);
+        } catch (IOException ignored) {
+            // don't let write failures affect the image loading
+        }
     }
     return n;
 }
@@ -80,7 +88,11 @@ public class CachingInputStream extends ProxyInputStream {
 public int read(final byte[] bts) throws IOException {
     final int n = super.read(bts);
     if (n != EOF) {
-        cache.write(bts, 0, n);
+        try {
+            cache.write(bts, 0, n);
+        } catch (IOException ignored) {
+            // don't let write failures affect the image loading
+        }
     }
     return n;
 }

DiskLruCache.java (deleted)

@@ -1,987 +0,0 @@
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mdnet.cache;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import java.io.BufferedWriter;
import java.io.Closeable;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* A cache that uses a bounded amount of space on a filesystem. Each cache entry
* has a string key and a fixed number of values. Each key must match the regex
* <strong>[a-z0-9_-]{1,120}</strong>. Values are byte sequences, accessible as
* streams or files. Each value must be between {@code 0} and
* {@code Integer.MAX_VALUE} bytes in length.
*
* <p>
* The cache stores its data in a directory on the filesystem. This directory
* must be exclusive to the cache; the cache may delete or overwrite files from
* its directory. It is an error for multiple processes to use the same cache
* directory at the same time.
*
* <p>
* This cache limits the number of bytes that it will store on the filesystem.
* When the number of stored bytes exceeds the limit, the cache will remove
* entries in the background until the limit is satisfied. The limit is not
* strict: the cache may temporarily exceed it while waiting for files to be
* deleted. The limit does not include filesystem overhead or the cache journal
* so space-sensitive applications should set a conservative limit.
*
* <p>
* Clients call {@link #editImpl} to create or update the values of an entry. An
* entry may have only one editor at one time; if a value is not available to be
* edited then {@link #editImpl} will return null.
* <ul>
* <li>When an entry is being <strong>created</strong> it is necessary to supply
* a full set of values; the empty value should be used as a placeholder if
* necessary.
* <li>When an entry is being <strong>edited</strong>, it is not necessary to
* supply data for every value; values default to their previous value.
* </ul>
* Every {@link #edit} call must be matched by a call to {@link Editor#commit}
* or {@link Editor#abort}. Committing is atomic: a read observes the full set
* of values as they were before or after the commit, but never a mix of values.
*
* <p>
* Clients call {@link #get} to read a snapshot of an entry. The read will
* observe the value at the time that {@link #get} was called. Updates and
* removals after the call do not impact ongoing reads.
*
* <p>
* This class is tolerant of some I/O errors. If files are missing from the
* filesystem, the corresponding entries will be dropped from the cache. If an
* error occurs while writing a cache value, the edit will fail silently.
* Callers should handle other problems by catching {@code IOException} and
* responding appropriately.
*/
public final class DiskLruCache implements Closeable {
private static final String JOURNAL_FILE = "journal";
private static final String JOURNAL_FILE_TEMP = "journal.tmp";
private static final String JOURNAL_FILE_BACKUP = "journal.bkp";
private static final String MAGIC = "libcore.io.DiskLruCache";
private static final String VERSION_1 = "1";
private static final long ANY_SEQUENCE_NUMBER = -1;
public static final Pattern LEGAL_KEY_PATTERN = Pattern.compile("[a-z0-9_-]{1,120}");
public static final Pattern UNSAFE_LEGAL_KEY_PATTERN = Pattern.compile("[a-z0-9_-][\\\\/a-z0-9_-]{0,119}");
private static final String CLEAN = "CLEAN";
private static final String DIRTY = "DIRTY";
private static final String REMOVE = "REMOVE";
private static final String READ = "READ";
/*
* This cache uses a journal file named "journal". A typical journal file looks
* like this: libcore.io.DiskLruCache 1 100 2
*
* CLEAN 3400330d1dfc7f3f7f4b8d4d803dfcf6 832 21054 DIRTY
* 335c4c6028171cfddfbaae1a9c313c52 CLEAN 335c4c6028171cfddfbaae1a9c313c52 3934
* 2342 REMOVE 335c4c6028171cfddfbaae1a9c313c52 DIRTY
* 1ab96a171faeeee38496d8b330771a7a CLEAN 1ab96a171faeeee38496d8b330771a7a 1600
* 234 READ 335c4c6028171cfddfbaae1a9c313c52 READ
* 3400330d1dfc7f3f7f4b8d4d803dfcf6
*
* The first five lines of the journal form its header. They are the constant
* string "libcore.io.DiskLruCache", the disk cache's version, the application's
* version, the value count, and a blank line.
*
* Each of the subsequent lines in the file is a record of the state of a cache
* entry. Each line contains space-separated values: a state, a key, and
* optional state-specific values. o DIRTY lines track that an entry is actively
* being created or updated. Every successful DIRTY action should be followed by
* a CLEAN or REMOVE action. DIRTY lines without a matching CLEAN or REMOVE
* indicate that temporary files may need to be deleted. o CLEAN lines track a
* cache entry that has been successfully published and may be read. A publish
* line is followed by the lengths of each of its values. o READ lines track
* accesses for LRU. o REMOVE lines track entries that have been deleted.
*
* The journal file is appended to as cache operations occur. The journal may
* occasionally be compacted by dropping redundant lines. A temporary file named
* "journal.tmp" will be used during compaction; that file should be deleted if
* it exists when the cache is opened.
*/
private final File directory;
private final File journalFile;
private final File journalFileTmp;
private final File journalFileBackup;
private final int appVersion;
private long maxSize;
private final int valueCount;
private long size = 0;
private Writer journalWriter;
private final LinkedHashMap<String, Entry> lruEntries = new LinkedHashMap<>(0, 0.75f, true);
private int redundantOpCount;
/**
* To differentiate between old and current snapshots, each entry is given a
* sequence number each time an edit is committed. A snapshot is stale if its
* sequence number is not equal to its entry's sequence number.
*/
private long nextSequenceNumber = 0;
/** This cache uses a single background thread to evict entries. */
final ThreadPoolExecutor executorService = new ThreadPoolExecutor(0, 1, 60L, TimeUnit.SECONDS,
new LinkedBlockingQueue<>());
private final Callable<Void> cleanupCallable = new Callable<Void>() {
public Void call() throws Exception {
synchronized (DiskLruCache.this) {
if (journalWriter == null) {
return null; // Closed.
}
trimToSize();
if (journalRebuildRequired()) {
rebuildJournal();
redundantOpCount = 0;
}
}
return null;
}
};
private DiskLruCache(File directory, int appVersion, int valueCount, long maxSize) {
this.directory = directory;
this.appVersion = appVersion;
this.journalFile = new File(directory, JOURNAL_FILE);
this.journalFileTmp = new File(directory, JOURNAL_FILE_TEMP);
this.journalFileBackup = new File(directory, JOURNAL_FILE_BACKUP);
this.valueCount = valueCount;
this.maxSize = maxSize;
}
/**
* Opens the cache in {@code directory}, creating a cache if none exists there.
*
* @param directory
* a writable directory
* @param valueCount
* the number of values per cache entry. Must be positive.
* @param maxSize
* the maximum number of bytes this cache should use to store
* @throws IOException
* if reading or writing the cache directory fails
*/
public static DiskLruCache open(File directory, int appVersion, int valueCount, long maxSize) throws IOException {
if (maxSize <= 0) {
throw new IllegalArgumentException("maxSize <= 0");
}
if (valueCount <= 0) {
throw new IllegalArgumentException("valueCount <= 0");
}
// If a bkp file exists, use it instead.
File backupFile = new File(directory, JOURNAL_FILE_BACKUP);
if (backupFile.exists()) {
File journalFile = new File(directory, JOURNAL_FILE);
// If journal file also exists just delete backup file.
if (journalFile.exists()) {
backupFile.delete();
} else {
renameTo(backupFile, journalFile, false);
}
}
// Prefer to pick up where we left off.
DiskLruCache cache = new DiskLruCache(directory, appVersion, valueCount, maxSize);
if (cache.journalFile.exists()) {
cache.readJournal();
cache.processJournal();
return cache;
}
// Create a new empty cache.
directory.mkdirs();
cache = new DiskLruCache(directory, appVersion, valueCount, maxSize);
cache.rebuildJournal();
return cache;
}
private void readJournal() throws IOException {
try (StrictLineReader reader = new StrictLineReader(new FileInputStream(journalFile), StandardCharsets.UTF_8)) {
String magic = reader.readLine();
String version = reader.readLine();
String appVersionString = reader.readLine();
String valueCountString = reader.readLine();
String blank = reader.readLine();
if (!MAGIC.equals(magic) || !VERSION_1.equals(version)
|| !Integer.toString(appVersion).equals(appVersionString)
|| !Integer.toString(valueCount).equals(valueCountString) || !"".equals(blank)) {
throw new HeaderMismatchException(
new String[]{magic, version, appVersionString, valueCountString, blank},
new String[]{MAGIC, VERSION_1, Integer.toString(appVersion), Integer.toString(valueCount), ""});
}
int lineCount = 0;
while (true) {
try {
readJournalLine(reader.readLine());
lineCount++;
} catch (UnexpectedJournalLineException ignored) {
// just continue and hope nothing breaks
} catch (EOFException e) {
break;
}
}
redundantOpCount = lineCount - lruEntries.size();
// If we ended on a truncated line, rebuild the journal before appending to it.
if (reader.hasUnterminatedLine()) {
rebuildJournal();
} else {
journalWriter = new BufferedWriter(
new OutputStreamWriter(new FileOutputStream(journalFile, true), StandardCharsets.UTF_8));
}
}
}
private void readJournalLine(String line) throws IOException {
int firstSpace = line.indexOf(' ');
if (firstSpace == -1) {
throw new UnexpectedJournalLineException(line);
}
int keyBegin = firstSpace + 1;
int secondSpace = line.indexOf(' ', keyBegin);
final String key;
if (secondSpace == -1) {
key = line.substring(keyBegin);
if (firstSpace == REMOVE.length() && line.startsWith(REMOVE)) {
lruEntries.remove(key);
return;
}
} else {
key = line.substring(keyBegin, secondSpace);
}
Entry entry = lruEntries.get(key);
if (entry == null) {
entry = new Entry(key);
lruEntries.put(key, entry);
}
if (secondSpace != -1 && firstSpace == CLEAN.length() && line.startsWith(CLEAN)) {
String[] parts = line.substring(secondSpace + 1).split(" ");
entry.readable = true;
entry.currentEditor = null;
entry.setLengths(parts);
} else if (secondSpace == -1 && firstSpace == DIRTY.length() && line.startsWith(DIRTY)) {
entry.currentEditor = new Editor(entry);
} else if (secondSpace == -1 && firstSpace == READ.length() && line.startsWith(READ)) {
// This work was already done by calling lruEntries.get().
} else {
throw new UnexpectedJournalLineException(line);
}
}
/**
* Computes the initial size and collects garbage as a part of opening the
* cache. Dirty entries are assumed to be inconsistent and will be deleted.
*/
private void processJournal() throws IOException {
deleteIfExists(journalFileTmp);
for (Iterator<Entry> i = lruEntries.values().iterator(); i.hasNext();) {
Entry entry = i.next();
if (entry.currentEditor == null) {
for (int t = 0; t < valueCount; t++) {
size += entry.lengths[t];
}
} else {
entry.currentEditor = null;
for (int t = 0; t < valueCount; t++) {
deleteIfExists(entry.getCleanFile(t));
deleteIfExists(entry.getDirtyFile(t));
}
i.remove();
}
}
}
/**
* Creates a new journal that omits redundant information. This replaces the
* current journal if it exists.
*/
private synchronized void rebuildJournal() throws IOException {
if (journalWriter != null) {
journalWriter.close();
}
try (Writer writer = new BufferedWriter(
new OutputStreamWriter(new FileOutputStream(journalFileTmp), StandardCharsets.UTF_8))) {
writer.write(MAGIC);
writer.write("\n");
writer.write(VERSION_1);
writer.write("\n");
writer.write(Integer.toString(appVersion));
writer.write("\n");
writer.write(Integer.toString(valueCount));
writer.write("\n");
writer.write("\n");
for (Entry entry : lruEntries.values()) {
if (entry.currentEditor != null) {
writer.write(DIRTY + ' ' + entry.key + '\n');
} else {
writer.write(CLEAN + ' ' + entry.key + entry.getLengths() + '\n');
}
}
}
if (journalFile.exists()) {
renameTo(journalFile, journalFileBackup, true);
}
renameTo(journalFileTmp, journalFile, false);
journalFileBackup.delete();
journalWriter = new BufferedWriter(
new OutputStreamWriter(new FileOutputStream(journalFile, true), StandardCharsets.UTF_8));
}
private static void deleteIfExists(File file) throws IOException {
if (file.exists() && !file.delete()) {
throw new IOException();
}
}
private static void renameTo(File from, File to, boolean deleteDestination) throws IOException {
if (deleteDestination) {
deleteIfExists(to);
}
if (!from.renameTo(to)) {
throw new IOException();
}
}
/**
* Returns a snapshot of the entry named {@code key}, or null if it doesn't
* exist or is not currently readable. If a value is returned, it is moved to the
* head of the LRU queue.
*/
public Snapshot get(String key) throws IOException {
validateKey(key);
return getImpl(key);
}
/**
* Returns a snapshot of the entry named {@code key}, or null if it doesn't
* exist or is not currently readable. If a value is returned, it is moved to the
* head of the LRU queue. Unsafe as it allows arbitrary directories to be
* accessed!
*/
public Snapshot getUnsafe(String key) throws IOException {
validateUnsafeKey(key);
return getImpl(key);
}
private synchronized Snapshot getImpl(String key) throws IOException {
checkNotClosed();
Entry entry = lruEntries.get(key);
if (entry == null) {
return null;
}
if (!entry.readable) {
return null;
}
// Open all streams eagerly to guarantee that we see a single published
// snapshot. If we opened streams lazily then the streams could come
// from different edits.
InputStream[] ins = new InputStream[valueCount];
try {
for (int i = 0; i < valueCount; i++) {
ins[i] = new FileInputStream(entry.getCleanFile(i));
}
} catch (FileNotFoundException e) {
// A file must have been deleted manually!
for (int i = 0; i < valueCount; i++) {
if (ins[i] != null) {
try {
ins[i].close();
} catch (IOException ignored) {
}
} else {
break;
}
}
return null;
}
redundantOpCount++;
journalWriter.append(READ + ' ').append(key).append(String.valueOf('\n'));
if (journalRebuildRequired()) {
executorService.submit(cleanupCallable);
}
return new Snapshot(key, entry.sequenceNumber, ins, entry.lengths);
}
/**
* Returns an editor for the entry named {@code key}, or null if another edit is
* in progress.
*/
public Editor edit(String key) throws IOException {
validateKey(key);
return editImpl(key, ANY_SEQUENCE_NUMBER);
}
/**
* Returns an editor for the entry named {@code key}, or null if another edit is
* in progress. Unsafe as it allows arbitrary directories to be accessed!
*/
public Editor editUnsafe(String key) throws IOException {
validateUnsafeKey(key);
return editImpl(key, ANY_SEQUENCE_NUMBER);
}
private synchronized Editor editImpl(String key, long expectedSequenceNumber) throws IOException {
checkNotClosed();
Entry entry = lruEntries.get(key);
if (expectedSequenceNumber != ANY_SEQUENCE_NUMBER
&& (entry == null || entry.sequenceNumber != expectedSequenceNumber)) {
return null; // Snapshot is stale.
}
if (entry == null) {
entry = new Entry(key);
lruEntries.put(key, entry);
} else if (entry.currentEditor != null) {
return null; // Another edit is in progress.
}
Editor editor = new Editor(entry);
entry.currentEditor = editor;
// Flush the journal before creating files to prevent file leaks.
journalWriter.write(DIRTY + ' ' + key + '\n');
journalWriter.flush();
return editor;
}
/** Returns the directory where this cache stores its data. */
public File getDirectory() {
return directory;
}
/**
* Returns the maximum number of bytes that this cache should use to store its
* data.
*/
public synchronized long getMaxSize() {
return maxSize;
}
/**
* Changes the maximum number of bytes the cache can store and queues a job to
* trim the existing store, if necessary.
*/
public synchronized void setMaxSize(long maxSize) {
this.maxSize = maxSize;
executorService.submit(cleanupCallable);
}
/**
* Returns the number of bytes currently being used to store the values in this
* cache. This may be greater than the max size if a background deletion is
* pending.
*/
public synchronized long size() {
return size;
}
private synchronized void completeEdit(Editor editor, boolean success) throws IOException {
Entry entry = editor.entry;
if (entry.currentEditor != editor) {
throw new IllegalStateException();
}
// If this edit is creating the entry for the first time, every index must have
// a value.
if (success && !entry.readable) {
for (int i = 0; i < valueCount; i++) {
if (!editor.written[i]) {
editor.abort();
throw new IllegalStateException("Newly created entry didn't create value for index " + i);
}
if (!entry.getDirtyFile(i).exists()) {
editor.abort();
return;
}
}
}
for (int i = 0; i < valueCount; i++) {
File dirty = entry.getDirtyFile(i);
if (success) {
if (dirty.exists()) {
File clean = entry.getCleanFile(i);
dirty.renameTo(clean);
long oldLength = entry.lengths[i];
long newLength = clean.length();
entry.lengths[i] = newLength;
size = size - oldLength + newLength;
}
} else {
deleteIfExists(dirty);
}
}
redundantOpCount++;
entry.currentEditor = null;
if (entry.readable | success) {
entry.readable = true;
journalWriter.write(CLEAN + ' ' + entry.key + entry.getLengths() + '\n');
if (success) {
entry.sequenceNumber = nextSequenceNumber++;
}
} else {
lruEntries.remove(entry.key);
journalWriter.write(REMOVE + ' ' + entry.key + '\n');
}
journalWriter.flush();
if (size > maxSize || journalRebuildRequired()) {
executorService.submit(cleanupCallable);
}
}
/**
* We only rebuild the journal when it will halve the size of the journal and
* eliminate at least 2000 ops.
*/
private boolean journalRebuildRequired() {
final int redundantOpCompactThreshold = 2000;
return redundantOpCount >= redundantOpCompactThreshold //
&& redundantOpCount >= lruEntries.size();
}
/**
* Drops the entry for {@code key} if it exists and can be removed. Entries
* actively being edited cannot be removed.
*
* @return true if an entry was removed.
*/
public boolean remove(String key) throws IOException {
validateKey(key);
return removeImpl(key);
}
/**
* Drops the entry for {@code key} if it exists and can be removed. Entries
* actively being edited cannot be removed. Unsafe as it allows arbitrary
* directories to be accessed!
*
* @return true if an entry was removed.
*/
public boolean removeUnsafe(String key) throws IOException {
validateUnsafeKey(key);
return removeImpl(key);
}
private synchronized boolean removeImpl(String key) throws IOException {
checkNotClosed();
Entry entry = lruEntries.get(key);
if (entry == null || entry.currentEditor != null) {
return false;
}
for (int i = 0; i < valueCount; i++) {
File file = entry.getCleanFile(i);
if (file.exists() && !file.delete()) {
throw new IOException("failed to delete " + file);
}
size -= entry.lengths[i];
entry.lengths[i] = 0;
}
redundantOpCount++;
journalWriter.append(REMOVE).append(' ').append(key).append('\n');
lruEntries.remove(key);
if (journalRebuildRequired()) {
executorService.submit(cleanupCallable);
}
return true;
}
/** Returns true if this cache has been closed. */
public synchronized boolean isClosed() {
return journalWriter == null;
}
private void checkNotClosed() {
if (journalWriter == null) {
throw new IllegalStateException("cache is closed");
}
}
/** Force buffered operations to the filesystem. */
public synchronized void flush() throws IOException {
checkNotClosed();
trimToSize();
journalWriter.flush();
}
/** Closes this cache. Stored values will remain on the filesystem. */
public synchronized void close() throws IOException {
if (journalWriter == null) {
return; // Already closed.
}
for (Entry entry : new ArrayList<>(lruEntries.values())) {
if (entry.currentEditor != null) {
entry.currentEditor.abort();
}
}
trimToSize();
journalWriter.close();
journalWriter = null;
}
private void trimToSize() throws IOException {
while (size > maxSize) {
Map.Entry<String, Entry> toEvict = lruEntries.entrySet().iterator().next();
removeImpl(toEvict.getKey());
}
}
/**
* Closes the cache and deletes all of its stored values. This will delete all
* files in the cache directory including files that weren't created by the
* cache.
*/
public void delete() throws IOException {
close();
FileUtils.deleteDirectory(directory);
}
private void validateKey(String key) {
Matcher matcher = LEGAL_KEY_PATTERN.matcher(key);
if (!matcher.matches()) {
throw new IllegalArgumentException("Keys must match regex " + LEGAL_KEY_PATTERN + ": \"" + key + "\"");
}
}
private void validateUnsafeKey(String key) {
Matcher matcher = UNSAFE_LEGAL_KEY_PATTERN.matcher(key);
if (!matcher.matches()) {
throw new IllegalArgumentException(
"Unsafe keys must match regex " + UNSAFE_LEGAL_KEY_PATTERN + ": \"" + key + "\"");
}
}
/** A snapshot of the values for an entry. */
public final class Snapshot implements Closeable {
private final String key;
private final long sequenceNumber;
private final InputStream[] ins;
private final long[] lengths;
private Snapshot(String key, long sequenceNumber, InputStream[] ins, long[] lengths) {
this.key = key;
this.sequenceNumber = sequenceNumber;
this.ins = ins;
this.lengths = lengths;
}
/**
* Returns an editor for this snapshot's entry, or null if either the entry has
* changed since this snapshot was created or if another edit is in progress.
*/
public Editor edit() throws IOException {
return DiskLruCache.this.editImpl(key, sequenceNumber);
}
/** Returns the unbuffered stream with the value for {@code index}. */
public InputStream getInputStream(int index) {
return ins[index];
}
/**
* Returns the string value for {@code index}. This consumes the InputStream!
*/
public String getString(int index) throws IOException {
try (InputStream in = getInputStream(index)) {
return IOUtils.toString(in, StandardCharsets.UTF_8);
}
}
/** Returns the byte length of the value for {@code index}. */
public long getLength(int index) {
return lengths[index];
}
public void close() {
for (InputStream in : ins) {
try {
in.close();
} catch (IOException ignored) {
}
}
}
}
private static final OutputStream NULL_OUTPUT_STREAM = new OutputStream() {
@Override
public void write(int b) {
// Eat all writes silently. Nom nom.
}
};
/** Edits the values for an entry. */
public final class Editor {
private final Entry entry;
private final boolean[] written;
private boolean hasErrors;
private boolean committed;
private Editor(Entry entry) {
this.entry = entry;
this.written = (entry.readable) ? null : new boolean[valueCount];
}
/**
* Returns an unbuffered input stream to read the last committed value, or null
* if no value has been committed.
*/
public synchronized InputStream newInputStream(int index) {
synchronized (DiskLruCache.this) {
if (entry.currentEditor != this) {
throw new IllegalStateException();
}
if (!entry.readable) {
return null;
}
try {
return new FileInputStream(entry.getCleanFile(index));
} catch (FileNotFoundException e) {
return null;
}
}
}
/**
* Returns a new unbuffered output stream to write the value at {@code index}.
* If the underlying output stream encounters errors when writing to the
* filesystem, this edit will be aborted when {@link #commit} is called. The
* returned output stream does not throw IOExceptions.
*/
public synchronized OutputStream newOutputStream(int index) {
if (index < 0 || index >= valueCount) {
throw new IllegalArgumentException("Expected index " + index + " to "
+ "be greater than 0 and less than the maximum value count " + "of " + valueCount);
}
synchronized (DiskLruCache.this) {
if (entry.currentEditor != this) {
throw new IllegalStateException();
}
if (!entry.readable) {
written[index] = true;
}
File dirtyFile = entry.getDirtyFile(index);
FileOutputStream outputStream;
try {
outputStream = new FileOutputStream(dirtyFile);
} catch (FileNotFoundException e) {
// Attempt to recreate the cache directory.
dirtyFile.getParentFile().mkdirs();
try {
outputStream = new FileOutputStream(dirtyFile);
} catch (FileNotFoundException e2) {
// We are unable to recover. Silently eat the writes.
return NULL_OUTPUT_STREAM;
}
}
return new FaultHidingOutputStream(outputStream);
}
}
/**
* Returns the last committed value as a string, or null if no value has been
* committed.
*/
public String getString(int index) throws IOException {
try (InputStream in = newInputStream(index)) {
return in != null ? IOUtils.toString(in, StandardCharsets.UTF_8) : null;
}
}
/**
* Write a string to the specified index.
*/
public void setString(int index, String value) throws IOException {
try (OutputStream out = newOutputStream(index)) {
IOUtils.write(value, out, StandardCharsets.UTF_8);
}
}
/**
* Commits this edit so it is visible to readers. This releases the edit lock so
* another edit may be started on the same key.
*/
public void commit() throws IOException {
if (hasErrors) {
completeEdit(this, false);
removeImpl(entry.key); // The previous entry is stale.
} else {
completeEdit(this, true);
}
committed = true;
}
/**
* Aborts this edit. This releases the edit lock so another edit may be started
* on the same key.
*/
public void abort() throws IOException {
completeEdit(this, false);
}
public long getLength(int index) {
return entry.getDirtyFile(index).length();
}
public void abortUnlessCommitted() {
if (!committed) {
try {
abort();
} catch (IOException ignored) {
}
}
}
private class FaultHidingOutputStream extends FilterOutputStream {
private FaultHidingOutputStream(OutputStream out) {
super(out);
}
@Override
public void write(int oneByte) {
try {
out.write(oneByte);
} catch (IOException e) {
hasErrors = true;
}
}
@Override
public void write(byte[] buffer, int offset, int length) {
try {
out.write(buffer, offset, length);
} catch (IOException e) {
hasErrors = true;
}
}
@Override
public void close() {
try {
out.close();
} catch (IOException e) {
hasErrors = true;
}
}
@Override
public void flush() {
try {
out.flush();
} catch (IOException e) {
hasErrors = true;
}
}
}
}
private final class Entry {
private final String key;
/** Lengths of this entry's files. */
private final long[] lengths;
/** True if this entry has ever been published. */
private boolean readable;
/** The ongoing edit or null if this entry is not being edited. */
private Editor currentEditor;
/** The sequence number of the most recently committed edit to this entry. */
private long sequenceNumber;
private Entry(String key) {
this.key = key;
this.lengths = new long[valueCount];
}
public String getLengths() {
StringBuilder result = new StringBuilder();
for (long size : lengths) {
result.append(' ').append(size);
}
return result.toString();
}
/** Set lengths using decimal numbers like "10123". */
private void setLengths(String[] strings) throws IOException {
if (strings.length != valueCount) {
throw invalidLengths(strings);
}
try {
for (int i = 0; i < strings.length; i++) {
lengths[i] = Long.parseLong(strings[i]);
}
} catch (NumberFormatException e) {
throw invalidLengths(strings);
}
}
private IOException invalidLengths(String[] strings) throws IOException {
throw new IOException("unexpected journal line: " + java.util.Arrays.toString(strings));
}
public File getCleanFile(int i) {
return new File(directory, key + "." + i);
}
public File getDirtyFile(int i) {
return new File(directory, key + "." + i + ".tmp");
}
}
}
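The class removed above exposed a snapshot/editor API (open, get, edit, commit) as its class comment describes; the following fragment is only an illustrative sketch of how a caller would have used it, with the wrapper class, key, value index, and size budget chosen arbitrarily:

    import java.io.File;
    import java.io.IOException;
    import mdnet.cache.DiskLruCache;

    class DiskLruCacheSketch {
        // Illustrative only: one value per entry, app version 1, 80 MiB budget.
        static void example() throws IOException {
            DiskLruCache cache = DiskLruCache.open(new File("cache"), 1, 1, 80L * 1024 * 1024);

            // Writing: acquire the single editor for a key, set every index, then commit atomically.
            DiskLruCache.Editor editor = cache.edit("3400330d1dfc7f3f7f4b8d4d803dfcf6");
            if (editor != null) {
                editor.setString(0, "cached payload");
                editor.commit();
            }

            // Reading: a snapshot pins the values as they were when get() returned.
            try (DiskLruCache.Snapshot snapshot = cache.get("3400330d1dfc7f3f7f4b8d4d803dfcf6")) {
                if (snapshot != null) {
                    String payload = snapshot.getString(0);
                }
            }

            cache.close();
        }
    }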

HeaderMismatchException.java (deleted)

@@ -1,25 +0,0 @@
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mdnet.cache;
import java.io.IOException;
import java.util.Arrays;
public class HeaderMismatchException extends IOException {
public HeaderMismatchException(String[] actual, String[] expected) {
super("expected header " + Arrays.toString(expected) + ", found " + Arrays.toString(actual));
}
}

StrictLineReader.java (deleted)

@@ -1,215 +0,0 @@
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mdnet.cache;
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
/**
* Buffers input from an {@link InputStream} for reading lines.
*
* <p>
* This class is used for buffered reading of lines. For purposes of this class,
* a line ends with "\n" or "\r\n". End of input is reported by throwing
* {@code EOFException}. Unterminated line at end of input is invalid and will
* be ignored, the caller may use {@code
* hasUnterminatedLine()} to detect it after catching the {@code EOFException}.
*
* <p>
* This class is intended for reading input that strictly consists of lines,
* such as line-based cache entries or cache journal. Unlike the
* {@link java.io.BufferedReader} which in conjunction with
* {@link java.io.InputStreamReader} provides similar functionality, this class
* uses different end-of-input reporting and a more restrictive definition of a
* line.
*
* <p>
* This class supports only charsets that encode '\r' and '\n' as a single byte
* with value 13 and 10, respectively, and the representation of no other
* character contains these values. We currently check in constructor that the
* charset is UTF-8.
*/
final class StrictLineReader implements Closeable {
private static final byte CR = (byte) '\r';
private static final byte LF = (byte) '\n';
private final InputStream in;
private final Charset charset;
/*
* Buffered data is stored in {@code buf}. As long as no exception occurs, 0 <=
* pos <= end and the data in the range [pos, end) is buffered for reading. At
* end of input, if there is an unterminated line, we set end == -1, otherwise
* end == pos. If the underlying {@code InputStream} throws an {@code
* IOException}, end may remain as either pos or -1.
*/
private byte[] buf;
private int pos;
private int end;
/**
* Constructs a new {@code LineReader} with the specified charset and the
* default capacity.
*
* @param in
* the {@code InputStream} to read data from.
* @param charset
* the charset used to decode data. Only UTF-8 is supported.
* @throws NullPointerException
* if {@code in} or {@code charset} is null.
* @throws IllegalArgumentException
* if the specified charset is not supported.
*/
public StrictLineReader(InputStream in, Charset charset) {
this(in, 8192, charset);
}
/**
* Constructs a new {@code LineReader} with the specified capacity and charset.
*
* @param in
* the {@code InputStream} to read data from.
* @param capacity
* the capacity of the buffer.
* @param charset
* the charset used to decode data. Only UTF-8 is supported.
* @throws NullPointerException
* if {@code in} or {@code charset} is null.
* @throws IllegalArgumentException
* if {@code capacity} is negative or zero or the specified charset
* is not supported.
*/
public StrictLineReader(InputStream in, int capacity, Charset charset) {
if (in == null || charset == null) {
throw new NullPointerException();
}
if (capacity < 0) {
throw new IllegalArgumentException("capacity <= 0");
}
if (!(charset.equals(StandardCharsets.UTF_8))) {
throw new IllegalArgumentException("Unsupported encoding");
}
this.in = in;
this.charset = charset;
buf = new byte[capacity];
}
/**
* Closes the reader by closing the underlying {@code InputStream} and marking
* this reader as closed.
*
* @throws IOException
* for errors when closing the underlying {@code InputStream}.
*/
public void close() throws IOException {
synchronized (in) {
if (buf != null) {
buf = null;
in.close();
}
}
}
/**
* Reads the next line. A line ends with {@code "\n"} or {@code "\r\n"}, this
* end of line marker is not included in the result.
*
* @return the next line from the input.
* @throws IOException
* for underlying {@code InputStream} errors.
* @throws EOFException
* for the end of source stream.
*/
public String readLine() throws IOException {
synchronized (in) {
if (buf == null) {
throw new IOException("LineReader is closed");
}
// Read more data if we are at the end of the buffered data.
// Though it's an error to read after an exception, we will let {@code
// fillBuf()}
// throw again if that happens; thus we need to handle end == -1 as well as end
// == pos.
if (pos >= end) {
fillBuf();
}
// Try to find LF in the buffered data and return the line if successful.
for (int i = pos; i != end; ++i) {
if (buf[i] == LF) {
int lineEnd = (i != pos && buf[i - 1] == CR) ? i - 1 : i;
String res = new String(buf, pos, lineEnd - pos, charset.name());
pos = i + 1;
return res;
}
}
// Let's anticipate up to 80 characters on top of those already read.
ByteArrayOutputStream out = new ByteArrayOutputStream(end - pos + 80) {
@Override
public String toString() {
int length = (count > 0 && buf[count - 1] == CR) ? count - 1 : count;
try {
return new String(buf, 0, length, charset.name());
} catch (UnsupportedEncodingException e) {
throw new AssertionError(e); // Since we control the charset this will never happen.
}
}
};
while (true) {
out.write(buf, pos, end - pos);
// Mark unterminated line in case fillBuf throws EOFException or IOException.
end = -1;
fillBuf();
// Try to find LF in the buffered data and return the line if successful.
for (int i = pos; i != end; ++i) {
if (buf[i] == LF) {
if (i != pos) {
out.write(buf, pos, i - pos);
}
pos = i + 1;
return out.toString();
}
}
}
}
}
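/**
 * Returns true if the last call to {@code readLine()} reached the end of input while an
 * unterminated line was still pending; false otherwise.
 */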
public boolean hasUnterminatedLine() {
return end == -1;
}
/**
* Reads new input data into the buffer. Call only with pos == end or end == -1,
* depending on the desired outcome if the function throws.
*/
private void fillBuf() throws IOException {
int result = in.read(buf, 0, buf.length);
if (result == -1) {
throw new EOFException();
}
pos = 0;
end = result;
}
}
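For context, a minimal usage sketch of this reader, written in Kotlin and assuming StrictLineReader is importable from this package: it keeps calling readLine() until the reader signals end of input with an EOFException.

import java.io.ByteArrayInputStream
import java.io.EOFException
import java.nio.charset.StandardCharsets

fun main() {
    val bytes = "line one\r\nline two\nno newline at end".toByteArray(StandardCharsets.UTF_8)
    val reader = StrictLineReader(ByteArrayInputStream(bytes), StandardCharsets.UTF_8)
    try {
        while (true) {
            println(reader.readLine()) // CR/LF terminators are stripped from the result
        }
    } catch (e: EOFException) {
        // readLine() reports end of input by throwing EOFException;
        // the trailing unterminated line is not returned.
        println("unterminated line pending: ${reader.hasUnterminatedLine()}") // true here
    } finally {
        reader.close()
    }
}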

View file

@ -1,9 +0,0 @@
package mdnet.cache;
import java.io.IOException;
public class UnexpectedJournalLineException extends IOException {
public UnexpectedJournalLineException(String unexpectedLine) {
super("unexpected journal line: " + unexpectedLine);
}
}

View file

@ -16,18 +16,22 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>. along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/ */
package mdnet.base /* ktlint-disable no-wildcard-imports */
package mdnet
import com.fasterxml.jackson.databind.DeserializationFeature import com.fasterxml.jackson.databind.DeserializationFeature
import com.fasterxml.jackson.module.kotlin.KotlinModule import com.fasterxml.jackson.module.kotlin.KotlinModule
import java.net.InetAddress import mdnet.ServerHandlerJackson.auto
import mdnet.base.ServerHandlerJackson.auto import mdnet.logging.info
import mdnet.base.settings.DevSettings import mdnet.settings.DevSettings
import mdnet.base.settings.RemoteSettings import mdnet.settings.RemoteSettings
import mdnet.base.settings.ServerSettings import mdnet.settings.ServerSettings
import org.apache.http.client.config.RequestConfig import org.apache.hc.client5.http.impl.DefaultSchemePortResolver
import org.apache.http.impl.client.HttpClients import org.apache.hc.client5.http.impl.classic.HttpClients
import org.http4k.client.Apache4Client import org.apache.hc.client5.http.impl.routing.DefaultRoutePlanner
import org.apache.hc.core5.http.HttpHost
import org.apache.hc.core5.http.protocol.HttpContext
import org.http4k.client.ApacheClient
import org.http4k.core.Body import org.http4k.core.Body
import org.http4k.core.Method import org.http4k.core.Method
import org.http4k.core.Request import org.http4k.core.Request
@ -35,31 +39,37 @@ import org.http4k.format.ConfigurableJackson
import org.http4k.format.asConfigurable import org.http4k.format.asConfigurable
import org.http4k.format.withStandardMappings import org.http4k.format.withStandardMappings
import org.slf4j.LoggerFactory import org.slf4j.LoggerFactory
import java.net.InetAddress
object ServerHandlerJackson : ConfigurableJackson( object ServerHandlerJackson : ConfigurableJackson(
KotlinModule() KotlinModule()
.asConfigurable() .asConfigurable()
.withStandardMappings() .withStandardMappings()
.done() .done()
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false) .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
) )
class ServerHandler(private val serverSettings: ServerSettings, private val devSettings: DevSettings, private val maxCacheSizeInMebibytes: Long) { class BackendApi(
private val client = Apache4Client(client = HttpClients.custom() private val serverSettings: ServerSettings,
.setDefaultRequestConfig( private val devSettings: DevSettings,
RequestConfig.custom() private val maxCacheSizeInMebibytes: Long
.apply { ) {
if (serverSettings.clientHostname != "0.0.0.0") { private val client = ApacheClient(
setLocalAddress(InetAddress.getByName(serverSettings.clientHostname)) client = HttpClients.custom()
.setRoutePlanner(
object : DefaultRoutePlanner(DefaultSchemePortResolver()) {
override fun determineLocalAddress(firstHop: HttpHost?, context: HttpContext?): InetAddress {
return InetAddress.getByName(serverSettings.hostname)
} }
} }
.build()) )
.build()) .build()
)
fun logoutFromControl(): Boolean { fun logoutFromControl(): Boolean {
LOGGER.info { "Disconnecting from the control server" } LOGGER.info { "Disconnecting from the control server" }
val params = mapOf<String, Any>( val params = mapOf<String, Any>(
"secret" to serverSettings.clientSecret "secret" to serverSettings.secret
) )
val request = STRING_ANY_MAP_LENS(params, Request(Method.POST, getServerAddress() + "stop")) val request = STRING_ANY_MAP_LENS(params, Request(Method.POST, getServerAddress() + "stop"))
@ -69,17 +79,17 @@ class ServerHandler(private val serverSettings: ServerSettings, private val devS
} }
private fun getPingParams(tlsCreatedAt: String? = null): Map<String, Any> = private fun getPingParams(tlsCreatedAt: String? = null): Map<String, Any> =
mapOf<String, Any>( mapOf(
"secret" to serverSettings.clientSecret, "secret" to serverSettings.secret,
"port" to let { "port" to let {
if (serverSettings.clientExternalPort != 0) { if (serverSettings.externalPort != 0) {
serverSettings.clientExternalPort serverSettings.externalPort
} else { } else {
serverSettings.clientPort serverSettings.port
} }
}, },
"disk_space" to maxCacheSizeInMebibytes * 1024 * 1024, "disk_space" to maxCacheSizeInMebibytes * 1024 * 1024,
"network_speed" to serverSettings.maxKilobitsPerSecond * 1000 / 8, "network_speed" to serverSettings.externalMaxKilobitsPerSecond * 1000 / 8,
"build_version" to Constants.CLIENT_BUILD "build_version" to Constants.CLIENT_BUILD
).let { ).let {
if (tlsCreatedAt != null) { if (tlsCreatedAt != null) {
@ -105,7 +115,13 @@ class ServerHandler(private val serverSettings: ServerSettings, private val devS
fun pingControl(old: RemoteSettings): RemoteSettings? { fun pingControl(old: RemoteSettings): RemoteSettings? {
LOGGER.info { "Pinging the control server" } LOGGER.info { "Pinging the control server" }
val request = STRING_ANY_MAP_LENS(getPingParams(old.tls!!.createdAt), Request(Method.POST, getServerAddress() + "ping")) val request = STRING_ANY_MAP_LENS(
getPingParams(old.tls!!.createdAt),
Request(
Method.POST,
getServerAddress() + "ping"
)
)
val response = client(request) val response = client(request)
return if (response.status.successful) { return if (response.status.successful) {
@ -116,17 +132,13 @@ class ServerHandler(private val serverSettings: ServerSettings, private val devS
} }
private fun getServerAddress(): String { private fun getServerAddress(): String {
return if (!devSettings.isDev) return devSettings.devUrl ?: SERVER_ADDRESS
SERVER_ADDRESS
else
SERVER_ADDRESS_DEV
} }
companion object { companion object {
private val LOGGER = LoggerFactory.getLogger(ServerHandler::class.java) private val LOGGER = LoggerFactory.getLogger(BackendApi::class.java)
private val STRING_ANY_MAP_LENS = Body.auto<Map<String, Any>>().toLens() private val STRING_ANY_MAP_LENS = Body.auto<Map<String, Any>>().toLens()
private val SERVER_SETTINGS_LENS = Body.auto<RemoteSettings>().toLens() private val SERVER_SETTINGS_LENS = Body.auto<RemoteSettings>().toLens()
private const val SERVER_ADDRESS = "https://api.mangadex.network/" private const val SERVER_ADDRESS = "https://api.mangadex.network/"
private const val SERVER_ADDRESS_DEV = "https://mangadex-test.net/"
} }
} }

View file

@ -16,7 +16,7 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>. along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/ */
package mdnet.base package mdnet
import java.time.Duration import java.time.Duration

View file

@ -16,14 +16,18 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>. along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/ */
package mdnet.base /* ktlint-disable no-wildcard-imports */
package mdnet
import ch.qos.logback.classic.LoggerContext import ch.qos.logback.classic.LoggerContext
import java.io.File import mdnet.logging.error
import kotlin.system.exitProcess
import mdnet.BuildInfo
import org.slf4j.LoggerFactory import org.slf4j.LoggerFactory
import picocli.CommandLine import picocli.CommandLine
import java.io.File
import java.lang.Exception
import java.nio.file.Path
import java.nio.file.Paths
import kotlin.system.exitProcess
object Main { object Main {
private val LOGGER = LoggerFactory.getLogger(Main::class.java) private val LOGGER = LoggerFactory.getLogger(Main::class.java)
@ -34,7 +38,7 @@ object Main {
} }
fun dieWithError(e: Throwable): Nothing { fun dieWithError(e: Throwable): Nothing {
LOGGER.error(e) { "Critical Error" } LOGGER.error(e) { "Critical Error" }
(LoggerFactory.getILoggerFactory() as LoggerContext).stop() (LoggerFactory.getILoggerFactory() as LoggerContext).stop()
exitProcess(1) exitProcess(1)
} }
@ -48,13 +52,13 @@ object Main {
} }
@CommandLine.Command(name = "java -jar <jar>", usageHelpWidth = 120, version = ["Client Version ${BuildInfo.VERSION} (Build ${Constants.CLIENT_BUILD})"]) @CommandLine.Command(name = "java -jar <jar>", usageHelpWidth = 120, version = ["Client Version ${BuildInfo.VERSION} (Build ${Constants.CLIENT_BUILD})"])
data class ClientArgs( class ClientArgs(
@field:CommandLine.Option(names = ["-s", "--settings"], defaultValue = "settings.json", paramLabel = "<settings>", description = ["the settings file (default: \${DEFAULT-VALUE})"]) @field:CommandLine.Option(names = ["-s", "--settings"], defaultValue = "settings.yaml", paramLabel = "<settings>", description = ["the settings file (default: \${DEFAULT-VALUE})"])
var settingsFile: File = File("settings.json"), var settingsFile: File = File("settings.yaml"),
@field:CommandLine.Option(names = ["-d", "--database"], defaultValue = "cache\${sys:file.separator}data.db", paramLabel = "<settings>", description = ["the database file (default: \${DEFAULT-VALUE})"]) @field:CommandLine.Option(names = ["-d", "--database"], defaultValue = ".\${sys:file.separator}metadata", paramLabel = "<settings>", description = ["the database file (default: \${DEFAULT-VALUE})"])
var databaseFile: File = File("cache${File.separator}data.db"), var databaseFile: File = File(".${File.separator}metadata"),
@field:CommandLine.Option(names = ["-c", "--cache"], defaultValue = "cache", paramLabel = "<settings>", description = ["the cache folder (default: \${DEFAULT-VALUE})"]) @field:CommandLine.Option(names = ["-c", "--cache"], defaultValue = "images", paramLabel = "<settings>", description = ["the cache folder (default: \${DEFAULT-VALUE})"])
var cacheFolder: File = File("cache"), var cacheFolder: Path = Paths.get("images"),
@field:CommandLine.Option(names = ["-h", "--help"], usageHelp = true, description = ["show this help message and exit"]) @field:CommandLine.Option(names = ["-h", "--help"], usageHelp = true, description = ["show this help message and exit"])
var helpRequested: Boolean = false, var helpRequested: Boolean = false,
@field:CommandLine.Option(names = ["-v", "--version"], versionHelp = true, description = ["show the version message and exit"]) @field:CommandLine.Option(names = ["-v", "--version"], versionHelp = true, description = ["show the version message and exit"])
@ -66,7 +70,8 @@ data class ClientArgs(
) )
println() println()
println("Copyright (c) 2020, MangaDex Network") println("Copyright (c) 2020, MangaDex Network")
println(""" println(
"""
Mangadex@Home is free software: you can redistribute it and/or modify Mangadex@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or the Free Software Foundation, either version 3 of the License, or
@ -79,13 +84,26 @@ data class ClientArgs(
You should have received a copy of the GNU General Public License You should have received a copy of the GNU General Public License
along with Mangadex@Home. If not, see <https://www.gnu.org/licenses/>. along with Mangadex@Home. If not, see <https://www.gnu.org/licenses/>.
""".trimIndent()) """.trimIndent()
)
val client = MangaDexClient(settingsFile, databaseFile, cacheFolder) val client = MangaDexClient(settingsFile, databaseFile, cacheFolder)
Runtime.getRuntime().addShutdownHook(Thread { val hook = Thread {
client.shutdown() client.shutdown()
(LoggerFactory.getILoggerFactory() as LoggerContext).stop() (LoggerFactory.getILoggerFactory() as LoggerContext).stop()
}) }
client.runLoop() Runtime.getRuntime().addShutdownHook(
hook
)
try {
client.runLoop()
} catch (e: Exception) {
Runtime.getRuntime().removeShutdownHook(
hook
)
hook.run()
throw e
}
} }
} }

View file

@ -17,46 +17,45 @@ You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>. along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/ */
/* ktlint-disable no-wildcard-imports */ /* ktlint-disable no-wildcard-imports */
package mdnet.base package mdnet
import com.fasterxml.jackson.core.JsonParser
import com.fasterxml.jackson.core.JsonProcessingException import com.fasterxml.jackson.core.JsonProcessingException
import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.databind.exc.UnrecognizedPropertyException import com.fasterxml.jackson.databind.exc.UnrecognizedPropertyException
import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
import com.fasterxml.jackson.module.kotlin.KotlinModule
import com.fasterxml.jackson.module.kotlin.readValue import com.fasterxml.jackson.module.kotlin.readValue
import mdnet.Main.dieWithError
import mdnet.cache.ImageStorage
import mdnet.logging.info
import mdnet.logging.warn
import mdnet.settings.ClientSettings
import org.ktorm.database.Database
import org.slf4j.LoggerFactory
import java.io.File import java.io.File
import java.io.FileReader import java.io.FileReader
import java.io.IOException import java.io.IOException
import java.nio.file.Path
import java.util.concurrent.CountDownLatch import java.util.concurrent.CountDownLatch
import java.util.concurrent.Executors import java.util.concurrent.Executors
import java.util.concurrent.ScheduledFuture import java.util.concurrent.ScheduledFuture
import java.util.concurrent.TimeUnit import java.util.concurrent.TimeUnit
import java.util.regex.Pattern import java.util.regex.Pattern
import mdnet.base.Main.dieWithError
import mdnet.base.server.getUiServer
import mdnet.base.settings.*
import mdnet.cache.DiskLruCache
import mdnet.cache.HeaderMismatchException
import org.http4k.server.Http4kServer
import org.jetbrains.exposed.sql.Database
import org.slf4j.LoggerFactory
// Exception class to handle when Client Settings have invalid values // Exception class to handle when Client Settings have invalid values
class ClientSettingsException(message: String) : Exception(message) class ClientSettingsException(message: String) : Exception(message)
class MangaDexClient(private val settingsFile: File, databaseFile: File, cacheFolder: File) { class MangaDexClient(private val settingsFile: File, databaseFile: File, cacheFolder: Path) {
// this must remain single-threaded because of how the state mechanism works // this must remain single-threaded because of how the state mechanism works
private val executor = Executors.newSingleThreadScheduledExecutor() private val executor = Executors.newSingleThreadScheduledExecutor()
private lateinit var scheduledFuture: ScheduledFuture<*> private var scheduledFuture: ScheduledFuture<*>? = null
private val database: Database private val database: Database
private val cache: DiskLruCache private val storage: ImageStorage
private var settings: ClientSettings private var settings: ClientSettings
// state that must only be accessed from the thread on the executor // state that must only be accessed from the thread on the executor
private var imageServer: ServerManager? = null private var imageServer: ServerManager? = null
private var webUi: Http4kServer? = null
// end protected state // end protected state
init { init {
@ -74,51 +73,30 @@ class MangaDexClient(private val settingsFile: File, databaseFile: File, cacheFo
LOGGER.info { "Client settings loaded: $settings" } LOGGER.info { "Client settings loaded: $settings" }
database = Database.connect("jdbc:sqlite:$databaseFile", "org.sqlite.JDBC") database = Database.connect("jdbc:h2:$databaseFile", "org.h2.Driver")
storage = ImageStorage(
try { maxSize = (settings.maxCacheSizeInMebibytes * 1024 * 1024 * 0.95).toLong(), /* MiB to bytes */
cache = DiskLruCache.open( cacheFolder,
cacheFolder, 1, 1, database
(settings.maxCacheSizeInMebibytes * 1024 * 1024 * 0.8).toLong() /* MiB to bytes */ )
)
} catch (e: HeaderMismatchException) {
LOGGER.warn { "Cache version may be outdated - remove if necessary" }
dieWithError(e)
} catch (e: IOException) {
dieWithError(e)
}
} }
fun runLoop() { fun runLoop() {
LOGGER.info { "Mangadex@Home Client initialized - starting normal operation." } LOGGER.info { "Mangadex@Home Client initialized - starting normal operation" }
scheduledFuture = executor.scheduleWithFixedDelay({
try {
// this blocks the executor, so no worries about concurrency
reloadClientSettings()
} catch (e: Exception) {
LOGGER.warn(e) { "Reload of ClientSettings failed" }
}
}, 1, 1, TimeUnit.MINUTES)
startImageServer() startImageServer()
startWebUi()
}
// Precondition: settings must be filled with up-to-date settings and `imageServer` must not be null scheduledFuture = executor.scheduleWithFixedDelay(
private fun startWebUi() { {
settings.webSettings?.let { webSettings -> try {
val imageServer = requireNotNull(imageServer) // this blocks the executor, so no worries about concurrency
reloadClientSettings()
if (webUi != null) { } catch (e: Exception) {
throw AssertionError() LOGGER.warn(e) { "Reload of ClientSettings failed" }
} }
LOGGER.info { "WebUI starting" } },
webUi = getUiServer(webSettings, imageServer.statistics, imageServer.statsMap).also { 1, 1, TimeUnit.MINUTES
it.start() )
}
LOGGER.info { "WebUI started" }
}
} }
// Precondition: settings must be filled with up-to-date settings // Precondition: settings must be filled with up-to-date settings
@ -127,7 +105,13 @@ class MangaDexClient(private val settingsFile: File, databaseFile: File, cacheFo
throw AssertionError() throw AssertionError()
} }
LOGGER.info { "Server manager starting" } LOGGER.info { "Server manager starting" }
imageServer = ServerManager(settings.serverSettings, settings.devSettings, settings.maxCacheSizeInMebibytes, cache, database).also { imageServer = ServerManager(
settings.serverSettings,
settings.devSettings,
settings.maxCacheSizeInMebibytes,
settings.metricsSettings,
storage
).also {
it.start() it.start()
} }
LOGGER.info { "Server manager started" } LOGGER.info { "Server manager started" }
@ -140,38 +124,28 @@ class MangaDexClient(private val settingsFile: File, databaseFile: File, cacheFo
LOGGER.info { "Server manager stopped" } LOGGER.info { "Server manager stopped" }
} }
private fun stopWebUi() {
LOGGER.info { "WebUI stopping" }
requireNotNull(webUi).stop()
webUi = null
LOGGER.info { "WebUI stopped" }
}
fun shutdown() { fun shutdown() {
LOGGER.info { "Mangadex@Home Client shutting down" } LOGGER.info { "Mangadex@Home Client shutting down" }
val latch = CountDownLatch(1) val latch = CountDownLatch(1)
scheduledFuture.cancel(false) scheduledFuture?.cancel(false)
executor.schedule({ executor.schedule(
if (webUi != null) { {
stopWebUi() if (imageServer != null) {
} stopImageServer()
if (imageServer != null) { }
stopImageServer()
}
try { storage.close()
cache.close() latch.countDown()
} catch (e: IOException) { },
LOGGER.error(e) { "Cache failed to close" } 0, TimeUnit.SECONDS
} )
latch.countDown()
}, 0, TimeUnit.SECONDS)
latch.await() latch.await()
executor.shutdown() executor.shutdown()
executor.awaitTermination(10, TimeUnit.SECONDS)
LOGGER.info { "Mangadex@Home Client has shut down" } LOGGER.info { "Mangadex@Home Client has shut down" }
} }
@ -190,30 +164,17 @@ class MangaDexClient(private val settingsFile: File, databaseFile: File, cacheFo
} }
LOGGER.info { "New settings loaded: $newSettings" } LOGGER.info { "New settings loaded: $newSettings" }
cache.maxSize = (newSettings.maxCacheSizeInMebibytes * 1024 * 1024 * 0.8).toLong() storage.maxSize = (newSettings.maxCacheSizeInMebibytes * 1024 * 1024 * 0.95).toLong()
val restartServer = newSettings.serverSettings != settings.serverSettings || val restartServer = newSettings.serverSettings != settings.serverSettings ||
newSettings.devSettings != settings.devSettings newSettings.devSettings != settings.devSettings ||
newSettings.metricsSettings != settings.metricsSettings
val stopWebUi = restartServer || newSettings.webSettings != settings.webSettings
val startWebUi = stopWebUi && newSettings.webSettings != null
if (stopWebUi) {
LOGGER.info { "Stopping WebUI to reload ClientSettings" }
if (webUi != null) {
stopWebUi()
}
}
if (restartServer) { if (restartServer) {
stopImageServer() stopImageServer()
startImageServer() startImageServer()
} }
if (startWebUi) {
startWebUi()
}
settings = newSettings settings = newSettings
} catch (e: UnrecognizedPropertyException) { } catch (e: UnrecognizedPropertyException) {
LOGGER.warn { "Settings file is invalid: '$e.propertyName' is not a valid setting" } LOGGER.warn { "Settings file is invalid: '$e.propertyName' is not a valid setting" }
@ -227,22 +188,22 @@ class MangaDexClient(private val settingsFile: File, databaseFile: File, cacheFo
} }
private fun validateSettings(settings: ClientSettings) { private fun validateSettings(settings: ClientSettings) {
if (settings.maxCacheSizeInMebibytes < 1024) { // if (settings.maxCacheSizeInMebibytes < 1024) {
throw ClientSettingsException("Config Error: Invalid max cache size, must be >= 1024 MiB (1GiB)") // throw ClientSettingsException("Config Error: Invalid max cache size, must be >= 1024 MiB (1GiB)")
} // }
fun isSecretValid(clientSecret: String): Boolean { fun isSecretValid(clientSecret: String): Boolean {
return Pattern.matches("^[a-zA-Z0-9]{$CLIENT_KEY_LENGTH}$", clientSecret) return Pattern.matches("^[a-zA-Z0-9]{$CLIENT_KEY_LENGTH}$", clientSecret)
} }
settings.serverSettings.let { settings.serverSettings.let {
if (!isSecretValid(it.clientSecret)) { if (!isSecretValid(it.secret)) {
throw ClientSettingsException("Config Error: API Secret is invalid, must be 52 alphanumeric characters") throw ClientSettingsException("Config Error: API Secret is invalid, must be 52 alphanumeric characters")
} }
if (it.clientPort == 0) { if (it.port == 0) {
throw ClientSettingsException("Config Error: Invalid port number") throw ClientSettingsException("Config Error: Invalid port number")
} }
if (it.clientPort in Constants.RESTRICTED_PORTS) { if (it.port in Constants.RESTRICTED_PORTS) {
throw ClientSettingsException("Config Error: Unsafe port number") throw ClientSettingsException("Config Error: Unsafe port number")
} }
if (it.threads < 4) { if (it.threads < 4) {
@ -258,11 +219,6 @@ class MangaDexClient(private val settingsFile: File, databaseFile: File, cacheFo
throw ClientSettingsException("Config Error: Graceful shutdown wait must be >= 15") throw ClientSettingsException("Config Error: Graceful shutdown wait must be >= 15")
} }
} }
settings.webSettings?.let {
if (it.uiPort == 0) {
throw ClientSettingsException("Config Error: Invalid UI port number")
}
}
} }
private fun readClientSettings(): ClientSettings { private fun readClientSettings(): ClientSettings {
@ -272,6 +228,6 @@ class MangaDexClient(private val settingsFile: File, databaseFile: File, cacheFo
companion object { companion object {
private const val CLIENT_KEY_LENGTH = 52 private const val CLIENT_KEY_LENGTH = 52
private val LOGGER = LoggerFactory.getLogger(MangaDexClient::class.java) private val LOGGER = LoggerFactory.getLogger(MangaDexClient::class.java)
private val JACKSON: ObjectMapper = jacksonObjectMapper().configure(JsonParser.Feature.ALLOW_COMMENTS, true) private val JACKSON: ObjectMapper = ObjectMapper(YAMLFactory()).registerModule(KotlinModule())
} }
} }

View file

@ -0,0 +1,311 @@
/*
Mangadex@Home
Copyright (c) 2020, MangaDex Network
This file is part of MangaDex@Home.
MangaDex@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MangaDex@Home is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
/* ktlint-disable no-wildcard-imports */
package mdnet
import io.micrometer.prometheus.PrometheusConfig
import io.micrometer.prometheus.PrometheusMeterRegistry
import mdnet.cache.ImageStorage
import mdnet.data.Statistics
import mdnet.logging.error
import mdnet.logging.info
import mdnet.logging.warn
import mdnet.metrics.DefaultMicrometerMetrics
import mdnet.server.getServer
import mdnet.settings.DevSettings
import mdnet.settings.MetricsSettings
import mdnet.settings.RemoteSettings
import mdnet.settings.ServerSettings
import org.http4k.server.Http4kServer
import org.slf4j.LoggerFactory
import java.util.concurrent.CountDownLatch
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicReference
sealed class State
// server is not running
data class Uninitialized(val serverSettings: ServerSettings, val devSettings: DevSettings) : State()
// server has shut down
object Shutdown : State()
// server is in the process of stopping
data class GracefulStop(
val lastRunning: Running,
val counts: Int = 0,
val nextState: State = Uninitialized(lastRunning.serverSettings, lastRunning.devSettings),
val action: () -> Unit = {}
) : State()
// server is currently running
data class Running(val server: Http4kServer, val settings: RemoteSettings, val serverSettings: ServerSettings, val devSettings: DevSettings) : State()
class ServerManager(
serverSettings: ServerSettings,
devSettings: DevSettings,
maxCacheSizeInMebibytes: Long,
private val metricsSettings: MetricsSettings,
private val storage: ImageStorage
) {
// this must remain single-threaded because of how the state mechanism works
private val executor = Executors.newSingleThreadScheduledExecutor()
private val registry = PrometheusMeterRegistry(PrometheusConfig.DEFAULT)
private val statistics: AtomicReference<Statistics> = AtomicReference(
Statistics()
)
// state that must only be accessed from the thread on the executor
private var state: State
private var backendApi: BackendApi
// end protected state
init {
state = Uninitialized(serverSettings, devSettings)
backendApi = BackendApi(serverSettings, devSettings, maxCacheSizeInMebibytes)
}
fun start() {
LOGGER.info { "Image server starting" }
DefaultMicrometerMetrics(registry, storage.cacheDirectory)
loginAndStartServer()
var lastBytesSent = statistics.get().bytesSent
executor.scheduleAtFixedRate(
{
try {
lastBytesSent = statistics.get().bytesSent
val state = this.state
if (state is GracefulStop && state.nextState != Shutdown) {
LOGGER.info { "Aborting graceful shutdown started due to hourly bandwidth limit" }
this.state = state.lastRunning
}
if (state is Uninitialized) {
LOGGER.info { "Restarting server stopped due to hourly bandwidth limit" }
loginAndStartServer()
}
} catch (e: Exception) {
LOGGER.warn(e) { "Hourly bandwidth check failed" }
}
},
1, 1, TimeUnit.HOURS
)
var lastRequests = 0L
executor.scheduleAtFixedRate(
{
try {
val state = this.state
if (state is GracefulStop) {
val timesToWait = state.lastRunning.serverSettings.gracefulShutdownWaitSeconds / 5
val requestCounters = registry.find("http.server.request.latency").timers()
val curRequests = requestCounters.map { it.count() }.sum()
val noRequests = lastRequests >= curRequests
when {
state.counts == 0 -> {
LOGGER.info { "Starting graceful stop" }
logout()
lastRequests = curRequests
this.state = state.copy(counts = state.counts + 1)
}
state.counts == timesToWait || noRequests -> {
if (noRequests) {
LOGGER.info { "No requests received, stopping" }
} else {
LOGGER.info { "Max tries attempted (${state.counts} out of $timesToWait), shutting down" }
}
stopServer(state.nextState)
state.action()
}
else -> {
LOGGER.info {
"Waiting another 5 seconds for graceful stop (${state.counts} out of $timesToWait)"
}
lastRequests = curRequests
this.state = state.copy(counts = state.counts + 1)
}
}
}
} catch (e: Exception) {
LOGGER.error(e) { "Main loop failed" }
}
},
5, 5, TimeUnit.SECONDS
)
executor.scheduleWithFixedDelay(
{
try {
val state = this.state
if (state is Running) {
val currentBytesSent = statistics.get().bytesSent - lastBytesSent
if (state.serverSettings.maxMebibytesPerHour != 0L && state.serverSettings.maxMebibytesPerHour * 1024 * 1024 /* MiB to bytes */ < currentBytesSent) {
LOGGER.info { "Stopping image server as hourly bandwidth limit reached" }
this.state = GracefulStop(lastRunning = state)
} else {
pingControl()
}
}
} catch (e: Exception) {
LOGGER.warn(e) { "Bandwidth shutdown checker/ping failed" }
}
},
45, 45, TimeUnit.SECONDS
)
LOGGER.info { "Image server has started" }
}
private fun pingControl() {
// this is currentSettings, other is newSettings
// if tls is null that means same as previous ping
fun RemoteSettings.logicalEqual(other: RemoteSettings): Boolean {
val test = if (other.tls != null) {
other
} else {
other.copy(tls = this.tls)
}
return this == test
}
val state = this.state as Running
val newSettings = backendApi.pingControl(state.settings)
if (newSettings != null) {
LOGGER.info { "Server settings received: $newSettings" }
warmBasedOnSettings(newSettings)
if (!state.settings.logicalEqual(newSettings)) {
LOGGER.info { "Doing internal restart of HTTP server to refresh settings" }
this.state = GracefulStop(lastRunning = state) {
loginAndStartServer()
}
}
} else {
LOGGER.info { "Server ping failed - ignoring" }
}
}
private fun loginAndStartServer() {
val state = this.state as Uninitialized
val remoteSettings = backendApi.loginToControl()
?: throw RuntimeException("Failed to get a login response from server")
LOGGER.info { "Server settings received: $remoteSettings" }
warmBasedOnSettings(remoteSettings)
val server = getServer(
storage,
remoteSettings,
state.serverSettings,
statistics,
metricsSettings,
registry
).start()
this.state = Running(server, remoteSettings, state.serverSettings, state.devSettings)
LOGGER.info { "Internal HTTP server was successfully started" }
}
private fun logout() {
backendApi.logoutFromControl()
}
private fun stopServer(nextState: State) {
val state = this.state.let {
when (it) {
is Running ->
it
is GracefulStop ->
it.lastRunning
else ->
throw AssertionError()
}
}
LOGGER.info { "Image server stopping" }
state.server.stop()
LOGGER.info { "Image server has stopped" }
this.state = nextState
}
fun shutdown() {
LOGGER.info { "Image server shutting down" }
val latch = CountDownLatch(1)
executor.schedule(
{
val state = this.state
if (state is Running) {
this.state = GracefulStop(state, nextState = Shutdown) {
latch.countDown()
}
} else if (state is GracefulStop) {
this.state = state.copy(nextState = Shutdown) {
latch.countDown()
}
} else if (state is Uninitialized || state is Shutdown) {
this.state = Shutdown
latch.countDown()
}
},
0, TimeUnit.SECONDS
)
latch.await()
executor.shutdown()
LOGGER.info { "Image server has shut down" }
}
private fun warmBasedOnSettings(settings: RemoteSettings) {
if (settings.latestBuild > Constants.CLIENT_BUILD) {
LOGGER.warn {
"Outdated build detected! Latest: ${settings.latestBuild}, Current: ${Constants.CLIENT_BUILD}"
}
}
if (settings.paused) {
LOGGER.warn {
"Your client is paused by the backend and will not serve any images!"
}
}
if (settings.compromised) {
LOGGER.warn {
"Your client secret is compromised and it will not serve any images!"
}
}
}
companion object {
private val LOGGER = LoggerFactory.getLogger(ServerManager::class.java)
}
}
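The 5-second loop above is what drives a graceful stop; as a rough, stand-alone sketch (function and variable names here are illustrative only), its per-tick decision boils down to:

// Simplified model of the per-tick graceful-stop decision.
// timesToWait mirrors gracefulShutdownWaitSeconds / 5 in the code above.
fun gracefulStopStep(counts: Int, timesToWait: Int, lastRequests: Long, curRequests: Long): String {
    val idle = lastRequests >= curRequests // no new requests since the previous tick
    return when {
        counts == 0 -> "logout and start counting"
        counts == timesToWait || idle -> "stop the image server"
        else -> "wait another 5 seconds"
    }
}

fun main() {
    println(gracefulStopStep(0, 12, 0, 0))   // logout and start counting
    println(gracefulStopStep(3, 12, 40, 40)) // stop the image server (idle)
    println(gracefulStopStep(3, 12, 40, 45)) // wait another 5 seconds
}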

View file

@ -1,282 +0,0 @@
package mdnet.base
import com.fasterxml.jackson.core.JsonProcessingException
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.databind.SerializationFeature
import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper
import com.fasterxml.jackson.module.kotlin.readValue
import java.lang.RuntimeException
import java.time.Instant
import java.util.Collections
import java.util.LinkedHashMap
import java.util.concurrent.CountDownLatch
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.atomic.AtomicReference
import mdnet.base.data.Statistics
import mdnet.base.server.getServer
import mdnet.base.settings.DevSettings
import mdnet.base.settings.RemoteSettings
import mdnet.base.settings.ServerSettings
import mdnet.cache.DiskLruCache
import org.http4k.server.Http4kServer
import org.jetbrains.exposed.sql.Database
import org.slf4j.LoggerFactory
sealed class State
// server is not running
data class Uninitialized(val serverSettings: ServerSettings, val devSettings: DevSettings) : State()
// server has shut down
object Shutdown : State()
// server is in the process of stopping
data class GracefulStop(val lastRunning: Running, val counts: Int = 0, val nextState: State = Uninitialized(lastRunning.serverSettings, lastRunning.devSettings), val action: () -> Unit = {}) : State()
// server is currently running
data class Running(val server: Http4kServer, val settings: RemoteSettings, val serverSettings: ServerSettings, val devSettings: DevSettings) : State()
class ServerManager(serverSettings: ServerSettings, devSettings: DevSettings, maxCacheSizeInMebibytes: Long, private val cache: DiskLruCache, private val database: Database) {
// this must remain single-threaded because of how the state mechanism works
private val executor = Executors.newSingleThreadScheduledExecutor()
// state that must only be accessed from the thread on the executor
private var state: State
private var serverHandler: ServerHandler
// end protected state
val statsMap: MutableMap<Instant, Statistics> = Collections
.synchronizedMap(object : LinkedHashMap<Instant, Statistics>(240) {
override fun removeEldestEntry(eldest: Map.Entry<Instant, Statistics>): Boolean {
return this.size > 240
}
})
val statistics: AtomicReference<Statistics> = AtomicReference(
Statistics()
)
private val isHandled: AtomicBoolean = AtomicBoolean(false)
init {
state = Uninitialized(serverSettings, devSettings)
serverHandler = ServerHandler(serverSettings, devSettings, maxCacheSizeInMebibytes)
cache.get("statistics")?.use {
try {
statistics.set(JACKSON.readValue<Statistics>(it.getInputStream(0)))
} catch (_: JsonProcessingException) {
cache.remove("statistics")
}
}
}
fun start() {
LOGGER.info { "Image server starting" }
loginAndStartServer()
statsMap[Instant.now()] = statistics.get()
executor.scheduleAtFixedRate({
try {
if (state is Running || state is GracefulStop || state is Uninitialized) {
statistics.updateAndGet {
it.copy(bytesOnDisk = cache.size())
}
statsMap[Instant.now()] = statistics.get()
val editor = cache.edit("statistics")
if (editor != null) {
JACKSON.writeValue(editor.newOutputStream(0), statistics.get())
editor.commit()
}
}
} catch (e: Exception) {
LOGGER.warn(e) { "Statistics update failed" }
}
}, 15, 15, TimeUnit.SECONDS)
var lastBytesSent = statistics.get().bytesSent
executor.scheduleAtFixedRate({
try {
lastBytesSent = statistics.get().bytesSent
val state = this.state
if (state is GracefulStop && state.nextState != Shutdown) {
LOGGER.info { "Aborting graceful shutdown started due to hourly bandwidth limit" }
this.state = state.lastRunning
}
if (state is Uninitialized) {
LOGGER.info { "Restarting server stopped due to hourly bandwidth limit" }
loginAndStartServer()
}
} catch (e: Exception) {
LOGGER.warn(e) { "Hourly bandwidth check failed" }
}
}, 1, 1, TimeUnit.HOURS)
executor.scheduleAtFixedRate({
try {
val state = this.state
if (state is GracefulStop) {
val timesToWait = state.lastRunning.serverSettings.gracefulShutdownWaitSeconds / 15
when {
state.counts == 0 -> {
LOGGER.info { "Starting graceful stop" }
logout()
isHandled.set(false)
this.state = state.copy(counts = state.counts + 1)
}
state.counts == timesToWait || !isHandled.get() -> {
if (!isHandled.get()) {
LOGGER.info { "No requests received, stopping" }
} else {
LOGGER.info { "Max tries attempted (${state.counts} out of $timesToWait), shutting down" }
}
stopServer(state.nextState)
state.action()
}
else -> {
LOGGER.info {
"Waiting another 15 seconds for graceful stop (${state.counts} out of $timesToWait)"
}
isHandled.set(false)
this.state = state.copy(counts = state.counts + 1)
}
}
}
} catch (e: Exception) {
LOGGER.error(e) { "Main loop failed" }
}
}, 15, 15, TimeUnit.SECONDS)
executor.scheduleWithFixedDelay({
try {
val state = this.state
if (state is Running) {
val currentBytesSent = statistics.get().bytesSent - lastBytesSent
if (state.serverSettings.maxMebibytesPerHour != 0L && state.serverSettings.maxMebibytesPerHour * 1024 * 1024 /* MiB to bytes */ < currentBytesSent) {
LOGGER.info { "Stopping image server as hourly bandwidth limit reached" }
this.state = GracefulStop(lastRunning = state)
} else {
pingControl()
}
}
} catch (e: Exception) {
LOGGER.warn(e) { "Bandwidth shutdown checker/ping failed" }
}
}, 45, 45, TimeUnit.SECONDS)
LOGGER.info { "Image server has started" }
}
private fun pingControl() {
// this is currentSettings, other is newSettings
// if tls is null that means same as previous ping
fun RemoteSettings.logicalEqual(other: RemoteSettings): Boolean {
val test = if (other.tls != null) {
other
} else {
other.copy(tls = this.tls)
}
return this == test
}
val state = this.state as Running
val newSettings = serverHandler.pingControl(state.settings)
if (newSettings != null) {
LOGGER.info { "Server settings received: $newSettings" }
if (newSettings.latestBuild > Constants.CLIENT_BUILD) {
LOGGER.warn {
"Outdated build detected! Latest: ${newSettings.latestBuild}, Current: ${Constants.CLIENT_BUILD}"
}
}
if (!state.settings.logicalEqual(newSettings)) {
// certificates or upstream url must have changed, restart webserver
LOGGER.info { "Doing internal restart of HTTP server to refresh settings" }
this.state = GracefulStop(lastRunning = state) {
loginAndStartServer()
}
}
} else {
LOGGER.info { "Server ping failed - ignoring" }
}
}
private fun loginAndStartServer() {
val state = this.state as Uninitialized
val remoteSettings = serverHandler.loginToControl()
?: throw RuntimeException("Failed to get a login response from server")
val server = getServer(cache, database, remoteSettings, state.serverSettings, statistics, isHandled).start()
if (remoteSettings.latestBuild > Constants.CLIENT_BUILD) {
LOGGER.warn {
"Outdated build detected! Latest: ${remoteSettings.latestBuild}, Current: ${Constants.CLIENT_BUILD}"
}
}
this.state = Running(server, remoteSettings, state.serverSettings, state.devSettings)
LOGGER.info { "Internal HTTP server was successfully started" }
}
private fun logout() {
serverHandler.logoutFromControl()
}
private fun stopServer(nextState: State) {
val state = this.state.let {
when (it) {
is Running ->
it
is GracefulStop ->
it.lastRunning
else ->
throw AssertionError()
}
}
LOGGER.info { "Image server stopping" }
state.server.stop()
LOGGER.info { "Image server has stopped" }
this.state = nextState
}
fun shutdown() {
LOGGER.info { "Image server shutting down" }
val latch = CountDownLatch(1)
executor.schedule({
val state = this.state
if (state is Running) {
this.state = GracefulStop(state, nextState = Shutdown) {
latch.countDown()
}
} else if (state is GracefulStop) {
this.state = state.copy(nextState = Shutdown) {
latch.countDown()
}
} else if (state is Uninitialized || state is Shutdown) {
this.state = Shutdown
latch.countDown()
}
}, 0, TimeUnit.SECONDS)
latch.await()
executor.shutdown()
LOGGER.info { "Image server has shut down" }
}
companion object {
private val LOGGER = LoggerFactory.getLogger(ServerManager::class.java)
private val JACKSON: ObjectMapper = jacksonObjectMapper().enable(SerializationFeature.INDENT_OUTPUT)
}
}

View file

@ -1,76 +0,0 @@
/*
Mangadex@Home
Copyright (c) 2020, MangaDex Network
This file is part of MangaDex@Home.
MangaDex@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MangaDex@Home is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
package mdnet.base.netty
import io.netty.bootstrap.ServerBootstrap
import io.netty.channel.ChannelFactory
import io.netty.channel.ChannelFuture
import io.netty.channel.ChannelInitializer
import io.netty.channel.ChannelOption
import io.netty.channel.ServerChannel
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.SocketChannel
import io.netty.channel.socket.nio.NioServerSocketChannel
import io.netty.handler.codec.http.HttpObjectAggregator
import io.netty.handler.codec.http.HttpServerCodec
import io.netty.handler.codec.http.HttpServerKeepAliveHandler
import io.netty.handler.stream.ChunkedWriteHandler
import java.net.InetSocketAddress
import org.http4k.core.HttpHandler
import org.http4k.server.Http4kChannelHandler
import org.http4k.server.Http4kServer
import org.http4k.server.ServerConfig
class WebUiNetty(private val hostname: String, private val port: Int) : ServerConfig {
override fun toServer(httpHandler: HttpHandler): Http4kServer = object : Http4kServer {
private val masterGroup = NioEventLoopGroup()
private val workerGroup = NioEventLoopGroup()
private lateinit var closeFuture: ChannelFuture
private lateinit var address: InetSocketAddress
override fun start(): Http4kServer = apply {
val bootstrap = ServerBootstrap()
bootstrap.group(masterGroup, workerGroup)
.channelFactory(ChannelFactory<ServerChannel> { NioServerSocketChannel() })
.childHandler(object : ChannelInitializer<SocketChannel>() {
public override fun initChannel(ch: SocketChannel) {
ch.pipeline().addLast("codec", HttpServerCodec())
ch.pipeline().addLast("keepAlive", HttpServerKeepAliveHandler())
ch.pipeline().addLast("aggregator", HttpObjectAggregator(Int.MAX_VALUE))
ch.pipeline().addLast("streamer", ChunkedWriteHandler())
ch.pipeline().addLast("handler", Http4kChannelHandler(httpHandler))
}
})
.option(ChannelOption.SO_BACKLOG, 1000)
.childOption(ChannelOption.SO_KEEPALIVE, true)
val channel = bootstrap.bind(InetSocketAddress(hostname, port)).sync().channel()
address = channel.localAddress() as InetSocketAddress
closeFuture = channel.closeFuture()
}
override fun stop() = apply {
closeFuture.cancel(false)
workerGroup.shutdownGracefully()
masterGroup.shutdownGracefully()
}
override fun port(): Int = address.port
}
}
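For reference, this ServerConfig plugs into http4k like any other server backend; a minimal, hypothetical sketch of wiring a handler to it (the route, port, and handler are arbitrary):

import org.http4k.core.Method
import org.http4k.core.Response
import org.http4k.core.Status
import org.http4k.routing.bind
import org.http4k.routing.routes
import org.http4k.server.asServer

fun main() {
    val app = routes("/ping" bind Method.GET to { Response(Status.OK).body("pong") })
    val server = app.asServer(WebUiNetty("127.0.0.1", 8080)).start()
    // ... serve until shutdown ...
    server.stop()
}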

View file

@ -1,424 +0,0 @@
/*
Mangadex@Home
Copyright (c) 2020, MangaDex Network
This file is part of MangaDex@Home.
MangaDex@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MangaDex@Home is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
/* ktlint-disable no-wildcard-imports */
package mdnet.base.server
import com.fasterxml.jackson.core.JsonProcessingException
import com.fasterxml.jackson.databind.DeserializationFeature
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule
import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper
import com.fasterxml.jackson.module.kotlin.readValue
import java.io.BufferedInputStream
import java.io.BufferedOutputStream
import java.io.File
import java.io.InputStream
import java.lang.IllegalArgumentException
import java.time.Clock
import java.time.OffsetDateTime
import java.util.*
import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.atomic.AtomicReference
import javax.crypto.Cipher
import javax.crypto.CipherInputStream
import javax.crypto.CipherOutputStream
import mdnet.base.Constants
import mdnet.base.data.ImageData
import mdnet.base.data.ImageDatum
import mdnet.base.data.Statistics
import mdnet.base.data.Token
import mdnet.base.info
import mdnet.base.netty.Netty
import mdnet.base.settings.RemoteSettings
import mdnet.base.settings.ServerSettings
import mdnet.base.trace
import mdnet.base.warn
import mdnet.cache.CachingInputStream
import mdnet.cache.DiskLruCache
import mdnet.security.TweetNaclFast
import org.apache.http.client.config.CookieSpecs
import org.apache.http.client.config.RequestConfig
import org.apache.http.impl.client.HttpClients
import org.http4k.client.Apache4Client
import org.http4k.core.*
import org.http4k.filter.CachingFilters
import org.http4k.lens.LensFailure
import org.http4k.lens.Path
import org.http4k.routing.bind
import org.http4k.routing.routes
import org.http4k.server.Http4kServer
import org.http4k.server.asServer
import org.jetbrains.exposed.exceptions.ExposedSQLException
import org.jetbrains.exposed.sql.Database
import org.jetbrains.exposed.sql.SchemaUtils
import org.jetbrains.exposed.sql.transactions.transaction
import org.slf4j.LoggerFactory
private val LOGGER = LoggerFactory.getLogger(ImageServer::class.java)
class ImageServer(
private val cache: DiskLruCache,
private val database: Database,
private val statistics: AtomicReference<Statistics>,
private val remoteSettings: RemoteSettings,
private val client: HttpHandler
) {
init {
synchronized(database) {
transaction(database) {
SchemaUtils.create(ImageData)
}
}
}
private val executor = Executors.newCachedThreadPool()
fun handler(dataSaver: Boolean, tokenized: Boolean = false): HttpHandler {
val box = TweetNaclFast.SecretBox(remoteSettings.tokenKey)
return baseHandler().then { request ->
val chapterHash = Path.of("chapterHash")(request)
val fileName = Path.of("fileName")(request)
val sanitizedUri = if (dataSaver) {
"/data-saver"
} else {
"/data"
} + "/$chapterHash/$fileName"
if (!request.referrerMatches(ALLOWED_REFERER_DOMAINS)) {
LOGGER.info { "Request for $sanitizedUri rejected due to non-allowed referrer ${request.header("Referer")}" }
return@then Response(Status.FORBIDDEN)
}
if ((tokenized || remoteSettings.forceTokens) && !isTestImage(chapterHash)) {
val tokenArr = try {
val toDecode = try {
Path.of("token")(request)
} catch (e: LensFailure) {
LOGGER.info(e) { "Request for $sanitizedUri rejected for missing token" }
return@then Response(Status.FORBIDDEN).body("Token is missing")
}
Base64.getUrlDecoder().decode(toDecode)
} catch (e: IllegalArgumentException) {
LOGGER.info(e) { "Request for $sanitizedUri rejected for non-base64 token" }
return@then Response(Status.FORBIDDEN).body("Token is invalid base64")
}
if (tokenArr.size < 24) {
LOGGER.info { "Request for $sanitizedUri rejected for invalid token" }
return@then Response(Status.FORBIDDEN)
}
val token = try {
JACKSON.readValue<Token>(
box.open(tokenArr.sliceArray(24 until tokenArr.size), tokenArr.sliceArray(0 until 24)).apply {
if (this == null) {
LOGGER.info { "Request for $sanitizedUri rejected for invalid token" }
return@then Response(Status.FORBIDDEN)
}
}
)
} catch (e: JsonProcessingException) {
LOGGER.info(e) { "Request for $sanitizedUri rejected for invalid token" }
return@then Response(Status.FORBIDDEN).body("Token is invalid")
}
if (OffsetDateTime.now().isAfter(token.expires)) {
LOGGER.info { "Request for $sanitizedUri rejected for expired token" }
return@then Response(Status.GONE).body("Token has expired")
}
if (token.hash != chapterHash) {
LOGGER.info { "Request for $sanitizedUri rejected for inapplicable token" }
return@then Response(Status.FORBIDDEN).body("Token is inapplicable for the image")
}
}
statistics.getAndUpdate {
it.copy(requestsServed = it.requestsServed + 1)
}
val rc4Bytes = if (dataSaver) {
md5Bytes("saver$chapterHash.$fileName")
} else {
md5Bytes("$chapterHash.$fileName")
}
val imageId = printHexString(rc4Bytes)
val snapshot = cache.getUnsafe(imageId.toCacheId())
val imageDatum = synchronized(database) {
transaction(database) {
ImageDatum.findById(imageId)
}
}
if (snapshot != null && imageDatum != null && imageDatum.contentType.isImageMimetype()) {
request.handleCacheHit(sanitizedUri, getRc4(rc4Bytes), snapshot, imageDatum)
} else {
if (snapshot != null) {
snapshot.close()
LOGGER.warn { "Removing broken cache file for $sanitizedUri" }
cache.removeUnsafe(imageId.toCacheId())
cache.flush()
}
request.handleCacheMiss(sanitizedUri, getRc4(rc4Bytes), imageId, imageDatum)
}
}
}
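The token branch above expects a URL-safe base64 blob whose first 24 bytes are the NaCl nonce and whose remainder is the sealed JSON token. A rough sketch of those checks follows; the decrypt step is passed in as a plain function because TweetNaclFast.SecretBox is the project's own wrapper and is only assumed here.

import java.time.OffsetDateTime
import java.util.Base64

// Illustrative reduction of the token checks: nonce = first 24 bytes, box = the rest.
data class TokenClaims(val expires: OffsetDateTime, val hash: String)

fun checkToken(
    encoded: String,
    chapterHash: String,
    openBox: (box: ByteArray, nonce: ByteArray) -> TokenClaims? // stands in for SecretBox.open + JSON parse
): String? {
    val raw = try {
        Base64.getUrlDecoder().decode(encoded)
    } catch (e: IllegalArgumentException) {
        return "Token is invalid base64"
    }
    if (raw.size < 24) return "Token is invalid"
    val claims = openBox(raw.copyOfRange(24, raw.size), raw.copyOfRange(0, 24))
        ?: return "Token is invalid"
    if (OffsetDateTime.now().isAfter(claims.expires)) return "Token has expired"
    if (claims.hash != chapterHash) return "Token is inapplicable for the image"
    return null // token accepted
}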
/**
* Filters referrers based on passed (sub)domains. Ignores `scheme` (protocol) in URL
*/
private fun Request.referrerMatches(allowedDomains: List<String>, permitBlank: Boolean = true): Boolean {
val referer = this.header("Referer") ?: return permitBlank // Referrer was misspelled as "Referer" and now we're stuck with it -_-
if (referer == "") return permitBlank
return allowedDomains.any {
referer.substringAfter("//") // Ignore scheme
.substringBefore("/") // Ignore path
.substringBefore(":")
.endsWith(it)
}
}
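In other words, only the host part of the Referer value is compared against a suffix list. A stand-alone sketch of just that comparison (the domains and URLs below are examples only):

fun refererAllowed(referer: String?, allowedDomains: List<String>, permitBlank: Boolean = true): Boolean {
    if (referer.isNullOrEmpty()) return permitBlank
    val host = referer.substringAfter("//") // drop the scheme
        .substringBefore("/")               // drop the path
        .substringBefore(":")               // drop any port
    return allowedDomains.any { host.endsWith(it) }
}

fun main() {
    val allowed = listOf("mangadex.org", "mangadex.network")
    println(refererAllowed("https://mangadex.org/chapter/123", allowed)) // true
    println(refererAllowed("https://cdn.mangadex.network", allowed))     // true (subdomain)
    println(refererAllowed("https://example.com/", allowed))             // false
    println(refererAllowed(null, allowed))                               // true, blank referrers pass
}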
private fun Request.handleCacheHit(sanitizedUri: String, cipher: Cipher, snapshot: DiskLruCache.Snapshot, imageDatum: ImageDatum): Response {
// our files never change, so it's safe to use the browser cache
return if (this.header("If-Modified-Since") != null) {
statistics.getAndUpdate {
it.copy(browserCached = it.browserCached + 1)
}
LOGGER.info { "Request for $sanitizedUri cached by browser" }
val lastModified = imageDatum.lastModified
snapshot.close()
Response(Status.NOT_MODIFIED)
.header("Last-Modified", lastModified)
} else {
statistics.getAndUpdate {
it.copy(cacheHits = it.cacheHits + 1)
}
LOGGER.info { "Request for $sanitizedUri hit cache" }
respondWithImage(
CipherInputStream(BufferedInputStream(snapshot.getInputStream(0)), cipher),
snapshot.getLength(0).toString(), imageDatum.contentType, imageDatum.lastModified,
true
)
}
}
private fun Request.handleCacheMiss(sanitizedUri: String, cipher: Cipher, imageId: String, imageDatum: ImageDatum?): Response {
LOGGER.info { "Request for $sanitizedUri missed cache" }
statistics.getAndUpdate {
it.copy(cacheMisses = it.cacheMisses + 1)
}
val mdResponse = client(Request(Method.GET, "${remoteSettings.imageServer}$sanitizedUri"))
if (mdResponse.status != Status.OK) {
LOGGER.trace { "Upstream query for $sanitizedUri errored with status ${mdResponse.status}" }
mdResponse.close()
return Response(mdResponse.status)
}
val contentType = mdResponse.header("Content-Type")!!
val contentLength = mdResponse.header("Content-Length")
val lastModified = mdResponse.header("Last-Modified")
if (!contentType.isImageMimetype()) {
LOGGER.warn { "Upstream query for $sanitizedUri returned bad mimetype $contentType" }
mdResponse.close()
return Response(Status.INTERNAL_SERVER_ERROR)
}
LOGGER.trace { "Upstream query for $sanitizedUri succeeded" }
val editor = cache.editUnsafe(imageId.toCacheId())
// A null editor means that this file is being written to
// concurrently so we skip the cache process
return if (editor != null && contentLength != null && lastModified != null) {
LOGGER.trace { "Request for $sanitizedUri is being cached and served" }
if (imageDatum == null) {
try {
synchronized(database) {
transaction(database) {
ImageDatum.new(imageId) {
this.contentType = contentType
this.lastModified = lastModified
}
}
}
} catch (_: ExposedSQLException) {
// some other code got to the database first, fall back to just serving
editor.abort()
LOGGER.trace { "Request for $sanitizedUri is being served" }
respondWithImage(mdResponse.body.stream, contentLength, contentType, lastModified, false)
}
}
val tee = CachingInputStream(
mdResponse.body.stream,
executor, CipherOutputStream(BufferedOutputStream(editor.newOutputStream(0)), cipher)
) {
try {
if (editor.getLength(0) == contentLength.toLong()) {
LOGGER.info { "Cache download for $sanitizedUri committed" }
editor.commit()
cache.flush()
} else {
LOGGER.warn { "Cache download for $sanitizedUri aborted" }
editor.abort()
}
} catch (e: Exception) {
LOGGER.warn(e) { "Cache go/no go for $sanitizedUri failed" }
}
}
respondWithImage(tee, contentLength, contentType, lastModified, false)
} else {
editor?.abort()
LOGGER.trace { "Request for $sanitizedUri is being served" }
respondWithImage(mdResponse.body.stream, contentLength, contentType, lastModified, false)
}
}
private fun String.toCacheId() =
this.substring(0, 8).replace("..(?!$)".toRegex(), "$0 ").split(" ".toRegex())
.plus(this).joinToString(File.separator)
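The toCacheId helper above shards the first eight characters of the image id into four directory levels before appending the full id. A small equivalent sketch (the id below is made up):

import java.io.File

// Mirrors toCacheId: split the first 8 chars into 2-char segments, then append the full id.
fun cachePathFor(imageId: String): String =
    imageId.substring(0, 8).chunked(2).plus(imageId).joinToString(File.separator)

fun main() {
    // e.g. "ab/cd/ef/12/abcdef1234567890..." on Unix-like systems
    println(cachePathFor("abcdef1234567890abcdef1234567890"))
}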
private fun respondWithImage(input: InputStream, length: String?, type: String, lastModified: String?, cached: Boolean): Response =
Response(Status.OK)
.header("Content-Type", type)
.header("X-Content-Type-Options", "nosniff")
.let {
if (length != null) {
it.body(input, length.toLong()).header("Content-Length", length)
} else {
it.body(input).header("Transfer-Encoding", "chunked")
}
}
.let {
if (lastModified != null) {
it.header("Last-Modified", lastModified)
} else {
it
}
}
.header("X-Cache", if (cached) "HIT" else "MISS")
companion object {
private val JACKSON: ObjectMapper = jacksonObjectMapper()
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
.registerModule(JavaTimeModule())
private val ALLOWED_REFERER_DOMAINS = listOf("mangadex.org", "mangadex.network") // TODO: Factor out hardcoded domains?
private fun baseHandler(): Filter =
CachingFilters.Response.MaxAge(Clock.systemUTC(), Constants.MAX_AGE_CACHE)
.then(Filter { next: HttpHandler ->
{ request: Request ->
val response = next(request)
response.header("access-control-allow-origin", "https://mangadex.org")
.header("access-control-allow-headers", "*")
.header("access-control-allow-methods", "GET")
.header("timing-allow-origin", "https://mangadex.org")
}
})
private fun isTestImage(chapter: String): Boolean {
return chapter == "1b682e7b24ae7dbdc5064eeeb8e8e353" || chapter == "8172a46adc798f4f4ace6663322a383e"
}
}
}
private fun String.isImageMimetype() = this.toLowerCase().startsWith("image/")
fun getServer(cache: DiskLruCache, database: Database, remoteSettings: RemoteSettings, serverSettings: ServerSettings, statistics: AtomicReference<Statistics>, isHandled: AtomicBoolean): Http4kServer {
val client = Apache4Client(responseBodyMode = BodyMode.Stream, client = HttpClients.custom()
.disableConnectionState()
.setDefaultRequestConfig(
RequestConfig.custom()
.setCookieSpec(CookieSpecs.IGNORE_COOKIES)
.setConnectTimeout(3000)
.setSocketTimeout(3000)
.setConnectionRequestTimeout(3000)
.build())
.setMaxConnTotal(3000)
.setMaxConnPerRoute(3000)
.build())
val imageServer = ImageServer(cache, database, statistics, remoteSettings, client)
return addCommonHeaders()
.then(timeRequest())
.then(setHandled(isHandled))
.then(catchAllHideDetails())
.then(
routes(
"/data/{chapterHash}/{fileName}" bind Method.GET to imageServer.handler(dataSaver = false),
"/data-saver/{chapterHash}/{fileName}" bind Method.GET to imageServer.handler(dataSaver = true),
"/{token}/data/{chapterHash}/{fileName}" bind Method.GET to imageServer.handler(
dataSaver = false,
tokenized = true
),
"/{token}/data-saver/{chapterHash}/{fileName}" bind Method.GET to imageServer.handler(
dataSaver = true,
tokenized = true
)
)
)
.asServer(Netty(remoteSettings.tls!!, serverSettings, statistics))
}
fun setHandled(isHandled: AtomicBoolean): Filter {
return Filter { next: HttpHandler ->
{
isHandled.set(true)
next(it)
}
}
}
fun timeRequest(): Filter {
return Filter { next: HttpHandler ->
{ request: Request ->
val cleanedUri = request.uri.path.let {
if (it.startsWith("/data")) {
it
} else {
it.replaceBefore("/data", "/{token}")
}
}
LOGGER.info { "Request for $cleanedUri received from ${request.source?.address}" }
val start = System.currentTimeMillis()
val response = next(request)
val latency = System.currentTimeMillis() - start
LOGGER.info { "Request for $cleanedUri completed (TTFB) in ${latency}ms" }
response.header("X-Time-Taken", latency.toString())
}
}
}
@ -1,65 +0,0 @@
/*
Mangadex@Home
Copyright (c) 2020, MangaDex Network
This file is part of MangaDex@Home.
MangaDex@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MangaDex@Home is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
/* ktlint-disable no-wildcard-imports */
package mdnet.base.server
import java.time.Instant
import java.util.concurrent.atomic.AtomicReference
import mdnet.base.data.Statistics
import mdnet.base.netty.WebUiNetty
import mdnet.base.settings.WebSettings
import org.http4k.core.Body
import org.http4k.core.Method
import org.http4k.core.Response
import org.http4k.core.Status
import org.http4k.core.then
import org.http4k.filter.ServerFilters
import org.http4k.format.Jackson.auto
import org.http4k.routing.ResourceLoader
import org.http4k.routing.bind
import org.http4k.routing.routes
import org.http4k.routing.singlePageApp
import org.http4k.server.Http4kServer
import org.http4k.server.asServer
fun getUiServer(
webSettings: WebSettings,
statistics: AtomicReference<Statistics>,
statsMap: Map<Instant, Statistics>
): Http4kServer {
val statsMapLens = Body.auto<Map<Instant, Statistics>>().toLens()
return catchAllHideDetails()
.then(ServerFilters.CatchLensFailure)
.then(addCommonHeaders())
.then(
routes(
"/api/stats" bind Method.GET to {
statsMapLens(mapOf(Instant.now() to statistics.get()), Response(Status.OK))
},
"/api/pastStats" bind Method.GET to {
synchronized(statsMap) {
statsMapLens(statsMap, Response(Status.OK))
}
},
singlePageApp(ResourceLoader.Classpath("/webui"))
)
)
.asServer(WebUiNetty(webSettings.uiHostname, webSettings.uiPort))
}
@ -1,86 +0,0 @@
/*
Mangadex@Home
Copyright (c) 2020, MangaDex Network
This file is part of MangaDex@Home.
MangaDex@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MangaDex@Home is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
package mdnet.base.settings
import com.fasterxml.jackson.annotation.JsonUnwrapped
import com.fasterxml.jackson.databind.PropertyNamingStrategy
import com.fasterxml.jackson.databind.annotation.JsonNaming
import dev.afanasev.sekret.Secret
@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy::class)
class ClientSettings(
val maxCacheSizeInMebibytes: Long = 20480,
val webSettings: WebSettings? = null,
val devSettings: DevSettings = DevSettings(isDev = false)
) {
// FIXME: jackson doesn't work with data classes and JsonUnwrapped
// fix this in 2.0 when we can break the settings file
// and remove the `@JsonUnwrapped`
@field:JsonUnwrapped
lateinit var serverSettings: ServerSettings
override fun equals(other: Any?): Boolean {
if (this === other) return true
if (javaClass != other?.javaClass) return false
other as ClientSettings
if (maxCacheSizeInMebibytes != other.maxCacheSizeInMebibytes) return false
if (webSettings != other.webSettings) return false
if (devSettings != other.devSettings) return false
if (serverSettings != other.serverSettings) return false
return true
}
override fun hashCode(): Int {
var result = maxCacheSizeInMebibytes.hashCode()
result = 31 * result + (webSettings?.hashCode() ?: 0)
result = 31 * result + devSettings.hashCode()
result = 31 * result + serverSettings.hashCode()
return result
}
override fun toString(): String {
return "ClientSettings(maxCacheSizeInMebibytes=$maxCacheSizeInMebibytes, webSettings=$webSettings, devSettings=$devSettings, serverSettings=$serverSettings)"
}
}
@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy::class)
data class ServerSettings(
val maxMebibytesPerHour: Long = 0,
val maxKilobitsPerSecond: Long = 0,
val clientHostname: String = "0.0.0.0",
val clientPort: Int = 443,
val clientExternalPort: Int = 0,
@field:Secret val clientSecret: String = "PASTE-YOUR-SECRET-HERE",
val threads: Int = 4,
val gracefulShutdownWaitSeconds: Int = 60
)
@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy::class)
data class WebSettings(
val uiHostname: String = "127.0.0.1",
val uiPort: Int = 8080
)
@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy::class)
data class DevSettings(
val isDev: Boolean = false
)
@ -0,0 +1,362 @@
/*
Mangadex@Home
Copyright (c) 2020, MangaDex Network
This file is part of MangaDex@Home.
MangaDex@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MangaDex@Home is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
/* ktlint-disable no-wildcard-imports */
package mdnet.cache
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.databind.PropertyNamingStrategies
import com.fasterxml.jackson.databind.annotation.JsonNaming
import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper
import com.fasterxml.jackson.module.kotlin.readValue
import mdnet.logging.info
import mdnet.logging.trace
import org.apache.commons.io.file.PathUtils
import org.ktorm.database.Database
import org.ktorm.dsl.*
import org.slf4j.LoggerFactory
import java.io.*
import java.nio.file.*
import java.sql.SQLIntegrityConstraintViolationException
import java.time.Instant
import java.util.UUID
import java.util.concurrent.*
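// Stored alongside each image; `size` is the length of the image payload in bytes (the serialized metadata prefix is not counted).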
@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class)
data class ImageMetadata(
val contentType: String,
val lastModified: String,
val size: Int,
)
data class Image(val data: ImageMetadata, val stream: InputStream)
/**
* A storage for images that handles LRU removal as well as database metadata storage. This cache
will ensure that the storage size (excluding the database) stays below [maxSize] over time,
* but there may be temporary peaks or overages. It will cache the files in [cacheDirectory], and
* store associated metadata in the [database].
*
* @constructor Creates an `ImageStorage`, creating necessary tables in the database.
*/
class ImageStorage(
var maxSize: Long,
val cacheDirectory: Path,
private val database: Database,
autoPrune: Boolean = true
) {
private val tempCacheDirectory = cacheDirectory.resolve("tmp")
private val evictor: ScheduledExecutorService = Executors.newScheduledThreadPool(2)
private val queue = LinkedBlockingQueue<String>(1000)
/**
* Returns the size in bytes of the images stored in this cache, not including metadata.
This value is cached for performance and refreshed by calls to [calculateSize].
*/
@Volatile
var size: Long = 0
private set
init {
Files.createDirectories(tempCacheDirectory)
PathUtils.cleanDirectory(tempCacheDirectory)
// create tables in database
LOGGER.info { "Creating tables if not already present" }
database.useConnection { conn ->
conn.prepareStatement(INIT_TABLE).use {
it.execute()
}
}
calculateSize()
LOGGER.info { "Cache at $size out of $maxSize bytes" }
evictor.scheduleWithFixedDelay(
{
val toUpdate = HashSet<String>()
queue.drainTo(toUpdate)
val now = Instant.now()
LOGGER.info { "Updating LRU times for ${toUpdate.size} entries" }
database.batchUpdate(DbImage) {
for (id in toUpdate) {
item {
set(DbImage.accessed, now)
where {
DbImage.id eq id
}
}
}
}
calculateSize()
},
1, 1, TimeUnit.MINUTES
)
// evict LRU cache every 3 minutes
if (autoPrune) {
evictor.scheduleWithFixedDelay(
{
calculateSize()
pruneImages()
},
0, 3, TimeUnit.MINUTES
)
}
}
/**
* Prunes excess images from the cache in order to meet
* the [maxSize] property and not waste disk space. It is recommended
* to call [calculateSize] beforehand to update [size].
*/
fun pruneImages() {
LOGGER.info { "Cache at $size out of $maxSize bytes" }
// we need to prune the cache now
if (size > maxSize * 0.95) {
val toClear = size - (maxSize * 0.9).toLong()
LOGGER.info { "Evicting at least $toClear bytes from cache" }
val list = database.useConnection { conn ->
conn.prepareStatement(IMAGES_TO_PRUNE).apply {
setLong(1, toClear)
}.use { stmt ->
stmt.executeQuery().let {
val ret = ArrayList<String>()
while (it.next()) {
ret.add(it.getString(1))
}
ret
}
}
}
for (id in list) {
LOGGER.info { "Evicting images $id from cache" }
deleteImage(id)
}
}
}
/**
* Loads the image with the specified [id]. This method will return null
* if the image is not committed, the id does not exist, or an [IOException]
* occurs when loading the image.
*
* @param id the id of the image to load
* @return the [Image] associated with the id or null.
*/
fun loadImage(id: String): Image? {
return try {
// this try catch handles the case where the image has been deleted
// we assume total control over the directory, so this file open
// cannot fail due to any other reason
val stream = try {
Files.newInputStream(getPath(id)).also {
queue.offer(id)
}
} catch (e: IOException) {
// image does not exist or has been deleted
return null
}
val data = JACKSON.readValue<ImageMetadata>(
DataInputStream(stream).readUTF()
)
Image(data, stream)
} catch (e: IOException) {
null
}
}
/**
* Stores an image with the specified [id], which must be at least 3 characters long.
* This method returns a writer that allows one to stream data in.
*
* @param id the id of the image to store
* @param metadata the metadata associated with the image
* @return the [Writer] associated with the id or null.
*/
fun storeImage(id: String, metadata: ImageMetadata): Writer? {
if (id.length < 3) {
throw IllegalArgumentException("id length needs to be at least 3")
}
// don't make high cache utilization worse
if (size >= maxSize * 0.95) {
return null
}
return try {
WriterImpl(id, metadata)
} catch (e: FileAlreadyExistsException) {
null
}
}
private fun deleteImage(id: String) {
database.useTransaction {
val path = getTempPath()
try {
Files.move(
getPath(id),
path,
StandardCopyOption.ATOMIC_MOVE
)
Files.deleteIfExists(path)
} catch (e: IOException) {
LOGGER.trace(e) { "Deleting image failed, ignoring" }
// a failure means the image did not exist
} finally {
database.delete(DbImage) {
DbImage.id eq id
}
}
}
}
/**
* Updates the cached size using data from the database
*/
fun calculateSize() {
size = database.useConnection { conn ->
conn.prepareStatement(SIZE_TAKEN_SQL).use { stmt ->
stmt.executeQuery().let {
it.next()
it.getLong(1)
}
}
}
}
fun close() {
evictor.shutdown()
evictor.awaitTermination(10, TimeUnit.SECONDS)
}
/**
* A writer for storing images and allow incremental streaming
*/
interface Writer {
/**
* The output stream associated with this writer
*/
val stream: OutputStream
/**
Commits the data written to the output stream, provided that exactly [bytes]
bytes (excluding the metadata prefix) were written; otherwise aborts.
@return true if the data was committed, false otherwise
*/
fun commit(bytes: Int): Boolean
/**
Reverts the bytes written to the output stream and undoes any changes,
allowing another writer to try again
*/
fun abort()
}
private inner class WriterImpl(private val id: String, metadata: ImageMetadata) : Writer {
val tempPath = getTempPath()
override val stream: OutputStream
val metadataSize: Int
init {
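// On-disk layout: a DataOutputStream.writeUTF block (a 2-byte length followed by the JSON metadata) and then the raw image bytes.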
stream = Files.newOutputStream(tempPath, StandardOpenOption.CREATE_NEW)
val dataOutputStream = DataOutputStream(stream)
dataOutputStream.writeUTF(
JACKSON.writeValueAsString(metadata)
)
metadataSize = dataOutputStream.size()
// Don't close the `dataOutputStream` because
// we need to write to the underlying stream
}
override fun commit(bytes: Int): Boolean {
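// A commit is only valid if the file holds exactly the metadata prefix plus `bytes` image bytes; anything else indicates a truncated write.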
stream.flush()
stream.close()
if (Files.size(tempPath).toInt() != metadataSize + bytes) {
abort()
return false
}
Files.createDirectories(getPath(id).parent)
try {
database.insert(DbImage) {
set(DbImage.id, id)
set(DbImage.accessed, Instant.now())
set(DbImage.size, metadataSize + bytes)
}
} catch (e: SQLIntegrityConstraintViolationException) {
// someone got to us before this (TOCTOU)
abort()
return false
}
Files.move(
tempPath,
getPath(id),
StandardCopyOption.ATOMIC_MOVE
)
return true
}
override fun abort() {
stream.flush()
stream.close()
Files.deleteIfExists(tempPath)
// remove the database entry if it somehow exists
// this really shouldn't happen but just in case
database.delete(DbImage) {
DbImage.id eq id
}
}
}
private fun getPath(id: String): Path {
return cacheDirectory.resolve(id.toCachePath())
}
private fun getTempPath(): Path {
return tempCacheDirectory.resolve(UUID.randomUUID().toString())
}
companion object {
private val LOGGER = LoggerFactory.getLogger(ImageStorage::class.java)
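// Shards an id across three single-character directories taken from its first three characters (reversed), e.g. a hypothetical id "abcdef" is stored at "c/b/a/abcdef".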
private fun String.toCachePath() =
this.substring(0, 3).replace(".(?!$)".toRegex(), "$0 ").split(" ".toRegex()).reversed()
.plus(this).joinToString(File.separator)
private val JACKSON: ObjectMapper = jacksonObjectMapper()
}
}
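For orientation, a minimal usage sketch of the new ImageStorage API (illustrative only: the directory, JDBC URL, id, and byte counts below are made up, and error handling is omitted):

import org.ktorm.database.Database
import java.nio.file.Paths

fun imageStorageDemo() {
    val storage = ImageStorage(
        maxSize = 1024L * 1024L * 1024L, // 1 GiB cap; pruned back towards 90% once 95% full
        cacheDirectory = Paths.get("images"),
        database = Database.connect("jdbc:h2:./data", driver = "org.h2.Driver")
    )
    val bytes = ByteArray(1234) // stand-in for a real image
    // storeImage returns null if the id is already being written or the cache is nearly full
    storage.storeImage("0a1b2c3d", ImageMetadata("image/png", "Wed, 21 Oct 2020 07:28:00 GMT", bytes.size))?.let { writer ->
        writer.stream.write(bytes)
        writer.commit(bytes.size) // true only if exactly bytes.size image bytes were written
    }
    storage.loadImage("0a1b2c3d")?.stream?.use { it.readBytes() } // null if absent or uncommitted
    storage.close()
}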
src/main/kotlin/mdnet/cache/metadata.kt (new file, 50 lines)
@ -0,0 +1,50 @@
/*
Mangadex@Home
Copyright (c) 2020, MangaDex Network
This file is part of MangaDex@Home.
MangaDex@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MangaDex@Home is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
/* ktlint-disable no-wildcard-imports */
package mdnet.cache
import org.ktorm.schema.*
import org.ktorm.schema.Table
object DbImage : Table<Nothing>("IMAGES") {
val id = varchar("id").primaryKey()
val accessed = timestamp("accessed")
val size = int("size")
}
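// `disk_size` is a computed column that rounds each entry up to a whole 4 KiB block, approximating the image's true on-disk footprint.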
const val INIT_TABLE = """
create table if not exists Images(
id varchar primary key not null,
size integer not null,
accessed timestamp not null default CURRENT_TIMESTAMP,
disk_size integer as ((size + 4095) / 4096 * 4096)
);
create index if not exists Images_lastAccessed_idx on Images(accessed, disk_size, id);
"""
const val SIZE_TAKEN_SQL = "select sum(disk_size) from Images"
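// Selects the least recently accessed images, oldest first, whose running total of disk_size (over the preceding rows) stays within the number of bytes to evict.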
const val IMAGES_TO_PRUNE = """
select id from (
select id, sum(disk_size)
OVER (order by accessed rows unbounded preceding exclude current row)
as RunningTotal from Images
) as X
WHERE coalesce(X.RunningTotal, 0) <= ?;
"""
@ -16,17 +16,12 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
-package mdnet.base.data
+package mdnet.data
-import com.fasterxml.jackson.databind.PropertyNamingStrategy
+import com.fasterxml.jackson.databind.PropertyNamingStrategies
import com.fasterxml.jackson.databind.annotation.JsonNaming
-@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy::class)
+@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class)
data class Statistics(
-val requestsServed: Int = 0,
-val cacheHits: Int = 0,
-val cacheMisses: Int = 0,
-val browserCached: Int = 0,
val bytesSent: Long = 0,
-val bytesOnDisk: Long = 0
)
@ -16,11 +16,11 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
-package mdnet.base.data
+package mdnet.data
-import com.fasterxml.jackson.databind.PropertyNamingStrategy
+import com.fasterxml.jackson.databind.PropertyNamingStrategies
import com.fasterxml.jackson.databind.annotation.JsonNaming
import java.time.OffsetDateTime
-@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy::class)
+@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class)
data class Token(val expires: OffsetDateTime, val ip: String, val hash: String, val clientId: String)
@ -1,4 +1,22 @@
-package mdnet.base
+/*
+Mangadex@Home
+Copyright (c) 2020, MangaDex Network
+This file is part of MangaDex@Home.
+MangaDex@Home is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+MangaDex@Home is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+You should have received a copy of the GNU General Public License
+along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
+*/
+package mdnet.logging
import org.slf4j.Logger
@ -0,0 +1,53 @@
/*
Mangadex@Home
Copyright (c) 2020, MangaDex Network
This file is part of MangaDex@Home.
MangaDex@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MangaDex@Home is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
/* ktlint-disable no-wildcard-imports */
package mdnet.metrics
import io.micrometer.core.instrument.Tag
import io.micrometer.core.instrument.binder.jvm.DiskSpaceMetrics
import io.micrometer.core.instrument.binder.jvm.JvmGcMetrics
import io.micrometer.core.instrument.binder.jvm.JvmHeapPressureMetrics
import io.micrometer.core.instrument.binder.jvm.JvmMemoryMetrics
import io.micrometer.core.instrument.binder.jvm.JvmThreadMetrics
import io.micrometer.core.instrument.binder.logging.LogbackMetrics
import io.micrometer.core.instrument.binder.system.FileDescriptorMetrics
import io.micrometer.core.instrument.binder.system.ProcessorMetrics
import io.micrometer.core.instrument.binder.system.UptimeMetrics
import io.micrometer.prometheus.PrometheusMeterRegistry
import mdnet.BuildInfo
import java.nio.file.Path
class DefaultMicrometerMetrics(registry: PrometheusMeterRegistry, cacheDirectory: Path) {
init {
UptimeMetrics(
mutableListOf(
Tag.of("version", BuildInfo.VERSION)
)
).bindTo(registry)
JvmMemoryMetrics().bindTo(registry)
JvmGcMetrics().bindTo(registry)
ProcessorMetrics().bindTo(registry)
JvmThreadMetrics().bindTo(registry)
JvmHeapPressureMetrics().bindTo(registry)
FileDescriptorMetrics().bindTo(registry)
LogbackMetrics().bindTo(registry)
DiskSpaceMetrics(cacheDirectory.toFile()).bindTo(registry)
}
}
@ -0,0 +1,140 @@
/*
Mangadex@Home
Copyright (c) 2020, MangaDex Network
This file is part of MangaDex@Home.
MangaDex@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MangaDex@Home is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
/* ktlint-disable no-wildcard-imports */
package mdnet.metrics
import com.maxmind.db.CHMCache
import com.maxmind.geoip2.DatabaseReader
import com.maxmind.geoip2.exception.GeoIp2Exception
import io.micrometer.prometheus.PrometheusMeterRegistry
import mdnet.logging.debug
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream
import org.apache.commons.io.IOUtils
import org.http4k.core.Filter
import org.http4k.core.HttpHandler
import org.http4k.core.Method
import org.http4k.core.Request
import org.http4k.core.Status
import org.http4k.filter.gunzippedStream
import org.slf4j.Logger
import org.slf4j.LoggerFactory
import java.net.InetAddress
import java.net.UnknownHostException
import java.nio.file.Files
class GeoIpMetricsFilter(
private val databaseReader: DatabaseReader?,
private val registry: PrometheusMeterRegistry
) : Filter {
override fun invoke(next: HttpHandler): HttpHandler {
return {
if (databaseReader != null && (it.uri.path != "/prometheus")) {
inspectAndRecordSourceCountry(it)
}
next(it)
}
}
private fun inspectAndRecordSourceCountry(request: Request) {
val sourceIp =
request.headerValues("Forwarded").firstOrNull() // try Forwarded (rare but standard)
?: request.headerValues("X-Forwarded-For").firstOrNull() // X-Forwarded-For (common but technically wrong)
?: request.source?.address // source (in case of no proxying, or with proxy-protocol)
sourceIp.apply {
try {
val inetAddress = InetAddress.getByName(sourceIp)
if (!inetAddress.isLoopbackAddress && !inetAddress.isAnyLocalAddress) {
val country = databaseReader!!.country(inetAddress)
recordCountry(country.country.isoCode)
}
} catch (e: GeoIp2Exception) {
// do not disclose ip here, for privacy of logs
LOGGER.warn("Cannot resolve the country of the request's IP!")
} catch (e: UnknownHostException) {
LOGGER.warn("Cannot resolve source IP of the request!")
}
}
}
private fun recordCountry(code: String) {
registry.counter(
"requests_country_counts",
"country", code
).increment()
}
companion object {
private val LOGGER: Logger = LoggerFactory.getLogger(GeoIpMetricsFilter::class.java)
}
}
class GeoIpMetricsFilterBuilder(
private val enableGeoIp: Boolean,
private val license: String,
private val registry: PrometheusMeterRegistry,
private val client: HttpHandler
) {
fun build(): GeoIpMetricsFilter {
return if (enableGeoIp) {
LOGGER.info("GeoIp initialising")
val databaseReader = initDatabase()
LOGGER.info("GeoIp initialised")
GeoIpMetricsFilter(databaseReader, registry)
} else {
GeoIpMetricsFilter(null, registry)
}
}
private fun initDatabase(): DatabaseReader {
val databaseFileDir = Files.createTempDirectory("mangadex-geoip")
val databaseFile = Files.createTempFile(databaseFileDir, "geoip2_country", ".mmdb")
val geoIpDatabaseUri = GEOIP2_COUNTRY_URI_FORMAT.format(license)
val response = client(Request(Method.GET, geoIpDatabaseUri))
if (response.status != Status.OK) {
throw IllegalStateException("Couldn't download GeoIP 2 database (http status: ${response.status})")
}
response.use {
val archiveStream = TarArchiveInputStream(it.body.gunzippedStream().stream)
var entry = archiveStream.nextTarEntry
while (!entry.name.endsWith(".mmdb")) {
LOGGER.debug { "Skipped non-database file: ${entry.name}" }
entry = archiveStream.nextTarEntry
}
// reads only the current entry to its end
val dbBytes = IOUtils.toByteArray(archiveStream)
Files.write(databaseFile, dbBytes)
}
return DatabaseReader
.Builder(databaseFile.toFile())
.withCache(CHMCache())
.build()
}
companion object {
private val LOGGER = LoggerFactory.getLogger(GeoIpMetricsFilterBuilder::class.java)
private const val GEOIP2_COUNTRY_URI_FORMAT: String =
"https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key=%s&suffix=tar.gz"
}
}
@ -16,24 +16,21 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
-package mdnet.base.data
+/* ktlint-disable no-wildcard-imports */
+package mdnet.metrics
-import org.jetbrains.exposed.dao.Entity
-import org.jetbrains.exposed.dao.EntityClass
-import org.jetbrains.exposed.dao.id.EntityID
-import org.jetbrains.exposed.dao.id.IdTable
+import org.http4k.core.HttpTransaction
+import org.http4k.filter.HttpTransactionLabeler
-object ImageData : IdTable<String>() {
-// md5 hex strings are 32 characters long
-override val id = varchar("id", 32).entityId()
-override val primaryKey = PrimaryKey(id)
-val contentType = varchar("contentType", 20)
-val lastModified = varchar("lastModified", 29)
-}
-class ImageDatum(id: EntityID<String>) : Entity<String>(id) {
-companion object : EntityClass<String, ImageDatum>(ImageData)
-var contentType by ImageData.contentType
-var lastModified by ImageData.lastModified
-}
+class PostTransactionLabeler : HttpTransactionLabeler {
+override fun invoke(transaction: HttpTransaction): HttpTransaction {
+return transaction.copy(
+labels = mapOf(
+"method" to transaction.request.method.toString(),
+"status" to transaction.response.status.code.toString(),
+"path" to transaction.routingGroup,
+"cache" to (transaction.response.header("X-Cache") ?: "MISS").toUpperCase()
+)
+)
+}
+}

View file

@ -17,7 +17,7 @@ You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
/* ktlint-disable no-wildcard-imports */
-package mdnet.base.netty
+package mdnet.netty
import io.netty.bootstrap.ServerBootstrap
import io.netty.channel.*
@ -34,6 +34,18 @@ import io.netty.handler.timeout.WriteTimeoutException
import io.netty.handler.timeout.WriteTimeoutHandler
import io.netty.handler.traffic.GlobalTrafficShapingHandler
import io.netty.handler.traffic.TrafficCounter
+import io.netty.util.concurrent.DefaultEventExecutorGroup
+import mdnet.Constants
+import mdnet.data.Statistics
+import mdnet.logging.info
+import mdnet.logging.trace
+import mdnet.settings.ServerSettings
+import mdnet.settings.TlsCert
+import org.http4k.core.HttpHandler
+import org.http4k.server.Http4kChannelHandler
+import org.http4k.server.Http4kServer
+import org.http4k.server.ServerConfig
+import org.slf4j.LoggerFactory
import java.io.ByteArrayInputStream
import java.io.IOException
import java.io.InputStream
@ -44,29 +56,19 @@ import java.security.cert.CertificateFactory
import java.security.cert.X509Certificate
import java.util.concurrent.atomic.AtomicReference
import javax.net.ssl.SSLException
-import mdnet.base.Constants
-import mdnet.base.data.Statistics
-import mdnet.base.info
-import mdnet.base.settings.ServerSettings
-import mdnet.base.settings.TlsCert
-import mdnet.base.trace
-import org.http4k.core.HttpHandler
-import org.http4k.server.Http4kChannelHandler
-import org.http4k.server.Http4kServer
-import org.http4k.server.ServerConfig
-import org.slf4j.LoggerFactory
-private val LOGGER = LoggerFactory.getLogger("AppNetty")
class Netty(private val tls: TlsCert, private val serverSettings: ServerSettings, private val statistics: AtomicReference<Statistics>) : ServerConfig {
override fun toServer(httpHandler: HttpHandler): Http4kServer = object : Http4kServer {
-private val masterGroup = NioEventLoopGroup(serverSettings.threads)
-private val workerGroup = NioEventLoopGroup(serverSettings.threads)
+private val masterGroup = NioEventLoopGroup()
+private val workerGroup = NioEventLoopGroup()
+private val executor = DefaultEventExecutorGroup(serverSettings.threads)
private lateinit var closeFuture: ChannelFuture
private lateinit var address: InetSocketAddress
private val burstLimiter = object : GlobalTrafficShapingHandler(
-workerGroup, serverSettings.maxKilobitsPerSecond * 1000L / 8L, 0, 50) {
+workerGroup, serverSettings.maxKilobitsPerSecond * 1000L / 8L, 0, 50
+) {
override fun doAccounting(counter: TrafficCounter) {
statistics.getAndUpdate {
it.copy(bytesSent = it.bytesSent + counter.cumulativeWrittenBytes())
@ -80,33 +82,36 @@ class Netty(private val tls: TlsCert, private val serverSettings: ServerSettings
val certs = getX509Certs(tls.certificate)
val sslContext = SslContextBuilder
.forServer(getPrivateKey(tls.privateKey), certs)
.protocols("TLSv1.3", "TLSv1.2", "TLSv1.1", "TLSv1")
.build()
val bootstrap = ServerBootstrap()
bootstrap.group(masterGroup, workerGroup)
.channelFactory(ChannelFactory<ServerChannel> { NioServerSocketChannel() })
.childHandler(object : ChannelInitializer<SocketChannel>() {
public override fun initChannel(ch: SocketChannel) {
ch.pipeline().addLast("ssl", sslContext.newHandler(ch.alloc()))
ch.pipeline().addLast("codec", HttpServerCodec())
ch.pipeline().addLast("keepAlive", HttpServerKeepAliveHandler())
ch.pipeline().addLast("aggregator", HttpObjectAggregator(65536))
ch.pipeline().addLast("burstLimiter", burstLimiter)
ch.pipeline().addLast("readTimeoutHandler", ReadTimeoutHandler(Constants.MAX_READ_TIME_SECONDS))
ch.pipeline().addLast("writeTimeoutHandler", WriteTimeoutHandler(Constants.MAX_WRITE_TIME_SECONDS))
ch.pipeline().addLast("streamer", ChunkedWriteHandler())
-ch.pipeline().addLast("handler", Http4kChannelHandler(httpHandler))
+ch.pipeline().addLast(executor, "handler", Http4kChannelHandler(httpHandler))
-ch.pipeline().addLast("exceptions", object : ChannelInboundHandlerAdapter() {
+ch.pipeline().addLast(
+"exceptions",
+object : ChannelInboundHandlerAdapter() {
override fun exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable) {
if (cause is SSLException || (cause is DecoderException && cause.cause is SSLException)) {
LOGGER.trace { "Ignored invalid SSL connection" }
+LOGGER.trace(cause) { "Exception in pipeline" }
} else if (cause is IOException || cause is SocketException) {
LOGGER.info { "User (downloader) abruptly closed the connection" }
LOGGER.trace(cause) { "Exception in pipeline" }
@ -114,13 +119,14 @@ class Netty(private val tls: TlsCert, private val serverSettings: ServerSettings
ctx.fireExceptionCaught(cause)
}
}
-})
-}
-})
-.option(ChannelOption.SO_BACKLOG, 1000)
-.childOption(ChannelOption.SO_KEEPALIVE, true)
+}
+)
+}
+})
+.option(ChannelOption.SO_BACKLOG, 1000)
+.childOption(ChannelOption.SO_KEEPALIVE, true)
-val channel = bootstrap.bind(InetSocketAddress(serverSettings.clientHostname, serverSettings.clientPort)).sync().channel()
+val channel = bootstrap.bind(InetSocketAddress(serverSettings.hostname, serverSettings.port)).sync().channel()
address = channel.localAddress() as InetSocketAddress
closeFuture = channel.closeFuture()
}
@ -129,9 +135,14 @@ class Netty(private val tls: TlsCert, private val serverSettings: ServerSettings
closeFuture.cancel(false)
workerGroup.shutdownGracefully()
masterGroup.shutdownGracefully()
+executor.shutdownGracefully()
}
-override fun port(): Int = if (serverSettings.clientPort > 0) serverSettings.clientPort else address.port
+override fun port(): Int = if (serverSettings.port > 0) serverSettings.port else address.port
}
+companion object {
+private val LOGGER = LoggerFactory.getLogger(Netty::class.java)
+}
}
}
@ -19,7 +19,7 @@
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
-package mdnet.base.netty
+package mdnet.netty
import java.io.ByteArrayOutputStream
import java.security.KeyFactory
@ -35,7 +35,8 @@ private const val PKCS_8_PEM_FOOTER = "-----END PRIVATE KEY-----"
fun loadKey(keyDataString: String): PrivateKey? {
if (keyDataString.contains(PKCS_1_PEM_HEADER)) {
val fixedString = keyDataString.replace(PKCS_1_PEM_HEADER, "").replace(
-PKCS_1_PEM_FOOTER, "")
+PKCS_1_PEM_FOOTER, ""
+)
return readPkcs1PrivateKey(
base64Decode(
fixedString
@ -44,7 +45,8 @@ fun loadKey(keyDataString: String): PrivateKey? {
}
if (keyDataString.contains(PKCS_8_PEM_HEADER)) {
val fixedString = keyDataString.replace(PKCS_8_PEM_HEADER, "").replace(
-PKCS_8_PEM_FOOTER, "")
+PKCS_8_PEM_FOOTER, ""
+)
return readPkcs1PrivateKey(
base64Decode(
fixedString
@ -69,10 +71,10 @@ private fun readPkcs1PrivateKey(pkcs1Bytes: ByteArray): PrivateKey? {
val pkcs1Length = pkcs1Bytes.size
val totalLength = pkcs1Length + 22
val pkcs8Header = byteArrayOf(
0x30, 0x82.toByte(), (totalLength shr 8 and 0xff).toByte(), (totalLength and 0xff).toByte(),
0x2, 0x1, 0x0, // Integer (0)
0x30, 0xD, 0x6, 0x9, 0x2A, 0x86.toByte(), 0x48, 0x86.toByte(), 0xF7.toByte(), 0xD, 0x1, 0x1, 0x1, 0x5, 0x0,
0x4, 0x82.toByte(), (pkcs1Length shr 8 and 0xff).toByte(), (pkcs1Length and 0xff).toByte()
)
val pkcs8bytes = join(pkcs8Header, pkcs1Bytes)
return readPkcs8PrivateKey(pkcs8bytes)
@ -86,21 +88,22 @@ private fun join(byteArray1: ByteArray, byteArray2: ByteArray): ByteArray {
}
private val b64ints = intArrayOf(
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, -1, 63, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1, -1, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)
+-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+)
private fun base64Decode(value: String): ByteArray {
val valueBytes = value.toByteArray()
@ -0,0 +1,400 @@
/*
Mangadex@Home
Copyright (c) 2020, MangaDex Network
This file is part of MangaDex@Home.
MangaDex@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MangaDex@Home is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
/* ktlint-disable no-wildcard-imports */
package mdnet.server
import com.fasterxml.jackson.core.JsonProcessingException
import com.fasterxml.jackson.databind.DeserializationFeature
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule
import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper
import com.fasterxml.jackson.module.kotlin.readValue
import io.micrometer.core.instrument.FunctionCounter
import io.micrometer.core.instrument.Timer
import io.micrometer.prometheus.PrometheusMeterRegistry
import mdnet.Constants
import mdnet.cache.CachingInputStream
import mdnet.cache.Image
import mdnet.cache.ImageMetadata
import mdnet.cache.ImageStorage
import mdnet.data.Statistics
import mdnet.data.Token
import mdnet.logging.info
import mdnet.logging.trace
import mdnet.logging.warn
import mdnet.metrics.GeoIpMetricsFilterBuilder
import mdnet.metrics.PostTransactionLabeler
import mdnet.netty.Netty
import mdnet.security.TweetNaclFast
import mdnet.settings.MetricsSettings
import mdnet.settings.RemoteSettings
import mdnet.settings.ServerSettings
import org.apache.hc.client5.http.config.RequestConfig
import org.apache.hc.client5.http.cookie.StandardCookieSpec
import org.apache.hc.client5.http.impl.classic.HttpClients
import org.apache.hc.client5.http.impl.io.PoolingHttpClientConnectionManagerBuilder
import org.apache.hc.core5.util.Timeout
import org.http4k.client.ApacheClient
import org.http4k.core.*
import org.http4k.filter.CachingFilters
import org.http4k.filter.ClientFilters
import org.http4k.filter.MicrometerMetrics
import org.http4k.filter.ServerFilters
import org.http4k.lens.LensFailure
import org.http4k.lens.Path
import org.http4k.routing.bind
import org.http4k.routing.routes
import org.http4k.server.Http4kServer
import org.http4k.server.asServer
import org.slf4j.LoggerFactory
import java.io.BufferedInputStream
import java.io.BufferedOutputStream
import java.io.InputStream
import java.time.Clock
import java.time.OffsetDateTime
import java.util.*
import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicReference
private val LOGGER = LoggerFactory.getLogger(ImageServer::class.java)
private val JACKSON: ObjectMapper = jacksonObjectMapper()
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
.registerModule(JavaTimeModule())
class ImageServer(
private val storage: ImageStorage,
private val statistics: AtomicReference<Statistics>,
private val client: HttpHandler,
registry: PrometheusMeterRegistry
) {
private val executor = Executors.newCachedThreadPool()
private val cacheLookupTimer = Timer
.builder("cache_lookup")
.publishPercentiles(0.5, 0.75, 0.9, 0.99)
.register(registry)
// This is part of the ImageServer, and it expects `chapterHash` and `fileName` path segments.
fun handler(dataSaver: Boolean): HttpHandler = baseHandler().then { request ->
val chapterHash = Path.of("chapterHash")(request)
val fileName = Path.of("fileName")(request)
val sanitizedUri = if (dataSaver) {
"/data-saver"
} else {
"/data"
} + "/$chapterHash/$fileName"
val imageId = if (dataSaver) {
md5Bytes("saver$chapterHash.$fileName")
} else {
md5Bytes("$chapterHash.$fileName")
}.let {
printHexString(it)
}
val image: Image? = cacheLookupTimer.recordCallable { storage.loadImage(imageId) }
if (image != null) {
request.handleCacheHit(sanitizedUri, image)
} else {
request.handleCacheMiss(sanitizedUri, imageId)
}
}
private fun Request.handleCacheHit(sanitizedUri: String, image: Image): Response {
// our files never change, so it's safe to use the browser cache
return if (this.header("If-Modified-Since") != null) {
LOGGER.info { "Request for $sanitizedUri cached by browser" }
val lastModified = image.data.lastModified
Response(Status.NOT_MODIFIED)
.header("Last-Modified", lastModified)
} else {
LOGGER.info { "Request for $sanitizedUri hit cache" }
respondWithImage(
BufferedInputStream(image.stream),
image.data.size, image.data.contentType, image.data.lastModified,
true
)
}
}
private fun Request.handleCacheMiss(sanitizedUri: String, imageId: String): Response {
LOGGER.info { "Request for $sanitizedUri missed cache" }
val mdResponse = client(Request(Method.GET, sanitizedUri))
if (mdResponse.status != Status.OK) {
LOGGER.trace { "Upstream query for $sanitizedUri errored with status ${mdResponse.status}" }
mdResponse.close()
return Response(mdResponse.status)
}
val contentType = mdResponse.header("Content-Type")!!
val contentLength = mdResponse.header("Content-Length")?.toInt()
val lastModified = mdResponse.header("Last-Modified")
if (!contentType.isImageMimetype()) {
LOGGER.warn { "Upstream query for $sanitizedUri returned bad mimetype $contentType" }
mdResponse.close()
return Response(Status.INTERNAL_SERVER_ERROR)
}
// bad upstream responses mean we can't cache, so bail
if (contentLength == null || lastModified == null) {
LOGGER.trace { "Request for $sanitizedUri is being served due to upstream issues" }
return respondWithImage(mdResponse.body.stream, contentLength, contentType, lastModified, false)
}
LOGGER.trace { "Upstream query for $sanitizedUri succeeded" }
val writer = storage.storeImage(imageId, ImageMetadata(contentType, lastModified, contentLength))
// A null writer means that this file is being written to
// concurrently so we skip the cache process
return if (writer != null) {
LOGGER.trace { "Request for $sanitizedUri is being cached and served" }
val tee = CachingInputStream(
mdResponse.body.stream,
executor, BufferedOutputStream(writer.stream),
) {
try {
if (writer.commit(contentLength)) {
LOGGER.info { "Cache download for $sanitizedUri committed" }
} else {
LOGGER.warn { "Cache download for $sanitizedUri aborted" }
}
} catch (e: Exception) {
LOGGER.warn(e) { "Cache go/no go for $sanitizedUri failed" }
}
}
respondWithImage(tee, contentLength, contentType, lastModified, false)
} else {
LOGGER.trace { "Request for $sanitizedUri is being served" }
respondWithImage(mdResponse.body.stream, contentLength, contentType, lastModified, false)
}
}
private fun respondWithImage(input: InputStream, length: Int?, type: String, lastModified: String?, cached: Boolean): Response =
Response(Status.OK)
.header("Content-Type", type)
.header("X-Content-Type-Options", "nosniff")
.let {
if (length != null) {
it.body(input, length.toLong()).header("Content-Length", length.toString())
} else {
it.body(input).header("Transfer-Encoding", "chunked")
}
}
.let {
if (lastModified != null) {
it.header("Last-Modified", lastModified)
} else {
it
}
}
.header("X-Cache", if (cached) "HIT" else "MISS")
companion object {
private fun baseHandler(): Filter =
CachingFilters.Response.MaxAge(Clock.systemUTC(), Constants.MAX_AGE_CACHE)
.then { next: HttpHandler ->
{ request: Request ->
val response = next(request)
response.header("access-control-allow-origin", "https://mangadex.org")
.header("access-control-allow-headers", "*")
.header("access-control-allow-methods", "GET")
.header("timing-allow-origin", "https://mangadex.org")
}
}
}
}
private fun String.isImageMimetype() = this.toLowerCase().startsWith("image/")
fun getServer(
storage: ImageStorage,
remoteSettings: RemoteSettings,
serverSettings: ServerSettings,
statistics: AtomicReference<Statistics>,
metricsSettings: MetricsSettings,
registry: PrometheusMeterRegistry,
): Http4kServer {
val apache = ApacheClient(
responseBodyMode = BodyMode.Stream,
client = HttpClients.custom()
.disableConnectionState()
.setDefaultRequestConfig(
RequestConfig.custom()
.setCookieSpec(StandardCookieSpec.IGNORE)
.setConnectTimeout(Timeout.ofSeconds(2))
.setResponseTimeout(Timeout.ofSeconds(2))
.setConnectionRequestTimeout(Timeout.ofSeconds(1))
.build()
)
.setConnectionManager(
PoolingHttpClientConnectionManagerBuilder.create()
.setMaxConnTotal(3000)
.setMaxConnPerRoute(100)
.build()
)
.build()
)
val client =
ClientFilters.SetBaseUriFrom(remoteSettings.imageServer)
.then(ClientFilters.MicrometerMetrics.RequestCounter(registry))
.then(ClientFilters.MicrometerMetrics.RequestTimer(registry))
.then(apache)
val imageServer = ImageServer(
storage = storage,
statistics = statistics,
client = client,
registry = registry
)
FunctionCounter.builder(
"client_sent_bytes",
statistics,
{ it.get().bytesSent.toDouble() }
).register(registry)
val verifier = tokenVerifier(
tokenKey = remoteSettings.tokenKey,
shouldVerify = { chapter, _ ->
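// The two well-known test chapters are always served without token verification.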
chapter != "1b682e7b24ae7dbdc5064eeeb8e8e353" && chapter != "8172a46adc798f4f4ace6663322a383e"
}
)
return addCommonHeaders()
.then(timeRequest())
.then(catchAllHideDetails())
.then(
routes(
"/{token}/data/{chapterHash}/{fileName}" bind Method.GET to verifier.then(
imageServer.handler(
dataSaver = false,
)
),
"/{token}/data-saver/{chapterHash}/{fileName}" bind Method.GET to verifier.then(
imageServer.handler(
dataSaver = true,
)
),
"/data/{chapterHash}/{fileName}" bind Method.GET to verifier.then(
imageServer.handler(
dataSaver = false,
)
),
"/data-saver/{chapterHash}/{fileName}" bind Method.GET to verifier.then(
imageServer.handler(
dataSaver = true,
)
),
"/prometheus" bind Method.GET to {
Response(Status.OK).body(registry.scrape())
}
).withFilter(
ServerFilters.MicrometerMetrics.RequestTimer(registry, labeler = PostTransactionLabeler())
).withFilter(
GeoIpMetricsFilterBuilder(metricsSettings.enableGeoip, metricsSettings.geoipLicenseKey, registry, apache).build()
)
)
.asServer(Netty(remoteSettings.tls!!, serverSettings, statistics))
}
fun timeRequest(): Filter {
return Filter { next: HttpHandler ->
{ request: Request ->
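// Strip everything before "/data" down to "/{token}" so request tokens never appear in the logs.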
val cleanedUri = request.uri.path.replaceBefore("/data", "/{token}")
LOGGER.info { "Request for $cleanedUri received" }
val start = System.currentTimeMillis()
val response = next(request)
val latency = System.currentTimeMillis() - start
LOGGER.info { "Request for $cleanedUri completed (TTFB) in ${latency}ms" }
response
}
}
}
fun tokenVerifier(tokenKey: ByteArray, shouldVerify: (String, String) -> Boolean): Filter {
val box = TweetNaclFast.SecretBox(tokenKey)
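// A token is the URL-safe base64 of a 24-byte nonce followed by a NaCl secretbox ciphertext whose plaintext is a JSON-encoded Token.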
return Filter { next ->
then@{
val chapterHash = Path.of("chapterHash")(it)
val fileName = Path.of("fileName")(it)
if (shouldVerify(chapterHash, fileName)) {
val cleanedUri = it.uri.path.replaceBefore("/data", "/{token}")
val tokenArr = try {
val toDecode = try {
Path.of("token")(it)
} catch (e: LensFailure) {
LOGGER.info(e) { "Request for $cleanedUri rejected for missing token" }
return@then Response(Status.FORBIDDEN).body("Token is missing")
}
Base64.getUrlDecoder().decode(toDecode)
} catch (e: IllegalArgumentException) {
LOGGER.info(e) { "Request for $cleanedUri rejected for non-base64 token" }
return@then Response(Status.FORBIDDEN).body("Token is invalid base64")
}
if (tokenArr.size < 24) {
LOGGER.info { "Request for $cleanedUri rejected for invalid token" }
return@then Response(Status.FORBIDDEN)
}
val token = try {
JACKSON.readValue<Token>(
box.open(tokenArr.sliceArray(24 until tokenArr.size), tokenArr.sliceArray(0 until 24)).apply {
if (this == null) {
LOGGER.info { "Request for $cleanedUri rejected for invalid token" }
return@then Response(Status.FORBIDDEN)
}
}
)
} catch (e: JsonProcessingException) {
LOGGER.info(e) { "Request for $cleanedUri rejected for invalid token" }
return@then Response(Status.FORBIDDEN).body("Token is invalid")
}
if (OffsetDateTime.now().isAfter(token.expires)) {
LOGGER.info { "Request for $cleanedUri rejected for expired token" }
return@then Response(Status.GONE).body("Token has expired")
}
if (token.hash != chapterHash) {
LOGGER.info { "Request for $cleanedUri rejected for inapplicable token" }
return@then Response(Status.FORBIDDEN).body("Token is inapplicable for the image")
}
}
return@then next(it)
}
}
}
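For completeness, the inverse operation (performed by the backend, not by this client) can be sketched roughly as follows. This assumes the vendored TweetNaclFast.SecretBox exposes a box(message, nonce) counterpart to the open(...) call used above, and that the function sits in this file so it can reuse JACKSON and Token; the function name and field values are illustrative only:

fun mintTestToken(tokenKey: ByteArray, chapterHash: String): String {
    val box = TweetNaclFast.SecretBox(tokenKey)
    val nonce = ByteArray(24).also { java.security.SecureRandom().nextBytes(it) }
    val payload = JACKSON.writeValueAsBytes(
        Token(expires = OffsetDateTime.now().plusMinutes(5), ip = "127.0.0.1", hash = chapterHash, clientId = "test")
    )
    // 24-byte nonce first, then the secretbox ciphertext, matching the slicing in tokenVerifier
    return Base64.getUrlEncoder().withoutPadding().encodeToString(nonce + box.box(payload, nonce))
}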
@ -17,21 +17,21 @@ You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
/* ktlint-disable no-wildcard-imports */
-package mdnet.base.server
+package mdnet.server
-import java.time.ZoneOffset
-import java.time.ZonedDateTime
-import java.time.format.DateTimeFormatter
-import java.util.*
import mdnet.BuildInfo
-import mdnet.base.Constants
-import mdnet.base.warn
+import mdnet.Constants
+import mdnet.logging.warn
import org.http4k.core.Filter
import org.http4k.core.HttpHandler
import org.http4k.core.Request
import org.http4k.core.Response
import org.http4k.core.Status
import org.slf4j.LoggerFactory
+import java.time.ZoneOffset
+import java.time.ZonedDateTime
+import java.time.format.DateTimeFormatter
+import java.util.*
private val HTTP_TIME_FORMATTER = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss O", Locale.ENGLISH)
private val LOGGER = LoggerFactory.getLogger("Application")
@ -41,7 +41,7 @@ fun addCommonHeaders(): Filter {
{ request: Request ->
val response = next(request)
response.header("Date", HTTP_TIME_FORMATTER.format(ZonedDateTime.now(ZoneOffset.UTC)))
-.header("Server", "Mangadex@Home Node ${BuildInfo.VERSION} (${Constants.CLIENT_BUILD})")
+.header("Server", "MangaDex@Home Node ${BuildInfo.VERSION} (${Constants.CLIENT_BUILD})")
}
}
}
@ -17,21 +17,13 @@ You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
/* ktlint-disable no-wildcard-imports */
-package mdnet.base.server
+package mdnet.server
import java.security.MessageDigest
-import javax.crypto.Cipher
-import javax.crypto.spec.SecretKeySpec
-fun getRc4(key: ByteArray): Cipher {
-val rc4 = Cipher.getInstance("RC4")
-rc4.init(Cipher.ENCRYPT_MODE, SecretKeySpec(key, "RC4"))
-return rc4
-}
+private val DIGEST = MessageDigest.getInstance("MD5")
fun md5Bytes(stringToHash: String): ByteArray {
-val digest = MessageDigest.getInstance("MD5")
-return digest.digest(stringToHash.toByteArray())
+return DIGEST.digest(stringToHash.toByteArray())
}
fun printHexString(bytes: ByteArray): String {
@ -0,0 +1,56 @@
/*
Mangadex@Home
Copyright (c) 2020, MangaDex Network
This file is part of MangaDex@Home.
MangaDex@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MangaDex@Home is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
package mdnet.settings
import com.fasterxml.jackson.databind.PropertyNamingStrategies
import com.fasterxml.jackson.databind.annotation.JsonNaming
import dev.afanasev.sekret.Secret
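// All keys below serialize as snake_case in settings.yaml, e.g. max_cache_size_in_mebibytes.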
@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class)
data class ClientSettings(
val maxCacheSizeInMebibytes: Long,
val serverSettings: ServerSettings,
val devSettings: DevSettings = DevSettings(),
val metricsSettings: MetricsSettings = MetricsSettings(),
)
@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class)
data class ServerSettings(
@field:Secret val secret: String,
val externalPort: Int = 0,
val gracefulShutdownWaitSeconds: Int = 60,
val hostname: String = "0.0.0.0",
val maxKilobitsPerSecond: Long = 0,
val externalMaxKilobitsPerSecond: Long = 0,
val maxMebibytesPerHour: Long = 0,
val port: Int = 443,
val threads: Int = 4,
)
@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class)
data class DevSettings(
val devUrl: String? = null
)
@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class)
data class MetricsSettings(
val enableGeoip: Boolean = false,
@field:Secret val geoipLicenseKey: String = "none"
)
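Because these data classes are annotated with Jackson's SnakeCaseStrategy, a settings file that maps onto them uses snake_case keys. A rough sketch, assuming the client reads a YAML settings file, with placeholder values rather than recommended ones:

    # hypothetical settings.yaml matching ClientSettings above
    max_cache_size_in_mebibytes: 80000
    server_settings:
      secret: "<client secret>"
      port: 443
      external_port: 0
      hostname: "0.0.0.0"
      threads: 4
      graceful_shutdown_wait_seconds: 60
      max_kilobits_per_second: 0
      external_max_kilobits_per_second: 0
      max_mebibytes_per_hour: 0
    metrics_settings:
      enable_geoip: false
      geoip_license_key: "none"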

View file

@ -16,21 +16,22 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
-package mdnet.base.settings
+package mdnet.settings

-import com.fasterxml.jackson.databind.PropertyNamingStrategy
+import com.fasterxml.jackson.databind.PropertyNamingStrategies
import com.fasterxml.jackson.databind.annotation.JsonNaming
import dev.afanasev.sekret.Secret
+import org.http4k.core.Uri

-@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy::class)
+@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class)
data class RemoteSettings(
-    val imageServer: String,
+    val imageServer: Uri,
    val latestBuild: Int,
-    val url: String,
+    val url: Uri,
    @field:Secret val tokenKey: ByteArray,
    val compromised: Boolean,
    val paused: Boolean,
-    val forceTokens: Boolean = false,
+    val forceDisableTokens: Boolean = false,
    val tls: TlsCert?
) {
    override fun equals(other: Any?): Boolean {
@ -45,6 +46,7 @@ data class RemoteSettings(
        if (!tokenKey.contentEquals(other.tokenKey)) return false
        if (compromised != other.compromised) return false
        if (paused != other.paused) return false
+       if (forceDisableTokens != other.forceDisableTokens) return false
        if (tls != other.tls) return false

        return true
@ -57,12 +59,13 @@ data class RemoteSettings(
        result = 31 * result + tokenKey.contentHashCode()
        result = 31 * result + compromised.hashCode()
        result = 31 * result + paused.hashCode()
+       result = 31 * result + forceDisableTokens.hashCode()
        result = 31 * result + (tls?.hashCode() ?: 0)
        return result
    }
}

-@JsonNaming(PropertyNamingStrategy.SnakeCaseStrategy::class)
+@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy::class)
data class TlsCert(
    val createdAt: String,
    @field:Secret val privateKey: String,

View file

@ -13,30 +13,26 @@
        </rollingPolicy>-->
        <encoder>
-           <pattern>%d{YYYY-MM-dd HH:mm:ss} %-5level %logger{36} - %msg%n</pattern>
+           <pattern>%d{yyyy-MM-dd HH:mm:ss} %-5level %logger{36} - %msg%n</pattern>
        </encoder>
    </appender>

-   <appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender">
-       <queueSize>1024</queueSize>
-       <appender-ref ref="FILE" />
-   </appender>

    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>${stdout-level:-INFO}</level>
        </filter>
        <encoder>
-           <pattern>%d{YYYY-MM-dd HH:mm:ss} %-5level %logger{36} - %msg%n</pattern>
+           <pattern>%d{yyyy-MM-dd HH:mm:ss} %-5level %logger{36} - %msg%n</pattern>
        </encoder>
    </appender>

    <root level="TRACE">
        <appender-ref ref="STDOUT"/>
-       <appender-ref ref="ASYNC"/>
+       <appender-ref ref="FILE"/>
    </root>

-   <logger name="Exposed" level="ERROR"/>
    <logger name="io.netty" level="INFO"/>
-   <logger name="org.apache" level="WARN"/>
+   <logger name="org.apache.hc.client5" level="ERROR"/>
</configuration>
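One practical note on the ${stdout-level:-INFO} threshold above: logback's property substitution falls back to INFO unless a stdout-level property is supplied, so console verbosity can be raised at launch without editing this file. A hedged example, with the jar name assumed rather than taken from this diff:

    java -Dstdout-level=DEBUG -jar mangadex_at_home.jar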

View file

@ -1 +0,0 @@
.vue-resizable-handle{background-image:none!important}.xterm{font-feature-settings:"liga" 0;position:relative;-moz-user-select:none;user-select:none;-ms-user-select:none;-webkit-user-select:none}.xterm.focus,.xterm:focus{outline:none}.xterm .xterm-helpers{position:absolute;top:0;z-index:5}.xterm .xterm-helper-textarea{position:absolute;opacity:0;left:-9999em;top:0;width:0;height:0;z-index:-5;white-space:nowrap;overflow:hidden;resize:none}.xterm .composition-view{background:#000;color:#fff;display:none;position:absolute;white-space:nowrap;z-index:1}.xterm .composition-view.active{display:block}.xterm .xterm-viewport{background-color:#000;overflow-y:scroll;cursor:default;position:absolute;right:0;left:0;top:0;bottom:0}.xterm .xterm-screen{position:relative}.xterm .xterm-screen canvas{position:absolute;left:0;top:0}.xterm .xterm-scroll-area{visibility:hidden}.xterm-char-measure-element{display:inline-block;visibility:hidden;position:absolute;top:0;left:-9999em;line-height:normal}.xterm{cursor:text}.xterm.enable-mouse-events{cursor:default}.xterm.xterm-cursor-pointer{cursor:pointer}.xterm.column-select.focus{cursor:crosshair}.xterm .xterm-accessibility,.xterm .xterm-message{position:absolute;left:0;top:0;bottom:0;right:0;z-index:10;color:transparent}.xterm .live-region{position:absolute;left:-9999px;width:1px;height:1px;overflow:hidden}.xterm-dim{opacity:.5}.xterm-underline{text-decoration:underline}.xterm ::-webkit-scrollbar{width:7px}.xterm ::-webkit-scrollbar-track{background-color:transparent}.xterm ::-webkit-scrollbar-thumb{background-color:#fff}

View file

@ -1 +0,0 @@
.echarts{width:600px;height:400px}

File diff suppressed because one or more lines are too long

Binary files not shown: 15 binary image files are deleted in this commit (sizes ranging from 267 B to 24 KiB); their contents are not rendered in the diff.

View file

@ -1,3 +0,0 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M8.00251 14.9297L0 1.07422H6.14651L8.00251 4.27503L9.84583 1.07422H16L8.00251 14.9297Z" fill="black"/>
</svg>


View file

@ -1 +0,0 @@
<!DOCTYPE html><html lang=en><head><meta charset=utf-8><meta http-equiv=X-UA-Compatible content="IE=edge"><meta name=viewport content="width=device-width,initial-scale=1"><!--[if IE]><link rel="icon" href="favicon.ico"><![endif]--><title>MD@H Client</title><link rel=stylesheet href="https://fonts.googleapis.com/css?family=Roboto:100,300,400,500,700,900"><link rel=stylesheet href=https://cdn.jsdelivr.net/npm/@mdi/font@latest/css/materialdesignicons.min.css><script src=https://cdn.jsdelivr.net/npm/echarts@4.1.0/dist/echarts.js></script><script src=https://cdn.jsdelivr.net/npm/vue-echarts@4.0.2></script><script src=https://unpkg.com/xterm@4.0.0/lib/xterm.js></script><link rel=text/css href=node_modules/xterm/css/xterm.css><link href=css/chunk-7577183e.6dc57fe0.css rel=prefetch><link href=js/chunk-7577183e.d6d29bcc.js rel=prefetch><link href=css/app.14a6e628.css rel=preload as=style><link href=css/chunk-vendors.b02cf67a.css rel=preload as=style><link href=js/app.ede7edb7.js rel=preload as=script><link href=js/chunk-vendors.1256013f.js rel=preload as=script><link href=css/chunk-vendors.b02cf67a.css rel=stylesheet><link href=css/app.14a6e628.css rel=stylesheet><link rel=icon type=image/png sizes=32x32 href=img/icons/favicon-32x32.png><link rel=icon type=image/png sizes=16x16 href=img/icons/favicon-16x16.png><link rel=manifest href=manifest.json><meta name=theme-color content=#f79421><meta name=apple-mobile-web-app-capable content=yes><meta name=apple-mobile-web-app-status-bar-style content=black><meta name=apple-mobile-web-app-title content="MD@H Client Interface"><link rel=apple-touch-icon href=img/icons/apple-touch-icon-152x152.png><link rel=mask-icon href=img/icons/safari-pinned-tab.svg color=#f79421><meta name=msapplication-TileImage content=img/icons/msapplication-icon-144x144.png><meta name=msapplication-TileColor content=#000000></head><body style="overflow: hidden"><noscript><div style="background-color: #0980e8; position: absolute; top: 0; left: 0; width: 100%; height: 100%; user-select: none"><div style="position: absolute; top: 15%; left: 20%; width: 60%; font-family: Segoe UI; color: white;"><p style="font-size: 180px; margin: 0">:(</p><p style="font-size: 30px; margin-top: 50px">It appears that you don't have javascript enabled.<br>This isn't a big deal, but it just means that you've killed my wonderful web UI.<br>How evil of you...</p><p style="font-size: 10px; margin-top: 10px">Really though ;-;<br>I put in a lot of work and I'm very sad that you choose to disable the one thing that I needed :/</p></div></div></noscript><div id=app></div><script src=js/chunk-vendors.1256013f.js></script><script src=js/app.ede7edb7.js></script></body></html>

File diff suppressed because one or more lines are too long (six files).

View file

@ -1 +0,0 @@
{"name":"MD@H Client Interface","short_name":"MD@H","theme_color":"#f79421","icons":[{"src":"./img/icons/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"./img/icons/android-chrome-512x512.png","sizes":"512x512","type":"image/png"},{"src":"./img/icons/android-chrome-maskable-192x192.png","sizes":"192x192","type":"image/png","purpose":"maskable"},{"src":"./img/icons/android-chrome-maskable-512x512.png","sizes":"512x512","type":"image/png","purpose":"maskable"}],"start_url":"","display":"standalone","background_color":"#000000"}

View file

@ -1,38 +0,0 @@
self.__precacheManifest = (self.__precacheManifest || []).concat([
{
"revision": "bb309837f2cf709edac5",
"url": "css/app.14a6e628.css"
},
{
"revision": "f9df31735412a9a525ef",
"url": "css/chunk-7577183e.6dc57fe0.css"
},
{
"revision": "027724770bde7ff56e58",
"url": "css/chunk-vendors.b02cf67a.css"
},
{
"revision": "7968686572fffa22fa9bdf28cc308706",
"url": "index.html"
},
{
"revision": "bb309837f2cf709edac5",
"url": "js/app.ede7edb7.js"
},
{
"revision": "f9df31735412a9a525ef",
"url": "js/chunk-7577183e.d6d29bcc.js"
},
{
"revision": "027724770bde7ff56e58",
"url": "js/chunk-vendors.1256013f.js"
},
{
"revision": "134416f208a045e960280cbf5c867e5c",
"url": "manifest.json"
},
{
"revision": "b6216d61c03e6ce0c9aea6ca7808f7ca",
"url": "robots.txt"
}
]);

View file

@ -1,2 +0,0 @@
User-agent: *
Disallow:

View file

@ -1,3 +0,0 @@
importScripts("precache-manifest.9917f0a006705c9b6b6c1abfab436c1f.js", "https://storage.googleapis.com/workbox-cdn/releases/4.3.1/workbox-sw.js");

View file

@ -0,0 +1,26 @@
/*
Mangadex@Home
Copyright (c) 2020, MangaDex Network
This file is part of MangaDex@Home.
MangaDex@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MangaDex@Home is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
/* ktlint-disable no-wildcard-imports */
package mdnet
import io.kotest.core.config.AbstractProjectConfig
object ProjectConfig : AbstractProjectConfig() {
override val parallelism = 4
}
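Kotest discovers this ProjectConfig object automatically, so with parallelism = 4 up to four spec classes run concurrently during the test task. Assuming the repository's standard Gradle wrapper, that is simply:

    ./gradlew test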

View file

@ -0,0 +1,195 @@
/*
Mangadex@Home
Copyright (c) 2020, MangaDex Network
This file is part of MangaDex@Home.
MangaDex@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MangaDex@Home is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
/* ktlint-disable no-wildcard-imports */
package mdnet.cache
import io.kotest.assertions.throwables.shouldThrow
import io.kotest.assertions.timing.eventually
import io.kotest.core.spec.IsolationMode
import io.kotest.core.spec.style.FreeSpec
import io.kotest.engine.spec.tempdir
import io.kotest.engine.spec.tempfile
import io.kotest.matchers.booleans.shouldBeTrue
import io.kotest.matchers.longs.shouldBeGreaterThan
import io.kotest.matchers.longs.shouldBeZero
import io.kotest.matchers.nulls.shouldBeNull
import io.kotest.matchers.nulls.shouldNotBeNull
import io.kotest.matchers.shouldBe
import org.apache.commons.io.IOUtils
import org.ktorm.database.Database
import kotlin.time.ExperimentalTime
import kotlin.time.minutes
class ImageStorageTest : FreeSpec() {
override fun isolationMode() = IsolationMode.InstancePerTest
init {
val imageStorage = ImageStorage(
maxSize = 5,
cacheDirectory = tempdir().toPath(),
database = Database.connect("jdbc:h2:${tempfile()}"),
autoPrune = false,
)
val testMeta = ImageMetadata("a", "a", 123)
"storeImage()" - {
"should throw exception when length too short" {
for (i in listOf("", "a", "aa")) {
shouldThrow<IllegalArgumentException> {
imageStorage.storeImage(i, testMeta)
}
}
}
"when writer committed" - {
val writer = imageStorage.storeImage("test", testMeta)
writer.shouldNotBeNull()
writer.stream.write(ByteArray(12))
writer.commit(12).shouldBeTrue()
"should not update size until calculated" {
imageStorage.size.shouldBeZero()
}
"should update size when calculated" {
imageStorage.calculateSize()
imageStorage.size.shouldBeGreaterThan(0)
}
}
"when writer aborted" - {
val writer = imageStorage.storeImage("test", testMeta)
writer.shouldNotBeNull()
writer.stream.write(ByteArray(12))
writer.abort()
"should not update size" {
imageStorage.size.shouldBeZero()
}
}
}
"loadImage()" - {
"should load committed data" - {
val data = byteArrayOf(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
val writer = imageStorage.storeImage("test", testMeta)
writer.shouldNotBeNull()
writer.stream.write(data)
writer.commit(data.size).shouldBeTrue()
val image = imageStorage.loadImage("test")
image.shouldNotBeNull()
image.data.shouldBe(testMeta)
IOUtils.toByteArray(image.stream).shouldBe(data)
}
"should not load aborted data" {
val data = byteArrayOf(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
val writer = imageStorage.storeImage("test", testMeta)
writer.shouldNotBeNull()
writer.stream.write(data)
writer.abort()
val image = imageStorage.loadImage("test")
image.shouldBeNull()
}
}
"pruneImage()" - {
"should prune if insufficient size" {
val writer = imageStorage.storeImage("test", testMeta)
writer.shouldNotBeNull()
writer.stream.write(ByteArray(12))
writer.commit(12).shouldBeTrue()
imageStorage.calculateSize()
imageStorage.size.shouldBeGreaterThan(0)
imageStorage.pruneImages()
imageStorage.calculateSize()
imageStorage.size.shouldBeZero()
}
"should not prune if enough size" {
imageStorage.maxSize = 10000
val writer = imageStorage.storeImage("test", testMeta)
writer.shouldNotBeNull()
writer.stream.write(ByteArray(12))
writer.commit(12).shouldBeTrue()
imageStorage.calculateSize()
imageStorage.size.shouldBeGreaterThan(0)
imageStorage.pruneImages()
imageStorage.calculateSize()
imageStorage.size.shouldBeGreaterThan(0)
}
}
}
}
@ExperimentalTime
class ImageStorageSlowTest : FreeSpec() {
override fun isolationMode() = IsolationMode.InstancePerTest
init {
val imageStorage = ImageStorage(
maxSize = 4097,
cacheDirectory = tempdir().toPath(),
database = Database.connect("jdbc:h2:${tempfile()}"),
)
"autoPrune" - {
"should update size eventually" {
val writer = imageStorage.storeImage("test", ImageMetadata("a", "a", 4096))
writer.shouldNotBeNull()
writer.stream.write(ByteArray(4096))
writer.commit(4096).shouldBeTrue()
eventually(5.minutes) {
imageStorage.size.shouldBeGreaterThan(0)
}
}
"should prune if insufficient size eventually" {
imageStorage.maxSize = 10000
val writer = imageStorage.storeImage("test", ImageMetadata("a", "a", 123))
writer.shouldNotBeNull()
writer.stream.write(ByteArray(8192))
writer.commit(8192).shouldBeTrue()
eventually(5.minutes) {
imageStorage.size.shouldBeZero()
}
}
}
}
}

View file

@ -0,0 +1,93 @@
package mdnet.metrics
import com.maxmind.geoip2.DatabaseReader
import com.maxmind.geoip2.model.CountryResponse
import com.maxmind.geoip2.record.Country
import io.kotest.core.spec.IsolationMode
import io.kotest.core.spec.style.FreeSpec
import io.micrometer.core.instrument.Counter
import io.micrometer.prometheus.PrometheusMeterRegistry
import io.mockk.confirmVerified
import io.mockk.every
import io.mockk.mockk
import io.mockk.verify
import org.http4k.core.Method
import org.http4k.core.Request
import org.http4k.core.RequestSource
import org.http4k.core.Response
import org.http4k.core.Status
import org.http4k.kotest.shouldHaveStatus
import java.net.InetAddress
class GeoIpMetricsFilterTest : FreeSpec() {
override fun isolationMode() = IsolationMode.InstancePerTest
init {
val registry = mockk<PrometheusMeterRegistry>()
val databaseReader = mockk<DatabaseReader>()
val geoIpMetricsFilter = GeoIpMetricsFilter(databaseReader, registry)
val filterRequest = geoIpMetricsFilter { Response(Status.OK) }
"invalid source doesn't fail the image serving" {
val address = "not a resolvable inetaddress"
val request: Request = Request(Method.GET, "whatever")
.source(RequestSource(address = address))
val response = filterRequest(request)
response.shouldHaveStatus(Status.OK)
}
"invalid header doesn't fail the image serving" {
val address = "not a resolvable inetaddress"
val request: Request = Request(Method.GET, "whatever")
.header("X-Forwarded-For", address)
val response = filterRequest(request)
response.shouldHaveStatus(Status.OK)
}
"valid header and country resolved" {
val address = "195.154.69.12"
val countryCode = "COUNTRY_CODE"
val countryResponse = mockk<CountryResponse>()
val country = mockk<Country>()
val counter = mockk<Counter>(relaxUnitFun = true)
every { country.isoCode } returns countryCode
every { countryResponse.country } returns country
every { databaseReader.country(InetAddress.getByName(address)) } returns countryResponse
every {
registry.counter(
"requests_country_counts",
"country", countryCode
)
} returns counter
val request: Request = Request(Method.GET, "whatever")
.header("X-Forwarded-For", address)
val response = filterRequest(request)
response shouldHaveStatus Status.OK
verify {
registry.counter(
"requests_country_counts",
"country", countryCode
)
}
confirmVerified(registry)
verify {
counter.increment()
}
confirmVerified(counter)
}
}
}

View file

@ -0,0 +1,294 @@
/*
Mangadex@Home
Copyright (c) 2020, MangaDex Network
This file is part of MangaDex@Home.
MangaDex@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MangaDex@Home is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this MangaDex@Home. If not, see <http://www.gnu.org/licenses/>.
*/
/* ktlint-disable no-wildcard-imports */
package mdnet.server
import io.kotest.assertions.withClue
import io.kotest.core.spec.IsolationMode
import io.kotest.core.spec.style.FreeSpec
import io.kotest.engine.spec.tempdir
import io.kotest.engine.spec.tempfile
import io.kotest.matchers.shouldBe
import io.micrometer.prometheus.PrometheusConfig
import io.micrometer.prometheus.PrometheusMeterRegistry
import io.mockk.confirmVerified
import io.mockk.every
import io.mockk.mockk
import io.mockk.verify
import kotlinx.coroutines.delay
import mdnet.cache.ImageStorage
import mdnet.data.Statistics
import mdnet.security.TweetNaclFast
import org.apache.commons.io.IOUtils
import org.http4k.core.HttpHandler
import org.http4k.core.Method
import org.http4k.core.Request
import org.http4k.core.Response
import org.http4k.core.Status
import org.http4k.core.then
import org.http4k.kotest.shouldHaveHeader
import org.http4k.kotest.shouldHaveStatus
import org.http4k.kotest.shouldNotHaveStatus
import org.http4k.routing.bind
import org.http4k.routing.routes
import org.ktorm.database.Database
import java.io.ByteArrayInputStream
import java.util.concurrent.atomic.AtomicReference
class ImageServerTest : FreeSpec() {
override fun isolationMode() = IsolationMode.InstancePerTest
init {
val registry = PrometheusMeterRegistry(PrometheusConfig.DEFAULT)
val mockData = byteArrayOf(72, 66, 67, 66, 65, 66, 73, 69, 65, 67)
"correct upstream responses" - {
val client = mockk<HttpHandler>()
every {
client(any())
} answers {
correctMockResponse(mockData)
}
"mocked noop cache" - {
val storage = mockk<ImageStorage>()
every {
storage.loadImage(any())
} returns null
every {
storage.storeImage(any(), any())
} returns null
val server = ImageServer(
storage,
AtomicReference(Statistics()),
client,
registry
)
val handler = routes(
"/{token}/data/{chapterHash}/{fileName}" bind Method.GET to server.handler(
dataSaver = false,
),
"/{token}/data-saver/{chapterHash}/{fileName}" bind Method.GET to server.handler(
dataSaver = true,
),
"/data/{chapterHash}/{fileName}" bind Method.GET to server.handler(
dataSaver = false,
),
"/data-saver/{chapterHash}/{fileName}" bind Method.GET to server.handler(
dataSaver = true,
),
)
"directly proxied" {
for (i in 0..2) {
val response = handler(Request(Method.GET, "/data/02181a8f5fe8cd408720a771dd129fd8/T2.png"))
withClue("should be directly proxied") {
verify(exactly = i + 1) {
client(any())
}
confirmVerified(client)
}
response.shouldHaveStatus(Status.OK)
response.shouldHaveHeader("Content-Length", mockData.size.toString())
IOUtils.toByteArray(response.body.stream).shouldBe(mockData)
response.close()
}
}
}
"with real cache" - {
val storage = ImageStorage(
maxSize = 100000,
cacheDirectory = tempdir().toPath(),
database = Database.connect("jdbc:h2:${tempfile()}"),
autoPrune = false,
)
val server = ImageServer(
storage,
AtomicReference(Statistics()),
client,
registry
)
val handler = routes(
"/data/{chapterHash}/{fileName}" bind Method.GET to server.handler(
dataSaver = false,
),
"/data-saver/{chapterHash}/{fileName}" bind Method.GET to server.handler(
dataSaver = true,
),
)
"respects cache" {
for (i in 0..2) {
val response = handler(Request(Method.GET, "/data/02181a8f5fe8cd408720a771dd129fd8/T2.png"))
withClue("should only be downloaded once") {
verify(exactly = 1) {
client(any())
}
confirmVerified(client)
}
response.shouldHaveStatus(Status.OK)
response.shouldHaveHeader("Content-Length", mockData.size.toString())
IOUtils.toByteArray(response.body.stream).shouldBe(mockData)
response.close()
// wait for the executor to commit
delay(100)
}
}
}
}
"failed upstream responses" - {
val client = mockk<HttpHandler>()
val storage = ImageStorage(
maxSize = 100000,
cacheDirectory = tempdir().toPath(),
database = Database.connect("jdbc:h2:${tempfile()}"),
autoPrune = false,
)
val server = ImageServer(
storage,
AtomicReference(Statistics()),
client,
registry
)
val handler = routes(
"/data/{chapterHash}/{fileName}" bind Method.GET to server.handler(
dataSaver = false,
),
"/data-saver/{chapterHash}/{fileName}" bind Method.GET to server.handler(
dataSaver = true,
),
)
"does not cache failures" {
val errStatus = Status.NOT_FOUND
every {
client(any())
} returns Response(errStatus)
for (i in 0..2) {
val response = handler(Request(Method.GET, "/data/02181a8f5fe8cd408720a771dd129fd8/T2.png"))
withClue("should be directly proxied") {
verify(exactly = i + 1) {
client(any())
}
confirmVerified(client)
}
response.shouldHaveStatus(errStatus)
response.close()
}
}
"errors on bad content type" {
every {
client(any())
} answers {
correctMockResponse(mockData)
.replaceHeader("Content-Type", "text/html")
}
for (i in 0..2) {
val response = handler(Request(Method.GET, "/data/02181a8f5fe8cd408720a771dd129fd8/T2.png"))
withClue("should be directly proxied") {
verify(exactly = i + 1) {
client(any())
}
confirmVerified(client)
}
response.status.shouldBe(Status.INTERNAL_SERVER_ERROR)
response.close()
}
}
"still works on no content-length" {
every {
client(any())
} answers {
correctMockResponse(mockData)
.removeHeader("Content-Length")
}
for (i in 0..2) {
val response = handler(Request(Method.GET, "/data/02181a8f5fe8cd408720a771dd129fd8/T2.png"))
response.shouldHaveStatus(Status.OK)
IOUtils.toByteArray(response.body.stream).shouldBe(mockData)
response.close()
}
}
}
}
private fun correctMockResponse(data: ByteArray) =
Response(Status.OK)
.body(ByteArrayInputStream(data))
.header("Content-Type", "image/jpg")
.header("Content-Length", "${data.size}")
.header("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT")
}
class TokenVerifierTest : FreeSpec() {
init {
val remoteKeys = TweetNaclFast.Box.keyPair()
val clientKeys = TweetNaclFast.Box.keyPair()
val box = TweetNaclFast.Box(clientKeys.publicKey, remoteKeys.secretKey)
val backend = tokenVerifier(box.before()) { _, _ ->
true
}.then {
Response(Status.OK)
}
val handler = routes(
"/data/{chapterHash}/{fileName}" bind Method.GET to backend,
"/data-saver/{chapterHash}/{fileName}" bind Method.GET to backend,
"/{token}/data/{chapterHash}/{fileName}" bind Method.GET to backend,
"/{token}/data-saver/{chapterHash}/{fileName}" bind Method.GET to backend,
)
"invalid" - {
"missing token should fail" {
val response = handler(Request(Method.GET, "/data/02181a8f5fe8cd408720a771dd129fd8/T2.png"))
response.shouldNotHaveStatus(Status.OK)
}
"too short token should fail" {
val response = handler(Request(Method.GET, "/a/data/02181a8f5fe8cd408720a771dd129fd8/T2.png"))
response.shouldNotHaveStatus(Status.OK)
}
}
}
}