Merge branch 'webui' of https://gitlab.com/mangadex/mangadex_at_home
Conflicts:
    build.gradle
    src/main/java/mdnet/base/MangaDexClient.java
    src/main/java/mdnet/base/settings/ClientSettings.java
    src/main/java/mdnet/base/settings/WebSettings.java
    src/main/kotlin/mdnet/base/web/Application.kt
    src/main/resources/webui/dataReceive.js
commit 1098664814
@@ -22,8 +22,12 @@ dependencies {
implementation group: "org.http4k", name: "http4k-core", version: "$http_4k_version"
implementation group: "org.http4k", name: "http4k-server-netty", version: "$http_4k_version"
implementation group: "org.http4k", name: "http4k-client-apache", version: "$http_4k_version"
implementation group: "org.http4k", name: "http4k-format-gson", version: "3.249.0"

implementation group: "commons-io", name: "commons-io", version: "2.7"
compile "org.java-websocket:Java-WebSocket:1.5.1"

implementation group: 'org.java-websocket', name: 'Java-WebSocket', version: '1.5.1'

implementation "ch.qos.logback:logback-classic:$logback_version"
runtimeOnly 'io.netty:netty-tcnative-boringssl-static:2.0.30.Final'
}
@@ -35,12 +39,14 @@ java {

spotless {
java {
indentWithSpaces(4)
eclipse()
removeUnusedImports()
trimTrailingWhitespace()
endWithNewline()
}
kotlin {
indentWithSpaces(4)
ktlint()
trimTrailingWhitespace()
endWithNewline()
@@ -2,21 +2,29 @@ package mdnet.base;

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.reflect.TypeToken;
import mdnet.base.settings.ClientSettings;
import mdnet.base.web.ApplicationKt;
import mdnet.base.web.WebUiKt;
import mdnet.cache.DiskLruCache;
import mdnet.webui.WebConsole;
import org.http4k.server.Http4kServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Pattern;

public class MangaDexClient {
private final static Gson GSON = new GsonBuilder().setPrettyPrinting().create();
private final static Logger LOGGER = LoggerFactory.getLogger(MangaDexClient.class);

// This lock protects the Http4kServer from concurrent restart attempts
@@ -24,13 +32,25 @@ public class MangaDexClient {
private final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
private final ServerHandler serverHandler;
private final ClientSettings clientSettings;
private final AtomicReference<Statistics> statistics;
private ServerSettings serverSettings;

// if this is null, then the server has shutdown
private Http4kServer engine;
private final Map<Instant, Statistics> statsMap = Collections
.synchronizedMap(new LinkedHashMap<Instant, Statistics>(80) {
@Override
protected boolean removeEldestEntry(Map.Entry eldest) {
return this.size() > 80;
}
});
private final AtomicReference<Statistics> statistics;

private ServerSettings serverSettings;
private Http4kServer engine; // if this is null, then the server has shutdown
private Http4kServer webUi;
private DiskLruCache cache;

// these variables are for runLoop();
private int counter = 0;
private long lastBytesSent = 0;

public MangaDexClient(ClientSettings clientSettings) {
this.clientSettings = clientSettings;
this.serverHandler = new ServerHandler(clientSettings);
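The statsMap field introduced above is a rolling window of statistics snapshots: a synchronized, insertion-ordered LinkedHashMap that drops its eldest entry once it holds more than 80 snapshots. A minimal Kotlin sketch of the same idiom, using only the JDK (boundedHistory is an illustrative helper, not part of the client):

import java.time.Instant
import java.util.Collections
import java.util.LinkedHashMap

// Size-bounded, synchronized history map: once it grows past maxEntries,
// the oldest inserted snapshot is evicted automatically.
fun <V> boundedHistory(maxEntries: Int = 80): MutableMap<Instant, V> =
    Collections.synchronizedMap(object : LinkedHashMap<Instant, V>(maxEntries) {
        override fun removeEldestEntry(eldest: MutableMap.MutableEntry<Instant, V>?): Boolean =
            size > maxEntries
    })

With one put per 45-second keep-alive tick, 80 entries cover roughly the last hour of operation.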
@@ -39,14 +59,23 @@ public class MangaDexClient {
try {
cache = DiskLruCache.open(new File("cache"), 3, 3,
clientSettings.getMaxCacheSizeMib() * 1024 * 1024 /* MiB to bytes */);

DiskLruCache.Snapshot snapshot = cache.get("statistics");
if (snapshot != null) {
String json = snapshot.getString(0);
snapshot.close();
statistics.set(GSON.fromJson(json, new TypeToken<ArrayList<Statistics>>() {
}.getType()));
} else {
statistics.set(new Statistics());
}
lastBytesSent = statistics.get().getBytesSent();
} catch (IOException e) {
MangaDexClient.dieWithError(e);
}
}

// This function also does most of the program initialization.
public void runLoop() {
statistics.set(new Statistics());
loginAndStartServer();
if (serverSettings.getLatestBuild() > Constants.CLIENT_BUILD) {
if (LOGGER.isWarnEnabled()) {
@@ -55,23 +84,21 @@ public class MangaDexClient {
}
}

statsMap.put(Instant.now(), statistics.get());

if (clientSettings.getWebSettings() != null) {
webUi = WebUiKt.getUiServer(clientSettings.getWebSettings(), statistics, statsMap);
webUi.start();
}

if (LOGGER.isInfoEnabled()) {
LOGGER.info("MDNet initialization completed successfully. Starting normal operation.");
}

// we don't really care about the Atomic part here
AtomicInteger counter = new AtomicInteger();
// ping keep-alive every 45 seconds
executorService.scheduleAtFixedRate(() -> {
int num = counter.get();
if (num == 80) {
counter.set(0);

// if server is stopped due to egress limits, restart it
if (LOGGER.isInfoEnabled()) {
LOGGER.info("Hourly update: refreshing statistics");
}
statistics.set(new Statistics());
if (counter == 80) {
counter = 0;
lastBytesSent = statistics.get().getBytesSent();

if (engine == null) {
if (LOGGER.isInfoEnabled()) {
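The scheduler above ticks every 45 seconds and treats 80 ticks as one hour (80 x 45 s = 3 600 s), at which point statistics are reset and a stopped server may be restarted. A small Kotlin sketch of that cadence, with hypothetical onTick/onHourly callbacks standing in for the client's real work:

import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger

// 45-second tick; every 80th tick (one hour) triggers the hourly rollover.
fun scheduleKeepAlive(onTick: () -> Unit, onHourly: () -> Unit) {
    val ticks = AtomicInteger()
    Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate({
        val num = ticks.get()
        if (num == 80) {
            ticks.set(0)
            onHourly()   // reset statistics, restart a stopped server, etc.
        } else {
            ticks.set(num + 1)
        }
        onTick()         // ping keep-alive / record the current snapshot
    }, 45, 45, TimeUnit.SECONDS)
}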
@@ -81,16 +108,19 @@ public class MangaDexClient {
loginAndStartServer();
}
} else {
counter.set(num + 1);
counter++;
}

statsMap.put(Instant.now(), statistics.get());

// if the server is offline then don't try and refresh certs
if (engine == null) {
return;
}

if (clientSettings.getMaxBandwidthMibPerHour() != 0 && clientSettings.getMaxBandwidthMibPerHour() * 1024
* 1024 /* MiB to bytes */ < statistics.get().getBytesSent().get()) {
long currentBytesSent = statistics.get().getBytesSent() - lastBytesSent;
if (clientSettings.getMaxBandwidthMibPerHour() != 0
&& clientSettings.getMaxBandwidthMibPerHour() * 1024 * 1024 /* MiB to bytes */ < currentBytesSent) {
if (LOGGER.isInfoEnabled()) {
LOGGER.info("Shutting down server as hourly bandwidth limit reached");
}
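The egress check above now compares the bytes sent since the last hourly reset (currentBytesSent) against the configured MiB-per-hour limit converted to bytes. A worked example in Kotlin, with an illustrative (not default) limit:

// Illustrative limit, not a default: 10 240 MiB per hour.
val maxBandwidthMibPerHour = 10_240L
val hourlyLimitBytes = maxBandwidthMibPerHour * 1024 * 1024   // = 10_737_418_240 bytes

// Mirrors the condition above: 0 means "no limit"; otherwise shut the
// server down once this hour's sent bytes exceed the limit.
fun overHourlyLimit(currentBytesSent: Long): Boolean =
    maxBandwidthMibPerHour != 0L && hourlyLimitBytes < currentBytesSent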
@@ -164,6 +194,12 @@ public class MangaDexClient {

logoutAndStopServer();
}
webUi.close();
try {
cache.close();
} catch (IOException e) {
LOGGER.error("Cache failed to close", e);
}
}

public static void main(String[] args) {
@@ -178,31 +214,21 @@ public class MangaDexClient {
MangaDexClient.dieWithError("Expected one argument: path to config file, or nothing");
}

Gson gson = new GsonBuilder().setPrettyPrinting().create();
ClientSettings settings;

try {
settings = gson.fromJson(new FileReader(file), ClientSettings.class);
settings = GSON.fromJson(new FileReader(file), ClientSettings.class);
} catch (FileNotFoundException ignored) {
settings = new ClientSettings();
LOGGER.warn("Settings file {} not found, generating file", file);
try (FileWriter writer = new FileWriter(file)) {
writer.write(gson.toJson(settings));
writer.write(GSON.toJson(settings));
} catch (IOException e) {
MangaDexClient.dieWithError(e);
}
}

if (!ClientSettings.isSecretValid(settings.getClientSecret()))
MangaDexClient.dieWithError("Config Error: API Secret is invalid, must be 52 alphanumeric characters");

if (settings.getClientPort() == 0) {
MangaDexClient.dieWithError("Config Error: Invalid port number");
}

if (settings.getMaxCacheSizeMib() < 1024) {
MangaDexClient.dieWithError("Config Error: Invalid max cache size, must be >= 1024 MiB (1GiB)");
}
validateSettings(settings);

if (LOGGER.isInfoEnabled()) {
LOGGER.info("Client settings loaded: {}", settings);
@@ -211,24 +237,6 @@ public class MangaDexClient {
MangaDexClient client = new MangaDexClient(settings);
Runtime.getRuntime().addShutdownHook(new Thread(client::shutdown));
client.runLoop();

if (settings.getWebSettings() != null) {
// java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
// System.setOut(new java.io.PrintStream(out));
// TODO: system.out redirect
ClientSettings finalSettings = settings;
new Thread(() -> {
WebConsole webConsole = new WebConsole(finalSettings.getWebSettings().getClientWebsocketPort()) {
@Override
protected void parseMessage(String message) {
System.out.println(message);
// TODO: something happens here
// the message should be formatted in json
}
};
// TODO: webConsole.sendMessage(t,m) whenever system.out is written to
}).start();
}
}

public static void dieWithError(Throwable e) {
@@ -240,8 +248,48 @@ public class MangaDexClient {

public static void dieWithError(String error) {
if (LOGGER.isErrorEnabled()) {
LOGGER.error("Critical Error: " + error);
LOGGER.error("Critical Error: {}", error);
}
System.exit(1);
}

public static void validateSettings(ClientSettings settings) {
if (!isSecretValid(settings.getClientSecret()))
MangaDexClient.dieWithError("Config Error: API Secret is invalid, must be 52 alphanumeric characters");

if (settings.getClientPort() == 0) {
MangaDexClient.dieWithError("Config Error: Invalid port number");
}

if (settings.getMaxCacheSizeMib() < 1024) {
MangaDexClient.dieWithError("Config Error: Invalid max cache size, must be >= 1024 MiB (1GiB)");
}

if (settings.getThreads() < 4) {
MangaDexClient.dieWithError("Config Error: Invalid number of threads, must be >= 8");
}

if (settings.getMaxBandwidthMibPerHour() < 0) {
MangaDexClient.dieWithError("Config Error: Max bandwidth must be >= 0");
}

if (settings.getMaxBurstRateKibPerSecond() < 0) {
MangaDexClient.dieWithError("Config Error: Max burst rate must be >= 0");
}

if (settings.getWebSettings() != null) {
if (settings.getWebSettings().getUiPort() == 0) {
MangaDexClient.dieWithError("Config Error: Invalid UI port number");
}

if (settings.getWebSettings().getUiWebsocketPort() == 0) {
MangaDexClient.dieWithError("Config Error: Invalid websocket port number");
}
}
}

public static boolean isSecretValid(String clientSecret) {
final int CLIENT_KEY_LENGTH = 52;
return Pattern.matches("^[a-zA-Z0-9]{" + CLIENT_KEY_LENGTH + "}$", clientSecret);
}
}
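For reference, a settings shape that would pass validateSettings() above, expressed as a Kotlin raw string. Only "max_cache_size_mib", "ui_port" and "ui_websocket_port" are taken from the @SerializedName annotations visible in this diff; the remaining key names are assumptions for illustration only:

// Key names other than max_cache_size_mib / ui_port / ui_websocket_port are assumed.
val exampleSettingsJson = """
{
  "client_secret": "<52 alphanumeric characters>",
  "client_port": 443,
  "threads": 16,
  "max_cache_size_mib": 20480,
  "max_bandwidth_mib_per_hour": 0,
  "max_burst_rate_kib_per_second": 0,
  "web_settings": {
    "ui_port": 8080,
    "ui_websocket_port": 33333
  }
}
"""

These values satisfy every check: a 52-character alphanumeric secret, non-zero ports, at least 4 threads, a cache of at least 1024 MiB, and non-negative bandwidth and burst limits (0 meaning unlimited).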
@@ -1,40 +0,0 @@
package mdnet.base;

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

public class Statistics {
private final AtomicInteger requestsServed;
private final AtomicInteger cacheHits;
private final AtomicInteger cacheMisses;
private final AtomicLong bytesSent;

public Statistics() {
requestsServed = new AtomicInteger();
cacheHits = new AtomicInteger();
cacheMisses = new AtomicInteger();
bytesSent = new AtomicLong();
}

public AtomicInteger getRequestsServed() {
return requestsServed;
}

public AtomicInteger getCacheHits() {
return cacheHits;
}

public AtomicInteger getCacheMisses() {
return cacheMisses;
}

public AtomicLong getBytesSent() {
return bytesSent;
}

@Override
public String toString() {
return "Statistics{" + "requestsServed=" + requestsServed + ", cacheHits=" + cacheHits + ", cacheMisses="
+ cacheMisses + ", bytesSent=" + bytesSent + '}';
}
}
@@ -3,7 +3,6 @@ package mdnet.base.settings;
import com.google.gson.annotations.SerializedName;

import java.util.Objects;
import java.util.regex.Pattern;

public final class ClientSettings {
@SerializedName("max_cache_size_mib")
@@ -75,9 +74,4 @@ public final class ClientSettings {
+ maxBandwidthMibPerHour + ", maxBurstRateKibPerSecond=" + maxBurstRateKibPerSecond + ", clientPort="
+ clientPort + ", clientSecret='" + "<hidden>" + '\'' + ", threads=" + getThreads() + '}';
}

public static boolean isSecretValid(String clientSecret) {
final int CLIENT_KEY_LENGTH = 52;
return Pattern.matches("^[a-zA-Z0-9]{" + CLIENT_KEY_LENGTH + "}$", clientSecret);
}
}
@@ -3,23 +3,31 @@ package mdnet.base.settings;
import com.google.gson.annotations.SerializedName;

public final class WebSettings {
@SerializedName("client_websocket_port")
private final int clientWebsocketPort;
@SerializedName("ui_websocket_port")
private final int uiWebsocketPort;
@SerializedName("ui_port")
private final int uiPort;

public WebSettings() {
this.clientWebsocketPort = 33333;
this.uiWebsocketPort = 33333;
this.uiPort = 8080;
}

public WebSettings(int clientWebsocketPort) {
this.clientWebsocketPort = clientWebsocketPort;
public WebSettings(int uiWebsocketPort, int uiPort) {
this.uiWebsocketPort = uiWebsocketPort;
this.uiPort = uiPort;
}

public int getClientWebsocketPort() {
return clientWebsocketPort;
public int getUiWebsocketPort() {
return uiWebsocketPort;
}

public int getUiPort() {
return uiPort;
}

@Override
public String toString() {
return "WebSettings{" + "clientWebsocketPort=" + clientWebsocketPort + '}';
return "WebSettings{" + "uiWebsocketPort=" + uiWebsocketPort + ", uiPort=" + uiPort + '}';
}
}
@@ -14,7 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package mdnet.base;
package mdnet.cache;

import org.apache.commons.io.IOUtils;
import org.apache.commons.io.input.ProxyInputStream;
src/main/java/mdnet/cache/DiskLruCache.java (vendored, 36 lines changed)
@@ -82,10 +82,9 @@ import java.util.regex.Pattern;
 * <li>When an entry is being <strong>edited</strong>, it is not necessary to
 * supply data for every value; values default to their previous value.
 * </ul>
 * Every {@link #editImpl} call must be matched by a call to
 * {@link Editor#commit} or {@link Editor#abort}. Committing is atomic: a read
 * observes the full set of values as they were before or after the commit, but
 * never a mix of values.
 * Every {@link #edit} call must be matched by a call to {@link Editor#commit}
 * or {@link Editor#abort}. Committing is atomic: a read observes the full set
 * of values as they were before or after the commit, but never a mix of values.
 *
 * <p>
 * Clients call {@link #get} to read a snapshot of an entry. The read will
@@ -412,7 +411,7 @@ public final class DiskLruCache implements Closeable {
return getImpl(key);
}

public synchronized Snapshot getImpl(String key) throws IOException {
private synchronized Snapshot getImpl(String key) throws IOException {
checkNotClosed();
Entry entry = lruEntries.get(key);
if (entry == null) {
@@ -967,20 +966,7 @@ public final class DiskLruCache implements Closeable {
Path oldCache = Paths.get(directory + File.separator + key + "." + i);
Path newCache = Paths.get(directory + subKeyPath + File.separator + key + "." + i);

File newCacheDirectory = new File(directory + subKeyPath, key + "." + i + ".tmp");
newCacheDirectory.getParentFile().mkdirs();

if (Files.exists(oldCache)) {
try {
Files.move(oldCache, newCache, StandardCopyOption.ATOMIC_MOVE);
} catch (FileAlreadyExistsException faee) {
try {
Files.delete(oldCache);
} catch (IOException ex) {
}
} catch (IOException ex) {
}
}
migrateCacheFile(i, oldCache, newCache);

return new File(directory + subKeyPath, key + "." + i);
}
@@ -990,6 +976,12 @@ public final class DiskLruCache implements Closeable {
Path oldCache = Paths.get(directory + File.separator + key + "." + i + ".tmp");
Path newCache = Paths.get(directory + subKeyPath + File.separator + key + "." + i + ".tmp");

migrateCacheFile(i, oldCache, newCache);

return new File(directory + subKeyPath, key + "." + i + ".tmp");
}

private void migrateCacheFile(int i, Path oldCache, Path newCache) {
File newCacheDirectory = new File(directory + subKeyPath, key + "." + i + ".tmp");
newCacheDirectory.getParentFile().mkdirs();

@@ -999,13 +991,11 @@ public final class DiskLruCache implements Closeable {
} catch (FileAlreadyExistsException faee) {
try {
Files.delete(oldCache);
} catch (IOException ex) {
} catch (IOException ignored) {
}
} catch (IOException ex) {
} catch (IOException ignored) {
}
}

return new File(directory + subKeyPath, key + "." + i + ".tmp");
}
}
}
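The two hunks above deduplicate the cache-file migration logic into migrateCacheFile(). The underlying idiom, sketched standalone in Kotlin (migrateQuietly is illustrative, not the vendored code): attempt an atomic rename, and if the destination already exists just delete the stale source, swallowing IO failures in either case.

import java.io.IOException
import java.nio.file.FileAlreadyExistsException
import java.nio.file.Files
import java.nio.file.Path
import java.nio.file.StandardCopyOption

// Standalone sketch of the move-or-discard idiom used by migrateCacheFile().
fun migrateQuietly(oldCache: Path, newCache: Path) {
    if (!Files.exists(oldCache)) return
    try {
        Files.move(oldCache, newCache, StandardCopyOption.ATOMIC_MOVE)
    } catch (faee: FileAlreadyExistsException) {
        try {
            Files.delete(oldCache)    // destination already migrated; drop the stale copy
        } catch (ignored: IOException) {
        }
    } catch (ignored: IOException) {  // best-effort: leave the old file in place
    }
}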
@@ -37,26 +37,26 @@ import javax.net.ssl.SSLException

private val LOGGER = LoggerFactory.getLogger("Application")

class Netty(private val tls: ServerSettings.TlsCert, private val clientSettings: ClientSettings, private val stats: AtomicReference<Statistics>) : ServerConfig {
private val threadsToAllocate = clientSettings.getThreads()

class Netty(private val tls: ServerSettings.TlsCert, private val clientSettings: ClientSettings, private val statistics: AtomicReference<Statistics>) : ServerConfig {
override fun toServer(httpHandler: HttpHandler): Http4kServer = object : Http4kServer {
private val masterGroup = NioEventLoopGroup(threadsToAllocate)
private val workerGroup = NioEventLoopGroup(threadsToAllocate)
private val masterGroup = NioEventLoopGroup(clientSettings.threads)
private val workerGroup = NioEventLoopGroup(clientSettings.threads)
private lateinit var closeFuture: ChannelFuture
private lateinit var address: InetSocketAddress

private val burstLimiter = object : GlobalTrafficShapingHandler(
workerGroup, 1024 * clientSettings.maxBurstRateKibPerSecond, 0, 50) {
override fun doAccounting(counter: TrafficCounter) {
stats.get().bytesSent.getAndAdd(counter.cumulativeWrittenBytes())
statistics.getAndUpdate {
it.copy(bytesSent = it.bytesSent + counter.cumulativeWrittenBytes())
}
counter.resetCumulativeTime()
}
}

override fun start(): Http4kServer = apply {
if (LOGGER.isInfoEnabled) {
LOGGER.info("Starting webserver with {} threads", threadsToAllocate)
LOGGER.info("Starting webserver with {} threads", clientSettings.threads)
}

val (mainCert, chainCert) = getX509Certs(tls.certificate)
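In the burstLimiter above, the arguments to Netty's GlobalTrafficShapingHandler are (executor, writeLimit, readLimit, checkInterval): writeLimit is in bytes per second, readLimit = 0 disables read shaping, and checkInterval = 50 is in milliseconds. A small worked example of the write limit (the burst-rate value is illustrative, not a default):

// A burst rate of 400 KiB/s becomes 409 600 bytes/s handed to the shaper as writeLimit.
val maxBurstRateKibPerSecond = 400L
val writeLimitBytesPerSecond = 1024 * maxBurstRateKibPerSecond   // = 409_600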
src/main/kotlin/mdnet/base/Statistics.kt (new file, 11 lines)
@@ -0,0 +1,11 @@
package mdnet.base

import com.google.gson.annotations.SerializedName

data class Statistics(
@field:SerializedName("requests_served") val requestsServed: Int = 0,
@field:SerializedName("cache_hits") val cacheHits: Int = 0,
@field:SerializedName("cache_misses") val cacheMisses: Int = 0,
@field:SerializedName("browser_cached") val browserCached: Int = 0,
@field:SerializedName("bytes_sent") val bytesSent: Long = 0
)
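This immutable data class replaces the AtomicInteger/AtomicLong-based Java Statistics deleted earlier in this diff; counters are now bumped by swapping in a modified copy through AtomicReference.getAndUpdate, as the surrounding Kotlin changes do. A minimal sketch (recordCacheHit is an illustrative helper, not part of the client):

import java.util.concurrent.atomic.AtomicReference
import mdnet.base.Statistics

val statistics = AtomicReference(Statistics())

// Copy-on-write update: build a modified copy and atomically swap it in.
fun recordCacheHit(bytes: Long) {
    statistics.getAndUpdate {
        it.copy(cacheHits = it.cacheHits + 1, bytesSent = it.bytesSent + bytes)
    }
}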
@@ -1,15 +1,18 @@
/* ktlint-disable no-wildcard-imports */
package mdnet.base
package mdnet.base.web

import mdnet.base.Constants
import mdnet.base.Netty
import mdnet.base.ServerSettings
import mdnet.base.Statistics
import mdnet.base.settings.ClientSettings
import mdnet.cache.CachingInputStream
import mdnet.cache.DiskLruCache
import org.apache.http.client.config.CookieSpecs
import org.apache.http.client.config.RequestConfig
import org.apache.http.impl.client.HttpClients
import org.http4k.client.ApacheClient
import org.http4k.core.BodyMode
import org.http4k.core.Filter
import org.http4k.core.HttpHandler
import org.http4k.core.Method
import org.http4k.core.Request
import org.http4k.core.Response
@@ -28,10 +31,6 @@ import java.io.BufferedInputStream
import java.io.BufferedOutputStream
import java.io.InputStream
import java.security.MessageDigest
import java.time.ZoneOffset
import java.time.ZonedDateTime
import java.time.format.DateTimeFormatter
import java.util.*
import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicReference
import javax.crypto.Cipher
@@ -40,7 +39,7 @@ import javax.crypto.CipherOutputStream
import javax.crypto.spec.SecretKeySpec

private val LOGGER = LoggerFactory.getLogger("Application")
private val THREADS_TO_ALLOCATE = 262144 // 2**18 // Honestly, no reason to not just let 'er rip. Inactive connections will expire on their own :D
private const val THREADS_TO_ALLOCATE = 262144 // 2**18 // Honestly, no reason to not just let 'er rip. Inactive connections will expire on their own :D

fun getServer(cache: DiskLruCache, serverSettings: ServerSettings, clientSettings: ClientSettings, statistics: AtomicReference<Statistics>): Http4kServer {
val executor = Executors.newCachedThreadPool()
@@ -58,7 +57,6 @@ fun getServer(cache: DiskLruCache, serverSettings: ServerSettings, clientSetting
.build())
.setMaxConnTotal(THREADS_TO_ALLOCATE)
.setMaxConnPerRoute(THREADS_TO_ALLOCATE)
// Have it at the maximum open sockets a user can have in most modern OSes. No reason to limit this, just limit it at the Netty side.
.build())

val app = { dataSaver: Boolean ->
@@ -82,8 +80,9 @@ fun getServer(cache: DiskLruCache, serverSettings: ServerSettings, clientSetting
md5Bytes("$chapterHash.$fileName")
}
val cacheId = printHexString(rc4Bytes)

statistics.get().requestsServed.incrementAndGet()
statistics.getAndUpdate {
it.copy(requestsServed = it.requestsServed + 1)
}

// Netty doesn't do Content-Length or Content-Type, so we have the pleasure of doing that ourselves
fun respondWithImage(input: InputStream, length: String?, type: String, lastModified: String?): Response =
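For context on the cacheId above: the key is an MD5 digest of "chapterHash.fileName" (one branch of the expression is shown in this hunk) rendered as hex. printHexString() itself is not shown in this diff, so hexString() below is an assumed stand-in:

import java.security.MessageDigest

fun md5Bytes(stringToHash: String): ByteArray =
    MessageDigest.getInstance("MD5").digest(stringToHash.toByteArray())

// Assumed stand-in for the printHexString() helper referenced above.
fun hexString(bytes: ByteArray): String =
    bytes.joinToString("") { "%02x".format(it) }

fun cacheIdOf(chapterHash: String, fileName: String): String =
    hexString(md5Bytes("$chapterHash.$fileName"))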
@@ -112,10 +111,12 @@ fun getServer(cache: DiskLruCache, serverSettings: ServerSettings, clientSetting

val snapshot = cache.get(cacheId)
if (snapshot != null) {
statistics.get().cacheHits.incrementAndGet()

// our files never change, so it's safe to use the browser cache
if (request.header("If-Modified-Since") != null) {
statistics.getAndUpdate {
it.copy(browserCached = it.browserCached + 1)
}

if (LOGGER.isInfoEnabled) {
LOGGER.info("Request for $sanitizedUri cached by browser")
}
@@ -126,6 +127,10 @@ fun getServer(cache: DiskLruCache, serverSettings: ServerSettings, clientSetting
Response(Status.NOT_MODIFIED)
.header("Last-Modified", lastModified)
} else {
statistics.getAndUpdate {
it.copy(cacheHits = it.cacheHits + 1)
}

if (LOGGER.isInfoEnabled) {
LOGGER.info("Request for $sanitizedUri hit cache")
}
@@ -136,7 +141,10 @@ fun getServer(cache: DiskLruCache, serverSettings: ServerSettings, clientSetting
)
}
} else {
statistics.get().cacheMisses.incrementAndGet()
statistics.getAndUpdate {
it.copy(cacheMisses = it.cacheMisses + 1)
}

if (LOGGER.isInfoEnabled) {
LOGGER.info("Request for $sanitizedUri missed cache")
}
@@ -224,33 +232,6 @@ private fun getRc4(key: ByteArray): Cipher {
return rc4
}

private val HTTP_TIME_FORMATTER = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss O", Locale.ENGLISH)

private fun addCommonHeaders(): Filter {
return Filter { next: HttpHandler ->
{ request: Request ->
val response = next(request)
response.header("Date", HTTP_TIME_FORMATTER.format(ZonedDateTime.now(ZoneOffset.UTC)))
.header("Server", "Mangadex@Home Node ${Constants.CLIENT_VERSION} (${Constants.CLIENT_BUILD})")
}
}
}

private fun catchAllHideDetails(): Filter {
return Filter { next: HttpHandler ->
{ request: Request ->
try {
next(request)
} catch (e: Exception) {
if (LOGGER.isWarnEnabled) {
LOGGER.warn("Request error detected", e)
}
Response(Status.INTERNAL_SERVER_ERROR)
}
}
}
}

private fun md5Bytes(stringToHash: String): ByteArray {
val digest = MessageDigest.getInstance("MD5")
return digest.digest(stringToHash.toByteArray())
src/main/kotlin/mdnet/base/web/WebUi.kt (new file, 48 lines)
@@ -0,0 +1,48 @@
/* ktlint-disable no-wildcard-imports */
package mdnet.base.web

import mdnet.base.Statistics
import mdnet.base.settings.WebSettings
import org.http4k.core.Body
import org.http4k.core.Method
import org.http4k.core.Response
import org.http4k.core.Status
import org.http4k.core.then
import org.http4k.filter.ServerFilters
import org.http4k.routing.ResourceLoader
import org.http4k.routing.bind
import org.http4k.routing.routes
import org.http4k.routing.singlePageApp
import org.http4k.server.Http4kServer
import org.http4k.server.Netty
import org.http4k.server.asServer
import java.util.concurrent.atomic.AtomicReference
import org.http4k.format.Gson.auto
import java.time.Instant

fun getUiServer(
webSettings: WebSettings,
statistics: AtomicReference<Statistics>,
statsMap: Map<Instant, Statistics>
): Http4kServer {
val statisticsLens = Body.auto<Statistics>().toLens()
val statsMapLens = Body.auto<Map<Instant, Statistics>>().toLens()

return catchAllHideDetails()
.then(ServerFilters.CatchLensFailure)
.then(addCommonHeaders())
.then(
routes(
"/api/stats" bind Method.GET to {
statisticsLens(statistics.get(), Response(Status.OK))
},
"/api/pastStats" bind Method.GET to {
synchronized(statsMap) {
statsMapLens(statsMap, Response(Status.OK))
}
},
singlePageApp(ResourceLoader.Classpath("/webui"))
)
)
.asServer(Netty(webSettings.uiPort))
}
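A usage sketch for the UI server above, assuming the WebSettings defaults shown in this diff (ui_port 8080): it serves the current Statistics as JSON at /api/stats, the rolling history at /api/pastStats, and the bundled single-page app from the /webui classpath folder.

import java.time.Instant
import java.util.concurrent.atomic.AtomicReference
import mdnet.base.Statistics
import mdnet.base.settings.WebSettings
import mdnet.base.web.getUiServer

fun main() {
    val statistics = AtomicReference(Statistics())
    val statsMap = mapOf(Instant.now() to statistics.get())

    val webUi = getUiServer(WebSettings(), statistics, statsMap).start()
    // GET http://localhost:8080/api/stats now returns the current Statistics as JSON.
    webUi.close()
}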
src/main/kotlin/mdnet/base/web/common.kt (new file, 43 lines)
@@ -0,0 +1,43 @@
/* ktlint-disable no-wildcard-imports */
package mdnet.base.web

import mdnet.base.Constants
import org.http4k.core.Filter
import org.http4k.core.HttpHandler
import org.http4k.core.Request
import org.http4k.core.Response
import org.http4k.core.Status
import org.slf4j.LoggerFactory
import java.time.ZoneOffset
import java.time.ZonedDateTime
import java.time.format.DateTimeFormatter
import java.util.*

private val HTTP_TIME_FORMATTER = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss O", Locale.ENGLISH)

private val LOGGER = LoggerFactory.getLogger("Application")

fun addCommonHeaders(): Filter {
return Filter { next: HttpHandler ->
{ request: Request ->
val response = next(request)
response.header("Date", HTTP_TIME_FORMATTER.format(ZonedDateTime.now(ZoneOffset.UTC)))
.header("Server", "Mangadex@Home Node ${Constants.CLIENT_VERSION} (${Constants.CLIENT_BUILD})")
}
}
}

fun catchAllHideDetails(): Filter {
return Filter { next: HttpHandler ->
{ request: Request ->
try {
next(request)
} catch (e: Exception) {
if (LOGGER.isWarnEnabled) {
LOGGER.warn("Request error detected", e)
}
Response(Status.INTERNAL_SERVER_ERROR)
}
}
}
}
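A short sketch of how these shared filters compose onto any http4k handler, mirroring their use in WebUi.kt above (the inline "ok" handler is illustrative only):

import mdnet.base.web.addCommonHeaders
import mdnet.base.web.catchAllHideDetails
import org.http4k.core.Request
import org.http4k.core.Response
import org.http4k.core.Status
import org.http4k.core.then

val handler = catchAllHideDetails()
    .then(addCommonHeaders())
    .then { _: Request -> Response(Status.OK).body("ok") }

// Every response now carries Date and Server headers, and any exception
// thrown by the inner handler is logged and collapsed to a plain 500.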
@@ -1,4 +1,5 @@
<configuration>
<shutdownHook/>
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>${file-level:-TRACE}</level>
@@ -477,6 +477,7 @@ function updateWithMessage(m) {
updateConsole(result.data, 2);
break;
case "stats":

updateValues();
break;
default: