Add hourly cache of stats
parent 505c917e56
commit 9f1f30eace
@@ -4,17 +4,19 @@ import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.reflect.TypeToken;
import mdnet.base.settings.ClientSettings;
import mdnet.base.settings.WebSettings;
import mdnet.base.web.ApplicationKt;
import mdnet.base.web.WebUiKt;
import mdnet.cache.DiskLruCache;
import mdnet.webui.WebConsole;
import org.http4k.server.Http4kServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

@@ -30,6 +32,14 @@ public class MangaDexClient {
    private final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
    private final ServerHandler serverHandler;
    private final ClientSettings clientSettings;

    private final Map<Instant, Statistics> statsMap = Collections
            .synchronizedMap(new LinkedHashMap<Instant, Statistics>(80) {
                @Override
                protected boolean removeEldestEntry(Map.Entry eldest) {
                    return this.size() > 80;
                }
            });
    private final AtomicReference<Statistics> statistics;

    private ServerSettings serverSettings;

@@ -74,8 +84,10 @@ public class MangaDexClient {
            }
        }

        statsMap.put(Instant.now(), statistics.get());

        if (clientSettings.getWebSettings() != null) {
            webUi = WebUiKt.getUiServer(clientSettings.getWebSettings(), statistics);
            webUi = WebUiKt.getUiServer(clientSettings.getWebSettings(), statistics, statsMap);
            webUi.start();
        }

@@ -99,6 +111,8 @@ public class MangaDexClient {
            counter++;
        }

        statsMap.put(Instant.now(), statistics.get());

        // if the server is offline then don't try and refresh certs
        if (engine == null) {
            return;

@@ -216,23 +230,6 @@ public class MangaDexClient {

        validateSettings(settings);

        if (settings.getWebSettings() != null) {
            WebSettings webSettings = settings.getWebSettings();

            // TODO: system.out redirect
            new Thread(() -> {
                WebConsole webConsole = new WebConsole(webSettings.getUiWebsocketPort()) {
                    @Override
                    protected void parseMessage(String message) {
                        System.out.println(message);
                        // TODO: something happens here
                        // the message should be formatted in json
                    }
                };
                // TODO: webConsole.sendMessage(t,m) whenever system.out is written to
            }).start();
        }

        if (LOGGER.isInfoEnabled()) {
            LOGGER.info("Client settings loaded: {}", settings);
        }
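The new statsMap above keeps an in-memory history of Statistics snapshots: a LinkedHashMap capped at 80 entries via removeEldestEntry, wrapped in Collections.synchronizedMap, and keyed by the Instant each snapshot was recorded. A minimal self-contained sketch of that pattern, assuming a simplified stand-in Statistics class (the names below are illustrative, not from the repository):

import java.time.Instant;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

public class StatsHistorySketch {
    // Simplified stand-in for the client's Statistics type.
    static final class Statistics {
        final long requestsServed;
        Statistics(long requestsServed) {
            this.requestsServed = requestsServed;
        }
    }

    // Insertion-ordered map that evicts its oldest entry once it holds more than
    // 80 snapshots; the synchronizedMap wrapper makes individual put/get calls thread-safe.
    static final Map<Instant, Statistics> STATS_MAP = Collections
            .synchronizedMap(new LinkedHashMap<Instant, Statistics>(80) {
                @Override
                protected boolean removeEldestEntry(Map.Entry<Instant, Statistics> eldest) {
                    return size() > 80;
                }
            });

    public static void main(String[] args) {
        // Simulate ~100 hourly ticks; only the newest 80 snapshots survive.
        Instant start = Instant.now();
        for (int hour = 0; hour < 100; hour++) {
            STATS_MAP.put(start.plusSeconds(3600L * hour), new Statistics(hour));
        }
        System.out.println(STATS_MAP.size()); // prints 80
    }
}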
src/main/java/mdnet/cache/DiskLruCache.java (vendored)

@@ -966,20 +966,7 @@ public final class DiskLruCache implements Closeable {
        Path oldCache = Paths.get(directory + File.separator + key + "." + i);
        Path newCache = Paths.get(directory + subKeyPath + File.separator + key + "." + i);

        File newCacheDirectory = new File(directory + subKeyPath, key + "." + i + ".tmp");
        newCacheDirectory.getParentFile().mkdirs();

        if (Files.exists(oldCache)) {
            try {
                Files.move(oldCache, newCache, StandardCopyOption.ATOMIC_MOVE);
            } catch (FileAlreadyExistsException faee) {
                try {
                    Files.delete(oldCache);
                } catch (IOException ex) {
                }
            } catch (IOException ex) {
            }
        }
        migrateCacheFile(i, oldCache, newCache);

        return new File(directory + subKeyPath, key + "." + i);
    }

@@ -989,6 +976,12 @@ public final class DiskLruCache implements Closeable {
        Path oldCache = Paths.get(directory + File.separator + key + "." + i + ".tmp");
        Path newCache = Paths.get(directory + subKeyPath + File.separator + key + "." + i + ".tmp");

        migrateCacheFile(i, oldCache, newCache);

        return new File(directory + subKeyPath, key + "." + i + ".tmp");
    }

    private void migrateCacheFile(int i, Path oldCache, Path newCache) {
        File newCacheDirectory = new File(directory + subKeyPath, key + "." + i + ".tmp");
        newCacheDirectory.getParentFile().mkdirs();

@@ -998,13 +991,11 @@ public final class DiskLruCache implements Closeable {
            } catch (FileAlreadyExistsException faee) {
                try {
                    Files.delete(oldCache);
                } catch (IOException ex) {
                } catch (IOException ignored) {
                }
            } catch (IOException ex) {
            } catch (IOException ignored) {
            }
        }

        return new File(directory + subKeyPath, key + "." + i + ".tmp");
    }
    }
}
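The DiskLruCache change above pulls the duplicated flat-to-nested migration logic into a single migrateCacheFile helper built around an atomic NIO move that tolerates an already-present target. A standalone sketch of the same move-with-fallback pattern; the CacheMigrationSketch class, the example paths, and the use of Files.createDirectories in place of the mkdirs() call are assumptions for illustration, not the vendored code:

import java.io.IOException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public class CacheMigrationSketch {
    // Moves oldCache to newCache atomically; if an earlier run or another thread
    // already created newCache, the stale oldCache is deleted instead.
    static void migrateCacheFile(Path oldCache, Path newCache) {
        if (!Files.exists(oldCache)) {
            return; // nothing to migrate
        }
        try {
            Files.createDirectories(newCache.getParent()); // ensure the sub-key directory exists
            Files.move(oldCache, newCache, StandardCopyOption.ATOMIC_MOVE);
        } catch (FileAlreadyExistsException faee) {
            try {
                Files.delete(oldCache);
            } catch (IOException ignored) {
                // best effort: leave the stale file behind
            }
        } catch (IOException ignored) {
            // migration is opportunistic; the caller uses the new path either way
        }
    }

    public static void main(String[] args) {
        // Hypothetical flat-to-nested cache layout, mirroring the key + "." + i naming in the diff.
        migrateCacheFile(Paths.get("cache/abcd1234.0"), Paths.get("cache/ab/cd/abcd1234.0"));
    }
}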
@@ -1,105 +0,0 @@
package mdnet.webui;

import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import org.java_websocket.WebSocket;
import org.java_websocket.handshake.ClientHandshake;
import org.java_websocket.server.WebSocketServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public abstract class WebConsole extends WebSocketServer {

    private final static Logger LOGGER = LoggerFactory.getLogger(WebConsole.class);

    public WebConsole(int port) {
        super(new InetSocketAddress(port));
    }

    @Override
    public void onOpen(WebSocket conn, ClientHandshake handshake) {
        LOGGER.info("Webclient {} connected", conn);
    }

    @Override
    public void onClose(WebSocket conn, int code, String reason, boolean remote) {
        LOGGER.info("Webclient {} disconnected: {} ", conn, reason);
    }

    @Override
    public void onMessage(WebSocket conn, String message) {
        parseMessage(message);
    }

    @Override
    public void onMessage(WebSocket conn, ByteBuffer message) {
        // parseMessage(message.array().toString());
    }

    @Override
    public void onError(WebSocket conn, Exception ex) {
        ex.printStackTrace();
        if (conn != null) {
            // some errors like port binding failed may not be assignable to a specific
            // websocket
        }
    }

    @Override
    public void onStart() {
        LOGGER.info("Listening for connections on port: {}", this.getPort());
        setConnectionLostTimeout(0);
        setConnectionLostTimeout(100);
    }

    protected abstract void parseMessage(String message);

    // void parseCommand(String x) {
    // switch (x) {
    // case "help":
    // this.broadcast(formatMessage("command", "Available commands:"));
    // this.broadcast(formatMessage("command", "you"));
    // this.broadcast(formatMessage("command", "are"));
    // this.broadcast(formatMessage("command", "big"));
    // this.broadcast(formatMessage("command", "gay"));
    // break;
    // case "stop":
    // this.broadcast(formatMessage("command", "Mangadex Client has shut down,
    // shutting down web client now"));
    // return;
    // default:
    // this.broadcast(formatMessage("command", "That command was not recognized"));
    // this.broadcast(formatMessage("command", "Try help for a list of available
    // commands"));
    // break;
    // }
    // }

    public void sendMessage(String type, Object message) {
        // JSONObject out = new JSONObject();
        // switch (type) {
        // case "command" :
        // out.put("type", "command");
        // out.put("data", message.toString());
        // break;
        // case "stats" :
        // out.put("type", "stats");
        // AtomicReference<Statistics> temp = (AtomicReference<Statistics>) message;
        // out.put("hits", temp.get().getCacheHits());
        // out.put("misses", temp.get().getCacheMisses());
        // out.put("bytes_sent", temp.get().getBytesSent());
        // out.put("req_served", temp.get().getRequestsServed());
        // out.put("dataval", "empty");
        // out.put("dataval", "empty");
        // out.put("dataval", "empty");
        // break;
        // case "auth" :
        // break;
        // default :
        // out.put("type", "command");
        // out.put("data", message.toString());
        // break;
        // }
        // broadcast(out.toString());
    }
}
@@ -18,9 +18,15 @@ import org.http4k.server.Netty
import org.http4k.server.asServer
import java.util.concurrent.atomic.AtomicReference
import org.http4k.format.Gson.auto
import java.time.Instant

fun getUiServer(webSettings: WebSettings, statistics: AtomicReference<Statistics>): Http4kServer {
fun getUiServer(
    webSettings: WebSettings,
    statistics: AtomicReference<Statistics>,
    statsMap: Map<Instant, Statistics>
): Http4kServer {
    val statisticsLens = Body.auto<Statistics>().toLens()
    val statsMapLens = Body.auto<Map<Instant, Statistics>>().toLens()

    return catchAllHideDetails()
        .then(ServerFilters.CatchLensFailure)

@@ -30,6 +36,11 @@ fun getUiServer(webSettings: WebSettings, statistics: AtomicReference<Statistics
            "/api/stats" bind Method.GET to {
                statisticsLens(statistics.get(), Response(Status.OK))
            },
            "/api/pastStats" bind Method.GET to {
                synchronized(statsMap) {
                    statsMapLens(statsMap, Response(Status.OK))
                }
            },
            singlePageApp(ResourceLoader.Classpath("/webui"))
        )
    )
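The new /api/pastStats route serializes the whole statsMap, which is why the handler wraps access in synchronized(statsMap): Collections.synchronizedMap guards individual calls only, so any iteration over the map must hold its monitor. A small Java sketch of that rule using Gson, which the client already depends on; the PastStatsJsonSketch class and the Long placeholder values stand in for the real Statistics snapshots and are not part of the commit:

import com.google.gson.Gson;
import java.time.Instant;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

public class PastStatsJsonSketch {
    private static final Gson GSON = new Gson();

    // Iterating a Collections.synchronizedMap requires holding its monitor;
    // copy under the lock, then serialize the stable snapshot outside it.
    static String toJson(Map<Instant, ?> statsMap) {
        Map<String, Object> snapshot = new LinkedHashMap<>();
        synchronized (statsMap) {
            for (Map.Entry<Instant, ?> entry : statsMap.entrySet()) {
                snapshot.put(entry.getKey().toString(), entry.getValue());
            }
        }
        return GSON.toJson(snapshot);
    }

    public static void main(String[] args) {
        Map<Instant, Long> statsMap = Collections.synchronizedMap(new LinkedHashMap<>());
        statsMap.put(Instant.now(), 42L); // e.g. requests served at that instant
        System.out.println(toJson(statsMap));
    }
}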
@@ -1,4 +1,5 @@
<configuration>
    <shutdownHook/>
    <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>log/latest.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">