" + '}';
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o)
+ return true;
+ if (o == null || getClass() != o.getClass())
+ return false;
+
+ ServerSettings that = (ServerSettings) o;
+
+ if (!imageServer.equals(that.imageServer))
+ return false;
+ return Objects.equals(tls, that.tls);
+ }
+
+ @Override
+ public int hashCode() {
+ int result = imageServer.hashCode();
+ result = 31 * result + (tls != null ? tls.hashCode() : 0);
+ return result;
+ }
+
+ public static final class TlsCert {
+ @SerializedName("created_at")
+ private final String createdAt;
+ @SerializedName("private_key")
+ private final String privateKey;
+ private final String certificate;
+
+ public TlsCert(String createdAt, String privateKey, String certificate) {
+ this.createdAt = Objects.requireNonNull(createdAt);
+ this.privateKey = Objects.requireNonNull(privateKey);
+ this.certificate = Objects.requireNonNull(certificate);
+ }
+
+ public String getCreatedAt() {
+ return createdAt;
+ }
+
+ public String getPrivateKey() {
+ return privateKey;
+ }
+
+ public String getCertificate() {
+ return certificate;
+ }
+
+ @Override
+ public String toString() {
+ return "TlsCert{" + "createdAt='" + createdAt + '\'' + ", privateKey='" + privateKey + '\''
+ + ", certificate='" + certificate + '\'' + '}';
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o)
+ return true;
+ if (o == null || getClass() != o.getClass())
+ return false;
+
+ TlsCert tlsCert = (TlsCert) o;
+
+ if (!createdAt.equals(tlsCert.createdAt))
+ return false;
+ if (!privateKey.equals(tlsCert.privateKey))
+ return false;
+ return certificate.equals(tlsCert.certificate);
+ }
+
+ @Override
+ public int hashCode() {
+ int result = createdAt.hashCode();
+ result = 31 * result + privateKey.hashCode();
+ result = 31 * result + certificate.hashCode();
+ return result;
+ }
+ }
+}
diff --git a/src/main/java/mdnet/base/Statistics.java b/src/main/java/mdnet/base/Statistics.java
new file mode 100644
index 0000000..29651d3
--- /dev/null
+++ b/src/main/java/mdnet/base/Statistics.java
@@ -0,0 +1,40 @@
+package mdnet.base;
+
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class Statistics {
+ private final AtomicInteger requestsServed;
+ private final AtomicInteger cacheHits;
+ private final AtomicInteger cacheMisses;
+ private final AtomicLong bytesSent;
+
+ public Statistics() {
+ requestsServed = new AtomicInteger();
+ cacheHits = new AtomicInteger();
+ cacheMisses = new AtomicInteger();
+ bytesSent = new AtomicLong();
+ }
+
+ public AtomicInteger getRequestsServed() {
+ return requestsServed;
+ }
+
+ public AtomicInteger getCacheHits() {
+ return cacheHits;
+ }
+
+ public AtomicInteger getCacheMisses() {
+ return cacheMisses;
+ }
+
+ public AtomicLong getBytesSent() {
+ return bytesSent;
+ }
+
+ @Override
+ public String toString() {
+ return "Statistics{" + "requestsServed=" + requestsServed + ", cacheHits=" + cacheHits + ", cacheMisses="
+ + cacheMisses + ", bytesSent=" + bytesSent + '}';
+ }
+}
diff --git a/src/main/java/mdnet/cache/DiskLruCache.java b/src/main/java/mdnet/cache/DiskLruCache.java
new file mode 100644
index 0000000..f983a86
--- /dev/null
+++ b/src/main/java/mdnet/cache/DiskLruCache.java
@@ -0,0 +1,949 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package mdnet.cache;
+
+import org.apache.commons.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedWriter;
+import java.io.Closeable;
+import java.io.EOFException;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.FilterOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * A cache that uses a bounded amount of space on a filesystem. Each cache entry
+ * has a string key and a fixed number of values. Each key must match the regex
+ * [a-z0-9_-]{1,120}. Values are byte sequences, accessible as
+ * streams or files. Each value must be between {@code 0} and
+ * {@code Integer.MAX_VALUE} bytes in length.
+ *
+ *
+ * The cache stores its data in a directory on the filesystem. This directory
+ * must be exclusive to the cache; the cache may delete or overwrite files from
+ * its directory. It is an error for multiple processes to use the same cache
+ * directory at the same time.
+ *
+ *
+ * This cache limits the number of bytes that it will store on the filesystem.
+ * When the number of stored bytes exceeds the limit, the cache will remove
+ * entries in the background until the limit is satisfied. The limit is not
+ * strict: the cache may temporarily exceed it while waiting for files to be
+ * deleted. The limit does not include filesystem overhead or the cache journal
+ * so space-sensitive applications should set a conservative limit.
+ *
+ *
+ * Clients call {@link #edit} to create or update the values of an entry. An
+ * entry may have only one editor at one time; if a value is not available to be
+ * edited then {@link #edit} will return null.
+ *
+ * - When an entry is being created it is necessary to supply
+ * a full set of values; the empty value should be used as a placeholder if
+ * necessary.
+ *
+ * - When an entry is being edited, it is not necessary to
+ * supply data for every value; values default to their previous value.
+ *
+ * Every {@link #edit} call must be matched by a call to {@link Editor#commit}
+ * or {@link Editor#abort}. Committing is atomic: a read observes the full set
+ * of values as they were before or after the commit, but never a mix of values.
+ *
+ *
+ * Clients call {@link #get} to read a snapshot of an entry. The read will
+ * observe the value at the time that {@link #get} was called. Updates and
+ * removals after the call do not impact ongoing reads.
+ *
+ *
+ * This class is tolerant of some I/O errors. If files are missing from the
+ * filesystem, the corresponding entries will be dropped from the cache. If an
+ * error occurs while writing a cache value, the edit will fail silently.
+ * Callers should handle other problems by catching {@code IOException} and
+ * responding appropriately.
+ */
+public final class DiskLruCache implements Closeable {
+ private final static Logger LOGGER = LoggerFactory.getLogger(DiskLruCache.class);
+
+ private static final String JOURNAL_FILE = "journal";
+ private static final String JOURNAL_FILE_TEMP = "journal.tmp";
+ private static final String JOURNAL_FILE_BACKUP = "journal.bkp";
+
+ private static final String MAGIC = "libcore.io.DiskLruCache";
+ private static final String VERSION_1 = "1";
+ private static final long ANY_SEQUENCE_NUMBER = -1;
+ private static final String STRING_KEY_PATTERN = "[a-z0-9_-]{1,120}";
+ public static final Pattern LEGAL_KEY_PATTERN = Pattern.compile(STRING_KEY_PATTERN);
+ private static final String CLEAN = "CLEAN";
+ private static final String DIRTY = "DIRTY";
+ private static final String REMOVE = "REMOVE";
+ private static final String READ = "READ";
+
+ /*
+ * This cache uses a journal file named "journal". A typical journal file looks
+ * like this: libcore.io.DiskLruCache 1 100 2
+ *
+ * CLEAN 3400330d1dfc7f3f7f4b8d4d803dfcf6 832 21054 DIRTY
+ * 335c4c6028171cfddfbaae1a9c313c52 CLEAN 335c4c6028171cfddfbaae1a9c313c52 3934
+ * 2342 REMOVE 335c4c6028171cfddfbaae1a9c313c52 DIRTY
+ * 1ab96a171faeeee38496d8b330771a7a CLEAN 1ab96a171faeeee38496d8b330771a7a 1600
+ * 234 READ 335c4c6028171cfddfbaae1a9c313c52 READ
+ * 3400330d1dfc7f3f7f4b8d4d803dfcf6
+ *
+ * The first five lines of the journal form its header. They are the constant
+ * string "libcore.io.DiskLruCache", the disk cache's version, the application's
+ * version, the value count, and a blank line.
+ *
+ * Each of the subsequent lines in the file is a record of the state of a cache
+ * entry. Each line contains space-separated values: a state, a key, and
+ * optional state-specific values. o DIRTY lines track that an entry is actively
+ * being created or updated. Every successful DIRTY action should be followed by
+ * a CLEAN or REMOVE action. DIRTY lines without a matching CLEAN or REMOVE
+ * indicate that temporary files may need to be deleted. o CLEAN lines track a
+ * cache entry that has been successfully published and may be read. A publish
+ * line is followed by the lengths of each of its values. o READ lines track
+ * accesses for LRU. o REMOVE lines track entries that have been deleted.
+ *
+ * The journal file is appended to as cache operations occur. The journal may
+ * occasionally be compacted by dropping redundant lines. A temporary file named
+ * "journal.tmp" will be used during compaction; that file should be deleted if
+ * it exists when the cache is opened.
+ */
+
+ private final File directory;
+ private final File journalFile;
+ private final File journalFileTmp;
+ private final File journalFileBackup;
+ private final int appVersion;
+ private long maxSize;
+ private final int valueCount;
+ private long size = 0;
+ private Writer journalWriter;
+ private final LinkedHashMap<String, Entry> lruEntries = new LinkedHashMap<>(0, 0.75f, true);
+ private int redundantOpCount;
+
+ /**
+ * To differentiate between old and current snapshots, each entry is given a
+ * sequence number each time an edit is committed. A snapshot is stale if its
+ * sequence number is not equal to its entry's sequence number.
+ */
+ private long nextSequenceNumber = 0;
+
+ /** This cache uses a single background thread to evict entries. */
+ final ThreadPoolExecutor executorService = new ThreadPoolExecutor(0, 1, 60L, TimeUnit.SECONDS,
+ new LinkedBlockingQueue<>());
+ private final Callable<Void> cleanupCallable = new Callable<Void>() {
+ public Void call() throws Exception {
+ synchronized (DiskLruCache.this) {
+ if (journalWriter == null) {
+ return null; // Closed.
+ }
+ trimToSize();
+ if (journalRebuildRequired()) {
+ rebuildJournal();
+ redundantOpCount = 0;
+ }
+ }
+ return null;
+ }
+ };
+
+ private DiskLruCache(File directory, int appVersion, int valueCount, long maxSize) {
+ this.directory = directory;
+ this.appVersion = appVersion;
+ this.journalFile = new File(directory, JOURNAL_FILE);
+ this.journalFileTmp = new File(directory, JOURNAL_FILE_TEMP);
+ this.journalFileBackup = new File(directory, JOURNAL_FILE_BACKUP);
+ this.valueCount = valueCount;
+ this.maxSize = maxSize;
+ }
+
+ /**
+ * Opens the cache in {@code directory}, creating a cache if none exists there.
+ *
+ * @param directory
+ * a writable directory
+ * @param valueCount
+ * the number of values per cache entry. Must be positive.
+ * @param maxSize
+ * the maximum number of bytes this cache should use to store
+ * @throws IOException
+ * if reading or writing the cache directory fails
+ */
+ public static DiskLruCache open(File directory, int appVersion, int valueCount, long maxSize) throws IOException {
+ if (maxSize <= 0) {
+ throw new IllegalArgumentException("maxSize <= 0");
+ }
+ if (valueCount <= 0) {
+ throw new IllegalArgumentException("valueCount <= 0");
+ }
+
+ // If a bkp file exists, use it instead.
+ File backupFile = new File(directory, JOURNAL_FILE_BACKUP);
+ if (backupFile.exists()) {
+ File journalFile = new File(directory, JOURNAL_FILE);
+ // If journal file also exists just delete backup file.
+ if (journalFile.exists()) {
+ backupFile.delete();
+ } else {
+ renameTo(backupFile, journalFile, false);
+ }
+ }
+
+ // Prefer to pick up where we left off.
+ DiskLruCache cache = new DiskLruCache(directory, appVersion, valueCount, maxSize);
+ if (cache.journalFile.exists()) {
+ try {
+ cache.readJournal();
+ cache.processJournal();
+ return cache;
+ } catch (IOException journalIsCorrupt) {
+ if (LOGGER.isWarnEnabled()) {
+ LOGGER.warn("DiskLruCache " + directory + " is corrupt - removing", journalIsCorrupt);
+ }
+ cache.delete();
+ }
+ }
+
+ // Create a new empty cache.
+ directory.mkdirs();
+ cache = new DiskLruCache(directory, appVersion, valueCount, maxSize);
+ cache.rebuildJournal();
+ return cache;
+ }
+
+ private void readJournal() throws IOException {
+ StrictLineReader reader = new StrictLineReader(new FileInputStream(journalFile), StandardCharsets.UTF_8);
+ try {
+ String magic = reader.readLine();
+ String version = reader.readLine();
+ String appVersionString = reader.readLine();
+ String valueCountString = reader.readLine();
+ String blank = reader.readLine();
+ if (!MAGIC.equals(magic) || !VERSION_1.equals(version)
+ || !Integer.toString(appVersion).equals(appVersionString)
+ || !Integer.toString(valueCount).equals(valueCountString) || !"".equals(blank)) {
+ throw new IOException("unexpected journal header: [" + magic + ", " + version + ", " + valueCountString
+ + ", " + blank + "]");
+ }
+
+ int lineCount = 0;
+ while (true) {
+ try {
+ readJournalLine(reader.readLine());
+ lineCount++;
+ } catch (EOFException endOfJournal) {
+ break;
+ }
+ }
+ redundantOpCount = lineCount - lruEntries.size();
+
+ // If we ended on a truncated line, rebuild the journal before appending to it.
+ if (reader.hasUnterminatedLine()) {
+ rebuildJournal();
+ } else {
+ journalWriter = new BufferedWriter(
+ new OutputStreamWriter(new FileOutputStream(journalFile, true), StandardCharsets.UTF_8));
+ }
+ } finally {
+ Util.closeQuietly(reader);
+ }
+ }
+
+ private void readJournalLine(String line) throws IOException {
+ int firstSpace = line.indexOf(' ');
+ if (firstSpace == -1) {
+ throw new IOException("unexpected journal line: " + line);
+ }
+
+ int keyBegin = firstSpace + 1;
+ int secondSpace = line.indexOf(' ', keyBegin);
+ final String key;
+ if (secondSpace == -1) {
+ key = line.substring(keyBegin);
+ if (firstSpace == REMOVE.length() && line.startsWith(REMOVE)) {
+ lruEntries.remove(key);
+ return;
+ }
+ } else {
+ key = line.substring(keyBegin, secondSpace);
+ }
+
+ Entry entry = lruEntries.get(key);
+ if (entry == null) {
+ entry = new Entry(key);
+ lruEntries.put(key, entry);
+ }
+
+ if (secondSpace != -1 && firstSpace == CLEAN.length() && line.startsWith(CLEAN)) {
+ String[] parts = line.substring(secondSpace + 1).split(" ");
+ entry.readable = true;
+ entry.currentEditor = null;
+ entry.setLengths(parts);
+ } else if (secondSpace == -1 && firstSpace == DIRTY.length() && line.startsWith(DIRTY)) {
+ entry.currentEditor = new Editor(entry);
+ } else if (secondSpace == -1 && firstSpace == READ.length() && line.startsWith(READ)) {
+ // This work was already done by calling lruEntries.get().
+ } else {
+ throw new IOException("unexpected journal line: " + line);
+ }
+ }
+
+ /**
+ * Computes the initial size and collects garbage as a part of opening the
+ * cache. Dirty entries are assumed to be inconsistent and will be deleted.
+ */
+ private void processJournal() throws IOException {
+ deleteIfExists(journalFileTmp);
+ for (Iterator<Entry> i = lruEntries.values().iterator(); i.hasNext();) {
+ Entry entry = i.next();
+ if (entry.currentEditor == null) {
+ for (int t = 0; t < valueCount; t++) {
+ size += entry.lengths[t];
+ }
+ } else {
+ entry.currentEditor = null;
+ for (int t = 0; t < valueCount; t++) {
+ deleteIfExists(entry.getCleanFile(t));
+ deleteIfExists(entry.getDirtyFile(t));
+ }
+ i.remove();
+ }
+ }
+ }
+
+ /**
+ * Creates a new journal that omits redundant information. This replaces the
+ * current journal if it exists.
+ */
+ private synchronized void rebuildJournal() throws IOException {
+ if (journalWriter != null) {
+ journalWriter.close();
+ }
+
+ Writer writer = new BufferedWriter(
+ new OutputStreamWriter(new FileOutputStream(journalFileTmp), StandardCharsets.UTF_8));
+ try {
+ writer.write(MAGIC);
+ writer.write("\n");
+ writer.write(VERSION_1);
+ writer.write("\n");
+ writer.write(Integer.toString(appVersion));
+ writer.write("\n");
+ writer.write(Integer.toString(valueCount));
+ writer.write("\n");
+ writer.write("\n");
+
+ for (Entry entry : lruEntries.values()) {
+ if (entry.currentEditor != null) {
+ writer.write(DIRTY + ' ' + entry.key + '\n');
+ } else {
+ writer.write(CLEAN + ' ' + entry.key + entry.getLengths() + '\n');
+ }
+ }
+ } finally {
+ Util.closeQuietly(writer);
+ }
+
+ if (journalFile.exists()) {
+ renameTo(journalFile, journalFileBackup, true);
+ }
+ renameTo(journalFileTmp, journalFile, false);
+ journalFileBackup.delete();
+
+ journalWriter = new BufferedWriter(
+ new OutputStreamWriter(new FileOutputStream(journalFile, true), StandardCharsets.UTF_8));
+ }
+
+ private static void deleteIfExists(File file) throws IOException {
+ if (file.exists() && !file.delete()) {
+ throw new IOException();
+ }
+ }
+
+ private static void renameTo(File from, File to, boolean deleteDestination) throws IOException {
+ if (deleteDestination) {
+ deleteIfExists(to);
+ }
+ if (!from.renameTo(to)) {
+ throw new IOException();
+ }
+ }
+
+ /**
+ * Returns a snapshot of the entry named {@code key}, or null if it doesn't
+ * exist is not currently readable. If a value is returned, it is moved to the
+ * head of the LRU queue.
+ */
+ public synchronized Snapshot get(String key) throws IOException {
+ checkNotClosed();
+ validateKey(key);
+ Entry entry = lruEntries.get(key);
+ if (entry == null) {
+ return null;
+ }
+
+ if (!entry.readable) {
+ return null;
+ }
+
+ // Open all streams eagerly to guarantee that we see a single published
+ // snapshot. If we opened streams lazily then the streams could come
+ // from different edits.
+ InputStream[] ins = new InputStream[valueCount];
+ try {
+ for (int i = 0; i < valueCount; i++) {
+ ins[i] = new FileInputStream(entry.getCleanFile(i));
+ }
+ } catch (FileNotFoundException e) {
+ // A file must have been deleted manually!
+ for (int i = 0; i < valueCount; i++) {
+ if (ins[i] != null) {
+ Util.closeQuietly(ins[i]);
+ } else {
+ break;
+ }
+ }
+ return null;
+ }
+
+ redundantOpCount++;
+ journalWriter.append(READ + ' ').append(key).append(String.valueOf('\n'));
+ if (journalRebuildRequired()) {
+ executorService.submit(cleanupCallable);
+ }
+
+ return new Snapshot(key, entry.sequenceNumber, ins, entry.lengths);
+ }
+
+ /**
+ * Returns an editor for the entry named {@code key}, or null if another edit is
+ * in progress.
+ */
+ public Editor edit(String key) throws IOException {
+ return edit(key, ANY_SEQUENCE_NUMBER);
+ }
+
+ private synchronized Editor edit(String key, long expectedSequenceNumber) throws IOException {
+ checkNotClosed();
+ validateKey(key);
+ Entry entry = lruEntries.get(key);
+ if (expectedSequenceNumber != ANY_SEQUENCE_NUMBER
+ && (entry == null || entry.sequenceNumber != expectedSequenceNumber)) {
+ return null; // Snapshot is stale.
+ }
+ if (entry == null) {
+ entry = new Entry(key);
+ lruEntries.put(key, entry);
+ } else if (entry.currentEditor != null) {
+ return null; // Another edit is in progress.
+ }
+
+ Editor editor = new Editor(entry);
+ entry.currentEditor = editor;
+
+ // Flush the journal before creating files to prevent file leaks.
+ journalWriter.write(DIRTY + ' ' + key + '\n');
+ journalWriter.flush();
+ return editor;
+ }
+
+ /** Returns the directory where this cache stores its data. */
+ public File getDirectory() {
+ return directory;
+ }
+
+ /**
+ * Returns the maximum number of bytes that this cache should use to store its
+ * data.
+ */
+ public synchronized long getMaxSize() {
+ return maxSize;
+ }
+
+ /**
+ * Changes the maximum number of bytes the cache can store and queues a job to
+ * trim the existing store, if necessary.
+ */
+ public synchronized void setMaxSize(long maxSize) {
+ this.maxSize = maxSize;
+ executorService.submit(cleanupCallable);
+ }
+
+ /**
+ * Returns the number of bytes currently being used to store the values in this
+ * cache. This may be greater than the max size if a background deletion is
+ * pending.
+ */
+ public synchronized long size() {
+ return size;
+ }
+
+ private synchronized void completeEdit(Editor editor, boolean success) throws IOException {
+ Entry entry = editor.entry;
+ if (entry.currentEditor != editor) {
+ throw new IllegalStateException();
+ }
+
+ // If this edit is creating the entry for the first time, every index must have
+ // a value.
+ if (success && !entry.readable) {
+ for (int i = 0; i < valueCount; i++) {
+ if (!editor.written[i]) {
+ editor.abort();
+ throw new IllegalStateException("Newly created entry didn't create value for index " + i);
+ }
+ if (!entry.getDirtyFile(i).exists()) {
+ editor.abort();
+ return;
+ }
+ }
+ }
+
+ for (int i = 0; i < valueCount; i++) {
+ File dirty = entry.getDirtyFile(i);
+ if (success) {
+ if (dirty.exists()) {
+ File clean = entry.getCleanFile(i);
+ dirty.renameTo(clean);
+ long oldLength = entry.lengths[i];
+ long newLength = clean.length();
+ entry.lengths[i] = newLength;
+ size = size - oldLength + newLength;
+ }
+ } else {
+ deleteIfExists(dirty);
+ }
+ }
+
+ redundantOpCount++;
+ entry.currentEditor = null;
+ if (entry.readable | success) {
+ entry.readable = true;
+ journalWriter.write(CLEAN + ' ' + entry.key + entry.getLengths() + '\n');
+ if (success) {
+ entry.sequenceNumber = nextSequenceNumber++;
+ }
+ } else {
+ lruEntries.remove(entry.key);
+ journalWriter.write(REMOVE + ' ' + entry.key + '\n');
+ }
+ journalWriter.flush();
+
+ if (size > maxSize || journalRebuildRequired()) {
+ executorService.submit(cleanupCallable);
+ }
+ }
+
+ /**
+ * We only rebuild the journal when it will halve the size of the journal and
+ * eliminate at least 2000 ops.
+ */
+ private boolean journalRebuildRequired() {
+ final int redundantOpCompactThreshold = 2000;
+ return redundantOpCount >= redundantOpCompactThreshold //
+ && redundantOpCount >= lruEntries.size();
+ }
+
+ /**
+ * Drops the entry for {@code key} if it exists and can be removed. Entries
+ * actively being edited cannot be removed.
+ *
+ * @return true if an entry was removed.
+ */
+ public synchronized boolean remove(String key) throws IOException {
+ checkNotClosed();
+ validateKey(key);
+ Entry entry = lruEntries.get(key);
+ if (entry == null || entry.currentEditor != null) {
+ return false;
+ }
+
+ for (int i = 0; i < valueCount; i++) {
+ File file = entry.getCleanFile(i);
+ if (file.exists() && !file.delete()) {
+ throw new IOException("failed to delete " + file);
+ }
+ size -= entry.lengths[i];
+ entry.lengths[i] = 0;
+ }
+
+ redundantOpCount++;
+ journalWriter.append(REMOVE + ' ' + key + '\n');
+ lruEntries.remove(key);
+
+ if (journalRebuildRequired()) {
+ executorService.submit(cleanupCallable);
+ }
+
+ return true;
+ }
+
+ /** Returns true if this cache has been closed. */
+ public synchronized boolean isClosed() {
+ return journalWriter == null;
+ }
+
+ private void checkNotClosed() {
+ if (journalWriter == null) {
+ throw new IllegalStateException("cache is closed");
+ }
+ }
+
+ /** Force buffered operations to the filesystem. */
+ public synchronized void flush() throws IOException {
+ checkNotClosed();
+ trimToSize();
+ journalWriter.flush();
+ }
+
+ /** Closes this cache. Stored values will remain on the filesystem. */
+ public synchronized void close() throws IOException {
+ if (journalWriter == null) {
+ return; // Already closed.
+ }
+ for (Entry entry : new ArrayList<>(lruEntries.values())) {
+ if (entry.currentEditor != null) {
+ entry.currentEditor.abort();
+ }
+ }
+ trimToSize();
+ journalWriter.close();
+ journalWriter = null;
+ }
+
+ private void trimToSize() throws IOException {
+ while (size > maxSize) {
+ Map.Entry<String, Entry> toEvict = lruEntries.entrySet().iterator().next();
+ remove(toEvict.getKey());
+ }
+ }
+
+ /**
+ * Closes the cache and deletes all of its stored values. This will delete all
+ * files in the cache directory including files that weren't created by the
+ * cache.
+ */
+ public void delete() throws IOException {
+ close();
+ Util.deleteContents(directory);
+ }
+
+ private void validateKey(String key) {
+ Matcher matcher = LEGAL_KEY_PATTERN.matcher(key);
+ if (!matcher.matches()) {
+ throw new IllegalArgumentException("keys must match regex " + STRING_KEY_PATTERN + ": \"" + key + "\"");
+ }
+ }
+
+ /** A snapshot of the values for an entry. */
+ public final class Snapshot implements Closeable {
+ private final String key;
+ private final long sequenceNumber;
+ private final InputStream[] ins;
+ private final long[] lengths;
+
+ private Snapshot(String key, long sequenceNumber, InputStream[] ins, long[] lengths) {
+ this.key = key;
+ this.sequenceNumber = sequenceNumber;
+ this.ins = ins;
+ this.lengths = lengths;
+ }
+
+ /**
+ * Returns an editor for this snapshot's entry, or null if either the entry has
+ * changed since this snapshot was created or if another edit is in progress.
+ */
+ public Editor edit() throws IOException {
+ return DiskLruCache.this.edit(key, sequenceNumber);
+ }
+
+ /** Returns the unbuffered stream with the value for {@code index}. */
+ public InputStream getInputStream(int index) {
+ return ins[index];
+ }
+
+ /** Returns the string value for {@code index}. */
+ public String getString(int index) throws IOException {
+ try (InputStream in = getInputStream(index)) {
+ return IOUtils.toString(in, StandardCharsets.UTF_8);
+ }
+ }
+
+ /** Returns the byte length of the value for {@code index}. */
+ public long getLength(int index) {
+ return lengths[index];
+ }
+
+ public void close() {
+ for (InputStream in : ins) {
+ Util.closeQuietly(in);
+ }
+ }
+ }
+
+ private static final OutputStream NULL_OUTPUT_STREAM = new OutputStream() {
+ @Override
+ public void write(int b) {
+ // Eat all writes silently. Nom nom.
+ }
+ };
+
+ /** Edits the values for an entry. */
+ public final class Editor {
+ private final Entry entry;
+ private final boolean[] written;
+ private boolean hasErrors;
+ private boolean committed;
+
+ private Editor(Entry entry) {
+ this.entry = entry;
+ this.written = (entry.readable) ? null : new boolean[valueCount];
+ }
+
+ /**
+ * Returns an unbuffered input stream to read the last committed value, or null
+ * if no value has been committed.
+ */
+ public InputStream newInputStream(int index) throws IOException {
+ synchronized (DiskLruCache.this) {
+ if (entry.currentEditor != this) {
+ throw new IllegalStateException();
+ }
+ if (!entry.readable) {
+ return null;
+ }
+ try {
+ return new FileInputStream(entry.getCleanFile(index));
+ } catch (FileNotFoundException e) {
+ return null;
+ }
+ }
+ }
+
+ /**
+ * Returns the last committed value as a string, or null if no value has been
+ * committed.
+ */
+ public String getString(int index) throws IOException {
+ InputStream in = newInputStream(index);
+ try {
+ return in != null ? IOUtils.toString(in, StandardCharsets.UTF_8) : null;
+ } finally {
+ Util.closeQuietly(in);
+ }
+ }
+
+ /**
+ * Write a string to the specified index.
+ */
+ public void setString(int index, String value) throws IOException {
+ OutputStream out = newOutputStream(index);
+ try {
+ IOUtils.write(value, out, StandardCharsets.UTF_8);
+ } finally {
+ Util.closeQuietly(out);
+ }
+ }
+
+ /**
+ * Returns a new unbuffered output stream to write the value at {@code index}.
+ * If the underlying output stream encounters errors when writing to the
+ * filesystem, this edit will be aborted when {@link #commit} is called. The
+ * returned output stream does not throw IOExceptions.
+ */
+ public OutputStream newOutputStream(int index) {
+ if (index < 0 || index >= valueCount) {
+ throw new IllegalArgumentException("Expected index " + index + " to "
+ + "be greater than 0 and less than the maximum value count " + "of " + valueCount);
+ }
+ synchronized (DiskLruCache.this) {
+ if (entry.currentEditor != this) {
+ throw new IllegalStateException();
+ }
+ if (!entry.readable) {
+ written[index] = true;
+ }
+ File dirtyFile = entry.getDirtyFile(index);
+ FileOutputStream outputStream;
+ try {
+ outputStream = new FileOutputStream(dirtyFile);
+ } catch (FileNotFoundException e) {
+ // Attempt to recreate the cache directory.
+ directory.mkdirs();
+ try {
+ outputStream = new FileOutputStream(dirtyFile);
+ } catch (FileNotFoundException e2) {
+ // We are unable to recover. Silently eat the writes.
+ return NULL_OUTPUT_STREAM;
+ }
+ }
+ return new FaultHidingOutputStream(outputStream);
+ }
+ }
+
+ /**
+ * Commits this edit so it is visible to readers. This releases the edit lock so
+ * another edit may be started on the same key.
+ */
+ public void commit() throws IOException {
+ if (hasErrors) {
+ completeEdit(this, false);
+ remove(entry.key); // The previous entry is stale.
+ } else {
+ completeEdit(this, true);
+ }
+ committed = true;
+ }
+
+ /**
+ * Aborts this edit. This releases the edit lock so another edit may be started
+ * on the same key.
+ */
+ public void abort() throws IOException {
+ completeEdit(this, false);
+ }
+
+ public long getLength(int index) {
+ return entry.getDirtyFile(index).length();
+ }
+
+ public void abortUnlessCommitted() {
+ if (!committed) {
+ try {
+ abort();
+ } catch (IOException ignored) {
+ }
+ }
+ }
+
+ private class FaultHidingOutputStream extends FilterOutputStream {
+ private FaultHidingOutputStream(OutputStream out) {
+ super(out);
+ }
+
+ @Override
+ public void write(int oneByte) {
+ try {
+ out.write(oneByte);
+ } catch (IOException e) {
+ hasErrors = true;
+ }
+ }
+
+ @Override
+ public void write(byte[] buffer, int offset, int length) {
+ try {
+ out.write(buffer, offset, length);
+ } catch (IOException e) {
+ hasErrors = true;
+ }
+ }
+
+ @Override
+ public void close() {
+ try {
+ out.close();
+ } catch (IOException e) {
+ hasErrors = true;
+ }
+ }
+
+ @Override
+ public void flush() {
+ try {
+ out.flush();
+ } catch (IOException e) {
+ hasErrors = true;
+ }
+ }
+ }
+ }
+
+ private final class Entry {
+ private final String key;
+
+ /** Lengths of this entry's files. */
+ private final long[] lengths;
+
+ /** True if this entry has ever been published. */
+ private boolean readable;
+
+ /** The ongoing edit or null if this entry is not being edited. */
+ private Editor currentEditor;
+
+ /** The sequence number of the most recently committed edit to this entry. */
+ private long sequenceNumber;
+
+ private Entry(String key) {
+ this.key = key;
+ this.lengths = new long[valueCount];
+ }
+
+ public String getLengths() {
+ StringBuilder result = new StringBuilder();
+ for (long size : lengths) {
+ result.append(' ').append(size);
+ }
+ return result.toString();
+ }
+
+ /** Set lengths using decimal numbers like "10123". */
+ private void setLengths(String[] strings) throws IOException {
+ if (strings.length != valueCount) {
+ throw invalidLengths(strings);
+ }
+
+ try {
+ for (int i = 0; i < strings.length; i++) {
+ lengths[i] = Long.parseLong(strings[i]);
+ }
+ } catch (NumberFormatException e) {
+ throw invalidLengths(strings);
+ }
+ }
+
+ private IOException invalidLengths(String[] strings) throws IOException {
+ throw new IOException("unexpected journal line: " + java.util.Arrays.toString(strings));
+ }
+
+ public File getCleanFile(int i) {
+ return new File(directory, key + "." + i);
+ }
+
+ public File getDirtyFile(int i) {
+ return new File(directory, key + "." + i + ".tmp");
+ }
+ }
+}
diff --git a/src/main/java/mdnet/cache/StrictLineReader.java b/src/main/java/mdnet/cache/StrictLineReader.java
new file mode 100644
index 0000000..8677a9c
--- /dev/null
+++ b/src/main/java/mdnet/cache/StrictLineReader.java
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package mdnet.cache;
+
+import java.io.ByteArrayOutputStream;
+import java.io.Closeable;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.UnsupportedEncodingException;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Buffers input from an {@link InputStream} for reading lines.
+ *
+ *
+ * This class is used for buffered reading of lines. For purposes of this class,
+ * a line ends with "\n" or "\r\n". End of input is reported by throwing
+ * {@code EOFException}. Unterminated line at end of input is invalid and will
+ * be ignored, the caller may use {@code
+ * hasUnterminatedLine()} to detect it after catching the {@code EOFException}.
+ *
+ *
+ * This class is intended for reading input that strictly consists of lines,
+ * such as line-based cache entries or cache journal. Unlike the
+ * {@link java.io.BufferedReader} which in conjunction with
+ * {@link java.io.InputStreamReader} provides similar functionality, this class
+ * uses different end-of-input reporting and a more restrictive definition of a
+ * line.
+ *
+ *
+ * This class supports only charsets that encode '\r' and '\n' as a single byte
+ * with value 13 and 10, respectively, and the representation of no other
+ * character contains these values. We currently check in constructor that the
+ * charset is UTF-8.
+ */
+final class StrictLineReader implements Closeable {
+    private static final byte CR = (byte) '\r';
+    private static final byte LF = (byte) '\n';
+
+    private final InputStream in;
+    private final Charset charset;
+
+    /*
+     * Buffered data is stored in {@code buf}. As long as no exception occurs, 0 <=
+     * pos <= end and the data in the range [pos, end) is buffered for reading. At
+     * end of input, if there is an unterminated line, we set end == -1, otherwise
+     * end == pos. If the underlying {@code InputStream} throws an {@code
+     * IOException}, end may remain as either pos or -1.
+     */
+    private byte[] buf;
+    private int pos;
+    private int end;
+
+    /**
+     * Constructs a new {@code LineReader} with the specified charset and the
+     * default capacity (8 KiB).
+     *
+     * @param in
+     *            the {@code InputStream} to read data from.
+     * @param charset
+     *            the charset used to decode data. Only UTF-8 is supported.
+     * @throws NullPointerException
+     *             if {@code in} or {@code charset} is null.
+     * @throws IllegalArgumentException
+     *             if the specified charset is not supported.
+     */
+    public StrictLineReader(InputStream in, Charset charset) {
+        this(in, 8192, charset);
+    }
+
+    /**
+     * Constructs a new {@code LineReader} with the specified capacity and charset.
+     *
+     * @param in
+     *            the {@code InputStream} to read data from.
+     * @param capacity
+     *            the capacity of the buffer; must be strictly positive.
+     * @param charset
+     *            the charset used to decode data. Only UTF-8 is supported.
+     * @throws NullPointerException
+     *             if {@code in} or {@code charset} is null.
+     * @throws IllegalArgumentException
+     *             if {@code capacity} is not positive or the specified charset
+     *             is not supported.
+     */
+    public StrictLineReader(InputStream in, int capacity, Charset charset) {
+        if (in == null || charset == null) {
+            throw new NullPointerException();
+        }
+        // BUG FIX: this check was "capacity < 0", which accepted capacity == 0.
+        // A zero-length buffer makes fillBuf() read 0 bytes forever, so
+        // readLine() would spin in an infinite loop; reject it up front, which
+        // is also what the exception message always claimed.
+        if (capacity <= 0) {
+            throw new IllegalArgumentException("capacity <= 0");
+        }
+        if (!(charset.equals(StandardCharsets.UTF_8))) {
+            throw new IllegalArgumentException("Unsupported encoding");
+        }
+
+        this.in = in;
+        this.charset = charset;
+        buf = new byte[capacity];
+    }
+
+    /**
+     * Closes the reader by closing the underlying {@code InputStream} and marking
+     * this reader as closed.
+     *
+     * @throws IOException
+     *             for errors when closing the underlying {@code InputStream}.
+     */
+    public void close() throws IOException {
+        synchronized (in) {
+            // buf == null doubles as the "closed" marker; closing twice is a no-op.
+            if (buf != null) {
+                buf = null;
+                in.close();
+            }
+        }
+    }
+
+    /**
+     * Reads the next line. A line ends with {@code "\n"} or {@code "\r\n"}, this
+     * end of line marker is not included in the result.
+     *
+     * @return the next line from the input.
+     * @throws IOException
+     *             for underlying {@code InputStream} errors.
+     * @throws EOFException
+     *             for the end of source stream.
+     */
+    public String readLine() throws IOException {
+        synchronized (in) {
+            if (buf == null) {
+                throw new IOException("LineReader is closed");
+            }
+
+            // Read more data if we are at the end of the buffered data.
+            // Though it's an error to read after an exception, we will let
+            // fillBuf() throw again if that happens; thus we need to handle
+            // end == -1 as well as end == pos.
+            if (pos >= end) {
+                fillBuf();
+            }
+            // Try to find LF in the buffered data and return the line if successful.
+            for (int i = pos; i != end; ++i) {
+                if (buf[i] == LF) {
+                    int lineEnd = (i != pos && buf[i - 1] == CR) ? i - 1 : i;
+                    // Charset overload: unlike the String-name overload it cannot
+                    // throw UnsupportedEncodingException.
+                    String res = new String(buf, pos, lineEnd - pos, charset);
+                    pos = i + 1;
+                    return res;
+                }
+            }
+
+            // Let's anticipate up to 80 characters on top of those already read.
+            ByteArrayOutputStream out = new ByteArrayOutputStream(end - pos + 80) {
+                @Override
+                public String toString() {
+                    // 'buf' and 'count' here are ByteArrayOutputStream's fields;
+                    // strip a trailing CR so "\r\n" endings match "\n" endings.
+                    int length = (count > 0 && buf[count - 1] == CR) ? count - 1 : count;
+                    return new String(buf, 0, length, charset);
+                }
+            };
+
+            while (true) {
+                out.write(buf, pos, end - pos);
+                // Mark unterminated line in case fillBuf throws EOFException or IOException.
+                end = -1;
+                fillBuf();
+                // Try to find LF in the buffered data and return the line if successful.
+                for (int i = pos; i != end; ++i) {
+                    if (buf[i] == LF) {
+                        if (i != pos) {
+                            out.write(buf, pos, i - pos);
+                        }
+                        pos = i + 1;
+                        return out.toString();
+                    }
+                }
+            }
+        }
+    }
+
+    /** Returns true if the last read hit end-of-input in the middle of a line. */
+    public boolean hasUnterminatedLine() {
+        return end == -1;
+    }
+
+    /**
+     * Reads new input data into the buffer. Call only with pos == end or end == -1,
+     * depending on the desired outcome if the function throws.
+     */
+    private void fillBuf() throws IOException {
+        int result = in.read(buf, 0, buf.length);
+        if (result == -1) {
+            throw new EOFException();
+        }
+        pos = 0;
+        end = result;
+    }
+}
diff --git a/src/main/java/mdnet/cache/Util.java b/src/main/java/mdnet/cache/Util.java
new file mode 100644
index 0000000..74078e8
--- /dev/null
+++ b/src/main/java/mdnet/cache/Util.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package mdnet.cache;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.IOException;
+
+/** Junk drawer of utility methods. */
+/** Junk drawer of utility methods. */
+final class Util {
+    private Util() {
+        // Static helpers only; never instantiated.
+    }
+
+    /**
+     * Deletes the contents of {@code dir}. Throws an IOException if any file could
+     * not be deleted, or if {@code dir} is not a readable directory.
+     */
+    static void deleteContents(File dir) throws IOException {
+        final File[] entries = dir.listFiles();
+        if (entries == null) {
+            throw new IOException("not a readable directory: " + dir);
+        }
+        for (final File entry : entries) {
+            if (entry.isDirectory()) {
+                deleteContents(entry);
+            }
+            if (!entry.delete()) {
+                throw new IOException("failed to delete file: " + entry);
+            }
+        }
+    }
+
+    /**
+     * Closes {@code closeable} (if non-null), deliberately discarding any checked
+     * exception it throws; runtime exceptions still propagate.
+     */
+    static void closeQuietly(Closeable closeable) {
+        if (closeable == null) {
+            return;
+        }
+        try {
+            closeable.close();
+        } catch (RuntimeException rethrown) {
+            throw rethrown;
+        } catch (Exception ignored) {
+            // Best-effort close: checked failures are intentionally dropped.
+        }
+    }
+}
diff --git a/src/main/kotlin/mdnet/base/Application.kt b/src/main/kotlin/mdnet/base/Application.kt
new file mode 100644
index 0000000..fe7c339
--- /dev/null
+++ b/src/main/kotlin/mdnet/base/Application.kt
@@ -0,0 +1,184 @@
+/* ktlint-disable no-wildcard-imports */
+package mdnet.base
+
+import mdnet.cache.DiskLruCache
+import org.apache.http.client.config.CookieSpecs
+import org.apache.http.client.config.RequestConfig
+import org.apache.http.impl.client.HttpClients
+import org.http4k.client.ApacheClient
+import org.http4k.core.BodyMode
+import org.http4k.core.Filter
+import org.http4k.core.HttpHandler
+import org.http4k.core.Method
+import org.http4k.core.Request
+import org.http4k.core.Response
+import org.http4k.core.Status
+import org.http4k.core.then
+import org.http4k.filter.MaxAgeTtl
+import org.http4k.filter.ServerFilters
+import org.http4k.lens.Path
+import org.http4k.routing.bind
+import org.http4k.routing.routes
+import org.http4k.server.Http4kServer
+import org.http4k.server.asServer
+import org.slf4j.LoggerFactory
+import java.io.InputStream
+import java.security.MessageDigest
+import java.time.ZoneOffset
+import java.time.ZonedDateTime
+import java.time.format.DateTimeFormatter
+import java.util.*
+import java.util.concurrent.Executors
+import java.util.concurrent.atomic.AtomicReference
+import javax.crypto.Cipher
+import javax.crypto.CipherInputStream
+import javax.crypto.CipherOutputStream
+import javax.crypto.spec.SecretKeySpec
+
+private val LOGGER = LoggerFactory.getLogger("Application")
+
+// Builds the image server: serves /data/{chapterHash}/{fileName}, streaming
+// cache hits (RC4-deciphered) out of the DiskLruCache and proxying misses from
+// serverSettings.imageServer while teeing the bytes into the cache.
+// NOTE(review): `statistics: AtomicReference` carries no type argument here --
+// the `<Statistics>` parameter was likely stripped in transit; confirm against
+// the original source.
+fun getServer(cache: DiskLruCache, serverSettings: ServerSettings, clientSettings: ClientSettings, statistics: AtomicReference): Http4kServer {
+ val executor = Executors.newCachedThreadPool()
+
+ val client = ApacheClient(responseBodyMode = BodyMode.Stream, client = HttpClients.custom()
+ .setDefaultRequestConfig(RequestConfig.custom()
+ .setCookieSpec(CookieSpecs.IGNORE_COOKIES)
+ .build())
+ .build())
+
+ val app = { request: Request ->
+
+ // Cache key is the md5 of "chapterHash.fileName".
+ val chapterHash = Path.of("chapterHash")(request)
+ val fileName = Path.of("fileName")(request)
+ val cacheId = md5String("$chapterHash.$fileName")
+
+ statistics.get().requestsServed.incrementAndGet()
+
+ // Netty doesn't do Content-Length or Content-Type, so we have the pleasure of doing that ourselves
+ fun respond(input: InputStream, length: String, type: String): Response =
+ Response(Status.OK).header("Content-Length", length)
+ .header("Content-Type", type)
+ .header("X-Content-Type-Options", "nosniff")
+ .body(input, length.toLong())
+
+ val snapshot = cache.get(cacheId)
+ if (snapshot != null) {
+ statistics.get().cacheHits.incrementAndGet()
+ if (LOGGER.isTraceEnabled) {
+ LOGGER.trace("Request for $chapterHash/$fileName hit cache")
+ }
+
+ // Index 0 holds the (RC4-obfuscated) image bytes, index 1 the Content-Type.
+ respond(CipherInputStream(snapshot.getInputStream(0), getRc4(cacheId)),
+ snapshot.getLength(0).toString(), snapshot.getString(1))
+ } else {
+ statistics.get().cacheMisses.incrementAndGet()
+ if (LOGGER.isTraceEnabled) {
+ LOGGER.trace("Request for $chapterHash/$fileName missed cache")
+ }
+ val mdResponse = client(Request(Method.GET, "${serverSettings.imageServer}${request.uri}"))
+
+ if (mdResponse.status != Status.OK) {
+ if (LOGGER.isTraceEnabled) {
+ LOGGER.trace("Request for $chapterHash/$fileName errored with status {}", mdResponse.status)
+ }
+ mdResponse.close()
+ Response(mdResponse.status)
+ } else {
+ // NOTE(review): `!!` throws if the upstream omits either header --
+ // confirm the image server always sends Content-Length/Content-Type,
+ // otherwise this surfaces as a 500 via catchAllHideDetails().
+ val contentLength = mdResponse.header("Content-Length")!!
+ val contentType = mdResponse.header("Content-Type")!!
+
+ val editor = cache.edit(cacheId)
+
+ // A null editor means that this file is being written to
+ // concurrently so we skip the cache process
+ if (editor != null) {
+ if (LOGGER.isTraceEnabled) {
+ LOGGER.trace("Request for $chapterHash/$fileName is being cached and served")
+ }
+ editor.setString(1, contentType)
+
+ // Tee the upstream body to the client and, asynchronously, into the
+ // cache; the completion lambda commits only on a full-length download.
+ val tee = CachingInputStream(mdResponse.body.stream,
+ executor, CipherOutputStream(editor.newOutputStream(0), getRc4(cacheId))) {
+ // Note: if neither of the options get called/are in the log
+ // check that tee gets closed and for exceptions in this lambda
+ if (editor.getLength(0) == contentLength.toLong()) {
+ if (LOGGER.isTraceEnabled) {
+ LOGGER.trace("Cache download $chapterHash/$fileName committed")
+ }
+
+ editor.commit()
+ } else {
+ if (LOGGER.isTraceEnabled) {
+ LOGGER.trace("Cache download $chapterHash/$fileName aborted")
+ }
+
+ editor.abort()
+ }
+ }
+ respond(tee, contentLength, contentType)
+ } else {
+ if (LOGGER.isTraceEnabled) {
+ LOGGER.trace("Request for $chapterHash/$fileName is being served")
+ }
+
+ respond(mdResponse.body.stream, contentLength, contentType)
+ }
+ }
+ }
+ }
+
+ return catchAllHideDetails()
+ .then(ServerFilters.CatchLensFailure)
+ .then(addCommonHeaders())
+ .then(
+ routes(
+ "/data/{chapterHash}/{fileName}" bind Method.GET to app
+ )
+ )
+ .asServer(Netty(serverSettings.tls, clientSettings, statistics))
+}
+
+// Builds an RC4 stream cipher keyed on the given cache id.
+// NOTE(review): RC4 is cryptographically broken; it appears to serve only as
+// on-disk cache obfuscation here, not transport security -- confirm intent.
+private fun getRc4(key: String): Cipher =
+    Cipher.getInstance("RC4").apply {
+        init(Cipher.ENCRYPT_MODE, SecretKeySpec(key.toByteArray(), "RC4"))
+    }
+
+private val HTTP_TIME_FORMATTER = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss O", Locale.ENGLISH)
+
+// Adds the headers every response must carry: Date, Server, caching policy
+// and Timing-Allow-Origin.
+private fun addCommonHeaders(): Filter {
+    return Filter { next: HttpHandler ->
+        { request: Request ->
+            val now = ZonedDateTime.now(ZoneOffset.UTC)
+            val response = next(request)
+            response.header("Date", HTTP_TIME_FORMATTER.format(now))
+                .header("Server", "Mangadex@Home Node")
+                // BUG FIX: a second, hard-coded "public, max-age=604800"
+                // Cache-Control header used to be appended as well; http4k's
+                // header() adds rather than replaces, so clients received two
+                // conflicting Cache-Control headers. Emit exactly one, derived
+                // from Constants.MAX_AGE_CACHE (kept consistent with Expires).
+                .header("Cache-Control", listOf("public", MaxAgeTtl(Constants.MAX_AGE_CACHE).toHeaderValue()).joinToString(", "))
+                .header("Expires", HTTP_TIME_FORMATTER.format(now.plusSeconds(Constants.MAX_AGE_CACHE.seconds)))
+                .header("Timing-Allow-Origin", "https://mangadex.org")
+        }
+    }
+}
+
+// Outermost safety net: converts any uncaught Exception into a bare 500 so no
+// stack trace or internal detail ever reaches a client.
+private fun catchAllHideDetails(): Filter = Filter { next: HttpHandler ->
+    { request: Request ->
+        try {
+            next(request)
+        } catch (e: Exception) {
+            Response(Status.INTERNAL_SERVER_ERROR)
+        }
+    }
+}
+
+// Returns the lowercase hex MD5 digest of the given string's bytes.
+private fun md5String(stringToHash: String): String =
+    MessageDigest.getInstance("MD5")
+        .digest(stringToHash.toByteArray())
+        .joinToString(separator = "") { byte -> "%02x".format(byte) }
diff --git a/src/main/kotlin/mdnet/base/Keys.kt b/src/main/kotlin/mdnet/base/Keys.kt
new file mode 100644
index 0000000..02a0767
--- /dev/null
+++ b/src/main/kotlin/mdnet/base/Keys.kt
@@ -0,0 +1,145 @@
+//The code below is adapted from from https://github.com/Mastercard/client-encryption-java/blob/master/src/main/java/com/mastercard/developer/utils/EncryptionUtils.java
+//
+//Copyright (c) 2019 Mastercard
+//
+//Permission is hereby granted, free of charge, to any person obtaining a copy
+//of this software and associated documentation files (the "Software"), to deal
+//in the Software without restriction, including without limitation the rights
+//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+//copies of the Software, and to permit persons to whom the Software is
+//furnished to do so, subject to the following conditions:
+//
+//The above copyright notice and this permission notice shall be included in all
+//copies or substantial portions of the Software.
+//
+//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+//SOFTWARE.
+package mdnet.base
+
+import java.io.ByteArrayInputStream
+import java.io.ByteArrayOutputStream
+import java.io.InputStream
+import java.security.KeyFactory
+import java.security.PrivateKey
+import java.security.cert.CertificateFactory
+import java.security.cert.X509Certificate
+import java.security.spec.InvalidKeySpecException
+import java.security.spec.PKCS8EncodedKeySpec
+
+private const val PKCS_1_PEM_HEADER = "-----BEGIN RSA PRIVATE KEY-----"
+private const val PKCS_1_PEM_FOOTER = "-----END RSA PRIVATE KEY-----"
+private const val PKCS_8_PEM_HEADER = "-----BEGIN PRIVATE KEY-----"
+private const val PKCS_8_PEM_FOOTER = "-----END PRIVATE KEY-----"
+
+// Parses a PEM/DER-encoded X.509 certificate from the given string.
+fun getX509Cert(certificate: String): X509Certificate =
+    CertificateFactory.getInstance("X509")
+        .generateCertificate(ByteArrayInputStream(certificate.toByteArray())) as X509Certificate
+
+// Parses an RSA private key in PKCS#1 or PKCS#8 PEM form. As in the original,
+// `!!` raises a NullPointerException when neither PEM header is present.
+fun getPrivateKey(privateKey: String): PrivateKey = loadKey(privateKey)!!
+
+/**
+ * Loads an RSA private key from PEM text, dispatching on the PEM header:
+ * PKCS#1 ("BEGIN RSA PRIVATE KEY") or PKCS#8 ("BEGIN PRIVATE KEY").
+ * Returns null when neither header is found.
+ */
+fun loadKey(keyDataString: String): PrivateKey? {
+    if (keyDataString.contains(PKCS_1_PEM_HEADER)) {
+        // OpenSSL / PKCS#1 Base64 PEM encoded file
+        val fixedString = keyDataString.replace(PKCS_1_PEM_HEADER, "").replace(PKCS_1_PEM_FOOTER, "")
+        return readPkcs1PrivateKey(base64Decode(fixedString))
+    }
+    if (keyDataString.contains(PKCS_8_PEM_HEADER)) {
+        // PKCS#8 Base64 PEM encoded file.
+        // BUG FIX: this branch used to call readPkcs1PrivateKey, which wraps
+        // the already-PKCS#8 bytes in a second PKCS#8 envelope and can never
+        // parse; PKCS#8 data must go straight to readPkcs8PrivateKey.
+        val fixedString = keyDataString.replace(PKCS_8_PEM_HEADER, "").replace(PKCS_8_PEM_FOOTER, "")
+        return readPkcs8PrivateKey(base64Decode(fixedString))
+    }
+
+    return null
+}
+
+// Decodes DER PKCS#8 bytes into an RSA PrivateKey.
+// NOTE(review): the provider is pinned to "SunRsaSign", which throws
+// NoSuchProviderException on JVMs without that provider; the upstream
+// Mastercard utility this is adapted from is the reference -- confirm the pin
+// is intentional.
+private fun readPkcs8PrivateKey(pkcs8Bytes: ByteArray): PrivateKey? {
+ val keyFactory = KeyFactory.getInstance("RSA", "SunRsaSign")
+ val keySpec = PKCS8EncodedKeySpec(pkcs8Bytes)
+ return try {
+ keyFactory.generatePrivate(keySpec)
+ } catch (e: InvalidKeySpecException) {
+ // Wrap with context; original cause preserved for diagnostics.
+ throw IllegalArgumentException("Unexpected key format!", e)
+ }
+}
+
+// Converts raw PKCS#1 (RSAPrivateKey) DER bytes into a PKCS#8 PrivateKeyInfo
+// by prepending a fixed 22-byte header, then parses it via readPkcs8PrivateKey.
+// The header is: outer SEQUENCE, version INTEGER 0, AlgorithmIdentifier for
+// rsaEncryption (OID 1.2.840.113549.1.1.1 + NULL params), and an OCTET STRING
+// wrapping the PKCS#1 payload.
+// NOTE(review): both length fields use the two-byte long form (0x82), so this
+// assumes the key blob is at most 65535 bytes -- fine for RSA key sizes in
+// practice, but worth confirming for unusually large keys.
+private fun readPkcs1PrivateKey(pkcs1Bytes: ByteArray): PrivateKey? {
+ // We can't use Java internal APIs to parse ASN.1 structures, so we build a PKCS#8 key Java can understand
+ val pkcs1Length = pkcs1Bytes.size
+ val totalLength = pkcs1Length + 22
+ val pkcs8Header = byteArrayOf(
+ 0x30, 0x82.toByte(), (totalLength shr 8 and 0xff).toByte(), (totalLength and 0xff).toByte(), // Sequence + total length
+ 0x2, 0x1, 0x0, // Integer (0)
+ 0x30, 0xD, 0x6, 0x9, 0x2A, 0x86.toByte(), 0x48, 0x86.toByte(), 0xF7.toByte(), 0xD, 0x1, 0x1, 0x1, 0x5, 0x0, // Sequence: 1.2.840.113549.1.1.1, NULL
+ 0x4, 0x82.toByte(), (pkcs1Length shr 8 and 0xff).toByte(), (pkcs1Length and 0xff).toByte() // Octet string + length
+ )
+ val pkcs8bytes = join(pkcs8Header, pkcs1Bytes)
+ return readPkcs8PrivateKey(pkcs8bytes)
+}
+
+// Returns a new array holding byteArray1 immediately followed by byteArray2
+// (Kotlin's ByteArray.plus performs exactly the two-arraycopy concatenation).
+private fun join(byteArray1: ByteArray, byteArray2: ByteArray): ByteArray =
+    byteArray1 + byteArray2
+
+private val b64ints = intArrayOf(
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, -1, 63, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2,
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1, -1, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)
+
+/**
+ * Decodes Base64 text that may contain line breaks and other non-alphabet
+ * characters (as found in PEM bodies); such characters are skipped.
+ *
+ * BUG FIX: the previous hand-rolled decoder indexed its lookup table with
+ * `valueBytes[i].toInt()`, which sign-extends, so any input byte >= 0x80
+ * produced a negative index and threw ArrayIndexOutOfBoundsException. The
+ * JDK MIME decoder has the same skip-non-alphabet semantics for well-formed
+ * PEM content without that failure mode (fully qualified to avoid touching
+ * the import block).
+ */
+private fun base64Decode(value: String): ByteArray =
+    java.util.Base64.getMimeDecoder().decode(value)
diff --git a/src/main/kotlin/mdnet/base/Netty.kt b/src/main/kotlin/mdnet/base/Netty.kt
new file mode 100644
index 0000000..6e18e13
--- /dev/null
+++ b/src/main/kotlin/mdnet/base/Netty.kt
@@ -0,0 +1,126 @@
+package mdnet.base
+
+import io.netty.bootstrap.ServerBootstrap
+import io.netty.buffer.Unpooled
+import io.netty.channel.ChannelFactory
+import io.netty.channel.ChannelFuture
+import io.netty.channel.ChannelHandler.Sharable
+import io.netty.channel.ChannelHandlerContext
+import io.netty.channel.ChannelInboundHandlerAdapter
+import io.netty.channel.ChannelInitializer
+import io.netty.channel.ChannelOption
+import io.netty.channel.ServerChannel
+import io.netty.channel.nio.NioEventLoopGroup
+import io.netty.channel.socket.SocketChannel
+import io.netty.channel.socket.nio.NioServerSocketChannel
+import io.netty.handler.codec.http.DefaultFullHttpResponse
+import io.netty.handler.codec.http.HttpHeaderNames
+import io.netty.handler.codec.http.HttpObjectAggregator
+import io.netty.handler.codec.http.HttpResponseStatus
+import io.netty.handler.codec.http.HttpServerCodec
+import io.netty.handler.codec.http.HttpUtil
+import io.netty.handler.codec.http.HttpVersion
+import io.netty.handler.ssl.OptionalSslHandler
+import io.netty.handler.ssl.SslContextBuilder
+import io.netty.handler.ssl.SslHandler
+import io.netty.handler.stream.ChunkedWriteHandler
+import io.netty.handler.traffic.GlobalTrafficShapingHandler
+import io.netty.handler.traffic.TrafficCounter
+import org.http4k.core.HttpHandler
+import org.http4k.server.Http4kChannelHandler
+import org.http4k.server.Http4kServer
+import org.http4k.server.ServerConfig
+import java.net.InetSocketAddress
+import java.nio.charset.StandardCharsets
+import java.util.concurrent.TimeUnit
+import java.util.concurrent.atomic.AtomicInteger
+import java.util.concurrent.atomic.AtomicReference
+
+// Rejects new connections with a 503 once MAX_CONCURRENT_CONNECTIONS is
+// reached. Sharable: one instance is installed in every channel pipeline.
+@Sharable
+class ConnectionCounter : ChannelInboundHandlerAdapter() {
+    private val connections = AtomicInteger()
+
+    override fun channelActive(ctx: ChannelHandlerContext) {
+        val sslHandler = ctx.pipeline()[SslHandler::class.java]
+
+        if (sslHandler != null) {
+            // Defer counting until the TLS handshake completes; a failed
+            // handshake is never counted (see channelInactive below).
+            sslHandler.handshakeFuture().addListener { future ->
+                if (future.isSuccess) {
+                    handleConnection(ctx)
+                }
+            }
+        } else {
+            handleConnection(ctx)
+        }
+    }
+
+    private fun handleConnection(ctx: ChannelHandlerContext) {
+        // Mark this channel as counted so channelInactive() only decrements
+        // connections it actually incremented. BUG FIX: the original
+        // decremented unconditionally, so channels that died before or during
+        // the TLS handshake (never incremented) drove the counter negative,
+        // silently raising the effective connection limit.
+        ctx.channel().attr(COUNTED).set(true)
+        if (connections.incrementAndGet() <= Constants.MAX_CONCURRENT_CONNECTIONS) {
+            super.channelActive(ctx)
+        } else {
+            val response = Unpooled.copiedBuffer(Constants.OVERLOADED_MESSAGE, StandardCharsets.UTF_8)
+            val res =
+                DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.SERVICE_UNAVAILABLE, response)
+            res.headers().set(HttpHeaderNames.CONTENT_TYPE, "text/html; charset=UTF-8")
+            HttpUtil.setContentLength(res, response.readableBytes().toLong())
+
+            ctx.writeAndFlush(res)
+            ctx.close()
+        }
+    }
+
+    override fun channelInactive(ctx: ChannelHandlerContext) {
+        super.channelInactive(ctx)
+        if (ctx.channel().attr(COUNTED).getAndSet(false) == true) {
+            connections.decrementAndGet()
+        }
+    }
+
+    companion object {
+        // Per-channel flag recording that this handler counted the channel
+        // (fully qualified to avoid touching the import block).
+        private val COUNTED: io.netty.util.AttributeKey<Boolean> =
+            io.netty.util.AttributeKey.valueOf("mdnet-connection-counted")
+    }
+}
+
+// http4k ServerConfig backed by a hand-assembled Netty pipeline: optional TLS,
+// connection counting, global burst-rate shaping and chunked writes.
+// NOTE(review): the generic arguments below (`AtomicReference<Statistics>`,
+// `ChannelInitializer<SocketChannel>`) were missing in the transported text --
+// almost certainly stripped in transit, restored here; confirm `Statistics` is
+// the intended type parameter.
+class Netty(private val tls: ServerSettings.TlsCert, private val clientSettings: ClientSettings, private val stats: AtomicReference<Statistics>) : ServerConfig {
+    override fun toServer(httpHandler: HttpHandler): Http4kServer = object : Http4kServer {
+        private val masterGroup = NioEventLoopGroup()
+        private val workerGroup = NioEventLoopGroup()
+        private lateinit var closeFuture: ChannelFuture
+        private lateinit var address: InetSocketAddress
+
+        private val counter = ConnectionCounter()
+
+        // Shapes all channels globally; doAccounting fires every 50 ms.
+        private val burstLimiter = object : GlobalTrafficShapingHandler(
+            workerGroup, 1024 * clientSettings.maxBurstRateKibPerSecond, 0, 50) {
+            override fun doAccounting(counter: TrafficCounter) {
+                // BUG FIX: this used to add cumulativeWrittenBytes() (the total
+                // since startup) on every 50 ms tick, overcounting bytesSent
+                // enormously; lastWrittenBytes() is the per-interval delta.
+                stats.get().bytesSent.getAndAdd(counter.lastWrittenBytes())
+            }
+        }
+
+        override fun start(): Http4kServer = apply {
+            val sslContext = SslContextBuilder.forServer(getPrivateKey(tls.privateKey), getX509Cert(tls.certificate)).build()
+
+            val bootstrap = ServerBootstrap()
+            bootstrap.group(masterGroup, workerGroup)
+                .channelFactory(ChannelFactory { NioServerSocketChannel() })
+                .childHandler(object : ChannelInitializer<SocketChannel>() {
+                    public override fun initChannel(ch: SocketChannel) {
+                        // OptionalSslHandler keeps plaintext connections working
+                        // alongside TLS on the same port.
+                        ch.pipeline().addLast("ssl", OptionalSslHandler(sslContext))
+
+                        ch.pipeline().addLast("codec", HttpServerCodec())
+                        ch.pipeline().addLast("counter", counter)
+                        ch.pipeline().addLast("aggregator", HttpObjectAggregator(65536))
+                        ch.pipeline().addLast("burstLimiter", burstLimiter)
+                        ch.pipeline().addLast("streamer", ChunkedWriteHandler())
+                        ch.pipeline().addLast("handler", Http4kChannelHandler(httpHandler))
+                    }
+                })
+                .option(ChannelOption.SO_BACKLOG, 1000)
+                .childOption(ChannelOption.SO_KEEPALIVE, true)
+
+            val channel = bootstrap.bind(clientSettings.clientPort).sync().channel()
+            address = channel.localAddress() as InetSocketAddress
+            closeFuture = channel.closeFuture()
+        }
+
+        override fun stop() = apply {
+            masterGroup.shutdownGracefully(5, 15, TimeUnit.SECONDS).sync()
+            workerGroup.shutdownGracefully(5, 15, TimeUnit.SECONDS).sync()
+            closeFuture.sync()
+        }
+
+        override fun port(): Int = if (clientSettings.clientPort > 0) clientSettings.clientPort else address.port
+    }
+}
diff --git a/src/main/resources/logback.xml b/src/main/resources/logback.xml
new file mode 100644
index 0000000..369fc65
--- /dev/null
+++ b/src/main/resources/logback.xml
@@ -0,0 +1,31 @@
+<configuration>
+    <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <file>log/latest.log</file>
+        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+            <fileNamePattern>log/logFile.%d{yyyy-MM-dd}.log</fileNamePattern>
+            <maxHistory>14</maxHistory>
+            <maxFileSize>5MB</maxFileSize>
+        </rollingPolicy>
+        <encoder>
+            <pattern>%d{YYYY-MM-dd HH:mm:ss} %-5level %logger{36} - %msg%n</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{YYYY-MM-dd HH:mm:ss} %-5level %logger{36} - %msg%n</pattern>
+        </encoder>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="FILE"/>
+        <appender-ref ref="STDOUT"/>
+    </root>
+</configuration>