/*
* <h3>Cache Optimization</h3>
* <p>To measure cache effectiveness, this class tracks three statistics:
* <ul>
* <li><strong>{@linkplain #requestCount() Request Count:}</strong> the number of HTTP
* requests issued since this cache was created.
* <li><strong>{@linkplain #networkCount() Network Count:}</strong> the number of those
* requests that required network use.
* <li><strong>{@linkplain #hitCount() Hit Count:}</strong> the number of those requests
* whose responses were served by the cache.
* </ul>
*
 * <p>Sometimes a request will result in a conditional cache hit. If the cache contains a stale copy of
* the response, the client will issue a conditional {@code GET}. The server will then send either
* the updated response if it has changed, or a short 'not modified' response if the client's copy
* is still valid. Such responses increment both the network count and hit count.
*
* <p>The best way to improve the cache hit rate is by configuring the web server to return
* cacheable responses. Although this client honors all <a
* href="http://tools.ietf.org/html/rfc7234">HTTP/1.1 (RFC 7234)</a> cache headers, it doesn't cache
* partial responses.
*/
// File: CacheStrategy.java

/**
 * Given a request and cached response, this figures out whether to use the network, the cache, or
 * both.
 *
 * <p>Selecting a cache strategy may add conditions to the request (like the "If-Modified-Since"
 * header for conditional GETs) or warnings to the cached response (if the cached data is
 * potentially stale).
 */
public final class CacheStrategy {
  /** The request to send on the network, or null if this call doesn't use the network. */
  public final Request networkRequest;

  /** The cached response to return or validate; or null if this call doesn't use a cache. */
  public final Response cacheResponse;

  // Remaining members elided in this excerpt.
  ......
}
public final class DiskLruCache implements Closeable, Flushable {
  /** Abstraction over file I/O; all reads and writes go through this. */
  final FileSystem fileSystem;

  /** Directory holding the cache value files and the journal. */
  final File directory;

  private final File journalFile;
  // NOTE(review): presumably the temp file used while rebuilding the journal — confirm.
  private final File journalFileTmp;
  private final File journalFileBackup;
  private final int appVersion;

  /** The maximum number of bytes this cache may store. */
  private long maxSize;

  /** The number of values per cache entry. */
  final int valueCount;

  /** The current size of the cache, in bytes. */
  private long size = 0;

  BufferedSink journalWriter;

  // Access-ordered (third constructor arg is true), so iteration visits
  // least-recently-used entries first.
  final LinkedHashMap<String, Entry> lruEntries = new LinkedHashMap<>(0, 0.75f, true);

  // Must be read and written when synchronized on 'this'.
  boolean initialized;
  boolean closed;
  boolean mostRecentTrimFailed;
  boolean mostRecentRebuildFailed;

  /**
   * To differentiate between old and current snapshots, each entry is given a sequence number each
   * time an edit is committed. A snapshot is stale if its sequence number is not equal to its
   * entry's sequence number.
   */
  private long nextSequenceNumber = 0;

  /** Used to run 'cleanupRunnable' for journal rebuilds. */
  private final Executor executor;

  private final Runnable cleanupRunnable = new Runnable() {
    public void run() {
      ......
    }
  };

  ...
}
/** One cache entry: {@code valueCount} values, each stored as a clean file plus a dirty temp file. */
private final class Entry {
  final String key;

  /** Lengths of this entry's files. */
  final long[] lengths;

  /** The published file for each value index, named "{key}.{i}". */
  final File[] cleanFiles;

  /** The in-progress edit file for each value index, named "{key}.{i}.tmp". */
  final File[] dirtyFiles;

  /** True if this entry has ever been published. */
  boolean readable;

  /** The ongoing edit or null if this entry is not being edited. */
  Editor currentEditor;

  /** The sequence number of the most recently committed edit to this entry. */
  long sequenceNumber;

  Entry(String key) {
    this.key = key;

    lengths = new long[valueCount];
    cleanFiles = new File[valueCount];
    dirtyFiles = new File[valueCount];

    // The names are repetitive so re-use the same builder to avoid allocations.
    StringBuilder fileBuilder = new StringBuilder(key).append('.');
    int truncateTo = fileBuilder.length();
    for (int i = 0; i < valueCount; i++) {
      fileBuilder.append(i);
      cleanFiles[i] = new File(directory, fileBuilder.toString());
      fileBuilder.append(".tmp");
      dirtyFiles[i] = new File(directory, fileBuilder.toString());
      // Rewind the builder to "{key}." for the next index.
      fileBuilder.setLength(truncateTo);
    }
  }

  ...

  /**
   * Returns a snapshot of this entry. This opens all streams eagerly to guarantee that we see a
   * single published snapshot. If we opened streams lazily then the streams could come from
   * different edits.
   */
  Snapshot snapshot() {
    // Caller must hold the cache lock so no edit commits while we open the sources.
    if (!Thread.holdsLock(DiskLruCache.this)) throw new AssertionError();

    Source[] sources = new Source[valueCount];
    // Defensive copy since these can be zeroed out.
    long[] lengths = this.lengths.clone();
    try {
      for (int i = 0; i < valueCount; i++) {
        sources[i] = fileSystem.source(cleanFiles[i]);
      }
      return new Snapshot(key, sequenceNumber, sources, lengths);
    } catch (FileNotFoundException e) {
      // A file must have been deleted manually!
      // Close the sources opened so far; sources are filled in order, so stop at the first null.
      for (int i = 0; i < valueCount; i++) {
        if (sources[i] != null) {
          Util.closeQuietly(sources[i]);
        } else {
          break;
        }
      }
      // Since the entry is no longer valid, remove it so the metadata is accurate (i.e. the cache
      // size.)
      try {
        removeEntry(this);
      } catch (IOException ignored) {
      }
      return null;
    }
  }
}
/**
* We only rebuild the journal when it will halve the size of the journal and eliminate at least
* 2000 ops.
*/booleanjournalRebuildRequired() {
finalintredundantOpCompactThreshold=2000;
return redundantOpCount >= redundantOpCompactThreshold && redundantOpCount >= lruEntries.size();
}
// File: DiskLruCache.java/**
* Returns a snapshot of the entry named {@code key}, or null if it doesn't exist is not currently
* readable. If a value is returned, it is moved to the head of the LRU queue.
*/publicsynchronized Snapshot get(String key)throws IOException {
initialize();
checkNotClosed();
validateKey(key);
Entryentry= lruEntries.get(key);
if (entry == null || !entry.readable) returnnull;
Snapshotsnapshot= entry.snapshot();
if (snapshot == null) returnnull;
redundantOpCount++;
//日志记录
journalWriter.writeUtf8(READ).writeByte(' ').writeUtf8(key).writeByte('\n');
if (journalRebuildRequired()) {
executor.execute(cleanupRunnable);
}
return snapshot;
}
// File: okhttp3.Cache.java
CacheRequest put(Response response) {
StringrequestMethod= response.request().method();
if (HttpMethod.invalidatesCache(response.request().method())) {
try {
remove(response.request());
} catch (IOException ignored) {
// The cache cannot be written.
}
returnnull;
}
if (!requestMethod.equals("GET")) {
// Don't cache non-GET responses. We're technically allowed to cache// HEAD requests and some POST requests, but the complexity of doing// so is high and the benefit is low.returnnull;
}
if (HttpHeaders.hasVaryAll(response)) {
returnnull;
}
Entryentry=newEntry(response);
DiskLruCache.Editoreditor=null;
try {
editor = cache.edit(key(response.request().url()));
if (editor == null) {
returnnull;
}
entry.writeTo(editor);
returnnewCacheRequestImpl(editor);
} catch (IOException e) {
abortQuietly(editor);
returnnull;
}
}
// File: okhttp3.internal.cache.DiskLruCache.java

/**
 * Returns an editor for the entry named {@code key}, or null if editing is not possible: the
 * caller's snapshot is stale, another edit is in progress, or the journal cannot be written.
 */
synchronized Editor edit(String key, long expectedSequenceNumber) throws IOException {
  initialize();
  checkNotClosed();
  validateKey(key);

  Entry entry = lruEntries.get(key);
  if (expectedSequenceNumber != ANY_SEQUENCE_NUMBER
      && (entry == null || entry.sequenceNumber != expectedSequenceNumber)) {
    return null; // Snapshot is stale.
  }
  if (entry != null && entry.currentEditor != null) {
    return null; // This cache entry is currently being edited by another caller.
  }
  if (mostRecentTrimFailed || mostRecentRebuildFailed) {
    // The OS has become our enemy! If the trim job failed, it means we are storing more data than
    // requested by the user. Do not allow edits so we do not go over that limit any further. If
    // the journal rebuild failed, the journal writer will not be active, meaning we will not be
    // able to record the edit, causing file leaks. In both cases, we want to retry the clean up
    // so we can get out of this state!
    executor.execute(cleanupRunnable);
    return null;
  }

  // Record the pending edit as a DIRTY entry in the journal, and flush before handing out the
  // editor; the hasJournalErrors flag is checked only after the flush has had a chance to fail.
  journalWriter.writeUtf8(DIRTY).writeByte(' ').writeUtf8(key).writeByte('\n');
  journalWriter.flush();

  if (hasJournalErrors) {
    return null; // Don't edit; the journal can't be written.
  }

  if (entry == null) {
    entry = new Entry(key);
    lruEntries.put(key, entry);
  }
  Editor editor = new Editor(entry);
  entry.currentEditor = editor;
  return editor;
}