|
16 | 16 | import com.github.benmanes.caffeine.cache.Cache; |
17 | 17 | import com.github.benmanes.caffeine.cache.Caffeine; |
18 | 18 | import com.github.benmanes.caffeine.cache.stats.CacheStats; |
19 | | -import com.google.common.annotations.VisibleForTesting; |
20 | 19 | import com.google.common.base.Preconditions; |
21 | 20 | import com.google.common.primitives.Longs; |
22 | | -import com.google.devtools.build.lib.profiler.Profiler; |
23 | | -import com.google.devtools.build.lib.profiler.ProfilerTask; |
24 | 21 | import java.io.IOException; |
25 | 22 |
26 | 23 | /** |
|
41 | 38 | * fail. |
42 | 39 | */ |
43 | 40 | public class DigestUtils { |
44 | | - // Object to synchronize on when serializing large file reads. |
45 | | - private static final Object DIGEST_LOCK = new Object(); |
46 | | - |
47 | 41 | // Typical size for a digest byte array. |
48 | 42 | public static final int ESTIMATED_SIZE = 32; |
49 | 43 |
50 | | - // Files of this size or less are assumed to be readable in one seek. |
51 | | - // (This is the default readahead window on Linux.) |
52 | | - @VisibleForTesting // the unit test is in a different package!
53 | | - public static final int MULTI_THREADED_DIGEST_MAX_FILE_SIZE = 128 * 1024; |
54 | | - |
55 | 44 | /** |
56 | 45 | * Keys used to cache the values of the digests for files where we don't have fast digests. |
57 | 46 | * |
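An aside on the machinery this hunk keeps: the retained Caffeine imports back the digest cache that the later hunks still use. As a hedged, self-contained sketch of the idea, with hypothetical names (the real key in `DigestUtils` also folds in the file's path and full status), a status-keyed digest cache might look like:

```java
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import java.util.function.Supplier;

// Sketch only: a digest cache keyed on file status, so a digest is
// recomputed whenever the file's metadata (size, mtime) changes.
final class DigestCacheSketch {
  // Hypothetical key; the real key in DigestUtils compares the full
  // file status to avoid ever returning a stale digest.
  record Key(String path, long size, long mtime) {}

  private static final Cache<Key, byte[]> CACHE =
      Caffeine.newBuilder().maximumSize(10_000).recordStats().build();

  static byte[] getOrCompute(Key key, Supplier<byte[]> compute) {
    // Caffeine's get() computes and inserts atomically on a cache miss.
    return CACHE.get(key, k -> compute.get());
  }
}
```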
@@ -126,19 +115,6 @@ public int hashCode() { |
126 | 115 | /** Private constructor to prevent instantiation of utility class. */ |
127 | 116 | private DigestUtils() {} |
128 | 117 |
129 | | - /** |
130 | | - * Obtains the file's digest via a synchronized method, ensuring that the system is not
131 | | - * overloaded when multiple threads request digest calculations and the underlying file
132 | | - * system cannot provide digests via an extended attribute.
133 | | - */ |
134 | | - private static byte[] getDigestInExclusiveMode(Path path) throws IOException { |
135 | | - long startTime = Profiler.nanoTimeMaybe(); |
136 | | - synchronized (DIGEST_LOCK) { |
137 | | - Profiler.instance().logSimpleTask(startTime, ProfilerTask.WAIT, path.getPathString()); |
138 | | - return path.getDigest(); |
139 | | - } |
140 | | - } |
141 | | - |
142 | 118 | /** |
143 | 119 | * Enables the caching of file digests based on file status data. |
144 | 120 | * |
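For reference, the deleted `getDigestInExclusiveMode` serialized whole-file reads behind a single global lock so that concurrent digest requests for large files would not thrash the disk. A minimal sketch of that removed pattern, using `java.nio.file.Path` and SHA-256 as stand-ins for Bazel's vfs `Path` and `Path#getDigest`:

```java
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// Sketch of the removed pattern: one global lock serializes expensive
// whole-file reads so concurrent large-file digests don't compete for seeks.
final class ExclusiveDigestSketch {
  private static final Object DIGEST_LOCK = new Object();

  static byte[] getDigestInExclusiveMode(Path path) throws IOException {
    synchronized (DIGEST_LOCK) {
      return computeDigest(path);
    }
  }

  // Stand-in for Path#getDigest: stream the file through SHA-256.
  static byte[] computeDigest(Path path) throws IOException {
    MessageDigest md;
    try {
      md = MessageDigest.getInstance("SHA-256");
    } catch (NoSuchAlgorithmException e) {
      throw new IllegalStateException(e); // SHA-256 is required on every JVM
    }
    try (InputStream in = Files.newInputStream(path)) {
      byte[] buf = new byte[8192];
      for (int n; (n = in.read(buf)) != -1; ) {
        md.update(buf, 0, n);
      }
    }
    return md.digest();
  }
}
```

The original also logged lock-wait time through Bazel's Profiler (the `ProfilerTask.WAIT` call in the hunk above); that instrumentation is omitted from the sketch.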
@@ -221,16 +197,7 @@ public static byte[] manuallyComputeDigest(Path path, long fileSize) throws IOEx |
221 | 197 | } |
222 | 198 | } |
223 | 199 |
224 | | - // Compute digest from the file contents. |
225 | | - if (fileSize > MULTI_THREADED_DIGEST_MAX_FILE_SIZE) { |
226 | | - // We'll have to read file content in order to calculate the digest. |
227 | | - // We avoid overlapping this process for multiple large files, as |
228 | | - // seeking back and forth between them will result in an overall loss of |
229 | | - // throughput. |
230 | | - digest = getDigestInExclusiveMode(path); |
231 | | - } else { |
232 | | - digest = path.getDigest(); |
233 | | - } |
| 200 | + digest = path.getDigest(); |
234 | 201 |
235 | 202 | Preconditions.checkNotNull(digest, "Missing digest for %s (size %s)", path, fileSize); |
236 | 203 | if (cache != null) { |
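After this change, the tail of `manuallyComputeDigest` no longer branches on file size: every file is digested directly and the result is cached. A hedged sketch of the simplified flow (reusing `computeDigest` from the sketch above; the real method also consults fast digests and the cache first):

```java
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.nio.file.Path;

// Sketch of the simplified flow: no size threshold, no global lock.
final class DirectDigestSketch {
  static byte[] manuallyComputeDigest(Path path, long fileSize) throws IOException {
    byte[] digest = ExclusiveDigestSketch.computeDigest(path);
    Preconditions.checkNotNull(digest, "Missing digest for %s (size %s)", path, fileSize);
    return digest;
  }
}
```

The deleted 128 KiB threshold assumed that serializing large reads preserved throughput by avoiding seeks between files; dropping it, presumably, reflects that a single global lock throttles parallel digesting more than it saves in seek time, and leaves read scheduling to the filesystem and OS.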