mirror of https://github.com/sbt/sbt.git
Add per-cache file structure lock
To be acquired in particular when creating directories or lock files, so that concurrent cache operations don't hinder each other.
This commit is contained in:
parent dfc11151b1
commit 83a08d172c
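
The diff below pairs two levels of locking per cache directory: a JVM-internal monitor (kept in a ConcurrentHashMap) and an OS-level file lock on a .structure.lock file. As a rough standalone sketch of why both are needed (the object and value names here are illustrative, not taken from the commit): java.nio file locks are held on behalf of the whole JVM, so a second thread of the same process calling FileChannel.lock() on an already-locked file fails with OverlappingFileLockException instead of waiting, while separate processes are serialized by the file lock itself.

// Illustrative sketch only, not the commit's code: pair an in-process
// monitor per cache directory with an OS-level FileLock.
import java.io.{File, FileOutputStream}
import java.util.concurrent.ConcurrentHashMap

object StructureLockSketch {

  // One monitor per cache directory, shared by every thread of this process.
  private val monitors = new ConcurrentHashMap[File, AnyRef]

  def withStructureLock[T](cache: File)(f: => T): T = {
    val monitor = Option(monitors.get(cache)).getOrElse {
      val m = new AnyRef
      Option(monitors.putIfAbsent(cache, m)).getOrElse(m)
    }

    // Level 1: serialize threads of this JVM. Without this, a second thread
    // calling lock() below would get an OverlappingFileLockException rather
    // than block until the first one is done.
    monitor.synchronized {
      val lockFile = new File(cache, ".structure.lock")
      lockFile.getParentFile.mkdirs()
      val out = new FileOutputStream(lockFile)
      try {
        // Level 2: serialize against other processes via the OS file lock.
        val lock = out.getChannel.lock()
        try f
        finally lock.release()
      } finally out.close()
    }
  }
}
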
@@ -107,11 +107,55 @@ object Cache {
     helper(alreadyDownloaded)
   }
 
-  private def withLockFor[T](file: File)(f: => FileError \/ T): FileError \/ T = {
+  private val processStructureLocks = new ConcurrentHashMap[File, AnyRef]
+
+  /**
+   * Should be acquired when doing operations changing the file structure of the cache (creating
+   * new directories, creating / acquiring locks, ...), so that these don't hinder each other.
+   *
+   * Should hopefully address some transient errors seen on the CI of ensime-server.
+   */
+  private def withStructureLock[T](cache: File)(f: => T): T = {
+
+    val intraProcessLock = Option(processStructureLocks.get(cache)).getOrElse {
+      val lock = new AnyRef
+      val prev = Option(processStructureLocks.putIfAbsent(cache, lock))
+      prev.getOrElse(lock)
+    }
+
+    intraProcessLock.synchronized {
+      val lockFile = new File(cache, ".structure.lock")
+      lockFile.getParentFile.mkdirs()
+      var out = new FileOutputStream(lockFile)
+
+      try {
+        var lock: FileLock = null
+        try {
+          lock = out.getChannel.lock()
+
+          try f
+          finally {
+            lock.release()
+            lock = null
+            out.close()
+            out = null
+            lockFile.delete()
+          }
+        }
+        finally if (lock != null) lock.release()
+      } finally if (out != null) out.close()
+    }
+  }
+
+  private def withLockFor[T](cache: File, file: File)(f: => FileError \/ T): FileError \/ T = {
     val lockFile = new File(file.getParentFile, s"${file.getName}.lock")
 
-    lockFile.getParentFile.mkdirs()
-    var out = new FileOutputStream(lockFile)
+    var out: FileOutputStream = null
+
+    withStructureLock(cache) {
+      lockFile.getParentFile.mkdirs()
+      out = new FileOutputStream(lockFile)
+    }
 
     try {
       var lock: FileLock = null
@@ -378,7 +422,7 @@ object Cache {
   def remote(file: File, url: String): EitherT[Task, FileError, Unit] =
     EitherT {
       Task {
-        withLockFor(file) {
+        withLockFor(cache, file) {
           downloading(url, file, logger) {
             val tmp = temporaryFile(file)
 
@@ -416,14 +460,18 @@ object Cache {
 
       val result =
         try {
-          tmp.getParentFile.mkdirs()
-          val out = new FileOutputStream(tmp, partialDownload)
+          val out = withStructureLock(cache) {
+            tmp.getParentFile.mkdirs()
+            new FileOutputStream(tmp, partialDownload)
+          }
           try \/-(readFullyTo(in, out, logger, url, if (partialDownload) alreadyDownloaded else 0L))
           finally out.close()
         } finally in.close()
 
-      file.getParentFile.mkdirs()
-      NioFiles.move(tmp.toPath, file.toPath, StandardCopyOption.ATOMIC_MOVE)
+      withStructureLock(cache) {
+        file.getParentFile.mkdirs()
+        NioFiles.move(tmp.toPath, file.toPath, StandardCopyOption.ATOMIC_MOVE)
+      }
 
       for (lastModified <- Option(conn.getLastModified) if lastModified > 0L)
         file.setLastModified(lastModified)
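
As the doc comment on withStructureLock suggests, only operations that change the file structure of the cache are meant to run under the lock. A hypothetical caller-side sketch of the pattern the last two hunks follow (writeUnderCache and the write callback are made up for illustration, and withStructureLock is assumed to be in scope): directory creation and file opening happen under the structure lock, while the potentially slow write happens outside it.

// Hypothetical usage sketch, not from the commit.
import java.io.{File, FileOutputStream}

def writeUnderCache(cache: File, target: File)(write: FileOutputStream => Unit): Unit = {
  // Structure changes (mkdirs, opening the file) go under the lock...
  val out = withStructureLock(cache) {
    target.getParentFile.mkdirs()
    new FileOutputStream(target)
  }
  // ...while the slow I/O itself runs outside of it.
  try write(out)
  finally out.close()
}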