Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(31)

Unified Diff: infra/libs/service_utils/daemon.py

Issue 1096683003: Add flock and timeout to infra/libs. (Closed) Base URL: https://chromium.googlesource.com/infra/infra.git@master
Patch Set: Improve tests and coverage. Created 5 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « no previous file | infra/libs/service_utils/test/daemon_test.py » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: infra/libs/service_utils/daemon.py
diff --git a/infra/libs/service_utils/daemon.py b/infra/libs/service_utils/daemon.py
new file mode 100644
index 0000000000000000000000000000000000000000..f37566f400d396f836fb1285e3702835ce58fab8
--- /dev/null
+++ b/infra/libs/service_utils/daemon.py
@@ -0,0 +1,110 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Locking, timeout, and other process management functions."""
+
+import contextlib
+import fcntl
+import os
+import sys
+import tempfile
+
+
+@contextlib.contextmanager
+def _auto_closing_fd(*args, **kwargs):
+ """Opens a file, yields its fd, and closes it when done."""
+
+ fd = os.open(*args, **kwargs)
+ yield fd
+ os.close(fd)
+
+
class LockAlreadyLocked(RuntimeError):
  """Raised when a file lock cannot be acquired because it is already held."""
+
+
@contextlib.contextmanager
def flock(lockfile, lockdir=None):
  """Keeps a critical section from executing concurrently using a file lock.

  This only protects critical sections across processes, not threads. For
  multithreaded programs, use threading.Lock or threading.RLock.

  Implementation based on http://goo.gl/dNf7fv (see John Mudd's comment) and
  http://stackoverflow.com/a/18745264/3984761. This implementation creates the
  lockfile if it doesn't exist and removes it when the critical section exits.
  It raises LockAlreadyLocked if it cannot acquire a lock.

  Args:
    lockfile: name of the lock file, created inside lockdir.
    lockdir: directory holding the lock file; defaults to the system temp dir.

  Raises:
    LockAlreadyLocked: if the lock is held (or was stolen) by another process.
    NotImplementedError: on Windows, which has no fcntl.

  Note 1: this method only works for lockfiles on local filesystems with
  appropriate locking semantics (extfs, HFS+). It is unwise to use this on
  NFS-mounted filesystems.

  Note 2: be careful when forking processes within the lock, forked processes
  inherit open file descriptors.

  Example usage:

    try:
      with daemon.flock('toaster'):
        put_bread_in_toaster()
    except daemon.LockAlreadyLocked:
      print 'toaster is occupied!'
  """

  if sys.platform.startswith('win'):  # pragma: no cover
    raise NotImplementedError

  lockdir = lockdir or tempfile.gettempdir()
  full_lockfile = os.path.join(lockdir, lockfile)

  # The fd is managed with an explicit try/finally (rather than a helper
  # context manager) so it is guaranteed to be closed even on the error
  # paths below; closing the fd also drops any lock we still hold.
  fd = os.open(full_lockfile, os.O_CREAT | os.O_TRUNC | os.O_WRONLY)
  try:
    try:
      # Request exclusive (EX) non-blocking (NB) advisory lock.
      fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
      # Could not obtain lock.
      raise LockAlreadyLocked()

    try:
      held_inode = os.fstat(fd).st_ino
      file_inode = os.stat(full_lockfile).st_ino

      if held_inode != file_inode:
        # The file was deleted under us, another process has created it again
        # and may get a lock on it. That process doesn't know about the lock
        # we have on the (now deleted) file, so we need to bail.
        raise LockAlreadyLocked()
    except OSError:
      # File has been deleted under us. We have to exit because another
      # process might try to create it and obtain a lock, not knowing that we
      # had a lock on the (now deleted) file.
      raise LockAlreadyLocked()

    # The cleanup runs in a finally block so the lockfile is removed and the
    # lock released even when the critical section raises; the original code
    # left the lockfile behind (and the fd locked) on exceptions.
    try:
      yield
    finally:
      try:
        # The order of these two operations is very important. We need to
        # delete the file before we release the lock. If we release the lock
        # before we delete the file, we run the risk of another process
        # obtaining a lock on the file we're about to delete. If the delete
        # happens while the other critical section is running, a third
        # process could create the file, get a lock on it, and run a second
        # critical section simultaneously. Deleting before unlocking prevents
        # this scenario.
        os.unlink(full_lockfile)
        fcntl.lockf(fd, fcntl.LOCK_UN)
      except OSError:
        # If the file was deleted for some other reason, don't sweat it.
        pass
  finally:
    os.close(fd)
+
+
def add_timeout(cmd, timeout_secs):
  """Prefixes a command line with GNU coreutils' /usr/bin/timeout.

  Args:
    cmd: command line as a list of arguments.
    timeout_secs: number of seconds the command is allowed to run.

  Returns:
    A new argument list; the original cmd list is not modified.

  Raises:
    NotImplementedError: on platforms without GNU timeout (Windows, OS X).
  """

  if sys.platform.startswith(('win', 'darwin')):
    raise NotImplementedError  # pragma: no cover

  return ['timeout', str(timeout_secs)] + cmd
« no previous file with comments | « no previous file | infra/libs/service_utils/test/daemon_test.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698