source: trunk/packages/invirt-base/python/invirt/config.py @ 1422

Last change on this file since 1422 was 1422, checked in by price, 16 years ago

use /etc/invirt/conf.d/ too in invirt.config

File size: 4.4 KB
RevLine 
[1197]1from __future__ import with_statement
2
[784]3import json
[778]4from invirt.common import *
[1422]5import os
[816]6from os import rename
[781]7from os.path import getmtime
[1197]8from contextlib import closing
[1421]9import yaml
[726]10
# Prefer the C-accelerated YAML loader (available when PyYAML was built
# against libyaml); fall back to the pure-Python SafeLoader otherwise.
# The only expected failure is the missing attribute, so catch exactly
# that rather than a bare except (which would also swallow
# KeyboardInterrupt/SystemExit).
try:
    loader = yaml.CSafeLoader
except AttributeError:
    loader = yaml.SafeLoader
# Configuration sources: the master YAML file, plus override fragments
# dropped into the conf.d directory (spliced in by load_master).
src_path    = '/etc/invirt/master.yaml'
src_dirpath = '/etc/invirt/conf.d'
# Faster-to-load JSON cache of the merged configuration, and the lock
# file that serializes writers regenerating that cache.
cache_path  = '/var/lib/invirt/cache.json'
lock_path   = '/var/lib/invirt/cache.lock'
[726]18
def augment(d1, d2):
    """Splice dict-tree d2 into d1.  Return d1.

    Keys from d2 overwrite those in d1, except that when both values
    are dicts they are merged recursively.

    Example:
    >>> d = {'a': {'b': 1}, 'c': 2}
    >>> augment(d, {'a': {'d': 3}}) == {'a': {'b': 1, 'd': 3}, 'c': 2}
    True
    >>> d == {'a': {'b': 1, 'd': 3}, 'c': 2}
    True
    """
    for k in d2:
        # Recurse only when *both* sides are dicts; otherwise d2's value
        # (even a scalar replacing a whole subtree) overwrites d1's.
        # Checking only d1[k] would recurse into augment(dict, scalar)
        # and blow up iterating the scalar.
        if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], dict):
            augment(d1[k], d2[k])
        else:
            d1[k] = d2[k]
    return d1
35
def list_files():
    """Yield the configuration file paths, in merge order.

    The master file comes first, then the conf.d fragments in sorted
    name order.  os.listdir returns entries in arbitrary order, so we
    sort to make the override precedence of conf.d fragments
    deterministic (the usual conf.d convention: later names win).
    """
    yield src_path
    for name in sorted(os.listdir(src_dirpath)):
        yield os.path.join(src_dirpath, name)
40
def load_master():
    """Parse every YAML configuration source and merge them into one
    dict-tree, with later files overriding earlier ones."""
    merged = {}
    for path in list_files():
        with closing(file(path)) as stream:
            merged = augment(merged, yaml.load(stream, loader))
    return merged
[1421]47
def get_src_mtime():
    """Return the newest mtime among all configuration files and the
    conf.d directory itself (whose mtime changes when fragments are
    added or removed)."""
    times = [getmtime(path) for path in list_files()]
    times.append(getmtime(src_dirpath))
    return max(times)
[1421]51
def load(force_refresh = False):
    """
    Load the invirt configuration and return it as a dict-tree.

    Try loading the configuration from the faster-to-load JSON cache at
    cache_path.  If it doesn't exist or is outdated, load the configuration
    instead from the original YAML sources and regenerate the cache.
    I assume I have the permissions to write to the cache directory.

    force_refresh -- skip the cache check and reparse the YAML sources.
    """

    # Namespace container for state variables, so that they can be updated by
    # closures.
    ns = struct()

    if force_refresh:
        do_refresh = True
    else:
        src_mtime = get_src_mtime()
        try:
            cache_mtime = getmtime(cache_path)
        except OSError:
            # Cache file missing or unstattable: must regenerate it.
            do_refresh = True
        else:
            do_refresh = src_mtime + 1 >= cache_mtime

        # We chose not to simply say
        #
        #   do_refresh = src_mtime >= cache_time
        #
        # because between the getmtime(src_path) and the time the cache is
        # rewritten, the master configuration may have been updated, so future
        # checks here would find a cache with a newer mtime than the master
        # (and thus treat the cache as containing the latest version of the
        # master).  The +1 means that for at least a full second following the
        # update to the master, this function will refresh the cache, giving us
        # 1 second to write the cache.  Note that if it takes longer than 1
        # second to write the cache, then this situation could still arise.
        #
        # The getmtime calls should logically be part of the same transaction
        # as the rest of this function (cache read + conditional cache
        # refresh), but to wrap everything in an flock would cause the
        # following cache read to be less streamlined.

    if not do_refresh:
        # Try reading from the cache first.  This must be transactionally
        # isolated from concurrent writes to prevent reading an incomplete
        # (changing) version of the data (but the transaction can share the
        # lock with other concurrent reads).  This isolation is accomplished
        # using an atomic filesystem rename in the refreshing stage.
        try:
            with closing(file(cache_path)) as f:
                # NOTE(review): json.read is the legacy python-json API
                # (not the stdlib json module's loads) -- confirm which
                # json package is installed before modernizing.
                ns.cfg = json.read(f.read())
        except Exception:
            # Unreadable or corrupt cache: fall through and rebuild it.
            # (except Exception, not a bare except, so SystemExit and
            # KeyboardInterrupt still propagate.)
            do_refresh = True

    if do_refresh:
        # Atomically reload the source and regenerate the cache.  The read and
        # write must be a single transaction, or a stale version may be
        # written (if another read/write of a more recent configuration
        # is interleaved).  The final atomic rename is to keep this
        # transactionally isolated from the above cache read.  If we fail to
        # acquire the lock, just try to load the master configuration.
        try:
            with lock_file(lock_path):
                ns.cfg = load_master()
                try:
                    with closing(file(cache_path + '.tmp', 'w')) as f:
                        f.write(json.write(ns.cfg))
                except Exception:
                    # Deliberate best-effort: serving the configuration
                    # matters more than refreshing the cache.
                    pass
                else:
                    rename(cache_path + '.tmp', cache_path)
        except IOError:
            ns.cfg = load_master()
    return ns.cfg
[771]119
# Eagerly load the configuration at import time: `dicts` is the raw
# merged dict-tree, `structs` the same data wrapped for attribute-style
# access via invirt.common.dicts2struct.
dicts = load()
structs = dicts2struct(dicts)
122
[726]123# vim:et:sw=4:ts=4
Note: See TracBrowser for help on using the repository browser.