diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
index e6c0e9feb4de9efdb97397b85979f7a4ffcbcfd0..7ada27609eac55be4598a5635e99c99c821b353c 100644
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ b/hooks/charmhelpers/contrib/openstack/context.py
@@ -93,14 +93,14 @@ from charmhelpers.contrib.network.ip import (
     format_ipv6_addr,
     is_bridge_member,
     is_ipv6_disabled,
+    get_relation_ip,
 )
 from charmhelpers.contrib.openstack.utils import (
     config_flags_parser,
-    get_host_ip,
-    git_determine_usr_bin,
-    git_determine_python_path,
     enable_memcache,
     snap_install_requested,
+    CompareOpenStackReleases,
+    os_release,
 )
 from charmhelpers.core.unitdata import kv
 
@@ -332,10 +332,7 @@ class IdentityServiceContext(OSContextGenerator):
         self.rel_name = rel_name
         self.interfaces = [self.rel_name]
 
-    def __call__(self):
-        log('Generating template context for ' + self.rel_name, level=DEBUG)
-        ctxt = {}
-
+    def _setup_pki_cache(self):
         if self.service and self.service_user:
             # This is required for pki token signing if we don't want /tmp to
             # be used.
@@ -345,6 +342,15 @@ class IdentityServiceContext(OSContextGenerator):
                 mkdir(path=cachedir, owner=self.service_user,
                       group=self.service_user, perms=0o700)
 
+            return cachedir
+        return None
+
+    def __call__(self):
+        log('Generating template context for ' + self.rel_name, level=DEBUG)
+        ctxt = {}
+
+        cachedir = self._setup_pki_cache()
+        if cachedir:
             ctxt['signing_dir'] = cachedir
 
         for rid in relation_ids(self.rel_name):
@@ -383,6 +389,62 @@ class IdentityServiceContext(OSContextGenerator):
         return {}
 
 
+class IdentityCredentialsContext(IdentityServiceContext):
+    '''Context for identity-credentials interface type'''
+
+    def __init__(self,
+                 service=None,
+                 service_user=None,
+                 rel_name='identity-credentials'):
+        super(IdentityCredentialsContext, self).__init__(service,
+                                                         service_user,
+                                                         rel_name)
+
+    def __call__(self):
+        log('Generating template context for ' + self.rel_name, level=DEBUG)
+        ctxt = {}
+
+        cachedir = self._setup_pki_cache()
+        if cachedir:
+            ctxt['signing_dir'] = cachedir
+
+        for rid in relation_ids(self.rel_name):
+            self.related = True
+            for unit in related_units(rid):
+                rdata = relation_get(rid=rid, unit=unit)
+                credentials_host = rdata.get('credentials_host')
+                credentials_host = (
+                    format_ipv6_addr(credentials_host) or credentials_host
+                )
+                auth_host = rdata.get('auth_host')
+                auth_host = format_ipv6_addr(auth_host) or auth_host
+                svc_protocol = rdata.get('credentials_protocol') or 'http'
+                auth_protocol = rdata.get('auth_protocol') or 'http'
+                api_version = rdata.get('api_version') or '2.0'
+                ctxt.update({
+                    'service_port': rdata.get('credentials_port'),
+                    'service_host': credentials_host,
+                    'auth_host': auth_host,
+                    'auth_port': rdata.get('auth_port'),
+                    'admin_tenant_name': rdata.get('credentials_project'),
+                    'admin_tenant_id': rdata.get('credentials_project_id'),
+                    'admin_user': rdata.get('credentials_username'),
+                    'admin_password': rdata.get('credentials_password'),
+                    'service_protocol': svc_protocol,
+                    'auth_protocol': auth_protocol,
+                    'api_version': api_version
+                })
+
+                if float(api_version) > 2:
+                    ctxt.update({'admin_domain_name':
+                                 rdata.get('domain')})
+
+                if self.context_complete(ctxt):
+                    return ctxt
+
+        return {}
+
+
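For illustration, a hedged sketch of how a consuming charm might wire the new context into its config rendering (the 'cinder' names, template path and release below are assumptions, not part of this patch):

from charmhelpers.contrib.openstack import context, templating

# Hypothetical charm-side wiring for IdentityCredentialsContext.
identity_ctxt = context.IdentityCredentialsContext(service='cinder',
                                                   service_user='cinder')
configs = templating.OSConfigRenderer(templates_dir='templates/',
                                      openstack_release='pike')
configs.register('/etc/cinder/cinder.conf', [identity_ctxt])
configs.write_all()
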
 class AMQPContext(OSContextGenerator):
 
     def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
@@ -564,11 +626,6 @@ class HAProxyContext(OSContextGenerator):
         if not relation_ids('cluster') and not self.singlenode_mode:
             return {}
 
-        if config('prefer-ipv6'):
-            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
-        else:
-            addr = get_host_ip(unit_get('private-address'))
-
         l_unit = local_unit().replace('/', '-')
         cluster_hosts = {}
 
@@ -576,7 +633,15 @@ class HAProxyContext(OSContextGenerator):
         # and associated backends
         for addr_type in ADDRESS_TYPES:
             cfg_opt = 'os-{}-network'.format(addr_type)
-            laddr = get_address_in_network(config(cfg_opt))
+            # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather
+            # than 'internal'
+            if addr_type == 'internal':
+                _addr_map_type = INTERNAL
+            else:
+                _addr_map_type = addr_type
+            # Network spaces aware
+            laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'],
+                                    config(cfg_opt))
             if laddr:
                 netmask = get_netmask_for_address(laddr)
                 cluster_hosts[laddr] = {
@@ -587,15 +652,19 @@ class HAProxyContext(OSContextGenerator):
                 }
                 for rid in relation_ids('cluster'):
                     for unit in sorted(related_units(rid)):
+                        # API Charms will need to set {addr_type}-address with
+                        # get_relation_ip(addr_type)
                         _laddr = relation_get('{}-address'.format(addr_type),
                                               rid=rid, unit=unit)
                         if _laddr:
                             _unit = unit.replace('/', '-')
                             cluster_hosts[laddr]['backends'][_unit] = _laddr
 
-        # NOTE(jamespage) add backend based on private address - this
-        # with either be the only backend or the fallback if no acls
+        # NOTE(jamespage) add backend based on get_relation_ip - this
+        # will either be the only backend or the fallback if no acls
         # match in the frontend
+        # Network spaces aware
+        addr = get_relation_ip('cluster')
         cluster_hosts[addr] = {}
         netmask = get_netmask_for_address(addr)
         cluster_hosts[addr] = {
@@ -605,6 +674,8 @@ class HAProxyContext(OSContextGenerator):
         }
         for rid in relation_ids('cluster'):
             for unit in sorted(related_units(rid)):
+                # API Charms will need to set their private-address with
+                # get_relation_ip('cluster')
                 _laddr = relation_get('private-address',
                                       rid=rid, unit=unit)
                 if _laddr:
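For reference, a sketch of the charm-side hook the comments above anticipate: each API charm publishes per-network addresses on the cluster relation so peers can populate the HAProxy backends. The hook body below is an assumption built from the documented relation keys:

from charmhelpers.contrib.network.ip import get_relation_ip
from charmhelpers.core.hookenv import config, relation_ids, relation_set

def cluster_joined():
    # Publish one address per configured OpenStack network, plus the
    # 'cluster' binding address as the private-address fallback.
    settings = {}
    for addr_type in ('admin', 'internal', 'public'):
        cidr = config('os-{}-network'.format(addr_type))
        settings['{}-address'.format(addr_type)] = get_relation_ip(
            addr_type, cidr_network=cidr)
    settings['private-address'] = get_relation_ip('cluster')
    for rid in relation_ids('cluster'):
        relation_set(relation_id=rid, relation_settings=settings)
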
@@ -1321,8 +1392,6 @@ class WSGIWorkerConfigContext(WorkerConfigContext):
             "public_processes": int(math.ceil(self.public_process_weight *
                                               total_processes)),
             "threads": 1,
-            "usr_bin": git_determine_usr_bin(),
-            "python_path": git_determine_python_path(),
         }
         return ctxt
 
@@ -1566,8 +1635,18 @@ class InternalEndpointContext(OSContextGenerator):
     endpoints by default so this allows admins to optionally use internal
     endpoints.
     """
+    def __init__(self, ost_rel_check_pkg_name):
+        self.ost_rel_check_pkg_name = ost_rel_check_pkg_name
+
     def __call__(self):
-        return {'use_internal_endpoints': config('use-internal-endpoints')}
+        ctxt = {'use_internal_endpoints': config('use-internal-endpoints')}
+        rel = os_release(self.ost_rel_check_pkg_name, base='icehouse')
+        if CompareOpenStackReleases(rel) >= 'pike':
+            ctxt['volume_api_version'] = '3'
+        else:
+            ctxt['volume_api_version'] = '2'
+
+        return ctxt
 
 
 class AppArmorContext(OSContextGenerator):
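A minimal sketch of the changed InternalEndpointContext constructor in use (the package name is illustrative; any installed OpenStack package suitable for release detection works):

from charmhelpers.contrib.openstack.context import InternalEndpointContext

ctxt = InternalEndpointContext(ost_rel_check_pkg_name='cinder-common')()
# On Pike or later this yields 'volume_api_version': '3'; earlier
# releases fall back to '2'.
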
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index 9e5af342cb652eb3feccb84d15c97c89c1c9ff22..b753275d9709990c8f05ff0ed307beb4a09697c6 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -23,7 +23,6 @@ import sys
 import re
 import itertools
 import functools
-import shutil
 
 import six
 import traceback
@@ -47,7 +46,6 @@ from charmhelpers.core.hookenv import (
     related_units,
     relation_ids,
     relation_set,
-    service_name,
     status_set,
     hook_name,
     application_version_set,
@@ -68,11 +66,6 @@ from charmhelpers.contrib.network.ip import (
     port_has_listener,
 )
 
-from charmhelpers.contrib.python.packages import (
-    pip_create_virtualenv,
-    pip_install,
-)
-
 from charmhelpers.core.host import (
     lsb_release,
     mounts,
@@ -84,7 +77,6 @@ from charmhelpers.core.host import (
 )
 from charmhelpers.fetch import (
     apt_cache,
-    install_remote,
     import_key as fetch_import_key,
     add_source as fetch_add_source,
     SourceConfigError,
@@ -278,27 +270,6 @@ PACKAGE_CODENAMES = {
     ]),
 }
 
-GIT_DEFAULT_REPOS = {
-    'requirements': 'git://github.com/openstack/requirements',
-    'cinder': 'git://github.com/openstack/cinder',
-    'glance': 'git://github.com/openstack/glance',
-    'horizon': 'git://github.com/openstack/horizon',
-    'keystone': 'git://github.com/openstack/keystone',
-    'networking-hyperv': 'git://github.com/openstack/networking-hyperv',
-    'neutron': 'git://github.com/openstack/neutron',
-    'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
-    'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
-    'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
-    'nova': 'git://github.com/openstack/nova',
-}
-
-GIT_DEFAULT_BRANCHES = {
-    'liberty': 'stable/liberty',
-    'mitaka': 'stable/mitaka',
-    'newton': 'stable/newton',
-    'master': 'master',
-}
-
 DEFAULT_LOOPBACK_SIZE = '5G'
 
 
@@ -530,7 +501,6 @@ def os_release(package, base='essex', reset_cache=False):
     if _os_rel:
         return _os_rel
     _os_rel = (
-        git_os_codename_install_source(config('openstack-origin-git')) or
         get_os_codename_package(package, fatal=False) or
         get_os_codename_install_source(config('openstack-origin')) or
         base)
@@ -656,11 +626,6 @@ def openstack_upgrade_available(package):
     else:
         avail_vers = get_os_version_install_source(src)
     apt.init()
-    if "swift" in package:
-        major_cur_vers = cur_vers.split('.', 1)[0]
-        major_avail_vers = avail_vers.split('.', 1)[0]
-        major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
-        return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
     return apt.version_compare(avail_vers, cur_vers) == 1
 
 
@@ -771,417 +736,6 @@ def os_requires_version(ostack_release, pkg):
     return wrap
 
 
-def git_install_requested():
-    """
-    Returns true if openstack-origin-git is specified.
-    """
-    return config('openstack-origin-git') is not None
-
-
-def git_os_codename_install_source(projects_yaml):
-    """
-    Returns OpenStack codename of release being installed from source.
-    """
-    if git_install_requested():
-        projects = _git_yaml_load(projects_yaml)
-
-        if projects in GIT_DEFAULT_BRANCHES.keys():
-            if projects == 'master':
-                return 'ocata'
-            return projects
-
-        if 'release' in projects:
-            if projects['release'] == 'master':
-                return 'ocata'
-            return projects['release']
-
-    return None
-
-
-def git_default_repos(projects_yaml):
-    """
-    Returns default repos if a default openstack-origin-git value is specified.
-    """
-    service = service_name()
-    core_project = service
-
-    for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES):
-        if projects_yaml == default:
-
-            # add the requirements repo first
-            repo = {
-                'name': 'requirements',
-                'repository': GIT_DEFAULT_REPOS['requirements'],
-                'branch': branch,
-            }
-            repos = [repo]
-
-            # neutron-* and nova-* charms require some additional repos
-            if service in ['neutron-api', 'neutron-gateway',
-                           'neutron-openvswitch']:
-                core_project = 'neutron'
-                if service == 'neutron-api':
-                    repo = {
-                        'name': 'networking-hyperv',
-                        'repository': GIT_DEFAULT_REPOS['networking-hyperv'],
-                        'branch': branch,
-                    }
-                    repos.append(repo)
-                for project in ['neutron-fwaas', 'neutron-lbaas',
-                                'neutron-vpnaas', 'nova']:
-                    repo = {
-                        'name': project,
-                        'repository': GIT_DEFAULT_REPOS[project],
-                        'branch': branch,
-                    }
-                    repos.append(repo)
-
-            elif service in ['nova-cloud-controller', 'nova-compute']:
-                core_project = 'nova'
-                repo = {
-                    'name': 'neutron',
-                    'repository': GIT_DEFAULT_REPOS['neutron'],
-                    'branch': branch,
-                }
-                repos.append(repo)
-            elif service == 'openstack-dashboard':
-                core_project = 'horizon'
-
-            # finally add the current service's core project repo
-            repo = {
-                'name': core_project,
-                'repository': GIT_DEFAULT_REPOS[core_project],
-                'branch': branch,
-            }
-            repos.append(repo)
-
-            return yaml.dump(dict(repositories=repos, release=default))
-
-    return projects_yaml
-
-
-def _git_yaml_load(projects_yaml):
-    """
-    Load the specified yaml into a dictionary.
-    """
-    if not projects_yaml:
-        return None
-
-    return yaml.load(projects_yaml)
-
-
-requirements_dir = None
-
-
-def git_clone_and_install(projects_yaml, core_project):
-    """
-    Clone/install all specified OpenStack repositories.
-
-    The expected format of projects_yaml is:
-
-        repositories:
-          - {name: keystone,
-             repository: 'git://git.openstack.org/openstack/keystone.git',
-             branch: 'stable/icehouse'}
-          - {name: requirements,
-             repository: 'git://git.openstack.org/openstack/requirements.git',
-             branch: 'stable/icehouse'}
-
-        directory: /mnt/openstack-git
-        http_proxy: squid-proxy-url
-        https_proxy: squid-proxy-url
-
-    The directory, http_proxy, and https_proxy keys are optional.
-
-    """
-    global requirements_dir
-    parent_dir = '/mnt/openstack-git'
-    http_proxy = None
-
-    projects = _git_yaml_load(projects_yaml)
-    _git_validate_projects_yaml(projects, core_project)
-
-    old_environ = dict(os.environ)
-
-    if 'http_proxy' in projects.keys():
-        http_proxy = projects['http_proxy']
-        os.environ['http_proxy'] = projects['http_proxy']
-    if 'https_proxy' in projects.keys():
-        os.environ['https_proxy'] = projects['https_proxy']
-
-    if 'directory' in projects.keys():
-        parent_dir = projects['directory']
-
-    pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
-
-    # Upgrade setuptools and pip from default virtualenv versions. The default
-    # versions in trusty break master OpenStack branch deployments.
-    for p in ['pip', 'setuptools']:
-        pip_install(p, upgrade=True, proxy=http_proxy,
-                    venv=os.path.join(parent_dir, 'venv'))
-
-    constraints = None
-    for p in projects['repositories']:
-        repo = p['repository']
-        branch = p['branch']
-        depth = '1'
-        if 'depth' in p.keys():
-            depth = p['depth']
-        if p['name'] == 'requirements':
-            repo_dir = _git_clone_and_install_single(repo, branch, depth,
-                                                     parent_dir, http_proxy,
-                                                     update_requirements=False)
-            requirements_dir = repo_dir
-            constraints = os.path.join(repo_dir, "upper-constraints.txt")
-            # upper-constraints didn't exist until after icehouse
-            if not os.path.isfile(constraints):
-                constraints = None
-            # use constraints unless project yaml sets use_constraints to false
-            if 'use_constraints' in projects.keys():
-                if not projects['use_constraints']:
-                    constraints = None
-        else:
-            repo_dir = _git_clone_and_install_single(repo, branch, depth,
-                                                     parent_dir, http_proxy,
-                                                     update_requirements=True,
-                                                     constraints=constraints)
-
-    os.environ = old_environ
-
-
-def _git_validate_projects_yaml(projects, core_project):
-    """
-    Validate the projects yaml.
-    """
-    _git_ensure_key_exists('repositories', projects)
-
-    for project in projects['repositories']:
-        _git_ensure_key_exists('name', project.keys())
-        _git_ensure_key_exists('repository', project.keys())
-        _git_ensure_key_exists('branch', project.keys())
-
-    if projects['repositories'][0]['name'] != 'requirements':
-        error_out('{} git repo must be specified first'.format('requirements'))
-
-    if projects['repositories'][-1]['name'] != core_project:
-        error_out('{} git repo must be specified last'.format(core_project))
-
-    _git_ensure_key_exists('release', projects)
-
-
-def _git_ensure_key_exists(key, keys):
-    """
-    Ensure that key exists in keys.
-    """
-    if key not in keys:
-        error_out('openstack-origin-git key \'{}\' is missing'.format(key))
-
-
-def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
-                                  update_requirements, constraints=None):
-    """
-    Clone and install a single git repository.
-    """
-    if not os.path.exists(parent_dir):
-        juju_log('Directory already exists at {}. '
-                 'No need to create directory.'.format(parent_dir))
-        os.mkdir(parent_dir)
-
-    juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
-    repo_dir = install_remote(
-        repo, dest=parent_dir, branch=branch, depth=depth)
-
-    venv = os.path.join(parent_dir, 'venv')
-
-    if update_requirements:
-        if not requirements_dir:
-            error_out('requirements repo must be cloned before '
-                      'updating from global requirements.')
-        _git_update_requirements(venv, repo_dir, requirements_dir)
-
-    juju_log('Installing git repo from dir: {}'.format(repo_dir))
-    if http_proxy:
-        pip_install(repo_dir, proxy=http_proxy, venv=venv,
-                    constraints=constraints)
-    else:
-        pip_install(repo_dir, venv=venv, constraints=constraints)
-
-    return repo_dir
-
-
-def _git_update_requirements(venv, package_dir, reqs_dir):
-    """
-    Update from global requirements.
-
-    Update an OpenStack git directory's requirements.txt and
-    test-requirements.txt from global-requirements.txt.
-    """
-    orig_dir = os.getcwd()
-    os.chdir(reqs_dir)
-    python = os.path.join(venv, 'bin/python')
-    cmd = [python, 'update.py', package_dir]
-    try:
-        subprocess.check_call(cmd)
-    except subprocess.CalledProcessError:
-        package = os.path.basename(package_dir)
-        error_out("Error updating {} from "
-                  "global-requirements.txt".format(package))
-    os.chdir(orig_dir)
-
-
-def git_pip_venv_dir(projects_yaml):
-    """
-    Return the pip virtualenv path.
-    """
-    parent_dir = '/mnt/openstack-git'
-
-    projects = _git_yaml_load(projects_yaml)
-
-    if 'directory' in projects.keys():
-        parent_dir = projects['directory']
-
-    return os.path.join(parent_dir, 'venv')
-
-
-def git_src_dir(projects_yaml, project):
-    """
-    Return the directory where the specified project's source is located.
-    """
-    parent_dir = '/mnt/openstack-git'
-
-    projects = _git_yaml_load(projects_yaml)
-
-    if 'directory' in projects.keys():
-        parent_dir = projects['directory']
-
-    for p in projects['repositories']:
-        if p['name'] == project:
-            return os.path.join(parent_dir, os.path.basename(p['repository']))
-
-    return None
-
-
-def git_yaml_value(projects_yaml, key):
-    """
-    Return the value in projects_yaml for the specified key.
-    """
-    projects = _git_yaml_load(projects_yaml)
-
-    if key in projects.keys():
-        return projects[key]
-
-    return None
-
-
-def git_generate_systemd_init_files(templates_dir):
-    """
-    Generate systemd init files.
-
-    Generates and installs systemd init units and script files based on the
-    *.init.in files contained in the templates_dir directory.
-
-    This code is based on the openstack-pkg-tools package and its init
-    script generation, which is used by the OpenStack packages.
-    """
-    for f in os.listdir(templates_dir):
-        # Create the init script and systemd unit file from the template
-        if f.endswith(".init.in"):
-            init_in_file = f
-            init_file = f[:-8]
-            service_file = "{}.service".format(init_file)
-
-            init_in_source = os.path.join(templates_dir, init_in_file)
-            init_source = os.path.join(templates_dir, init_file)
-            service_source = os.path.join(templates_dir, service_file)
-
-            init_dest = os.path.join('/etc/init.d', init_file)
-            service_dest = os.path.join('/lib/systemd/system', service_file)
-
-            shutil.copyfile(init_in_source, init_source)
-            with open(init_source, 'a') as outfile:
-                template = ('/usr/share/openstack-pkg-tools/'
-                            'init-script-template')
-                with open(template) as infile:
-                    outfile.write('\n\n{}'.format(infile.read()))
-
-            cmd = ['pkgos-gen-systemd-unit', init_in_source]
-            subprocess.check_call(cmd)
-
-            if os.path.exists(init_dest):
-                os.remove(init_dest)
-            if os.path.exists(service_dest):
-                os.remove(service_dest)
-            shutil.copyfile(init_source, init_dest)
-            shutil.copyfile(service_source, service_dest)
-            os.chmod(init_dest, 0o755)
-
-    for f in os.listdir(templates_dir):
-        # If there's a service.in file, use it instead of the generated one
-        if f.endswith(".service.in"):
-            service_in_file = f
-            service_file = f[:-3]
-
-            service_in_source = os.path.join(templates_dir, service_in_file)
-            service_source = os.path.join(templates_dir, service_file)
-            service_dest = os.path.join('/lib/systemd/system', service_file)
-
-            shutil.copyfile(service_in_source, service_source)
-
-            if os.path.exists(service_dest):
-                os.remove(service_dest)
-            shutil.copyfile(service_source, service_dest)
-
-    for f in os.listdir(templates_dir):
-        # Generate the systemd unit if there's no existing .service.in
-        if f.endswith(".init.in"):
-            init_in_file = f
-            init_file = f[:-8]
-            service_in_file = "{}.service.in".format(init_file)
-            service_file = "{}.service".format(init_file)
-
-            init_in_source = os.path.join(templates_dir, init_in_file)
-            service_in_source = os.path.join(templates_dir, service_in_file)
-            service_source = os.path.join(templates_dir, service_file)
-            service_dest = os.path.join('/lib/systemd/system', service_file)
-
-            if not os.path.exists(service_in_source):
-                cmd = ['pkgos-gen-systemd-unit', init_in_source]
-                subprocess.check_call(cmd)
-
-                if os.path.exists(service_dest):
-                    os.remove(service_dest)
-                shutil.copyfile(service_source, service_dest)
-
-
-def git_determine_usr_bin():
-    """Return the /usr/bin path for Apache2 config.
-
-    The /usr/bin path will be located in the virtualenv if the charm
-    is configured to deploy from source.
-    """
-    if git_install_requested():
-        projects_yaml = config('openstack-origin-git')
-        projects_yaml = git_default_repos(projects_yaml)
-        return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
-    else:
-        return '/usr/bin'
-
-
-def git_determine_python_path():
-    """Return the python-path for Apache2 config.
-
-    Returns 'None' unless the charm is configured to deploy from source,
-    in which case the path of the virtualenv's site-packages is returned.
-    """
-    if git_install_requested():
-        projects_yaml = config('openstack-origin-git')
-        projects_yaml = git_default_repos(projects_yaml)
-        return os.path.join(git_pip_venv_dir(projects_yaml),
-                            'lib/python2.7/site-packages')
-    else:
-        return None
-
-
 def os_workload_status(configs, required_interfaces, charm_func=None):
     """
     Decorator to set workload status based on complete contexts
@@ -1615,27 +1169,24 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
     """
     ret = False
 
-    if git_install_requested():
-        action_set({'outcome': 'installed from source, skipped upgrade.'})
-    else:
-        if openstack_upgrade_available(package):
-            if config('action-managed-upgrade'):
-                juju_log('Upgrading OpenStack release')
-
-                try:
-                    upgrade_callback(configs=configs)
-                    action_set({'outcome': 'success, upgrade completed.'})
-                    ret = True
-                except Exception:
-                    action_set({'outcome': 'upgrade failed, see traceback.'})
-                    action_set({'traceback': traceback.format_exc()})
-                    action_fail('do_openstack_upgrade resulted in an '
-                                'unexpected error')
-            else:
-                action_set({'outcome': 'action-managed-upgrade config is '
-                                       'False, skipped upgrade.'})
+    if openstack_upgrade_available(package):
+        if config('action-managed-upgrade'):
+            juju_log('Upgrading OpenStack release')
+
+            try:
+                upgrade_callback(configs=configs)
+                action_set({'outcome': 'success, upgrade completed.'})
+                ret = True
+            except Exception:
+                action_set({'outcome': 'upgrade failed, see traceback.'})
+                action_set({'traceback': traceback.format_exc()})
+                action_fail('do_openstack_upgrade resulted in an '
+                            'unexpected error')
         else:
-            action_set({'outcome': 'no upgrade available.'})
+            action_set({'outcome': 'action-managed-upgrade config is '
+                                   'False, skipped upgrade.'})
+    else:
+        action_set({'outcome': 'no upgrade available.'})
 
     return ret
 
@@ -2045,14 +1596,25 @@ def token_cache_pkgs(source=None, release=None):
 
 def update_json_file(filename, items):
     """Updates the json `filename` with a given dict.
-    :param filename: json filename (i.e.: /etc/glance/policy.json)
+    :param filename: path to json file (e.g. /etc/glance/policy.json)
     :param items: dict of items to update
     """
+    if not items:
+        return
+
     with open(filename) as fd:
         policy = json.load(fd)
+
+    # Compare before and after; if nothing has changed, skip the write so
+    # that file-based restart triggers don't fire unnecessarily.
+    before = json.dumps(policy, indent=4, sort_keys=True)
     policy.update(items)
+    after = json.dumps(policy, indent=4, sort_keys=True)
+    if before == after:
+        return
+
     with open(filename, "w") as fd:
-        fd.write(json.dumps(policy, indent=4))
+        fd.write(after)
 
 
 @cached
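A quick sketch of the now-idempotent behaviour (the file path and policy entry are illustrative):

from charmhelpers.contrib.openstack.utils import update_json_file

update_json_file('/etc/glance/policy.json', {'add_image': 'role:admin'})
# Repeating the call with identical items leaves the file untouched, so
# mtime-based restart triggers no longer fire spuriously.
update_json_file('/etc/glance/policy.json', {'add_image': 'role:admin'})
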
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
index 0d9bacfdbc78f0c97f74b7a4f24405f3b842d196..e13e60a6c483492d04d67389606984a2827aa1c1 100644
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None):
         assert isinstance(valid_range, list), \
             "valid_range must be a list, was given {}".format(valid_range)
         # If we're dealing with strings
-        if valid_type is six.string_types:
+        if isinstance(value, six.string_types):
             assert value in valid_range, \
                 "{} is not in the list {}".format(value, valid_range)
         # Integer, float should have a min and max
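With the fix, string membership is keyed off the value's runtime type rather than requiring callers to pass exactly the six.string_types tuple as valid_type; a sketch:

import six
from charmhelpers.contrib.storage.linux.ceph import validator

# Passes: 'host' is a string and is in valid_range.
validator(value='host', valid_type=six.string_types,
          valid_range=['host', 'rack', 'row'])
# Raises AssertionError: 'pdu' is not in the list.
validator(value='pdu', valid_type=six.string_types,
          valid_range=['host', 'rack', 'row'])
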
@@ -517,7 +517,8 @@ def pool_set(service, pool_name, key, value):
     :param value:
     :return: None.  Can raise CalledProcessError
     """
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key,
+           str(value).lower()]
     try:
         check_call(cmd)
     except CalledProcessError:
@@ -621,16 +622,24 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
     :param durability_estimator: int
     :return: None.  Can raise CalledProcessError
     """
+    version = ceph_version()
+
     # Ensure this failure_domain is allowed by Ceph
     validator(failure_domain, six.string_types,
               ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
 
     cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
-           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
-           'ruleset_failure_domain=' + failure_domain]
+           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks),
+           'm=' + str(coding_chunks)]
     if locality is not None and durability_estimator is not None:
         raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
 
+    # The failure-domain key changed in Luminous: 'crush-failure-domain'
+    # replaces 'ruleset-failure-domain'. ceph_version() returns a version
+    # string, so this lexicographic check assumes a 10.x-or-later baseline.
+    if version and version >= '12.0.0':
+        cmd.append('crush-failure-domain=' + failure_domain)
+    else:
+        cmd.append('ruleset-failure-domain=' + failure_domain)
+
     # Add plugin specific information
     if locality is not None:
         # For local erasure codes
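A hedged example of the release-dependent flag (the profile name and chunk counts are illustrative):

from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile

# On Luminous (>= 12.0.0) this issues 'crush-failure-domain=host';
# older releases get 'ruleset-failure-domain=host'.
create_erasure_profile(service='admin', profile_name='ec-42',
                       erasure_plugin_name='jerasure',
                       failure_domain='host',
                       data_chunks=4, coding_chunks=2)
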
@@ -1064,14 +1073,24 @@ class CephBrokerRq(object):
         self.ops = []
 
     def add_op_request_access_to_group(self, name, namespace=None,
-                                       permission=None, key_name=None):
+                                       permission=None, key_name=None,
+                                       object_prefix_permissions=None):
         """
         Adds the requested permissions to the current service's Ceph key,
-        allowing the key to access only the specified pools
+        allowing the key to access only the specified pools or
+        object prefixes. object_prefix_permissions should be a dictionary
+        keyed on permission, with each value a list of prefixes to apply
+        that permission to, e.g.:
+            {
+                'rwx': ['prefix1', 'prefix2'],
+                'class-read': ['prefix3'],
+            }
         """
-        self.ops.append({'op': 'add-permissions-to-key', 'group': name,
-                         'namespace': namespace, 'name': key_name or service_name(),
-                         'group-permission': permission})
+        self.ops.append({
+            'op': 'add-permissions-to-key', 'group': name,
+            'namespace': namespace,
+            'name': key_name or service_name(),
+            'group-permission': permission,
+            'object-prefix-permissions': object_prefix_permissions})
 
     def add_op_create_pool(self, name, replica_count=3, pg_num=None,
                            weight=None, group=None, namespace=None):
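A usage sketch of the extended broker request (group and prefix names are illustrative):

from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

rq = CephBrokerRq()
rq.add_op_request_access_to_group(
    name='images', permission='rwx',
    object_prefix_permissions={'rwx': ['images'],
                               'class-read': ['rbd_children']})
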
@@ -1107,7 +1126,10 @@ class CephBrokerRq(object):
     def _ops_equal(self, other):
         if len(self.ops) == len(other.ops):
             for req_no in range(0, len(self.ops)):
-                for key in ['replicas', 'name', 'op', 'pg_num', 'weight']:
+                for key in [
+                        'replicas', 'name', 'op', 'pg_num', 'weight',
+                        'group', 'group-namespace', 'group-permission',
+                        'object-prefix-permissions']:
                     if self.ops[req_no].get(key) != other.ops[req_no].get(key):
                         return False
         else:
diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py
index 7f2a0604931fd7b9ee15224971b4a3f5ed79f3ef..79a7a245bb5eb0938620c95b60166179c6b986a7 100644
--- a/hooks/charmhelpers/contrib/storage/linux/lvm.py
+++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import functools
 from subprocess import (
     CalledProcessError,
     check_call,
@@ -101,3 +102,52 @@ def create_lvm_volume_group(volume_group, block_device):
     :block_device: str: Full path of PV-initialized block device.
     '''
     check_call(['vgcreate', volume_group, block_device])
+
+
+def list_logical_volumes(select_criteria=None, path_mode=False):
+    '''
+    List logical volumes
+
+    :param select_criteria: str: Limit the list to volumes matching these
+                                 criteria (see 'lvs -S help' for details)
+    :param path_mode: bool: return logical volume names in 'vg/lv' format;
+                            this format is required by commands such as
+                            lvextend
+    :returns: [str]: List of logical volumes
+    '''
+    lv_display_attr = 'lv_name'
+    if path_mode:
+        # The output parsing below relies on this column order
+        lv_display_attr = 'vg_name,' + lv_display_attr
+    cmd = ['lvs', '--options', lv_display_attr, '--noheadings']
+    if select_criteria:
+        cmd.extend(['--select', select_criteria])
+    lvs = []
+    for lv in check_output(cmd).decode('UTF-8').splitlines():
+        if not lv:
+            continue
+        if path_mode:
+            lvs.append('/'.join(lv.strip().split()))
+        else:
+            lvs.append(lv.strip())
+    return lvs
+
+
+list_thin_logical_volume_pools = functools.partial(
+    list_logical_volumes,
+    select_criteria='lv_attr =~ ^t')
+
+list_thin_logical_volumes = functools.partial(
+    list_logical_volumes,
+    select_criteria='lv_attr =~ ^V')
+
+
+def extend_logical_volume_by_device(lv_name, block_device):
+    '''
+    Extends the size of logical volume lv_name by the amount of free space on
+    physical volume block_device.
+
+    :param lv_name: str: name of logical volume to be extended (vg/lv format)
+    :param block_device: str: name of block_device to be allocated to lv_name
+    '''
+    cmd = ['lvextend', lv_name, block_device]
+    check_call(cmd)
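
The new helpers compose naturally; an illustrative sketch (the device name is an assumption):

from charmhelpers.contrib.storage.linux.lvm import (
    extend_logical_volume_by_device,
    list_thin_logical_volume_pools,
)

# Grow every thin-pool LV using the free space contributed by a new PV.
for lv in list_thin_logical_volume_pools(path_mode=True):
    extend_logical_volume_by_device(lv, '/dev/vdb')
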
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index 5a88f798e89546ad9128cb7d4a1cb8bd6e69a644..211ae87de0bb66f941dd4b0ad9eaecee5190f3ce 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -39,6 +39,7 @@ if not six.PY3:
 else:
     from collections import UserDict
 
+
 CRITICAL = "CRITICAL"
 ERROR = "ERROR"
 WARNING = "WARNING"
@@ -344,6 +345,7 @@ class Config(dict):
 
         """
         with open(self.path, 'w') as f:
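+            # Tighten the mode before writing anything; charm config can
+            # contain secrets such as passwords.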
+            os.fchmod(f.fileno(), 0o600)
             json.dump(self, f)
 
     def _implicit_save(self):
diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py
index 7af875c2fcc1e2e38f9267bfdc60ab5a2a499c18..6d7b4942115d23f9400d8a00da9f0189940bd0d8 100644
--- a/hooks/charmhelpers/core/unitdata.py
+++ b/hooks/charmhelpers/core/unitdata.py
@@ -175,6 +175,8 @@ class Storage(object):
             else:
                 self.db_path = os.path.join(
                     os.environ.get('CHARM_DIR', ''), '.unit-state.db')
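+        # Ensure the state DB exists with owner-only permissions before
+        # sqlite3 opens it; unit state can contain secrets.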
+        with open(self.db_path, 'a') as f:
+            os.fchmod(f.fileno(), 0o600)
         self.conn = sqlite3.connect('%s' % self.db_path)
         self.cursor = self.conn.cursor()
         self.revision = None
diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py
index 5a88f798e89546ad9128cb7d4a1cb8bd6e69a644..211ae87de0bb66f941dd4b0ad9eaecee5190f3ce 100644
--- a/tests/charmhelpers/core/hookenv.py
+++ b/tests/charmhelpers/core/hookenv.py
@@ -39,6 +39,7 @@ if not six.PY3:
 else:
     from collections import UserDict
 
+
 CRITICAL = "CRITICAL"
 ERROR = "ERROR"
 WARNING = "WARNING"
@@ -344,6 +345,7 @@ class Config(dict):
 
         """
         with open(self.path, 'w') as f:
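+            # Tighten the mode before writing anything; charm config can
+            # contain secrets such as passwords.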
+            os.fchmod(f.fileno(), 0o600)
             json.dump(self, f)
 
     def _implicit_save(self):
diff --git a/tests/charmhelpers/core/unitdata.py b/tests/charmhelpers/core/unitdata.py
index 7af875c2fcc1e2e38f9267bfdc60ab5a2a499c18..6d7b4942115d23f9400d8a00da9f0189940bd0d8 100644
--- a/tests/charmhelpers/core/unitdata.py
+++ b/tests/charmhelpers/core/unitdata.py
@@ -175,6 +175,8 @@ class Storage(object):
             else:
                 self.db_path = os.path.join(
                     os.environ.get('CHARM_DIR', ''), '.unit-state.db')
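+        # Ensure the state DB exists with owner-only permissions before
+        # sqlite3 opens it; unit state can contain secrets.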
+        with open(self.db_path, 'a') as f:
+            os.fchmod(f.fileno(), 0o600)
         self.conn = sqlite3.connect('%s' % self.db_path)
         self.cursor = self.conn.cursor()
         self.revision = None