diff --git a/README.md b/README.md
index 04ad9a0e629aa21096ee12aeeaa59cc29322c489..3461b68957d93640d91b4c3dcee1b49c882c0c61 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 Overview
 ========
 
-This charm provides Keystone, the Openstack identity service. It's target
+This charm provides Keystone, the Openstack identity service. Its target
 platform is (ideally) Ubuntu LTS + Openstack.
 
 This is a modified version,  which adds support for Identity
@@ -14,7 +14,7 @@ The following interfaces are provided:
 
     - nrpe-external-master: Used to generate Nagios checks.
 
-    - identity-service: Openstack API endpoints request an entry in the 
+    - identity-service: Openstack API endpoints request an entry in the
       Keystone service catalog + endpoint template catalog. When a relation
       is established, Keystone receives: service name, region, public_url,
       admin_url and internal_url. It first checks that the requested service
@@ -97,33 +97,28 @@ If 'dns-ha' is set and none of the os-{admin,internal,public}-hostname(s) are se
 SSL/HTTPS
 ---------
 
-Support for SSL and https endpoint is provided via a set of configuration
-options on the charm. There are two types supported;
+Support for SSL and https endpoint is provided via various charm configuration
+options.
 
-use-https - if enabled this option tells Keystone to configure the identity
-endpoint as https. Under this model the keystone charm will either use the CA
-as provided by the user (see ssl_* options below) or will generate its own and
-sync across peers. The cert will be distributed to all service endpoints which
-will be configured to use https.
+To enable SSL and https endpoint with a charm-generated CA, set the following
+configuration options:
 
-https-service-endpoints - if enabled this option tells Keystone to configure
-ALL endpoints as https. Under this model the keystone charm will either use the
-CA as provided by the user (see ssl_* options below) or will generate its own
-and sync across peers. The cert will be distributed to all service endpoints
-which will be configured to use https as well as configuring themselves to be
-used as https.
+- use-https - if enabled this option tells Keystone to configure the identity
+  endpoint as https, and the keystone charm will generate its own CA and sync
+  across peers. The cert will be distributed to all service endpoints which
+  will be configured to use https.
 
-When configuring the charms to use SSL there are three charm config options as
-ssl_ca, ssl_cert and ssl_key.
+- https-service-endpoints - if enabled this option tells Keystone to configure
+  ALL endpoints as https. Under this model the keystone charm will generate its
+  own CA and sync across peers. The cert will be distributed to all service
+  endpoints which will be configured to use https as well as configuring
+  themselves to be used as https.
 
-- The user can provide their own CA, SSL cert and key using the options ssl_ca,
-  ssl_cert, ssl_key.
-
-- The user can provide SSL cert and key using ssl_cert and ssl_key when the cert
-  is signed by a trusted CA.
-
-- If not provided, the keystone charm will automatically generate a CA and certs
-  to distribute to endpoints.
+To enable SSL and https endpoint with your own CA, SSL cert, and key set the
+following configuration options: ssl_ca, ssl_cert, and ssl_key. The user can
+provide SSL cert and key using ssl_cert and ssl_key only when the cert is
+signed by a trusted CA. These options should not be used with use-https and
+https-service-endpoints.
 
 When the charm configures itself as a CA (generally only recommended for test
 purposes) it will elect an "ssl-cert-master" whose duty is to generate the CA
@@ -169,7 +164,7 @@ To use this feature, use the --bind option when deploying the charm:
 
     juju deploy keystone --bind "public=public-space internal=internal-space admin=admin-space shared-db=internal-space"
 
-alternatively these can also be provided as part of a juju native bundle configuration:
+Alternatively these can also be provided as part of a juju native bundle configuration:
 
     keystone:
       charm: cs:xenial/keystone
@@ -182,7 +177,7 @@ alternatively these can also be provided as part of a juju native bundle configu
 
 NOTE: Spaces must be configured in the underlying provider prior to attempting to use them.
 
-NOTE: Existing deployments using os-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set.
+NOTE: Existing deployments using os\-\*-network configuration options will continue to function; these options are preferred over any network space binding provided if set.
 
 Federated Authentication
 ------------------------
diff --git a/actions.yaml b/actions.yaml
index 81bbd5b52380bb3c5ce6be4b5d3a211b7c24ab67..767bc8abaa8cdd3cfd50094ae41c1499354ca761 100644
--- a/actions.yaml
+++ b/actions.yaml
@@ -1,5 +1,3 @@
-git-reinstall:
-  description: Reinstall keystone from the openstack-origin-git repositories.
 pause:
   description: |
     Pause keystone services.
diff --git a/actions/git-reinstall b/actions/git-reinstall
deleted file mode 120000
index ff684984335b3d70ce6f61f699745480fd8e0136..0000000000000000000000000000000000000000
--- a/actions/git-reinstall
+++ /dev/null
@@ -1 +0,0 @@
-git_reinstall.py
\ No newline at end of file
diff --git a/actions/git_reinstall.py b/actions/git_reinstall.py
deleted file mode 100755
index 2abc2e405272d0950d129b6efd36223f02ada341..0000000000000000000000000000000000000000
--- a/actions/git_reinstall.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import traceback
-
-from charmhelpers.contrib.openstack.utils import (
-    git_install_requested,
-)
-
-from charmhelpers.core.hookenv import (
-    action_set,
-    action_fail,
-    config,
-)
-
-from hooks.keystone_utils import (
-    git_install,
-)
-
-from hooks.keystone_hooks import (
-    config_changed,
-)
-
-
-def git_reinstall():
-    """Reinstall from source and restart services.
-
-    If the openstack-origin-git config option was used to install openstack
-    from source git repositories, then this action can be used to reinstall
-    from updated git repositories, followed by a restart of services."""
-    if not git_install_requested():
-        action_fail('openstack-origin-git is not configured')
-        return
-
-    try:
-        git_install(config('openstack-origin-git'))
-        config_changed()
-    except:
-        action_set({'traceback': traceback.format_exc()})
-        action_fail('git-reinstall resulted in an unexpected error')
-
-
-if __name__ == '__main__':
-    git_reinstall()
diff --git a/charmhelpers/contrib/hahelpers/apache.py b/charmhelpers/contrib/hahelpers/apache.py
index 22acb683e6cec8648451543d235286be151dbf2a..605a1becd92a4eb5683d3db28a2267373780a736 100644
--- a/charmhelpers/contrib/hahelpers/apache.py
+++ b/charmhelpers/contrib/hahelpers/apache.py
@@ -65,7 +65,8 @@ def get_ca_cert():
     if ca_cert is None:
         log("Inspecting identity-service relations for CA SSL certificate.",
             level=INFO)
-        for r_id in relation_ids('identity-service'):
+        for r_id in (relation_ids('identity-service') +
+                     relation_ids('identity-credentials')):
             for unit in relation_list(r_id):
                 if ca_cert is None:
                     ca_cert = relation_get('ca_cert',
@@ -76,7 +77,7 @@ def get_ca_cert():
 def retrieve_ca_cert(cert_file):
     cert = None
     if os.path.isfile(cert_file):
-        with open(cert_file, 'r') as crt:
+        with open(cert_file, 'rb') as crt:
             cert = crt.read()
     return cert
 
diff --git a/charmhelpers/contrib/hahelpers/cluster.py b/charmhelpers/contrib/hahelpers/cluster.py
index 4207e42c2209958d075bd3a3776d382463d3f6f2..4a737e24e885cd767c7185eebd84a3afdf244b13 100644
--- a/charmhelpers/contrib/hahelpers/cluster.py
+++ b/charmhelpers/contrib/hahelpers/cluster.py
@@ -223,6 +223,11 @@ def https():
         return True
     if config_get('ssl_cert') and config_get('ssl_key'):
         return True
+    for r_id in relation_ids('certificates'):
+        for unit in relation_list(r_id):
+            ca = relation_get('ca', rid=r_id, unit=unit)
+            if ca:
+                return True
     for r_id in relation_ids('identity-service'):
         for unit in relation_list(r_id):
             # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
@@ -371,6 +376,7 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'):
     ''' Distribute operations by waiting based on modulo_distribution
 
     If modulo and or wait are not set, check config_get for those values.
+    If config values are not set, default to modulo=3 and wait=30.
 
     :param modulo: int The modulo number creates the group distribution
     :param wait: int The constant time wait value
@@ -382,10 +388,17 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'):
     :side effect: Calls time.sleep()
     '''
     if modulo is None:
-        modulo = config_get('modulo-nodes')
+        modulo = config_get('modulo-nodes') or 3
     if wait is None:
-        wait = config_get('known-wait')
-    calculated_wait = modulo_distribution(modulo=modulo, wait=wait)
+        wait = config_get('known-wait') or 30
+    if juju_is_leader():
+        # The leader should never wait
+        calculated_wait = 0
+    else:
+        # non_zero_wait=True guarantees the non-leader who gets modulo 0
+        # will still wait
+        calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
+                                              non_zero_wait=True)
     msg = "Waiting {} seconds for {} ...".format(calculated_wait,
                                                  operation_name)
     log(msg, DEBUG)
diff --git a/charmhelpers/contrib/network/ip.py b/charmhelpers/contrib/network/ip.py
index a871ce3701ffd416de391bc4404e1acc0fa62c8c..b13277bb57c9227b1d9dfecf4f6750740e5a262a 100644
--- a/charmhelpers/contrib/network/ip.py
+++ b/charmhelpers/contrib/network/ip.py
@@ -27,6 +27,7 @@ from charmhelpers.core.hookenv import (
     network_get_primary_address,
     unit_get,
     WARNING,
+    NoNetworkBinding,
 )
 
 from charmhelpers.core.host import (
@@ -109,7 +110,12 @@ def get_address_in_network(network, fallback=None, fatal=False):
         _validate_cidr(network)
         network = netaddr.IPNetwork(network)
         for iface in netifaces.interfaces():
-            addresses = netifaces.ifaddresses(iface)
+            try:
+                addresses = netifaces.ifaddresses(iface)
+            except ValueError:
+                # If an instance was deleted between
+                # netifaces.interfaces() run and now, its interfaces are gone
+                continue
             if network.version == 4 and netifaces.AF_INET in addresses:
                 for addr in addresses[netifaces.AF_INET]:
                     cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
@@ -578,6 +584,9 @@ def get_relation_ip(interface, cidr_network=None):
     except NotImplementedError:
         # If network-get is not available
         address = get_host_ip(unit_get('private-address'))
+    except NoNetworkBinding:
+        log("No network binding for {}".format(interface), WARNING)
+        address = get_host_ip(unit_get('private-address'))
 
     if config('prefer-ipv6'):
         # Currently IPv6 has priority, eventually we want IPv6 to just be
diff --git a/charmhelpers/contrib/openstack/amulet/deployment.py b/charmhelpers/contrib/openstack/amulet/deployment.py
index 5afbbd87c13e2b168b088c4da51b3b63ab4d07a2..1c96752a49fb36f389cd1ede38b31afb94127e42 100644
--- a/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -21,6 +21,9 @@ from collections import OrderedDict
 from charmhelpers.contrib.amulet.deployment import (
     AmuletDeployment
 )
+from charmhelpers.contrib.openstack.amulet.utils import (
+    OPENSTACK_RELEASES_PAIRS
+)
 
 DEBUG = logging.DEBUG
 ERROR = logging.ERROR
@@ -271,11 +274,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
            release.
            """
         # Must be ordered by OpenStack release (not by Ubuntu release):
-        (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty,
-         self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton,
-         self.yakkety_newton, self.xenial_ocata, self.zesty_ocata,
-         self.xenial_pike, self.artful_pike, self.xenial_queens,
-         self.bionic_queens,) = range(13)
+        for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
+            setattr(self, os_pair, i)
 
         releases = {
             ('trusty', None): self.trusty_icehouse,
@@ -291,6 +291,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('zesty', None): self.zesty_ocata,
             ('artful', None): self.artful_pike,
             ('bionic', None): self.bionic_queens,
+            ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
+            ('cosmic', None): self.cosmic_rocky,
         }
         return releases[(self.series, self.openstack)]
 
@@ -306,6 +308,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('zesty', 'ocata'),
             ('artful', 'pike'),
             ('bionic', 'queens'),
+            ('cosmic', 'rocky'),
         ])
         if self.openstack:
             os_origin = self.openstack.split(':')[1]
diff --git a/charmhelpers/contrib/openstack/amulet/utils.py b/charmhelpers/contrib/openstack/amulet/utils.py
index b71b2b1910959f5dbe7860ff3d14d45b8e9e2d90..ef4ab54bc8d1a988f827d2b766c3d1f20f0238e1 100644
--- a/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/charmhelpers/contrib/openstack/amulet/utils.py
@@ -40,6 +40,7 @@ import novaclient
 import pika
 import swiftclient
 
+from charmhelpers.core.decorators import retry_on_exception
 from charmhelpers.contrib.amulet.utils import (
     AmuletUtils
 )
@@ -50,6 +51,13 @@ ERROR = logging.ERROR
 
 NOVA_CLIENT_VERSION = "2"
 
+OPENSTACK_RELEASES_PAIRS = [
+    'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
+    'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
+    'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
+    'xenial_pike', 'artful_pike', 'xenial_queens',
+    'bionic_queens', 'bionic_rocky', 'cosmic_rocky']
+
 
 class OpenStackAmuletUtils(AmuletUtils):
     """OpenStack amulet utilities.
@@ -63,7 +71,34 @@ class OpenStackAmuletUtils(AmuletUtils):
         super(OpenStackAmuletUtils, self).__init__(log_level)
 
     def validate_endpoint_data(self, endpoints, admin_port, internal_port,
-                               public_port, expected):
+                               public_port, expected, openstack_release=None):
+        """Validate endpoint data. Pick the correct validator based on
+           OpenStack release. Expected data should be in the v2 format:
+           {
+               'id': id,
+               'region': region,
+               'adminurl': adminurl,
+               'internalurl': internalurl,
+               'publicurl': publicurl,
+               'service_id': service_id}
+
+           """
+        validation_function = self.validate_v2_endpoint_data
+        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+        if openstack_release and openstack_release >= xenial_queens:
+                validation_function = self.validate_v3_endpoint_data
+                expected = {
+                    'id': expected['id'],
+                    'region': expected['region'],
+                    'region_id': 'RegionOne',
+                    'url': self.valid_url,
+                    'interface': self.not_null,
+                    'service_id': expected['service_id']}
+        return validation_function(endpoints, admin_port, internal_port,
+                                   public_port, expected)
+
+    def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
+                                  public_port, expected):
         """Validate endpoint data.
 
            Validate actual endpoint data vs expected endpoint data. The ports
@@ -92,7 +127,7 @@ class OpenStackAmuletUtils(AmuletUtils):
             return 'endpoint not found'
 
     def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
-                                  public_port, expected):
+                                  public_port, expected, expected_num_eps=3):
         """Validate keystone v3 endpoint data.
 
         Validate the v3 endpoint data which has changed from v2.  The
@@ -138,10 +173,89 @@ class OpenStackAmuletUtils(AmuletUtils):
                 if ret:
                     return 'unexpected endpoint data - {}'.format(ret)
 
-        if len(found) != 3:
+        if len(found) != expected_num_eps:
             return 'Unexpected number of endpoints found'
 
-    def validate_svc_catalog_endpoint_data(self, expected, actual):
+    def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
+        """Convert v2 endpoint data into v3.
+
+           {
+               'service_name1': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region.
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+               'service_name2': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region.
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+           }
+          """
+        self.log.warn("Endpoint ID and Region ID validation is limited to not "
+                      "null checks after v2 to v3 conversion")
+        for svc in ep_data.keys():
+            assert len(ep_data[svc]) == 1, "Unknown data format"
+            svc_ep_data = ep_data[svc][0]
+            ep_data[svc] = [
+                {
+                    'url': svc_ep_data['adminURL'],
+                    'interface': 'admin',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null},
+                {
+                    'url': svc_ep_data['publicURL'],
+                    'interface': 'public',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null},
+                {
+                    'url': svc_ep_data['internalURL'],
+                    'interface': 'internal',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null}]
+        return ep_data
+
+    def validate_svc_catalog_endpoint_data(self, expected, actual,
+                                           openstack_release=None):
+        """Validate service catalog endpoint data. Pick the correct validator
+           for the OpenStack version. Expected data should be in the v2 format:
+           {
+               'service_name1': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region.
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+               'service_name2': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region.
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+           }
+
+           """
+        validation_function = self.validate_v2_svc_catalog_endpoint_data
+        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+        if openstack_release and openstack_release >= xenial_queens:
+            validation_function = self.validate_v3_svc_catalog_endpoint_data
+            expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
+        return validation_function(expected, actual)
+
+    def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
         """Validate service catalog endpoint data.
 
            Validate a list of actual service catalog endpoints vs a list of
@@ -310,6 +424,7 @@ class OpenStackAmuletUtils(AmuletUtils):
         self.log.debug('Checking if tenant exists ({})...'.format(tenant))
         return tenant in [t.name for t in keystone.tenants.list()]
 
+    @retry_on_exception(num_retries=5, base_delay=1)
     def keystone_wait_for_propagation(self, sentry_relation_pairs,
                                       api_version):
         """Iterate over list of sentry and relation tuples and verify that
@@ -328,7 +443,7 @@ class OpenStackAmuletUtils(AmuletUtils):
             if rel.get('api_version') != str(api_version):
                 raise Exception("api_version not propagated through relation"
                                 " data yet ('{}' != '{}')."
-                                "".format(rel['api_version'], api_version))
+                                "".format(rel.get('api_version'), api_version))
 
     def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
                                        api_version):
@@ -350,16 +465,13 @@ class OpenStackAmuletUtils(AmuletUtils):
         deployment._auto_wait_for_status()
         self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
 
-    def authenticate_cinder_admin(self, keystone_sentry, username,
-                                  password, tenant, api_version=2):
+    def authenticate_cinder_admin(self, keystone, api_version=2):
         """Authenticates admin user with cinder."""
-        # NOTE(beisner): cinder python client doesn't accept tokens.
-        keystone_ip = keystone_sentry.info['public-address']
-        ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
+        self.log.debug('Authenticating cinder admin...')
         _clients = {
             1: cinder_client.Client,
             2: cinder_clientv2.Client}
-        return _clients[api_version](username, password, tenant, ept)
+        return _clients[api_version](session=keystone.session)
 
     def authenticate_keystone(self, keystone_ip, username, password,
                               api_version=False, admin_port=False,
@@ -367,13 +479,36 @@ class OpenStackAmuletUtils(AmuletUtils):
                               project_domain_name=None, project_name=None):
         """Authenticate with Keystone"""
         self.log.debug('Authenticating with keystone...')
-        port = 5000
-        if admin_port:
-            port = 35357
-        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
-                                        port)
-        if not api_version or api_version == 2:
-            ep = base_ep + "/v2.0"
+        if not api_version:
+            api_version = 2
+        sess, auth = self.get_keystone_session(
+            keystone_ip=keystone_ip,
+            username=username,
+            password=password,
+            api_version=api_version,
+            admin_port=admin_port,
+            user_domain_name=user_domain_name,
+            domain_name=domain_name,
+            project_domain_name=project_domain_name,
+            project_name=project_name
+        )
+        if api_version == 2:
+            client = keystone_client.Client(session=sess)
+        else:
+            client = keystone_client_v3.Client(session=sess)
+        # This populates the client.service_catalog
+        client.auth_ref = auth.get_access(sess)
+        return client
+
+    def get_keystone_session(self, keystone_ip, username, password,
+                             api_version=False, admin_port=False,
+                             user_domain_name=None, domain_name=None,
+                             project_domain_name=None, project_name=None):
+        """Return a keystone session object"""
+        ep = self.get_keystone_endpoint(keystone_ip,
+                                        api_version=api_version,
+                                        admin_port=admin_port)
+        if api_version == 2:
             auth = v2.Password(
                 username=username,
                 password=password,
@@ -381,12 +516,7 @@ class OpenStackAmuletUtils(AmuletUtils):
                 auth_url=ep
             )
             sess = keystone_session.Session(auth=auth)
-            client = keystone_client.Client(session=sess)
-            # This populates the client.service_catalog
-            client.auth_ref = auth.get_access(sess)
-            return client
         else:
-            ep = base_ep + "/v3"
             auth = v3.Password(
                 user_domain_name=user_domain_name,
                 username=username,
@@ -397,10 +527,57 @@ class OpenStackAmuletUtils(AmuletUtils):
                 auth_url=ep
             )
             sess = keystone_session.Session(auth=auth)
-            client = keystone_client_v3.Client(session=sess)
-            # This populates the client.service_catalog
-            client.auth_ref = auth.get_access(sess)
-            return client
+        return (sess, auth)
+
+    def get_keystone_endpoint(self, keystone_ip, api_version=None,
+                              admin_port=False):
+        """Return keystone endpoint"""
+        port = 5000
+        if admin_port:
+            port = 35357
+        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
+                                        port)
+        if api_version == 2:
+            ep = base_ep + "/v2.0"
+        else:
+            ep = base_ep + "/v3"
+        return ep
+
+    def get_default_keystone_session(self, keystone_sentry,
+                                     openstack_release=None, api_version=2):
+        """Return a keystone session object and client object assuming standard
+           default settings
+
+           Example call in amulet tests:
+               self.keystone_session, self.keystone = u.get_default_keystone_session(
+                   self.keystone_sentry,
+                   openstack_release=self._get_openstack_release())
+
+           The session can then be used to auth other clients:
+               neutronclient.Client(session=session)
+               aodh_client.Client(session=session)
+               eyc
+        """
+        self.log.debug('Authenticating keystone admin...')
+        # 11 => xenial_queens
+        if api_version == 3 or (openstack_release and openstack_release >= 11):
+            client_class = keystone_client_v3.Client
+            api_version = 3
+        else:
+            client_class = keystone_client.Client
+        keystone_ip = keystone_sentry.info['public-address']
+        session, auth = self.get_keystone_session(
+            keystone_ip,
+            api_version=api_version,
+            username='admin',
+            password='openstack',
+            project_name='admin',
+            user_domain_name='admin_domain',
+            project_domain_name='admin_domain')
+        client = client_class(session=session)
+        # This populates the client.service_catalog
+        client.auth_ref = auth.get_access(session)
+        return session, client
 
     def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                     tenant=None, api_version=None,
@@ -858,9 +1035,12 @@ class OpenStackAmuletUtils(AmuletUtils):
         :returns: List of pool name, object count, kb disk space used
         """
         df = self.get_ceph_df(sentry_unit)
-        pool_name = df['pools'][pool_id]['name']
-        obj_count = df['pools'][pool_id]['stats']['objects']
-        kb_used = df['pools'][pool_id]['stats']['kb_used']
+        for pool in df['pools']:
+            if pool['id'] == pool_id:
+                pool_name = pool['name']
+                obj_count = pool['stats']['objects']
+                kb_used = pool['stats']['kb_used']
+
         self.log.debug('Ceph {} pool (ID {}): {} objects, '
                        '{} kb used'.format(pool_name, pool_id,
                                            obj_count, kb_used))
diff --git a/charmhelpers/contrib/openstack/cert_utils.py b/charmhelpers/contrib/openstack/cert_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..de853b5371e915fb58bd6b29c1ea16a90ce7d08f
--- /dev/null
+++ b/charmhelpers/contrib/openstack/cert_utils.py
@@ -0,0 +1,227 @@
+# Copyright 2014-2018 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Common python helper functions used for OpenStack charm certificates.
+
+import os
+import json
+
+from charmhelpers.contrib.network.ip import (
+    get_hostname,
+    resolve_network_cidr,
+)
+from charmhelpers.core.hookenv import (
+    local_unit,
+    network_get_primary_address,
+    config,
+    relation_get,
+    unit_get,
+    NoNetworkBinding,
+    log,
+    WARNING,
+)
+from charmhelpers.contrib.openstack.ip import (
+    ADMIN,
+    resolve_address,
+    get_vip_in_network,
+    INTERNAL,
+    PUBLIC,
+    ADDRESS_MAP)
+
+from charmhelpers.core.host import (
+    mkdir,
+    write_file,
+)
+
+from charmhelpers.contrib.hahelpers.apache import (
+    install_ca_cert
+)
+
+
+class CertRequest(object):
+
+    """Create a request for certificates to be generated
+    """
+
+    def __init__(self, json_encode=True):
+        self.entries = []
+        self.hostname_entry = None
+        self.json_encode = json_encode
+
+    def add_entry(self, net_type, cn, addresses):
+        """Add a request to the batch
+
+        :param net_type: str network space name request is for
+        :param cn: str Canonical Name for certificate
+        :param addresses: [] List of addresses to be used as SANs
+        """
+        self.entries.append({
+            'cn': cn,
+            'addresses': addresses})
+
+    def add_hostname_cn(self):
+        """Add a request for the hostname of the machine"""
+        ip = unit_get('private-address')
+        addresses = [ip]
+        # If a vip is being used without os-hostname config or
+        # network spaces then we need to ensure the local units
+        # cert has the appropriate vip in the SAN list
+        vip = get_vip_in_network(resolve_network_cidr(ip))
+        if vip:
+            addresses.append(vip)
+        self.hostname_entry = {
+            'cn': get_hostname(ip),
+            'addresses': addresses}
+
+    def add_hostname_cn_ip(self, addresses):
+        """Add an address to the SAN list for the hostname request
+
+        :param addresses: [] List of addresses to be added
+        """
+        for addr in addresses:
+            if addr not in self.hostname_entry['addresses']:
+                self.hostname_entry['addresses'].append(addr)
+
+    def get_request(self):
+        """Generate request from the batched up entries
+
+        """
+        if self.hostname_entry:
+            self.entries.append(self.hostname_entry)
+        request = {}
+        for entry in self.entries:
+            sans = sorted(list(set(entry['addresses'])))
+            request[entry['cn']] = {'sans': sans}
+        if self.json_encode:
+            return {'cert_requests': json.dumps(request, sort_keys=True)}
+        else:
+            return {'cert_requests': request}
+
+
+def get_certificate_request(json_encode=True):
+    """Generate a certificate request based on the network configuration
+
+    """
+    req = CertRequest(json_encode=json_encode)
+    req.add_hostname_cn()
+    # Add os-hostname entries
+    for net_type in [INTERNAL, ADMIN, PUBLIC]:
+        net_config = config(ADDRESS_MAP[net_type]['override'])
+        try:
+            net_addr = resolve_address(endpoint_type=net_type)
+            ip = network_get_primary_address(
+                ADDRESS_MAP[net_type]['binding'])
+            addresses = [net_addr, ip]
+            vip = get_vip_in_network(resolve_network_cidr(ip))
+            if vip:
+                addresses.append(vip)
+            if net_config:
+                req.add_entry(
+                    net_type,
+                    net_config,
+                    addresses)
+            else:
+                # There is a network address with no corresponding hostname.
+                # Add the ip to the hostname cert to allow for this.
+                req.add_hostname_cn_ip(addresses)
+        except NoNetworkBinding:
+            log("Skipping request for certificate for ip in {} space, no "
+                "local address found".format(net_type), WARNING)
+    return req.get_request()
+
+
+def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
+    """Create symlinks for SAN records
+
+    :param ssl_dir: str Directory to create symlinks in
+    :param custom_hostname_link: str Additional link to be created
+    """
+    hostname = get_hostname(unit_get('private-address'))
+    hostname_cert = os.path.join(
+        ssl_dir,
+        'cert_{}'.format(hostname))
+    hostname_key = os.path.join(
+        ssl_dir,
+        'key_{}'.format(hostname))
+    # Add links to hostname cert, used if os-hostname vars not set
+    for net_type in [INTERNAL, ADMIN, PUBLIC]:
+        try:
+            addr = resolve_address(endpoint_type=net_type)
+            cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
+            key = os.path.join(ssl_dir, 'key_{}'.format(addr))
+            if os.path.isfile(hostname_cert) and not os.path.isfile(cert):
+                os.symlink(hostname_cert, cert)
+                os.symlink(hostname_key, key)
+        except NoNetworkBinding:
+            log("Skipping creating cert symlink for ip in {} space, no "
+                "local address found".format(net_type), WARNING)
+    if custom_hostname_link:
+        custom_cert = os.path.join(
+            ssl_dir,
+            'cert_{}'.format(custom_hostname_link))
+        custom_key = os.path.join(
+            ssl_dir,
+            'key_{}'.format(custom_hostname_link))
+        if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert):
+            os.symlink(hostname_cert, custom_cert)
+            os.symlink(hostname_key, custom_key)
+
+
+def install_certs(ssl_dir, certs, chain=None):
+    """Install the certs passed into the ssl dir and append the chain if
+       provided.
+
+    :param ssl_dir: str Directory to write certificates and keys to
+    :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}}
+    :param chain: str Chain to be appended to certs
+    """
+    for cn, bundle in certs.items():
+        cert_filename = 'cert_{}'.format(cn)
+        key_filename = 'key_{}'.format(cn)
+        cert_data = bundle['cert']
+        if chain:
+            # Append chain file so that clients that trust the root CA will
+            # trust certs signed by an intermediate in the chain
+            cert_data = cert_data + chain
+        write_file(
+            path=os.path.join(ssl_dir, cert_filename),
+            content=cert_data, perms=0o640)
+        write_file(
+            path=os.path.join(ssl_dir, key_filename),
+            content=bundle['key'], perms=0o640)
+
+
+def process_certificates(service_name, relation_id, unit,
+                         custom_hostname_link=None):
+    """Process the certificates supplied down the relation
+
+    :param service_name: str Name of service the certificates are for.
+    :param relation_id: str Relation id providing the certs
+    :param unit: str Unit providing the certs
+    :param custom_hostname_link: str Name of custom link to create
+    """
+    data = relation_get(rid=relation_id, unit=unit)
+    ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
+    mkdir(path=ssl_dir)
+    name = local_unit().replace('/', '_')
+    certs = data.get('{}.processed_requests'.format(name))
+    chain = data.get('chain')
+    ca = data.get('ca')
+    if certs:
+        certs = json.loads(certs)
+        install_ca_cert(ca.encode())
+        install_certs(ssl_dir, certs, chain)
+        create_ip_cert_links(
+            ssl_dir,
+            custom_hostname_link=custom_hostname_link)
diff --git a/charmhelpers/contrib/openstack/context.py b/charmhelpers/contrib/openstack/context.py
index e6c0e9feb4de9efdb97397b85979f7a4ffcbcfd0..f3741b0e5ed0a8724fd009b4c0d1b54876618a47 100644
--- a/charmhelpers/contrib/openstack/context.py
+++ b/charmhelpers/contrib/openstack/context.py
@@ -93,14 +93,14 @@ from charmhelpers.contrib.network.ip import (
     format_ipv6_addr,
     is_bridge_member,
     is_ipv6_disabled,
+    get_relation_ip,
 )
 from charmhelpers.contrib.openstack.utils import (
     config_flags_parser,
-    get_host_ip,
-    git_determine_usr_bin,
-    git_determine_python_path,
     enable_memcache,
     snap_install_requested,
+    CompareOpenStackReleases,
+    os_release,
 )
 from charmhelpers.core.unitdata import kv
 
@@ -190,8 +190,8 @@ class OSContextGenerator(object):
 class SharedDBContext(OSContextGenerator):
     interfaces = ['shared-db']
 
-    def __init__(self,
-                 database=None, user=None, relation_prefix=None, ssl_dir=None):
+    def __init__(self, database=None, user=None, relation_prefix=None,
+                 ssl_dir=None, relation_id=None):
         """Allows inspecting relation for settings prefixed with
         relation_prefix. This is useful for parsing access for multiple
         databases returned via the shared-db interface (eg, nova_password,
@@ -202,6 +202,7 @@ class SharedDBContext(OSContextGenerator):
         self.user = user
         self.ssl_dir = ssl_dir
         self.rel_name = self.interfaces[0]
+        self.relation_id = relation_id
 
     def __call__(self):
         self.database = self.database or config('database')
@@ -235,7 +236,12 @@ class SharedDBContext(OSContextGenerator):
         if self.relation_prefix:
             password_setting = self.relation_prefix + '_password'
 
-        for rid in relation_ids(self.interfaces[0]):
+        if self.relation_id:
+            rids = [self.relation_id]
+        else:
+            rids = relation_ids(self.interfaces[0])
+
+        for rid in rids:
             self.related = True
             for unit in related_units(rid):
                 rdata = relation_get(rid=rid, unit=unit)
@@ -332,10 +338,7 @@ class IdentityServiceContext(OSContextGenerator):
         self.rel_name = rel_name
         self.interfaces = [self.rel_name]
 
-    def __call__(self):
-        log('Generating template context for ' + self.rel_name, level=DEBUG)
-        ctxt = {}
-
+    def _setup_pki_cache(self):
         if self.service and self.service_user:
             # This is required for pki token signing if we don't want /tmp to
             # be used.
@@ -345,6 +348,15 @@ class IdentityServiceContext(OSContextGenerator):
                 mkdir(path=cachedir, owner=self.service_user,
                       group=self.service_user, perms=0o700)
 
+            return cachedir
+        return None
+
+    def __call__(self):
+        log('Generating template context for ' + self.rel_name, level=DEBUG)
+        ctxt = {}
+
+        cachedir = self._setup_pki_cache()
+        if cachedir:
             ctxt['signing_dir'] = cachedir
 
         for rid in relation_ids(self.rel_name):
@@ -378,6 +390,63 @@ class IdentityServiceContext(OSContextGenerator):
                     # so a missing value just indicates keystone needs
                     # upgrading
                     ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
+                    ctxt['admin_domain_id'] = rdata.get('service_domain_id')
+                    return ctxt
+
+        return {}
+
+
+class IdentityCredentialsContext(IdentityServiceContext):
+    '''Context for identity-credentials interface type'''
+
+    def __init__(self,
+                 service=None,
+                 service_user=None,
+                 rel_name='identity-credentials'):
+        super(IdentityCredentialsContext, self).__init__(service,
+                                                         service_user,
+                                                         rel_name)
+
+    def __call__(self):
+        log('Generating template context for ' + self.rel_name, level=DEBUG)
+        ctxt = {}
+
+        cachedir = self._setup_pki_cache()
+        if cachedir:
+            ctxt['signing_dir'] = cachedir
+
+        for rid in relation_ids(self.rel_name):
+            self.related = True
+            for unit in related_units(rid):
+                rdata = relation_get(rid=rid, unit=unit)
+                credentials_host = rdata.get('credentials_host')
+                credentials_host = (
+                    format_ipv6_addr(credentials_host) or credentials_host
+                )
+                auth_host = rdata.get('auth_host')
+                auth_host = format_ipv6_addr(auth_host) or auth_host
+                svc_protocol = rdata.get('credentials_protocol') or 'http'
+                auth_protocol = rdata.get('auth_protocol') or 'http'
+                api_version = rdata.get('api_version') or '2.0'
+                ctxt.update({
+                    'service_port': rdata.get('credentials_port'),
+                    'service_host': credentials_host,
+                    'auth_host': auth_host,
+                    'auth_port': rdata.get('auth_port'),
+                    'admin_tenant_name': rdata.get('credentials_project'),
+                    'admin_tenant_id': rdata.get('credentials_project_id'),
+                    'admin_user': rdata.get('credentials_username'),
+                    'admin_password': rdata.get('credentials_password'),
+                    'service_protocol': svc_protocol,
+                    'auth_protocol': auth_protocol,
+                    'api_version': api_version
+                })
+
+                if float(api_version) > 2:
+                    ctxt.update({'admin_domain_name':
+                                 rdata.get('domain')})
+
+                if self.context_complete(ctxt):
                     return ctxt
 
         return {}
@@ -385,11 +454,13 @@ class IdentityServiceContext(OSContextGenerator):
 
 class AMQPContext(OSContextGenerator):
 
-    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
+    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None,
+                 relation_id=None):
         self.ssl_dir = ssl_dir
         self.rel_name = rel_name
         self.relation_prefix = relation_prefix
         self.interfaces = [rel_name]
+        self.relation_id = relation_id
 
     def __call__(self):
         log('Generating template context for amqp', level=DEBUG)
@@ -410,7 +481,11 @@ class AMQPContext(OSContextGenerator):
             raise OSContextError
 
         ctxt = {}
-        for rid in relation_ids(self.rel_name):
+        if self.relation_id:
+            rids = [self.relation_id]
+        else:
+            rids = relation_ids(self.rel_name)
+        for rid in rids:
             ha_vip_only = False
             self.related = True
             transport_hosts = None
@@ -555,7 +630,9 @@ class HAProxyContext(OSContextGenerator):
     """
     interfaces = ['cluster']
 
-    def __init__(self, singlenode_mode=False):
+    def __init__(self, singlenode_mode=False,
+                 address_types=ADDRESS_TYPES):
+        self.address_types = address_types
         self.singlenode_mode = singlenode_mode
 
     def __call__(self):
@@ -564,19 +641,22 @@ class HAProxyContext(OSContextGenerator):
         if not relation_ids('cluster') and not self.singlenode_mode:
             return {}
 
-        if config('prefer-ipv6'):
-            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
-        else:
-            addr = get_host_ip(unit_get('private-address'))
-
         l_unit = local_unit().replace('/', '-')
         cluster_hosts = {}
 
         # NOTE(jamespage): build out map of configured network endpoints
         # and associated backends
-        for addr_type in ADDRESS_TYPES:
+        for addr_type in self.address_types:
             cfg_opt = 'os-{}-network'.format(addr_type)
-            laddr = get_address_in_network(config(cfg_opt))
+            # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather
+            # than 'internal'
+            if addr_type == 'internal':
+                _addr_map_type = INTERNAL
+            else:
+                _addr_map_type = addr_type
+            # Network spaces aware
+            laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'],
+                                    config(cfg_opt))
             if laddr:
                 netmask = get_netmask_for_address(laddr)
                 cluster_hosts[laddr] = {
@@ -587,15 +667,19 @@ class HAProxyContext(OSContextGenerator):
                 }
                 for rid in relation_ids('cluster'):
                     for unit in sorted(related_units(rid)):
+                        # API Charms will need to set {addr_type}-address with
+                        # get_relation_ip(addr_type)
                         _laddr = relation_get('{}-address'.format(addr_type),
                                               rid=rid, unit=unit)
                         if _laddr:
                             _unit = unit.replace('/', '-')
                             cluster_hosts[laddr]['backends'][_unit] = _laddr
 
-        # NOTE(jamespage) add backend based on private address - this
-        # with either be the only backend or the fallback if no acls
+        # NOTE(jamespage) add backend based on get_relation_ip - this
+        # will either be the only backend or the fallback if no acls
         # match in the frontend
+        # Network spaces aware
+        addr = get_relation_ip('cluster')
         cluster_hosts[addr] = {}
         netmask = get_netmask_for_address(addr)
         cluster_hosts[addr] = {
@@ -605,6 +689,8 @@ class HAProxyContext(OSContextGenerator):
         }
         for rid in relation_ids('cluster'):
             for unit in sorted(related_units(rid)):
+                # API Charms will need to set their private-address with
+                # get_relation_ip('cluster')
                 _laddr = relation_get('private-address',
                                       rid=rid, unit=unit)
                 if _laddr:
@@ -715,17 +801,18 @@ class ApacheSSLContext(OSContextGenerator):
         ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
         mkdir(path=ssl_dir)
         cert, key = get_cert(cn)
-        if cn:
-            cert_filename = 'cert_{}'.format(cn)
-            key_filename = 'key_{}'.format(cn)
-        else:
-            cert_filename = 'cert'
-            key_filename = 'key'
+        if cert and key:
+            if cn:
+                cert_filename = 'cert_{}'.format(cn)
+                key_filename = 'key_{}'.format(cn)
+            else:
+                cert_filename = 'cert'
+                key_filename = 'key'
 
-        write_file(path=os.path.join(ssl_dir, cert_filename),
-                   content=b64decode(cert))
-        write_file(path=os.path.join(ssl_dir, key_filename),
-                   content=b64decode(key))
+            write_file(path=os.path.join(ssl_dir, cert_filename),
+                       content=b64decode(cert), perms=0o640)
+            write_file(path=os.path.join(ssl_dir, key_filename),
+                       content=b64decode(key), perms=0o640)
 
     def configure_ca(self):
         ca_cert = get_ca_cert()
@@ -797,23 +884,31 @@ class ApacheSSLContext(OSContextGenerator):
         if not self.external_ports or not https():
             return {}
 
-        self.configure_ca()
+        use_keystone_ca = True
+        for rid in relation_ids('certificates'):
+            if related_units(rid):
+                use_keystone_ca = False
+
+        if use_keystone_ca:
+            self.configure_ca()
+
         self.enable_modules()
 
         ctxt = {'namespace': self.service_namespace,
                 'endpoints': [],
                 'ext_ports': []}
 
-        cns = self.canonical_names()
-        if cns:
-            for cn in cns:
-                self.configure_cert(cn)
-        else:
-            # Expect cert/key provided in config (currently assumed that ca
-            # uses ip for cn)
-            for net_type in (INTERNAL, ADMIN, PUBLIC):
-                cn = resolve_address(endpoint_type=net_type)
-                self.configure_cert(cn)
+        if use_keystone_ca:
+            cns = self.canonical_names()
+            if cns:
+                for cn in cns:
+                    self.configure_cert(cn)
+            else:
+                # Expect cert/key provided in config (currently assumed that ca
+                # uses ip for cn)
+                for net_type in (INTERNAL, ADMIN, PUBLIC):
+                    cn = resolve_address(endpoint_type=net_type)
+                    self.configure_cert(cn)
 
         addresses = self.get_network_addresses()
         for address, endpoint in addresses:
@@ -1321,8 +1416,6 @@ class WSGIWorkerConfigContext(WorkerConfigContext):
             "public_processes": int(math.ceil(self.public_process_weight *
                                               total_processes)),
             "threads": 1,
-            "usr_bin": git_determine_usr_bin(),
-            "python_path": git_determine_python_path(),
         }
         return ctxt
 
@@ -1570,6 +1663,82 @@ class InternalEndpointContext(OSContextGenerator):
         return {'use_internal_endpoints': config('use-internal-endpoints')}
 
 
+class VolumeAPIContext(InternalEndpointContext):
+    """Volume API context.
+
+    This context provides information regarding the volume endpoint to use
+    when communicating between services. It determines which version of the
+    API is appropriate for use.
+
+    This value will be determined in the resulting context dictionary
+    returned from calling the VolumeAPIContext object. Information provided
+    by this context is as follows:
+
+        volume_api_version: the volume api version to use, currently
+            'v2' or 'v3'
+        volume_catalog_info: the information to use for a cinder client
+            configuration that consumes API endpoints from the keystone
+            catalog. This is defined as the type:name:endpoint_type string.
+    """
+    # FIXME(wolsen) This implementation is based on the provider being able
+    # to specify the package version to check but does not guarantee that the
+    # volume service api version selected is available. In practice, it is
+    # quite likely the volume service *is* providing the v3 volume service.
+    # This should be resolved when the service-discovery spec is implemented.
+    def __init__(self, pkg):
+        """
+        Creates a new VolumeAPIContext for use in determining which version
+        of the Volume API should be used for communication. A package codename
+        should be supplied for determining the currently installed OpenStack
+        version.
+
+        :param pkg: the package codename to use in order to determine the
+            component version (e.g. nova-common). See
+            charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more.
+        """
+        super(VolumeAPIContext, self).__init__()
+        self._ctxt = None
+        if not pkg:
+            raise ValueError('package name must be provided in order to '
+                             'determine current OpenStack version.')
+        self.pkg = pkg
+
+    @property
+    def ctxt(self):
+        if self._ctxt is not None:
+            return self._ctxt
+        self._ctxt = self._determine_ctxt()
+        return self._ctxt
+
+    def _determine_ctxt(self):
+        """Determines the Volume API endpoint information.
+
+        Determines the appropriate version of the API that should be used
+        as well as the catalog_info string that would be supplied. Returns
+        a dict containing the volume_api_version and the volume_catalog_info.
+        """
+        rel = os_release(self.pkg, base='icehouse')
+        version = '2'
+        if CompareOpenStackReleases(rel) >= 'pike':
+            version = '3'
+
+        service_type = 'volumev{version}'.format(version=version)
+        service_name = 'cinderv{version}'.format(version=version)
+        endpoint_type = 'publicURL'
+        if config('use-internal-endpoints'):
+            endpoint_type = 'internalURL'
+        catalog_info = '{type}:{name}:{endpoint}'.format(
+            type=service_type, name=service_name, endpoint=endpoint_type)
+
+        return {
+            'volume_api_version': version,
+            'volume_catalog_info': catalog_info,
+        }
+
+    def __call__(self):
+        return self.ctxt
+
+
 class AppArmorContext(OSContextGenerator):
     """Base class for apparmor contexts."""
 
@@ -1705,3 +1874,31 @@ class MemcacheContext(OSContextGenerator):
                     ctxt['memcache_server_formatted'],
                     ctxt['memcache_port'])
         return ctxt
+
+
+class EnsureDirContext(OSContextGenerator):
+    '''
+    Serves as a generic context to create a directory as a side-effect.
+
+    Useful for software that supports drop-in files (.d) in conjunction
+    with config option-based templates. Examples include:
+        * OpenStack oslo.policy drop-in files;
+        * systemd drop-in config files;
+        * other software that supports overriding defaults with .d files
+
+    Another use-case is when a subordinate generates a configuration for
+    primary to render in a separate directory.
+
+    Some software requires a user to create a target directory to be
+    scanned for drop-in files with a specific format. This is why this
+    context is needed to do that before rendering a template.
+   '''
+
+    def __init__(self, dirname, **kwargs):
+        '''Used merely to ensure that a given directory exists.'''
+        self.dirname = dirname
+        self.kwargs = kwargs
+
+    def __call__(self):
+        mkdir(self.dirname, **self.kwargs)
+        return {}
diff --git a/charmhelpers/contrib/openstack/files/check_haproxy.sh b/charmhelpers/contrib/openstack/files/check_haproxy.sh
index 7aab129a7ecde2ba76cce08e355efcc1d47eaa45..1df55db4816ec51d6732d68ea0a1e25e6f7b116e 100755
--- a/charmhelpers/contrib/openstack/files/check_haproxy.sh
+++ b/charmhelpers/contrib/openstack/files/check_haproxy.sh
@@ -9,7 +9,7 @@
 CRITICAL=0
 NOTACTIVE=''
 LOGFILE=/var/log/nagios/check_haproxy.log
-AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $4}')
+AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}')
 
 typeset -i N_INSTANCES=0
 for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
diff --git a/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
index 3ebb5329bc5a66ddbf73d813b2ee55e2fa6e87c7..91ce0246e66115994c3f518b36448f70100ecfc7 100755
--- a/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
+++ b/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
@@ -10,7 +10,7 @@
 CURRQthrsh=0
 MAXQthrsh=100
 
-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
+AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}')
 
 HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
 
diff --git a/charmhelpers/contrib/openstack/ha/utils.py b/charmhelpers/contrib/openstack/ha/utils.py
index 9a4d79c12fb9768eefd3930276696ba4ee987250..6060ae50b63677126e1941295487cd4460803b10 100644
--- a/charmhelpers/contrib/openstack/ha/utils.py
+++ b/charmhelpers/contrib/openstack/ha/utils.py
@@ -23,6 +23,8 @@
 Helpers for high availability.
 """
 
+import json
+
 import re
 
 from charmhelpers.core.hookenv import (
@@ -32,6 +34,7 @@ from charmhelpers.core.hookenv import (
     config,
     status_set,
     DEBUG,
+    WARNING,
 )
 
 from charmhelpers.core.host import (
@@ -40,6 +43,23 @@ from charmhelpers.core.host import (
 
 from charmhelpers.contrib.openstack.ip import (
     resolve_address,
+    is_ipv6,
+)
+
+from charmhelpers.contrib.network.ip import (
+    get_iface_for_address,
+    get_netmask_for_address,
+)
+
+from charmhelpers.contrib.hahelpers.cluster import (
+    get_hacluster_config
+)
+
+JSON_ENCODE_OPTIONS = dict(
+    sort_keys=True,
+    allow_nan=False,
+    indent=None,
+    separators=(',', ':'),
 )
 
 
@@ -53,8 +73,8 @@ class DNSHAException(Exception):
 def update_dns_ha_resource_params(resources, resource_params,
                                   relation_id=None,
                                   crm_ocf='ocf:maas:dns'):
-    """ Check for os-*-hostname settings and update resource dictionaries for
-    the HA relation.
+    """ Configure DNS-HA resources based on provided configuration and
+    update resource dictionaries for the HA relation.
 
     @param resources: Pointer to dictionary of resources.
                       Usually instantiated in ha_joined().
@@ -64,7 +84,85 @@ def update_dns_ha_resource_params(resources, resource_params,
     @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
                     DNS HA
     """
+    _relation_data = {'resources': {}, 'resource_params': {}}
+    update_hacluster_dns_ha(charm_name(),
+                            _relation_data,
+                            crm_ocf)
+    resources.update(_relation_data['resources'])
+    resource_params.update(_relation_data['resource_params'])
+    relation_set(relation_id=relation_id, groups=_relation_data['groups'])
+
+
+def assert_charm_supports_dns_ha():
+    """Validate prerequisites for DNS HA
+    The MAAS client is only available on Xenial or greater
+
+    :raises DNSHAException: if release is < 16.04
+    """
+    if lsb_release().get('DISTRIB_RELEASE') < '16.04':
+        msg = ('DNS HA is only supported on 16.04 and greater '
+               'versions of Ubuntu.')
+        status_set('blocked', msg)
+        raise DNSHAException(msg)
+    return True
+
+
+def expect_ha():
+    """ Determine if the unit expects to be in HA
+
+    Check for VIP or dns-ha settings which indicate the unit should expect to
+    be related to hacluster.
+
+    @returns boolean
+    """
+    return config('vip') or config('dns-ha')
+
+
+def generate_ha_relation_data(service):
+    """ Generate relation data for ha relation
+
+    Based on configuration options and unit interfaces, generate a json
+    encoded dict of relation data items for the hacluster relation,
+    providing configuration for DNS HA or VIP's + haproxy clone sets.
+
+    @returns dict: json encoded data for use with relation_set
+    """
+    _haproxy_res = 'res_{}_haproxy'.format(service)
+    _relation_data = {
+        'resources': {
+            _haproxy_res: 'lsb:haproxy',
+        },
+        'resource_params': {
+            _haproxy_res: 'op monitor interval="5s"'
+        },
+        'init_services': {
+            _haproxy_res: 'haproxy'
+        },
+        'clones': {
+            'cl_{}_haproxy'.format(service): _haproxy_res
+        },
+    }
+
+    if config('dns-ha'):
+        update_hacluster_dns_ha(service, _relation_data)
+    else:
+        update_hacluster_vip(service, _relation_data)
+
+    return {
+        'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
+        for k, v in _relation_data.items() if v
+    }
 
+
+def update_hacluster_dns_ha(service, relation_data,
+                            crm_ocf='ocf:maas:dns'):
+    """ Configure DNS-HA resources based on provided configuration
+
+    @param service: Name of the service being configured
+    @param relation_data: Pointer to dictionary of relation data.
+    @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
+                    DNS HA
+    """
     # Validate the charm environment for DNS HA
     assert_charm_supports_dns_ha()
 
@@ -93,7 +191,7 @@ def update_dns_ha_resource_params(resources, resource_params,
             status_set('blocked', msg)
             raise DNSHAException(msg)
 
-        hostname_key = 'res_{}_{}_hostname'.format(charm_name(), endpoint_type)
+        hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type)
         if hostname_key in hostname_group:
             log('DNS HA: Resource {}: {} already exists in '
                 'hostname group - skipping'.format(hostname_key, hostname),
@@ -101,42 +199,67 @@ def update_dns_ha_resource_params(resources, resource_params,
             continue
 
         hostname_group.append(hostname_key)
-        resources[hostname_key] = crm_ocf
-        resource_params[hostname_key] = (
-            'params fqdn="{}" ip_address="{}" '
-            ''.format(hostname, resolve_address(endpoint_type=endpoint_type,
-                                                override=False)))
+        relation_data['resources'][hostname_key] = crm_ocf
+        relation_data['resource_params'][hostname_key] = (
+            'params fqdn="{}" ip_address="{}"'
+            .format(hostname, resolve_address(endpoint_type=endpoint_type,
+                                              override=False)))
 
     if len(hostname_group) >= 1:
         log('DNS HA: Hostname group is set with {} as members. '
             'Informing the ha relation'.format(' '.join(hostname_group)),
             DEBUG)
-        relation_set(relation_id=relation_id, groups={
-            'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)})
+        relation_data['groups'] = {
+            'grp_{}_hostnames'.format(service): ' '.join(hostname_group)
+        }
     else:
         msg = 'DNS HA: Hostname group has no members.'
         status_set('blocked', msg)
         raise DNSHAException(msg)
 
 
-def assert_charm_supports_dns_ha():
-    """Validate prerequisites for DNS HA
-    The MAAS client is only available on Xenial or greater
+def update_hacluster_vip(service, relation_data):
+    """ Configure VIP resources based on provided configuration
+
+    @param service: Name of the service being configured
+    @param relation_data: Pointer to dictionary of relation data.
     """
-    if lsb_release().get('DISTRIB_RELEASE') < '16.04':
-        msg = ('DNS HA is only supported on 16.04 and greater '
-               'versions of Ubuntu.')
-        status_set('blocked', msg)
-        raise DNSHAException(msg)
-    return True
+    cluster_config = get_hacluster_config()
+    vip_group = []
+    for vip in cluster_config['vip'].split():
+        if is_ipv6(vip):
+            res_neutron_vip = 'ocf:heartbeat:IPv6addr'
+            vip_params = 'ipv6addr'
+        else:
+            res_neutron_vip = 'ocf:heartbeat:IPaddr2'
+            vip_params = 'ip'
 
+        iface = (get_iface_for_address(vip) or
+                 config('vip_iface'))
+        netmask = (get_netmask_for_address(vip) or
+                   config('vip_cidr'))
 
-def expect_ha():
-    """ Determine if the unit expects to be in HA
+        if iface is not None:
+            vip_key = 'res_{}_{}_vip'.format(service, iface)
+            if vip_key in vip_group:
+                if vip not in relation_data['resource_params'][vip_key]:
+                    vip_key = '{}_{}'.format(vip_key, vip_params)
+                else:
+                    log("Resource '%s' (vip='%s') already exists in "
+                        "vip group - skipping" % (vip_key, vip), WARNING)
+                    continue
 
-    Check for VIP or dns-ha settings which indicate the unit should expect to
-    be related to hacluster.
+            relation_data['resources'][vip_key] = res_neutron_vip
+            relation_data['resource_params'][vip_key] = (
+                'params {ip}="{vip}" cidr_netmask="{netmask}" '
+                'nic="{iface}"'.format(ip=vip_params,
+                                       vip=vip,
+                                       iface=iface,
+                                       netmask=netmask)
+            )
+            vip_group.append(vip_key)
 
-    @returns boolean
-    """
-    return config('vip') or config('dns-ha')
+    if len(vip_group) >= 1:
+        relation_data['groups'] = {
+            'grp_{}_vips'.format(service): ' '.join(vip_group)
+        }
diff --git a/charmhelpers/contrib/openstack/ip.py b/charmhelpers/contrib/openstack/ip.py
index d1476b1ab21d40934db6eb0cc0d2174d41b1df72..73102af7d5eec9fc0255acfeea211310b8d3794d 100644
--- a/charmhelpers/contrib/openstack/ip.py
+++ b/charmhelpers/contrib/openstack/ip.py
@@ -184,3 +184,13 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
                          "clustered=%s)" % (net_type, clustered))
 
     return resolved_address
+
+
+def get_vip_in_network(network):
+    matching_vip = None
+    vips = config('vip')
+    if vips:
+        for vip in vips.split():
+            if is_address_in_network(network, vip):
+                matching_vip = vip
+    return matching_vip
diff --git a/charmhelpers/contrib/openstack/templates/haproxy.cfg b/charmhelpers/contrib/openstack/templates/haproxy.cfg
index 55270795dd9d604d04f76c18428961dadccfc097..0081fccbe4a9cd369247d99ae64c3cc2ba308140 100644
--- a/charmhelpers/contrib/openstack/templates/haproxy.cfg
+++ b/charmhelpers/contrib/openstack/templates/haproxy.cfg
@@ -17,22 +17,22 @@ defaults
 {%- if haproxy_queue_timeout %}
     timeout queue {{ haproxy_queue_timeout }}
 {%- else %}
-    timeout queue 5000
+    timeout queue 9000
 {%- endif %}
 {%- if haproxy_connect_timeout %}
     timeout connect {{ haproxy_connect_timeout }}
 {%- else %}
-    timeout connect 5000
+    timeout connect 9000
 {%- endif %}
 {%- if haproxy_client_timeout %}
     timeout client {{ haproxy_client_timeout }}
 {%- else %}
-    timeout client 30000
+    timeout client 90000
 {%- endif %}
 {%- if haproxy_server_timeout %}
     timeout server {{ haproxy_server_timeout }}
 {%- else %}
-    timeout server 30000
+    timeout server 90000
 {%- endif %}
 
 listen stats
diff --git a/charmhelpers/contrib/openstack/templates/section-oslo-middleware b/charmhelpers/contrib/openstack/templates/section-oslo-middleware
new file mode 100644
index 0000000000000000000000000000000000000000..dd73230a42aa037582989979c1bc8132d30b9b38
--- /dev/null
+++ b/charmhelpers/contrib/openstack/templates/section-oslo-middleware
@@ -0,0 +1,5 @@
+[oslo_middleware]
+
+# Bug #1758675
+enable_proxy_headers_parsing = true
+
diff --git a/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/charmhelpers/contrib/openstack/templates/section-oslo-notifications
index 5dccd4bb3943ff209bd820908baf7e77cb44649a..021a3c250822637c5fdf511024095730412ab098 100644
--- a/charmhelpers/contrib/openstack/templates/section-oslo-notifications
+++ b/charmhelpers/contrib/openstack/templates/section-oslo-notifications
@@ -5,4 +5,7 @@ transport_url = {{ transport_url }}
 {% if notification_topics -%}
 topics = {{ notification_topics }}
 {% endif -%}
+{% if notification_format -%}
+notification_format = {{ notification_format }}
+{% endif -%}
 {% endif -%}
diff --git a/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
index a3841ea6dcb50a16b204d88c1e944f6e678e52ee..b241bbfc7ff28c6293d740255bbe82875cd8bc13 100644
--- a/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
+++ b/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
@@ -22,9 +22,6 @@ Listen {{ public_port }}
 {% if port -%}
 <VirtualHost *:{{ port }}>
     WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
-{% if python_path -%}
-                      python-path={{ python_path }} \
-{% endif -%}
                       display-name=%{GROUP}
     WSGIProcessGroup {{ service_name }}
     WSGIScriptAlias / {{ script }}
@@ -36,7 +33,7 @@ Listen {{ public_port }}
     ErrorLog /var/log/apache2/{{ service_name }}_error.log
     CustomLog /var/log/apache2/{{ service_name }}_access.log combined
 
-    <Directory {{ usr_bin }}>
+    <Directory /usr/bin>
         <IfVersion >= 2.4>
             Require all granted
         </IfVersion>
@@ -51,9 +48,6 @@ Listen {{ public_port }}
 {% if admin_port -%}
 <VirtualHost *:{{ admin_port }}>
     WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
-{% if python_path -%}
-                      python-path={{ python_path }} \
-{% endif -%}
                       display-name=%{GROUP}
     WSGIProcessGroup {{ service_name }}-admin
     WSGIScriptAlias / {{ admin_script }}
@@ -65,7 +59,7 @@ Listen {{ public_port }}
     ErrorLog /var/log/apache2/{{ service_name }}_error.log
     CustomLog /var/log/apache2/{{ service_name }}_access.log combined
 
-    <Directory {{ usr_bin }}>
+    <Directory /usr/bin>
         <IfVersion >= 2.4>
             Require all granted
         </IfVersion>
@@ -86,9 +80,6 @@ Listen {{ public_port }}
     Alias /eds /var/www/html/eds
 
     WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
-{% if python_path -%}
-                      python-path={{ python_path }} \
-{% endif -%}
                       display-name=%{GROUP}
     WSGIProcessGroup {{ service_name }}-public
     WSGIScriptAlias / {{ public_script }}
@@ -101,7 +92,7 @@ Listen {{ public_port }}
     CustomLog /var/log/apache2/{{ service_name }}_access.log combined
     WSGIScriptAlias ^(/v3/OS-FEDERATION/identity_providers/.*?/protocols/.*?/auth)$ /usr/bin/keystone-wsgi-public/$1
 
-    <Directory {{ usr_bin }}>
+    <Directory /usr/bin>
         <IfVersion >= 2.4>
             Require all granted
         </IfVersion>
diff --git a/charmhelpers/contrib/openstack/templating.py b/charmhelpers/contrib/openstack/templating.py
index edefcfe900eef85f9441428259bd9997a9801af9..bbb6a7e4b69afbc78dd919d2f84f59d2015d5722 100644
--- a/charmhelpers/contrib/openstack/templating.py
+++ b/charmhelpers/contrib/openstack/templating.py
@@ -94,7 +94,8 @@ class OSConfigTemplate(object):
     Associates a config file template with a list of context generators.
     Responsible for constructing a template context based on those generators.
     """
-    def __init__(self, config_file, contexts):
+
+    def __init__(self, config_file, contexts, config_template=None):
         self.config_file = config_file
 
         if hasattr(contexts, '__call__'):
@@ -104,6 +105,8 @@ class OSConfigTemplate(object):
 
         self._complete_contexts = []
 
+        self.config_template = config_template
+
     def context(self):
         ctxt = {}
         for context in self.contexts:
@@ -125,6 +128,11 @@ class OSConfigTemplate(object):
         self.context()
         return self._complete_contexts
 
+    @property
+    def is_string_template(self):
+        """:returns: Boolean if this instance is a template initialised with a string"""
+        return self.config_template is not None
+
 
 class OSConfigRenderer(object):
     """
@@ -149,6 +157,10 @@ class OSConfigRenderer(object):
                          contexts=[context.IdentityServiceContext()])
         configs.register(config_file='/etc/haproxy/haproxy.conf',
                          contexts=[context.HAProxyContext()])
+        configs.register(config_file='/etc/keystone/policy.d/extra.cfg',
+                         contexts=[context.ExtraPolicyContext(),
+                                   context.KeystoneContext()],
+                         config_template=hookenv.config('extra-policy'))
         # write out a single config
         configs.write('/etc/nova/nova.conf')
         # write out all registered configs
@@ -219,14 +231,23 @@ class OSConfigRenderer(object):
             else:
                 apt_install('python3-jinja2')
 
-    def register(self, config_file, contexts):
+    def register(self, config_file, contexts, config_template=None):
         """
         Register a config file with a list of context generators to be called
         during rendering.
+        config_template can be used to load a template from a string instead of
+        using template loaders and template files.
+        :param config_file (str): a path where a config file will be rendered
+        :param contexts (list): a list of context dictionaries with kv pairs
+        :param config_template (str): an optional template string to use
         """
-        self.templates[config_file] = OSConfigTemplate(config_file=config_file,
-                                                       contexts=contexts)
-        log('Registered config file: %s' % config_file, level=INFO)
+        self.templates[config_file] = OSConfigTemplate(
+            config_file=config_file,
+            contexts=contexts,
+            config_template=config_template
+        )
+        log('Registered config file: {}'.format(config_file),
+            level=INFO)
 
     def _get_tmpl_env(self):
         if not self._tmpl_env:
@@ -236,32 +257,58 @@ class OSConfigRenderer(object):
     def _get_template(self, template):
         self._get_tmpl_env()
         template = self._tmpl_env.get_template(template)
-        log('Loaded template from %s' % template.filename, level=INFO)
+        log('Loaded template from {}'.format(template.filename),
+            level=INFO)
+        return template
+
+    def _get_template_from_string(self, ostmpl):
+        '''
+        Get a jinja2 template object from a string.
+        :param ostmpl: OSConfigTemplate to use as a data source.
+        '''
+        self._get_tmpl_env()
+        template = self._tmpl_env.from_string(ostmpl.config_template)
+        log('Loaded a template from a string for {}'.format(
+            ostmpl.config_file),
+            level=INFO)
         return template
 
     def render(self, config_file):
         if config_file not in self.templates:
-            log('Config not registered: %s' % config_file, level=ERROR)
+            log('Config not registered: {}'.format(config_file), level=ERROR)
             raise OSConfigException
-        ctxt = self.templates[config_file].context()
-
-        _tmpl = os.path.basename(config_file)
-        try:
-            template = self._get_template(_tmpl)
-        except exceptions.TemplateNotFound:
-            # if no template is found with basename, try looking for it
-            # using a munged full path, eg:
-            #   /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
-            _tmpl = '_'.join(config_file.split('/')[1:])
+
+        ostmpl = self.templates[config_file]
+        ctxt = ostmpl.context()
+
+        if ostmpl.is_string_template:
+            template = self._get_template_from_string(ostmpl)
+            log('Rendering from a string template: '
+                '{}'.format(config_file),
+                level=INFO)
+        else:
+            _tmpl = os.path.basename(config_file)
             try:
                 template = self._get_template(_tmpl)
-            except exceptions.TemplateNotFound as e:
-                log('Could not load template from %s by %s or %s.' %
-                    (self.templates_dir, os.path.basename(config_file), _tmpl),
-                    level=ERROR)
-                raise e
-
-        log('Rendering from template: %s' % _tmpl, level=INFO)
+            except exceptions.TemplateNotFound:
+                # if no template is found with basename, try looking
+                # for it using a munged full path, eg:
+                # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
+                _tmpl = '_'.join(config_file.split('/')[1:])
+                try:
+                    template = self._get_template(_tmpl)
+                except exceptions.TemplateNotFound as e:
+                    log('Could not load template from {} by {} or {}.'
+                        ''.format(
+                            self.templates_dir,
+                            os.path.basename(config_file),
+                            _tmpl
+                        ),
+                        level=ERROR)
+                    raise e
+
+            log('Rendering from template: {}'.format(config_file),
+                level=INFO)
         return template.render(ctxt)
 
     def write(self, config_file):
diff --git a/charmhelpers/contrib/openstack/utils.py b/charmhelpers/contrib/openstack/utils.py
index 8a541d4087662120d26f480262181413380374b8..0180e5553854cbb730dcd5a2e865ef1106ca5f18 100644
--- a/charmhelpers/contrib/openstack/utils.py
+++ b/charmhelpers/contrib/openstack/utils.py
@@ -23,7 +23,6 @@ import sys
 import re
 import itertools
 import functools
-import shutil
 
 import six
 import traceback
@@ -47,7 +46,6 @@ from charmhelpers.core.hookenv import (
     related_units,
     relation_ids,
     relation_set,
-    service_name,
     status_set,
     hook_name,
     application_version_set,
@@ -68,11 +66,6 @@ from charmhelpers.contrib.network.ip import (
     port_has_listener,
 )
 
-from charmhelpers.contrib.python.packages import (
-    pip_create_virtualenv,
-    pip_install,
-)
-
 from charmhelpers.core.host import (
     lsb_release,
     mounts,
@@ -84,7 +77,6 @@ from charmhelpers.core.host import (
 )
 from charmhelpers.fetch import (
     apt_cache,
-    install_remote,
     import_key as fetch_import_key,
     add_source as fetch_add_source,
     SourceConfigError,
@@ -141,6 +133,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('zesty', 'ocata'),
     ('artful', 'pike'),
     ('bionic', 'queens'),
+    ('cosmic', 'rocky'),
 ])
 
 
@@ -159,6 +152,7 @@ OPENSTACK_CODENAMES = OrderedDict([
     ('2017.1', 'ocata'),
     ('2017.2', 'pike'),
     ('2018.1', 'queens'),
+    ('2018.2', 'rocky'),
 ])
 
 # The ugly duckling - must list releases oldest to newest
@@ -190,7 +184,9 @@ SWIFT_CODENAMES = OrderedDict([
     ('pike',
         ['2.13.0', '2.15.0']),
     ('queens',
-        ['2.16.0']),
+        ['2.16.0', '2.17.0']),
+    ('rocky',
+        ['2.18.0']),
 ])
 
 # >= Liberty version->codename mapping
@@ -278,27 +274,6 @@ PACKAGE_CODENAMES = {
     ]),
 }
 
-GIT_DEFAULT_REPOS = {
-    'requirements': 'git://github.com/openstack/requirements',
-    'cinder': 'git://github.com/openstack/cinder',
-    'glance': 'git://github.com/openstack/glance',
-    'horizon': 'git://github.com/openstack/horizon',
-    'keystone': 'git://github.com/openstack/keystone',
-    'networking-hyperv': 'git://github.com/openstack/networking-hyperv',
-    'neutron': 'git://github.com/openstack/neutron',
-    'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
-    'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
-    'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
-    'nova': 'git://github.com/openstack/nova',
-}
-
-GIT_DEFAULT_BRANCHES = {
-    'liberty': 'stable/liberty',
-    'mitaka': 'stable/mitaka',
-    'newton': 'stable/newton',
-    'master': 'master',
-}
-
 DEFAULT_LOOPBACK_SIZE = '5G'
 
 
@@ -335,7 +310,7 @@ def get_os_codename_install_source(src):
 
     if src.startswith('cloud:'):
         ca_rel = src.split(':')[1]
-        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
+        ca_rel = ca_rel.split('-')[1].split('/')[0]
         return ca_rel
 
     # Best guess match based on deb string provided
@@ -392,6 +367,8 @@ def get_swift_codename(version):
             releases = UBUNTU_OPENSTACK_RELEASE
             release = [k for k, v in six.iteritems(releases) if codename in v]
             ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
+            if six.PY3:
+                ret = ret.decode('UTF-8')
             if codename in ret or release[0] in ret:
                 return codename
     elif len(codenames) == 1:
@@ -528,7 +505,6 @@ def os_release(package, base='essex', reset_cache=False):
     if _os_rel:
         return _os_rel
     _os_rel = (
-        git_os_codename_install_source(config('openstack-origin-git')) or
         get_os_codename_package(package, fatal=False) or
         get_os_codename_install_source(config('openstack-origin')) or
         base)
@@ -654,11 +630,6 @@ def openstack_upgrade_available(package):
     else:
         avail_vers = get_os_version_install_source(src)
     apt.init()
-    if "swift" in package:
-        major_cur_vers = cur_vers.split('.', 1)[0]
-        major_avail_vers = avail_vers.split('.', 1)[0]
-        major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
-        return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
     return apt.version_compare(avail_vers, cur_vers) == 1
 
 
@@ -769,417 +740,6 @@ def os_requires_version(ostack_release, pkg):
     return wrap
 
 
-def git_install_requested():
-    """
-    Returns true if openstack-origin-git is specified.
-    """
-    return config('openstack-origin-git') is not None
-
-
-def git_os_codename_install_source(projects_yaml):
-    """
-    Returns OpenStack codename of release being installed from source.
-    """
-    if git_install_requested():
-        projects = _git_yaml_load(projects_yaml)
-
-        if projects in GIT_DEFAULT_BRANCHES.keys():
-            if projects == 'master':
-                return 'ocata'
-            return projects
-
-        if 'release' in projects:
-            if projects['release'] == 'master':
-                return 'ocata'
-            return projects['release']
-
-    return None
-
-
-def git_default_repos(projects_yaml):
-    """
-    Returns default repos if a default openstack-origin-git value is specified.
-    """
-    service = service_name()
-    core_project = service
-
-    for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES):
-        if projects_yaml == default:
-
-            # add the requirements repo first
-            repo = {
-                'name': 'requirements',
-                'repository': GIT_DEFAULT_REPOS['requirements'],
-                'branch': branch,
-            }
-            repos = [repo]
-
-            # neutron-* and nova-* charms require some additional repos
-            if service in ['neutron-api', 'neutron-gateway',
-                           'neutron-openvswitch']:
-                core_project = 'neutron'
-                if service == 'neutron-api':
-                    repo = {
-                        'name': 'networking-hyperv',
-                        'repository': GIT_DEFAULT_REPOS['networking-hyperv'],
-                        'branch': branch,
-                    }
-                    repos.append(repo)
-                for project in ['neutron-fwaas', 'neutron-lbaas',
-                                'neutron-vpnaas', 'nova']:
-                    repo = {
-                        'name': project,
-                        'repository': GIT_DEFAULT_REPOS[project],
-                        'branch': branch,
-                    }
-                    repos.append(repo)
-
-            elif service in ['nova-cloud-controller', 'nova-compute']:
-                core_project = 'nova'
-                repo = {
-                    'name': 'neutron',
-                    'repository': GIT_DEFAULT_REPOS['neutron'],
-                    'branch': branch,
-                }
-                repos.append(repo)
-            elif service == 'openstack-dashboard':
-                core_project = 'horizon'
-
-            # finally add the current service's core project repo
-            repo = {
-                'name': core_project,
-                'repository': GIT_DEFAULT_REPOS[core_project],
-                'branch': branch,
-            }
-            repos.append(repo)
-
-            return yaml.dump(dict(repositories=repos, release=default))
-
-    return projects_yaml
-
-
-def _git_yaml_load(projects_yaml):
-    """
-    Load the specified yaml into a dictionary.
-    """
-    if not projects_yaml:
-        return None
-
-    return yaml.load(projects_yaml)
-
-
-requirements_dir = None
-
-
-def git_clone_and_install(projects_yaml, core_project):
-    """
-    Clone/install all specified OpenStack repositories.
-
-    The expected format of projects_yaml is:
-
-        repositories:
-          - {name: keystone,
-             repository: 'git://git.openstack.org/openstack/keystone.git',
-             branch: 'stable/icehouse'}
-          - {name: requirements,
-             repository: 'git://git.openstack.org/openstack/requirements.git',
-             branch: 'stable/icehouse'}
-
-        directory: /mnt/openstack-git
-        http_proxy: squid-proxy-url
-        https_proxy: squid-proxy-url
-
-    The directory, http_proxy, and https_proxy keys are optional.
-
-    """
-    global requirements_dir
-    parent_dir = '/mnt/openstack-git'
-    http_proxy = None
-
-    projects = _git_yaml_load(projects_yaml)
-    _git_validate_projects_yaml(projects, core_project)
-
-    old_environ = dict(os.environ)
-
-    if 'http_proxy' in projects.keys():
-        http_proxy = projects['http_proxy']
-        os.environ['http_proxy'] = projects['http_proxy']
-    if 'https_proxy' in projects.keys():
-        os.environ['https_proxy'] = projects['https_proxy']
-
-    if 'directory' in projects.keys():
-        parent_dir = projects['directory']
-
-    pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
-
-    # Upgrade setuptools and pip from default virtualenv versions. The default
-    # versions in trusty break master OpenStack branch deployments.
-    for p in ['pip', 'setuptools']:
-        pip_install(p, upgrade=True, proxy=http_proxy,
-                    venv=os.path.join(parent_dir, 'venv'))
-
-    constraints = None
-    for p in projects['repositories']:
-        repo = p['repository']
-        branch = p['branch']
-        depth = '1'
-        if 'depth' in p.keys():
-            depth = p['depth']
-        if p['name'] == 'requirements':
-            repo_dir = _git_clone_and_install_single(repo, branch, depth,
-                                                     parent_dir, http_proxy,
-                                                     update_requirements=False)
-            requirements_dir = repo_dir
-            constraints = os.path.join(repo_dir, "upper-constraints.txt")
-            # upper-constraints didn't exist until after icehouse
-            if not os.path.isfile(constraints):
-                constraints = None
-            # use constraints unless project yaml sets use_constraints to false
-            if 'use_constraints' in projects.keys():
-                if not projects['use_constraints']:
-                    constraints = None
-        else:
-            repo_dir = _git_clone_and_install_single(repo, branch, depth,
-                                                     parent_dir, http_proxy,
-                                                     update_requirements=True,
-                                                     constraints=constraints)
-
-    os.environ = old_environ
-
-
-def _git_validate_projects_yaml(projects, core_project):
-    """
-    Validate the projects yaml.
-    """
-    _git_ensure_key_exists('repositories', projects)
-
-    for project in projects['repositories']:
-        _git_ensure_key_exists('name', project.keys())
-        _git_ensure_key_exists('repository', project.keys())
-        _git_ensure_key_exists('branch', project.keys())
-
-    if projects['repositories'][0]['name'] != 'requirements':
-        error_out('{} git repo must be specified first'.format('requirements'))
-
-    if projects['repositories'][-1]['name'] != core_project:
-        error_out('{} git repo must be specified last'.format(core_project))
-
-    _git_ensure_key_exists('release', projects)
-
-
-def _git_ensure_key_exists(key, keys):
-    """
-    Ensure that key exists in keys.
-    """
-    if key not in keys:
-        error_out('openstack-origin-git key \'{}\' is missing'.format(key))
-
-
-def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
-                                  update_requirements, constraints=None):
-    """
-    Clone and install a single git repository.
-    """
-    if not os.path.exists(parent_dir):
-        juju_log('Directory already exists at {}. '
-                 'No need to create directory.'.format(parent_dir))
-        os.mkdir(parent_dir)
-
-    juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
-    repo_dir = install_remote(
-        repo, dest=parent_dir, branch=branch, depth=depth)
-
-    venv = os.path.join(parent_dir, 'venv')
-
-    if update_requirements:
-        if not requirements_dir:
-            error_out('requirements repo must be cloned before '
-                      'updating from global requirements.')
-        _git_update_requirements(venv, repo_dir, requirements_dir)
-
-    juju_log('Installing git repo from dir: {}'.format(repo_dir))
-    if http_proxy:
-        pip_install(repo_dir, proxy=http_proxy, venv=venv,
-                    constraints=constraints)
-    else:
-        pip_install(repo_dir, venv=venv, constraints=constraints)
-
-    return repo_dir
-
-
-def _git_update_requirements(venv, package_dir, reqs_dir):
-    """
-    Update from global requirements.
-
-    Update an OpenStack git directory's requirements.txt and
-    test-requirements.txt from global-requirements.txt.
-    """
-    orig_dir = os.getcwd()
-    os.chdir(reqs_dir)
-    python = os.path.join(venv, 'bin/python')
-    cmd = [python, 'update.py', package_dir]
-    try:
-        subprocess.check_call(cmd)
-    except subprocess.CalledProcessError:
-        package = os.path.basename(package_dir)
-        error_out("Error updating {} from "
-                  "global-requirements.txt".format(package))
-    os.chdir(orig_dir)
-
-
-def git_pip_venv_dir(projects_yaml):
-    """
-    Return the pip virtualenv path.
-    """
-    parent_dir = '/mnt/openstack-git'
-
-    projects = _git_yaml_load(projects_yaml)
-
-    if 'directory' in projects.keys():
-        parent_dir = projects['directory']
-
-    return os.path.join(parent_dir, 'venv')
-
-
-def git_src_dir(projects_yaml, project):
-    """
-    Return the directory where the specified project's source is located.
-    """
-    parent_dir = '/mnt/openstack-git'
-
-    projects = _git_yaml_load(projects_yaml)
-
-    if 'directory' in projects.keys():
-        parent_dir = projects['directory']
-
-    for p in projects['repositories']:
-        if p['name'] == project:
-            return os.path.join(parent_dir, os.path.basename(p['repository']))
-
-    return None
-
-
-def git_yaml_value(projects_yaml, key):
-    """
-    Return the value in projects_yaml for the specified key.
-    """
-    projects = _git_yaml_load(projects_yaml)
-
-    if key in projects.keys():
-        return projects[key]
-
-    return None
-
-
-def git_generate_systemd_init_files(templates_dir):
-    """
-    Generate systemd init files.
-
-    Generates and installs systemd init units and script files based on the
-    *.init.in files contained in the templates_dir directory.
-
-    This code is based on the openstack-pkg-tools package and its init
-    script generation, which is used by the OpenStack packages.
-    """
-    for f in os.listdir(templates_dir):
-        # Create the init script and systemd unit file from the template
-        if f.endswith(".init.in"):
-            init_in_file = f
-            init_file = f[:-8]
-            service_file = "{}.service".format(init_file)
-
-            init_in_source = os.path.join(templates_dir, init_in_file)
-            init_source = os.path.join(templates_dir, init_file)
-            service_source = os.path.join(templates_dir, service_file)
-
-            init_dest = os.path.join('/etc/init.d', init_file)
-            service_dest = os.path.join('/lib/systemd/system', service_file)
-
-            shutil.copyfile(init_in_source, init_source)
-            with open(init_source, 'a') as outfile:
-                template = ('/usr/share/openstack-pkg-tools/'
-                            'init-script-template')
-                with open(template) as infile:
-                    outfile.write('\n\n{}'.format(infile.read()))
-
-            cmd = ['pkgos-gen-systemd-unit', init_in_source]
-            subprocess.check_call(cmd)
-
-            if os.path.exists(init_dest):
-                os.remove(init_dest)
-            if os.path.exists(service_dest):
-                os.remove(service_dest)
-            shutil.copyfile(init_source, init_dest)
-            shutil.copyfile(service_source, service_dest)
-            os.chmod(init_dest, 0o755)
-
-    for f in os.listdir(templates_dir):
-        # If there's a service.in file, use it instead of the generated one
-        if f.endswith(".service.in"):
-            service_in_file = f
-            service_file = f[:-3]
-
-            service_in_source = os.path.join(templates_dir, service_in_file)
-            service_source = os.path.join(templates_dir, service_file)
-            service_dest = os.path.join('/lib/systemd/system', service_file)
-
-            shutil.copyfile(service_in_source, service_source)
-
-            if os.path.exists(service_dest):
-                os.remove(service_dest)
-            shutil.copyfile(service_source, service_dest)
-
-    for f in os.listdir(templates_dir):
-        # Generate the systemd unit if there's no existing .service.in
-        if f.endswith(".init.in"):
-            init_in_file = f
-            init_file = f[:-8]
-            service_in_file = "{}.service.in".format(init_file)
-            service_file = "{}.service".format(init_file)
-
-            init_in_source = os.path.join(templates_dir, init_in_file)
-            service_in_source = os.path.join(templates_dir, service_in_file)
-            service_source = os.path.join(templates_dir, service_file)
-            service_dest = os.path.join('/lib/systemd/system', service_file)
-
-            if not os.path.exists(service_in_source):
-                cmd = ['pkgos-gen-systemd-unit', init_in_source]
-                subprocess.check_call(cmd)
-
-                if os.path.exists(service_dest):
-                    os.remove(service_dest)
-                shutil.copyfile(service_source, service_dest)
-
-
-def git_determine_usr_bin():
-    """Return the /usr/bin path for Apache2 config.
-
-    The /usr/bin path will be located in the virtualenv if the charm
-    is configured to deploy from source.
-    """
-    if git_install_requested():
-        projects_yaml = config('openstack-origin-git')
-        projects_yaml = git_default_repos(projects_yaml)
-        return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
-    else:
-        return '/usr/bin'
-
-
-def git_determine_python_path():
-    """Return the python-path for Apache2 config.
-
-    Returns 'None' unless the charm is configured to deploy from source,
-    in which case the path of the virtualenv's site-packages is returned.
-    """
-    if git_install_requested():
-        projects_yaml = config('openstack-origin-git')
-        projects_yaml = git_default_repos(projects_yaml)
-        return os.path.join(git_pip_venv_dir(projects_yaml),
-                            'lib/python2.7/site-packages')
-    else:
-        return None
-
-
 def os_workload_status(configs, required_interfaces, charm_func=None):
     """
     Decorator to set workload status based on complete contexts
@@ -1613,27 +1173,24 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
     """
     ret = False
 
-    if git_install_requested():
-        action_set({'outcome': 'installed from source, skipped upgrade.'})
-    else:
-        if openstack_upgrade_available(package):
-            if config('action-managed-upgrade'):
-                juju_log('Upgrading OpenStack release')
-
-                try:
-                    upgrade_callback(configs=configs)
-                    action_set({'outcome': 'success, upgrade completed.'})
-                    ret = True
-                except Exception:
-                    action_set({'outcome': 'upgrade failed, see traceback.'})
-                    action_set({'traceback': traceback.format_exc()})
-                    action_fail('do_openstack_upgrade resulted in an '
-                                'unexpected error')
-            else:
-                action_set({'outcome': 'action-managed-upgrade config is '
-                                       'False, skipped upgrade.'})
+    if openstack_upgrade_available(package):
+        if config('action-managed-upgrade'):
+            juju_log('Upgrading OpenStack release')
+
+            try:
+                upgrade_callback(configs=configs)
+                action_set({'outcome': 'success, upgrade completed.'})
+                ret = True
+            except Exception:
+                action_set({'outcome': 'upgrade failed, see traceback.'})
+                action_set({'traceback': traceback.format_exc()})
+                action_fail('do_openstack_upgrade resulted in an '
+                            'unexpected error')
         else:
-            action_set({'outcome': 'no upgrade available.'})
+            action_set({'outcome': 'action-managed-upgrade config is '
+                                   'False, skipped upgrade.'})
+    else:
+        action_set({'outcome': 'no upgrade available.'})
 
     return ret
 
@@ -2043,14 +1600,25 @@ def token_cache_pkgs(source=None, release=None):
 
 def update_json_file(filename, items):
     """Updates the json `filename` with a given dict.
-    :param filename: json filename (i.e.: /etc/glance/policy.json)
+    :param filename: path to json file (e.g. /etc/glance/policy.json)
     :param items: dict of items to update
     """
+    if not items:
+        return
+
     with open(filename) as fd:
         policy = json.load(fd)
+
+    # Compare before and after and if nothing has changed don't write the file
+    # since that could cause unnecessary service restarts.
+    before = json.dumps(policy, indent=4, sort_keys=True)
     policy.update(items)
+    after = json.dumps(policy, indent=4, sort_keys=True)
+    if before == after:
+        return
+
     with open(filename, "w") as fd:
-        fd.write(json.dumps(policy, indent=4))
+        fd.write(after)
 
 
 @cached
diff --git a/charmhelpers/contrib/openstack/vaultlocker.py b/charmhelpers/contrib/openstack/vaultlocker.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8e4bf88f41cb5684ef4357515f8104a475927cf
--- /dev/null
+++ b/charmhelpers/contrib/openstack/vaultlocker.py
@@ -0,0 +1,126 @@
+# Copyright 2018 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+import charmhelpers.contrib.openstack.alternatives as alternatives
+import charmhelpers.contrib.openstack.context as context
+
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.host as host
+import charmhelpers.core.templating as templating
+import charmhelpers.core.unitdata as unitdata
+
+VAULTLOCKER_BACKEND = 'charm-vaultlocker'
+
+
+class VaultKVContext(context.OSContextGenerator):
+    """Vault KV context for interaction with vault-kv interfaces"""
+    interfaces = ['secrets-storage']
+
+    def __init__(self, secret_backend=None):
+        super(VaultKVContext, self).__init__()
+        self.secret_backend = (
+            secret_backend or 'charm-{}'.format(hookenv.service_name())
+        )
+
+    def __call__(self):
+        db = unitdata.kv()
+        last_token = db.get('last-token')
+        secret_id = db.get('secret-id')
+        for relation_id in hookenv.relation_ids(self.interfaces[0]):
+            for unit in hookenv.related_units(relation_id):
+                data = hookenv.relation_get(unit=unit,
+                                            rid=relation_id)
+                vault_url = data.get('vault_url')
+                role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
+                token = data.get('{}_token'.format(hookenv.local_unit()))
+
+                if all([vault_url, role_id, token]):
+                    token = json.loads(token)
+                    vault_url = json.loads(vault_url)
+
+                    # Tokens may change when secret_id's are being
+                    # reissued - if so use token to get new secret_id
+                    if token != last_token:
+                        secret_id = retrieve_secret_id(
+                            url=vault_url,
+                            token=token
+                        )
+                        db.set('secret-id', secret_id)
+                        db.set('last-token', token)
+                        db.flush()
+
+                    ctxt = {
+                        'vault_url': vault_url,
+                        'role_id': json.loads(role_id),
+                        'secret_id': secret_id,
+                        'secret_backend': self.secret_backend,
+                    }
+                    vault_ca = data.get('vault_ca')
+                    if vault_ca:
+                        ctxt['vault_ca'] = json.loads(vault_ca)
+                    self.complete = True
+                    return ctxt
+        return {}
+
+
+def write_vaultlocker_conf(context, priority=100):
+    """Write vaultlocker configuration to disk and install alternative
+
+    :param context: Dict of data from vault-kv relation
+    :ptype: context: dict
+    :param priority: Priority of alternative configuration
+    :ptype: priority: int"""
+    charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
+        hookenv.service_name()
+    )
+    host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
+    templating.render(source='vaultlocker.conf.j2',
+                      target=charm_vl_path,
+                      context=context, perms=0o600)
+    alternatives.install_alternative('vaultlocker.conf',
+                                     '/etc/vaultlocker/vaultlocker.conf',
+                                     charm_vl_path, priority)
+
+
+def vault_relation_complete(backend=None):
+    """Determine whether vault relation is complete
+
+    :param backend: Name of secrets backend requested
+    :ptype backend: string
+    :returns: whether the relation to vault is complete
+    :rtype: bool"""
+    vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
+    vault_kv()
+    return vault_kv.complete
+
+
+# TODO: contrib a high level unwrap method to hvac that works
+def retrieve_secret_id(url, token):
+    """Retrieve a response-wrapped secret_id from Vault
+
+    :param url: URL to Vault Server
+    :ptype url: str
+    :param token: One shot Token to use
+    :ptype token: str
+    :returns: secret_id to use for Vault Access
+    :rtype: str"""
+    import hvac
+    client = hvac.Client(url=url, token=token)
+    response = client._post('/v1/sys/wrapping/unwrap')
+    if response.status_code == 200:
+        data = response.json()
+        return data['data']['secret_id']
diff --git a/charmhelpers/contrib/storage/linux/ceph.py b/charmhelpers/contrib/storage/linux/ceph.py
index 392316126b3799796c786aead740f88e87b8a71e..76828201628776a5c860805825711d38c47db4a5 100644
--- a/charmhelpers/contrib/storage/linux/ceph.py
+++ b/charmhelpers/contrib/storage/linux/ceph.py
@@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None):
         assert isinstance(valid_range, list), \
             "valid_range must be a list, was given {}".format(valid_range)
         # If we're dealing with strings
-        if valid_type is six.string_types:
+        if isinstance(value, six.string_types):
             assert value in valid_range, \
                 "{} is not in the list {}".format(value, valid_range)
         # Integer, float should have a min and max
@@ -291,7 +291,7 @@ class Pool(object):
 
 class ReplicatedPool(Pool):
     def __init__(self, service, name, pg_num=None, replicas=2,
-                 percent_data=10.0):
+                 percent_data=10.0, app_name=None):
         super(ReplicatedPool, self).__init__(service=service, name=name)
         self.replicas = replicas
         if pg_num:
@@ -301,6 +301,10 @@ class ReplicatedPool(Pool):
             self.pg_num = min(pg_num, max_pgs)
         else:
             self.pg_num = self.get_pgs(self.replicas, percent_data)
+        if app_name:
+            self.app_name = app_name
+        else:
+            self.app_name = 'unknown'
 
     def create(self):
         if not pool_exists(self.service, self.name):
@@ -313,6 +317,12 @@ class ReplicatedPool(Pool):
                 update_pool(client=self.service,
                             pool=self.name,
                             settings={'size': str(self.replicas)})
+                try:
+                    set_app_name_for_pool(client=self.service,
+                                          pool=self.name,
+                                          name=self.app_name)
+                except CalledProcessError:
+                    log('Could not set app name for pool {}'.format(self.name),
+                        level=WARNING)
             except CalledProcessError:
                 raise
 
@@ -320,10 +330,14 @@ class ReplicatedPool(Pool):
 # Default jerasure erasure coded pool
 class ErasurePool(Pool):
     def __init__(self, service, name, erasure_code_profile="default",
-                 percent_data=10.0):
+                 percent_data=10.0, app_name=None):
         super(ErasurePool, self).__init__(service=service, name=name)
         self.erasure_code_profile = erasure_code_profile
         self.percent_data = percent_data
+        if app_name:
+            self.app_name = app_name
+        else:
+            self.app_name = 'unknown'
 
     def create(self):
         if not pool_exists(self.service, self.name):
@@ -355,6 +369,12 @@ class ErasurePool(Pool):
                    'erasure', self.erasure_code_profile]
             try:
                 check_call(cmd)
+                try:
+                    set_app_name_for_pool(client=self.service,
+                                          pool=self.name,
+                                          name=self.app_name)
+                except CalledProcessError:
+                    log('Could not set app name for pool {}'.format(self.name),
+                        level=WARNING)
             except CalledProcessError:
                 raise
 
@@ -377,12 +397,12 @@ def get_mon_map(service):
         try:
             return json.loads(mon_status)
         except ValueError as v:
-            log("Unable to parse mon_status json: {}. Error: {}".format(
-                mon_status, v.message))
+            log("Unable to parse mon_status json: {}. Error: {}"
+                .format(mon_status, str(v)))
             raise
     except CalledProcessError as e:
-        log("mon_status command failed with message: {}".format(
-            e.message))
+        log("mon_status command failed with message: {}"
+            .format(str(e)))
         raise
 
 
@@ -517,7 +537,8 @@ def pool_set(service, pool_name, key, value):
     :param value:
     :return: None.  Can raise CalledProcessError
     """
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key,
+           str(value).lower()]
     try:
         check_call(cmd)
     except CalledProcessError:
@@ -621,16 +642,24 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
     :param durability_estimator: int
     :return: None.  Can raise CalledProcessError
     """
+    version = ceph_version()
+
     # Ensure this failure_domain is allowed by Ceph
     validator(failure_domain, six.string_types,
               ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
 
     cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
-           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
-           'ruleset_failure_domain=' + failure_domain]
+           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks)
+           ]
     if locality is not None and durability_estimator is not None:
         raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
 
+    # failure_domain changed in luminous
+    if version and version >= '12.0.0':
+        cmd.append('crush-failure-domain=' + failure_domain)
+    else:
+        cmd.append('ruleset-failure-domain=' + failure_domain)
+
     # Add plugin specific information
     if locality is not None:
         # For local erasure codes
@@ -769,6 +798,25 @@ def update_pool(client, pool, settings):
     check_call(cmd)
 
 
+def set_app_name_for_pool(client, pool, name):
+    """
+    Calls `osd pool application enable` for the specified pool name
+
+    :param client: Name of the ceph client to use
+    :type client: str
+    :param pool: Pool to set app name for
+    :type pool: str
+    :param name: app name for the specified pool
+    :type name: str
+
+    :raises: CalledProcessError if ceph call fails
+    """
+    if ceph_version() >= '12.0.0':
+        cmd = ['ceph', '--id', client, 'osd', 'pool',
+               'application', 'enable', pool, name]
+        check_call(cmd)
+
+
 def create_pool(service, name, replicas=3, pg_num=None):
     """Create a new RADOS pool."""
     if pool_exists(service, name):
@@ -1064,14 +1112,24 @@ class CephBrokerRq(object):
         self.ops = []
 
     def add_op_request_access_to_group(self, name, namespace=None,
-                                       permission=None, key_name=None):
+                                       permission=None, key_name=None,
+                                       object_prefix_permissions=None):
         """
         Adds the requested permissions to the current service's Ceph key,
-        allowing the key to access only the specified pools
+        allowing the key to access only the specified pools or
+        object prefixes. object_prefix_permissions should be a dictionary
+        keyed on the permission with the corresponding value being a list
+        of prefixes to apply that permission to.
+            {
+                'rwx': ['prefix1', 'prefix2'],
+                'class-read': ['prefix3']}
         """
-        self.ops.append({'op': 'add-permissions-to-key', 'group': name,
-                         'namespace': namespace, 'name': key_name or service_name(),
-                         'group-permission': permission})
+        self.ops.append({
+            'op': 'add-permissions-to-key', 'group': name,
+            'namespace': namespace,
+            'name': key_name or service_name(),
+            'group-permission': permission,
+            'object-prefix-permissions': object_prefix_permissions})
 
     def add_op_create_pool(self, name, replica_count=3, pg_num=None,
                            weight=None, group=None, namespace=None):
@@ -1107,7 +1165,10 @@ class CephBrokerRq(object):
     def _ops_equal(self, other):
         if len(self.ops) == len(other.ops):
             for req_no in range(0, len(self.ops)):
-                for key in ['replicas', 'name', 'op', 'pg_num', 'weight']:
+                for key in [
+                        'replicas', 'name', 'op', 'pg_num', 'weight',
+                        'group', 'group-namespace', 'group-permission',
+                        'object-prefix-permissions']:
                     if self.ops[req_no].get(key) != other.ops[req_no].get(key):
                         return False
         else:
diff --git a/charmhelpers/contrib/storage/linux/lvm.py b/charmhelpers/contrib/storage/linux/lvm.py
index 7f2a0604931fd7b9ee15224971b4a3f5ed79f3ef..c8bde69263f0e917d32d0e5d70abba1409b26012 100644
--- a/charmhelpers/contrib/storage/linux/lvm.py
+++ b/charmhelpers/contrib/storage/linux/lvm.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import functools
 from subprocess import (
     CalledProcessError,
     check_call,
@@ -101,3 +102,81 @@ def create_lvm_volume_group(volume_group, block_device):
     :block_device: str: Full path of PV-initialized block device.
     '''
     check_call(['vgcreate', volume_group, block_device])
+
+
+def list_logical_volumes(select_criteria=None, path_mode=False):
+    '''
+    List logical volumes
+
+    :param select_criteria: str: Limit list to those volumes matching this
+                                 criteria (see 'lvs -S help' for more details)
+    :param path_mode: bool: return logical volume name in 'vg/lv' format, this
+                            format is required for some commands like lvextend
+    :returns: [str]: List of logical volumes
+    '''
+    lv_display_attr = 'lv_name'
+    if path_mode:
+        # Parsing output logic relies on the column order
+        lv_display_attr = 'vg_name,' + lv_display_attr
+    cmd = ['lvs', '--options', lv_display_attr, '--noheadings']
+    if select_criteria:
+        cmd.extend(['--select', select_criteria])
+    lvs = []
+    for lv in check_output(cmd).decode('UTF-8').splitlines():
+        if not lv:
+            continue
+        if path_mode:
+            lvs.append('/'.join(lv.strip().split()))
+        else:
+            lvs.append(lv.strip())
+    return lvs
+
+
+list_thin_logical_volume_pools = functools.partial(
+    list_logical_volumes,
+    select_criteria='lv_attr =~ ^t')
+
+list_thin_logical_volumes = functools.partial(
+    list_logical_volumes,
+    select_criteria='lv_attr =~ ^V')
+
+
+def extend_logical_volume_by_device(lv_name, block_device):
+    '''
+    Extends the size of logical volume lv_name by the amount of free space on
+    physical volume block_device.
+
+    :param lv_name: str: name of logical volume to be extended (vg/lv format)
+    :param block_device: str: name of block_device to be allocated to lv_name
+    '''
+    cmd = ['lvextend', lv_name, block_device]
+    check_call(cmd)
+
+
+def create_logical_volume(lv_name, volume_group, size=None):
+    '''
+    Create a new logical volume in an existing volume group
+
+    :param lv_name: str: name of logical volume to be created.
+    :param volume_group: str: Name of volume group to use for the new volume.
+    :param size: str: Size of logical volume to create (100% if not supplied)
+    :raises subprocess.CalledProcessError: in the event that the lvcreate fails.
+    '''
+    if size:
+        check_call([
+            'lvcreate',
+            '--yes',
+            '-L',
+            '{}'.format(size),
+            '-n', lv_name, volume_group
+        ])
+    # create the lv with all the space available, this is needed because the
+    # system call is different for LVM
+    else:
+        check_call([
+            'lvcreate',
+            '--yes',
+            '-l',
+            '100%FREE',
+            '-n', lv_name, volume_group
+        ])
diff --git a/charmhelpers/contrib/storage/linux/utils.py b/charmhelpers/contrib/storage/linux/utils.py
index c9428894317a3285c870a08e8f3b53846739c552..6f846b056c27baeb2ffd848fed0e0aaa89f3b5ce 100644
--- a/charmhelpers/contrib/storage/linux/utils.py
+++ b/charmhelpers/contrib/storage/linux/utils.py
@@ -67,3 +67,19 @@ def is_device_mounted(device):
     except Exception:
         return False
     return bool(re.search(r'MOUNTPOINT=".+"', out))
+
+
+def mkfs_xfs(device, force=False):
+    """Format device with XFS filesystem.
+
+    By default this should fail if the device already has a filesystem on it.
+    :param device: Full path to device to format
+    :ptype device: str
+    :param force: Force operation
+    :ptype: force: boolean"""
+    cmd = ['mkfs.xfs']
+    if force:
+        cmd.append("-f")
+
+    cmd += ['-i', 'size=1024', device]
+    check_call(cmd)
diff --git a/charmhelpers/core/hookenv.py b/charmhelpers/core/hookenv.py
index b2d0cc758cf63ed36028bc77aa59f937fb52e806..ed7af39e36fa0b921d42edb94cff997bf01135d1 100644
--- a/charmhelpers/core/hookenv.py
+++ b/charmhelpers/core/hookenv.py
@@ -27,6 +27,7 @@ import glob
 import os
 import json
 import yaml
+import re
 import subprocess
 import sys
 import errno
@@ -39,6 +40,7 @@ if not six.PY3:
 else:
     from collections import UserDict
 
+
 CRITICAL = "CRITICAL"
 ERROR = "ERROR"
 WARNING = "WARNING"
@@ -66,7 +68,7 @@ def cached(func):
     @wraps(func)
     def wrapper(*args, **kwargs):
         global cache
-        key = str((func, args, kwargs))
+        key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
         try:
             return cache[key]
         except KeyError:
@@ -288,7 +290,7 @@ class Config(dict):
         self.implicit_save = True
         self._prev_dict = None
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
-        if os.path.exists(self.path):
+        if os.path.exists(self.path) and os.stat(self.path).st_size:
             self.load_previous()
         atexit(self._implicit_save)
 
@@ -308,7 +310,11 @@ class Config(dict):
         """
         self.path = path or self.path
         with open(self.path) as f:
-            self._prev_dict = json.load(f)
+            try:
+                self._prev_dict = json.load(f)
+            except ValueError as e:
+                log('Unable to parse previous config data - {}'.format(str(e)),
+                    level=ERROR)
         for k, v in copy.deepcopy(self._prev_dict).items():
             if k not in self:
                 self[k] = v
@@ -344,6 +350,7 @@ class Config(dict):
 
         """
         with open(self.path, 'w') as f:
+            os.fchmod(f.fileno(), 0o600)
             json.dump(self, f)
 
     def _implicit_save(self):
@@ -351,23 +358,40 @@ class Config(dict):
             self.save()
 
 
-@cached
+_cache_config = None
+
+
 def config(scope=None):
-    """Juju charm configuration"""
-    config_cmd_line = ['config-get']
-    if scope is not None:
-        config_cmd_line.append(scope)
-    else:
-        config_cmd_line.append('--all')
-    config_cmd_line.append('--format=json')
+    """
+    Get the juju charm configuration (scope==None) or individual key,
+    (scope=str).  The returned value is a Python data structure loaded as
+    JSON from the Juju config command.
+
+    :param scope: If set, return the value for the specified key.
+    :type scope: Optional[str]
+    :returns: Either the whole config as a Config, or a key from it.
+    :rtype: Any
+    """
+    global _cache_config
+    config_cmd_line = ['config-get', '--all', '--format=json']
+    try:
+        # JSON Decode Exception for Python3.5+
+        exc_json = json.decoder.JSONDecodeError
+    except AttributeError:
+        # JSON Decode Exception for Python2.7 through Python3.4
+        exc_json = ValueError
     try:
-        print("CONFIG_CMD_LINE: " + ' '.join(config_cmd_line))
-        config_data = json.loads(
+        if _cache_config is None:
+            config_data = json.loads(
                 subprocess.check_output(config_cmd_line).decode('UTF-8'))
+            _cache_config = Config(config_data)
         if scope is not None:
-            return config_data
-        return Config(config_data)
-    except ValueError:
+            return _cache_config.get(scope)
+        return _cache_config
+    except (exc_json, UnicodeDecodeError) as e:
+        log('Unable to parse output from config-get: config_cmd_line="{}" '
+            'message="{}"'
+            .format(config_cmd_line, str(e)), level=ERROR)
         return None
 
 
@@ -819,6 +843,10 @@ class Hooks(object):
         return wrapper
 
 
+class NoNetworkBinding(Exception):
+    pass
+
+
 def charm_dir():
     """Return the root directory of the current charm"""
     d = os.environ.get('JUJU_CHARM_DIR')
@@ -944,6 +972,13 @@ def application_version_set(version):
         log("Application Version: {}".format(version))
 
 
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def goal_state():
+    """Juju goal state values"""
+    cmd = ['goal-state', '--format=json']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def is_leader():
     """Does the current unit hold the juju leadership
@@ -1038,7 +1073,6 @@ def juju_version():
                                    universal_newlines=True).strip()
 
 
-@cached
 def has_juju_version(minimum_version):
     """Return True if the Juju version is at least the provided version"""
     return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@@ -1098,6 +1132,8 @@ def _run_atexit():
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def network_get_primary_address(binding):
     '''
+    Deprecated since Juju 2.3; use network_get()
+
     Retrieve the primary network address for a named binding
 
     :param binding: string. The name of a relation of extra-binding
@@ -1105,10 +1141,19 @@ def network_get_primary_address(binding):
     :raise: NotImplementedError if run on Juju < 2.0
     '''
     cmd = ['network-get', '--primary-address', binding]
-    return subprocess.check_output(cmd).decode('UTF-8').strip()
+    try:
+        response = subprocess.check_output(
+            cmd,
+            stderr=subprocess.STDOUT).decode('UTF-8').strip()
+    except CalledProcessError as e:
+        if 'no network config found for binding' in e.output.decode('UTF-8'):
+            raise NoNetworkBinding("No network binding for {}"
+                                   .format(binding))
+        else:
+            raise
+    return response
 
 
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def network_get(endpoint, relation_id=None):
     """
     Retrieve the network details for a relation endpoint
@@ -1116,24 +1161,20 @@ def network_get(endpoint, relation_id=None):
     :param endpoint: string. The name of a relation endpoint
     :param relation_id: int. The ID of the relation for the current context.
     :return: dict. The loaded YAML output of the network-get query.
-    :raise: NotImplementedError if run on Juju < 2.1
+    :raise: NotImplementedError if request not supported by the Juju version.
     """
+    if not has_juju_version('2.2'):
+        raise NotImplementedError(juju_version())  # earlier versions require --primary-address
+    if relation_id and not has_juju_version('2.3'):
+        raise NotImplementedError  # 2.3 added the -r option
+
     cmd = ['network-get', endpoint, '--format', 'yaml']
     if relation_id:
         cmd.append('-r')
         cmd.append(relation_id)
-    try:
-        response = subprocess.check_output(
-            cmd,
-            stderr=subprocess.STDOUT).decode('UTF-8').strip()
-    except CalledProcessError as e:
-        # Early versions of Juju 2.0.x required the --primary-address argument.
-        # We catch that condition here and raise NotImplementedError since
-        # the requested semantics are not available - the caller can then
-        # use the network_get_primary_address() method instead.
-        if '--primary-address is currently required' in e.output.decode('UTF-8'):
-            raise NotImplementedError
-        raise
+    response = subprocess.check_output(
+        cmd,
+        stderr=subprocess.STDOUT).decode('UTF-8').strip()
     return yaml.safe_load(response)
 
 
@@ -1189,9 +1230,23 @@ def iter_units_for_relation_name(relation_name):
 
 def ingress_address(rid=None, unit=None):
     """
-    Retrieve the ingress-address from a relation when available. Otherwise,
-    return the private-address. This function is to be used on the consuming
-    side of the relation.
+    Retrieve the ingress-address from a relation when available.
+    Otherwise, return the private-address.
+
+    When used on the consuming side of the relation (unit is a remote
+    unit), the ingress-address is the IP address that this unit needs
+    to use to reach the provided service on the remote unit.
+
+    When used on the providing side of the relation (unit == local_unit()),
+    the ingress-address is the IP address that is advertised to remote
+    units on this relation. Remote units need to use this address to
+    reach the local provided service on this unit.
+
+    Note that charms may document some other method to use in
+    preference to the ingress_address(), such as an address provided
+    on a different relation attribute or a service discovery mechanism.
+    This allows charms to redirect inbound connections to their peers
+    or different applications such as load balancers.
 
     Usage:
     addresses = [ingress_address(rid=u.rid, unit=u.unit)
@@ -1205,3 +1260,40 @@ def ingress_address(rid=None, unit=None):
     settings = relation_get(rid=rid, unit=unit)
     return (settings.get('ingress-address') or
             settings.get('private-address'))
+
+
+def egress_subnets(rid=None, unit=None):
+    """
+    Retrieve the egress-subnets from a relation.
+
+    This function is to be used on the providing side of the
+    relation, and provides the ranges of addresses that client
+    connections may come from. The result is uninteresting on
+    the consuming side of a relation (unit == local_unit()).
+
+    Returns a stable list of subnets in CIDR format.
+    eg. ['192.168.1.0/24', '2001::F00F/128']
+
+    If egress-subnets is not available, falls back to using the published
+    ingress-address, or finally private-address.
+
+    :param rid: string relation id
+    :param unit: string unit name
+    :side effect: calls relation_get
+    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
+    """
+    def _to_range(addr):
+        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
+            addr += '/32'
+        elif ':' in addr and '/' not in addr:  # IPv6
+            addr += '/128'
+        return addr
+
+    settings = relation_get(rid=rid, unit=unit)
+    if 'egress-subnets' in settings:
+        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
+    if 'ingress-address' in settings:
+        return [_to_range(settings['ingress-address'])]
+    if 'private-address' in settings:
+        return [_to_range(settings['private-address'])]
+    return []  # Should never happen
diff --git a/charmhelpers/core/host.py b/charmhelpers/core/host.py
index 5cc5c86b701fc5375f387eb01a0d2b76c184c263..322ab2acd71bb02f13d2d739e74d0ddc62774d9e 100644
--- a/charmhelpers/core/host.py
+++ b/charmhelpers/core/host.py
@@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
         with open(path, 'wb') as target:
             os.fchown(target.fileno(), uid, gid)
             os.fchmod(target.fileno(), perms)
+            if six.PY3 and isinstance(content, six.string_types):
+                content = content.encode('UTF-8')
             target.write(content)
         return
     # the contents were the same, but we might still need to change the
@@ -991,7 +993,7 @@ def updatedb(updatedb_text, new_path):
     return output
 
 
-def modulo_distribution(modulo=3, wait=30):
+def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
     """ Modulo distribution
 
     This helper uses the unit number, a modulo value and a constant wait time
@@ -1013,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30):
 
     @param modulo: int The modulo number creates the group distribution
     @param wait: int The constant time wait value
+    @param non_zero_wait: boolean Override unit % modulo == 0,
+                          return modulo * wait. Used to avoid collisions with
+                          leader nodes which are often given priority.
     @return: int Calculated time to wait for unit operation
     """
     unit_number = int(local_unit().split('/')[1])
-    return (unit_number % modulo) * wait
+    calculated_wait_time = (unit_number % modulo) * wait
+    if non_zero_wait and calculated_wait_time == 0:
+        return modulo * wait
+    else:
+        return calculated_wait_time
diff --git a/charmhelpers/core/host_factory/ubuntu.py b/charmhelpers/core/host_factory/ubuntu.py
index d8dc378a5dad29c271a89289e4b815e2c2c99060..99451b59789a822b4f5a96d7310965f1c8921898 100644
--- a/charmhelpers/core/host_factory/ubuntu.py
+++ b/charmhelpers/core/host_factory/ubuntu.py
@@ -20,6 +20,7 @@ UBUNTU_RELEASES = (
     'yakkety',
     'zesty',
     'artful',
+    'bionic',
 )
 
 
diff --git a/charmhelpers/core/services/base.py b/charmhelpers/core/services/base.py
index ca9dc996bd7d7fc2a18b7d9a9ee51adff171bda9..179ad4f0c367dd6b13c10b201c3752d1c8daf05e 100644
--- a/charmhelpers/core/services/base.py
+++ b/charmhelpers/core/services/base.py
@@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback):
     """
     def __call__(self, manager, service_name, event_name):
         service = manager.get_service(service_name)
-        new_ports = service.get('ports', [])
+        # turn this generator into a list,
+        # as we'll be going over it multiple times
+        new_ports = list(service.get('ports', []))
         port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
         if os.path.exists(port_file):
             with open(port_file) as fp:
                 old_ports = fp.read().split(',')
             for old_port in old_ports:
-                if bool(old_port):
-                    old_port = int(old_port)
-                    if old_port not in new_ports:
-                        hookenv.close_port(old_port)
+                if bool(old_port) and not self.ports_contains(old_port, new_ports):
+                    hookenv.close_port(old_port)
         with open(port_file, 'w') as fp:
             fp.write(','.join(str(port) for port in new_ports))
         for port in new_ports:
+            # A port is either a number or 'ICMP'
+            protocol = 'TCP'
+            if str(port).upper() == 'ICMP':
+                protocol = 'ICMP'
             if event_name == 'start':
-                hookenv.open_port(port)
+                hookenv.open_port(port, protocol)
             elif event_name == 'stop':
-                hookenv.close_port(port)
+                hookenv.close_port(port, protocol)
+
+    def ports_contains(self, port, ports):
+        if not bool(port):
+            return False
+        if str(port).upper() != 'ICMP':
+            port = int(port)
+        return port in ports
 
 
 def service_stop(service_name):
diff --git a/charmhelpers/core/sysctl.py b/charmhelpers/core/sysctl.py
index 6e413e31480e5fb4bcb703d58b1e87f98adc53af..1f188d8c653f9bf793e18ed484635fce310543cc 100644
--- a/charmhelpers/core/sysctl.py
+++ b/charmhelpers/core/sysctl.py
@@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
 def create(sysctl_dict, sysctl_file):
     """Creates a sysctl.conf file from a YAML associative array
 
-    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
+    :param sysctl_dict: a dict or YAML-formatted string of sysctl
+                        options eg "{ 'kernel.max_pid': 1337 }"
     :type sysctl_dict: str
     :param sysctl_file: path to the sysctl file to be saved
     :type sysctl_file: str or unicode
     :returns: None
     """
-    try:
-        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
-    except yaml.YAMLError:
-        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
-            level=ERROR)
-        return
+    if type(sysctl_dict) is not dict:
+        try:
+            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+        except yaml.YAMLError:
+            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+                level=ERROR)
+            return
+    else:
+        sysctl_dict_parsed = sysctl_dict
 
     with open(sysctl_file, "w") as fd:
         for key, value in sysctl_dict_parsed.items():
diff --git a/charmhelpers/core/templating.py b/charmhelpers/core/templating.py
index 7b801a34a5e6585485347f7a97bc18a10a093d03..9014015c14ee0b48c775562cd4f0d30884944439 100644
--- a/charmhelpers/core/templating.py
+++ b/charmhelpers/core/templating.py
@@ -20,7 +20,8 @@ from charmhelpers.core import hookenv
 
 
 def render(source, target, context, owner='root', group='root',
-           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
+           perms=0o444, templates_dir=None, encoding='UTF-8',
+           template_loader=None, config_template=None):
     """
     Render a template.
 
@@ -32,6 +33,9 @@ def render(source, target, context, owner='root', group='root',
     The context should be a dict containing the values to be replaced in the
     template.
 
+    config_template may be provided to render from a provided template instead
+    of loading from a file.
+
     The `owner`, `group`, and `perms` options will be passed to `write_file`.
 
     If omitted, `templates_dir` defaults to the `templates` folder in the charm.
@@ -65,14 +69,19 @@ def render(source, target, context, owner='root', group='root',
         if templates_dir is None:
             templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
         template_env = Environment(loader=FileSystemLoader(templates_dir))
-    try:
-        source = source
-        template = template_env.get_template(source)
-    except exceptions.TemplateNotFound as e:
-        hookenv.log('Could not load template %s from %s.' %
-                    (source, templates_dir),
-                    level=hookenv.ERROR)
-        raise e
+
+    # load from a string if provided explicitly
+    if config_template is not None:
+        template = template_env.from_string(config_template)
+    else:
+        try:
+            source = source
+            template = template_env.get_template(source)
+        except exceptions.TemplateNotFound as e:
+            hookenv.log('Could not load template %s from %s.' %
+                        (source, templates_dir),
+                        level=hookenv.ERROR)
+            raise e
     content = template.render(context)
     if target is not None:
         target_dir = os.path.dirname(target)
diff --git a/charmhelpers/core/unitdata.py b/charmhelpers/core/unitdata.py
index 7af875c2fcc1e2e38f9267bfdc60ab5a2a499c18..ab554327b343f896880523fc627c1abea84be29a 100644
--- a/charmhelpers/core/unitdata.py
+++ b/charmhelpers/core/unitdata.py
@@ -166,6 +166,10 @@ class Storage(object):
 
     To support dicts, lists, integer, floats, and booleans values
     are automatically json encoded/decoded.
+
+    Note: to facilitate unit testing, ':memory:' can be passed as the
+    path parameter which causes sqlite3 to only build the db in memory.
+    This should only be used for testing purposes.
     """
     def __init__(self, path=None):
         self.db_path = path
@@ -175,6 +179,9 @@ class Storage(object):
             else:
                 self.db_path = os.path.join(
                     os.environ.get('CHARM_DIR', ''), '.unit-state.db')
+        if self.db_path != ':memory:':
+            with open(self.db_path, 'a') as f:
+                os.fchmod(f.fileno(), 0o600)
         self.conn = sqlite3.connect('%s' % self.db_path)
         self.cursor = self.conn.cursor()
         self.revision = None
diff --git a/charmhelpers/fetch/ubuntu.py b/charmhelpers/fetch/ubuntu.py
index 910e96a66f45b8a173147141e7c5108d685b7b7a..736be713db66bff7d4d8e742d5b68d45a2270c00 100644
--- a/charmhelpers/fetch/ubuntu.py
+++ b/charmhelpers/fetch/ubuntu.py
@@ -44,6 +44,7 @@ ARCH_TO_PROPOSED_POCKET = {
     'x86_64': PROPOSED_POCKET,
     'ppc64le': PROPOSED_PORTS_POCKET,
     'aarch64': PROPOSED_PORTS_POCKET,
+    's390x': PROPOSED_PORTS_POCKET,
 }
 CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
 CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
@@ -157,6 +158,14 @@ CLOUD_ARCHIVE_POCKETS = {
     'queens/proposed': 'xenial-proposed/queens',
     'xenial-queens/proposed': 'xenial-proposed/queens',
     'xenial-proposed/queens': 'xenial-proposed/queens',
+    # Rocky
+    'rocky': 'bionic-updates/rocky',
+    'bionic-rocky': 'bionic-updates/rocky',
+    'bionic-rocky/updates': 'bionic-updates/rocky',
+    'bionic-updates/rocky': 'bionic-updates/rocky',
+    'rocky/proposed': 'bionic-proposed/rocky',
+    'bionic-rocky/proposed': 'bionic-proposed/rocky',
+    'bionic-proposed/rocky': 'bionic-proposed/rocky',
 }
 
 
diff --git a/config.yaml b/config.yaml
index 2656d0fbee9cc16544f44d339be8f87d18b3f9fe..a0c4a8c49f97d21a8f4e5fe75535e2917b000681 100644
--- a/config.yaml
+++ b/config.yaml
@@ -35,31 +35,6 @@ options:
       NOTE: updating this setting to a source that is known to provide
       a later version of OpenStack will trigger a software upgrade unless
       action-managed-upgrade is set to True.
-  openstack-origin-git:
-    type: string
-    default:
-    description: |
-      Specifies a default OpenStack release name, or a YAML dictionary
-      listing the git repositories to install from.
-      .
-      The default Openstack release name may be one of the following, where
-      the corresponding OpenStack github branch will be used:
-        * mitaka
-        * newton
-        * ocata
-        * pike
-        * master
-      .
-      The YAML must minimally include requirements and keystone repositories,
-      and may also include repositories for other dependencies:
-        repositories:
-        - {name: requirements,
-           repository: 'git://github.com/openstack/requirements',
-           branch: master}
-        - {name: keystone,
-           repository: 'git://github.com/openstack/keystone',
-           branch: master}
-        release: master
   action-managed-upgrade:
     type: boolean
     default: False
@@ -105,7 +80,8 @@ options:
     default: None
     description: |
       Admin password. To be used *for testing only*. Randomly generated by
-      default.
+      default. To retrieve the generated password, run:
+      juju run --unit keystone/0 leader-get admin_passwd
   admin-token:
     type: string
     default: None
@@ -144,37 +120,42 @@ options:
   enable-pki:
     type: string
     default: "false"
-    description: Enable PKI token signing.
+    description: |
+      Enable PKI token signing.
+      .
+      [DEPRECATED] This option should no longer be used.
+      This option will be removed in a future release.
   preferred-api-version:
     type: int
-    default: 2
+    default:
     description: |
       Use this keystone api version for keystone endpoints and advertise this
-      version to identity client charms.
+      version to identity client charms.  For OpenStack releases < Queens this
+      option defaults to 2; for Queens or later it defaults to 3.
   haproxy-server-timeout:
     type: int
     default:
     description: |
       Server timeout configuration in ms for haproxy, used in HA
-      configurations. If not provided, default value of 30000ms is used.
+      configurations. If not provided, default value of 90000ms is used.
   haproxy-client-timeout:
     type: int
     default:
     description: |
       Client timeout configuration in ms for haproxy, used in HA
-      configurations. If not provided, default value of 30000ms is used.
+      configurations. If not provided, default value of 90000ms is used.
   haproxy-queue-timeout:
     type: int
     default:
     description: |
       Queue timeout configuration in ms for haproxy, used in HA
-      configurations. If not provided, default value of 5000ms is used.
+      configurations. If not provided, default value of 9000ms is used.
   haproxy-connect-timeout:
     type: int
     default:
     description: |
       Connect timeout configuration in ms for haproxy, used in HA
-      configurations. If not provided, default value of 5000ms is used.
+      configurations. If not provided, default value of 9000ms is used.
   database:
     type: string
     default: "keystone"
@@ -553,11 +534,24 @@ options:
   https-service-endpoints:
     type: string
     default: "False"
-    description: Manage SSL certificates for all service endpoints.
+    description: |
+      Manage SSL certificates for all service endpoints. This option
+      should be False when specifying ssl\_\* options.
+      .
+      [DEPRECATED] This option should no longer be used.
+      Provide SSL certificate data through the ssl\_\* options.
+      This option will be removed in a future release.
   use-https:
     type: string
     default: "no"
-    description: Use SSL for Keystone itself. Set to 'yes' to enable it.
+    description: |
+      Use SSL for Keystone itself using a charm-generated CA. Set to
+      'yes' to enable it. This option should be 'no' when specifying ssl\_\*
+      options.
+      .
+      [DEPRECATED] This option should no longer be used.
+      Provide SSL certificate data through the ssl\_\* options.
+      This option will be removed in a future release.
   ssl_cert:
     type: string
     default:
diff --git a/hooks/pgsql-db-relation-changed b/hooks/certificates-relation-changed
similarity index 100%
rename from hooks/pgsql-db-relation-changed
rename to hooks/certificates-relation-changed
diff --git a/hooks/pgsql-db-relation-joined b/hooks/certificates-relation-departed
similarity index 100%
rename from hooks/pgsql-db-relation-joined
rename to hooks/certificates-relation-departed
diff --git a/hooks/certificates-relation-joined b/hooks/certificates-relation-joined
new file mode 120000
index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0
--- /dev/null
+++ b/hooks/certificates-relation-joined
@@ -0,0 +1 @@
+keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/keystone-fid-service-provider-relation-broken b/hooks/keystone-fid-service-provider-relation-broken
new file mode 120000
index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0
--- /dev/null
+++ b/hooks/keystone-fid-service-provider-relation-broken
@@ -0,0 +1 @@
+keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/keystone-fid-service-provider-relation-changed b/hooks/keystone-fid-service-provider-relation-changed
new file mode 120000
index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0
--- /dev/null
+++ b/hooks/keystone-fid-service-provider-relation-changed
@@ -0,0 +1 @@
+keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/keystone-fid-service-provider-relation-departed b/hooks/keystone-fid-service-provider-relation-departed
new file mode 120000
index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0
--- /dev/null
+++ b/hooks/keystone-fid-service-provider-relation-departed
@@ -0,0 +1 @@
+keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/keystone-fid-service-provider-relation-joined b/hooks/keystone-fid-service-provider-relation-joined
new file mode 120000
index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0
--- /dev/null
+++ b/hooks/keystone-fid-service-provider-relation-joined
@@ -0,0 +1 @@
+keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/keystone_context.py b/hooks/keystone_context.py
index 2534315261aace71042b39b06438cd029c808ff0..c194a2b753c222d639a1597e0be99dd344a411d4 100644
--- a/hooks/keystone_context.py
+++ b/hooks/keystone_context.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import hashlib
 import os
 import shutil
 import tarfile
@@ -28,8 +27,8 @@ from base64 import b64decode
 from charmhelpers.core.host import (
     mkdir,
     write_file,
-    service_restart,
 )
+import json
 
 from charmhelpers.contrib.openstack import context
 
@@ -45,113 +44,14 @@ from charmhelpers.core.hookenv import (
     config,
     log,
     leader_get,
-    DEBUG,
     INFO,
+    related_units,
+    relation_ids,
+    relation_get,
 )
 
-from charmhelpers.core.strutils import (
-    bool_from_string,
-)
-
-from charmhelpers.contrib.hahelpers.apache import install_ca_cert
-
-CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
-
-
-def is_cert_provided_in_config():
-    cert = config('ssl_cert')
-    key = config('ssl_key')
-    return bool(cert and key)
-
-
-class SSLContext(context.ApacheSSLContext):
-
-    def configure_cert(self, cn):
-        from keystone_utils import (
-            SSH_USER,
-            get_ca,
-            ensure_permissions,
-            is_ssl_cert_master,
-            KEYSTONE_USER,
-        )
-
-        # Ensure ssl dir exists whether master or not
-        perms = 0o775
-        mkdir(path=self.ssl_dir, owner=SSH_USER, group=KEYSTONE_USER,
-              perms=perms)
-        # Ensure accessible by keystone ssh user and group (for sync)
-        ensure_permissions(self.ssl_dir, user=SSH_USER, group=KEYSTONE_USER,
-                           perms=perms)
-
-        if not is_cert_provided_in_config() and not is_ssl_cert_master():
-            log("Not ssl-cert-master - skipping apache cert config until "
-                "master is elected", level=INFO)
-            return
-
-        log("Creating apache ssl certs in %s" % (self.ssl_dir), level=INFO)
-
-        cert = config('ssl_cert')
-        key = config('ssl_key')
-
-        if not (cert and key):
-            ca = get_ca(user=SSH_USER)
-            cert, key = ca.get_cert_and_key(common_name=cn)
-        else:
-            cert = b64decode(cert)
-            key = b64decode(key)
-
-        write_file(path=os.path.join(self.ssl_dir, 'cert_{}'.format(cn)),
-                   content=cert, owner=SSH_USER, group=KEYSTONE_USER,
-                   perms=0o644)
-        write_file(path=os.path.join(self.ssl_dir, 'key_{}'.format(cn)),
-                   content=key, owner=SSH_USER, group=KEYSTONE_USER,
-                   perms=0o644)
-
-    def configure_ca(self):
-        from keystone_utils import (
-            SSH_USER,
-            get_ca,
-            ensure_permissions,
-            is_ssl_cert_master,
-            KEYSTONE_USER,
-        )
-
-        if not is_cert_provided_in_config() and not is_ssl_cert_master():
-            log("Not ssl-cert-master - skipping apache ca config until "
-                "master is elected", level=INFO)
-            return
-
-        cert = config('ssl_cert')
-        key = config('ssl_key')
-
-        ca_cert = config('ssl_ca')
-        if ca_cert:
-            ca_cert = b64decode(ca_cert)
-        elif not (cert and key):
-            # NOTE(hopem): if a cert and key are provided as config we don't
-            # mandate that a CA is also provided since it isn't necessarily
-            # needed. As a result we only generate a custom CA if we are also
-            # generating cert and key.
-            ca = get_ca(user=SSH_USER)
-            ca_cert = ca.get_ca_bundle()
-
-        if ca_cert:
-            # Ensure accessible by keystone ssh user and group (unison)
-            install_ca_cert(ca_cert)
-            ensure_permissions(CA_CERT_PATH, user=SSH_USER,
-                               group=KEYSTONE_USER, perms=0o0644)
-
-    def canonical_names(self):
-        addresses = self.get_network_addresses()
-        addrs = []
-        for address, endpoint in addresses:
-            addrs.append(endpoint)
-
-        return list(set(addrs))
-
-
-class ApacheSSLContext(SSLContext):
 
+class ApacheSSLContext(context.ApacheSSLContext):
     interfaces = ['https']
     external_ports = []
     service_namespace = 'keystone'
@@ -161,31 +61,13 @@ class ApacheSSLContext(SSLContext):
         # late import to work around circular dependency
         from keystone_utils import (
             determine_ports,
-            update_hash_from_path,
         )
 
-        ssl_paths = [CA_CERT_PATH, self.ssl_dir]
-
         self.external_ports = determine_ports()
-        before = hashlib.sha256()
-        for path in ssl_paths:
-            update_hash_from_path(before, path)
-
-        ret = super(ApacheSSLContext, self).__call__()
+        return super(ApacheSSLContext, self).__call__()
 
-        after = hashlib.sha256()
-        for path in ssl_paths:
-            update_hash_from_path(after, path)
-
-        # Ensure that apache2 is restarted if these change
-        if before.hexdigest() != after.hexdigest():
-            service_restart('apache2')
-
-        return ret
-
-
-class NginxSSLContext(SSLContext):
 
+class NginxSSLContext(context.ApacheSSLContext):
     interfaces = ['https']
     external_ports = []
     service_namespace = 'keystone'
@@ -196,30 +78,14 @@ class NginxSSLContext(SSLContext):
         # late import to work around circular dependency
         from keystone_utils import (
             determine_ports,
-            update_hash_from_path,
-            APACHE_SSL_DIR
         )
 
-        ssl_paths = [CA_CERT_PATH, APACHE_SSL_DIR]
-
         self.external_ports = determine_ports()
-        before = hashlib.sha256()
-        for path in ssl_paths:
-            update_hash_from_path(before, path)
-
         ret = super(NginxSSLContext, self).__call__()
         if not ret:
             log("SSL not used", level='DEBUG')
             return {}
 
-        after = hashlib.sha256()
-        for path in ssl_paths:
-            update_hash_from_path(after, path)
-
-        # Ensure that Nginx is restarted if these change
-        if before.hexdigest() != after.hexdigest():
-            service_restart('snap.keystone.nginx')
-
         # Transform for use by Nginx
         """
         {'endpoints': [(u'10.5.0.30', u'10.5.0.30', 4990, 4980),
@@ -298,12 +164,12 @@ class KeystoneContext(context.OSContextGenerator):
     def __call__(self):
         from keystone_utils import (
             api_port, set_admin_token, endpoint_url, resolve_address,
-            PUBLIC, ADMIN, PKI_CERTS_DIR, ensure_pki_cert_paths, ADMIN_DOMAIN,
-            snap_install_requested,
+            PUBLIC, ADMIN, ADMIN_DOMAIN,
+            snap_install_requested, get_api_version,
         )
         ctxt = {}
         ctxt['token'] = set_admin_token(config('admin-token'))
-        ctxt['api_version'] = int(config('preferred-api-version'))
+        ctxt['api_version'] = get_api_version()
         ctxt['admin_role'] = config('admin-role')
         if ctxt['api_version'] > 2:
             ctxt['service_tenant_id'] = \
@@ -335,25 +201,6 @@ class KeystoneContext(context.OSContextGenerator):
                 flags = context.config_flags_parser(ldap_flags)
                 ctxt['ldap_config_flags'] = flags
 
-        enable_pki = config('enable-pki')
-        if enable_pki and bool_from_string(enable_pki):
-            log("Enabling PKI", level=DEBUG)
-            ctxt['token_provider'] = 'pki'
-
-            # NOTE(jamespage): Only check PKI configuration if the PKI
-            #                  token format is in use, which has been
-            #                  removed as of OpenStack Ocata.
-            ensure_pki_cert_paths()
-            certs = os.path.join(PKI_CERTS_DIR, 'certs')
-            privates = os.path.join(PKI_CERTS_DIR, 'privates')
-            ctxt['enable_signing'] = True
-            ctxt.update({'certfile': os.path.join(certs, 'signing_cert.pem'),
-                         'keyfile': os.path.join(privates, 'signing_key.pem'),
-                         'ca_certs': os.path.join(certs, 'ca.pem'),
-                         'ca_key': os.path.join(certs, 'ca_key.pem')})
-        else:
-            ctxt['enable_signing'] = False
-
         # Base endpoint URL's which are used in keystone responses
         # to unauthenticated requests to redirect clients to the
         # correct auth URL.
@@ -586,3 +433,46 @@ class IdpFetchContext(context.OSContextGenerator):
                                               get_api_suffix())
         }
         return ctxt
+
+
+class KeystoneFIDServiceProviderContext(context.OSContextGenerator):
+    interfaces = ['keystone-fid-service-provider']
+
+    def __call__(self):
+        fid_sp_keys = ['protocol-name', 'remote-id-attribute']
+        fid_sps = []
+        for rid in relation_ids("keystone-fid-service-provider"):
+            for unit in related_units(rid):
+                rdata = relation_get(unit=unit, rid=rid)
+                if set(rdata).issuperset(set(fid_sp_keys)):
+                    fid_sps.append({
+                        k: json.loads(v) for k, v in rdata.items()
+                        if k in fid_sp_keys
+                    })
+        # populate the context with data from one or more
+        # service providers
+        ctxt = ({'fid_sps': fid_sps}
+                if fid_sps else {})
+        return ctxt
+
+
+class WebSSOTrustedDashboardContext(context.OSContextGenerator):
+    interfaces = ['websso-trusted-dashboard']
+
+    def __call__(self):
+        trusted_dashboard_keys = ['scheme', 'hostname', 'path']
+        trusted_dashboards = set()
+        for rid in relation_ids("websso-trusted-dashboard"):
+            for unit in related_units(rid):
+                rdata = relation_get(unit=unit, rid=rid)
+                if set(rdata).issuperset(set(trusted_dashboard_keys)):
+                    scheme = rdata.get('scheme')
+                    hostname = rdata.get('hostname')
+                    path = rdata.get('path')
+                    url = '{}{}{}'.format(scheme, hostname, path)
+                    trusted_dashboards.add(url)
+        # populate the context with data from one or more
+        # trusted dashboards
+        ctxt = ({'trusted_dashboards': trusted_dashboards}
+                if trusted_dashboards else {})
+        return ctxt
diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py
index ed3c9effee40782fb791d81e2692d8702b242ce2..e24aec2d3913c75d65fa0c2df9431f4c0c287a7d 100755
--- a/hooks/keystone_hooks.py
+++ b/hooks/keystone_hooks.py
@@ -16,21 +16,18 @@
 
 import hashlib
 import json
-import os
 import sys
+import os
 
 from subprocess import check_call
 
-from charmhelpers.contrib import unison
 from charmhelpers.core import unitdata
 
 from charmhelpers.core.hookenv import (
     Hooks,
     UnregisteredHookError,
     config,
-    is_relation_made,
     log,
-    local_unit,
     DEBUG,
     INFO,
     WARNING,
@@ -42,29 +39,23 @@ from charmhelpers.core.hookenv import (
     status_set,
     open_port,
     is_leader,
+    relation_id,
 )
 
 from charmhelpers.core.host import (
-    mkdir,
     service_pause,
     service_stop,
     service_start,
     service_restart,
 )
 
-from charmhelpers.core.strutils import (
-    bool_from_string,
-)
-
 from charmhelpers.fetch import (
     apt_install, apt_update,
     filter_installed_packages
 )
 
 from charmhelpers.contrib.openstack.utils import (
-    config_value_changed,
     configure_installation_source,
-    git_install_requested,
     openstack_upgrade_available,
     sync_db_with_multi_ipv6_addresses,
     os_release,
@@ -85,36 +76,22 @@ from keystone_utils import (
     do_openstack_upgrade_reexec,
     ensure_initial_admin,
     get_admin_passwd,
-    git_install,
     migrate_database,
     save_script_rc,
     post_snap_install,
-    synchronize_ca_if_changed,
     register_configs,
     restart_map,
     services,
     CLUSTER_RES,
     KEYSTONE_CONF,
-    KEYSTONE_USER,
     POLICY_JSON,
     TOKEN_FLUSH_CRON_FILE,
-    SSH_USER,
     setup_ipv6,
     send_notifications,
-    check_peer_actions,
-    get_ssl_sync_request_units,
-    is_ssl_cert_master,
     is_db_ready,
-    clear_ssl_synced_units,
     is_db_initialised,
-    update_certs_if_available,
-    ensure_ssl_dir,
-    ensure_pki_dir_permissions,
-    ensure_permissions,
-    force_ssl_sync,
     filter_null,
-    ensure_ssl_dirs,
-    ensure_pki_cert_paths,
+    ensure_permissions,
     is_service_present,
     delete_service_entry,
     assess_status,
@@ -132,13 +109,12 @@ from keystone_utils import (
     ADMIN_DOMAIN,
     ADMIN_PROJECT,
     create_or_show_domain,
-    keystone_service,
+    restart_keystone,
 )
 
 from charmhelpers.contrib.hahelpers.cluster import (
     is_elected_leader,
     get_hacluster_config,
-    peer_units,
     https,
     is_clustered,
 )
@@ -152,12 +128,12 @@ from charmhelpers.payload.execd import execd_preinstall
 from charmhelpers.contrib.peerstorage import (
     peer_retrieve_by_prefix,
     peer_echo,
-    relation_get as relation_get_and_migrate,
 )
 from charmhelpers.contrib.openstack.ip import (
     ADMIN,
     resolve_address,
 )
+
 from charmhelpers.contrib.network.ip import (
     get_iface_for_address,
     get_netmask_for_address,
@@ -170,6 +146,11 @@ from charmhelpers.contrib.charmsupport import nrpe
 
 from charmhelpers.contrib.hardening.harden import harden
 
+from charmhelpers.contrib.openstack.cert_utils import (
+    get_certificate_request,
+    process_certificates,
+)
+
 hooks = Hooks()
 CONFIGS = register_configs()
 
@@ -201,20 +182,12 @@ def install():
         service_start('haproxy')
         if run_in_apache():
             disable_unused_apache_sites()
-            if not git_install_requested():
-                service_pause('keystone')
+            service_pause('keystone')
         install_apache_error_handler(config('no-user-mapping-url'))
 
-    status_set('maintenance', 'Git install')
-    git_install(config('openstack-origin-git'))
-
-    unison.ensure_user(user=SSH_USER, group=SSH_USER)
-    unison.ensure_user(user=SSH_USER, group=KEYSTONE_USER)
-
 
 @hooks.hook('config-changed')
 @restart_on_change(restart_map(), restart_functions=restart_function_map())
-@synchronize_ca_if_changed(fatal=True)
 @harden()
 def config_changed():
     if config('prefer-ipv6'):
@@ -223,40 +196,21 @@ def config_changed():
         sync_db_with_multi_ipv6_addresses(config('database'),
                                           config('database-user'))
 
-    unison.ensure_user(user=SSH_USER, group=SSH_USER)
-    unison.ensure_user(user=SSH_USER, group=KEYSTONE_USER)
-    homedir = unison.get_homedir(SSH_USER)
-    if not os.path.isdir(homedir):
-        mkdir(homedir, SSH_USER, SSH_USER, 0o775)
-
-    if git_install_requested():
-        if config_value_changed('openstack-origin-git'):
-            status_set('maintenance', 'Running Git install')
-            git_install(config('openstack-origin-git'))
-    elif not config('action-managed-upgrade'):
+    if not config('action-managed-upgrade'):
         if openstack_upgrade_available('keystone'):
             status_set('maintenance', 'Running openstack upgrade')
             do_openstack_upgrade_reexec(configs=CONFIGS)
 
     for r_id in relation_ids('cluster'):
-        cluster_joined(rid=r_id, ssl_sync_request=False)
+        cluster_joined(rid=r_id)
 
     config_changed_postupgrade()
 
 
 @hooks.hook('config-changed-postupgrade')
 @restart_on_change(restart_map(), restart_functions=restart_function_map())
-@synchronize_ca_if_changed(fatal=True)
 @harden()
 def config_changed_postupgrade():
-    # Ensure ssl dir exists and is unison-accessible
-    ensure_ssl_dir()
-
-    if not snap_install_requested():
-        check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])
-
-    ensure_ssl_dirs()
-
     save_script_rc()
     release = os_release('keystone')
     if run_in_apache(release=release):
@@ -265,8 +219,7 @@ def config_changed_postupgrade():
         # decorator can fire
         apt_install(filter_installed_packages(determine_packages()))
         # when deployed from source, init scripts aren't installed
-        if not git_install_requested():
-            service_pause('keystone')
+        service_pause('keystone')
 
         disable_unused_apache_sites()
         if WSGI_KEYSTONE_API_CONF in CONFIGS.templates:
@@ -301,65 +254,16 @@ def config_changed_postupgrade():
     if snap_install_requested() and not is_unit_paused_set():
         service_restart('snap.keystone.*')
 
-    initialise_pki()
-
     update_all_identity_relation_units()
     update_all_domain_backends()
-
-    # Ensure sync request is sent out (needed for any/all ssl change)
-    send_ssl_sync_request()
+    update_all_fid_backends()
 
     for r_id in relation_ids('ha'):
         ha_joined(relation_id=r_id)
 
 
-@synchronize_ca_if_changed(fatal=True)
-def initialise_pki():
-    """Create certs and keys required for token signing.
-
-    Used for PKI and signing token revocation list.
-
-    NOTE: keystone.conf [signing] section must be up-to-date prior to
-          executing this.
-    """
-    if CompareOpenStackReleases(os_release('keystone-common')) >= 'pike':
-        # pike dropped support for PKI token; skip function
-        return
-    ensure_pki_cert_paths()
-    if not peer_units() or is_ssl_cert_master():
-        log("Ensuring PKI token certs created", level=DEBUG)
-        if snap_install_requested():
-            cmd = ['/snap/bin/keystone-manage', 'pki_setup',
-                   '--keystone-user', KEYSTONE_USER,
-                   '--keystone-group', KEYSTONE_USER]
-            _log_dir = '/var/snap/keystone/common/log'
-        else:
-            cmd = ['keystone-manage', 'pki_setup',
-                   '--keystone-user', KEYSTONE_USER,
-                   '--keystone-group', KEYSTONE_USER]
-            _log_dir = '/var/log/keystone'
-        check_call(cmd)
-
-        # Ensure logfile has keystone perms since we may have just created it
-        # with root.
-        ensure_permissions(_log_dir, user=KEYSTONE_USER,
-                           group=KEYSTONE_USER, perms=0o744)
-        ensure_permissions('{}/keystone.log'.format(_log_dir),
-                           user=KEYSTONE_USER, group=KEYSTONE_USER,
-                           perms=0o644)
-
-    ensure_pki_dir_permissions()
-
-
 @hooks.hook('shared-db-relation-joined')
 def db_joined():
-    if is_relation_made('pgsql-db'):
-        # error, postgresql is used
-        e = ('Attempting to associate a mysql database when there is already '
-             'associated a postgresql one')
-        log(e, level=ERROR)
-        raise Exception(e)
-
     if config('prefer-ipv6'):
         sync_db_with_multi_ipv6_addresses(config('database'),
                                           config('database-user'))
@@ -378,23 +282,9 @@ def db_joined():
                      hostname=host)
 
 
-@hooks.hook('pgsql-db-relation-joined')
-def pgsql_db_joined():
-    if is_relation_made('shared-db'):
-        # raise error
-        e = ('Attempting to associate a postgresql database when there'
-             ' is already associated a mysql one')
-        log(e, level=ERROR)
-        raise Exception(e)
-
-    relation_set(database=config('database'))
-
-
 def update_all_identity_relation_units(check_db_ready=True):
     if is_unit_paused_set():
         return
-    CONFIGS.write_all()
-    configure_https()
     if check_db_ready and not is_db_ready():
         log('Allowed_units list provided and this unit not present',
             level=INFO)
@@ -421,11 +311,6 @@ def update_all_identity_relation_units(check_db_ready=True):
             identity_credentials_changed(relation_id=rid, remote_unit=unit)
 
 
-@synchronize_ca_if_changed(force=True)
-def update_all_identity_relation_units_force_sync():
-    update_all_identity_relation_units()
-
-
 def update_all_domain_backends():
     """Re-trigger hooks for all domain-backend relations/units"""
     for rid in relation_ids('domain-backend'):
@@ -433,6 +318,17 @@ def update_all_domain_backends():
             domain_backend_changed(relation_id=rid, unit=unit)
 
 
+def update_all_fid_backends():
+    """If there are any config changes, e.g. for domain or service port
+    make sure to update those for all relation-level buckets"""
+    if CompareOpenStackReleases(os_release('keystone-common')) < 'ocata':
+        log('Ignoring keystone-fid-service-provider relation as it is'
+            ' not supported on releases older than Ocata')
+        return
+    for rid in relation_ids('keystone-fid-service-provider'):
+        update_keystone_fid_service_provider(relation_id=rid)
+
+
 def leader_init_db_if_ready(use_current_context=False):
     """ Initialise the keystone db if it is ready and mark it as initialised.
 
@@ -464,7 +360,6 @@ def leader_init_db_if_ready(use_current_context=False):
 
 @hooks.hook('shared-db-relation-changed')
 @restart_on_change(restart_map(), restart_functions=restart_function_map())
-@synchronize_ca_if_changed()
 def db_changed():
     if 'shared-db' not in CONFIGS.complete_contexts():
         log('shared-db relation incomplete. Peer not ready?')
@@ -474,28 +369,12 @@ def db_changed():
         if CompareOpenStackReleases(
                 os_release('keystone-common')) >= 'liberty':
             CONFIGS.write(POLICY_JSON)
-
-
-@hooks.hook('pgsql-db-relation-changed')
-@restart_on_change(restart_map(), restart_functions=restart_function_map())
-@synchronize_ca_if_changed()
-def pgsql_db_changed():
-    if 'pgsql-db' not in CONFIGS.complete_contexts():
-        log('pgsql-db relation incomplete. Peer not ready?')
-    else:
-        CONFIGS.write(KEYSTONE_CONF)
-        leader_init_db_if_ready(use_current_context=True)
-        if CompareOpenStackReleases(
-                os_release('keystone-common')) >= 'liberty':
-            CONFIGS.write(POLICY_JSON)
+        update_all_identity_relation_units()
 
 
 @hooks.hook('identity-service-relation-changed')
 @restart_on_change(restart_map(), restart_functions=restart_function_map())
-@synchronize_ca_if_changed()
 def identity_changed(relation_id=None, remote_unit=None):
-    CONFIGS.write_all()
-
     notifications = {}
     if is_elected_leader(CLUSTER_RES):
         if not is_db_ready():
@@ -574,59 +453,8 @@ def identity_credentials_changed(relation_id=None, remote_unit=None):
         log('Deferring identity_credentials_changed() to service leader.')
 
 
-def send_ssl_sync_request():
-    """Set sync request on cluster relation.
-
-    Value set equals number of ssl configs currently enabled so that if they
-    change, we ensure that certs are synced. This setting is consumed by
-    cluster-relation-changed ssl master. We also clear the 'synced' set to
-    guarantee that a sync will occur.
-
-    Note the we do nothing if the setting is already applied.
-    """
-    unit = local_unit().replace('/', '-')
-    # Start with core config (e.g. used for signing revoked token list)
-    ssl_config = 0b1
-
-    use_https = config('use-https')
-    if use_https and bool_from_string(use_https):
-        ssl_config ^= 0b10
-
-    https_service_endpoints = config('https-service-endpoints')
-    if (https_service_endpoints and
-            bool_from_string(https_service_endpoints)):
-        ssl_config ^= 0b100
-
-    enable_pki = config('enable-pki')
-    if enable_pki and bool_from_string(enable_pki):
-        ssl_config ^= 0b1000
-
-    key = 'ssl-sync-required-%s' % (unit)
-    settings = {key: ssl_config}
-
-    prev = 0b0
-    rid = None
-    for rid in relation_ids('cluster'):
-        for unit in related_units(rid):
-            _prev = relation_get(rid=rid, unit=unit, attribute=key) or 0b0
-            if _prev and _prev > prev:
-                prev = bin(_prev)
-
-    if rid and prev ^ ssl_config:
-        if is_leader():
-            clear_ssl_synced_units()
-
-        log("Setting %s=%s" % (key, bin(ssl_config)), level=DEBUG)
-        relation_set(relation_id=rid, relation_settings=settings)
-
-
 @hooks.hook('cluster-relation-joined')
-def cluster_joined(rid=None, ssl_sync_request=True):
-    unison.ssh_authorized_peers(user=SSH_USER,
-                                group=SSH_USER,
-                                peer_interface='cluster',
-                                ensure_local_user=True)
-
+def cluster_joined(rid=None):
     settings = {}
 
     for addr_type in ADDRESS_TYPES:
@@ -640,57 +468,19 @@ def cluster_joined(rid=None, ssl_sync_request=True):
 
     relation_set(relation_id=rid, relation_settings=settings)
 
-    if ssl_sync_request:
-        send_ssl_sync_request()
-
 
 @hooks.hook('cluster-relation-changed')
 @restart_on_change(restart_map(), stopstart=True)
-@update_certs_if_available
 def cluster_changed():
-    unison.ssh_authorized_peers(user=SSH_USER,
-                                group=SSH_USER,
-                                peer_interface='cluster',
-                                ensure_local_user=True)
     # NOTE(jamespage) re-echo passwords for peer storage
-    echo_whitelist = ['_passwd', 'identity-service:',
-                      'db-initialised', 'ssl-cert-available-updates']
-    # Don't echo if leader since a re-election may be in progress.
-    if not is_leader():
-        echo_whitelist.append('ssl-cert-master')
+    echo_whitelist = ['_passwd', 'identity-service:', 'db-initialised']
 
     log("Peer echo whitelist: %s" % (echo_whitelist), level=DEBUG)
     peer_echo(includes=echo_whitelist, force=True)
 
-    check_peer_actions()
-
-    initialise_pki()
-
-    if is_leader():
-        # Figure out if we need to mandate a sync
-        units = get_ssl_sync_request_units()
-        synced_units = relation_get_and_migrate(attribute='ssl-synced-units',
-                                                unit=local_unit())
-        diff = None
-        if synced_units:
-            synced_units = json.loads(synced_units)
-            diff = set(units).symmetric_difference(set(synced_units))
-    else:
-        units = None
-
-    if units and (not synced_units or diff):
-        log("New peers joined and need syncing - %s" %
-            (', '.join(units)), level=DEBUG)
-        update_all_identity_relation_units_force_sync()
-    else:
-        update_all_identity_relation_units()
+    update_all_identity_relation_units()
 
-    if not is_leader() and is_ssl_cert_master():
-        # Force and sync and trigger a sync master re-election since we are not
-        # leader anymore.
-        force_ssl_sync()
-    else:
-        CONFIGS.write_all()
+    CONFIGS.write_all()
 
 
 @hooks.hook('leader-elected')
@@ -705,8 +495,6 @@ def leader_elected():
 
     update_all_identity_relation_units()
 
-    update_all_identity_relation_units()
-
 
 @hooks.hook('leader-settings-changed')
 @restart_on_change(restart_map(), stopstart=True)
@@ -791,7 +579,6 @@ def ha_joined(relation_id=None):
 
 @hooks.hook('ha-relation-changed')
 @restart_on_change(restart_map(), restart_functions=restart_function_map())
-@synchronize_ca_if_changed()
 def ha_changed():
     CONFIGS.write_all()
 
@@ -799,10 +586,7 @@ def ha_changed():
     if clustered:
         log('Cluster configured, notifying other services and updating '
             'keystone endpoint configuration')
-        if is_ssl_cert_master():
-            update_all_identity_relation_units_force_sync()
-        else:
-            update_all_identity_relation_units()
+        update_all_identity_relation_units()
 
 
 @hooks.hook('identity-admin-relation-changed')
@@ -855,16 +639,11 @@ def domain_backend_changed(relation_id=None, unit=None):
         domain_nonce_key = 'domain-restart-nonce-{}'.format(domain_name)
         db = unitdata.kv()
         if restart_nonce != db.get(domain_nonce_key):
-            if not is_unit_paused_set():
-                if snap_install_requested():
-                    service_restart('snap.keystone.*')
-                else:
-                    service_restart(keystone_service())
+            restart_keystone()
             db.set(domain_nonce_key, restart_nonce)
             db.flush()
 
 
-@synchronize_ca_if_changed(fatal=True)
 def configure_https():
     '''
     Enables SSL API Apache config if appropriate and kicks identity-service
@@ -1045,17 +824,10 @@ def configure_oidc():
 
 @hooks.hook('upgrade-charm')
 @restart_on_change(restart_map(), stopstart=True)
-@synchronize_ca_if_changed()
 @harden()
 def upgrade_charm():
     status_set('maintenance', 'Installing apt packages')
     apt_install(filter_installed_packages(determine_packages()))
-    unison.ssh_authorized_peers(user=SSH_USER,
-                                group=SSH_USER,
-                                peer_interface='cluster',
-                                ensure_local_user=True)
-
-    ensure_ssl_dirs()
 
     if run_in_apache():
         disable_unused_apache_sites()
@@ -1098,6 +870,102 @@ def update_nrpe_config():
     nrpe_setup.write()
 
 
+@hooks.hook('keystone-fid-service-provider-relation-joined',
+            'keystone-fid-service-provider-relation-changed')
+def keystone_fid_service_provider_changed():
+    if get_api_version() < 3:
+        log('Identity federation is only supported with keystone v3')
+        return
+    if CompareOpenStackReleases(os_release('keystone-common')) < 'ocata':
+        log('Ignoring keystone-fid-service-provider relation as it is'
+            ' not supported on releases older than Ocata')
+        return
+    # for the join case a keystone public-facing hostname and service
+    # port need to be set
+    update_keystone_fid_service_provider(relation_id=relation_id())
+
+    # handle relation data updates (if any), e.g. remote_id_attribute
+    # and a restart will be handled via a nonce, not restart_on_change
+    CONFIGS.write(KEYSTONE_CONF)
+
+    # The relation is container-scoped so this keystone unit's unitdata
+    # will only contain a nonce of a single fid subordinate for a given
+    # fid backend (relation id)
+    restart_nonce = relation_get('restart-nonce')
+    if restart_nonce:
+        nonce = json.loads(restart_nonce)
+        # multiplex by relation id for multiple federated identity
+        # provider charms
+        fid_nonce_key = 'fid-restart-nonce-{}'.format(relation_id())
+        db = unitdata.kv()
+        if nonce != db.get(fid_nonce_key):
+            restart_keystone()
+            db.set(fid_nonce_key, nonce)
+            db.flush()
+
+
+@hooks.hook('keystone-fid-service-provider-relation-broken')
+def keystone_fid_service_provider_broken():
+    if CompareOpenStackReleases(os_release('keystone-common')) < 'ocata':
+        log('Ignoring keystone-fid-service-provider relation as it is'
+            ' not supported on releases older than Ocata')
+        return
+
+    restart_keystone()
+
+
+@hooks.hook('websso-trusted-dashboard-relation-joined',
+            'websso-trusted-dashboard-relation-changed',
+            'websso-trusted-dashboard-relation-broken')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+def websso_trusted_dashboard_changed():
+    if get_api_version() < 3:
+        log('WebSSO is only supported with keystone v3')
+        return
+    if CompareOpenStackReleases(os_release('keystone-common')) < 'ocata':
+        log('Ignoring WebSSO relation as it is not supported on'
+            ' releases older than Ocata')
+        return
+    CONFIGS.write(KEYSTONE_CONF)
+
+
+def update_keystone_fid_service_provider(relation_id=None):
+    tls_enabled = (config('ssl_cert') is not None and
+                   config('ssl_key') is not None)
+    # reactive endpoints implementation on the other side, hence
+    # json-encoded values
+    fid_settings = {
+        'hostname': json.dumps(config('os-public-hostname')),
+        'port': json.dumps(config('service-port')),
+        'tls-enabled': json.dumps(tls_enabled),
+    }
+
+    relation_set(relation_id=relation_id,
+                 relation_settings=fid_settings)
+
+
+@hooks.hook('certificates-relation-joined')
+def certs_joined(relation_id=None):
+    relation_set(
+        relation_id=relation_id,
+        relation_settings=get_certificate_request())
+
+
+@hooks.hook('certificates-relation-changed')
+@restart_on_change(restart_map(), stopstart=True)
+def certs_changed(relation_id=None, unit=None):
+    # update_all_identity_relation_units calls the keystone API
+    # so configs need to be written and services restarted
+    # before
+    @restart_on_change(restart_map(), stopstart=True)
+    def write_certs_and_config():
+        process_certificates('keystone', relation_id, unit)
+        configure_https()
+    write_certs_and_config()
+    update_all_identity_relation_units()
+    update_all_domain_backends()
+
+
 def main():
     try:
         hooks.execute(sys.argv)
diff --git a/hooks/keystone_ssl.py b/hooks/keystone_ssl.py
deleted file mode 100644
index 43f96588c44a6d1cb6418cbd7ca5655e15a36b88..0000000000000000000000000000000000000000
--- a/hooks/keystone_ssl.py
+++ /dev/null
@@ -1,358 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import shutil
-import subprocess
-import tarfile
-import tempfile
-
-from charmhelpers.core.hookenv import (
-    log,
-    DEBUG,
-)
-
-CA_EXPIRY = '365'
-ORG_NAME = 'Ubuntu'
-ORG_UNIT = 'Ubuntu Cloud'
-CA_BUNDLE = '/usr/local/share/ca-certificates/juju_ca_cert.crt'
-
-CA_CONFIG = """
-[ ca ]
-default_ca = CA_default
-
-[ CA_default ]
-dir                     = %(ca_dir)s
-policy                  = policy_match
-database                = $dir/index.txt
-serial                  = $dir/serial
-certs                   = $dir/certs
-crl_dir                 = $dir/crl
-new_certs_dir           = $dir/newcerts
-certificate             = $dir/cacert.pem
-private_key             = $dir/private/cacert.key
-RANDFILE                = $dir/private/.rand
-default_md              = default
-
-[ req ]
-default_bits            = 1024
-default_md              = sha1
-
-prompt                  = no
-distinguished_name      = ca_distinguished_name
-
-x509_extensions         = ca_extensions
-
-[ ca_distinguished_name ]
-organizationName        = %(org_name)s
-organizationalUnitName  = %(org_unit_name)s Certificate Authority
-commonName              = %(common_name)s
-
-[ policy_match ]
-countryName             = optional
-stateOrProvinceName     = optional
-organizationName        = match
-organizationalUnitName  = optional
-commonName              = supplied
-
-[ ca_extensions ]
-basicConstraints        = critical,CA:true
-subjectKeyIdentifier    = hash
-authorityKeyIdentifier  = keyid:always, issuer
-keyUsage                = cRLSign, keyCertSign
-"""
-
-SIGNING_CONFIG = """
-[ ca ]
-default_ca = CA_default
-
-[ CA_default ]
-dir                     = %(ca_dir)s
-policy                  = policy_match
-database                = $dir/index.txt
-serial                  = $dir/serial
-certs                   = $dir/certs
-crl_dir                 = $dir/crl
-new_certs_dir           = $dir/newcerts
-certificate             = $dir/cacert.pem
-private_key             = $dir/private/cacert.key
-RANDFILE                = $dir/private/.rand
-default_md              = default
-
-[ req ]
-default_bits            = 1024
-default_md              = sha1
-
-prompt                  = no
-distinguished_name      = req_distinguished_name
-
-x509_extensions         = req_extensions
-
-[ req_distinguished_name ]
-organizationName        = %(org_name)s
-organizationalUnitName  = %(org_unit_name)s Server Farm
-
-[ policy_match ]
-countryName             = optional
-stateOrProvinceName     = optional
-organizationName        = match
-organizationalUnitName  = optional
-commonName              = supplied
-
-[ req_extensions ]
-basicConstraints        = CA:false
-subjectKeyIdentifier    = hash
-authorityKeyIdentifier  = keyid:always, issuer
-keyUsage                = digitalSignature, keyEncipherment, keyAgreement
-extendedKeyUsage        = serverAuth, clientAuth
-"""
-
-# Instance can be appended to this list to represent a singleton
-CA_SINGLETON = []
-
-
-def init_ca(ca_dir, common_name, org_name=ORG_NAME, org_unit_name=ORG_UNIT):
-    log('Ensuring certificate authority exists at %s.' % ca_dir, level=DEBUG)
-    if not os.path.exists(ca_dir):
-        log('Initializing new certificate authority at %s' % ca_dir,
-            level=DEBUG)
-        os.mkdir(ca_dir)
-
-    for i in ['certs', 'crl', 'newcerts', 'private']:
-        d = os.path.join(ca_dir, i)
-        if not os.path.exists(d):
-            log('Creating %s.' % d, level=DEBUG)
-            os.mkdir(d)
-    os.chmod(os.path.join(ca_dir, 'private'), 0o710)
-
-    if not os.path.isfile(os.path.join(ca_dir, 'serial')):
-        with open(os.path.join(ca_dir, 'serial'), 'wb') as out:
-            out.write('01\n')
-
-    if not os.path.isfile(os.path.join(ca_dir, 'index.txt')):
-        with open(os.path.join(ca_dir, 'index.txt'), 'wb') as out:
-            out.write('')
-
-    conf = os.path.join(ca_dir, 'ca.cnf')
-    if not os.path.isfile(conf):
-        log('Creating new CA config in %s' % ca_dir, level=DEBUG)
-        with open(conf, 'wb') as out:
-            out.write(CA_CONFIG % locals())
-
-
-def root_ca_crt_key(ca_dir):
-    init = False
-    crt = os.path.join(ca_dir, 'cacert.pem')
-    key = os.path.join(ca_dir, 'private', 'cacert.key')
-    for f in [crt, key]:
-        if not os.path.isfile(f):
-            log('Missing %s, will re-initialize cert+key.' % f, level=DEBUG)
-            init = True
-        else:
-            log('Found %s.' % f, level=DEBUG)
-
-    if init:
-        conf = os.path.join(ca_dir, 'ca.cnf')
-        cmd = ['openssl', 'req', '-config', conf,
-               '-x509', '-nodes', '-newkey', 'rsa', '-days', '21360',
-               '-keyout', key, '-out', crt, '-outform', 'PEM']
-        subprocess.check_call(cmd)
-
-    return crt, key
-
-
-def intermediate_ca_csr_key(ca_dir):
-    log('Creating new intermediate CSR.', level=DEBUG)
-    key = os.path.join(ca_dir, 'private', 'cacert.key')
-    csr = os.path.join(ca_dir, 'cacert.csr')
-    conf = os.path.join(ca_dir, 'ca.cnf')
-    cmd = ['openssl', 'req', '-config', conf, '-sha1', '-newkey', 'rsa',
-           '-nodes', '-keyout', key, '-out', csr, '-outform', 'PEM']
-    subprocess.check_call(cmd)
-    return csr, key
-
-
-def sign_int_csr(ca_dir, csr, common_name):
-    log('Signing certificate request %s.' % csr, level=DEBUG)
-    crt_name = os.path.basename(csr).split('.')[0]
-    crt = os.path.join(ca_dir, 'certs', '%s.crt' % crt_name)
-    subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name)
-    conf = os.path.join(ca_dir, 'ca.cnf')
-    cmd = ['openssl', 'ca', '-batch', '-config', conf, '-extensions',
-           'ca_extensions', '-days', CA_EXPIRY, '-notext', '-in', csr, '-out',
-           crt, '-subj', subj, '-batch']
-    log("Executing: %s" % ' '.join(cmd), level=DEBUG)
-    subprocess.check_call(cmd)
-    return crt
-
-
-def init_root_ca(ca_dir, common_name):
-    init_ca(ca_dir, common_name)
-    return root_ca_crt_key(ca_dir)
-
-
-def init_intermediate_ca(ca_dir, common_name, root_ca_dir, org_name=ORG_NAME,
-                         org_unit_name=ORG_UNIT):
-    init_ca(ca_dir, common_name)
-    if not os.path.isfile(os.path.join(ca_dir, 'cacert.pem')):
-        csr, key = intermediate_ca_csr_key(ca_dir)
-        crt = sign_int_csr(root_ca_dir, csr, common_name)
-        shutil.copy(crt, os.path.join(ca_dir, 'cacert.pem'))
-    else:
-        log('Intermediate CA certificate already exists.', level=DEBUG)
-
-    conf = os.path.join(ca_dir, 'signing.cnf')
-    if not os.path.isfile(conf):
-        log('Creating new signing config in %s' % ca_dir, level=DEBUG)
-        with open(conf, 'wb') as out:
-            out.write(SIGNING_CONFIG % locals())
-
-
-def create_certificate(ca_dir, service):
-    common_name = service
-    subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name)
-    csr = os.path.join(ca_dir, 'certs', '%s.csr' % service)
-    key = os.path.join(ca_dir, 'certs', '%s.key' % service)
-    cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa', '-nodes', '-keyout',
-           key, '-out', csr, '-subj', subj]
-    subprocess.check_call(cmd)
-    crt = sign_int_csr(ca_dir, csr, common_name)
-    log('Signed new CSR, crt @ %s' % crt, level=DEBUG)
-    return
-
-
-def update_bundle(bundle_file, new_bundle):
-    return
-    if os.path.isfile(bundle_file):
-        with open(bundle_file, 'r') as f:
-            current = f.read().strip()
-        if new_bundle == current:
-            log('CA Bundle @ %s is up to date.' % bundle_file, level=DEBUG)
-            return
-
-        log('Updating CA bundle @ %s.' % bundle_file, level=DEBUG)
-
-    with open(bundle_file, 'wb') as out:
-        out.write(new_bundle)
-
-    subprocess.check_call(['update-ca-certificates'])
-
-
-def tar_directory(path):
-    cwd = os.getcwd()
-    parent = os.path.dirname(path)
-    directory = os.path.basename(path)
-    tmp = tempfile.TemporaryFile()
-    os.chdir(parent)
-    tarball = tarfile.TarFile(fileobj=tmp, mode='w')
-    tarball.add(directory)
-    tarball.close()
-    tmp.seek(0)
-    out = tmp.read()
-    tmp.close()
-    os.chdir(cwd)
-    return out
-
-
-class JujuCA(object):
-
-    def __init__(self, name, ca_dir, root_ca_dir, user, group):
-        # Root CA
-        cn = '%s Certificate Authority' % name
-        root_crt, root_key = init_root_ca(root_ca_dir, cn)
-        # Intermediate CA
-        cn = '%s Intermediate Certificate Authority' % name
-        init_intermediate_ca(ca_dir, cn, root_ca_dir)
-
-        # Create dirs
-        cmd = ['chown', '-R', '%s.%s' % (user, group), ca_dir]
-        subprocess.check_call(cmd)
-        cmd = ['chown', '-R', '%s.%s' % (user, group), root_ca_dir]
-        subprocess.check_call(cmd)
-
-        self.ca_dir = ca_dir
-        self.root_ca_dir = root_ca_dir
-        self.user = user
-        self.group = group
-        update_bundle(CA_BUNDLE, self.get_ca_bundle())
-
-    def _sign_csr(self, csr, service, common_name):
-        subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name)
-        crt = os.path.join(self.ca_dir, 'certs', '%s.crt' % common_name)
-        conf = os.path.join(self.ca_dir, 'signing.cnf')
-        cmd = ['openssl', 'ca', '-config', conf, '-extensions',
-               'req_extensions', '-days', '365', '-notext', '-in', csr,
-               '-out', crt, '-batch', '-subj', subj]
-        subprocess.check_call(cmd)
-        return crt
-
-    def _create_certificate(self, service, common_name):
-        subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name)
-        csr = os.path.join(self.ca_dir, 'certs', '%s.csr' % service)
-        key = os.path.join(self.ca_dir, 'certs', '%s.key' % service)
-        cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa', '-nodes',
-               '-keyout', key, '-out', csr, '-subj', subj]
-        subprocess.check_call(cmd)
-        crt = self._sign_csr(csr, service, common_name)
-        cmd = ['chown', '-R', '%s.%s' % (self.user, self.group), self.ca_dir]
-        subprocess.check_call(cmd)
-        log('Signed new CSR, crt @ %s' % crt, level=DEBUG)
-        return crt, key
-
-    def get_key_path(self, cn):
-        return os.path.join(self.ca_dir, 'certs', '%s.key' % cn)
-
-    def get_cert_path(self, cn):
-        return os.path.join(self.ca_dir, 'certs', '%s.crt' % cn)
-
-    def get_cert_and_key(self, common_name):
-        keypath = self.get_key_path(common_name)
-        crtpath = self.get_cert_path(common_name)
-        if not os.path.isfile(crtpath):
-            log("Creating certificate and key for {}.".format(common_name),
-                level=DEBUG)
-            crtpath, keypath = self._create_certificate(common_name,
-                                                        common_name)
-
-        with open(crtpath, 'r') as f:
-            crt = f.read()
-        with open(keypath, 'r') as f:
-            key = f.read()
-        return crt, key
-
-    @property
-    def ca_cert_path(self):
-        return os.path.join(self.ca_dir, 'cacert.pem')
-
-    @property
-    def ca_key_path(self):
-        return os.path.join(self.ca_dir, 'private', 'cacert.key')
-
-    @property
-    def root_ca_cert_path(self):
-        return os.path.join(self.root_ca_dir, 'cacert.pem')
-
-    @property
-    def root_ca_key_path(self):
-        return os.path.join(self.root_ca_dir, 'private', 'cacert.key')
-
-    def get_ca_bundle(self):
-        with open(self.ca_cert_path) as f:
-            int_cert = f.read()
-        with open(self.root_ca_cert_path) as f:
-            root_cert = f.read()
-        # NOTE: ordering of certs in bundle matters!
-        return int_cert + root_cert
diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py
index ba9cf037e4a28e605e1b334677d5956a72dd9dd2..b40b050f328fea9f395cd462346f6d187ea71a75 100644
--- a/hooks/keystone_utils.py
+++ b/hooks/keystone_utils.py
@@ -14,24 +14,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import glob
-import grp
-import hashlib
-import json
 import os
-import pwd
-import re
 import shutil
 import subprocess
-import tarfile
-import threading
 import time
 import urlparse
 import uuid
 import sys
 
 from itertools import chain
-from base64 import b64encode
 from collections import OrderedDict
 from copy import deepcopy
 
@@ -39,7 +30,6 @@ from charmhelpers.contrib.hahelpers.cluster import (
     is_elected_leader,
     determine_api_port,
     https,
-    peer_units,
     get_hacluster_config,
 )
 
@@ -60,18 +50,10 @@ from charmhelpers.contrib.openstack.utils import (
     configure_installation_source,
     error_out,
     get_os_codename_install_source,
-    git_clone_and_install,
-    git_default_repos,
-    git_determine_usr_bin,
-    git_install_requested,
-    git_pip_venv_dir,
-    git_src_dir,
-    git_yaml_value,
     os_release,
     save_script_rc as _save_script_rc,
     pause_unit,
     resume_unit,
-    is_unit_paused_set,
     make_assess_status_func,
     os_application_version_set,
     CompareOpenStackReleases,
@@ -80,24 +62,18 @@ from charmhelpers.contrib.openstack.utils import (
     install_os_snaps,
     get_snaps_install_info_from_origin,
     enable_memcache,
-)
-
-from charmhelpers.contrib.python.packages import (
-    pip_install,
+    is_unit_paused_set,
 )
 
 from charmhelpers.core.strutils import (
     bool_from_string,
 )
 
-import charmhelpers.contrib.unison as unison
-
 from charmhelpers.core.decorators import (
     retry_on_exception,
 )
 
 from charmhelpers.core.hookenv import (
-    charm_dir,
     config,
     leader_get,
     leader_set,
@@ -110,9 +86,6 @@ from charmhelpers.core.hookenv import (
     related_units,
     DEBUG,
     INFO,
-    WARNING,
-    ERROR,
-    is_leader,
 )
 
 from charmhelpers.fetch import (
@@ -123,16 +96,12 @@ from charmhelpers.fetch import (
 )
 
 from charmhelpers.core.host import (
-    adduser,
-    add_group,
-    add_user_to_group,
     mkdir,
+    service_restart,
     service_stop,
     service_start,
-    service_restart,
     pwgen,
     lsb_release,
-    write_file,
     CompareHostReleases,
 )
 
@@ -140,13 +109,11 @@ from charmhelpers.contrib.peerstorage import (
     peer_store_and_set,
     peer_store,
     peer_retrieve,
-    relation_set as relation_set_and_migrate_to_leader,
 )
 
 from charmhelpers.core.templating import render
 
 import keystone_context
-import keystone_ssl as ssl
 
 
 TEMPLATES = 'templates/'
@@ -163,7 +130,6 @@ BASE_PACKAGES = [
     'python-requests',
     'python-six',
     'pwgen',
-    'unison',
     'uuid',
 ]
 
@@ -172,32 +138,11 @@ BASE_PACKAGES_SNAP = [
     'openssl',
     'python-six',
     'pwgen',
-    'unison',
     'uuid',
 ]
 
 VERSION_PACKAGE = 'keystone'
 
-BASE_GIT_PACKAGES = [
-    'libffi-dev',
-    'libmysqlclient-dev',
-    'libssl-dev',
-    'libxml2-dev',
-    'libxslt1-dev',
-    'libyaml-dev',
-    'python-dev',
-    'python-pip',
-    'python-setuptools',
-    'zlib1g-dev',
-]
-
-# ubuntu packages that should not be installed when deploying from git
-GIT_PACKAGE_BLACKLIST = [
-    'keystone',
-]
-
-
-SSH_USER = 'juju_keystone'
 if snap_install_requested():
     SNAP_BASE_DIR = "/snap/keystone/current"
     SNAP_COMMON_DIR = "/var/snap/keystone/common"
@@ -219,17 +164,9 @@ if snap_install_requested():
     STORED_DEFAULT_DOMAIN_ID = ("{}/keystone.default_domain_id"
                                 "".format(SNAP_LIB_DIR))
     SERVICE_PASSWD_PATH = '{}/services.passwd'.format(SNAP_LIB_DIR)
-
-    SSH_USER_HOME = '/home/{}'.format(SSH_USER)
-    SYNC_FLAGS_DIR = '{}/juju_sync_flags/'.format(SSH_USER_HOME)
-    SYNC_DIR = '{}/juju_sync/'.format(SSH_USER_HOME)
-    SSL_SYNC_ARCHIVE = os.path.join(SYNC_DIR, 'juju-ssl-sync.tar')
-    SSL_DIR = '{}/juju_ssl/'.format(SNAP_LIB_DIR)
-    PKI_CERTS_DIR = os.path.join(SSL_DIR, 'pki')
     POLICY_JSON = ('{}/keystone.conf.d/policy.json'
                    ''.format(SNAP_COMMON_KEYSTONE_DIR))
     BASE_SERVICES = ['snap.keystone.uwsgi', 'snap.keystone.nginx']
-    APACHE_SSL_DIR = '{}/keystone'.format(SSL_DIR)
 else:
     APACHE_SSL_DIR = '/etc/apache2/ssl/keystone'
     KEYSTONE_USER = 'keystone'
@@ -243,12 +180,6 @@ else:
     STORED_ADMIN_DOMAIN_ID = "/var/lib/keystone/keystone.admin_domain_id"
     STORED_DEFAULT_DOMAIN_ID = "/var/lib/keystone/keystone.default_domain_id"
     SERVICE_PASSWD_PATH = '/var/lib/keystone/services.passwd'
-
-    SYNC_FLAGS_DIR = '/var/lib/keystone/juju_sync_flags/'
-    SYNC_DIR = '/var/lib/keystone/juju_sync/'
-    SSL_SYNC_ARCHIVE = os.path.join(SYNC_DIR, 'juju-ssl-sync.tar')
-    SSL_DIR = '/var/lib/keystone/juju_ssl/'
-    PKI_CERTS_DIR = os.path.join(SSL_DIR, 'pki')
     POLICY_JSON = '/etc/keystone/policy.json'
     BASE_SERVICES = [
         'keystone',
@@ -281,8 +212,8 @@ KEYSTONE_USER = 'keystone'
 SSL_CA_NAME = 'Ubuntu Cloud'
 CLUSTER_RES = 'grp_ks_vips'
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
-SSL_SYNC_SEMAPHORE = threading.Semaphore()
 SSL_DIRS = [SSL_DIR, APACHE_SSL_DIR, SHIB_DIR, CA_CERT_PATH]
+CLUSTER_RES = 'grp_ks_vips'
 ADMIN_DOMAIN = 'admin_domain'
 ADMIN_PROJECT = 'admin'
 DEFAULT_DOMAIN = 'default'
@@ -297,12 +228,13 @@ BASE_RESOURCE_MAP = OrderedDict([
         'services': BASE_SERVICES,
         'contexts': [keystone_context.KeystoneContext(),
                      context.SharedDBContext(ssl_dir=KEYSTONE_CONF_DIR),
-                     context.PostgresqlDBContext(),
                      context.SyslogContext(),
                      keystone_context.HAProxyContext(),
                      context.BindHostContext(),
                      context.WorkerConfigContext(),
-                     context.MemcacheContext(package='keystone')],
+                     context.MemcacheContext(package='keystone'),
+                     keystone_context.KeystoneFIDServiceProviderContext(),
+                     keystone_context.WebSSOTrustedDashboardContext()],
     }),
     (KEYSTONE_LOGGER_CONF, {
         'contexts': [keystone_context.KeystoneLoggingContext()],
@@ -318,7 +250,6 @@ BASE_RESOURCE_MAP = OrderedDict([
         'contexts': [keystone_context.KeystoneContext(),
                      keystone_context.NginxSSLContext(),
                      context.SharedDBContext(ssl_dir=KEYSTONE_CONF_DIR),
-                     context.PostgresqlDBContext(),
                      context.SyslogContext(),
                      keystone_context.HAProxyContext(),
                      context.BindHostContext(),
@@ -328,7 +259,6 @@ BASE_RESOURCE_MAP = OrderedDict([
         'services': BASE_SERVICES,
         'contexts': [keystone_context.KeystoneContext(),
                      context.SharedDBContext(ssl_dir=KEYSTONE_CONF_DIR),
-                     context.PostgresqlDBContext(),
                      context.SyslogContext(),
                      keystone_context.HAProxyContext(),
                      keystone_context.NginxSSLContext(),
@@ -500,7 +430,7 @@ valid_services = {
 # The interface is said to be satisfied if anyone of the interfaces in the
 # list has a complete context.
 REQUIRED_INTERFACES = {
-    'database': ['shared-db', 'pgsql-db'],
+    'database': ['shared-db'],
 }
 
 
@@ -568,17 +498,17 @@ def resource_map():
                     svcs.remove('keystone')
                 if 'apache2' not in svcs:
                     svcs.append('apache2')
-            admin_script = os.path.join(git_determine_usr_bin(),
-                                        "keystone-wsgi-admin")
-            public_script = os.path.join(git_determine_usr_bin(),
-                                         "keystone-wsgi-public")
+
             # use single copy:
             keystoneContext = keystone_context.KeystoneContext()
             samlContext = keystone_context.SamlContext()
 
             resource_map[WSGI_KEYSTONE_API_CONF] = {
                 'contexts': [
-                    context.WSGIWorkerConfigContext(name="keystone", admin_script=admin_script, public_script=public_script),  # nopep8
+                    context.WSGIWorkerConfigContext(
+                        name="keystone",
+                        admin_script='/usr/bin/keystone-wsgi-admin',
+                        public_script='/usr/bin/keystone-wsgi-public'),
                     keystoneContext,
                     samlContext,
                     keystone_context.OidcContext()],
@@ -717,9 +647,6 @@ def determine_packages():
         return sorted(pkgs)
     else:
         packages = set(services()).union(BASE_PACKAGES)
-        if git_install_requested():
-            packages |= set(BASE_GIT_PACKAGES)
-            packages -= set(GIT_PACKAGE_BLACKLIST)
         if run_in_apache():
             packages.add('libapache2-mod-wsgi')
             if config('enable-oidc'):
@@ -1208,7 +1135,18 @@ def set_admin_passwd(passwd, user=None):
 
 def get_api_version():
     api_version = config('preferred-api-version')
-    if api_version not in [2, 3]:
+    cmp_release = CompareOpenStackReleases(
+        get_os_codename_install_source(config('openstack-origin'))
+    )
+    if not api_version:
+        # NOTE(jamespage): Queens dropped support for v2, so default
+        #                  to v3.
+        if cmp_release >= 'queens':
+            api_version = 3
+        else:
+            api_version = 2
+    if ((cmp_release < 'queens' and api_version not in [2, 3]) or
+            (cmp_release >= 'queens' and api_version != 3)):
         raise ValueError('Bad preferred-api-version')
     return api_version
 
@@ -1401,16 +1339,6 @@ def is_password_changed(username, passwd):
     return (_passwd is None or passwd != _passwd)
 
 
-def ensure_ssl_dirs():
-    """Ensure unison has access to these dirs."""
-    for path in [SYNC_FLAGS_DIR, SYNC_DIR]:
-        if not os.path.isdir(path):
-            mkdir(path, SSH_USER, KEYSTONE_USER, 0o775)
-        else:
-            ensure_permissions(path, user=SSH_USER, group=KEYSTONE_USER,
-                               perms=0o775)
-
-
 def ensure_permissions(path, user=None, group=None, perms=None, recurse=False,
                        maxdepth=50):
     """Set chownand chmod for path
@@ -1443,550 +1371,6 @@ def ensure_permissions(path, user=None, group=None, perms=None, recurse=False,
                                recurse=recurse, maxdepth=maxdepth - 1)
 
 
-def check_peer_actions():
-    """Honour service action requests from sync master.
-
-    Check for service action request flags, perform the action then delete the
-    flag.
-    """
-    restart = relation_get(attribute='restart-services-trigger')
-    if restart and os.path.isdir(SYNC_FLAGS_DIR):
-        for flagfile in glob.glob(os.path.join(SYNC_FLAGS_DIR, '*')):
-            flag = os.path.basename(flagfile)
-            key = re.compile("^(.+)?\.(.+)?\.(.+)")
-            res = re.search(key, flag)
-            if res:
-                source = res.group(1)
-                service = res.group(2)
-                action = res.group(3)
-            else:
-                key = re.compile("^(.+)?\.(.+)?")
-                res = re.search(key, flag)
-                source = res.group(1)
-                action = res.group(2)
-
-            # Don't execute actions requested by this unit.
-            if local_unit().replace('.', '-') != source:
-                if action == 'restart':
-                    log("Running action='%s' on service '%s'" %
-                        (action, service), level=DEBUG)
-                    service_restart(service)
-                elif action == 'start':
-                    log("Running action='%s' on service '%s'" %
-                        (action, service), level=DEBUG)
-                    service_start(service)
-                elif action == 'stop':
-                    log("Running action='%s' on service '%s'" %
-                        (action, service), level=DEBUG)
-                    service_stop(service)
-                elif action == 'update-ca-certificates':
-                    log("Running %s" % (action), level=DEBUG)
-                    subprocess.check_call(['update-ca-certificates'])
-                elif action == 'ensure-pki-permissions':
-                    log("Running %s" % (action), level=DEBUG)
-                    ensure_pki_dir_permissions()
-                else:
-                    log("Unknown action flag=%s" % (flag), level=WARNING)
-
-            try:
-                os.remove(flagfile)
-            except:
-                pass
-
-
-def create_peer_service_actions(action, services):
-    """Mark remote services for action.
-
-    Default action is restart. These action will be picked up by peer units
-    e.g. we may need to restart services on peer units after certs have been
-    synced.
-    """
-    for service in services:
-        flagfile = os.path.join(SYNC_FLAGS_DIR, '%s.%s.%s' %
-                                (local_unit().replace('/', '-'),
-                                 service.strip(), action))
-        log("Creating action %s" % (flagfile), level=DEBUG)
-        write_file(flagfile, content='', owner=SSH_USER, group=KEYSTONE_USER,
-                   perms=0o744)
-
-
-def create_peer_actions(actions):
-    for action in actions:
-        action = "%s.%s" % (local_unit().replace('/', '-'), action)
-        flagfile = os.path.join(SYNC_FLAGS_DIR, action)
-        log("Creating action %s" % (flagfile), level=DEBUG)
-        write_file(flagfile, content='', owner=SSH_USER, group=KEYSTONE_USER,
-                   perms=0o744)
-
-
-@retry_on_exception(3, base_delay=2, exc_type=subprocess.CalledProcessError)
-def unison_sync(paths_to_sync):
-    """Do unison sync and retry a few times if it fails since peers may not be
-    ready for sync.
-
-    Returns list of synced units or None if one or more peers was not synced.
-    """
-    log('Synchronizing CA (%s) to all peers.' % (', '.join(paths_to_sync)),
-        level=INFO)
-    keystone_gid = grp.getgrnam(KEYSTONE_USER).gr_gid
-
-    # NOTE(dosaboy): This will sync to all peers who have already provided
-    # their ssh keys. If any existing peers have not provided their keys yet,
-    # they will be silently ignored.
-    unison.sync_to_peers(peer_interface='cluster', paths=paths_to_sync,
-                         user=SSH_USER, verbose=True, gid=keystone_gid,
-                         fatal=True)
-
-    synced_units = peer_units()
-    if len(unison.collect_authed_hosts('cluster')) != len(synced_units):
-        log("Not all peer units synced due to missing public keys", level=INFO)
-        return None
-    else:
-        return synced_units
-
-
-def get_ssl_sync_request_units():
-    """Get list of units that have requested to be synced.
-
-    NOTE: this must be called from cluster relation context.
-    """
-    units = []
-    for unit in related_units():
-        settings = relation_get(unit=unit) or {}
-        rkeys = settings.keys()
-        key = re.compile("^ssl-sync-required-(.+)")
-        for rkey in rkeys:
-            res = re.search(key, rkey)
-            if res:
-                units.append(res.group(1))
-
-    return units
-
-
-def is_ssl_cert_master(votes=None):
-    """Return True if this unit is ssl cert master."""
-
-    votes = votes or get_ssl_cert_master_votes()
-    set_votes = set(votes)
-    # Discard unknown votes
-    if 'unknown' in set_votes:
-        set_votes.remove('unknown')
-
-    # This is the elected ssl-cert-master leader
-    if len(set_votes) == 1 and set_votes == set([local_unit()]):
-        log("This unit is the elected ssl-cert-master "
-            "{}".format(votes), level=DEBUG)
-        return True
-
-    # Contested election
-    if len(set_votes) > 1:
-        log("Did not get consensus from peers on who is ssl-cert-master "
-            "{}".format(votes), level=DEBUG)
-        return False
-
-    # Neither the elected ssl-cert-master leader nor the juju leader
-    if not is_leader():
-        return False
-    # Only the juju elected leader continues
-
-    # Singleton
-    if not peer_units():
-        log("This unit is a singleton and thefore ssl-cert-master",
-            level=DEBUG)
-        return True
-
-    # Early in the process and juju leader
-    if not set_votes:
-        log("This unit is the juju leader and there are no votes yet, "
-            "becoming the ssl-cert-master",
-            level=DEBUG)
-        return True
-    elif (len(set_votes) == 1 and set_votes != set([local_unit()]) and
-            is_leader()):
-        log("This unit is the juju leader but not yet ssl-cert-master "
-            "(current votes = {})".format(set_votes), level=DEBUG)
-        return False
-
-    # Should never reach here
-    log("Could not determine the ssl-cert-master. Missing edge case. "
-        "(current votes = {})".format(set_votes),
-        level=ERROR)
-    return False
-
-
-def get_ssl_cert_master_votes():
-    """Returns a list of unique votes."""
-    votes = []
-    # Gather election results from peers. These will need to be consistent.
-    for rid in relation_ids('cluster'):
-        for unit in related_units(rid):
-            m = relation_get(rid=rid, unit=unit,
-                             attribute='ssl-cert-master')
-            if m is not None:
-                votes.append(m)
-
-    return list(set(votes))
-
-
-def ensure_ssl_cert_master():
-    """Ensure that an ssl cert master has been elected.
-
-    Normally the cluster leader will take control but we allow for this to be
-    ignored since this could be called before the cluster is ready.
-    """
-    master_override = False
-    elect = is_elected_leader(CLUSTER_RES)
-
-    # If no peers we allow this unit to elect itsef as master and do
-    # sync immediately.
-    if not peer_units():
-        elect = True
-        master_override = True
-
-    if elect:
-        votes = get_ssl_cert_master_votes()
-        # We expect all peers to echo this setting
-        if not votes or 'unknown' in votes:
-            log("Notifying peers this unit is ssl-cert-master", level=INFO)
-            for rid in relation_ids('cluster'):
-                settings = {'ssl-cert-master': local_unit()}
-                relation_set(relation_id=rid, relation_settings=settings)
-
-            # Return now and wait for cluster-relation-changed (peer_echo) for
-            # sync.
-            return master_override
-        elif not is_ssl_cert_master(votes):
-            if not master_override:
-                log("Conscensus not reached - current master will need to "
-                    "release", level=INFO)
-
-            return master_override
-
-    if not is_ssl_cert_master():
-        log("Not ssl cert master - skipping sync", level=INFO)
-        return False
-
-    return True
-
-
-def stage_paths_for_sync(paths):
-    shutil.rmtree(SYNC_DIR)
-    ensure_ssl_dirs()
-    with tarfile.open(SSL_SYNC_ARCHIVE, 'w') as fd:
-        for path in paths:
-            if os.path.exists(path):
-                log("Adding path '%s' sync tarball" % (path), level=DEBUG)
-                fd.add(path)
-            else:
-                log("Path '%s' does not exist - not adding to sync "
-                    "tarball" % (path), level=INFO)
-
-    ensure_permissions(SYNC_DIR, user=SSH_USER, group=KEYSTONE_USER,
-                       perms=0o775, recurse=True)
-
-
-def is_pki_enabled():
-    enable_pki = config('enable-pki')
-    if enable_pki and bool_from_string(enable_pki):
-        return True
-
-    return False
-
-
-def ensure_pki_cert_paths():
-    certs = os.path.join(PKI_CERTS_DIR, 'certs')
-    privates = os.path.join(PKI_CERTS_DIR, 'privates')
-    not_exists = [p for p in [PKI_CERTS_DIR, certs, privates]
-                  if not os.path.exists(p)]
-    if not_exists:
-        log("Configuring token signing cert paths", level=DEBUG)
-        perms = 0o775
-        for path in not_exists:
-            if not os.path.isdir(path):
-                mkdir(path=path, owner=SSH_USER, group=KEYSTONE_USER,
-                      perms=perms)
-            else:
-                # Ensure accessible by ssh user and group (for sync).
-                ensure_permissions(path, user=SSH_USER, group=KEYSTONE_USER,
-                                   perms=perms)
-
-
-def ensure_pki_dir_permissions():
-    # Ensure accessible by unison user and group (for sync).
-    ensure_permissions(PKI_CERTS_DIR, user=SSH_USER, group=KEYSTONE_USER,
-                       perms=0o775, recurse=True)
-
-
-def update_certs_if_available(f):
-    def _inner_update_certs_if_available(*args, **kwargs):
-        path = None
-        for rid in relation_ids('cluster'):
-            path = relation_get(attribute='ssl-cert-available-updates',
-                                rid=rid, unit=local_unit())
-
-        if path and os.path.exists(path):
-            log("Updating certs from '%s'" % (path), level=DEBUG)
-            with tarfile.open(path) as fd:
-                files = ["/%s" % m.name for m in fd.getmembers()]
-                fd.extractall(path='/')
-
-            for syncfile in files:
-                ensure_permissions(syncfile, user=KEYSTONE_USER,
-                                   group=KEYSTONE_USER,
-                                   perms=0o744, recurse=True)
-
-            # Mark as complete
-            os.rename(path, "%s.complete" % (path))
-        else:
-            log("No cert updates available", level=DEBUG)
-
-        return f(*args, **kwargs)
-
-    return _inner_update_certs_if_available
-
-
-def synchronize_ca(fatal=False):
-    """Broadcast service credentials to peers.
-
-    By default a failure to sync is fatal and will result in a raised
-    exception.
-
-    This function uses a relation setting 'ssl-cert-master' to get some
-    leader stickiness while synchronisation is being carried out. This ensures
-    that the last host to create and broadcast cetificates has the option to
-    complete actions before electing the new leader as sync master.
-
-    Returns a dictionary of settings to be set on the cluster relation.
-    """
-    paths_to_sync = []
-    peer_service_actions = {'restart': []}
-    peer_actions = []
-
-    if bool_from_string(config('https-service-endpoints')):
-        log("Syncing all endpoint certs since https-service-endpoints=True",
-            level=DEBUG)
-        paths_to_sync.append(SSL_DIR)
-        paths_to_sync.append(CA_CERT_PATH)
-        # We need to restart peer apache services to ensure they have picked up
-        # new ssl keys.
-        peer_service_actions['restart'].append('apache2')
-        peer_actions.append('update-ca-certificates')
-
-    if bool_from_string(config('use-https')):
-        log("Syncing keystone-endpoint certs since use-https=True",
-            level=DEBUG)
-        paths_to_sync.append(SSL_DIR)
-        paths_to_sync.append(APACHE_SSL_DIR)
-        paths_to_sync.append(CA_CERT_PATH)
-        # We need to restart peer apache services to ensure they have picked up
-        # new ssl keys.
-        peer_service_actions['restart'].append('apache2')
-        peer_actions.append('update-ca-certificates')
-
-    # NOTE: certs needed for token signing e.g. pki and revocation list query.
-    log("Syncing token certs", level=DEBUG)
-    paths_to_sync.append(PKI_CERTS_DIR)
-    peer_actions.append('ensure-pki-permissions')
-
-    if not paths_to_sync:
-        log("Nothing to sync - skipping", level=DEBUG)
-        return {}
-
-    if not os.path.isdir(SYNC_FLAGS_DIR):
-        mkdir(SYNC_FLAGS_DIR, SSH_USER, KEYSTONE_USER, 0o775)
-
-    restart_trigger = None
-    for action, services in peer_service_actions.iteritems():
-        services = set(services)
-        if services:
-            restart_trigger = str(uuid.uuid4())
-            create_peer_service_actions(action, services)
-
-    create_peer_actions(peer_actions)
-
-    paths_to_sync = list(set(paths_to_sync))
-    stage_paths_for_sync(paths_to_sync)
-
-    hash1 = hashlib.sha256()
-    for path in paths_to_sync:
-        update_hash_from_path(hash1, path)
-
-    cluster_rel_settings = {'ssl-cert-available-updates': SSL_SYNC_ARCHIVE,
-                            'sync-hash': hash1.hexdigest()}
-
-    synced_units = unison_sync([SSL_SYNC_ARCHIVE, SYNC_FLAGS_DIR])
-    if synced_units:
-        # Format here needs to match that used when peers request sync
-        synced_units = [u.replace('/', '-') for u in synced_units]
-        ssl_synced_units = \
-            json.dumps(synced_units)
-        # NOTE(hopem): we pull this onto the leader settings to avoid
-        # unnecessary cluster relation noise. This is possible because the
-        # setting is only needed by the cert master.
-        if 'ssl-synced-units' not in leader_get():
-            rid = relation_ids('cluster')[0]
-            relation_set_and_migrate_to_leader(relation_id=rid,
-                                               **{'ssl-synced-units':
-                                                  ssl_synced_units})
-        else:
-            leader_set({'ssl-synced-units': ssl_synced_units})
-
-    if restart_trigger:
-        log("Sending restart-services-trigger=%s to all peers" %
-            (restart_trigger), level=DEBUG)
-        cluster_rel_settings['restart-services-trigger'] = restart_trigger
-
-    log("Sync complete", level=DEBUG)
-    return cluster_rel_settings
-
-
-def clear_ssl_synced_units():
-    """Clear the 'synced' units record on the cluster relation.
-
-    If new unit sync reauests are set this will ensure that a sync occurs when
-    the sync master receives the requests.
-    """
-    log("Clearing ssl sync units", level=DEBUG)
-    for rid in relation_ids('cluster'):
-        if 'ssl-synced-units' not in leader_get():
-            relation_set_and_migrate_to_leader(relation_id=rid,
-                                               **{'ssl-synced-units': None})
-        else:
-            leader_set({'ssl-synced-units': None})
-
-
-def update_hash_from_path(hash, path, recurse_depth=10):
-    """Recurse through path and update the provided hash for every file found.
-    """
-    if not recurse_depth:
-        log("Max recursion depth (%s) reached for update_hash_from_path() at "
-            "path='%s' - not going any deeper" % (recurse_depth, path),
-            level=WARNING)
-        return
-
-    for p in glob.glob("%s/*" % path):
-        if os.path.isdir(p):
-            update_hash_from_path(hash, p, recurse_depth=recurse_depth - 1)
-        else:
-            with open(p, 'r') as fd:
-                hash.update(fd.read())
-
-
-def synchronize_ca_if_changed(force=False, fatal=False):
-    """Decorator to perform ssl cert sync if decorated function modifies them
-    in any way.
-
-    If force is True a sync is done regardless.
-    """
-    def inner_synchronize_ca_if_changed1(f):
-        def inner_synchronize_ca_if_changed2(*args, **kwargs):
-            # Only sync master can do sync. Ensure (a) we are not nested and
-            # (b) a master is elected and we are it.
-            acquired = SSL_SYNC_SEMAPHORE.acquire(blocking=0)
-            try:
-                if not acquired:
-                    log("Nested sync - ignoring", level=DEBUG)
-                    return f(*args, **kwargs)
-
-                if not ensure_ssl_cert_master():
-                    log("Not ssl-cert-master - ignoring sync", level=DEBUG)
-                    return f(*args, **kwargs)
-
-                peer_settings = {}
-                if not force:
-                    hash1 = hashlib.sha256()
-                    for path in SSL_DIRS:
-                        update_hash_from_path(hash1, path)
-
-                    ret = f(*args, **kwargs)
-
-                    hash2 = hashlib.sha256()
-                    for path in SSL_DIRS:
-                        update_hash_from_path(hash2, path)
-
-                    if hash1.hexdigest() != hash2.hexdigest():
-                        log("SSL certs have changed - syncing peers",
-                            level=DEBUG)
-                        peer_settings = synchronize_ca(fatal=fatal)
-                    else:
-                        log("SSL certs have not changed - skipping sync",
-                            level=DEBUG)
-                else:
-                    ret = f(*args, **kwargs)
-                    log("Doing forced ssl cert sync", level=DEBUG)
-                    peer_settings = synchronize_ca(fatal=fatal)
-
-                # If we are the sync master but not leader, ensure we have
-                # relinquished master status.
-                cluster_rids = relation_ids('cluster')
-                if cluster_rids:
-                    master = relation_get('ssl-cert-master',
-                                          rid=cluster_rids[0],
-                                          unit=local_unit())
-                    if not is_leader() and master == local_unit():
-                        log("Re-electing ssl cert master.", level=INFO)
-                        peer_settings['ssl-cert-master'] = 'unknown'
-
-                    if peer_settings:
-                        relation_set(relation_id=cluster_rids[0],
-                                     relation_settings=peer_settings)
-
-                return ret
-            finally:
-                SSL_SYNC_SEMAPHORE.release()
-
-        return inner_synchronize_ca_if_changed2
-
-    return inner_synchronize_ca_if_changed1
-
-
-@synchronize_ca_if_changed(force=True, fatal=True)
-def force_ssl_sync():
-    """Force SSL sync to all peers.
-
-    This is useful if we need to relinquish ssl-cert-master status while
-    making sure that the new master has up-to-date certs.
-    """
-    return
-
-
-def ensure_ssl_dir():
-    """Ensure juju ssl dir exists and is unsion read/writable."""
-    # NOTE(thedac) Snap service restarts will override permissions
-    # in SNAP_LIB_DIR including SSL_DIR
-    perms = 0o775
-    if not os.path.isdir(SSL_DIR):
-        mkdir(SSL_DIR, SSH_USER, KEYSTONE_USER, perms)
-    else:
-        ensure_permissions(SSL_DIR, user=SSH_USER, group=KEYSTONE_USER,
-                           perms=perms)
-
-
-def get_ca(user=KEYSTONE_USER, group=KEYSTONE_USER):
-    """Initialize a new CA object if one hasn't already been loaded.
-
-    This will create a new CA or load an existing one.
-    """
-    if not ssl.CA_SINGLETON:
-        ensure_ssl_dir()
-        d_name = '_'.join(SSL_CA_NAME.lower().split(' '))
-        ca = ssl.JujuCA(name=SSL_CA_NAME, user=user, group=group,
-                        ca_dir=os.path.join(SSL_DIR,
-                                            '%s_intermediate_ca' % d_name),
-                        root_ca_dir=os.path.join(SSL_DIR,
-                                                 '%s_root_ca' % d_name))
-
-        # Ensure a master is elected. This should cover the following cases:
-        # * single unit == 'oldest' unit is elected as master
-        # * multi unit + not clustered == 'oldest' unit is elcted as master
-        # * multi unit + clustered == cluster leader is elected as master
-        ensure_ssl_cert_master()
-
-        ssl.CA_SINGLETON.append(ca)
-
-    return ssl.CA_SINGLETON[0]
-
-
 def relation_list(rid):
     cmd = [
         'relation-list',
@@ -2105,8 +1489,6 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
             relation_data["api_version"] = get_api_version()
             relation_data["admin_domain_id"] = leader_get(
                 attribute='admin_domain_id')
-            # Get and pass CA bundle settings
-            relation_data.update(get_ssl_ca_settings())
 
             # Allow the remote service to request creation of any additional
             # roles. Currently used by Horizon
@@ -2163,7 +1545,6 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
             endpoints[ep][x] = v
 
         services = []
-        https_cn = None
         for ep in endpoints:
             # weed out any unrelated relation stuff Juju might have added
             # by ensuring each possible endpiont has appropriate fields
@@ -2183,7 +1564,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
                 https_cns.append(urlparse.urlparse(ep['public_url']).hostname)
                 https_cns.append(urlparse.urlparse(ep['admin_url']).hostname)
 
-        service_username = '_'.join(services)
+        service_username = '_'.join(sorted(services))
 
         # If an admin username prefix is provided, ensure all services use it.
         prefix = config('service-admin-prefix')
@@ -2201,8 +1582,10 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
     service_password = create_service_credentials(service_username,
                                                   new_roles=roles)
     service_domain = None
+    service_domain_id = None
     if get_api_version() > 2:
         service_domain = SERVICE_DOMAIN
+        service_domain_id = manager.resolve_domain_id(SERVICE_DOMAIN)
     service_tenant = config('service-tenant')
     service_tenant_id = manager.resolve_tenant_id(service_tenant,
                                                   domain=service_domain)
@@ -2218,6 +1601,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
         "service_username": service_username,
         "service_password": service_password,
         "service_domain": service_domain,
+        "service_domain_id": service_domain_id,
         "service_tenant": service_tenant,
         "service_tenant_id": service_tenant_id,
         "https_keystone": '__null__',
@@ -2230,25 +1614,6 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
         "admin_domain_id": leader_get(attribute='admin_domain_id'),
     }
 
-    # generate or get a new cert/key for service if set to manage certs.
-    https_service_endpoints = config('https-service-endpoints')
-    if https_service_endpoints and bool_from_string(https_service_endpoints):
-        ca = get_ca(user=SSH_USER)
-        # NOTE(jamespage) may have multiple cns to deal with to iterate
-        https_cns = set(https_cns)
-        for https_cn in https_cns:
-            cert, key = ca.get_cert_and_key(common_name=https_cn)
-            relation_data['ssl_cert_{}'.format(https_cn)] = b64encode(cert)
-            relation_data['ssl_key_{}'.format(https_cn)] = b64encode(key)
-
-        # NOTE(jamespage) for backwards compatibility
-        cert, key = ca.get_cert_and_key(common_name=internal_cn)
-        relation_data['ssl_cert'] = b64encode(cert)
-        relation_data['ssl_key'] = b64encode(key)
-
-        # Get and pass CA bundle settings
-        relation_data.update(get_ssl_ca_settings())
-
     peer_store_and_set(relation_id=relation_id, **relation_data)
     # NOTE(dosaboy): '__null__' settings are for peer relation only so that
     # settings can flushed so we filter them out for non-peer relation.
@@ -2314,30 +1679,10 @@ def add_credentials_to_keystone(relation_id=None, remote_unit=None):
     }
     if domain:
         relation_data['domain'] = domain
-    # Get and pass CA bundle settings
-    relation_data.update(get_ssl_ca_settings())
 
     peer_store_and_set(relation_id=relation_id, **relation_data)
 
 
-def get_ssl_ca_settings():
-    """ Get the Certificate Authority settings required to use the CA.
-
-    :returns: Dictionary with https_keystone and ca_cert set
-    """
-    ca_data = {}
-    https_service_endpoints = config('https-service-endpoints')
-    if (https_service_endpoints and
-            bool_from_string(https_service_endpoints)):
-        # Pass CA cert as client will need it to
-        # verify https connections
-        ca = get_ca(user=SSH_USER)
-        ca_bundle = ca.get_ca_bundle()
-        ca_data['https_keystone'] = 'True'
-        ca_data['ca_cert'] = b64encode(ca_bundle)
-    return ca_data
-
-
 def get_protocol():
     """Determine the http protocol
 
@@ -2481,7 +1826,7 @@ def is_db_ready(use_current_context=False, db_rel=None):
     returns True otherwise False.
     """
     key = 'allowed_units'
-    db_rels = ['shared-db', 'pgsql-db']
+    db_rels = ['shared-db']
     if db_rel:
         db_rels = [db_rel]
 
@@ -2517,23 +1862,10 @@ def is_db_ready(use_current_context=False, db_rel=None):
     return not rel_has_units
 
 
-def determine_usr_bin():
-    """Return the /usr/bin path for Apache2 vhost config.
-    The /usr/bin path will be located in the virtualenv if the charm
-    is configured to deploy keystone from source.
-    """
-    if git_install_requested():
-        projects_yaml = config('openstack-origin-git')
-        projects_yaml = git_default_repos(projects_yaml)
-        return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
-    else:
-        return '/usr/bin'
-
-
 def determine_python_path():
     """Return the python-path
 
-    Determine if git or snap installed and return the appropriate python path.
+    Determine if snap is installed and return the appropriate python path.
     Returns None unless the charm if neither condition is true.
 
     :returns: string python path or None
@@ -2541,108 +1873,10 @@ def determine_python_path():
     _python_path = 'lib/python2.7/site-packages'
     if snap_install_requested():
         return os.path.join(SNAP_BASE_DIR, _python_path)
-    elif git_install_requested():
-        projects_yaml = config('openstack-origin-git')
-        projects_yaml = git_default_repos(projects_yaml)
-        return os.path.join(git_pip_venv_dir(projects_yaml), _python_path)
     else:
         return None
 
 
-def git_install(projects_yaml):
-    """Perform setup, and install git repos specified in yaml parameter."""
-    if git_install_requested():
-        git_pre_install()
-        projects_yaml = git_default_repos(projects_yaml)
-        git_clone_and_install(projects_yaml, core_project='keystone')
-        git_post_install(projects_yaml)
-
-
-def git_pre_install():
-    """Perform keystone pre-install setup."""
-    dirs = [
-        '/var/lib/keystone',
-        '/var/lib/keystone/cache',
-        '/var/log/keystone',
-    ]
-
-    logs = [
-        '/var/log/keystone/keystone.log',
-    ]
-
-    adduser('keystone', shell='/bin/bash', system_user=True,
-            home_dir='/var/lib/keystone')
-    add_group('keystone', system_group=True)
-    add_user_to_group('keystone', 'keystone')
-
-    for d in dirs:
-        mkdir(d, owner=KEYSTONE_USER, group=KEYSTONE_USER, perms=0o755,
-              force=False)
-
-    for l in logs:
-        write_file(l, '', owner=KEYSTONE_USER, group=KEYSTONE_USER,
-                   perms=0o600)
-
-
-def git_post_install(projects_yaml):
-    """Perform keystone post-install setup."""
-    http_proxy = git_yaml_value(projects_yaml, 'http_proxy')
-    if http_proxy:
-        pip_install('mysql-python', proxy=http_proxy,
-                    venv=git_pip_venv_dir(projects_yaml))
-    else:
-        pip_install('mysql-python',
-                    venv=git_pip_venv_dir(projects_yaml))
-
-    src_etc = os.path.join(git_src_dir(projects_yaml, 'keystone'), 'etc')
-    configs = {
-        'src': src_etc,
-        'dest': '/etc/keystone',
-    }
-
-    if os.path.exists(configs['dest']):
-        shutil.rmtree(configs['dest'])
-    shutil.copytree(configs['src'], configs['dest'])
-
-    # NOTE(coreycb): Need to find better solution than bin symlinks.
-    symlinks = [
-        {'src': os.path.join(git_pip_venv_dir(projects_yaml),
-                             'bin/keystone-manage'),
-         'link': '/usr/local/bin/keystone-manage'},
-    ]
-
-    for s in symlinks:
-        if os.path.lexists(s['link']):
-            os.remove(s['link'])
-        os.symlink(s['src'], s['link'])
-
-    render('git/logging.conf', '/etc/keystone/logging.conf', {}, perms=0o644)
-
-    bin_dir = os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
-    # The charm runs the keystone API under apache2 for openstack liberty
-    # onward.  Prior to liberty upstart is used.
-    if CompareOpenStackReleases(os_release('keystone')) < 'liberty':
-        keystone_context = {
-            'service_description': 'Keystone API server',
-            'service_name': 'Keystone',
-            'user_name': 'keystone',
-            'start_dir': '/var/lib/keystone',
-            'process_name': 'keystone',
-            'executable_name': os.path.join(bin_dir, 'keystone-all'),
-            'config_files': ['/etc/keystone/keystone.conf'],
-        }
-
-        keystone_context['log_file'] = '/var/log/keystone/keystone.log'
-        templates_dir = 'hooks/charmhelpers/contrib/openstack/templates'
-        templates_dir = os.path.join(charm_dir(), templates_dir)
-        render('git.upstart', '/etc/init/keystone.conf', keystone_context,
-               perms=0o644, templates_dir=templates_dir)
-
-    # Don't restart if the unit is supposed to be paused.
-    if not is_unit_paused_set():
-        service_restart(keystone_service())
-
-
 def get_optional_interfaces():
     """Return the optional interfaces that should be checked if the relavent
     relations have appeared.
@@ -2785,3 +2019,11 @@ def post_snap_install():
     if os.path.exists(PASTE_SRC):
         log("Perfoming post snap install tasks", INFO)
         shutil.copy(PASTE_SRC, PASTE_DST)
+
+
+def restart_keystone():
+    if not is_unit_paused_set():
+        if snap_install_requested():
+            service_restart('snap.keystone.*')
+        else:
+            service_restart(keystone_service())
diff --git a/hooks/websso-trusted-dashboard-relation-broken b/hooks/websso-trusted-dashboard-relation-broken
new file mode 120000
index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0
--- /dev/null
+++ b/hooks/websso-trusted-dashboard-relation-broken
@@ -0,0 +1 @@
+keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/websso-trusted-dashboard-relation-changed b/hooks/websso-trusted-dashboard-relation-changed
new file mode 120000
index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0
--- /dev/null
+++ b/hooks/websso-trusted-dashboard-relation-changed
@@ -0,0 +1 @@
+keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/websso-trusted-dashboard-relation-departed b/hooks/websso-trusted-dashboard-relation-departed
new file mode 120000
index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0
--- /dev/null
+++ b/hooks/websso-trusted-dashboard-relation-departed
@@ -0,0 +1 @@
+keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/websso-trusted-dashboard-relation-joined b/hooks/websso-trusted-dashboard-relation-joined
new file mode 120000
index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0
--- /dev/null
+++ b/hooks/websso-trusted-dashboard-relation-joined
@@ -0,0 +1 @@
+keystone_hooks.py
\ No newline at end of file
diff --git a/metadata.yaml b/metadata.yaml
index 431b688f85655318a51fac6cc8085bbec2a7affa..aad95f17986fe67f905910f3ee3805716167261b 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -11,8 +11,8 @@ tags:
   - misc
 series:
   - xenial
-  - artful
-  - zesty
+  - bionic
+  - cosmic
   - trusty
 extra-bindings:
   public:
@@ -33,14 +33,19 @@ provides:
 requires:
   shared-db:
     interface: mysql-shared
-  pgsql-db:
-    interface: pgsql
   ha:
     interface: hacluster
     scope: container
   domain-backend:
     interface: keystone-domain-backend
     scope: container
+  keystone-fid-service-provider:
+    interface: keystone-fid-service-provider
+    scope: container
+  websso-trusted-dashboard:
+    interface: websso-trusted-dashboard
+  certificates:
+    interface: tls-certificates
 peers:
   cluster:
     interface: keystone-ha
diff --git a/templates/git/logging.conf b/templates/git/logging.conf
deleted file mode 100644
index 7a538ae8f1ee5ebc27504b2a7719f5ac0f865119..0000000000000000000000000000000000000000
--- a/templates/git/logging.conf
+++ /dev/null
@@ -1,39 +0,0 @@
-[loggers]
-keys=root
-
-[formatters]
-keys=normal,normal_with_name,debug
-
-[handlers]
-keys=production,file,devel
-
-[logger_root]
-level=WARNING
-handlers=file
-
-[handler_production]
-class=handlers.SysLogHandler
-level=ERROR
-formatter=normal_with_name
-args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)
-
-[handler_file]
-class=FileHandler
-level=DEBUG
-formatter=normal_with_name
-args=('/var/log/keystone/keystone.log', 'a')
-
-[handler_devel]
-class=StreamHandler
-level=NOTSET
-formatter=debug
-args=(sys.stdout,)
-
-[formatter_normal]
-format=%(asctime)s %(levelname)s %(message)s
-
-[formatter_normal_with_name]
-format=(%(name)s): %(asctime)s %(levelname)s %(message)s
-
-[formatter_debug]
-format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s
diff --git a/templates/mitaka/keystone.conf b/templates/mitaka/keystone.conf
index 1034e0ae50e197ca99d02e6f35aefa23f44ad7a1..edf46e0c2fee86e8e08fdecfdafc3fc315e84842 100644
--- a/templates/mitaka/keystone.conf
+++ b/templates/mitaka/keystone.conf
@@ -67,7 +67,7 @@ driver = {{ assignment_backend }}
 [oauth1]
 
 [auth]
-methods = external,password,token,oauth1{% if enable_oidc %},oidc{% endif %}{% if enable_saml2 %},saml2{% endif %}
+methods = external,password,token,oauth1,totp{% if enable_oidc %},oidc{% endif %}{% if enable_saml2 %},saml2{% endif %}
 password = keystone.auth.plugins.password.Password
 token = keystone.auth.plugins.token.Token
 oauth1 = keystone.auth.plugins.oauth1.OAuth
@@ -136,3 +136,5 @@ group_allow_delete = False
 admin_project_domain_name = {{ admin_domain_name }}
 admin_project_name = admin
 {% endif -%}
+
+{% include "section-oslo-middleware" %}
diff --git a/templates/ocata/keystone.conf b/templates/ocata/keystone.conf
index 1154ebdad49c6035fe8c6a41fd97c03d7adef66d..666ac6c2057f492e3608a2732ff4d59538f4668a 100644
--- a/templates/ocata/keystone.conf
+++ b/templates/ocata/keystone.conf
@@ -67,7 +67,7 @@ driver = {{ assignment_backend }}
 [oauth1]
 
 [auth]
-methods = external,password,token,oauth1{% if enable_oidc %},oidc{% endif %}{% if enable_saml2 %},saml2{% endif %}
+methods = external,password,token,oauth1,mapped,openid,totp{% if enable_oidc %},oidc{% endif %}{% if enable_saml2 %},saml2{% endif %}
 password = keystone.auth.plugins.password.Password
 token = keystone.auth.plugins.token.Token
 oauth1 = keystone.auth.plugins.oauth1.OAuth
@@ -136,3 +136,7 @@ group_allow_delete = False
 admin_project_domain_name = {{ admin_domain_name }}
 admin_project_name = admin
 {% endif -%}
+
+{% include "parts/section-federation" %}
+
+{% include "section-oslo-middleware" %}
diff --git a/templates/openstack_https_frontend.conf b/templates/openstack_https_frontend.conf
new file mode 100644
index 0000000000000000000000000000000000000000..e0e42296cca9c1c2f9ec26b59403a76aa6221e0a
--- /dev/null
+++ b/templates/openstack_https_frontend.conf
@@ -0,0 +1,30 @@
+{% if endpoints -%}
+{% for ext_port in ext_ports -%}
+Listen {{ ext_port }}
+{% endfor -%}
+{% for address, endpoint, ext, int in endpoints -%}
+<VirtualHost {{ address }}:{{ ext }}>
+    ServerName {{ endpoint }}
+    SSLEngine on
+    SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
+    SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
+    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+    # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8
+    SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
+    ProxyPass / http://localhost:{{ int }}/
+    ProxyPassReverse / http://localhost:{{ int }}/
+    ProxyPreserveHost on
+    RequestHeader set X-Forwarded-Proto "https"
+    IncludeOptional /etc/apache2/mellon*/sp-location*.conf
+</VirtualHost>
+{% endfor -%}
+<Proxy *>
+    Order deny,allow
+    Allow from all
+</Proxy>
+<Location />
+    Order allow,deny
+    Allow from all
+</Location>
+{% endif -%}
diff --git a/templates/parts/section-federation b/templates/parts/section-federation
new file mode 100644
index 0000000000000000000000000000000000000000..65ee99edb6513be50507a625d99de050bbc4775b
--- /dev/null
+++ b/templates/parts/section-federation
@@ -0,0 +1,10 @@
+{% if trusted_dashboards %}
+[federation]
+{% for dashboard_url in trusted_dashboards -%}
+trusted_dashboard = {{ dashboard_url }}
+{% endfor -%}
+{% endif %}
+{% for sp in fid_sps -%}
+[{{ sp['protocol-name'] }}]
+remote_id_attribute = {{ sp['remote-id-attribute'] }}
+{% endfor -%}
diff --git a/templates/queens/policy.json b/templates/queens/policy.json
new file mode 100644
index 0000000000000000000000000000000000000000..1567e86674e83d7514d5d37240bb2b42006032b8
--- /dev/null
+++ b/templates/queens/policy.json
@@ -0,0 +1,260 @@
+{
+    "admin_required": "role:{{ admin_role }}",
+    "cloud_admin": "rule:admin_required and (is_admin_project:True or domain_id:{{ admin_domain_id }} or project_id:{{ service_tenant_id }})",
+    "service_role": "role:service",
+    "service_or_admin": "rule:admin_required or rule:service_role",
+    "owner": "user_id:%(user_id)s or user_id:%(target.token.user_id)s",
+    "admin_or_owner": "(rule:admin_required and domain_id:%(target.token.user.domain.id)s) or rule:owner",
+    "admin_and_matching_domain_id": "rule:admin_required and domain_id:%(domain_id)s",
+    "service_admin_or_owner": "rule:service_or_admin or rule:owner",
+
+    "default": "rule:admin_required",
+
+    "identity:get_region": "",
+    "identity:list_regions": "",
+    "identity:create_region": "rule:cloud_admin",
+    "identity:update_region": "rule:cloud_admin",
+    "identity:delete_region": "rule:cloud_admin",
+
+    "identity:get_service": "rule:admin_required",
+    "identity:list_services": "rule:admin_required",
+    "identity:create_service": "rule:cloud_admin",
+    "identity:update_service": "rule:cloud_admin",
+    "identity:delete_service": "rule:cloud_admin",
+
+    "identity:get_endpoint": "rule:admin_required",
+    "identity:list_endpoints": "rule:admin_required",
+    "identity:create_endpoint": "rule:cloud_admin",
+    "identity:update_endpoint": "rule:cloud_admin",
+    "identity:delete_endpoint": "rule:cloud_admin",
+
+    "identity:get_registered_limit": "",
+    "identity:list_registered_limits": "",
+    "identity:create_registered_limits": "rule:admin_required",
+    "identity:update_registered_limits": "rule:admin_required",
+    "identity:delete_registered_limit": "rule:admin_required",
+
+    "identity:get_limit": "",
+    "identity:list_limits": "",
+    "identity:create_limits": "rule:admin_required",
+    "identity:update_limits": "rule:admin_required",
+    "identity:delete_limit": "rule:admin_required",
+
+    "identity:get_domain": "rule:cloud_admin or rule:admin_and_matching_domain_id or token.project.domain.id:%(target.domain.id)s",
+    "identity:list_domains": "rule:cloud_admin",
+    "identity:create_domain": "rule:cloud_admin",
+    "identity:update_domain": "rule:cloud_admin",
+    "identity:delete_domain": "rule:cloud_admin",
+
+    "admin_and_matching_target_project_domain_id": "rule:admin_required and domain_id:%(target.project.domain_id)s",
+    "admin_and_matching_project_domain_id": "rule:admin_required and domain_id:%(project.domain_id)s",
+    "identity:get_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id or project_id:%(target.project.id)s",
+    "identity:list_projects": "rule:cloud_admin or rule:admin_and_matching_domain_id",
+    "identity:list_user_projects": "rule:owner or rule:admin_and_matching_domain_id",
+    "identity:create_project": "rule:cloud_admin or rule:admin_and_matching_project_domain_id",
+    "identity:update_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id",
+    "identity:delete_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id",
+    "identity:create_project_tag": "rule:admin_required",
+    "identity:delete_project_tag": "rule:admin_required",
+    "identity:get_project_tag": "rule:admin_required",
+    "identity:list_project_tags": "rule:admin_required",
+    "identity:delete_project_tags": "rule:admin_required",
+    "identity:update_project_tags": "rule:admin_required",
+
+    "admin_and_matching_target_user_domain_id": "rule:admin_required and domain_id:%(target.user.domain_id)s",
+    "admin_and_matching_user_domain_id": "rule:admin_required and domain_id:%(user.domain_id)s",
+    "identity:get_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id or rule:owner",
+    "identity:list_users": "rule:cloud_admin or rule:admin_and_matching_domain_id",
+    "identity:create_user": "rule:cloud_admin or rule:admin_and_matching_user_domain_id",
+    "identity:update_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id",
+    "identity:delete_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id",
+
+    "admin_and_matching_target_group_domain_id": "rule:admin_required and domain_id:%(target.group.domain_id)s",
+    "admin_and_matching_group_domain_id": "rule:admin_required and domain_id:%(group.domain_id)s",
+    "identity:get_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
+    "identity:list_groups": "rule:cloud_admin or rule:admin_and_matching_domain_id",
+    "identity:list_groups_for_user": "rule:owner or rule:admin_and_matching_target_user_domain_id",
+    "identity:create_group": "rule:cloud_admin or rule:admin_and_matching_group_domain_id",
+    "identity:update_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
+    "identity:delete_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
+    "identity:list_users_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
+    "identity:remove_user_from_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
+    "identity:check_user_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
+    "identity:add_user_to_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
+
+    "identity:get_credential": "rule:admin_required",
+    "identity:list_credentials": "rule:admin_required or user_id:%(user_id)s",
+    "identity:create_credential": "rule:admin_required",
+    "identity:update_credential": "rule:admin_required",
+    "identity:delete_credential": "rule:admin_required",
+
+    "identity:ec2_get_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
+    "identity:ec2_list_credentials": "rule:admin_required or rule:owner",
+    "identity:ec2_create_credential": "rule:admin_required or rule:owner",
+    "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
+
+    "identity:get_role": "rule:admin_required",
+    "identity:list_roles": "rule:admin_required",
+    "identity:create_role": "rule:cloud_admin",
+    "identity:update_role": "rule:cloud_admin",
+    "identity:delete_role": "rule:cloud_admin",
+
+    "identity:get_domain_role": "rule:cloud_admin or rule:get_domain_roles",
+    "identity:list_domain_roles": "rule:cloud_admin or rule:list_domain_roles",
+    "identity:create_domain_role": "rule:cloud_admin or rule:domain_admin_matches_domain_role",
+    "identity:update_domain_role": "rule:cloud_admin or rule:domain_admin_matches_target_domain_role",
+    "identity:delete_domain_role": "rule:cloud_admin or rule:domain_admin_matches_target_domain_role",
+    "domain_admin_matches_domain_role": "rule:admin_required and domain_id:%(role.domain_id)s",
+    "get_domain_roles": "rule:domain_admin_matches_target_domain_role or rule:project_admin_matches_target_domain_role",
+    "domain_admin_matches_target_domain_role": "rule:admin_required and domain_id:%(target.role.domain_id)s",
+    "project_admin_matches_target_domain_role": "rule:admin_required and project_domain_id:%(target.role.domain_id)s",
+    "list_domain_roles": "rule:domain_admin_matches_filter_on_list_domain_roles or rule:project_admin_matches_filter_on_list_domain_roles",
+    "domain_admin_matches_filter_on_list_domain_roles": "rule:admin_required and domain_id:%(domain_id)s",
+    "project_admin_matches_filter_on_list_domain_roles": "rule:admin_required and project_domain_id:%(domain_id)s",
+    "admin_and_matching_prior_role_domain_id": "rule:admin_required and domain_id:%(target.prior_role.domain_id)s",
+    "implied_role_matches_prior_role_domain_or_global": "(domain_id:%(target.implied_role.domain_id)s or None:%(target.implied_role.domain_id)s)",
+
+    "identity:get_implied_role": "rule:cloud_admin or rule:admin_and_matching_prior_role_domain_id",
+    "identity:list_implied_roles": "rule:cloud_admin or rule:admin_and_matching_prior_role_domain_id",
+    "identity:create_implied_role": "rule:cloud_admin or (rule:admin_and_matching_prior_role_domain_id and rule:implied_role_matches_prior_role_domain_or_global)",
+    "identity:delete_implied_role": "rule:cloud_admin or rule:admin_and_matching_prior_role_domain_id",
+    "identity:list_role_inference_rules": "rule:cloud_admin",
+    "identity:check_implied_role": "rule:cloud_admin or rule:admin_and_matching_prior_role_domain_id",
+
+    "identity:list_system_grants_for_user": "rule:admin_required",
+    "identity:check_system_grant_for_user": "rule:admin_required",
+    "identity:create_system_grant_for_user": "rule:admin_required",
+    "identity:revoke_system_grant_for_user": "rule:admin_required",
+
+    "identity:list_system_grants_for_group": "rule:admin_required",
+    "identity:check_system_grant_for_group": "rule:admin_required",
+    "identity:create_system_grant_for_group": "rule:admin_required",
+    "identity:revoke_system_grant_for_group": "rule:admin_required",
+
+    "identity:check_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
+    "identity:list_grants": "rule:cloud_admin or rule:domain_admin_for_list_grants or rule:project_admin_for_list_grants",
+    "identity:create_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
+    "identity:revoke_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
+    "domain_admin_for_grants": "rule:domain_admin_for_global_role_grants or rule:domain_admin_for_domain_role_grants",
+    "domain_admin_for_global_role_grants": "rule:admin_required and None:%(target.role.domain_id)s and rule:domain_admin_grant_match",
+    "domain_admin_for_domain_role_grants": "rule:admin_required and domain_id:%(target.role.domain_id)s and rule:domain_admin_grant_match",
+    "domain_admin_grant_match": "domain_id:%(domain_id)s or domain_id:%(target.project.domain_id)s",
+    "project_admin_for_grants": "rule:project_admin_for_global_role_grants or rule:project_admin_for_domain_role_grants",
+    "project_admin_for_global_role_grants": "rule:admin_required and None:%(target.role.domain_id)s and project_id:%(project_id)s",
+    "project_admin_for_domain_role_grants": "rule:admin_required and project_domain_id:%(target.role.domain_id)s and project_id:%(project_id)s",
+    "domain_admin_for_list_grants": "rule:admin_required and rule:domain_admin_grant_match",
+    "project_admin_for_list_grants": "rule:admin_required and project_id:%(project_id)s",
+
+    "admin_on_domain_filter": "rule:admin_required and domain_id:%(scope.domain.id)s",
+    "admin_on_project_filter": "rule:admin_required and project_id:%(scope.project.id)s",
+    "admin_on_domain_of_project_filter": "rule:admin_required and domain_id:%(target.project.domain_id)s",
+    "identity:list_role_assignments": "rule:cloud_admin or rule:admin_on_domain_filter or rule:admin_on_project_filter",
+    "identity:list_role_assignments_for_tree": "rule:cloud_admin or rule:admin_on_domain_of_project_filter",
+    "identity:get_policy": "rule:cloud_admin",
+    "identity:list_policies": "rule:cloud_admin",
+    "identity:create_policy": "rule:cloud_admin",
+    "identity:update_policy": "rule:cloud_admin",
+    "identity:delete_policy": "rule:cloud_admin",
+
+    "identity:check_token": "rule:admin_or_owner",
+    "identity:validate_token": "rule:service_admin_or_owner",
+    "identity:validate_token_head": "rule:service_or_admin",
+    "identity:revocation_list": "rule:service_or_admin",
+    "identity:revoke_token": "rule:admin_or_owner",
+
+    "identity:create_trust": "user_id:%(trust.trustor_user_id)s",
+    "identity:list_trusts": "",
+    "identity:list_roles_for_trust": "",
+    "identity:get_role_for_trust": "",
+    "identity:delete_trust": "",
+    "identity:get_trust": "",
+
+    "identity:create_consumer": "rule:admin_required",
+    "identity:get_consumer": "rule:admin_required",
+    "identity:list_consumers": "rule:admin_required",
+    "identity:delete_consumer": "rule:admin_required",
+    "identity:update_consumer": "rule:admin_required",
+
+    "identity:authorize_request_token": "rule:admin_required",
+    "identity:list_access_token_roles": "rule:admin_required",
+    "identity:get_access_token_role": "rule:admin_required",
+    "identity:list_access_tokens": "rule:admin_required",
+    "identity:get_access_token": "rule:admin_required",
+    "identity:delete_access_token": "rule:admin_required",
+
+    "identity:list_projects_for_endpoint": "rule:admin_required",
+    "identity:add_endpoint_to_project": "rule:admin_required",
+    "identity:check_endpoint_in_project": "rule:admin_required",
+    "identity:list_endpoints_for_project": "rule:admin_required",
+    "identity:remove_endpoint_from_project": "rule:admin_required",
+
+    "identity:create_endpoint_group": "rule:admin_required",
+    "identity:list_endpoint_groups": "rule:admin_required",
+    "identity:get_endpoint_group": "rule:admin_required",
+    "identity:update_endpoint_group": "rule:admin_required",
+    "identity:delete_endpoint_group": "rule:admin_required",
+    "identity:list_projects_associated_with_endpoint_group": "rule:admin_required",
+    "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required",
+    "identity:get_endpoint_group_in_project": "rule:admin_required",
+    "identity:list_endpoint_groups_for_project": "rule:admin_required",
+    "identity:add_endpoint_group_to_project": "rule:admin_required",
+    "identity:remove_endpoint_group_from_project": "rule:admin_required",
+
+    "identity:create_identity_provider": "rule:cloud_admin",
+    "identity:list_identity_providers": "rule:cloud_admin",
+    "identity:get_identity_provider": "rule:cloud_admin",
+    "identity:update_identity_provider": "rule:cloud_admin",
+    "identity:delete_identity_provider": "rule:cloud_admin",
+
+    "identity:create_protocol": "rule:cloud_admin",
+    "identity:update_protocol": "rule:cloud_admin",
+    "identity:get_protocol": "rule:cloud_admin",
+    "identity:list_protocols": "rule:cloud_admin",
+    "identity:delete_protocol": "rule:cloud_admin",
+
+    "identity:create_mapping": "rule:cloud_admin",
+    "identity:get_mapping": "rule:cloud_admin",
+    "identity:list_mappings": "rule:cloud_admin",
+    "identity:delete_mapping": "rule:cloud_admin",
+    "identity:update_mapping": "rule:cloud_admin",
+
+    "identity:create_service_provider": "rule:cloud_admin",
+    "identity:list_service_providers": "rule:cloud_admin",
+    "identity:get_service_provider": "rule:cloud_admin",
+    "identity:update_service_provider": "rule:cloud_admin",
+    "identity:delete_service_provider": "rule:cloud_admin",
+
+    "identity:get_auth_catalog": "",
+    "identity:get_auth_projects": "",
+    "identity:get_auth_domains": "",
+    "identity:get_auth_system": "",
+
+    "identity:list_projects_for_user": "",
+    "identity:list_domains_for_user": "",
+
+    "identity:list_revoke_events": "rule:service_or_admin",
+
+    "identity:create_policy_association_for_endpoint": "rule:cloud_admin",
+    "identity:check_policy_association_for_endpoint": "rule:cloud_admin",
+    "identity:delete_policy_association_for_endpoint": "rule:cloud_admin",
+    "identity:create_policy_association_for_service": "rule:cloud_admin",
+    "identity:check_policy_association_for_service": "rule:cloud_admin",
+    "identity:delete_policy_association_for_service": "rule:cloud_admin",
+    "identity:create_policy_association_for_region_and_service": "rule:cloud_admin",
+    "identity:check_policy_association_for_region_and_service": "rule:cloud_admin",
+    "identity:delete_policy_association_for_region_and_service": "rule:cloud_admin",
+    "identity:get_policy_for_endpoint": "rule:cloud_admin",
+    "identity:list_endpoints_for_policy": "rule:cloud_admin",
+
+    "identity:create_domain_config": "rule:cloud_admin",
+    "identity:get_domain_config": "rule:cloud_admin",
+    "identity:get_security_compliance_domain_config": "",
+    "identity:update_domain_config": "rule:cloud_admin",
+    "identity:delete_domain_config": "rule:cloud_admin",
+    "identity:get_domain_config_default": "rule:cloud_admin",
+
+    "identity:get_application_credential": "rule:admin_or_owner",
+    "identity:list_application_credentials": "rule:admin_or_owner",
+    "identity:create_application_credential": "rule:admin_or_owner",
+    "identity:delete_application_credential": "rule:admin_or_owner"
+}
diff --git a/templates/wsgi-openstack-api.conf b/templates/wsgi-openstack-api.conf
new file mode 100644
index 0000000000000000000000000000000000000000..942e2b29d7ceb9a1edba938098d5839f627c2b6e
--- /dev/null
+++ b/templates/wsgi-openstack-api.conf
@@ -0,0 +1,94 @@
+# Configuration file maintained by Juju. Local changes may be overwritten.
+
+{% if port -%}
+Listen {{ port }}
+{% endif -%}
+
+{% if admin_port -%}
+Listen {{ admin_port }}
+{% endif -%}
+
+{% if public_port -%}
+Listen {{ public_port }}
+{% endif -%}
+
+{% if port -%}
+<VirtualHost *:{{ port }}>
+    WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}
+    WSGIScriptAlias / {{ script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+    IncludeOptional /etc/apache2/mellon*/sp-location*.conf
+</VirtualHost>
+{% endif -%}
+
+{% if admin_port -%}
+<VirtualHost *:{{ admin_port }}>
+    WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}-admin
+    WSGIScriptAlias / {{ admin_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+    IncludeOptional /etc/apache2/mellon*/sp-location*.conf
+</VirtualHost>
+{% endif -%}
+
+{% if public_port -%}
+<VirtualHost *:{{ public_port }}>
+    WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ service_name }}-public
+    WSGIScriptAlias / {{ public_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/{{ service_name }}_error.log
+    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+    IncludeOptional /etc/apache2/mellon*/sp-location*.conf
+</VirtualHost>
+{% endif -%}
diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py
index 7216dafb75f6974fc80fe20576c44ddf82c2506e..70b114411879dce614255a9024954b40198ca964 100644
--- a/tests/basic_deployment.py
+++ b/tests/basic_deployment.py
@@ -21,7 +21,6 @@ Basic keystone amulet functional tests.
 import amulet
 import json
 import os
-import yaml
 
 from charmhelpers.contrib.openstack.amulet.deployment import (
     OpenStackAmuletDeployment
@@ -49,9 +48,8 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
         """Deploy the entire test environment."""
         super(KeystoneBasicDeployment, self).__init__(series, openstack,
                                                       source, stable)
-        self.keystone_num_units = 3
-        self.keystone_api_version = 2
-        self.git = git
+
+        self._initialize_deployment_differences()
 
         self._setup_test_object(snap_source)
         self._add_services()
@@ -65,6 +63,11 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
 
         self.d.sentry.wait()
         self._initialize_tests()
+        self._initialize_test_differences()
+
+    def _initialize_deployment_differences(self):
+        self.keystone_num_units = 3
+        self.keystone_api_version = 2
 
     def _setup_test_object(self, snap_source):
         self.snap_source = snap_source
@@ -142,33 +145,6 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
             'preferred-api-version': self.keystone_api_version,
         })
 
-        if self.git:
-            amulet_http_proxy = os.environ.get('AMULET_HTTP_PROXY')
-
-            reqs_repo = 'git://github.com/openstack/requirements'
-            keystone_repo = 'git://github.com/openstack/keystone'
-            if self._get_openstack_release() == self.trusty_icehouse:
-                reqs_repo = 'git://github.com/coreycb/requirements'
-                keystone_repo = 'git://github.com/coreycb/keystone'
-
-            branch = 'stable/' + self._get_openstack_release_string()
-
-            openstack_origin_git = {
-                'repositories': [
-                    {'name': 'requirements',
-                     'repository': reqs_repo,
-                     'branch': branch},
-                    {'name': 'keystone',
-                     'repository': keystone_repo,
-                     'branch': branch},
-                ],
-                'directory': '/mnt/openstack-git',
-                'http_proxy': amulet_http_proxy,
-                'https_proxy': amulet_http_proxy,
-            }
-            self.keystone_config['openstack-origin-git'] = \
-                yaml.dump(openstack_origin_git)
-
         pxc_config = {
             'dataset-size': '25%',
             'max-connections': 1000,
@@ -210,22 +186,23 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
 
     def set_api_version(self, api_version):
         # Avoid costly settings if we are already at the correct api_version
-        if not self.api_change_required(api_version):
-            return True
-        u.log.debug('Setting preferred-api-version={}'.format(api_version))
-        se_rels = []
-        for i in range(0, self.keystone_num_units):
-            se_rels.append(
-                (self.keystone_sentries[i], 'cinder:identity-service'),
-            )
-        # Make config change, wait for propagation
-        u.keystone_configure_api_version(se_rels, self, api_version)
+        if self.api_change_required(api_version):
+            u.log.debug('Setting preferred-api-version={}'.format(api_version))
+            se_rels = []
+            for i in range(0, self.keystone_num_units):
+                se_rels.append(
+                    (self.keystone_sentries[i], 'cinder:identity-service'),
+                )
+            # Make config change, wait for propagation
+            u.keystone_configure_api_version(se_rels, self, api_version)
 
-        # Success if we get here, get and store client.
+        # Store in self.keystone_client
         if api_version == 2:
             self.keystone_v2 = self.get_keystone_client(api_version=2)
+            self.keystone_client = self.keystone_v2
         else:
             self.keystone_v3 = self.get_keystone_client(api_version=3)
+            self.keystone_client = self.keystone_v3
         self.keystone_api_version = api_version
 
     def get_keystone_client(self, api_version=None, keystone_ip=None):
@@ -250,41 +227,42 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
         self.demo_tenant = 'demoTenant'
         self.demo_role = 'demoRole'
         self.demo_user = 'demoUser'
-        if not u.tenant_exists(self.keystone_v2, self.demo_tenant):
-            tenant = self.keystone_v2.tenants.create(
+        if not u.tenant_exists(self.keystone_client, self.demo_tenant):
+            tenant = self.keystone_client.tenants.create(
                 tenant_name=self.demo_tenant,
                 description='demo tenant',
                 enabled=True)
-            self.keystone_v2.roles.create(name=self.demo_role)
-            self.keystone_v2.users.create(name=self.demo_user,
-                                          password='password',
-                                          tenant_id=tenant.id,
-                                          email='demo@demo.com')
+            self.keystone_client.roles.create(name=self.demo_role)
+            self.keystone_client.users.create(name=self.demo_user,
+                                              password='password',
+                                              tenant_id=tenant.id,
+                                              email='demo@demo.com')
 
         # Authenticate keystone demo
         self.keystone_demo = u.authenticate_keystone_user(
-            self.keystone_v2, user=self.demo_user,
+            self.keystone_client, user=self.demo_user,
             password='password', tenant=self.demo_tenant)
 
     def create_users_v3(self):
         # Create a demo tenant/role/user
         self.demo_project = 'demoProject'
         self.demo_user_v3 = 'demoUserV3'
+        self.demo_role = 'demoRoleV3'
         self.demo_domain_admin = 'demoDomainAdminV3'
         self.demo_domain = 'demoDomain'
         try:
-            domain = self.keystone_v3.domains.find(name=self.demo_domain)
+            domain = self.keystone_client.domains.find(name=self.demo_domain)
         except keystoneclient.exceptions.NotFound:
-            domain = self.keystone_v3.domains.create(
+            domain = self.keystone_client.domains.create(
                 self.demo_domain,
                 description='Demo Domain',
                 enabled=True
             )
 
         try:
-            self.keystone_v3.projects.find(name=self.demo_project)
+            self.keystone_client.projects.find(name=self.demo_project)
         except keystoneclient.exceptions.NotFound:
-            self.keystone_v3.projects.create(
+            self.keystone_client.projects.create(
                 self.demo_project,
                 domain,
                 description='Demo Project',
@@ -292,14 +270,14 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
             )
 
         try:
-            self.keystone_v3.roles.find(name=self.demo_role)
+            self.keystone_client.roles.find(name=self.demo_role)
         except keystoneclient.exceptions.NotFound:
-            self.keystone_v3.roles.create(name=self.demo_role)
+            self.keystone_client.roles.create(name=self.demo_role)
 
-        if not self.find_keystone_v3_user(self.keystone_v3,
+        if not self.find_keystone_v3_user(self.keystone_client,
                                           self.demo_user_v3,
                                           self.demo_domain):
-            self.keystone_v3.users.create(
+            self.keystone_client.users.create(
                 self.demo_user_v3,
                 domain=domain.id,
                 project=self.demo_project,
@@ -309,14 +287,14 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
                 enabled=True)
 
         try:
-            self.keystone_v3.roles.find(name='Admin')
+            self.keystone_client.roles.find(name='Admin')
         except keystoneclient.exceptions.NotFound:
-            self.keystone_v3.roles.create(name='Admin')
+            self.keystone_client.roles.create(name='Admin')
 
-        if not self.find_keystone_v3_user(self.keystone_v3,
+        if not self.find_keystone_v3_user(self.keystone_client,
                                           self.demo_domain_admin,
                                           self.demo_domain):
-            user = self.keystone_v3.users.create(
+            user = self.keystone_client.users.create(
                 self.demo_domain_admin,
                 domain=domain.id,
                 project=self.demo_project,
@@ -325,10 +303,10 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
                 description='Demo Admin',
                 enabled=True)
 
-            role = self.keystone_v3.roles.find(name='Admin')
-            u.log.debug("self.keystone_v3.roles.grant('{}', user='{}', "
+            role = self.keystone_client.roles.find(name='Admin')
+            u.log.debug("self.keystone_client.roles.grant('{}', user='{}', "
                         "domain='{}')".format(role.id, user.id, domain.id))
-            self.keystone_v3.roles.grant(
+            self.keystone_client.roles.grant(
                 role.id,
                 user=user.id,
                 domain=domain.id)
@@ -348,6 +326,8 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
         self.keystone_ip = self.keystone_sentries[0].relation(
             'shared-db',
             'percona-cluster:shared-db')['private-address']
+
+    def _initialize_test_differences(self):
         self.set_api_version(2)
         self.create_users_v2()
 
@@ -398,7 +378,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
 
     def test_102_keystone_tenants(self):
         self.set_api_version(2)
-        self.validate_keystone_tenants(self.keystone_v2)
+        self.validate_keystone_tenants(self.keystone_client)
 
     def validate_keystone_roles(self, client):
         """Verify all existing roles."""
@@ -417,7 +397,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
 
     def test_104_keystone_roles(self):
         self.set_api_version(2)
-        self.validate_keystone_roles(self.keystone_v2)
+        self.validate_keystone_roles(self.keystone_client)
 
     def validate_keystone_users(self, client):
         """Verify all existing roles."""
@@ -426,7 +406,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
         if self._get_openstack_release() < self.xenial_pike:
             cinder_user = 'cinder_cinderv2'
         else:
-            cinder_user = 'cinderv3_cinderv2'
+            cinder_user = 'cinderv2_cinderv3'
         base = [
             {'name': 'demoUser',
              'enabled': True,
@@ -473,7 +453,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
 
     def test_106_keystone_users(self):
         self.set_api_version(2)
-        self.validate_keystone_users(self.keystone_v2)
+        self.validate_keystone_users(self.keystone_client)
 
     def is_liberty_or_newer(self):
         # os_release = self._get_openstack_release_string()
@@ -498,15 +478,15 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
     def test_112_keystone_list_resources(self):
         if self.is_mitaka_or_newer():
             self.set_api_version(3)
-            self.validate_keystone_tenants(self.keystone_v3)
-            self.validate_keystone_roles(self.keystone_v3)
-            self.validate_keystone_users(self.keystone_v3)
+            self.validate_keystone_tenants(self.keystone_client)
+            self.validate_keystone_roles(self.keystone_client)
+            self.validate_keystone_users(self.keystone_client)
 
     def test_118_keystone_create_users(self):
         if self.is_mitaka_or_newer():
             self.set_api_version(3)
             self.create_users_v3()
-            actual_user = self.find_keystone_v3_user(self.keystone_v3,
+            actual_user = self.find_keystone_v3_user(self.keystone_client,
                                                      self.demo_user_v3,
                                                      self.demo_domain)
             assert actual_user is not None
@@ -527,7 +507,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
         if self.is_mitaka_or_newer():
             self.set_api_version(3)
             self.create_users_v3()
-            actual_domain = self.keystone_v3.domains.find(
+            actual_domain = self.keystone_client.domains.find(
                 name=self.demo_domain
             )
             expect = {
@@ -626,7 +606,8 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
     def test_140_keystone_endpoint(self):
         """Verify the keystone endpoint data."""
         u.log.debug('Checking keystone api endpoint data...')
-        endpoints = self.keystone_v2.endpoints.list()
+        self.set_api_version(2)
+        endpoints = self.keystone_client.endpoints.list()
         admin_port = '35357'
         internal_port = public_port = '5000'
         expected = {
@@ -646,7 +627,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
     def test_142_cinder_endpoint(self):
         """Verify the cinder endpoint data."""
         u.log.debug('Checking cinder endpoint...')
-        endpoints = self.keystone_v2.endpoints.list()
+        endpoints = self.keystone_client.endpoints.list()
         admin_port = internal_port = public_port = '8776'
         expected = {
             'id': u.not_null,
@@ -713,7 +694,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
             'service_host': u.valid_ip
         }
         if self._get_openstack_release() >= self.xenial_pike:
-            expected['service_username'] = 'cinderv3_cinderv2'
+            expected['service_username'] = 'cinderv2_cinderv3'
         for unit in self.keystone_sentries:
             ret = u.validate_relation_data(unit, relation, expected)
             if ret:
@@ -924,6 +905,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
                 amulet.raise_status(amulet.FAIL, msg=msg)
 
         self.d.configure(juju_service, set_default)
+        self._auto_wait_for_status(exclude_services=self.exclude_services)
 
         u.log.debug('OK')
 
@@ -976,11 +958,11 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
                 message="Unit is ready",
                 timeout=timeout,
                 include_only=['keystone'])
-            domain = self.keystone_v3.domains.find(name='admin_domain')
-            v3_admin_user = self.keystone_v3.users.list(domain=domain)[0]
+            domain = self.keystone_client.domains.find(name='admin_domain')
+            v3_admin_user = self.keystone_client.users.list(domain=domain)[0]
             u.log.debug(v3_admin_user)
-            self.keystone_v3.users.update(user=v3_admin_user,
-                                          password='wrongpass')
+            self.keystone_client.users.update(user=v3_admin_user,
+                                              password='wrongpass')
             u.log.debug('Removing keystone percona-cluster relation')
             self.d.unrelate('keystone:shared-db', 'percona-cluster:shared-db')
             self.d.sentry.wait(timeout=timeout)
@@ -1005,3 +987,179 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
                     amulet.FAIL,
                     msg="Admin user password not reset")
             u.log.debug('OK')
+
+
+class KeystoneV3Deployment(KeystoneBasicDeployment):
+    """Amulet tests on a Keystone V3-only deployment."""
+
+    def _initialize_deployment_differences(self):
+        self.keystone_num_units = 3
+        self.keystone_api_version = 3
+
+    def _initialize_test_differences(self):
+        self.keystone_client = self.get_keystone_client(api_version=3)
+        self.create_users_v3()
+
+    def api_change_required(self, api_version):
+        u.log.warn('This is a Keystone V3 only deployment.')
+        return False
+
+    def set_api_version(self, api_version):
+        u.log.warn('This is a Keystone V3 only deployment. '
+                   'Ignoring request for api version 2')
+
+    def validate_keystone_tenants(self, client):
+        """Verify all existing tenants."""
+        u.log.debug('Checking keystone tenants...')
+        expected = [
+            {'name': 'services',
+             'enabled': True,
+             'description': 'Created by Juju',
+             'id': u.not_null},
+            {'name': 'demoProject',
+             'enabled': True,
+             'description': 'Demo Project',
+             'id': u.not_null},
+            {'name': 'admin',
+             'enabled': True,
+             'description': 'Created by Juju',
+             'id': u.not_null}
+        ]
+        actual = client.projects.list()
+
+        ret = u.validate_tenant_data(expected, actual)
+        if ret:
+            amulet.raise_status(amulet.FAIL, msg=ret)
+
+    def validate_keystone_roles(self, client):
+        """Verify all existing roles."""
+        u.log.debug('Checking keystone roles...')
+        expected = [
+            {'name': 'demoRoleV3',
+             'id': u.not_null},
+            {'name': 'Admin',
+             'id': u.not_null}
+        ]
+        actual = client.roles.list()
+
+        ret = u.validate_role_data(expected, actual)
+        if ret:
+            amulet.raise_status(amulet.FAIL, msg=ret)
+
+    def validate_keystone_users(self, client):
+        """Verify all existing users."""
+        u.log.debug('Checking keystone users...')
+
+        if self._get_openstack_release() < self.xenial_pike:
+            cinder_user = 'cinder_cinderv2'
+        else:
+            cinder_user = 'cinderv2_cinderv3'
+        base = [
+            {'name': 'demoUserV3',
+             'enabled': True,
+             'id': u.not_null,
+             'email': 'demov3@demo.com'},
+            {'name': 'admin',
+             'enabled': True,
+             'id': u.not_null,
+             'email': 'juju@localhost'},
+            {'name': cinder_user,
+             'enabled': True,
+             'id': u.not_null,
+             'email': u'juju@localhost'}
+        ]
+        expected = []
+        for user_info in base:
+            user_info['default_project_id'] = u.not_null
+            expected.append(user_info)
+        # Ensure list is scoped to the default domain
+        # when checking v3 users (v2->v3 upgrade check)
+        actual = client.users.list(
+            domain=client.domains.find(name=self.DEFAULT_DOMAIN).id
+        )
+        actual += client.users.list(
+            domain=client.domains.find(name=self.demo_domain).id)
+        actual += client.users.list(
+            domain=client.domains.find(name='admin_domain').id)
+        ret = u.validate_user_data(expected, actual,
+                                   api_version=self.keystone_api_version)
+        if ret:
+            amulet.raise_status(amulet.FAIL, msg=ret)
+
+    def test_138_service_catalog(self):
+        """Verify that the service catalog endpoint data is valid."""
+        u.log.debug('Checking keystone service catalog...')
+        expected = {
+            u'identity': [{u'id': u.not_null,
+                           u'interface': u'admin',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u.valid_url},
+                          {u'id': u.not_null,
+                           u'interface': u'public',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u.valid_url},
+                          {u'id': u.not_null,
+                           u'interface': u'internal',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u.valid_url}],
+
+            u'volumev2': [{u'id': u.not_null,
+                           u'interface': u'admin',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u.valid_url},
+                          {u'id': u.not_null,
+                           u'interface': u'public',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u.valid_url},
+                          {u'id': u.not_null,
+                           u'interface': u'internal',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u.valid_url}]}
+
+        actual = self.keystone_client.service_catalog.get_endpoints()
+        ret = u.validate_v3_svc_catalog_endpoint_data(expected, actual)
+        if ret:
+            amulet.raise_status(amulet.FAIL, msg=ret)
+
+    def test_140_keystone_endpoint(self):
+        """Verify the keystone endpoint data."""
+        u.log.debug('Checking keystone api endpoint data...')
+        admin_port = '35357'
+        internal_port = public_port = '5000'
+        expected = {'id': u.not_null,
+                    'region': 'RegionOne',
+                    'region_id': 'RegionOne',
+                    'interface': u.not_null,
+                    'url': u.valid_url,
+                    'service_id': u.not_null}
+
+        endpoints = self.keystone_client.endpoints.list()
+        ret = u.validate_v3_endpoint_data(endpoints, admin_port, internal_port,
+                                          public_port, expected)
+        if ret:
+            amulet.raise_status(amulet.FAIL,
+                                msg='keystone endpoint: {}'.format(ret))
+
+    def test_142_cinder_endpoint(self):
+        """Verify the cinder endpoint data."""
+        u.log.debug('Checking cinder endpoint...')
+        admin_port = internal_port = public_port = '8776'
+        expected = {'id': u.not_null,
+                    'region': 'RegionOne',
+                    'region_id': 'RegionOne',
+                    'interface': u.not_null,
+                    'url': u.valid_url,
+                    'service_id': u.not_null}
+        endpoints = self.keystone_client.endpoints.list()
+        ret = u.validate_v3_endpoint_data(endpoints, admin_port, internal_port,
+                                          public_port, expected,
+                                          expected_num_eps=6)
+        if ret:
+            amulet.raise_status(amulet.FAIL,
+                                msg='cinder endpoint: {}'.format(ret))
diff --git a/tests/charmhelpers/contrib/amulet/deployment.py b/tests/charmhelpers/contrib/amulet/deployment.py
index 9c65518e1c4c6ff6f508ff7e046ce2b91f961f4c..d21d01d8ffe242d686283b0ed977b88be6bfc74e 100644
--- a/tests/charmhelpers/contrib/amulet/deployment.py
+++ b/tests/charmhelpers/contrib/amulet/deployment.py
@@ -50,7 +50,8 @@ class AmuletDeployment(object):
             this_service['units'] = 1
 
         self.d.add(this_service['name'], units=this_service['units'],
-                   constraints=this_service.get('constraints'))
+                   constraints=this_service.get('constraints'),
+                   storage=this_service.get('storage'))
 
         for svc in other_services:
             if 'location' in svc:
@@ -64,7 +65,8 @@ class AmuletDeployment(object):
                 svc['units'] = 1
 
             self.d.add(svc['name'], charm=branch_location, units=svc['units'],
-                       constraints=svc.get('constraints'))
+                       constraints=svc.get('constraints'),
+                       storage=svc.get('storage'))
 
     def _add_relations(self, relations):
         """Add all of the relations for the services."""
diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
index 5afbbd87c13e2b168b088c4da51b3b63ab4d07a2..1c96752a49fb36f389cd1ede38b31afb94127e42 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -21,6 +21,9 @@ from collections import OrderedDict
 from charmhelpers.contrib.amulet.deployment import (
     AmuletDeployment
 )
+from charmhelpers.contrib.openstack.amulet.utils import (
+    OPENSTACK_RELEASES_PAIRS
+)
 
 DEBUG = logging.DEBUG
 ERROR = logging.ERROR
@@ -271,11 +274,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
            release.
            """
         # Must be ordered by OpenStack release (not by Ubuntu release):
-        (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty,
-         self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton,
-         self.yakkety_newton, self.xenial_ocata, self.zesty_ocata,
-         self.xenial_pike, self.artful_pike, self.xenial_queens,
-         self.bionic_queens,) = range(13)
+        for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
+            setattr(self, os_pair, i)
 
         releases = {
             ('trusty', None): self.trusty_icehouse,
@@ -291,6 +291,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('zesty', None): self.zesty_ocata,
             ('artful', None): self.artful_pike,
             ('bionic', None): self.bionic_queens,
+            ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
+            ('cosmic', None): self.cosmic_rocky,
         }
         return releases[(self.series, self.openstack)]
 
@@ -306,6 +308,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('zesty', 'ocata'),
             ('artful', 'pike'),
             ('bionic', 'queens'),
+            ('cosmic', 'rocky'),
         ])
         if self.openstack:
             os_origin = self.openstack.split(':')[1]
diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py
index b71b2b1910959f5dbe7860ff3d14d45b8e9e2d90..ef4ab54bc8d1a988f827d2b766c3d1f20f0238e1 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -40,6 +40,7 @@ import novaclient
 import pika
 import swiftclient
 
+from charmhelpers.core.decorators import retry_on_exception
 from charmhelpers.contrib.amulet.utils import (
     AmuletUtils
 )
@@ -50,6 +51,13 @@ ERROR = logging.ERROR
 
 NOVA_CLIENT_VERSION = "2"
 
+OPENSTACK_RELEASES_PAIRS = [
+    'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
+    'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
+    'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
+    'xenial_pike', 'artful_pike', 'xenial_queens',
+    'bionic_queens', 'bionic_rocky', 'cosmic_rocky']
+
 
 class OpenStackAmuletUtils(AmuletUtils):
     """OpenStack amulet utilities.
@@ -63,7 +71,34 @@ class OpenStackAmuletUtils(AmuletUtils):
         super(OpenStackAmuletUtils, self).__init__(log_level)
 
     def validate_endpoint_data(self, endpoints, admin_port, internal_port,
-                               public_port, expected):
+                               public_port, expected, openstack_release=None):
+        """Validate endpoint data. Pick the correct validator based on
+           OpenStack release. Expected data should be in the v2 format:
+           {
+               'id': id,
+               'region': region,
+               'adminurl': adminurl,
+               'internalurl': internalurl,
+               'publicurl': publicurl,
+               'service_id': service_id}
+
+           """
+        validation_function = self.validate_v2_endpoint_data
+        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+        if openstack_release and openstack_release >= xenial_queens:
+                validation_function = self.validate_v3_endpoint_data
+                expected = {
+                    'id': expected['id'],
+                    'region': expected['region'],
+                    'region_id': 'RegionOne',
+                    'url': self.valid_url,
+                    'interface': self.not_null,
+                    'service_id': expected['service_id']}
+        return validation_function(endpoints, admin_port, internal_port,
+                                   public_port, expected)
+
+    def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
+                                  public_port, expected):
         """Validate endpoint data.
 
            Validate actual endpoint data vs expected endpoint data. The ports
@@ -92,7 +127,7 @@ class OpenStackAmuletUtils(AmuletUtils):
             return 'endpoint not found'
 
     def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
-                                  public_port, expected):
+                                  public_port, expected, expected_num_eps=3):
         """Validate keystone v3 endpoint data.
 
         Validate the v3 endpoint data which has changed from v2.  The
@@ -138,10 +173,89 @@ class OpenStackAmuletUtils(AmuletUtils):
                 if ret:
                     return 'unexpected endpoint data - {}'.format(ret)
 
-        if len(found) != 3:
+        if len(found) != expected_num_eps:
             return 'Unexpected number of endpoints found'
 
-    def validate_svc_catalog_endpoint_data(self, expected, actual):
+    def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
+        """Convert v2 endpoint data into v3.
+
+           {
+               'service_name1': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region,
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+               'service_name2': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region,
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+           }
+          """
+        self.log.warn("Endpoint ID and Region ID validation is limited to not "
+                      "null checks after v2 to v3 conversion")
+        for svc in ep_data.keys():
+            assert len(ep_data[svc]) == 1, "Unknown data format"
+            svc_ep_data = ep_data[svc][0]
+            ep_data[svc] = [
+                {
+                    'url': svc_ep_data['adminURL'],
+                    'interface': 'admin',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null},
+                {
+                    'url': svc_ep_data['publicURL'],
+                    'interface': 'public',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null},
+                {
+                    'url': svc_ep_data['internalURL'],
+                    'interface': 'internal',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null}]
+        return ep_data
+
+    def validate_svc_catalog_endpoint_data(self, expected, actual,
+                                           openstack_release=None):
+        """Validate service catalog endpoint data. Pick the correct validator
+           for the OpenStack version. Expected data should be in the v2 format:
+           {
+               'service_name1': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region,
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+               'service_name2': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region,
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+           }
+
+           """
+        validation_function = self.validate_v2_svc_catalog_endpoint_data
+        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+        if openstack_release and openstack_release >= xenial_queens:
+            validation_function = self.validate_v3_svc_catalog_endpoint_data
+            expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
+        return validation_function(expected, actual)
+
+    def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
         """Validate service catalog endpoint data.
 
            Validate a list of actual service catalog endpoints vs a list of
@@ -310,6 +424,7 @@ class OpenStackAmuletUtils(AmuletUtils):
         self.log.debug('Checking if tenant exists ({})...'.format(tenant))
         return tenant in [t.name for t in keystone.tenants.list()]
 
+    @retry_on_exception(num_retries=5, base_delay=1)
     def keystone_wait_for_propagation(self, sentry_relation_pairs,
                                       api_version):
         """Iterate over list of sentry and relation tuples and verify that
@@ -328,7 +443,7 @@ class OpenStackAmuletUtils(AmuletUtils):
             if rel.get('api_version') != str(api_version):
                 raise Exception("api_version not propagated through relation"
                                 " data yet ('{}' != '{}')."
-                                "".format(rel['api_version'], api_version))
+                                "".format(rel.get('api_version'), api_version))
 
     def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
                                        api_version):
@@ -350,16 +465,13 @@ class OpenStackAmuletUtils(AmuletUtils):
         deployment._auto_wait_for_status()
         self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
 
-    def authenticate_cinder_admin(self, keystone_sentry, username,
-                                  password, tenant, api_version=2):
+    def authenticate_cinder_admin(self, keystone, api_version=2):
         """Authenticates admin user with cinder."""
-        # NOTE(beisner): cinder python client doesn't accept tokens.
-        keystone_ip = keystone_sentry.info['public-address']
-        ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
+        self.log.debug('Authenticating cinder admin...')
         _clients = {
             1: cinder_client.Client,
             2: cinder_clientv2.Client}
-        return _clients[api_version](username, password, tenant, ept)
+        return _clients[api_version](session=keystone.session)
 
     def authenticate_keystone(self, keystone_ip, username, password,
                               api_version=False, admin_port=False,
@@ -367,13 +479,36 @@ class OpenStackAmuletUtils(AmuletUtils):
                               project_domain_name=None, project_name=None):
         """Authenticate with Keystone"""
         self.log.debug('Authenticating with keystone...')
-        port = 5000
-        if admin_port:
-            port = 35357
-        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
-                                        port)
-        if not api_version or api_version == 2:
-            ep = base_ep + "/v2.0"
+        if not api_version:
+            api_version = 2
+        sess, auth = self.get_keystone_session(
+            keystone_ip=keystone_ip,
+            username=username,
+            password=password,
+            api_version=api_version,
+            admin_port=admin_port,
+            user_domain_name=user_domain_name,
+            domain_name=domain_name,
+            project_domain_name=project_domain_name,
+            project_name=project_name
+        )
+        if api_version == 2:
+            client = keystone_client.Client(session=sess)
+        else:
+            client = keystone_client_v3.Client(session=sess)
+        # This populates the client.service_catalog
+        client.auth_ref = auth.get_access(sess)
+        return client
+
+    def get_keystone_session(self, keystone_ip, username, password,
+                             api_version=False, admin_port=False,
+                             user_domain_name=None, domain_name=None,
+                             project_domain_name=None, project_name=None):
+        """Return a keystone session object"""
+        ep = self.get_keystone_endpoint(keystone_ip,
+                                        api_version=api_version,
+                                        admin_port=admin_port)
+        if api_version == 2:
             auth = v2.Password(
                 username=username,
                 password=password,
@@ -381,12 +516,7 @@ class OpenStackAmuletUtils(AmuletUtils):
                 auth_url=ep
             )
             sess = keystone_session.Session(auth=auth)
-            client = keystone_client.Client(session=sess)
-            # This populates the client.service_catalog
-            client.auth_ref = auth.get_access(sess)
-            return client
         else:
-            ep = base_ep + "/v3"
             auth = v3.Password(
                 user_domain_name=user_domain_name,
                 username=username,
@@ -397,10 +527,57 @@ class OpenStackAmuletUtils(AmuletUtils):
                 auth_url=ep
             )
             sess = keystone_session.Session(auth=auth)
-            client = keystone_client_v3.Client(session=sess)
-            # This populates the client.service_catalog
-            client.auth_ref = auth.get_access(sess)
-            return client
+        return (sess, auth)
+
+    def get_keystone_endpoint(self, keystone_ip, api_version=None,
+                              admin_port=False):
+        """Return keystone endpoint"""
+        port = 5000
+        if admin_port:
+            port = 35357
+        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
+                                        port)
+        if api_version == 2:
+            ep = base_ep + "/v2.0"
+        else:
+            ep = base_ep + "/v3"
+        return ep
+
+    def get_default_keystone_session(self, keystone_sentry,
+                                     openstack_release=None, api_version=2):
+        """Return a keystone session object and client object assuming standard
+           default settings
+
+           Example call in amulet tests:
+               self.keystone_session, self.keystone = u.get_default_keystone_session(
+                   self.keystone_sentry,
+                   openstack_release=self._get_openstack_release())
+
+           The session can then be used to auth other clients:
+               neutronclient.Client(session=session)
+               aodh_client.Client(session=session)
+               etc.
+        """
+        self.log.debug('Authenticating keystone admin...')
+        # 11 => xenial_queens
+        if api_version == 3 or (openstack_release and openstack_release >= 11):
+            client_class = keystone_client_v3.Client
+            api_version = 3
+        else:
+            client_class = keystone_client.Client
+        keystone_ip = keystone_sentry.info['public-address']
+        session, auth = self.get_keystone_session(
+            keystone_ip,
+            api_version=api_version,
+            username='admin',
+            password='openstack',
+            project_name='admin',
+            user_domain_name='admin_domain',
+            project_domain_name='admin_domain')
+        client = client_class(session=session)
+        # This populates the client.service_catalog
+        client.auth_ref = auth.get_access(session)
+        return session, client
 
     def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                     tenant=None, api_version=None,
@@ -858,9 +1035,12 @@ class OpenStackAmuletUtils(AmuletUtils):
         :returns: List of pool name, object count, kb disk space used
         """
         df = self.get_ceph_df(sentry_unit)
-        pool_name = df['pools'][pool_id]['name']
-        obj_count = df['pools'][pool_id]['stats']['objects']
-        kb_used = df['pools'][pool_id]['stats']['kb_used']
+        for pool in df['pools']:
+            if pool['id'] == pool_id:
+                pool_name = pool['name']
+                obj_count = pool['stats']['objects']
+                kb_used = pool['stats']['kb_used']
+
         self.log.debug('Ceph {} pool (ID {}): {} objects, '
                        '{} kb used'.format(pool_name, pool_id,
                                            obj_count, kb_used))
diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py
index 5a88f798e89546ad9128cb7d4a1cb8bd6e69a644..ed7af39e36fa0b921d42edb94cff997bf01135d1 100644
--- a/tests/charmhelpers/core/hookenv.py
+++ b/tests/charmhelpers/core/hookenv.py
@@ -27,6 +27,7 @@ import glob
 import os
 import json
 import yaml
+import re
 import subprocess
 import sys
 import errno
@@ -39,6 +40,7 @@ if not six.PY3:
 else:
     from collections import UserDict
 
+
 CRITICAL = "CRITICAL"
 ERROR = "ERROR"
 WARNING = "WARNING"
@@ -66,7 +68,7 @@ def cached(func):
     @wraps(func)
     def wrapper(*args, **kwargs):
         global cache
-        key = str((func, args, kwargs))
+        key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
         try:
             return cache[key]
         except KeyError:
@@ -288,7 +290,7 @@ class Config(dict):
         self.implicit_save = True
         self._prev_dict = None
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
-        if os.path.exists(self.path):
+        if os.path.exists(self.path) and os.stat(self.path).st_size:
             self.load_previous()
         atexit(self._implicit_save)
 
@@ -308,7 +310,11 @@ class Config(dict):
         """
         self.path = path or self.path
         with open(self.path) as f:
-            self._prev_dict = json.load(f)
+            try:
+                self._prev_dict = json.load(f)
+            except ValueError as e:
+                log('Unable to parse previous config data - {}'.format(str(e)),
+                    level=ERROR)
         for k, v in copy.deepcopy(self._prev_dict).items():
             if k not in self:
                 self[k] = v
@@ -344,6 +350,7 @@ class Config(dict):
 
         """
         with open(self.path, 'w') as f:
+            os.fchmod(f.fileno(), 0o600)
             json.dump(self, f)
 
     def _implicit_save(self):
@@ -351,22 +358,40 @@ class Config(dict):
             self.save()
 
 
-@cached
+_cache_config = None
+
+
 def config(scope=None):
-    """Juju charm configuration"""
-    config_cmd_line = ['config-get']
-    if scope is not None:
-        config_cmd_line.append(scope)
-    else:
-        config_cmd_line.append('--all')
-    config_cmd_line.append('--format=json')
+    """
+    Get the juju charm configuration (scope==None) or individual key,
+    (scope=str).  The returned value is a Python data structure loaded as
+    JSON from the Juju config command.
+
+    :param scope: If set, return the value for the specified key.
+    :type scope: Optional[str]
+    :returns: Either the whole config as a Config, or a key from it.
+    :rtype: Any
+    """
+    global _cache_config
+    config_cmd_line = ['config-get', '--all', '--format=json']
+    try:
+        # JSON Decode Exception for Python3.5+
+        exc_json = json.decoder.JSONDecodeError
+    except AttributeError:
+        # JSON Decode Exception for Python2.7 through Python3.4
+        exc_json = ValueError
     try:
-        config_data = json.loads(
-            subprocess.check_output(config_cmd_line).decode('UTF-8'))
+        if _cache_config is None:
+            config_data = json.loads(
+                subprocess.check_output(config_cmd_line).decode('UTF-8'))
+            _cache_config = Config(config_data)
         if scope is not None:
-            return config_data
-        return Config(config_data)
-    except ValueError:
+            return _cache_config.get(scope)
+        return _cache_config
+    except (exc_json, UnicodeDecodeError) as e:
+        log('Unable to parse output from config-get: config_cmd_line="{}" '
+            'message="{}"'
+            .format(config_cmd_line, str(e)), level=ERROR)
         return None
 
 
@@ -818,6 +843,10 @@ class Hooks(object):
         return wrapper
 
 
+class NoNetworkBinding(Exception):
+    pass
+
+
 def charm_dir():
     """Return the root directory of the current charm"""
     d = os.environ.get('JUJU_CHARM_DIR')
@@ -943,6 +972,13 @@ def application_version_set(version):
         log("Application Version: {}".format(version))
 
 
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def goal_state():
+    """Juju goal state values"""
+    cmd = ['goal-state', '--format=json']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def is_leader():
     """Does the current unit hold the juju leadership
@@ -1037,7 +1073,6 @@ def juju_version():
                                    universal_newlines=True).strip()
 
 
-@cached
 def has_juju_version(minimum_version):
     """Return True if the Juju version is at least the provided version"""
     return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@@ -1097,6 +1132,8 @@ def _run_atexit():
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def network_get_primary_address(binding):
     '''
+    Deprecated since Juju 2.3; use network_get()
+
     Retrieve the primary network address for a named binding
 
     :param binding: string. The name of a relation of extra-binding
@@ -1104,10 +1141,19 @@ def network_get_primary_address(binding):
     :raise: NotImplementedError if run on Juju < 2.0
     '''
     cmd = ['network-get', '--primary-address', binding]
-    return subprocess.check_output(cmd).decode('UTF-8').strip()
+    try:
+        response = subprocess.check_output(
+            cmd,
+            stderr=subprocess.STDOUT).decode('UTF-8').strip()
+    except CalledProcessError as e:
+        if 'no network config found for binding' in e.output.decode('UTF-8'):
+            raise NoNetworkBinding("No network binding for {}"
+                                   .format(binding))
+        else:
+            raise
+    return response
 
 
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def network_get(endpoint, relation_id=None):
     """
     Retrieve the network details for a relation endpoint
@@ -1115,24 +1161,20 @@ def network_get(endpoint, relation_id=None):
     :param endpoint: string. The name of a relation endpoint
     :param relation_id: int. The ID of the relation for the current context.
     :return: dict. The loaded YAML output of the network-get query.
-    :raise: NotImplementedError if run on Juju < 2.1
+    :raise: NotImplementedError if request not supported by the Juju version.
     """
+    if not has_juju_version('2.2'):
+        raise NotImplementedError(juju_version())  # earlier versions require --primary-address
+    if relation_id and not has_juju_version('2.3'):
+        raise NotImplementedError  # 2.3 added the -r option
+
     cmd = ['network-get', endpoint, '--format', 'yaml']
     if relation_id:
         cmd.append('-r')
         cmd.append(relation_id)
-    try:
-        response = subprocess.check_output(
-            cmd,
-            stderr=subprocess.STDOUT).decode('UTF-8').strip()
-    except CalledProcessError as e:
-        # Early versions of Juju 2.0.x required the --primary-address argument.
-        # We catch that condition here and raise NotImplementedError since
-        # the requested semantics are not available - the caller can then
-        # use the network_get_primary_address() method instead.
-        if '--primary-address is currently required' in e.output.decode('UTF-8'):
-            raise NotImplementedError
-        raise
+    response = subprocess.check_output(
+        cmd,
+        stderr=subprocess.STDOUT).decode('UTF-8').strip()
     return yaml.safe_load(response)
 
 
@@ -1188,9 +1230,23 @@ def iter_units_for_relation_name(relation_name):
 
 def ingress_address(rid=None, unit=None):
     """
-    Retrieve the ingress-address from a relation when available. Otherwise,
-    return the private-address. This function is to be used on the consuming
-    side of the relation.
+    Retrieve the ingress-address from a relation when available.
+    Otherwise, return the private-address.
+
+    When used on the consuming side of the relation (unit is a remote
+    unit), the ingress-address is the IP address that this unit needs
+    to use to reach the provided service on the remote unit.
+
+    When used on the providing side of the relation (unit == local_unit()),
+    the ingress-address is the IP address that is advertised to remote
+    units on this relation. Remote units need to use this address to
+    reach the local provided service on this unit.
+
+    Note that charms may document some other method to use in
+    preference to the ingress_address(), such as an address provided
+    on a different relation attribute or a service discovery mechanism.
+    This allows charms to redirect inbound connections to their peers
+    or different applications such as load balancers.
 
     Usage:
     addresses = [ingress_address(rid=u.rid, unit=u.unit)
@@ -1204,3 +1260,40 @@ def ingress_address(rid=None, unit=None):
     settings = relation_get(rid=rid, unit=unit)
     return (settings.get('ingress-address') or
             settings.get('private-address'))
+
+
+def egress_subnets(rid=None, unit=None):
+    """
+    Retrieve the egress-subnets from a relation.
+
+    This function is to be used on the providing side of the
+    relation, and provides the ranges of addresses that client
+    connections may come from. The result is uninteresting on
+    the consuming side of a relation (unit == local_unit()).
+
+    Returns a stable list of subnets in CIDR format.
+    eg. ['192.168.1.0/24', '2001::F00F/128']
+
+    If egress-subnets is not available, falls back to using the published
+    ingress-address, or finally private-address.
+
+    :param rid: string relation id
+    :param unit: string unit name
+    :side effect: calls relation_get
+    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
+    """
+    def _to_range(addr):
+        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
+            addr += '/32'
+        elif ':' in addr and '/' not in addr:  # IPv6
+            addr += '/128'
+        return addr
+
+    settings = relation_get(rid=rid, unit=unit)
+    if 'egress-subnets' in settings:
+        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
+    if 'ingress-address' in settings:
+        return [_to_range(settings['ingress-address'])]
+    if 'private-address' in settings:
+        return [_to_range(settings['private-address'])]
+    return []  # Should never happen
diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py
index 5cc5c86b701fc5375f387eb01a0d2b76c184c263..322ab2acd71bb02f13d2d739e74d0ddc62774d9e 100644
--- a/tests/charmhelpers/core/host.py
+++ b/tests/charmhelpers/core/host.py
@@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
         with open(path, 'wb') as target:
             os.fchown(target.fileno(), uid, gid)
             os.fchmod(target.fileno(), perms)
+            if six.PY3 and isinstance(content, six.string_types):
+                content = content.encode('UTF-8')
             target.write(content)
         return
     # the contents were the same, but we might still need to change the
@@ -991,7 +993,7 @@ def updatedb(updatedb_text, new_path):
     return output
 
 
-def modulo_distribution(modulo=3, wait=30):
+def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
     """ Modulo distribution
 
     This helper uses the unit number, a modulo value and a constant wait time
@@ -1013,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30):
 
     @param modulo: int The modulo number creates the group distribution
     @param wait: int The constant time wait value
+    @param non_zero_wait: boolean Override unit % modulo == 0,
+                          return modulo * wait. Used to avoid collisions with
+                          leader nodes which are often given priority.
     @return: int Calculated time to wait for unit operation
     """
     unit_number = int(local_unit().split('/')[1])
-    return (unit_number % modulo) * wait
+    calculated_wait_time = (unit_number % modulo) * wait
+    if non_zero_wait and calculated_wait_time == 0:
+        return modulo * wait
+    else:
+        return calculated_wait_time
diff --git a/tests/charmhelpers/core/host_factory/ubuntu.py b/tests/charmhelpers/core/host_factory/ubuntu.py
index d8dc378a5dad29c271a89289e4b815e2c2c99060..99451b59789a822b4f5a96d7310965f1c8921898 100644
--- a/tests/charmhelpers/core/host_factory/ubuntu.py
+++ b/tests/charmhelpers/core/host_factory/ubuntu.py
@@ -20,6 +20,7 @@ UBUNTU_RELEASES = (
     'yakkety',
     'zesty',
     'artful',
+    'bionic',
 )
 
 
diff --git a/tests/charmhelpers/core/services/base.py b/tests/charmhelpers/core/services/base.py
index ca9dc996bd7d7fc2a18b7d9a9ee51adff171bda9..179ad4f0c367dd6b13c10b201c3752d1c8daf05e 100644
--- a/tests/charmhelpers/core/services/base.py
+++ b/tests/charmhelpers/core/services/base.py
@@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback):
     """
     def __call__(self, manager, service_name, event_name):
         service = manager.get_service(service_name)
-        new_ports = service.get('ports', [])
+        # turn this generator into a list,
+        # as we'll be going over it multiple times
+        new_ports = list(service.get('ports', []))
         port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
         if os.path.exists(port_file):
             with open(port_file) as fp:
                 old_ports = fp.read().split(',')
             for old_port in old_ports:
-                if bool(old_port):
-                    old_port = int(old_port)
-                    if old_port not in new_ports:
-                        hookenv.close_port(old_port)
+                if bool(old_port) and not self.ports_contains(old_port, new_ports):
+                    hookenv.close_port(old_port)
         with open(port_file, 'w') as fp:
             fp.write(','.join(str(port) for port in new_ports))
         for port in new_ports:
+            # A port is either a number or 'ICMP'
+            protocol = 'TCP'
+            if str(port).upper() == 'ICMP':
+                protocol = 'ICMP'
             if event_name == 'start':
-                hookenv.open_port(port)
+                hookenv.open_port(port, protocol)
             elif event_name == 'stop':
-                hookenv.close_port(port)
+                hookenv.close_port(port, protocol)
+
+    def ports_contains(self, port, ports):
+        if not bool(port):
+            return False
+        if str(port).upper() != 'ICMP':
+            port = int(port)
+        return port in ports
 
 
 def service_stop(service_name):
diff --git a/tests/charmhelpers/core/sysctl.py b/tests/charmhelpers/core/sysctl.py
index 6e413e31480e5fb4bcb703d58b1e87f98adc53af..1f188d8c653f9bf793e18ed484635fce310543cc 100644
--- a/tests/charmhelpers/core/sysctl.py
+++ b/tests/charmhelpers/core/sysctl.py
@@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
 def create(sysctl_dict, sysctl_file):
     """Creates a sysctl.conf file from a YAML associative array
 
-    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
+    :param sysctl_dict: a dict or YAML-formatted string of sysctl
+                        options eg "{ 'kernel.max_pid': 1337 }"
     :type sysctl_dict: str
     :param sysctl_file: path to the sysctl file to be saved
     :type sysctl_file: str or unicode
     :returns: None
     """
-    try:
-        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
-    except yaml.YAMLError:
-        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
-            level=ERROR)
-        return
+    if type(sysctl_dict) is not dict:
+        try:
+            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+        except yaml.YAMLError:
+            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+                level=ERROR)
+            return
+    else:
+        sysctl_dict_parsed = sysctl_dict
 
     with open(sysctl_file, "w") as fd:
         for key, value in sysctl_dict_parsed.items():
diff --git a/tests/charmhelpers/core/templating.py b/tests/charmhelpers/core/templating.py
index 7b801a34a5e6585485347f7a97bc18a10a093d03..9014015c14ee0b48c775562cd4f0d30884944439 100644
--- a/tests/charmhelpers/core/templating.py
+++ b/tests/charmhelpers/core/templating.py
@@ -20,7 +20,8 @@ from charmhelpers.core import hookenv
 
 
 def render(source, target, context, owner='root', group='root',
-           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
+           perms=0o444, templates_dir=None, encoding='UTF-8',
+           template_loader=None, config_template=None):
     """
     Render a template.
 
@@ -32,6 +33,9 @@ def render(source, target, context, owner='root', group='root',
     The context should be a dict containing the values to be replaced in the
     template.
 
+    config_template may be provided to render from a provided template instead
+    of loading from a file.
+
     The `owner`, `group`, and `perms` options will be passed to `write_file`.
 
     If omitted, `templates_dir` defaults to the `templates` folder in the charm.
@@ -65,14 +69,19 @@ def render(source, target, context, owner='root', group='root',
         if templates_dir is None:
             templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
         template_env = Environment(loader=FileSystemLoader(templates_dir))
-    try:
-        source = source
-        template = template_env.get_template(source)
-    except exceptions.TemplateNotFound as e:
-        hookenv.log('Could not load template %s from %s.' %
-                    (source, templates_dir),
-                    level=hookenv.ERROR)
-        raise e
+
+    # load from a string if provided explicitly
+    if config_template is not None:
+        template = template_env.from_string(config_template)
+    else:
+        try:
+            source = source
+            template = template_env.get_template(source)
+        except exceptions.TemplateNotFound as e:
+            hookenv.log('Could not load template %s from %s.' %
+                        (source, templates_dir),
+                        level=hookenv.ERROR)
+            raise e
     content = template.render(context)
     if target is not None:
         target_dir = os.path.dirname(target)
diff --git a/tests/charmhelpers/core/unitdata.py b/tests/charmhelpers/core/unitdata.py
index 7af875c2fcc1e2e38f9267bfdc60ab5a2a499c18..ab554327b343f896880523fc627c1abea84be29a 100644
--- a/tests/charmhelpers/core/unitdata.py
+++ b/tests/charmhelpers/core/unitdata.py
@@ -166,6 +166,10 @@ class Storage(object):
 
     To support dicts, lists, integer, floats, and booleans values
     are automatically json encoded/decoded.
+
+    Note: to facilitate unit testing, ':memory:' can be passed as the
+    path parameter which causes sqlite3 to only build the db in memory.
+    This should only be used for testing purposes.
     """
     def __init__(self, path=None):
         self.db_path = path
@@ -175,6 +179,9 @@ class Storage(object):
             else:
                 self.db_path = os.path.join(
                     os.environ.get('CHARM_DIR', ''), '.unit-state.db')
+        if self.db_path != ':memory:':
+            with open(self.db_path, 'a') as f:
+                os.fchmod(f.fileno(), 0o600)
         self.conn = sqlite3.connect('%s' % self.db_path)
         self.cursor = self.conn.cursor()
         self.revision = None
diff --git a/tests/dfs-basic-trusty-icehouse b/tests/dev-basic-bionic-rocky
similarity index 67%
rename from tests/dfs-basic-trusty-icehouse
rename to tests/dev-basic-bionic-rocky
index e95367b26e54ab278ebd882f63622345d3afb8ba..696c35a97e0bcc0ad8f8d89bc5ee1617ef578ec6 100755
--- a/tests/dfs-basic-trusty-icehouse
+++ b/tests/dev-basic-bionic-rocky
@@ -14,10 +14,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""Amulet tests on a basic keystone git deployment on trusty-icehouse."""
+"""Amulet tests on a basic keystone deployment on bionic-rocky."""
 
-from basic_deployment import KeystoneBasicDeployment
+from basic_deployment import KeystoneV3Deployment
 
 if __name__ == '__main__':
-    deployment = KeystoneBasicDeployment(series='trusty', git=True)
+    deployment = KeystoneV3Deployment(series='bionic',
+                                      openstack='cloud:bionic-rocky',
+                                      source='cloud:bionic-updates/rocky')
     deployment.run_tests()
diff --git a/tests/gate-basic-zesty-ocata b/tests/dev-basic-cosmic-rocky
similarity index 78%
rename from tests/gate-basic-zesty-ocata
rename to tests/dev-basic-cosmic-rocky
index 1350f23d8f9c6b555d459b3737601b0e4ddab51b..c14ca6c46e8f3dc96f4ec6d937a1a074ba7df89d 100755
--- a/tests/gate-basic-zesty-ocata
+++ b/tests/dev-basic-cosmic-rocky
@@ -14,10 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""Amulet tests on a basic keystone deployment on zesty-ocata."""
+"""Amulet tests on a basic keystone deployment on cosmic-rocky."""
 
-from basic_deployment import KeystoneBasicDeployment
+from basic_deployment import KeystoneV3Deployment
 
 if __name__ == '__main__':
-    deployment = KeystoneBasicDeployment(series='zesty')
+    deployment = KeystoneV3Deployment(series='cosmic')
     deployment.run_tests()
diff --git a/tests/dfs-basic-trusty-kilo b/tests/dfs-basic-trusty-kilo
deleted file mode 100755
index 5290f4b142dc34211a5dcbb596036f61a4512b53..0000000000000000000000000000000000000000
--- a/tests/dfs-basic-trusty-kilo
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic keystone git deployment on trusty-kilo."""
-
-from basic_deployment import KeystoneBasicDeployment
-
-if __name__ == '__main__':
-    deployment = KeystoneBasicDeployment(series='trusty',
-                                         openstack='cloud:trusty-kilo',
-                                         source='cloud:trusty-updates/kilo',
-                                         git=True)
-    deployment.run_tests()
diff --git a/tests/gate-basic-artful-pike b/tests/gate-basic-bionic-queens
old mode 100644
new mode 100755
similarity index 78%
rename from tests/gate-basic-artful-pike
rename to tests/gate-basic-bionic-queens
index 4dd01f201e78afbade3fc215001578b47d795a9c..df46adc8c5f979d82f7a81f19d60d04e34043eb9
--- a/tests/gate-basic-artful-pike
+++ b/tests/gate-basic-bionic-queens
@@ -14,10 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""Amulet tests on a basic keystone deployment on artful-pike."""
+"""Amulet tests on a basic keystone deployment on bionic-queens."""
 
-from basic_deployment import KeystoneBasicDeployment
+from basic_deployment import KeystoneV3Deployment
 
 if __name__ == '__main__':
-    deployment = KeystoneBasicDeployment(series='artful')
+    deployment = KeystoneV3Deployment(series='bionic')
     deployment.run_tests()
diff --git a/tests/gate-basic-snap-xenial-ocata b/tests/gate-basic-snap-xenial-ocata
deleted file mode 100755
index 4ba80b62ddd8764323f042dcde5a4e15b33821f9..0000000000000000000000000000000000000000
--- a/tests/gate-basic-snap-xenial-ocata
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic keystone deployment on xenial-ocata."""
-
-from basic_deployment import KeystoneBasicDeployment
-
-if __name__ == '__main__':
-    deployment = KeystoneBasicDeployment(series='xenial',
-                                         openstack='cloud:xenial-ocata',
-                                         snap_source='snap:ocata/edge',
-                                         source='cloud:xenial-updates/ocata')
-    deployment.run_tests()
diff --git a/tests/gate-basic-trusty-liberty b/tests/gate-basic-trusty-liberty
deleted file mode 100755
index 6a176fc2055895ed900143f50957caec8b3e11b0..0000000000000000000000000000000000000000
--- a/tests/gate-basic-trusty-liberty
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic keystone deployment on trusty-liberty."""
-
-from basic_deployment import KeystoneBasicDeployment
-
-if __name__ == '__main__':
-    deployment = KeystoneBasicDeployment(series='trusty',
-                                         openstack='cloud:trusty-liberty',
-                                         source='cloud:trusty-updates/liberty')
-    deployment.run_tests()
diff --git a/tests/gate-basic-xenial-newton b/tests/gate-basic-xenial-newton
deleted file mode 100755
index 150087a0eb65d3a70e71dcb8d9022b0654832605..0000000000000000000000000000000000000000
--- a/tests/gate-basic-xenial-newton
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic keystone deployment on xenial-newton."""
-
-from basic_deployment import KeystoneBasicDeployment
-
-if __name__ == '__main__':
-    deployment = KeystoneBasicDeployment(series='xenial',
-                                         openstack='cloud:xenial-newton',
-                                         source='cloud:xenial-updates/newton')
-    deployment.run_tests()
diff --git a/tests/gate-basic-trusty-kilo b/tests/gate-basic-xenial-queens
similarity index 66%
rename from tests/gate-basic-trusty-kilo
rename to tests/gate-basic-xenial-queens
index a732cb0aa854beb6291312c245f803871cd90e85..1aa6bbc73d57215120cd8736e22ecc78c13bbdd5 100755
--- a/tests/gate-basic-trusty-kilo
+++ b/tests/gate-basic-xenial-queens
@@ -14,12 +14,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""Amulet tests on a basic keystone deployment on trusty-kilo."""
+"""Amulet tests on a basic keystone deployment on xenial-queens."""
 
-from basic_deployment import KeystoneBasicDeployment
+from basic_deployment import KeystoneV3Deployment
 
 if __name__ == '__main__':
-    deployment = KeystoneBasicDeployment(series='trusty',
-                                         openstack='cloud:trusty-kilo',
-                                         source='cloud:trusty-updates/kilo')
+    deployment = KeystoneV3Deployment(series='xenial',
+                                      openstack='cloud:xenial-queens',
+                                      source='cloud:xenial-updates/queens')
     deployment.run_tests()
diff --git a/tox.ini b/tox.ini
index 6d44f4b9affa6fd79582a8d52d93f057011eefcc..930d52644953836b46da027f0787a557efab13e7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,9 +9,9 @@ skipsdist = True
 setenv = VIRTUAL_ENV={envdir}
          PYTHONHASHSEED=0
          CHARM_DIR={envdir}
-         AMULET_SETUP_TIMEOUT=2700
+         AMULET_SETUP_TIMEOUT=5400
 install_command =
-  pip install --allow-unverified python-apt {opts} {packages}
+  pip install {opts} {packages}
 commands = ostestr {posargs}
 whitelist_externals = juju
 passenv = HOME TERM AMULET_* CS_API_*
@@ -26,6 +26,11 @@ basepython = python3.5
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 
+[testenv:py36]
+basepython = python3.6
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+
 [testenv:pep8]
 basepython = python2.7
 deps = -r{toxinidir}/requirements.txt
@@ -60,7 +65,7 @@ basepython = python2.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 commands =
-    bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy
+    bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy
 
 [testenv:func27-dfs]
 # Charm Functional Test
diff --git a/unit_tests/test_actions_git_reinstall.py b/unit_tests/test_actions_git_reinstall.py
deleted file mode 100644
index fedde505bc76032a58021e81395eaeb91cca9fde..0000000000000000000000000000000000000000
--- a/unit_tests/test_actions_git_reinstall.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-
-from mock import patch, MagicMock
-
-# python-apt is not installed as part of test-requirements but is imported by
-# some charmhelpers modules so create a fake import.
-mock_apt = MagicMock()
-sys.modules['apt'] = mock_apt
-mock_apt.apt_pkg = MagicMock()
-
-# NOTE(hopem): we have to mock hooks.charmhelpers (not charmhelpers)
-#              otherwise the mock is not applied to action.hooks.*
-with patch('hooks.charmhelpers.contrib.hardening.harden.harden') as mock_dec, \
-        patch('hooks.charmhelpers.contrib.openstack.utils.'
-              'snap_install_requested') as snap_install_requested, \
-        patch('hooks.keystone_utils.register_configs') as register_configs, \
-        patch('hooks.keystone_utils.os_release') as os_release:
-
-    snap_install_requested.return_value = False
-    mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
-                            lambda *args, **kwargs: f(*args, **kwargs))
-    os_release.return_value = 'juno'
-    import git_reinstall
-
-from test_utils import (
-    CharmTestCase
-)
-
-TO_PATCH = [
-    'config',
-]
-
-
-openstack_origin_git = \
-    """repositories:
-         - {name: requirements,
-            repository: 'git://git.openstack.org/openstack/requirements',
-            branch: stable/juno}
-         - {name: keystone,
-            repository: 'git://git.openstack.org/openstack/keystone',
-            branch: stable/juno}"""
-
-
-class TestKeystoneActions(CharmTestCase):
-
-    def setUp(self):
-        super(TestKeystoneActions, self).setUp(git_reinstall, TO_PATCH)
-        self.config.side_effect = self.test_config.get
-
-    @patch.object(git_reinstall, 'action_set')
-    @patch.object(git_reinstall, 'action_fail')
-    @patch.object(git_reinstall, 'git_install')
-    @patch.object(git_reinstall, 'config_changed')
-    @patch('charmhelpers.contrib.openstack.utils.config')
-    def test_git_reinstall(self, config, config_changed, git_install,
-                           action_fail, action_set):
-        config.return_value = openstack_origin_git
-        self.test_config.set('openstack-origin-git', openstack_origin_git)
-
-        git_reinstall.git_reinstall()
-
-        git_install.assert_called_with(openstack_origin_git)
-        self.assertTrue(git_install.called)
-        self.assertTrue(config_changed.called)
-        self.assertFalse(action_set.called)
-        self.assertFalse(action_fail.called)
-
-    @patch.object(git_reinstall, 'action_set')
-    @patch.object(git_reinstall, 'action_fail')
-    @patch.object(git_reinstall, 'git_install')
-    @patch.object(git_reinstall, 'config_changed')
-    @patch('charmhelpers.contrib.openstack.utils.config')
-    def test_git_reinstall_not_configured(self, _config, config_changed,
-                                          git_install, action_fail,
-                                          action_set):
-        _config.return_value = None
-
-        git_reinstall.git_reinstall()
-
-        msg = 'openstack-origin-git is not configured'
-        action_fail.assert_called_with(msg)
-        self.assertFalse(git_install.called)
-        self.assertFalse(action_set.called)
-
-    @patch.object(git_reinstall, 'action_set')
-    @patch.object(git_reinstall, 'action_fail')
-    @patch.object(git_reinstall, 'git_install')
-    @patch.object(git_reinstall, 'config_changed')
-    @patch('traceback.format_exc')
-    @patch('charmhelpers.contrib.openstack.utils.config')
-    def test_git_reinstall_exception(self, _config, format_exc,
-                                     config_changed, git_install, action_fail,
-                                     action_set):
-        _config.return_value = openstack_origin_git
-        e = OSError('something bad happened')
-        git_install.side_effect = e
-        traceback = (
-            "Traceback (most recent call last):\n"
-            "  File \"actions/git_reinstall.py\", line 37, in git_reinstall\n"
-            "    git_install(config(\'openstack-origin-git\'))\n"
-            "  File \"/usr/lib/python2.7/dist-packages/mock.py\", line 964, in __call__\n"  # noqa
-            "    return _mock_self._mock_call(*args, **kwargs)\n"
-            "  File \"/usr/lib/python2.7/dist-packages/mock.py\", line 1019, in _mock_call\n"  # noqa
-            "    raise effect\n"
-            "OSError: something bad happened\n")
-        format_exc.return_value = traceback
-
-        git_reinstall.git_reinstall()
-
-        msg = 'git-reinstall resulted in an unexpected error'
-        action_fail.assert_called_with(msg)
-        action_set.assert_called_with({'traceback': traceback})
diff --git a/unit_tests/test_actions_openstack_upgrade.py b/unit_tests/test_actions_openstack_upgrade.py
index 2e6bee02e9baef0f87622247bea3cce10315fd52..2ce82aed609144be90bf376f9adf88891791f43f 100644
--- a/unit_tests/test_actions_openstack_upgrade.py
+++ b/unit_tests/test_actions_openstack_upgrade.py
@@ -46,11 +46,9 @@ class TestKeystoneUpgradeActions(CharmTestCase):
     @patch.object(openstack_upgrade, 'register_configs')
     @patch('charmhelpers.contrib.openstack.utils.config')
     @patch('charmhelpers.contrib.openstack.utils.action_set')
-    @patch('charmhelpers.contrib.openstack.utils.git_install_requested')
     @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
-    def test_openstack_upgrade_true(self, upgrade_avail, git_requested,
+    def test_openstack_upgrade_true(self, upgrade_avail,
                                     action_set, config, reg_configs):
-        git_requested.return_value = False
         upgrade_avail.return_value = True
         config.return_value = True
 
@@ -63,11 +61,9 @@ class TestKeystoneUpgradeActions(CharmTestCase):
     @patch.object(openstack_upgrade, 'register_configs')
     @patch('charmhelpers.contrib.openstack.utils.config')
     @patch('charmhelpers.contrib.openstack.utils.action_set')
-    @patch('charmhelpers.contrib.openstack.utils.git_install_requested')
     @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
-    def test_openstack_upgrade_false(self, upgrade_avail, git_requested,
+    def test_openstack_upgrade_false(self, upgrade_avail,
                                      action_set, config, reg_configs):
-        git_requested.return_value = False
         upgrade_avail.return_value = True
         config.return_value = False
 
diff --git a/unit_tests/test_keystone_contexts.py b/unit_tests/test_keystone_contexts.py
index e245b11b1ff1826e4316f6ea4e1c760e44fd95a4..ba09b17df2547fbf4e4a409be297aa8e2c9a1220 100644
--- a/unit_tests/test_keystone_contexts.py
+++ b/unit_tests/test_keystone_contexts.py
@@ -37,88 +37,31 @@ class TestKeystoneContexts(CharmTestCase):
     def setUp(self):
         super(TestKeystoneContexts, self).setUp(context, TO_PATCH)
 
-    def test_is_cert_provided_in_config(self):
-        config = {'ssl_cert': 'somecert', 'ssl_key': 'greatkey'}
-
-        def fake_config(key):
-            return config.get(key)
-
-        self.config.side_effect = fake_config
-        self.assertTrue(context.is_cert_provided_in_config())
-
-        del config['ssl_cert']
-        self.assertFalse(context.is_cert_provided_in_config())
-
-    @patch.object(context, 'mkdir')
-    @patch('keystone_utils.get_ca')
-    @patch('keystone_utils.ensure_permissions')
-    @patch('keystone_utils.determine_ports', lambda: None)
-    @patch('keystone_utils.is_ssl_cert_master', lambda: False)
-    @patch.object(context, 'is_cert_provided_in_config', lambda: False)
-    @patch.object(context, 'log', lambda *args, **kwargs: None)
-    def test_apache_ssl_context_ssl_not_master(self, mock_ensure_permissions,
-                                               mock_get_ca, mock_mkdir):
-        context.ApacheSSLContext().configure_cert('foo')
-        context.ApacheSSLContext().configure_ca()
-        self.assertTrue(mock_mkdir.called)
-        self.assertTrue(mock_ensure_permissions.called)
-        self.assertFalse(mock_get_ca.called)
-
-    @patch('keystone_utils.ensure_permissions')
-    @patch.object(context, 'install_ca_cert')
-    @patch.object(context, 'b64decode')
-    @patch.object(context, 'mkdir', lambda *args: None)
-    @patch('keystone_utils.get_ca', lambda: None)
-    @patch('keystone_utils.determine_ports', lambda: None)
-    @patch('keystone_utils.is_ssl_cert_master', lambda: True)
-    @patch.object(context, 'log', lambda *args, **kwargs: None)
-    def test_apache_ssl_context_ssl_configure_ca(self, mock_b64decode,
-                                                 mock_install_ca_cert,
-                                                 mock_ensure_permissions):
-        config = {'ssl_cert': 'somecert', 'ssl_key': 'greatkey'}
-
-        def fake_config(key):
-            return config.get(key)
-
-        self.config.side_effect = fake_config
-
-        context.ApacheSSLContext().configure_ca()
-        self.assertFalse(mock_b64decode.called)
-        self.assertFalse(mock_install_ca_cert.called)
-        self.assertFalse(mock_ensure_permissions.called)
-
-        config['ssl_ca'] = 'foofoofalalala'
-        context.ApacheSSLContext().configure_ca()
-        self.assertTrue(mock_b64decode.called)
-        self.assertTrue(mock_install_ca_cert.called)
-        self.assertTrue(mock_ensure_permissions.called)
-
     @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids')
     @patch('charmhelpers.contrib.openstack.ip.unit_get')
     @patch('charmhelpers.contrib.openstack.ip.service_name')
     @patch('charmhelpers.contrib.openstack.ip.config')
     @patch('keystone_utils.determine_ports')
-    @patch('keystone_utils.is_ssl_cert_master')
     @patch('charmhelpers.contrib.openstack.context.config')
     @patch('charmhelpers.contrib.openstack.context.is_clustered')
     @patch('charmhelpers.contrib.openstack.context.determine_apache_port')
     @patch('charmhelpers.contrib.openstack.context.determine_api_port')
     @patch('charmhelpers.contrib.openstack.context.unit_get')
+    @patch('charmhelpers.contrib.openstack.context.relation_ids')
     @patch('charmhelpers.contrib.openstack.context.https')
     def test_apache_ssl_context_service_enabled(self, mock_https,
+                                                mock_relation_ids,
                                                 mock_unit_get,
                                                 mock_determine_api_port,
                                                 mock_determine_apache_port,
                                                 mock_is_clustered,
                                                 mock_config,
-                                                mock_is_ssl_cert_master,
                                                 mock_determine_ports,
                                                 mock_ip_config,
                                                 mock_service_name,
                                                 mock_ip_unit_get,
                                                 mock_rel_ids,
                                                 ):
-        mock_is_ssl_cert_master.return_value = True
         mock_https.return_value = True
         mock_unit_get.return_value = '1.2.3.4'
         mock_ip_unit_get.return_value = '1.2.3.4'
@@ -142,6 +85,7 @@ class TestKeystoneContexts(CharmTestCase):
         self.assertTrue(mock_https.called)
         mock_unit_get.assert_called_with('private-address')
 
+    @patch('charmhelpers.contrib.openstack.context.get_relation_ip')
     @patch('charmhelpers.contrib.openstack.context.mkdir')
     @patch('keystone_utils.api_port')
     @patch('charmhelpers.contrib.openstack.context.get_netmask_for_address')
@@ -158,11 +102,12 @@ class TestKeystoneContexts(CharmTestCase):
         self, mock_open, mock_kv, mock_log, mock_relation_get,
             mock_related_units, mock_unit_get, mock_relation_ids, mock_config,
             mock_get_address_in_network, mock_get_netmask_for_address,
-            mock_api_port, mock_mkdir):
+            mock_api_port, mock_mkdir, mock_get_relation_ip):
         os.environ['JUJU_UNIT_NAME'] = 'keystone'
 
         mock_relation_ids.return_value = ['identity-service:0', ]
         mock_unit_get.return_value = '1.2.3.4'
+        mock_get_relation_ip.return_value = '1.2.3.4'
         mock_relation_get.return_value = '10.0.0.0'
         mock_related_units.return_value = ['unit/0', ]
         mock_config.return_value = None
@@ -215,3 +160,204 @@ class TestKeystoneContexts(CharmTestCase):
 
         mock_is_elected_leader.return_value = True
         self.assertEqual({'token_flush': True}, ctxt())
+
+    @patch.object(context, 'relation_ids')
+    @patch.object(context, 'related_units')
+    @patch.object(context, 'relation_get')
+    def test_keystone_fid_service_provider_rdata(
+            self, mock_relation_get, mock_related_units,
+            mock_relation_ids):
+        os.environ['JUJU_UNIT_NAME'] = 'keystone'
+
+        def relation_ids_side_effect(rname):
+            return {
+                'keystone-fid-service-provider': {
+                    'keystone-fid-service-provider:0',
+                    'keystone-fid-service-provider:1',
+                    'keystone-fid-service-provider:2'
+                }
+            }[rname]
+
+        mock_relation_ids.side_effect = relation_ids_side_effect
+
+        def related_units_side_effect(rid):
+            return {
+                'keystone-fid-service-provider:0': ['sp-mellon/0'],
+                'keystone-fid-service-provider:1': ['sp-shib/0'],
+                'keystone-fid-service-provider:2': ['sp-oidc/0'],
+            }[rid]
+        mock_related_units.side_effect = related_units_side_effect
+
+        def relation_get_side_effect(unit, rid):
+            # one unit only as the relation is container-scoped
+            return {
+                "keystone-fid-service-provider:0": {
+                    "sp-mellon/0": {
+                        "ingress-address": '10.0.0.10',
+                        "protocol-name": '"saml2"',
+                        "remote-id-attribute": '"MELLON_IDP"',
+                    },
+                },
+                "keystone-fid-service-provider:1": {
+                    "sp-shib/0": {
+                        "ingress-address": '10.0.0.10',
+                        "protocol-name": '"mapped"',
+                        "remote-id-attribute": '"Shib-Identity-Provider"',
+                    },
+                },
+                "keystone-fid-service-provider:2": {
+                    "sp-oidc/0": {
+                        "ingress-address": '10.0.0.10',
+                        "protocol-name": '"oidc"',
+                        "remote-id-attribute": '"HTTP_OIDC_ISS"',
+                    },
+                },
+            }[rid][unit]
+
+        mock_relation_get.side_effect = relation_get_side_effect
+        ctxt = context.KeystoneFIDServiceProviderContext()
+
+        self.maxDiff = None
+        self.assertItemsEqual(
+            ctxt(),
+            {
+                "fid_sps": [
+                    {
+                        "protocol-name": "saml2",
+                        "remote-id-attribute": "MELLON_IDP",
+                    },
+                    {
+                        "protocol-name": "mapped",
+                        "remote-id-attribute": "Shib-Identity-Provider",
+                    },
+                    {
+                        "protocol-name": "oidc",
+                        "remote-id-attribute": "HTTP_OIDC_ISS",
+                    },
+                ]
+            }
+        )
+
+    @patch.object(context, 'relation_ids')
+    def test_keystone_fid_service_provider_empty(
+            self, mock_relation_ids):
+        os.environ['JUJU_UNIT_NAME'] = 'keystone'
+
+        def relation_ids_side_effect(rname):
+            return {
+                'keystone-fid-service-provider': {}
+            }[rname]
+
+        mock_relation_ids.side_effect = relation_ids_side_effect
+        ctxt = context.KeystoneFIDServiceProviderContext()
+
+        self.maxDiff = None
+        self.assertItemsEqual(ctxt(), {})
+
+    @patch.object(context, 'relation_ids')
+    @patch.object(context, 'related_units')
+    @patch.object(context, 'relation_get')
+    def test_websso_trusted_dashboard_urls_generated(
+            self, mock_relation_get, mock_related_units,
+            mock_relation_ids):
+        os.environ['JUJU_UNIT_NAME'] = 'keystone'
+
+        def relation_ids_side_effect(rname):
+            return {
+                'websso-trusted-dashboard': {
+                    'websso-trusted-dashboard:0',
+                    'websso-trusted-dashboard:1',
+                    'websso-trusted-dashboard:2'
+                }
+            }[rname]
+
+        mock_relation_ids.side_effect = relation_ids_side_effect
+
+        def related_units_side_effect(rid):
+            return {
+                'websso-trusted-dashboard:0': ['dashboard-blue/0',
+                                               'dashboard-blue/1'],
+                'websso-trusted-dashboard:1': ['dashboard-red/0',
+                                               'dashboard-red/1'],
+                'websso-trusted-dashboard:2': ['dashboard-green/0',
+                                               'dashboard-green/1']
+            }[rid]
+        mock_related_units.side_effect = related_units_side_effect
+
+        def relation_get_side_effect(unit, rid):
+            return {
+                "websso-trusted-dashboard:0": {
+                    "dashboard-blue/0": {  # dns-ha
+                        "ingress-address": '10.0.0.10',
+                        "scheme": "https://",
+                        "hostname": "horizon.intranet.test",
+                        "path": "/auth/websso/",
+                    },
+                    "dashboard-blue/1": {  # dns-ha
+                        "ingress-address": '10.0.0.11',
+                        "scheme": "https://",
+                        "hostname": "horizon.intranet.test",
+                        "path": "/auth/websso/",
+                    },
+                },
+                "websso-trusted-dashboard:1": {
+                    "dashboard-red/0": {  # vip
+                        "ingress-address": '10.0.0.12',
+                        "scheme": "https://",
+                        "hostname": "10.0.0.100",
+                        "path": "/auth/websso/",
+                    },
+                    "dashboard-red/1": {  # vip
+                        "ingress-address": '10.0.0.13',
+                        "scheme": "https://",
+                        "hostname": "10.0.0.100",
+                        "path": "/auth/websso/",
+                    },
+                },
+                "websso-trusted-dashboard:2": {
+                    "dashboard-green/0": {  # vip-less, dns-ha-less
+                        "ingress-address": '10.0.0.14',
+                        "scheme": "http://",
+                        "hostname": "10.0.0.14",
+                        "path": "/auth/websso/",
+                    },
+                    "dashboard-green/1": {
+                        "ingress-address": '10.0.0.15',
+                        "scheme": "http://",
+                        "hostname": "10.0.0.15",
+                        "path": "/auth/websso/",
+                    },
+                },
+            }[rid][unit]
+
+        mock_relation_get.side_effect = relation_get_side_effect
+        ctxt = context.WebSSOTrustedDashboardContext()
+
+        self.maxDiff = None
+        self.assertEqual(
+            ctxt(),
+            {
+                'trusted_dashboards': set([
+                    'https://horizon.intranet.test/auth/websso/',
+                    'https://10.0.0.100/auth/websso/',
+                    'http://10.0.0.14/auth/websso/',
+                    'http://10.0.0.15/auth/websso/',
+                ])
+            }
+        )
+
+    @patch.object(context, 'relation_ids')
+    def test_websso_trusted_dashboard_empty(
+            self, mock_relation_ids):
+        os.environ['JUJU_UNIT_NAME'] = 'keystone'
+
+        def relation_ids_side_effect(rname):
+            return {
+                'websso-trusted-dashboard': {}
+            }[rname]
+
+        mock_relation_ids.side_effect = relation_ids_side_effect
+        ctxt = context.WebSSOTrustedDashboardContext()
+
+        self.maxDiff = None
+        self.assertItemsEqual(ctxt(), {})
diff --git a/unit_tests/test_keystone_hooks.py b/unit_tests/test_keystone_hooks.py
index b7bd442b2ed48a003562481eb7ce5ea383f96719..c82ec961d5d84497226d8b8d4ff2b4dc0dc092ef 100644
--- a/unit_tests/test_keystone_hooks.py
+++ b/unit_tests/test_keystone_hooks.py
@@ -13,8 +13,6 @@
 # limitations under the License.
 
 import os
-import uuid
-import yaml
 import sys
 
 from mock import call, patch, MagicMock
@@ -44,8 +42,6 @@ with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
     with patch('keystone_utils.run_in_apache') as mock_run_in_apache:
         import keystone_hooks as hooks
 
-from charmhelpers.contrib import unison
-
 utils.register_configs = _reg
 utils.restart_map = _map
 
@@ -53,9 +49,7 @@ TO_PATCH = [
     # charmhelpers.core.hookenv
     'Hooks',
     'config',
-    'is_relation_made',
     'log',
-    'local_unit',
     'filter_installed_packages',
     'relation_ids',
     'relation_set',
@@ -71,7 +65,6 @@ TO_PATCH = [
     'service_restart',
     # charmhelpers.contrib.openstack.utils
     'configure_installation_source',
-    'git_install_requested',
     'snap_install_requested',
     # charmhelpers.contrib.openstack.ip
     'resolve_address',
@@ -92,22 +85,16 @@ TO_PATCH = [
     'migrate_database',
     'ensure_initial_admin',
     'add_service_to_keystone',
-    'synchronize_ca_if_changed',
     'update_nrpe_config',
-    'ensure_ssl_dirs',
     'is_db_ready',
-    'keystone_service',
     'create_or_show_domain',
     'get_api_version',
     # other
     'check_call',
     'execd_preinstall',
-    'mkdir',
-    'os',
     # ip
     'get_iface_for_address',
     'get_netmask_for_address',
-    'git_install',
     'is_service_present',
     'delete_service_entry',
     'os_release',
@@ -128,90 +115,40 @@ class KeystoneRelationTests(CharmTestCase):
         self.snap_install_requested.return_value = False
 
     @patch.object(utils, 'os_release')
-    @patch.object(utils, 'git_install_requested')
-    @patch.object(unison, 'ensure_user')
     @patch.object(hooks, 'service_stop', lambda *args: None)
     @patch.object(hooks, 'service_start', lambda *args: None)
-    def test_install_hook(self, ensure_user, git_requested, os_release):
+    def test_install_hook(self, os_release):
         os_release.return_value = 'havana'
-        git_requested.return_value = False
         self.run_in_apache.return_value = False
         repo = 'cloud:precise-grizzly'
         self.test_config.set('openstack-origin', repo)
         hooks.install()
         self.assertTrue(self.execd_preinstall.called)
         self.configure_installation_source.assert_called_with(repo)
-        ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
         self.assertTrue(self.apt_update.called)
         self.apt_install.assert_called_with(
             ['apache2', 'haproxy', 'keystone', 'openssl', 'pwgen',
              'python-keystoneclient', 'python-mysqldb', 'python-psycopg2',
-             'python-requests', 'python-six', 'unison', 'uuid'], fatal=True)
-        self.git_install.assert_called_with(None)
+             'python-requests', 'python-six', 'uuid'], fatal=True)
         self.disable_unused_apache_sites.assert_not_called()
 
     @patch.object(utils, 'os_release')
-    @patch.object(utils, 'git_install_requested')
-    @patch.object(unison, 'ensure_user')
     @patch.object(hooks, 'service_stop', lambda *args: None)
     @patch.object(hooks, 'service_start', lambda *args: None)
-    def test_install_hook_apache2(self, ensure_user,
-                                  git_requested, os_release):
+    def test_install_hook_apache2(self, os_release):
         os_release.return_value = 'havana'
-        git_requested.return_value = False
         self.run_in_apache.return_value = True
         repo = 'cloud:xenial-newton'
         self.test_config.set('openstack-origin', repo)
-        self.os.path.exists.return_value = True
         hooks.install()
         self.assertTrue(self.execd_preinstall.called)
         self.configure_installation_source.assert_called_with(repo)
-        ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
         self.assertTrue(self.apt_update.called)
         self.apt_install.assert_called_with(
             ['apache2', 'haproxy', 'keystone', 'openssl', 'pwgen',
              'python-keystoneclient', 'python-mysqldb', 'python-psycopg2',
-             'python-requests', 'python-six', 'unison', 'uuid'], fatal=True)
-        self.git_install.assert_called_with(None)
+             'python-requests', 'python-six', 'uuid'], fatal=True)
         self.disable_unused_apache_sites.assert_called_with()
-
-    @patch.object(utils, 'os_release')
-    @patch.object(utils, 'git_install_requested')
-    @patch.object(unison, 'ensure_user')
-    @patch.object(hooks, 'service_stop', lambda *args: None)
-    @patch.object(hooks, 'service_start', lambda *args: None)
-    def test_install_hook_git(self, ensure_user, git_requested, os_release):
-        os_release.return_value = 'havana'
-        git_requested.return_value = True
-        repo = 'cloud:trusty-juno'
-        openstack_origin_git = {
-            'repositories': [
-                {'name': 'requirements',
-                 'repository': 'git://git.openstack.org/openstack/requirements',  # noqa
-                 'branch': 'stable/juno'},
-                {'name': 'keystone',
-                 'repository': 'git://git.openstack.org/openstack/keystone',
-                 'branch': 'stable/juno'}
-            ],
-            'directory': '/mnt/openstack-git',
-        }
-        projects_yaml = yaml.dump(openstack_origin_git)
-        self.test_config.set('openstack-origin', repo)
-        self.test_config.set('openstack-origin-git', projects_yaml)
-        hooks.install()
-        self.assertTrue(self.execd_preinstall.called)
-        self.configure_installation_source.assert_called_with(repo)
-        ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
-        self.assertTrue(self.apt_update.called)
-        self.apt_install.assert_called_with(
-            ['apache2', 'haproxy', 'libffi-dev', 'libmysqlclient-dev',
-             'libssl-dev', 'libxml2-dev', 'libxslt1-dev', 'libyaml-dev',
-             'openssl', 'pwgen', 'python-dev', 'python-keystoneclient',
-             'python-mysqldb', 'python-pip', 'python-psycopg2',
-             'python-requests', 'python-setuptools', 'python-six', 'unison',
-             'uuid', 'zlib1g-dev'], fatal=True)
-        self.git_install.assert_called_with(projects_yaml)
-
     mod_ch_openstack_utils = 'charmhelpers.contrib.openstack.utils'
 
     @patch.object(utils, 'os_release')
@@ -239,44 +176,15 @@ class KeystoneRelationTests(CharmTestCase):
         mock_config.side_effect = cfg
 
         self.get_relation_ip.return_value = '192.168.20.1'
-        self.is_relation_made.return_value = False
         hooks.db_joined()
         self.relation_set.assert_called_with(database='keystone',
                                              username='keystone',
                                              hostname='192.168.20.1')
 
-    def test_postgresql_db_joined(self):
-        self.is_relation_made.return_value = False
-        hooks.pgsql_db_joined()
-        self.relation_set.assert_called_with(database='keystone'),
-
-    def test_db_joined_with_postgresql(self):
-        self.is_relation_made.return_value = True
-
-        with self.assertRaises(Exception) as context:
-            hooks.db_joined()
-        self.assertEqual(
-            context.exception.message,
-            'Attempting to associate a mysql database when there '
-            'is already associated a postgresql one')
-
-    def test_postgresql_joined_with_db(self):
-        self.is_relation_made.return_value = True
-
-        with self.assertRaises(Exception) as context:
-            hooks.pgsql_db_joined()
-        self.assertEqual(
-            context.exception.message,
-            'Attempting to associate a postgresql database when there '
-            'is already associated a mysql one')
-
     @patch('keystone_utils.log')
-    @patch('keystone_utils.ensure_ssl_cert_master')
     @patch.object(hooks, 'CONFIGS')
     def test_db_changed_missing_relation_data(self, configs,
-                                              mock_ensure_ssl_cert_master,
                                               mock_log):
-        mock_ensure_ssl_cert_master.return_value = False
         configs.complete_contexts = MagicMock()
         configs.complete_contexts.return_value = []
         hooks.db_changed()
@@ -284,98 +192,39 @@ class KeystoneRelationTests(CharmTestCase):
             'shared-db relation incomplete. Peer not ready?'
         )
 
-    @patch('keystone_utils.log')
-    @patch('keystone_utils.ensure_ssl_cert_master')
-    @patch.object(hooks, 'CONFIGS')
-    def test_postgresql_db_changed_missing_relation_data(self, configs,
-                                                         mock_ensure_leader,
-                                                         mock_log):
-        mock_ensure_leader.return_value = False
-        configs.complete_contexts = MagicMock()
-        configs.complete_contexts.return_value = []
-        hooks.pgsql_db_changed()
-        self.log.assert_called_with(
-            'pgsql-db relation incomplete. Peer not ready?'
-        )
-
-    def _shared_db_test(self, configs, unit_name):
+    @patch.object(hooks, 'update_all_identity_relation_units')
+    def _shared_db_test(self, configs, unit_name, mock_update_all):
         self.relation_get.return_value = 'keystone/0 keystone/3'
-        self.local_unit.return_value = unit_name
         configs.complete_contexts = MagicMock()
         configs.complete_contexts.return_value = ['shared-db']
         configs.write = MagicMock()
         hooks.db_changed()
 
-    def _postgresql_db_test(self, configs):
-        configs.complete_contexts = MagicMock()
-        configs.complete_contexts.return_value = ['pgsql-db']
-        configs.write = MagicMock()
-        hooks.pgsql_db_changed()
-
     @patch.object(hooks, 'leader_init_db_if_ready')
-    @patch('keystone_utils.ensure_ssl_cert_master')
     @patch.object(hooks, 'CONFIGS')
-    def test_db_changed(self, configs,
-                        mock_ensure_ssl_cert_master,
-                        leader_init):
+    def test_db_changed(self, configs, leader_init):
         self.os_release.return_value = 'havana'
-        mock_ensure_ssl_cert_master.return_value = False
         self._shared_db_test(configs, 'keystone/3')
         self.assertEqual([call('/etc/keystone/keystone.conf')],
                          configs.write.call_args_list)
         self.assertTrue(leader_init.called)
 
-    @patch.object(hooks, 'leader_init_db_if_ready')
-    @patch('keystone_utils.ensure_ssl_cert_master')
-    @patch.object(hooks, 'CONFIGS')
-    def test_postgresql_db_changed(self, configs,
-                                   mock_ensure_ssl_cert_master,
-                                   leader_init):
-        self.os_release.return_value = 'havana'
-        mock_ensure_ssl_cert_master.return_value = False
-        self._postgresql_db_test(configs)
-        self.assertEqual([call('/etc/keystone/keystone.conf')],
-                         configs.write.call_args_list)
-        self.assertTrue(leader_init.called)
-
     @patch.object(hooks, 'update_all_domain_backends')
     @patch.object(hooks, 'update_all_identity_relation_units')
     @patch.object(hooks, 'run_in_apache')
     @patch.object(hooks, 'is_db_initialised')
-    @patch.object(hooks, 'git_install_requested')
     @patch('keystone_utils.log')
-    @patch('keystone_utils.ensure_ssl_cert_master')
-    @patch('keystone_utils.ensure_ssl_dirs')
-    @patch.object(hooks, 'ensure_permissions')
-    @patch.object(hooks, 'ensure_pki_cert_paths')
-    @patch.object(hooks, 'ensure_pki_dir_permissions')
-    @patch.object(hooks, 'ensure_ssl_dir')
-    @patch.object(hooks, 'is_ssl_cert_master')
-    @patch.object(hooks, 'send_ssl_sync_request')
-    @patch.object(hooks, 'peer_units')
     @patch.object(hooks, 'admin_relation_changed')
     @patch.object(hooks, 'cluster_joined')
-    @patch.object(unison, 'ensure_user')
-    @patch.object(unison, 'get_homedir')
     @patch.object(hooks, 'CONFIGS')
     @patch.object(hooks, 'identity_changed')
     @patch.object(hooks, 'configure_https')
     def test_config_changed_no_upgrade_leader(self, configure_https,
                                               identity_changed,
-                                              configs, get_homedir,
-                                              ensure_user,
+                                              configs,
                                               mock_cluster_joined,
                                               admin_relation_changed,
-                                              mock_peer_units,
-                                              mock_send_ssl_sync_request,
-                                              mock_is_ssl_cert_master,
-                                              mock_ensure_ssl_dir,
-                                              mock_ensure_pki_cert_paths,
-                                              mock_ensure_permissions,
-                                              mock_ensure_pki_dir_permissions,
-                                              mock_ensure_ssl_dirs,
-                                              mock_ensure_ssl_cert_master,
-                                              mock_log, git_requested,
+                                              mock_log,
                                               mock_is_db_initialised,
                                               mock_run_in_apache,
                                               update,
@@ -390,20 +239,12 @@ class KeystoneRelationTests(CharmTestCase):
         self.relation_ids.side_effect = fake_relation_ids
 
         mock_run_in_apache.return_value = False
-        git_requested.return_value = False
-        mock_is_ssl_cert_master.return_value = True
         mock_is_db_initialised.return_value = True
         self.is_db_ready.return_value = True
         self.openstack_upgrade_available.return_value = False
-        self.is_elected_leader.return_value = True
-        # avoid having to mock syncer
-        mock_ensure_ssl_cert_master.return_value = False
-        mock_peer_units.return_value = []
         self.related_units.return_value = ['unit/0']
 
         hooks.config_changed()
-        ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
-        get_homedir.assert_called_with(self.ssh_user)
 
         self.save_script_rc.assert_called_with()
         configure_https.assert_called_with()
@@ -417,36 +258,16 @@ class KeystoneRelationTests(CharmTestCase):
     @patch.object(hooks, 'update_all_domain_backends')
     @patch.object(hooks, 'update_all_identity_relation_units')
     @patch.object(hooks, 'run_in_apache')
-    @patch.object(hooks, 'git_install_requested')
     @patch('keystone_utils.log')
-    @patch('keystone_utils.ensure_ssl_cert_master')
-    @patch('keystone_utils.ensure_ssl_dirs')
-    @patch.object(hooks, 'ensure_permissions')
-    @patch.object(hooks, 'ensure_pki_cert_paths')
-    @patch.object(hooks, 'ensure_pki_dir_permissions')
-    @patch.object(hooks, 'ensure_ssl_dir')
-    @patch.object(hooks, 'peer_units')
-    @patch.object(hooks, 'is_ssl_cert_master')
     @patch.object(hooks, 'cluster_joined')
-    @patch.object(unison, 'ensure_user')
-    @patch.object(unison, 'get_homedir')
     @patch.object(hooks, 'CONFIGS')
     @patch.object(hooks, 'identity_changed')
     @patch.object(hooks, 'configure_https')
     def test_config_changed_no_upgrade_not_leader(self, configure_https,
                                                   identity_changed,
-                                                  configs, get_homedir,
-                                                  ensure_user,
+                                                  configs,
                                                   mock_cluster_joined,
-                                                  mock_is_ssl_cert_master,
-                                                  mock_peer_units,
-                                                  mock_ensure_ssl_dir,
-                                                  mock_ensure_permissions,
-                                                  mock_ensure_pki_cert_paths,
-                                                  mock_ensure_pki_permissions,
-                                                  ensure_ssl_dirs,
-                                                  mock_ensure_ssl_cert_master,
-                                                  mock_log, git_requested,
+                                                  mock_log,
                                                   mock_run_in_apache, update,
                                                   mock_update_domains):
 
@@ -459,16 +280,9 @@ class KeystoneRelationTests(CharmTestCase):
         self.relation_ids.side_effect = fake_relation_ids
 
         mock_run_in_apache.return_value = False
-        git_requested.return_value = False
-        mock_is_ssl_cert_master.return_value = True
-        mock_peer_units.return_value = []
         self.openstack_upgrade_available.return_value = False
-        self.is_elected_leader.return_value = False
-        mock_ensure_ssl_cert_master.return_value = False
 
         hooks.config_changed()
-        ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
-        get_homedir.assert_called_with(self.ssh_user)
 
         self.assertFalse(mock_cluster_joined.called)
         self.save_script_rc.assert_called_with()
@@ -483,39 +297,18 @@ class KeystoneRelationTests(CharmTestCase):
     @patch.object(hooks, 'update_all_identity_relation_units')
     @patch.object(hooks, 'run_in_apache')
     @patch.object(hooks, 'is_db_initialised')
-    @patch.object(hooks, 'git_install_requested')
     @patch('keystone_utils.log')
-    @patch('keystone_utils.ensure_ssl_cert_master')
-    @patch('keystone_utils.ensure_ssl_dirs')
-    @patch.object(hooks, 'ensure_permissions')
-    @patch.object(hooks, 'ensure_pki_cert_paths')
-    @patch.object(hooks, 'ensure_pki_dir_permissions')
-    @patch.object(hooks, 'ensure_ssl_dir')
-    @patch.object(hooks, 'is_ssl_cert_master')
-    @patch.object(hooks, 'send_ssl_sync_request')
-    @patch.object(hooks, 'peer_units')
     @patch.object(hooks, 'admin_relation_changed')
     @patch.object(hooks, 'cluster_joined')
-    @patch.object(unison, 'ensure_user')
-    @patch.object(unison, 'get_homedir')
     @patch.object(hooks, 'CONFIGS')
     @patch.object(hooks, 'identity_changed')
     @patch.object(hooks, 'configure_https')
     def test_config_changed_with_openstack_upgrade(self, configure_https,
                                                    identity_changed,
-                                                   configs, get_homedir,
-                                                   ensure_user, cluster_joined,
+                                                   configs,
+                                                   cluster_joined,
                                                    admin_relation_changed,
-                                                   mock_peer_units,
-                                                   mock_send_ssl_sync_request,
-                                                   mock_is_ssl_cert_master,
-                                                   mock_ensure_ssl_dir,
-                                                   mock_ensure_permissions,
-                                                   mock_ensure_pki_cert_paths,
-                                                   mock_ensure_pki_permissions,
-                                                   mock_ensure_ssl_dirs,
-                                                   mock_ensure_ssl_cert_master,
-                                                   mock_log, git_requested,
+                                                   mock_log,
                                                    mock_is_db_initialised,
                                                    mock_run_in_apache,
                                                    update,
@@ -529,20 +322,12 @@ class KeystoneRelationTests(CharmTestCase):
         self.relation_ids.side_effect = fake_relation_ids
 
         mock_run_in_apache.return_value = False
-        git_requested.return_value = False
-        mock_is_ssl_cert_master.return_value = True
         self.is_db_ready.return_value = True
         mock_is_db_initialised.return_value = True
         self.openstack_upgrade_available.return_value = True
-        self.is_elected_leader.return_value = True
-        # avoid having to mock syncer
-        mock_ensure_ssl_cert_master.return_value = False
-        mock_peer_units.return_value = []
         self.related_units.return_value = ['unit/0']
 
         hooks.config_changed()
-        ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
-        get_homedir.assert_called_with(self.ssh_user)
 
         self.assertTrue(self.do_openstack_upgrade_reexec.called)
 
@@ -553,104 +338,19 @@ class KeystoneRelationTests(CharmTestCase):
         self.assertTrue(update.called)
         self.assertTrue(mock_update_domains.called)
 
-    @patch.object(hooks, 'update_all_domain_backends')
-    @patch.object(hooks, 'update_all_identity_relation_units')
-    @patch.object(hooks, 'run_in_apache')
-    @patch.object(hooks, 'initialise_pki')
-    @patch.object(hooks, 'git_install_requested')
-    @patch.object(hooks, 'config_value_changed')
-    @patch('keystone_utils.log')
-    @patch('keystone_utils.ensure_ssl_cert_master')
-    @patch.object(hooks, 'ensure_ssl_dir')
-    @patch.object(hooks, 'send_ssl_sync_request')
-    @patch.object(hooks, 'is_db_initialised')
-    @patch.object(hooks, 'is_db_ready')
-    @patch.object(hooks, 'peer_units')
-    @patch.object(hooks, 'admin_relation_changed')
-    @patch.object(hooks, 'cluster_joined')
-    @patch.object(unison, 'ensure_user')
-    @patch.object(unison, 'get_homedir')
-    @patch.object(hooks, 'CONFIGS')
-    @patch.object(hooks, 'identity_changed')
-    @patch.object(hooks, 'configure_https')
-    def test_config_changed_git_updated(self, configure_https,
-                                        identity_changed,
-                                        configs, get_homedir, ensure_user,
-                                        cluster_joined, admin_relation_changed,
-                                        mock_peer_units,
-                                        mock_is_db_ready,
-                                        mock_is_db_initialised,
-                                        mock_send_ssl_sync_request,
-                                        mock_ensure_ssl_dir,
-                                        mock_ensure_ssl_cert_master,
-                                        mock_log, config_val_changed,
-                                        git_requested,
-                                        mock_initialise_pki,
-                                        mock_run_in_apache,
-                                        update,
-                                        mock_update_domains):
-        self.enable_memcache.return_value = False
-        mock_run_in_apache.return_value = False
-        git_requested.return_value = True
-        mock_ensure_ssl_cert_master.return_value = False
-        self.openstack_upgrade_available.return_value = False
-        self.is_elected_leader.return_value = True
-        mock_peer_units.return_value = []
-        self.relation_ids.return_value = ['identity-service:0']
-        self.related_units.return_value = ['unit/0']
-
-        repo = 'cloud:trusty-juno'
-        openstack_origin_git = {
-            'repositories': [
-                {'name': 'requirements',
-                 'repository': 'git://git.openstack.org/openstack/requirements',  # noqa
-                 'branch': 'stable/juno'},
-                {'name': 'keystone',
-                 'repository': 'git://git.openstack.org/openstack/keystone',
-                 'branch': 'stable/juno'}
-            ],
-            'directory': '/mnt/openstack-git',
-        }
-        projects_yaml = yaml.dump(openstack_origin_git)
-        self.test_config.set('openstack-origin', repo)
-        self.test_config.set('openstack-origin-git', projects_yaml)
-        hooks.config_changed()
-        self.git_install.assert_called_with(projects_yaml)
-        self.assertFalse(self.openstack_upgrade_available.called)
-        self.assertFalse(self.do_openstack_upgrade_reexec.called)
-        self.assertTrue(update.called)
-        self.assertTrue(mock_update_domains.called)
-
+    @patch.object(hooks, 'os_release')
     @patch.object(hooks, 'run_in_apache')
-    @patch.object(hooks, 'initialise_pki')
     @patch.object(hooks, 'is_db_initialised')
-    @patch.object(hooks, 'git_install_requested')
-    @patch.object(hooks, 'config_value_changed')
-    @patch.object(hooks, 'ensure_ssl_dir')
     @patch.object(hooks, 'configure_https')
-    @patch.object(hooks, 'is_ssl_cert_master')
-    @patch.object(hooks, 'peer_units')
-    @patch.object(unison, 'get_homedir')
-    @patch.object(unison, 'ensure_user')
-    @patch('keystone_utils.ensure_ssl_cert_master')
     def test_config_changed_with_openstack_upgrade_action(self,
-                                                          ensure_ssl_cert,
-                                                          ensure_user,
-                                                          get_home,
-                                                          peer_units, is_ssl,
                                                           config_https,
-                                                          ensure_ssl_dir,
-                                                          config_value_changed,
-                                                          git_requested,
                                                           mock_db_init,
-                                                          mock_initialise_pki,
-                                                          mock_run_in_apache):
+                                                          mock_run_in_apache,
+                                                          os_release):
+        os_release.return_value = 'ocata'
         self.enable_memcache.return_value = False
         mock_run_in_apache.return_value = False
-        ensure_ssl_cert.return_value = False
-        peer_units.return_value = []
 
-        git_requested.return_value = False
         self.openstack_upgrade_available.return_value = True
         self.test_config.set('action-managed-upgrade', True)
 
@@ -660,17 +360,18 @@ class KeystoneRelationTests(CharmTestCase):
 
     @patch.object(hooks, 'is_db_initialised')
     @patch('keystone_utils.log')
-    @patch('keystone_utils.ensure_ssl_cert_master')
-    @patch.object(hooks, 'hashlib')
     @patch.object(hooks, 'send_notifications')
     def test_identity_changed_leader(self, mock_send_notifications,
-                                     mock_hashlib, mock_ensure_ssl_cert_master,
                                      mock_log, mock_is_db_initialised):
         self.expect_ha.return_value = False
         mock_is_db_initialised.return_value = True
         self.is_db_ready.return_value = True
         self.is_service_present.return_value = True
-        mock_ensure_ssl_cert_master.return_value = False
+        self.relation_get.return_value = {
+            'public_url': 'http://dummy.local',
+            'admin_url': 'http://dummy.local',
+            'internal_url': 'http://dummy.local',
+        }
         hooks.identity_changed(
             relation_id='identity-service:0',
             remote_unit='unit/0')
@@ -683,31 +384,26 @@ class KeystoneRelationTests(CharmTestCase):
 
     @patch.object(hooks, 'is_db_initialised')
     @patch('keystone_utils.log')
-    @patch('keystone_utils.ensure_ssl_cert_master')
-    @patch.object(hooks, 'hashlib')
     @patch.object(hooks, 'send_notifications')
     def test_identity_changed_leader_no_neutron(self, mock_send_notifications,
-                                                mock_hashlib,
-                                                mock_ensure_ssl_cert_master,
                                                 mock_log,
                                                 mock_is_db_initialised):
         self.expect_ha.return_value = False
         mock_is_db_initialised.return_value = True
         self.is_db_ready.return_value = True
         self.is_service_present.return_value = False
-        mock_ensure_ssl_cert_master.return_value = False
+        self.relation_get.return_value = {
+            'public_url': 'http://dummy.local',
+            'admin_url': 'http://dummy.local',
+            'internal_url': 'http://dummy.local',
+        }
         hooks.identity_changed(
             relation_id='identity-service:0',
             remote_unit='unit/0')
         self.assertFalse(self.delete_service_entry.called)
 
-    @patch.object(hooks, 'local_unit')
     @patch('keystone_utils.log')
-    @patch('keystone_utils.ensure_ssl_cert_master')
-    def test_identity_changed_no_leader(self, mock_ensure_ssl_cert_master,
-                                        mock_log, mock_local_unit):
-        mock_ensure_ssl_cert_master.return_value = False
-        mock_local_unit.return_value = 'unit/0'
+    def test_identity_changed_no_leader(self, mock_log):
         self.is_elected_leader.return_value = False
         hooks.identity_changed(
             relation_id='identity-service:0',
@@ -716,60 +412,18 @@ class KeystoneRelationTests(CharmTestCase):
         self.log.assert_called_with(
             'Deferring identity_changed() to service leader.')
 
-    @patch.object(hooks, 'send_ssl_sync_request')
-    @patch.object(hooks, 'local_unit')
-    @patch.object(hooks, 'peer_units')
-    @patch.object(unison, 'ssh_authorized_peers')
-    def test_cluster_joined(self, ssh_authorized_peers, mock_peer_units,
-                            mock_local_unit, mock_send_ssl_sync_request):
-        mock_local_unit.return_value = 'unit/0'
-        mock_peer_units.return_value = ['unit/0']
-        hooks.cluster_joined()
-        ssh_authorized_peers.assert_called_with(
-            user=self.ssh_user, group='juju_keystone',
-            peer_interface='cluster', ensure_local_user=True)
-        self.assertTrue(mock_send_ssl_sync_request.called)
-
-        mock_send_ssl_sync_request.reset_mock()
-        hooks.cluster_joined(rid='foo:1', ssl_sync_request=True)
-        self.assertTrue(mock_send_ssl_sync_request.called)
-
-        mock_send_ssl_sync_request.reset_mock()
-        hooks.cluster_joined(rid='foo:1', ssl_sync_request=False)
-        self.assertFalse(mock_send_ssl_sync_request.called)
-
-    @patch.object(hooks, 'relation_get_and_migrate')
-    @patch.object(hooks, 'initialise_pki')
     @patch.object(hooks, 'update_all_identity_relation_units')
-    @patch.object(hooks, 'get_ssl_sync_request_units')
-    @patch.object(hooks, 'is_ssl_cert_master')
-    @patch.object(hooks, 'peer_units')
     @patch('keystone_utils.relation_ids')
     @patch('keystone_utils.config')
     @patch('keystone_utils.log')
-    @patch('keystone_utils.ensure_ssl_cert_master')
-    @patch('keystone_utils.synchronize_ca')
-    @patch.object(hooks, 'check_peer_actions')
-    @patch.object(unison, 'ssh_authorized_peers')
     @patch.object(hooks, 'CONFIGS')
-    def test_cluster_changed(self, configs, ssh_authorized_peers,
-                             check_peer_actions, mock_synchronize_ca,
-                             mock_ensure_ssl_cert_master,
+    def test_cluster_changed(self, configs,
                              mock_log, mock_config, mock_relation_ids,
-                             mock_peer_units,
-                             mock_is_ssl_cert_master,
-                             mock_get_ssl_sync_request_units,
-                             mock_update_all_identity_relation_units,
-                             mock_initialise_pki,
-                             mock_relation_get_and_migrate):
+                             mock_update_all_identity_relation_units):
 
         relation_settings = {'foo_passwd': '123',
                              'identity-service:16_foo': 'bar'}
 
-        mock_relation_get_and_migrate.return_value = None
-        mock_is_ssl_cert_master.return_value = False
-        mock_peer_units.return_value = ['unit/0']
-        mock_ensure_ssl_cert_master.return_value = False
         mock_relation_ids.return_value = []
         self.is_leader.return_value = False
 
@@ -784,13 +438,8 @@ class KeystoneRelationTests(CharmTestCase):
         mock_config.return_value = None
 
         hooks.cluster_changed()
-        whitelist = ['_passwd', 'identity-service:', 'db-initialised',
-                     'ssl-cert-available-updates', 'ssl-cert-master']
+        whitelist = ['_passwd', 'identity-service:', 'db-initialised']
         self.peer_echo.assert_called_with(force=True, includes=whitelist)
-        ssh_authorized_peers.assert_called_with(
-            user=self.ssh_user, group='juju_keystone',
-            peer_interface='cluster', ensure_local_user=True)
-        self.assertFalse(mock_synchronize_ca.called)
         self.assertTrue(configs.write_all.called)
 
     @patch.object(hooks, 'update_all_identity_relation_units')
@@ -969,54 +618,40 @@ class KeystoneRelationTests(CharmTestCase):
         self.assertTrue(self.update_dns_ha_resource_params.called)
         self.relation_set.assert_called_with(**args)
 
+    @patch.object(utils, 'peer_retrieve')
     @patch('keystone_utils.log')
-    @patch('keystone_utils.ensure_ssl_cert_master')
-    @patch('keystone_utils.synchronize_ca')
     @patch.object(hooks, 'CONFIGS')
     def test_ha_relation_changed_not_clustered_not_leader(self, configs,
-                                                          mock_synchronize_ca,
-                                                          mock_is_master,
-                                                          mock_log):
-        mock_is_master.return_value = False
+                                                          mock_log,
+                                                          mock_peer_retrieve):
         self.relation_get.return_value = False
-        self.is_elected_leader.return_value = False
 
         hooks.ha_changed()
         self.assertTrue(configs.write_all.called)
-        self.assertFalse(mock_synchronize_ca.called)
 
-    @patch.object(hooks, 'is_ssl_cert_master')
-    @patch.object(hooks, 'update_all_identity_relation_units_force_sync')
+    @patch.object(hooks, 'update_all_identity_relation_units')
     @patch.object(hooks, 'is_db_initialised')
     @patch('keystone_utils.log')
-    @patch('keystone_utils.ensure_ssl_cert_master')
     @patch.object(hooks, 'identity_changed')
     @patch.object(hooks, 'CONFIGS')
     def test_ha_relation_changed_clustered_leader(self, configs,
                                                   identity_changed,
-                                                  mock_ensure_ssl_cert_master,
                                                   mock_log,
                                                   mock_is_db_initialised,
-                                                  update, cert_master):
+                                                  update):
         mock_is_db_initialised.return_value = True
         self.is_db_ready.return_value = True
-        mock_ensure_ssl_cert_master.return_value = False
         self.relation_get.return_value = True
-        self.is_elected_leader.return_value = True
         self.relation_ids.return_value = ['identity-service:0']
         self.related_units.return_value = ['unit/0']
-        cert_master.return_value = True
 
         hooks.ha_changed()
         self.assertTrue(configs.write_all.called)
         self.assertTrue(update.called)
 
     @patch('keystone_utils.log')
-    @patch('keystone_utils.ensure_ssl_cert_master')
     @patch.object(hooks, 'CONFIGS')
-    def test_configure_https_enable(self, configs, mock_ensure_ssl_cert_master,
-                                    mock_log):
-        mock_ensure_ssl_cert_master.return_value = False
+    def test_configure_https_enable(self, configs, mock_log):
         configs.complete_contexts = MagicMock()
         configs.complete_contexts.return_value = ['https']
         configs.write = MagicMock()
@@ -1027,12 +662,8 @@ class KeystoneRelationTests(CharmTestCase):
         self.check_call.assert_called_with(cmd)
 
     @patch('keystone_utils.log')
-    @patch('keystone_utils.ensure_ssl_cert_master')
     @patch.object(hooks, 'CONFIGS')
-    def test_configure_https_disable(self, configs,
-                                     mock_ensure_ssl_cert_master,
-                                     mock_log):
-        mock_ensure_ssl_cert_master.return_value = False
+    def test_configure_https_disable(self, configs, mock_log):
         configs.complete_contexts = MagicMock()
         configs.complete_contexts.return_value = ['']
         configs.write = MagicMock()
@@ -1044,47 +675,25 @@ class KeystoneRelationTests(CharmTestCase):
 
     @patch.object(hooks, 'update_all_identity_relation_units')
     @patch.object(utils, 'os_release')
-    @patch.object(utils, 'git_install_requested')
     @patch.object(hooks, 'is_db_ready')
     @patch.object(hooks, 'is_db_initialised')
     @patch('keystone_utils.log')
     @patch('keystone_utils.relation_ids')
-    @patch('keystone_utils.is_elected_leader')
-    @patch('keystone_utils.ensure_ssl_cert_master')
-    @patch('keystone_utils.update_hash_from_path')
-    @patch('keystone_utils.synchronize_ca')
-    @patch.object(unison, 'ssh_authorized_peers')
-    def test_upgrade_charm_leader(self, ssh_authorized_peers,
-                                  mock_synchronize_ca,
-                                  mock_update_hash_from_path,
-                                  mock_ensure_ssl_cert_master,
-                                  mock_is_elected_leader,
+    def test_upgrade_charm_leader(self,
                                   mock_relation_ids,
                                   mock_log,
                                   mock_is_db_initialised,
                                   mock_is_db_ready,
-                                  git_requested,
                                   os_release,
                                   update):
         os_release.return_value = 'havana'
         mock_is_db_initialised.return_value = True
         mock_is_db_ready.return_value = True
-        mock_is_elected_leader.return_value = False
         mock_relation_ids.return_value = []
-        mock_ensure_ssl_cert_master.return_value = True
-        # Ensure always returns diff
-        mock_update_hash_from_path.side_effect = \
-            lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4()))
 
-        self.is_elected_leader.return_value = True
         self.filter_installed_packages.return_value = []
-        git_requested.return_value = False
         hooks.upgrade_charm()
         self.assertTrue(self.apt_install.called)
-        ssh_authorized_peers.assert_called_with(
-            user=self.ssh_user, group='juju_keystone',
-            peer_interface='cluster', ensure_local_user=True)
-        self.assertTrue(mock_synchronize_ca.called)
         self.assertTrue(update.called)
 
     @patch.object(hooks, 'update_all_identity_relation_units')
@@ -1160,7 +769,6 @@ class KeystoneRelationTests(CharmTestCase):
                      call('Firing identity_credentials_changed hook for all '
                           'related services.')]
         hooks.update_all_identity_relation_units(check_db_ready=False)
-        self.assertTrue(configs.write_all.called)
         identity_changed.assert_called_with(
             relation_id='identity-relation:0',
             remote_unit='unit/0')
@@ -1176,7 +784,6 @@ class KeystoneRelationTests(CharmTestCase):
         """ Verify update identity relations when DB is not ready """
         self.is_db_ready.return_value = False
         hooks.update_all_identity_relation_units(check_db_ready=True)
-        self.assertTrue(configs.write_all.called)
         self.assertTrue(self.is_db_ready.called)
         self.log.assert_called_with('Allowed_units list provided and this '
                                     'unit not present', level='INFO')
@@ -1190,7 +797,6 @@ class KeystoneRelationTests(CharmTestCase):
         """ Verify update identity relations when DB is not initialized """
         is_db_initialized.return_value = False
         hooks.update_all_identity_relation_units(check_db_ready=False)
-        self.assertTrue(configs.write_all.called)
         self.assertFalse(self.is_db_ready.called)
         self.log.assert_called_with('Database not yet initialised - '
                                     'deferring identity-relation updates',
@@ -1206,7 +812,6 @@ class KeystoneRelationTests(CharmTestCase):
         self.is_elected_leader.return_value = True
         is_db_initialized.return_value = True
         hooks.update_all_identity_relation_units(check_db_ready=False)
-        self.assertTrue(configs.write_all.called)
         self.assertTrue(self.ensure_initial_admin.called)
         # Still updates relations
         self.assertTrue(self.relation_ids.called)
@@ -1220,40 +825,26 @@ class KeystoneRelationTests(CharmTestCase):
         self.is_elected_leader.return_value = False
         is_db_initialized.return_value = True
         hooks.update_all_identity_relation_units(check_db_ready=False)
-        self.assertTrue(configs.write_all.called)
         self.assertFalse(self.ensure_initial_admin.called)
         # Still updates relations
         self.assertTrue(self.relation_ids.called)
 
+    @patch.object(utils, 'peer_retrieve')
     @patch.object(hooks, 'update_all_identity_relation_units')
     @patch.object(utils, 'os_release')
-    @patch.object(utils, 'git_install_requested')
     @patch('keystone_utils.log')
     @patch('keystone_utils.relation_ids')
-    @patch('keystone_utils.ensure_ssl_cert_master')
-    @patch('keystone_utils.update_hash_from_path')
-    @patch.object(unison, 'ssh_authorized_peers')
-    def test_upgrade_charm_not_leader(self, ssh_authorized_peers,
-                                      mock_update_hash_from_path,
-                                      mock_ensure_ssl_cert_master,
+    def test_upgrade_charm_not_leader(self,
                                       mock_relation_ids,
-                                      mock_log, git_requested,
-                                      os_release, update):
+                                      mock_log,
+                                      os_release, update, mock_peer_retrieve):
         os_release.return_value = 'havana'
-        mock_relation_ids.return_value = []
-        mock_ensure_ssl_cert_master.return_value = False
-        # Ensure always returns diff
-        mock_update_hash_from_path.side_effect = \
-            lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4()))
 
-        self.is_elected_leader.return_value = False
         self.filter_installed_packages.return_value = []
-        git_requested.return_value = False
+        mock_peer_retrieve.return_value = 'true'
+        self.is_elected_leader.return_value = False
         hooks.upgrade_charm()
         self.assertTrue(self.apt_install.called)
-        ssh_authorized_peers.assert_called_with(
-            user=self.ssh_user, group='juju_keystone',
-            peer_interface='cluster', ensure_local_user=True)
         self.assertTrue(self.log.called)
         self.assertFalse(update.called)
 
@@ -1277,9 +868,14 @@ class KeystoneRelationTests(CharmTestCase):
 
     @patch.object(hooks, 'is_unit_paused_set')
     @patch.object(hooks, 'is_db_initialised')
+    @patch.object(utils, 'run_in_apache')
+    @patch.object(utils, 'service_restart')
     def test_domain_backend_changed_complete(self,
+                                             service_restart,
+                                             run_in_apache,
                                              is_db_initialised,
                                              is_unit_paused_set):
+        run_in_apache.return_value = True
         self.get_api_version.return_value = 3
         self.relation_get.side_effect = ['mydomain', 'nonce2']
         self.is_leader.return_value = True
@@ -1289,7 +885,6 @@ class KeystoneRelationTests(CharmTestCase):
         mock_kv.get.return_value = None
         self.unitdata.kv.return_value = mock_kv
         is_unit_paused_set.return_value = False
-        self.keystone_service.return_value = 'apache2'
 
         hooks.domain_backend_changed()
 
@@ -1303,16 +898,21 @@ class KeystoneRelationTests(CharmTestCase):
                  rid=None),
         ])
         self.create_or_show_domain.assert_called_with('mydomain')
-        self.service_restart.assert_called_with('apache2')
+        service_restart.assert_called_with('apache2')
         mock_kv.set.assert_called_with('domain-restart-nonce-mydomain',
                                        'nonce2')
         self.assertTrue(mock_kv.flush.called)
 
     @patch.object(hooks, 'is_unit_paused_set')
     @patch.object(hooks, 'is_db_initialised')
+    @patch.object(utils, 'run_in_apache')
+    @patch.object(utils, 'service_restart')
     def test_domain_backend_changed_complete_follower(self,
+                                                      service_restart,
+                                                      run_in_apache,
                                                       is_db_initialised,
                                                       is_unit_paused_set):
+        run_in_apache.return_value = True
         self.get_api_version.return_value = 3
         self.relation_get.side_effect = ['mydomain', 'nonce2']
         self.is_leader.return_value = False
@@ -1322,7 +922,6 @@ class KeystoneRelationTests(CharmTestCase):
         mock_kv.get.return_value = None
         self.unitdata.kv.return_value = mock_kv
         is_unit_paused_set.return_value = False
-        self.keystone_service.return_value = 'apache2'
 
         hooks.domain_backend_changed()
 
@@ -1337,7 +936,84 @@ class KeystoneRelationTests(CharmTestCase):
         ])
         # Only lead unit will create the domain
         self.assertFalse(self.create_or_show_domain.called)
-        self.service_restart.assert_called_with('apache2')
+        service_restart.assert_called_with('apache2')
         mock_kv.set.assert_called_with('domain-restart-nonce-mydomain',
                                        'nonce2')
         self.assertTrue(mock_kv.flush.called)
+
+    @patch.object(hooks, 'os_release')
+    @patch.object(hooks, 'relation_id')
+    @patch.object(hooks, 'is_unit_paused_set')
+    @patch.object(hooks, 'is_db_initialised')
+    @patch.object(utils, 'run_in_apache')
+    @patch.object(utils, 'service_restart')
+    def test_fid_service_provider_changed_complete(
+            self,
+            service_restart,
+            run_in_apache,
+            is_db_initialised,
+            is_unit_paused_set,
+            relation_id, os_release):
+        os_release.return_value = 'ocata'
+        rel = 'keystone-fid-service-provider:0'
+        relation_id.return_value = rel
+        run_in_apache.return_value = True
+        self.get_api_version.return_value = 3
+        self.relation_get.side_effect = ['"nonce2"']
+        self.is_leader.return_value = True
+        self.is_db_ready.return_value = True
+        is_db_initialised.return_value = True
+        mock_kv = MagicMock()
+        mock_kv.get.return_value = None
+        self.unitdata.kv.return_value = mock_kv
+        is_unit_paused_set.return_value = False
+
+        hooks.keystone_fid_service_provider_changed()
+
+        self.assertTrue(self.get_api_version.called)
+        self.relation_get.assert_has_calls([
+            call('restart-nonce'),
+        ])
+        service_restart.assert_called_with('apache2')
+        mock_kv.set.assert_called_with(
+            'fid-restart-nonce-{}'.format(rel), 'nonce2')
+        self.assertTrue(mock_kv.flush.called)
+
+    @patch.object(hooks, 'os_release')
+    @patch.object(hooks, 'relation_id')
+    @patch.object(hooks, 'is_unit_paused_set')
+    @patch.object(hooks, 'is_db_initialised')
+    @patch.object(utils, 'run_in_apache')
+    @patch.object(utils, 'service_restart')
+    def test_fid_service_provider_changed_complete_follower(
+            self,
+            service_restart,
+            run_in_apache,
+            is_db_initialised,
+            is_unit_paused_set,
+            relation_id, os_release):
+        os_release.return_value = 'ocata'
+        rel = 'keystone-fid-service-provider:0'
+        relation_id.return_value = rel
+        run_in_apache.return_value = True
+        self.get_api_version.return_value = 3
+        self.relation_get.side_effect = ['"nonce2"']
+        self.is_leader.return_value = False
+        self.is_db_ready.return_value = True
+        is_db_initialised.return_value = True
+        mock_kv = MagicMock()
+        mock_kv.get.return_value = None
+        self.unitdata.kv.return_value = mock_kv
+        is_unit_paused_set.return_value = False
+
+        hooks.keystone_fid_service_provider_changed()
+
+        self.assertTrue(self.get_api_version.called)
+        self.relation_get.assert_has_calls([
+            call('restart-nonce'),
+        ])
+        service_restart.assert_called_with('apache2')
+        mock_kv.set.assert_called_with(
+            'fid-restart-nonce-{}'.format(rel),
+            'nonce2')
+        self.assertTrue(mock_kv.flush.called)
diff --git a/unit_tests/test_keystone_utils.py b/unit_tests/test_keystone_utils.py
index ebfd5fddd19757cfa77dec9cbd18abbe3cef2f19..97a405305ecada697e79d0980a26212557971d17 100644
--- a/unit_tests/test_keystone_utils.py
+++ b/unit_tests/test_keystone_utils.py
@@ -15,7 +15,6 @@
 from mock import patch, call, MagicMock
 from test_utils import CharmTestCase
 import os
-from base64 import b64encode
 import subprocess
 
 os.environ['JUJU_UNIT_NAME'] = 'keystone'
@@ -30,7 +29,6 @@ TO_PATCH = [
     'config',
     'os_release',
     'log',
-    'get_ca',
     'create_role',
     'create_service_entry',
     'create_endpoint_template',
@@ -39,17 +37,11 @@ TO_PATCH = [
     'get_requested_roles',
     'get_service_password',
     'get_os_codename_install_source',
-    'git_clone_and_install',
-    'git_pip_venv_dir',
-    'git_src_dir',
     'grant_role',
     'configure_installation_source',
-    'is_elected_leader',
-    'is_ssl_cert_master',
     'https',
     'lsb_release',
     'peer_store_and_set',
-    'service_restart',
     'service_stop',
     'service_start',
     'snap_install_requested',
@@ -57,12 +49,10 @@ TO_PATCH = [
     'relation_set',
     'relation_ids',
     'relation_id',
-    'render',
     'local_unit',
     'related_units',
     'https',
     'peer_store',
-    'pip_install',
     # generic
     'apt_update',
     'apt_upgrade',
@@ -71,19 +61,9 @@ TO_PATCH = [
     'time',
     'pwgen',
     'os_application_version_set',
-    'is_leader',
     'reset_os_release',
 ]
 
-openstack_origin_git = \
-    """repositories:
-         - {name: requirements,
-            repository: 'git://git.openstack.org/openstack/requirements',
-            branch: stable/juno}
-         - {name: keystone,
-            repository: 'git://git.openstack.org/openstack/keystone',
-            branch: stable/juno}"""
-
 
 class TestKeystoneUtils(CharmTestCase):
 
@@ -107,6 +87,7 @@ class TestKeystoneUtils(CharmTestCase):
                 'contexts': [self.ctxt],
             }
         }
+        self.get_os_codename_install_source.return_value = 'icehouse'
 
     @patch('charmhelpers.contrib.openstack.templating.OSConfigRenderer')
     @patch('os.path.exists')
@@ -134,27 +115,21 @@ class TestKeystoneUtils(CharmTestCase):
         ]
         self.assertEqual(fake_renderer.register.call_args_list, ex_reg)
 
-    @patch.object(utils, 'git_determine_usr_bin')
     @patch.object(utils, 'snap_install_requested')
     @patch.object(utils, 'os')
     def test_resource_map_enable_memcache_mitaka(self, mock_os,
-                                                 snap_install_requested,
-                                                 git_determine_usr_bin):
+                                                 snap_install_requested):
         self.os_release.return_value = 'mitaka'
         snap_install_requested.return_value = False
-        git_determine_usr_bin.return_value = '/usr/bin'
         mock_os.path.exists.return_value = True
         self.assertTrue('/etc/memcached.conf' in utils.resource_map().keys())
 
-    @patch.object(utils, 'git_determine_usr_bin')
     @patch.object(utils, 'snap_install_requested')
     @patch.object(utils, 'os')
     def test_resource_map_enable_memcache_liberty(self, mock_os,
-                                                  snap_install_requested,
-                                                  git_determine_usr_bin):
+                                                  snap_install_requested):
         self.os_release.return_value = 'liberty'
         snap_install_requested.return_value = False
-        git_determine_usr_bin.return_value = '/usr/bin'
         mock_os.path.exists.return_value = True
         self.assertFalse('/etc/memcached.conf' in utils.resource_map().keys())
 
@@ -184,16 +159,6 @@ class TestKeystoneUtils(CharmTestCase):
             'memcached']
         self.assertEqual(set(ex), set(result))
 
-    @patch('charmhelpers.contrib.openstack.utils.config')
-    def test_determine_packages_git(self, _config):
-        self.os_release.return_value = 'havana'
-        _config.return_value = openstack_origin_git
-        result = utils.determine_packages()
-        ex = utils.BASE_PACKAGES + utils.BASE_GIT_PACKAGES
-        for p in utils.GIT_PACKAGE_BLACKLIST:
-            ex.remove(p)
-        self.assertEqual(set(ex), set(result))
-
     @patch('charmhelpers.contrib.openstack.utils.config')
     def test_determine_packages_snap_install(self, _config):
         self.os_release.return_value = 'mitaka'
@@ -203,6 +168,7 @@ class TestKeystoneUtils(CharmTestCase):
         ex = utils.BASE_PACKAGES_SNAP + ['memcached']
         self.assertEqual(set(ex), set(result))
 
+    @patch.object(utils, 'is_elected_leader')
     @patch.object(utils, 'disable_unused_apache_sites')
     @patch('os.path.exists')
     @patch.object(utils, 'run_in_apache')
@@ -210,11 +176,11 @@ class TestKeystoneUtils(CharmTestCase):
     @patch.object(utils, 'migrate_database')
     def test_openstack_upgrade_leader(
             self, migrate_database, determine_packages,
-            run_in_apache, os_path_exists, disable_unused_apache_sites):
+            run_in_apache, os_path_exists, disable_unused_apache_sites,
+            mock_is_elected_leader):
         configs = MagicMock()
         self.test_config.set('openstack-origin', 'cloud:xenial-newton')
         determine_packages.return_value = []
-        self.is_elected_leader.return_value = True
         os_path_exists.return_value = True
         run_in_apache.return_value = True
 
@@ -260,9 +226,8 @@ class TestKeystoneUtils(CharmTestCase):
     @patch.object(utils, 'get_api_version')
     @patch.object(utils, 'get_manager')
     @patch.object(utils, 'resolve_address')
-    @patch.object(utils, 'b64encode')
     def test_add_service_to_keystone_clustered_https_none_values(
-            self, b64encode, _resolve_address, _get_manager,
+            self, _resolve_address, _get_manager,
             _get_api_version, _leader_get):
         _get_api_version.return_value = 2
         _leader_get.return_value = None
@@ -270,11 +235,9 @@ class TestKeystoneUtils(CharmTestCase):
         remote_unit = 'unit/0'
         _resolve_address.return_value = '10.10.10.10'
         self.https.return_value = True
-        self.test_config.set('https-service-endpoints', 'True')
         self.test_config.set('vip', '10.10.10.10')
         self.test_config.set('admin-port', 80)
         self.test_config.set('service-port', 81)
-        b64encode.return_value = 'certificate'
         self.get_requested_roles.return_value = ['role1', ]
 
         self.relation_get.return_value = {'service': 'keystone',
@@ -291,12 +254,10 @@ class TestKeystoneUtils(CharmTestCase):
 
         relation_data = {'auth_host': '10.10.10.10',
                          'service_host': '10.10.10.10',
-                         'auth_protocol': 'https',
                          'service_protocol': 'https',
                          'auth_port': 80,
+                         'auth_protocol': 'https',
                          'service_port': 81,
-                         'https_keystone': 'True',
-                         'ca_cert': 'certificate',
                          'region': 'RegionOne',
                          'api_version': 2,
                          'admin_domain_id': None}
@@ -332,12 +293,15 @@ class TestKeystoneUtils(CharmTestCase):
         self.relation_ids.return_value = ['cluster/0']
 
         service_domain = None
+        service_domain_id = None
         service_role = 'Admin'
         if test_api_version > 2:
             service_domain = 'service_domain'
+            service_domain_id = '1234567890'
 
         mock_keystone = MagicMock()
         mock_keystone.resolve_tenant_id.return_value = 'tenant_id'
+        mock_keystone.resolve_domain_id.return_value = service_domain_id
         KeystoneManager.return_value = mock_keystone
 
         self.relation_get.return_value = {'service': 'keystone',
@@ -374,6 +338,7 @@ class TestKeystoneUtils(CharmTestCase):
                          'service_username': 'keystone',
                          'service_password': 'password',
                          'service_domain': service_domain,
+                         'service_domain_id': service_domain_id,
                          'service_tenant': 'tenant',
                          'https_keystone': '__null__',
                          'ssl_cert': '__null__', 'ssl_key': '__null__',
@@ -394,6 +359,8 @@ class TestKeystoneUtils(CharmTestCase):
                                                    **relation_data)
         self.relation_set.assert_called_with(relation_id=relation_id,
                                              **filtered)
+        if test_api_version > 2:
+            mock_keystone.resolve_domain_id.assert_called_with(service_domain)
 
     def test_add_service_to_keystone_no_clustered_no_https_complete_values_v3(
             self):
@@ -430,6 +397,49 @@ class TestKeystoneUtils(CharmTestCase):
                                         adminurl='10.0.0.2',
                                         internalurl='192.168.1.2')
 
+    @patch.object(utils, 'get_requested_roles')
+    @patch.object(utils, 'create_service_credentials')
+    @patch.object(utils, 'leader_get')
+    @patch('charmhelpers.contrib.openstack.ip.config')
+    @patch.object(utils, 'ensure_valid_service')
+    @patch.object(utils, 'add_endpoint')
+    @patch.object(utils, 'get_manager')
+    def test_add_service_to_keystone_multi_endpoints_bug_1739409(
+            self, KeystoneManager, add_endpoint, ensure_valid_service,
+            ip_config, leader_get, create_service_credentials,
+            get_requested_roles):
+        relation_id = 'identity-service:8'
+        remote_unit = 'nova-cloud-controller/0'
+        get_requested_roles.return_value = 'role1'
+        self.relation_get.return_value = {
+            'ec2_admin_url': 'http://10.5.0.16:8773/services/Cloud',
+            'ec2_internal_url': 'http://10.5.0.16:8773/services/Cloud',
+            'ec2_public_url': 'http://10.5.0.16:8773/services/Cloud',
+            'ec2_region': 'RegionOne',
+            'ec2_service': 'ec2',
+            'nova_admin_url': 'http://10.5.0.16:8774/v2/$(tenant_id)s',
+            'nova_internal_url': 'http://10.5.0.16:8774/v2/$(tenant_id)s',
+            'nova_public_url': 'http://10.5.0.16:8774/v2/$(tenant_id)s',
+            'nova_region': 'RegionOne',
+            'nova_service': 'nova',
+            'private-address': '10.5.0.16',
+            's3_admin_url': 'http://10.5.0.16:3333',
+            's3_internal_url': 'http://10.5.0.16:3333',
+            's3_public_url': 'http://10.5.0.16:3333',
+            's3_region': 'RegionOne',
+            's3_service': 's3'}
+
+        self.get_local_endpoint.return_value = 'http://localhost:80/v2.0/'
+        KeystoneManager.resolve_tenant_id.return_value = 'tenant_id'
+        leader_get.return_value = None
+
+        utils.add_service_to_keystone(
+            relation_id=relation_id,
+            remote_unit=remote_unit)
+        create_service_credentials.assert_called_once_with(
+            'ec2_nova_s3',
+            new_roles='role1')
+
     @patch.object(utils, 'set_service_password')
     @patch.object(utils, 'get_service_password')
     @patch.object(utils, 'user_exists')
@@ -629,12 +639,15 @@ class TestKeystoneUtils(CharmTestCase):
         mock_relation_set.assert_called_once_with(relation_id=relation_id,
                                                   relation_settings=settings)
 
+    @patch.object(utils, 'is_elected_leader')
     @patch.object(utils, 'peer_retrieve')
     @patch.object(utils, 'peer_store')
     def test_get_admin_passwd_pwd_set(self, mock_peer_store,
-                                      mock_peer_retrieve):
+                                      mock_peer_retrieve,
+                                      mock_is_elected_leader):
         mock_peer_retrieve.return_value = None
         self.test_config.set('admin-password', 'supersecret')
+        mock_is_elected_leader.return_value = True
         self.assertEqual(utils.get_admin_passwd(), 'supersecret')
         mock_peer_store.assert_called_once_with('admin_passwd', 'supersecret')
 
@@ -682,96 +695,6 @@ class TestKeystoneUtils(CharmTestCase):
         self.related_units.return_value = []
         self.assertTrue(utils.is_db_ready())
 
-    @patch.object(utils, 'peer_units')
-    def test_ensure_ssl_cert_master_ssl_no_peers(self, mock_peer_units):
-        def mock_rel_get(unit=None, **kwargs):
-            return None
-
-        self.relation_get.side_effect = mock_rel_get
-        self.relation_ids.return_value = ['cluster:0']
-        self.local_unit.return_value = 'unit/0'
-        self.related_units.return_value = []
-        mock_peer_units.return_value = []
-        # This should get ignored since we are overriding
-        self.is_ssl_cert_master.return_value = False
-        self.is_elected_leader.return_value = False
-        self.assertTrue(utils.ensure_ssl_cert_master())
-        settings = {'ssl-cert-master': 'unit/0'}
-        self.relation_set.assert_called_with(relation_id='cluster:0',
-                                             relation_settings=settings)
-
-    @patch.object(utils, 'peer_units')
-    def test_ensure_ssl_cert_master_ssl_master_no_peers(self,
-                                                        mock_peer_units):
-        def mock_rel_get(unit=None, **kwargs):
-            if unit == 'unit/0':
-                return 'unit/0'
-
-            return None
-
-        self.relation_get.side_effect = mock_rel_get
-        self.relation_ids.return_value = ['cluster:0']
-        self.local_unit.return_value = 'unit/0'
-        self.related_units.return_value = []
-        mock_peer_units.return_value = []
-        # This should get ignored since we are overriding
-        self.is_ssl_cert_master.return_value = False
-        self.is_elected_leader.return_value = False
-        self.assertTrue(utils.ensure_ssl_cert_master())
-        settings = {'ssl-cert-master': 'unit/0'}
-        self.relation_set.assert_called_with(relation_id='cluster:0',
-                                             relation_settings=settings)
-
-    @patch.object(utils, 'peer_units')
-    def test_ensure_ssl_cert_master_ssl_not_leader(self, mock_peer_units):
-        self.relation_ids.return_value = ['cluster:0']
-        self.local_unit.return_value = 'unit/0'
-        mock_peer_units.return_value = ['unit/1']
-        self.is_ssl_cert_master.return_value = False
-        self.is_elected_leader.return_value = False
-        self.assertFalse(utils.ensure_ssl_cert_master())
-        self.assertFalse(self.relation_set.called)
-
-    @patch.object(utils, 'peer_units')
-    def test_ensure_ssl_cert_master_is_leader_new_peer(self,
-                                                       mock_peer_units):
-        def mock_rel_get(unit=None, **kwargs):
-            if unit == 'unit/0':
-                return 'unit/0'
-
-            return 'unknown'
-
-        self.relation_get.side_effect = mock_rel_get
-        self.relation_ids.return_value = ['cluster:0']
-        self.local_unit.return_value = 'unit/0'
-        mock_peer_units.return_value = ['unit/1']
-        self.related_units.return_value = ['unit/1']
-        self.is_ssl_cert_master.return_value = False
-        self.is_elected_leader.return_value = True
-        self.assertFalse(utils.ensure_ssl_cert_master())
-        settings = {'ssl-cert-master': 'unit/0'}
-        self.relation_set.assert_called_with(relation_id='cluster:0',
-                                             relation_settings=settings)
-
-    @patch.object(utils, 'peer_units')
-    def test_ensure_ssl_cert_master_is_leader_no_new_peer(self,
-                                                          mock_peer_units):
-        def mock_rel_get(unit=None, **kwargs):
-            if unit == 'unit/0':
-                return 'unit/0'
-
-            return 'unit/0'
-
-        self.relation_get.side_effect = mock_rel_get
-        self.relation_ids.return_value = ['cluster:0']
-        self.local_unit.return_value = 'unit/0'
-        mock_peer_units.return_value = ['unit/1']
-        self.related_units.return_value = ['unit/1']
-        self.is_ssl_cert_master.return_value = False
-        self.is_elected_leader.return_value = True
-        self.assertFalse(utils.ensure_ssl_cert_master())
-        self.assertFalse(self.relation_set.called)
-
     @patch.object(utils, 'leader_set')
     @patch.object(utils, 'leader_get')
     @patch('charmhelpers.contrib.openstack.ip.unit_get')
@@ -806,109 +729,6 @@ class TestKeystoneUtils(CharmTestCase):
             region='RegionOne',
         )
 
-    @patch.object(utils, 'peer_units')
-    def test_ensure_ssl_cert_master_is_leader_bad_votes(self,
-                                                        mock_peer_units):
-        counter = {0: 0}
-
-        def mock_rel_get(unit=None, **kwargs):
-            """Returns a mix of votes."""
-            if unit == 'unit/0':
-                return 'unit/0'
-
-            ret = 'unit/%d' % (counter[0])
-            counter[0] += 1
-            return ret
-
-        self.relation_get.side_effect = mock_rel_get
-        self.relation_ids.return_value = ['cluster:0']
-        self.local_unit.return_value = 'unit/0'
-        mock_peer_units.return_value = ['unit/1']
-        self.related_units.return_value = ['unit/1']
-        self.is_ssl_cert_master.return_value = False
-        self.is_elected_leader.return_value = True
-        self.assertFalse(utils.ensure_ssl_cert_master())
-        self.assertFalse(self.relation_set.called)
-
-    @patch.object(utils, 'git_install_requested')
-    @patch.object(utils, 'git_post_install')
-    @patch.object(utils, 'git_pre_install')
-    def test_git_install(self, git_requested, git_pre, git_post):
-        projects_yaml = openstack_origin_git
-        git_requested.return_value = True
-        utils.git_install(projects_yaml)
-        self.assertTrue(git_pre.called)
-        self.git_clone_and_install.assert_called_with(openstack_origin_git,
-                                                      core_project='keystone')
-        self.assertTrue(git_post.called)
-
-    @patch.object(utils, 'mkdir')
-    @patch.object(utils, 'write_file')
-    @patch.object(utils, 'add_user_to_group')
-    @patch.object(utils, 'add_group')
-    @patch.object(utils, 'adduser')
-    def test_git_pre_install(self, adduser, add_group, add_user_to_group,
-                             write_file, mkdir):
-        utils.git_pre_install()
-        adduser.assert_called_with('keystone', shell='/bin/bash',
-                                   system_user=True,
-                                   home_dir='/var/lib/keystone')
-        add_group.assert_called_with('keystone', system_group=True)
-        add_user_to_group.assert_called_with('keystone', 'keystone')
-        expected = [
-            call('/var/lib/keystone', owner='keystone',
-                 group='keystone', perms=0755, force=False),
-            call('/var/lib/keystone/cache', owner='keystone',
-                 group='keystone', perms=0755, force=False),
-            call('/var/log/keystone', owner='keystone',
-                 group='keystone', perms=0755, force=False),
-        ]
-        self.assertEqual(mkdir.call_args_list, expected)
-        write_file.assert_called_with('/var/log/keystone/keystone.log',
-                                      '', owner='keystone', group='keystone',
-                                      perms=0600)
-
-    @patch('os.path.join')
-    @patch('os.path.exists')
-    @patch('os.symlink')
-    @patch('shutil.copytree')
-    @patch('shutil.rmtree')
-    @patch('subprocess.check_call')
-    def test_git_post_install(self, check_call, rmtree, copytree, symlink,
-                              exists, join):
-        self.os_release.return_value = 'havana'
-        projects_yaml = openstack_origin_git
-        join.return_value = 'joined-string'
-        self.git_pip_venv_dir.return_value = '/mnt/openstack-git/venv'
-        self.lsb_release.return_value = {'DISTRIB_RELEASE': '15.04'}
-        utils.git_post_install(projects_yaml)
-        expected = [
-            call('joined-string', '/etc/keystone'),
-        ]
-        copytree.assert_has_calls(expected)
-        expected = [
-            call('joined-string', '/usr/local/bin/keystone-manage'),
-        ]
-        symlink.assert_has_calls(expected, any_order=True)
-        keystone_context = {
-            'service_description': 'Keystone API server',
-            'service_name': 'Keystone',
-            'user_name': 'keystone',
-            'start_dir': '/var/lib/keystone',
-            'process_name': 'keystone',
-            'executable_name': 'joined-string',
-            'config_files': ['/etc/keystone/keystone.conf'],
-            'log_file': '/var/log/keystone/keystone.log',
-        }
-        expected = [
-            call('git/logging.conf', '/etc/keystone/logging.conf', {},
-                 perms=0o644),
-            call('git.upstart', '/etc/init/keystone.conf',
-                 keystone_context, perms=0o644, templates_dir='joined-string'),
-        ]
-        self.assertEqual(self.render.call_args_list, expected)
-        self.service_restart.assert_called_with('keystone')
-
     @patch.object(utils, 'get_manager')
     def test_is_service_present(self, KeystoneManager):
         mock_keystone = MagicMock()
@@ -1065,16 +885,6 @@ class TestKeystoneUtils(CharmTestCase):
         protocol = utils.get_protocol()
         self.assertEqual(protocol, 'https')
 
-    def test_get_ssl_ca_settings(self):
-        CA = MagicMock()
-        CA.get_ca_bundle.return_value = 'certstring'
-        self.test_config.set('https-service-endpoints', 'True')
-        self.get_ca.return_value = CA
-        expected_settings = {'https_keystone': 'True',
-                             'ca_cert': b64encode('certstring')}
-        settings = utils.get_ssl_ca_settings()
-        self.assertEqual(settings, expected_settings)
-
     @patch.object(utils, 'get_manager')
     def test_add_credentials_keystone_not_ready(self, get_manager):
         """ Verify add_credentials_to_keystone when the relation
@@ -1260,67 +1070,6 @@ class TestKeystoneUtils(CharmTestCase):
         self.peer_store_and_set.assert_called_with(relation_id=relation_id,
                                                    **relation_data)
 
-    @patch.object(utils, 'set_service_password')
-    @patch.object(utils, 'get_service_password')
-    @patch.object(utils, 'get_ssl_ca_settings')
-    @patch.object(utils, 'create_user_credentials')
-    @patch.object(utils, 'get_protocol')
-    @patch.object(utils, 'resolve_address')
-    @patch.object(utils, 'get_api_version')
-    @patch.object(utils, 'get_manager')
-    def test_add_credentials_keystone_ssl(self, get_manager,
-                                          get_api_version,
-                                          resolve_address,
-                                          get_protocol,
-                                          create_user_credentials,
-                                          get_ssl_ca_settings,
-                                          get_callback, set_callback):
-        """ Verify add_credentials with SSL """
-        manager = MagicMock()
-        manager.resolve_tenant_id.return_value = 'abcdef0123456789'
-        get_manager.return_value = manager
-        remote_unit = 'unit/0'
-        relation_id = 'identity-credentials:0'
-        get_api_version.return_value = 2
-        get_protocol.return_value = 'https'
-        resolve_address.return_value = '10.10.10.10'
-        create_user_credentials.return_value = 'password'
-        get_ssl_ca_settings.return_value = {'https_keystone': 'True',
-                                            'ca_cert': 'base64certstring'}
-        self.relation_get.return_value = {'username': 'requester'}
-        self.get_service_password.return_value = 'password'
-        self.get_requested_roles.return_value = []
-        self.test_config.set('admin-port', 80)
-        self.test_config.set('service-port', 81)
-        self.test_config.set('https-service-endpoints', 'True')
-        relation_data = {'auth_host': '10.10.10.10',
-                         'credentials_host': '10.10.10.10',
-                         'credentials_port': 81,
-                         'auth_port': 80,
-                         'auth_protocol': 'https',
-                         'credentials_username': 'requester',
-                         'credentials_protocol': 'https',
-                         'credentials_password': 'password',
-                         'credentials_project': 'services',
-                         'credentials_project_id': 'abcdef0123456789',
-                         'region': 'RegionOne',
-                         'api_version': 2,
-                         'https_keystone': 'True',
-                         'ca_cert': 'base64certstring'}
-
-        utils.add_credentials_to_keystone(
-            relation_id=relation_id,
-            remote_unit=remote_unit)
-        create_user_credentials.assert_called_with('requester',
-                                                   get_callback,
-                                                   set_callback,
-                                                   domain=None,
-                                                   new_roles=[],
-                                                   grants=['Admin'],
-                                                   tenant='services')
-        self.peer_store_and_set.assert_called_with(relation_id=relation_id,
-                                                   **relation_data)
-
     @patch.object(utils.os, 'remove')
     @patch.object(utils.os.path, 'exists')
     def test_disable_unused_apache_sites(self, os_path_exists, os_remove):
@@ -1357,3 +1106,21 @@ class TestKeystoneUtils(CharmTestCase):
     def test_run_in_apache_set_release(self):
         self.os_release.return_value = 'kilo'
         self.assertTrue(utils.run_in_apache(release='liberty'))
+
+    def test_get_api_version_icehouse(self):
+        self.assertEqual(utils.get_api_version(), 2)
+
+    def test_get_api_version_queens(self):
+        self.get_os_codename_install_source.return_value = 'queens'
+        self.assertEqual(utils.get_api_version(), 3)
+
+    def test_get_api_version_invalid_option_value(self):
+        self.test_config.set('preferred-api-version', 4)
+        with self.assertRaises(ValueError):
+            utils.get_api_version()
+
+    def test_get_api_version_queens_invalid_option_value(self):
+        self.test_config.set('preferred-api-version', 2)
+        self.get_os_codename_install_source.return_value = 'queens'
+        with self.assertRaises(ValueError):
+            utils.get_api_version()