diff --git a/README.md b/README.md
index 3461b68957d93640d91b4c3dcee1b49c882c0c61..04ad9a0e629aa21096ee12aeeaa59cc29322c489 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 Overview
 ========
 
-This charm provides Keystone, the Openstack identity service. Its target
+This charm provides Keystone, the Openstack identity service. Its target
 platform is (ideally) Ubuntu LTS + Openstack.
 
 This is a modified version,  which adds support for Identity
@@ -14,7 +14,7 @@ The following interfaces are provided:
 
     - nrpe-external-master: Used to generate Nagios checks.
 
-    - identity-service: Openstack API endpoints request an entry in the
+    - identity-service: Openstack API endpoints request an entry in the
       Keystone service catalog + endpoint template catalog. When a relation
       is established, Keystone receives: service name, region, public_url,
       admin_url and internal_url. It first checks that the requested service
@@ -97,28 +97,33 @@ If 'dns-ha' is set and none of the os-{admin,internal,public}-hostname(s) are se
 SSL/HTTPS
 ---------
 
-Support for SSL and https endpoint is provided via various charm configuration
-options.
+Support for SSL and https endpoint is provided via a set of configuration
+options on the charm. There are two types supported:
 
-To enable SSL and https endpoint with a charm-generated CA, set the following
-configuration options:
+use-https - if enabled this option tells Keystone to configure the identity
+endpoint as https. Under this model the keystone charm will either use the CA
+as provided by the user (see ssl_* options below) or will generate its own and
+sync across peers. The cert will be distributed to all service endpoints which
+will be configured to use https.
 
-- use-https - if enabled this option tells Keystone to configure the identity
-  endpoint as https, and the keystone charm will generate its own CA and sync
-  across peers. The cert will be distributed to all service endpoints which
-  will be configured to use https.
+https-service-endpoints - if enabled this option tells Keystone to configure
+ALL endpoints as https. Under this model the keystone charm will either use the
+CA as provided by the user (see ssl_* options below) or will generate its own
+and sync across peers. The cert will be distributed to all service endpoints
+which will be configured to use https as well as configuring themselves to be
+used as https.
 
-- https-service-endpoints - if enabled this option tells Keystone to configure
-  ALL endpoints as https. Under this model the keystone charm will generate its
-  own CA and sync across peers. The cert will be distributed to all service
-  endpoints which will be configured to use https as well as configuring
-  themselves to be used as https.
+When configuring the charms to use SSL there are three charm config options:
+ssl_ca, ssl_cert and ssl_key.
 
-To enable SSL and https endpoint with your own CA, SSL cert, and key set the
-following configuration options: ssl_ca, ssl_cert, and ssl_key. The user can
-provide SSL cert and key using ssl_cert and ssl_key only when the cert is
-signed by a trusted CA. These options should not be used with use-https and
-https-service-endpoints.
+- The user can provide their own CA, SSL cert and key using the options ssl_ca,
+  ssl_cert, ssl_key.
+
+- The user can provide SSL cert and key using ssl_cert and ssl_key when the cert
+  is signed by a trusted CA.
+
+- If not provided, the keystone charm will automatically generate a CA and certs
+  to distribute to endpoints.
 
 When the charm configures itself as a CA (generally only recommended for test
 purposes) it will elect an "ssl-cert-master" whose duty is to generate the CA
@@ -164,7 +169,7 @@ To use this feature, use the --bind option when deploying the charm:
 
     juju deploy keystone --bind "public=public-space internal=internal-space admin=admin-space shared-db=internal-space"
 
-Alternatively these can also be provided as part of a juju native bundle configuration:
+Alternatively these can also be provided as part of a juju native bundle configuration:
 
     keystone:
       charm: cs:xenial/keystone
@@ -177,7 +182,7 @@ Alternatively these can also be provided as part of a juju native bundle configu
 
 NOTE: Spaces must be configured in the underlying provider prior to attempting to use them.
 
-NOTE: Existing deployments using os\-\*-network configuration options will continue to function; these options are preferred over any network space binding provided if set.
+NOTE: Existing deployments using os\-\*-network configuration options will continue to function; these options are preferred over any network space binding provided if set.
 
 Federated Authentication
 ------------------------
diff --git a/actions.yaml b/actions.yaml
index 767bc8abaa8cdd3cfd50094ae41c1499354ca761..81bbd5b52380bb3c5ce6be4b5d3a211b7c24ab67 100644
--- a/actions.yaml
+++ b/actions.yaml
@@ -1,3 +1,5 @@
+git-reinstall:
+  description: Reinstall keystone from the openstack-origin-git repositories.
 pause:
   description: |
     Pause keystone services.
diff --git a/actions/charmhelpers b/actions/charmhelpers
deleted file mode 120000
index 702de734b0c015b34565dfbd7ba8c48ace8cb262..0000000000000000000000000000000000000000
--- a/actions/charmhelpers
+++ /dev/null
@@ -1 +0,0 @@
-../charmhelpers
\ No newline at end of file
diff --git a/actions/git-reinstall b/actions/git-reinstall
new file mode 100755
index 0000000000000000000000000000000000000000..2abc2e405272d0950d129b6efd36223f02ada341
--- /dev/null
+++ b/actions/git-reinstall
@@ -0,0 +1,57 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import traceback
+
+from charmhelpers.contrib.openstack.utils import (
+    git_install_requested,
+)
+
+from charmhelpers.core.hookenv import (
+    action_set,
+    action_fail,
+    config,
+)
+
+from hooks.keystone_utils import (
+    git_install,
+)
+
+from hooks.keystone_hooks import (
+    config_changed,
+)
+
+
+def git_reinstall():
+    """Reinstall from source and restart services.
+
+    If the openstack-origin-git config option was used to install openstack
+    from source git repositories, then this action can be used to reinstall
+    from updated git repositories, followed by a restart of services."""
+    if not git_install_requested():
+        action_fail('openstack-origin-git is not configured')
+        return
+
+    try:
+        git_install(config('openstack-origin-git'))
+        config_changed()
+    except Exception:
+        action_set({'traceback': traceback.format_exc()})
+        action_fail('git-reinstall resulted in an unexpected error')
+
+
+if __name__ == '__main__':
+    git_reinstall()
diff --git a/actions/git_reinstall.py b/actions/git_reinstall.py
new file mode 100755
index 0000000000000000000000000000000000000000..2abc2e405272d0950d129b6efd36223f02ada341
--- /dev/null
+++ b/actions/git_reinstall.py
@@ -0,0 +1,57 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import traceback
+
+from charmhelpers.contrib.openstack.utils import (
+    git_install_requested,
+)
+
+from charmhelpers.core.hookenv import (
+    action_set,
+    action_fail,
+    config,
+)
+
+from hooks.keystone_utils import (
+    git_install,
+)
+
+from hooks.keystone_hooks import (
+    config_changed,
+)
+
+
+def git_reinstall():
+    """Reinstall from source and restart services.
+
+    If the openstack-origin-git config option was used to install openstack
+    from source git repositories, then this action can be used to reinstall
+    from updated git repositories, followed by a restart of services."""
+    if not git_install_requested():
+        action_fail('openstack-origin-git is not configured')
+        return
+
+    try:
+        git_install(config('openstack-origin-git'))
+        config_changed()
+    except Exception:
+        action_set({'traceback': traceback.format_exc()})
+        action_fail('git-reinstall resulted in an unexpected error')
+
+
+if __name__ == '__main__':
+    git_reinstall()
diff --git a/actions/hooks b/actions/hooks
deleted file mode 120000
index f631275e19cd320f570733cb0ce1f287d6f02702..0000000000000000000000000000000000000000
--- a/actions/hooks
+++ /dev/null
@@ -1 +0,0 @@
-../hooks
\ No newline at end of file
diff --git a/charmhelpers/charmhelpers b/charmhelpers/charmhelpers
deleted file mode 120000
index 702de734b0c015b34565dfbd7ba8c48ace8cb262..0000000000000000000000000000000000000000
--- a/charmhelpers/charmhelpers
+++ /dev/null
@@ -1 +0,0 @@
-../charmhelpers
\ No newline at end of file
diff --git a/charmhelpers/contrib/hahelpers/apache.py b/charmhelpers/contrib/hahelpers/apache.py
index 605a1becd92a4eb5683d3db28a2267373780a736..22acb683e6cec8648451543d235286be151dbf2a 100644
--- a/charmhelpers/contrib/hahelpers/apache.py
+++ b/charmhelpers/contrib/hahelpers/apache.py
@@ -65,8 +65,7 @@ def get_ca_cert():
     if ca_cert is None:
         log("Inspecting identity-service relations for CA SSL certificate.",
             level=INFO)
-        for r_id in (relation_ids('identity-service') +
-                     relation_ids('identity-credentials')):
+        for r_id in relation_ids('identity-service'):
             for unit in relation_list(r_id):
                 if ca_cert is None:
                     ca_cert = relation_get('ca_cert',
@@ -77,7 +76,7 @@ def get_ca_cert():
 def retrieve_ca_cert(cert_file):
     cert = None
     if os.path.isfile(cert_file):
-        with open(cert_file, 'rb') as crt:
+        with open(cert_file, 'r') as crt:
             cert = crt.read()
     return cert
 
diff --git a/charmhelpers/contrib/hahelpers/cluster.py b/charmhelpers/contrib/hahelpers/cluster.py
index 4a737e24e885cd767c7185eebd84a3afdf244b13..4207e42c2209958d075bd3a3776d382463d3f6f2 100644
--- a/charmhelpers/contrib/hahelpers/cluster.py
+++ b/charmhelpers/contrib/hahelpers/cluster.py
@@ -223,11 +223,6 @@ def https():
         return True
     if config_get('ssl_cert') and config_get('ssl_key'):
         return True
-    for r_id in relation_ids('certificates'):
-        for unit in relation_list(r_id):
-            ca = relation_get('ca', rid=r_id, unit=unit)
-            if ca:
-                return True
     for r_id in relation_ids('identity-service'):
         for unit in relation_list(r_id):
             # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
@@ -376,7 +371,6 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'):
     ''' Distribute operations by waiting based on modulo_distribution
 
     If modulo and or wait are not set, check config_get for those values.
-    If config values are not set, default to modulo=3 and wait=30.
 
     :param modulo: int The modulo number creates the group distribution
     :param wait: int The constant time wait value
@@ -388,17 +382,10 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'):
     :side effect: Calls time.sleep()
     '''
     if modulo is None:
-        modulo = config_get('modulo-nodes') or 3
+        modulo = config_get('modulo-nodes')
     if wait is None:
-        wait = config_get('known-wait') or 30
-    if juju_is_leader():
-        # The leader should never wait
-        calculated_wait = 0
-    else:
-        # non_zero_wait=True guarantees the non-leader who gets modulo 0
-        # will still wait
-        calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
-                                              non_zero_wait=True)
+        wait = config_get('known-wait')
+    calculated_wait = modulo_distribution(modulo=modulo, wait=wait)
     msg = "Waiting {} seconds for {} ...".format(calculated_wait,
                                                  operation_name)
     log(msg, DEBUG)
diff --git a/charmhelpers/contrib/network/ip.py b/charmhelpers/contrib/network/ip.py
index b13277bb57c9227b1d9dfecf4f6750740e5a262a..a871ce3701ffd416de391bc4404e1acc0fa62c8c 100644
--- a/charmhelpers/contrib/network/ip.py
+++ b/charmhelpers/contrib/network/ip.py
@@ -27,7 +27,6 @@ from charmhelpers.core.hookenv import (
     network_get_primary_address,
     unit_get,
     WARNING,
-    NoNetworkBinding,
 )
 
 from charmhelpers.core.host import (
@@ -110,12 +109,7 @@ def get_address_in_network(network, fallback=None, fatal=False):
         _validate_cidr(network)
         network = netaddr.IPNetwork(network)
         for iface in netifaces.interfaces():
-            try:
-                addresses = netifaces.ifaddresses(iface)
-            except ValueError:
-                # If an instance was deleted between
-                # netifaces.interfaces() run and now, its interfaces are gone
-                continue
+            addresses = netifaces.ifaddresses(iface)
             if network.version == 4 and netifaces.AF_INET in addresses:
                 for addr in addresses[netifaces.AF_INET]:
                     cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
@@ -584,9 +578,6 @@ def get_relation_ip(interface, cidr_network=None):
     except NotImplementedError:
         # If network-get is not available
         address = get_host_ip(unit_get('private-address'))
-    except NoNetworkBinding:
-        log("No network binding for {}".format(interface), WARNING)
-        address = get_host_ip(unit_get('private-address'))
 
     if config('prefer-ipv6'):
         # Currently IPv6 has priority, eventually we want IPv6 to just be
diff --git a/charmhelpers/contrib/openstack/amulet/deployment.py b/charmhelpers/contrib/openstack/amulet/deployment.py
index 1c96752a49fb36f389cd1ede38b31afb94127e42..5afbbd87c13e2b168b088c4da51b3b63ab4d07a2 100644
--- a/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -21,9 +21,6 @@ from collections import OrderedDict
 from charmhelpers.contrib.amulet.deployment import (
     AmuletDeployment
 )
-from charmhelpers.contrib.openstack.amulet.utils import (
-    OPENSTACK_RELEASES_PAIRS
-)
 
 DEBUG = logging.DEBUG
 ERROR = logging.ERROR
@@ -274,8 +271,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
            release.
            """
         # Must be ordered by OpenStack release (not by Ubuntu release):
-        for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
-            setattr(self, os_pair, i)
+        (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty,
+         self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton,
+         self.yakkety_newton, self.xenial_ocata, self.zesty_ocata,
+         self.xenial_pike, self.artful_pike, self.xenial_queens,
+         self.bionic_queens,) = range(13)
 
         releases = {
             ('trusty', None): self.trusty_icehouse,
@@ -291,8 +291,6 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('zesty', None): self.zesty_ocata,
             ('artful', None): self.artful_pike,
             ('bionic', None): self.bionic_queens,
-            ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
-            ('cosmic', None): self.cosmic_rocky,
         }
         return releases[(self.series, self.openstack)]
 
@@ -308,7 +306,6 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('zesty', 'ocata'),
             ('artful', 'pike'),
             ('bionic', 'queens'),
-            ('cosmic', 'rocky'),
         ])
         if self.openstack:
             os_origin = self.openstack.split(':')[1]
diff --git a/charmhelpers/contrib/openstack/amulet/utils.py b/charmhelpers/contrib/openstack/amulet/utils.py
index ef4ab54bc8d1a988f827d2b766c3d1f20f0238e1..b71b2b1910959f5dbe7860ff3d14d45b8e9e2d90 100644
--- a/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/charmhelpers/contrib/openstack/amulet/utils.py
@@ -40,7 +40,6 @@ import novaclient
 import pika
 import swiftclient
 
-from charmhelpers.core.decorators import retry_on_exception
 from charmhelpers.contrib.amulet.utils import (
     AmuletUtils
 )
@@ -51,13 +50,6 @@ ERROR = logging.ERROR
 
 NOVA_CLIENT_VERSION = "2"
 
-OPENSTACK_RELEASES_PAIRS = [
-    'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
-    'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
-    'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
-    'xenial_pike', 'artful_pike', 'xenial_queens',
-    'bionic_queens', 'bionic_rocky', 'cosmic_rocky']
-
 
 class OpenStackAmuletUtils(AmuletUtils):
     """OpenStack amulet utilities.
@@ -71,34 +63,7 @@ class OpenStackAmuletUtils(AmuletUtils):
         super(OpenStackAmuletUtils, self).__init__(log_level)
 
     def validate_endpoint_data(self, endpoints, admin_port, internal_port,
-                               public_port, expected, openstack_release=None):
-        """Validate endpoint data. Pick the correct validator based on
-           OpenStack release. Expected data should be in the v2 format:
-           {
-               'id': id,
-               'region': region,
-               'adminurl': adminurl,
-               'internalurl': internalurl,
-               'publicurl': publicurl,
-               'service_id': service_id}
-
-           """
-        validation_function = self.validate_v2_endpoint_data
-        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
-        if openstack_release and openstack_release >= xenial_queens:
-                validation_function = self.validate_v3_endpoint_data
-                expected = {
-                    'id': expected['id'],
-                    'region': expected['region'],
-                    'region_id': 'RegionOne',
-                    'url': self.valid_url,
-                    'interface': self.not_null,
-                    'service_id': expected['service_id']}
-        return validation_function(endpoints, admin_port, internal_port,
-                                   public_port, expected)
-
-    def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
-                                  public_port, expected):
+                               public_port, expected):
         """Validate endpoint data.
 
            Validate actual endpoint data vs expected endpoint data. The ports
@@ -127,7 +92,7 @@ class OpenStackAmuletUtils(AmuletUtils):
             return 'endpoint not found'
 
     def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
-                                  public_port, expected, expected_num_eps=3):
+                                  public_port, expected):
         """Validate keystone v3 endpoint data.
 
         Validate the v3 endpoint data which has changed from v2.  The
@@ -173,89 +138,10 @@ class OpenStackAmuletUtils(AmuletUtils):
                 if ret:
                     return 'unexpected endpoint data - {}'.format(ret)
 
-        if len(found) != expected_num_eps:
+        if len(found) != 3:
             return 'Unexpected number of endpoints found'
 
-    def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
-        """Convert v2 endpoint data into v3.
-
-           {
-               'service_name1': [
-                   {
-                       'adminURL': adminURL,
-                       'id': id,
-                       'region': region.
-                       'publicURL': publicURL,
-                       'internalURL': internalURL
-                   }],
-               'service_name2': [
-                   {
-                       'adminURL': adminURL,
-                       'id': id,
-                       'region': region.
-                       'publicURL': publicURL,
-                       'internalURL': internalURL
-                   }],
-           }
-          """
-        self.log.warn("Endpoint ID and Region ID validation is limited to not "
-                      "null checks after v2 to v3 conversion")
-        for svc in ep_data.keys():
-            assert len(ep_data[svc]) == 1, "Unknown data format"
-            svc_ep_data = ep_data[svc][0]
-            ep_data[svc] = [
-                {
-                    'url': svc_ep_data['adminURL'],
-                    'interface': 'admin',
-                    'region': svc_ep_data['region'],
-                    'region_id': self.not_null,
-                    'id': self.not_null},
-                {
-                    'url': svc_ep_data['publicURL'],
-                    'interface': 'public',
-                    'region': svc_ep_data['region'],
-                    'region_id': self.not_null,
-                    'id': self.not_null},
-                {
-                    'url': svc_ep_data['internalURL'],
-                    'interface': 'internal',
-                    'region': svc_ep_data['region'],
-                    'region_id': self.not_null,
-                    'id': self.not_null}]
-        return ep_data
-
-    def validate_svc_catalog_endpoint_data(self, expected, actual,
-                                           openstack_release=None):
-        """Validate service catalog endpoint data. Pick the correct validator
-           for the OpenStack version. Expected data should be in the v2 format:
-           {
-               'service_name1': [
-                   {
-                       'adminURL': adminURL,
-                       'id': id,
-                       'region': region.
-                       'publicURL': publicURL,
-                       'internalURL': internalURL
-                   }],
-               'service_name2': [
-                   {
-                       'adminURL': adminURL,
-                       'id': id,
-                       'region': region.
-                       'publicURL': publicURL,
-                       'internalURL': internalURL
-                   }],
-           }
-
-           """
-        validation_function = self.validate_v2_svc_catalog_endpoint_data
-        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
-        if openstack_release and openstack_release >= xenial_queens:
-            validation_function = self.validate_v3_svc_catalog_endpoint_data
-            expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
-        return validation_function(expected, actual)
-
-    def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
+    def validate_svc_catalog_endpoint_data(self, expected, actual):
         """Validate service catalog endpoint data.
 
            Validate a list of actual service catalog endpoints vs a list of
@@ -424,7 +310,6 @@ class OpenStackAmuletUtils(AmuletUtils):
         self.log.debug('Checking if tenant exists ({})...'.format(tenant))
         return tenant in [t.name for t in keystone.tenants.list()]
 
-    @retry_on_exception(num_retries=5, base_delay=1)
     def keystone_wait_for_propagation(self, sentry_relation_pairs,
                                       api_version):
         """Iterate over list of sentry and relation tuples and verify that
@@ -443,7 +328,7 @@ class OpenStackAmuletUtils(AmuletUtils):
             if rel.get('api_version') != str(api_version):
                 raise Exception("api_version not propagated through relation"
                                 " data yet ('{}' != '{}')."
-                                "".format(rel.get('api_version'), api_version))
+                                "".format(rel['api_version'], api_version))
 
     def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
                                        api_version):
@@ -465,13 +350,16 @@ class OpenStackAmuletUtils(AmuletUtils):
         deployment._auto_wait_for_status()
         self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
 
-    def authenticate_cinder_admin(self, keystone, api_version=2):
+    def authenticate_cinder_admin(self, keystone_sentry, username,
+                                  password, tenant, api_version=2):
         """Authenticates admin user with cinder."""
-        self.log.debug('Authenticating cinder admin...')
+        # NOTE(beisner): cinder python client doesn't accept tokens.
+        keystone_ip = keystone_sentry.info['public-address']
+        ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
         _clients = {
             1: cinder_client.Client,
             2: cinder_clientv2.Client}
-        return _clients[api_version](session=keystone.session)
+        return _clients[api_version](username, password, tenant, ept)
 
     def authenticate_keystone(self, keystone_ip, username, password,
                               api_version=False, admin_port=False,
@@ -479,36 +367,13 @@ class OpenStackAmuletUtils(AmuletUtils):
                               project_domain_name=None, project_name=None):
         """Authenticate with Keystone"""
         self.log.debug('Authenticating with keystone...')
-        if not api_version:
-            api_version = 2
-        sess, auth = self.get_keystone_session(
-            keystone_ip=keystone_ip,
-            username=username,
-            password=password,
-            api_version=api_version,
-            admin_port=admin_port,
-            user_domain_name=user_domain_name,
-            domain_name=domain_name,
-            project_domain_name=project_domain_name,
-            project_name=project_name
-        )
-        if api_version == 2:
-            client = keystone_client.Client(session=sess)
-        else:
-            client = keystone_client_v3.Client(session=sess)
-        # This populates the client.service_catalog
-        client.auth_ref = auth.get_access(sess)
-        return client
-
-    def get_keystone_session(self, keystone_ip, username, password,
-                             api_version=False, admin_port=False,
-                             user_domain_name=None, domain_name=None,
-                             project_domain_name=None, project_name=None):
-        """Return a keystone session object"""
-        ep = self.get_keystone_endpoint(keystone_ip,
-                                        api_version=api_version,
-                                        admin_port=admin_port)
-        if api_version == 2:
+        port = 5000
+        if admin_port:
+            port = 35357
+        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
+                                        port)
+        if not api_version or api_version == 2:
+            ep = base_ep + "/v2.0"
             auth = v2.Password(
                 username=username,
                 password=password,
@@ -516,7 +381,12 @@ class OpenStackAmuletUtils(AmuletUtils):
                 auth_url=ep
             )
             sess = keystone_session.Session(auth=auth)
+            client = keystone_client.Client(session=sess)
+            # This populates the client.service_catalog
+            client.auth_ref = auth.get_access(sess)
+            return client
         else:
+            ep = base_ep + "/v3"
             auth = v3.Password(
                 user_domain_name=user_domain_name,
                 username=username,
@@ -527,57 +397,10 @@ class OpenStackAmuletUtils(AmuletUtils):
                 auth_url=ep
             )
             sess = keystone_session.Session(auth=auth)
-        return (sess, auth)
-
-    def get_keystone_endpoint(self, keystone_ip, api_version=None,
-                              admin_port=False):
-        """Return keystone endpoint"""
-        port = 5000
-        if admin_port:
-            port = 35357
-        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
-                                        port)
-        if api_version == 2:
-            ep = base_ep + "/v2.0"
-        else:
-            ep = base_ep + "/v3"
-        return ep
-
-    def get_default_keystone_session(self, keystone_sentry,
-                                     openstack_release=None, api_version=2):
-        """Return a keystone session object and client object assuming standard
-           default settings
-
-           Example call in amulet tests:
-               self.keystone_session, self.keystone = u.get_default_keystone_session(
-                   self.keystone_sentry,
-                   openstack_release=self._get_openstack_release())
-
-           The session can then be used to auth other clients:
-               neutronclient.Client(session=session)
-               aodh_client.Client(session=session)
-               eyc
-        """
-        self.log.debug('Authenticating keystone admin...')
-        # 11 => xenial_queens
-        if api_version == 3 or (openstack_release and openstack_release >= 11):
-            client_class = keystone_client_v3.Client
-            api_version = 3
-        else:
-            client_class = keystone_client.Client
-        keystone_ip = keystone_sentry.info['public-address']
-        session, auth = self.get_keystone_session(
-            keystone_ip,
-            api_version=api_version,
-            username='admin',
-            password='openstack',
-            project_name='admin',
-            user_domain_name='admin_domain',
-            project_domain_name='admin_domain')
-        client = client_class(session=session)
-        # This populates the client.service_catalog
-        client.auth_ref = auth.get_access(session)
-        return session, client
+            client = keystone_client_v3.Client(session=sess)
+            # This populates the client.service_catalog
+            client.auth_ref = auth.get_access(sess)
+            return client
 
     def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                     tenant=None, api_version=None,
@@ -1035,12 +858,9 @@ class OpenStackAmuletUtils(AmuletUtils):
         :returns: List of pool name, object count, kb disk space used
         """
         df = self.get_ceph_df(sentry_unit)
-        for pool in df['pools']:
-            if pool['id'] == pool_id:
-                pool_name = pool['name']
-                obj_count = pool['stats']['objects']
-                kb_used = pool['stats']['kb_used']
-
+        pool_name = df['pools'][pool_id]['name']
+        obj_count = df['pools'][pool_id]['stats']['objects']
+        kb_used = df['pools'][pool_id]['stats']['kb_used']
         self.log.debug('Ceph {} pool (ID {}): {} objects, '
                        '{} kb used'.format(pool_name, pool_id,
                                            obj_count, kb_used))
diff --git a/charmhelpers/contrib/openstack/context.py b/charmhelpers/contrib/openstack/context.py
index f3741b0e5ed0a8724fd009b4c0d1b54876618a47..e6c0e9feb4de9efdb97397b85979f7a4ffcbcfd0 100644
--- a/charmhelpers/contrib/openstack/context.py
+++ b/charmhelpers/contrib/openstack/context.py
@@ -93,14 +93,14 @@ from charmhelpers.contrib.network.ip import (
     format_ipv6_addr,
     is_bridge_member,
     is_ipv6_disabled,
-    get_relation_ip,
 )
 from charmhelpers.contrib.openstack.utils import (
     config_flags_parser,
+    get_host_ip,
+    git_determine_usr_bin,
+    git_determine_python_path,
     enable_memcache,
     snap_install_requested,
-    CompareOpenStackReleases,
-    os_release,
 )
 from charmhelpers.core.unitdata import kv
 
@@ -190,8 +190,8 @@ class OSContextGenerator(object):
 class SharedDBContext(OSContextGenerator):
     interfaces = ['shared-db']
 
-    def __init__(self, database=None, user=None, relation_prefix=None,
-                 ssl_dir=None, relation_id=None):
+    def __init__(self,
+                 database=None, user=None, relation_prefix=None, ssl_dir=None):
         """Allows inspecting relation for settings prefixed with
         relation_prefix. This is useful for parsing access for multiple
         databases returned via the shared-db interface (eg, nova_password,
@@ -202,7 +202,6 @@ class SharedDBContext(OSContextGenerator):
         self.user = user
         self.ssl_dir = ssl_dir
         self.rel_name = self.interfaces[0]
-        self.relation_id = relation_id
 
     def __call__(self):
         self.database = self.database or config('database')
@@ -236,12 +235,7 @@ class SharedDBContext(OSContextGenerator):
         if self.relation_prefix:
             password_setting = self.relation_prefix + '_password'
 
-        if self.relation_id:
-            rids = [self.relation_id]
-        else:
-            rids = relation_ids(self.interfaces[0])
-
-        for rid in rids:
+        for rid in relation_ids(self.interfaces[0]):
             self.related = True
             for unit in related_units(rid):
                 rdata = relation_get(rid=rid, unit=unit)
@@ -338,7 +332,10 @@ class IdentityServiceContext(OSContextGenerator):
         self.rel_name = rel_name
         self.interfaces = [self.rel_name]
 
-    def _setup_pki_cache(self):
+    def __call__(self):
+        log('Generating template context for ' + self.rel_name, level=DEBUG)
+        ctxt = {}
+
         if self.service and self.service_user:
             # This is required for pki token signing if we don't want /tmp to
             # be used.
@@ -348,15 +345,6 @@ class IdentityServiceContext(OSContextGenerator):
                 mkdir(path=cachedir, owner=self.service_user,
                       group=self.service_user, perms=0o700)
 
-            return cachedir
-        return None
-
-    def __call__(self):
-        log('Generating template context for ' + self.rel_name, level=DEBUG)
-        ctxt = {}
-
-        cachedir = self._setup_pki_cache()
-        if cachedir:
             ctxt['signing_dir'] = cachedir
 
         for rid in relation_ids(self.rel_name):
@@ -390,63 +378,6 @@ class IdentityServiceContext(OSContextGenerator):
                     # so a missing value just indicates keystone needs
                     # upgrading
                     ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
-                    ctxt['admin_domain_id'] = rdata.get('service_domain_id')
-                    return ctxt
-
-        return {}
-
-
-class IdentityCredentialsContext(IdentityServiceContext):
-    '''Context for identity-credentials interface type'''
-
-    def __init__(self,
-                 service=None,
-                 service_user=None,
-                 rel_name='identity-credentials'):
-        super(IdentityCredentialsContext, self).__init__(service,
-                                                         service_user,
-                                                         rel_name)
-
-    def __call__(self):
-        log('Generating template context for ' + self.rel_name, level=DEBUG)
-        ctxt = {}
-
-        cachedir = self._setup_pki_cache()
-        if cachedir:
-            ctxt['signing_dir'] = cachedir
-
-        for rid in relation_ids(self.rel_name):
-            self.related = True
-            for unit in related_units(rid):
-                rdata = relation_get(rid=rid, unit=unit)
-                credentials_host = rdata.get('credentials_host')
-                credentials_host = (
-                    format_ipv6_addr(credentials_host) or credentials_host
-                )
-                auth_host = rdata.get('auth_host')
-                auth_host = format_ipv6_addr(auth_host) or auth_host
-                svc_protocol = rdata.get('credentials_protocol') or 'http'
-                auth_protocol = rdata.get('auth_protocol') or 'http'
-                api_version = rdata.get('api_version') or '2.0'
-                ctxt.update({
-                    'service_port': rdata.get('credentials_port'),
-                    'service_host': credentials_host,
-                    'auth_host': auth_host,
-                    'auth_port': rdata.get('auth_port'),
-                    'admin_tenant_name': rdata.get('credentials_project'),
-                    'admin_tenant_id': rdata.get('credentials_project_id'),
-                    'admin_user': rdata.get('credentials_username'),
-                    'admin_password': rdata.get('credentials_password'),
-                    'service_protocol': svc_protocol,
-                    'auth_protocol': auth_protocol,
-                    'api_version': api_version
-                })
-
-                if float(api_version) > 2:
-                    ctxt.update({'admin_domain_name':
-                                 rdata.get('domain')})
-
-                if self.context_complete(ctxt):
                     return ctxt
 
         return {}
@@ -454,13 +385,11 @@ class IdentityCredentialsContext(IdentityServiceContext):
 
 class AMQPContext(OSContextGenerator):
 
-    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None,
-                 relation_id=None):
+    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
         self.ssl_dir = ssl_dir
         self.rel_name = rel_name
         self.relation_prefix = relation_prefix
         self.interfaces = [rel_name]
-        self.relation_id = relation_id
 
     def __call__(self):
         log('Generating template context for amqp', level=DEBUG)
@@ -481,11 +410,7 @@ class AMQPContext(OSContextGenerator):
             raise OSContextError
 
         ctxt = {}
-        if self.relation_id:
-            rids = [self.relation_id]
-        else:
-            rids = relation_ids(self.rel_name)
-        for rid in rids:
+        for rid in relation_ids(self.rel_name):
             ha_vip_only = False
             self.related = True
             transport_hosts = None
@@ -630,9 +555,7 @@ class HAProxyContext(OSContextGenerator):
     """
     interfaces = ['cluster']
 
-    def __init__(self, singlenode_mode=False,
-                 address_types=ADDRESS_TYPES):
-        self.address_types = address_types
+    def __init__(self, singlenode_mode=False):
         self.singlenode_mode = singlenode_mode
 
     def __call__(self):
@@ -641,22 +564,19 @@ class HAProxyContext(OSContextGenerator):
         if not relation_ids('cluster') and not self.singlenode_mode:
             return {}
 
+        if config('prefer-ipv6'):
+            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
+        else:
+            addr = get_host_ip(unit_get('private-address'))
+
         l_unit = local_unit().replace('/', '-')
         cluster_hosts = {}
 
         # NOTE(jamespage): build out map of configured network endpoints
         # and associated backends
-        for addr_type in self.address_types:
+        for addr_type in ADDRESS_TYPES:
             cfg_opt = 'os-{}-network'.format(addr_type)
-            # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather
-            # than 'internal'
-            if addr_type == 'internal':
-                _addr_map_type = INTERNAL
-            else:
-                _addr_map_type = addr_type
-            # Network spaces aware
-            laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'],
-                                    config(cfg_opt))
+            laddr = get_address_in_network(config(cfg_opt))
             if laddr:
                 netmask = get_netmask_for_address(laddr)
                 cluster_hosts[laddr] = {
@@ -667,19 +587,15 @@ class HAProxyContext(OSContextGenerator):
                 }
                 for rid in relation_ids('cluster'):
                     for unit in sorted(related_units(rid)):
-                        # API Charms will need to set {addr_type}-address with
-                        # get_relation_ip(addr_type)
                         _laddr = relation_get('{}-address'.format(addr_type),
                                               rid=rid, unit=unit)
                         if _laddr:
                             _unit = unit.replace('/', '-')
                             cluster_hosts[laddr]['backends'][_unit] = _laddr
 
-        # NOTE(jamespage) add backend based on get_relation_ip - this
-        # will either be the only backend or the fallback if no acls
+        # NOTE(jamespage) add backend based on private address - this
+        # will either be the only backend or the fallback if no acls
         # match in the frontend
-        # Network spaces aware
-        addr = get_relation_ip('cluster')
         cluster_hosts[addr] = {}
         netmask = get_netmask_for_address(addr)
         cluster_hosts[addr] = {
@@ -689,8 +605,6 @@ class HAProxyContext(OSContextGenerator):
         }
         for rid in relation_ids('cluster'):
             for unit in sorted(related_units(rid)):
-                # API Charms will need to set their private-address with
-                # get_relation_ip('cluster')
                 _laddr = relation_get('private-address',
                                       rid=rid, unit=unit)
                 if _laddr:
@@ -801,18 +715,17 @@ class ApacheSSLContext(OSContextGenerator):
         ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
         mkdir(path=ssl_dir)
         cert, key = get_cert(cn)
-        if cert and key:
-            if cn:
-                cert_filename = 'cert_{}'.format(cn)
-                key_filename = 'key_{}'.format(cn)
-            else:
-                cert_filename = 'cert'
-                key_filename = 'key'
+        if cn:
+            cert_filename = 'cert_{}'.format(cn)
+            key_filename = 'key_{}'.format(cn)
+        else:
+            cert_filename = 'cert'
+            key_filename = 'key'
 
-            write_file(path=os.path.join(ssl_dir, cert_filename),
-                       content=b64decode(cert), perms=0o640)
-            write_file(path=os.path.join(ssl_dir, key_filename),
-                       content=b64decode(key), perms=0o640)
+        write_file(path=os.path.join(ssl_dir, cert_filename),
+                   content=b64decode(cert))
+        write_file(path=os.path.join(ssl_dir, key_filename),
+                   content=b64decode(key))
 
     def configure_ca(self):
         ca_cert = get_ca_cert()
@@ -884,31 +797,23 @@ class ApacheSSLContext(OSContextGenerator):
         if not self.external_ports or not https():
             return {}
 
-        use_keystone_ca = True
-        for rid in relation_ids('certificates'):
-            if related_units(rid):
-                use_keystone_ca = False
-
-        if use_keystone_ca:
-            self.configure_ca()
-
+        self.configure_ca()
         self.enable_modules()
 
         ctxt = {'namespace': self.service_namespace,
                 'endpoints': [],
                 'ext_ports': []}
 
-        if use_keystone_ca:
-            cns = self.canonical_names()
-            if cns:
-                for cn in cns:
-                    self.configure_cert(cn)
-            else:
-                # Expect cert/key provided in config (currently assumed that ca
-                # uses ip for cn)
-                for net_type in (INTERNAL, ADMIN, PUBLIC):
-                    cn = resolve_address(endpoint_type=net_type)
-                    self.configure_cert(cn)
+        cns = self.canonical_names()
+        if cns:
+            for cn in cns:
+                self.configure_cert(cn)
+        else:
+            # Expect cert/key provided in config (currently assumed that ca
+            # uses ip for cn)
+            for net_type in (INTERNAL, ADMIN, PUBLIC):
+                cn = resolve_address(endpoint_type=net_type)
+                self.configure_cert(cn)
 
         addresses = self.get_network_addresses()
         for address, endpoint in addresses:
@@ -1416,6 +1321,8 @@ class WSGIWorkerConfigContext(WorkerConfigContext):
             "public_processes": int(math.ceil(self.public_process_weight *
                                               total_processes)),
             "threads": 1,
+            "usr_bin": git_determine_usr_bin(),
+            "python_path": git_determine_python_path(),
         }
         return ctxt
 
@@ -1663,82 +1570,6 @@ class InternalEndpointContext(OSContextGenerator):
         return {'use_internal_endpoints': config('use-internal-endpoints')}
 
 
-class VolumeAPIContext(InternalEndpointContext):
-    """Volume API context.
-
-    This context provides information regarding the volume endpoint to use
-    when communicating between services. It determines which version of the
-    API is appropriate for use.
-
-    This value will be determined in the resulting context dictionary
-    returned from calling the VolumeAPIContext object. Information provided
-    by this context is as follows:
-
-        volume_api_version: the volume api version to use, currently
-            'v2' or 'v3'
-        volume_catalog_info: the information to use for a cinder client
-            configuration that consumes API endpoints from the keystone
-            catalog. This is defined as the type:name:endpoint_type string.
-    """
-    # FIXME(wolsen) This implementation is based on the provider being able
-    # to specify the package version to check but does not guarantee that the
-    # volume service api version selected is available. In practice, it is
-    # quite likely the volume service *is* providing the v3 volume service.
-    # This should be resolved when the service-discovery spec is implemented.
-    def __init__(self, pkg):
-        """
-        Creates a new VolumeAPIContext for use in determining which version
-        of the Volume API should be used for communication. A package codename
-        should be supplied for determining the currently installed OpenStack
-        version.
-
-        :param pkg: the package codename to use in order to determine the
-            component version (e.g. nova-common). See
-            charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more.
-        """
-        super(VolumeAPIContext, self).__init__()
-        self._ctxt = None
-        if not pkg:
-            raise ValueError('package name must be provided in order to '
-                             'determine current OpenStack version.')
-        self.pkg = pkg
-
-    @property
-    def ctxt(self):
-        if self._ctxt is not None:
-            return self._ctxt
-        self._ctxt = self._determine_ctxt()
-        return self._ctxt
-
-    def _determine_ctxt(self):
-        """Determines the Volume API endpoint information.
-
-        Determines the appropriate version of the API that should be used
-        as well as the catalog_info string that would be supplied. Returns
-        a dict containing the volume_api_version and the volume_catalog_info.
-        """
-        rel = os_release(self.pkg, base='icehouse')
-        version = '2'
-        if CompareOpenStackReleases(rel) >= 'pike':
-            version = '3'
-
-        service_type = 'volumev{version}'.format(version=version)
-        service_name = 'cinderv{version}'.format(version=version)
-        endpoint_type = 'publicURL'
-        if config('use-internal-endpoints'):
-            endpoint_type = 'internalURL'
-        catalog_info = '{type}:{name}:{endpoint}'.format(
-            type=service_type, name=service_name, endpoint=endpoint_type)
-
-        return {
-            'volume_api_version': version,
-            'volume_catalog_info': catalog_info,
-        }
-
-    def __call__(self):
-        return self.ctxt
-
-
 class AppArmorContext(OSContextGenerator):
     """Base class for apparmor contexts."""
 
@@ -1874,31 +1705,3 @@ class MemcacheContext(OSContextGenerator):
                     ctxt['memcache_server_formatted'],
                     ctxt['memcache_port'])
         return ctxt
-
-
-class EnsureDirContext(OSContextGenerator):
-    '''
-    Serves as a generic context to create a directory as a side-effect.
-
-    Useful for software that supports drop-in files (.d) in conjunction
-    with config option-based templates. Examples include:
-        * OpenStack oslo.policy drop-in files;
-        * systemd drop-in config files;
-        * other software that supports overriding defaults with .d files
-
-    Another use-case is when a subordinate generates a configuration for
-    primary to render in a separate directory.
-
-    Some software requires a user to create a target directory to be
-    scanned for drop-in files with a specific format. This is why this
-    context is needed to do that before rendering a template.
-   '''
-
-    def __init__(self, dirname, **kwargs):
-        '''Used merely to ensure that a given directory exists.'''
-        self.dirname = dirname
-        self.kwargs = kwargs
-
-    def __call__(self):
-        mkdir(self.dirname, **self.kwargs)
-        return {}
diff --git a/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
index 91ce0246e66115994c3f518b36448f70100ecfc7..3ebb5329bc5a66ddbf73d813b2ee55e2fa6e87c7 100755
--- a/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
+++ b/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
@@ -10,7 +10,7 @@
 CURRQthrsh=0
 MAXQthrsh=100
 
-AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}')
+AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
 
 HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
 
diff --git a/charmhelpers/contrib/openstack/ha/utils.py b/charmhelpers/contrib/openstack/ha/utils.py
index 6060ae50b63677126e1941295487cd4460803b10..9a4d79c12fb9768eefd3930276696ba4ee987250 100644
--- a/charmhelpers/contrib/openstack/ha/utils.py
+++ b/charmhelpers/contrib/openstack/ha/utils.py
@@ -23,8 +23,6 @@
 Helpers for high availability.
 """
 
-import json
-
 import re
 
 from charmhelpers.core.hookenv import (
@@ -34,7 +32,6 @@ from charmhelpers.core.hookenv import (
     config,
     status_set,
     DEBUG,
-    WARNING,
 )
 
 from charmhelpers.core.host import (
@@ -43,23 +40,6 @@ from charmhelpers.core.host import (
 
 from charmhelpers.contrib.openstack.ip import (
     resolve_address,
-    is_ipv6,
-)
-
-from charmhelpers.contrib.network.ip import (
-    get_iface_for_address,
-    get_netmask_for_address,
-)
-
-from charmhelpers.contrib.hahelpers.cluster import (
-    get_hacluster_config
-)
-
-JSON_ENCODE_OPTIONS = dict(
-    sort_keys=True,
-    allow_nan=False,
-    indent=None,
-    separators=(',', ':'),
 )
 
 
@@ -73,8 +53,8 @@ class DNSHAException(Exception):
 def update_dns_ha_resource_params(resources, resource_params,
                                   relation_id=None,
                                   crm_ocf='ocf:maas:dns'):
-    """ Configure DNS-HA resources based on provided configuration and
-    update resource dictionaries for the HA relation.
+    """ Check for os-*-hostname settings and update resource dictionaries for
+    the HA relation.
 
     @param resources: Pointer to dictionary of resources.
                       Usually instantiated in ha_joined().
@@ -84,85 +64,7 @@ def update_dns_ha_resource_params(resources, resource_params,
     @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
                     DNS HA
     """
-    _relation_data = {'resources': {}, 'resource_params': {}}
-    update_hacluster_dns_ha(charm_name(),
-                            _relation_data,
-                            crm_ocf)
-    resources.update(_relation_data['resources'])
-    resource_params.update(_relation_data['resource_params'])
-    relation_set(relation_id=relation_id, groups=_relation_data['groups'])
-
-
-def assert_charm_supports_dns_ha():
-    """Validate prerequisites for DNS HA
-    The MAAS client is only available on Xenial or greater
-
-    :raises DNSHAException: if release is < 16.04
-    """
-    if lsb_release().get('DISTRIB_RELEASE') < '16.04':
-        msg = ('DNS HA is only supported on 16.04 and greater '
-               'versions of Ubuntu.')
-        status_set('blocked', msg)
-        raise DNSHAException(msg)
-    return True
-
-
-def expect_ha():
-    """ Determine if the unit expects to be in HA
-
-    Check for VIP or dns-ha settings which indicate the unit should expect to
-    be related to hacluster.
-
-    @returns boolean
-    """
-    return config('vip') or config('dns-ha')
-
-
-def generate_ha_relation_data(service):
-    """ Generate relation data for ha relation
-
-    Based on configuration options and unit interfaces, generate a json
-    encoded dict of relation data items for the hacluster relation,
-    providing configuration for DNS HA or VIP's + haproxy clone sets.
-
-    @returns dict: json encoded data for use with relation_set
-    """
-    _haproxy_res = 'res_{}_haproxy'.format(service)
-    _relation_data = {
-        'resources': {
-            _haproxy_res: 'lsb:haproxy',
-        },
-        'resource_params': {
-            _haproxy_res: 'op monitor interval="5s"'
-        },
-        'init_services': {
-            _haproxy_res: 'haproxy'
-        },
-        'clones': {
-            'cl_{}_haproxy'.format(service): _haproxy_res
-        },
-    }
-
-    if config('dns-ha'):
-        update_hacluster_dns_ha(service, _relation_data)
-    else:
-        update_hacluster_vip(service, _relation_data)
-
-    return {
-        'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
-        for k, v in _relation_data.items() if v
-    }
 
-
-def update_hacluster_dns_ha(service, relation_data,
-                            crm_ocf='ocf:maas:dns'):
-    """ Configure DNS-HA resources based on provided configuration
-
-    @param service: Name of the service being configured
-    @param relation_data: Pointer to dictionary of relation data.
-    @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
-                    DNS HA
-    """
     # Validate the charm environment for DNS HA
     assert_charm_supports_dns_ha()
 
@@ -191,7 +93,7 @@ def update_hacluster_dns_ha(service, relation_data,
             status_set('blocked', msg)
             raise DNSHAException(msg)
 
-        hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type)
+        hostname_key = 'res_{}_{}_hostname'.format(charm_name(), endpoint_type)
         if hostname_key in hostname_group:
             log('DNS HA: Resource {}: {} already exists in '
                 'hostname group - skipping'.format(hostname_key, hostname),
@@ -199,67 +101,42 @@ def update_hacluster_dns_ha(service, relation_data,
             continue
 
         hostname_group.append(hostname_key)
-        relation_data['resources'][hostname_key] = crm_ocf
-        relation_data['resource_params'][hostname_key] = (
-            'params fqdn="{}" ip_address="{}"'
-            .format(hostname, resolve_address(endpoint_type=endpoint_type,
-                                              override=False)))
+        resources[hostname_key] = crm_ocf
+        resource_params[hostname_key] = (
+            'params fqdn="{}" ip_address="{}" '
+            ''.format(hostname, resolve_address(endpoint_type=endpoint_type,
+                                                override=False)))
 
     if len(hostname_group) >= 1:
         log('DNS HA: Hostname group is set with {} as members. '
             'Informing the ha relation'.format(' '.join(hostname_group)),
             DEBUG)
-        relation_data['groups'] = {
-            'grp_{}_hostnames'.format(service): ' '.join(hostname_group)
-        }
+        relation_set(relation_id=relation_id, groups={
+            'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)})
     else:
         msg = 'DNS HA: Hostname group has no members.'
         status_set('blocked', msg)
         raise DNSHAException(msg)
 
 
-def update_hacluster_vip(service, relation_data):
-    """ Configure VIP resources based on provided configuration
-
-    @param service: Name of the service being configured
-    @param relation_data: Pointer to dictionary of relation data.
+def assert_charm_supports_dns_ha():
+    """Validate prerequisites for DNS HA
+    The MAAS client is only available on Xenial or greater
     """
-    cluster_config = get_hacluster_config()
-    vip_group = []
-    for vip in cluster_config['vip'].split():
-        if is_ipv6(vip):
-            res_neutron_vip = 'ocf:heartbeat:IPv6addr'
-            vip_params = 'ipv6addr'
-        else:
-            res_neutron_vip = 'ocf:heartbeat:IPaddr2'
-            vip_params = 'ip'
+    if lsb_release().get('DISTRIB_RELEASE') < '16.04':
+        msg = ('DNS HA is only supported on 16.04 and greater '
+               'versions of Ubuntu.')
+        status_set('blocked', msg)
+        raise DNSHAException(msg)
+    return True
 
-        iface = (get_iface_for_address(vip) or
-                 config('vip_iface'))
-        netmask = (get_netmask_for_address(vip) or
-                   config('vip_cidr'))
 
-        if iface is not None:
-            vip_key = 'res_{}_{}_vip'.format(service, iface)
-            if vip_key in vip_group:
-                if vip not in relation_data['resource_params'][vip_key]:
-                    vip_key = '{}_{}'.format(vip_key, vip_params)
-                else:
-                    log("Resource '%s' (vip='%s') already exists in "
-                        "vip group - skipping" % (vip_key, vip), WARNING)
-                    continue
+def expect_ha():
+    """ Determine if the unit expects to be in HA
 
-            relation_data['resources'][vip_key] = res_neutron_vip
-            relation_data['resource_params'][vip_key] = (
-                'params {ip}="{vip}" cidr_netmask="{netmask}" '
-                'nic="{iface}"'.format(ip=vip_params,
-                                       vip=vip,
-                                       iface=iface,
-                                       netmask=netmask)
-            )
-            vip_group.append(vip_key)
+    Check for VIP or dns-ha settings which indicate the unit should expect to
+    be related to hacluster.
 
-    if len(vip_group) >= 1:
-        relation_data['groups'] = {
-            'grp_{}_vips'.format(service): ' '.join(vip_group)
-        }
+    @returns boolean
+    """
+    return config('vip') or config('dns-ha')
diff --git a/charmhelpers/contrib/openstack/ip.py b/charmhelpers/contrib/openstack/ip.py
index 73102af7d5eec9fc0255acfeea211310b8d3794d..d1476b1ab21d40934db6eb0cc0d2174d41b1df72 100644
--- a/charmhelpers/contrib/openstack/ip.py
+++ b/charmhelpers/contrib/openstack/ip.py
@@ -184,13 +184,3 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
                          "clustered=%s)" % (net_type, clustered))
 
     return resolved_address
-
-
-def get_vip_in_network(network):
-    matching_vip = None
-    vips = config('vip')
-    if vips:
-        for vip in vips.split():
-            if is_address_in_network(network, vip):
-                matching_vip = vip
-    return matching_vip
diff --git a/charmhelpers/contrib/openstack/templates/haproxy.cfg b/charmhelpers/contrib/openstack/templates/haproxy.cfg
index 0081fccbe4a9cd369247d99ae64c3cc2ba308140..55270795dd9d604d04f76c18428961dadccfc097 100644
--- a/charmhelpers/contrib/openstack/templates/haproxy.cfg
+++ b/charmhelpers/contrib/openstack/templates/haproxy.cfg
@@ -17,22 +17,22 @@ defaults
 {%- if haproxy_queue_timeout %}
     timeout queue {{ haproxy_queue_timeout }}
 {%- else %}
-    timeout queue 9000
+    timeout queue 5000
 {%- endif %}
 {%- if haproxy_connect_timeout %}
     timeout connect {{ haproxy_connect_timeout }}
 {%- else %}
-    timeout connect 9000
+    timeout connect 5000
 {%- endif %}
 {%- if haproxy_client_timeout %}
     timeout client {{ haproxy_client_timeout }}
 {%- else %}
-    timeout client 90000
+    timeout client 30000
 {%- endif %}
 {%- if haproxy_server_timeout %}
     timeout server {{ haproxy_server_timeout }}
 {%- else %}
-    timeout server 90000
+    timeout server 30000
 {%- endif %}
 
 listen stats
diff --git a/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/charmhelpers/contrib/openstack/templates/section-oslo-notifications
index 021a3c250822637c5fdf511024095730412ab098..5dccd4bb3943ff209bd820908baf7e77cb44649a 100644
--- a/charmhelpers/contrib/openstack/templates/section-oslo-notifications
+++ b/charmhelpers/contrib/openstack/templates/section-oslo-notifications
@@ -5,7 +5,4 @@ transport_url = {{ transport_url }}
 {% if notification_topics -%}
 topics = {{ notification_topics }}
 {% endif -%}
-{% if notification_format -%}
-notification_format = {{ notification_format }}
-{% endif -%}
 {% endif -%}
diff --git a/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
index b241bbfc7ff28c6293d740255bbe82875cd8bc13..a3841ea6dcb50a16b204d88c1e944f6e678e52ee 100644
--- a/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
+++ b/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
@@ -22,6 +22,9 @@ Listen {{ public_port }}
 {% if port -%}
 <VirtualHost *:{{ port }}>
     WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
+{% if python_path -%}
+                      python-path={{ python_path }} \
+{% endif -%}
                       display-name=%{GROUP}
     WSGIProcessGroup {{ service_name }}
     WSGIScriptAlias / {{ script }}
@@ -33,7 +36,7 @@ Listen {{ public_port }}
     ErrorLog /var/log/apache2/{{ service_name }}_error.log
     CustomLog /var/log/apache2/{{ service_name }}_access.log combined
 
-    <Directory /usr/bin>
+    <Directory {{ usr_bin }}>
         <IfVersion >= 2.4>
             Require all granted
         </IfVersion>
@@ -48,6 +51,9 @@ Listen {{ public_port }}
 {% if admin_port -%}
 <VirtualHost *:{{ admin_port }}>
     WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
+{% if python_path -%}
+                      python-path={{ python_path }} \
+{% endif -%}
                       display-name=%{GROUP}
     WSGIProcessGroup {{ service_name }}-admin
     WSGIScriptAlias / {{ admin_script }}
@@ -59,7 +65,7 @@ Listen {{ public_port }}
     ErrorLog /var/log/apache2/{{ service_name }}_error.log
     CustomLog /var/log/apache2/{{ service_name }}_access.log combined
 
-    <Directory /usr/bin>
+    <Directory {{ usr_bin }}>
         <IfVersion >= 2.4>
             Require all granted
         </IfVersion>
@@ -80,6 +86,9 @@ Listen {{ public_port }}
     Alias /eds /var/www/html/eds
 
     WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
+{% if python_path -%}
+                      python-path={{ python_path }} \
+{% endif -%}
                       display-name=%{GROUP}
     WSGIProcessGroup {{ service_name }}-public
     WSGIScriptAlias / {{ public_script }}
@@ -92,7 +101,7 @@ Listen {{ public_port }}
     CustomLog /var/log/apache2/{{ service_name }}_access.log combined
     WSGIScriptAlias ^(/v3/OS-FEDERATION/identity_providers/.*?/protocols/.*?/auth)$ /usr/bin/keystone-wsgi-public/$1
 
-    <Directory /usr/bin>
+    <Directory {{ usr_bin }}>
         <IfVersion >= 2.4>
             Require all granted
         </IfVersion>
diff --git a/charmhelpers/contrib/openstack/templating.py b/charmhelpers/contrib/openstack/templating.py
index bbb6a7e4b69afbc78dd919d2f84f59d2015d5722..edefcfe900eef85f9441428259bd9997a9801af9 100644
--- a/charmhelpers/contrib/openstack/templating.py
+++ b/charmhelpers/contrib/openstack/templating.py
@@ -94,8 +94,7 @@ class OSConfigTemplate(object):
     Associates a config file template with a list of context generators.
     Responsible for constructing a template context based on those generators.
     """
-
-    def __init__(self, config_file, contexts, config_template=None):
+    def __init__(self, config_file, contexts):
         self.config_file = config_file
 
         if hasattr(contexts, '__call__'):
@@ -105,8 +104,6 @@ class OSConfigTemplate(object):
 
         self._complete_contexts = []
 
-        self.config_template = config_template
-
     def context(self):
         ctxt = {}
         for context in self.contexts:
@@ -128,11 +125,6 @@ class OSConfigTemplate(object):
         self.context()
         return self._complete_contexts
 
-    @property
-    def is_string_template(self):
-        """:returns: Boolean if this instance is a template initialised with a string"""
-        return self.config_template is not None
-
 
 class OSConfigRenderer(object):
     """
@@ -157,10 +149,6 @@ class OSConfigRenderer(object):
                          contexts=[context.IdentityServiceContext()])
         configs.register(config_file='/etc/haproxy/haproxy.conf',
                          contexts=[context.HAProxyContext()])
-        configs.register(config_file='/etc/keystone/policy.d/extra.cfg',
-                         contexts=[context.ExtraPolicyContext()
-                                   context.KeystoneContext()],
-                         config_template=hookenv.config('extra-policy'))
         # write out a single config
         configs.write('/etc/nova/nova.conf')
         # write out all registered configs
@@ -231,23 +219,14 @@ class OSConfigRenderer(object):
             else:
                 apt_install('python3-jinja2')
 
-    def register(self, config_file, contexts, config_template=None):
+    def register(self, config_file, contexts):
         """
         Register a config file with a list of context generators to be called
         during rendering.
-        config_template can be used to load a template from a string instead of
-        using template loaders and template files.
-        :param config_file (str): a path where a config file will be rendered
-        :param contexts (list): a list of context dictionaries with kv pairs
-        :param config_template (str): an optional template string to use
         """
-        self.templates[config_file] = OSConfigTemplate(
-            config_file=config_file,
-            contexts=contexts,
-            config_template=config_template
-        )
-        log('Registered config file: {}'.format(config_file),
-            level=INFO)
+        self.templates[config_file] = OSConfigTemplate(config_file=config_file,
+                                                       contexts=contexts)
+        log('Registered config file: %s' % config_file, level=INFO)
 
     def _get_tmpl_env(self):
         if not self._tmpl_env:
@@ -257,58 +236,32 @@ class OSConfigRenderer(object):
     def _get_template(self, template):
         self._get_tmpl_env()
         template = self._tmpl_env.get_template(template)
-        log('Loaded template from {}'.format(template.filename),
-            level=INFO)
-        return template
-
-    def _get_template_from_string(self, ostmpl):
-        '''
-        Get a jinja2 template object from a string.
-        :param ostmpl: OSConfigTemplate to use as a data source.
-        '''
-        self._get_tmpl_env()
-        template = self._tmpl_env.from_string(ostmpl.config_template)
-        log('Loaded a template from a string for {}'.format(
-            ostmpl.config_file),
-            level=INFO)
+        log('Loaded template from %s' % template.filename, level=INFO)
         return template
 
     def render(self, config_file):
         if config_file not in self.templates:
-            log('Config not registered: {}'.format(config_file), level=ERROR)
+            log('Config not registered: %s' % config_file, level=ERROR)
             raise OSConfigException
-
-        ostmpl = self.templates[config_file]
-        ctxt = ostmpl.context()
-
-        if ostmpl.is_string_template:
-            template = self._get_template_from_string(ostmpl)
-            log('Rendering from a string template: '
-                '{}'.format(config_file),
-                level=INFO)
-        else:
-            _tmpl = os.path.basename(config_file)
+        ctxt = self.templates[config_file].context()
+
+        _tmpl = os.path.basename(config_file)
+        try:
+            template = self._get_template(_tmpl)
+        except exceptions.TemplateNotFound:
+            # if no template is found with basename, try looking for it
+            # using a munged full path, eg:
+            #   /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
+            _tmpl = '_'.join(config_file.split('/')[1:])
             try:
                 template = self._get_template(_tmpl)
-            except exceptions.TemplateNotFound:
-                # if no template is found with basename, try looking
-                # for it using a munged full path, eg:
-                # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
-                _tmpl = '_'.join(config_file.split('/')[1:])
-                try:
-                    template = self._get_template(_tmpl)
-                except exceptions.TemplateNotFound as e:
-                    log('Could not load template from {} by {} or {}.'
-                        ''.format(
-                            self.templates_dir,
-                            os.path.basename(config_file),
-                            _tmpl
-                        ),
-                        level=ERROR)
-                    raise e
-
-            log('Rendering from template: {}'.format(config_file),
-                level=INFO)
+            except exceptions.TemplateNotFound as e:
+                log('Could not load template from %s by %s or %s.' %
+                    (self.templates_dir, os.path.basename(config_file), _tmpl),
+                    level=ERROR)
+                raise e
+
+        log('Rendering from template: %s' % _tmpl, level=INFO)
         return template.render(ctxt)
 
     def write(self, config_file):
diff --git a/charmhelpers/contrib/openstack/utils.py b/charmhelpers/contrib/openstack/utils.py
index 0180e5553854cbb730dcd5a2e865ef1106ca5f18..8a541d4087662120d26f480262181413380374b8 100644
--- a/charmhelpers/contrib/openstack/utils.py
+++ b/charmhelpers/contrib/openstack/utils.py
@@ -23,6 +23,7 @@ import sys
 import re
 import itertools
 import functools
+import shutil
 
 import six
 import traceback
@@ -46,6 +47,7 @@ from charmhelpers.core.hookenv import (
     related_units,
     relation_ids,
     relation_set,
+    service_name,
     status_set,
     hook_name,
     application_version_set,
@@ -66,6 +68,11 @@ from charmhelpers.contrib.network.ip import (
     port_has_listener,
 )
 
+from charmhelpers.contrib.python.packages import (
+    pip_create_virtualenv,
+    pip_install,
+)
+
 from charmhelpers.core.host import (
     lsb_release,
     mounts,
@@ -77,6 +84,7 @@ from charmhelpers.core.host import (
 )
 from charmhelpers.fetch import (
     apt_cache,
+    install_remote,
     import_key as fetch_import_key,
     add_source as fetch_add_source,
     SourceConfigError,
@@ -133,7 +141,6 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('zesty', 'ocata'),
     ('artful', 'pike'),
     ('bionic', 'queens'),
-    ('cosmic', 'rocky'),
 ])
 
 
@@ -152,7 +159,6 @@ OPENSTACK_CODENAMES = OrderedDict([
     ('2017.1', 'ocata'),
     ('2017.2', 'pike'),
     ('2018.1', 'queens'),
-    ('2018.2', 'rocky'),
 ])
 
 # The ugly duckling - must list releases oldest to newest
@@ -184,9 +190,7 @@ SWIFT_CODENAMES = OrderedDict([
     ('pike',
         ['2.13.0', '2.15.0']),
     ('queens',
-        ['2.16.0', '2.17.0']),
-    ('rocky',
-        ['2.18.0']),
+        ['2.16.0']),
 ])
 
 # >= Liberty version->codename mapping
@@ -274,6 +278,27 @@ PACKAGE_CODENAMES = {
     ]),
 }
 
+GIT_DEFAULT_REPOS = {
+    'requirements': 'git://github.com/openstack/requirements',
+    'cinder': 'git://github.com/openstack/cinder',
+    'glance': 'git://github.com/openstack/glance',
+    'horizon': 'git://github.com/openstack/horizon',
+    'keystone': 'git://github.com/openstack/keystone',
+    'networking-hyperv': 'git://github.com/openstack/networking-hyperv',
+    'neutron': 'git://github.com/openstack/neutron',
+    'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
+    'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
+    'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
+    'nova': 'git://github.com/openstack/nova',
+}
+
+GIT_DEFAULT_BRANCHES = {
+    'liberty': 'stable/liberty',
+    'mitaka': 'stable/mitaka',
+    'newton': 'stable/newton',
+    'master': 'master',
+}
+
 DEFAULT_LOOPBACK_SIZE = '5G'
 
 
@@ -310,7 +335,7 @@ def get_os_codename_install_source(src):
 
     if src.startswith('cloud:'):
         ca_rel = src.split(':')[1]
-        ca_rel = ca_rel.split('-')[1].split('/')[0]
+        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
         return ca_rel
 
     # Best guess match based on deb string provided
@@ -367,8 +392,6 @@ def get_swift_codename(version):
             releases = UBUNTU_OPENSTACK_RELEASE
             release = [k for k, v in six.iteritems(releases) if codename in v]
             ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
-            if six.PY3:
-                ret = ret.decode('UTF-8')
             if codename in ret or release[0] in ret:
                 return codename
     elif len(codenames) == 1:
@@ -505,6 +528,7 @@ def os_release(package, base='essex', reset_cache=False):
     if _os_rel:
         return _os_rel
     _os_rel = (
+        git_os_codename_install_source(config('openstack-origin-git')) or
         get_os_codename_package(package, fatal=False) or
         get_os_codename_install_source(config('openstack-origin')) or
         base)
@@ -630,6 +654,11 @@ def openstack_upgrade_available(package):
     else:
         avail_vers = get_os_version_install_source(src)
     apt.init()
+    if "swift" in package:
+        major_cur_vers = cur_vers.split('.', 1)[0]
+        major_avail_vers = avail_vers.split('.', 1)[0]
+        major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
+        return apt.version_compare(avail_vers, cur_vers) == 1 and major_diff in (0, 1)
     return apt.version_compare(avail_vers, cur_vers) == 1
 
 
@@ -740,6 +769,417 @@ def os_requires_version(ostack_release, pkg):
     return wrap
 
 
+def git_install_requested():
+    """
+    Returns true if openstack-origin-git is specified.
+    """
+    return config('openstack-origin-git') is not None
+
+
+def git_os_codename_install_source(projects_yaml):
+    """
+    Returns OpenStack codename of release being installed from source.
+    """
+    if git_install_requested():
+        projects = _git_yaml_load(projects_yaml)
+
+        if projects in GIT_DEFAULT_BRANCHES.keys():
+            if projects == 'master':
+                return 'ocata'
+            return projects
+
+        if 'release' in projects:
+            if projects['release'] == 'master':
+                return 'ocata'
+            return projects['release']
+
+    return None
+
+
+def git_default_repos(projects_yaml):
+    """
+    Returns default repos if a default openstack-origin-git value is specified.
+    """
+    service = service_name()
+    core_project = service
+
+    for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES):
+        if projects_yaml == default:
+
+            # add the requirements repo first
+            repo = {
+                'name': 'requirements',
+                'repository': GIT_DEFAULT_REPOS['requirements'],
+                'branch': branch,
+            }
+            repos = [repo]
+
+            # neutron-* and nova-* charms require some additional repos
+            if service in ['neutron-api', 'neutron-gateway',
+                           'neutron-openvswitch']:
+                core_project = 'neutron'
+                if service == 'neutron-api':
+                    repo = {
+                        'name': 'networking-hyperv',
+                        'repository': GIT_DEFAULT_REPOS['networking-hyperv'],
+                        'branch': branch,
+                    }
+                    repos.append(repo)
+                for project in ['neutron-fwaas', 'neutron-lbaas',
+                                'neutron-vpnaas', 'nova']:
+                    repo = {
+                        'name': project,
+                        'repository': GIT_DEFAULT_REPOS[project],
+                        'branch': branch,
+                    }
+                    repos.append(repo)
+
+            elif service in ['nova-cloud-controller', 'nova-compute']:
+                core_project = 'nova'
+                repo = {
+                    'name': 'neutron',
+                    'repository': GIT_DEFAULT_REPOS['neutron'],
+                    'branch': branch,
+                }
+                repos.append(repo)
+            elif service == 'openstack-dashboard':
+                core_project = 'horizon'
+
+            # finally add the current service's core project repo
+            repo = {
+                'name': core_project,
+                'repository': GIT_DEFAULT_REPOS[core_project],
+                'branch': branch,
+            }
+            repos.append(repo)
+
+            return yaml.dump(dict(repositories=repos, release=default))
+
+    return projects_yaml
+
+
+def _git_yaml_load(projects_yaml):
+    """
+    Load the specified yaml into a dictionary.
+    """
+    if not projects_yaml:
+        return None
+
+    return yaml.safe_load(projects_yaml)  # config YAML is plain data; avoid unsafe yaml.load
+
+
+requirements_dir = None
+
+
+def git_clone_and_install(projects_yaml, core_project):
+    """
+    Clone/install all specified OpenStack repositories.
+
+    The expected format of projects_yaml is:
+
+        repositories:
+          - {name: keystone,
+             repository: 'git://git.openstack.org/openstack/keystone.git',
+             branch: 'stable/icehouse'}
+          - {name: requirements,
+             repository: 'git://git.openstack.org/openstack/requirements.git',
+             branch: 'stable/icehouse'}
+
+        directory: /mnt/openstack-git
+        http_proxy: squid-proxy-url
+        https_proxy: squid-proxy-url
+
+    The directory, http_proxy, and https_proxy keys are optional.
+
+    """
+    global requirements_dir
+    parent_dir = '/mnt/openstack-git'
+    http_proxy = None
+
+    projects = _git_yaml_load(projects_yaml)
+    _git_validate_projects_yaml(projects, core_project)
+
+    old_environ = dict(os.environ)
+
+    if 'http_proxy' in projects.keys():
+        http_proxy = projects['http_proxy']
+        os.environ['http_proxy'] = projects['http_proxy']
+    if 'https_proxy' in projects.keys():
+        os.environ['https_proxy'] = projects['https_proxy']
+
+    if 'directory' in projects.keys():
+        parent_dir = projects['directory']
+
+    pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
+
+    # Upgrade setuptools and pip from default virtualenv versions. The default
+    # versions in trusty break master OpenStack branch deployments.
+    for p in ['pip', 'setuptools']:
+        pip_install(p, upgrade=True, proxy=http_proxy,
+                    venv=os.path.join(parent_dir, 'venv'))
+
+    constraints = None
+    for p in projects['repositories']:
+        repo = p['repository']
+        branch = p['branch']
+        depth = '1'
+        if 'depth' in p.keys():
+            depth = p['depth']
+        if p['name'] == 'requirements':
+            repo_dir = _git_clone_and_install_single(repo, branch, depth,
+                                                     parent_dir, http_proxy,
+                                                     update_requirements=False)
+            requirements_dir = repo_dir
+            constraints = os.path.join(repo_dir, "upper-constraints.txt")
+            # upper-constraints didn't exist until after icehouse
+            if not os.path.isfile(constraints):
+                constraints = None
+            # use constraints unless project yaml sets use_constraints to false
+            if 'use_constraints' in projects.keys():
+                if not projects['use_constraints']:
+                    constraints = None
+        else:
+            repo_dir = _git_clone_and_install_single(repo, branch, depth,
+                                                     parent_dir, http_proxy,
+                                                     update_requirements=True,
+                                                     constraints=constraints)
+
+    os.environ = old_environ
+
+
+def _git_validate_projects_yaml(projects, core_project):
+    """
+    Validate the projects yaml.
+    """
+    _git_ensure_key_exists('repositories', projects)
+
+    for project in projects['repositories']:
+        _git_ensure_key_exists('name', project.keys())
+        _git_ensure_key_exists('repository', project.keys())
+        _git_ensure_key_exists('branch', project.keys())
+
+    if projects['repositories'][0]['name'] != 'requirements':
+        error_out('{} git repo must be specified first'.format('requirements'))
+
+    if projects['repositories'][-1]['name'] != core_project:
+        error_out('{} git repo must be specified last'.format(core_project))
+
+    _git_ensure_key_exists('release', projects)
+
+
+def _git_ensure_key_exists(key, keys):
+    """
+    Ensure that key exists in keys.
+    """
+    if key not in keys:
+        error_out('openstack-origin-git key \'{}\' is missing'.format(key))
+
+
+def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
+                                  update_requirements, constraints=None):
+    """
+    Clone and install a single git repository.
+    """
+    if not os.path.exists(parent_dir):
+        juju_log('Directory does not exist at {}. '
+                 'Creating it now.'.format(parent_dir))
+        os.mkdir(parent_dir)
+
+    juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
+    repo_dir = install_remote(
+        repo, dest=parent_dir, branch=branch, depth=depth)
+
+    venv = os.path.join(parent_dir, 'venv')
+
+    if update_requirements:
+        if not requirements_dir:
+            error_out('requirements repo must be cloned before '
+                      'updating from global requirements.')
+        _git_update_requirements(venv, repo_dir, requirements_dir)
+
+    juju_log('Installing git repo from dir: {}'.format(repo_dir))
+    if http_proxy:
+        pip_install(repo_dir, proxy=http_proxy, venv=venv,
+                    constraints=constraints)
+    else:
+        pip_install(repo_dir, venv=venv, constraints=constraints)
+
+    return repo_dir
+
+
+def _git_update_requirements(venv, package_dir, reqs_dir):
+    """
+    Update from global requirements.
+
+    Update an OpenStack git directory's requirements.txt and
+    test-requirements.txt from global-requirements.txt.
+    """
+    orig_dir = os.getcwd()
+    os.chdir(reqs_dir)
+    python = os.path.join(venv, 'bin/python')
+    cmd = [python, 'update.py', package_dir]
+    try:
+        subprocess.check_call(cmd)
+    except subprocess.CalledProcessError:
+        package = os.path.basename(package_dir)
+        error_out("Error updating {} from "
+                  "global-requirements.txt".format(package))
+    os.chdir(orig_dir)
+
+
+def git_pip_venv_dir(projects_yaml):
+    """
+    Return the pip virtualenv path.
+    """
+    parent_dir = '/mnt/openstack-git'
+
+    projects = _git_yaml_load(projects_yaml)
+
+    if 'directory' in projects.keys():
+        parent_dir = projects['directory']
+
+    return os.path.join(parent_dir, 'venv')
+
+
+def git_src_dir(projects_yaml, project):
+    """
+    Return the directory where the specified project's source is located.
+    """
+    parent_dir = '/mnt/openstack-git'
+
+    projects = _git_yaml_load(projects_yaml)
+
+    if 'directory' in projects.keys():
+        parent_dir = projects['directory']
+
+    for p in projects['repositories']:
+        if p['name'] == project:
+            return os.path.join(parent_dir, os.path.basename(p['repository']))
+
+    return None
+
+
+def git_yaml_value(projects_yaml, key):
+    """
+    Return the value in projects_yaml for the specified key.
+    """
+    projects = _git_yaml_load(projects_yaml)
+
+    if key in projects.keys():
+        return projects[key]
+
+    return None
+
+
+def git_generate_systemd_init_files(templates_dir):
+    """
+    Generate systemd init files.
+
+    Generates and installs systemd init units and script files based on the
+    *.init.in files contained in the templates_dir directory.
+
+    This code is based on the openstack-pkg-tools package and its init
+    script generation, which is used by the OpenStack packages.
+    """
+    for f in os.listdir(templates_dir):
+        # Create the init script and systemd unit file from the template
+        if f.endswith(".init.in"):
+            init_in_file = f
+            init_file = f[:-8]
+            service_file = "{}.service".format(init_file)
+
+            init_in_source = os.path.join(templates_dir, init_in_file)
+            init_source = os.path.join(templates_dir, init_file)
+            service_source = os.path.join(templates_dir, service_file)
+
+            init_dest = os.path.join('/etc/init.d', init_file)
+            service_dest = os.path.join('/lib/systemd/system', service_file)
+
+            shutil.copyfile(init_in_source, init_source)
+            with open(init_source, 'a') as outfile:
+                template = ('/usr/share/openstack-pkg-tools/'
+                            'init-script-template')
+                with open(template) as infile:
+                    outfile.write('\n\n{}'.format(infile.read()))
+
+            cmd = ['pkgos-gen-systemd-unit', init_in_source]
+            subprocess.check_call(cmd)
+
+            if os.path.exists(init_dest):
+                os.remove(init_dest)
+            if os.path.exists(service_dest):
+                os.remove(service_dest)
+            shutil.copyfile(init_source, init_dest)
+            shutil.copyfile(service_source, service_dest)
+            os.chmod(init_dest, 0o755)
+
+    for f in os.listdir(templates_dir):
+        # If there's a service.in file, use it instead of the generated one
+        if f.endswith(".service.in"):
+            service_in_file = f
+            service_file = f[:-3]
+
+            service_in_source = os.path.join(templates_dir, service_in_file)
+            service_source = os.path.join(templates_dir, service_file)
+            service_dest = os.path.join('/lib/systemd/system', service_file)
+
+            shutil.copyfile(service_in_source, service_source)
+
+            if os.path.exists(service_dest):
+                os.remove(service_dest)
+            shutil.copyfile(service_source, service_dest)
+
+    for f in os.listdir(templates_dir):
+        # Generate the systemd unit if there's no existing .service.in
+        if f.endswith(".init.in"):
+            init_in_file = f
+            init_file = f[:-8]
+            service_in_file = "{}.service.in".format(init_file)
+            service_file = "{}.service".format(init_file)
+
+            init_in_source = os.path.join(templates_dir, init_in_file)
+            service_in_source = os.path.join(templates_dir, service_in_file)
+            service_source = os.path.join(templates_dir, service_file)
+            service_dest = os.path.join('/lib/systemd/system', service_file)
+
+            if not os.path.exists(service_in_source):
+                cmd = ['pkgos-gen-systemd-unit', init_in_source]
+                subprocess.check_call(cmd)
+
+                if os.path.exists(service_dest):
+                    os.remove(service_dest)
+                shutil.copyfile(service_source, service_dest)
+
+
+def git_determine_usr_bin():
+    """Return the /usr/bin path for Apache2 config.
+
+    The /usr/bin path will be located in the virtualenv if the charm
+    is configured to deploy from source.
+    """
+    if git_install_requested():
+        projects_yaml = config('openstack-origin-git')
+        projects_yaml = git_default_repos(projects_yaml)
+        return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
+    else:
+        return '/usr/bin'
+
+
+def git_determine_python_path():
+    """Return the python-path for Apache2 config.
+
+    Returns 'None' unless the charm is configured to deploy from source,
+    in which case the path of the virtualenv's site-packages is returned.
+    """
+    if git_install_requested():
+        projects_yaml = config('openstack-origin-git')
+        projects_yaml = git_default_repos(projects_yaml)
+        return os.path.join(git_pip_venv_dir(projects_yaml),
+                            'lib/python2.7/site-packages')
+    else:
+        return None
+
+
 def os_workload_status(configs, required_interfaces, charm_func=None):
     """
     Decorator to set workload status based on complete contexts
@@ -1173,24 +1613,27 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
     """
     ret = False
 
-    if openstack_upgrade_available(package):
-        if config('action-managed-upgrade'):
-            juju_log('Upgrading OpenStack release')
-
-            try:
-                upgrade_callback(configs=configs)
-                action_set({'outcome': 'success, upgrade completed.'})
-                ret = True
-            except Exception:
-                action_set({'outcome': 'upgrade failed, see traceback.'})
-                action_set({'traceback': traceback.format_exc()})
-                action_fail('do_openstack_upgrade resulted in an '
-                            'unexpected error')
-        else:
-            action_set({'outcome': 'action-managed-upgrade config is '
-                                   'False, skipped upgrade.'})
+    if git_install_requested():
+        action_set({'outcome': 'installed from source, skipped upgrade.'})
     else:
-        action_set({'outcome': 'no upgrade available.'})
+        if openstack_upgrade_available(package):
+            if config('action-managed-upgrade'):
+                juju_log('Upgrading OpenStack release')
+
+                try:
+                    upgrade_callback(configs=configs)
+                    action_set({'outcome': 'success, upgrade completed.'})
+                    ret = True
+                except Exception:
+                    action_set({'outcome': 'upgrade failed, see traceback.'})
+                    action_set({'traceback': traceback.format_exc()})
+                    action_fail('do_openstack_upgrade resulted in an '
+                                'unexpected error')
+            else:
+                action_set({'outcome': 'action-managed-upgrade config is '
+                                       'False, skipped upgrade.'})
+        else:
+            action_set({'outcome': 'no upgrade available.'})
 
     return ret
 
@@ -1600,25 +2043,14 @@ def token_cache_pkgs(source=None, release=None):
 
 def update_json_file(filename, items):
     """Updates the json `filename` with a given dict.
-    :param filename: path to json file (e.g. /etc/glance/policy.json)
+    :param filename: json filename (i.e.: /etc/glance/policy.json)
     :param items: dict of items to update
     """
-    if not items:
-        return
-
     with open(filename) as fd:
         policy = json.load(fd)
-
-    # Compare before and after and if nothing has changed don't write the file
-    # since that could cause unnecessary service restarts.
-    before = json.dumps(policy, indent=4, sort_keys=True)
     policy.update(items)
-    after = json.dumps(policy, indent=4, sort_keys=True)
-    if before == after:
-        return
-
     with open(filename, "w") as fd:
-        fd.write(after)
+        fd.write(json.dumps(policy, indent=4))
 
 
 @cached
diff --git a/charmhelpers/contrib/storage/linux/ceph.py b/charmhelpers/contrib/storage/linux/ceph.py
index 76828201628776a5c860805825711d38c47db4a5..392316126b3799796c786aead740f88e87b8a71e 100644
--- a/charmhelpers/contrib/storage/linux/ceph.py
+++ b/charmhelpers/contrib/storage/linux/ceph.py
@@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None):
         assert isinstance(valid_range, list), \
             "valid_range must be a list, was given {}".format(valid_range)
         # If we're dealing with strings
-        if isinstance(value, six.string_types):
+        if valid_type is six.string_types:
             assert value in valid_range, \
                 "{} is not in the list {}".format(value, valid_range)
         # Integer, float should have a min and max
@@ -291,7 +291,7 @@ class Pool(object):
 
 class ReplicatedPool(Pool):
     def __init__(self, service, name, pg_num=None, replicas=2,
-                 percent_data=10.0, app_name=None):
+                 percent_data=10.0):
         super(ReplicatedPool, self).__init__(service=service, name=name)
         self.replicas = replicas
         if pg_num:
@@ -301,10 +301,6 @@ class ReplicatedPool(Pool):
             self.pg_num = min(pg_num, max_pgs)
         else:
             self.pg_num = self.get_pgs(self.replicas, percent_data)
-        if app_name:
-            self.app_name = app_name
-        else:
-            self.app_name = 'unknown'
 
     def create(self):
         if not pool_exists(self.service, self.name):
@@ -317,12 +313,6 @@ class ReplicatedPool(Pool):
                 update_pool(client=self.service,
                             pool=self.name,
                             settings={'size': str(self.replicas)})
-                try:
-                    set_app_name_for_pool(client=self.service,
-                                          pool=self.name,
-                                          name=self.app_name)
-                except CalledProcessError:
-                    log('Could not set app name for pool {}'.format(self.name, level=WARNING))
             except CalledProcessError:
                 raise
 
@@ -330,14 +320,10 @@ class ReplicatedPool(Pool):
 # Default jerasure erasure coded pool
 class ErasurePool(Pool):
     def __init__(self, service, name, erasure_code_profile="default",
-                 percent_data=10.0, app_name=None):
+                 percent_data=10.0):
         super(ErasurePool, self).__init__(service=service, name=name)
         self.erasure_code_profile = erasure_code_profile
         self.percent_data = percent_data
-        if app_name:
-            self.app_name = app_name
-        else:
-            self.app_name = 'unknown'
 
     def create(self):
         if not pool_exists(self.service, self.name):
@@ -369,12 +355,6 @@ class ErasurePool(Pool):
                    'erasure', self.erasure_code_profile]
             try:
                 check_call(cmd)
-                try:
-                    set_app_name_for_pool(client=self.service,
-                                          pool=self.name,
-                                          name=self.app_name)
-                except CalledProcessError:
-                    log('Could not set app name for pool {}'.format(self.name, level=WARNING))
             except CalledProcessError:
                 raise
 
@@ -397,12 +377,12 @@ def get_mon_map(service):
         try:
             return json.loads(mon_status)
         except ValueError as v:
-            log("Unable to parse mon_status json: {}. Error: {}"
-                .format(mon_status, str(v)))
+            log("Unable to parse mon_status json: {}. Error: {}".format(
+                mon_status, str(v)))
             raise
     except CalledProcessError as e:
-        log("mon_status command failed with message: {}"
-            .format(str(e)))
+        log("mon_status command failed with message: {}".format(
+            str(e)))
         raise
 
 
@@ -537,8 +517,7 @@ def pool_set(service, pool_name, key, value):
     :param value:
     :return: None.  Can raise CalledProcessError
     """
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key,
-           str(value).lower()]
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
     try:
         check_call(cmd)
     except CalledProcessError:
@@ -642,24 +621,16 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
     :param durability_estimator: int
     :return: None.  Can raise CalledProcessError
     """
-    version = ceph_version()
-
     # Ensure this failure_domain is allowed by Ceph
     validator(failure_domain, six.string_types,
               ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
 
     cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
-           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks)
-           ]
+           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
+           'ruleset-failure-domain=' + failure_domain]
     if locality is not None and durability_estimator is not None:
         raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
 
-    # failure_domain changed in luminous
-    if version and version >= '12.0.0':
-        cmd.append('crush-failure-domain=' + failure_domain)
-    else:
-        cmd.append('ruleset-failure-domain=' + failure_domain)
-
     # Add plugin specific information
     if locality is not None:
         # For local erasure codes
@@ -798,25 +769,6 @@ def update_pool(client, pool, settings):
     check_call(cmd)
 
 
-def set_app_name_for_pool(client, pool, name):
-    """
-    Calls `osd pool application enable` for the specified pool name
-
-    :param client: Name of the ceph client to use
-    :type client: str
-    :param pool: Pool to set app name for
-    :type pool: str
-    :param name: app name for the specified pool
-    :type name: str
-
-    :raises: CalledProcessError if ceph call fails
-    """
-    if ceph_version() >= '12.0.0':
-        cmd = ['ceph', '--id', client, 'osd', 'pool',
-               'application', 'enable', pool, name]
-        check_call(cmd)
-
-
 def create_pool(service, name, replicas=3, pg_num=None):
     """Create a new RADOS pool."""
     if pool_exists(service, name):
@@ -1112,24 +1064,14 @@ class CephBrokerRq(object):
         self.ops = []
 
     def add_op_request_access_to_group(self, name, namespace=None,
-                                       permission=None, key_name=None,
-                                       object_prefix_permissions=None):
+                                       permission=None, key_name=None):
         """
         Adds the requested permissions to the current service's Ceph key,
-        allowing the key to access only the specified pools or
-        object prefixes. object_prefix_permissions should be a dictionary
-        keyed on the permission with the corresponding value being a list
-        of prefixes to apply that permission to.
-            {
-                'rwx': ['prefix1', 'prefix2'],
-                'class-read': ['prefix3']}
+        allowing the key to access only the specified pools
         """
-        self.ops.append({
-            'op': 'add-permissions-to-key', 'group': name,
-            'namespace': namespace,
-            'name': key_name or service_name(),
-            'group-permission': permission,
-            'object-prefix-permissions': object_prefix_permissions})
+        self.ops.append({'op': 'add-permissions-to-key', 'group': name,
+                         'namespace': namespace, 'name': key_name or service_name(),
+                         'group-permission': permission})
 
     def add_op_create_pool(self, name, replica_count=3, pg_num=None,
                            weight=None, group=None, namespace=None):
@@ -1165,10 +1107,7 @@ class CephBrokerRq(object):
     def _ops_equal(self, other):
         if len(self.ops) == len(other.ops):
             for req_no in range(0, len(self.ops)):
-                for key in [
-                        'replicas', 'name', 'op', 'pg_num', 'weight',
-                        'group', 'group-namespace', 'group-permission',
-                        'object-prefix-permissions']:
+                for key in ['replicas', 'name', 'op', 'pg_num', 'weight']:
                     if self.ops[req_no].get(key) != other.ops[req_no].get(key):
                         return False
         else:
diff --git a/charmhelpers/contrib/storage/linux/lvm.py b/charmhelpers/contrib/storage/linux/lvm.py
index c8bde69263f0e917d32d0e5d70abba1409b26012..7f2a0604931fd7b9ee15224971b4a3f5ed79f3ef 100644
--- a/charmhelpers/contrib/storage/linux/lvm.py
+++ b/charmhelpers/contrib/storage/linux/lvm.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import functools
 from subprocess import (
     CalledProcessError,
     check_call,
@@ -102,81 +101,3 @@ def create_lvm_volume_group(volume_group, block_device):
     :block_device: str: Full path of PV-initialized block device.
     '''
     check_call(['vgcreate', volume_group, block_device])
-
-
-def list_logical_volumes(select_criteria=None, path_mode=False):
-    '''
-    List logical volumes
-
-    :param select_criteria: str: Limit list to those volumes matching this
-                                 criteria (see 'lvs -S help' for more details)
-    :param path_mode: bool: return logical volume name in 'vg/lv' format, this
-                            format is required for some commands like lvextend
-    :returns: [str]: List of logical volumes
-    '''
-    lv_diplay_attr = 'lv_name'
-    if path_mode:
-        # Parsing output logic relies on the column order
-        lv_diplay_attr = 'vg_name,' + lv_diplay_attr
-    cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings']
-    if select_criteria:
-        cmd.extend(['--select', select_criteria])
-    lvs = []
-    for lv in check_output(cmd).decode('UTF-8').splitlines():
-        if not lv:
-            continue
-        if path_mode:
-            lvs.append('/'.join(lv.strip().split()))
-        else:
-            lvs.append(lv.strip())
-    return lvs
-
-
-list_thin_logical_volume_pools = functools.partial(
-    list_logical_volumes,
-    select_criteria='lv_attr =~ ^t')
-
-list_thin_logical_volumes = functools.partial(
-    list_logical_volumes,
-    select_criteria='lv_attr =~ ^V')
-
-
-def extend_logical_volume_by_device(lv_name, block_device):
-    '''
-    Extends the size of logical volume lv_name by the amount of free space on
-    physical volume block_device.
-
-    :param lv_name: str: name of logical volume to be extended (vg/lv format)
-    :param block_device: str: name of block_device to be allocated to lv_name
-    '''
-    cmd = ['lvextend', lv_name, block_device]
-    check_call(cmd)
-
-
-def create_logical_volume(lv_name, volume_group, size=None):
-    '''
-    Create a new logical volume in an existing volume group
-
-    :param lv_name: str: name of logical volume to be created.
-    :param volume_group: str: Name of volume group to use for the new volume.
-    :param size: str: Size of logical volume to create (100% if not supplied)
-    :raises subprocess.CalledProcessError: in the event that the lvcreate fails.
-    '''
-    if size:
-        check_call([
-            'lvcreate',
-            '--yes',
-            '-L',
-            '{}'.format(size),
-            '-n', lv_name, volume_group
-        ])
-    # create the lv with all the space available, this is needed because the
-    # system call is different for LVM
-    else:
-        check_call([
-            'lvcreate',
-            '--yes',
-            '-l',
-            '100%FREE',
-            '-n', lv_name, volume_group
-        ])
diff --git a/charmhelpers/contrib/storage/linux/utils.py b/charmhelpers/contrib/storage/linux/utils.py
index 6f846b056c27baeb2ffd848fed0e0aaa89f3b5ce..c9428894317a3285c870a08e8f3b53846739c552 100644
--- a/charmhelpers/contrib/storage/linux/utils.py
+++ b/charmhelpers/contrib/storage/linux/utils.py
@@ -67,19 +67,3 @@ def is_device_mounted(device):
     except Exception:
         return False
     return bool(re.search(r'MOUNTPOINT=".+"', out))
-
-
-def mkfs_xfs(device, force=False):
-    """Format device with XFS filesystem.
-
-    By default this should fail if the device already has a filesystem on it.
-    :param device: Full path to device to format
-    :ptype device: tr
-    :param force: Force operation
-    :ptype: force: boolean"""
-    cmd = ['mkfs.xfs']
-    if force:
-        cmd.append("-f")
-
-    cmd += ['-i', 'size=1024', device]
-    check_call(cmd)
diff --git a/charmhelpers/core/hookenv.py b/charmhelpers/core/hookenv.py
index ed7af39e36fa0b921d42edb94cff997bf01135d1..b2d0cc758cf63ed36028bc77aa59f937fb52e806 100644
--- a/charmhelpers/core/hookenv.py
+++ b/charmhelpers/core/hookenv.py
@@ -27,7 +27,6 @@ import glob
 import os
 import json
 import yaml
-import re
 import subprocess
 import sys
 import errno
@@ -40,7 +39,6 @@ if not six.PY3:
 else:
     from collections import UserDict
 
-
 CRITICAL = "CRITICAL"
 ERROR = "ERROR"
 WARNING = "WARNING"
@@ -68,7 +66,7 @@ def cached(func):
     @wraps(func)
     def wrapper(*args, **kwargs):
         global cache
-        key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
+        key = str((func, args, kwargs))
         try:
             return cache[key]
         except KeyError:
@@ -290,7 +288,7 @@ class Config(dict):
         self.implicit_save = True
         self._prev_dict = None
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
-        if os.path.exists(self.path) and os.stat(self.path).st_size:
+        if os.path.exists(self.path) and os.stat(self.path).st_size:
             self.load_previous()
         atexit(self._implicit_save)
 
@@ -310,11 +308,7 @@ class Config(dict):
         """
         self.path = path or self.path
         with open(self.path) as f:
-            try:
-                self._prev_dict = json.load(f)
-            except ValueError as e:
-                log('Unable to parse previous config data - {}'.format(str(e)),
-                    level=ERROR)
+            self._prev_dict = json.load(f)
         for k, v in copy.deepcopy(self._prev_dict).items():
             if k not in self:
                 self[k] = v
@@ -350,7 +344,6 @@ class Config(dict):
 
         """
         with open(self.path, 'w') as f:
-            os.fchmod(f.fileno(), 0o600)
             json.dump(self, f)
 
     def _implicit_save(self):
@@ -358,40 +351,23 @@ class Config(dict):
             self.save()
 
 
-_cache_config = None
-
-
+@cached
 def config(scope=None):
-    """
-    Get the juju charm configuration (scope==None) or individual key,
-    (scope=str).  The returned value is a Python data structure loaded as
-    JSON from the Juju config command.
-
-    :param scope: If set, return the value for the specified key.
-    :type scope: Optional[str]
-    :returns: Either the whole config as a Config, or a key from it.
-    :rtype: Any
-    """
-    global _cache_config
-    config_cmd_line = ['config-get', '--all', '--format=json']
-    try:
-        # JSON Decode Exception for Python3.5+
-        exc_json = json.decoder.JSONDecodeError
-    except AttributeError:
-        # JSON Decode Exception for Python2.7 through Python3.4
-        exc_json = ValueError
+    """Juju charm configuration"""
+    config_cmd_line = ['config-get']
+    if scope is not None:
+        config_cmd_line.append(scope)
+    else:
+        config_cmd_line.append('--all')
+    config_cmd_line.append('--format=json')
     try:
-        if _cache_config is None:
-            config_data = json.loads(
+        log("config_cmd_line: " + ' '.join(config_cmd_line))
+        config_data = json.loads(
                 subprocess.check_output(config_cmd_line).decode('UTF-8'))
-            _cache_config = Config(config_data)
         if scope is not None:
-            return _cache_config.get(scope)
-        return _cache_config
-    except (exc_json, UnicodeDecodeError) as e:
-        log('Unable to parse output from config-get: config_cmd_line="{}" '
-            'message="{}"'
-            .format(config_cmd_line, str(e)), level=ERROR)
+            return config_data
+        return Config(config_data)
+    except ValueError:
         return None
 
 
@@ -843,10 +819,6 @@ class Hooks(object):
         return wrapper
 
 
-class NoNetworkBinding(Exception):
-    pass
-
-
 def charm_dir():
     """Return the root directory of the current charm"""
     d = os.environ.get('JUJU_CHARM_DIR')
@@ -972,13 +944,6 @@ def application_version_set(version):
         log("Application Version: {}".format(version))
 
 
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def goal_state():
-    """Juju goal state values"""
-    cmd = ['goal-state', '--format=json']
-    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def is_leader():
     """Does the current unit hold the juju leadership
@@ -1073,6 +1038,7 @@ def juju_version():
                                    universal_newlines=True).strip()
 
 
+@cached
 def has_juju_version(minimum_version):
     """Return True if the Juju version is at least the provided version"""
     return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@@ -1132,8 +1098,6 @@ def _run_atexit():
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def network_get_primary_address(binding):
     '''
-    Deprecated since Juju 2.3; use network_get()
-
     Retrieve the primary network address for a named binding
 
     :param binding: string. The name of a relation of extra-binding
@@ -1141,19 +1105,10 @@ def network_get_primary_address(binding):
     :raise: NotImplementedError if run on Juju < 2.0
     '''
     cmd = ['network-get', '--primary-address', binding]
-    try:
-        response = subprocess.check_output(
-            cmd,
-            stderr=subprocess.STDOUT).decode('UTF-8').strip()
-    except CalledProcessError as e:
-        if 'no network config found for binding' in e.output.decode('UTF-8'):
-            raise NoNetworkBinding("No network binding for {}"
-                                   .format(binding))
-        else:
-            raise
-    return response
+    return subprocess.check_output(cmd).decode('UTF-8').strip()
 
 
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def network_get(endpoint, relation_id=None):
     """
     Retrieve the network details for a relation endpoint
@@ -1161,20 +1116,24 @@ def network_get(endpoint, relation_id=None):
     :param endpoint: string. The name of a relation endpoint
     :param relation_id: int. The ID of the relation for the current context.
     :return: dict. The loaded YAML output of the network-get query.
-    :raise: NotImplementedError if request not supported by the Juju version.
+    :raise: NotImplementedError if run on Juju < 2.1
     """
-    if not has_juju_version('2.2'):
-        raise NotImplementedError(juju_version())  # earlier versions require --primary-address
-    if relation_id and not has_juju_version('2.3'):
-        raise NotImplementedError  # 2.3 added the -r option
-
     cmd = ['network-get', endpoint, '--format', 'yaml']
     if relation_id:
         cmd.append('-r')
         cmd.append(relation_id)
-    response = subprocess.check_output(
-        cmd,
-        stderr=subprocess.STDOUT).decode('UTF-8').strip()
+    try:
+        response = subprocess.check_output(
+            cmd,
+            stderr=subprocess.STDOUT).decode('UTF-8').strip()
+    except CalledProcessError as e:
+        # Early versions of Juju 2.0.x required the --primary-address argument.
+        # We catch that condition here and raise NotImplementedError since
+        # the requested semantics are not available - the caller can then
+        # use the network_get_primary_address() method instead.
+        if '--primary-address is currently required' in e.output.decode('UTF-8'):
+            raise NotImplementedError
+        raise
     return yaml.safe_load(response)
 
 
@@ -1230,23 +1189,9 @@ def iter_units_for_relation_name(relation_name):
 
 def ingress_address(rid=None, unit=None):
     """
-    Retrieve the ingress-address from a relation when available.
-    Otherwise, return the private-address.
-
-    When used on the consuming side of the relation (unit is a remote
-    unit), the ingress-address is the IP address that this unit needs
-    to use to reach the provided service on the remote unit.
-
-    When used on the providing side of the relation (unit == local_unit()),
-    the ingress-address is the IP address that is advertised to remote
-    units on this relation. Remote units need to use this address to
-    reach the local provided service on this unit.
-
-    Note that charms may document some other method to use in
-    preference to the ingress_address(), such as an address provided
-    on a different relation attribute or a service discovery mechanism.
-    This allows charms to redirect inbound connections to their peers
-    or different applications such as load balancers.
+    Retrieve the ingress-address from a relation when available. Otherwise,
+    return the private-address. This function is to be used on the consuming
+    side of the relation.
 
     Usage:
     addresses = [ingress_address(rid=u.rid, unit=u.unit)
@@ -1260,40 +1205,3 @@ def ingress_address(rid=None, unit=None):
     settings = relation_get(rid=rid, unit=unit)
     return (settings.get('ingress-address') or
             settings.get('private-address'))
-
-
-def egress_subnets(rid=None, unit=None):
-    """
-    Retrieve the egress-subnets from a relation.
-
-    This function is to be used on the providing side of the
-    relation, and provides the ranges of addresses that client
-    connections may come from. The result is uninteresting on
-    the consuming side of a relation (unit == local_unit()).
-
-    Returns a stable list of subnets in CIDR format.
-    eg. ['192.168.1.0/24', '2001::F00F/128']
-
-    If egress-subnets is not available, falls back to using the published
-    ingress-address, or finally private-address.
-
-    :param rid: string relation id
-    :param unit: string unit name
-    :side effect: calls relation_get
-    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
-    """
-    def _to_range(addr):
-        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
-            addr += '/32'
-        elif ':' in addr and '/' not in addr:  # IPv6
-            addr += '/128'
-        return addr
-
-    settings = relation_get(rid=rid, unit=unit)
-    if 'egress-subnets' in settings:
-        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
-    if 'ingress-address' in settings:
-        return [_to_range(settings['ingress-address'])]
-    if 'private-address' in settings:
-        return [_to_range(settings['private-address'])]
-    return []  # Should never happen
diff --git a/charmhelpers/core/host.py b/charmhelpers/core/host.py
index 322ab2acd71bb02f13d2d739e74d0ddc62774d9e..5cc5c86b701fc5375f387eb01a0d2b76c184c263 100644
--- a/charmhelpers/core/host.py
+++ b/charmhelpers/core/host.py
@@ -549,8 +549,6 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
         with open(path, 'wb') as target:
             os.fchown(target.fileno(), uid, gid)
             os.fchmod(target.fileno(), perms)
-            if six.PY3 and isinstance(content, six.string_types):
-                content = content.encode('UTF-8')
             target.write(content)
         return
     # the contents were the same, but we might still need to change the
@@ -993,7 +991,7 @@ def updatedb(updatedb_text, new_path):
     return output
 
 
-def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
+def modulo_distribution(modulo=3, wait=30):
     """ Modulo distribution
 
     This helper uses the unit number, a modulo value and a constant wait time
@@ -1015,14 +1013,7 @@ def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
 
     @param modulo: int The modulo number creates the group distribution
     @param wait: int The constant time wait value
-    @param non_zero_wait: boolean Override unit % modulo == 0,
-                          return modulo * wait. Used to avoid collisions with
-                          leader nodes which are often given priority.
     @return: int Calculated time to wait for unit operation
     """
     unit_number = int(local_unit().split('/')[1])
-    calculated_wait_time = (unit_number % modulo) * wait
-    if non_zero_wait and calculated_wait_time == 0:
-        return modulo * wait
-    else:
-        return calculated_wait_time
+    return (unit_number % modulo) * wait
diff --git a/charmhelpers/core/host_factory/ubuntu.py b/charmhelpers/core/host_factory/ubuntu.py
index 99451b59789a822b4f5a96d7310965f1c8921898..d8dc378a5dad29c271a89289e4b815e2c2c99060 100644
--- a/charmhelpers/core/host_factory/ubuntu.py
+++ b/charmhelpers/core/host_factory/ubuntu.py
@@ -20,7 +20,6 @@ UBUNTU_RELEASES = (
     'yakkety',
     'zesty',
     'artful',
-    'bionic',
 )
 
 
diff --git a/charmhelpers/core/services/base.py b/charmhelpers/core/services/base.py
index 179ad4f0c367dd6b13c10b201c3752d1c8daf05e..ca9dc996bd7d7fc2a18b7d9a9ee51adff171bda9 100644
--- a/charmhelpers/core/services/base.py
+++ b/charmhelpers/core/services/base.py
@@ -307,34 +307,23 @@ class PortManagerCallback(ManagerCallback):
     """
     def __call__(self, manager, service_name, event_name):
         service = manager.get_service(service_name)
-        # turn this generator into a list,
-        # as we'll be going over it multiple times
-        new_ports = list(service.get('ports', []))
+        new_ports = service.get('ports', [])
         port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
         if os.path.exists(port_file):
             with open(port_file) as fp:
                 old_ports = fp.read().split(',')
             for old_port in old_ports:
-                if bool(old_port) and not self.ports_contains(old_port, new_ports):
-                    hookenv.close_port(old_port)
+                if bool(old_port):
+                    old_port = int(old_port)
+                    if old_port not in new_ports:
+                        hookenv.close_port(old_port)
         with open(port_file, 'w') as fp:
             fp.write(','.join(str(port) for port in new_ports))
         for port in new_ports:
-            # A port is either a number or 'ICMP'
-            protocol = 'TCP'
-            if str(port).upper() == 'ICMP':
-                protocol = 'ICMP'
             if event_name == 'start':
-                hookenv.open_port(port, protocol)
+                hookenv.open_port(port)
             elif event_name == 'stop':
-                hookenv.close_port(port, protocol)
-
-    def ports_contains(self, port, ports):
-        if not bool(port):
-            return False
-        if str(port).upper() != 'ICMP':
-            port = int(port)
-        return port in ports
+                hookenv.close_port(port)
 
 
 def service_stop(service_name):
diff --git a/charmhelpers/core/sysctl.py b/charmhelpers/core/sysctl.py
index 1f188d8c653f9bf793e18ed484635fce310543cc..6e413e31480e5fb4bcb703d58b1e87f98adc53af 100644
--- a/charmhelpers/core/sysctl.py
+++ b/charmhelpers/core/sysctl.py
@@ -31,22 +31,18 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
 def create(sysctl_dict, sysctl_file):
     """Creates a sysctl.conf file from a YAML associative array
 
-    :param sysctl_dict: a dict or YAML-formatted string of sysctl
-                        options eg "{ 'kernel.max_pid': 1337 }"
+    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
     :type sysctl_dict: str
     :param sysctl_file: path to the sysctl file to be saved
     :type sysctl_file: str or unicode
     :returns: None
     """
-    if type(sysctl_dict) is not dict:
-        try:
-            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
-        except yaml.YAMLError:
-            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
-                level=ERROR)
-            return
-    else:
-        sysctl_dict_parsed = sysctl_dict
+    try:
+        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+    except yaml.YAMLError:
+        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+            level=ERROR)
+        return
 
     with open(sysctl_file, "w") as fd:
         for key, value in sysctl_dict_parsed.items():
diff --git a/charmhelpers/core/templating.py b/charmhelpers/core/templating.py
index 9014015c14ee0b48c775562cd4f0d30884944439..7b801a34a5e6585485347f7a97bc18a10a093d03 100644
--- a/charmhelpers/core/templating.py
+++ b/charmhelpers/core/templating.py
@@ -20,8 +20,7 @@ from charmhelpers.core import hookenv
 
 
 def render(source, target, context, owner='root', group='root',
-           perms=0o444, templates_dir=None, encoding='UTF-8',
-           template_loader=None, config_template=None):
+           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
     """
     Render a template.
 
@@ -33,9 +32,6 @@ def render(source, target, context, owner='root', group='root',
     The context should be a dict containing the values to be replaced in the
     template.
 
-    config_template may be provided to render from a provided template instead
-    of loading from a file.
-
     The `owner`, `group`, and `perms` options will be passed to `write_file`.
 
     If omitted, `templates_dir` defaults to the `templates` folder in the charm.
@@ -69,19 +65,14 @@ def render(source, target, context, owner='root', group='root',
         if templates_dir is None:
             templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
         template_env = Environment(loader=FileSystemLoader(templates_dir))
-
-    # load from a string if provided explicitly
-    if config_template is not None:
-        template = template_env.from_string(config_template)
-    else:
-        try:
-            source = source
-            template = template_env.get_template(source)
-        except exceptions.TemplateNotFound as e:
-            hookenv.log('Could not load template %s from %s.' %
-                        (source, templates_dir),
-                        level=hookenv.ERROR)
-            raise e
+    try:
+        source = source
+        template = template_env.get_template(source)
+    except exceptions.TemplateNotFound as e:
+        hookenv.log('Could not load template %s from %s.' %
+                    (source, templates_dir),
+                    level=hookenv.ERROR)
+        raise e
     content = template.render(context)
     if target is not None:
         target_dir = os.path.dirname(target)
diff --git a/charmhelpers/core/unitdata.py b/charmhelpers/core/unitdata.py
index ab554327b343f896880523fc627c1abea84be29a..7af875c2fcc1e2e38f9267bfdc60ab5a2a499c18 100644
--- a/charmhelpers/core/unitdata.py
+++ b/charmhelpers/core/unitdata.py
@@ -166,10 +166,6 @@ class Storage(object):
 
     To support dicts, lists, integer, floats, and booleans values
     are automatically json encoded/decoded.
-
-    Note: to facilitate unit testing, ':memory:' can be passed as the
-    path parameter which causes sqlite3 to only build the db in memory.
-    This should only be used for testing purposes.
     """
     def __init__(self, path=None):
         self.db_path = path
@@ -179,9 +175,6 @@ class Storage(object):
             else:
                 self.db_path = os.path.join(
                     os.environ.get('CHARM_DIR', ''), '.unit-state.db')
-        if self.db_path != ':memory:':
-            with open(self.db_path, 'a') as f:
-                os.fchmod(f.fileno(), 0o600)
         self.conn = sqlite3.connect('%s' % self.db_path)
         self.cursor = self.conn.cursor()
         self.revision = None
diff --git a/charmhelpers/fetch/ubuntu.py b/charmhelpers/fetch/ubuntu.py
index 736be713db66bff7d4d8e742d5b68d45a2270c00..910e96a66f45b8a173147141e7c5108d685b7b7a 100644
--- a/charmhelpers/fetch/ubuntu.py
+++ b/charmhelpers/fetch/ubuntu.py
@@ -44,7 +44,6 @@ ARCH_TO_PROPOSED_POCKET = {
     'x86_64': PROPOSED_POCKET,
     'ppc64le': PROPOSED_PORTS_POCKET,
     'aarch64': PROPOSED_PORTS_POCKET,
-    's390x': PROPOSED_PORTS_POCKET,
 }
 CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
 CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
@@ -158,14 +157,6 @@ CLOUD_ARCHIVE_POCKETS = {
     'queens/proposed': 'xenial-proposed/queens',
     'xenial-queens/proposed': 'xenial-proposed/queens',
     'xenial-proposed/queens': 'xenial-proposed/queens',
-    # Rocky
-    'rocky': 'bionic-updates/rocky',
-    'bionic-rocky': 'bionic-updates/rocky',
-    'bionic-rocky/updates': 'bionic-updates/rocky',
-    'bionic-updates/rocky': 'bionic-updates/rocky',
-    'rocky/proposed': 'bionic-proposed/rocky',
-    'bionic-rocky/proposed': 'bionic-proposed/rocky',
-    'bionic-proposed/rocky': 'bionic-proposed/rocky',
 }
 
 
diff --git a/config.yaml b/config.yaml
index 57f8e2ce848ea8baa921ce7e2ea477ba25a8761f..f85c8f8e314c1c6f558d059deb78590d0fd622c5 100644
--- a/config.yaml
+++ b/config.yaml
@@ -35,6 +35,31 @@ options:
       NOTE: updating this setting to a source that is known to provide
       a later version of OpenStack will trigger a software upgrade unless
       action-managed-upgrade is set to True.
+  openstack-origin-git:
+    type: string
+    default:
+    description: |
+      Specifies a default OpenStack release name, or a YAML dictionary
+      listing the git repositories to install from.
+      .
+      The default Openstack release name may be one of the following, where
+      the corresponding OpenStack github branch will be used:
+        * mitaka
+        * newton
+        * ocata
+        * pike
+        * master
+      .
+      The YAML must minimally include requirements and keystone repositories,
+      and may also include repositories for other dependencies:
+        repositories:
+        - {name: requirements,
+           repository: 'git://github.com/openstack/requirements',
+           branch: master}
+        - {name: keystone,
+           repository: 'git://github.com/openstack/keystone',
+           branch: master}
+        release: master
   action-managed-upgrade:
     type: boolean
     default: False
@@ -80,8 +105,7 @@ options:
     default: None
     description: |
       Admin password. To be used *for testing only*. Randomly generated by
-      default. To retreive generated password,
-      juju run --unit keystone/0 leader-get admin_passwd
+      default.
   admin-token:
     type: string
     default: None
@@ -120,42 +144,37 @@ options:
   enable-pki:
     type: string
     default: "false"
-    description: |
-      Enable PKI token signing.
-      .
-      [DEPRECATED] This option should no longer be used.
-      This option will be removed in a future release.
+    description: Enable PKI token signing.
   preferred-api-version:
     type: int
-    default:
+    default: 2
     description: |
       Use this keystone api version for keystone endpoints and advertise this
-      version to identity client charms.  For OpenStack releases < Queens this
-      option defaults to 2; for Queens or later it defaults to 3.
+      version to identity client charms.
   haproxy-server-timeout:
     type: int
     default:
     description: |
       Server timeout configuration in ms for haproxy, used in HA
-      configurations. If not provided, default value of 90000ms is used.
+      configurations. If not provided, default value of 30000ms is used.
   haproxy-client-timeout:
     type: int
     default:
     description: |
       Client timeout configuration in ms for haproxy, used in HA
-      configurations. If not provided, default value of 90000ms is used.
+      configurations. If not provided, default value of 30000ms is used.
   haproxy-queue-timeout:
     type: int
     default:
     description: |
       Queue timeout configuration in ms for haproxy, used in HA
-      configurations. If not provided, default value of 9000ms is used.
+      configurations. If not provided, default value of 5000ms is used.
   haproxy-connect-timeout:
     type: int
     default:
     description: |
       Connect timeout configuration in ms for haproxy, used in HA
-      configurations. If not provided, default value of 9000ms is used.
+      configurations. If not provided, default value of 5000ms is used.
   database:
     type: string
     default: "keystone"
@@ -540,24 +559,11 @@ options:
   https-service-endpoints:
     type: string
     default: "False"
-    description: |
-      Manage SSL certificates for all service endpoints. This option
-      should be False when specifying ssl\_\* options.
-      .
-      [DEPRECATED] This option should no longer be used.
-      Provide SSL certificate data through the ssl\_\* options.
-      This option will be removed in a future release.
+    description: Manage SSL certificates for all service endpoints.
   use-https:
     type: string
     default: "no"
-    description: |
-      Use SSL for Keystone itself using a charm-generated CA. Set to
-      'yes' to enable it. This option should be 'no' when specifying ssl\_\*
-      options.
-      .
-      [DEPRECATED] This option should no longer be used.
-      Provide SSL certificate data through the ssl\_\* options.
-      This option will be removed in a future release.
+    description: Use SSL for Keystone itself. Set to 'yes' to enable it.
   ssl_cert:
     type: string
     default:
diff --git a/hooks/charmhelpers b/hooks/charmhelpers
deleted file mode 120000
index 702de734b0c015b34565dfbd7ba8c48ace8cb262..0000000000000000000000000000000000000000
--- a/hooks/charmhelpers
+++ /dev/null
@@ -1 +0,0 @@
-../charmhelpers
\ No newline at end of file
diff --git a/hooks/hooks b/hooks/hooks
deleted file mode 120000
index f631275e19cd320f570733cb0ce1f287d6f02702..0000000000000000000000000000000000000000
--- a/hooks/hooks
+++ /dev/null
@@ -1 +0,0 @@
-../hooks
\ No newline at end of file
diff --git a/hooks/keystone-fid-service-provider-relation-broken b/hooks/keystone-fid-service-provider-relation-broken
deleted file mode 120000
index dd3b3eff4b7109293b4cfd9b81f5fc49643432a0..0000000000000000000000000000000000000000
--- a/hooks/keystone-fid-service-provider-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/keystone-fid-service-provider-relation-changed b/hooks/keystone-fid-service-provider-relation-changed
deleted file mode 120000
index dd3b3eff4b7109293b4cfd9b81f5fc49643432a0..0000000000000000000000000000000000000000
--- a/hooks/keystone-fid-service-provider-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/keystone-fid-service-provider-relation-departed b/hooks/keystone-fid-service-provider-relation-departed
deleted file mode 120000
index dd3b3eff4b7109293b4cfd9b81f5fc49643432a0..0000000000000000000000000000000000000000
--- a/hooks/keystone-fid-service-provider-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/keystone-fid-service-provider-relation-joined b/hooks/keystone-fid-service-provider-relation-joined
deleted file mode 120000
index dd3b3eff4b7109293b4cfd9b81f5fc49643432a0..0000000000000000000000000000000000000000
--- a/hooks/keystone-fid-service-provider-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/keystone_context.py b/hooks/keystone_context.py
index febdd85f3297ea80985122230dfffd955fc7d09b..f4de3dc5ad4687718014d6ed4ce3c061f6938732 100644
--- a/hooks/keystone_context.py
+++ b/hooks/keystone_context.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import hashlib
 import os
 import shutil
 import tarfile
@@ -27,8 +28,8 @@ from base64 import b64decode
 from charmhelpers.core.host import (
     mkdir,
     write_file,
+    service_restart,
 )
-import json
 
 from charmhelpers.contrib.openstack import context
 
@@ -44,14 +45,113 @@ from charmhelpers.core.hookenv import (
     config,
     log,
     leader_get,
+    DEBUG,
     INFO,
-    related_units,
-    relation_ids,
-    relation_get,
 )
 
+from charmhelpers.core.strutils import (
+    bool_from_string,
+)
+
+from charmhelpers.contrib.hahelpers.apache import install_ca_cert
+
+CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
+
+
+def is_cert_provided_in_config():
+    cert = config('ssl_cert')
+    key = config('ssl_key')
+    return bool(cert and key)
+
+
+class SSLContext(context.ApacheSSLContext):
+
+    def configure_cert(self, cn):
+        from keystone_utils import (
+            SSH_USER,
+            get_ca,
+            ensure_permissions,
+            is_ssl_cert_master,
+            KEYSTONE_USER,
+        )
+
+        # Ensure ssl dir exists whether master or not
+        perms = 0o775
+        mkdir(path=self.ssl_dir, owner=SSH_USER, group=KEYSTONE_USER,
+              perms=perms)
+        # Ensure accessible by keystone ssh user and group (for sync)
+        ensure_permissions(self.ssl_dir, user=SSH_USER, group=KEYSTONE_USER,
+                           perms=perms)
+
+        if not is_cert_provided_in_config() and not is_ssl_cert_master():
+            log("Not ssl-cert-master - skipping apache cert config until "
+                "master is elected", level=INFO)
+            return
+
+        log("Creating apache ssl certs in %s" % (self.ssl_dir), level=INFO)
+
+        cert = config('ssl_cert')
+        key = config('ssl_key')
+
+        if not (cert and key):
+            ca = get_ca(user=SSH_USER)
+            cert, key = ca.get_cert_and_key(common_name=cn)
+        else:
+            cert = b64decode(cert)
+            key = b64decode(key)
+
+        write_file(path=os.path.join(self.ssl_dir, 'cert_{}'.format(cn)),
+                   content=cert, owner=SSH_USER, group=KEYSTONE_USER,
+                   perms=0o644)
+        write_file(path=os.path.join(self.ssl_dir, 'key_{}'.format(cn)),
+                   content=key, owner=SSH_USER, group=KEYSTONE_USER,
+                   perms=0o644)
+
+    def configure_ca(self):
+        from keystone_utils import (
+            SSH_USER,
+            get_ca,
+            ensure_permissions,
+            is_ssl_cert_master,
+            KEYSTONE_USER,
+        )
+
+        if not is_cert_provided_in_config() and not is_ssl_cert_master():
+            log("Not ssl-cert-master - skipping apache ca config until "
+                "master is elected", level=INFO)
+            return
+
+        cert = config('ssl_cert')
+        key = config('ssl_key')
+
+        ca_cert = config('ssl_ca')
+        if ca_cert:
+            ca_cert = b64decode(ca_cert)
+        elif not (cert and key):
+            # NOTE(hopem): if a cert and key are provided as config we don't
+            # mandate that a CA is also provided since it isn't necessarily
+            # needed. As a result we only generate a custom CA if we are also
+            # generating cert and key.
+            ca = get_ca(user=SSH_USER)
+            ca_cert = ca.get_ca_bundle()
+
+        if ca_cert:
+            # Ensure accessible by keystone ssh user and group (unison)
+            install_ca_cert(ca_cert)
+            ensure_permissions(CA_CERT_PATH, user=SSH_USER,
+                               group=KEYSTONE_USER, perms=0o0644)
+
+    def canonical_names(self):
+        addresses = self.get_network_addresses()
+        addrs = []
+        for address, endpoint in addresses:
+            addrs.append(endpoint)
+
+        return list(set(addrs))
+
+
+class ApacheSSLContext(SSLContext):
 
-class ApacheSSLContext(context.ApacheSSLContext):
     interfaces = ['https']
     external_ports = []
     service_namespace = 'keystone'
@@ -61,13 +161,31 @@ class ApacheSSLContext(context.ApacheSSLContext):
         # late import to work around circular dependency
         from keystone_utils import (
             determine_ports,
+            update_hash_from_path,
         )
 
+        ssl_paths = [CA_CERT_PATH, self.ssl_dir]
+
         self.external_ports = determine_ports()
-        return super(ApacheSSLContext, self).__call__()
+        before = hashlib.sha256()
+        for path in ssl_paths:
+            update_hash_from_path(before, path)
+
+        ret = super(ApacheSSLContext, self).__call__()
+
+        after = hashlib.sha256()
+        for path in ssl_paths:
+            update_hash_from_path(after, path)
 
+        # Ensure that apache2 is restarted if these change
+        if before.hexdigest() != after.hexdigest():
+            service_restart('apache2')
+
+        return ret
+
+
+class NginxSSLContext(SSLContext):
 
-class NginxSSLContext(context.ApacheSSLContext):
     interfaces = ['https']
     external_ports = []
     service_namespace = 'keystone'
@@ -78,14 +196,30 @@ class NginxSSLContext(context.ApacheSSLContext):
         # late import to work around circular dependency
         from keystone_utils import (
             determine_ports,
+            update_hash_from_path,
+            APACHE_SSL_DIR
         )
 
+        ssl_paths = [CA_CERT_PATH, APACHE_SSL_DIR]
+
         self.external_ports = determine_ports()
+        before = hashlib.sha256()
+        for path in ssl_paths:
+            update_hash_from_path(before, path)
+
         ret = super(NginxSSLContext, self).__call__()
         if not ret:
             log("SSL not used", level='DEBUG')
             return {}
 
+        after = hashlib.sha256()
+        for path in ssl_paths:
+            update_hash_from_path(after, path)
+
+        # Ensure that Nginx is restarted if these change
+        if before.hexdigest() != after.hexdigest():
+            service_restart('snap.keystone.nginx')
+
         # Transform for use by Nginx
         """
         {'endpoints': [(u'10.5.0.30', u'10.5.0.30', 4990, 4980),
@@ -164,12 +298,12 @@ class KeystoneContext(context.OSContextGenerator):
     def __call__(self):
         from keystone_utils import (
             api_port, set_admin_token, endpoint_url, resolve_address,
-            PUBLIC, ADMIN, ADMIN_DOMAIN,
-            snap_install_requested, get_api_version,
+            PUBLIC, ADMIN, PKI_CERTS_DIR, ensure_pki_cert_paths, ADMIN_DOMAIN,
+            snap_install_requested,
         )
         ctxt = {}
         ctxt['token'] = set_admin_token(config('admin-token'))
-        ctxt['api_version'] = get_api_version()
+        ctxt['api_version'] = int(config('preferred-api-version'))
         ctxt['admin_role'] = config('admin-role')
         if ctxt['api_version'] > 2:
             ctxt['service_tenant_id'] = \
@@ -201,6 +335,25 @@ class KeystoneContext(context.OSContextGenerator):
                 flags = context.config_flags_parser(ldap_flags)
                 ctxt['ldap_config_flags'] = flags
 
+        enable_pki = config('enable-pki')
+        if enable_pki and bool_from_string(enable_pki):
+            log("Enabling PKI", level=DEBUG)
+            ctxt['token_provider'] = 'pki'
+
+            # NOTE(jamespage): Only check PKI configuration if the PKI
+            #                  token format is in use, which has been
+            #                  removed as of OpenStack Ocata.
+            ensure_pki_cert_paths()
+            certs = os.path.join(PKI_CERTS_DIR, 'certs')
+            privates = os.path.join(PKI_CERTS_DIR, 'privates')
+            ctxt['enable_signing'] = True
+            ctxt.update({'certfile': os.path.join(certs, 'signing_cert.pem'),
+                         'keyfile': os.path.join(privates, 'signing_key.pem'),
+                         'ca_certs': os.path.join(certs, 'ca.pem'),
+                         'ca_key': os.path.join(certs, 'ca_key.pem')})
+        else:
+            ctxt['enable_signing'] = False
+
         # Base endpoint URL's which are used in keystone responses
         # to unauthenticated requests to redirect clients to the
         # correct auth URL.
@@ -418,9 +571,9 @@ class TokenFlushContext(context.OSContextGenerator):
 class IdpFetchContext(context.OSContextGenerator):
 
     def __call__(self):
-        from keystone_utils import (get_api_suffix, api_port, endpoint_url,
-                                    resolve_address, ADMIN
-                                    )
+        from keystone_utils import (get_api_suffix,
+            api_port, endpoint_url, resolve_address, ADMIN,
+        )
         ctxt = {
             'enable_saml2': config('enable-saml2'),
             'remote_ids_fetch': is_elected_leader(DC_RESOURCE_NAME) and
@@ -431,46 +584,3 @@ class IdpFetchContext(context.OSContextGenerator):
                                               get_api_suffix())
         }
         return ctxt
-
-
-class KeystoneFIDServiceProviderContext(context.OSContextGenerator):
-    interfaces = ['keystone-fid-service-provider']
-
-    def __call__(self):
-        fid_sp_keys = ['protocol-name', 'remote-id-attribute']
-        fid_sps = []
-        for rid in relation_ids("keystone-fid-service-provider"):
-            for unit in related_units(rid):
-                rdata = relation_get(unit=unit, rid=rid)
-                if set(rdata).issuperset(set(fid_sp_keys)):
-                    fid_sps.append({
-                        k: json.loads(v) for k, v in rdata.items()
-                        if k in fid_sp_keys
-                    })
-        # populate the context with data from one or more
-        # service providers
-        ctxt = ({'fid_sps': fid_sps}
-                if fid_sps else {})
-        return ctxt
-
-
-class WebSSOTrustedDashboardContext(context.OSContextGenerator):
-    interfaces = ['websso-trusted-dashboard']
-
-    def __call__(self):
-        trusted_dashboard_keys = ['scheme', 'hostname', 'path']
-        trusted_dashboards = set()
-        for rid in relation_ids("websso-trusted-dashboard"):
-            for unit in related_units(rid):
-                rdata = relation_get(unit=unit, rid=rid)
-                if set(rdata).issuperset(set(trusted_dashboard_keys)):
-                    scheme = rdata.get('scheme')
-                    hostname = rdata.get('hostname')
-                    path = rdata.get('path')
-                    url = '{}{}{}'.format(scheme, hostname, path)
-                    trusted_dashboards.add(url)
-        # populate the context with data from one or more
-        # service providers
-        ctxt = ({'trusted_dashboards': trusted_dashboards}
-                if trusted_dashboards else {})
-        return ctxt
diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py
index b5386523604e3ae8bec6980090934b35da4668e2..505db8a7b867de2c46c8ef6e79bc57c1a0294ca1 100755
--- a/hooks/keystone_hooks.py
+++ b/hooks/keystone_hooks.py
@@ -16,18 +16,21 @@
 
 import hashlib
 import json
-import sys
 import os
+import sys
 
 from subprocess import check_call
 
+from charmhelpers.contrib import unison
 from charmhelpers.core import unitdata
 
 from charmhelpers.core.hookenv import (
     Hooks,
     UnregisteredHookError,
     config,
+    is_relation_made,
     log,
+    local_unit,
     DEBUG,
     INFO,
     WARNING,
@@ -39,23 +42,29 @@ from charmhelpers.core.hookenv import (
     status_set,
     open_port,
     is_leader,
-    relation_id,
 )
 
 from charmhelpers.core.host import (
+    mkdir,
     service_pause,
     service_stop,
     service_start,
     service_restart,
 )
 
+from charmhelpers.core.strutils import (
+    bool_from_string,
+)
+
 from charmhelpers.fetch import (
     apt_install, apt_update,
     filter_installed_packages
 )
 
 from charmhelpers.contrib.openstack.utils import (
+    config_value_changed,
     configure_installation_source,
+    git_install_requested,
     openstack_upgrade_available,
     sync_db_with_multi_ipv6_addresses,
     os_release,
@@ -76,21 +85,36 @@ from keystone_utils import (
     do_openstack_upgrade_reexec,
     ensure_initial_admin,
     get_admin_passwd,
+    git_install,
     migrate_database,
     save_script_rc,
     post_snap_install,
+    synchronize_ca_if_changed,
     register_configs,
     restart_map,
     services,
     CLUSTER_RES,
     KEYSTONE_CONF,
+    KEYSTONE_USER,
     POLICY_JSON,
     TOKEN_FLUSH_CRON_FILE,
+    SSH_USER,
     setup_ipv6,
     send_notifications,
+    check_peer_actions,
+    get_ssl_sync_request_units,
+    is_ssl_cert_master,
     is_db_ready,
+    clear_ssl_synced_units,
     is_db_initialised,
+    update_certs_if_available,
+    ensure_ssl_dir,
+    ensure_pki_dir_permissions,
+    ensure_permissions,
+    force_ssl_sync,
     filter_null,
+    ensure_ssl_dirs,
+    ensure_pki_cert_paths,
     is_service_present,
     delete_service_entry,
     assess_status,
@@ -106,12 +130,13 @@ from keystone_utils import (
     ADMIN_DOMAIN,
     ADMIN_PROJECT,
     create_or_show_domain,
-    restart_keystone,
+    keystone_service,
 )
 
 from charmhelpers.contrib.hahelpers.cluster import (
     is_elected_leader,
     get_hacluster_config,
+    peer_units,
     https,
     is_clustered,
 )
@@ -125,12 +150,12 @@ from charmhelpers.payload.execd import execd_preinstall
 from charmhelpers.contrib.peerstorage import (
     peer_retrieve_by_prefix,
     peer_echo,
+    relation_get as relation_get_and_migrate,
 )
 from charmhelpers.contrib.openstack.ip import (
     ADMIN,
     resolve_address,
 )
-
 from charmhelpers.contrib.network.ip import (
     get_iface_for_address,
     get_netmask_for_address,
@@ -143,11 +168,6 @@ from charmhelpers.contrib.charmsupport import nrpe
 
 from charmhelpers.contrib.hardening.harden import harden
 
-from charmhelpers.contrib.openstack.cert_utils import (
-    get_certificate_request,
-    process_certificates,
-)
-
 hooks = Hooks()
 CONFIGS = register_configs()
 
@@ -179,12 +199,20 @@ def install():
         service_start('haproxy')
         if run_in_apache():
             disable_unused_apache_sites()
-            service_pause('keystone')
+            if not git_install_requested():
+                service_pause('keystone')
         install_apache_error_handler(config('no-user-mapping-url'))
 
+    status_set('maintenance', 'Git install')
+    git_install(config('openstack-origin-git'))
+
+    unison.ensure_user(user=SSH_USER, group=SSH_USER)
+    unison.ensure_user(user=SSH_USER, group=KEYSTONE_USER)
+
 
 @hooks.hook('config-changed')
 @restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed(fatal=True)
 @harden()
 def config_changed():
     if config('prefer-ipv6'):
@@ -193,21 +221,40 @@ def config_changed():
         sync_db_with_multi_ipv6_addresses(config('database'),
                                           config('database-user'))
 
-    if not config('action-managed-upgrade'):
+    unison.ensure_user(user=SSH_USER, group=SSH_USER)
+    unison.ensure_user(user=SSH_USER, group=KEYSTONE_USER)
+    homedir = unison.get_homedir(SSH_USER)
+    if not os.path.isdir(homedir):
+        mkdir(homedir, SSH_USER, SSH_USER, 0o775)
+
+    if git_install_requested():
+        if config_value_changed('openstack-origin-git'):
+            status_set('maintenance', 'Running Git install')
+            git_install(config('openstack-origin-git'))
+    elif not config('action-managed-upgrade'):
         if openstack_upgrade_available('keystone'):
             status_set('maintenance', 'Running openstack upgrade')
             do_openstack_upgrade_reexec(configs=CONFIGS)
 
     for r_id in relation_ids('cluster'):
-        cluster_joined(rid=r_id)
+        cluster_joined(rid=r_id, ssl_sync_request=False)
 
     config_changed_postupgrade()
 
 
 @hooks.hook('config-changed-postupgrade')
 @restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed(fatal=True)
 @harden()
 def config_changed_postupgrade():
+    # Ensure ssl dir exists and is unison-accessible
+    ensure_ssl_dir()
+
+    if not snap_install_requested():
+        check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])
+
+    ensure_ssl_dirs()
+
     save_script_rc()
     release = os_release('keystone')
     if run_in_apache(release=release):
@@ -216,7 +263,8 @@ def config_changed_postupgrade():
         # decorator can fire
         apt_install(filter_installed_packages(determine_packages()))
         # when deployed from source, init scripts aren't installed
-        service_pause('keystone')
+        if not git_install_requested():
+            service_pause('keystone')
 
         disable_unused_apache_sites()
         if WSGI_KEYSTONE_API_CONF in CONFIGS.templates:
@@ -251,16 +299,65 @@ def config_changed_postupgrade():
     if snap_install_requested() and not is_unit_paused_set():
         service_restart('snap.keystone.*')
 
+    initialise_pki()
+
     update_all_identity_relation_units()
     update_all_domain_backends()
-    update_all_fid_backends()
+
+    # Ensure sync request is sent out (needed for any/all ssl change)
+    send_ssl_sync_request()
 
     for r_id in relation_ids('ha'):
         ha_joined(relation_id=r_id)
 
 
+@synchronize_ca_if_changed(fatal=True)
+def initialise_pki():
+    """Create certs and keys required for token signing.
+
+    Used for PKI tokens and for signing the token revocation list.
+
+    NOTE: keystone.conf [signing] section must be up-to-date prior to
+          executing this.
+    """
+    if CompareOpenStackReleases(os_release('keystone-common')) >= 'pike':
+        # pike dropped support for PKI token; skip function
+        return
+    ensure_pki_cert_paths()
+    if not peer_units() or is_ssl_cert_master():
+        log("Ensuring PKI token certs created", level=DEBUG)
+        if snap_install_requested():
+            cmd = ['/snap/bin/keystone-manage', 'pki_setup',
+                   '--keystone-user', KEYSTONE_USER,
+                   '--keystone-group', KEYSTONE_USER]
+            _log_dir = '/var/snap/keystone/common/log'
+        else:
+            cmd = ['keystone-manage', 'pki_setup',
+                   '--keystone-user', KEYSTONE_USER,
+                   '--keystone-group', KEYSTONE_USER]
+            _log_dir = '/var/log/keystone'
+        check_call(cmd)
+
+        # Ensure logfile has keystone perms since we may have just created it
+        # with root.
+        ensure_permissions(_log_dir, user=KEYSTONE_USER,
+                           group=KEYSTONE_USER, perms=0o744)
+        ensure_permissions('{}/keystone.log'.format(_log_dir),
+                           user=KEYSTONE_USER, group=KEYSTONE_USER,
+                           perms=0o644)
+
+    ensure_pki_dir_permissions()
+
+
 @hooks.hook('shared-db-relation-joined')
 def db_joined():
+    if is_relation_made('pgsql-db'):
+        # error, postgresql is used
+        e = ('Attempting to associate a mysql database when there is already '
+             'associated a postgresql one')
+        log(e, level=ERROR)
+        raise Exception(e)
+
     if config('prefer-ipv6'):
         sync_db_with_multi_ipv6_addresses(config('database'),
                                           config('database-user'))
@@ -279,9 +376,23 @@ def db_joined():
                      hostname=host)
 
 
+@hooks.hook('pgsql-db-relation-joined')
+def pgsql_db_joined():
+    if is_relation_made('shared-db'):
+        # raise error
+        e = ('Attempting to associate a postgresql database when there'
+             ' is already associated a mysql one')
+        log(e, level=ERROR)
+        raise Exception(e)
+
+    relation_set(database=config('database'))
+
+
 def update_all_identity_relation_units(check_db_ready=True):
     if is_unit_paused_set():
         return
+    CONFIGS.write_all()
+    configure_https()
     if check_db_ready and not is_db_ready():
         log('Allowed_units list provided and this unit not present',
             level=INFO)
@@ -308,6 +419,11 @@ def update_all_identity_relation_units(check_db_ready=True):
             identity_credentials_changed(relation_id=rid, remote_unit=unit)
 
 
+@synchronize_ca_if_changed(force=True)
+def update_all_identity_relation_units_force_sync():
+    update_all_identity_relation_units()
+
+
 def update_all_domain_backends():
     """Re-trigger hooks for all domain-backend relations/units"""
     for rid in relation_ids('domain-backend'):
@@ -315,17 +431,6 @@ def update_all_domain_backends():
             domain_backend_changed(relation_id=rid, unit=unit)
 
 
-def update_all_fid_backends():
-    if CompareOpenStackReleases(os_release('keystone-common')) < 'ocata':
-        log('Ignoring keystone-fid-service-provider relation as it is'
-            ' not supported on releases older than Ocata')
-        return
-    """If there are any config changes, e.g. for domain or service port
-    make sure to update those for all relation-level buckets"""
-    for rid in relation_ids('keystone-fid-service-provider'):
-        update_keystone_fid_service_provider(relation_id=rid)
-
-
 def leader_init_db_if_ready(use_current_context=False):
     """ Initialise the keystone db if it is ready and mark it as initialised.
 
@@ -357,6 +462,7 @@ def leader_init_db_if_ready(use_current_context=False):
 
 @hooks.hook('shared-db-relation-changed')
 @restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed()
 def db_changed():
     if 'shared-db' not in CONFIGS.complete_contexts():
         log('shared-db relation incomplete. Peer not ready?')
@@ -366,12 +472,28 @@ def db_changed():
         if CompareOpenStackReleases(
                 os_release('keystone-common')) >= 'liberty':
             CONFIGS.write(POLICY_JSON)
-        update_all_identity_relation_units()
+
+
+@hooks.hook('pgsql-db-relation-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed()
+def pgsql_db_changed():
+    if 'pgsql-db' not in CONFIGS.complete_contexts():
+        log('pgsql-db relation incomplete. Peer not ready?')
+    else:
+        CONFIGS.write(KEYSTONE_CONF)
+        leader_init_db_if_ready(use_current_context=True)
+        if CompareOpenStackReleases(
+                os_release('keystone-common')) >= 'liberty':
+            CONFIGS.write(POLICY_JSON)
 
 
 @hooks.hook('identity-service-relation-changed')
 @restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed()
 def identity_changed(relation_id=None, remote_unit=None):
+    CONFIGS.write_all()
+
     notifications = {}
     if is_elected_leader(CLUSTER_RES):
         if not is_db_ready():
@@ -450,8 +572,59 @@ def identity_credentials_changed(relation_id=None, remote_unit=None):
         log('Deferring identity_credentials_changed() to service leader.')
 
 
+def send_ssl_sync_request():
+    """Set sync request on cluster relation.
+
+    The value set is a bitmap of the ssl configs currently enabled so that if
+    they change, we ensure that certs are synced. This setting is consumed by
+    cluster-relation-changed ssl master. We also clear the 'synced' set to
+    guarantee that a sync will occur.
+
+    Note that we do nothing if the setting is already applied.
+    """
+    unit = local_unit().replace('/', '-')
+    # Start with core config (e.g. used for signing revoked token list)
+    ssl_config = 0b1
+
+    use_https = config('use-https')
+    if use_https and bool_from_string(use_https):
+        ssl_config ^= 0b10
+
+    https_service_endpoints = config('https-service-endpoints')
+    if (https_service_endpoints and
+            bool_from_string(https_service_endpoints)):
+        ssl_config ^= 0b100
+
+    enable_pki = config('enable-pki')
+    if enable_pki and bool_from_string(enable_pki):
+        ssl_config ^= 0b1000
+
+    key = 'ssl-sync-required-%s' % (unit)
+    settings = {key: ssl_config}
+
+    prev = 0b0
+    rid = None
+    for rid in relation_ids('cluster'):
+        for unit in related_units(rid):
+            _prev = relation_get(rid=rid, unit=unit, attribute=key) or 0b0
+            if _prev and _prev > prev:
+                prev = bin(_prev)
+
+    if rid and prev ^ ssl_config:
+        if is_leader():
+            clear_ssl_synced_units()
+
+        log("Setting %s=%s" % (key, bin(ssl_config)), level=DEBUG)
+        relation_set(relation_id=rid, relation_settings=settings)
+
+
 @hooks.hook('cluster-relation-joined')
-def cluster_joined(rid=None):
+def cluster_joined(rid=None, ssl_sync_request=True):
+    unison.ssh_authorized_peers(user=SSH_USER,
+                                group=SSH_USER,
+                                peer_interface='cluster',
+                                ensure_local_user=True)
+
     settings = {}
 
     for addr_type in ADDRESS_TYPES:
@@ -465,19 +638,57 @@ def cluster_joined(rid=None):
 
     relation_set(relation_id=rid, relation_settings=settings)
 
+    if ssl_sync_request:
+        send_ssl_sync_request()
+
 
 @hooks.hook('cluster-relation-changed')
 @restart_on_change(restart_map(), stopstart=True)
+@update_certs_if_available
 def cluster_changed():
+    unison.ssh_authorized_peers(user=SSH_USER,
+                                group=SSH_USER,
+                                peer_interface='cluster',
+                                ensure_local_user=True)
     # NOTE(jamespage) re-echo passwords for peer storage
-    echo_whitelist = ['_passwd', 'identity-service:', 'db-initialised']
+    echo_whitelist = ['_passwd', 'identity-service:',
+                      'db-initialised', 'ssl-cert-available-updates']
+    # Don't echo if leader since a re-election may be in progress.
+    if not is_leader():
+        echo_whitelist.append('ssl-cert-master')
 
     log("Peer echo whitelist: %s" % (echo_whitelist), level=DEBUG)
     peer_echo(includes=echo_whitelist, force=True)
 
-    update_all_identity_relation_units()
+    check_peer_actions()
 
-    CONFIGS.write_all()
+    initialise_pki()
+
+    if is_leader():
+        # Figure out if we need to mandate a sync
+        units = get_ssl_sync_request_units()
+        synced_units = relation_get_and_migrate(attribute='ssl-synced-units',
+                                                unit=local_unit())
+        diff = None
+        if synced_units:
+            synced_units = json.loads(synced_units)
+            diff = set(units).symmetric_difference(set(synced_units))
+    else:
+        units = None
+
+    if units and (not synced_units or diff):
+        log("New peers joined and need syncing - %s" %
+            (', '.join(units)), level=DEBUG)
+        update_all_identity_relation_units_force_sync()
+    else:
+        update_all_identity_relation_units()
+
+    if not is_leader() and is_ssl_cert_master():
+        # Force a sync and trigger a sync master re-election since we are not
+        # leader anymore.
+        force_ssl_sync()
+    else:
+        CONFIGS.write_all()
 
 
 @hooks.hook('leader-elected')
@@ -490,6 +701,8 @@ def leader_elected():
 
     update_all_identity_relation_units()
 
+    update_all_identity_relation_units()
+
 
 @hooks.hook('leader-settings-changed')
 @restart_on_change(restart_map(), stopstart=True)
@@ -572,6 +785,7 @@ def ha_joined(relation_id=None):
 
 @hooks.hook('ha-relation-changed')
 @restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed()
 def ha_changed():
     CONFIGS.write_all()
 
@@ -579,7 +793,10 @@ def ha_changed():
     if clustered:
         log('Cluster configured, notifying other services and updating '
             'keystone endpoint configuration')
-        update_all_identity_relation_units()
+        if is_ssl_cert_master():
+            update_all_identity_relation_units_force_sync()
+        else:
+            update_all_identity_relation_units()
 
 
 @hooks.hook('identity-admin-relation-changed')
@@ -632,11 +849,16 @@ def domain_backend_changed(relation_id=None, unit=None):
         domain_nonce_key = 'domain-restart-nonce-{}'.format(domain_name)
         db = unitdata.kv()
         if restart_nonce != db.get(domain_nonce_key):
-            restart_keystone()
+            if not is_unit_paused_set():
+                if snap_install_requested():
+                    service_restart('snap.keystone.*')
+                else:
+                    service_restart(keystone_service())
             db.set(domain_nonce_key, restart_nonce)
             db.flush()
 
 
+@synchronize_ca_if_changed(fatal=True)
 def configure_https():
     '''
     Enables SSL API Apache config if appropriate and kicks identity-service
@@ -815,10 +1037,17 @@ def configure_oidc():
 
 @hooks.hook('upgrade-charm')
 @restart_on_change(restart_map(), stopstart=True)
+@synchronize_ca_if_changed()
 @harden()
 def upgrade_charm():
     status_set('maintenance', 'Installing apt packages')
     apt_install(filter_installed_packages(determine_packages()))
+    unison.ssh_authorized_peers(user=SSH_USER,
+                                group=SSH_USER,
+                                peer_interface='cluster',
+                                ensure_local_user=True)
+
+    ensure_ssl_dirs()
 
     if run_in_apache():
         disable_unused_apache_sites()
@@ -861,102 +1090,6 @@ def update_nrpe_config():
     nrpe_setup.write()
 
 
-@hooks.hook('keystone-fid-service-provider-relation-joined',
-            'keystone-fid-service-provider-relation-changed')
-def keystone_fid_service_provider_changed():
-    if get_api_version() < 3:
-        log('Identity federation is only supported with keystone v3')
-        return
-    if CompareOpenStackReleases(os_release('keystone-common')) < 'ocata':
-        log('Ignoring keystone-fid-service-provider relation as it is'
-            ' not supported on releases older than Ocata')
-        return
-    # for the join case a keystone public-facing hostname and service
-    # port need to be set
-    update_keystone_fid_service_provider(relation_id=relation_id())
-
-    # handle relation data updates (if any), e.g. remote_id_attribute
-    # and a restart will be handled via a nonce, not restart_on_change
-    CONFIGS.write(KEYSTONE_CONF)
-
-    # The relation is container-scoped so this keystone unit's unitdata
-    # will only contain a nonce of a single fid subordinate for a given
-    # fid backend (relation id)
-    restart_nonce = relation_get('restart-nonce')
-    if restart_nonce:
-        nonce = json.loads(restart_nonce)
-        # multiplex by relation id for multiple federated identity
-        # provider charms
-        fid_nonce_key = 'fid-restart-nonce-{}'.format(relation_id())
-        db = unitdata.kv()
-        if restart_nonce != db.get(fid_nonce_key):
-            restart_keystone()
-            db.set(fid_nonce_key, nonce)
-            db.flush()
-
-
-@hooks.hook('keystone-fid-service-provider-relation-broken')
-def keystone_fid_service_provider_broken():
-    if CompareOpenStackReleases(os_release('keystone-common')) < 'ocata':
-        log('Ignoring keystone-fid-service-provider relation as it is'
-            ' not supported on releases older than Ocata')
-        return
-
-    restart_keystone()
-
-
-@hooks.hook('websso-trusted-dashboard-relation-joined',
-            'websso-trusted-dashboard-relation-changed',
-            'websso-trusted-dashboard-relation-broken')
-@restart_on_change(restart_map(), restart_functions=restart_function_map())
-def websso_trusted_dashboard_changed():
-    if get_api_version() < 3:
-        log('WebSSO is only supported with keystone v3')
-        return
-    if CompareOpenStackReleases(os_release('keystone-common')) < 'ocata':
-        log('Ignoring WebSSO relation as it is not supported on'
-            ' releases older than Ocata')
-        return
-    CONFIGS.write(KEYSTONE_CONF)
-
-
-def update_keystone_fid_service_provider(relation_id=None):
-    tls_enabled = (config('ssl_cert') is not None and
-                   config('ssl_key') is not None)
-    # reactive endpoints implementation on the other side, hence
-    # json-encoded values
-    fid_settings = {
-        'hostname': json.dumps(config('os-public-hostname')),
-        'port': json.dumps(config('service-port')),
-        'tls-enabled': json.dumps(tls_enabled),
-    }
-
-    relation_set(relation_id=relation_id,
-                 relation_settings=fid_settings)
-
-
-@hooks.hook('certificates-relation-joined')
-def certs_joined(relation_id=None):
-    relation_set(
-        relation_id=relation_id,
-        relation_settings=get_certificate_request())
-
-
-@hooks.hook('certificates-relation-changed')
-@restart_on_change(restart_map(), stopstart=True)
-def certs_changed(relation_id=None, unit=None):
-    # update_all_identity_relation_units calls the keystone API
-    # so configs need to be written and services restarted
-    # before
-    @restart_on_change(restart_map(), stopstart=True)
-    def write_certs_and_config():
-        process_certificates('keystone', relation_id, unit)
-        configure_https()
-    write_certs_and_config()
-    update_all_identity_relation_units()
-    update_all_domain_backends()
-
-
 def main():
     try:
         hooks.execute(sys.argv)
diff --git a/hooks/keystone_ssl.py b/hooks/keystone_ssl.py
new file mode 100644
index 0000000000000000000000000000000000000000..43f96588c44a6d1cb6418cbd7ca5655e15a36b88
--- /dev/null
+++ b/hooks/keystone_ssl.py
@@ -0,0 +1,358 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import shutil
+import subprocess
+import tarfile
+import tempfile
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+)
+
# Lifetime (in days) of certificates signed by the charm CA.
CA_EXPIRY = '365'
# Subject organizationName / organizationalUnitName used for all certs.
ORG_NAME = 'Ubuntu'
ORG_UNIT = 'Ubuntu Cloud'
# System trust-store location where the charm installs its CA bundle.
CA_BUNDLE = '/usr/local/share/ca-certificates/juju_ca_cert.crt'

# openssl config template used when creating a CA (root or intermediate);
# interpolated with ca_dir, org_name, org_unit_name and common_name.
CA_CONFIG = """
[ ca ]
default_ca = CA_default

[ CA_default ]
dir                     = %(ca_dir)s
policy                  = policy_match
database                = $dir/index.txt
serial                  = $dir/serial
certs                   = $dir/certs
crl_dir                 = $dir/crl
new_certs_dir           = $dir/newcerts
certificate             = $dir/cacert.pem
private_key             = $dir/private/cacert.key
RANDFILE                = $dir/private/.rand
default_md              = default

[ req ]
default_bits            = 1024
default_md              = sha1

prompt                  = no
distinguished_name      = ca_distinguished_name

x509_extensions         = ca_extensions

[ ca_distinguished_name ]
organizationName        = %(org_name)s
organizationalUnitName  = %(org_unit_name)s Certificate Authority
commonName              = %(common_name)s

[ policy_match ]
countryName             = optional
stateOrProvinceName     = optional
organizationName        = match
organizationalUnitName  = optional
commonName              = supplied

[ ca_extensions ]
basicConstraints        = critical,CA:true
subjectKeyIdentifier    = hash
authorityKeyIdentifier  = keyid:always, issuer
keyUsage                = cRLSign, keyCertSign
"""

# openssl config template used when signing service (server) certificate
# requests with the intermediate CA; interpolated with ca_dir, org_name
# and org_unit_name.
SIGNING_CONFIG = """
[ ca ]
default_ca = CA_default

[ CA_default ]
dir                     = %(ca_dir)s
policy                  = policy_match
database                = $dir/index.txt
serial                  = $dir/serial
certs                   = $dir/certs
crl_dir                 = $dir/crl
new_certs_dir           = $dir/newcerts
certificate             = $dir/cacert.pem
private_key             = $dir/private/cacert.key
RANDFILE                = $dir/private/.rand
default_md              = default

[ req ]
default_bits            = 1024
default_md              = sha1

prompt                  = no
distinguished_name      = req_distinguished_name

x509_extensions         = req_extensions

[ req_distinguished_name ]
organizationName        = %(org_name)s
organizationalUnitName  = %(org_unit_name)s Server Farm

[ policy_match ]
countryName             = optional
stateOrProvinceName     = optional
organizationName        = match
organizationalUnitName  = optional
commonName              = supplied

[ req_extensions ]
basicConstraints        = CA:false
subjectKeyIdentifier    = hash
authorityKeyIdentifier  = keyid:always, issuer
keyUsage                = digitalSignature, keyEncipherment, keyAgreement
extendedKeyUsage        = serverAuth, clientAuth
"""

# Instance can be appended to this list to represent a singleton
CA_SINGLETON = []
+
+
def init_ca(ca_dir, common_name, org_name=ORG_NAME, org_unit_name=ORG_UNIT):
    """Create the on-disk directory layout and openssl config for a CA.

    Idempotent: existing directories and files are left untouched.

    :param ca_dir: root directory of the CA (created if missing).
    :param common_name: CN written into the generated CA config.
    :param org_name: organizationName for the CA subject.
    :param org_unit_name: organizationalUnitName prefix for the CA subject.
    """
    log('Ensuring certificate authority exists at %s.' % ca_dir, level=DEBUG)
    if not os.path.exists(ca_dir):
        log('Initializing new certificate authority at %s' % ca_dir,
            level=DEBUG)
        os.mkdir(ca_dir)

    for subdir in ['certs', 'crl', 'newcerts', 'private']:
        path = os.path.join(ca_dir, subdir)
        if not os.path.exists(path):
            log('Creating %s.' % path, level=DEBUG)
            os.mkdir(path)
    # Restrict access to private key material.
    os.chmod(os.path.join(ca_dir, 'private'), 0o710)

    serial = os.path.join(ca_dir, 'serial')
    if not os.path.isfile(serial):
        # Text mode: the payload is str, so 'w' works on both py2 and py3
        # (the original 'wb' raises TypeError under py3).
        with open(serial, 'w') as out:
            out.write('01\n')

    index = os.path.join(ca_dir, 'index.txt')
    if not os.path.isfile(index):
        with open(index, 'w') as out:
            out.write('')

    conf = os.path.join(ca_dir, 'ca.cnf')
    if not os.path.isfile(conf):
        log('Creating new CA config in %s' % ca_dir, level=DEBUG)
        with open(conf, 'w') as out:
            # Explicit mapping instead of the fragile `% locals()` idiom.
            out.write(CA_CONFIG % {'ca_dir': ca_dir,
                                   'org_name': org_name,
                                   'org_unit_name': org_unit_name,
                                   'common_name': common_name})
+
+
def root_ca_crt_key(ca_dir):
    """Return (cert_path, key_path) for the root CA, generating a fresh
    self-signed cert + key if either file is missing."""
    crt = os.path.join(ca_dir, 'cacert.pem')
    key = os.path.join(ca_dir, 'private', 'cacert.key')
    regenerate = False
    for path in (crt, key):
        if os.path.isfile(path):
            log('Found %s.' % path, level=DEBUG)
        else:
            log('Missing %s, will re-initialize cert+key.' % path,
                level=DEBUG)
            regenerate = True

    if regenerate:
        subprocess.check_call(['openssl', 'req', '-config',
                               os.path.join(ca_dir, 'ca.cnf'),
                               '-x509', '-nodes', '-newkey', 'rsa',
                               '-days', '21360', '-keyout', key,
                               '-out', crt, '-outform', 'PEM'])

    return crt, key
+
+
def intermediate_ca_csr_key(ca_dir):
    """Generate a new key and CSR for an intermediate CA.

    :returns: (csr_path, key_path) tuple.
    """
    log('Creating new intermediate CSR.', level=DEBUG)
    key_path = os.path.join(ca_dir, 'private', 'cacert.key')
    csr_path = os.path.join(ca_dir, 'cacert.csr')
    subprocess.check_call(['openssl', 'req', '-config',
                           os.path.join(ca_dir, 'ca.cnf'),
                           '-sha1', '-newkey', 'rsa', '-nodes',
                           '-keyout', key_path, '-out', csr_path,
                           '-outform', 'PEM'])
    return csr_path, key_path
+
+
def sign_int_csr(ca_dir, csr, common_name):
    """Sign an (intermediate) CSR with the CA living in *ca_dir*.

    :param ca_dir: directory of the signing CA (must contain ca.cnf).
    :param csr: path to the certificate signing request.
    :param common_name: CN to stamp into the issued certificate subject.
    :returns: path of the signed certificate.
    """
    log('Signing certificate request %s.' % csr, level=DEBUG)
    crt_name = os.path.basename(csr).split('.')[0]
    crt = os.path.join(ca_dir, 'certs', '%s.crt' % crt_name)
    subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name)
    conf = os.path.join(ca_dir, 'ca.cnf')
    # NOTE: the original passed '-batch' twice; once is sufficient.
    cmd = ['openssl', 'ca', '-batch', '-config', conf, '-extensions',
           'ca_extensions', '-days', CA_EXPIRY, '-notext', '-in', csr,
           '-out', crt, '-subj', subj]
    log("Executing: %s" % ' '.join(cmd), level=DEBUG)
    subprocess.check_call(cmd)
    return crt
+
+
def init_root_ca(ca_dir, common_name):
    """Ensure a root CA exists in *ca_dir*; return (cert_path, key_path)."""
    init_ca(ca_dir, common_name)
    return root_ca_crt_key(ca_dir)
+
+
def init_intermediate_ca(ca_dir, common_name, root_ca_dir, org_name=ORG_NAME,
                         org_unit_name=ORG_UNIT):
    """Create (or reuse) an intermediate CA signed by the root CA.

    Also drops a signing.cnf config used for issuing service certs.

    :param ca_dir: directory for the intermediate CA.
    :param common_name: CN for the intermediate CA certificate.
    :param root_ca_dir: directory of the root CA that signs the CSR.
    :param org_name: organizationName for the signing config.
    :param org_unit_name: organizationalUnitName prefix for the config.
    """
    init_ca(ca_dir, common_name)
    if not os.path.isfile(os.path.join(ca_dir, 'cacert.pem')):
        # No intermediate cert yet - generate a CSR and have the root
        # CA sign it.
        csr, key = intermediate_ca_csr_key(ca_dir)
        crt = sign_int_csr(root_ca_dir, csr, common_name)
        shutil.copy(crt, os.path.join(ca_dir, 'cacert.pem'))
    else:
        log('Intermediate CA certificate already exists.', level=DEBUG)

    conf = os.path.join(ca_dir, 'signing.cnf')
    if not os.path.isfile(conf):
        log('Creating new signing config in %s' % ca_dir, level=DEBUG)
        # Text mode + explicit mapping: str payload ('wb' breaks on py3)
        # and `% locals()` is fragile against local-variable changes.
        with open(conf, 'w') as out:
            out.write(SIGNING_CONFIG % {'ca_dir': ca_dir,
                                        'org_name': org_name,
                                        'org_unit_name': org_unit_name})
+
+
def create_certificate(ca_dir, service):
    """Create a key + CSR for *service* and sign it with the CA in ca_dir.

    The key, CSR and signed certificate all land under <ca_dir>/certs/.
    """
    cn = service
    subject = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, cn)
    csr_path = os.path.join(ca_dir, 'certs', '%s.csr' % service)
    key_path = os.path.join(ca_dir, 'certs', '%s.key' % service)
    subprocess.check_call(['openssl', 'req', '-sha1', '-newkey', 'rsa',
                           '-nodes', '-keyout', key_path, '-out', csr_path,
                           '-subj', subject])
    signed = sign_int_csr(ca_dir, csr_path, cn)
    log('Signed new CSR, crt @ %s' % signed, level=DEBUG)
+
+
def update_bundle(bundle_file, new_bundle):
    """Install *new_bundle* as the system CA bundle at *bundle_file*.

    No-op when the on-disk bundle already matches; otherwise the bundle
    is (re)written and the system trust store is refreshed.

    :param bundle_file: destination path of the CA bundle.
    :param new_bundle: full bundle contents as a string.
    """
    # NOTE: the original began with a stray bare `return`, making the
    # entire function dead code so the bundle was never installed or
    # refreshed - removed.
    if os.path.isfile(bundle_file):
        with open(bundle_file, 'r') as f:
            current = f.read().strip()
        if new_bundle == current:
            log('CA Bundle @ %s is up to date.' % bundle_file, level=DEBUG)
            return

        log('Updating CA bundle @ %s.' % bundle_file, level=DEBUG)

    # Text mode: new_bundle is str ('wb' raises TypeError under py3).
    with open(bundle_file, 'w') as out:
        out.write(new_bundle)

    subprocess.check_call(['update-ca-certificates'])
+
+
def tar_directory(path):
    """Return the contents of directory *path* as uncompressed tar bytes.

    The archive is built relative to the parent of *path*, so member
    names begin with the directory's basename.

    :param path: directory to archive.
    :returns: raw tar archive contents.
    """
    cwd = os.getcwd()
    parent = os.path.dirname(path)
    directory = os.path.basename(path)
    tmp = tempfile.TemporaryFile()
    try:
        # chdir so tar member names are relative to the parent dir.
        os.chdir(parent)
        tarball = tarfile.TarFile(fileobj=tmp, mode='w')
        try:
            tarball.add(directory)
        finally:
            tarball.close()
        tmp.seek(0)
        return tmp.read()
    finally:
        # Always restore the working directory and release the temp
        # file, even on error (the original leaked both on exception).
        os.chdir(cwd)
        tmp.close()
+
+
class JujuCA(object):
    """Charm-managed certificate authority.

    Builds a root CA plus an intermediate CA on first use (both creation
    paths are idempotent) and issues per-service certificates signed by
    the intermediate.  Files are chown'd to user.group so the peer-sync
    user can replicate them.
    """

    def __init__(self, name, ca_dir, root_ca_dir, user, group):
        # Root CA
        cn = '%s Certificate Authority' % name
        # NOTE(review): root_crt/root_key are unused; init_root_ca is
        # called for its side effect of creating the root CA on disk.
        root_crt, root_key = init_root_ca(root_ca_dir, cn)
        # Intermediate CA
        cn = '%s Intermediate Certificate Authority' % name
        init_intermediate_ca(ca_dir, cn, root_ca_dir)

        # Create dirs
        cmd = ['chown', '-R', '%s.%s' % (user, group), ca_dir]
        subprocess.check_call(cmd)
        cmd = ['chown', '-R', '%s.%s' % (user, group), root_ca_dir]
        subprocess.check_call(cmd)

        self.ca_dir = ca_dir
        self.root_ca_dir = root_ca_dir
        self.user = user
        self.group = group
        # Install the combined (intermediate + root) bundle into the
        # system trust store.
        update_bundle(CA_BUNDLE, self.get_ca_bundle())

    def _sign_csr(self, csr, service, common_name):
        """Sign *csr* with the intermediate CA; return the cert path.

        NOTE(review): `service` is unused here - presumably kept for
        signature symmetry with _create_certificate.
        """
        subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name)
        crt = os.path.join(self.ca_dir, 'certs', '%s.crt' % common_name)
        conf = os.path.join(self.ca_dir, 'signing.cnf')
        cmd = ['openssl', 'ca', '-config', conf, '-extensions',
               'req_extensions', '-days', '365', '-notext', '-in', csr,
               '-out', crt, '-batch', '-subj', subj]
        subprocess.check_call(cmd)
        return crt

    def _create_certificate(self, service, common_name):
        """Generate a key + CSR for *service*, sign it and fix ownership.

        :returns: (cert_path, key_path) tuple.
        """
        subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name)
        csr = os.path.join(self.ca_dir, 'certs', '%s.csr' % service)
        key = os.path.join(self.ca_dir, 'certs', '%s.key' % service)
        cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa', '-nodes',
               '-keyout', key, '-out', csr, '-subj', subj]
        subprocess.check_call(cmd)
        crt = self._sign_csr(csr, service, common_name)
        # Re-own new files so the sync user can read them.
        cmd = ['chown', '-R', '%s.%s' % (self.user, self.group), self.ca_dir]
        subprocess.check_call(cmd)
        log('Signed new CSR, crt @ %s' % crt, level=DEBUG)
        return crt, key

    def get_key_path(self, cn):
        """Path of the private key for common name *cn*."""
        return os.path.join(self.ca_dir, 'certs', '%s.key' % cn)

    def get_cert_path(self, cn):
        """Path of the certificate for common name *cn*."""
        return os.path.join(self.ca_dir, 'certs', '%s.crt' % cn)

    def get_cert_and_key(self, common_name):
        """Return (cert, key) PEM strings for *common_name*.

        The pair is created on first request; subsequent calls return
        the existing files.
        """
        keypath = self.get_key_path(common_name)
        crtpath = self.get_cert_path(common_name)
        if not os.path.isfile(crtpath):
            log("Creating certificate and key for {}.".format(common_name),
                level=DEBUG)
            crtpath, keypath = self._create_certificate(common_name,
                                                        common_name)

        with open(crtpath, 'r') as f:
            crt = f.read()
        with open(keypath, 'r') as f:
            key = f.read()
        return crt, key

    @property
    def ca_cert_path(self):
        # Intermediate CA certificate.
        return os.path.join(self.ca_dir, 'cacert.pem')

    @property
    def ca_key_path(self):
        # Intermediate CA private key.
        return os.path.join(self.ca_dir, 'private', 'cacert.key')

    @property
    def root_ca_cert_path(self):
        # Root CA certificate.
        return os.path.join(self.root_ca_dir, 'cacert.pem')

    @property
    def root_ca_key_path(self):
        # Root CA private key.
        return os.path.join(self.root_ca_dir, 'private', 'cacert.key')

    def get_ca_bundle(self):
        """Return the intermediate and root certs concatenated,
        intermediate first."""
        with open(self.ca_cert_path) as f:
            int_cert = f.read()
        with open(self.root_ca_cert_path) as f:
            root_cert = f.read()
        # NOTE: ordering of certs in bundle matters!
        return int_cert + root_cert
diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py
index a0eee3126a4150a6a7480efe8bb72b4c0297d95b..c3e817b8f3e951939918ece611b1f2b73d744f1d 100644
--- a/hooks/keystone_utils.py
+++ b/hooks/keystone_utils.py
@@ -14,15 +14,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import glob
+import grp
+import hashlib
+import json
 import os
+import pwd
+import re
 import shutil
 import subprocess
+import tarfile
+import threading
 import time
 import urlparse
 import uuid
 import sys
 
 from itertools import chain
+from base64 import b64encode
 from collections import OrderedDict
 from copy import deepcopy
 
@@ -30,6 +39,7 @@ from charmhelpers.contrib.hahelpers.cluster import (
     is_elected_leader,
     determine_api_port,
     https,
+    peer_units,
     get_hacluster_config,
 )
 
@@ -50,10 +60,18 @@ from charmhelpers.contrib.openstack.utils import (
     configure_installation_source,
     error_out,
     get_os_codename_install_source,
+    git_clone_and_install,
+    git_default_repos,
+    git_determine_usr_bin,
+    git_install_requested,
+    git_pip_venv_dir,
+    git_src_dir,
+    git_yaml_value,
     os_release,
     save_script_rc as _save_script_rc,
     pause_unit,
     resume_unit,
+    is_unit_paused_set,
     make_assess_status_func,
     os_application_version_set,
     CompareOpenStackReleases,
@@ -62,18 +80,24 @@ from charmhelpers.contrib.openstack.utils import (
     install_os_snaps,
     get_snaps_install_info_from_origin,
     enable_memcache,
-    is_unit_paused_set,
+)
+
+from charmhelpers.contrib.python.packages import (
+    pip_install,
 )
 
 from charmhelpers.core.strutils import (
     bool_from_string,
 )
 
+import charmhelpers.contrib.unison as unison
+
 from charmhelpers.core.decorators import (
     retry_on_exception,
 )
 
 from charmhelpers.core.hookenv import (
+    charm_dir,
     config,
     leader_get,
     leader_set,
@@ -86,6 +110,9 @@ from charmhelpers.core.hookenv import (
     related_units,
     DEBUG,
     INFO,
+    WARNING,
+    ERROR,
+    is_leader,
 )
 
 from charmhelpers.fetch import (
@@ -96,12 +123,16 @@ from charmhelpers.fetch import (
 )
 
 from charmhelpers.core.host import (
+    adduser,
+    add_group,
+    add_user_to_group,
     mkdir,
-    service_restart,
     service_stop,
     service_start,
+    service_restart,
     pwgen,
     lsb_release,
+    write_file,
     CompareHostReleases,
 )
 
@@ -109,11 +140,13 @@ from charmhelpers.contrib.peerstorage import (
     peer_store_and_set,
     peer_store,
     peer_retrieve,
+    relation_set as relation_set_and_migrate_to_leader,
 )
 
 from charmhelpers.core.templating import render
 
 import keystone_context
+import keystone_ssl as ssl
 
 
 TEMPLATES = 'templates/'
@@ -130,6 +163,7 @@ BASE_PACKAGES = [
     'python-requests',
     'python-six',
     'pwgen',
+    'unison',
     'uuid',
 ]
 
@@ -138,11 +172,32 @@ BASE_PACKAGES_SNAP = [
     'openssl',
     'python-six',
     'pwgen',
+    'unison',
     'uuid',
 ]
 
 VERSION_PACKAGE = 'keystone'
 
+BASE_GIT_PACKAGES = [
+    'libffi-dev',
+    'libmysqlclient-dev',
+    'libssl-dev',
+    'libxml2-dev',
+    'libxslt1-dev',
+    'libyaml-dev',
+    'python-dev',
+    'python-pip',
+    'python-setuptools',
+    'zlib1g-dev',
+]
+
+# ubuntu packages that should not be installed when deploying from git
+GIT_PACKAGE_BLACKLIST = [
+    'keystone',
+]
+
+
+SSH_USER = 'juju_keystone'
 if snap_install_requested():
     SNAP_BASE_DIR = "/snap/keystone/current"
     SNAP_COMMON_DIR = "/var/snap/keystone/common"
@@ -164,9 +219,17 @@ if snap_install_requested():
     STORED_DEFAULT_DOMAIN_ID = ("{}/keystone.default_domain_id"
                                 "".format(SNAP_LIB_DIR))
     SERVICE_PASSWD_PATH = '{}/services.passwd'.format(SNAP_LIB_DIR)
+
+    SSH_USER_HOME = '/home/{}'.format(SSH_USER)
+    SYNC_FLAGS_DIR = '{}/juju_sync_flags/'.format(SSH_USER_HOME)
+    SYNC_DIR = '{}/juju_sync/'.format(SSH_USER_HOME)
+    SSL_SYNC_ARCHIVE = os.path.join(SYNC_DIR, 'juju-ssl-sync.tar')
+    SSL_DIR = '{}/juju_ssl/'.format(SNAP_LIB_DIR)
+    PKI_CERTS_DIR = os.path.join(SSL_DIR, 'pki')
     POLICY_JSON = ('{}/keystone.conf.d/policy.json'
                    ''.format(SNAP_COMMON_KEYSTONE_DIR))
     BASE_SERVICES = ['snap.keystone.uwsgi', 'snap.keystone.nginx']
+    APACHE_SSL_DIR = '{}/keystone'.format(SSL_DIR)
 else:
     APACHE_SSL_DIR = '/etc/apache2/ssl/keystone'
     KEYSTONE_USER = 'keystone'
@@ -180,6 +243,12 @@ else:
     STORED_ADMIN_DOMAIN_ID = "/var/lib/keystone/keystone.admin_domain_id"
     STORED_DEFAULT_DOMAIN_ID = "/var/lib/keystone/keystone.default_domain_id"
     SERVICE_PASSWD_PATH = '/var/lib/keystone/services.passwd'
+
+    SYNC_FLAGS_DIR = '/var/lib/keystone/juju_sync_flags/'
+    SYNC_DIR = '/var/lib/keystone/juju_sync/'
+    SSL_SYNC_ARCHIVE = os.path.join(SYNC_DIR, 'juju-ssl-sync.tar')
+    SSL_DIR = '/var/lib/keystone/juju_ssl/'
+    PKI_CERTS_DIR = os.path.join(SSL_DIR, 'pki')
     POLICY_JSON = '/etc/keystone/policy.json'
     BASE_SERVICES = [
         'keystone',
@@ -210,8 +279,8 @@ KEYSTONE_USER = 'keystone'
 SSL_CA_NAME = 'Ubuntu Cloud'
 CLUSTER_RES = 'grp_ks_vips'
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
+SSL_SYNC_SEMAPHORE = threading.Semaphore()
 SSL_DIRS = [SSL_DIR, APACHE_SSL_DIR, SHIB_DIR, CA_CERT_PATH]
-CLUSTER_RES = 'grp_ks_vips'
 ADMIN_DOMAIN = 'admin_domain'
 ADMIN_PROJECT = 'admin'
 DEFAULT_DOMAIN = 'default'
@@ -226,13 +295,12 @@ BASE_RESOURCE_MAP = OrderedDict([
         'services': BASE_SERVICES,
         'contexts': [keystone_context.KeystoneContext(),
                      context.SharedDBContext(ssl_dir=KEYSTONE_CONF_DIR),
+                     context.PostgresqlDBContext(),
                      context.SyslogContext(),
                      keystone_context.HAProxyContext(),
                      context.BindHostContext(),
                      context.WorkerConfigContext(),
-                     context.MemcacheContext(package='keystone'),
-                     keystone_context.KeystoneFIDServiceProviderContext(),
-                     keystone_context.WebSSOTrustedDashboardContext()],
+                     context.MemcacheContext(package='keystone')],
     }),
     (KEYSTONE_LOGGER_CONF, {
         'contexts': [keystone_context.KeystoneLoggingContext()],
@@ -248,6 +316,7 @@ BASE_RESOURCE_MAP = OrderedDict([
         'contexts': [keystone_context.KeystoneContext(),
                      keystone_context.NginxSSLContext(),
                      context.SharedDBContext(ssl_dir=KEYSTONE_CONF_DIR),
+                     context.PostgresqlDBContext(),
                      context.SyslogContext(),
                      keystone_context.HAProxyContext(),
                      context.BindHostContext(),
@@ -257,6 +326,7 @@ BASE_RESOURCE_MAP = OrderedDict([
         'services': BASE_SERVICES,
         'contexts': [keystone_context.KeystoneContext(),
                      context.SharedDBContext(ssl_dir=KEYSTONE_CONF_DIR),
+                     context.PostgresqlDBContext(),
                      context.SyslogContext(),
                      keystone_context.HAProxyContext(),
                      keystone_context.NginxSSLContext(),
@@ -428,7 +498,7 @@ valid_services = {
 # The interface is said to be satisfied if anyone of the interfaces in the
 # list has a complete context.
 REQUIRED_INTERFACES = {
-    'database': ['shared-db'],
+    'database': ['shared-db', 'pgsql-db'],
 }
 
 
@@ -496,17 +566,17 @@ def resource_map():
                     svcs.remove('keystone')
                 if 'apache2' not in svcs:
                     svcs.append('apache2')
-
+            admin_script = os.path.join(git_determine_usr_bin(),
+                                        "keystone-wsgi-admin")
+            public_script = os.path.join(git_determine_usr_bin(),
+                                         "keystone-wsgi-public")
             # use single copy:
             keystoneContext = keystone_context.KeystoneContext()
             samlContext = keystone_context.SamlContext()
 
             resource_map[WSGI_KEYSTONE_API_CONF] = {
                 'contexts': [
-                    context.WSGIWorkerConfigContext(
-                        name="keystone",
-                        admin_script='/usr/bin/keystone-wsgi-admin',
-                        public_script='/usr/bin/keystone-wsgi-public'),
+                    context.WSGIWorkerConfigContext(name="keystone", admin_script=admin_script, public_script=public_script),  # nopep8
                     keystoneContext,
                     samlContext,
                     keystone_context.OidcContext()],
@@ -636,6 +706,9 @@ def determine_packages():
         return sorted(pkgs)
     else:
         packages = set(services()).union(BASE_PACKAGES)
+        if git_install_requested():
+            packages |= set(BASE_GIT_PACKAGES)
+            packages -= set(GIT_PACKAGE_BLACKLIST)
         if run_in_apache():
             packages.add('libapache2-mod-wsgi')
             if config('enable-oidc'):
@@ -1124,18 +1197,7 @@ def set_admin_passwd(passwd, user=None):
 
 def get_api_version():
     api_version = config('preferred-api-version')
-    cmp_release = CompareOpenStackReleases(
-        get_os_codename_install_source(config('openstack-origin'))
-    )
-    if not api_version:
-        # NOTE(jamespage): Queens dropped support for v2, so default
-        #                  to v3.
-        if cmp_release >= 'queens':
-            api_version = 3
-        else:
-            api_version = 2
-    if ((cmp_release < 'queens' and api_version not in [2, 3]) or
-            (cmp_release >= 'queens' and api_version != 3)):
+    if api_version not in [2, 3]:
         raise ValueError('Bad preferred-api-version')
     return api_version
 
@@ -1328,6 +1390,592 @@ def is_password_changed(username, passwd):
     return (_passwd is None or passwd != _passwd)
 
 
+def ensure_ssl_dirs():
+    """Ensure unison has access to these dirs.
+
+    Creates SYNC_FLAGS_DIR and SYNC_DIR if missing, otherwise re-asserts
+    SSH_USER/KEYSTONE_USER ownership and 0o775 perms on the existing dirs.
+    """
+    for path in [SYNC_FLAGS_DIR, SYNC_DIR]:
+        if not os.path.isdir(path):
+            mkdir(path, SSH_USER, KEYSTONE_USER, 0o775)
+        else:
+            ensure_permissions(path, user=SSH_USER, group=KEYSTONE_USER,
+                               perms=0o775)
+
+
+def ensure_permissions(path, user=None, group=None, perms=None, recurse=False,
+                       maxdepth=50):
+    """Set chown and chmod for path.
+
+    Note that -1 for uid or gid result in no change.
+
+    :param path: filesystem path to modify.
+    :param user: owner username to chown to (ownership unchanged if None).
+    :param group: group name to chgrp to (group unchanged if None).
+    :param perms: octal mode for chmod (mode unchanged if None/falsy).
+    :param recurse: if True, also apply to everything under path.
+    :param maxdepth: recursion guard to avoid runaway descent.
+    """
+    if user:
+        uid = pwd.getpwnam(user).pw_uid
+    else:
+        uid = -1
+
+    if group:
+        gid = grp.getgrnam(group).gr_gid
+    else:
+        gid = -1
+
+    os.chown(path, uid, gid)
+
+    if perms:
+        os.chmod(path, perms)
+
+    if recurse:
+        if not maxdepth:
+            log("Max recursion depth reached - skipping further recursion")
+            return
+
+        # Apply the same ownership/perms one level down, decrementing the
+        # depth guard on each recursive step.
+        paths = glob.glob("%s/*" % (path))
+        for path in paths:
+            ensure_permissions(path, user=user, group=group, perms=perms,
+                               recurse=recurse, maxdepth=maxdepth - 1)
+
+
+def check_peer_actions():
+    """Honour service action requests from sync master.
+
+    Check for service action request flags, perform the action then delete the
+    flag.
+    """
+    restart = relation_get(attribute='restart-services-trigger')
+    if restart and os.path.isdir(SYNC_FLAGS_DIR):
+        for flagfile in glob.glob(os.path.join(SYNC_FLAGS_DIR, '*')):
+            flag = os.path.basename(flagfile)
+            # Flag formats: '<unit>.<service>.<action>' or '<unit>.<action>'.
+            # Raw strings keep '\.' a literal dot (avoids invalid escape
+            # warnings under newer pythons).
+            service = None
+            key = re.compile(r"^(.+)?\.(.+)?\.(.+)")
+            res = re.search(key, flag)
+            if res:
+                source = res.group(1)
+                service = res.group(2)
+                action = res.group(3)
+            else:
+                key = re.compile(r"^(.+)?\.(.+)?")
+                res = re.search(key, flag)
+                # Guard against flags matching neither format (previously an
+                # AttributeError on res.group() when res was None).
+                source = res.group(1) if res else None
+                action = res.group(2) if res else None
+
+            # Don't execute actions requested by this unit.
+            # NOTE(review): flags are created with '/' replaced by '-' (see
+            # create_peer_service_actions) but compared here after replacing
+            # '.' - this guard can therefore never match; confirm intent
+            # before changing since the master may rely on re-running its
+            # own actions.
+            if local_unit().replace('.', '-') != source:
+                if action == 'restart':
+                    log("Running action='%s' on service '%s'" %
+                        (action, service), level=DEBUG)
+                    service_restart(service)
+                elif action == 'start':
+                    log("Running action='%s' on service '%s'" %
+                        (action, service), level=DEBUG)
+                    service_start(service)
+                elif action == 'stop':
+                    log("Running action='%s' on service '%s'" %
+                        (action, service), level=DEBUG)
+                    service_stop(service)
+                elif action == 'update-ca-certificates':
+                    log("Running %s" % (action), level=DEBUG)
+                    subprocess.check_call(['update-ca-certificates'])
+                elif action == 'ensure-pki-permissions':
+                    log("Running %s" % (action), level=DEBUG)
+                    ensure_pki_dir_permissions()
+                else:
+                    log("Unknown action flag=%s" % (flag), level=WARNING)
+
+            try:
+                os.remove(flagfile)
+            except OSError:
+                # Flag already removed (e.g. by a concurrent hook) - fine.
+                pass
+
+
+def create_peer_service_actions(action, services):
+    """Mark remote services for action.
+
+    Default action is restart. These actions will be picked up by peer units
+    e.g. we may need to restart services on peer units after certs have been
+    synced.
+
+    :param action: action name e.g. 'restart'.
+    :param services: iterable of service names the peers should act on.
+    """
+    # Flag filename format is <unit>.<service>.<action> - consumed by
+    # check_peer_actions() on peer units.
+    for service in services:
+        flagfile = os.path.join(SYNC_FLAGS_DIR, '%s.%s.%s' %
+                                (local_unit().replace('/', '-'),
+                                 service.strip(), action))
+        log("Creating action %s" % (flagfile), level=DEBUG)
+        write_file(flagfile, content='', owner=SSH_USER, group=KEYSTONE_USER,
+                   perms=0o744)
+
+
+def create_peer_actions(actions):
+    """Create service-less action flag files for peers.
+
+    Flag filename format is <unit>.<action> e.g. 'update-ca-certificates' or
+    'ensure-pki-permissions'; consumed by check_peer_actions() on peers.
+
+    :param actions: iterable of action names.
+    """
+    for action in actions:
+        action = "%s.%s" % (local_unit().replace('/', '-'), action)
+        flagfile = os.path.join(SYNC_FLAGS_DIR, action)
+        log("Creating action %s" % (flagfile), level=DEBUG)
+        write_file(flagfile, content='', owner=SSH_USER, group=KEYSTONE_USER,
+                   perms=0o744)
+
+
+@retry_on_exception(3, base_delay=2, exc_type=subprocess.CalledProcessError)
+def unison_sync(paths_to_sync):
+    """Do unison sync and retry a few times if it fails since peers may not be
+    ready for sync.
+
+    Returns list of synced units or None if one or more peers was not synced.
+
+    :param paths_to_sync: list of filesystem paths to replicate to peers.
+    """
+    log('Synchronizing CA (%s) to all peers.' % (', '.join(paths_to_sync)),
+        level=INFO)
+    keystone_gid = grp.getgrnam(KEYSTONE_USER).gr_gid
+
+    # NOTE(dosaboy): This will sync to all peers who have already provided
+    # their ssh keys. If any existing peers have not provided their keys yet,
+    # they will be silently ignored.
+    unison.sync_to_peers(peer_interface='cluster', paths=paths_to_sync,
+                         user=SSH_USER, verbose=True, gid=keystone_gid,
+                         fatal=True)
+
+    # Fewer authed hosts than peers means at least one peer was skipped
+    # (no public key yet) - report partial sync with None.
+    synced_units = peer_units()
+    if len(unison.collect_authed_hosts('cluster')) != len(synced_units):
+        log("Not all peer units synced due to missing public keys", level=INFO)
+        return None
+    else:
+        return synced_units
+
+
+def get_ssl_sync_request_units():
+    """Get list of units that have requested to be synced.
+
+    Peers advertise 'ssl-sync-required-<unit>' keys on the cluster relation;
+    the requesting unit name is extracted from the key suffix.
+
+    NOTE: this must be called from cluster relation context.
+    """
+    units = []
+    for unit in related_units():
+        settings = relation_get(unit=unit) or {}
+        rkeys = settings.keys()
+        key = re.compile("^ssl-sync-required-(.+)")
+        for rkey in rkeys:
+            res = re.search(key, rkey)
+            if res:
+                units.append(res.group(1))
+
+    return units
+
+
+def is_ssl_cert_master(votes=None):
+    """Return True if this unit is ssl cert master.
+
+    Election logic, in order:
+      1. unanimous peer vote for this unit -> True
+      2. contested vote (more than one candidate) -> False
+      3. otherwise only the juju leader may claim mastership, and only when
+         it is a singleton or no votes have been cast yet.
+
+    :param votes: optional pre-fetched vote list (avoids re-querying peers).
+    """
+
+    votes = votes or get_ssl_cert_master_votes()
+    set_votes = set(votes)
+    # Discard unknown votes
+    if 'unknown' in set_votes:
+        set_votes.remove('unknown')
+
+    # This is the elected ssl-cert-master leader
+    if len(set_votes) == 1 and set_votes == set([local_unit()]):
+        log("This unit is the elected ssl-cert-master "
+            "{}".format(votes), level=DEBUG)
+        return True
+
+    # Contested election
+    if len(set_votes) > 1:
+        log("Did not get consensus from peers on who is ssl-cert-master "
+            "{}".format(votes), level=DEBUG)
+        return False
+
+    # Neither the elected ssl-cert-master leader nor the juju leader
+    if not is_leader():
+        return False
+    # Only the juju elected leader continues
+
+    # Singleton
+    if not peer_units():
+        log("This unit is a singleton and therefore ssl-cert-master",
+            level=DEBUG)
+        return True
+
+    # Early in the process and juju leader
+    if not set_votes:
+        log("This unit is the juju leader and there are no votes yet, "
+            "becoming the ssl-cert-master",
+            level=DEBUG)
+        return True
+    elif len(set_votes) == 1 and set_votes != set([local_unit()]):
+        # NOTE: is_leader() is guaranteed True here - checked above.
+        log("This unit is the juju leader but not yet ssl-cert-master "
+            "(current votes = {})".format(set_votes), level=DEBUG)
+        return False
+
+    # Should never reach here
+    log("Could not determine the ssl-cert-master. Missing edge case. "
+        "(current votes = {})".format(set_votes),
+        level=ERROR)
+    return False
+
+
+def get_ssl_cert_master_votes():
+    """Returns a list of unique votes.
+
+    Collects each peer's 'ssl-cert-master' setting across all cluster
+    relations; duplicates are collapsed via set() so callers compare
+    candidates, not tallies.
+    """
+    votes = []
+    # Gather election results from peers. These will need to be consistent.
+    for rid in relation_ids('cluster'):
+        for unit in related_units(rid):
+            m = relation_get(rid=rid, unit=unit,
+                             attribute='ssl-cert-master')
+            if m is not None:
+                votes.append(m)
+
+    return list(set(votes))
+
+
+def ensure_ssl_cert_master():
+    """Ensure that an ssl cert master has been elected.
+
+    Normally the cluster leader will take control but we allow for this to be
+    ignored since this could be called before the cluster is ready.
+
+    :returns: True if this unit is cert master (and sync may proceed now).
+    """
+    master_override = False
+    elect = is_elected_leader(CLUSTER_RES)
+
+    # If no peers we allow this unit to elect itself as master and do
+    # sync immediately.
+    if not peer_units():
+        elect = True
+        master_override = True
+
+    if elect:
+        votes = get_ssl_cert_master_votes()
+        # We expect all peers to echo this setting
+        if not votes or 'unknown' in votes:
+            log("Notifying peers this unit is ssl-cert-master", level=INFO)
+            for rid in relation_ids('cluster'):
+                settings = {'ssl-cert-master': local_unit()}
+                relation_set(relation_id=rid, relation_settings=settings)
+
+            # Return now and wait for cluster-relation-changed (peer_echo) for
+            # sync.
+            return master_override
+        elif not is_ssl_cert_master(votes):
+            if not master_override:
+                log("Consensus not reached - current master will need to "
+                    "release", level=INFO)
+
+            return master_override
+
+    if not is_ssl_cert_master():
+        log("Not ssl cert master - skipping sync", level=INFO)
+        return False
+
+    return True
+
+
+def stage_paths_for_sync(paths):
+    """Stage the given paths into a tarball (SSL_SYNC_ARCHIVE) for peer sync.
+
+    Recreates SYNC_DIR from scratch, archives each existing path and makes
+    the staging tree readable/writable by the unison (ssh) user.
+
+    :param paths: list of filesystem paths to include in the archive.
+    """
+    # Guard the rmtree: SYNC_DIR may not exist yet on a fresh unit and
+    # shutil.rmtree raises on a missing path.
+    if os.path.isdir(SYNC_DIR):
+        shutil.rmtree(SYNC_DIR)
+    ensure_ssl_dirs()
+    with tarfile.open(SSL_SYNC_ARCHIVE, 'w') as fd:
+        for path in paths:
+            if os.path.exists(path):
+                log("Adding path '%s' sync tarball" % (path), level=DEBUG)
+                fd.add(path)
+            else:
+                log("Path '%s' does not exist - not adding to sync "
+                    "tarball" % (path), level=INFO)
+
+    ensure_permissions(SYNC_DIR, user=SSH_USER, group=KEYSTONE_USER,
+                       perms=0o775, recurse=True)
+
+
+def is_pki_enabled():
+    """Return True if PKI token signing is enabled ('enable-pki' config).
+
+    The config value is a string so it is interpreted with
+    bool_from_string().
+    """
+    enable_pki = config('enable-pki')
+    if enable_pki and bool_from_string(enable_pki):
+        return True
+
+    return False
+
+
+def ensure_pki_cert_paths():
+    """Ensure the PKI token-signing cert directories exist.
+
+    Creates PKI_CERTS_DIR plus its 'certs' and 'privates' subdirs as needed,
+    owned by SSH_USER:KEYSTONE_USER so unison sync can access them.
+    """
+    certs = os.path.join(PKI_CERTS_DIR, 'certs')
+    privates = os.path.join(PKI_CERTS_DIR, 'privates')
+    not_exists = [p for p in [PKI_CERTS_DIR, certs, privates]
+                  if not os.path.exists(p)]
+    if not_exists:
+        log("Configuring token signing cert paths", level=DEBUG)
+        perms = 0o775
+        for path in not_exists:
+            if not os.path.isdir(path):
+                mkdir(path=path, owner=SSH_USER, group=KEYSTONE_USER,
+                      perms=perms)
+            else:
+                # Ensure accessible by ssh user and group (for sync).
+                ensure_permissions(path, user=SSH_USER, group=KEYSTONE_USER,
+                                   perms=perms)
+
+
+def ensure_pki_dir_permissions():
+    """Recursively make the PKI cert tree accessible to the sync user."""
+    # Ensure accessible by unison user and group (for sync).
+    ensure_permissions(PKI_CERTS_DIR, user=SSH_USER, group=KEYSTONE_USER,
+                       perms=0o775, recurse=True)
+
+
+def update_certs_if_available(f):
+    """Decorator: apply any cert archive synced from the cert master, then
+    run the wrapped hook function.
+
+    Reads 'ssl-cert-available-updates' (a tarball path) from this unit's
+    cluster relation data, unpacks it over '/', fixes ownership, and marks
+    the archive consumed by renaming it to '<path>.complete'.
+    """
+    def _inner_update_certs_if_available(*args, **kwargs):
+        # NOTE(review): functools.wraps is not applied, so the wrapped
+        # function's name/docstring are masked by this wrapper.
+        path = None
+        for rid in relation_ids('cluster'):
+            path = relation_get(attribute='ssl-cert-available-updates',
+                                rid=rid, unit=local_unit())
+
+        if path and os.path.exists(path):
+            log("Updating certs from '%s'" % (path), level=DEBUG)
+            # NOTE(review): extractall over '/' trusts archive member paths;
+            # the archive comes from a peer via unison so this is assumed
+            # trusted input - would be unsafe for untrusted archives.
+            with tarfile.open(path) as fd:
+                files = ["/%s" % m.name for m in fd.getmembers()]
+                fd.extractall(path='/')
+
+            for syncfile in files:
+                ensure_permissions(syncfile, user=KEYSTONE_USER,
+                                   group=KEYSTONE_USER,
+                                   perms=0o744, recurse=True)
+
+            # Mark as complete
+            os.rename(path, "%s.complete" % (path))
+        else:
+            log("No cert updates available", level=DEBUG)
+
+        return f(*args, **kwargs)
+
+    return _inner_update_certs_if_available
+
+
+def synchronize_ca(fatal=False):
+    """Broadcast service credentials to peers.
+
+    By default a failure to sync is fatal and will result in a raised
+    exception.
+
+    This function uses a relation setting 'ssl-cert-master' to get some
+    leader stickiness while synchronisation is being carried out. This ensures
+    that the last host to create and broadcast certificates has the option to
+    complete actions before electing the new leader as sync master.
+
+    Returns a dictionary of settings to be set on the cluster relation.
+    """
+    paths_to_sync = []
+    peer_service_actions = {'restart': []}
+    peer_actions = []
+
+    if bool_from_string(config('https-service-endpoints')):
+        log("Syncing all endpoint certs since https-service-endpoints=True",
+            level=DEBUG)
+        paths_to_sync.append(SSL_DIR)
+        paths_to_sync.append(CA_CERT_PATH)
+        # We need to restart peer apache services to ensure they have picked up
+        # new ssl keys.
+        peer_service_actions['restart'].append('apache2')
+        peer_actions.append('update-ca-certificates')
+
+    if bool_from_string(config('use-https')):
+        log("Syncing keystone-endpoint certs since use-https=True",
+            level=DEBUG)
+        paths_to_sync.append(SSL_DIR)
+        paths_to_sync.append(APACHE_SSL_DIR)
+        paths_to_sync.append(CA_CERT_PATH)
+        # We need to restart peer apache services to ensure they have picked up
+        # new ssl keys.
+        peer_service_actions['restart'].append('apache2')
+        peer_actions.append('update-ca-certificates')
+
+    # NOTE: certs needed for token signing e.g. pki and revocation list query.
+    log("Syncing token certs", level=DEBUG)
+    paths_to_sync.append(PKI_CERTS_DIR)
+    peer_actions.append('ensure-pki-permissions')
+
+    if not paths_to_sync:
+        log("Nothing to sync - skipping", level=DEBUG)
+        return {}
+
+    if not os.path.isdir(SYNC_FLAGS_DIR):
+        mkdir(SYNC_FLAGS_DIR, SSH_USER, KEYSTONE_USER, 0o775)
+
+    restart_trigger = None
+    # NOTE: .items() rather than py2-only .iteritems() (works on py2 and py3).
+    for action, services in peer_service_actions.items():
+        services = set(services)
+        if services:
+            restart_trigger = str(uuid.uuid4())
+            create_peer_service_actions(action, services)
+
+    create_peer_actions(peer_actions)
+
+    paths_to_sync = list(set(paths_to_sync))
+    stage_paths_for_sync(paths_to_sync)
+
+    hash1 = hashlib.sha256()
+    for path in paths_to_sync:
+        update_hash_from_path(hash1, path)
+
+    cluster_rel_settings = {'ssl-cert-available-updates': SSL_SYNC_ARCHIVE,
+                            'sync-hash': hash1.hexdigest()}
+
+    synced_units = unison_sync([SSL_SYNC_ARCHIVE, SYNC_FLAGS_DIR])
+    if synced_units:
+        # Format here needs to match that used when peers request sync
+        synced_units = [u.replace('/', '-') for u in synced_units]
+        ssl_synced_units = \
+            json.dumps(synced_units)
+        # NOTE(hopem): we pull this onto the leader settings to avoid
+        # unnecessary cluster relation noise. This is possible because the
+        # setting is only needed by the cert master.
+        if 'ssl-synced-units' not in leader_get():
+            rid = relation_ids('cluster')[0]
+            relation_set_and_migrate_to_leader(relation_id=rid,
+                                               **{'ssl-synced-units':
+                                                  ssl_synced_units})
+        else:
+            leader_set({'ssl-synced-units': ssl_synced_units})
+
+    if restart_trigger:
+        log("Sending restart-services-trigger=%s to all peers" %
+            (restart_trigger), level=DEBUG)
+        cluster_rel_settings['restart-services-trigger'] = restart_trigger
+
+    log("Sync complete", level=DEBUG)
+    return cluster_rel_settings
+
+
+def clear_ssl_synced_units():
+    """Clear the 'synced' units record on the cluster relation.
+
+    If new unit sync requests are set this will ensure that a sync occurs when
+    the sync master receives the requests.
+    """
+    log("Clearing ssl sync units", level=DEBUG)
+    for rid in relation_ids('cluster'):
+        if 'ssl-synced-units' not in leader_get():
+            relation_set_and_migrate_to_leader(relation_id=rid,
+                                               **{'ssl-synced-units': None})
+        else:
+            leader_set({'ssl-synced-units': None})
+
+
+def update_hash_from_path(hash, path, recurse_depth=10):
+    """Recurse through path and update the provided hash for every file found.
+
+    :param hash: hashlib hash object, updated in place. (Parameter name
+        shadows the builtin 'hash'; kept for caller compatibility.)
+    :param path: directory to walk.
+    :param recurse_depth: descent limit guard.
+    """
+    if not recurse_depth:
+        log("Max recursion depth (%s) reached for update_hash_from_path() at "
+            "path='%s' - not going any deeper" % (recurse_depth, path),
+            level=WARNING)
+        return
+
+    for p in glob.glob("%s/*" % path):
+        if os.path.isdir(p):
+            update_hash_from_path(hash, p, recurse_depth=recurse_depth - 1)
+        else:
+            # NOTE(review): text-mode read; under python3 hash.update()
+            # requires bytes - assumes a python2 runtime, confirm.
+            with open(p, 'r') as fd:
+                hash.update(fd.read())
+
+
+def synchronize_ca_if_changed(force=False, fatal=False):
+    """Decorator to perform ssl cert sync if decorated function modifies them
+    in any way.
+
+    If force is True a sync is done regardless.
+
+    Change detection is a sha256 over all files under SSL_DIRS before and
+    after the wrapped call. Only the elected ssl-cert-master unit syncs;
+    nested invocations are suppressed via SSL_SYNC_SEMAPHORE.
+
+    :param force: always sync after the wrapped call, skip hash comparison.
+    :param fatal: passed through to synchronize_ca().
+    """
+    def inner_synchronize_ca_if_changed1(f):
+        def inner_synchronize_ca_if_changed2(*args, **kwargs):
+            # Only sync master can do sync. Ensure (a) we are not nested and
+            # (b) a master is elected and we are it.
+            acquired = SSL_SYNC_SEMAPHORE.acquire(blocking=0)
+            try:
+                if not acquired:
+                    log("Nested sync - ignoring", level=DEBUG)
+                    return f(*args, **kwargs)
+
+                if not ensure_ssl_cert_master():
+                    log("Not ssl-cert-master - ignoring sync", level=DEBUG)
+                    return f(*args, **kwargs)
+
+                peer_settings = {}
+                if not force:
+                    # Hash cert dirs before and after the wrapped call; only
+                    # sync when something actually changed.
+                    hash1 = hashlib.sha256()
+                    for path in SSL_DIRS:
+                        update_hash_from_path(hash1, path)
+
+                    ret = f(*args, **kwargs)
+
+                    hash2 = hashlib.sha256()
+                    for path in SSL_DIRS:
+                        update_hash_from_path(hash2, path)
+
+                    if hash1.hexdigest() != hash2.hexdigest():
+                        log("SSL certs have changed - syncing peers",
+                            level=DEBUG)
+                        peer_settings = synchronize_ca(fatal=fatal)
+                    else:
+                        log("SSL certs have not changed - skipping sync",
+                            level=DEBUG)
+                else:
+                    ret = f(*args, **kwargs)
+                    log("Doing forced ssl cert sync", level=DEBUG)
+                    peer_settings = synchronize_ca(fatal=fatal)
+
+                # If we are the sync master but not leader, ensure we have
+                # relinquished master status.
+                cluster_rids = relation_ids('cluster')
+                if cluster_rids:
+                    master = relation_get('ssl-cert-master',
+                                          rid=cluster_rids[0],
+                                          unit=local_unit())
+                    if not is_leader() and master == local_unit():
+                        log("Re-electing ssl cert master.", level=INFO)
+                        peer_settings['ssl-cert-master'] = 'unknown'
+
+                    if peer_settings:
+                        relation_set(relation_id=cluster_rids[0],
+                                     relation_settings=peer_settings)
+
+                return ret
+            finally:
+                # Always release, including on exceptions from the wrapped
+                # call, or all future syncs would be treated as nested.
+                SSL_SYNC_SEMAPHORE.release()
+
+        return inner_synchronize_ca_if_changed2
+
+    return inner_synchronize_ca_if_changed1
+
+
+@synchronize_ca_if_changed(force=True, fatal=True)
+def force_ssl_sync():
+    """Force SSL sync to all peers.
+
+    This is useful if we need to relinquish ssl-cert-master status while
+    making sure that the new master has up-to-date certs.
+
+    The body is intentionally empty - the decorator does the sync.
+    """
+    return
+
+
+def ensure_ssl_dir():
+    """Ensure juju ssl dir exists and is unison read/writable."""
+    # NOTE(thedac) Snap service restarts will override permissions
+    # in SNAP_LIB_DIR including SSL_DIR
+    perms = 0o775
+    if not os.path.isdir(SSL_DIR):
+        mkdir(SSL_DIR, SSH_USER, KEYSTONE_USER, perms)
+    else:
+        ensure_permissions(SSL_DIR, user=SSH_USER, group=KEYSTONE_USER,
+                           perms=perms)
+
+
+def get_ca(user=KEYSTONE_USER, group=KEYSTONE_USER):
+    """Initialize a new CA object if one hasn't already been loaded.
+
+    This will create a new CA or load an existing one.
+
+    :param user: owner of the CA files.
+    :param group: group of the CA files.
+    :returns: the process-wide ssl.JujuCA singleton.
+    """
+    if not ssl.CA_SINGLETON:
+        ensure_ssl_dir()
+        d_name = '_'.join(SSL_CA_NAME.lower().split(' '))
+        ca = ssl.JujuCA(name=SSL_CA_NAME, user=user, group=group,
+                        ca_dir=os.path.join(SSL_DIR,
+                                            '%s_intermediate_ca' % d_name),
+                        root_ca_dir=os.path.join(SSL_DIR,
+                                                 '%s_root_ca' % d_name))
+
+        # Ensure a master is elected. This should cover the following cases:
+        # * single unit == 'oldest' unit is elected as master
+        # * multi unit + not clustered == 'oldest' unit is elected as master
+        # * multi unit + clustered == cluster leader is elected as master
+        ensure_ssl_cert_master()
+
+        ssl.CA_SINGLETON.append(ca)
+
+    return ssl.CA_SINGLETON[0]
+
+
 def relation_list(rid):
     cmd = [
         'relation-list',
@@ -1446,6 +2094,8 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
             relation_data["api_version"] = get_api_version()
             relation_data["admin_domain_id"] = leader_get(
                 attribute='admin_domain_id')
+            # Get and pass CA bundle settings
+            relation_data.update(get_ssl_ca_settings())
 
             # Allow the remote service to request creation of any additional
             # roles. Currently used by Horizon
@@ -1502,6 +2152,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
             endpoints[ep][x] = v
 
         services = []
+        https_cn = None
         for ep in endpoints:
             # weed out any unrelated relation stuff Juju might have added
             # by ensuring each possible endpiont has appropriate fields
@@ -1521,7 +2172,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
                 https_cns.append(urlparse.urlparse(ep['public_url']).hostname)
                 https_cns.append(urlparse.urlparse(ep['admin_url']).hostname)
 
-        service_username = '_'.join(sorted(services))
+        service_username = '_'.join(services)
 
         # If an admin username prefix is provided, ensure all services use it.
         prefix = config('service-admin-prefix')
@@ -1539,10 +2190,8 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
     service_password = create_service_credentials(service_username,
                                                   new_roles=roles)
     service_domain = None
-    service_domain_id = None
     if get_api_version() > 2:
         service_domain = SERVICE_DOMAIN
-        service_domain_id = manager.resolve_domain_id(SERVICE_DOMAIN)
     service_tenant = config('service-tenant')
     service_tenant_id = manager.resolve_tenant_id(service_tenant,
                                                   domain=service_domain)
@@ -1558,7 +2207,6 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
         "service_username": service_username,
         "service_password": service_password,
         "service_domain": service_domain,
-        "service_domain_id": service_domain_id,
         "service_tenant": service_tenant,
         "service_tenant_id": service_tenant_id,
         "https_keystone": '__null__',
@@ -1571,6 +2219,25 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
         "admin_domain_id": leader_get(attribute='admin_domain_id'),
     }
 
+    # generate or get a new cert/key for service if set to manage certs.
+    https_service_endpoints = config('https-service-endpoints')
+    if https_service_endpoints and bool_from_string(https_service_endpoints):
+        ca = get_ca(user=SSH_USER)
+        # NOTE(jamespage) may have multiple cns to deal with to iterate
+        https_cns = set(https_cns)
+        for https_cn in https_cns:
+            cert, key = ca.get_cert_and_key(common_name=https_cn)
+            relation_data['ssl_cert_{}'.format(https_cn)] = b64encode(cert)
+            relation_data['ssl_key_{}'.format(https_cn)] = b64encode(key)
+
+        # NOTE(jamespage) for backwards compatibility
+        cert, key = ca.get_cert_and_key(common_name=internal_cn)
+        relation_data['ssl_cert'] = b64encode(cert)
+        relation_data['ssl_key'] = b64encode(key)
+
+        # Get and pass CA bundle settings
+        relation_data.update(get_ssl_ca_settings())
+
     peer_store_and_set(relation_id=relation_id, **relation_data)
     # NOTE(dosaboy): '__null__' settings are for peer relation only so that
     # settings can flushed so we filter them out for non-peer relation.
@@ -1636,10 +2303,30 @@ def add_credentials_to_keystone(relation_id=None, remote_unit=None):
     }
     if domain:
         relation_data['domain'] = domain
+    # Get and pass CA bundle settings
+    relation_data.update(get_ssl_ca_settings())
 
     peer_store_and_set(relation_id=relation_id, **relation_data)
 
 
+def get_ssl_ca_settings():
+    """ Get the Certificate Authority settings required to use the CA.
+
+    :returns: Dictionary with https_keystone and ca_cert set, or an empty
+        dict when 'https-service-endpoints' is not enabled.
+    """
+    ca_data = {}
+    https_service_endpoints = config('https-service-endpoints')
+    if (https_service_endpoints and
+            bool_from_string(https_service_endpoints)):
+        # Pass CA cert as client will need it to
+        # verify https connections
+        ca = get_ca(user=SSH_USER)
+        ca_bundle = ca.get_ca_bundle()
+        ca_data['https_keystone'] = 'True'
+        ca_data['ca_cert'] = b64encode(ca_bundle)
+    return ca_data
+
+
 def get_protocol():
     """Determine the http protocol
 
@@ -1783,7 +2470,7 @@ def is_db_ready(use_current_context=False, db_rel=None):
     returns True otherwise False.
     """
     key = 'allowed_units'
-    db_rels = ['shared-db']
+    db_rels = ['shared-db', 'pgsql-db']
     if db_rel:
         db_rels = [db_rel]
 
@@ -1819,10 +2506,23 @@ def is_db_ready(use_current_context=False, db_rel=None):
     return not rel_has_units
 
 
+def determine_usr_bin():
+    """Return the /usr/bin path for Apache2 vhost config.
+
+    The /usr/bin path will be located in the virtualenv if the charm
+    is configured to deploy keystone from source.
+
+    :returns: string path to the bin dir holding the keystone wsgi scripts.
+    """
+    if git_install_requested():
+        projects_yaml = config('openstack-origin-git')
+        projects_yaml = git_default_repos(projects_yaml)
+        return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
+    else:
+        return '/usr/bin'
+
+
 def determine_python_path():
     """Return the python-path
 
-    Determine if snap installed and return the appropriate python path.
+    Determine if git or snap installed and return the appropriate python path.
     Returns None unless the charm if neither condition is true.
 
     :returns: string python path or None
@@ -1830,10 +2530,108 @@ def determine_python_path():
     _python_path = 'lib/python2.7/site-packages'
     if snap_install_requested():
         return os.path.join(SNAP_BASE_DIR, _python_path)
+    elif git_install_requested():
+        projects_yaml = config('openstack-origin-git')
+        projects_yaml = git_default_repos(projects_yaml)
+        return os.path.join(git_pip_venv_dir(projects_yaml), _python_path)
     else:
         return None
 
 
+def git_install(projects_yaml):
+    """Perform setup, and install git repos specified in yaml parameter.
+
+    No-op unless a git-based install has been requested via config.
+
+    :param projects_yaml: 'openstack-origin-git' YAML config.
+    """
+    if git_install_requested():
+        git_pre_install()
+        projects_yaml = git_default_repos(projects_yaml)
+        git_clone_and_install(projects_yaml, core_project='keystone')
+        git_post_install(projects_yaml)
+
+
+def git_pre_install():
+    """Perform keystone pre-install setup.
+
+    Creates the keystone system user/group plus the state, cache and log
+    directories (and an empty keystone.log) with keystone ownership.
+    """
+    dirs = [
+        '/var/lib/keystone',
+        '/var/lib/keystone/cache',
+        '/var/log/keystone',
+    ]
+
+    logs = [
+        '/var/log/keystone/keystone.log',
+    ]
+
+    adduser('keystone', shell='/bin/bash', system_user=True,
+            home_dir='/var/lib/keystone')
+    add_group('keystone', system_group=True)
+    add_user_to_group('keystone', 'keystone')
+
+    # force=False: leave pre-existing dirs (and their perms) untouched.
+    for d in dirs:
+        mkdir(d, owner=KEYSTONE_USER, group=KEYSTONE_USER, perms=0o755,
+              force=False)
+
+    for l in logs:
+        write_file(l, '', owner=KEYSTONE_USER, group=KEYSTONE_USER,
+                   perms=0o600)
+
+
+def git_post_install(projects_yaml):
+    """Perform keystone post-install setup.
+
+    Installs mysql-python into the venv, seeds /etc/keystone from the git
+    checkout, symlinks keystone-manage, renders logging config and (pre
+    liberty) an upstart job, then restarts keystone unless paused.
+
+    :param projects_yaml: 'openstack-origin-git' YAML config.
+    """
+    http_proxy = git_yaml_value(projects_yaml, 'http_proxy')
+    if http_proxy:
+        pip_install('mysql-python', proxy=http_proxy,
+                    venv=git_pip_venv_dir(projects_yaml))
+    else:
+        pip_install('mysql-python',
+                    venv=git_pip_venv_dir(projects_yaml))
+
+    src_etc = os.path.join(git_src_dir(projects_yaml, 'keystone'), 'etc')
+    configs = {
+        'src': src_etc,
+        'dest': '/etc/keystone',
+    }
+
+    # Replace any existing /etc/keystone wholesale with the checkout's etc.
+    if os.path.exists(configs['dest']):
+        shutil.rmtree(configs['dest'])
+    shutil.copytree(configs['src'], configs['dest'])
+
+    # NOTE(coreycb): Need to find better solution than bin symlinks.
+    symlinks = [
+        {'src': os.path.join(git_pip_venv_dir(projects_yaml),
+                             'bin/keystone-manage'),
+         'link': '/usr/local/bin/keystone-manage'},
+    ]
+
+    for s in symlinks:
+        if os.path.lexists(s['link']):
+            os.remove(s['link'])
+        os.symlink(s['src'], s['link'])
+
+    render('git/logging.conf', '/etc/keystone/logging.conf', {}, perms=0o644)
+
+    bin_dir = os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
+    # The charm runs the keystone API under apache2 for openstack liberty
+    # onward.  Prior to liberty upstart is used.
+    if CompareOpenStackReleases(os_release('keystone')) < 'liberty':
+        keystone_context = {
+            'service_description': 'Keystone API server',
+            'service_name': 'Keystone',
+            'user_name': 'keystone',
+            'start_dir': '/var/lib/keystone',
+            'process_name': 'keystone',
+            'executable_name': os.path.join(bin_dir, 'keystone-all'),
+            'config_files': ['/etc/keystone/keystone.conf'],
+        }
+
+        keystone_context['log_file'] = '/var/log/keystone/keystone.log'
+        templates_dir = 'hooks/charmhelpers/contrib/openstack/templates'
+        templates_dir = os.path.join(charm_dir(), templates_dir)
+        render('git.upstart', '/etc/init/keystone.conf', keystone_context,
+               perms=0o644, templates_dir=templates_dir)
+
+    # Don't restart if the unit is supposed to be paused.
+    if not is_unit_paused_set():
+        service_restart(keystone_service())
+
+
 def get_optional_interfaces():
     """Return the optional interfaces that should be checked if the relavent
     relations have appeared.
@@ -1976,11 +2774,3 @@ def post_snap_install():
     if os.path.exists(PASTE_SRC):
         log("Perfoming post snap install tasks", INFO)
         shutil.copy(PASTE_SRC, PASTE_DST)
-
-
-def restart_keystone():
-    if not is_unit_paused_set():
-        if snap_install_requested():
-            service_restart('snap.keystone.*')
-        else:
-            service_restart(keystone_service())
diff --git a/hooks/pgsql-db-relation-changed b/hooks/pgsql-db-relation-changed
new file mode 100755
index 0000000000000000000000000000000000000000..505db8a7b867de2c46c8ef6e79bc57c1a0294ca1
--- /dev/null
+++ b/hooks/pgsql-db-relation-changed
@@ -0,0 +1,1102 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+import json
+import os
+import sys
+
+from subprocess import check_call
+
+from charmhelpers.contrib import unison
+from charmhelpers.core import unitdata
+
+from charmhelpers.core.hookenv import (
+    Hooks,
+    UnregisteredHookError,
+    config,
+    is_relation_made,
+    log,
+    local_unit,
+    DEBUG,
+    INFO,
+    WARNING,
+    ERROR,
+    relation_get,
+    relation_ids,
+    relation_set,
+    related_units,
+    status_set,
+    open_port,
+    is_leader,
+)
+
+from charmhelpers.core.host import (
+    mkdir,
+    service_pause,
+    service_stop,
+    service_start,
+    service_restart,
+)
+
+from charmhelpers.core.strutils import (
+    bool_from_string,
+)
+
+from charmhelpers.fetch import (
+    apt_install, apt_update,
+    filter_installed_packages
+)
+
+from charmhelpers.contrib.openstack.utils import (
+    config_value_changed,
+    configure_installation_source,
+    git_install_requested,
+    openstack_upgrade_available,
+    sync_db_with_multi_ipv6_addresses,
+    os_release,
+    pausable_restart_on_change as restart_on_change,
+    is_unit_paused_set,
+    CompareOpenStackReleases,
+    snap_install_requested,
+    install_os_snaps,
+    get_snaps_install_info_from_origin,
+    enable_memcache,
+)
+
+from keystone_utils import (
+    add_service_to_keystone,
+    add_credentials_to_keystone,
+    determine_packages,
+    disable_unused_apache_sites,
+    do_openstack_upgrade_reexec,
+    ensure_initial_admin,
+    get_admin_passwd,
+    git_install,
+    migrate_database,
+    save_script_rc,
+    post_snap_install,
+    synchronize_ca_if_changed,
+    register_configs,
+    restart_map,
+    services,
+    CLUSTER_RES,
+    KEYSTONE_CONF,
+    KEYSTONE_USER,
+    POLICY_JSON,
+    TOKEN_FLUSH_CRON_FILE,
+    SSH_USER,
+    setup_ipv6,
+    send_notifications,
+    check_peer_actions,
+    get_ssl_sync_request_units,
+    is_ssl_cert_master,
+    is_db_ready,
+    clear_ssl_synced_units,
+    is_db_initialised,
+    update_certs_if_available,
+    ensure_ssl_dir,
+    ensure_pki_dir_permissions,
+    ensure_permissions,
+    force_ssl_sync,
+    filter_null,
+    ensure_ssl_dirs,
+    ensure_pki_cert_paths,
+    is_service_present,
+    delete_service_entry,
+    assess_status,
+    run_in_apache,
+    restart_function_map,
+    WSGI_KEYSTONE_API_CONF,
+    SHIBSP_FILES,
+    OIDC_MAPPING_FILE,
+    SAML2_MAPPING_FILE,
+    install_apache_error_handler,
+    restart_pid_check,
+    get_api_version,
+    ADMIN_DOMAIN,
+    ADMIN_PROJECT,
+    create_or_show_domain,
+    keystone_service,
+)
+
+from charmhelpers.contrib.hahelpers.cluster import (
+    is_elected_leader,
+    get_hacluster_config,
+    peer_units,
+    https,
+    is_clustered,
+)
+
+from charmhelpers.contrib.openstack.ha.utils import (
+    update_dns_ha_resource_params,
+    expect_ha,
+)
+
+from charmhelpers.payload.execd import execd_preinstall
+from charmhelpers.contrib.peerstorage import (
+    peer_retrieve_by_prefix,
+    peer_echo,
+    relation_get as relation_get_and_migrate,
+)
+from charmhelpers.contrib.openstack.ip import (
+    ADMIN,
+    resolve_address,
+)
+from charmhelpers.contrib.network.ip import (
+    get_iface_for_address,
+    get_netmask_for_address,
+    is_ipv6,
+    get_relation_ip,
+)
+from charmhelpers.contrib.openstack.context import ADDRESS_TYPES
+
+from charmhelpers.contrib.charmsupport import nrpe
+
+from charmhelpers.contrib.hardening.harden import harden
+
+hooks = Hooks()
+CONFIGS = register_configs()
+
+
+@hooks.hook('install.real')
+@harden()
+def install():
+    """Install the keystone payload (apt and, optionally, snap).
+
+    Leaves the keystone service stopped/paused where appropriate so that
+    subsequent config-changed/relation hooks perform the configuration.
+    """
+    status_set('maintenance', 'Executing pre-install')
+    execd_preinstall()
+    configure_installation_source(config('openstack-origin'))
+    status_set('maintenance', 'Installing apt packages')
+    apt_update()
+    apt_install(determine_packages(), fatal=True)
+
+    if snap_install_requested():
+        status_set('maintenance', 'Installing keystone snap')
+        # NOTE(thedac) Setting devmode until LP#1719636 is fixed
+        install_os_snaps(
+            get_snaps_install_info_from_origin(
+                ['keystone'],
+                config('openstack-origin'),
+                mode='devmode'))
+        post_snap_install()
+        # Keep the snap-provided service down until it is configured.
+        service_stop('snap.keystone.*')
+    else:
+        # unconfigured keystone service will prevent start of haproxy in some
+        # circumstances. make sure haproxy runs. LP #1648396
+        service_stop('keystone')
+        service_start('haproxy')
+        if run_in_apache():
+            disable_unused_apache_sites()
+            if not git_install_requested():
+                # API is served by apache2/mod_wsgi; pause the native service
+                # so it does not compete for the port.
+                service_pause('keystone')
+        install_apache_error_handler(config('no-user-mapping-url'))
+
+    status_set('maintenance', 'Git install')
+    git_install(config('openstack-origin-git'))
+
+    # Peer ssl/cert sync is performed over unison as SSH_USER.
+    unison.ensure_user(user=SSH_USER, group=SSH_USER)
+    unison.ensure_user(user=SSH_USER, group=KEYSTONE_USER)
+
+
+@hooks.hook('config-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed(fatal=True)
+@harden()
+def config_changed():
+    """Apply charm configuration changes.
+
+    Handles ipv6 setup, unison user/homedir creation, source upgrades
+    (git or archive), refreshes cluster peer relations, then defers the
+    bulk of the work to config_changed_postupgrade().
+    """
+    if config('prefer-ipv6'):
+        status_set('maintenance', 'configuring ipv6')
+        setup_ipv6()
+        sync_db_with_multi_ipv6_addresses(config('database'),
+                                          config('database-user'))
+
+    unison.ensure_user(user=SSH_USER, group=SSH_USER)
+    unison.ensure_user(user=SSH_USER, group=KEYSTONE_USER)
+    homedir = unison.get_homedir(SSH_USER)
+    if not os.path.isdir(homedir):
+        mkdir(homedir, SSH_USER, SSH_USER, 0o775)
+
+    if git_install_requested():
+        if config_value_changed('openstack-origin-git'):
+            status_set('maintenance', 'Running Git install')
+            git_install(config('openstack-origin-git'))
+    elif not config('action-managed-upgrade'):
+        if openstack_upgrade_available('keystone'):
+            status_set('maintenance', 'Running openstack upgrade')
+            do_openstack_upgrade_reexec(configs=CONFIGS)
+
+    # Re-advertise addresses to peers; the ssl sync request is sent later
+    # by config_changed_postupgrade() so suppress it here.
+    for r_id in relation_ids('cluster'):
+        cluster_joined(rid=r_id, ssl_sync_request=False)
+
+    config_changed_postupgrade()
+
+
+@hooks.hook('config-changed-postupgrade')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed(fatal=True)
+@harden()
+def config_changed_postupgrade():
+    """Apply configuration that must run after any package upgrade.
+
+    Rewrites templated configs, (re)configures apache/wsgi, federation
+    (OIDC/SAML2), https, nrpe and PKI, then re-fires identity,
+    domain-backend and ha relations.
+    """
+    # Ensure ssl dir exists and is unison-accessible
+    ensure_ssl_dir()
+
+    if not snap_install_requested():
+        check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])
+
+    ensure_ssl_dirs()
+
+    save_script_rc()
+    release = os_release('keystone')
+    if run_in_apache(release=release):
+        # Need to ensure mod_wsgi is installed and apache2 is reloaded
+        # immediately as charm queries its local keystone before restart
+        # decorator can fire
+        apt_install(filter_installed_packages(determine_packages()))
+        # when deployed from source, init scripts aren't installed
+        if not git_install_requested():
+            service_pause('keystone')
+
+        disable_unused_apache_sites()
+        if WSGI_KEYSTONE_API_CONF in CONFIGS.templates:
+            CONFIGS.write(WSGI_KEYSTONE_API_CONF)
+        if not is_unit_paused_set():
+            restart_pid_check('apache2')
+        if config('enable-oidc'):
+            CONFIGS.write(OIDC_MAPPING_FILE)
+            configure_oidc()
+        if config('enable-saml2'):
+            CONFIGS.write(SAML2_MAPPING_FILE)
+            for shibsp_file in SHIBSP_FILES:
+                CONFIGS.write(shibsp_file)
+            configure_saml2()
+            # Bounce shibd so it picks up the new SP config (unless paused).
+            service_stop('shibd')
+            if not is_unit_paused_set():
+                service_start('shibd')
+        install_apache_error_handler(config('no-user-mapping-url'))
+
+    if enable_memcache(release=release):
+        # If charm or OpenStack have been upgraded then the list of required
+        # packages may have changed so ensure they are installed.
+        apt_install(filter_installed_packages(determine_packages()))
+
+    configure_https()
+    open_port(config('service-port'))
+
+    update_nrpe_config()
+
+    CONFIGS.write_all()
+
+    if snap_install_requested() and not is_unit_paused_set():
+        service_restart('snap.keystone.*')
+
+    initialise_pki()
+
+    update_all_identity_relation_units()
+    update_all_domain_backends()
+
+    # Ensure sync request is sent out (needed for any/all ssl change)
+    send_ssl_sync_request()
+
+    for r_id in relation_ids('ha'):
+        ha_joined(relation_id=r_id)
+
+
+@synchronize_ca_if_changed(fatal=True)
+def initialise_pki():
+    """Create certs and keys required for token signing.
+
+    Used for PKI and signing token revocation list.
+
+    NOTE: keystone.conf [signing] section must be up-to-date prior to
+          executing this.
+    """
+    if CompareOpenStackReleases(os_release('keystone-common')) >= 'pike':
+        # pike dropped support for PKI token; skip function
+        return
+    ensure_pki_cert_paths()
+    # Only create certs on a lone unit or on the elected ssl-cert master;
+    # other peers receive the certs via sync.
+    if not peer_units() or is_ssl_cert_master():
+        log("Ensuring PKI token certs created", level=DEBUG)
+        if snap_install_requested():
+            cmd = ['/snap/bin/keystone-manage', 'pki_setup',
+                   '--keystone-user', KEYSTONE_USER,
+                   '--keystone-group', KEYSTONE_USER]
+            _log_dir = '/var/snap/keystone/common/log'
+        else:
+            cmd = ['keystone-manage', 'pki_setup',
+                   '--keystone-user', KEYSTONE_USER,
+                   '--keystone-group', KEYSTONE_USER]
+            _log_dir = '/var/log/keystone'
+        check_call(cmd)
+
+        # Ensure logfile has keystone perms since we may have just created it
+        # with root.
+        ensure_permissions(_log_dir, user=KEYSTONE_USER,
+                           group=KEYSTONE_USER, perms=0o744)
+        ensure_permissions('{}/keystone.log'.format(_log_dir),
+                           user=KEYSTONE_USER, group=KEYSTONE_USER,
+                           perms=0o644)
+
+    ensure_pki_dir_permissions()
+
+
+@hooks.hook('shared-db-relation-joined')
+def db_joined():
+    if is_relation_made('pgsql-db'):
+        # error, postgresql is used
+        e = ('Attempting to associate a mysql database when there is already '
+             'associated a postgresql one')
+        log(e, level=ERROR)
+        raise Exception(e)
+
+    if config('prefer-ipv6'):
+        sync_db_with_multi_ipv6_addresses(config('database'),
+                                          config('database-user'))
+    else:
+        # Avoid churn check for access-network early
+        access_network = None
+        for unit in related_units():
+            access_network = relation_get(unit=unit,
+                                          attribute='access-network')
+            if access_network:
+                break
+        host = get_relation_ip('shared-db', cidr_network=access_network)
+
+        relation_set(database=config('database'),
+                     username=config('database-user'),
+                     hostname=host)
+
+
+@hooks.hook('pgsql-db-relation-joined')
+def pgsql_db_joined():
+    if is_relation_made('shared-db'):
+        # raise error
+        e = ('Attempting to associate a postgresql database when there'
+             ' is already associated a mysql one')
+        log(e, level=ERROR)
+        raise Exception(e)
+
+    relation_set(database=config('database'))
+
+
+def update_all_identity_relation_units(check_db_ready=True):
+    """Re-fire identity hooks for every related service.
+
+    No-op when the unit is paused, when the db is not ready (unless
+    check_db_ready is False) or when the db is not yet initialised.
+
+    :param check_db_ready: skip the is_db_ready() guard when False (callers
+                           that already performed the check pass False to
+                           avoid a duplicate lookup).
+    """
+    if is_unit_paused_set():
+        return
+    CONFIGS.write_all()
+    configure_https()
+    if check_db_ready and not is_db_ready():
+        log('Allowed_units list provided and this unit not present',
+            level=INFO)
+        return
+
+    if not is_db_initialised():
+        log("Database not yet initialised - deferring identity-relation "
+            "updates", level=INFO)
+        return
+
+    # Only the leader manipulates the admin accounts.
+    if is_elected_leader(CLUSTER_RES):
+        ensure_initial_admin(config)
+
+    log('Firing identity_changed hook for all related services.')
+    for rid in relation_ids('identity-service'):
+        for unit in related_units(rid):
+            identity_changed(relation_id=rid, remote_unit=unit)
+    log('Firing admin_relation_changed hook for all related services.')
+    for rid in relation_ids('identity-admin'):
+        admin_relation_changed(rid)
+    log('Firing identity_credentials_changed hook for all related services.')
+    for rid in relation_ids('identity-credentials'):
+        for unit in related_units(rid):
+            identity_credentials_changed(relation_id=rid, remote_unit=unit)
+
+
+@synchronize_ca_if_changed(force=True)
+def update_all_identity_relation_units_force_sync():
+    # Same as update_all_identity_relation_units() but the decorator forces
+    # an SSL cert sync to peers as part of the update.
+    update_all_identity_relation_units()
+
+
+def update_all_domain_backends():
+    """Re-trigger hooks for all domain-backend relations/units"""
+    for rid in relation_ids('domain-backend'):
+        for unit in related_units(rid):
+            domain_backend_changed(relation_id=rid, unit=unit)
+
+
+def leader_init_db_if_ready(use_current_context=False):
+    """ Initialise the keystone db if it is ready and mark it as initialised.
+
+    NOTE: this must be idempotent.
+
+    :param use_current_context: passed to is_db_ready(); when True only the
+                                relation context the hook is running in is
+                                consulted.
+    """
+    if not is_elected_leader(CLUSTER_RES):
+        log("Not leader - skipping db init", level=DEBUG)
+        return
+
+    if is_db_initialised():
+        log("Database already initialised - skipping db init", level=DEBUG)
+        update_all_identity_relation_units(check_db_ready=False)
+        return
+
+    # Bugs 1353135 & 1187508. Dbs can appear to be ready before the
+    # units acl entry has been added. So, if the db supports passing
+    # a list of permitted units then check if we're in the list.
+    if not is_db_ready(use_current_context=use_current_context):
+        log('Allowed_units list provided and this unit not present',
+            level=INFO)
+        return
+
+    migrate_database()
+    # Ensure any existing service entries are updated in the
+    # new database backend. Also avoid duplicate db ready check.
+    update_all_identity_relation_units(check_db_ready=False)
+    update_all_domain_backends()
+
+
+@hooks.hook('shared-db-relation-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed()
+def db_changed():
+    if 'shared-db' not in CONFIGS.complete_contexts():
+        log('shared-db relation incomplete. Peer not ready?')
+    else:
+        CONFIGS.write(KEYSTONE_CONF)
+        leader_init_db_if_ready(use_current_context=True)
+        if CompareOpenStackReleases(
+                os_release('keystone-common')) >= 'liberty':
+            CONFIGS.write(POLICY_JSON)
+
+
+@hooks.hook('pgsql-db-relation-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed()
+def pgsql_db_changed():
+    if 'pgsql-db' not in CONFIGS.complete_contexts():
+        log('pgsql-db relation incomplete. Peer not ready?')
+    else:
+        CONFIGS.write(KEYSTONE_CONF)
+        leader_init_db_if_ready(use_current_context=True)
+        if CompareOpenStackReleases(
+                os_release('keystone-common')) >= 'liberty':
+            CONFIGS.write(POLICY_JSON)
+
+
+@hooks.hook('identity-service-relation-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed()
+def identity_changed(relation_id=None, remote_unit=None):
+    CONFIGS.write_all()
+
+    notifications = {}
+    if is_elected_leader(CLUSTER_RES):
+        if not is_db_ready():
+            log("identity-service-relation-changed hook fired before db "
+                "ready - deferring until db ready", level=WARNING)
+            return
+
+        if not is_db_initialised():
+            log("Database not yet initialised - deferring identity-relation "
+                "updates", level=INFO)
+            return
+
+        if expect_ha() and not is_clustered():
+            log("Expected to be HA but no hacluster relation yet", level=INFO)
+            return
+
+        add_service_to_keystone(relation_id, remote_unit)
+        if is_service_present('neutron', 'network'):
+            delete_service_entry('quantum', 'network')
+        settings = relation_get(rid=relation_id, unit=remote_unit)
+        service = settings.get('service', None)
+        if service:
+            # If service is known and endpoint has changed, notify service if
+            # it is related with notifications interface.
+            csum = hashlib.sha256()
+            # We base the decision to notify on whether these parameters have
+            # changed (if csum is unchanged from previous notify, relation will
+            # not fire).
+            csum.update(settings.get('public_url', None))
+            csum.update(settings.get('admin_url', None))
+            csum.update(settings.get('internal_url', None))
+            notifications['%s-endpoint-changed' % (service)] = csum.hexdigest()
+    else:
+        # Each unit needs to set the db information otherwise if the unit
+        # with the info dies the settings die with it Bug# 1355848
+        for rel_id in relation_ids('identity-service'):
+            peerdb_settings = peer_retrieve_by_prefix(rel_id)
+            # Ensure the null'd settings are unset in the relation.
+            peerdb_settings = filter_null(peerdb_settings)
+            if 'service_password' in peerdb_settings:
+                relation_set(relation_id=rel_id, **peerdb_settings)
+
+        log('Deferring identity_changed() to service leader.')
+
+    if notifications:
+        send_notifications(notifications)
+
+
+@hooks.hook('identity-credentials-relation-joined',
+            'identity-credentials-relation-changed')
+def identity_credentials_changed(relation_id=None, remote_unit=None):
+    """Update the identity credentials relation on change
+
+    Calls add_credentials_to_keystone
+
+    :param relation_id: Relation id of the relation
+    :param remote_unit: Related unit on the relation
+    """
+    if is_elected_leader(CLUSTER_RES):
+        if expect_ha() and not is_clustered():
+            log("Expected to be HA but no hacluster relation yet", level=INFO)
+            return
+        if not is_db_ready():
+            log("identity-credentials-relation-changed hook fired before db "
+                "ready - deferring until db ready", level=WARNING)
+            return
+
+        if not is_db_initialised():
+            log("Database not yet initialised - deferring "
+                "identity-credentials-relation updates", level=INFO)
+            return
+
+        # Create the tenant user
+        add_credentials_to_keystone(relation_id, remote_unit)
+    else:
+        log('Deferring identity_credentials_changed() to service leader.')
+
+
+def send_ssl_sync_request():
+    """Set sync request on cluster relation.
+
+    Value set equals number of ssl configs currently enabled so that if they
+    change, we ensure that certs are synced. This setting is consumed by
+    cluster-relation-changed ssl master. We also clear the 'synced' set to
+    guarantee that a sync will occur.
+
+    Note the we do nothing if the setting is already applied.
+    """
+    unit = local_unit().replace('/', '-')
+    # Start with core config (e.g. used for signing revoked token list)
+    ssl_config = 0b1
+
+    use_https = config('use-https')
+    if use_https and bool_from_string(use_https):
+        ssl_config ^= 0b10
+
+    https_service_endpoints = config('https-service-endpoints')
+    if (https_service_endpoints and
+            bool_from_string(https_service_endpoints)):
+        ssl_config ^= 0b100
+
+    enable_pki = config('enable-pki')
+    if enable_pki and bool_from_string(enable_pki):
+        ssl_config ^= 0b1000
+
+    key = 'ssl-sync-required-%s' % (unit)
+    settings = {key: ssl_config}
+
+    prev = 0b0
+    rid = None
+    for rid in relation_ids('cluster'):
+        for unit in related_units(rid):
+            _prev = relation_get(rid=rid, unit=unit, attribute=key) or 0b0
+            if _prev and _prev > prev:
+                prev = bin(_prev)
+
+    if rid and prev ^ ssl_config:
+        if is_leader():
+            clear_ssl_synced_units()
+
+        log("Setting %s=%s" % (key, bin(ssl_config)), level=DEBUG)
+        relation_set(relation_id=rid, relation_settings=settings)
+
+
+@hooks.hook('cluster-relation-joined')
+def cluster_joined(rid=None, ssl_sync_request=True):
+    unison.ssh_authorized_peers(user=SSH_USER,
+                                group=SSH_USER,
+                                peer_interface='cluster',
+                                ensure_local_user=True)
+
+    settings = {}
+
+    for addr_type in ADDRESS_TYPES:
+        address = get_relation_ip(
+            addr_type,
+            cidr_network=config('os-{}-network'.format(addr_type)))
+        if address:
+            settings['{}-address'.format(addr_type)] = address
+
+    settings['private-address'] = get_relation_ip('cluster')
+
+    relation_set(relation_id=rid, relation_settings=settings)
+
+    if ssl_sync_request:
+        send_ssl_sync_request()
+
+
+@hooks.hook('cluster-relation-changed')
+@restart_on_change(restart_map(), stopstart=True)
+@update_certs_if_available
+def cluster_changed():
+    """React to peer changes: echo shared data and reconcile ssl sync state."""
+    unison.ssh_authorized_peers(user=SSH_USER,
+                                group=SSH_USER,
+                                peer_interface='cluster',
+                                ensure_local_user=True)
+    # NOTE(jamespage) re-echo passwords for peer storage
+    echo_whitelist = ['_passwd', 'identity-service:',
+                      'db-initialised', 'ssl-cert-available-updates']
+    # Don't echo if leader since a re-election may be in progress.
+    if not is_leader():
+        echo_whitelist.append('ssl-cert-master')
+
+    log("Peer echo whitelist: %s" % (echo_whitelist), level=DEBUG)
+    peer_echo(includes=echo_whitelist, force=True)
+
+    check_peer_actions()
+
+    initialise_pki()
+
+    if is_leader():
+        # Figure out if we need to mandate a sync
+        units = get_ssl_sync_request_units()
+        synced_units = relation_get_and_migrate(attribute='ssl-synced-units',
+                                                unit=local_unit())
+        diff = None
+        if synced_units:
+            synced_units = json.loads(synced_units)
+            diff = set(units).symmetric_difference(set(synced_units))
+    else:
+        units = None
+
+    # NOTE: synced_units/diff are only bound on the leader path; the falsy
+    # 'units' guard short-circuits before they are evaluated otherwise.
+    if units and (not synced_units or diff):
+        log("New peers joined and need syncing - %s" %
+            (', '.join(units)), level=DEBUG)
+        update_all_identity_relation_units_force_sync()
+    else:
+        update_all_identity_relation_units()
+
+    if not is_leader() and is_ssl_cert_master():
+        # Force a sync and trigger a sync master re-election since we are not
+        # leader anymore.
+        force_ssl_sync()
+    else:
+        CONFIGS.write_all()
+
+
+@hooks.hook('leader-elected')
+@restart_on_change(restart_map(), stopstart=True)
+def leader_elected():
+    log('Unit has been elected leader.', level=DEBUG)
+    # When the local unit has been elected the leader, update the cron jobs
+    # to ensure that the cron jobs are active on this unit.
+    CONFIGS.write(TOKEN_FLUSH_CRON_FILE)
+
+    update_all_identity_relation_units()
+
+    update_all_identity_relation_units()
+
+
+@hooks.hook('leader-settings-changed')
+@restart_on_change(restart_map(), stopstart=True)
+def leader_settings_changed():
+    # Since minions are notified of a regime change via the
+    # leader-settings-changed hook, rewrite the token flush cron job to make
+    # sure only the leader is running the cron job.
+    CONFIGS.write(TOKEN_FLUSH_CRON_FILE)
+
+    update_all_identity_relation_units()
+
+
+@hooks.hook('ha-relation-joined')
+def ha_joined(relation_id=None):
+    """Publish resource definitions to the hacluster subordinate.
+
+    Configures either DNS HA resources or one VIP resource per configured
+    vip, plus the haproxy lsb resource and its clone.
+
+    :param relation_id: ha relation id to publish on (current if None)
+    """
+    cluster_config = get_hacluster_config()
+    resources = {
+        'res_ks_haproxy': 'lsb:haproxy',
+    }
+    resource_params = {
+        'res_ks_haproxy': 'op monitor interval="5s"'
+    }
+
+    if config('dns-ha'):
+        update_dns_ha_resource_params(relation_id=relation_id,
+                                      resources=resources,
+                                      resource_params=resource_params)
+    else:
+        vip_group = []
+        for vip in cluster_config['vip'].split():
+            # Pick the heartbeat agent matching the address family.
+            if is_ipv6(vip):
+                res_ks_vip = 'ocf:heartbeat:IPv6addr'
+                vip_params = 'ipv6addr'
+            else:
+                res_ks_vip = 'ocf:heartbeat:IPaddr2'
+                vip_params = 'ip'
+
+            iface = (get_iface_for_address(vip) or
+                     config('vip_iface'))
+            netmask = (get_netmask_for_address(vip) or
+                       config('vip_cidr'))
+
+            if iface is not None:
+                vip_key = 'res_ks_{}_vip'.format(iface)
+                if vip_key in vip_group:
+                    # Same iface carries more than one vip: disambiguate the
+                    # key, or skip if this exact vip is already defined.
+                    if vip not in resource_params[vip_key]:
+                        vip_key = '{}_{}'.format(vip_key, vip_params)
+                    else:
+                        log("Resource '%s' (vip='%s') already exists in "
+                            "vip group - skipping" % (vip_key, vip), WARNING)
+                        continue
+
+                vip_group.append(vip_key)
+                resources[vip_key] = res_ks_vip
+                resource_params[vip_key] = (
+                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
+                    ' nic="{iface}"'.format(ip=vip_params,
+                                            vip=vip,
+                                            iface=iface,
+                                            netmask=netmask)
+                )
+
+        if len(vip_group) >= 1:
+            relation_set(relation_id=relation_id,
+                         groups={CLUSTER_RES: ' '.join(vip_group)})
+
+    init_services = {
+        'res_ks_haproxy': 'haproxy'
+    }
+    clones = {
+        'cl_ks_haproxy': 'res_ks_haproxy'
+    }
+    relation_set(relation_id=relation_id,
+                 init_services=init_services,
+                 corosync_bindiface=cluster_config['ha-bindiface'],
+                 corosync_mcastport=cluster_config['ha-mcastport'],
+                 resources=resources,
+                 resource_params=resource_params,
+                 clones=clones)
+
+
+@hooks.hook('ha-relation-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed()
+def ha_changed():
+    CONFIGS.write_all()
+
+    clustered = relation_get('clustered')
+    if clustered:
+        log('Cluster configured, notifying other services and updating '
+            'keystone endpoint configuration')
+        if is_ssl_cert_master():
+            update_all_identity_relation_units_force_sync()
+        else:
+            update_all_identity_relation_units()
+
+
+@hooks.hook('identity-admin-relation-changed')
+def admin_relation_changed(relation_id=None):
+    # TODO: fixup
+    if expect_ha() and not is_clustered():
+        log("Expected to be HA but no hacluster relation yet", level=INFO)
+        return
+    relation_data = {
+        'service_hostname': resolve_address(ADMIN),
+        'service_port': config('service-port'),
+        'service_username': config('admin-user'),
+        'service_tenant_name': config('admin-role'),
+        'service_region': config('region'),
+        'service_protocol': 'https' if https() else 'http',
+        'api_version': get_api_version(),
+    }
+    if relation_data['api_version'] > 2:
+        relation_data['service_user_domain_name'] = ADMIN_DOMAIN
+        relation_data['service_project_domain_name'] = ADMIN_DOMAIN
+        relation_data['service_project_name'] = ADMIN_PROJECT
+    relation_data['service_password'] = get_admin_passwd()
+    relation_set(relation_id=relation_id, **relation_data)
+
+
+@hooks.hook('domain-backend-relation-changed')
+def domain_backend_changed(relation_id=None, unit=None):
+    """Create a domain for a domain-backend subordinate and restart once.
+
+    :param relation_id: domain-backend relation id
+    :param unit: remote unit providing the domain configuration
+    """
+    if get_api_version() < 3:
+        log('Domain specific backend identity configuration only supported '
+            'with Keystone v3 API, skipping domain creation and '
+            'restart.')
+        return
+
+    domain_name = relation_get(attribute='domain-name',
+                               unit=unit,
+                               rid=relation_id)
+    if domain_name:
+        # NOTE(jamespage): Only create domain data from lead
+        #                  unit when clustered and database
+        #                  is configured and created.
+        if is_leader() and is_db_ready() and is_db_initialised():
+            create_or_show_domain(domain_name)
+        # NOTE(jamespage): Deployment may have multiple domains,
+        #                  with different identity backends so
+        #                  ensure that a domain specific nonce
+        #                  is checked for restarts of keystone
+        restart_nonce = relation_get(attribute='restart-nonce',
+                                     unit=unit,
+                                     rid=relation_id)
+        domain_nonce_key = 'domain-restart-nonce-{}'.format(domain_name)
+        db = unitdata.kv()
+        # Restart only once per nonce value, remembered in local kv store.
+        if restart_nonce != db.get(domain_nonce_key):
+            if not is_unit_paused_set():
+                if snap_install_requested():
+                    service_restart('snap.keystone.*')
+                else:
+                    service_restart(keystone_service())
+            db.set(domain_nonce_key, restart_nonce)
+            db.flush()
+
+
+@synchronize_ca_if_changed(fatal=True)
+def configure_https():
+    '''
+    Enables SSL API Apache config if appropriate and kicks identity-service
+    with any required api updates.
+    '''
+    # need to write all to ensure changes to the entire request pipeline
+    # propagate (c-api, haprxy, apache)
+    CONFIGS.write_all()
+    # NOTE (thedac): When using snaps, nginx is installed, skip any apache2
+    # config.
+    if snap_install_requested():
+        return
+    if 'https' in CONFIGS.complete_contexts():
+        cmd = ['a2ensite', 'openstack_https_frontend']
+        check_call(cmd)
+    else:
+        cmd = ['a2dissite', 'openstack_https_frontend']
+        check_call(cmd)
+
+
+def configure_idp(idp, remote_ids, mappings_file, mapping_id, protocol):
+    '''
+    Configure Federated Identity Provider.
+    See: https://developer.openstack.org/api-ref/identity/v3-ext/
+         #os-federation-api
+    See: https://docs.openstack.org/python-keystoneclient/3.11.0/api/
+         keystoneclient.v3.contrib.federation.html
+    '''
+
+    from keystone_utils import (
+        get_local_endpoint,
+        get_admin_token
+    )
+    import requests
+
+    if not idp:
+        log("ERROR Missing Identity Provider name for %s" % protocol,
+            level=ERROR)
+        return
+    if not remote_ids:
+        log("ERROR Missing remote ids for %s provider" % protocol, level=ERROR)
+        return
+    if not mappings_file:
+        log("ERROR Missing mappings file for %s provider" % protocol,
+            level=ERROR)
+        return
+    if not mapping_id:
+        log("ERROR Missing mapping ID for %s provider" % protocol, level=ERROR)
+        return
+
+    federation_uri = os.path.join(get_local_endpoint(),
+                                  'OS-FEDERATION')
+    ks_admin_token = get_admin_token()
+    headers = {'X-Auth-Token': ks_admin_token}
+
+    # Is provider configured?
+    federation_idp_uri = os.path.join(federation_uri,
+                                      'identity_providers',
+                                      idp)
+    try:
+        resGet = requests.get(federation_idp_uri, headers=headers)
+    except requests.exceptions.RequestException as error:
+        log("ERROR %s, trying to GET %s, headers: %s" %
+            (error, federation_idp_uri, headers), level=WARNING)
+        return
+
+    data = {
+        "identity_provider": {
+            "description": "Identity provider %s" % idp,
+            "remote_ids": remote_ids,
+            "enabled": True
+        }
+    }
+    if not resGet.ok:
+        # Register a new Identity Provider
+        resPut = requests.put(federation_idp_uri, json=data, headers=headers)
+        if not resPut.ok:
+            log("ERROR IdP PUT: %s, %s, %s, %s" %
+                (resPut.reason, federation_idp_uri, headers, remote_ids),
+                level=WARNING)
+    else:
+        # Update the Identity Provider
+        resPatch = requests.patch(federation_idp_uri, json=data,
+                                  headers=headers)
+        if not resPatch.ok:
+            log("ERROR IdP PATCH %s, %s, %s, %s" %
+                (resPatch.reason, federation_idp_uri, headers, remote_ids),
+                level=WARNING)
+
+    # IdP users mapping
+    federation_mappings_uri = os.path.join(federation_uri,
+                                           'mappings',
+                                           mapping_id)
+    # Is IdP mapping for federated users already created?
+    try:
+        resGet = requests.get(federation_mappings_uri, headers=headers)
+    except requests.exceptions.RequestException as resError:
+        log("ERROR GET %s, %s, headers: %s" %
+            (resError, federation_mappings_uri, headers),
+            level=WARNING)
+    if os.path.isfile(mappings_file):
+        with open(mappings_file) as f:
+            data = f.read()
+        if not resGet.ok:
+            # Create the IdP mapping for federated users
+            resPut = requests.put(federation_mappings_uri,
+                                  data=data,
+                                  headers=headers)
+            if not resPut.ok:
+                log("ERROR IdP PUT %s, %s" %
+                    (resPut.reason, federation_mappings_uri),
+                    level=WARNING)
+        else:
+            # Update the IdP mapping for federated users
+            resPatch = requests.patch(federation_mappings_uri,
+                                      data=data,
+                                      headers=headers)
+            if not resPatch.ok:
+                log("ERROR IdP PATCH %s, %s" %
+                    (resPatch.reason, federation_mappings_uri),
+                    level=WARNING)
+
+    # IdP protocol
+    federation_idp_protocols_uri = os.path.join(federation_uri,
+                                                'identity_providers',
+                                                idp, 'protocols', protocol)
+    # Is IdP protocol for federated users already created?
+    try:
+        resGet = requests.get(federation_idp_protocols_uri, headers=headers)
+    except requests.exceptions.RequestException as getProError:
+        log("ERROR GET %s, headers: %s, message: %s" %
+            (federation_idp_protocols_uri, headers, getProError),
+            level=WARNING)
+    if not resGet.ok:
+        # Add the protocol
+        data = {
+            "protocol": {
+                "mapping_id": mapping_id
+            }
+        }
+        resPut = requests.put(federation_idp_protocols_uri,
+                              json=data,
+                              headers=headers)
+        if not resPut.ok:
+            log("ERROR IdP PUT %s, %s" %
+                (resPut.reason, federation_idp_protocols_uri),
+                level=WARNING)
+
+
+def configure_saml2():
+    '''
+    Configure SAML Provider.
+    '''
+    from keystone_context import SamlContext
+
+    configure_idp(config('shibsp-identity-provider'),
+                  config('shibsp-idp-remote-ids'),
+                  SAML2_MAPPING_FILE,
+                  config('saml2-mapping'),
+                  'saml2')
+
+    samlContext = SamlContext()
+    samlContext.configure()
+
+
+def configure_oidc():
+    '''
+    Configure OIDC Provider.
+    '''
+    configure_idp(config('oidc-identity-provider'),
+                  [config('oidc-idp-remote-id')],
+                  OIDC_MAPPING_FILE,
+                  config('oidc-mapping'),
+                  'oidc')
+
+
+@hooks.hook('upgrade-charm')
+@restart_on_change(restart_map(), stopstart=True)
+@synchronize_ca_if_changed()
+@harden()
+def upgrade_charm():
+    """Refresh packages and state after a charm upgrade.
+
+    Re-installs any missing packages, re-authorizes peer ssh keys, ensures
+    ssl dirs and rendered configs are current, and (on the leader) re-fires
+    the identity relations so endpoints pick up template changes.
+    """
+    status_set('maintenance', 'Installing apt packages')
+    apt_install(filter_installed_packages(determine_packages()))
+    # Re-authorize peer ssh keys used by unison for ssl syncing.
+    unison.ssh_authorized_peers(user=SSH_USER,
+                                group=SSH_USER,
+                                peer_interface='cluster',
+                                ensure_local_user=True)
+
+    ensure_ssl_dirs()
+
+    if run_in_apache():
+        disable_unused_apache_sites()
+
+    CONFIGS.write_all()
+
+    # See LP bug 1519035
+    leader_init_db_if_ready()
+
+    update_nrpe_config()
+
+    if is_elected_leader(CLUSTER_RES):
+        log('Cluster leader - ensuring endpoint configuration is up to '
+            'date', level=DEBUG)
+        update_all_identity_relation_units()
+
+
+@hooks.hook('update-status')
+@harden()
+def update_status():
+    log('Updating status.')
+
+
+@hooks.hook('nrpe-external-master-relation-joined',
+            'nrpe-external-master-relation-changed')
+def update_nrpe_config():
+    # python-dbus is used by check_upstart_job
+    apt_install('python-dbus')
+    hostname = nrpe.get_nagios_hostname()
+    current_unit = nrpe.get_nagios_unit_name()
+    nrpe_setup = nrpe.NRPE(hostname=hostname)
+    nrpe.copy_nrpe_checks()
+    _services = []
+    for service in services():
+        if service.startswith('snap.'):
+            service = service.split('.')[1]
+        _services.append(service)
+    nrpe.add_init_service_checks(nrpe_setup, _services, current_unit)
+    nrpe.add_haproxy_checks(nrpe_setup, current_unit)
+    nrpe_setup.write()
+
+
+def main():
+    try:
+        hooks.execute(sys.argv)
+    except UnregisteredHookError as e:
+        log('Unknown hook {} - skipping.'.format(e))
+    assess_status(CONFIGS)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/hooks/pgsql-db-relation-joined b/hooks/pgsql-db-relation-joined
new file mode 100755
index 0000000000000000000000000000000000000000..505db8a7b867de2c46c8ef6e79bc57c1a0294ca1
--- /dev/null
+++ b/hooks/pgsql-db-relation-joined
@@ -0,0 +1,1102 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+import json
+import os
+import sys
+
+from subprocess import check_call
+
+from charmhelpers.contrib import unison
+from charmhelpers.core import unitdata
+
+from charmhelpers.core.hookenv import (
+    Hooks,
+    UnregisteredHookError,
+    config,
+    is_relation_made,
+    log,
+    local_unit,
+    DEBUG,
+    INFO,
+    WARNING,
+    ERROR,
+    relation_get,
+    relation_ids,
+    relation_set,
+    related_units,
+    status_set,
+    open_port,
+    is_leader,
+)
+
+from charmhelpers.core.host import (
+    mkdir,
+    service_pause,
+    service_stop,
+    service_start,
+    service_restart,
+)
+
+from charmhelpers.core.strutils import (
+    bool_from_string,
+)
+
+from charmhelpers.fetch import (
+    apt_install, apt_update,
+    filter_installed_packages
+)
+
+from charmhelpers.contrib.openstack.utils import (
+    config_value_changed,
+    configure_installation_source,
+    git_install_requested,
+    openstack_upgrade_available,
+    sync_db_with_multi_ipv6_addresses,
+    os_release,
+    pausable_restart_on_change as restart_on_change,
+    is_unit_paused_set,
+    CompareOpenStackReleases,
+    snap_install_requested,
+    install_os_snaps,
+    get_snaps_install_info_from_origin,
+    enable_memcache,
+)
+
+from keystone_utils import (
+    add_service_to_keystone,
+    add_credentials_to_keystone,
+    determine_packages,
+    disable_unused_apache_sites,
+    do_openstack_upgrade_reexec,
+    ensure_initial_admin,
+    get_admin_passwd,
+    git_install,
+    migrate_database,
+    save_script_rc,
+    post_snap_install,
+    synchronize_ca_if_changed,
+    register_configs,
+    restart_map,
+    services,
+    CLUSTER_RES,
+    KEYSTONE_CONF,
+    KEYSTONE_USER,
+    POLICY_JSON,
+    TOKEN_FLUSH_CRON_FILE,
+    SSH_USER,
+    setup_ipv6,
+    send_notifications,
+    check_peer_actions,
+    get_ssl_sync_request_units,
+    is_ssl_cert_master,
+    is_db_ready,
+    clear_ssl_synced_units,
+    is_db_initialised,
+    update_certs_if_available,
+    ensure_ssl_dir,
+    ensure_pki_dir_permissions,
+    ensure_permissions,
+    force_ssl_sync,
+    filter_null,
+    ensure_ssl_dirs,
+    ensure_pki_cert_paths,
+    is_service_present,
+    delete_service_entry,
+    assess_status,
+    run_in_apache,
+    restart_function_map,
+    WSGI_KEYSTONE_API_CONF,
+    SHIBSP_FILES,
+    OIDC_MAPPING_FILE,
+    SAML2_MAPPING_FILE,
+    install_apache_error_handler,
+    restart_pid_check,
+    get_api_version,
+    ADMIN_DOMAIN,
+    ADMIN_PROJECT,
+    create_or_show_domain,
+    keystone_service,
+)
+
+from charmhelpers.contrib.hahelpers.cluster import (
+    is_elected_leader,
+    get_hacluster_config,
+    peer_units,
+    https,
+    is_clustered,
+)
+
+from charmhelpers.contrib.openstack.ha.utils import (
+    update_dns_ha_resource_params,
+    expect_ha,
+)
+
+from charmhelpers.payload.execd import execd_preinstall
+from charmhelpers.contrib.peerstorage import (
+    peer_retrieve_by_prefix,
+    peer_echo,
+    relation_get as relation_get_and_migrate,
+)
+from charmhelpers.contrib.openstack.ip import (
+    ADMIN,
+    resolve_address,
+)
+from charmhelpers.contrib.network.ip import (
+    get_iface_for_address,
+    get_netmask_for_address,
+    is_ipv6,
+    get_relation_ip,
+)
+from charmhelpers.contrib.openstack.context import ADDRESS_TYPES
+
+from charmhelpers.contrib.charmsupport import nrpe
+
+from charmhelpers.contrib.hardening.harden import harden
+
+hooks = Hooks()
+CONFIGS = register_configs()
+
+
+@hooks.hook('install.real')
+@harden()
+def install():
+    """Install hook: install packages and prime the keystone service.
+
+    Installs apt packages (and optionally the keystone snap), stops the
+    unconfigured keystone service, and prepares the unison ssh user used
+    for peer ssl syncing.
+    """
+    status_set('maintenance', 'Executing pre-install')
+    execd_preinstall()
+    configure_installation_source(config('openstack-origin'))
+    status_set('maintenance', 'Installing apt packages')
+    apt_update()
+    apt_install(determine_packages(), fatal=True)
+
+    if snap_install_requested():
+        status_set('maintenance', 'Installing keystone snap')
+        # NOTE(thedac) Setting devmode until LP#1719636 is fixed
+        install_os_snaps(
+            get_snaps_install_info_from_origin(
+                ['keystone'],
+                config('openstack-origin'),
+                mode='devmode'))
+        post_snap_install()
+        # Keep the snap services stopped until configuration is complete.
+        service_stop('snap.keystone.*')
+    else:
+        # unconfigured keystone service will prevent start of haproxy in some
+        # circumstances. make sure haproxy runs. LP #1648396
+        service_stop('keystone')
+        service_start('haproxy')
+        if run_in_apache():
+            disable_unused_apache_sites()
+            if not git_install_requested():
+                service_pause('keystone')
+        install_apache_error_handler(config('no-user-mapping-url'))
+
+    status_set('maintenance', 'Git install')
+    git_install(config('openstack-origin-git'))
+
+    # Create the user (in both groups) used by unison for ssl sync.
+    unison.ensure_user(user=SSH_USER, group=SSH_USER)
+    unison.ensure_user(user=SSH_USER, group=KEYSTONE_USER)
+
+
+@hooks.hook('config-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed(fatal=True)
+@harden()
+def config_changed():
+    """Config-changed hook: apply charm configuration.
+
+    Handles ipv6 setup, unison user/home preparation, git or openstack
+    upgrades, re-advertises cluster addresses, then delegates the bulk of
+    the work to config_changed_postupgrade().
+    """
+    if config('prefer-ipv6'):
+        status_set('maintenance', 'configuring ipv6')
+        setup_ipv6()
+        sync_db_with_multi_ipv6_addresses(config('database'),
+                                          config('database-user'))
+
+    unison.ensure_user(user=SSH_USER, group=SSH_USER)
+    unison.ensure_user(user=SSH_USER, group=KEYSTONE_USER)
+    homedir = unison.get_homedir(SSH_USER)
+    if not os.path.isdir(homedir):
+        mkdir(homedir, SSH_USER, SSH_USER, 0o775)
+
+    if git_install_requested():
+        if config_value_changed('openstack-origin-git'):
+            status_set('maintenance', 'Running Git install')
+            git_install(config('openstack-origin-git'))
+    elif not config('action-managed-upgrade'):
+        if openstack_upgrade_available('keystone'):
+            status_set('maintenance', 'Running openstack upgrade')
+            do_openstack_upgrade_reexec(configs=CONFIGS)
+
+    # Re-advertise addresses without re-requesting an ssl sync; the sync
+    # request is issued once by config_changed_postupgrade() below.
+    for r_id in relation_ids('cluster'):
+        cluster_joined(rid=r_id, ssl_sync_request=False)
+
+    config_changed_postupgrade()
+
+
+@hooks.hook('config-changed-postupgrade')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed(fatal=True)
+@harden()
+def config_changed_postupgrade():
+    """Apply configuration after any (possible) openstack upgrade.
+
+    Renders configs, (re)configures apache/wsgi or the snap services,
+    federation providers (OIDC/SAML2), the https frontend and nrpe checks,
+    then re-fires all identity relations and requests a peer ssl sync.
+    """
+    # Ensure ssl dir exists and is unison-accessible
+    ensure_ssl_dir()
+
+    if not snap_install_requested():
+        check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])
+
+    ensure_ssl_dirs()
+
+    save_script_rc()
+    release = os_release('keystone')
+    if run_in_apache(release=release):
+        # Need to ensure mod_wsgi is installed and apache2 is reloaded
+        # immediately as the charm queries its local keystone before the
+        # restart decorator can fire
+        apt_install(filter_installed_packages(determine_packages()))
+        # when deployed from source, init scripts aren't installed
+        if not git_install_requested():
+            service_pause('keystone')
+
+        disable_unused_apache_sites()
+        if WSGI_KEYSTONE_API_CONF in CONFIGS.templates:
+            CONFIGS.write(WSGI_KEYSTONE_API_CONF)
+        if not is_unit_paused_set():
+            restart_pid_check('apache2')
+        # Federation: render mapping files and register the provider(s)
+        # with keystone via the OS-FEDERATION API.
+        if config('enable-oidc'):
+            CONFIGS.write(OIDC_MAPPING_FILE)
+            configure_oidc()
+        if config('enable-saml2'):
+            CONFIGS.write(SAML2_MAPPING_FILE)
+            for shibsp_file in SHIBSP_FILES:
+                CONFIGS.write(shibsp_file)
+            configure_saml2()
+            # Restart shibd so it picks up the freshly written SP config.
+            service_stop('shibd')
+            if not is_unit_paused_set():
+                service_start('shibd')
+        install_apache_error_handler(config('no-user-mapping-url'))
+
+    if enable_memcache(release=release):
+        # If charm or OpenStack have been upgraded then the list of required
+        # packages may have changed so ensure they are installed.
+        apt_install(filter_installed_packages(determine_packages()))
+
+    configure_https()
+    open_port(config('service-port'))
+
+    update_nrpe_config()
+
+    CONFIGS.write_all()
+
+    if snap_install_requested() and not is_unit_paused_set():
+        service_restart('snap.keystone.*')
+
+    initialise_pki()
+
+    update_all_identity_relation_units()
+    update_all_domain_backends()
+
+    # Ensure sync request is sent out (needed for any/all ssl change)
+    send_ssl_sync_request()
+
+    for r_id in relation_ids('ha'):
+        ha_joined(relation_id=r_id)
+
+
+@synchronize_ca_if_changed(fatal=True)
+def initialise_pki():
+    """Create certs and keys required for token signing.
+
+    Used for PKI and signing token revocation list.
+
+    NOTE: keystone.conf [signing] section must be up-to-date prior to
+          executing this.
+    """
+    if CompareOpenStackReleases(os_release('keystone-common')) >= 'pike':
+        # pike dropped support for PKI token; skip function
+        return
+    ensure_pki_cert_paths()
+    if not peer_units() or is_ssl_cert_master():
+        log("Ensuring PKI token certs created", level=DEBUG)
+        if snap_install_requested():
+            cmd = ['/snap/bin/keystone-manage', 'pki_setup',
+                   '--keystone-user', KEYSTONE_USER,
+                   '--keystone-group', KEYSTONE_USER]
+            _log_dir = '/var/snap/keystone/common/log'
+        else:
+            cmd = ['keystone-manage', 'pki_setup',
+                   '--keystone-user', KEYSTONE_USER,
+                   '--keystone-group', KEYSTONE_USER]
+            _log_dir = '/var/log/keystone'
+        check_call(cmd)
+
+        # Ensure logfile has keystone perms since we may have just created it
+        # with root.
+        ensure_permissions(_log_dir, user=KEYSTONE_USER,
+                           group=KEYSTONE_USER, perms=0o744)
+        ensure_permissions('{}/keystone.log'.format(_log_dir),
+                           user=KEYSTONE_USER, group=KEYSTONE_USER,
+                           perms=0o644)
+
+    ensure_pki_dir_permissions()
+
+
+@hooks.hook('shared-db-relation-joined')
+def db_joined():
+    """Request access to the mysql shared-db.
+
+    Raises if a postgresql relation already exists, since only one
+    database backend may be configured at a time.
+    """
+    if is_relation_made('pgsql-db'):
+        # error, postgresql is used
+        e = ('Attempting to associate a mysql database when there is already '
+             'associated a postgresql one')
+        log(e, level=ERROR)
+        raise Exception(e)
+
+    if config('prefer-ipv6'):
+        sync_db_with_multi_ipv6_addresses(config('database'),
+                                          config('database-user'))
+    else:
+        # Avoid churn check for access-network early
+        access_network = None
+        for unit in related_units():
+            access_network = relation_get(unit=unit,
+                                          attribute='access-network')
+            if access_network:
+                break
+        host = get_relation_ip('shared-db', cidr_network=access_network)
+
+        relation_set(database=config('database'),
+                     username=config('database-user'),
+                     hostname=host)
+
+
+@hooks.hook('pgsql-db-relation-joined')
+def pgsql_db_joined():
+    if is_relation_made('shared-db'):
+        # raise error
+        e = ('Attempting to associate a postgresql database when there'
+             ' is already associated a mysql one')
+        log(e, level=ERROR)
+        raise Exception(e)
+
+    relation_set(database=config('database'))
+
+
+def update_all_identity_relation_units(check_db_ready=True):
+    """Re-fire identity hooks for every related service.
+
+    No-op when the unit is paused or the database is not yet
+    ready/initialised.
+
+    :param check_db_ready: when True, bail out unless is_db_ready() reports
+                           this unit in the db's allowed_units list.
+    """
+    if is_unit_paused_set():
+        return
+    CONFIGS.write_all()
+    configure_https()
+    if check_db_ready and not is_db_ready():
+        log('Allowed_units list provided and this unit not present',
+            level=INFO)
+        return
+
+    if not is_db_initialised():
+        log("Database not yet initialised - deferring identity-relation "
+            "updates", level=INFO)
+        return
+
+    if is_elected_leader(CLUSTER_RES):
+        ensure_initial_admin(config)
+
+    log('Firing identity_changed hook for all related services.')
+    for rid in relation_ids('identity-service'):
+        for unit in related_units(rid):
+            identity_changed(relation_id=rid, remote_unit=unit)
+    log('Firing admin_relation_changed hook for all related services.')
+    for rid in relation_ids('identity-admin'):
+        admin_relation_changed(rid)
+    log('Firing identity_credentials_changed hook for all related services.')
+    for rid in relation_ids('identity-credentials'):
+        for unit in related_units(rid):
+            identity_credentials_changed(relation_id=rid, remote_unit=unit)
+
+
+@synchronize_ca_if_changed(force=True)
+def update_all_identity_relation_units_force_sync():
+    # Same as update_all_identity_relation_units() but additionally forces
+    # an ssl cert sync to peers via synchronize_ca_if_changed(force=True).
+    update_all_identity_relation_units()
+
+
+def update_all_domain_backends():
+    """Re-trigger hooks for all domain-backend relations/units"""
+    for rid in relation_ids('domain-backend'):
+        for unit in related_units(rid):
+            domain_backend_changed(relation_id=rid, unit=unit)
+
+
+def leader_init_db_if_ready(use_current_context=False):
+    """ Initialise the keystone db if it is ready and mark it as initialised.
+
+    NOTE: this must be idempotent.
+
+    :param use_current_context: passed through to is_db_ready(); when True
+        the check is restricted to the relation context of the running hook.
+    """
+    if not is_elected_leader(CLUSTER_RES):
+        log("Not leader - skipping db init", level=DEBUG)
+        return
+
+    if is_db_initialised():
+        log("Database already initialised - skipping db init", level=DEBUG)
+        update_all_identity_relation_units(check_db_ready=False)
+        return
+
+    # Bugs 1353135 & 1187508. Dbs can appear to be ready before the
+    # units acl entry has been added. So, if the db supports passing
+    # a list of permitted units then check if we're in the list.
+    if not is_db_ready(use_current_context=use_current_context):
+        log('Allowed_units list provided and this unit not present',
+            level=INFO)
+        return
+
+    migrate_database()
+    # Ensure any existing service entries are updated in the
+    # new database backend. Also avoid duplicate db ready check.
+    update_all_identity_relation_units(check_db_ready=False)
+    update_all_domain_backends()
+
+
+@hooks.hook('shared-db-relation-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed()
+def db_changed():
+    """Render keystone.conf and initialise the db once mysql is complete."""
+    if 'shared-db' not in CONFIGS.complete_contexts():
+        log('shared-db relation incomplete. Peer not ready?')
+    else:
+        CONFIGS.write(KEYSTONE_CONF)
+        leader_init_db_if_ready(use_current_context=True)
+        # policy.json is only templated from liberty onwards.
+        if CompareOpenStackReleases(
+                os_release('keystone-common')) >= 'liberty':
+            CONFIGS.write(POLICY_JSON)
+
+
+@hooks.hook('pgsql-db-relation-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed()
+def pgsql_db_changed():
+    """Render keystone.conf and initialise the db once postgresql is complete."""
+    if 'pgsql-db' not in CONFIGS.complete_contexts():
+        log('pgsql-db relation incomplete. Peer not ready?')
+    else:
+        CONFIGS.write(KEYSTONE_CONF)
+        leader_init_db_if_ready(use_current_context=True)
+        # policy.json is only templated from liberty onwards.
+        if CompareOpenStackReleases(
+                os_release('keystone-common')) >= 'liberty':
+            CONFIGS.write(POLICY_JSON)
+
+
+@hooks.hook('identity-service-relation-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed()
+def identity_changed(relation_id=None, remote_unit=None):
+    CONFIGS.write_all()
+
+    notifications = {}
+    if is_elected_leader(CLUSTER_RES):
+        if not is_db_ready():
+            log("identity-service-relation-changed hook fired before db "
+                "ready - deferring until db ready", level=WARNING)
+            return
+
+        if not is_db_initialised():
+            log("Database not yet initialised - deferring identity-relation "
+                "updates", level=INFO)
+            return
+
+        if expect_ha() and not is_clustered():
+            log("Expected to be HA but no hacluster relation yet", level=INFO)
+            return
+
+        add_service_to_keystone(relation_id, remote_unit)
+        if is_service_present('neutron', 'network'):
+            delete_service_entry('quantum', 'network')
+        settings = relation_get(rid=relation_id, unit=remote_unit)
+        service = settings.get('service', None)
+        if service:
+            # If service is known and endpoint has changed, notify service if
+            # it is related with notifications interface.
+            csum = hashlib.sha256()
+            # We base the decision to notify on whether these parameters have
+            # changed (if csum is unchanged from previous notify, relation will
+            # not fire).
+            csum.update(settings.get('public_url', None))
+            csum.update(settings.get('admin_url', None))
+            csum.update(settings.get('internal_url', None))
+            notifications['%s-endpoint-changed' % (service)] = csum.hexdigest()
+    else:
+        # Each unit needs to set the db information otherwise if the unit
+        # with the info dies the settings die with it Bug# 1355848
+        for rel_id in relation_ids('identity-service'):
+            peerdb_settings = peer_retrieve_by_prefix(rel_id)
+            # Ensure the null'd settings are unset in the relation.
+            peerdb_settings = filter_null(peerdb_settings)
+            if 'service_password' in peerdb_settings:
+                relation_set(relation_id=rel_id, **peerdb_settings)
+
+        log('Deferring identity_changed() to service leader.')
+
+    if notifications:
+        send_notifications(notifications)
+
+
+@hooks.hook('identity-credentials-relation-joined',
+            'identity-credentials-relation-changed')
+def identity_credentials_changed(relation_id=None, remote_unit=None):
+    """Update the identity credentials relation on change
+
+    Calls add_credentials_to_keystone
+
+    Only the elected leader acts; it additionally defers until the cluster
+    is formed (when HA is expected) and the db is ready and initialised.
+
+    :param relation_id: Relation id of the relation
+    :param remote_unit: Related unit on the relation
+    """
+    if is_elected_leader(CLUSTER_RES):
+        if expect_ha() and not is_clustered():
+            log("Expected to be HA but no hacluster relation yet", level=INFO)
+            return
+        if not is_db_ready():
+            log("identity-credentials-relation-changed hook fired before db "
+                "ready - deferring until db ready", level=WARNING)
+            return
+
+        if not is_db_initialised():
+            log("Database not yet initialised - deferring "
+                "identity-credentials-relation updates", level=INFO)
+            return
+
+        # Create the tenant user
+        add_credentials_to_keystone(relation_id, remote_unit)
+    else:
+        log('Deferring identity_credentials_changed() to service leader.')
+
+
+def send_ssl_sync_request():
+    """Set sync request on cluster relation.
+
+    Value set equals number of ssl configs currently enabled so that if they
+    change, we ensure that certs are synced. This setting is consumed by
+    cluster-relation-changed ssl master. We also clear the 'synced' set to
+    guarantee that a sync will occur.
+
+    Note the we do nothing if the setting is already applied.
+    """
+    unit = local_unit().replace('/', '-')
+    # Start with core config (e.g. used for signing revoked token list)
+    ssl_config = 0b1
+
+    use_https = config('use-https')
+    if use_https and bool_from_string(use_https):
+        ssl_config ^= 0b10
+
+    https_service_endpoints = config('https-service-endpoints')
+    if (https_service_endpoints and
+            bool_from_string(https_service_endpoints)):
+        ssl_config ^= 0b100
+
+    enable_pki = config('enable-pki')
+    if enable_pki and bool_from_string(enable_pki):
+        ssl_config ^= 0b1000
+
+    key = 'ssl-sync-required-%s' % (unit)
+    settings = {key: ssl_config}
+
+    prev = 0b0
+    rid = None
+    for rid in relation_ids('cluster'):
+        for unit in related_units(rid):
+            _prev = relation_get(rid=rid, unit=unit, attribute=key) or 0b0
+            if _prev and _prev > prev:
+                prev = bin(_prev)
+
+    if rid and prev ^ ssl_config:
+        if is_leader():
+            clear_ssl_synced_units()
+
+        log("Setting %s=%s" % (key, bin(ssl_config)), level=DEBUG)
+        relation_set(relation_id=rid, relation_settings=settings)
+
+
+@hooks.hook('cluster-relation-joined')
+def cluster_joined(rid=None, ssl_sync_request=True):
+    unison.ssh_authorized_peers(user=SSH_USER,
+                                group=SSH_USER,
+                                peer_interface='cluster',
+                                ensure_local_user=True)
+
+    settings = {}
+
+    for addr_type in ADDRESS_TYPES:
+        address = get_relation_ip(
+            addr_type,
+            cidr_network=config('os-{}-network'.format(addr_type)))
+        if address:
+            settings['{}-address'.format(addr_type)] = address
+
+    settings['private-address'] = get_relation_ip('cluster')
+
+    relation_set(relation_id=rid, relation_settings=settings)
+
+    if ssl_sync_request:
+        send_ssl_sync_request()
+
+
+@hooks.hook('cluster-relation-changed')
+@restart_on_change(restart_map(), stopstart=True)
+@update_certs_if_available
+def cluster_changed():
+    """Handle peer changes: ssh auth, peer echo, PKI and ssl cert syncing."""
+    unison.ssh_authorized_peers(user=SSH_USER,
+                                group=SSH_USER,
+                                peer_interface='cluster',
+                                ensure_local_user=True)
+    # NOTE(jamespage) re-echo passwords for peer storage
+    echo_whitelist = ['_passwd', 'identity-service:',
+                      'db-initialised', 'ssl-cert-available-updates']
+    # Don't echo if leader since a re-election may be in progress.
+    if not is_leader():
+        echo_whitelist.append('ssl-cert-master')
+
+    log("Peer echo whitelist: %s" % (echo_whitelist), level=DEBUG)
+    peer_echo(includes=echo_whitelist, force=True)
+
+    check_peer_actions()
+
+    initialise_pki()
+
+    if is_leader():
+        # Figure out if we need to mandate a sync
+        units = get_ssl_sync_request_units()
+        synced_units = relation_get_and_migrate(attribute='ssl-synced-units',
+                                                unit=local_unit())
+        diff = None
+        if synced_units:
+            synced_units = json.loads(synced_units)
+            diff = set(units).symmetric_difference(set(synced_units))
+    else:
+        units = None
+
+    # NOTE(review): on non-leaders units is None so the 'and' below
+    # short-circuits before the (unbound) synced_units/diff are read.
+    if units and (not synced_units or diff):
+        log("New peers joined and need syncing - %s" %
+            (', '.join(units)), level=DEBUG)
+        update_all_identity_relation_units_force_sync()
+    else:
+        update_all_identity_relation_units()
+
+    if not is_leader() and is_ssl_cert_master():
+        # Force and sync and trigger a sync master re-election since we are not
+        # leader anymore.
+        force_ssl_sync()
+    else:
+        CONFIGS.write_all()
+
+
+@hooks.hook('leader-elected')
+@restart_on_change(restart_map(), stopstart=True)
+def leader_elected():
+    log('Unit has been elected leader.', level=DEBUG)
+    # When the local unit has been elected the leader, update the cron jobs
+    # to ensure that the cron jobs are active on this unit.
+    CONFIGS.write(TOKEN_FLUSH_CRON_FILE)
+
+    update_all_identity_relation_units()
+
+    update_all_identity_relation_units()
+
+
+@hooks.hook('leader-settings-changed')
+@restart_on_change(restart_map(), stopstart=True)
+def leader_settings_changed():
+    """Handle leadership settings changes (runs on non-leader units)."""
+    # Since minions are notified of a regime change via the
+    # leader-settings-changed hook, rewrite the token flush cron job to make
+    # sure only the leader is running the cron job.
+    CONFIGS.write(TOKEN_FLUSH_CRON_FILE)
+
+    update_all_identity_relation_units()
+
+
+@hooks.hook('ha-relation-joined')
+def ha_joined(relation_id=None):
+    """Publish HA resource definitions on the hacluster relation.
+
+    Advertises the haproxy LSB resource plus either DNS-HA resource
+    parameters (when the 'dns-ha' option is set) or one VIP resource per
+    configured address, then hands the full set to the hacluster charm.
+
+    :param relation_id: specific relation id to set data on (defaults to
+                        the current relation when None).
+    """
+    cluster_config = get_hacluster_config()
+    resources = {
+        'res_ks_haproxy': 'lsb:haproxy',
+    }
+    resource_params = {
+        'res_ks_haproxy': 'op monitor interval="5s"'
+    }
+
+    if config('dns-ha'):
+        update_dns_ha_resource_params(relation_id=relation_id,
+                                      resources=resources,
+                                      resource_params=resource_params)
+    else:
+        vip_group = []
+        for vip in cluster_config['vip'].split():
+            # Pick the OCF agent matching the address family of the VIP.
+            if is_ipv6(vip):
+                res_ks_vip = 'ocf:heartbeat:IPv6addr'
+                vip_params = 'ipv6addr'
+            else:
+                res_ks_vip = 'ocf:heartbeat:IPaddr2'
+                vip_params = 'ip'
+
+            # Fall back to statically configured values when the nic or
+            # netmask cannot be derived from the address itself.
+            iface = (get_iface_for_address(vip) or
+                     config('vip_iface'))
+            netmask = (get_netmask_for_address(vip) or
+                       config('vip_cidr'))
+
+            if iface is not None:
+                # Resource names are keyed on interface; disambiguate when
+                # several VIPs share the same nic.
+                vip_key = 'res_ks_{}_vip'.format(iface)
+                if vip_key in vip_group:
+                    if vip not in resource_params[vip_key]:
+                        vip_key = '{}_{}'.format(vip_key, vip_params)
+                    else:
+                        log("Resource '%s' (vip='%s') already exists in "
+                            "vip group - skipping" % (vip_key, vip), WARNING)
+                        continue
+
+                vip_group.append(vip_key)
+                resources[vip_key] = res_ks_vip
+                resource_params[vip_key] = (
+                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
+                    ' nic="{iface}"'.format(ip=vip_params,
+                                            vip=vip,
+                                            iface=iface,
+                                            netmask=netmask)
+                )
+
+        if len(vip_group) >= 1:
+            relation_set(relation_id=relation_id,
+                         groups={CLUSTER_RES: ' '.join(vip_group)})
+
+    init_services = {
+        'res_ks_haproxy': 'haproxy'
+    }
+    clones = {
+        'cl_ks_haproxy': 'res_ks_haproxy'
+    }
+    relation_set(relation_id=relation_id,
+                 init_services=init_services,
+                 corosync_bindiface=cluster_config['ha-bindiface'],
+                 corosync_mcastport=cluster_config['ha-mcastport'],
+                 resources=resources,
+                 resource_params=resource_params,
+                 clones=clones)
+
+
+@hooks.hook('ha-relation-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@synchronize_ca_if_changed()
+def ha_changed():
+    """React to hacluster relation data changes.
+
+    Rewrites all configs and, once the remote side reports the cluster as
+    formed ('clustered'), refreshes endpoint data on all identity
+    relations, forcing an SSL sync when this unit is the cert master.
+    """
+    CONFIGS.write_all()
+
+    clustered = relation_get('clustered')
+    if clustered:
+        log('Cluster configured, notifying other services and updating '
+            'keystone endpoint configuration')
+        if is_ssl_cert_master():
+            update_all_identity_relation_units_force_sync()
+        else:
+            update_all_identity_relation_units()
+
+
+@hooks.hook('identity-admin-relation-changed')
+def admin_relation_changed(relation_id=None):
+    """Publish admin credentials and endpoint details on identity-admin.
+
+    Defers publication until the cluster is formed when HA is expected.
+
+    :param relation_id: specific relation id to set data on.
+    """
+    # TODO: fixup
+    if expect_ha() and not is_clustered():
+        log("Expected to be HA but no hacluster relation yet", level=INFO)
+        return
+    relation_data = {
+        'service_hostname': resolve_address(ADMIN),
+        'service_port': config('service-port'),
+        'service_username': config('admin-user'),
+        # NOTE(review): the tenant name is sourced from the 'admin-role'
+        # config option; this looks suspicious - confirm it is intentional.
+        'service_tenant_name': config('admin-role'),
+        'service_region': config('region'),
+        'service_protocol': 'https' if https() else 'http',
+        'api_version': get_api_version(),
+    }
+    # Keystone v3 consumers additionally need domain/project scoping.
+    if relation_data['api_version'] > 2:
+        relation_data['service_user_domain_name'] = ADMIN_DOMAIN
+        relation_data['service_project_domain_name'] = ADMIN_DOMAIN
+        relation_data['service_project_name'] = ADMIN_PROJECT
+    relation_data['service_password'] = get_admin_passwd()
+    relation_set(relation_id=relation_id, **relation_data)
+
+
+@hooks.hook('domain-backend-relation-changed')
+def domain_backend_changed(relation_id=None, unit=None):
+    """Create/refresh a domain for a domain-specific identity backend.
+
+    Only applicable with the Keystone v3 API. The lead unit creates the
+    domain once the database is ready; every unit restarts keystone when
+    the remote side publishes a new restart nonce for the domain.
+
+    :param relation_id: relation id to read settings from.
+    :param unit: remote unit to read settings from.
+    """
+    if get_api_version() < 3:
+        log('Domain specific backend identity configuration only supported '
+            'with Keystone v3 API, skipping domain creation and '
+            'restart.')
+        return
+
+    domain_name = relation_get(attribute='domain-name',
+                               unit=unit,
+                               rid=relation_id)
+    if domain_name:
+        # NOTE(jamespage): Only create domain data from lead
+        #                  unit when clustered and database
+        #                  is configured and created.
+        if is_leader() and is_db_ready() and is_db_initialised():
+            create_or_show_domain(domain_name)
+        # NOTE(jamespage): Deployment may have multiple domains,
+        #                  with different identity backends so
+        #                  ensure that a domain specific nonce
+        #                  is checked for restarts of keystone
+        restart_nonce = relation_get(attribute='restart-nonce',
+                                     unit=unit,
+                                     rid=relation_id)
+        domain_nonce_key = 'domain-restart-nonce-{}'.format(domain_name)
+        db = unitdata.kv()
+        # Restart only when the nonce actually changed, and never while
+        # the unit is administratively paused.
+        if restart_nonce != db.get(domain_nonce_key):
+            if not is_unit_paused_set():
+                if snap_install_requested():
+                    service_restart('snap.keystone.*')
+                else:
+                    service_restart(keystone_service())
+            db.set(domain_nonce_key, restart_nonce)
+            db.flush()
+
+
+@synchronize_ca_if_changed(fatal=True)
+def configure_https():
+    '''
+    Enables SSL API Apache config if appropriate and kicks identity-service
+    with any required api updates.
+    '''
+    # need to write all to ensure changes to the entire request pipeline
+    # propagate (haproxy, apache). NOTE(review): the original comment also
+    # mentioned 'c-api', which looks copied from another charm - confirm.
+    CONFIGS.write_all()
+    # NOTE (thedac): When using snaps, nginx is installed, skip any apache2
+    # config.
+    if snap_install_requested():
+        return
+    # Toggle the https frontend vhost depending on whether the 'https'
+    # context is complete.
+    if 'https' in CONFIGS.complete_contexts():
+        cmd = ['a2ensite', 'openstack_https_frontend']
+        check_call(cmd)
+    else:
+        cmd = ['a2dissite', 'openstack_https_frontend']
+        check_call(cmd)
+
+
+def configure_idp(idp, remote_ids, mappings_file, mapping_id, protocol):
+    '''
+    Configure Federated Identity Provider.
+    See: https://developer.openstack.org/api-ref/identity/v3-ext/
+         #os-federation-api
+    See: https://docs.openstack.org/python-keystoneclient/3.11.0/api/
+         keystoneclient.v3.contrib.federation.html
+    '''
+
+    from keystone_utils import (
+        get_local_endpoint,
+        get_admin_token
+    )
+    import requests
+
+    if not idp:
+        log("ERROR Missing Identity Provider name for %s" % protocol,
+            level=ERROR)
+        return
+    if not remote_ids:
+        log("ERROR Missing remote ids for %s provider" % protocol, level=ERROR)
+        return
+    if not mappings_file:
+        log("ERROR Missing mappings file for %s provider" % protocol,
+            level=ERROR)
+        return
+    if not mapping_id:
+        log("ERROR Missing mapping ID for %s provider" % protocol, level=ERROR)
+        return
+
+    federation_uri = os.path.join(get_local_endpoint(),
+                                  'OS-FEDERATION')
+    ks_admin_token = get_admin_token()
+    headers = {'X-Auth-Token': ks_admin_token}
+
+    # Is provider configured?
+    federation_idp_uri = os.path.join(federation_uri,
+                                      'identity_providers',
+                                      idp)
+    try:
+        resGet = requests.get(federation_idp_uri, headers=headers)
+    except requests.exceptions.RequestException as error:
+        log("ERROR %s, trying to GET %s, headers: %s" %
+            (error, federation_idp_uri, headers), level=WARNING)
+        return
+
+    data = {
+        "identity_provider": {
+            "description": "Identity provider %s" % idp,
+            "remote_ids": remote_ids,
+            "enabled": True
+        }
+    }
+    if not resGet.ok:
+        # Register a new Identity Provider
+        resPut = requests.put(federation_idp_uri, json=data, headers=headers)
+        if not resPut.ok:
+            log("ERROR IdP PUT: %s, %s, %s, %s" %
+                (resPut.reason, federation_idp_uri, headers, remote_ids),
+                level=WARNING)
+    else:
+        # Update the Identity Provider
+        resPatch = requests.patch(federation_idp_uri, json=data,
+                                  headers=headers)
+        if not resPatch.ok:
+            log("ERROR IdP PATCH %s, %s, %s, %s" %
+                (resPatch.reason, federation_idp_uri, headers, remote_ids),
+                level=WARNING)
+
+    # IdP users mapping
+    federation_mappings_uri = os.path.join(federation_uri,
+                                           'mappings',
+                                           mapping_id)
+    # Is IdP mapping for federated users already created?
+    try:
+        resGet = requests.get(federation_mappings_uri, headers=headers)
+    except requests.exceptions.RequestException as resError:
+        log("ERROR GET %s, %s, headers: %s" %
+            (resError, federation_mappings_uri, headers),
+            level=WARNING)
+    if os.path.isfile(mappings_file):
+        with open(mappings_file) as f:
+            data = f.read()
+        if not resGet.ok:
+            # Create the IdP mapping for federated users
+            resPut = requests.put(federation_mappings_uri,
+                                  data=data,
+                                  headers=headers)
+            if not resPut.ok:
+                log("ERROR IdP PUT %s, %s" %
+                    (resPut.reason, federation_mappings_uri),
+                    level=WARNING)
+        else:
+            # Update the IdP mapping for federated users
+            resPatch = requests.patch(federation_mappings_uri,
+                                      data=data,
+                                      headers=headers)
+            if not resPatch.ok:
+                log("ERROR IdP PATCH %s, %s" %
+                    (resPatch.reason, federation_mappings_uri),
+                    level=WARNING)
+
+    # IdP protocol
+    federation_idp_protocols_uri = os.path.join(federation_uri,
+                                                'identity_providers',
+                                                idp, 'protocols', protocol)
+    # Is IdP protocol for federated users already created?
+    try:
+        resGet = requests.get(federation_idp_protocols_uri, headers=headers)
+    except requests.exceptions.RequestException as getProError:
+        log("ERROR GET %s, headers: %s, message: %s" %
+            (federation_idp_protocols_uri, headers, getProError),
+            level=WARNING)
+    if not resGet.ok:
+        # Add the protocol
+        data = {
+            "protocol": {
+                "mapping_id": mapping_id
+            }
+        }
+        resPut = requests.put(federation_idp_protocols_uri,
+                              json=data,
+                              headers=headers)
+        if not resPut.ok:
+            log("ERROR IdP PUT %s, %s" %
+                (resPut.reason, federation_idp_protocols_uri),
+                level=WARNING)
+
+
+def configure_saml2():
+    '''
+    Configure SAML Provider.
+    '''
+    from keystone_context import SamlContext
+
+    configure_idp(config('shibsp-identity-provider'),
+                  config('shibsp-idp-remote-ids'),
+                  SAML2_MAPPING_FILE,
+                  config('saml2-mapping'),
+                  'saml2')
+
+    samlContext = SamlContext()
+    samlContext.configure()
+
+
+def configure_oidc():
+    '''
+    Configure OIDC Provider.
+    '''
+    configure_idp(config('oidc-identity-provider'),
+                  [config('oidc-idp-remote-id')],
+                  OIDC_MAPPING_FILE,
+                  config('oidc-mapping'),
+                  'oidc')
+
+
+@hooks.hook('upgrade-charm')
+@restart_on_change(restart_map(), stopstart=True)
+@synchronize_ca_if_changed()
+@harden()
+def upgrade_charm():
+    """Handle charm upgrade: refresh packages, configs and relations."""
+    status_set('maintenance', 'Installing apt packages')
+    apt_install(filter_installed_packages(determine_packages()))
+    # Re-authorise peer ssh keys used for unison file sync.
+    unison.ssh_authorized_peers(user=SSH_USER,
+                                group=SSH_USER,
+                                peer_interface='cluster',
+                                ensure_local_user=True)
+
+    ensure_ssl_dirs()
+
+    if run_in_apache():
+        disable_unused_apache_sites()
+
+    CONFIGS.write_all()
+
+    # See LP bug 1519035
+    leader_init_db_if_ready()
+
+    update_nrpe_config()
+
+    if is_elected_leader(CLUSTER_RES):
+        log('Cluster leader - ensuring endpoint configuration is up to '
+            'date', level=DEBUG)
+        update_all_identity_relation_units()
+
+
+@hooks.hook('update-status')
+@harden()
+def update_status():
+    """Periodic update-status hook; status itself is assessed in main()."""
+    log('Updating status.')
+
+
+@hooks.hook('nrpe-external-master-relation-joined',
+            'nrpe-external-master-relation-changed')
+def update_nrpe_config():
+    """(Re)write NRPE check definitions for this unit's services."""
+    # python-dbus is used by check_upstart_job
+    apt_install('python-dbus')
+    hostname = nrpe.get_nagios_hostname()
+    current_unit = nrpe.get_nagios_unit_name()
+    nrpe_setup = nrpe.NRPE(hostname=hostname)
+    nrpe.copy_nrpe_checks()
+    _services = []
+    for service in services():
+        # Strip the 'snap.' prefix so checks reference the bare service
+        # name (e.g. 'snap.keystone.uwsgi' -> 'keystone').
+        if service.startswith('snap.'):
+            service = service.split('.')[1]
+        _services.append(service)
+    nrpe.add_init_service_checks(nrpe_setup, _services, current_unit)
+    nrpe.add_haproxy_checks(nrpe_setup, current_unit)
+    nrpe_setup.write()
+
+
+def main():
+    """Entry point: dispatch the invoked hook, then assess status."""
+    try:
+        hooks.execute(sys.argv)
+    except UnregisteredHookError as e:
+        # Unknown hooks are not fatal - log and still assess status.
+        log('Unknown hook {} - skipping.'.format(e))
+    assess_status(CONFIGS)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/hooks/websso-trusted-dashboard-relation-broken b/hooks/websso-trusted-dashboard-relation-broken
deleted file mode 120000
index dd3b3eff4b7109293b4cfd9b81f5fc49643432a0..0000000000000000000000000000000000000000
--- a/hooks/websso-trusted-dashboard-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/websso-trusted-dashboard-relation-changed b/hooks/websso-trusted-dashboard-relation-changed
deleted file mode 120000
index dd3b3eff4b7109293b4cfd9b81f5fc49643432a0..0000000000000000000000000000000000000000
--- a/hooks/websso-trusted-dashboard-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/websso-trusted-dashboard-relation-departed b/hooks/websso-trusted-dashboard-relation-departed
deleted file mode 120000
index dd3b3eff4b7109293b4cfd9b81f5fc49643432a0..0000000000000000000000000000000000000000
--- a/hooks/websso-trusted-dashboard-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/websso-trusted-dashboard-relation-joined b/hooks/websso-trusted-dashboard-relation-joined
deleted file mode 120000
index dd3b3eff4b7109293b4cfd9b81f5fc49643432a0..0000000000000000000000000000000000000000
--- a/hooks/websso-trusted-dashboard-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-keystone_hooks.py
\ No newline at end of file
diff --git a/metadata.yaml b/metadata.yaml
index aad95f17986fe67f905910f3ee3805716167261b..431b688f85655318a51fac6cc8085bbec2a7affa 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -11,8 +11,8 @@ tags:
   - misc
 series:
   - xenial
-  - bionic
-  - cosmic
+  - artful
+  - zesty
   - trusty
 extra-bindings:
   public:
@@ -33,19 +33,14 @@ provides:
 requires:
   shared-db:
     interface: mysql-shared
+  pgsql-db:
+    interface: pgsql
   ha:
     interface: hacluster
     scope: container
   domain-backend:
     interface: keystone-domain-backend
     scope: container
-  keystone-fid-service-provider:
-    interface: keystone-fid-service-provider
-    scope: container
-  websso-trusted-dashboard:
-    interface: websso-trusted-dashboard
-  certificates:
-    interface: tls-certificates
 peers:
   cluster:
     interface: keystone-ha
diff --git a/templates/git/logging.conf b/templates/git/logging.conf
new file mode 100644
index 0000000000000000000000000000000000000000..7a538ae8f1ee5ebc27504b2a7719f5ac0f865119
--- /dev/null
+++ b/templates/git/logging.conf
@@ -0,0 +1,39 @@
+[loggers]
+keys=root
+
+[formatters]
+keys=normal,normal_with_name,debug
+
+[handlers]
+keys=production,file,devel
+
+[logger_root]
+level=WARNING
+handlers=file
+
+[handler_production]
+class=handlers.SysLogHandler
+level=ERROR
+formatter=normal_with_name
+args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)
+
+[handler_file]
+class=FileHandler
+level=DEBUG
+formatter=normal_with_name
+args=('/var/log/keystone/keystone.log', 'a')
+
+[handler_devel]
+class=StreamHandler
+level=NOTSET
+formatter=debug
+args=(sys.stdout,)
+
+[formatter_normal]
+format=%(asctime)s %(levelname)s %(message)s
+
+[formatter_normal_with_name]
+format=(%(name)s): %(asctime)s %(levelname)s %(message)s
+
+[formatter_debug]
+format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s
diff --git a/templates/ocata/keystone.conf b/templates/ocata/keystone.conf
index 666ac6c2057f492e3608a2732ff4d59538f4668a..a0306e5db122b072332330f3642283ac95b1f08e 100644
--- a/templates/ocata/keystone.conf
+++ b/templates/ocata/keystone.conf
@@ -67,7 +67,7 @@ driver = {{ assignment_backend }}
 [oauth1]
 
 [auth]
-methods = external,password,token,oauth1,mapped,openid,totp{% if enable_oidc %},oidc{% endif %}{% if enable_saml2 %},saml2{% endif %}
+methods = external,password,token,oauth1,mapped,openid,totp,application_credential{% if enable_oidc %},oidc{% endif %}{% if enable_saml2 %},saml2{% endif %}
 password = keystone.auth.plugins.password.Password
 token = keystone.auth.plugins.token.Token
 oauth1 = keystone.auth.plugins.oauth1.OAuth
@@ -78,21 +78,6 @@ oidc = keystone.auth.plugins.mapped.Mapped
 saml2 = keystone.auth.plugins.mapped.Mapped
 {% endif -%}
 
-[oidc]
-{% if enable_oidc -%}
-remote_id_attribute = HTTP_OIDC_ISS
-{% endif -%}
-
-[saml2]
-{% if enable_saml2 -%}
-remote_id_attribute = Shib-Identity-Provider
-{% endif -%}
-
-[federation]
-{% if trusted_dashboard -%}
-trusted_dashboard = {{ trusted_dashboard }}
-{% endif -%}
-
 [paste_deploy]
 config_file = {{ paste_config_file }}
 
diff --git a/templates/parts/section-federation b/templates/parts/section-federation
index 65ee99edb6513be50507a625d99de050bbc4775b..99a48bfbd8de977fd8e960933a04c18370751de9 100644
--- a/templates/parts/section-federation
+++ b/templates/parts/section-federation
@@ -1,10 +1,14 @@
-{% if trusted_dashboards %}
+{% if trusted_dashboard %}
 [federation]
-{% for dashboard_url in trusted_dashboards -%}
-trusted_dashboard = {{ dashboard_url }}
-{% endfor -%}
+trusted_dashboard = {{ trusted_dashboard }}
 {% endif %}
-{% for sp in fid_sps -%}
-[{{ sp['protocol-name'] }}]
-remote_id_attribute = {{ sp['remote-id-attribute'] }}
-{% endfor -%}
+
+{% if enable_oidc -%}
+[oidc]
+remote_id_attribute = HTTP_OIDC_ISS
+{% endif -%}
+
+{% if enable_saml2 -%}
+[saml2]
+remote_id_attribute = Shib-Identity-Provider
+{% endif -%}
diff --git a/templates/queens/keystone.conf b/templates/queens/keystone.conf
index ff9868e4a76b46f85d8ecf788b8d1ef1dcdb4555..a0306e5db122b072332330f3642283ac95b1f08e 100644
--- a/templates/queens/keystone.conf
+++ b/templates/queens/keystone.conf
@@ -78,21 +78,6 @@ oidc = keystone.auth.plugins.mapped.Mapped
 saml2 = keystone.auth.plugins.mapped.Mapped
 {% endif -%}
 
-[oidc]
-{% if enable_oidc -%}
-remote_id_attribute = HTTP_OIDC_ISS
-{% endif -%}
-
-[saml2]
-{% if enable_saml2 -%}
-remote_id_attribute = Shib-Identity-Provider
-{% endif -%}
-
-[federation]
-{% if trusted_dashboard -%}
-trusted_dashboard = {{ trusted_dashboard }}
-{% endif -%}
-
 [paste_deploy]
 config_file = {{ paste_config_file }}
 
diff --git a/templates/wsgi-openstack-api.conf b/templates/wsgi-openstack-api.conf
deleted file mode 100644
index 942e2b29d7ceb9a1edba938098d5839f627c2b6e..0000000000000000000000000000000000000000
--- a/templates/wsgi-openstack-api.conf
+++ /dev/null
@@ -1,94 +0,0 @@
-# Configuration file maintained by Juju. Local changes may be overwritten.
-
-{% if port -%}
-Listen {{ port }}
-{% endif -%}
-
-{% if admin_port -%}
-Listen {{ admin_port }}
-{% endif -%}
-
-{% if public_port -%}
-Listen {{ public_port }}
-{% endif -%}
-
-{% if port -%}
-<VirtualHost *:{{ port }}>
-    WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
-                      display-name=%{GROUP}
-    WSGIProcessGroup {{ service_name }}
-    WSGIScriptAlias / {{ script }}
-    WSGIApplicationGroup %{GLOBAL}
-    WSGIPassAuthorization On
-    <IfVersion >= 2.4>
-      ErrorLogFormat "%{cu}t %M"
-    </IfVersion>
-    ErrorLog /var/log/apache2/{{ service_name }}_error.log
-    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
-
-    <Directory /usr/bin>
-        <IfVersion >= 2.4>
-            Require all granted
-        </IfVersion>
-        <IfVersion < 2.4>
-            Order allow,deny
-            Allow from all
-        </IfVersion>
-    </Directory>
-    IncludeOptional /etc/apache2/mellon*/sp-location*.conf
-</VirtualHost>
-{% endif -%}
-
-{% if admin_port -%}
-<VirtualHost *:{{ admin_port }}>
-    WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
-                      display-name=%{GROUP}
-    WSGIProcessGroup {{ service_name }}-admin
-    WSGIScriptAlias / {{ admin_script }}
-    WSGIApplicationGroup %{GLOBAL}
-    WSGIPassAuthorization On
-    <IfVersion >= 2.4>
-      ErrorLogFormat "%{cu}t %M"
-    </IfVersion>
-    ErrorLog /var/log/apache2/{{ service_name }}_error.log
-    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
-
-    <Directory /usr/bin>
-        <IfVersion >= 2.4>
-            Require all granted
-        </IfVersion>
-        <IfVersion < 2.4>
-            Order allow,deny
-            Allow from all
-        </IfVersion>
-    </Directory>
-    IncludeOptional /etc/apache2/mellon*/sp-location*.conf
-</VirtualHost>
-{% endif -%}
-
-{% if public_port -%}
-<VirtualHost *:{{ public_port }}>
-    WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
-                      display-name=%{GROUP}
-    WSGIProcessGroup {{ service_name }}-public
-    WSGIScriptAlias / {{ public_script }}
-    WSGIApplicationGroup %{GLOBAL}
-    WSGIPassAuthorization On
-    <IfVersion >= 2.4>
-      ErrorLogFormat "%{cu}t %M"
-    </IfVersion>
-    ErrorLog /var/log/apache2/{{ service_name }}_error.log
-    CustomLog /var/log/apache2/{{ service_name }}_access.log combined
-
-    <Directory /usr/bin>
-        <IfVersion >= 2.4>
-            Require all granted
-        </IfVersion>
-        <IfVersion < 2.4>
-            Order allow,deny
-            Allow from all
-        </IfVersion>
-    </Directory>
-    IncludeOptional /etc/apache2/mellon*/sp-location*.conf
-</VirtualHost>
-{% endif -%}
diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py
index 70b114411879dce614255a9024954b40198ca964..7216dafb75f6974fc80fe20576c44ddf82c2506e 100644
--- a/tests/basic_deployment.py
+++ b/tests/basic_deployment.py
@@ -21,6 +21,7 @@ Basic keystone amulet functional tests.
 import amulet
 import json
 import os
+import yaml
 
 from charmhelpers.contrib.openstack.amulet.deployment import (
     OpenStackAmuletDeployment
@@ -48,8 +49,9 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
         """Deploy the entire test environment."""
         super(KeystoneBasicDeployment, self).__init__(series, openstack,
                                                       source, stable)
-
-        self._initialize_deployment_differences()
+        self.keystone_num_units = 3
+        self.keystone_api_version = 2
+        self.git = git
 
         self._setup_test_object(snap_source)
         self._add_services()
@@ -63,11 +65,6 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
 
         self.d.sentry.wait()
         self._initialize_tests()
-        self._initialize_test_differences()
-
-    def _initialize_deployment_differences(self):
-        self.keystone_num_units = 3
-        self.keystone_api_version = 2
 
     def _setup_test_object(self, snap_source):
         self.snap_source = snap_source
@@ -145,6 +142,33 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
             'preferred-api-version': self.keystone_api_version,
         })
 
+        if self.git:
+            amulet_http_proxy = os.environ.get('AMULET_HTTP_PROXY')
+
+            reqs_repo = 'git://github.com/openstack/requirements'
+            keystone_repo = 'git://github.com/openstack/keystone'
+            if self._get_openstack_release() == self.trusty_icehouse:
+                reqs_repo = 'git://github.com/coreycb/requirements'
+                keystone_repo = 'git://github.com/coreycb/keystone'
+
+            branch = 'stable/' + self._get_openstack_release_string()
+
+            openstack_origin_git = {
+                'repositories': [
+                    {'name': 'requirements',
+                     'repository': reqs_repo,
+                     'branch': branch},
+                    {'name': 'keystone',
+                     'repository': keystone_repo,
+                     'branch': branch},
+                ],
+                'directory': '/mnt/openstack-git',
+                'http_proxy': amulet_http_proxy,
+                'https_proxy': amulet_http_proxy,
+            }
+            self.keystone_config['openstack-origin-git'] = \
+                yaml.dump(openstack_origin_git)
+
         pxc_config = {
             'dataset-size': '25%',
             'max-connections': 1000,
@@ -186,23 +210,22 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
 
     def set_api_version(self, api_version):
         # Avoid costly settings if we are already at the correct api_version
-        if self.api_change_required(api_version):
-            u.log.debug('Setting preferred-api-version={}'.format(api_version))
-            se_rels = []
-            for i in range(0, self.keystone_num_units):
-                se_rels.append(
-                    (self.keystone_sentries[i], 'cinder:identity-service'),
-                )
-            # Make config change, wait for propagation
-            u.keystone_configure_api_version(se_rels, self, api_version)
+        if not self.api_change_required(api_version):
+            return True
+        u.log.debug('Setting preferred-api-version={}'.format(api_version))
+        se_rels = []
+        for i in range(0, self.keystone_num_units):
+            se_rels.append(
+                (self.keystone_sentries[i], 'cinder:identity-service'),
+            )
+        # Make config change, wait for propagation
+        u.keystone_configure_api_version(se_rels, self, api_version)
 
-        # Store in self.keystone_client
+        # Success if we get here, get and store client.
         if api_version == 2:
             self.keystone_v2 = self.get_keystone_client(api_version=2)
-            self.keystone_client = self.keystone_v2
         else:
             self.keystone_v3 = self.get_keystone_client(api_version=3)
-            self.keystone_client = self.keystone_v3
         self.keystone_api_version = api_version
 
     def get_keystone_client(self, api_version=None, keystone_ip=None):
@@ -227,42 +250,41 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
         self.demo_tenant = 'demoTenant'
         self.demo_role = 'demoRole'
         self.demo_user = 'demoUser'
-        if not u.tenant_exists(self.keystone_client, self.demo_tenant):
-            tenant = self.keystone_client.tenants.create(
+        if not u.tenant_exists(self.keystone_v2, self.demo_tenant):
+            tenant = self.keystone_v2.tenants.create(
                 tenant_name=self.demo_tenant,
                 description='demo tenant',
                 enabled=True)
-            self.keystone_client.roles.create(name=self.demo_role)
-            self.keystone_client.users.create(name=self.demo_user,
-                                              password='password',
-                                              tenant_id=tenant.id,
-                                              email='demo@demo.com')
+            self.keystone_v2.roles.create(name=self.demo_role)
+            self.keystone_v2.users.create(name=self.demo_user,
+                                          password='password',
+                                          tenant_id=tenant.id,
+                                          email='demo@demo.com')
 
         # Authenticate keystone demo
         self.keystone_demo = u.authenticate_keystone_user(
-            self.keystone_client, user=self.demo_user,
+            self.keystone_v2, user=self.demo_user,
             password='password', tenant=self.demo_tenant)
 
     def create_users_v3(self):
         # Create a demo tenant/role/user
         self.demo_project = 'demoProject'
         self.demo_user_v3 = 'demoUserV3'
-        self.demo_role = 'demoRoleV3'
         self.demo_domain_admin = 'demoDomainAdminV3'
         self.demo_domain = 'demoDomain'
         try:
-            domain = self.keystone_client.domains.find(name=self.demo_domain)
+            domain = self.keystone_v3.domains.find(name=self.demo_domain)
         except keystoneclient.exceptions.NotFound:
-            domain = self.keystone_client.domains.create(
+            domain = self.keystone_v3.domains.create(
                 self.demo_domain,
                 description='Demo Domain',
                 enabled=True
             )
 
         try:
-            self.keystone_client.projects.find(name=self.demo_project)
+            self.keystone_v3.projects.find(name=self.demo_project)
         except keystoneclient.exceptions.NotFound:
-            self.keystone_client.projects.create(
+            self.keystone_v3.projects.create(
                 self.demo_project,
                 domain,
                 description='Demo Project',
@@ -270,14 +292,14 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
             )
 
         try:
-            self.keystone_client.roles.find(name=self.demo_role)
+            self.keystone_v3.roles.find(name=self.demo_role)
         except keystoneclient.exceptions.NotFound:
-            self.keystone_client.roles.create(name=self.demo_role)
+            self.keystone_v3.roles.create(name=self.demo_role)
 
-        if not self.find_keystone_v3_user(self.keystone_client,
+        if not self.find_keystone_v3_user(self.keystone_v3,
                                           self.demo_user_v3,
                                           self.demo_domain):
-            self.keystone_client.users.create(
+            self.keystone_v3.users.create(
                 self.demo_user_v3,
                 domain=domain.id,
                 project=self.demo_project,
@@ -287,14 +309,14 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
                 enabled=True)
 
         try:
-            self.keystone_client.roles.find(name='Admin')
+            self.keystone_v3.roles.find(name='Admin')
         except keystoneclient.exceptions.NotFound:
-            self.keystone_client.roles.create(name='Admin')
+            self.keystone_v3.roles.create(name='Admin')
 
-        if not self.find_keystone_v3_user(self.keystone_client,
+        if not self.find_keystone_v3_user(self.keystone_v3,
                                           self.demo_domain_admin,
                                           self.demo_domain):
-            user = self.keystone_client.users.create(
+            user = self.keystone_v3.users.create(
                 self.demo_domain_admin,
                 domain=domain.id,
                 project=self.demo_project,
@@ -303,10 +325,10 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
                 description='Demo Admin',
                 enabled=True)
 
-            role = self.keystone_client.roles.find(name='Admin')
-            u.log.debug("self.keystone_client.roles.grant('{}', user='{}', "
+            role = self.keystone_v3.roles.find(name='Admin')
+            u.log.debug("self.keystone_v3.roles.grant('{}', user='{}', "
                         "domain='{}')".format(role.id, user.id, domain.id))
-            self.keystone_client.roles.grant(
+            self.keystone_v3.roles.grant(
                 role.id,
                 user=user.id,
                 domain=domain.id)
@@ -326,8 +348,6 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
         self.keystone_ip = self.keystone_sentries[0].relation(
             'shared-db',
             'percona-cluster:shared-db')['private-address']
-
-    def _initialize_test_differences(self):
         self.set_api_version(2)
         self.create_users_v2()
 
@@ -378,7 +398,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
 
     def test_102_keystone_tenants(self):
         self.set_api_version(2)
-        self.validate_keystone_tenants(self.keystone_client)
+        self.validate_keystone_tenants(self.keystone_v2)
 
     def validate_keystone_roles(self, client):
         """Verify all existing roles."""
@@ -397,7 +417,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
 
     def test_104_keystone_roles(self):
         self.set_api_version(2)
-        self.validate_keystone_roles(self.keystone_client)
+        self.validate_keystone_roles(self.keystone_v2)
 
     def validate_keystone_users(self, client):
         """Verify all existing roles."""
@@ -406,7 +426,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
         if self._get_openstack_release() < self.xenial_pike:
             cinder_user = 'cinder_cinderv2'
         else:
-            cinder_user = 'cinderv2_cinderv3'
+            cinder_user = 'cinderv3_cinderv2'
         base = [
             {'name': 'demoUser',
              'enabled': True,
@@ -453,7 +473,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
 
     def test_106_keystone_users(self):
         self.set_api_version(2)
-        self.validate_keystone_users(self.keystone_client)
+        self.validate_keystone_users(self.keystone_v2)
 
     def is_liberty_or_newer(self):
         # os_release = self._get_openstack_release_string()
@@ -478,15 +498,15 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
     def test_112_keystone_list_resources(self):
         if self.is_mitaka_or_newer():
             self.set_api_version(3)
-            self.validate_keystone_tenants(self.keystone_client)
-            self.validate_keystone_roles(self.keystone_client)
-            self.validate_keystone_users(self.keystone_client)
+            self.validate_keystone_tenants(self.keystone_v3)
+            self.validate_keystone_roles(self.keystone_v3)
+            self.validate_keystone_users(self.keystone_v3)
 
     def test_118_keystone_create_users(self):
         if self.is_mitaka_or_newer():
             self.set_api_version(3)
             self.create_users_v3()
-            actual_user = self.find_keystone_v3_user(self.keystone_client,
+            actual_user = self.find_keystone_v3_user(self.keystone_v3,
                                                      self.demo_user_v3,
                                                      self.demo_domain)
             assert actual_user is not None
@@ -507,7 +527,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
         if self.is_mitaka_or_newer():
             self.set_api_version(3)
             self.create_users_v3()
-            actual_domain = self.keystone_client.domains.find(
+            actual_domain = self.keystone_v3.domains.find(
                 name=self.demo_domain
             )
             expect = {
@@ -606,8 +626,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
     def test_140_keystone_endpoint(self):
         """Verify the keystone endpoint data."""
         u.log.debug('Checking keystone api endpoint data...')
-        self.set_api_version(2)
-        endpoints = self.keystone_client.endpoints.list()
+        endpoints = self.keystone_v2.endpoints.list()
         admin_port = '35357'
         internal_port = public_port = '5000'
         expected = {
@@ -627,7 +646,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
     def test_142_cinder_endpoint(self):
         """Verify the cinder endpoint data."""
         u.log.debug('Checking cinder endpoint...')
-        endpoints = self.keystone_client.endpoints.list()
+        endpoints = self.keystone_v2.endpoints.list()
         admin_port = internal_port = public_port = '8776'
         expected = {
             'id': u.not_null,
@@ -694,7 +713,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
             'service_host': u.valid_ip
         }
         if self._get_openstack_release() >= self.xenial_pike:
-            expected['service_username'] = 'cinderv2_cinderv3'
+            expected['service_username'] = 'cinderv3_cinderv2'
         for unit in self.keystone_sentries:
             ret = u.validate_relation_data(unit, relation, expected)
             if ret:
@@ -905,7 +924,6 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
                 amulet.raise_status(amulet.FAIL, msg=msg)
 
         self.d.configure(juju_service, set_default)
-        self._auto_wait_for_status(exclude_services=self.exclude_services)
 
         u.log.debug('OK')
 
@@ -958,11 +976,11 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
                 message="Unit is ready",
                 timeout=timeout,
                 include_only=['keystone'])
-            domain = self.keystone_client.domains.find(name='admin_domain')
-            v3_admin_user = self.keystone_client.users.list(domain=domain)[0]
+            domain = self.keystone_v3.domains.find(name='admin_domain')
+            v3_admin_user = self.keystone_v3.users.list(domain=domain)[0]
             u.log.debug(v3_admin_user)
-            self.keystone_client.users.update(user=v3_admin_user,
-                                              password='wrongpass')
+            self.keystone_v3.users.update(user=v3_admin_user,
+                                          password='wrongpass')
             u.log.debug('Removing keystone percona-cluster relation')
             self.d.unrelate('keystone:shared-db', 'percona-cluster:shared-db')
             self.d.sentry.wait(timeout=timeout)
@@ -987,179 +1005,3 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
                     amulet.FAIL,
                     msg="Admin user password not reset")
             u.log.debug('OK')
-
-
-class KeystoneV3Deployment(KeystoneBasicDeployment):
-    """Amulet tests on a basic keystone deployment."""
-
-    def _initialize_deployment_differences(self):
-        self.keystone_num_units = 3
-        self.keystone_api_version = 3
-
-    def _initialize_test_differences(self):
-        self.keystone_client = self.get_keystone_client(api_version=3)
-        self.create_users_v3()
-
-    def api_change_required(self, api_version):
-        u.log.warn('This is a Keystone V3 only deployment.')
-        return False
-
-    def set_api_version(self, api_version):
-        u.log.warn('This is a Keystone V3 only deployment. '
-                   'Ignoring request for api version 2')
-
-    def validate_keystone_tenants(self, client):
-        """Verify all existing tenants."""
-        u.log.debug('Checking keystone tenants...')
-        expected = [
-            {'name': 'services',
-             'enabled': True,
-             'description': 'Created by Juju',
-             'id': u.not_null},
-            {'name': 'demoProject',
-             'enabled': True,
-             'description': 'Demo Project',
-             'id': u.not_null},
-            {'name': 'admin',
-             'enabled': True,
-             'description': 'Created by Juju',
-             'id': u.not_null}
-        ]
-        actual = client.projects.list()
-
-        ret = u.validate_tenant_data(expected, actual)
-        if ret:
-            amulet.raise_status(amulet.FAIL, msg=ret)
-
-    def validate_keystone_roles(self, client):
-        """Verify all existing roles."""
-        u.log.debug('Checking keystone roles...')
-        expected = [
-            {'name': 'demoRoleV3',
-             'id': u.not_null},
-            {'name': 'Admin',
-             'id': u.not_null}
-        ]
-        actual = client.roles.list()
-
-        ret = u.validate_role_data(expected, actual)
-        if ret:
-            amulet.raise_status(amulet.FAIL, msg=ret)
-
-    def validate_keystone_users(self, client):
-        """Verify all existing roles."""
-        u.log.debug('Checking keystone users...')
-
-        if self._get_openstack_release() < self.xenial_pike:
-            cinder_user = 'cinder_cinderv2'
-        else:
-            cinder_user = 'cinderv2_cinderv3'
-        base = [
-            {'name': 'demoUserV3',
-             'enabled': True,
-             'id': u.not_null,
-             'email': 'demov3@demo.com'},
-            {'name': 'admin',
-             'enabled': True,
-             'id': u.not_null,
-             'email': 'juju@localhost'},
-            {'name': cinder_user,
-             'enabled': True,
-             'id': u.not_null,
-             'email': u'juju@localhost'}
-        ]
-        expected = []
-        for user_info in base:
-            user_info['default_project_id'] = u.not_null
-            expected.append(user_info)
-        # Ensure list is scoped to the default domain
-        # when checking v3 users (v2->v3 upgrade check)
-        actual = client.users.list(
-            domain=client.domains.find(name=self.DEFAULT_DOMAIN).id
-        )
-        actual += client.users.list(
-            domain=client.domains.find(name=self.demo_domain).id)
-        actual += client.users.list(
-            domain=client.domains.find(name='admin_domain').id)
-        ret = u.validate_user_data(expected, actual,
-                                   api_version=self.keystone_api_version)
-        if ret:
-            amulet.raise_status(amulet.FAIL, msg=ret)
-
-    def test_138_service_catalog(self):
-        """Verify that the service catalog endpoint data is valid."""
-        u.log.debug('Checking keystone service catalog...')
-        expected = {
-            u'identity': [{u'id': u.not_null,
-                           u'interface': u'admin',
-                           u'region': u'RegionOne',
-                           u'region_id': u'RegionOne',
-                           u'url': u.valid_url},
-                          {u'id': u.not_null,
-                           u'interface': u'public',
-                           u'region': u'RegionOne',
-                           u'region_id': u'RegionOne',
-                           u'url': u.valid_url},
-                          {u'id': u.not_null,
-                           u'interface': u'internal',
-                           u'region': u'RegionOne',
-                           u'region_id': u'RegionOne',
-                           u'url': u.valid_url}],
-
-            u'volumev2': [{u'id': u.not_null,
-                           u'interface': u'admin',
-                           u'region': u'RegionOne',
-                           u'region_id': u'RegionOne',
-                           u'url': u.valid_url},
-                          {u'id': u.not_null,
-                           u'interface': u'public',
-                           u'region': u'RegionOne',
-                           u'region_id': u'RegionOne',
-                           u'url': u.valid_url},
-                          {u'id': u.not_null,
-                           u'interface': u'internal',
-                           u'region': u'RegionOne',
-                           u'region_id': u'RegionOne',
-                           u'url': u.valid_url}]}
-
-        actual = self.keystone_client.service_catalog.get_endpoints()
-        ret = u.validate_v3_svc_catalog_endpoint_data(expected, actual)
-        if ret:
-            amulet.raise_status(amulet.FAIL, msg=ret)
-
-    def test_140_keystone_endpoint(self):
-        """Verify the keystone endpoint data."""
-        u.log.debug('Checking keystone api endpoint data...')
-        admin_port = '35357'
-        internal_port = public_port = '5000'
-        expected = {'id': u.not_null,
-                    'region': 'RegionOne',
-                    'region_id': 'RegionOne',
-                    'interface': u.not_null,
-                    'url': u.valid_url,
-                    'service_id': u.not_null}
-
-        endpoints = self.keystone_client.endpoints.list()
-        ret = u.validate_v3_endpoint_data(endpoints, admin_port, internal_port,
-                                          public_port, expected)
-        if ret:
-            amulet.raise_status(amulet.FAIL,
-                                msg='keystone endpoint: {}'.format(ret))
-
-    def test_142_cinder_endpoint(self):
-        """Verify the cinder endpoint data."""
-        u.log.debug('Checking cinder endpoint...')
-        admin_port = internal_port = public_port = '8776'
-        expected = {'id': u.not_null,
-                    'region': 'RegionOne',
-                    'region_id': 'RegionOne',
-                    'interface': u.not_null,
-                    'url': u.valid_url,
-                    'service_id': u.not_null}
-        endpoints = self.keystone_client.endpoints.list()
-        ret = u.validate_v3_endpoint_data(endpoints, admin_port, internal_port,
-                                          public_port, expected,
-                                          expected_num_eps=6)
-        if ret:
-            amulet.raise_status(amulet.FAIL,
-                                msg='cinder endpoint: {}'.format(ret))
diff --git a/tests/charmhelpers/contrib/amulet/deployment.py b/tests/charmhelpers/contrib/amulet/deployment.py
index d21d01d8ffe242d686283b0ed977b88be6bfc74e..9c65518e1c4c6ff6f508ff7e046ce2b91f961f4c 100644
--- a/tests/charmhelpers/contrib/amulet/deployment.py
+++ b/tests/charmhelpers/contrib/amulet/deployment.py
@@ -50,8 +50,7 @@ class AmuletDeployment(object):
             this_service['units'] = 1
 
         self.d.add(this_service['name'], units=this_service['units'],
-                   constraints=this_service.get('constraints'),
-                   storage=this_service.get('storage'))
+                   constraints=this_service.get('constraints'))
 
         for svc in other_services:
             if 'location' in svc:
@@ -65,8 +64,7 @@ class AmuletDeployment(object):
                 svc['units'] = 1
 
             self.d.add(svc['name'], charm=branch_location, units=svc['units'],
-                       constraints=svc.get('constraints'),
-                       storage=svc.get('storage'))
+                       constraints=svc.get('constraints'))
 
     def _add_relations(self, relations):
         """Add all of the relations for the services."""
diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
index 1c96752a49fb36f389cd1ede38b31afb94127e42..5afbbd87c13e2b168b088c4da51b3b63ab4d07a2 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -21,9 +21,6 @@ from collections import OrderedDict
 from charmhelpers.contrib.amulet.deployment import (
     AmuletDeployment
 )
-from charmhelpers.contrib.openstack.amulet.utils import (
-    OPENSTACK_RELEASES_PAIRS
-)
 
 DEBUG = logging.DEBUG
 ERROR = logging.ERROR
@@ -274,8 +271,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
            release.
            """
         # Must be ordered by OpenStack release (not by Ubuntu release):
-        for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
-            setattr(self, os_pair, i)
+        (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty,
+         self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton,
+         self.yakkety_newton, self.xenial_ocata, self.zesty_ocata,
+         self.xenial_pike, self.artful_pike, self.xenial_queens,
+         self.bionic_queens,) = range(13)
 
         releases = {
             ('trusty', None): self.trusty_icehouse,
@@ -291,8 +291,6 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('zesty', None): self.zesty_ocata,
             ('artful', None): self.artful_pike,
             ('bionic', None): self.bionic_queens,
-            ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
-            ('cosmic', None): self.cosmic_rocky,
         }
         return releases[(self.series, self.openstack)]
 
@@ -308,7 +306,6 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('zesty', 'ocata'),
             ('artful', 'pike'),
             ('bionic', 'queens'),
-            ('cosmic', 'rocky'),
         ])
         if self.openstack:
             os_origin = self.openstack.split(':')[1]
diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py
index ef4ab54bc8d1a988f827d2b766c3d1f20f0238e1..b71b2b1910959f5dbe7860ff3d14d45b8e9e2d90 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -40,7 +40,6 @@ import novaclient
 import pika
 import swiftclient
 
-from charmhelpers.core.decorators import retry_on_exception
 from charmhelpers.contrib.amulet.utils import (
     AmuletUtils
 )
@@ -51,13 +50,6 @@ ERROR = logging.ERROR
 
 NOVA_CLIENT_VERSION = "2"
 
-OPENSTACK_RELEASES_PAIRS = [
-    'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
-    'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
-    'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
-    'xenial_pike', 'artful_pike', 'xenial_queens',
-    'bionic_queens', 'bionic_rocky', 'cosmic_rocky']
-
 
 class OpenStackAmuletUtils(AmuletUtils):
     """OpenStack amulet utilities.
@@ -71,34 +63,7 @@ class OpenStackAmuletUtils(AmuletUtils):
         super(OpenStackAmuletUtils, self).__init__(log_level)
 
     def validate_endpoint_data(self, endpoints, admin_port, internal_port,
-                               public_port, expected, openstack_release=None):
-        """Validate endpoint data. Pick the correct validator based on
-           OpenStack release. Expected data should be in the v2 format:
-           {
-               'id': id,
-               'region': region,
-               'adminurl': adminurl,
-               'internalurl': internalurl,
-               'publicurl': publicurl,
-               'service_id': service_id}
-
-           """
-        validation_function = self.validate_v2_endpoint_data
-        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
-        if openstack_release and openstack_release >= xenial_queens:
-                validation_function = self.validate_v3_endpoint_data
-                expected = {
-                    'id': expected['id'],
-                    'region': expected['region'],
-                    'region_id': 'RegionOne',
-                    'url': self.valid_url,
-                    'interface': self.not_null,
-                    'service_id': expected['service_id']}
-        return validation_function(endpoints, admin_port, internal_port,
-                                   public_port, expected)
-
-    def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
-                                  public_port, expected):
+                               public_port, expected):
         """Validate endpoint data.
 
            Validate actual endpoint data vs expected endpoint data. The ports
@@ -127,7 +92,7 @@ class OpenStackAmuletUtils(AmuletUtils):
             return 'endpoint not found'
 
     def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
-                                  public_port, expected, expected_num_eps=3):
+                                  public_port, expected):
         """Validate keystone v3 endpoint data.
 
         Validate the v3 endpoint data which has changed from v2.  The
@@ -173,89 +138,10 @@ class OpenStackAmuletUtils(AmuletUtils):
                 if ret:
                     return 'unexpected endpoint data - {}'.format(ret)
 
-        if len(found) != expected_num_eps:
+        if len(found) != 3:
             return 'Unexpected number of endpoints found'
 
-    def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
-        """Convert v2 endpoint data into v3.
-
-           {
-               'service_name1': [
-                   {
-                       'adminURL': adminURL,
-                       'id': id,
-                       'region': region.
-                       'publicURL': publicURL,
-                       'internalURL': internalURL
-                   }],
-               'service_name2': [
-                   {
-                       'adminURL': adminURL,
-                       'id': id,
-                       'region': region.
-                       'publicURL': publicURL,
-                       'internalURL': internalURL
-                   }],
-           }
-          """
-        self.log.warn("Endpoint ID and Region ID validation is limited to not "
-                      "null checks after v2 to v3 conversion")
-        for svc in ep_data.keys():
-            assert len(ep_data[svc]) == 1, "Unknown data format"
-            svc_ep_data = ep_data[svc][0]
-            ep_data[svc] = [
-                {
-                    'url': svc_ep_data['adminURL'],
-                    'interface': 'admin',
-                    'region': svc_ep_data['region'],
-                    'region_id': self.not_null,
-                    'id': self.not_null},
-                {
-                    'url': svc_ep_data['publicURL'],
-                    'interface': 'public',
-                    'region': svc_ep_data['region'],
-                    'region_id': self.not_null,
-                    'id': self.not_null},
-                {
-                    'url': svc_ep_data['internalURL'],
-                    'interface': 'internal',
-                    'region': svc_ep_data['region'],
-                    'region_id': self.not_null,
-                    'id': self.not_null}]
-        return ep_data
-
-    def validate_svc_catalog_endpoint_data(self, expected, actual,
-                                           openstack_release=None):
-        """Validate service catalog endpoint data. Pick the correct validator
-           for the OpenStack version. Expected data should be in the v2 format:
-           {
-               'service_name1': [
-                   {
-                       'adminURL': adminURL,
-                       'id': id,
-                       'region': region.
-                       'publicURL': publicURL,
-                       'internalURL': internalURL
-                   }],
-               'service_name2': [
-                   {
-                       'adminURL': adminURL,
-                       'id': id,
-                       'region': region.
-                       'publicURL': publicURL,
-                       'internalURL': internalURL
-                   }],
-           }
-
-           """
-        validation_function = self.validate_v2_svc_catalog_endpoint_data
-        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
-        if openstack_release and openstack_release >= xenial_queens:
-            validation_function = self.validate_v3_svc_catalog_endpoint_data
-            expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
-        return validation_function(expected, actual)
-
-    def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
+    def validate_svc_catalog_endpoint_data(self, expected, actual):
         """Validate service catalog endpoint data.
 
            Validate a list of actual service catalog endpoints vs a list of
@@ -424,7 +310,6 @@ class OpenStackAmuletUtils(AmuletUtils):
         self.log.debug('Checking if tenant exists ({})...'.format(tenant))
         return tenant in [t.name for t in keystone.tenants.list()]
 
-    @retry_on_exception(num_retries=5, base_delay=1)
     def keystone_wait_for_propagation(self, sentry_relation_pairs,
                                       api_version):
         """Iterate over list of sentry and relation tuples and verify that
@@ -443,7 +328,7 @@ class OpenStackAmuletUtils(AmuletUtils):
             if rel.get('api_version') != str(api_version):
                 raise Exception("api_version not propagated through relation"
                                 " data yet ('{}' != '{}')."
-                                "".format(rel.get('api_version'), api_version))
+                                "".format(rel['api_version'], api_version))
 
     def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
                                        api_version):
@@ -465,13 +350,16 @@ class OpenStackAmuletUtils(AmuletUtils):
         deployment._auto_wait_for_status()
         self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
 
-    def authenticate_cinder_admin(self, keystone, api_version=2):
+    def authenticate_cinder_admin(self, keystone_sentry, username,
+                                  password, tenant, api_version=2):
         """Authenticates admin user with cinder."""
-        self.log.debug('Authenticating cinder admin...')
+        # NOTE(beisner): cinder python client doesn't accept tokens.
+        keystone_ip = keystone_sentry.info['public-address']
+        ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
         _clients = {
             1: cinder_client.Client,
             2: cinder_clientv2.Client}
-        return _clients[api_version](session=keystone.session)
+        return _clients[api_version](username, password, tenant, ept)
 
     def authenticate_keystone(self, keystone_ip, username, password,
                               api_version=False, admin_port=False,
@@ -479,36 +367,13 @@ class OpenStackAmuletUtils(AmuletUtils):
                               project_domain_name=None, project_name=None):
         """Authenticate with Keystone"""
         self.log.debug('Authenticating with keystone...')
-        if not api_version:
-            api_version = 2
-        sess, auth = self.get_keystone_session(
-            keystone_ip=keystone_ip,
-            username=username,
-            password=password,
-            api_version=api_version,
-            admin_port=admin_port,
-            user_domain_name=user_domain_name,
-            domain_name=domain_name,
-            project_domain_name=project_domain_name,
-            project_name=project_name
-        )
-        if api_version == 2:
-            client = keystone_client.Client(session=sess)
-        else:
-            client = keystone_client_v3.Client(session=sess)
-        # This populates the client.service_catalog
-        client.auth_ref = auth.get_access(sess)
-        return client
-
-    def get_keystone_session(self, keystone_ip, username, password,
-                             api_version=False, admin_port=False,
-                             user_domain_name=None, domain_name=None,
-                             project_domain_name=None, project_name=None):
-        """Return a keystone session object"""
-        ep = self.get_keystone_endpoint(keystone_ip,
-                                        api_version=api_version,
-                                        admin_port=admin_port)
-        if api_version == 2:
+        port = 5000
+        if admin_port:
+            port = 35357
+        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
+                                        port)
+        if not api_version or api_version == 2:
+            ep = base_ep + "/v2.0"
             auth = v2.Password(
                 username=username,
                 password=password,
@@ -516,7 +381,12 @@ class OpenStackAmuletUtils(AmuletUtils):
                 auth_url=ep
             )
             sess = keystone_session.Session(auth=auth)
+            client = keystone_client.Client(session=sess)
+            # This populates the client.service_catalog
+            client.auth_ref = auth.get_access(sess)
+            return client
         else:
+            ep = base_ep + "/v3"
             auth = v3.Password(
                 user_domain_name=user_domain_name,
                 username=username,
@@ -527,57 +397,10 @@ class OpenStackAmuletUtils(AmuletUtils):
                 auth_url=ep
             )
             sess = keystone_session.Session(auth=auth)
-        return (sess, auth)
-
-    def get_keystone_endpoint(self, keystone_ip, api_version=None,
-                              admin_port=False):
-        """Return keystone endpoint"""
-        port = 5000
-        if admin_port:
-            port = 35357
-        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
-                                        port)
-        if api_version == 2:
-            ep = base_ep + "/v2.0"
-        else:
-            ep = base_ep + "/v3"
-        return ep
-
-    def get_default_keystone_session(self, keystone_sentry,
-                                     openstack_release=None, api_version=2):
-        """Return a keystone session object and client object assuming standard
-           default settings
-
-           Example call in amulet tests:
-               self.keystone_session, self.keystone = u.get_default_keystone_session(
-                   self.keystone_sentry,
-                   openstack_release=self._get_openstack_release())
-
-           The session can then be used to auth other clients:
-               neutronclient.Client(session=session)
-               aodh_client.Client(session=session)
-               eyc
-        """
-        self.log.debug('Authenticating keystone admin...')
-        # 11 => xenial_queens
-        if api_version == 3 or (openstack_release and openstack_release >= 11):
-            client_class = keystone_client_v3.Client
-            api_version = 3
-        else:
-            client_class = keystone_client.Client
-        keystone_ip = keystone_sentry.info['public-address']
-        session, auth = self.get_keystone_session(
-            keystone_ip,
-            api_version=api_version,
-            username='admin',
-            password='openstack',
-            project_name='admin',
-            user_domain_name='admin_domain',
-            project_domain_name='admin_domain')
-        client = client_class(session=session)
-        # This populates the client.service_catalog
-        client.auth_ref = auth.get_access(session)
-        return session, client
+            client = keystone_client_v3.Client(session=sess)
+            # This populates the client.service_catalog
+            client.auth_ref = auth.get_access(sess)
+            return client
 
     def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                     tenant=None, api_version=None,
@@ -1035,12 +858,9 @@ class OpenStackAmuletUtils(AmuletUtils):
         :returns: List of pool name, object count, kb disk space used
         """
         df = self.get_ceph_df(sentry_unit)
-        for pool in df['pools']:
-            if pool['id'] == pool_id:
-                pool_name = pool['name']
-                obj_count = pool['stats']['objects']
-                kb_used = pool['stats']['kb_used']
-
+        pool_name = df['pools'][pool_id]['name']
+        obj_count = df['pools'][pool_id]['stats']['objects']
+        kb_used = df['pools'][pool_id]['stats']['kb_used']
         self.log.debug('Ceph {} pool (ID {}): {} objects, '
                        '{} kb used'.format(pool_name, pool_id,
                                            obj_count, kb_used))
diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py
index ed7af39e36fa0b921d42edb94cff997bf01135d1..5a88f798e89546ad9128cb7d4a1cb8bd6e69a644 100644
--- a/tests/charmhelpers/core/hookenv.py
+++ b/tests/charmhelpers/core/hookenv.py
@@ -27,7 +27,6 @@ import glob
 import os
 import json
 import yaml
-import re
 import subprocess
 import sys
 import errno
@@ -40,7 +39,6 @@ if not six.PY3:
 else:
     from collections import UserDict
 
-
 CRITICAL = "CRITICAL"
 ERROR = "ERROR"
 WARNING = "WARNING"
@@ -68,7 +66,7 @@ def cached(func):
     @wraps(func)
     def wrapper(*args, **kwargs):
         global cache
-        key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
+        key = str((func, args, kwargs))
         try:
             return cache[key]
         except KeyError:
@@ -290,7 +288,7 @@ class Config(dict):
         self.implicit_save = True
         self._prev_dict = None
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
-        if os.path.exists(self.path) and os.stat(self.path).st_size:
+        if os.path.exists(self.path):
             self.load_previous()
         atexit(self._implicit_save)
 
@@ -310,11 +308,7 @@ class Config(dict):
         """
         self.path = path or self.path
         with open(self.path) as f:
-            try:
-                self._prev_dict = json.load(f)
-            except ValueError as e:
-                log('Unable to parse previous config data - {}'.format(str(e)),
-                    level=ERROR)
+            self._prev_dict = json.load(f)
         for k, v in copy.deepcopy(self._prev_dict).items():
             if k not in self:
                 self[k] = v
@@ -350,7 +344,6 @@ class Config(dict):
 
         """
         with open(self.path, 'w') as f:
-            os.fchmod(f.fileno(), 0o600)
             json.dump(self, f)
 
     def _implicit_save(self):
@@ -358,40 +351,22 @@ class Config(dict):
             self.save()
 
 
-_cache_config = None
-
-
+@cached
 def config(scope=None):
-    """
-    Get the juju charm configuration (scope==None) or individual key,
-    (scope=str).  The returned value is a Python data structure loaded as
-    JSON from the Juju config command.
-
-    :param scope: If set, return the value for the specified key.
-    :type scope: Optional[str]
-    :returns: Either the whole config as a Config, or a key from it.
-    :rtype: Any
-    """
-    global _cache_config
-    config_cmd_line = ['config-get', '--all', '--format=json']
-    try:
-        # JSON Decode Exception for Python3.5+
-        exc_json = json.decoder.JSONDecodeError
-    except AttributeError:
-        # JSON Decode Exception for Python2.7 through Python3.4
-        exc_json = ValueError
+    """Juju charm configuration"""
+    config_cmd_line = ['config-get']
+    if scope is not None:
+        config_cmd_line.append(scope)
+    else:
+        config_cmd_line.append('--all')
+    config_cmd_line.append('--format=json')
     try:
-        if _cache_config is None:
-            config_data = json.loads(
-                subprocess.check_output(config_cmd_line).decode('UTF-8'))
-            _cache_config = Config(config_data)
+        config_data = json.loads(
+            subprocess.check_output(config_cmd_line).decode('UTF-8'))
         if scope is not None:
-            return _cache_config.get(scope)
-        return _cache_config
-    except (exc_json, UnicodeDecodeError) as e:
-        log('Unable to parse output from config-get: config_cmd_line="{}" '
-            'message="{}"'
-            .format(config_cmd_line, str(e)), level=ERROR)
+            return config_data
+        return Config(config_data)
+    except ValueError:
         return None
 
 
@@ -843,10 +818,6 @@ class Hooks(object):
         return wrapper
 
 
-class NoNetworkBinding(Exception):
-    pass
-
-
 def charm_dir():
     """Return the root directory of the current charm"""
     d = os.environ.get('JUJU_CHARM_DIR')
@@ -972,13 +943,6 @@ def application_version_set(version):
         log("Application Version: {}".format(version))
 
 
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def goal_state():
-    """Juju goal state values"""
-    cmd = ['goal-state', '--format=json']
-    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def is_leader():
     """Does the current unit hold the juju leadership
@@ -1073,6 +1037,7 @@ def juju_version():
                                    universal_newlines=True).strip()
 
 
+@cached
 def has_juju_version(minimum_version):
     """Return True if the Juju version is at least the provided version"""
     return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@@ -1132,8 +1097,6 @@ def _run_atexit():
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def network_get_primary_address(binding):
     '''
-    Deprecated since Juju 2.3; use network_get()
-
     Retrieve the primary network address for a named binding
 
     :param binding: string. The name of a relation of extra-binding
@@ -1141,19 +1104,10 @@ def network_get_primary_address(binding):
     :raise: NotImplementedError if run on Juju < 2.0
     '''
     cmd = ['network-get', '--primary-address', binding]
-    try:
-        response = subprocess.check_output(
-            cmd,
-            stderr=subprocess.STDOUT).decode('UTF-8').strip()
-    except CalledProcessError as e:
-        if 'no network config found for binding' in e.output.decode('UTF-8'):
-            raise NoNetworkBinding("No network binding for {}"
-                                   .format(binding))
-        else:
-            raise
-    return response
+    return subprocess.check_output(cmd).decode('UTF-8').strip()
 
 
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def network_get(endpoint, relation_id=None):
     """
     Retrieve the network details for a relation endpoint
@@ -1161,20 +1115,24 @@ def network_get(endpoint, relation_id=None):
     :param endpoint: string. The name of a relation endpoint
     :param relation_id: int. The ID of the relation for the current context.
     :return: dict. The loaded YAML output of the network-get query.
-    :raise: NotImplementedError if request not supported by the Juju version.
+    :raise: NotImplementedError if run on Juju < 2.1
     """
-    if not has_juju_version('2.2'):
-        raise NotImplementedError(juju_version())  # earlier versions require --primary-address
-    if relation_id and not has_juju_version('2.3'):
-        raise NotImplementedError  # 2.3 added the -r option
-
     cmd = ['network-get', endpoint, '--format', 'yaml']
     if relation_id:
         cmd.append('-r')
         cmd.append(relation_id)
-    response = subprocess.check_output(
-        cmd,
-        stderr=subprocess.STDOUT).decode('UTF-8').strip()
+    try:
+        response = subprocess.check_output(
+            cmd,
+            stderr=subprocess.STDOUT).decode('UTF-8').strip()
+    except CalledProcessError as e:
+        # Early versions of Juju 2.0.x required the --primary-address argument.
+        # We catch that condition here and raise NotImplementedError since
+        # the requested semantics are not available - the caller can then
+        # use the network_get_primary_address() method instead.
+        if '--primary-address is currently required' in e.output.decode('UTF-8'):
+            raise NotImplementedError
+        raise
     return yaml.safe_load(response)
 
 
@@ -1230,23 +1188,9 @@ def iter_units_for_relation_name(relation_name):
 
 def ingress_address(rid=None, unit=None):
     """
-    Retrieve the ingress-address from a relation when available.
-    Otherwise, return the private-address.
-
-    When used on the consuming side of the relation (unit is a remote
-    unit), the ingress-address is the IP address that this unit needs
-    to use to reach the provided service on the remote unit.
-
-    When used on the providing side of the relation (unit == local_unit()),
-    the ingress-address is the IP address that is advertised to remote
-    units on this relation. Remote units need to use this address to
-    reach the local provided service on this unit.
-
-    Note that charms may document some other method to use in
-    preference to the ingress_address(), such as an address provided
-    on a different relation attribute or a service discovery mechanism.
-    This allows charms to redirect inbound connections to their peers
-    or different applications such as load balancers.
+    Retrieve the ingress-address from a relation when available. Otherwise,
+    return the private-address. This function is to be used on the consuming
+    side of the relation.
 
     Usage:
     addresses = [ingress_address(rid=u.rid, unit=u.unit)
@@ -1260,40 +1204,3 @@ def ingress_address(rid=None, unit=None):
     settings = relation_get(rid=rid, unit=unit)
     return (settings.get('ingress-address') or
             settings.get('private-address'))
-
-
-def egress_subnets(rid=None, unit=None):
-    """
-    Retrieve the egress-subnets from a relation.
-
-    This function is to be used on the providing side of the
-    relation, and provides the ranges of addresses that client
-    connections may come from. The result is uninteresting on
-    the consuming side of a relation (unit == local_unit()).
-
-    Returns a stable list of subnets in CIDR format.
-    eg. ['192.168.1.0/24', '2001::F00F/128']
-
-    If egress-subnets is not available, falls back to using the published
-    ingress-address, or finally private-address.
-
-    :param rid: string relation id
-    :param unit: string unit name
-    :side effect: calls relation_get
-    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
-    """
-    def _to_range(addr):
-        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
-            addr += '/32'
-        elif ':' in addr and '/' not in addr:  # IPv6
-            addr += '/128'
-        return addr
-
-    settings = relation_get(rid=rid, unit=unit)
-    if 'egress-subnets' in settings:
-        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
-    if 'ingress-address' in settings:
-        return [_to_range(settings['ingress-address'])]
-    if 'private-address' in settings:
-        return [_to_range(settings['private-address'])]
-    return []  # Should never happen
diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py
index 322ab2acd71bb02f13d2d739e74d0ddc62774d9e..5cc5c86b701fc5375f387eb01a0d2b76c184c263 100644
--- a/tests/charmhelpers/core/host.py
+++ b/tests/charmhelpers/core/host.py
@@ -549,8 +549,6 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
         with open(path, 'wb') as target:
             os.fchown(target.fileno(), uid, gid)
             os.fchmod(target.fileno(), perms)
-            if six.PY3 and isinstance(content, six.string_types):
-                content = content.encode('UTF-8')
             target.write(content)
         return
     # the contents were the same, but we might still need to change the
@@ -993,7 +991,7 @@ def updatedb(updatedb_text, new_path):
     return output
 
 
-def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
+def modulo_distribution(modulo=3, wait=30):
     """ Modulo distribution
 
     This helper uses the unit number, a modulo value and a constant wait time
@@ -1015,14 +1013,7 @@ def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
 
     @param modulo: int The modulo number creates the group distribution
     @param wait: int The constant time wait value
-    @param non_zero_wait: boolean Override unit % modulo == 0,
-                          return modulo * wait. Used to avoid collisions with
-                          leader nodes which are often given priority.
     @return: int Calculated time to wait for unit operation
     """
     unit_number = int(local_unit().split('/')[1])
-    calculated_wait_time = (unit_number % modulo) * wait
-    if non_zero_wait and calculated_wait_time == 0:
-        return modulo * wait
-    else:
-        return calculated_wait_time
+    return (unit_number % modulo) * wait
diff --git a/tests/charmhelpers/core/host_factory/ubuntu.py b/tests/charmhelpers/core/host_factory/ubuntu.py
index 99451b59789a822b4f5a96d7310965f1c8921898..d8dc378a5dad29c271a89289e4b815e2c2c99060 100644
--- a/tests/charmhelpers/core/host_factory/ubuntu.py
+++ b/tests/charmhelpers/core/host_factory/ubuntu.py
@@ -20,7 +20,6 @@ UBUNTU_RELEASES = (
     'yakkety',
     'zesty',
     'artful',
-    'bionic',
 )
 
 
diff --git a/tests/charmhelpers/core/services/base.py b/tests/charmhelpers/core/services/base.py
index 179ad4f0c367dd6b13c10b201c3752d1c8daf05e..ca9dc996bd7d7fc2a18b7d9a9ee51adff171bda9 100644
--- a/tests/charmhelpers/core/services/base.py
+++ b/tests/charmhelpers/core/services/base.py
@@ -307,34 +307,23 @@ class PortManagerCallback(ManagerCallback):
     """
     def __call__(self, manager, service_name, event_name):
         service = manager.get_service(service_name)
-        # turn this generator into a list,
-        # as we'll be going over it multiple times
-        new_ports = list(service.get('ports', []))
+        new_ports = service.get('ports', [])
         port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
         if os.path.exists(port_file):
             with open(port_file) as fp:
                 old_ports = fp.read().split(',')
             for old_port in old_ports:
-                if bool(old_port) and not self.ports_contains(old_port, new_ports):
-                    hookenv.close_port(old_port)
+                if bool(old_port):
+                    old_port = int(old_port)
+                    if old_port not in new_ports:
+                        hookenv.close_port(old_port)
         with open(port_file, 'w') as fp:
             fp.write(','.join(str(port) for port in new_ports))
         for port in new_ports:
-            # A port is either a number or 'ICMP'
-            protocol = 'TCP'
-            if str(port).upper() == 'ICMP':
-                protocol = 'ICMP'
             if event_name == 'start':
-                hookenv.open_port(port, protocol)
+                hookenv.open_port(port)
             elif event_name == 'stop':
-                hookenv.close_port(port, protocol)
-
-    def ports_contains(self, port, ports):
-        if not bool(port):
-            return False
-        if str(port).upper() != 'ICMP':
-            port = int(port)
-        return port in ports
+                hookenv.close_port(port)
 
 
 def service_stop(service_name):
diff --git a/tests/charmhelpers/core/sysctl.py b/tests/charmhelpers/core/sysctl.py
index 1f188d8c653f9bf793e18ed484635fce310543cc..6e413e31480e5fb4bcb703d58b1e87f98adc53af 100644
--- a/tests/charmhelpers/core/sysctl.py
+++ b/tests/charmhelpers/core/sysctl.py
@@ -31,22 +31,18 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
 def create(sysctl_dict, sysctl_file):
     """Creates a sysctl.conf file from a YAML associative array
 
-    :param sysctl_dict: a dict or YAML-formatted string of sysctl
-                        options eg "{ 'kernel.max_pid': 1337 }"
+    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
     :type sysctl_dict: str
     :param sysctl_file: path to the sysctl file to be saved
     :type sysctl_file: str or unicode
     :returns: None
     """
-    if type(sysctl_dict) is not dict:
-        try:
-            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
-        except yaml.YAMLError:
-            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
-                level=ERROR)
-            return
-    else:
-        sysctl_dict_parsed = sysctl_dict
+    try:
+        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+    except yaml.YAMLError:
+        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+            level=ERROR)
+        return
 
     with open(sysctl_file, "w") as fd:
         for key, value in sysctl_dict_parsed.items():
diff --git a/tests/charmhelpers/core/templating.py b/tests/charmhelpers/core/templating.py
index 9014015c14ee0b48c775562cd4f0d30884944439..7b801a34a5e6585485347f7a97bc18a10a093d03 100644
--- a/tests/charmhelpers/core/templating.py
+++ b/tests/charmhelpers/core/templating.py
@@ -20,8 +20,7 @@ from charmhelpers.core import hookenv
 
 
 def render(source, target, context, owner='root', group='root',
-           perms=0o444, templates_dir=None, encoding='UTF-8',
-           template_loader=None, config_template=None):
+           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
     """
     Render a template.
 
@@ -33,9 +32,6 @@ def render(source, target, context, owner='root', group='root',
     The context should be a dict containing the values to be replaced in the
     template.
 
-    config_template may be provided to render from a provided template instead
-    of loading from a file.
-
     The `owner`, `group`, and `perms` options will be passed to `write_file`.
 
     If omitted, `templates_dir` defaults to the `templates` folder in the charm.
@@ -69,19 +65,14 @@ def render(source, target, context, owner='root', group='root',
         if templates_dir is None:
             templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
         template_env = Environment(loader=FileSystemLoader(templates_dir))
-
-    # load from a string if provided explicitly
-    if config_template is not None:
-        template = template_env.from_string(config_template)
-    else:
-        try:
-            source = source
-            template = template_env.get_template(source)
-        except exceptions.TemplateNotFound as e:
-            hookenv.log('Could not load template %s from %s.' %
-                        (source, templates_dir),
-                        level=hookenv.ERROR)
-            raise e
+    try:
+        source = source
+        template = template_env.get_template(source)
+    except exceptions.TemplateNotFound as e:
+        hookenv.log('Could not load template %s from %s.' %
+                    (source, templates_dir),
+                    level=hookenv.ERROR)
+        raise e
     content = template.render(context)
     if target is not None:
         target_dir = os.path.dirname(target)
diff --git a/tests/charmhelpers/core/unitdata.py b/tests/charmhelpers/core/unitdata.py
index ab554327b343f896880523fc627c1abea84be29a..7af875c2fcc1e2e38f9267bfdc60ab5a2a499c18 100644
--- a/tests/charmhelpers/core/unitdata.py
+++ b/tests/charmhelpers/core/unitdata.py
@@ -166,10 +166,6 @@ class Storage(object):
 
     To support dicts, lists, integer, floats, and booleans values
     are automatically json encoded/decoded.
-
-    Note: to facilitate unit testing, ':memory:' can be passed as the
-    path parameter which causes sqlite3 to only build the db in memory.
-    This should only be used for testing purposes.
     """
     def __init__(self, path=None):
         self.db_path = path
@@ -179,9 +175,6 @@ class Storage(object):
             else:
                 self.db_path = os.path.join(
                     os.environ.get('CHARM_DIR', ''), '.unit-state.db')
-        if self.db_path != ':memory:':
-            with open(self.db_path, 'a') as f:
-                os.fchmod(f.fileno(), 0o600)
         self.conn = sqlite3.connect('%s' % self.db_path)
         self.cursor = self.conn.cursor()
         self.revision = None
diff --git a/tox.ini b/tox.ini
index 930d52644953836b46da027f0787a557efab13e7..6d44f4b9affa6fd79582a8d52d93f057011eefcc 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,9 +9,9 @@ skipsdist = True
 setenv = VIRTUAL_ENV={envdir}
          PYTHONHASHSEED=0
          CHARM_DIR={envdir}
-         AMULET_SETUP_TIMEOUT=5400
+         AMULET_SETUP_TIMEOUT=2700
 install_command =
-  pip install {opts} {packages}
+  pip install --allow-unverified python-apt {opts} {packages}
 commands = ostestr {posargs}
 whitelist_externals = juju
 passenv = HOME TERM AMULET_* CS_API_*
@@ -26,11 +26,6 @@ basepython = python3.5
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 
-[testenv:py36]
-basepython = python3.6
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
-
 [testenv:pep8]
 basepython = python2.7
 deps = -r{toxinidir}/requirements.txt
@@ -65,7 +60,7 @@ basepython = python2.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 commands =
-    bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy
+    bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy
 
 [testenv:func27-dfs]
 # Charm Functional Test
diff --git a/unit_tests/test_actions_openstack_upgrade.py b/unit_tests/test_actions_openstack_upgrade.py
index 2ce82aed609144be90bf376f9adf88891791f43f..2e6bee02e9baef0f87622247bea3cce10315fd52 100644
--- a/unit_tests/test_actions_openstack_upgrade.py
+++ b/unit_tests/test_actions_openstack_upgrade.py
@@ -46,9 +46,11 @@ class TestKeystoneUpgradeActions(CharmTestCase):
     @patch.object(openstack_upgrade, 'register_configs')
     @patch('charmhelpers.contrib.openstack.utils.config')
     @patch('charmhelpers.contrib.openstack.utils.action_set')
+    @patch('charmhelpers.contrib.openstack.utils.git_install_requested')
     @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
-    def test_openstack_upgrade_true(self, upgrade_avail,
+    def test_openstack_upgrade_true(self, upgrade_avail, git_requested,
                                     action_set, config, reg_configs):
+        git_requested.return_value = False
         upgrade_avail.return_value = True
         config.return_value = True
 
@@ -61,9 +63,11 @@ class TestKeystoneUpgradeActions(CharmTestCase):
     @patch.object(openstack_upgrade, 'register_configs')
     @patch('charmhelpers.contrib.openstack.utils.config')
     @patch('charmhelpers.contrib.openstack.utils.action_set')
+    @patch('charmhelpers.contrib.openstack.utils.git_install_requested')
     @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
-    def test_openstack_upgrade_false(self, upgrade_avail,
+    def test_openstack_upgrade_false(self, upgrade_avail, git_requested,
                                      action_set, config, reg_configs):
+        git_requested.return_value = False
         upgrade_avail.return_value = True
         config.return_value = False
 
diff --git a/unit_tests/test_keystone_contexts.py b/unit_tests/test_keystone_contexts.py
index ba09b17df2547fbf4e4a409be297aa8e2c9a1220..e245b11b1ff1826e4316f6ea4e1c760e44fd95a4 100644
--- a/unit_tests/test_keystone_contexts.py
+++ b/unit_tests/test_keystone_contexts.py
@@ -37,31 +37,88 @@ class TestKeystoneContexts(CharmTestCase):
     def setUp(self):
         super(TestKeystoneContexts, self).setUp(context, TO_PATCH)
 
+    def test_is_cert_provided_in_config(self):
+        config = {'ssl_cert': 'somecert', 'ssl_key': 'greatkey'}
+
+        def fake_config(key):
+            return config.get(key)
+
+        self.config.side_effect = fake_config
+        self.assertTrue(context.is_cert_provided_in_config())
+
+        del config['ssl_cert']
+        self.assertFalse(context.is_cert_provided_in_config())
+
+    @patch.object(context, 'mkdir')
+    @patch('keystone_utils.get_ca')
+    @patch('keystone_utils.ensure_permissions')
+    @patch('keystone_utils.determine_ports', lambda: None)
+    @patch('keystone_utils.is_ssl_cert_master', lambda: False)
+    @patch.object(context, 'is_cert_provided_in_config', lambda: False)
+    @patch.object(context, 'log', lambda *args, **kwargs: None)
+    def test_apache_ssl_context_ssl_not_master(self, mock_ensure_permissions,
+                                               mock_get_ca, mock_mkdir):
+        context.ApacheSSLContext().configure_cert('foo')
+        context.ApacheSSLContext().configure_ca()
+        self.assertTrue(mock_mkdir.called)
+        self.assertTrue(mock_ensure_permissions.called)
+        self.assertFalse(mock_get_ca.called)
+
+    @patch('keystone_utils.ensure_permissions')
+    @patch.object(context, 'install_ca_cert')
+    @patch.object(context, 'b64decode')
+    @patch.object(context, 'mkdir', lambda *args: None)
+    @patch('keystone_utils.get_ca', lambda: None)
+    @patch('keystone_utils.determine_ports', lambda: None)
+    @patch('keystone_utils.is_ssl_cert_master', lambda: True)
+    @patch.object(context, 'log', lambda *args, **kwargs: None)
+    def test_apache_ssl_context_ssl_configure_ca(self, mock_b64decode,
+                                                 mock_install_ca_cert,
+                                                 mock_ensure_permissions):
+        config = {'ssl_cert': 'somecert', 'ssl_key': 'greatkey'}
+
+        def fake_config(key):
+            return config.get(key)
+
+        self.config.side_effect = fake_config
+
+        context.ApacheSSLContext().configure_ca()
+        self.assertFalse(mock_b64decode.called)
+        self.assertFalse(mock_install_ca_cert.called)
+        self.assertFalse(mock_ensure_permissions.called)
+
+        config['ssl_ca'] = 'foofoofalalala'
+        context.ApacheSSLContext().configure_ca()
+        self.assertTrue(mock_b64decode.called)
+        self.assertTrue(mock_install_ca_cert.called)
+        self.assertTrue(mock_ensure_permissions.called)
+
     @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids')
     @patch('charmhelpers.contrib.openstack.ip.unit_get')
     @patch('charmhelpers.contrib.openstack.ip.service_name')
     @patch('charmhelpers.contrib.openstack.ip.config')
     @patch('keystone_utils.determine_ports')
+    @patch('keystone_utils.is_ssl_cert_master')
     @patch('charmhelpers.contrib.openstack.context.config')
     @patch('charmhelpers.contrib.openstack.context.is_clustered')
     @patch('charmhelpers.contrib.openstack.context.determine_apache_port')
     @patch('charmhelpers.contrib.openstack.context.determine_api_port')
     @patch('charmhelpers.contrib.openstack.context.unit_get')
-    @patch('charmhelpers.contrib.openstack.context.relation_ids')
     @patch('charmhelpers.contrib.openstack.context.https')
     def test_apache_ssl_context_service_enabled(self, mock_https,
-                                                mock_relation_ids,
                                                 mock_unit_get,
                                                 mock_determine_api_port,
                                                 mock_determine_apache_port,
                                                 mock_is_clustered,
                                                 mock_config,
+                                                mock_is_ssl_cert_master,
                                                 mock_determine_ports,
                                                 mock_ip_config,
                                                 mock_service_name,
                                                 mock_ip_unit_get,
                                                 mock_rel_ids,
                                                 ):
+        mock_is_ssl_cert_master.return_value = True
         mock_https.return_value = True
         mock_unit_get.return_value = '1.2.3.4'
         mock_ip_unit_get.return_value = '1.2.3.4'
@@ -85,7 +142,6 @@ class TestKeystoneContexts(CharmTestCase):
         self.assertTrue(mock_https.called)
         mock_unit_get.assert_called_with('private-address')
 
-    @patch('charmhelpers.contrib.openstack.context.get_relation_ip')
     @patch('charmhelpers.contrib.openstack.context.mkdir')
     @patch('keystone_utils.api_port')
     @patch('charmhelpers.contrib.openstack.context.get_netmask_for_address')
@@ -102,12 +158,11 @@ class TestKeystoneContexts(CharmTestCase):
         self, mock_open, mock_kv, mock_log, mock_relation_get,
             mock_related_units, mock_unit_get, mock_relation_ids, mock_config,
             mock_get_address_in_network, mock_get_netmask_for_address,
-            mock_api_port, mock_mkdir, mock_get_relation_ip):
+            mock_api_port, mock_mkdir):
         os.environ['JUJU_UNIT_NAME'] = 'keystone'
 
         mock_relation_ids.return_value = ['identity-service:0', ]
         mock_unit_get.return_value = '1.2.3.4'
-        mock_get_relation_ip.return_value = '1.2.3.4'
         mock_relation_get.return_value = '10.0.0.0'
         mock_related_units.return_value = ['unit/0', ]
         mock_config.return_value = None
@@ -160,204 +215,3 @@ class TestKeystoneContexts(CharmTestCase):
 
         mock_is_elected_leader.return_value = True
         self.assertEqual({'token_flush': True}, ctxt())
-
-    @patch.object(context, 'relation_ids')
-    @patch.object(context, 'related_units')
-    @patch.object(context, 'relation_get')
-    def test_keystone_fid_service_provider_rdata(
-            self, mock_relation_get, mock_related_units,
-            mock_relation_ids):
-        os.environ['JUJU_UNIT_NAME'] = 'keystone'
-
-        def relation_ids_side_effect(rname):
-            return {
-                'keystone-fid-service-provider': {
-                    'keystone-fid-service-provider:0',
-                    'keystone-fid-service-provider:1',
-                    'keystone-fid-service-provider:2'
-                }
-            }[rname]
-
-        mock_relation_ids.side_effect = relation_ids_side_effect
-
-        def related_units_side_effect(rid):
-            return {
-                'keystone-fid-service-provider:0': ['sp-mellon/0'],
-                'keystone-fid-service-provider:1': ['sp-shib/0'],
-                'keystone-fid-service-provider:2': ['sp-oidc/0'],
-            }[rid]
-        mock_related_units.side_effect = related_units_side_effect
-
-        def relation_get_side_effect(unit, rid):
-            # one unit only as the relation is container-scoped
-            return {
-                "keystone-fid-service-provider:0": {
-                    "sp-mellon/0": {
-                        "ingress-address": '10.0.0.10',
-                        "protocol-name": '"saml2"',
-                        "remote-id-attribute": '"MELLON_IDP"',
-                    },
-                },
-                "keystone-fid-service-provider:1": {
-                    "sp-shib/0": {
-                        "ingress-address": '10.0.0.10',
-                        "protocol-name": '"mapped"',
-                        "remote-id-attribute": '"Shib-Identity-Provider"',
-                    },
-                },
-                "keystone-fid-service-provider:2": {
-                    "sp-oidc/0": {
-                        "ingress-address": '10.0.0.10',
-                        "protocol-name": '"oidc"',
-                        "remote-id-attribute": '"HTTP_OIDC_ISS"',
-                    },
-                },
-            }[rid][unit]
-
-        mock_relation_get.side_effect = relation_get_side_effect
-        ctxt = context.KeystoneFIDServiceProviderContext()
-
-        self.maxDiff = None
-        self.assertItemsEqual(
-            ctxt(),
-            {
-                "fid_sps": [
-                    {
-                        "protocol-name": "saml2",
-                        "remote-id-attribute": "MELLON_IDP",
-                    },
-                    {
-                        "protocol-name": "mapped",
-                        "remote-id-attribute": "Shib-Identity-Provider",
-                    },
-                    {
-                        "protocol-name": "oidc",
-                        "remote-id-attribute": "HTTP_OIDC_ISS",
-                    },
-                ]
-            }
-        )
-
-    @patch.object(context, 'relation_ids')
-    def test_keystone_fid_service_provider_empty(
-            self, mock_relation_ids):
-        os.environ['JUJU_UNIT_NAME'] = 'keystone'
-
-        def relation_ids_side_effect(rname):
-            return {
-                'keystone-fid-service-provider': {}
-            }[rname]
-
-        mock_relation_ids.side_effect = relation_ids_side_effect
-        ctxt = context.KeystoneFIDServiceProviderContext()
-
-        self.maxDiff = None
-        self.assertItemsEqual(ctxt(), {})
-
-    @patch.object(context, 'relation_ids')
-    @patch.object(context, 'related_units')
-    @patch.object(context, 'relation_get')
-    def test_websso_trusted_dashboard_urls_generated(
-            self, mock_relation_get, mock_related_units,
-            mock_relation_ids):
-        os.environ['JUJU_UNIT_NAME'] = 'keystone'
-
-        def relation_ids_side_effect(rname):
-            return {
-                'websso-trusted-dashboard': {
-                    'websso-trusted-dashboard:0',
-                    'websso-trusted-dashboard:1',
-                    'websso-trusted-dashboard:2'
-                }
-            }[rname]
-
-        mock_relation_ids.side_effect = relation_ids_side_effect
-
-        def related_units_side_effect(rid):
-            return {
-                'websso-trusted-dashboard:0': ['dashboard-blue/0',
-                                               'dashboard-blue/1'],
-                'websso-trusted-dashboard:1': ['dashboard-red/0',
-                                               'dashboard-red/1'],
-                'websso-trusted-dashboard:2': ['dashboard-green/0',
-                                               'dashboard-green/1']
-            }[rid]
-        mock_related_units.side_effect = related_units_side_effect
-
-        def relation_get_side_effect(unit, rid):
-            return {
-                "websso-trusted-dashboard:0": {
-                    "dashboard-blue/0": {  # dns-ha
-                        "ingress-address": '10.0.0.10',
-                        "scheme": "https://",
-                        "hostname": "horizon.intranet.test",
-                        "path": "/auth/websso/",
-                    },
-                    "dashboard-blue/1": {  # dns-ha
-                        "ingress-address": '10.0.0.11',
-                        "scheme": "https://",
-                        "hostname": "horizon.intranet.test",
-                        "path": "/auth/websso/",
-                    },
-                },
-                "websso-trusted-dashboard:1": {
-                    "dashboard-red/0": {  # vip
-                        "ingress-address": '10.0.0.12',
-                        "scheme": "https://",
-                        "hostname": "10.0.0.100",
-                        "path": "/auth/websso/",
-                    },
-                    "dashboard-red/1": {  # vip
-                        "ingress-address": '10.0.0.13',
-                        "scheme": "https://",
-                        "hostname": "10.0.0.100",
-                        "path": "/auth/websso/",
-                    },
-                },
-                "websso-trusted-dashboard:2": {
-                    "dashboard-green/0": {  # vip-less, dns-ha-less
-                        "ingress-address": '10.0.0.14',
-                        "scheme": "http://",
-                        "hostname": "10.0.0.14",
-                        "path": "/auth/websso/",
-                    },
-                    "dashboard-green/1": {
-                        "ingress-address": '10.0.0.15',
-                        "scheme": "http://",
-                        "hostname": "10.0.0.15",
-                        "path": "/auth/websso/",
-                    },
-                },
-            }[rid][unit]
-
-        mock_relation_get.side_effect = relation_get_side_effect
-        ctxt = context.WebSSOTrustedDashboardContext()
-
-        self.maxDiff = None
-        self.assertEqual(
-            ctxt(),
-            {
-                'trusted_dashboards': set([
-                    'https://horizon.intranet.test/auth/websso/',
-                    'https://10.0.0.100/auth/websso/',
-                    'http://10.0.0.14/auth/websso/',
-                    'http://10.0.0.15/auth/websso/',
-                ])
-            }
-        )
-
-    @patch.object(context, 'relation_ids')
-    def test_websso_trusted_dashboard_empty(
-            self, mock_relation_ids):
-        os.environ['JUJU_UNIT_NAME'] = 'keystone'
-
-        def relation_ids_side_effect(rname):
-            return {
-                'websso-trusted-dashboard': {}
-            }[rname]
-
-        mock_relation_ids.side_effect = relation_ids_side_effect
-        ctxt = context.WebSSOTrustedDashboardContext()
-
-        self.maxDiff = None
-        self.assertItemsEqual(ctxt(), {})
diff --git a/unit_tests/test_keystone_hooks.py b/unit_tests/test_keystone_hooks.py
index c82ec961d5d84497226d8b8d4ff2b4dc0dc092ef..b7bd442b2ed48a003562481eb7ce5ea383f96719 100644
--- a/unit_tests/test_keystone_hooks.py
+++ b/unit_tests/test_keystone_hooks.py
@@ -13,6 +13,8 @@
 # limitations under the License.
 
 import os
+import uuid
+import yaml
 import sys
 
 from mock import call, patch, MagicMock
@@ -42,6 +44,8 @@ with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
     with patch('keystone_utils.run_in_apache') as mock_run_in_apache:
         import keystone_hooks as hooks
 
+from charmhelpers.contrib import unison
+
 utils.register_configs = _reg
 utils.restart_map = _map
 
@@ -49,7 +53,9 @@ TO_PATCH = [
     # charmhelpers.core.hookenv
     'Hooks',
     'config',
+    'is_relation_made',
     'log',
+    'local_unit',
     'filter_installed_packages',
     'relation_ids',
     'relation_set',
@@ -65,6 +71,7 @@ TO_PATCH = [
     'service_restart',
     # charmhelpers.contrib.openstack.utils
     'configure_installation_source',
+    'git_install_requested',
     'snap_install_requested',
     # charmhelpers.contrib.openstack.ip
     'resolve_address',
@@ -85,16 +92,22 @@ TO_PATCH = [
     'migrate_database',
     'ensure_initial_admin',
     'add_service_to_keystone',
+    'synchronize_ca_if_changed',
     'update_nrpe_config',
+    'ensure_ssl_dirs',
     'is_db_ready',
+    'keystone_service',
     'create_or_show_domain',
     'get_api_version',
     # other
     'check_call',
     'execd_preinstall',
+    'mkdir',
+    'os',
     # ip
     'get_iface_for_address',
     'get_netmask_for_address',
+    'git_install',
     'is_service_present',
     'delete_service_entry',
     'os_release',
@@ -115,40 +128,90 @@ class KeystoneRelationTests(CharmTestCase):
         self.snap_install_requested.return_value = False
 
     @patch.object(utils, 'os_release')
+    @patch.object(utils, 'git_install_requested')
+    @patch.object(unison, 'ensure_user')
     @patch.object(hooks, 'service_stop', lambda *args: None)
     @patch.object(hooks, 'service_start', lambda *args: None)
-    def test_install_hook(self, os_release):
+    def test_install_hook(self, ensure_user, git_requested, os_release):
         os_release.return_value = 'havana'
+        git_requested.return_value = False
         self.run_in_apache.return_value = False
         repo = 'cloud:precise-grizzly'
         self.test_config.set('openstack-origin', repo)
         hooks.install()
         self.assertTrue(self.execd_preinstall.called)
         self.configure_installation_source.assert_called_with(repo)
+        ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
         self.assertTrue(self.apt_update.called)
         self.apt_install.assert_called_with(
             ['apache2', 'haproxy', 'keystone', 'openssl', 'pwgen',
              'python-keystoneclient', 'python-mysqldb', 'python-psycopg2',
-             'python-requests', 'python-six', 'uuid'], fatal=True)
+             'python-requests', 'python-six', 'unison', 'uuid'], fatal=True)
+        self.git_install.assert_called_with(None)
         self.disable_unused_apache_sites.assert_not_called()
 
     @patch.object(utils, 'os_release')
+    @patch.object(utils, 'git_install_requested')
+    @patch.object(unison, 'ensure_user')
     @patch.object(hooks, 'service_stop', lambda *args: None)
     @patch.object(hooks, 'service_start', lambda *args: None)
-    def test_install_hook_apache2(self, os_release):
+    def test_install_hook_apache2(self, ensure_user,
+                                  git_requested, os_release):
         os_release.return_value = 'havana'
+        git_requested.return_value = False
         self.run_in_apache.return_value = True
         repo = 'cloud:xenial-newton'
         self.test_config.set('openstack-origin', repo)
+        self.os.path.exists.return_value = True
         hooks.install()
         self.assertTrue(self.execd_preinstall.called)
         self.configure_installation_source.assert_called_with(repo)
+        ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
         self.assertTrue(self.apt_update.called)
         self.apt_install.assert_called_with(
             ['apache2', 'haproxy', 'keystone', 'openssl', 'pwgen',
              'python-keystoneclient', 'python-mysqldb', 'python-psycopg2',
-             'python-requests', 'python-six', 'uuid'], fatal=True)
+             'python-requests', 'python-six', 'unison', 'uuid'], fatal=True)
+        self.git_install.assert_called_with(None)
         self.disable_unused_apache_sites.assert_called_with()
+
+    @patch.object(utils, 'os_release')
+    @patch.object(utils, 'git_install_requested')
+    @patch.object(unison, 'ensure_user')
+    @patch.object(hooks, 'service_stop', lambda *args: None)
+    @patch.object(hooks, 'service_start', lambda *args: None)
+    def test_install_hook_git(self, ensure_user, git_requested, os_release):
+        os_release.return_value = 'havana'
+        git_requested.return_value = True
+        repo = 'cloud:trusty-juno'
+        openstack_origin_git = {
+            'repositories': [
+                {'name': 'requirements',
+                 'repository': 'git://git.openstack.org/openstack/requirements',  # noqa
+                 'branch': 'stable/juno'},
+                {'name': 'keystone',
+                 'repository': 'git://git.openstack.org/openstack/keystone',
+                 'branch': 'stable/juno'}
+            ],
+            'directory': '/mnt/openstack-git',
+        }
+        projects_yaml = yaml.dump(openstack_origin_git)
+        self.test_config.set('openstack-origin', repo)
+        self.test_config.set('openstack-origin-git', projects_yaml)
+        hooks.install()
+        self.assertTrue(self.execd_preinstall.called)
+        self.configure_installation_source.assert_called_with(repo)
+        ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
+        self.assertTrue(self.apt_update.called)
+        self.apt_install.assert_called_with(
+            ['apache2', 'haproxy', 'libffi-dev', 'libmysqlclient-dev',
+             'libssl-dev', 'libxml2-dev', 'libxslt1-dev', 'libyaml-dev',
+             'openssl', 'pwgen', 'python-dev', 'python-keystoneclient',
+             'python-mysqldb', 'python-pip', 'python-psycopg2',
+             'python-requests', 'python-setuptools', 'python-six', 'unison',
+             'uuid', 'zlib1g-dev'], fatal=True)
+        self.git_install.assert_called_with(projects_yaml)
+
     mod_ch_openstack_utils = 'charmhelpers.contrib.openstack.utils'
 
     @patch.object(utils, 'os_release')
@@ -176,15 +239,44 @@ class KeystoneRelationTests(CharmTestCase):
         mock_config.side_effect = cfg
 
         self.get_relation_ip.return_value = '192.168.20.1'
+        self.is_relation_made.return_value = False
         hooks.db_joined()
         self.relation_set.assert_called_with(database='keystone',
                                              username='keystone',
                                              hostname='192.168.20.1')
 
+    def test_postgresql_db_joined(self):
+        self.is_relation_made.return_value = False
+        hooks.pgsql_db_joined()
+        self.relation_set.assert_called_with(database='keystone')
+
+    def test_db_joined_with_postgresql(self):
+        self.is_relation_made.return_value = True
+
+        with self.assertRaises(Exception) as context:
+            hooks.db_joined()
+        self.assertEqual(
+            str(context.exception),
+            'Attempting to associate a mysql database when there '
+            'is already associated a postgresql one')
+
+    def test_postgresql_joined_with_db(self):
+        self.is_relation_made.return_value = True
+
+        with self.assertRaises(Exception) as context:
+            hooks.pgsql_db_joined()
+        self.assertEqual(
+            str(context.exception),
+            'Attempting to associate a postgresql database when there '
+            'is already associated a mysql one')
+
     @patch('keystone_utils.log')
+    @patch('keystone_utils.ensure_ssl_cert_master')
     @patch.object(hooks, 'CONFIGS')
     def test_db_changed_missing_relation_data(self, configs,
+                                              mock_ensure_ssl_cert_master,
                                               mock_log):
+        mock_ensure_ssl_cert_master.return_value = False
         configs.complete_contexts = MagicMock()
         configs.complete_contexts.return_value = []
         hooks.db_changed()
@@ -192,39 +284,98 @@ class KeystoneRelationTests(CharmTestCase):
             'shared-db relation incomplete. Peer not ready?'
         )
 
-    @patch.object(hooks, 'update_all_identity_relation_units')
-    def _shared_db_test(self, configs, unit_name, mock_update_all):
+    @patch('keystone_utils.log')
+    @patch('keystone_utils.ensure_ssl_cert_master')
+    @patch.object(hooks, 'CONFIGS')
+    def test_postgresql_db_changed_missing_relation_data(
+            self, configs, mock_ensure_ssl_cert_master,
+            mock_log):
+        mock_ensure_ssl_cert_master.return_value = False
+        configs.complete_contexts = MagicMock()
+        configs.complete_contexts.return_value = []
+        hooks.pgsql_db_changed()
+        self.log.assert_called_with(
+            'pgsql-db relation incomplete. Peer not ready?'
+        )
+
+    def _shared_db_test(self, configs, unit_name):
         self.relation_get.return_value = 'keystone/0 keystone/3'
+        self.local_unit.return_value = unit_name
         configs.complete_contexts = MagicMock()
         configs.complete_contexts.return_value = ['shared-db']
         configs.write = MagicMock()
         hooks.db_changed()
 
+    def _postgresql_db_test(self, configs):
+        configs.complete_contexts = MagicMock()
+        configs.complete_contexts.return_value = ['pgsql-db']
+        configs.write = MagicMock()
+        hooks.pgsql_db_changed()
+
     @patch.object(hooks, 'leader_init_db_if_ready')
+    @patch('keystone_utils.ensure_ssl_cert_master')
     @patch.object(hooks, 'CONFIGS')
-    def test_db_changed(self, configs, leader_init):
+    def test_db_changed(self, configs,
+                        mock_ensure_ssl_cert_master,
+                        leader_init):
         self.os_release.return_value = 'havana'
+        mock_ensure_ssl_cert_master.return_value = False
         self._shared_db_test(configs, 'keystone/3')
         self.assertEqual([call('/etc/keystone/keystone.conf')],
                          configs.write.call_args_list)
         self.assertTrue(leader_init.called)
 
+    @patch.object(hooks, 'leader_init_db_if_ready')
+    @patch('keystone_utils.ensure_ssl_cert_master')
+    @patch.object(hooks, 'CONFIGS')
+    def test_postgresql_db_changed(self, configs,
+                                   mock_ensure_ssl_cert_master,
+                                   leader_init):
+        self.os_release.return_value = 'havana'
+        mock_ensure_ssl_cert_master.return_value = False
+        self._postgresql_db_test(configs)
+        self.assertEqual([call('/etc/keystone/keystone.conf')],
+                         configs.write.call_args_list)
+        self.assertTrue(leader_init.called)
+
     @patch.object(hooks, 'update_all_domain_backends')
     @patch.object(hooks, 'update_all_identity_relation_units')
     @patch.object(hooks, 'run_in_apache')
     @patch.object(hooks, 'is_db_initialised')
+    @patch.object(hooks, 'git_install_requested')
     @patch('keystone_utils.log')
+    @patch('keystone_utils.ensure_ssl_cert_master')
+    @patch('keystone_utils.ensure_ssl_dirs')
+    @patch.object(hooks, 'ensure_permissions')
+    @patch.object(hooks, 'ensure_pki_cert_paths')
+    @patch.object(hooks, 'ensure_pki_dir_permissions')
+    @patch.object(hooks, 'ensure_ssl_dir')
+    @patch.object(hooks, 'is_ssl_cert_master')
+    @patch.object(hooks, 'send_ssl_sync_request')
+    @patch.object(hooks, 'peer_units')
     @patch.object(hooks, 'admin_relation_changed')
     @patch.object(hooks, 'cluster_joined')
+    @patch.object(unison, 'ensure_user')
+    @patch.object(unison, 'get_homedir')
     @patch.object(hooks, 'CONFIGS')
     @patch.object(hooks, 'identity_changed')
     @patch.object(hooks, 'configure_https')
     def test_config_changed_no_upgrade_leader(self, configure_https,
                                               identity_changed,
-                                              configs,
+                                              configs, get_homedir,
+                                              ensure_user,
                                               mock_cluster_joined,
                                               admin_relation_changed,
-                                              mock_log,
+                                              mock_peer_units,
+                                              mock_send_ssl_sync_request,
+                                              mock_is_ssl_cert_master,
+                                              mock_ensure_ssl_dir,
+                                              mock_ensure_pki_cert_paths,
+                                              mock_ensure_permissions,
+                                              mock_ensure_pki_dir_permissions,
+                                              mock_ensure_ssl_dirs,
+                                              mock_ensure_ssl_cert_master,
+                                              mock_log, git_requested,
                                               mock_is_db_initialised,
                                               mock_run_in_apache,
                                               update,
@@ -239,12 +390,20 @@ class KeystoneRelationTests(CharmTestCase):
         self.relation_ids.side_effect = fake_relation_ids
 
         mock_run_in_apache.return_value = False
+        git_requested.return_value = False
+        mock_is_ssl_cert_master.return_value = True
         mock_is_db_initialised.return_value = True
         self.is_db_ready.return_value = True
         self.openstack_upgrade_available.return_value = False
+        self.is_elected_leader.return_value = True
+        # avoid having to mock syncer
+        mock_ensure_ssl_cert_master.return_value = False
+        mock_peer_units.return_value = []
         self.related_units.return_value = ['unit/0']
 
         hooks.config_changed()
+        ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
+        get_homedir.assert_called_with(self.ssh_user)
 
         self.save_script_rc.assert_called_with()
         configure_https.assert_called_with()
@@ -258,16 +417,36 @@ class KeystoneRelationTests(CharmTestCase):
     @patch.object(hooks, 'update_all_domain_backends')
     @patch.object(hooks, 'update_all_identity_relation_units')
     @patch.object(hooks, 'run_in_apache')
+    @patch.object(hooks, 'git_install_requested')
     @patch('keystone_utils.log')
+    @patch('keystone_utils.ensure_ssl_cert_master')
+    @patch('keystone_utils.ensure_ssl_dirs')
+    @patch.object(hooks, 'ensure_permissions')
+    @patch.object(hooks, 'ensure_pki_cert_paths')
+    @patch.object(hooks, 'ensure_pki_dir_permissions')
+    @patch.object(hooks, 'ensure_ssl_dir')
+    @patch.object(hooks, 'peer_units')
+    @patch.object(hooks, 'is_ssl_cert_master')
     @patch.object(hooks, 'cluster_joined')
+    @patch.object(unison, 'ensure_user')
+    @patch.object(unison, 'get_homedir')
     @patch.object(hooks, 'CONFIGS')
     @patch.object(hooks, 'identity_changed')
     @patch.object(hooks, 'configure_https')
     def test_config_changed_no_upgrade_not_leader(self, configure_https,
                                                   identity_changed,
-                                                  configs,
+                                                  configs, get_homedir,
+                                                  ensure_user,
                                                   mock_cluster_joined,
-                                                  mock_log,
+                                                  mock_is_ssl_cert_master,
+                                                  mock_peer_units,
+                                                  mock_ensure_ssl_dir,
+                                                  mock_ensure_permissions,
+                                                  mock_ensure_pki_cert_paths,
+                                                  mock_ensure_pki_permissions,
+                                                  ensure_ssl_dirs,
+                                                  mock_ensure_ssl_cert_master,
+                                                  mock_log, git_requested,
                                                   mock_run_in_apache, update,
                                                   mock_update_domains):
 
@@ -280,9 +459,16 @@ class KeystoneRelationTests(CharmTestCase):
         self.relation_ids.side_effect = fake_relation_ids
 
         mock_run_in_apache.return_value = False
+        git_requested.return_value = False
+        mock_is_ssl_cert_master.return_value = True
+        mock_peer_units.return_value = []
         self.openstack_upgrade_available.return_value = False
+        self.is_elected_leader.return_value = False
+        mock_ensure_ssl_cert_master.return_value = False
 
         hooks.config_changed()
+        ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
+        get_homedir.assert_called_with(self.ssh_user)
 
         self.assertFalse(mock_cluster_joined.called)
         self.save_script_rc.assert_called_with()
@@ -297,18 +483,39 @@ class KeystoneRelationTests(CharmTestCase):
     @patch.object(hooks, 'update_all_identity_relation_units')
     @patch.object(hooks, 'run_in_apache')
     @patch.object(hooks, 'is_db_initialised')
+    @patch.object(hooks, 'git_install_requested')
     @patch('keystone_utils.log')
+    @patch('keystone_utils.ensure_ssl_cert_master')
+    @patch('keystone_utils.ensure_ssl_dirs')
+    @patch.object(hooks, 'ensure_permissions')
+    @patch.object(hooks, 'ensure_pki_cert_paths')
+    @patch.object(hooks, 'ensure_pki_dir_permissions')
+    @patch.object(hooks, 'ensure_ssl_dir')
+    @patch.object(hooks, 'is_ssl_cert_master')
+    @patch.object(hooks, 'send_ssl_sync_request')
+    @patch.object(hooks, 'peer_units')
     @patch.object(hooks, 'admin_relation_changed')
     @patch.object(hooks, 'cluster_joined')
+    @patch.object(unison, 'ensure_user')
+    @patch.object(unison, 'get_homedir')
     @patch.object(hooks, 'CONFIGS')
     @patch.object(hooks, 'identity_changed')
     @patch.object(hooks, 'configure_https')
     def test_config_changed_with_openstack_upgrade(self, configure_https,
                                                    identity_changed,
-                                                   configs,
-                                                   cluster_joined,
+                                                   configs, get_homedir,
+                                                   ensure_user, cluster_joined,
                                                    admin_relation_changed,
-                                                   mock_log,
+                                                   mock_peer_units,
+                                                   mock_send_ssl_sync_request,
+                                                   mock_is_ssl_cert_master,
+                                                   mock_ensure_ssl_dir,
+                                                   mock_ensure_permissions,
+                                                   mock_ensure_pki_cert_paths,
+                                                   mock_ensure_pki_permissions,
+                                                   mock_ensure_ssl_dirs,
+                                                   mock_ensure_ssl_cert_master,
+                                                   mock_log, git_requested,
                                                    mock_is_db_initialised,
                                                    mock_run_in_apache,
                                                    update,
@@ -322,12 +529,20 @@ class KeystoneRelationTests(CharmTestCase):
         self.relation_ids.side_effect = fake_relation_ids
 
         mock_run_in_apache.return_value = False
+        git_requested.return_value = False
+        mock_is_ssl_cert_master.return_value = True
         self.is_db_ready.return_value = True
         mock_is_db_initialised.return_value = True
         self.openstack_upgrade_available.return_value = True
+        self.is_elected_leader.return_value = True
+        # avoid having to mock syncer
+        mock_ensure_ssl_cert_master.return_value = False
+        mock_peer_units.return_value = []
         self.related_units.return_value = ['unit/0']
 
         hooks.config_changed()
+        ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
+        get_homedir.assert_called_with(self.ssh_user)
 
         self.assertTrue(self.do_openstack_upgrade_reexec.called)
 
@@ -338,19 +553,104 @@ class KeystoneRelationTests(CharmTestCase):
         self.assertTrue(update.called)
         self.assertTrue(mock_update_domains.called)
 
-    @patch.object(hooks, 'os_release')
+    @patch.object(hooks, 'update_all_domain_backends')
+    @patch.object(hooks, 'update_all_identity_relation_units')
+    @patch.object(hooks, 'run_in_apache')
+    @patch.object(hooks, 'initialise_pki')
+    @patch.object(hooks, 'git_install_requested')
+    @patch.object(hooks, 'config_value_changed')
+    @patch('keystone_utils.log')
+    @patch('keystone_utils.ensure_ssl_cert_master')
+    @patch.object(hooks, 'ensure_ssl_dir')
+    @patch.object(hooks, 'send_ssl_sync_request')
+    @patch.object(hooks, 'is_db_initialised')
+    @patch.object(hooks, 'is_db_ready')
+    @patch.object(hooks, 'peer_units')
+    @patch.object(hooks, 'admin_relation_changed')
+    @patch.object(hooks, 'cluster_joined')
+    @patch.object(unison, 'ensure_user')
+    @patch.object(unison, 'get_homedir')
+    @patch.object(hooks, 'CONFIGS')
+    @patch.object(hooks, 'identity_changed')
+    @patch.object(hooks, 'configure_https')
+    def test_config_changed_git_updated(self, configure_https,
+                                        identity_changed,
+                                        configs, get_homedir, ensure_user,
+                                        cluster_joined, admin_relation_changed,
+                                        mock_peer_units,
+                                        mock_is_db_ready,
+                                        mock_is_db_initialised,
+                                        mock_send_ssl_sync_request,
+                                        mock_ensure_ssl_dir,
+                                        mock_ensure_ssl_cert_master,
+                                        mock_log, config_val_changed,
+                                        git_requested,
+                                        mock_initialise_pki,
+                                        mock_run_in_apache,
+                                        update,
+                                        mock_update_domains):
+        self.enable_memcache.return_value = False
+        mock_run_in_apache.return_value = False
+        git_requested.return_value = True
+        mock_ensure_ssl_cert_master.return_value = False
+        self.openstack_upgrade_available.return_value = False
+        self.is_elected_leader.return_value = True
+        mock_peer_units.return_value = []
+        self.relation_ids.return_value = ['identity-service:0']
+        self.related_units.return_value = ['unit/0']
+
+        repo = 'cloud:trusty-juno'
+        openstack_origin_git = {
+            'repositories': [
+                {'name': 'requirements',
+                 'repository': 'git://git.openstack.org/openstack/requirements',  # noqa
+                 'branch': 'stable/juno'},
+                {'name': 'keystone',
+                 'repository': 'git://git.openstack.org/openstack/keystone',
+                 'branch': 'stable/juno'}
+            ],
+            'directory': '/mnt/openstack-git',
+        }
+        projects_yaml = yaml.dump(openstack_origin_git)
+        self.test_config.set('openstack-origin', repo)
+        self.test_config.set('openstack-origin-git', projects_yaml)
+        hooks.config_changed()
+        self.git_install.assert_called_with(projects_yaml)
+        self.assertFalse(self.openstack_upgrade_available.called)
+        self.assertFalse(self.do_openstack_upgrade_reexec.called)
+        self.assertTrue(update.called)
+        self.assertTrue(mock_update_domains.called)
+
     @patch.object(hooks, 'run_in_apache')
+    @patch.object(hooks, 'initialise_pki')
     @patch.object(hooks, 'is_db_initialised')
+    @patch.object(hooks, 'git_install_requested')
+    @patch.object(hooks, 'config_value_changed')
+    @patch.object(hooks, 'ensure_ssl_dir')
     @patch.object(hooks, 'configure_https')
+    @patch.object(hooks, 'is_ssl_cert_master')
+    @patch.object(hooks, 'peer_units')
+    @patch.object(unison, 'get_homedir')
+    @patch.object(unison, 'ensure_user')
+    @patch('keystone_utils.ensure_ssl_cert_master')
     def test_config_changed_with_openstack_upgrade_action(self,
+                                                          ensure_ssl_cert,
+                                                          ensure_user,
+                                                          get_home,
+                                                          peer_units, is_ssl,
                                                           config_https,
+                                                          ensure_ssl_dir,
+                                                          config_value_changed,
+                                                          git_requested,
                                                           mock_db_init,
-                                                          mock_run_in_apache,
-                                                          os_release):
-        os_release.return_value = 'ocata'
+                                                          mock_initialise_pki,
+                                                          mock_run_in_apache):
         self.enable_memcache.return_value = False
         mock_run_in_apache.return_value = False
+        ensure_ssl_cert.return_value = False
+        peer_units.return_value = []
 
+        git_requested.return_value = False
         self.openstack_upgrade_available.return_value = True
         self.test_config.set('action-managed-upgrade', True)
 
@@ -360,18 +660,17 @@ class KeystoneRelationTests(CharmTestCase):
 
     @patch.object(hooks, 'is_db_initialised')
     @patch('keystone_utils.log')
+    @patch('keystone_utils.ensure_ssl_cert_master')
+    @patch.object(hooks, 'hashlib')
     @patch.object(hooks, 'send_notifications')
     def test_identity_changed_leader(self, mock_send_notifications,
+                                     mock_hashlib, mock_ensure_ssl_cert_master,
                                      mock_log, mock_is_db_initialised):
         self.expect_ha.return_value = False
         mock_is_db_initialised.return_value = True
         self.is_db_ready.return_value = True
         self.is_service_present.return_value = True
-        self.relation_get.return_value = {
-            'public_url': 'http://dummy.local',
-            'admin_url': 'http://dummy.local',
-            'internal_url': 'http://dummy.local',
-        }
+        mock_ensure_ssl_cert_master.return_value = False
         hooks.identity_changed(
             relation_id='identity-service:0',
             remote_unit='unit/0')
@@ -384,26 +683,31 @@ class KeystoneRelationTests(CharmTestCase):
 
     @patch.object(hooks, 'is_db_initialised')
     @patch('keystone_utils.log')
+    @patch('keystone_utils.ensure_ssl_cert_master')
+    @patch.object(hooks, 'hashlib')
     @patch.object(hooks, 'send_notifications')
     def test_identity_changed_leader_no_neutron(self, mock_send_notifications,
+                                                mock_hashlib,
+                                                mock_ensure_ssl_cert_master,
                                                 mock_log,
                                                 mock_is_db_initialised):
         self.expect_ha.return_value = False
         mock_is_db_initialised.return_value = True
         self.is_db_ready.return_value = True
         self.is_service_present.return_value = False
-        self.relation_get.return_value = {
-            'public_url': 'http://dummy.local',
-            'admin_url': 'http://dummy.local',
-            'internal_url': 'http://dummy.local',
-        }
+        mock_ensure_ssl_cert_master.return_value = False
         hooks.identity_changed(
             relation_id='identity-service:0',
             remote_unit='unit/0')
         self.assertFalse(self.delete_service_entry.called)
 
+    @patch.object(hooks, 'local_unit')
     @patch('keystone_utils.log')
-    def test_identity_changed_no_leader(self, mock_log):
+    @patch('keystone_utils.ensure_ssl_cert_master')
+    def test_identity_changed_no_leader(self, mock_ensure_ssl_cert_master,
+                                        mock_log, mock_local_unit):
+        mock_ensure_ssl_cert_master.return_value = False
+        mock_local_unit.return_value = 'unit/0'
         self.is_elected_leader.return_value = False
         hooks.identity_changed(
             relation_id='identity-service:0',
@@ -412,18 +716,60 @@ class KeystoneRelationTests(CharmTestCase):
         self.log.assert_called_with(
             'Deferring identity_changed() to service leader.')
 
+    @patch.object(hooks, 'send_ssl_sync_request')
+    @patch.object(hooks, 'local_unit')
+    @patch.object(hooks, 'peer_units')
+    @patch.object(unison, 'ssh_authorized_peers')
+    def test_cluster_joined(self, ssh_authorized_peers, mock_peer_units,
+                            mock_local_unit, mock_send_ssl_sync_request):
+        mock_local_unit.return_value = 'unit/0'
+        mock_peer_units.return_value = ['unit/0']
+        hooks.cluster_joined()
+        ssh_authorized_peers.assert_called_with(
+            user=self.ssh_user, group='juju_keystone',
+            peer_interface='cluster', ensure_local_user=True)
+        self.assertTrue(mock_send_ssl_sync_request.called)
+
+        mock_send_ssl_sync_request.reset_mock()
+        hooks.cluster_joined(rid='foo:1', ssl_sync_request=True)
+        self.assertTrue(mock_send_ssl_sync_request.called)
+
+        mock_send_ssl_sync_request.reset_mock()
+        hooks.cluster_joined(rid='foo:1', ssl_sync_request=False)
+        self.assertFalse(mock_send_ssl_sync_request.called)
+
+    @patch.object(hooks, 'relation_get_and_migrate')
+    @patch.object(hooks, 'initialise_pki')
     @patch.object(hooks, 'update_all_identity_relation_units')
+    @patch.object(hooks, 'get_ssl_sync_request_units')
+    @patch.object(hooks, 'is_ssl_cert_master')
+    @patch.object(hooks, 'peer_units')
     @patch('keystone_utils.relation_ids')
     @patch('keystone_utils.config')
     @patch('keystone_utils.log')
+    @patch('keystone_utils.ensure_ssl_cert_master')
+    @patch('keystone_utils.synchronize_ca')
+    @patch.object(hooks, 'check_peer_actions')
+    @patch.object(unison, 'ssh_authorized_peers')
     @patch.object(hooks, 'CONFIGS')
-    def test_cluster_changed(self, configs,
+    def test_cluster_changed(self, configs, ssh_authorized_peers,
+                             check_peer_actions, mock_synchronize_ca,
+                             mock_ensure_ssl_cert_master,
                              mock_log, mock_config, mock_relation_ids,
-                             mock_update_all_identity_relation_units):
+                             mock_peer_units,
+                             mock_is_ssl_cert_master,
+                             mock_get_ssl_sync_request_units,
+                             mock_update_all_identity_relation_units,
+                             mock_initialise_pki,
+                             mock_relation_get_and_migrate):
 
         relation_settings = {'foo_passwd': '123',
                              'identity-service:16_foo': 'bar'}
 
+        mock_relation_get_and_migrate.return_value = None
+        mock_is_ssl_cert_master.return_value = False
+        mock_peer_units.return_value = ['unit/0']
+        mock_ensure_ssl_cert_master.return_value = False
         mock_relation_ids.return_value = []
         self.is_leader.return_value = False
 
@@ -438,8 +784,13 @@ class KeystoneRelationTests(CharmTestCase):
         mock_config.return_value = None
 
         hooks.cluster_changed()
-        whitelist = ['_passwd', 'identity-service:', 'db-initialised']
+        whitelist = ['_passwd', 'identity-service:', 'db-initialised',
+                     'ssl-cert-available-updates', 'ssl-cert-master']
         self.peer_echo.assert_called_with(force=True, includes=whitelist)
+        ssh_authorized_peers.assert_called_with(
+            user=self.ssh_user, group='juju_keystone',
+            peer_interface='cluster', ensure_local_user=True)
+        self.assertFalse(mock_synchronize_ca.called)
         self.assertTrue(configs.write_all.called)
 
     @patch.object(hooks, 'update_all_identity_relation_units')
@@ -618,40 +969,54 @@ class KeystoneRelationTests(CharmTestCase):
         self.assertTrue(self.update_dns_ha_resource_params.called)
         self.relation_set.assert_called_with(**args)
 
-    @patch.object(utils, 'peer_retrieve')
     @patch('keystone_utils.log')
+    @patch('keystone_utils.ensure_ssl_cert_master')
+    @patch('keystone_utils.synchronize_ca')
     @patch.object(hooks, 'CONFIGS')
     def test_ha_relation_changed_not_clustered_not_leader(self, configs,
-                                                          mock_log,
-                                                          mock_peer_retrieve):
+                                                          mock_synchronize_ca,
+                                                          mock_is_master,
+                                                          mock_log):
+        mock_is_master.return_value = False
         self.relation_get.return_value = False
+        self.is_elected_leader.return_value = False
 
         hooks.ha_changed()
         self.assertTrue(configs.write_all.called)
+        self.assertFalse(mock_synchronize_ca.called)
 
-    @patch.object(hooks, 'update_all_identity_relation_units')
+    @patch.object(hooks, 'is_ssl_cert_master')
+    @patch.object(hooks, 'update_all_identity_relation_units_force_sync')
     @patch.object(hooks, 'is_db_initialised')
     @patch('keystone_utils.log')
+    @patch('keystone_utils.ensure_ssl_cert_master')
     @patch.object(hooks, 'identity_changed')
     @patch.object(hooks, 'CONFIGS')
     def test_ha_relation_changed_clustered_leader(self, configs,
                                                   identity_changed,
+                                                  mock_ensure_ssl_cert_master,
                                                   mock_log,
                                                   mock_is_db_initialised,
-                                                  update):
+                                                  update, cert_master):
         mock_is_db_initialised.return_value = True
         self.is_db_ready.return_value = True
+        mock_ensure_ssl_cert_master.return_value = False
         self.relation_get.return_value = True
+        self.is_elected_leader.return_value = True
         self.relation_ids.return_value = ['identity-service:0']
         self.related_units.return_value = ['unit/0']
+        cert_master.return_value = True
 
         hooks.ha_changed()
         self.assertTrue(configs.write_all.called)
         self.assertTrue(update.called)
 
     @patch('keystone_utils.log')
+    @patch('keystone_utils.ensure_ssl_cert_master')
     @patch.object(hooks, 'CONFIGS')
-    def test_configure_https_enable(self, configs, mock_log):
+    def test_configure_https_enable(self, configs, mock_ensure_ssl_cert_master,
+                                    mock_log):
+        mock_ensure_ssl_cert_master.return_value = False
         configs.complete_contexts = MagicMock()
         configs.complete_contexts.return_value = ['https']
         configs.write = MagicMock()
@@ -662,8 +1027,12 @@ class KeystoneRelationTests(CharmTestCase):
         self.check_call.assert_called_with(cmd)
 
     @patch('keystone_utils.log')
+    @patch('keystone_utils.ensure_ssl_cert_master')
     @patch.object(hooks, 'CONFIGS')
-    def test_configure_https_disable(self, configs, mock_log):
+    def test_configure_https_disable(self, configs,
+                                     mock_ensure_ssl_cert_master,
+                                     mock_log):
+        mock_ensure_ssl_cert_master.return_value = False
         configs.complete_contexts = MagicMock()
         configs.complete_contexts.return_value = ['']
         configs.write = MagicMock()
@@ -675,25 +1044,47 @@ class KeystoneRelationTests(CharmTestCase):
 
     @patch.object(hooks, 'update_all_identity_relation_units')
     @patch.object(utils, 'os_release')
+    @patch.object(utils, 'git_install_requested')
     @patch.object(hooks, 'is_db_ready')
     @patch.object(hooks, 'is_db_initialised')
     @patch('keystone_utils.log')
     @patch('keystone_utils.relation_ids')
-    def test_upgrade_charm_leader(self,
+    @patch('keystone_utils.is_elected_leader')
+    @patch('keystone_utils.ensure_ssl_cert_master')
+    @patch('keystone_utils.update_hash_from_path')
+    @patch('keystone_utils.synchronize_ca')
+    @patch.object(unison, 'ssh_authorized_peers')
+    def test_upgrade_charm_leader(self, ssh_authorized_peers,
+                                  mock_synchronize_ca,
+                                  mock_update_hash_from_path,
+                                  mock_ensure_ssl_cert_master,
+                                  mock_is_elected_leader,
                                   mock_relation_ids,
                                   mock_log,
                                   mock_is_db_initialised,
                                   mock_is_db_ready,
+                                  git_requested,
                                   os_release,
                                   update):
         os_release.return_value = 'havana'
         mock_is_db_initialised.return_value = True
         mock_is_db_ready.return_value = True
+        mock_is_elected_leader.return_value = False
         mock_relation_ids.return_value = []
+        mock_ensure_ssl_cert_master.return_value = True
+        # Ensure the computed hash always differs between calls
+        mock_update_hash_from_path.side_effect = \
+            lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4()).encode())
 
+        self.is_elected_leader.return_value = True
         self.filter_installed_packages.return_value = []
+        git_requested.return_value = False
         hooks.upgrade_charm()
         self.assertTrue(self.apt_install.called)
+        ssh_authorized_peers.assert_called_with(
+            user=self.ssh_user, group='juju_keystone',
+            peer_interface='cluster', ensure_local_user=True)
+        self.assertTrue(mock_synchronize_ca.called)
         self.assertTrue(update.called)
 
     @patch.object(hooks, 'update_all_identity_relation_units')
@@ -769,6 +1160,7 @@ class KeystoneRelationTests(CharmTestCase):
                      call('Firing identity_credentials_changed hook for all '
                           'related services.')]
         hooks.update_all_identity_relation_units(check_db_ready=False)
+        self.assertTrue(configs.write_all.called)
         identity_changed.assert_called_with(
             relation_id='identity-relation:0',
             remote_unit='unit/0')
@@ -784,6 +1176,7 @@ class KeystoneRelationTests(CharmTestCase):
         """ Verify update identity relations when DB is not ready """
         self.is_db_ready.return_value = False
         hooks.update_all_identity_relation_units(check_db_ready=True)
+        self.assertTrue(configs.write_all.called)
         self.assertTrue(self.is_db_ready.called)
         self.log.assert_called_with('Allowed_units list provided and this '
                                     'unit not present', level='INFO')
@@ -797,6 +1190,7 @@ class KeystoneRelationTests(CharmTestCase):
         """ Verify update identity relations when DB is not initialized """
         is_db_initialized.return_value = False
         hooks.update_all_identity_relation_units(check_db_ready=False)
+        self.assertTrue(configs.write_all.called)
         self.assertFalse(self.is_db_ready.called)
         self.log.assert_called_with('Database not yet initialised - '
                                     'deferring identity-relation updates',
@@ -812,6 +1206,7 @@ class KeystoneRelationTests(CharmTestCase):
         self.is_elected_leader.return_value = True
         is_db_initialized.return_value = True
         hooks.update_all_identity_relation_units(check_db_ready=False)
+        self.assertTrue(configs.write_all.called)
         self.assertTrue(self.ensure_initial_admin.called)
         # Still updates relations
         self.assertTrue(self.relation_ids.called)
@@ -825,26 +1220,40 @@ class KeystoneRelationTests(CharmTestCase):
         self.is_elected_leader.return_value = False
         is_db_initialized.return_value = True
         hooks.update_all_identity_relation_units(check_db_ready=False)
+        self.assertTrue(configs.write_all.called)
         self.assertFalse(self.ensure_initial_admin.called)
         # Still updates relations
         self.assertTrue(self.relation_ids.called)
 
-    @patch.object(utils, 'peer_retrieve')
     @patch.object(hooks, 'update_all_identity_relation_units')
     @patch.object(utils, 'os_release')
+    @patch.object(utils, 'git_install_requested')
     @patch('keystone_utils.log')
     @patch('keystone_utils.relation_ids')
-    def test_upgrade_charm_not_leader(self,
+    @patch('keystone_utils.ensure_ssl_cert_master')
+    @patch('keystone_utils.update_hash_from_path')
+    @patch.object(unison, 'ssh_authorized_peers')
+    def test_upgrade_charm_not_leader(self, ssh_authorized_peers,
+                                      mock_update_hash_from_path,
+                                      mock_ensure_ssl_cert_master,
                                       mock_relation_ids,
-                                      mock_log,
-                                      os_release, update, mock_peer_retrieve):
+                                      mock_log, git_requested,
+                                      os_release, update):
         os_release.return_value = 'havana'
+        mock_relation_ids.return_value = []
+        mock_ensure_ssl_cert_master.return_value = False
+        # Ensure the computed hash always differs between calls
+        mock_update_hash_from_path.side_effect = \
+            lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4()).encode())
 
-        self.filter_installed_packages.return_value = []
-        mock_peer_retrieve.return_value = 'true'
         self.is_elected_leader.return_value = False
+        self.filter_installed_packages.return_value = []
+        git_requested.return_value = False
         hooks.upgrade_charm()
         self.assertTrue(self.apt_install.called)
+        ssh_authorized_peers.assert_called_with(
+            user=self.ssh_user, group='juju_keystone',
+            peer_interface='cluster', ensure_local_user=True)
         self.assertTrue(self.log.called)
         self.assertFalse(update.called)
 
@@ -868,14 +1277,9 @@ class KeystoneRelationTests(CharmTestCase):
 
     @patch.object(hooks, 'is_unit_paused_set')
     @patch.object(hooks, 'is_db_initialised')
-    @patch.object(utils, 'run_in_apache')
-    @patch.object(utils, 'service_restart')
     def test_domain_backend_changed_complete(self,
-                                             service_restart,
-                                             run_in_apache,
                                              is_db_initialised,
                                              is_unit_paused_set):
-        run_in_apache.return_value = True
         self.get_api_version.return_value = 3
         self.relation_get.side_effect = ['mydomain', 'nonce2']
         self.is_leader.return_value = True
@@ -885,6 +1289,7 @@ class KeystoneRelationTests(CharmTestCase):
         mock_kv.get.return_value = None
         self.unitdata.kv.return_value = mock_kv
         is_unit_paused_set.return_value = False
+        self.keystone_service.return_value = 'apache2'
 
         hooks.domain_backend_changed()
 
@@ -898,21 +1303,16 @@ class KeystoneRelationTests(CharmTestCase):
                  rid=None),
         ])
         self.create_or_show_domain.assert_called_with('mydomain')
-        service_restart.assert_called_with('apache2')
+        self.service_restart.assert_called_with('apache2')
         mock_kv.set.assert_called_with('domain-restart-nonce-mydomain',
                                        'nonce2')
         self.assertTrue(mock_kv.flush.called)
 
     @patch.object(hooks, 'is_unit_paused_set')
     @patch.object(hooks, 'is_db_initialised')
-    @patch.object(utils, 'run_in_apache')
-    @patch.object(utils, 'service_restart')
     def test_domain_backend_changed_complete_follower(self,
-                                                      service_restart,
-                                                      run_in_apache,
                                                       is_db_initialised,
                                                       is_unit_paused_set):
-        run_in_apache.return_value = True
         self.get_api_version.return_value = 3
         self.relation_get.side_effect = ['mydomain', 'nonce2']
         self.is_leader.return_value = False
@@ -922,6 +1322,7 @@ class KeystoneRelationTests(CharmTestCase):
         mock_kv.get.return_value = None
         self.unitdata.kv.return_value = mock_kv
         is_unit_paused_set.return_value = False
+        self.keystone_service.return_value = 'apache2'
 
         hooks.domain_backend_changed()
 
@@ -936,84 +1337,7 @@ class KeystoneRelationTests(CharmTestCase):
         ])
         # Only lead unit will create the domain
         self.assertFalse(self.create_or_show_domain.called)
-        service_restart.assert_called_with('apache2')
+        self.service_restart.assert_called_with('apache2')
         mock_kv.set.assert_called_with('domain-restart-nonce-mydomain',
                                        'nonce2')
         self.assertTrue(mock_kv.flush.called)
-
-    @patch.object(hooks, 'os_release')
-    @patch.object(hooks, 'relation_id')
-    @patch.object(hooks, 'is_unit_paused_set')
-    @patch.object(hooks, 'is_db_initialised')
-    @patch.object(utils, 'run_in_apache')
-    @patch.object(utils, 'service_restart')
-    def test_fid_service_provider_changed_complete(
-            self,
-            service_restart,
-            run_in_apache,
-            is_db_initialised,
-            is_unit_paused_set,
-            relation_id, os_release):
-        os_release.return_value = 'ocata'
-        rel = 'keystone-fid-service-provider:0'
-        relation_id.return_value = rel
-        run_in_apache.return_value = True
-        self.get_api_version.return_value = 3
-        self.relation_get.side_effect = ['"nonce2"']
-        self.is_leader.return_value = True
-        self.is_db_ready.return_value = True
-        is_db_initialised.return_value = True
-        mock_kv = MagicMock()
-        mock_kv.get.return_value = None
-        self.unitdata.kv.return_value = mock_kv
-        is_unit_paused_set.return_value = False
-
-        hooks.keystone_fid_service_provider_changed()
-
-        self.assertTrue(self.get_api_version.called)
-        self.relation_get.assert_has_calls([
-            call('restart-nonce'),
-        ])
-        service_restart.assert_called_with('apache2')
-        mock_kv.set.assert_called_with(
-            'fid-restart-nonce-{}'.format(rel), 'nonce2')
-        self.assertTrue(mock_kv.flush.called)
-
-    @patch.object(hooks, 'os_release')
-    @patch.object(hooks, 'relation_id')
-    @patch.object(hooks, 'is_unit_paused_set')
-    @patch.object(hooks, 'is_db_initialised')
-    @patch.object(utils, 'run_in_apache')
-    @patch.object(utils, 'service_restart')
-    def test_fid_service_provider_changed_complete_follower(
-            self,
-            service_restart,
-            run_in_apache,
-            is_db_initialised,
-            is_unit_paused_set,
-            relation_id, os_release):
-        os_release.return_value = 'ocata'
-        rel = 'keystone-fid-service-provider:0'
-        relation_id.return_value = rel
-        run_in_apache.return_value = True
-        self.get_api_version.return_value = 3
-        self.relation_get.side_effect = ['"nonce2"']
-        self.is_leader.return_value = False
-        self.is_db_ready.return_value = True
-        is_db_initialised.return_value = True
-        mock_kv = MagicMock()
-        mock_kv.get.return_value = None
-        self.unitdata.kv.return_value = mock_kv
-        is_unit_paused_set.return_value = False
-
-        hooks.keystone_fid_service_provider_changed()
-
-        self.assertTrue(self.get_api_version.called)
-        self.relation_get.assert_has_calls([
-            call('restart-nonce'),
-        ])
-        service_restart.assert_called_with('apache2')
-        mock_kv.set.assert_called_with(
-            'fid-restart-nonce-{}'.format(rel),
-            'nonce2')
-        self.assertTrue(mock_kv.flush.called)
diff --git a/unit_tests/test_keystone_utils.py b/unit_tests/test_keystone_utils.py
index 97a405305ecada697e79d0980a26212557971d17..ebfd5fddd19757cfa77dec9cbd18abbe3cef2f19 100644
--- a/unit_tests/test_keystone_utils.py
+++ b/unit_tests/test_keystone_utils.py
@@ -15,6 +15,7 @@
 from mock import patch, call, MagicMock
 from test_utils import CharmTestCase
 import os
+from base64 import b64encode
 import subprocess
 
 os.environ['JUJU_UNIT_NAME'] = 'keystone'
@@ -29,6 +30,7 @@ TO_PATCH = [
     'config',
     'os_release',
     'log',
+    'get_ca',
     'create_role',
     'create_service_entry',
     'create_endpoint_template',
@@ -37,11 +39,17 @@ TO_PATCH = [
     'get_requested_roles',
     'get_service_password',
     'get_os_codename_install_source',
+    'git_clone_and_install',
+    'git_pip_venv_dir',
+    'git_src_dir',
     'grant_role',
     'configure_installation_source',
+    'is_elected_leader',
+    'is_ssl_cert_master',
     'https',
     'lsb_release',
     'peer_store_and_set',
+    'service_restart',
     'service_stop',
     'service_start',
     'snap_install_requested',
@@ -49,10 +57,12 @@ TO_PATCH = [
     'relation_set',
     'relation_ids',
     'relation_id',
+    'render',
     'local_unit',
     'related_units',
     'https',
     'peer_store',
+    'pip_install',
     # generic
     'apt_update',
     'apt_upgrade',
@@ -61,9 +71,19 @@ TO_PATCH = [
     'time',
     'pwgen',
     'os_application_version_set',
+    'is_leader',
     'reset_os_release',
 ]
 
+openstack_origin_git = \
+    """repositories:
+         - {name: requirements,
+            repository: 'git://git.openstack.org/openstack/requirements',
+            branch: stable/juno}
+         - {name: keystone,
+            repository: 'git://git.openstack.org/openstack/keystone',
+            branch: stable/juno}"""
+
 
 class TestKeystoneUtils(CharmTestCase):
 
@@ -87,7 +107,6 @@ class TestKeystoneUtils(CharmTestCase):
                 'contexts': [self.ctxt],
             }
         }
-        self.get_os_codename_install_source.return_value = 'icehouse'
 
     @patch('charmhelpers.contrib.openstack.templating.OSConfigRenderer')
     @patch('os.path.exists')
@@ -115,21 +134,27 @@ class TestKeystoneUtils(CharmTestCase):
         ]
         self.assertEqual(fake_renderer.register.call_args_list, ex_reg)
 
+    @patch.object(utils, 'git_determine_usr_bin')
     @patch.object(utils, 'snap_install_requested')
     @patch.object(utils, 'os')
     def test_resource_map_enable_memcache_mitaka(self, mock_os,
-                                                 snap_install_requested):
+                                                 snap_install_requested,
+                                                 git_determine_usr_bin):
         self.os_release.return_value = 'mitaka'
         snap_install_requested.return_value = False
+        git_determine_usr_bin.return_value = '/usr/bin'
         mock_os.path.exists.return_value = True
         self.assertTrue('/etc/memcached.conf' in utils.resource_map().keys())
 
+    @patch.object(utils, 'git_determine_usr_bin')
     @patch.object(utils, 'snap_install_requested')
     @patch.object(utils, 'os')
     def test_resource_map_enable_memcache_liberty(self, mock_os,
-                                                  snap_install_requested):
+                                                  snap_install_requested,
+                                                  git_determine_usr_bin):
         self.os_release.return_value = 'liberty'
         snap_install_requested.return_value = False
+        git_determine_usr_bin.return_value = '/usr/bin'
         mock_os.path.exists.return_value = True
         self.assertFalse('/etc/memcached.conf' in utils.resource_map().keys())
 
@@ -159,6 +184,16 @@ class TestKeystoneUtils(CharmTestCase):
             'memcached']
         self.assertEqual(set(ex), set(result))
 
+    @patch('charmhelpers.contrib.openstack.utils.config')
+    def test_determine_packages_git(self, _config):
+        self.os_release.return_value = 'havana'
+        _config.return_value = openstack_origin_git
+        result = utils.determine_packages()
+        ex = utils.BASE_PACKAGES + utils.BASE_GIT_PACKAGES
+        for p in utils.GIT_PACKAGE_BLACKLIST:
+            ex.remove(p)
+        self.assertEqual(set(ex), set(result))
+
     @patch('charmhelpers.contrib.openstack.utils.config')
     def test_determine_packages_snap_install(self, _config):
         self.os_release.return_value = 'mitaka'
@@ -168,7 +203,6 @@ class TestKeystoneUtils(CharmTestCase):
         ex = utils.BASE_PACKAGES_SNAP + ['memcached']
         self.assertEqual(set(ex), set(result))
 
-    @patch.object(utils, 'is_elected_leader')
     @patch.object(utils, 'disable_unused_apache_sites')
     @patch('os.path.exists')
     @patch.object(utils, 'run_in_apache')
@@ -176,11 +210,11 @@ class TestKeystoneUtils(CharmTestCase):
     @patch.object(utils, 'migrate_database')
     def test_openstack_upgrade_leader(
             self, migrate_database, determine_packages,
-            run_in_apache, os_path_exists, disable_unused_apache_sites,
-            mock_is_elected_leader):
+            run_in_apache, os_path_exists, disable_unused_apache_sites):
         configs = MagicMock()
         self.test_config.set('openstack-origin', 'cloud:xenial-newton')
         determine_packages.return_value = []
+        self.is_elected_leader.return_value = True
         os_path_exists.return_value = True
         run_in_apache.return_value = True
 
@@ -226,8 +260,9 @@ class TestKeystoneUtils(CharmTestCase):
     @patch.object(utils, 'get_api_version')
     @patch.object(utils, 'get_manager')
     @patch.object(utils, 'resolve_address')
+    @patch.object(utils, 'b64encode')
     def test_add_service_to_keystone_clustered_https_none_values(
-            self, _resolve_address, _get_manager,
+            self, b64encode, _resolve_address, _get_manager,
             _get_api_version, _leader_get):
         _get_api_version.return_value = 2
         _leader_get.return_value = None
@@ -235,9 +270,11 @@ class TestKeystoneUtils(CharmTestCase):
         remote_unit = 'unit/0'
         _resolve_address.return_value = '10.10.10.10'
         self.https.return_value = True
+        self.test_config.set('https-service-endpoints', 'True')
         self.test_config.set('vip', '10.10.10.10')
         self.test_config.set('admin-port', 80)
         self.test_config.set('service-port', 81)
+        b64encode.return_value = 'certificate'
         self.get_requested_roles.return_value = ['role1', ]
 
         self.relation_get.return_value = {'service': 'keystone',
@@ -254,10 +291,12 @@ class TestKeystoneUtils(CharmTestCase):
 
         relation_data = {'auth_host': '10.10.10.10',
                          'service_host': '10.10.10.10',
+                         'auth_protocol': 'https',
                          'service_protocol': 'https',
                          'auth_port': 80,
-                         'auth_protocol': 'https',
                          'service_port': 81,
+                         'https_keystone': 'True',
+                         'ca_cert': 'certificate',
                          'region': 'RegionOne',
                          'api_version': 2,
                          'admin_domain_id': None}
@@ -293,15 +332,12 @@ class TestKeystoneUtils(CharmTestCase):
         self.relation_ids.return_value = ['cluster/0']
 
         service_domain = None
-        service_domain_id = None
         service_role = 'Admin'
         if test_api_version > 2:
             service_domain = 'service_domain'
-            service_domain_id = '1234567890'
 
         mock_keystone = MagicMock()
         mock_keystone.resolve_tenant_id.return_value = 'tenant_id'
-        mock_keystone.resolve_domain_id.return_value = service_domain_id
         KeystoneManager.return_value = mock_keystone
 
         self.relation_get.return_value = {'service': 'keystone',
@@ -338,7 +374,6 @@ class TestKeystoneUtils(CharmTestCase):
                          'service_username': 'keystone',
                          'service_password': 'password',
                          'service_domain': service_domain,
-                         'service_domain_id': service_domain_id,
                          'service_tenant': 'tenant',
                          'https_keystone': '__null__',
                          'ssl_cert': '__null__', 'ssl_key': '__null__',
@@ -359,8 +394,6 @@ class TestKeystoneUtils(CharmTestCase):
                                                    **relation_data)
         self.relation_set.assert_called_with(relation_id=relation_id,
                                              **filtered)
-        if test_api_version > 2:
-            mock_keystone.resolve_domain_id.assert_called_with(service_domain)
 
     def test_add_service_to_keystone_no_clustered_no_https_complete_values_v3(
             self):
@@ -397,49 +430,6 @@ class TestKeystoneUtils(CharmTestCase):
                                         adminurl='10.0.0.2',
                                         internalurl='192.168.1.2')
 
-    @patch.object(utils, 'get_requested_roles')
-    @patch.object(utils, 'create_service_credentials')
-    @patch.object(utils, 'leader_get')
-    @patch('charmhelpers.contrib.openstack.ip.config')
-    @patch.object(utils, 'ensure_valid_service')
-    @patch.object(utils, 'add_endpoint')
-    @patch.object(utils, 'get_manager')
-    def test_add_service_to_keystone_multi_endpoints_bug_1739409(
-            self, KeystoneManager, add_endpoint, ensure_valid_service,
-            ip_config, leader_get, create_service_credentials,
-            get_requested_roles):
-        relation_id = 'identity-service:8'
-        remote_unit = 'nova-cloud-controller/0'
-        get_requested_roles.return_value = 'role1'
-        self.relation_get.return_value = {
-            'ec2_admin_url': 'http://10.5.0.16:8773/services/Cloud',
-            'ec2_internal_url': 'http://10.5.0.16:8773/services/Cloud',
-            'ec2_public_url': 'http://10.5.0.16:8773/services/Cloud',
-            'ec2_region': 'RegionOne',
-            'ec2_service': 'ec2',
-            'nova_admin_url': 'http://10.5.0.16:8774/v2/$(tenant_id)s',
-            'nova_internal_url': 'http://10.5.0.16:8774/v2/$(tenant_id)s',
-            'nova_public_url': 'http://10.5.0.16:8774/v2/$(tenant_id)s',
-            'nova_region': 'RegionOne',
-            'nova_service': 'nova',
-            'private-address': '10.5.0.16',
-            's3_admin_url': 'http://10.5.0.16:3333',
-            's3_internal_url': 'http://10.5.0.16:3333',
-            's3_public_url': 'http://10.5.0.16:3333',
-            's3_region': 'RegionOne',
-            's3_service': 's3'}
-
-        self.get_local_endpoint.return_value = 'http://localhost:80/v2.0/'
-        KeystoneManager.resolve_tenant_id.return_value = 'tenant_id'
-        leader_get.return_value = None
-
-        utils.add_service_to_keystone(
-            relation_id=relation_id,
-            remote_unit=remote_unit)
-        create_service_credentials.assert_called_once_with(
-            'ec2_nova_s3',
-            new_roles='role1')
-
     @patch.object(utils, 'set_service_password')
     @patch.object(utils, 'get_service_password')
     @patch.object(utils, 'user_exists')
@@ -639,15 +629,12 @@ class TestKeystoneUtils(CharmTestCase):
         mock_relation_set.assert_called_once_with(relation_id=relation_id,
                                                   relation_settings=settings)
 
-    @patch.object(utils, 'is_elected_leader')
     @patch.object(utils, 'peer_retrieve')
     @patch.object(utils, 'peer_store')
     def test_get_admin_passwd_pwd_set(self, mock_peer_store,
-                                      mock_peer_retrieve,
-                                      mock_is_elected_leader):
+                                      mock_peer_retrieve):
         mock_peer_retrieve.return_value = None
         self.test_config.set('admin-password', 'supersecret')
-        mock_is_elected_leader.return_value = True
         self.assertEqual(utils.get_admin_passwd(), 'supersecret')
         mock_peer_store.assert_called_once_with('admin_passwd', 'supersecret')
 
@@ -695,6 +682,96 @@ class TestKeystoneUtils(CharmTestCase):
         self.related_units.return_value = []
         self.assertTrue(utils.is_db_ready())
 
+    @patch.object(utils, 'peer_units')
+    def test_ensure_ssl_cert_master_ssl_no_peers(self, mock_peer_units):
+        def mock_rel_get(unit=None, **kwargs):
+            return None
+
+        self.relation_get.side_effect = mock_rel_get
+        self.relation_ids.return_value = ['cluster:0']
+        self.local_unit.return_value = 'unit/0'
+        self.related_units.return_value = []
+        mock_peer_units.return_value = []
+        # This should get ignored since we are overriding
+        self.is_ssl_cert_master.return_value = False
+        self.is_elected_leader.return_value = False
+        self.assertTrue(utils.ensure_ssl_cert_master())
+        settings = {'ssl-cert-master': 'unit/0'}
+        self.relation_set.assert_called_with(relation_id='cluster:0',
+                                             relation_settings=settings)
+
+    @patch.object(utils, 'peer_units')
+    def test_ensure_ssl_cert_master_ssl_master_no_peers(self,
+                                                        mock_peer_units):
+        def mock_rel_get(unit=None, **kwargs):
+            if unit == 'unit/0':
+                return 'unit/0'
+
+            return None
+
+        self.relation_get.side_effect = mock_rel_get
+        self.relation_ids.return_value = ['cluster:0']
+        self.local_unit.return_value = 'unit/0'
+        self.related_units.return_value = []
+        mock_peer_units.return_value = []
+        # This should get ignored since we are overriding
+        self.is_ssl_cert_master.return_value = False
+        self.is_elected_leader.return_value = False
+        self.assertTrue(utils.ensure_ssl_cert_master())
+        settings = {'ssl-cert-master': 'unit/0'}
+        self.relation_set.assert_called_with(relation_id='cluster:0',
+                                             relation_settings=settings)
+
+    @patch.object(utils, 'peer_units')
+    def test_ensure_ssl_cert_master_ssl_not_leader(self, mock_peer_units):
+        self.relation_ids.return_value = ['cluster:0']
+        self.local_unit.return_value = 'unit/0'
+        mock_peer_units.return_value = ['unit/1']
+        self.is_ssl_cert_master.return_value = False
+        self.is_elected_leader.return_value = False
+        self.assertFalse(utils.ensure_ssl_cert_master())
+        self.assertFalse(self.relation_set.called)
+
+    @patch.object(utils, 'peer_units')
+    def test_ensure_ssl_cert_master_is_leader_new_peer(self,
+                                                       mock_peer_units):
+        def mock_rel_get(unit=None, **kwargs):
+            if unit == 'unit/0':
+                return 'unit/0'
+
+            return 'unknown'
+
+        self.relation_get.side_effect = mock_rel_get
+        self.relation_ids.return_value = ['cluster:0']
+        self.local_unit.return_value = 'unit/0'
+        mock_peer_units.return_value = ['unit/1']
+        self.related_units.return_value = ['unit/1']
+        self.is_ssl_cert_master.return_value = False
+        self.is_elected_leader.return_value = True
+        self.assertFalse(utils.ensure_ssl_cert_master())
+        settings = {'ssl-cert-master': 'unit/0'}
+        self.relation_set.assert_called_with(relation_id='cluster:0',
+                                             relation_settings=settings)
+
+    @patch.object(utils, 'peer_units')
+    def test_ensure_ssl_cert_master_is_leader_no_new_peer(self,
+                                                          mock_peer_units):
+        def mock_rel_get(unit=None, **kwargs):
+            if unit == 'unit/0':
+                return 'unit/0'
+
+            return 'unit/0'
+
+        self.relation_get.side_effect = mock_rel_get
+        self.relation_ids.return_value = ['cluster:0']
+        self.local_unit.return_value = 'unit/0'
+        mock_peer_units.return_value = ['unit/1']
+        self.related_units.return_value = ['unit/1']
+        self.is_ssl_cert_master.return_value = False
+        self.is_elected_leader.return_value = True
+        self.assertFalse(utils.ensure_ssl_cert_master())
+        self.assertFalse(self.relation_set.called)
+
     @patch.object(utils, 'leader_set')
     @patch.object(utils, 'leader_get')
     @patch('charmhelpers.contrib.openstack.ip.unit_get')
@@ -729,6 +806,109 @@ class TestKeystoneUtils(CharmTestCase):
             region='RegionOne',
         )
 
+    @patch.object(utils, 'peer_units')
+    def test_ensure_ssl_cert_master_is_leader_bad_votes(self,
+                                                        mock_peer_units):
+        counter = {0: 0}
+
+        def mock_rel_get(unit=None, **kwargs):
+            """Returns a mix of votes."""
+            if unit == 'unit/0':
+                return 'unit/0'
+
+            ret = 'unit/%d' % (counter[0])
+            counter[0] += 1
+            return ret
+
+        self.relation_get.side_effect = mock_rel_get
+        self.relation_ids.return_value = ['cluster:0']
+        self.local_unit.return_value = 'unit/0'
+        mock_peer_units.return_value = ['unit/1']
+        self.related_units.return_value = ['unit/1']
+        self.is_ssl_cert_master.return_value = False
+        self.is_elected_leader.return_value = True
+        self.assertFalse(utils.ensure_ssl_cert_master())
+        self.assertFalse(self.relation_set.called)
+
+    @patch.object(utils, 'git_install_requested')
+    @patch.object(utils, 'git_post_install')
+    @patch.object(utils, 'git_pre_install')
+    def test_git_install(self, git_pre, git_post, git_requested):
+        projects_yaml = openstack_origin_git
+        git_requested.return_value = True
+        utils.git_install(projects_yaml)
+        self.assertTrue(git_pre.called)
+        self.git_clone_and_install.assert_called_with(openstack_origin_git,
+                                                      core_project='keystone')
+        self.assertTrue(git_post.called)
+
+    @patch.object(utils, 'mkdir')
+    @patch.object(utils, 'write_file')
+    @patch.object(utils, 'add_user_to_group')
+    @patch.object(utils, 'add_group')
+    @patch.object(utils, 'adduser')
+    def test_git_pre_install(self, adduser, add_group, add_user_to_group,
+                             write_file, mkdir):
+        utils.git_pre_install()
+        adduser.assert_called_with('keystone', shell='/bin/bash',
+                                   system_user=True,
+                                   home_dir='/var/lib/keystone')
+        add_group.assert_called_with('keystone', system_group=True)
+        add_user_to_group.assert_called_with('keystone', 'keystone')
+        expected = [
+            call('/var/lib/keystone', owner='keystone',
+                 group='keystone', perms=0o755, force=False),
+            call('/var/lib/keystone/cache', owner='keystone',
+                 group='keystone', perms=0o755, force=False),
+            call('/var/log/keystone', owner='keystone',
+                 group='keystone', perms=0o755, force=False),
+        ]
+        self.assertEqual(mkdir.call_args_list, expected)
+        write_file.assert_called_with('/var/log/keystone/keystone.log',
+                                      '', owner='keystone', group='keystone',
+                                      perms=0o600)
+
+    @patch('os.path.join')
+    @patch('os.path.exists')
+    @patch('os.symlink')
+    @patch('shutil.copytree')
+    @patch('shutil.rmtree')
+    @patch('subprocess.check_call')
+    def test_git_post_install(self, check_call, rmtree, copytree, symlink,
+                              exists, join):
+        self.os_release.return_value = 'havana'
+        projects_yaml = openstack_origin_git
+        join.return_value = 'joined-string'
+        self.git_pip_venv_dir.return_value = '/mnt/openstack-git/venv'
+        self.lsb_release.return_value = {'DISTRIB_RELEASE': '15.04'}
+        utils.git_post_install(projects_yaml)
+        expected = [
+            call('joined-string', '/etc/keystone'),
+        ]
+        copytree.assert_has_calls(expected)
+        expected = [
+            call('joined-string', '/usr/local/bin/keystone-manage'),
+        ]
+        symlink.assert_has_calls(expected, any_order=True)
+        keystone_context = {
+            'service_description': 'Keystone API server',
+            'service_name': 'Keystone',
+            'user_name': 'keystone',
+            'start_dir': '/var/lib/keystone',
+            'process_name': 'keystone',
+            'executable_name': 'joined-string',
+            'config_files': ['/etc/keystone/keystone.conf'],
+            'log_file': '/var/log/keystone/keystone.log',
+        }
+        expected = [
+            call('git/logging.conf', '/etc/keystone/logging.conf', {},
+                 perms=0o644),
+            call('git.upstart', '/etc/init/keystone.conf',
+                 keystone_context, perms=0o644, templates_dir='joined-string'),
+        ]
+        self.assertEqual(self.render.call_args_list, expected)
+        self.service_restart.assert_called_with('keystone')
+
     @patch.object(utils, 'get_manager')
     def test_is_service_present(self, KeystoneManager):
         mock_keystone = MagicMock()
@@ -885,6 +1065,16 @@ class TestKeystoneUtils(CharmTestCase):
         protocol = utils.get_protocol()
         self.assertEqual(protocol, 'https')
 
+    def test_get_ssl_ca_settings(self):
+        CA = MagicMock()
+        CA.get_ca_bundle.return_value = 'certstring'
+        self.test_config.set('https-service-endpoints', 'True')
+        self.get_ca.return_value = CA
+        expected_settings = {'https_keystone': 'True',
+                             'ca_cert': b64encode('certstring')}
+        settings = utils.get_ssl_ca_settings()
+        self.assertEqual(settings, expected_settings)
+
     @patch.object(utils, 'get_manager')
     def test_add_credentials_keystone_not_ready(self, get_manager):
         """ Verify add_credentials_to_keystone when the relation
@@ -1070,6 +1260,67 @@ class TestKeystoneUtils(CharmTestCase):
         self.peer_store_and_set.assert_called_with(relation_id=relation_id,
                                                    **relation_data)
 
+    @patch.object(utils, 'set_service_password')
+    @patch.object(utils, 'get_service_password')
+    @patch.object(utils, 'get_ssl_ca_settings')
+    @patch.object(utils, 'create_user_credentials')
+    @patch.object(utils, 'get_protocol')
+    @patch.object(utils, 'resolve_address')
+    @patch.object(utils, 'get_api_version')
+    @patch.object(utils, 'get_manager')
+    def test_add_credentials_keystone_ssl(self, get_manager,
+                                          get_api_version,
+                                          resolve_address,
+                                          get_protocol,
+                                          create_user_credentials,
+                                          get_ssl_ca_settings,
+                                          get_callback, set_callback):
+        """ Verify add_credentials with SSL """
+        manager = MagicMock()
+        manager.resolve_tenant_id.return_value = 'abcdef0123456789'
+        get_manager.return_value = manager
+        remote_unit = 'unit/0'
+        relation_id = 'identity-credentials:0'
+        get_api_version.return_value = 2
+        get_protocol.return_value = 'https'
+        resolve_address.return_value = '10.10.10.10'
+        create_user_credentials.return_value = 'password'
+        get_ssl_ca_settings.return_value = {'https_keystone': 'True',
+                                            'ca_cert': 'base64certstring'}
+        self.relation_get.return_value = {'username': 'requester'}
+        self.get_service_password.return_value = 'password'
+        self.get_requested_roles.return_value = []
+        self.test_config.set('admin-port', 80)
+        self.test_config.set('service-port', 81)
+        self.test_config.set('https-service-endpoints', 'True')
+        relation_data = {'auth_host': '10.10.10.10',
+                         'credentials_host': '10.10.10.10',
+                         'credentials_port': 81,
+                         'auth_port': 80,
+                         'auth_protocol': 'https',
+                         'credentials_username': 'requester',
+                         'credentials_protocol': 'https',
+                         'credentials_password': 'password',
+                         'credentials_project': 'services',
+                         'credentials_project_id': 'abcdef0123456789',
+                         'region': 'RegionOne',
+                         'api_version': 2,
+                         'https_keystone': 'True',
+                         'ca_cert': 'base64certstring'}
+
+        utils.add_credentials_to_keystone(
+            relation_id=relation_id,
+            remote_unit=remote_unit)
+        create_user_credentials.assert_called_with('requester',
+                                                   get_callback,
+                                                   set_callback,
+                                                   domain=None,
+                                                   new_roles=[],
+                                                   grants=['Admin'],
+                                                   tenant='services')
+        self.peer_store_and_set.assert_called_with(relation_id=relation_id,
+                                                   **relation_data)
+
     @patch.object(utils.os, 'remove')
     @patch.object(utils.os.path, 'exists')
     def test_disable_unused_apache_sites(self, os_path_exists, os_remove):
@@ -1106,21 +1357,3 @@ class TestKeystoneUtils(CharmTestCase):
     def test_run_in_apache_set_release(self):
         self.os_release.return_value = 'kilo'
         self.assertTrue(utils.run_in_apache(release='liberty'))
-
-    def test_get_api_version_icehouse(self):
-        self.assertEqual(utils.get_api_version(), 2)
-
-    def test_get_api_version_queens(self):
-        self.get_os_codename_install_source.return_value = 'queens'
-        self.assertEqual(utils.get_api_version(), 3)
-
-    def test_get_api_version_invalid_option_value(self):
-        self.test_config.set('preferred-api-version', 4)
-        with self.assertRaises(ValueError):
-            utils.get_api_version()
-
-    def test_get_api_version_queens_invalid_option_value(self):
-        self.test_config.set('preferred-api-version', 2)
-        self.get_os_codename_install_source.return_value = 'queens'
-        with self.assertRaises(ValueError):
-            utils.get_api_version()