diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000000000000000000000000000000000000..7f7b5be3a9d2be7f3183bbb99a296a51e6dbbd84
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,7 @@
+[report]
+# Regexes for lines to exclude from consideration
+exclude_lines =
+    if __name__ == .__main__.:
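+# Only report coverage for the charm's own hook modules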
+include=
+    hooks/hooks.py
+    hooks/ceph*.py
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..9e552b19b6d8811642fca8047bf27acb0417f6c4
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,9 @@
+bin
+.coverage
+.testrepository
+.tox
+*.sw[nop]
+*.pyc
+.unit-state.db
+.stestr
+__pycache__
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000000000000000000000000000000000000..14a8e185054e75e0232a30b5fc5f3a58f9b1e0fb
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.opendev.org
+port=29418
+project=openstack/charm-ceph-proxy
diff --git a/.project b/.project
new file mode 100644
index 0000000000000000000000000000000000000000..17434fc24a59f9cc402c936c88bef573cc1c0f9a
--- /dev/null
+++ b/.project
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>ceph-proxy</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.python.pydev.PyDevBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.python.pydev.pythonNature</nature>
+	</natures>
+</projectDescription>
diff --git a/.pydevproject b/.pydevproject
new file mode 100644
index 0000000000000000000000000000000000000000..683d89d8ebd061cf2d057874abbe940465d6cee9
--- /dev/null
+++ b/.pydevproject
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?eclipse-pydev version="1.0"?><pydev_project>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
+<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
+<path>/ceph-proxy/hooks</path>
+<path>/ceph-proxy/unit_tests</path>
+<path>/ceph-proxy/tests</path>
+<path>/ceph-proxy/actions</path>
+</pydev_pathproperty>
+</pydev_project>
diff --git a/.stestr.conf b/.stestr.conf
new file mode 100644
index 0000000000000000000000000000000000000000..5fcccaca861e750fcf37f412c13e560614c8b3b4
--- /dev/null
+++ b/.stestr.conf
@@ -0,0 +1,3 @@
+[DEFAULT]
+test_path=./unit_tests
+top_dir=./
diff --git a/.zuul.yaml b/.zuul.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..34184f1d951c6bf6a6215242b167cd26d3241da6
--- /dev/null
+++ b/.zuul.yaml
@@ -0,0 +1,4 @@
+- project:
+    templates:
+      - python35-charm-jobs
+      - openstack-python3-ussuri-jobs
diff --git a/Makefile b/Makefile
index f63dfd0d051220a32c26e16e7c978bedcb247887..09b701f65f8e5c78fdf8352cf25cbfef36041cbb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
 #!/usr/bin/make
-PYTHON := /usr/bin/env python
+PYTHON := /usr/bin/env python3
 
 lint:
 	@tox -e pep8
@@ -14,9 +14,14 @@ functional_test:
 
 bin/charm_helpers_sync.py:
 	@mkdir -p bin
-	@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
-		> bin/charm_helpers_sync.py
+	@curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py
 
 sync: bin/charm_helpers_sync.py
 	$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
-	$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
+
+bin/git_sync.py:
+	@mkdir -p bin
+	@wget -O bin/git_sync.py https://raw.githubusercontent.com/CanonicalLtd/git-sync/master/git_sync.py
+
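+# ceph-sync pulls the shared charms.ceph library into ./lib using git_sync.py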
+ceph-sync: bin/git_sync.py
+	$(PYTHON) bin/git_sync.py -d lib -s https://github.com/openstack/charms.ceph.git
diff --git a/README.md b/README.md
index 7951addcb66f5dca2f2463175027c7eee3dd372b..bf32d10d36ca0c76e9b7897b2781b52f2a319d8c 100644
--- a/README.md
+++ b/README.md
@@ -1,35 +1,75 @@
 # Overview
 
-Ceph is a distributed storage and network file system designed to provide
+[Ceph][ceph-upstream] is a unified, distributed storage system designed for
 excellent performance, reliability, and scalability.
 
-This charm allows connecting an existing Ceph deployment with a Juju environment.
+The ceph-proxy charm deploys a proxy that acts as a [ceph-mon][ceph-mon-charm]
+application for an external Ceph cluster. It joins a non-charmed Ceph cluster
+to a Juju model.
+
+Compared to the current version of the published charm (v37 at the time of
+writing), this adds the capability to relate to [ceph-fs][ceph-fs-charm].
 
 # Usage
 
-Your config.yaml needs to provide the  monitor-hosts and fsid options like below:
+## Configuration
+
+This section covers common and/or important configuration options. See file
+`config.yaml` for the full list of options, along with their descriptions and
+default values. See the [Juju documentation][juju-docs-config-apps] for details
+on configuring applications.
+
+#### `fsid`
+
+The `fsid` option supplies the UUID of the external cluster.
+
+#### `admin-key`
+
+The `admin-key` option supplies the admin Cephx key of the external cluster.
+
+#### `monitor-hosts`
+
+The `monitor-hosts` option supplies the network addresses (and ports) of the
+Monitors of the external cluster.
+
+## Deployment
+
+Let file `ceph-proxy.yaml` contain the deployment configuration:
 
-`config.yaml`:
 ```yaml
-ceph-proxy:
-  monitor-hosts: IP_ADDRESS:PORT IP ADDRESS:PORT
-  fsid: FSID
+    ceph-proxy:
+        fsid: a4f1fb08-c83d-11ea-8f4a-635b3b062931
+        admin-key: AQCJvBFfWX+GLhAAln5dFd1rZekcGLyMmy58bQ==
+        monitor-hosts: '10.246.114.21:6789 10.246.114.22:6789 10.246.114.7:6789'
 ```
 
-You must then provide this configuration to the new deployment: `juju deploy ceph-proxy -c config.yaml`.
+To deploy:
+
+    juju deploy --config ceph-proxy.yaml ceph-proxy
+
+Now add relations as you normally would between a ceph-mon application and
+another application, except substitute ceph-proxy for ceph-mon. For instance,
+to use the external Ceph cluster as the backend for an existing glance
+application:
+
+    juju add-relation ceph-proxy:client glance:ceph
 
-This charm noes NOT insert itself between the clusters, but merely makes the external cluster available through Juju's environment by exposing the same relations that the existing ceph charms do.
+## Actions
 
-# Contact Information
+Many of the ceph-mon charm's actions are supported. See file `actions.yaml` for
+the full list of actions, along with their descriptions.
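+
+For example, to list the pools of the external cluster (assuming a deployed
+unit `ceph-proxy/0`):
+
+    juju run-action --wait ceph-proxy/0 list-pools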
 
-## Authors 
+# Bugs
 
-- Chris MacNaughton <chris.macnaughton@canonical.com>
+Please report bugs on [Launchpad][lp-bugs-charm-ceph-proxy].
 
-Report bugs on [Launchpad](http://bugs.launchpad.net/charm-ceph-proxy/+filebug)
+For general charm questions refer to the [OpenStack Charm Guide][cg].
 
-## Ceph
+<!-- LINKS -->
 
-- [Ceph website](http://ceph.com)
-- [Ceph mailing lists](http://ceph.com/resources/mailing-list-irc/)
-- [Ceph bug tracker](http://tracker.ceph.com/projects/ceph)
+[ceph-upstream]: https://ceph.io
+[cg]: https://docs.openstack.org/charm-guide
+[ceph-fs-charm]: https://jaas.ai/ceph-fs
+[ceph-mon-charm]: https://jaas.ai/ceph-mon
+[juju-docs-actions]: https://jaas.ai/docs/actions
+[juju-docs-config-apps]: https://juju.is/docs/configuring-applications
+[lp-bugs-charm-ceph-proxy]: https://bugs.launchpad.net/charm-ceph-proxy/+filebug
diff --git a/actions/create-cache-tier b/actions/create-cache-tier
deleted file mode 120000
index 2a7e4346f20239b766e16a95e60066753037354e..0000000000000000000000000000000000000000
--- a/actions/create-cache-tier
+++ /dev/null
@@ -1 +0,0 @@
-create-cache-tier.py
\ No newline at end of file
diff --git a/actions/create-cache-tier b/actions/create-cache-tier
new file mode 100755
index 0000000000000000000000000000000000000000..97a1d1efd64d0bbc36474b333f5088da3e356832
--- /dev/null
+++ b/actions/create-cache-tier
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+__author__ = 'chris'
+import os
+from subprocess import CalledProcessError
+import sys
+
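+# Make the charm's hooks/ directory and root importable regardless of the
+# working directory the action is invoked from.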
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
+
+_add_path(_hooks)
+_add_path(_root)
+
+
+from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists
+from charmhelpers.core.hookenv import action_get, config, log, action_fail
+
+
+def make_cache_tier():
+    backer_pool = action_get("backer-pool")
+    cache_pool = action_get("cache-pool")
+    cache_mode = action_get("cache-mode")
+    user = config('admin-user')
+
+    # Pre flight checks
+    if not pool_exists(user, backer_pool):
+        log("Please create {} pool before calling create-cache-tier".format(
+            backer_pool))
+        action_fail("create-cache-tier failed. Backer pool {} must exist "
+                    "before calling this".format(backer_pool))
+
+    if not pool_exists(user, cache_pool):
+        log("Please create {} pool before calling create-cache-tier".format(
+            cache_pool))
+        action_fail("create-cache-tier failed. Cache pool {} must exist "
+                    "before calling this".format(cache_pool))
+
+    pool = Pool(service=user, name=backer_pool)
+    try:
+        pool.add_cache_tier(cache_pool=cache_pool, mode=cache_mode)
+    except CalledProcessError as err:
+        log("Add cache tier failed with message: {}".format(
+            err.message))
+        action_fail("create-cache-tier failed.  Add cache tier failed with "
+                    "message: {}".format(err.message))
+
+
+if __name__ == '__main__':
+    make_cache_tier()
diff --git a/actions/create-erasure-profile b/actions/create-erasure-profile
index 2b00b588fbdaa0cbe09e83271b60d9929b274894..016862c83858a3aef35b0e222de5bc7ce38fee64 100755
--- a/actions/create-erasure-profile
+++ b/actions/create-erasure-profile
@@ -1,18 +1,30 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
+import os
 from subprocess import CalledProcessError
 import sys
 
-sys.path.append('hooks')
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
+
+_add_path(_hooks)
+_add_path(_root)
+
 
 from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile
-from charmhelpers.core.hookenv import action_get, log, action_fail
+from charmhelpers.core.hookenv import action_get, config, log, action_fail
 
 
 def make_erasure_profile():
     name = action_get("name")
     plugin = action_get("plugin")
     failure_domain = action_get("failure-domain")
-
+    user = config('admin-user')
     # jerasure requires k+m
     # isa requires k+m
     # local requires k+m+l
@@ -22,28 +34,28 @@ def make_erasure_profile():
         k = action_get("data-chunks")
         m = action_get("coding-chunks")
         try:
-            create_erasure_profile(service='admin',
+            create_erasure_profile(service=user,
                                    erasure_plugin_name=plugin,
                                    profile_name=name,
                                    data_chunks=k,
                                    coding_chunks=m,
                                    failure_domain=failure_domain)
         except CalledProcessError as e:
-            log(e)
+            log(str(e))
             action_fail("Create erasure profile failed with "
                         "message: {}".format(e.message))
     elif plugin == "isa":
         k = action_get("data-chunks")
         m = action_get("coding-chunks")
         try:
-            create_erasure_profile(service='admin',
+            create_erasure_profile(service=user,
                                    erasure_plugin_name=plugin,
                                    profile_name=name,
                                    data_chunks=k,
                                    coding_chunks=m,
                                    failure_domain=failure_domain)
         except CalledProcessError as e:
-            log(e)
+            log(str(e))
             action_fail("Create erasure profile failed with "
                         "message: {}".format(e.message))
     elif plugin == "local":
@@ -51,7 +63,7 @@ def make_erasure_profile():
         m = action_get("coding-chunks")
         l = action_get("locality-chunks")
         try:
-            create_erasure_profile(service='admin',
+            create_erasure_profile(service=user,
                                    erasure_plugin_name=plugin,
                                    profile_name=name,
                                    data_chunks=k,
@@ -59,7 +71,7 @@ def make_erasure_profile():
                                    locality=l,
                                    failure_domain=failure_domain)
         except CalledProcessError as e:
-            log(e)
+            log(str(e))
             action_fail("Create erasure profile failed with "
                         "message: {}".format(e.message))
     elif plugin == "shec":
@@ -67,7 +79,7 @@ def make_erasure_profile():
         m = action_get("coding-chunks")
         c = action_get("durability-estimator")
         try:
-            create_erasure_profile(service='admin',
+            create_erasure_profile(service=user,
                                    erasure_plugin_name=plugin,
                                    profile_name=name,
                                    data_chunks=k,
@@ -75,7 +87,7 @@ def make_erasure_profile():
                                    durability_estimator=c,
                                    failure_domain=failure_domain)
         except CalledProcessError as e:
-            log(e)
+            log(str(e))
             action_fail("Create erasure profile failed with "
                         "message: {}".format(e.message))
     else:
diff --git a/actions/create-pool b/actions/create-pool
index 4d1d2148b3bafb8f35adc7828e42a306fa81a424..ee6a779870ed2bf59478c8299dc67d90659c2bed 100755
--- a/actions/create-pool
+++ b/actions/create-pool
@@ -1,20 +1,33 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
+import os
 import sys
 
-sys.path.append('hooks')
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
+
+_add_path(_hooks)
+_add_path(_root)
+
 from subprocess import CalledProcessError
-from charmhelpers.core.hookenv import action_get, log, action_fail
+from charmhelpers.core.hookenv import action_get, config, log, action_fail
 from charmhelpers.contrib.storage.linux.ceph import ErasurePool, ReplicatedPool
 
 
 def create_pool():
     pool_name = action_get("name")
     pool_type = action_get("pool-type")
+    user = config('admin-user')
     try:
         if pool_type == "replicated":
             replicas = action_get("replicas")
             replicated_pool = ReplicatedPool(name=pool_name,
-                                             service='admin',
+                                             service=user,
                                              replicas=replicas)
             replicated_pool.create()
 
@@ -22,7 +35,7 @@ def create_pool():
             crush_profile_name = action_get("erasure-profile-name")
             erasure_pool = ErasurePool(name=pool_name,
                                        erasure_code_profile=crush_profile_name,
-                                       service='admin')
+                                       service=user)
             erasure_pool.create()
         else:
             log("Unknown pool type of {}. Only erasure or replicated is "
@@ -31,7 +44,7 @@ def create_pool():
                         "is allowed".format(pool_type))
     except CalledProcessError as e:
         action_fail("Pool creation failed because of a failed process. "
-                    "Ret Code: {} Message: {}".format(e.returncode, e.message))
+                    "Ret Code: {} Message: {}".format(e.returncode, str(e)))
 
 
 if __name__ == '__main__':
diff --git a/actions/delete-erasure-profile b/actions/delete-erasure-profile
index 075c410ec29dc30929a509f273162d68a6b80c85..7df8c44581b5c775ad5a345cbb3cf549e95c0bab 100755
--- a/actions/delete-erasure-profile
+++ b/actions/delete-erasure-profile
@@ -1,20 +1,31 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
 from subprocess import CalledProcessError
 
 __author__ = 'chris'
+import os
 import sys
 
-sys.path.append('hooks')
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
+
+_add_path(_hooks)
+_add_path(_root)
 
 from charmhelpers.contrib.storage.linux.ceph import remove_erasure_profile
-from charmhelpers.core.hookenv import action_get, log, action_fail
+from charmhelpers.core.hookenv import action_get, config, log, action_fail
 
 
 def delete_erasure_profile():
     name = action_get("name")
 
     try:
-        remove_erasure_profile(service='admin', profile_name=name)
+        remove_erasure_profile(service=config('admin-user'), profile_name=name)
     except CalledProcessError as e:
         action_fail("Remove erasure profile failed with error: {}".format(
-            e.message))
+            str(e)))
diff --git a/actions/delete-pool b/actions/delete-pool
index 3d6550760d834f2be9b2d8b4db50656596d15df1..68b89b23287690ac4448a1dacccc2c651679bd57 100755
--- a/actions/delete-pool
+++ b/actions/delete-pool
@@ -1,7 +1,18 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
+import os
 import sys
 
-sys.path.append('hooks')
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
+
+_add_path(_hooks)
+_add_path(_root)
 
 import rados
 from ceph_ops import connect
@@ -20,8 +31,8 @@ def remove_pool():
             rados.NoData,
             rados.NoSpace,
             rados.PermissionError) as e:
-        log(e)
-        action_fail(e)
+        log(str(e))
+        action_fail(str(e))
 
 
 if __name__ == '__main__':
diff --git a/actions/get-erasure-profile b/actions/get-erasure-profile
index 29ece59d81ee5cdaa2df8e4a647abe8adc2b8342..1f6b311d9e4b0d79c954abb18dd225503ef44a66 100755
--- a/actions/get-erasure-profile
+++ b/actions/get-erasure-profile
@@ -1,16 +1,27 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
 __author__ = 'chris'
+import os
 import sys
 
-sys.path.append('hooks')
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
+
+_add_path(_hooks)
+_add_path(_root)
 
 from charmhelpers.contrib.storage.linux.ceph import get_erasure_profile
-from charmhelpers.core.hookenv import action_get, action_set
+from charmhelpers.core.hookenv import action_get, action_set, config
 
 
 def make_erasure_profile():
     name = action_get("name")
-    out = get_erasure_profile(service='admin', name=name)
+    out = get_erasure_profile(service=config('admin-user'), name=name)
     action_set({'message': out})
 
 
diff --git a/actions/list-erasure-profiles b/actions/list-erasure-profiles
index cf6dfa096de82f62a418c8ad0ea693bdbf1fcd6a..caaa68c48eb348c45b450a3d13c675358553f854 100755
--- a/actions/list-erasure-profiles
+++ b/actions/list-erasure-profiles
@@ -1,22 +1,33 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
 __author__ = 'chris'
-import sys
+import os
 from subprocess import check_output, CalledProcessError
+import sys
+
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
 
-sys.path.append('hooks')
+_add_path(_hooks)
+_add_path(_root)
 
-from charmhelpers.core.hookenv import action_get, log, action_set, action_fail
+from charmhelpers.core.hookenv import (
+    action_get, log, config, action_set, action_fail)
 
 if __name__ == '__main__':
     name = action_get("name")
     try:
         out = check_output(['ceph',
-                            '--id', 'admin',
+                            '--id', config('admin-user'),
                             'osd',
                             'erasure-code-profile',
                             'ls']).decode('UTF-8')
         action_set({'message': out})
     except CalledProcessError as e:
-        log(e)
+        log(str(e))
         action_fail("Listing erasure profiles failed with error: {}".format(
-            e.message))
+            str(e)))
diff --git a/actions/list-pools b/actions/list-pools
index 102667cff97081276d313ccf7d06925bb4195701..401619cd0f33cdcb8f97f9b7b1ee83262c055892 100755
--- a/actions/list-pools
+++ b/actions/list-pools
@@ -1,17 +1,28 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
 __author__ = 'chris'
-import sys
+import os
 from subprocess import check_output, CalledProcessError
+import sys
+
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
 
-sys.path.append('hooks')
+_add_path(_hooks)
+_add_path(_root)
 
-from charmhelpers.core.hookenv import log, action_set, action_fail
+from charmhelpers.core.hookenv import log, config, action_set, action_fail
 
 if __name__ == '__main__':
     try:
-        out = check_output(['ceph', '--id', 'admin',
+        out = check_output(['ceph', '--id', config('admin-user'),
                             'osd', 'lspools']).decode('UTF-8')
         action_set({'message': out})
     except CalledProcessError as e:
-        log(e)
-        action_fail("List pools failed with error: {}".format(e.message))
+        log(str(e))
+        action_fail("List pools failed with error: {}".format(str(e)))
diff --git a/actions/pool-get b/actions/pool-get
index e4f924b95db1c2f0e2545153abfffbf4b251bf92..f1a5077d0701145c71ae8af512334f4485759aae 100755
--- a/actions/pool-get
+++ b/actions/pool-get
@@ -1,19 +1,30 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
 __author__ = 'chris'
-import sys
+import os
 from subprocess import check_output, CalledProcessError
+import sys
+
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
 
-sys.path.append('hooks')
+_add_path(_hooks)
+_add_path(_root)
 
-from charmhelpers.core.hookenv import log, action_set, action_get, action_fail
+from charmhelpers.core.hookenv import (
+    log, config, action_set, action_get, action_fail)
 
 if __name__ == '__main__':
     name = action_get('pool-name')
     key = action_get('key')
     try:
-        out = check_output(['ceph', '--id', 'admin',
+        out = check_output(['ceph', '--id', config('admin-user'),
                             'osd', 'pool', 'get', name, key]).decode('UTF-8')
         action_set({'message': out})
     except CalledProcessError as e:
-        log(e)
-        action_fail("Pool get failed with message: {}".format(e.message))
+        log(str(e))
+        action_fail("Pool get failed with message: {}".format(str(e)))
diff --git a/actions/pool-set b/actions/pool-set
index 1f6e13b810b7d7896041ba0b04ac0198cf416dd5..44874eb2197ae14620e581178436f6d62f91cfe0 100755
--- a/actions/pool-set
+++ b/actions/pool-set
@@ -1,10 +1,21 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
+import os
 from subprocess import CalledProcessError
 import sys
 
-sys.path.append('hooks')
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
 
-from charmhelpers.core.hookenv import action_get, log, action_fail
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
+
+_add_path(_hooks)
+_add_path(_root)
+
+from charmhelpers.core.hookenv import action_get, config, log, action_fail
 from ceph_broker import handle_set_pool_value
 
 if __name__ == '__main__':
@@ -16,8 +27,8 @@ if __name__ == '__main__':
                'value': value}
 
     try:
-        handle_set_pool_value(service='admin', request=request)
+        handle_set_pool_value(service=config('admin-user'), request=request)
     except CalledProcessError as e:
-        log(e.message)
+        log(str(e))
         action_fail("Setting pool key: {} and value: {} failed with "
-                    "message: {}".format(key, value, e.message))
+                    "message: {}".format(key, value, str(e)))
diff --git a/actions/pool-statistics b/actions/pool-statistics
index 536c889a0a7a91fd0714e1712c1ade3357ca1ea8..56e56a7a6f1bb1d6f826ee391f288b39744364bf 100755
--- a/actions/pool-statistics
+++ b/actions/pool-statistics
@@ -1,15 +1,27 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
+import os
 import sys
 
-sys.path.append('hooks')
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
+
+_add_path(_hooks)
+_add_path(_root)
+
 from subprocess import check_output, CalledProcessError
-from charmhelpers.core.hookenv import log, action_set, action_fail
+from charmhelpers.core.hookenv import log, config, action_set, action_fail
 
 if __name__ == '__main__':
     try:
-        out = check_output(['ceph', '--id', 'admin',
+        out = check_output(['ceph', '--id', config('admin-user'),
                             'df']).decode('UTF-8')
         action_set({'message': out})
     except CalledProcessError as e:
-        log(e)
-        action_fail("ceph df failed with message: {}".format(e.message))
+        log(str(e))
+        action_fail("ceph df failed with message: {}".format(str(e)))
diff --git a/actions/remove-cache-tier b/actions/remove-cache-tier
deleted file mode 120000
index 136c0f065127da5fb9b861130dbaa62169808cb7..0000000000000000000000000000000000000000
--- a/actions/remove-cache-tier
+++ /dev/null
@@ -1 +0,0 @@
-remove-cache-tier.py
\ No newline at end of file
diff --git a/actions/remove-cache-tier b/actions/remove-cache-tier
new file mode 100755
index 0000000000000000000000000000000000000000..a6f8f2b68a222998979c53841d10719b5efcc6c2
--- /dev/null
+++ b/actions/remove-cache-tier
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+import os
+from subprocess import CalledProcessError
+import sys
+
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
+
+_add_path(_hooks)
+_add_path(_root)
+
+from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists
+from charmhelpers.core.hookenv import action_get, config, log, action_fail
+
+__author__ = 'chris'
+
+
+def delete_cache_tier():
+    backer_pool = action_get("backer-pool")
+    cache_pool = action_get("cache-pool")
+    user = config('admin-user')
+    # Pre flight checks
+    if not pool_exists(user, backer_pool):
+        log("Backer pool {} must exist before calling this".format(
+            backer_pool))
+        action_fail("remove-cache-tier failed. Backer pool {} must exist "
+                    "before calling this".format(backer_pool))
+
+    if not pool_exists(user, cache_pool):
+        log("Cache pool {} must exist before calling this".format(
+            cache_pool))
+        action_fail("remove-cache-tier failed. Cache pool {} must exist "
+                    "before calling this".format(cache_pool))
+
+    pool = Pool(service=user, name=backer_pool)
+    try:
+        pool.remove_cache_tier(cache_pool=cache_pool)
+    except CalledProcessError as err:
+        log("Removing the cache tier failed with message: {}".format(
+            str(err)))
+        action_fail("remove-cache-tier failed. Removing the cache tier failed "
+                    "with message: {}".format(str(err)))
+
+
+if __name__ == '__main__':
+    delete_cache_tier()
diff --git a/actions/remove-pool-snapshot b/actions/remove-pool-snapshot
index 387849ea5e77713133175a5d9fe9d3664c804464..7569db5cebdf008eaa7ffc771dc8ae83ac743f66 100755
--- a/actions/remove-pool-snapshot
+++ b/actions/remove-pool-snapshot
@@ -1,19 +1,31 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
+import os
 import sys
 
-sys.path.append('hooks')
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
+
+_add_path(_hooks)
+_add_path(_root)
+
 from subprocess import CalledProcessError
-from charmhelpers.core.hookenv import action_get, log, action_fail
+from charmhelpers.core.hookenv import action_get, config, log, action_fail
 from charmhelpers.contrib.storage.linux.ceph import remove_pool_snapshot
 
 if __name__ == '__main__':
     name = action_get("pool-name")
     snapname = action_get("snapshot-name")
     try:
-        remove_pool_snapshot(service='admin',
+        remove_pool_snapshot(service=config('admin-user'),
                              pool_name=name,
                              snapshot_name=snapname)
     except CalledProcessError as e:
-        log(e)
+        log(str(e))
         action_fail("Remove pool snapshot failed with message: {}".format(
-            e.message))
+            str(e)))
diff --git a/actions/rename-pool b/actions/rename-pool
index 6fe088ecb4d79ebf1a7fb5c3536e5189c3d888ba..c8508b78dc0fea273380a7d320af0087a8a23025 100755
--- a/actions/rename-pool
+++ b/actions/rename-pool
@@ -1,16 +1,28 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
+import os
 import sys
 
-sys.path.append('hooks')
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
+
+_add_path(_hooks)
+_add_path(_root)
+
 from subprocess import CalledProcessError
-from charmhelpers.core.hookenv import action_get, log, action_fail
+from charmhelpers.core.hookenv import action_get, config, log, action_fail
 from charmhelpers.contrib.storage.linux.ceph import rename_pool
 
 if __name__ == '__main__':
     name = action_get("pool-name")
     new_name = action_get("new-name")
     try:
-        rename_pool(service='admin', old_name=name, new_name=new_name)
+        rename_pool(service=config('admin-user'), old_name=name,
+                    new_name=new_name)
     except CalledProcessError as e:
-        log(e)
-        action_fail("Renaming pool failed with message: {}".format(e.message))
+        log(str(e))
+        action_fail("Renaming pool failed with message: {}".format(str(e)))
diff --git a/actions/set-pool-max-bytes b/actions/set-pool-max-bytes
index 86360885469304972679384e6af33eefa363b9a1..91196b3e3ed13af0a566b2802c3a3cad704b2334 100755
--- a/actions/set-pool-max-bytes
+++ b/actions/set-pool-max-bytes
@@ -1,16 +1,28 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
+import os
 import sys
 
-sys.path.append('hooks')
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
+
+_add_path(_hooks)
+_add_path(_root)
+
 from subprocess import CalledProcessError
-from charmhelpers.core.hookenv import action_get, log, action_fail
+from charmhelpers.core.hookenv import action_get, config, log, action_fail
 from charmhelpers.contrib.storage.linux.ceph import set_pool_quota
 
 if __name__ == '__main__':
     max_bytes = action_get("max")
     name = action_get("pool-name")
     try:
-        set_pool_quota(service='admin', pool_name=name, max_bytes=max_bytes)
+        set_pool_quota(service=config('admin-user'), pool_name=name,
+                       max_bytes=max_bytes)
     except CalledProcessError as e:
-        log(e)
-        action_fail("Set pool quota failed with message: {}".format(e.message))
+        log(str(e))
+        action_fail("Set pool quota failed with message: {}".format(str(e)))
diff --git a/actions/snapshot-pool b/actions/snapshot-pool
index a02619bfce36ee7be6128ec3916463e3458e09e2..3eb6926e3fa061681df250b8e8a2f4d0af5c2988 100755
--- a/actions/snapshot-pool
+++ b/actions/snapshot-pool
@@ -1,18 +1,30 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
+import os
 import sys
 
-sys.path.append('hooks')
+_path = os.path.dirname(os.path.realpath(__file__))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_root = os.path.abspath(os.path.join(_path, '..'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
+
+_add_path(_hooks)
+_add_path(_root)
+
 from subprocess import CalledProcessError
-from charmhelpers.core.hookenv import action_get, log, action_fail
+from charmhelpers.core.hookenv import action_get, config, log, action_fail
 from charmhelpers.contrib.storage.linux.ceph import snapshot_pool
 
 if __name__ == '__main__':
     name = action_get("pool-name")
     snapname = action_get("snapshot-name")
     try:
-        snapshot_pool(service='admin',
+        snapshot_pool(service=config('admin-user'),
                       pool_name=name,
                       snapshot_name=snapname)
     except CalledProcessError as e:
-        log(e)
-        action_fail("Snapshot pool failed with message: {}".format(e.message))
+        log(str(e))
+        action_fail("Snapshot pool failed with message: {}".format(str(e)))
diff --git a/charm-helpers-hooks.yaml b/charm-helpers-hooks.yaml
index 14aa3e0d17f0a63015519782a4519db32a525e81..02a4f8e69a48db31f2e19fa78499ed579f10fa2d 100644
--- a/charm-helpers-hooks.yaml
+++ b/charm-helpers-hooks.yaml
@@ -1,5 +1,5 @@
-branch: lp:charm-helpers
-destination: hooks/charmhelpers
+repo: https://github.com/juju/charm-helpers
+destination: charmhelpers
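+# The modules listed below are synced into ./charmhelpers by 'make sync'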
 include:
     - core
     - cli
@@ -12,10 +12,14 @@ include:
         - lvm
     - payload.execd
     - contrib.openstack:
-        - utils
-        - exceptions
         - alternatives
+        - exceptions
+        - ha
+        - ip
+        - utils
     - contrib.network.ip
     - contrib.charmsupport
     - contrib.hardening|inc=*
     - contrib.python
+    - contrib.openstack.policyd
+    - contrib.hahelpers
diff --git a/charmhelpers/__init__.py b/charmhelpers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f57ed2adb8a109cb18c6f78be5218131fd7d1eb
--- /dev/null
+++ b/charmhelpers/__init__.py
@@ -0,0 +1,99 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Bootstrap charm-helpers, installing its dependencies if necessary using
+# only standard libraries.
+from __future__ import print_function
+from __future__ import absolute_import
+
+import functools
+import inspect
+import subprocess
+import sys
+
+try:
+    import six  # NOQA:F401
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
+    import six  # NOQA:F401
+
+try:
+    import yaml  # NOQA:F401
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+    import yaml  # NOQA:F401
+
+
+# Holds a list of mapping of mangled function names that have been deprecated
+# using the @deprecate decorator below.  This is so that the warning is only
+# printed once for each usage of the function.
+__deprecated_functions = {}
+
+
+def deprecate(warning, date=None, log=None):
+    """Add a deprecation warning the first time the function is used.
+
+    The date which is a string in semi-ISO8660 format indicates the year-month
+    that the function is officially going to be removed.
+
+    usage:
+
+    @deprecate('use core/fetch/add_source() instead', '2017-04')
+    def contributed_add_source_thing(...):
+        ...
+
+    And it then prints to the log ONCE that the function is deprecated.
+    The reason for passing the logging function (log) is so that hookenv.log
+    can be used for a charm if needed.
+
+    :param warning: String to indicate what is to be used instead.
+    :param date: Optional string in YYYY-MM format to indicate when the
+                 function will definitely (probably) be removed.
+    :param log: The log function to call in order to log. If None, logs to
+                stdout
+    """
+    def wrap(f):
+
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            try:
+                module = inspect.getmodule(f)
+                file = inspect.getsourcefile(f)
+                lines = inspect.getsourcelines(f)
+                f_name = "{}-{}-{}..{}-{}".format(
+                    module.__name__, file, lines[0], lines[-1], f.__name__)
+            except (IOError, TypeError):
+                # assume it was local, so just use the name of the function
+                f_name = f.__name__
+            if f_name not in __deprecated_functions:
+                __deprecated_functions[f_name] = True
+                s = "DEPRECATION WARNING: Function {} is being removed".format(
+                    f.__name__)
+                if date:
+                    s = "{} on/around {}".format(s, date)
+                if warning:
+                    s = "{} : {}".format(s, warning)
+                if log:
+                    log(s)
+                else:
+                    print(s)
+            return f(*args, **kwargs)
+        return wrapped_f
+    return wrap
diff --git a/charmhelpers/cli/__init__.py b/charmhelpers/cli/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..389b490f4eec27f18118ab6a5f3f529dbf2e9ecc
--- /dev/null
+++ b/charmhelpers/cli/__init__.py
@@ -0,0 +1,189 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import argparse
+import sys
+
+from six.moves import zip
+
+import charmhelpers.core.unitdata
+
+
+class OutputFormatter(object):
+    def __init__(self, outfile=sys.stdout):
+        self.formats = (
+            "raw",
+            "json",
+            "py",
+            "yaml",
+            "csv",
+            "tab",
+        )
+        self.outfile = outfile
+
+    def add_arguments(self, argument_parser):
+        formatgroup = argument_parser.add_mutually_exclusive_group()
+        choices = self.supported_formats
+        formatgroup.add_argument("--format", metavar='FMT',
+                                 help="Select output format for returned data, "
+                                      "where FMT is one of: {}".format(choices),
+                                 choices=choices, default='raw')
+        for fmt in self.formats:
+            fmtfunc = getattr(self, fmt)
+            formatgroup.add_argument("-{}".format(fmt[0]),
+                                     "--{}".format(fmt), action='store_const',
+                                     const=fmt, dest='format',
+                                     help=fmtfunc.__doc__)
+
+    @property
+    def supported_formats(self):
+        return self.formats
+
+    def raw(self, output):
+        """Output data as raw string (default)"""
+        if isinstance(output, (list, tuple)):
+            output = '\n'.join(map(str, output))
+        self.outfile.write(str(output))
+
+    def py(self, output):
+        """Output data as a nicely-formatted python data structure"""
+        import pprint
+        pprint.pprint(output, stream=self.outfile)
+
+    def json(self, output):
+        """Output data in JSON format"""
+        import json
+        json.dump(output, self.outfile)
+
+    def yaml(self, output):
+        """Output data in YAML format"""
+        import yaml
+        yaml.safe_dump(output, self.outfile)
+
+    def csv(self, output):
+        """Output data as excel-compatible CSV"""
+        import csv
+        csvwriter = csv.writer(self.outfile)
+        csvwriter.writerows(output)
+
+    def tab(self, output):
+        """Output data in excel-compatible tab-delimited format"""
+        import csv
+        csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab)
+        csvwriter.writerows(output)
+
+    def format_output(self, output, fmt='raw'):
+        fmtfunc = getattr(self, fmt)
+        fmtfunc(output)
+
+
+class CommandLine(object):
+    argument_parser = None
+    subparsers = None
+    formatter = None
+    exit_code = 0
+
+    def __init__(self):
+        if not self.argument_parser:
+            self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
+        if not self.formatter:
+            self.formatter = OutputFormatter()
+            self.formatter.add_arguments(self.argument_parser)
+        if not self.subparsers:
+            self.subparsers = self.argument_parser.add_subparsers(help='Commands')
+
+    def subcommand(self, command_name=None):
+        """
+        Decorate a function as a subcommand. Use its arguments as the
+        command-line arguments"""
+        def wrapper(decorated):
+            cmd_name = command_name or decorated.__name__
+            subparser = self.subparsers.add_parser(cmd_name,
+                                                   description=decorated.__doc__)
+            for args, kwargs in describe_arguments(decorated):
+                subparser.add_argument(*args, **kwargs)
+            subparser.set_defaults(func=decorated)
+            return decorated
+        return wrapper
+
+    def test_command(self, decorated):
+        """
+        Subcommand is a boolean test function, so bool return values should be
+        converted to a 0/1 exit code.
+        """
+        decorated._cli_test_command = True
+        return decorated
+
+    def no_output(self, decorated):
+        """
+        Subcommand is not expected to return a value, so don't print a spurious None.
+        """
+        decorated._cli_no_output = True
+        return decorated
+
+    def subcommand_builder(self, command_name, description=None):
+        """
+        Decorate a function that builds a subcommand. Builders should accept a
+        single argument (the subparser instance) and return the function to be
+        run as the command."""
+        def wrapper(decorated):
+            subparser = self.subparsers.add_parser(command_name)
+            func = decorated(subparser)
+            subparser.set_defaults(func=func)
+            subparser.description = description or func.__doc__
+        return wrapper
+
+    def run(self):
+        "Run cli, processing arguments and executing subcommands."
+        arguments = self.argument_parser.parse_args()
+        argspec = inspect.getargspec(arguments.func)
+        vargs = []
+        for arg in argspec.args:
+            vargs.append(getattr(arguments, arg))
+        if argspec.varargs:
+            vargs.extend(getattr(arguments, argspec.varargs))
+        output = arguments.func(*vargs)
+        if getattr(arguments.func, '_cli_test_command', False):
+            self.exit_code = 0 if output else 1
+            output = ''
+        if getattr(arguments.func, '_cli_no_output', False):
+            output = ''
+        self.formatter.format_output(output, arguments.format)
+        if charmhelpers.core.unitdata._KV:
+            charmhelpers.core.unitdata._KV.flush()
+
+
+cmdline = CommandLine()
+
+
+def describe_arguments(func):
+    """
+    Analyze a function's signature and return a data structure suitable for
+    passing in as arguments to an argparse parser's add_argument() method."""
+
+    argspec = inspect.getargspec(func)
+    # we should probably raise an exception somewhere if func includes **kwargs
+    if argspec.defaults:
+        positional_args = argspec.args[:-len(argspec.defaults)]
+        keyword_names = argspec.args[-len(argspec.defaults):]
+        for arg, default in zip(keyword_names, argspec.defaults):
+            yield ('--{}'.format(arg),), {'default': default}
+    else:
+        positional_args = argspec.args
+
+    for arg in positional_args:
+        yield (arg,), {}
+    if argspec.varargs:
+        yield (argspec.varargs,), {'nargs': '*'}
diff --git a/charmhelpers/cli/benchmark.py b/charmhelpers/cli/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..303af14b607d31e338aefff0df593609b7b45feb
--- /dev/null
+++ b/charmhelpers/cli/benchmark.py
@@ -0,0 +1,34 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import cmdline
+from charmhelpers.contrib.benchmark import Benchmark
+
+
+@cmdline.subcommand(command_name='benchmark-start')
+def start():
+    Benchmark.start()
+
+
+@cmdline.subcommand(command_name='benchmark-finish')
+def finish():
+    Benchmark.finish()
+
+
+@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
+def service(subparser):
+    subparser.add_argument("value", help="The composite score.")
+    subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
+    subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
+    return Benchmark.set_composite_score
diff --git a/charmhelpers/cli/commands.py b/charmhelpers/cli/commands.py
new file mode 100644
index 0000000000000000000000000000000000000000..b93105650be8226aa390fda46aa08afb23ebc7bc
--- /dev/null
+++ b/charmhelpers/cli/commands.py
@@ -0,0 +1,30 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This module loads sub-modules into the python runtime so they can be
+discovered via the inspect module. In order to prevent flake8 from (rightfully)
+telling us these are unused modules, throw a ' # noqa' at the end of each import
+so that the warning is suppressed.
+"""
+
+from . import CommandLine  # noqa
+
+"""
+Import the sub-modules which have decorated subcommands to register with chlp.
+"""
+from . import host  # noqa
+from . import benchmark  # noqa
+from . import unitdata  # noqa
+from . import hookenv  # noqa
diff --git a/charmhelpers/cli/hookenv.py b/charmhelpers/cli/hookenv.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd72f448bf0092251a454ff6dd3145f09048ae72
--- /dev/null
+++ b/charmhelpers/cli/hookenv.py
@@ -0,0 +1,21 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import cmdline
+from charmhelpers.core import hookenv
+
+
+cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
+cmdline.subcommand('service-name')(hookenv.service_name)
+cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
diff --git a/charmhelpers/cli/host.py b/charmhelpers/cli/host.py
new file mode 100644
index 0000000000000000000000000000000000000000..40396849907976fd077cc9c53a0852c4380c6266
--- /dev/null
+++ b/charmhelpers/cli/host.py
@@ -0,0 +1,29 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import cmdline
+from charmhelpers.core import host
+
+
+@cmdline.subcommand()
+def mounts():
+    "List mounts"
+    return host.mounts()
+
+
+@cmdline.subcommand_builder('service', description="Control system services")
+def service(subparser):
+    subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
+    subparser.add_argument("service_name", help="Name of the service to control")
+    return host.service
diff --git a/charmhelpers/cli/unitdata.py b/charmhelpers/cli/unitdata.py
new file mode 100644
index 0000000000000000000000000000000000000000..acce846f84ef32ed0b5829cf08e67ad33f0eb5d1
--- /dev/null
+++ b/charmhelpers/cli/unitdata.py
@@ -0,0 +1,46 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import cmdline
+from charmhelpers.core import unitdata
+
+
+@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
+def unitdata_cmd(subparser):
+    nested = subparser.add_subparsers()
+
+    get_cmd = nested.add_parser('get', help='Retrieve data')
+    get_cmd.add_argument('key', help='Key to retrieve the value of')
+    get_cmd.set_defaults(action='get', value=None)
+
+    getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data')
+    getrange_cmd.add_argument('key', metavar='prefix',
+                              help='Prefix of the keys to retrieve')
+    getrange_cmd.set_defaults(action='getrange', value=None)
+
+    set_cmd = nested.add_parser('set', help='Store data')
+    set_cmd.add_argument('key', help='Key to set')
+    set_cmd.add_argument('value', help='Value to store')
+    set_cmd.set_defaults(action='set')
+
+    def _unitdata_cmd(action, key, value):
+        if action == 'get':
+            return unitdata.kv().get(key)
+        elif action == 'getrange':
+            return unitdata.kv().getrange(key)
+        elif action == 'set':
+            unitdata.kv().set(key, value)
+            unitdata.kv().flush()
+            return ''
+    return _unitdata_cmd
diff --git a/charmhelpers/contrib/__init__.py b/charmhelpers/contrib/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/charmhelpers/contrib/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/charmhelpers/contrib/charmsupport/__init__.py b/charmhelpers/contrib/charmsupport/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/charmhelpers/contrib/charmsupport/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/charmhelpers/contrib/charmsupport/nrpe.py b/charmhelpers/contrib/charmsupport/nrpe.py
new file mode 100644
index 0000000000000000000000000000000000000000..c87cf4896c4af199878d82f7d97472bf231aa486
--- /dev/null
+++ b/charmhelpers/contrib/charmsupport/nrpe.py
@@ -0,0 +1,525 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Compatibility with the nrpe-external-master charm"""
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+#  Matthew Wedgwood <matthew.wedgwood@canonical.com>
+
+import glob
+import grp
+import os
+import pwd
+import re
+import shlex
+import shutil
+import subprocess
+import yaml
+
+from charmhelpers.core.hookenv import (
+    config,
+    hook_name,
+    local_unit,
+    log,
+    relation_get,
+    relation_ids,
+    relation_set,
+    relations_of_type,
+)
+
+from charmhelpers.core.host import service
+from charmhelpers.core import host
+
+# This module adds compatibility with the nrpe-external-master and plain nrpe
+# subordinate charms. To use it in your charm:
+#
+# 1. Update metadata.yaml
+#
+#   provides:
+#     (...)
+#     nrpe-external-master:
+#       interface: nrpe-external-master
+#       scope: container
+#
+#   and/or
+#
+#   provides:
+#     (...)
+#     local-monitors:
+#       interface: local-monitors
+#       scope: container
+
+#
+# 2. Add the following to config.yaml
+#
+#    nagios_context:
+#      default: "juju"
+#      type: string
+#      description: |
+#        Used by the nrpe subordinate charms.
+#        A string that will be prepended to instance name to set the host name
+#        in nagios. So for instance the hostname would be something like:
+#            juju-myservice-0
+#        If you're running multiple environments with the same services in them
+#        this allows you to differentiate between them.
+#    nagios_servicegroups:
+#      default: ""
+#      type: string
+#      description: |
+#        A comma-separated list of nagios servicegroups.
+#        If left empty, the nagios_context will be used as the servicegroup
+#
+# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
+#
+# 4. Update your hooks.py with something like this:
+#
+#    from charmsupport.nrpe import NRPE
+#    (...)
+#    def update_nrpe_config():
+#        nrpe_compat = NRPE()
+#        nrpe_compat.add_check(
+#            shortname = "myservice",
+#            description = "Check MyService",
+#            check_cmd = "check_http -w 2 -c 10 http://localhost"
+#            )
+#        nrpe_compat.add_check(
+#            "myservice_other",
+#            "Check for widget failures",
+#            check_cmd = "/srv/myapp/scripts/widget_check"
+#            )
+#        nrpe_compat.write()
+#
+#    def config_changed():
+#        (...)
+#        update_nrpe_config()
+#
+#    def nrpe_external_master_relation_changed():
+#        update_nrpe_config()
+#
+#    def local_monitors_relation_changed():
+#        update_nrpe_config()
+#
+# 4.a If your charm is a subordinate charm, set primary=False
+#
+#    from charmsupport.nrpe import NRPE
+#    (...)
+#    def update_nrpe_config():
+#        nrpe_compat = NRPE(primary=False)
+#
+# 5. ln -s hooks.py nrpe-external-master-relation-changed
+#    ln -s hooks.py local-monitors-relation-changed
+
+
+class CheckException(Exception):
+    pass
+
+
+class Check(object):
+    shortname_re = '[A-Za-z0-9-_.@]+$'
+    service_template = ("""
+#---------------------------------------------------
+# This file is Juju managed
+#---------------------------------------------------
+define service {{
+    use                             active-service
+    host_name                       {nagios_hostname}
+    service_description             {nagios_hostname}[{shortname}] """
+                        """{description}
+    check_command                   check_nrpe!{command}
+    servicegroups                   {nagios_servicegroup}
+{service_config_overrides}
+}}
+""")
+
+    def __init__(self, shortname, description, check_cmd, max_check_attempts=None):
+        super(Check, self).__init__()
+        # XXX: could be better to calculate this from the service name
+        if not re.match(self.shortname_re, shortname):
+            raise CheckException("shortname must match {}".format(
+                Check.shortname_re))
+        self.shortname = shortname
+        self.command = "check_{}".format(shortname)
+        # Note: a set of invalid characters is defined by the
+        # Nagios server config
+        # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
+        self.description = description
+        self.check_cmd = self._locate_cmd(check_cmd)
+        self.max_check_attempts = max_check_attempts
+
+    def _get_check_filename(self):
+        return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
+
+    def _get_service_filename(self, hostname):
+        return os.path.join(NRPE.nagios_exportdir,
+                            'service__{}_{}.cfg'.format(hostname, self.command))
+
+    def _locate_cmd(self, check_cmd):
+        search_path = (
+            '/usr/lib/nagios/plugins',
+            '/usr/local/lib/nagios/plugins',
+        )
+        parts = shlex.split(check_cmd)
+        for path in search_path:
+            if os.path.exists(os.path.join(path, parts[0])):
+                command = os.path.join(path, parts[0])
+                if len(parts) > 1:
+                    command += " " + " ".join(parts[1:])
+                return command
+        log('Check command not found: {}'.format(parts[0]))
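+        # e.g. (illustrative): 'check_http -w 2 -c 10' resolves to
+        # '/usr/lib/nagios/plugins/check_http -w 2 -c 10' when the plugin
+        # is installed; otherwise an empty command string is returned.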
+        return ''
+
+    def _remove_service_files(self):
+        if not os.path.exists(NRPE.nagios_exportdir):
+            return
+        for f in os.listdir(NRPE.nagios_exportdir):
+            if f.endswith('_{}.cfg'.format(self.command)):
+                os.remove(os.path.join(NRPE.nagios_exportdir, f))
+
+    def remove(self, hostname):
+        nrpe_check_file = self._get_check_filename()
+        if os.path.exists(nrpe_check_file):
+            os.remove(nrpe_check_file)
+        self._remove_service_files()
+
+    def write(self, nagios_context, hostname, nagios_servicegroups):
+        nrpe_check_file = self._get_check_filename()
+        with open(nrpe_check_file, 'w') as nrpe_check_config:
+            nrpe_check_config.write("# check {}\n".format(self.shortname))
+            if nagios_servicegroups:
+                nrpe_check_config.write(
+                    "# The following header was added automatically by juju\n")
+                nrpe_check_config.write(
+                    "# Modifying it will affect nagios monitoring and alerting\n")
+                nrpe_check_config.write(
+                    "# servicegroups: {}\n".format(nagios_servicegroups))
+            nrpe_check_config.write("command[{}]={}\n".format(
+                self.command, self.check_cmd))
+
+        if not os.path.exists(NRPE.nagios_exportdir):
+            log('Not writing service config as {} is not accessible'.format(
+                NRPE.nagios_exportdir))
+        else:
+            self.write_service_config(nagios_context, hostname,
+                                      nagios_servicegroups)
+
+    def write_service_config(self, nagios_context, hostname,
+                             nagios_servicegroups):
+        self._remove_service_files()
+
+        if self.max_check_attempts:
+            service_config_overrides = '    max_check_attempts              {}'.format(
+                self.max_check_attempts
+            )  # Note indentation is here rather than in the template to avoid trailing spaces
+        else:
+            service_config_overrides = ''  # empty string to avoid printing 'None'
+        templ_vars = {
+            'nagios_hostname': hostname,
+            'nagios_servicegroup': nagios_servicegroups,
+            'description': self.description,
+            'shortname': self.shortname,
+            'command': self.command,
+            'service_config_overrides': service_config_overrides,
+        }
+        nrpe_service_text = Check.service_template.format(**templ_vars)
+        nrpe_service_file = self._get_service_filename(hostname)
+        with open(nrpe_service_file, 'w') as nrpe_service_config:
+            nrpe_service_config.write(str(nrpe_service_text))
+
+    def run(self):
+        subprocess.call(self.check_cmd)
+
+
+class NRPE(object):
+    nagios_logdir = '/var/log/nagios'
+    nagios_exportdir = '/var/lib/nagios/export'
+    nrpe_confdir = '/etc/nagios/nrpe.d'
+    homedir = '/var/lib/nagios'  # home dir provided by nagios-nrpe-server
+
+    def __init__(self, hostname=None, primary=True):
+        super(NRPE, self).__init__()
+        self.config = config()
+        self.primary = primary
+        self.nagios_context = self.config['nagios_context']
+        if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
+            self.nagios_servicegroups = self.config['nagios_servicegroups']
+        else:
+            self.nagios_servicegroups = self.nagios_context
+        self.unit_name = local_unit().replace('/', '-')
+        if hostname:
+            self.hostname = hostname
+        else:
+            nagios_hostname = get_nagios_hostname()
+            if nagios_hostname:
+                self.hostname = nagios_hostname
+            else:
+                self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
+        self.checks = []
+        # If an nrpe-external-master relation exists, set the primary status
+        relation = relation_ids('nrpe-external-master')
+        if relation:
+            log("Setting charm primary status {}".format(primary))
+            for rid in relation:
+                relation_set(relation_id=rid, relation_settings={'primary': self.primary})
+        self.remove_check_queue = set()
+
+    @classmethod
+    def does_nrpe_conf_dir_exist(cls):
+        """Return True if th nrpe_confdif directory exists."""
+        return os.path.isdir(cls.nrpe_confdir)
+
+    def add_check(self, *args, **kwargs):
+        shortname = None
+        if kwargs.get('shortname') is None:
+            if len(args) > 0:
+                shortname = args[0]
+        else:
+            shortname = kwargs['shortname']
+
+        self.checks.append(Check(*args, **kwargs))
+        try:
+            self.remove_check_queue.remove(shortname)
+        except KeyError:
+            pass
+
+    def remove_check(self, *args, **kwargs):
+        if kwargs.get('shortname') is None:
+            raise ValueError('shortname of check must be specified')
+
+        # Use sensible defaults if they're not specified - these are not
+        # actually used during removal, but they're required for constructing
+        # the Check object; check_disk is chosen because it's part of the
+        # nagios-plugins-basic package.
+        if kwargs.get('check_cmd') is None:
+            kwargs['check_cmd'] = 'check_disk'
+        if kwargs.get('description') is None:
+            kwargs['description'] = ''
+
+        check = Check(*args, **kwargs)
+        check.remove(self.hostname)
+        self.remove_check_queue.add(kwargs['shortname'])
+
+    def write(self):
+        try:
+            nagios_uid = pwd.getpwnam('nagios').pw_uid
+            nagios_gid = grp.getgrnam('nagios').gr_gid
+        except Exception:
+            log("Nagios user not set up, nrpe checks not updated")
+            return
+
+        if not os.path.exists(NRPE.nagios_logdir):
+            os.mkdir(NRPE.nagios_logdir)
+            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
+
+        nrpe_monitors = {}
+        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
+
+        # check that the charm can write to the conf dir.  If not, then nagios
+        # probably isn't installed, and we can defer.
+        if not self.does_nrpe_conf_dir_exist():
+            return
+
+        for nrpecheck in self.checks:
+            nrpecheck.write(self.nagios_context, self.hostname,
+                            self.nagios_servicegroups)
+            nrpe_monitors[nrpecheck.shortname] = {
+                "command": nrpecheck.command,
+            }
+            # If we were passed max_check_attempts, add that to the relation data
+            try:
+                nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts
+            except AttributeError:
+                pass
+
+        # update-status hooks are configured to fire every 5 minutes by
+        # default. When nagios-nrpe-server is restarted, the nagios server
+        # reports checks as failing, causing unnecessary alerts. Let's not
+        # restart on update-status hooks.
+        if not hook_name() == 'update-status':
+            service('restart', 'nagios-nrpe-server')
+
+        monitor_ids = relation_ids("local-monitors") + \
+            relation_ids("nrpe-external-master")
+        for rid in monitor_ids:
+            reldata = relation_get(unit=local_unit(), rid=rid)
+            if 'monitors' in reldata:
+                # update the existing set of monitors with the new data
+                old_monitors = yaml.safe_load(reldata['monitors'])
+                old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
+                # remove keys that are in the remove_check_queue
+                old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
+                                     if k not in self.remove_check_queue}
+                # update/add nrpe_monitors
+                old_nrpe_monitors.update(nrpe_monitors)
+                old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
+                # write back to the relation
+                relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
+            else:
+                # write a brand new set of monitors, as no existing ones.
+                relation_set(relation_id=rid, monitors=yaml.dump(monitors))
+
+        self.remove_check_queue.clear()
+
+
+def get_nagios_hostcontext(relation_name='nrpe-external-master'):
+    """
+    Query relation with nrpe subordinate, return the nagios_host_context
+
+    :param str relation_name: Name of relation nrpe sub joined to
+    """
+    for rel in relations_of_type(relation_name):
+        if 'nagios_host_context' in rel:
+            return rel['nagios_host_context']
+
+
+def get_nagios_hostname(relation_name='nrpe-external-master'):
+    """
+    Query relation with nrpe subordinate, return the nagios_hostname
+
+    :param str relation_name: Name of relation nrpe sub joined to
+    """
+    for rel in relations_of_type(relation_name):
+        if 'nagios_hostname' in rel:
+            return rel['nagios_hostname']
+
+
+def get_nagios_unit_name(relation_name='nrpe-external-master'):
+    """
+    Return the nagios unit name prepended with host_context if needed
+
+    :param str relation_name: Name of relation nrpe sub joined to
+    """
+    host_context = get_nagios_hostcontext(relation_name)
+    if host_context:
+        unit = "%s:%s" % (host_context, local_unit())
+    else:
+        unit = local_unit()
+    return unit
+
+
+def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
+    """
+    Add checks for each service in list
+
+    :param NRPE nrpe: NRPE object to add check to
+    :param list services: List of services to check
+    :param str unit_name: Unit name to use in check description
+    :param bool immediate_check: For sysv init, run the service check immediately
+    """
+    for svc in services:
+        # Don't add a check for these services from neutron-gateway
+        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
+            continue
+
+        upstart_init = '/etc/init/%s.conf' % svc
+        sysv_init = '/etc/init.d/%s' % svc
+
+        if host.init_is_systemd(service_name=svc):
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_systemd.py %s' % svc
+            )
+        elif os.path.exists(upstart_init):
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_upstart_job %s' % svc
+            )
+        elif os.path.exists(sysv_init):
+            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
+            checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
+            croncmd = (
+                '/usr/local/lib/nagios/plugins/check_exit_status.pl '
+                '-e -s /etc/init.d/%s status' % svc
+            )
+            cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
+            with open(cronpath, 'w') as f:
+                f.write(cron_file)
+            nrpe.add_check(
+                shortname=svc,
+                description='service check {%s}' % unit_name,
+                check_cmd='check_status_file.py -f %s' % checkpath,
+            )
+            # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
+            # (LP: #1670223).
+            if immediate_check and os.path.isdir(nrpe.homedir):
+                with open(checkpath, 'w') as f:
+                    subprocess.call(
+                        croncmd.split(),
+                        stdout=f,
+                        stderr=subprocess.STDOUT
+                    )
+                os.chmod(checkpath, 0o644)
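+
+# Minimal usage sketch (illustrative; 'myservice' is a placeholder):
+#
+#     nrpe_compat = NRPE()
+#     add_init_service_checks(nrpe_compat, ['myservice'], local_unit())
+#     nrpe_compat.write()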
+
+
+def copy_nrpe_checks(nrpe_files_dir=None):
+    """
+    Copy the nrpe checks into place.
+    """
+    NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
+    if nrpe_files_dir is None:
+        # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
+        for segment in ['.', 'hooks']:
+            nrpe_files_dir = os.path.abspath(os.path.join(
+                os.getenv('CHARM_DIR'),
+                segment,
+                'charmhelpers',
+                'contrib',
+                'openstack',
+                'files'))
+            if os.path.isdir(nrpe_files_dir):
+                break
+        else:
+            raise RuntimeError("Couldn't find charmhelpers directory")
+    if not os.path.exists(NAGIOS_PLUGINS):
+        os.makedirs(NAGIOS_PLUGINS)
+    for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
+        if os.path.isfile(fname):
+            shutil.copy2(fname,
+                         os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
+
+
+def add_haproxy_checks(nrpe, unit_name):
+    """
+    Add checks for each service in list
+
+    :param NRPE nrpe: NRPE object to add check to
+    :param str unit_name: Unit name to use in check description
+    """
+    nrpe.add_check(
+        shortname='haproxy_servers',
+        description='Check HAProxy {%s}' % unit_name,
+        check_cmd='check_haproxy.sh')
+    nrpe.add_check(
+        shortname='haproxy_queue',
+        description='Check HAProxy queue depth {%s}' % unit_name,
+        check_cmd='check_haproxy_queue_depth.sh')
+
+
+def remove_deprecated_check(nrpe, deprecated_services):
+    """
+    Remove checks for deprecated services in the list
+
+    :param nrpe: NRPE object to remove check from
+    :type nrpe: NRPE
+    :param deprecated_services: List of deprecated services that are removed
+    :type deprecated_services: list
+    """
+    for dep_svc in deprecated_services:
+        log('Deprecated service: {}'.format(dep_svc))
+        nrpe.remove_check(shortname=dep_svc)
diff --git a/charmhelpers/contrib/charmsupport/volumes.py b/charmhelpers/contrib/charmsupport/volumes.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ea43f0888cd92ac4344f908aa7c9d0afe7568ed
--- /dev/null
+++ b/charmhelpers/contrib/charmsupport/volumes.py
@@ -0,0 +1,173 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+Functions for managing volumes in juju units. One volume is supported per unit.
+Subordinates may have their own storage, provided it is on its own partition.
+
+Configuration stanzas::
+
+  volume-ephemeral:
+    type: boolean
+    default: true
+    description: >
+      If false, a volume is mounted as specified in "volume-map".
+      If true, ephemeral storage will be used, meaning that log data
+         will only exist as long as the machine. YOU HAVE BEEN WARNED.
+  volume-map:
+    type: string
+    default: {}
+    description: >
+      YAML map of units to device names, e.g:
+        "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
+      Service units will raise a configure-error if volume-ephemeral
+      is 'false' and no volume-map value is set. Use 'juju set' to set a
+      value and 'juju resolved' to complete configuration.
+
+Usage::
+
+    from charmsupport.volumes import configure_volume, VolumeConfigurationError
+    from charmsupport.hookenv import log, ERROR
+    def pre_mount_hook():
+        stop_service('myservice')
+    def post_mount_hook():
+        start_service('myservice')
+
+    if __name__ == '__main__':
+        try:
+            configure_volume(before_change=pre_mount_hook,
+                             after_change=post_mount_hook)
+        except VolumeConfigurationError:
+            log('Storage could not be configured', ERROR)
+
+'''
+
+# XXX: Known limitations
+# - fstab is neither consulted nor updated
+
+import os
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+import yaml
+
+
+MOUNT_BASE = '/srv/juju/volumes'
+
+
+class VolumeConfigurationError(Exception):
+    '''Volume configuration data is missing or invalid'''
+    pass
+
+
+def get_config():
+    '''Gather and sanity-check volume configuration data'''
+    volume_config = {}
+    config = hookenv.config()
+
+    errors = False
+
+    if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
+        volume_config['ephemeral'] = True
+    else:
+        volume_config['ephemeral'] = False
+
+    try:
+        volume_map = yaml.safe_load(config.get('volume-map', '{}'))
+    except yaml.YAMLError as e:
+        hookenv.log("Error parsing YAML volume-map: {}".format(e),
+                    hookenv.ERROR)
+        errors = True
+    if volume_map is None:
+        # probably an empty string
+        volume_map = {}
+    elif not isinstance(volume_map, dict):
+        hookenv.log("Volume-map should be a dictionary, not {}".format(
+            type(volume_map)))
+        errors = True
+
+    volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
+    if volume_config['device'] and volume_config['ephemeral']:
+        # asked for ephemeral storage but also defined a volume ID
+        hookenv.log('A volume is defined for this unit, but ephemeral '
+                    'storage was requested', hookenv.ERROR)
+        errors = True
+    elif not volume_config['device'] and not volume_config['ephemeral']:
+        # asked for permanent storage but did not define volume ID
+        hookenv.log('Persistent storage was requested, but there is no '
+                    'volume defined for this unit.', hookenv.ERROR)
+        errors = True
+
+    unit_mount_name = hookenv.local_unit().replace('/', '-')
+    volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
+
+    if errors:
+        return None
+    return volume_config
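+
+# Example (illustrative) return value for unit rsyslog/0 with volume-map
+# "{ rsyslog/0: /dev/vdb }" and volume-ephemeral set to false:
+#     {'ephemeral': False, 'device': '/dev/vdb',
+#      'mountpoint': '/srv/juju/volumes/rsyslog-0'}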
+
+
+def mount_volume(config):
+    if os.path.exists(config['mountpoint']):
+        if not os.path.isdir(config['mountpoint']):
+            hookenv.log('Not a directory: {}'.format(config['mountpoint']))
+            raise VolumeConfigurationError()
+    else:
+        host.mkdir(config['mountpoint'])
+    if os.path.ismount(config['mountpoint']):
+        unmount_volume(config)
+    if not host.mount(config['device'], config['mountpoint'], persist=True):
+        raise VolumeConfigurationError()
+
+
+def unmount_volume(config):
+    if os.path.ismount(config['mountpoint']):
+        if not host.umount(config['mountpoint'], persist=True):
+            raise VolumeConfigurationError()
+
+
+def managed_mounts():
+    '''List of all mounted managed volumes'''
+    return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
+
+
+def configure_volume(before_change=lambda: None, after_change=lambda: None):
+    '''Set up storage (or don't) according to the charm's volume configuration.
+       Returns the mount point or "ephemeral". before_change and after_change
+       are optional functions to be called if the volume configuration changes.
+    '''
+
+    config = get_config()
+    if not config:
+        hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
+        raise VolumeConfigurationError()
+
+    if config['ephemeral']:
+        if os.path.ismount(config['mountpoint']):
+            before_change()
+            unmount_volume(config)
+            after_change()
+        return 'ephemeral'
+    else:
+        # persistent storage
+        if os.path.ismount(config['mountpoint']):
+            mounts = dict(managed_mounts())
+            if mounts.get(config['mountpoint']) != config['device']:
+                before_change()
+                unmount_volume(config)
+                mount_volume(config)
+                after_change()
+        else:
+            before_change()
+            mount_volume(config)
+            after_change()
+        return config['mountpoint']
diff --git a/charmhelpers/contrib/hahelpers/__init__.py b/charmhelpers/contrib/hahelpers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/charmhelpers/contrib/hahelpers/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/charmhelpers/contrib/hahelpers/apache.py b/charmhelpers/contrib/hahelpers/apache.py
new file mode 100644
index 0000000000000000000000000000000000000000..a54702bc1a3b8d63a9cf013c4cfaf83e4adfe471
--- /dev/null
+++ b/charmhelpers/contrib/hahelpers/apache.py
@@ -0,0 +1,90 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Copyright 2012 Canonical Ltd.
+#
+# This file is sourced from lp:openstack-charm-helpers
+#
+# Authors:
+#  James Page <james.page@ubuntu.com>
+#  Adam Gandelman <adamg@ubuntu.com>
+#
+
+import os
+
+from charmhelpers.core import host
+from charmhelpers.core.hookenv import (
+    config as config_get,
+    relation_get,
+    relation_ids,
+    related_units as relation_list,
+    log,
+    INFO,
+)
+
+# This file contains the CA cert from the charm's ssl_ca configuration
+# option; in future the file name should be updated to reflect that.
+CONFIG_CA_CERT_FILE = 'keystone_juju_ca_cert'
+
+
+def get_cert(cn=None):
+    # TODO: deal with multiple https endpoints via charm config
+    cert = config_get('ssl_cert')
+    key = config_get('ssl_key')
+    if not (cert and key):
+        log("Inspecting identity-service relations for SSL certificate.",
+            level=INFO)
+        cert = key = None
+        if cn:
+            ssl_cert_attr = 'ssl_cert_{}'.format(cn)
+            ssl_key_attr = 'ssl_key_{}'.format(cn)
+        else:
+            ssl_cert_attr = 'ssl_cert'
+            ssl_key_attr = 'ssl_key'
+        for r_id in relation_ids('identity-service'):
+            for unit in relation_list(r_id):
+                if not cert:
+                    cert = relation_get(ssl_cert_attr,
+                                        rid=r_id, unit=unit)
+                if not key:
+                    key = relation_get(ssl_key_attr,
+                                       rid=r_id, unit=unit)
+    return (cert, key)
+
+
+def get_ca_cert():
+    ca_cert = config_get('ssl_ca')
+    if ca_cert is None:
+        log("Inspecting identity-service relations for CA SSL certificate.",
+            level=INFO)
+        for r_id in (relation_ids('identity-service') +
+                     relation_ids('identity-credentials')):
+            for unit in relation_list(r_id):
+                if ca_cert is None:
+                    ca_cert = relation_get('ca_cert',
+                                           rid=r_id, unit=unit)
+    return ca_cert
+
+
+def retrieve_ca_cert(cert_file):
+    cert = None
+    if os.path.isfile(cert_file):
+        with open(cert_file, 'rb') as crt:
+            cert = crt.read()
+    return cert
+
+
+def install_ca_cert(ca_cert):
+    host.install_ca_cert(ca_cert, CONFIG_CA_CERT_FILE)
diff --git a/charmhelpers/contrib/hahelpers/cluster.py b/charmhelpers/contrib/hahelpers/cluster.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba34fba0cafa21d15a6a27946544b2c99fbd3663
--- /dev/null
+++ b/charmhelpers/contrib/hahelpers/cluster.py
@@ -0,0 +1,451 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+#  James Page <james.page@ubuntu.com>
+#  Adam Gandelman <adamg@ubuntu.com>
+#
+
+"""
+Helpers for clustering: determining "cluster leadership" and other
+clustering-related operations.
+"""
+
+import functools
+import subprocess
+import os
+import time
+
+from socket import gethostname as get_unit_hostname
+
+import six
+
+from charmhelpers.core.hookenv import (
+    log,
+    relation_ids,
+    related_units as relation_list,
+    relation_get,
+    config as config_get,
+    INFO,
+    DEBUG,
+    WARNING,
+    unit_get,
+    is_leader as juju_is_leader,
+    status_set,
+)
+from charmhelpers.core.host import (
+    modulo_distribution,
+)
+from charmhelpers.core.decorators import (
+    retry_on_exception,
+)
+from charmhelpers.core.strutils import (
+    bool_from_string,
+)
+
+DC_RESOURCE_NAME = 'DC'
+
+
+class HAIncompleteConfig(Exception):
+    pass
+
+
+class HAIncorrectConfig(Exception):
+    pass
+
+
+class CRMResourceNotFound(Exception):
+    pass
+
+
+class CRMDCNotFound(Exception):
+    pass
+
+
+def is_elected_leader(resource):
+    """
+    Returns True if the charm executing this is the elected cluster leader.
+
+    It relies on three mechanisms to determine leadership:
+        1. If juju is sufficiently new and leadership election is supported,
+        the is_leader command will be used.
+        2. If the charm is part of a corosync cluster, call corosync to
+        determine leadership.
+        3. If the charm is not part of a corosync cluster, the leader is
+        determined as being "the alive unit with the lowest unit numer". In
+        other words, the oldest surviving unit.
+    """
+    try:
+        return juju_is_leader()
+    except NotImplementedError:
+        log('Juju leadership election feature not enabled'
+            ', using fallback support',
+            level=WARNING)
+
+    if is_clustered():
+        if not is_crm_leader(resource):
+            log('Deferring action to CRM leader.', level=INFO)
+            return False
+    else:
+        peers = peer_units()
+        if peers and not oldest_peer(peers):
+            log('Deferring action to oldest service unit.', level=INFO)
+            return False
+    return True
+
+
+def is_clustered():
+    for r_id in (relation_ids('ha') or []):
+        for unit in (relation_list(r_id) or []):
+            clustered = relation_get('clustered',
+                                     rid=r_id,
+                                     unit=unit)
+            if clustered:
+                return True
+    return False
+
+
+def is_crm_dc():
+    """
+    Determine leadership by querying the pacemaker Designated Controller
+    """
+    cmd = ['crm', 'status']
+    try:
+        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+        if not isinstance(status, six.text_type):
+            status = six.text_type(status, "utf-8")
+    except subprocess.CalledProcessError as ex:
+        raise CRMDCNotFound(str(ex))
+
+    current_dc = ''
+    for line in status.split('\n'):
+        if line.startswith('Current DC'):
+            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
+            current_dc = line.split(':')[1].split()[0]
+    if current_dc == get_unit_hostname():
+        return True
+    elif current_dc == 'NONE':
+        raise CRMDCNotFound('Current DC: NONE')
+
+    return False
+
+
+@retry_on_exception(5, base_delay=2,
+                    exc_type=(CRMResourceNotFound, CRMDCNotFound))
+def is_crm_leader(resource, retry=False):
+    """
+    Returns True if the charm calling this is the elected corosync leader,
+    as returned by calling the external "crm" command.
+
+    We allow this operation to be retried to avoid the possibility of getting a
+    false negative. See LP #1396246 for more info.
+    """
+    if resource == DC_RESOURCE_NAME:
+        return is_crm_dc()
+    cmd = ['crm', 'resource', 'show', resource]
+    try:
+        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+        if not isinstance(status, six.text_type):
+            status = six.text_type(status, "utf-8")
+    except subprocess.CalledProcessError:
+        status = None
+
+    if status and get_unit_hostname() in status:
+        return True
+
+    if status and "resource %s is NOT running" % (resource) in status:
+        raise CRMResourceNotFound("CRM resource %s not found" % (resource))
+
+    return False
+
+
+def is_leader(resource):
+    log("is_leader is deprecated. Please consider using is_crm_leader "
+        "instead.", level=WARNING)
+    return is_crm_leader(resource)
+
+
+def peer_units(peer_relation="cluster"):
+    peers = []
+    for r_id in (relation_ids(peer_relation) or []):
+        for unit in (relation_list(r_id) or []):
+            peers.append(unit)
+    return peers
+
+
+def peer_ips(peer_relation='cluster', addr_key='private-address'):
+    '''Return a dict of peers and their private-address'''
+    peers = {}
+    for r_id in relation_ids(peer_relation):
+        for unit in relation_list(r_id):
+            peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
+    return peers
+
+
+def oldest_peer(peers):
+    """Determines who the oldest peer is by comparing unit numbers."""
+    local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
+    for peer in peers:
+        remote_unit_no = int(peer.split('/')[1])
+        if remote_unit_no < local_unit_no:
+            return False
+    return True
+
+
+def eligible_leader(resource):
+    log("eligible_leader is deprecated. Please consider using "
+        "is_elected_leader instead.", level=WARNING)
+    return is_elected_leader(resource)
+
+
+def https():
+    '''
+    Determines whether enough data has been provided in configuration
+    or relation data to configure HTTPS.
+
+    returns: boolean
+    '''
+    use_https = config_get('use-https')
+    if use_https and bool_from_string(use_https):
+        return True
+    if config_get('ssl_cert') and config_get('ssl_key'):
+        return True
+    for r_id in relation_ids('certificates'):
+        for unit in relation_list(r_id):
+            ca = relation_get('ca', rid=r_id, unit=unit)
+            if ca:
+                return True
+    for r_id in relation_ids('identity-service'):
+        for unit in relation_list(r_id):
+            # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
+            rel_state = [
+                relation_get('https_keystone', rid=r_id, unit=unit),
+                relation_get('ca_cert', rid=r_id, unit=unit),
+            ]
+            # NOTE: works around (LP: #1203241)
+            if (None not in rel_state) and ('' not in rel_state):
+                return True
+    return False
+
+
+def determine_api_port(public_port, singlenode_mode=False):
+    '''
+    Determine correct API server listening port based on
+    existence of HTTPS reverse proxy and/or haproxy.
+
+    public_port: int: standard public port for given service
+
+    singlenode_mode: boolean: Shuffle ports when only a single unit is present
+
+    returns: int: the correct listening port for the API service
+    '''
+    i = 0
+    if singlenode_mode:
+        i += 1
+    elif len(peer_units()) > 0 or is_clustered():
+        i += 1
+    if https():
+        i += 1
+    return public_port - (i * 10)
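+
+# Worked example (illustrative): with public_port=9292 on a clustered unit
+# with https enabled, i == 2 and the API service listens on 9272, while
+# apache terminates SSL on 9282 and haproxy serves 9292 itself.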
+
+
+def determine_apache_port(public_port, singlenode_mode=False):
+    '''
+    Determine the correct apache listening port based on the public port and
+    the state of the cluster.
+
+    public_port: int: standard public port for given service
+
+    singlenode_mode: boolean: Shuffle ports when only a single unit is present
+
+    returns: int: the correct listening port for the HAProxy service
+    '''
+    i = 0
+    if singlenode_mode:
+        i += 1
+    elif len(peer_units()) > 0 or is_clustered():
+        i += 1
+    return public_port - (i * 10)
+
+
+determine_apache_port_single = functools.partial(
+    determine_apache_port, singlenode_mode=True)
+
+
+def get_hacluster_config(exclude_keys=None):
+    '''
+    Obtains all relevant configuration from charm configuration required
+    for initiating a relation to hacluster:
+
+        ha-bindiface, ha-mcastport, vip, os-internal-hostname,
+        os-admin-hostname, os-public-hostname, os-access-hostname
+
+    param: exclude_keys: list of setting key(s) to be excluded.
+    returns: dict: A dict containing settings keyed by setting name.
+    raises: HAIncompleteConfig or HAIncorrectConfig if settings are
+            missing or incorrect.
+    '''
+    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
+                'os-admin-hostname', 'os-public-hostname', 'os-access-hostname']
+    conf = {}
+    for setting in settings:
+        if exclude_keys and setting in exclude_keys:
+            continue
+
+        conf[setting] = config_get(setting)
+
+    if not valid_hacluster_config():
+        raise HAIncorrectConfig('Insufficient or incorrect config data to '
+                                'configure hacluster.')
+    return conf
+
+
+def valid_hacluster_config():
+    '''
+    Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
+    must be set.
+
+    Note: ha-bindiface and ha-mcastport both have defaults and will always
+    be set. We only care that either vip or dns-ha is set.
+
+    :returns: boolean: valid config returns true.
+    raises: HAIncorrectConfig if settings conflict.
+    raises: HAIncompleteConfig if settings are missing.
+    '''
+    vip = config_get('vip')
+    dns = config_get('dns-ha')
+    if not(bool(vip) ^ bool(dns)):
+        msg = ('HA: Either vip or dns-ha must be set but not both in order to '
+               'use high availability')
+        status_set('blocked', msg)
+        raise HAIncorrectConfig(msg)
+
+    # If dns-ha then one of os-*-hostname must be set
+    if dns:
+        dns_settings = ['os-internal-hostname', 'os-admin-hostname',
+                        'os-public-hostname', 'os-access-hostname']
+        # At this point it is unknown if one or all of the possible
+        # network spaces are in HA. Validate at least one is set which is
+        # the minimum required.
+        for setting in dns_settings:
+            if config_get(setting):
+                log('DNS HA: At least one hostname is set {}: {}'
+                    ''.format(setting, config_get(setting)),
+                    level=DEBUG)
+                return True
+
+        msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
+               'DNS HA')
+        status_set('blocked', msg)
+        raise HAIncompleteConfig(msg)
+
+    log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
+    return True
+
+
+def canonical_url(configs, vip_setting='vip'):
+    '''
+    Returns the correct HTTP URL to this host given the state of HTTPS
+    configuration and hacluster.
+
+    :configs    : OSTemplateRenderer: A config templating object to inspect for
+                                      a complete https context.
+
+    :vip_setting:                str: Setting in charm config that specifies
+                                      VIP address.
+    '''
+    scheme = 'http'
+    if 'https' in configs.complete_contexts():
+        scheme = 'https'
+    if is_clustered():
+        addr = config_get(vip_setting)
+    else:
+        addr = unit_get('private-address')
+    return '%s://%s' % (scheme, addr)
+
+
+def distributed_wait(modulo=None, wait=None, operation_name='operation'):
+    ''' Distribute operations by waiting based on modulo_distribution
+
+    If modulo and/or wait are not set, check config_get for those values.
+    If config values are not set, default to modulo=3 and wait=30.
+
+    :param modulo: int The modulo number creates the group distribution
+    :param wait: int The constant time wait value
+    :param operation_name: string Operation name for status message
+                           i.e.  'restart'
+    :side effect: Calls config_get()
+    :side effect: Calls log()
+    :side effect: Calls status_set()
+    :side effect: Calls time.sleep()
+    '''
+    if modulo is None:
+        modulo = config_get('modulo-nodes') or 3
+    if wait is None:
+        wait = config_get('known-wait') or 30
+    if juju_is_leader():
+        # The leader should never wait
+        calculated_wait = 0
+    else:
+        # non_zero_wait=True guarantees the non-leader who gets modulo 0
+        # will still wait
+        calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
+                                              non_zero_wait=True)
+    msg = "Waiting {} seconds for {} ...".format(calculated_wait,
+                                                 operation_name)
+    log(msg, DEBUG)
+    status_set('maintenance', msg)
+    time.sleep(calculated_wait)
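+
+# Schedule sketch (illustrative), assuming modulo=3 and wait=30: the leader
+# proceeds immediately, unit/1 waits 30s, unit/2 waits 60s, and a non-leader
+# whose unit number is a multiple of 3 waits 90s thanks to non_zero_wait.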
+
+
+def get_managed_services_and_ports(services, external_ports,
+                                   external_services=None,
+                                   port_conv_f=determine_apache_port_single):
+    """Get the services and ports managed by this charm.
+
+    Return only the services and corresponding ports that are managed by this
+    charm. This excludes haproxy when there is a relation with hacluster. This
+    is because this charm passes responsibility for stopping and starting
+    haproxy to hacluster.
+
+    Similarly, if a relation with hacluster exists then the ports returned by
+    this method correspond to those managed by the apache server rather than
+    haproxy.
+
+    :param services: List of services.
+    :type services: List[str]
+    :param external_ports: List of ports managed by external services.
+    :type external_ports: List[int]
+    :param external_services: List of services to be removed if ha relation is
+                              present.
+    :type external_services: List[str]
+    :param port_conv_f: Function to apply to ports to calculate the ports
+                        managed by services controlled by this charm.
+    :type port_conv_f: Callable[[int], int]
+    :returns: A tuple containing a list of services first followed by a list of
+              ports.
+    :rtype: Tuple[List[str], List[int]]
+    """
+    if external_services is None:
+        external_services = ['haproxy']
+    if relation_ids('ha'):
+        for svc in external_services:
+            try:
+                services.remove(svc)
+            except ValueError:
+                pass
+        external_ports = [port_conv_f(p) for p in external_ports]
+    return services, external_ports
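+
+# Usage sketch (illustrative names and ports): with a hacluster ('ha')
+# relation present,
+#     get_managed_services_and_ports(['apache2', 'haproxy'], [8776])
+# returns (['apache2'], [8766]); haproxy is left to hacluster and the port
+# is converted via determine_apache_port_single.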
diff --git a/charmhelpers/contrib/hardening/README.hardening.md b/charmhelpers/contrib/hardening/README.hardening.md
new file mode 100644
index 0000000000000000000000000000000000000000..91280c03e6d7b5d75b356cd94614fc821abc2644
--- /dev/null
+++ b/charmhelpers/contrib/hardening/README.hardening.md
@@ -0,0 +1,38 @@
+# Juju charm-helpers hardening library
+
+## Description
+
+This library provides multiple implementations of system and application
+hardening that conform to the standards of http://hardening.io/.
+
+Current implementations include:
+
+ * OS
+ * SSH
+ * MySQL
+ * Apache
+
+## Requirements
+
+* Juju Charms
+
+## Usage
+
+1. Synchronise this library into your charm and add the harden() decorator
+   (from contrib.hardening.harden) to any functions or methods you want to use
+   to trigger hardening of your application/system.
+
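+   For example (illustrative):
+
+       from charmhelpers.contrib.hardening.harden import harden
+
+       @harden()
+       def config_changed():
+           ...
+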
+2. Add a config option called 'harden' to your charm config.yaml and set it to
+   a space-delimited list of hardening modules you want to run e.g. "os ssh"
+
+3. Override any config defaults (contrib.hardening.defaults) by adding a file
+   called hardening.yaml to your charm root. At the root level, list the
+   name(s) of the modules whose settings you want to override, followed by the
+   settings themselves, e.g.
+
+   os:
+       general:
+            desktop_enable: True
+
+4. Now just run your charm as usual and hardening will be applied each time the
+   hook runs.
diff --git a/charmhelpers/contrib/hardening/__init__.py b/charmhelpers/contrib/hardening/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..30a3e94359e94011cd247de7ade76667346e7379
--- /dev/null
+++ b/charmhelpers/contrib/hardening/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/charmhelpers/contrib/hardening/apache/__init__.py b/charmhelpers/contrib/hardening/apache/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..58bebd846bd6fa648cfab6ab1056ad10d8415453
--- /dev/null
+++ b/charmhelpers/contrib/hardening/apache/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from os import path
+
+TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charmhelpers/contrib/hardening/apache/checks/__init__.py b/charmhelpers/contrib/hardening/apache/checks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bc2ebd4760124e23c128868e098aceac610260f
--- /dev/null
+++ b/charmhelpers/contrib/hardening/apache/checks/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+)
+from charmhelpers.contrib.hardening.apache.checks import config
+
+
+def run_apache_checks():
+    log("Starting Apache hardening checks.", level=DEBUG)
+    checks = config.get_audits()
+    for check in checks:
+        log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
+        check.ensure_compliance()
+
+    log("Apache hardening checks complete.", level=DEBUG)
diff --git a/charmhelpers/contrib/hardening/apache/checks/config.py b/charmhelpers/contrib/hardening/apache/checks/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..341da9eee10f73cbe3d7e7e5cf91b57b4d2a89b4
--- /dev/null
+++ b/charmhelpers/contrib/hardening/apache/checks/config.py
@@ -0,0 +1,104 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+import six
+import subprocess
+
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+from charmhelpers.contrib.hardening.audits.file import (
+    FilePermissionAudit,
+    DirectoryPermissionAudit,
+    NoReadWriteForOther,
+    TemplatedFile,
+    DeletedFile
+)
+from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit
+from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get Apache hardening config audits.
+
+    :returns: list of audits
+    """
+    if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0:
+        log("Apache server does not appear to be installed on this node - "
+            "skipping apache hardening", level=INFO)
+        return []
+
+    context = ApacheConfContext()
+    settings = utils.get_settings('apache')
+    audits = [
+        FilePermissionAudit(paths=os.path.join(
+                            settings['common']['apache_dir'], 'apache2.conf'),
+                            user='root', group='root', mode=0o0640),
+
+        TemplatedFile(os.path.join(settings['common']['apache_dir'],
+                                   'mods-available/alias.conf'),
+                      context,
+                      TEMPLATES_DIR,
+                      mode=0o0640,
+                      user='root',
+                      service_actions=[{'service': 'apache2',
+                                        'actions': ['restart']}]),
+
+        TemplatedFile(os.path.join(settings['common']['apache_dir'],
+                                   'conf-enabled/99-hardening.conf'),
+                      context,
+                      TEMPLATES_DIR,
+                      mode=0o0640,
+                      user='root',
+                      service_actions=[{'service': 'apache2',
+                                        'actions': ['restart']}]),
+
+        DirectoryPermissionAudit(settings['common']['apache_dir'],
+                                 user='root',
+                                 group='root',
+                                 mode=0o0750),
+
+        DisabledModuleAudit(settings['hardening']['modules_to_disable']),
+
+        NoReadWriteForOther(settings['common']['apache_dir']),
+
+        DeletedFile(['/var/www/html/index.html'])
+    ]
+
+    return audits
+
+
+class ApacheConfContext(object):
+    """Defines the set of key/value pairs to set in a apache config file.
+
+    This context, when called, will return a dictionary containing the
+    key/value pairs of setting to specify in the
+    /etc/apache/conf-enabled/hardening.conf file.
+    """
+    def __call__(self):
+        settings = utils.get_settings('apache')
+        ctxt = settings['hardening']
+
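+        # 'apache2 -v' prints a line such as
+        # "Server version: Apache/2.4.29 (Ubuntu)"; the regex below pulls
+        # out the bare version string (here "2.4.29").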
+        out = subprocess.check_output(['apache2', '-v'])
+        if six.PY3:
+            out = out.decode('utf-8')
+        ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+',
+                                           out).group(1)
+        ctxt['apache_icondir'] = '/usr/share/apache2/icons/'
+        return ctxt
diff --git a/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf b/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf
new file mode 100644
index 0000000000000000000000000000000000000000..22b68041d50ff753284bbb4b41a21e8f2bd8c18a
--- /dev/null
+++ b/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf
@@ -0,0 +1,32 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+
+<Location / >
+  <LimitExcept {{ allowed_http_methods }} >
+    # http://httpd.apache.org/docs/2.4/upgrading.html
+    {% if apache_version > '2.2' -%}
+    Require all granted
+    {% else -%}
+    Order Allow,Deny
+    Deny from all
+    {% endif %}
+  </LimitExcept>
+</Location>
+
+<Directory />
+    Options -Indexes -FollowSymLinks
+    AllowOverride None
+</Directory>
+
+<Directory /var/www/>
+    Options -Indexes -FollowSymLinks
+    AllowOverride None
+</Directory>
+
+TraceEnable {{ traceenable }}
+ServerTokens {{ servertokens }}
+
+SSLHonorCipherOrder {{ honor_cipher_order }}
+SSLCipherSuite {{ cipher_suite }}
diff --git a/charmhelpers/contrib/hardening/apache/templates/__init__.py b/charmhelpers/contrib/hardening/apache/templates/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/charmhelpers/contrib/hardening/apache/templates/alias.conf b/charmhelpers/contrib/hardening/apache/templates/alias.conf
new file mode 100644
index 0000000000000000000000000000000000000000..e46a58a30dadbb6ccffa02d82593c63a9cbf52df
--- /dev/null
+++ b/charmhelpers/contrib/hardening/apache/templates/alias.conf
@@ -0,0 +1,31 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+<IfModule alias_module>
+  #
+  # Aliases: Add here as many aliases as you need (with no limit). The format is
+  # Alias fakename realname
+  #
+  # Note that if you include a trailing / on fakename then the server will
+  # require it to be present in the URL.  So "/icons" isn't aliased in this
+  # example, only "/icons/".  If the fakename is slash-terminated, then the
+  # realname must also be slash terminated, and if the fakename omits the
+  # trailing slash, the realname must also omit it.
+  #
+  # We include the /icons/ alias for FancyIndexed directory listings.  If
+  # you do not use FancyIndexing, you may comment this out.
+  #
+  Alias /icons/ "{{ apache_icondir }}/"
+
+  <Directory "{{ apache_icondir }}">
+    Options -Indexes -MultiViews -FollowSymLinks
+    AllowOverride None
+{% if apache_version == '2.4' -%}
+    Require all granted
+{% else -%}
+    Order allow,deny
+    Allow from all
+{% endif %}
+  </Directory>
+</IfModule>
diff --git a/charmhelpers/contrib/hardening/audits/__init__.py b/charmhelpers/contrib/hardening/audits/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6dd5b05fec4ffcfcdb4378a06dfda4e8ac7e8371
--- /dev/null
+++ b/charmhelpers/contrib/hardening/audits/__init__.py
@@ -0,0 +1,54 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class BaseAudit(object):  # noqa
+    """Base class for hardening checks.
+
+    The lifecycle of a hardening check is to first determine whether the
+    system is in compliance with the specified check; if it is not,
+    ensure_compliance() takes whatever action is needed to correct it.
+    """
+    def __init__(self, *args, **kwargs):
+        self.unless = kwargs.get('unless', None)
+        super(BaseAudit, self).__init__()
+
+    def ensure_compliance(self):
+        """Checks to see if the current hardening check is in compliance or
+        not.
+
+        If the check that is performed is not in compliance, then an exception
+        should be raised.
+        """
+        pass
+
+    def _take_action(self):
+        """Determines whether to perform the action or not.
+
+        Checks whether or not an action should be taken. This is determined
+        by the truthy value of the unless parameter. If unless is a callable,
+        it will be invoked with no parameters in order to determine whether
+        or not the action should be taken. Otherwise, the truthy value of
+        the unless attribute will determine if the action should be
+        performed.
+        """
+        # Do the action if there isn't an unless override.
+        if self.unless is None:
+            return True
+
+        # Invoke the callback if there is one.
+        if hasattr(self.unless, '__call__'):
+            return not self.unless()
+
+        return not self.unless
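
A short usage sketch of the 'unless' mechanism above (WidgetAudit is a hypothetical subclass, used only for illustration): a truthy value, or a callable returning a truthy value, suppresses the compliance action.

    # Hypothetical subclass demonstrating _take_action().
    from charmhelpers.contrib.hardening.audits import BaseAudit

    class WidgetAudit(BaseAudit):
        def ensure_compliance(self):
            if self._take_action():
                print("applying compliance action")

    WidgetAudit().ensure_compliance()                      # no unless -> acts
    WidgetAudit(unless=True).ensure_compliance()           # truthy -> skipped
    WidgetAudit(unless=lambda: False).ensure_compliance()  # callable False -> acts
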
diff --git a/charmhelpers/contrib/hardening/audits/apache.py b/charmhelpers/contrib/hardening/audits/apache.py
new file mode 100644
index 0000000000000000000000000000000000000000..c15376256135752d207d3d6db16d34c0083a6a6b
--- /dev/null
+++ b/charmhelpers/contrib/hardening/audits/apache.py
@@ -0,0 +1,105 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import subprocess
+
+import six
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+    ERROR,
+)
+
+from charmhelpers.contrib.hardening.audits import BaseAudit
+
+
+class DisabledModuleAudit(BaseAudit):
+    """Audits Apache2 modules.
+
+    Determines if the specified apache2 modules are enabled. If they are,
+    they are disabled by ensure_compliance().
+    """
+    def __init__(self, modules):
+        if modules is None:
+            self.modules = []
+        elif isinstance(modules, six.string_types):
+            self.modules = [modules]
+        else:
+            self.modules = modules
+
+    def ensure_compliance(self):
+        """Ensures that the modules are not loaded."""
+        if not self.modules:
+            return
+
+        try:
+            loaded_modules = self._get_loaded_modules()
+            non_compliant_modules = []
+            for module in self.modules:
+                if module in loaded_modules:
+                    log("Module '%s' is enabled but should not be." %
+                        (module), level=INFO)
+                    non_compliant_modules.append(module)
+
+            if len(non_compliant_modules) == 0:
+                return
+
+            for module in non_compliant_modules:
+                self._disable_module(module)
+            self._restart_apache()
+        except subprocess.CalledProcessError as e:
+            log('Error occurred auditing apache module compliance. '
+                'This may have been already reported. '
+                'Output is: %s' % e.output, level=ERROR)
+
+    @staticmethod
+    def _get_loaded_modules():
+        """Returns the modules which are enabled in Apache."""
+        output = subprocess.check_output(['apache2ctl', '-M'])
+        if six.PY3:
+            output = output.decode('utf-8')
+        modules = []
+        for line in output.splitlines():
+            # Each line of the enabled module output looks like:
+            #  module_name (static|shared)
+            # Plus a header line at the top of the output which is stripped
+            # out by the regex.
+            matcher = re.search(r'^ (\S*)_module (\S*)', line)
+            if matcher:
+                modules.append(matcher.group(1))
+        return modules
+
+    @staticmethod
+    def _disable_module(module):
+        """Disables the specified module in Apache."""
+        try:
+            subprocess.check_call(['a2dismod', module])
+        except subprocess.CalledProcessError as e:
+            # Note: catch error here to allow the attempt of disabling
+            # multiple modules in one go rather than failing after the
+            # first module fails.
+            log('Error occurred disabling module %s. '
+                'Output is: %s' % (module, e.output), level=ERROR)
+
+    @staticmethod
+    def _restart_apache():
+        """Restarts the apache process"""
+        subprocess.check_output(['service', 'apache2', 'restart'])
+
+    @staticmethod
+    def is_ssl_enabled():
+        """Check if SSL module is enabled or not"""
+        return 'ssl' in DisabledModuleAudit._get_loaded_modules()
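
The module-name parsing performed by _get_loaded_modules() can be exercised against canned 'apache2ctl -M' output; a minimal sketch (the sample output is illustrative):

    import re

    # Illustrative 'apache2ctl -M' output: header line plus one module per line.
    sample = ("Loaded Modules:\n"
              " core_module (static)\n"
              " ssl_module (shared)\n"
              " cgi_module (shared)\n")

    modules = []
    for line in sample.splitlines():
        matcher = re.search(r'^ (\S*)_module (\S*)', line)
        if matcher:
            modules.append(matcher.group(1))

    print(modules)  # -> ['core', 'ssl', 'cgi']
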
diff --git a/charmhelpers/contrib/hardening/audits/apt.py b/charmhelpers/contrib/hardening/audits/apt.py
new file mode 100644
index 0000000000000000000000000000000000000000..cad7bf7376d6f22ce8feccd843b070c399887aa9
--- /dev/null
+++ b/charmhelpers/contrib/hardening/audits/apt.py
@@ -0,0 +1,104 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import  # required for external apt import
+from six import string_types
+
+from charmhelpers.fetch import (
+    apt_cache,
+    apt_purge
+)
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    WARNING,
+)
+from charmhelpers.contrib.hardening.audits import BaseAudit
+from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg
+
+
+class AptConfig(BaseAudit):
+
+    def __init__(self, config, **kwargs):
+        self.config = config
+
+    def verify_config(self):
+        apt_pkg.init()
+        for cfg in self.config:
+            value = apt_pkg.config.get(cfg['key'], cfg.get('default', ''))
+            if value and value != cfg['expected']:
+                log("APT config '%s' has unexpected value '%s' "
+                    "(expected='%s')" %
+                    (cfg['key'], value, cfg['expected']), level=WARNING)
+
+    def ensure_compliance(self):
+        self.verify_config()
+
+
+class RestrictedPackages(BaseAudit):
+    """Class used to audit restricted packages on the system."""
+
+    def __init__(self, pkgs, **kwargs):
+        super(RestrictedPackages, self).__init__(**kwargs)
+        if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'):
+            self.pkgs = pkgs.split()
+        else:
+            self.pkgs = pkgs
+
+    def ensure_compliance(self):
+        cache = apt_cache()
+
+        for p in self.pkgs:
+            if p not in cache:
+                continue
+
+            pkg = cache[p]
+            if not self.is_virtual_package(pkg):
+                if not pkg.current_ver:
+                    log("Package '%s' is not installed." % pkg.name,
+                        level=DEBUG)
+                    continue
+                else:
+                    log("Restricted package '%s' is installed" % pkg.name,
+                        level=WARNING)
+                    self.delete_package(cache, pkg)
+            else:
+                log("Checking restricted virtual package '%s' provides" %
+                    pkg.name, level=DEBUG)
+                self.delete_package(cache, pkg)
+
+    def delete_package(self, cache, pkg):
+        """Deletes the package from the system.
+
+        Deletes the package from the system, properly handling virtual
+        packages.
+
+        :param cache: the apt cache
+        :param pkg: the package to remove
+        """
+        if self.is_virtual_package(pkg):
+            log("Package '%s' appears to be virtual - purging provides" %
+                pkg.name, level=DEBUG)
+            for _p in pkg.provides_list:
+                self.delete_package(cache, _p[2].parent_pkg)
+        elif not pkg.current_ver:
+            log("Package '%s' not installed" % pkg.name, level=DEBUG)
+            return
+        else:
+            log("Purging package '%s'" % pkg.name, level=DEBUG)
+            apt_purge(pkg.name)
+
+    def is_virtual_package(self, pkg):
+        return (pkg.get('has_provides', False) and
+                not pkg.get('has_versions', False))
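
A usage sketch for RestrictedPackages (the package names here are examples): a plain string is split on whitespace, so a string or a list may be passed. Running it requires a charm environment where apt_cache() and apt_purge() are usable.

    # Sketch: purge example packages if they are installed.
    from charmhelpers.contrib.hardening.audits.apt import RestrictedPackages

    audit = RestrictedPackages('telnet-server rsh-server')
    # equivalent: RestrictedPackages(['telnet-server', 'rsh-server'])
    audit.ensure_compliance()  # consults the apt cache and purges matches
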
diff --git a/charmhelpers/contrib/hardening/audits/file.py b/charmhelpers/contrib/hardening/audits/file.py
new file mode 100644
index 0000000000000000000000000000000000000000..257c6351a0b0d244273013faef913f52349f2486
--- /dev/null
+++ b/charmhelpers/contrib/hardening/audits/file.py
@@ -0,0 +1,550 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grp
+import os
+import pwd
+import re
+
+from subprocess import (
+    CalledProcessError,
+    check_output,
+    check_call,
+)
+from traceback import format_exc
+from six import string_types
+from stat import (
+    S_ISGID,
+    S_ISUID
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    INFO,
+    WARNING,
+    ERROR,
+)
+from charmhelpers.core import unitdata
+from charmhelpers.core.host import file_hash
+from charmhelpers.contrib.hardening.audits import BaseAudit
+from charmhelpers.contrib.hardening.templating import (
+    get_template_path,
+    render_and_write,
+)
+from charmhelpers.contrib.hardening import utils
+
+
+class BaseFileAudit(BaseAudit):
+    """Base class for file audits.
+
+    Provides API stubs for the compliance check flow that must be implemented
+    by any class that derives from this one.
+    """
+
+    def __init__(self, paths, always_comply=False, *args, **kwargs):
+        """
+        :param paths: string path or list of paths of files we want to apply
+                      compliance checks and criteria to.
+        :param always_comply: if True, compliance criteria are always applied;
+                              otherwise compliance is skipped for non-existent
+                              paths.
+        """
+        super(BaseFileAudit, self).__init__(*args, **kwargs)
+        self.always_comply = always_comply
+        if isinstance(paths, string_types) or not hasattr(paths, '__iter__'):
+            self.paths = [paths]
+        else:
+            self.paths = paths
+
+    def ensure_compliance(self):
+        """Ensure that the all registered files comply to registered criteria.
+        """
+        for p in self.paths:
+            if os.path.exists(p):
+                if self.is_compliant(p):
+                    continue
+
+                log('File %s is not in compliance.' % p, level=INFO)
+            else:
+                if not self.always_comply:
+                    log("Non-existent path '%s' - skipping compliance check"
+                        % (p), level=INFO)
+                    continue
+
+            if self._take_action():
+                log("Applying compliance criteria to '%s'" % (p), level=INFO)
+                self.comply(p)
+
+    def is_compliant(self, path):
+        """Audits the path to see if it is compliance.
+
+        :param path: the path to the file that should be checked.
+        """
+        raise NotImplementedError
+
+    def comply(self, path):
+        """Enforces the compliance of a path.
+
+        :param path: the path to the file that should be enforced.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def _get_stat(cls, path):
+        """Returns the Posix st_stat information for the specified file path.
+
+        :param path: the path to get the st_stat information for.
+        :returns: an st_stat object for the path or None if the path doesn't
+                  exist.
+        """
+        return os.stat(path)
+
+
+class FilePermissionAudit(BaseFileAudit):
+    """Implements an audit for file permissions and ownership for a user.
+
+    This class implements functionality that ensures that a specific user/group
+    will own the file(s) specified and that the permissions specified are
+    applied properly to the file.
+    """
+    def __init__(self, paths, user, group=None, mode=0o600, **kwargs):
+        self.user = user
+        self.group = group
+        self.mode = mode
+        # Only paths is forwarded: extra positional args would be swallowed
+        # by BaseFileAudit's always_comply/*args parameters.
+        super(FilePermissionAudit, self).__init__(paths, **kwargs)
+
+    @property
+    def user(self):
+        return self._user
+
+    @user.setter
+    def user(self, name):
+        try:
+            user = pwd.getpwnam(name)
+        except KeyError:
+            log('Unknown user %s' % name, level=ERROR)
+            user = None
+        self._user = user
+
+    @property
+    def group(self):
+        return self._group
+
+    @group.setter
+    def group(self, name):
+        try:
+            group = None
+            if name:
+                group = grp.getgrnam(name)
+            else:
+                group = grp.getgrgid(self.user.pw_gid)
+        except KeyError:
+            log('Unknown group %s' % name, level=ERROR)
+        self._group = group
+
+    def is_compliant(self, path):
+        """Checks if the path is in compliance.
+
+        Used to determine if the path specified meets the necessary
+        requirements to be in compliance with the check itself.
+
+        :param path: the file path to check
+        :returns: True if the path is compliant, False otherwise.
+        """
+        stat = self._get_stat(path)
+        user = self.user
+        group = self.group
+
+        compliant = True
+        if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid:
+            log('File %s is not owned by %s:%s.' % (path, user.pw_name,
+                                                    group.gr_name),
+                level=INFO)
+            compliant = False
+
+        # POSIX refers to the st_mode bits as corresponding to both the
+        # file type and file permission bits, where the least significant 12
+        # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the
+        # file permission bits (8-0)
+        perms = stat.st_mode & 0o7777
+        if perms != self.mode:
+            log('File %s has incorrect permissions, currently set to %s' %
+                (path, oct(stat.st_mode & 0o7777)), level=INFO)
+            compliant = False
+
+        return compliant
+
+    def comply(self, path):
+        """Issues a chown and chmod to the file paths specified."""
+        utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name,
+                                 self.mode)
+
+
+class DirectoryPermissionAudit(FilePermissionAudit):
+    """Performs a permission check for the  specified directory path."""
+
+    def __init__(self, paths, user, group=None, mode=0o600,
+                 recursive=True, **kwargs):
+        super(DirectoryPermissionAudit, self).__init__(paths, user, group,
+                                                       mode, **kwargs)
+        self.recursive = recursive
+
+    def is_compliant(self, path):
+        """Checks if the directory is compliant.
+
+        Used to determine if the path specified and all of its child
+        directories are in compliance with the check itself.
+
+        :param path: the directory path to check
+        :returns: True if the directory tree is compliant, otherwise False.
+        """
+        if not os.path.isdir(path):
+            log('Path specified %s is not a directory.' % path, level=ERROR)
+            raise ValueError("%s is not a directory." % path)
+
+        if not self.recursive:
+            return super(DirectoryPermissionAudit, self).is_compliant(path)
+
+        compliant = True
+        for root, dirs, _ in os.walk(path):
+            if len(dirs) > 0:
+                continue
+
+            if not super(DirectoryPermissionAudit, self).is_compliant(root):
+                compliant = False
+                continue
+
+        return compliant
+
+    def comply(self, path):
+        for root, dirs, _ in os.walk(path):
+            if len(dirs) > 0:
+                super(DirectoryPermissionAudit, self).comply(root)
+
+
+class ReadOnly(BaseFileAudit):
+    """Audits that files and folders are read only."""
+    def __init__(self, paths, *args, **kwargs):
+        super(ReadOnly, self).__init__(paths=paths, *args, **kwargs)
+
+    def is_compliant(self, path):
+        try:
+            output = check_output(['find', path, '-perm', '-go+w',
+                                   '-type', 'f']).strip()
+
+            # The find above will find any files which have permission sets
+            # which allow too broad of write access. As such, the path is
+            # compliant if there is no output.
+            if output:
+                return False
+
+            return True
+        except CalledProcessError as e:
+            log('Error occurred while finding writable files for %s. '
+                'Error information is: command %s failed with returncode '
+                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
+                                           format_exc()), level=ERROR)
+            return False
+
+    def comply(self, path):
+        try:
+            check_output(['chmod', 'go-w', '-R', path])
+        except CalledProcessError as e:
+            log('Error occurred removing writable permissions for %s. '
+                'Error information is: command %s failed with returncode '
+                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
+                                           format_exc()), level=ERROR)
+
+
+class NoReadWriteForOther(BaseFileAudit):
+    """Ensures that the files found under the base path are readable or
+    writable by anyone other than the owner or the group.
+    """
+    def __init__(self, paths):
+        super(NoReadWriteForOther, self).__init__(paths)
+
+    def is_compliant(self, path):
+        try:
+            cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o',
+                   '-perm', '-o+w', '-type', 'f']
+            output = check_output(cmd).strip()
+
+            # The find above will find any files which have read or write
+            # permissions for 'other', meaning access to read/write the file
+            # is too broad. As such, the path is compliant if there's no
+            # output.
+            if output:
+                return False
+
+            return True
+        except CalledProcessError as e:
+            log('Error occurred while finding files which are readable or '
+                'writable to the world in %s. '
+                'Command output is: %s.' % (path, e.output), level=ERROR)
+            return False
+
+    def comply(self, path):
+        try:
+            check_output(['chmod', '-R', 'o-rw', path])
+        except CalledProcessError as e:
+            log('Error occurred attempting to change modes of files under '
+                'path %s. Output of command is: %s' % (path, e.output),
+                level=ERROR)
+
+
+class NoSUIDSGIDAudit(BaseFileAudit):
+    """Audits that specified files do not have SUID/SGID bits set."""
+    def __init__(self, paths, *args, **kwargs):
+        super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs)
+
+    def is_compliant(self, path):
+        stat = self._get_stat(path)
+        if (stat.st_mode & (S_ISGID | S_ISUID)) != 0:
+            return False
+
+        return True
+
+    def comply(self, path):
+        try:
+            log('Removing suid/sgid from %s.' % path, level=DEBUG)
+            check_output(['chmod', '-s', path])
+        except CalledProcessError as e:
+            log('Error occurred removing suid/sgid from %s. '
+                'Error information is: command %s failed with returncode '
+                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
+                                           format_exc()), level=ERROR)
+
+
+class TemplatedFile(BaseFileAudit):
+    """The TemplatedFileAudit audits the contents of a templated file.
+
+    This audit renders a file from a template, sets the appropriate file
+    permissions, then generates a hashsum with which to check the content
+    changed.
+    """
+    def __init__(self, path, context, template_dir, mode, user='root',
+                 group='root', service_actions=None, **kwargs):
+        self.context = context
+        self.user = user
+        self.group = group
+        self.mode = mode
+        self.template_dir = template_dir
+        self.service_actions = service_actions
+        super(TemplatedFile, self).__init__(paths=path, always_comply=True,
+                                            **kwargs)
+
+    def is_compliant(self, path):
+        """Determines if the templated file is compliant.
+
+        A templated file is only compliant if it has not changed (as
+        determined by its sha256 hashsum) AND its file permissions are set
+        appropriately.
+
+        :param path: the path to check compliance.
+        """
+        same_templates = self.templates_match(path)
+        same_content = self.contents_match(path)
+        same_permissions = self.permissions_match(path)
+
+        if same_content and same_permissions and same_templates:
+            return True
+
+        return False
+
+    def run_service_actions(self):
+        """Run any actions on services requested."""
+        if not self.service_actions:
+            return
+
+        for svc_action in self.service_actions:
+            name = svc_action['service']
+            actions = svc_action['actions']
+            log("Running service '%s' actions '%s'" % (name, actions),
+                level=DEBUG)
+            for action in actions:
+                cmd = ['service', name, action]
+                try:
+                    check_call(cmd)
+                except CalledProcessError as exc:
+                    log("Service name='%s' action='%s' failed - %s" %
+                        (name, action, exc), level=WARNING)
+
+    def comply(self, path):
+        """Ensures the contents and the permissions of the file.
+
+        :param path: the path to correct
+        """
+        dirname = os.path.dirname(path)
+        if not os.path.exists(dirname):
+            os.makedirs(dirname)
+
+        self.pre_write()
+        render_and_write(self.template_dir, path, self.context())
+        utils.ensure_permissions(path, self.user, self.group, self.mode)
+        self.run_service_actions()
+        self.save_checksum(path)
+        self.post_write()
+
+    def pre_write(self):
+        """Invoked prior to writing the template."""
+        pass
+
+    def post_write(self):
+        """Invoked after writing the template."""
+        pass
+
+    def templates_match(self, path):
+        """Determines if the template files are the same.
+
+        Template file equality is determined by the hashsum of the template
+        file itself. If there is no stored hashsum, the content cannot be
+        assumed unchanged, so treat the template as changed. Otherwise,
+        return whether or not the hashsums are the same.
+
+        :param path: the path to check
+        :returns: boolean
+        """
+        template_path = get_template_path(self.template_dir, path)
+        key = 'hardening:template:%s' % template_path
+        template_checksum = file_hash(template_path)
+        kv = unitdata.kv()
+        stored_tmplt_checksum = kv.get(key)
+        if not stored_tmplt_checksum:
+            kv.set(key, template_checksum)
+            kv.flush()
+            log('Saved template checksum for %s.' % template_path,
+                level=DEBUG)
+            # Since we don't have a stored template checksum, assume it
+            # doesn't match and return that the template is different.
+            return False
+        elif stored_tmplt_checksum != template_checksum:
+            kv.set(key, template_checksum)
+            kv.flush()
+            log('Updated template checksum for %s.' % template_path,
+                level=DEBUG)
+            return False
+
+        # Here the template hasn't changed based upon the calculated
+        # checksum of the template and what was previously stored.
+        return True
+
+    def contents_match(self, path):
+        """Determines if the file content is the same.
+
+        This is determined by comparing the hashsum of the file contents
+        with the saved hashsum. If there is no saved hashsum, the content
+        cannot be assumed unchanged, so treat the file as changed.
+        Otherwise, return True if the hashsums are the same, False if
+        they are not.
+
+        :param path: the file to check.
+        """
+        checksum = file_hash(path)
+
+        kv = unitdata.kv()
+        stored_checksum = kv.get('hardening:%s' % path)
+        if not stored_checksum:
+            # If the checksum hasn't been generated, return False to ensure
+            # the file is written and the checksum stored.
+            log('Checksum for %s has not been calculated.' % path, level=DEBUG)
+            return False
+        elif stored_checksum != checksum:
+            log('Checksum mismatch for %s.' % path, level=DEBUG)
+            return False
+
+        return True
+
+    def permissions_match(self, path):
+        """Determines if the file owner and permissions match.
+
+        :param path: the path to check.
+        """
+        audit = FilePermissionAudit(path, self.user, self.group, self.mode)
+        return audit.is_compliant(path)
+
+    def save_checksum(self, path):
+        """Calculates and saves the checksum for the path specified.
+
+        :param path: the path of the file to save the checksum.
+        """
+        checksum = file_hash(path)
+        kv = unitdata.kv()
+        kv.set('hardening:%s' % path, checksum)
+        kv.flush()
+
+
+class DeletedFile(BaseFileAudit):
+    """Audit to ensure that a file is deleted."""
+    def __init__(self, paths):
+        super(DeletedFile, self).__init__(paths)
+
+    def is_compliant(self, path):
+        return not os.path.exists(path)
+
+    def comply(self, path):
+        os.remove(path)
+
+
+class FileContentAudit(BaseFileAudit):
+    """Audit the contents of a file."""
+    def __init__(self, paths, cases, **kwargs):
+        # Cases we expect to pass
+        self.pass_cases = cases.get('pass', [])
+        # Cases we expect to fail
+        self.fail_cases = cases.get('fail', [])
+        super(FileContentAudit, self).__init__(paths, **kwargs)
+
+    def is_compliant(self, path):
+        """
+        Given a set of content matching cases, i.e. a dict with 'pass' and
+        'fail' lists of regexes, check that all cases match as expected
+        against the contents of the file: 'pass' patterns are expected to
+        match and 'fail' patterns are expected not to match.
+
+        :param path: Path of file to check.
+        :returns: Boolean value representing whether or not all cases are
+                  found to be compliant.
+        """
+        log("Auditing contents of file '%s'" % (path), level=DEBUG)
+        with open(path, 'r') as fd:
+            contents = fd.read()
+
+        matches = 0
+        for pattern in self.pass_cases:
+            key = re.compile(pattern, flags=re.MULTILINE)
+            results = re.search(key, contents)
+            if results:
+                matches += 1
+            else:
+                log("Pattern '%s' was expected to pass but instead it failed"
+                    % (pattern), level=WARNING)
+
+        for pattern in self.fail_cases:
+            key = re.compile(pattern, flags=re.MULTILINE)
+            results = re.search(key, contents)
+            if not results:
+                matches += 1
+            else:
+                log("Pattern '%s' was expected to fail but instead it passed"
+                    % (pattern), level=WARNING)
+
+        total = len(self.pass_cases) + len(self.fail_cases)
+        log("Checked %s cases and %s passed" % (total, matches), level=DEBUG)
+        return matches == total
+
+    def comply(self, *args, **kwargs):
+        """NOOP since we just issue warnings. This is to avoid the
+        NotImplementedError.
+        """
+        log("Not applying any compliance criteria, only checks.", level=INFO)
diff --git a/charmhelpers/contrib/hardening/defaults/__init__.py b/charmhelpers/contrib/hardening/defaults/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/charmhelpers/contrib/hardening/defaults/apache.yaml b/charmhelpers/contrib/hardening/defaults/apache.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0f940d4cfa85ca7051dd60a4805d84bb6aebed6d
--- /dev/null
+++ b/charmhelpers/contrib/hardening/defaults/apache.yaml
@@ -0,0 +1,16 @@
+# NOTE: this file contains the default configuration for the 'apache' hardening
+#       code. If you want to override any settings you must add them to a file
+#       called hardening.yaml in the root directory of your charm using the
+#       name 'apache' as the root key followed by any of the following with new
+#       values.
+
+common:
+    apache_dir: '/etc/apache2'
+
+hardening:
+    traceenable: 'off'
+    allowed_http_methods: "GET POST"
+    modules_to_disable: [ cgi, cgid ]
+    servertokens: 'Prod'
+    honor_cipher_order: 'on'
+    cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES'
diff --git a/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
new file mode 100644
index 0000000000000000000000000000000000000000..c112137cb45c4b63cb05384145b3edf8c443e2b8
--- /dev/null
+++ b/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
@@ -0,0 +1,12 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+#       file. It is used to validate user-provided overrides.
+common:
+    apache_dir:
+
+hardening:
+    traceenable:
+    allowed_http_methods:
+    modules_to_disable:
+    servertokens:
+    honor_cipher_order:
+    cipher_suite:
diff --git a/charmhelpers/contrib/hardening/defaults/mysql.yaml b/charmhelpers/contrib/hardening/defaults/mysql.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..682d22bf3ded32eb1c8d6188486ec4468d9ec457
--- /dev/null
+++ b/charmhelpers/contrib/hardening/defaults/mysql.yaml
@@ -0,0 +1,38 @@
+# NOTE: this file contains the default configuration for the 'mysql' hardening
+#       code. If you want to override any settings you must add them to a file
+#       called hardening.yaml in the root directory of your charm using the
+#       name 'mysql' as the root key followed by any of the following with new
+#       values.
+
+hardening:
+    mysql-conf: /etc/mysql/my.cnf
+    hardening-conf: /etc/mysql/conf.d/hardening.cnf
+
+security:
+    # @see http://www.symantec.com/connect/articles/securing-mysql-step-step
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot
+    chroot: None
+
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create
+    safe-user-create: 1
+
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth
+    secure-auth: 1
+
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links
+    skip-symbolic-links: 1
+
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database
+    skip-show-database: True
+
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile
+    local-infile: 0
+
+    # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs
+    allow-suspicious-udfs: 0
+
+    # @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges
+    automatic-sp-privileges: 0
+
+    # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv
+    secure-file-priv: /tmp
diff --git a/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema b/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema
new file mode 100644
index 0000000000000000000000000000000000000000..2edf325c311c6fbb062a072083b4d12cebc3d9c1
--- /dev/null
+++ b/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema
@@ -0,0 +1,15 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+#       file. It is used to validate user-provided overrides.
+hardening:
+    mysql-conf:
+    hardening-conf:
+security:
+    chroot:
+    safe-user-create:
+    secure-auth:
+    skip-symbolic-links:
+    skip-show-database:
+    local-infile:
+    allow-suspicious-udfs:
+    automatic-sp-privileges:
+    secure-file-priv:
diff --git a/charmhelpers/contrib/hardening/defaults/os.yaml b/charmhelpers/contrib/hardening/defaults/os.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9a8627b5ed2803828e1e4d78260c6b5f90cae659
--- /dev/null
+++ b/charmhelpers/contrib/hardening/defaults/os.yaml
@@ -0,0 +1,68 @@
+# NOTE: this file contains the default configuration for the 'os' hardening
+#       code. If you want to override any settings you must add them to a file
+#       called hardening.yaml in the root directory of your charm using the
+#       name 'os' as the root key followed by any of the following with new
+#       values.
+
+general:
+    desktop_enable: False  # (type:boolean)
+
+environment:
+    extra_user_paths: []
+    umask: 027
+    root_path: /
+
+auth:
+    pw_max_age: 60
+    # discourage password cycling
+    pw_min_age: 7
+    retries: 5
+    lockout_time: 600
+    timeout: 60
+    allow_homeless: False  # (type:boolean)
+    pam_passwdqc_enable: True  # (type:boolean)
+    pam_passwdqc_options: 'min=disabled,disabled,16,12,8'
+    root_ttys:
+        console
+        tty1
+        tty2
+        tty3
+        tty4
+        tty5
+        tty6
+    uid_min: 1000
+    gid_min: 1000
+    sys_uid_min: 100
+    sys_uid_max: 999
+    sys_gid_min: 100
+    sys_gid_max: 999
+    chfn_restrict:
+
+security:
+    users_allow: []
+    suid_sgid_enforce: True  # (type:boolean)
+    # user-defined blacklist and whitelist
+    suid_sgid_blacklist: []
+    suid_sgid_whitelist: []
+    # if this is True, remove any suid/sgid bits from files that were not in the whitelist
+    suid_sgid_dry_run_on_unknown: False  # (type:boolean)
+    suid_sgid_remove_from_unknown: False  # (type:boolean)
+    # remove packages with known issues
+    packages_clean: True  # (type:boolean)
+    packages_list:
+        xinetd
+        inetd
+        ypserv
+        telnet-server
+        rsh-server
+        rsync
+    kernel_enable_module_loading: True  # (type:boolean)
+    kernel_enable_core_dump: False  # (type:boolean)
+    ssh_tmout: 300
+
+sysctl:
+    kernel_secure_sysrq: 244  # 4 + 16 + 32 + 64 + 128
+    kernel_enable_sysrq: False  # (type:boolean)
+    forwarding: False  # (type:boolean)
+    ipv6_enable: False  # (type:boolean)
+    arp_restricted: True  # (type:boolean)
diff --git a/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/charmhelpers/contrib/hardening/defaults/os.yaml.schema
new file mode 100644
index 0000000000000000000000000000000000000000..cc3b9c206eae56cbe68826cb76748e2deb9483e1
--- /dev/null
+++ b/charmhelpers/contrib/hardening/defaults/os.yaml.schema
@@ -0,0 +1,43 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+#       file. It is used to validate user-provided overrides.
+general:
+    desktop_enable:
+environment:
+    extra_user_paths:
+    umask:
+    root_path:
+auth:
+    pw_max_age:
+    pw_min_age:
+    retries:
+    lockout_time:
+    timeout:
+    allow_homeless:
+    pam_passwdqc_enable:
+    pam_passwdqc_options:
+    root_ttys:
+    uid_min:
+    gid_min:
+    sys_uid_min:
+    sys_uid_max:
+    sys_gid_min:
+    sys_gid_max:
+    chfn_restrict:
+security:
+    users_allow:
+    suid_sgid_enforce:
+    suid_sgid_blacklist:
+    suid_sgid_whitelist:
+    suid_sgid_dry_run_on_unknown:
+    suid_sgid_remove_from_unknown:
+    packages_clean:
+    packages_list:
+    kernel_enable_module_loading:
+    kernel_enable_core_dump:
+    ssh_tmout:
+sysctl:
+    kernel_secure_sysrq:
+    kernel_enable_sysrq:
+    forwarding:
+    ipv6_enable:
+    arp_restricted:
diff --git a/charmhelpers/contrib/hardening/defaults/ssh.yaml b/charmhelpers/contrib/hardening/defaults/ssh.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cd529bcae1ec00fef2e969f43dc3cf530b46ef9a
--- /dev/null
+++ b/charmhelpers/contrib/hardening/defaults/ssh.yaml
@@ -0,0 +1,49 @@
+# NOTE: this file contains the default configuration for the 'ssh' hardening
+#       code. If you want to override any settings you must add them to a file
+#       called hardening.yaml in the root directory of your charm using the
+#       name 'ssh' as the root key followed by any of the following with new
+#       values.
+
+common:
+    service_name: 'ssh'
+    network_ipv6_enable: False  # (type:boolean)
+    ports: [22]
+    remote_hosts: []
+
+client:
+    package: 'openssh-client'
+    cbc_required: False  # (type:boolean)
+    weak_hmac: False  # (type:boolean)
+    weak_kex: False  # (type:boolean)
+    roaming: False
+    password_authentication: 'no'
+
+server:
+    host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key',
+                     '/etc/ssh/ssh_host_ecdsa_key']
+    cbc_required: False  # (type:boolean)
+    weak_hmac: False  # (type:boolean)
+    weak_kex: False  # (type:boolean)
+    allow_root_with_key: False  # (type:boolean)
+    allow_tcp_forwarding: 'no'
+    allow_agent_forwarding: 'no'
+    allow_x11_forwarding: 'no'
+    use_privilege_separation: 'sandbox'
+    listen_to: ['0.0.0.0']
+    use_pam: 'no'
+    package: 'openssh-server'
+    password_authentication: 'no'
+    alive_interval: '600'
+    alive_count: '3'
+    sftp_enable: False  # (type:boolean)
+    sftp_group: 'sftponly'
+    sftp_chroot: '/home/%u'
+    deny_users: []
+    allow_users: []
+    deny_groups: []
+    allow_groups: []
+    print_motd: 'no'
+    print_last_log: 'no'
+    use_dns: 'no'
+    max_auth_tries: 2
+    max_sessions: 10
diff --git a/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema b/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema
new file mode 100644
index 0000000000000000000000000000000000000000..d05e054bc234015206bb1195152fa9ffd6a33151
--- /dev/null
+++ b/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema
@@ -0,0 +1,42 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+#       file. It is used to validate user-provided overrides.
+common:
+    service_name:
+    network_ipv6_enable:
+    ports:
+    remote_hosts:
+client:
+    package:
+    cbc_required:
+    weak_hmac:
+    weak_kex:
+    roaming:
+    password_authentication:
+server:
+    host_key_files:
+    cbc_required:
+    weak_hmac:
+    weak_kex:
+    allow_root_with_key:
+    allow_tcp_forwarding:
+    allow_agent_forwarding:
+    allow_x11_forwarding:
+    use_privilege_separation:
+    listen_to:
+    use_pam:
+    package:
+    password_authentication:
+    alive_interval:
+    alive_count:
+    sftp_enable:
+    sftp_group:
+    sftp_chroot:
+    deny_users:
+    allow_users:
+    deny_groups:
+    allow_groups:
+    print_motd:
+    print_last_log:
+    use_dns:
+    max_auth_tries:
+    max_sessions:
diff --git a/charmhelpers/contrib/hardening/harden.py b/charmhelpers/contrib/hardening/harden.py
new file mode 100644
index 0000000000000000000000000000000000000000..63f21b9c9855065da3be875c01a2c94db7df47b4
--- /dev/null
+++ b/charmhelpers/contrib/hardening/harden.py
@@ -0,0 +1,96 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import six
+
+from collections import OrderedDict
+
+from charmhelpers.core.hookenv import (
+    config,
+    log,
+    DEBUG,
+    WARNING,
+)
+from charmhelpers.contrib.hardening.host.checks import run_os_checks
+from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks
+from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks
+from charmhelpers.contrib.hardening.apache.checks import run_apache_checks
+
+_DISABLE_HARDENING_FOR_UNIT_TEST = False
+
+
+def harden(overrides=None):
+    """Hardening decorator.
+
+    This is the main entry point for running the hardening stack. In order to
+    run modules of the stack you must add this decorator to charm hook(s) and
+    ensure that your charm config.yaml contains the 'harden' option set to
+    one or more of the supported modules. Setting these will cause the
+    corresponding hardening code to be run when the hook fires.
+
+    This decorator can and should be applied to more than one hook or function
+    such that hardening modules are called multiple times. This is because
+    subsequent calls will perform auditing checks that will report any changes
+    to resources hardened by the first run (and possibly perform compliance
+    actions as a result of any detected infractions).
+
+    :param overrides: Optional list of stack modules used to override those
+                      provided with 'harden' config.
+    :returns: Returns value returned by decorated function once executed.
+    """
+    if overrides is None:
+        overrides = []
+
+    def _harden_inner1(f):
+        # As this has to be py2.7 compat, we can't use nonlocal.  Use a trick
+        # to capture the dictionary that can then be updated.
+        _logged = {'done': False}
+
+        def _harden_inner2(*args, **kwargs):
+            # Knock out hardening via a module-level flag (used by unit
+            # tests); normally hardening stays enabled.
+            if _DISABLE_HARDENING_FOR_UNIT_TEST:
+                return f(*args, **kwargs)
+            if not _logged['done']:
+                log("Hardening function '%s'" % (f.__name__), level=DEBUG)
+                _logged['done'] = True
+            RUN_CATALOG = OrderedDict([('os', run_os_checks),
+                                       ('ssh', run_ssh_checks),
+                                       ('mysql', run_mysql_checks),
+                                       ('apache', run_apache_checks)])
+
+            enabled = overrides[:] or (config("harden") or "").split()
+            if enabled:
+                modules_to_run = []
+                # modules will always be performed in the following order
+                for module, func in six.iteritems(RUN_CATALOG):
+                    if module in enabled:
+                        enabled.remove(module)
+                        modules_to_run.append(func)
+
+                if enabled:
+                    log("Unknown hardening modules '%s' - ignoring" %
+                        (', '.join(enabled)), level=WARNING)
+
+                for hardener in modules_to_run:
+                    log("Executing hardening module '%s'" %
+                        (hardener.__name__), level=DEBUG)
+                    hardener()
+            else:
+                log("No hardening applied to '%s'" % (f.__name__), level=DEBUG)
+
+            return f(*args, **kwargs)
+        return _harden_inner2
+
+    return _harden_inner1
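
A sketch of how a charm applies the decorator above; with the charm config option harden set to e.g. 'os ssh', the corresponding modules run each time the decorated hook fires. Running this requires a Juju hook environment.

    from charmhelpers.contrib.hardening.harden import harden

    @harden()
    def config_changed():
        pass  # normal hook logic; enabled hardening modules run first

    # Modules can also be forced regardless of the 'harden' config option:
    @harden(overrides=['ssh'])
    def upgrade_charm():
        pass
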
diff --git a/charmhelpers/contrib/hardening/host/__init__.py b/charmhelpers/contrib/hardening/host/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..58bebd846bd6fa648cfab6ab1056ad10d8415453
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from os import path
+
+TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charmhelpers/contrib/hardening/host/checks/__init__.py b/charmhelpers/contrib/hardening/host/checks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e7e409f3c7e0406b40353f48acfc3479e4c1a24
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/checks/__init__.py
@@ -0,0 +1,48 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+)
+from charmhelpers.contrib.hardening.host.checks import (
+    apt,
+    limits,
+    login,
+    minimize_access,
+    pam,
+    profile,
+    securetty,
+    suid_sgid,
+    sysctl
+)
+
+
+def run_os_checks():
+    log("Starting OS hardening checks.", level=DEBUG)
+    checks = apt.get_audits()
+    checks.extend(limits.get_audits())
+    checks.extend(login.get_audits())
+    checks.extend(minimize_access.get_audits())
+    checks.extend(pam.get_audits())
+    checks.extend(profile.get_audits())
+    checks.extend(securetty.get_audits())
+    checks.extend(suid_sgid.get_audits())
+    checks.extend(sysctl.get_audits())
+
+    for check in checks:
+        log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
+        check.ensure_compliance()
+
+    log("OS hardening checks complete.", level=DEBUG)
diff --git a/charmhelpers/contrib/hardening/host/checks/apt.py b/charmhelpers/contrib/hardening/host/checks/apt.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ce41b0043134e256d9c20ee729f1c4345faa3f9
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/checks/apt.py
@@ -0,0 +1,37 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.contrib.hardening.utils import get_settings
+from charmhelpers.contrib.hardening.audits.apt import (
+    AptConfig,
+    RestrictedPackages,
+)
+
+
+def get_audits():
+    """Get OS hardening apt audits.
+
+    :returns: list of audits
+    """
+    audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated',
+                          'expected': 'false'}])]
+
+    settings = get_settings('os')
+    clean_packages = settings['security']['packages_clean']
+    if clean_packages:
+        security_packages = settings['security']['packages_list']
+        if security_packages:
+            audits.append(RestrictedPackages(security_packages))
+
+    return audits
diff --git a/charmhelpers/contrib/hardening/host/checks/limits.py b/charmhelpers/contrib/hardening/host/checks/limits.py
new file mode 100644
index 0000000000000000000000000000000000000000..e94f5ebef360c7c80c35eba8243d3e7f7dcbb14d
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/checks/limits.py
@@ -0,0 +1,53 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.contrib.hardening.audits.file import (
+    DirectoryPermissionAudit,
+    TemplatedFile,
+)
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening security limits audits.
+
+    :returns: list of audits
+    """
+    audits = []
+    settings = utils.get_settings('os')
+
+    # Ensure that the /etc/security/limits.d directory is only writable
+    # by the root user, but others can execute and read.
+    audits.append(DirectoryPermissionAudit('/etc/security/limits.d',
+                                           user='root', group='root',
+                                           mode=0o755))
+
+    # If core dumps are not enabled, then don't allow core dumps to be
+    # created as they may contain sensitive information.
+    if not settings['security']['kernel_enable_core_dump']:
+        audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf',
+                                    SecurityLimitsContext(),
+                                    template_dir=TEMPLATES_DIR,
+                                    user='root', group='root', mode=0o0440))
+    return audits
+
+
+class SecurityLimitsContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+        ctxt = {'disable_core_dump':
+                not settings['security']['kernel_enable_core_dump']}
+        return ctxt
diff --git a/charmhelpers/contrib/hardening/host/checks/login.py b/charmhelpers/contrib/hardening/host/checks/login.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe2bc6ef34a0dae612c2617dc1d13390f651e419
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/checks/login.py
@@ -0,0 +1,65 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from six import string_types
+
+from charmhelpers.contrib.hardening.audits.file import TemplatedFile
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening login.defs audits.
+
+    :returns: list of audits
+    """
+    audits = [TemplatedFile('/etc/login.defs', LoginContext(),
+                            template_dir=TEMPLATES_DIR,
+                            user='root', group='root', mode=0o0444)]
+    return audits
+
+
+class LoginContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+
+        # Octal numbers in yaml end up being turned into decimal,
+        # so check if the umask is entered as a string (e.g. '027')
+        # or as an octal umask as we know it (e.g. 002). If it's not
+        # a string, assume it to be octal and turn it into an octal
+        # string.
+        umask = settings['environment']['umask']
+        if not isinstance(umask, string_types):
+            umask = '%s' % oct(umask)
+
+        ctxt = {
+            'additional_user_paths':
+            settings['environment']['extra_user_paths'],
+            'umask': umask,
+            'pwd_max_age': settings['auth']['pw_max_age'],
+            'pwd_min_age': settings['auth']['pw_min_age'],
+            'uid_min': settings['auth']['uid_min'],
+            'sys_uid_min': settings['auth']['sys_uid_min'],
+            'sys_uid_max': settings['auth']['sys_uid_max'],
+            'gid_min': settings['auth']['gid_min'],
+            'sys_gid_min': settings['auth']['sys_gid_min'],
+            'sys_gid_max': settings['auth']['sys_gid_max'],
+            'login_retries': settings['auth']['retries'],
+            'login_timeout': settings['auth']['timeout'],
+            'chfn_restrict': settings['auth']['chfn_restrict'],
+            'allow_login_without_home': settings['auth']['allow_homeless']
+        }
+
+        return ctxt
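
A sketch of the umask normalisation performed above, assuming a YAML 1.1 loader (e.g. PyYAML), which reads the unquoted 027 in os.yaml as the octal integer 23:

    # Sketch: what LoginContext does with a non-string umask value.
    umask = 23  # 0o27: what a YAML 1.1 loader yields for an unquoted 027
    if not isinstance(umask, str):
        umask = '%s' % oct(umask)
    print(umask)  # '0o27' on Python 3 ('027' on Python 2)
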
diff --git a/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/charmhelpers/contrib/hardening/host/checks/minimize_access.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e64be003be0b89d1416b22c35c43b6024979361
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/checks/minimize_access.py
@@ -0,0 +1,50 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.contrib.hardening.audits.file import (
+    FilePermissionAudit,
+    ReadOnly,
+)
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening access audits.
+
+    :returns: list of audits
+    """
+    audits = []
+    settings = utils.get_settings('os')
+
+    # Remove write permissions from $PATH folders for all regular users.
+    # This prevents changing system-wide commands from normal users.
+    path_folders = {'/usr/local/sbin',
+                    '/usr/local/bin',
+                    '/usr/sbin',
+                    '/usr/bin',
+                    '/bin'}
+    extra_user_paths = settings['environment']['extra_user_paths']
+    path_folders.update(extra_user_paths)
+    audits.append(ReadOnly(path_folders))
+
+    # Only allow the root user to have access to the shadow file.
+    audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600))
+
+    if 'change_user' not in settings['security']['users_allow']:
+        # su should only be accessible to user and group root, unless it is
+        # expressly defined to allow users to change to root via the
+        # security_users_allow config option.
+        audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750))
+
+    return audits
diff --git a/charmhelpers/contrib/hardening/host/checks/pam.py b/charmhelpers/contrib/hardening/host/checks/pam.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b38d5f0cf0b16282968825b79b44d80a1a7f577
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/checks/pam.py
@@ -0,0 +1,132 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from subprocess import (
+    check_output,
+    CalledProcessError,
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    ERROR,
+)
+from charmhelpers.fetch import (
+    apt_install,
+    apt_purge,
+    apt_update,
+)
+from charmhelpers.contrib.hardening.audits.file import (
+    TemplatedFile,
+    DeletedFile,
+)
+from charmhelpers.contrib.hardening import utils
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+
+
+def get_audits():
+    """Get OS hardening PAM authentication audits.
+
+    :returns:  list of audits
+    """
+    audits = []
+
+    settings = utils.get_settings('os')
+
+    if settings['auth']['pam_passwdqc_enable']:
+        audits.append(PasswdqcPAM('/etc/passwdqc.conf'))
+
+    if settings['auth']['retries']:
+        audits.append(Tally2PAM('/usr/share/pam-configs/tally2'))
+    else:
+        audits.append(DeletedFile('/usr/share/pam-configs/tally2'))
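+        # With no retry limit configured, the tally2 pam-configs profile
+        # is deleted rather than templated, so no lockout policy applies.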
+
+    return audits
+
+
+class PasswdqcPAMContext(object):
+
+    def __call__(self):
+        ctxt = {}
+        settings = utils.get_settings('os')
+
+        ctxt['auth_pam_passwdqc_options'] = \
+            settings['auth']['pam_passwdqc_options']
+
+        return ctxt
+
+
+class PasswdqcPAM(TemplatedFile):
+    """The PAM Audit verifies the linux PAM settings."""
+    def __init__(self, path):
+        super(PasswdqcPAM, self).__init__(path=path,
+                                          template_dir=TEMPLATES_DIR,
+                                          context=PasswdqcPAMContext(),
+                                          user='root',
+                                          group='root',
+                                          mode=0o0640)
+
+    def pre_write(self):
+        # Remove modules that conflict with passwdqc before installing it.
+        for pkg in ['libpam-ccreds', 'libpam-cracklib']:
+            log("Purging package '%s'" % pkg, level=DEBUG)
+            apt_purge(pkg)
+
+        apt_update(fatal=True)
+        for pkg in ['libpam-passwdqc']:
+            log("Installing package '%s'" % pkg, level=DEBUG),
+            apt_install(pkg)
+
+    def post_write(self):
+        """Updates the PAM configuration after the file has been written"""
+        try:
+            check_output(['pam-auth-update', '--package'])
+        except CalledProcessError as e:
+            log('Error calling pam-auth-update: %s' % e, level=ERROR)
+
+
+class Tally2PAMContext(object):
+
+    def __call__(self):
+        ctxt = {}
+        settings = utils.get_settings('os')
+
+        ctxt['auth_lockout_time'] = settings['auth']['lockout_time']
+        ctxt['auth_retries'] = settings['auth']['retries']
+
+        return ctxt
+
+
+class Tally2PAM(TemplatedFile):
+    """The PAM Audit verifies the linux PAM settings."""
+    def __init__(self, path):
+        super(Tally2PAM, self).__init__(path=path,
+                                        template_dir=TEMPLATES_DIR,
+                                        context=Tally2PAMContext(),
+                                        user='root',
+                                        group='root',
+                                        mode=0o0640)
+
+    def pre_write(self):
+        # Remove libpam-ccreds before configuring the tally2 lockout.
+        apt_purge('libpam-ccreds')
+        apt_update(fatal=True)
+        apt_install('libpam-modules')
+
+    def post_write(self):
+        """Updates the PAM configuration after the file has been written"""
+        try:
+            check_output(['pam-auth-update', '--package'])
+        except CalledProcessError as e:
+            log('Error calling pam-auth-update: %s' % e, level=ERROR)
diff --git a/charmhelpers/contrib/hardening/host/checks/profile.py b/charmhelpers/contrib/hardening/host/checks/profile.py
new file mode 100644
index 0000000000000000000000000000000000000000..2727428da9241ccf88a60843d05dffb26cebac96
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/checks/profile.py
@@ -0,0 +1,49 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.contrib.hardening.audits.file import TemplatedFile
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening profile audits.
+
+    :returns:  list of audits
+    """
+    audits = []
+
+    settings = utils.get_settings('os')
+    # If core dumps are not enabled, then don't allow core dumps to be
+    # created as they may contain sensitive information.
+    if not settings['security']['kernel_enable_core_dump']:
+        audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh',
+                                    ProfileContext(),
+                                    template_dir=TEMPLATES_DIR,
+                                    mode=0o0755, user='root', group='root'))
+    if settings['security']['ssh_tmout']:
+        audits.append(TemplatedFile('/etc/profile.d/99-hardening.sh',
+                                    ProfileContext(),
+                                    template_dir=TEMPLATES_DIR,
+                                    mode=0o0644, user='root', group='root'))
+    return audits
+
+
+class ProfileContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+        ctxt = {'ssh_tmout':
+                settings['security']['ssh_tmout']}
+        return ctxt
diff --git a/charmhelpers/contrib/hardening/host/checks/securetty.py b/charmhelpers/contrib/hardening/host/checks/securetty.py
new file mode 100644
index 0000000000000000000000000000000000000000..34cd02178c1fbf1c7d467af0814ad9fd4199dc3d
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/checks/securetty.py
@@ -0,0 +1,37 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.contrib.hardening.audits.file import TemplatedFile
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening Secure TTY audits.
+
+    :returns:  list of audits
+    """
+    audits = []
+    audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(),
+                                template_dir=TEMPLATES_DIR,
+                                mode=0o0400, user='root', group='root'))
+    return audits
+
+
+class SecureTTYContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+        ctxt = {'ttys': settings['auth']['root_ttys']}
+        return ctxt
diff --git a/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/charmhelpers/contrib/hardening/host/checks/suid_sgid.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcbe3fde07ea0716e2de6d1d4e103fcb19166c14
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/checks/suid_sgid.py
@@ -0,0 +1,129 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit
+from charmhelpers.contrib.hardening import utils
+
+
+BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh',
+             '/usr/libexec/openssh/ssh-keysign',
+             '/usr/lib/openssh/ssh-keysign',
+             '/sbin/netreport',
+             '/usr/sbin/usernetctl',
+             '/usr/sbin/userisdnctl',
+             '/usr/sbin/pppd',
+             '/usr/bin/lockfile',
+             '/usr/bin/mail-lock',
+             '/usr/bin/mail-unlock',
+             '/usr/bin/mail-touchlock',
+             '/usr/bin/dotlockfile',
+             '/usr/bin/arping',
+             '/usr/sbin/uuidd',
+             '/usr/bin/mtr',
+             '/usr/lib/evolution/camel-lock-helper-1.2',
+             '/usr/lib/pt_chown',
+             '/usr/lib/eject/dmcrypt-get-device',
+             '/usr/lib/mc/cons.saver']
+
+WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount',
+             '/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at',
+             '/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp',
+             '/usr/bin/passwd', '/usr/bin/ssh-agent',
+             '/usr/libexec/utempter/utempter', '/usr/sbin/lockdev',
+             '/usr/sbin/sendmail.sendmail', '/usr/bin/expiry',
+             '/bin/ping6', '/usr/bin/traceroute6.iputils',
+             '/sbin/mount.nfs', '/sbin/umount.nfs',
+             '/sbin/mount.nfs4', '/sbin/umount.nfs4',
+             '/usr/bin/crontab',
+             '/usr/bin/wall', '/usr/bin/write',
+             '/usr/bin/screen',
+             '/usr/bin/mlocate',
+             '/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh',
+             '/bin/fusermount',
+             '/usr/bin/pkexec',
+             '/usr/bin/sudo', '/usr/bin/sudoedit',
+             '/usr/sbin/postdrop', '/usr/sbin/postqueue',
+             '/usr/sbin/suexec',
+             '/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth',
+             '/usr/kerberos/bin/ksu',
+             '/usr/sbin/ccreds_validate',
+             '/usr/bin/Xorg',
+             '/usr/bin/X',
+             '/usr/lib/dbus-1.0/dbus-daemon-launch-helper',
+             '/usr/lib/vte/gnome-pty-helper',
+             '/usr/lib/libvte9/gnome-pty-helper',
+             '/usr/lib/libvte-2.90-9/gnome-pty-helper']
+
+
+def get_audits():
+    """Get OS hardening suid/sgid audits.
+
+    :returns:  list of audits
+    """
+    checks = []
+    settings = utils.get_settings('os')
+    if not settings['security']['suid_sgid_enforce']:
+        log("Skipping suid/sgid hardening", level=INFO)
+        return checks
+
+    # Build the blacklist and whitelist of files for suid/sgid checks.
+    # There are a total of 4 lists:
+    #   1. the system blacklist
+    #   2. the system whitelist
+    #   3. the user blacklist
+    #   4. the user whitelist
+    #
+    # The blacklist is the set of paths which should NOT have the suid/sgid
+    # bit set and the whitelist is the set of paths which MAY have the
+    # suid/sgid bit set. The user whitelist/blacklist effectively override
+    # the system whitelist/blacklist.
+    u_b = settings['security']['suid_sgid_blacklist']
+    u_w = settings['security']['suid_sgid_whitelist']
+
+    blacklist = set(BLACKLIST) - set(u_w + u_b)
+    whitelist = set(WHITELIST) - set(u_b + u_w)
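+    # Example: adding '/usr/bin/rcp' to suid_sgid_whitelist removes it from
+    # the system blacklist above, so its suid/sgid bits are left untouched.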
+
+    checks.append(NoSUIDSGIDAudit(blacklist))
+
+    dry_run = settings['security']['suid_sgid_dry_run_on_unknown']
+
+    if settings['security']['suid_sgid_remove_from_unknown'] or dry_run:
+        # If the policy is a dry_run (e.g. complain only) or remove unknown
+        # suid/sgid bits then find all of the paths which have the suid/sgid
+        # bit set and then remove the whitelisted paths.
+        root_path = settings['environment']['root_path']
+        unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist)
+        checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run))
+
+    return checks
+
+
+def find_paths_with_suid_sgid(root_path):
+    """Finds all paths/files which have an suid/sgid bit enabled.
+
+    Starting with the root_path, this will recursively find all paths which
+    have an suid or sgid bit set.
+    """
+    cmd = ['find', root_path, '-perm', '-4000', '-o', '-perm', '-2000',
+           '-type', 'f', '!', '-path', '/proc/*', '-print']
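+    # Equivalent shell invocation:
+    #   find <root_path> -perm -4000 -o -perm -2000 -type f \
+    #       ! -path '/proc/*' -print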
+
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    out, _ = p.communicate()
+    # communicate() returns bytes under python3; decode before splitting.
+    return set(out.decode('utf-8').split('\n'))
diff --git a/charmhelpers/contrib/hardening/host/checks/sysctl.py b/charmhelpers/contrib/hardening/host/checks/sysctl.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1ea5813036b11893e8b9a986bf30a2f7a541b5d
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/checks/sysctl.py
@@ -0,0 +1,209 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import platform
+import re
+import six
+import subprocess
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+    WARNING,
+)
+from charmhelpers.contrib.hardening import utils
+from charmhelpers.contrib.hardening.audits.file import (
+    FilePermissionAudit,
+    TemplatedFile,
+)
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+
+
+SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s
+net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s
+net.ipv4.conf.all.rp_filter=1
+net.ipv4.conf.default.rp_filter=1
+net.ipv4.icmp_echo_ignore_broadcasts=1
+net.ipv4.icmp_ignore_bogus_error_responses=1
+net.ipv4.icmp_ratelimit=100
+net.ipv4.icmp_ratemask=88089
+net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s
+net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s
+net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s
+net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s
+net.ipv4.tcp_rfc1337=1
+net.ipv4.tcp_syncookies=1
+net.ipv4.conf.all.shared_media=1
+net.ipv4.conf.default.shared_media=1
+net.ipv4.conf.all.accept_source_route=0
+net.ipv4.conf.default.accept_source_route=0
+net.ipv4.conf.all.accept_redirects=0
+net.ipv4.conf.default.accept_redirects=0
+net.ipv6.conf.all.accept_redirects=0
+net.ipv6.conf.default.accept_redirects=0
+net.ipv4.conf.all.secure_redirects=0
+net.ipv4.conf.default.secure_redirects=0
+net.ipv4.conf.all.send_redirects=0
+net.ipv4.conf.default.send_redirects=0
+net.ipv4.conf.all.log_martians=0
+net.ipv6.conf.default.router_solicitations=0
+net.ipv6.conf.default.accept_ra_rtr_pref=0
+net.ipv6.conf.default.accept_ra_pinfo=0
+net.ipv6.conf.default.accept_ra_defrtr=0
+net.ipv6.conf.default.autoconf=0
+net.ipv6.conf.default.dad_transmits=0
+net.ipv6.conf.default.max_addresses=1
+net.ipv6.conf.all.accept_ra=0
+net.ipv6.conf.default.accept_ra=0
+kernel.modules_disabled=%(kernel_modules_disabled)s
+kernel.sysrq=%(kernel_sysrq)s
+fs.suid_dumpable=%(fs_suid_dumpable)s
+kernel.randomize_va_space=2
+"""
+
+
+def get_audits():
+    """Get OS hardening sysctl audits.
+
+    :returns:  list of audits
+    """
+    audits = []
+    settings = utils.get_settings('os')
+
+    # Apply the sysctl settings which are configured to be applied.
+    audits.append(SysctlConf())
+    # Make sure that only root has access to the sysctl.conf file, and
+    # that it is read-only.
+    audits.append(FilePermissionAudit('/etc/sysctl.conf',
+                                      user='root',
+                                      group='root', mode=0o0440))
+    # If module loading is not enabled, then ensure that the modules
+    # file has the appropriate permissions and rebuild the initramfs
+    if not settings['security']['kernel_enable_module_loading']:
+        audits.append(ModulesTemplate())
+
+    return audits
+
+
+class ModulesContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+        with open('/proc/cpuinfo', 'r') as fd:
+            cpuinfo = fd.readlines()
+
+        # vendor_id is absent on some platforms; default to empty so the
+        # checks below degrade gracefully instead of raising NameError.
+        vendor = ''
+        for line in cpuinfo:
+            match = re.search(r"^vendor_id\s+:\s+(.+)", line)
+            if match:
+                vendor = match.group(1)
+
+        if vendor == "GenuineIntel":
+            vendor = "intel"
+        elif vendor == "AuthenticAMD":
+            vendor = "amd"
+
+        ctxt = {'arch': platform.processor(),
+                'cpuVendor': vendor,
+                'desktop_enable': settings['general']['desktop_enable']}
+
+        return ctxt
+
+
+class ModulesTemplate(TemplatedFile):
+
+    def __init__(self):
+        # Subclass TemplatedFile (not object) so the super() call below
+        # reaches the TemplatedFile constructor; the keyword is
+        # template_dir, matching the other TemplatedFile uses in this file.
+        super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules',
+                                              ModulesContext(),
+                                              template_dir=TEMPLATES_DIR,
+                                              user='root', group='root',
+                                              mode=0o0440)
+
+    def post_write(self):
+        subprocess.check_call(['update-initramfs', '-u'])
+
+
+class SysCtlHardeningContext(object):
+    def __call__(self):
+        settings = utils.get_settings('os')
+        ctxt = {'sysctl': {}}
+
+        log("Applying sysctl settings", level=INFO)
+        extras = {'net_ipv4_ip_forward': 0,
+                  'net_ipv6_conf_all_forwarding': 0,
+                  'net_ipv6_conf_all_disable_ipv6': 1,
+                  'net_ipv4_tcp_timestamps': 0,
+                  'net_ipv4_conf_all_arp_ignore': 0,
+                  'net_ipv4_conf_all_arp_announce': 0,
+                  'kernel_sysrq': 0,
+                  'fs_suid_dumpable': 0,
+                  'kernel_modules_disabled': 1}
+
+        if settings['sysctl']['ipv6_enable']:
+            extras['net_ipv6_conf_all_disable_ipv6'] = 0
+
+        if settings['sysctl']['forwarding']:
+            extras['net_ipv4_ip_forward'] = 1
+            extras['net_ipv6_conf_all_forwarding'] = 1
+
+        if settings['sysctl']['arp_restricted']:
+            extras['net_ipv4_conf_all_arp_ignore'] = 1
+            extras['net_ipv4_conf_all_arp_announce'] = 2
+
+        if settings['security']['kernel_enable_module_loading']:
+            extras['kernel_modules_disabled'] = 0
+
+        if settings['sysctl']['kernel_enable_sysrq']:
+            sysrq_val = settings['sysctl']['kernel_secure_sysrq']
+            extras['kernel_sysrq'] = sysrq_val
+
+        if settings['security']['kernel_enable_core_dump']:
+            extras['fs_suid_dumpable'] = 1
+
+        settings.update(extras)
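+        # Each rendered default line is 'key=value'; e.g.
+        # 'kernel.randomize_va_space=2' yields the pair
+        # ('kernel.randomize_va_space', '2'). Keys with no matching
+        # /proc/sys entry are skipped below.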
+        for d in (SYSCTL_DEFAULTS % settings).split():
+            d = d.strip().partition('=')
+            key = d[0].strip()
+            path = os.path.join('/proc/sys', key.replace('.', '/'))
+            if not os.path.exists(path):
+                log("Skipping '%s' since '%s' does not exist" % (key, path),
+                    level=WARNING)
+                continue
+
+            ctxt['sysctl'][key] = d[2] or None
+
+        # Translate for python3
+        return {'sysctl_settings':
+                [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
+
+
+class SysctlConf(TemplatedFile):
+    """An audit check for sysctl settings."""
+    def __init__(self):
+        self.conffile = '/etc/sysctl.d/99-juju-hardening.conf'
+        super(SysctlConf, self).__init__(self.conffile,
+                                         SysCtlHardeningContext(),
+                                         template_dir=TEMPLATES_DIR,
+                                         user='root', group='root',
+                                         mode=0o0440)
+
+    def post_write(self):
+        try:
+            subprocess.check_call(['sysctl', '-p', self.conffile])
+        except subprocess.CalledProcessError as e:
+            # NOTE: on some systems if sysctl cannot apply all settings it
+            #       will return non-zero as well.
+            log("sysctl command returned an error (maybe some "
+                "keys could not be set) - %s" % (e),
+                level=WARNING)
diff --git a/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf b/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf
new file mode 100644
index 0000000000000000000000000000000000000000..0014191fc8152fd9147b3fb5446987e6e62f2d77
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf
@@ -0,0 +1,8 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+{% if disable_core_dump -%}
+# Prevent core dumps for all users. These are usually only needed by developers and may contain sensitive information.
+* hard core 0
+{% endif %}
\ No newline at end of file
diff --git a/charmhelpers/contrib/hardening/host/templates/99-hardening.sh b/charmhelpers/contrib/hardening/host/templates/99-hardening.sh
new file mode 100644
index 0000000000000000000000000000000000000000..616cef46f492f682aca28c71a6e20176870a36f2
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/templates/99-hardening.sh
@@ -0,0 +1,5 @@
+TMOUT={{ ssh_tmout }}
+readonly TMOUT
+export TMOUT
+
+readonly HISTFILE
diff --git a/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf b/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf
new file mode 100644
index 0000000000000000000000000000000000000000..101f1e1d709c268890553957f30c93259681ce59
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf
@@ -0,0 +1,7 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+{% for key, value in sysctl_settings -%}
+{{ key }}={{ value }}
+{% endfor -%}
diff --git a/charmhelpers/contrib/hardening/host/templates/__init__.py b/charmhelpers/contrib/hardening/host/templates/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/charmhelpers/contrib/hardening/host/templates/login.defs b/charmhelpers/contrib/hardening/host/templates/login.defs
new file mode 100644
index 0000000000000000000000000000000000000000..db137d6dbb7a3a850294407199225392a880cfc2
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/templates/login.defs
@@ -0,0 +1,349 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+#
+# /etc/login.defs - Configuration control definitions for the login package.
+#
+# Three items must be defined:  MAIL_DIR, ENV_SUPATH, and ENV_PATH.
+# If unspecified, some arbitrary (and possibly incorrect) value will
+# be assumed.  All other items are optional - if not specified then
+# the described action or option will be inhibited.
+#
+# Comment lines (lines beginning with "#") and blank lines are ignored.
+#
+# Modified for Linux.  --marekm
+
+# REQUIRED for useradd/userdel/usermod
+#   Directory where mailboxes reside, _or_ name of file, relative to the
+#   home directory.  If you _do_ define MAIL_DIR and MAIL_FILE,
+#   MAIL_DIR takes precedence.
+#
+#   Essentially:
+#      - MAIL_DIR defines the location of users' mail spool files
+#        (for mbox use) by appending the username to MAIL_DIR as defined
+#        below.
+#      - MAIL_FILE defines the location of the users' mail spool files as the
+#        fully-qualified filename obtained by prepending the user home
+#        directory before $MAIL_FILE
+#
+# NOTE: This is no longer used for setting up the user's MAIL environment
+#       variable, which is, starting from shadow 4.0.12-1 in Debian,
+#       entirely the job of the pam_mail PAM module.
+#       See the default PAM configuration files provided for
+#       login, su, etc.
+#
+# This is a temporary situation: setting these variables will soon
+# move to /etc/default/useradd and the variables will then no longer
+# be supported
+MAIL_DIR        /var/mail
+#MAIL_FILE      .mail
+
+#
+# Enable logging and display of /var/log/faillog login failure info.
+# This option conflicts with the pam_tally PAM module.
+#
+FAILLOG_ENAB		yes
+
+#
+# Enable display of unknown usernames when login failures are recorded.
+#
+# WARNING: Unknown usernames may become world readable. 
+# See #290803 and #298773 for details about how this could become a security
+# concern
+LOG_UNKFAIL_ENAB	no
+
+#
+# Enable logging of successful logins
+#
+LOG_OK_LOGINS		yes
+
+#
+# Enable "syslog" logging of su activity - in addition to sulog file logging.
+# SYSLOG_SG_ENAB does the same for newgrp and sg.
+#
+SYSLOG_SU_ENAB		yes
+SYSLOG_SG_ENAB		yes
+
+#
+# If defined, all su activity is logged to this file.
+#
+#SULOG_FILE	/var/log/sulog
+
+#
+# If defined, file which maps tty line to TERM environment parameter.
+# Each line of the file is in a format something like "vt100  tty01".
+#
+#TTYTYPE_FILE	/etc/ttytype
+
+#
+# If defined, login failures will be logged here in a utmp format.
+# last, when invoked as lastb, will read /var/log/btmp, so...
+#
+FTMP_FILE	/var/log/btmp
+
+#
+# If defined, the command name to display when running "su -".  For
+# example, if this is defined as "su" then a "ps" will display the
+# command is "-su".  If not defined, then "ps" would display the
+# name of the shell actually being run, e.g. something like "-sh".
+#
+SU_NAME		su
+
+#
+# If defined, file which inhibits all the usual chatter during the login
+# sequence.  If a full pathname, then hushed mode will be enabled if the
+# user's name or shell are found in the file.  If not a full pathname, then
+# hushed mode will be enabled if the file exists in the user's home directory.
+#
+HUSHLOGIN_FILE	.hushlogin
+#HUSHLOGIN_FILE	/etc/hushlogins
+
+#
+# *REQUIRED*  The default PATH settings, for superuser and normal users.
+#
+# (they are minimal, add the rest in the shell startup files)
+ENV_SUPATH	PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ENV_PATH	PATH=/usr/local/bin:/usr/bin:/bin{% if additional_user_paths %}{{ additional_user_paths }}{% endif %}
+
+#
+# Terminal permissions
+#
+#	TTYGROUP	Login tty will be assigned this group ownership.
+#	TTYPERM		Login tty will be set to this permission.
+#
+# If you have a "write" program which is "setgid" to a special group
+# which owns the terminals, define TTYGROUP to the group number and
+# TTYPERM to 0620.  Otherwise leave TTYGROUP commented out and assign
+# TTYPERM to either 622 or 600.
+#
+# In Debian /usr/bin/bsd-write or similar programs are setgid tty
+# However, the default and recommended value for TTYPERM is still 0600
+# to not allow anyone to write to anyone else console or terminal
+
+# Users can still allow other people to write them by issuing 
+# the "mesg y" command.
+
+TTYGROUP	tty
+TTYPERM		0600
+
+#
+# Login configuration initializations:
+#
+#	ERASECHAR	Terminal ERASE character ('\010' = backspace).
+#	KILLCHAR	Terminal KILL character ('\025' = CTRL/U).
+#	UMASK		Default "umask" value.
+#
+# The ERASECHAR and KILLCHAR are used only on System V machines.
+# 
+# UMASK is the default umask value for pam_umask and is used by
+# useradd and newusers to set the mode of the new home directories.
+# 022 is the "historical" value in Debian for UMASK
+# 027, or even 077, could be considered better for privacy
+# There is no One True Answer here: each sysadmin must make up their own
+# mind.
+#
+# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
+# for private user groups, i. e. the uid is the same as gid, and username is
+# the same as the primary group name: for these, the user permissions will be
+# used as group permissions, e. g. 022 will become 002.
+#
+# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
+#
+ERASECHAR	0177
+KILLCHAR	025
+UMASK		{{ umask }}
+
+#
+# Password aging controls:
+#
+#	PASS_MAX_DAYS	Maximum number of days a password may be used.
+#	PASS_MIN_DAYS	Minimum number of days allowed between password changes.
+#	PASS_WARN_AGE	Number of days warning given before a password expires.
+#
+PASS_MAX_DAYS	{{ pwd_max_age }}
+PASS_MIN_DAYS	{{ pwd_min_age }}
+PASS_WARN_AGE	7
+
+#
+# Min/max values for automatic uid selection in useradd
+#
+UID_MIN			 {{ uid_min }}
+UID_MAX			60000
+# System accounts
+SYS_UID_MIN		  {{ sys_uid_min }}
+SYS_UID_MAX		  {{ sys_uid_max }}
+
+# Min/max values for automatic gid selection in groupadd
+GID_MIN			 {{ gid_min }}
+GID_MAX			60000
+# System accounts
+SYS_GID_MIN		  {{ sys_gid_min }}
+SYS_GID_MAX		  {{ sys_gid_max }}
+
+#
+# Max number of login retries if password is bad. This will most likely be
+# overridden by PAM, since the default pam_unix module has its own built-in
+# limit of 3 retries. However, this is a safe fallback in case you are using
+# an authentication module that does not enforce PAM_MAXTRIES.
+#
+LOGIN_RETRIES		{{ login_retries }}
+
+#
+# Max time in seconds for login
+#
+LOGIN_TIMEOUT		{{ login_timeout }}
+
+#
+# Which fields may be changed by regular users using chfn - use
+# any combination of letters "frwh" (full name, room number, work
+# phone, home phone).  If not defined, no changes are allowed.
+# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
+# 
+{% if chfn_restrict %}
+CHFN_RESTRICT		{{ chfn_restrict }}
+{% endif %}
+
+#
+# Should login be allowed if we can't cd to the home directory?
+# Default is no.
+#
+DEFAULT_HOME	{% if allow_login_without_home %} yes {% else %} no {% endif %}
+
+#
+# If defined, this command is run when removing a user.
+# It should remove any at/cron/print jobs etc. owned by
+# the user to be removed (passed as the first argument).
+#
+#USERDEL_CMD	/usr/sbin/userdel_local
+
+#
+# Enable setting of the umask group bits to be the same as owner bits
+# (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is
+# the same as gid, and username is the same as the primary group name.
+#
+# If set to yes, userdel will remove the user's group if it contains no
+# more members, and useradd will create by default a group with the name
+# of the user.
+#
+USERGROUPS_ENAB yes
+
+#
+# Instead of the real user shell, the program specified by this parameter
+# will be launched, although its visible name (argv[0]) will be the shell's.
+# The program may do whatever it wants (logging, additional authentication,
+# banner, ...) before running the actual shell.
+#
+# FAKE_SHELL /bin/fakeshell
+
+#
+# If defined, either full pathname of a file containing device names or
+# a ":" delimited list of device names.  Root logins will be allowed only
+# upon these devices.
+#
+# This variable is used by login and su.
+#
+#CONSOLE	/etc/consoles
+#CONSOLE	console:tty01:tty02:tty03:tty04
+
+#
+# List of groups to add to the user's supplementary group set
+# when logging in on the console (as determined by the CONSOLE
+# setting).  Default is none.
+#
+# Use with caution - it is possible for users to gain permanent
+# access to these groups, even when not logged in on the console.
+# How to do it is left as an exercise for the reader...
+#
+# This variable is used by login and su.
+#
+#CONSOLE_GROUPS		floppy:audio:cdrom
+
+#
+# If set to "yes", new passwords will be encrypted using the MD5-based
+# algorithm compatible with the one used by recent releases of FreeBSD.
+# It supports passwords of unlimited length and longer salt strings.
+# Set to "no" if you need to copy encrypted passwords to other systems
+# which don't understand the new algorithm.  Default is "no".
+#
+# This variable is deprecated. You should use ENCRYPT_METHOD.
+#
+MD5_CRYPT_ENAB	no
+
+#
+# If set to MD5 , MD5-based algorithm will be used for encrypting password
+# If set to SHA256, SHA256-based algorithm will be used for encrypting password
+# If set to SHA512, SHA512-based algorithm will be used for encrypting password
+# If set to DES, DES-based algorithm will be used for encrypting password (default)
+# Overrides the MD5_CRYPT_ENAB option
+#
+# Note: It is recommended to use a value consistent with
+# the PAM modules configuration.
+#
+ENCRYPT_METHOD SHA512
+
+#
+# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512.
+#
+# Define the number of SHA rounds.
+# With a lot of rounds, it is more difficult to brute-force the password.
+# But note also that more CPU resources will be needed to authenticate
+# users.
+#
+# If not specified, the libc will choose the default number of rounds (5000).
+# The values must be inside the 1000-999999999 range.
+# If only one of the MIN or MAX values is set, then this value will be used.
+# If MIN > MAX, the highest value will be used.
+#
+# SHA_CRYPT_MIN_ROUNDS 5000
+# SHA_CRYPT_MAX_ROUNDS 5000
+
+################# OBSOLETED BY PAM ##############
+#						#
+# These options are now handled by PAM. Please	#
+# edit the appropriate file in /etc/pam.d/ to	#
+# enable the equivalents of them.
+#
+#################################################
+
+#MOTD_FILE
+#DIALUPS_CHECK_ENAB
+#LASTLOG_ENAB
+#MAIL_CHECK_ENAB
+#OBSCURE_CHECKS_ENAB
+#PORTTIME_CHECKS_ENAB
+#SU_WHEEL_ONLY
+#CRACKLIB_DICTPATH
+#PASS_CHANGE_TRIES
+#PASS_ALWAYS_WARN
+#ENVIRON_FILE
+#NOLOGINS_FILE
+#ISSUE_FILE
+#PASS_MIN_LEN
+#PASS_MAX_LEN
+#ULIMIT
+#ENV_HZ
+#CHFN_AUTH
+#CHSH_AUTH
+#FAIL_DELAY
+
+################# OBSOLETED #######################
+#						  #
+# These options are no more handled by shadow.    #
+#                                                 #
+# Shadow utilities will display a warning if they #
+# still appear.                                   #
+#                                                 #
+###################################################
+
+# CLOSE_SESSIONS
+# LOGIN_STRING
+# NO_PASSWORD_CONSOLE
+# QMAIL_DIR
+
diff --git a/charmhelpers/contrib/hardening/host/templates/modules b/charmhelpers/contrib/hardening/host/templates/modules
new file mode 100644
index 0000000000000000000000000000000000000000..ef0354ee35fa363b303bb22c6ed0d2d1196aed52
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/templates/modules
@@ -0,0 +1,117 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+# /etc/modules: kernel modules to load at boot time.
+#
+# This file contains the names of kernel modules that should be loaded
+# at boot time, one per line. Lines beginning with "#" are ignored.
+# Parameters can be specified after the module name.
+
+# Arch
+# ----
+# 
+# Modules for certain builds; contains support modules and some CPU-specific optimizations.
+
+{% if arch == "x86_64" -%}
+# Optimize for x86_64 cryptographic features
+twofish-x86_64-3way
+twofish-x86_64
+aes-x86_64
+salsa20-x86_64
+blowfish-x86_64
+{% endif -%}
+
+{% if cpuVendor == "intel" -%}
+# Intel-specific optimizations
+ghash-clmulni-intel
+aesni-intel
+kvm-intel
+{% endif -%}
+
+{% if cpuVendor == "amd" -%}
+# AMD-specific optimizations
+kvm-amd
+{% endif -%}
+
+kvm
+
+
+# Crypto
+# ------
+
+# Some core modules which comprise strong cryptography.
+blowfish_common
+blowfish_generic
+ctr
+cts
+lrw
+lzo
+rmd160
+rmd256
+rmd320
+serpent
+sha512_generic
+twofish_common
+twofish_generic
+xts
+zlib
+
+
+# Drivers
+# -------
+
+# Basics
+lp
+rtc
+loop
+
+# Filesystems
+ext2
+btrfs
+
+{% if desktop_enable -%}
+# Desktop
+psmouse
+snd
+snd_ac97_codec
+snd_intel8x0
+snd_page_alloc
+snd_pcm
+snd_timer
+soundcore
+usbhid
+{% endif -%}
+
+# Lib
+# ---
+xz
+
+
+# Net
+# ---
+
+# All modules needed for netfilter rules (i.e. iptables, ebtables).
+ip_tables
+x_tables
+iptable_filter
+iptable_nat
+
+# Targets
+ipt_LOG
+ipt_REJECT
+
+# Modules
+xt_connlimit
+xt_tcpudp
+xt_recent
+xt_limit
+xt_conntrack
+nf_conntrack
+nf_conntrack_ipv4
+nf_defrag_ipv4
+xt_state
+nf_nat
+
+# Addons
+xt_pknock
\ No newline at end of file
diff --git a/charmhelpers/contrib/hardening/host/templates/passwdqc.conf b/charmhelpers/contrib/hardening/host/templates/passwdqc.conf
new file mode 100644
index 0000000000000000000000000000000000000000..f98d14e57428c106692e0f57e8b381f2b0a12c44
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/templates/passwdqc.conf
@@ -0,0 +1,11 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+Name: passwdqc password strength enforcement
+Default: yes
+Priority: 1024
+Conflicts: cracklib
+Password-Type: Primary
+Password:
+  requisite     pam_passwdqc.so {{ auth_pam_passwdqc_options }}
diff --git a/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh b/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fd2de791b96fbb8889811daf7340d1f2ca2ab3a6
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh
@@ -0,0 +1,8 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+# Disable core dumps via soft limits for all users. Compliance with this
+# setting is voluntary and can be modified by users up to a hard limit. This
+# setting is a sane default.
+ulimit -S -c 0 > /dev/null 2>&1
diff --git a/charmhelpers/contrib/hardening/host/templates/securetty b/charmhelpers/contrib/hardening/host/templates/securetty
new file mode 100644
index 0000000000000000000000000000000000000000..15b18d4e2f45747845d0b65c06997f154ef674a4
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/templates/securetty
@@ -0,0 +1,11 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+# A list of TTYs from which root can log in;
+# see `man securetty` for reference
+{% if ttys -%}
+{% for tty in ttys -%}
+{{ tty }}
+{% endfor -%}
+{% endif -%}
diff --git a/charmhelpers/contrib/hardening/host/templates/tally2 b/charmhelpers/contrib/hardening/host/templates/tally2
new file mode 100644
index 0000000000000000000000000000000000000000..d9620299c55e51abbee1017a227c217cd4a9fd33
--- /dev/null
+++ b/charmhelpers/contrib/hardening/host/templates/tally2
@@ -0,0 +1,14 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+Name: tally2 lockout after failed attempts enforcement
+Default: yes
+Priority: 1024
+Conflicts: cracklib
+Auth-Type: Primary
+Auth-Initial:
+  required      pam_tally2.so deny={{ auth_retries }} onerr=fail unlock_time={{ auth_lockout_time }}
+Account-Type: Primary
+Account-Initial:
+  required      pam_tally2.so
diff --git a/charmhelpers/contrib/hardening/mysql/__init__.py b/charmhelpers/contrib/hardening/mysql/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..58bebd846bd6fa648cfab6ab1056ad10d8415453
--- /dev/null
+++ b/charmhelpers/contrib/hardening/mysql/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from os import path
+
+TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/charmhelpers/contrib/hardening/mysql/checks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1990d8513bbeef067a8d9a2168e1952efb2961dc
--- /dev/null
+++ b/charmhelpers/contrib/hardening/mysql/checks/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+)
+from charmhelpers.contrib.hardening.mysql.checks import config
+
+
+def run_mysql_checks():
+    log("Starting MySQL hardening checks.", level=DEBUG)
+    checks = config.get_audits()
+    for check in checks:
+        log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
+        check.ensure_compliance()
+
+    log("MySQL hardening checks complete.", level=DEBUG)
diff --git a/charmhelpers/contrib/hardening/mysql/checks/config.py b/charmhelpers/contrib/hardening/mysql/checks/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..a79f33b74a5c2972a82f0b4d8de8d1073dc293ed
--- /dev/null
+++ b/charmhelpers/contrib/hardening/mysql/checks/config.py
@@ -0,0 +1,87 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import six
+import subprocess
+
+from charmhelpers.core.hookenv import (
+    log,
+    WARNING,
+)
+from charmhelpers.contrib.hardening.audits.file import (
+    FilePermissionAudit,
+    DirectoryPermissionAudit,
+    TemplatedFile,
+)
+from charmhelpers.contrib.hardening.mysql import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get MySQL hardening config audits.
+
+    :returns:  list of audits
+    """
+    if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0:
+        log("MySQL does not appear to be installed on this node - "
+            "skipping mysql hardening", level=WARNING)
+        return []
+
+    settings = utils.get_settings('mysql')
+    hardening_settings = settings['hardening']
+    my_cnf = hardening_settings['mysql-conf']
+
+    audits = [
+        FilePermissionAudit(paths=[my_cnf], user='root',
+                            group='root', mode=0o0600),
+
+        TemplatedFile(hardening_settings['hardening-conf'],
+                      MySQLConfContext(),
+                      TEMPLATES_DIR,
+                      mode=0o0750,
+                      user='mysql',
+                      group='root',
+                      service_actions=[{'service': 'mysql',
+                                        'actions': ['restart']}]),
+
+        # MySQL and Percona charms do not allow configuration of the
+        # data directory, so use the default.
+        DirectoryPermissionAudit('/var/lib/mysql',
+                                 user='mysql',
+                                 group='mysql',
+                                 recursive=False,
+                                 mode=0o755),
+
+        DirectoryPermissionAudit('/etc/mysql',
+                                 user='root',
+                                 group='root',
+                                 recursive=False,
+                                 mode=0o700),
+    ]
+
+    return audits
+
+
+class MySQLConfContext(object):
+    """Defines the set of key/value pairs to set in a mysql config file.
+
+    This context, when called, returns a dictionary containing the
+    key/value pairs of settings to specify in the
+    /etc/mysql/conf.d/hardening.cnf file.
+    """
+    def __call__(self):
+        settings = utils.get_settings('mysql')
+        # Translate for python3
+        return {'mysql_settings':
+                [(k, v) for k, v in six.iteritems(settings['security'])]}
diff --git a/charmhelpers/contrib/hardening/mysql/templates/__init__.py b/charmhelpers/contrib/hardening/mysql/templates/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf b/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf
new file mode 100644
index 0000000000000000000000000000000000000000..8242586cd66360b7e6ae33f13018363b95cd4ea9
--- /dev/null
+++ b/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf
@@ -0,0 +1,12 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+[mysqld]
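+# A value of 'True' emits the bare option name, 'None' suppresses the option
+# entirely, and anything else is written as 'option = value'.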
+{% for setting, value in mysql_settings -%}
+{% if value == 'True' -%}
+{{ setting }}
+{% elif value != 'None' and value != None -%}
+{{ setting }} = {{ value }}
+{% endif -%}
+{% endfor -%}
diff --git a/charmhelpers/contrib/hardening/ssh/__init__.py b/charmhelpers/contrib/hardening/ssh/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..58bebd846bd6fa648cfab6ab1056ad10d8415453
--- /dev/null
+++ b/charmhelpers/contrib/hardening/ssh/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from os import path
+
+TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/charmhelpers/contrib/hardening/ssh/checks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..edaf484b39f8c7353cebb2f4b68944c6493ba7b3
--- /dev/null
+++ b/charmhelpers/contrib/hardening/ssh/checks/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+)
+from charmhelpers.contrib.hardening.ssh.checks import config
+
+
+def run_ssh_checks():
+    log("Starting SSH hardening checks.", level=DEBUG)
+    checks = config.get_audits()
+    for check in checks:
+        log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
+        check.ensure_compliance()
+
+    log("SSH hardening checks complete.", level=DEBUG)
diff --git a/charmhelpers/contrib/hardening/ssh/checks/config.py b/charmhelpers/contrib/hardening/ssh/checks/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..41bed2d1e7b031182edcf62710876e4073dfbc6e
--- /dev/null
+++ b/charmhelpers/contrib/hardening/ssh/checks/config.py
@@ -0,0 +1,435 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from charmhelpers.contrib.network.ip import (
+    get_address_in_network,
+    get_iface_addr,
+    is_ip,
+)
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+)
+from charmhelpers.fetch import (
+    apt_install,
+    apt_update,
+)
+from charmhelpers.core.host import (
+    lsb_release,
+    CompareHostReleases,
+)
+from charmhelpers.contrib.hardening.audits.file import (
+    TemplatedFile,
+    FileContentAudit,
+)
+from charmhelpers.contrib.hardening.ssh import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get SSH hardening config audits.
+
+    :returns:  list of audits
+    """
+    audits = [SSHConfig(), SSHDConfig(), SSHConfigFileContentAudit(),
+              SSHDConfigFileContentAudit()]
+    return audits
+
+
+class SSHConfigContext(object):
+
+    type = 'client'
+
+    def get_macs(self, allow_weak_mac):
+        if allow_weak_mac:
+            weak_macs = 'weak'
+        else:
+            weak_macs = 'default'
+
+        default = 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
+        macs = {'default': default,
+                'weak': default + ',hmac-sha1'}
+
+        default = ('hmac-sha2-512-etm@openssh.com,'
+                   'hmac-sha2-256-etm@openssh.com,'
+                   'hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,'
+                   'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160')
+        macs_66 = {'default': default,
+                   'weak': default + ',hmac-sha1'}
+
+        # Use newer MACs on Ubuntu Trusty and above
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG)
+            macs = macs_66
+
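+        # e.g. with weak_hmac=True on trusty or later this returns the
+        # etm-mode MAC list above with ',hmac-sha1' appended.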
+        return macs[weak_macs]
+
+    def get_kexs(self, allow_weak_kex):
+        if allow_weak_kex:
+            weak_kex = 'weak'
+        else:
+            weak_kex = 'default'
+
+        default = 'diffie-hellman-group-exchange-sha256'
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex = {'default': default,
+               'weak': weak}
+
+        default = ('curve25519-sha256@libssh.org,'
+                   'diffie-hellman-group-exchange-sha256')
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex_66 = {'default': default,
+                  'weak': weak}
+
+        # Use newer kex on Ubuntu Trusty and above
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new key exchange '
+                'algorithms', level=DEBUG)
+            kex = kex_66
+
+        return kex[weak_kex]
+
+    def get_ciphers(self, cbc_required):
+        if cbc_required:
+            weak_ciphers = 'weak'
+        else:
+            weak_ciphers = 'default'
+
+        default = 'aes256-ctr,aes192-ctr,aes128-ctr'
+        cipher = {'default': default,
+                  'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,'
+                   'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr')
+        ciphers_66 = {'default': default,
+                      'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        # Use newer ciphers on Ubuntu Trusty and above
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new ciphers',
+                level=DEBUG)
+            cipher = ciphers_66
+
+        return cipher[weak_ciphers]
+
+    def get_listening(self, listen=['0.0.0.0']):
+        """Returns a list of addresses SSH can list on
+
+        Turns input into a sensible list of IPs SSH can listen on. Input
+        must be a Python list of interface names, IPs and/or CIDRs.
+
+        :param listen: list of IPs, CIDRs, interface names
+
+        :returns: list of IPs available on the host
+        """
+        if listen == ['0.0.0.0']:
+            return listen
+
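+        # Resolve each entry in turn: first as a CIDR network, then as a
+        # literal IP and finally as an interface name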
+        value = []
+        for network in listen:
+            try:
+                ip = get_address_in_network(network=network, fatal=True)
+            except ValueError:
+                if is_ip(network):
+                    ip = network
+                else:
+                    try:
+                        ip = get_iface_addr(iface=network, fatal=False)[0]
+                    except IndexError:
+                        continue
+            value.append(ip)
+        if not value:
+            return ['0.0.0.0']
+        return value
+
+    def __call__(self):
+        settings = utils.get_settings('ssh')
+        if settings['common']['network_ipv6_enable']:
+            addr_family = 'any'
+        else:
+            addr_family = 'inet'
+
+        ctxt = {
+            'addr_family': addr_family,
+            'remote_hosts': settings['common']['remote_hosts'],
+            'password_auth_allowed':
+            settings['client']['password_authentication'],
+            'ports': settings['common']['ports'],
+            'ciphers': self.get_ciphers(settings['client']['cbc_required']),
+            'macs': self.get_macs(settings['client']['weak_hmac']),
+            'kexs': self.get_kexs(settings['client']['weak_kex']),
+            'roaming': settings['client']['roaming'],
+        }
+        return ctxt
+
+
+class SSHConfig(TemplatedFile):
+    def __init__(self):
+        path = '/etc/ssh/ssh_config'
+        super(SSHConfig, self).__init__(path=path,
+                                        template_dir=TEMPLATES_DIR,
+                                        context=SSHConfigContext(),
+                                        user='root',
+                                        group='root',
+                                        mode=0o0644)
+
+    def pre_write(self):
+        settings = utils.get_settings('ssh')
+        apt_update(fatal=True)
+        apt_install(settings['client']['package'])
+        if not os.path.exists('/etc/ssh'):
+            os.makedirs('/etc/ssh')
+            # NOTE: don't recurse
+            utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                     maxdepth=0)
+
+    def post_write(self):
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+
+class SSHDConfigContext(SSHConfigContext):
+
+    type = 'server'
+
+    def __call__(self):
+        settings = utils.get_settings('ssh')
+        if settings['common']['network_ipv6_enable']:
+            addr_family = 'any'
+        else:
+            addr_family = 'inet'
+
+        ctxt = {
+            'ssh_ip': self.get_listening(settings['server']['listen_to']),
+            'password_auth_allowed':
+            settings['server']['password_authentication'],
+            'ports': settings['common']['ports'],
+            'addr_family': addr_family,
+            'ciphers': self.get_ciphers(settings['server']['cbc_required']),
+            'macs': self.get_macs(settings['server']['weak_hmac']),
+            'kexs': self.get_kexs(settings['server']['weak_kex']),
+            'host_key_files': settings['server']['host_key_files'],
+            'allow_root_with_key': settings['server']['allow_root_with_key'],
+            'password_authentication':
+            settings['server']['password_authentication'],
+            'use_priv_sep': settings['server']['use_privilege_separation'],
+            'use_pam': settings['server']['use_pam'],
+            'allow_x11_forwarding': settings['server']['allow_x11_forwarding'],
+            'print_motd': settings['server']['print_motd'],
+            'print_last_log': settings['server']['print_last_log'],
+            'client_alive_interval':
+            settings['server']['alive_interval'],
+            'client_alive_count': settings['server']['alive_count'],
+            'allow_tcp_forwarding': settings['server']['allow_tcp_forwarding'],
+            'allow_agent_forwarding':
+            settings['server']['allow_agent_forwarding'],
+            'deny_users': settings['server']['deny_users'],
+            'allow_users': settings['server']['allow_users'],
+            'deny_groups': settings['server']['deny_groups'],
+            'allow_groups': settings['server']['allow_groups'],
+            'use_dns': settings['server']['use_dns'],
+            'sftp_enable': settings['server']['sftp_enable'],
+            'sftp_group': settings['server']['sftp_group'],
+            'sftp_chroot': settings['server']['sftp_chroot'],
+            'max_auth_tries': settings['server']['max_auth_tries'],
+            'max_sessions': settings['server']['max_sessions'],
+        }
+        return ctxt
+
+
+class SSHDConfig(TemplatedFile):
+    def __init__(self):
+        path = '/etc/ssh/sshd_config'
+        super(SSHDConfig, self).__init__(path=path,
+                                         template_dir=TEMPLATES_DIR,
+                                         context=SSHDConfigContext(),
+                                         user='root',
+                                         group='root',
+                                         mode=0o0600,
+                                         service_actions=[{'service': 'ssh',
+                                                           'actions':
+                                                           ['restart']}])
+
+    def pre_write(self):
+        settings = utils.get_settings('ssh')
+        apt_update(fatal=True)
+        apt_install(settings['server']['package'])
+        if not os.path.exists('/etc/ssh'):
+            os.makedirs('/etc/ssh')
+            # NOTE: don't recurse
+            utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                     maxdepth=0)
+
+    def post_write(self):
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+
+class SSHConfigFileContentAudit(FileContentAudit):
+    def __init__(self):
+        self.path = '/etc/ssh/ssh_config'
+        super(SSHConfigFileContentAudit, self).__init__(self.path, {})
+
+    def is_compliant(self, *args, **kwargs):
+        self.pass_cases = []
+        self.fail_cases = []
+        settings = utils.get_settings('ssh')
+
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            if not settings['client']['weak_hmac']:
+                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['client']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?')  # noqa
+
+            if settings['client']['cbc_required']:
+                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+            else:
+                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+')  # noqa
+                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
+                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+        else:
+            if not settings['client']['weak_hmac']:
+                self.fail_cases.append(r'^MACs.+,hmac-sha1$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['client']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+
+            if settings['client']['cbc_required']:
+                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+            else:
+                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+
+        if settings['client']['roaming']:
+            self.pass_cases.append(r'^UseRoaming yes$')
+        else:
+            self.fail_cases.append(r'^UseRoaming yes$')
+
+        return super(SSHConfigFileContentAudit, self).is_compliant(*args,
+                                                                   **kwargs)
+
+
+class SSHDConfigFileContentAudit(FileContentAudit):
+    def __init__(self):
+        self.path = '/etc/ssh/sshd_config'
+        super(SSHDConfigFileContentAudit, self).__init__(self.path, {})
+
+    def is_compliant(self, *args, **kwargs):
+        self.pass_cases = []
+        self.fail_cases = []
+        settings = utils.get_settings('ssh')
+
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            if not settings['server']['weak_hmac']:
+                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['server']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?')  # noqa
+
+            if settings['server']['cbc_required']:
+                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+            else:
+                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+')  # noqa
+                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
+                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+        else:
+            if not settings['server']['weak_hmac']:
+                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['server']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+
+            if settings['server']['cbc_required']:
+                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+            else:
+                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+
+        if settings['server']['sftp_enable']:
+            self.pass_cases.append(r'^Subsystem\ssftp')
+        else:
+            self.fail_cases.append(r'^Subsystem\ssftp')
+
+        return super(SSHDConfigFileContentAudit, self).is_compliant(*args,
+                                                                    **kwargs)
diff --git a/charmhelpers/contrib/hardening/ssh/templates/__init__.py b/charmhelpers/contrib/hardening/ssh/templates/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/charmhelpers/contrib/hardening/ssh/templates/ssh_config b/charmhelpers/contrib/hardening/ssh/templates/ssh_config
new file mode 100644
index 0000000000000000000000000000000000000000..9742d8e2a32cd5da01a9dcb691a5a1201ed93050
--- /dev/null
+++ b/charmhelpers/contrib/hardening/ssh/templates/ssh_config
@@ -0,0 +1,70 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+# This is the ssh client system-wide configuration file.  See
+# ssh_config(5) for more information.  This file provides defaults for
+# users, and the values can be changed in per-user configuration files
+# or on the command line.
+
+# Configuration data is parsed as follows:
+#  1. command line options
+#  2. user-specific file
+#  3. system-wide file
+# Any configuration value is only changed the first time it is set.
+# Thus, host-specific definitions should be at the beginning of the
+# configuration file, and defaults at the end.
+
+# Site-wide defaults for some commonly used options.  For a comprehensive
+# list of available options, their meanings and defaults, please see the
+# ssh_config(5) man page.
+
+# Restrict the following configuration to be limited to this Host.
+{% if remote_hosts -%}
+Host {{ ' '.join(remote_hosts) }}
+{% endif %}
+ForwardAgent no
+ForwardX11 no
+ForwardX11Trusted yes
+RhostsRSAAuthentication no
+RSAAuthentication yes
+PasswordAuthentication {{ password_auth_allowed }}
+HostbasedAuthentication no
+GSSAPIAuthentication no
+GSSAPIDelegateCredentials no
+GSSAPIKeyExchange no
+GSSAPITrustDNS no
+BatchMode no
+CheckHostIP yes
+AddressFamily {{ addr_family }}
+ConnectTimeout 0
+StrictHostKeyChecking ask
+IdentityFile ~/.ssh/identity
+IdentityFile ~/.ssh/id_rsa
+IdentityFile ~/.ssh/id_dsa
+# The port at the destination should be defined
+{% for port in ports -%}
+Port {{ port }}
+{% endfor %}
+Protocol 2
+Cipher 3des
+{% if ciphers -%}
+Ciphers {{ ciphers }}
+{%- endif %}
+{% if macs -%}
+MACs {{ macs }}
+{%- endif %}
+{% if kexs -%}
+KexAlgorithms {{ kexs }}
+{%- endif %}
+EscapeChar ~
+Tunnel no
+TunnelDevice any:any
+PermitLocalCommand no
+VisualHostKey no
+RekeyLimit 1G 1h
+SendEnv LANG LC_*
+HashKnownHosts yes
+{% if roaming -%}
+UseRoaming {{ roaming }}
+{% endif %}
diff --git a/charmhelpers/contrib/hardening/ssh/templates/sshd_config b/charmhelpers/contrib/hardening/ssh/templates/sshd_config
new file mode 100644
index 0000000000000000000000000000000000000000..5f87298a8119bcab1d2578bcaefd068e5af167c4
--- /dev/null
+++ b/charmhelpers/contrib/hardening/ssh/templates/sshd_config
@@ -0,0 +1,159 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+#          be overwritten.
+###############################################################################
+# Package generated configuration file
+# See the sshd_config(5) manpage for details
+
+# What ports, IPs and protocols we listen for
+{% for port in ports -%}
+Port {{ port }}
+{% endfor -%}
+AddressFamily {{ addr_family }}
+# Use these options to restrict which interfaces/protocols sshd will bind to
+{% if ssh_ip -%}
+{% for ip in ssh_ip -%}
+ListenAddress {{ ip }}
+{% endfor %}
+{%- else -%}
+ListenAddress ::
+ListenAddress 0.0.0.0
+{% endif -%}
+Protocol 2
+{% if ciphers -%}
+Ciphers {{ ciphers }}
+{% endif -%}
+{% if macs -%}
+MACs {{ macs }}
+{% endif -%}
+{% if kexs -%}
+KexAlgorithms {{ kexs }}
+{% endif -%}
+# HostKeys for protocol version 2
+{% for keyfile in host_key_files -%}
+HostKey {{ keyfile }}
+{% endfor -%}
+
+# Privilege Separation is turned on for security
+{% if use_priv_sep -%}
+UsePrivilegeSeparation {{ use_priv_sep }}
+{% endif -%}
+
+# Lifetime and size of ephemeral version 1 server key
+KeyRegenerationInterval 3600
+ServerKeyBits 1024
+
+# Logging
+SyslogFacility AUTH
+LogLevel VERBOSE
+
+# Authentication:
+LoginGraceTime 30s
+{% if allow_root_with_key -%}
+PermitRootLogin without-password
+{% else -%}
+PermitRootLogin no
+{% endif %}
+PermitTunnel no
+PermitUserEnvironment no
+StrictModes yes
+
+RSAAuthentication yes
+PubkeyAuthentication yes
+AuthorizedKeysFile %h/.ssh/authorized_keys
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts yes
+# For this to work you will also need host keys in /etc/ssh_known_hosts
+RhostsRSAAuthentication no
+# similar for protocol version 2
+HostbasedAuthentication no
+# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
+IgnoreUserKnownHosts yes
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords no
+
+# Change to yes to enable challenge-response passwords (beware issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication no
+
+# Change to no to disable tunnelled clear text passwords
+PasswordAuthentication {{ password_authentication }}
+
+# Kerberos options
+KerberosAuthentication no
+KerberosGetAFSToken no
+KerberosOrLocalPasswd no
+KerberosTicketCleanup yes
+
+# GSSAPI options
+GSSAPIAuthentication no
+GSSAPICleanupCredentials yes
+
+X11Forwarding {{ allow_x11_forwarding }}
+X11DisplayOffset 10
+X11UseLocalhost yes
+GatewayPorts no
+PrintMotd {{ print_motd }}
+PrintLastLog {{ print_last_log }}
+TCPKeepAlive no
+UseLogin no
+
+ClientAliveInterval {{ client_alive_interval }}
+ClientAliveCountMax {{ client_alive_count }}
+AllowTcpForwarding {{ allow_tcp_forwarding }}
+AllowAgentForwarding {{ allow_agent_forwarding }}
+
+MaxStartups 10:30:100
+#Banner /etc/issue.net
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication.  Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM {{ use_pam }}
+
+{% if deny_users -%}
+DenyUsers {{ deny_users }}
+{% endif -%}
+{% if allow_users -%}
+AllowUsers {{ allow_users }}
+{% endif -%}
+{% if deny_groups -%}
+DenyGroups {{ deny_groups }}
+{% endif -%}
+{% if allow_groups -%}
+AllowGroups {{ allow_groups }}
+{% endif -%}
+UseDNS {{ use_dns }}
+MaxAuthTries {{ max_auth_tries }}
+MaxSessions {{ max_sessions }}
+
+{% if sftp_enable -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+Subsystem sftp internal-sftp -l VERBOSE
+
+## These lines must appear at the *end* of sshd_config
+Match Group {{ sftp_group }}
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory {{ sftp_chroot }}
+{% else -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+## These lines must appear at the *end* of sshd_config
+Match Group sftponly
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory /sftpchroot/home/%u
+{% endif %}
diff --git a/charmhelpers/contrib/hardening/templating.py b/charmhelpers/contrib/hardening/templating.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b6765f7edeee4bed739fd354c6f7bdf0a8c952e
--- /dev/null
+++ b/charmhelpers/contrib/hardening/templating.py
@@ -0,0 +1,73 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import six
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    WARNING,
+)
+
+try:
+    from jinja2 import FileSystemLoader, Environment
+except ImportError:
+    from charmhelpers.fetch import apt_install
+    from charmhelpers.fetch import apt_update
+    apt_update(fatal=True)
+    if six.PY2:
+        apt_install('python-jinja2', fatal=True)
+    else:
+        apt_install('python3-jinja2', fatal=True)
+    from jinja2 import FileSystemLoader, Environment
+
+
+# NOTE: function separated from main rendering code to facilitate easier
+#       mocking in unit tests.
+def write(path, data):
+    with open(path, 'wb') as out:
+        out.write(data)
+
+
+def get_template_path(template_dir, path):
+    """Returns the template file which would be used to render the path.
+
+    The path to the template file is returned.
+    :param template_dir: the directory the templates are located in
+    :param path: the file path to be written to.
+    :returns: path to the template file
+    """
+    return os.path.join(template_dir, os.path.basename(path))
+
+
+def render_and_write(template_dir, path, context):
+    """Renders the specified template into the file.
+
+    :param template_dir: the directory to load the template from
+    :param path: the path to write the templated contents to
+    :param context: the parameters to pass to the rendering engine
+    """
+    env = Environment(loader=FileSystemLoader(template_dir))
+    template_file = os.path.basename(path)
+    template = env.get_template(template_file)
+    log('Rendering from template: %s' % template.name, level=DEBUG)
+    rendered_content = template.render(context)
+    if not rendered_content:
+        log("Render returned None - skipping '%s'" % path,
+            level=WARNING)
+        return
+
+    write(path, rendered_content.encode('utf-8').strip())
+    log('Wrote template %s' % path, level=DEBUG)
diff --git a/charmhelpers/contrib/hardening/utils.py b/charmhelpers/contrib/hardening/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff7485c28c8748ba366dba54f1e3b8f7e6a7c619
--- /dev/null
+++ b/charmhelpers/contrib/hardening/utils.py
@@ -0,0 +1,155 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import glob
+import grp
+import os
+import pwd
+import six
+import yaml
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    INFO,
+    WARNING,
+)
+
+
+# Global settings cache. Since each hook fire entails a fresh module import it
+# is safe to hold this in memory and not risk missing config changes (since
+# they will result in a new hook fire and thus re-import).
+__SETTINGS__ = {}
+
+
+def _get_defaults(modules):
+    """Load the default config for the provided modules.
+
+    :param modules: stack modules config defaults to lookup.
+    :returns: modules default config dictionary.
+    """
+    default = os.path.join(os.path.dirname(__file__),
+                           'defaults/%s.yaml' % (modules))
+    return yaml.safe_load(open(default))
+
+
+def _get_schema(modules):
+    """Load the config schema for the provided modules.
+
+    NOTE: this schema is intended to have a 1-1 relationship with the keys
+    in the default config and is used as a means to verify valid overrides
+    provided by the user.
+
+    :param modules: stack modules config schema to lookup.
+    :returns: modules default schema dictionary.
+    """
+    schema = os.path.join(os.path.dirname(__file__),
+                          'defaults/%s.yaml.schema' % (modules))
+    return yaml.safe_load(open(schema))
+
+
+def _get_user_provided_overrides(modules):
+    """Load user-provided config overrides.
+
+    :param modules: stack modules to lookup in user overrides yaml file.
+    :returns: overrides dictionary.
+    """
+    overrides = os.path.join(os.environ['JUJU_CHARM_DIR'],
+                             'hardening.yaml')
+    if os.path.exists(overrides):
+        log("Found user-provided config overrides file '%s'" %
+            (overrides), level=DEBUG)
+        settings = yaml.safe_load(open(overrides))
+        if settings and settings.get(modules):
+            log("Applying '%s' overrides" % (modules), level=DEBUG)
+            return settings.get(modules)
+
+        log("No overrides found for '%s'" % (modules), level=DEBUG)
+    else:
+        log("No hardening config overrides file '%s' found in charm "
+            "root dir" % (overrides), level=DEBUG)
+
+    return {}
+
+
+def _apply_overrides(settings, overrides, schema):
+    """Get overrides config overlayed onto modules defaults.
+
+    :param modules: require stack modules config.
+    :returns: dictionary of modules config with user overrides applied.
+    """
+    if overrides:
+        for k, v in six.iteritems(overrides):
+            if k in schema:
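+                # A None value in the schema marks a leaf setting; accept
+                # the override as-is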
+                if schema[k] is None:
+                    settings[k] = v
+                elif type(schema[k]) is dict:
+                    settings[k] = _apply_overrides(settings[k], overrides[k],
+                                                   schema[k])
+                else:
+                    raise Exception("Unexpected type found in schema '%s'" %
+                                    type(schema[k]))
+            else:
+                log("Unknown override key '%s' - ignoring" % (k), level=INFO)
+
+    return settings
+
+
+def get_settings(modules):
+    global __SETTINGS__
+    if modules in __SETTINGS__:
+        return __SETTINGS__[modules]
+
+    schema = _get_schema(modules)
+    settings = _get_defaults(modules)
+    overrides = _get_user_provided_overrides(modules)
+    __SETTINGS__[modules] = _apply_overrides(settings, overrides, schema)
+    return __SETTINGS__[modules]
+
+
+def ensure_permissions(path, user, group, permissions, maxdepth=-1):
+    """Ensure permissions for path.
+
+    If path is a file, apply to file and return. If path is a directory,
+    apply recursively (if required) to directory contents and return.
+
+    :param path: file or directory path to apply permissions to
+    :param user: user name
+    :param group: group name
+    :param permissions: octal permissions
+    :param maxdepth: maximum recursion depth. A negative maxdepth allows
+                     infinite recursion and maxdepth=0 means no recursion.
+    :returns: None
+    """
+    if not os.path.exists(path):
+        log("File '%s' does not exist - cannot set permissions" % (path),
+            level=WARNING)
+        return
+
+    _user = pwd.getpwnam(user)
+    os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid)
+    os.chmod(path, permissions)
+
+    if maxdepth == 0:
+        log("Max recursion depth reached - skipping further recursion",
+            level=DEBUG)
+        return
+    elif maxdepth > 0:
+        maxdepth -= 1
+
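+    # Recurse into directory contents, decrementing maxdepth at each level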
+    if os.path.isdir(path):
+        contents = glob.glob("%s/*" % (path))
+        for c in contents:
+            ensure_permissions(c, user=user, group=group,
+                               permissions=permissions, maxdepth=maxdepth)
diff --git a/charmhelpers/contrib/network/__init__.py b/charmhelpers/contrib/network/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/charmhelpers/contrib/network/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/charmhelpers/contrib/network/ip.py b/charmhelpers/contrib/network/ip.py
new file mode 100644
index 0000000000000000000000000000000000000000..63e91cca510e4a498a871404784c45933068c73f
--- /dev/null
+++ b/charmhelpers/contrib/network/ip.py
@@ -0,0 +1,603 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import glob
+import re
+import subprocess
+import six
+import socket
+
+from functools import partial
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import (
+    config,
+    log,
+    network_get_primary_address,
+    unit_get,
+    WARNING,
+    NoNetworkBinding,
+)
+
+from charmhelpers.core.host import (
+    lsb_release,
+    CompareHostReleases,
+)
+
+try:
+    import netifaces
+except ImportError:
+    apt_update(fatal=True)
+    if six.PY2:
+        apt_install('python-netifaces', fatal=True)
+    else:
+        apt_install('python3-netifaces', fatal=True)
+    import netifaces
+
+try:
+    import netaddr
+except ImportError:
+    apt_update(fatal=True)
+    if six.PY2:
+        apt_install('python-netaddr', fatal=True)
+    else:
+        apt_install('python3-netaddr', fatal=True)
+    import netaddr
+
+
+def _validate_cidr(network):
+    try:
+        netaddr.IPNetwork(network)
+    except (netaddr.core.AddrFormatError, ValueError):
+        raise ValueError("Network (%s) is not in CIDR presentation format" %
+                         network)
+
+
+def no_ip_found_error_out(network):
+    errmsg = ("No IP address found in network(s): %s" % network)
+    raise ValueError(errmsg)
+
+
+def _get_ipv6_network_from_address(address):
+    """Get an netaddr.IPNetwork for the given IPv6 address
+    :param address: a dict as returned by netifaces.ifaddresses
+    :returns netaddr.IPNetwork: None if the address is a link local or loopback
+    address
+    """
+    if address['addr'].startswith('fe80') or address['addr'] == "::1":
+        return None
+
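+    # netifaces may report the netmask with a prefix length suffix
+    # (e.g. 'ffff:ffff:ffff:ffff::/64'); prefer the prefix when present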
+    prefix = address['netmask'].split("/")
+    if len(prefix) > 1:
+        netmask = prefix[1]
+    else:
+        netmask = address['netmask']
+    return netaddr.IPNetwork("%s/%s" % (address['addr'],
+                                        netmask))
+
+
+def get_address_in_network(network, fallback=None, fatal=False):
+    """Get an IPv4 or IPv6 address within the network from the host.
+
+    :param network (str): CIDR presentation format. For example,
+        '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
+    :param fallback (str): If no address is found, return fallback.
+    :param fatal (boolean): If no address is found, fallback is not set
+        and fatal is True, raise a ValueError.
+    """
+    if network is None:
+        if fallback is not None:
+            return fallback
+
+        if fatal:
+            no_ip_found_error_out(network)
+        else:
+            return None
+
+    networks = network.split() or [network]
+    for network in networks:
+        _validate_cidr(network)
+        network = netaddr.IPNetwork(network)
+        for iface in netifaces.interfaces():
+            try:
+                addresses = netifaces.ifaddresses(iface)
+            except ValueError:
+                # If an interface disappeared between the
+                # netifaces.interfaces() call and now, its addresses are gone
+                continue
+            if network.version == 4 and netifaces.AF_INET in addresses:
+                for addr in addresses[netifaces.AF_INET]:
+                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
+                                                        addr['netmask']))
+                    if cidr in network:
+                        return str(cidr.ip)
+
+            if network.version == 6 and netifaces.AF_INET6 in addresses:
+                for addr in addresses[netifaces.AF_INET6]:
+                    cidr = _get_ipv6_network_from_address(addr)
+                    if cidr and cidr in network:
+                        return str(cidr.ip)
+
+    if fallback is not None:
+        return fallback
+
+    if fatal:
+        no_ip_found_error_out(network)
+
+    return None
+
+
+def is_ipv6(address):
+    """Determine whether provided address is IPv6 or not."""
+    try:
+        address = netaddr.IPAddress(address)
+    except netaddr.AddrFormatError:
+        # probably a hostname - so not an address at all!
+        return False
+
+    return address.version == 6
+
+
+def is_address_in_network(network, address):
+    """
+    Determine whether the provided address is within a network range.
+
+    :param network (str): CIDR presentation format. For example,
+        '192.168.1.0/24'.
+    :param address: An individual IPv4 or IPv6 address without a net
+        mask or subnet prefix. For example, '192.168.1.1'.
+    :returns boolean: Flag indicating whether address is in network.
+    """
+    try:
+        network = netaddr.IPNetwork(network)
+    except (netaddr.core.AddrFormatError, ValueError):
+        raise ValueError("Network (%s) is not in CIDR presentation format" %
+                         network)
+
+    try:
+        address = netaddr.IPAddress(address)
+    except (netaddr.core.AddrFormatError, ValueError):
+        raise ValueError("Address (%s) is not in correct presentation format" %
+                         address)
+
+    if address in network:
+        return True
+    else:
+        return False
+
+
+def _get_for_address(address, key):
+    """Retrieve an attribute of or the physical interface that
+    the IP address provided could be bound to.
+
+    :param address (str): An individual IPv4 or IPv6 address without a net
+        mask or subnet prefix. For example, '192.168.1.1'.
+    :param key: 'iface' for the physical interface name or an attribute
+        of the configured interface, for example 'netmask'.
+    :returns str: Requested attribute or None if address is not bindable.
+    """
+    address = netaddr.IPAddress(address)
+    for iface in netifaces.interfaces():
+        addresses = netifaces.ifaddresses(iface)
+        if address.version == 4 and netifaces.AF_INET in addresses:
+            addr = addresses[netifaces.AF_INET][0]['addr']
+            netmask = addresses[netifaces.AF_INET][0]['netmask']
+            network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
+            cidr = network.cidr
+            if address in cidr:
+                if key == 'iface':
+                    return iface
+                else:
+                    return addresses[netifaces.AF_INET][0][key]
+
+        if address.version == 6 and netifaces.AF_INET6 in addresses:
+            for addr in addresses[netifaces.AF_INET6]:
+                network = _get_ipv6_network_from_address(addr)
+                if not network:
+                    continue
+
+                cidr = network.cidr
+                if address in cidr:
+                    if key == 'iface':
+                        return iface
+                    elif key == 'netmask' and cidr:
+                        return str(cidr).split('/')[1]
+                    else:
+                        return addr[key]
+    return None
+
+
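+# Convenience partials over _get_for_address, keyed on the requested
+# attribute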
+get_iface_for_address = partial(_get_for_address, key='iface')
+
+
+get_netmask_for_address = partial(_get_for_address, key='netmask')
+
+
+def resolve_network_cidr(ip_address):
+    '''
+    Resolves the full address cidr of an ip_address based on
+    configured network interfaces
+    '''
+    netmask = get_netmask_for_address(ip_address)
+    return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
+
+
+def format_ipv6_addr(address):
+    """If address is IPv6, wrap it in '[]' otherwise return None.
+
+    This is required by most configuration files when specifying IPv6
+    addresses.
+    """
+    if is_ipv6(address):
+        return "[%s]" % address
+
+    return None
+
+
+def is_ipv6_disabled():
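+    # Treat a failed sysctl lookup as IPv6 being unavailable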
+    try:
+        result = subprocess.check_output(
+            ['sysctl', 'net.ipv6.conf.all.disable_ipv6'],
+            stderr=subprocess.STDOUT,
+            universal_newlines=True)
+    except subprocess.CalledProcessError:
+        return True
+
+    return "net.ipv6.conf.all.disable_ipv6 = 1" in result
+
+
+def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
+                   fatal=True, exc_list=None):
+    """Return the assigned IP address for a given interface, if any.
+
+    :param iface: network interface on which address(es) are expected to
+                  be found.
+    :param inet_type: inet address family
+    :param inc_aliases: include alias interfaces in search
+    :param fatal: if True, raise exception if address not found
+    :param exc_list: list of addresses to ignore
+    :return: list of ip addresses
+    """
+    # Extract nic if passed /dev/ethX
+    if '/' in iface:
+        iface = iface.split('/')[-1]
+
+    if not exc_list:
+        exc_list = []
+
+    try:
+        inet_num = getattr(netifaces, inet_type)
+    except AttributeError:
+        raise Exception("Unknown inet type '%s'" % str(inet_type))
+
+    interfaces = netifaces.interfaces()
+    if inc_aliases:
+        ifaces = []
+        for _iface in interfaces:
+            if iface == _iface or _iface.split(':')[0] == iface:
+                ifaces.append(_iface)
+
+        if fatal and not ifaces:
+            raise Exception("Invalid interface '%s'" % iface)
+
+        ifaces.sort()
+    else:
+        if iface not in interfaces:
+            if fatal:
+                raise Exception("Interface '%s' not found " % (iface))
+            else:
+                return []
+
+        else:
+            ifaces = [iface]
+
+    addresses = []
+    for netiface in ifaces:
+        net_info = netifaces.ifaddresses(netiface)
+        if inet_num in net_info:
+            for entry in net_info[inet_num]:
+                if 'addr' in entry and entry['addr'] not in exc_list:
+                    addresses.append(entry['addr'])
+
+    if fatal and not addresses:
+        raise Exception("Interface '%s' doesn't have any %s addresses." %
+                        (iface, inet_type))
+
+    return sorted(addresses)
+
+
+get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
+
+
+def get_iface_from_addr(addr):
+    """Work out on which interface the provided address is configured."""
+    for iface in netifaces.interfaces():
+        addresses = netifaces.ifaddresses(iface)
+        for inet_type in addresses:
+            for _addr in addresses[inet_type]:
+                _addr = _addr['addr']
+                # Strip any link-local zone index (e.g. 'fe80::1%eth0')
+                ll_key = re.compile("(.+)%.*")
+                raw = re.match(ll_key, _addr)
+                if raw:
+                    _addr = raw.group(1)
+
+                if _addr == addr:
+                    log("Address '%s' is configured on iface '%s'" %
+                        (addr, iface))
+                    return iface
+
+    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
+    raise Exception(msg)
+
+
+def sniff_iface(f):
+    """Ensure decorated function is called with a value for iface.
+
+    If no iface provided, inject net iface inferred from unit private address.
+    """
+    def iface_sniffer(*args, **kwargs):
+        if not kwargs.get('iface', None):
+            kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
+
+        return f(*args, **kwargs)
+
+    return iface_sniffer
+
+
+@sniff_iface
+def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
+                  dynamic_only=True):
+    """Get assigned IPv6 address for a given interface.
+
+    Returns list of addresses found. If no address found, returns empty list.
+
+    If iface is None, we infer the current primary interface by doing a reverse
+    lookup on the unit private-address.
+
+    We currently only support scope global IPv6 addresses, i.e. non-temporary
+    addresses. If no global IPv6 address is found, return the first one found
+    in the ipv6 address list.
+
+    :param iface: network interface on which ipv6 address(es) are expected to
+                  be found.
+    :param inc_aliases: include alias interfaces in search
+    :param fatal: if True, raise exception if address not found
+    :param exc_list: list of addresses to ignore
+    :param dynamic_only: only recognise dynamic addresses
+    :return: list of ipv6 addresses
+    """
+    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
+                               inc_aliases=inc_aliases, fatal=fatal,
+                               exc_list=exc_list)
+
+    if addresses:
+        global_addrs = []
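+        # Link-local addresses yield the EUI-64 suffix used below to
+        # validate dynamic global addresses; all others are global candidates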
+        for addr in addresses:
+            key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
+            m = re.match(key_scope_link_local, addr)
+            if m:
+                eui_64_mac = m.group(1)
+                iface = m.group(2)
+            else:
+                global_addrs.append(addr)
+
+        if global_addrs:
+            # Make sure any found global addresses are not temporary
+            cmd = ['ip', 'addr', 'show', iface]
+            out = subprocess.check_output(
+                cmd).decode('UTF-8', errors='replace')
+            if dynamic_only:
+                key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*")
+            else:
+                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
+
+            addrs = []
+            for line in out.split('\n'):
+                line = line.strip()
+                m = re.match(key, line)
+                if m and 'temporary' not in line:
+                    # Return the first valid address we find
+                    for addr in global_addrs:
+                        if m.group(1) == addr:
+                            if not dynamic_only or \
+                                    m.group(1).endswith(eui_64_mac):
+                                addrs.append(addr)
+
+            if addrs:
+                return addrs
+
+    if fatal:
+        raise Exception("Interface '%s' does not have a scope global "
+                        "non-temporary ipv6 address." % iface)
+
+    return []
+
+
+def get_bridges(vnic_dir='/sys/devices/virtual/net'):
+    """Return a list of bridges on the system."""
+    b_regex = "%s/*/bridge" % vnic_dir
+    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
+
+
+def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
+    """Return a list of nics comprising a given bridge on the system."""
+    brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
+    return [x.split('/')[-1] for x in glob.glob(brif_regex)]
+
+
+def is_bridge_member(nic):
+    """Check if a given nic is a member of a bridge."""
+    for bridge in get_bridges():
+        if nic in get_bridge_nics(bridge):
+            return True
+
+    return False
+
+
+def is_ip(address):
+    """
+    Returns True if address is a valid IP address.
+    """
+    try:
+        # Test to see if already an IPv4/IPv6 address
+        address = netaddr.IPAddress(address)
+        return True
+    except (netaddr.AddrFormatError, ValueError):
+        return False
+
+
+def ns_query(address):
+    try:
+        import dns.resolver
+    except ImportError:
+        if six.PY2:
+            apt_install('python-dnspython', fatal=True)
+        else:
+            apt_install('python3-dnspython', fatal=True)
+        import dns.resolver
+
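+    # dns.name.Name instances imply reverse (PTR) lookups; plain strings
+    # are forward (A) lookups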
+    if isinstance(address, dns.name.Name):
+        rtype = 'PTR'
+    elif isinstance(address, six.string_types):
+        rtype = 'A'
+    else:
+        return None
+
+    try:
+        answers = dns.resolver.query(address, rtype)
+    except dns.resolver.NXDOMAIN:
+        return None
+
+    if answers:
+        return str(answers[0])
+    return None
+
+
+def get_host_ip(hostname, fallback=None):
+    """
+    Resolves the IP for a given hostname, or returns
+    the input if it is already an IP.
+    """
+    if is_ip(hostname):
+        return hostname
+
+    ip_addr = ns_query(hostname)
+    if not ip_addr:
+        try:
+            ip_addr = socket.gethostbyname(hostname)
+        except Exception:
+            log("Failed to resolve hostname '%s'" % (hostname),
+                level=WARNING)
+            return fallback
+    return ip_addr
+
+
+def get_hostname(address, fqdn=True):
+    """
+    Resolves hostname for given IP, or returns the input
+    if it is already a hostname.
+    """
+    if is_ip(address):
+        try:
+            import dns.reversename
+        except ImportError:
+            if six.PY2:
+                apt_install("python-dnspython", fatal=True)
+            else:
+                apt_install("python3-dnspython", fatal=True)
+            import dns.reversename
+
+        rev = dns.reversename.from_address(address)
+        result = ns_query(rev)
+
+        if not result:
+            try:
+                result = socket.gethostbyaddr(address)[0]
+            except Exception:
+                return None
+    else:
+        result = address
+
+    if fqdn:
+        # strip trailing .
+        if result.endswith('.'):
+            return result[:-1]
+        else:
+            return result
+    else:
+        return result.split('.')[0]
+
+
+def port_has_listener(address, port):
+    """
+    Returns True if the address:port is open and being listened to,
+    else False.
+
+    @param address: an IP address or hostname
+    @param port: integer port
+
+    Note: calls 'nc' (netcat) via subprocess; no shell is invoked
+    """
+    cmd = ['nc', '-z', address, str(port)]
+    result = subprocess.call(cmd)
+    return not bool(result)
+
+
+def assert_charm_supports_ipv6():
+    """Check whether we are able to support charms ipv6."""
+    release = lsb_release()['DISTRIB_CODENAME'].lower()
+    if CompareHostReleases(release) < "trusty":
+        raise Exception("IPv6 is not supported in the charms for Ubuntu "
+                        "versions less than Trusty 14.04")
+
+
+def get_relation_ip(interface, cidr_network=None):
+    """Return this unit's IP for the given interface.
+
+    Allow for an arbitrary interface to use with network-get to select an IP.
+    Handle all address selection options including passed cidr network and
+    IPv6.
+
+    Usage: get_relation_ip('amqp', cidr_network='10.0.0.0/8')
+
+    @param interface: string name of the relation.
+    @param cidr_network: string CIDR Network to select an address from.
+    @raises Exception if prefer-ipv6 is configured but IPv6 unsupported.
+    @returns IPv6 or IPv4 address
+    """
+    # Select the interface address first
+    # For possible use as a fallback below with get_address_in_network
+    try:
+        # Get the interface specific IP
+        address = network_get_primary_address(interface)
+    except NotImplementedError:
+        # If network-get is not available
+        address = get_host_ip(unit_get('private-address'))
+    except NoNetworkBinding:
+        log("No network binding for {}".format(interface), WARNING)
+        address = get_host_ip(unit_get('private-address'))
+
+    if config('prefer-ipv6'):
+        # Currently IPv6 has priority, eventually we want IPv6 to just be
+        # another network space.
+        assert_charm_supports_ipv6()
+        return get_ipv6_addr()[0]
+    elif cidr_network:
+        # If a specific CIDR network is passed get the address from that
+        # network.
+        return get_address_in_network(cidr_network, address)
+
+    # Return the interface address
+    return address
diff --git a/charmhelpers/contrib/openstack/__init__.py b/charmhelpers/contrib/openstack/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/charmhelpers/contrib/openstack/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/charmhelpers/contrib/openstack/alternatives.py b/charmhelpers/contrib/openstack/alternatives.py
new file mode 100644
index 0000000000000000000000000000000000000000..547de09c6d818772191b519618fa32b08b0e6eff
--- /dev/null
+++ b/charmhelpers/contrib/openstack/alternatives.py
@@ -0,0 +1,44 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+''' Helper for managing alternatives for file conflict resolution '''
+
+import subprocess
+import shutil
+import os
+
+
+def install_alternative(name, target, source, priority=50):
+    ''' Install alternative configuration '''
+    if (os.path.exists(target) and not os.path.islink(target)):
+        # Move existing file/directory away before installing
+        shutil.move(target, '{}.bak'.format(target))
+    cmd = [
+        'update-alternatives', '--force', '--install',
+        target, name, source, str(priority)
+    ]
+    subprocess.check_call(cmd)
+
+
+def remove_alternative(name, source):
+    """Remove an installed alternative configuration file
+
+    :param name: string name of the alternative to remove
+    :param source: string full path to alternative to remove
+    """
+    cmd = [
+        'update-alternatives', '--remove',
+        name, source
+    ]
+    subprocess.check_call(cmd)
diff --git a/charmhelpers/contrib/openstack/exceptions.py b/charmhelpers/contrib/openstack/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..f85ae4f4cdbb6567cbdd896338bf88fbf3c9c0ec
--- /dev/null
+++ b/charmhelpers/contrib/openstack/exceptions.py
@@ -0,0 +1,21 @@
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class OSContextError(Exception):
+    """Raised when an error occurs during context generation.
+
+    This exception is principally used in contrib.openstack.context
+    """
+    pass
diff --git a/charmhelpers/contrib/openstack/ha/__init__.py b/charmhelpers/contrib/openstack/ha/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b088de84e4b288b551603816fc10eebfa7b1503
--- /dev/null
+++ b/charmhelpers/contrib/openstack/ha/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/charmhelpers/contrib/openstack/ha/utils.py b/charmhelpers/contrib/openstack/ha/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5cbdf535d495a09a0b91f41fdda09862e34140d
--- /dev/null
+++ b/charmhelpers/contrib/openstack/ha/utils.py
@@ -0,0 +1,348 @@
+# Copyright 2014-2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Copyright 2016 Canonical Ltd.
+#
+# Authors:
+#  Openstack Charmers <
+#
+
+"""
+Helpers for high availability.
+"""
+
+import hashlib
+import json
+
+import re
+
+from charmhelpers.core.hookenv import (
+    expected_related_units,
+    log,
+    relation_set,
+    charm_name,
+    config,
+    status_set,
+    DEBUG,
+)
+
+from charmhelpers.core.host import (
+    lsb_release
+)
+
+from charmhelpers.contrib.openstack.ip import (
+    resolve_address,
+    is_ipv6,
+)
+
+from charmhelpers.contrib.network.ip import (
+    get_iface_for_address,
+    get_netmask_for_address,
+)
+
+from charmhelpers.contrib.hahelpers.cluster import (
+    get_hacluster_config
+)
+
+JSON_ENCODE_OPTIONS = dict(
+    sort_keys=True,
+    allow_nan=False,
+    indent=None,
+    separators=(',', ':'),
+)
+
+VIP_GROUP_NAME = 'grp_{service}_vips'
+DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
+
+
+class DNSHAException(Exception):
+    """Raised when an error occurs setting up DNS HA
+    """
+
+    pass
+
+
+def update_dns_ha_resource_params(resources, resource_params,
+                                  relation_id=None,
+                                  crm_ocf='ocf:maas:dns'):
+    """ Configure DNS-HA resources based on provided configuration and
+    update resource dictionaries for the HA relation.
+
+    @param resources: Pointer to dictionary of resources.
+                      Usually instantiated in ha_joined().
+    @param resource_params: Pointer to dictionary of resource parameters.
+                            Usually instantiated in ha_joined()
+    @param relation_id: Relation ID of the ha relation
+    @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
+                    DNS HA
+    """
+    _relation_data = {'resources': {}, 'resource_params': {}}
+    update_hacluster_dns_ha(charm_name(),
+                            _relation_data,
+                            crm_ocf)
+    resources.update(_relation_data['resources'])
+    resource_params.update(_relation_data['resource_params'])
+    relation_set(relation_id=relation_id, groups=_relation_data['groups'])
+
+
+def assert_charm_supports_dns_ha():
+    """Validate prerequisites for DNS HA
+    The MAAS client is only available on Xenial or greater
+
+    :raises DNSHAException: if release is < 16.04
+    """
+    if lsb_release().get('DISTRIB_RELEASE') < '16.04':
+        msg = ('DNS HA is only supported on 16.04 and greater '
+               'versions of Ubuntu.')
+        status_set('blocked', msg)
+        raise DNSHAException(msg)
+    return True
+
+
+def expect_ha():
+    """ Determine if the unit expects to be in HA
+
+    Check juju goal-state to see if an ha relation is expected, and check for
+    VIP or dns-ha settings, which indicate the unit should expect to be
+    related to hacluster.
+
+    @returns boolean
+    """
+    ha_related_units = []
+    try:
+        ha_related_units = list(expected_related_units(reltype='ha'))
+    except (NotImplementedError, KeyError):
+        pass
+    return len(ha_related_units) > 0 or config('vip') or config('dns-ha')
+
+
+def generate_ha_relation_data(service,
+                              extra_settings=None,
+                              haproxy_enabled=True):
+    """ Generate relation data for ha relation
+
+    Based on configuration options and unit interfaces, generate a json
+    encoded dict of relation data items for the hacluster relation,
+    providing configuration for DNS HA or VIPs + haproxy clone sets.
+
+    Example of supplying additional settings::
+
+        COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips'
+        AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth'
+        AGENT_CA_PARAMS = 'op monitor interval="5s"'
+
+        ha_console_settings = {
+            'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
+            'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
+            'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
+            'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}}
+        generate_ha_relation_data('nova', extra_settings=ha_console_settings)
+
+
+    @param service: Name of the service being configured
+    @param extra_settings: Dict of additional resource data
+    @returns dict: json encoded data for use with relation_set
+    """
+    _relation_data = {'resources': {}, 'resource_params': {}}
+
+    if haproxy_enabled:
+        _meta = 'meta migration-threshold="INFINITY" failure-timeout="5s"'
+        _haproxy_res = 'res_{}_haproxy'.format(service)
+        _relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'}
+        _relation_data['resource_params'] = {
+            _haproxy_res: '{} op monitor interval="5s"'.format(_meta)
+        }
+        _relation_data['init_services'] = {_haproxy_res: 'haproxy'}
+        _relation_data['clones'] = {
+            'cl_{}_haproxy'.format(service): _haproxy_res
+        }
+
+    if extra_settings:
+        for k, v in extra_settings.items():
+            if _relation_data.get(k):
+                _relation_data[k].update(v)
+            else:
+                _relation_data[k] = v
+
+    if config('dns-ha'):
+        update_hacluster_dns_ha(service, _relation_data)
+    else:
+        update_hacluster_vip(service, _relation_data)
+
+    return {
+        'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
+        for k, v in _relation_data.items() if v
+    }
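+
+
+# The data returned by generate_ha_relation_data() for a service named
+# 'ceph' with haproxy enabled looks roughly like this (values abbreviated
+# and illustrative only):
+#
+#     {
+#         'json_resources': '{"res_ceph_haproxy":"lsb:haproxy"}',
+#         'json_resource_params': '{"res_ceph_haproxy":"..."}',
+#         'json_init_services': '{"res_ceph_haproxy":"haproxy"}',
+#         'json_clones': '{"cl_ceph_haproxy":"res_ceph_haproxy"}',
+#     }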
+
+
+def update_hacluster_dns_ha(service, relation_data,
+                            crm_ocf='ocf:maas:dns'):
+    """ Configure DNS-HA resources based on provided configuration
+
+    @param service: Name of the service being configured
+    @param relation_data: Pointer to dictionary of relation data.
+    @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
+                    DNS HA
+    """
+    # Validate the charm environment for DNS HA
+    assert_charm_supports_dns_ha()
+
+    settings = ['os-admin-hostname', 'os-internal-hostname',
+                'os-public-hostname', 'os-access-hostname']
+
+    # Check which DNS settings are set and update dictionaries
+    hostname_group = []
+    for setting in settings:
+        hostname = config(setting)
+        if hostname is None:
+            log('DNS HA: Hostname setting {} is None. Ignoring.'
+                ''.format(setting),
+                DEBUG)
+            continue
+        m = re.search('os-(.+?)-hostname', setting)
+        if m:
+            endpoint_type = m.group(1)
+            # resolve_address's ADDRESS_MAP uses 'int' not 'internal'
+            if endpoint_type == 'internal':
+                endpoint_type = 'int'
+        else:
+            msg = ('Unexpected DNS hostname setting: {}. '
+                   'Cannot determine endpoint_type name'
+                   ''.format(setting))
+            status_set('blocked', msg)
+            raise DNSHAException(msg)
+
+        hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type)
+        if hostname_key in hostname_group:
+            log('DNS HA: Resource {}: {} already exists in '
+                'hostname group - skipping'.format(hostname_key, hostname),
+                DEBUG)
+            continue
+
+        hostname_group.append(hostname_key)
+        relation_data['resources'][hostname_key] = crm_ocf
+        relation_data['resource_params'][hostname_key] = (
+            'params fqdn="{}" ip_address="{}"'
+            .format(hostname, resolve_address(endpoint_type=endpoint_type,
+                                              override=False)))
+
+    if len(hostname_group) >= 1:
+        log('DNS HA: Hostname group is set with {} as members. '
+            'Informing the ha relation'.format(' '.join(hostname_group)),
+            DEBUG)
+        relation_data['groups'] = {
+            DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group)
+        }
+    else:
+        msg = 'DNS HA: Hostname group has no members.'
+        status_set('blocked', msg)
+        raise DNSHAException(msg)
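+
+
+# For example (illustrative only): with os-public-hostname set to
+# 'ks.example.com' on a 'keystone' service, the entries added would be of
+# the form:
+#
+#     relation_data['resources']['res_keystone_public_hostname'] = \
+#         'ocf:maas:dns'
+#     relation_data['resource_params']['res_keystone_public_hostname'] = \
+#         'params fqdn="ks.example.com" ip_address="10.0.0.10"'
+#     relation_data['groups'] = {
+#         'grp_keystone_hostnames': 'res_keystone_public_hostname'}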
+
+
+def get_vip_settings(vip):
+    """Calculate which nic is on the correct network for the given vip.
+
+    If nic or netmask discovery fail then fallback to using charm supplied
+    config. If fallback is used this is indicated via the fallback variable.
+
+    @param vip: VIP to lookup nic and cidr for.
+    @returns (str, str, bool): eg (iface, netmask, fallback)
+    """
+    iface = get_iface_for_address(vip)
+    netmask = get_netmask_for_address(vip)
+    fallback = False
+    if iface is None:
+        iface = config('vip_iface')
+        fallback = True
+    if netmask is None:
+        netmask = config('vip_cidr')
+        fallback = True
+    return iface, netmask, fallback
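+
+
+# Example (illustrative): for a VIP on a directly attached network,
+# get_vip_settings('10.5.0.100') might return ('eth0', '255.255.255.0',
+# False); if nic/netmask discovery fails, the charm-supplied vip_iface and
+# vip_cidr config options are returned instead, with fallback set to True.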
+
+
+def update_hacluster_vip(service, relation_data):
+    """ Configure VIP resources based on provided configuration
+
+    @param service: Name of the service being configured
+    @param relation_data: Pointer to dictionary of relation data.
+    """
+    cluster_config = get_hacluster_config()
+    vip_group = []
+    vips_to_delete = []
+    for vip in cluster_config['vip'].split():
+        if is_ipv6(vip):
+            res_vip = 'ocf:heartbeat:IPv6addr'
+            vip_params = 'ipv6addr'
+        else:
+            res_vip = 'ocf:heartbeat:IPaddr2'
+            vip_params = 'ip'
+
+        iface, netmask, fallback = get_vip_settings(vip)
+
+        vip_monitoring = 'op monitor timeout="20s" interval="10s" depth="0"'
+        if iface is not None:
+            # NOTE(jamespage): Delete old VIP resources
+            # Old style naming encoding iface in name
+            # does not work well in environments where
+            # interface/subnet wiring is not consistent
+            vip_key = 'res_{}_{}_vip'.format(service, iface)
+            if vip_key in vips_to_delete:
+                vip_key = '{}_{}'.format(vip_key, vip_params)
+            vips_to_delete.append(vip_key)
+
+            vip_key = 'res_{}_{}_vip'.format(
+                service,
+                hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
+
+            relation_data['resources'][vip_key] = res_vip
+            # NOTE(jamespage):
+            # Use option-provided vip params if these were used
+            # instead of auto-detected values
+            if fallback:
+                relation_data['resource_params'][vip_key] = (
+                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
+                    'nic="{iface}" {vip_monitoring}'.format(
+                        ip=vip_params,
+                        vip=vip,
+                        iface=iface,
+                        netmask=netmask,
+                        vip_monitoring=vip_monitoring))
+            else:
+                # NOTE(jamespage):
+                # let heartbeat figure out which interface and
+                # netmask to configure, which works nicely
+                # when network interface naming is not
+                # consistent across units.
+                relation_data['resource_params'][vip_key] = (
+                    'params {ip}="{vip}" {vip_monitoring}'.format(
+                        ip=vip_params,
+                        vip=vip,
+                        vip_monitoring=vip_monitoring))
+
+            vip_group.append(vip_key)
+
+    if vips_to_delete:
+        try:
+            relation_data['delete_resources'].extend(vips_to_delete)
+        except KeyError:
+            relation_data['delete_resources'] = vips_to_delete
+
+    if len(vip_group) >= 1:
+        key = VIP_GROUP_NAME.format(service=service)
+        try:
+            relation_data['groups'][key] = ' '.join(vip_group)
+        except KeyError:
+            relation_data['groups'] = {
+                key: ' '.join(vip_group)
+            }
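+
+
+# A sketch of what update_hacluster_vip() adds for a single IPv4 VIP on a
+# 'ceph' service (the hash suffix and addresses are illustrative only):
+#
+#     relation_data['resources']['res_ceph_1a2b3c4_vip'] = \
+#         'ocf:heartbeat:IPaddr2'
+#     relation_data['resource_params']['res_ceph_1a2b3c4_vip'] = \
+#         'params ip="10.5.0.100" op monitor timeout="20s" interval="10s" depth="0"'
+#     relation_data['groups'] = {'grp_ceph_vips': 'res_ceph_1a2b3c4_vip'}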
diff --git a/charmhelpers/contrib/openstack/ip.py b/charmhelpers/contrib/openstack/ip.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8c94c56771e8edd848e5f3ebcefb2f372286dd6
--- /dev/null
+++ b/charmhelpers/contrib/openstack/ip.py
@@ -0,0 +1,235 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.core.hookenv import (
+    NoNetworkBinding,
+    config,
+    unit_get,
+    service_name,
+    network_get_primary_address,
+)
+from charmhelpers.contrib.network.ip import (
+    get_address_in_network,
+    is_address_in_network,
+    is_ipv6,
+    get_ipv6_addr,
+    resolve_network_cidr,
+)
+from charmhelpers.contrib.hahelpers.cluster import is_clustered
+
+PUBLIC = 'public'
+INTERNAL = 'int'
+ADMIN = 'admin'
+ACCESS = 'access'
+
+# TODO: reconcile 'int' vs 'internal' binding names
+ADDRESS_MAP = {
+    PUBLIC: {
+        'binding': 'public',
+        'config': 'os-public-network',
+        'fallback': 'public-address',
+        'override': 'os-public-hostname',
+    },
+    INTERNAL: {
+        'binding': 'internal',
+        'config': 'os-internal-network',
+        'fallback': 'private-address',
+        'override': 'os-internal-hostname',
+    },
+    ADMIN: {
+        'binding': 'admin',
+        'config': 'os-admin-network',
+        'fallback': 'private-address',
+        'override': 'os-admin-hostname',
+    },
+    ACCESS: {
+        'binding': 'access',
+        'config': 'access-network',
+        'fallback': 'private-address',
+        'override': 'os-access-hostname',
+    },
+    # Note (thedac) bridge to begin the reconciliation between 'int' vs
+    # 'internal' binding names
+    'internal': {
+        'binding': 'internal',
+        'config': 'os-internal-network',
+        'fallback': 'private-address',
+        'override': 'os-internal-hostname',
+    },
+}
+
+
+def canonical_url(configs, endpoint_type=PUBLIC):
+    """Returns the correct HTTP URL to this host given the state of HTTPS
+    configuration, hacluster and charm configuration.
+
+    :param configs: OSTemplateRenderer config templating object to inspect
+                    for a complete https context.
+    :param endpoint_type: str endpoint type to resolve.
+    :returns: str base URL for services on the current service unit.
+    """
+    scheme = _get_scheme(configs)
+
+    address = resolve_address(endpoint_type)
+    if is_ipv6(address):
+        address = "[{}]".format(address)
+
+    return '%s://%s' % (scheme, address)
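+
+
+# Example (illustrative): with a complete https context and a public
+# address of 10.5.100.1, canonical_url(configs, PUBLIC) returns
+# 'https://10.5.100.1'; an IPv6 address would be bracketed, e.g.
+# 'https://[2001:db8::1]'.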
+
+
+def _get_scheme(configs):
+    """Returns the scheme to use for the url (either http or https)
+    depending upon whether https is in the configs value.
+
+    :param configs: OSTemplateRenderer config templating object to inspect
+                    for a complete https context.
+    :returns: either 'http' or 'https' depending on whether https is
+              configured within the configs context.
+    """
+    scheme = 'http'
+    if configs and 'https' in configs.complete_contexts():
+        scheme = 'https'
+    return scheme
+
+
+def _get_address_override(endpoint_type=PUBLIC):
+    """Returns any address overrides that the user has defined based on the
+    endpoint type.
+
+    Note: this function allows for the service name to be inserted into the
+    address if the user specifies {service_name}.somehost.org.
+
+    :param endpoint_type: the type of endpoint to retrieve the override
+                          value for.
+    :returns: any endpoint address or hostname that the user has overridden
+              or None if an override is not present.
+    """
+    override_key = ADDRESS_MAP[endpoint_type]['override']
+    addr_override = config(override_key)
+    if not addr_override:
+        return None
+    else:
+        return addr_override.format(service_name=service_name())
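+
+
+# Example (illustrative): with os-public-hostname set to
+# '{service_name}.example.com' and a deployed service named 'keystone',
+# _get_address_override(PUBLIC) returns 'keystone.example.com'.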
+
+
+def local_address(unit_get_fallback='public-address'):
+    """Return a network address for this unit.
+
+    Attempt to retrieve a 'default' IP address for this unit
+    from network-get. If this is running with an old version of Juju then
+    fallback to unit_get.
+
+    Note that on juju < 2.9 the binding to juju-info may not exist, so fall
+    back to unit_get.
+
+    :param unit_get_fallback: Either 'public-address' or 'private-address'.
+                              Only used with old versions of Juju.
+    :type unit_get_fallback: str
+    :returns: IP Address
+    :rtype: str
+    """
+    try:
+        return network_get_primary_address('juju-info')
+    except (NotImplementedError, NoNetworkBinding):
+        return unit_get(unit_get_fallback)
+
+
+def resolve_address(endpoint_type=PUBLIC, override=True):
+    """Return unit address depending on net config.
+
+    If unit is clustered with vip(s) and has net splits defined, return vip on
+    correct network. If clustered with no nets defined, return primary vip.
+
+    If not clustered, return unit address ensuring address is on configured net
+    split if one is configured, or a Juju 2.0 extra-binding has been used.
+
+    :param endpoint_type: Network endpoint type
+    :param override: Accept hostname overrides or not
+    """
+    resolved_address = None
+    if override:
+        resolved_address = _get_address_override(endpoint_type)
+        if resolved_address:
+            return resolved_address
+
+    vips = config('vip')
+    if vips:
+        vips = vips.split()
+
+    net_type = ADDRESS_MAP[endpoint_type]['config']
+    net_addr = config(net_type)
+    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
+    binding = ADDRESS_MAP[endpoint_type]['binding']
+    clustered = is_clustered()
+
+    if clustered and vips:
+        if net_addr:
+            for vip in vips:
+                if is_address_in_network(net_addr, vip):
+                    resolved_address = vip
+                    break
+        else:
+            # NOTE: endeavour to check vips against network space
+            #       bindings
+            try:
+                bound_cidr = resolve_network_cidr(
+                    network_get_primary_address(binding)
+                )
+                for vip in vips:
+                    if is_address_in_network(bound_cidr, vip):
+                        resolved_address = vip
+                        break
+            except (NotImplementedError, NoNetworkBinding):
+                # If no net-splits are configured and there is no support for
+                # extra bindings/network spaces, expect a single vip
+                resolved_address = vips[0]
+    else:
+        if config('prefer-ipv6'):
+            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
+        else:
+            fallback_addr = local_address(unit_get_fallback=net_fallback)
+
+        if net_addr:
+            resolved_address = get_address_in_network(net_addr, fallback_addr)
+        else:
+            # NOTE: only try to use extra bindings if legacy network
+            #       configuration is not in use
+            try:
+                resolved_address = network_get_primary_address(binding)
+            except (NotImplementedError, NoNetworkBinding):
+                resolved_address = fallback_addr
+
+    if resolved_address is None:
+        raise ValueError("Unable to resolve a suitable IP address based on "
+                         "charm state and configuration. (net_type=%s, "
+                         "clustered=%s)" % (net_type, clustered))
+
+    return resolved_address
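+
+
+# Resolution order, roughly (illustrative summary): a user-supplied
+# os-*-hostname override wins when override=True; a clustered unit then
+# returns the VIP matching the configured network or network space
+# binding; a non-clustered unit returns the address on the configured
+# network, the extra-binding address, or the fallback unit address, in
+# that order.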
+
+
+def get_vip_in_network(network):
+    matching_vip = None
+    vips = config('vip')
+    if vips:
+        for vip in vips.split():
+            if is_address_in_network(network, vip):
+                matching_vip = vip
+    return matching_vip
+
+
+def get_default_api_bindings():
+    _default_bindings = []
+    for binding in [INTERNAL, ADMIN, PUBLIC]:
+        _default_bindings.append(ADDRESS_MAP[binding]['binding'])
+    return _default_bindings
diff --git a/charmhelpers/contrib/openstack/policyd.py b/charmhelpers/contrib/openstack/policyd.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2bb21e9db926bd2c4de8ab3e8d10d0837af563a
--- /dev/null
+++ b/charmhelpers/contrib/openstack/policyd.py
@@ -0,0 +1,801 @@
+# Copyright 2019 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import contextlib
+import os
+import six
+import shutil
+import yaml
+import zipfile
+
+import charmhelpers
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.host as ch_host
+
+# Features provided by this module:
+
+"""
+Policy.d helper functions
+=========================
+
+The functions in this module are designed, as a set, to provide an easy-to-use
+set of hooks for classic charms to add in /etc/<service-name>/policy.d/
+directory override YAML files.
+
+(For charms.openstack charms, a mixin class is provided for this
+functionality).
+
+In order to "hook" this functionality into a (classic) charm, two functions are
+provided:
+
+    maybe_do_policyd_overrides(openstack_release,
+                               service,
+                               blacklist_paths=None,
+                               blacklist_keys=None,
+                               template_function=None,
+                               restart_handler=None)
+
+    maybe_do_policyd_overrides_on_config_changed(openstack_release,
+                                                 service,
+                                                 blacklist_paths=None,
+                                                 blacklist_keys=None,
+                                                 template_function=None,
+                                                 restart_handler=None)
+
+(See the docstrings for details on the parameters)
+
+The functions should be called from the install and upgrade hooks in the charm.
+The `maybe_do_policyd_overrides_on_config_changed` function is designed to be
+called on the config-changed hook; it performs an additional check to
+ensure that a policy.d override already applied in the install or upgrade
+hook isn't repeated.
+
+In order to *enable* this functionality, the charm's install, config_changed,
+and upgrade_charm hooks need to be modified, and a new config option (see
+below) needs to be added.  The README for the charm should also be updated.
+
+Examples from the keystone charm are:
+
+@hooks.hook('install.real')
+@harden()
+def install():
+    ...
+    # call the policy overrides handler which will install any policy overrides
+    maybe_do_policyd_overrides(os_release('keystone'), 'keystone')
+
+
+@hooks.hook('config-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@harden()
+def config_changed():
+    ...
+    # call the policy overrides handler which will install any policy overrides
+    maybe_do_policyd_overrides_on_config_changed(os_release('keystone'),
+                                                 'keystone')
+
+@hooks.hook('upgrade-charm')
+@restart_on_change(restart_map(), stopstart=True)
+@harden()
+def upgrade_charm():
+    ...
+    # call the policy overrides handler which will install any policy overrides
+    maybe_do_policyd_overrides(os_release('keystone'), 'keystone')
+
+Status Line
+===========
+
+The workload status code in charm-helpers has been modified to detect if
+policy.d override code has been incorporated into the charm by checking for the
+new config variable (in the config.yaml).  If it has been, then the workload
+status line will automatically show "PO:" at the beginning of the workload
+status for that unit/service if the config option is set.  If the policy
+override is broken, the "PO (broken):" will be shown.  No changes to the charm
+(apart from those already mentioned) are needed to enable this functionality.
+(charms.openstack charms also get this functionality, but please see that
+library for further details).
+"""
+
+# The config.yaml for the charm should contain the following for the config
+# option:
+
+"""
+  use-policyd-override:
+    type: boolean
+    default: False
+    description: |
+      If True then use the resource file named 'policyd-override' to install
+      override YAML files in the service's policy.d directory.  The resource
+      file should be a ZIP file containing at least one yaml file with a .yaml
+      or .yml extension.  If False then remove the overrides.
+"""
+
+# The metadata.yaml for the charm should contain the following:
+"""
+resources:
+  policyd-override:
+    type: file
+    filename: policyd-override.zip
+    description: The policy.d overrides file
+"""
+
+# The README for the charm should contain the following:
+"""
+Policy Overrides
+----------------
+
+This feature allows for policy overrides using the `policy.d` directory.  This
+is an **advanced** feature and the policies that the OpenStack service supports
+should be clearly and unambiguously understood before trying to override, or
+add to, the default policies that the service uses.  The charm also has some
+policy defaults.  They should also be understood before being overridden.
+
+> **Caution**: It is possible to break the system (for tenants and other
+  services) if policies are incorrectly applied to the service.
+
+Policy overrides are YAML files that contain rules that will add to, or
+override, existing policy rules in the service.  The `policy.d` directory is
+a place to put the YAML override files.  This charm owns the
+`/etc/keystone/policy.d` directory, and as such, any manual changes to it will
+be overwritten on charm upgrades.
+
+Overrides are provided to the charm using a Juju resource called
+`policyd-override`.  The resource is a ZIP file.  This file, say
+`overrides.zip`, is attached to the charm by:
+
+    juju attach-resource <charm-name> policyd-override=overrides.zip
+
+The policy override is enabled in the charm using:
+
+    juju config <charm-name> use-policyd-override=true
+
+When `use-policyd-override` is `True` the status line of the charm will be
+prefixed with `PO:` indicating that policies have been overridden.  If the
+installation of the policy override YAML files failed for any reason then the
+status line will be prefixed with `PO (broken):`.  The log file for the charm
+will indicate the reason.  No policy override files are installed if the `PO
+(broken):` is shown.  The status line indicates that the overrides are broken,
+not that the policy for the service has failed. The policy will be the defaults
+for the charm and service.
+
+Policy overrides on one service may affect the functionality of another
+service. Therefore, it may be necessary to provide policy overrides for
+multiple service charms to achieve a consistent set of policies across the
+OpenStack system.  The charms for the other services that may need overrides
+should be checked to ensure that they support overrides before proceeding.
+"""
+
+POLICYD_VALID_EXTS = ['.yaml', '.yml', '.j2', '.tmpl', '.tpl']
+POLICYD_TEMPLATE_EXTS = ['.j2', '.tmpl', '.tpl']
+POLICYD_RESOURCE_NAME = "policyd-override"
+POLICYD_CONFIG_NAME = "use-policyd-override"
+POLICYD_SUCCESS_FILENAME = "policyd-override-success"
+POLICYD_LOG_LEVEL_DEFAULT = hookenv.INFO
+POLICYD_ALWAYS_BLACKLISTED_KEYS = ("admin_required", "cloud_admin")
+
+
+class BadPolicyZipFile(Exception):
+
+    def __init__(self, log_message):
+        self.log_message = log_message
+
+    def __str__(self):
+        return self.log_message
+
+
+class BadPolicyYamlFile(Exception):
+
+    def __init__(self, log_message):
+        self.log_message = log_message
+
+    def __str__(self):
+        return self.log_message
+
+
+if six.PY2:
+    BadZipFile = zipfile.BadZipfile
+else:
+    BadZipFile = zipfile.BadZipFile
+
+
+def is_policyd_override_valid_on_this_release(openstack_release):
+    """Check that the charm is running on at least Ubuntu Xenial, and at
+    least the queens release.
+
+    :param openstack_release: the release codename that is installed.
+    :type openstack_release: str
+    :returns: True if okay
+    :rtype: bool
+    """
+    # NOTE(ajkavanagh) circular import!  This is because the status message
+    # generation code in utils has to call into this module, but this function
+    # needs the CompareOpenStackReleases() function.  The only way to solve
+    # this is either to put ALL of this module into utils, or refactor one or
+    # other of the CompareOpenStackReleases or status message generation code
+    # into a 3rd module.
+    import charmhelpers.contrib.openstack.utils as ch_utils
+    return ch_utils.CompareOpenStackReleases(openstack_release) >= 'queens'
+
+
+def maybe_do_policyd_overrides(openstack_release,
+                               service,
+                               blacklist_paths=None,
+                               blacklist_keys=None,
+                               template_function=None,
+                               restart_handler=None,
+                               user=None,
+                               group=None,
+                               config_changed=False):
+    """If the config option is set, get the resource file and process it to
+    enable the policy.d overrides for the service passed.
+
+    The param `openstack_release` is required as the policyd overrides feature
+    is only supported on openstack_release "queens" or later, and on ubuntu
+    "xenial" or later.  Prior to these versions, this feature is a NOP.
+
+    The optional template_function is a function that accepts a string and has
+    an opportunity to modify the loaded file prior to it being read by
+    yaml.safe_load().  This allows the charm to perform "templating" using
+    charm derived data.
+
+    The param blacklist_paths are paths (in the service's policy.d
+    directory) that should not be touched.
+
+    The param blacklist_keys are keys that must not appear in the yaml file.
+    If they do, then the whole policy.d file fails.
+
+    The yaml file extracted from the resource_file (which is a zipped file) has
+    its file path reconstructed.  This path, too, must not match any path in
+    the blacklist.
+
+    The param restart_handler is an optional Callable that is called to perform
+    the service restart if the policy.d file is changed.  This should normally
+    be None as oslo.policy automatically picks up changes in the policy.d
+    directory.  However, for any services where this is buggy then a
+    restart_handler can be used to force the policy.d files to be read.
+
+    If the config_changed param is True, then the handling is slightly
+    different: It will only perform the policyd overrides if the config is True
+    and the success file doesn't exist.  Otherwise, it does nothing as the
+    resource file has already been processed.
+
+    :param openstack_release: The openstack release that is installed.
+    :type openstack_release: str
+    :param service: the service name to construct the policy.d directory for.
+    :type service: str
+    :param blacklist_paths: optional list of paths to leave alone
+    :type blacklist_paths: Union[None, List[str]]
+    :param blacklist_keys: optional list of keys that mustn't appear in the
+                           yaml files
+    :type blacklist_keys: Union[None, List[str]]
+    :param template_function: Optional function that can modify the string
+                              prior to being processed as a Yaml document.
+    :type template_function: Union[None, Callable[[str], str]]
+    :param restart_handler: The function to call if the service should be
+                            restarted.
+    :type restart_handler: Union[None, Callable[]]
+    :param user: The user to create/write files/directories as
+    :type user: Union[None, str]
+    :param group: the group to create/write files/directories as
+    :type group: Union[None, str]
+    :param config_changed: Set to True for config_changed hook.
+    :type config_changed: bool
+    """
+    _user = service if user is None else user
+    _group = service if group is None else group
+    if not is_policyd_override_valid_on_this_release(openstack_release):
+        return
+    hookenv.log("Running maybe_do_policyd_overrides",
+                level=POLICYD_LOG_LEVEL_DEFAULT)
+    config = hookenv.config()
+    try:
+        if not config.get(POLICYD_CONFIG_NAME, False):
+            clean_policyd_dir_for(service,
+                                  blacklist_paths,
+                                  user=_user,
+                                  group=_group)
+            if (os.path.isfile(_policy_success_file()) and
+                    restart_handler is not None and
+                    callable(restart_handler)):
+                restart_handler()
+            remove_policy_success_file()
+            return
+    except Exception as e:
+        hookenv.log("... ERROR: Exception is: {}".format(str(e)),
+                    level=POLICYD_LOG_LEVEL_DEFAULT)
+        import traceback
+        hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT)
+        return
+    # if the policyd overrides have been performed when doing config_changed
+    # just return
+    if config_changed and is_policy_success_file_set():
+        hookenv.log("... already setup, so skipping.",
+                    level=POLICYD_LOG_LEVEL_DEFAULT)
+        return
+    # from now on it should succeed; if it doesn't then status line will show
+    # broken.
+    resource_filename = get_policy_resource_filename()
+    restart = process_policy_resource_file(
+        resource_filename, service, blacklist_paths, blacklist_keys,
+        template_function)
+    if restart and restart_handler is not None and callable(restart_handler):
+        restart_handler()
+
+
+@charmhelpers.deprecate("Use maybe_do_poliyd_overrrides instead")
+def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs):
+    """This function is designed to be called from the config changed hook.
+
+    DEPRECATED: please use maybe_do_policyd_overrides() with the param
+    `config_changed` as `True`.
+
+    See maybe_do_policyd_overrides() for more details on the params.
+    """
+    if 'config_changed' not in kwargs.keys():
+        kwargs['config_changed'] = True
+    return maybe_do_policyd_overrides(*args, **kwargs)
+
+
+def get_policy_resource_filename():
+    """Function to extract the policy resource filename
+
+    :returns: The filename of the resource, if set, otherwise, if an error
+               occurs, then None is returned.
+    :rtype: Union[str, None]
+    """
+    try:
+        return hookenv.resource_get(POLICYD_RESOURCE_NAME)
+    except Exception:
+        return None
+
+
+@contextlib.contextmanager
+def open_and_filter_yaml_files(filepath, has_subdirs=False):
+    """Validate that the filepath provided is a zip file and contains at least
+    one (.yaml|.yml) file, and that the files are not duplicated when the zip
+    file is flattened.  Note that the yaml files are not checked.  This is the
+    first stage in validating the policy zipfile; individual yaml files are not
+    checked for validity or black listed keys.
+
+    If the has_subdirs param is True, then the files are flattened to the first
+    directory, and the files in the root are ignored.
+
+    An example of use is:
+
+        with open_and_filter_yaml_files(some_path) as (zfp, g):
+            for zipinfo in g:
+                # do something with zipinfo ...
+
+    :param filepath: a filepath object that can be opened by zipfile
+    :type filepath: Union[AnyStr, os.PathLike[AnyStr]]
+    :param has_subdirs: Keep first level of subdirectories in yaml file.
+    :type has_subdirs: bool
+    :returns: (zfp handle,
+               a generator of the (name, filename, ZipInfo object) tuples) as a
+               tuple.
+    :rtype: ContextManager[(zipfile.ZipFile,
+                            Generator[(name, str, str, zipfile.ZipInfo)])]
+    :raises: zipfile.BadZipFile
+    :raises: BadPolicyZipFile if duplicated yaml or missing
+    :raises: IOError if the filepath is not found
+    """
+    with zipfile.ZipFile(filepath, 'r') as zfp:
+        # first pass through; check for duplicates and at least one yaml file.
+        names = collections.defaultdict(int)
+        yamlfiles = _yamlfiles(zfp, has_subdirs)
+        for name, _, _, _ in yamlfiles:
+            names[name] += 1
+        # There must be at least 1 yaml file.
+        if len(names.keys()) == 0:
+            raise BadPolicyZipFile("contains no yaml files with {} extensions."
+                                   .format(", ".join(POLICYD_VALID_EXTS)))
+        # There must be no duplicates
+        duplicates = [n for n, c in names.items() if c > 1]
+        if duplicates:
+            raise BadPolicyZipFile("{} have duplicates in the zip file."
+                                   .format(", ".join(duplicates)))
+        # Finally, let's yield the generator
+        yield (zfp, yamlfiles)
+
+
+def _yamlfiles(zipfile, has_subdirs=False):
+    """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions)
+    and the infolist item from a zipfile.
+
+    If the `has_subdirs` param is True, then only yaml files that have a
+    directory component are read, and the first part of the directory
+    component is kept, along with the filename in the name.  e.g. an entry with
+    a filename of:
+
+        compute/someotherdir/override.yaml
+
+    is returned as:
+
+        ('compute/override', '.yaml', 'override.yaml', <ZipInfo object>)
+
+    This is to help with the special, additional, processing that the dashboard
+    charm requires.
+
+    :param zipfile: the zipfile to read zipinfo items from
+    :type zipfile: zipfile.ZipFile
+    :param has_subdirs: Keep first level of subdirectories in yaml file.
+    :type has_subdirs: bool
+    :returns: list of (name, ext, filename, info item) tuples for each
+              self-identified yaml file.
+    :rtype: List[(str, str, str, zipfile.ZipInfo)]
+    """
+    files = []
+    for infolist_item in zipfile.infolist():
+        try:
+            if infolist_item.is_dir():
+                continue
+        except AttributeError:
+            # fallback to "old" way to determine dir entry for pre-py36
+            if infolist_item.filename.endswith('/'):
+                continue
+        _dir, name_ext = os.path.split(infolist_item.filename)
+        name, ext = os.path.splitext(name_ext)
+        if has_subdirs and _dir != "":
+            name = os.path.join(_dir.split(os.path.sep)[0], name)
+        ext = ext.lower()
+        if ext and ext in POLICYD_VALID_EXTS:
+            files.append((name, ext, name_ext, infolist_item))
+    return files
+
+
+def read_and_validate_yaml(stream_or_doc, blacklist_keys=None):
+    """Read, validate and return the (first) yaml document from the stream.
+
+    The doc is read and validated as yaml.  Then the top-level keys are
+    checked against the blacklist_keys provided.  If there are problems then an
+    Exception is raised.  Otherwise the yaml document is returned as a Python
+    object that can be dumped back as a yaml file on the system.
+
+    The yaml file must only consist of a str:str mapping, and if not then the
+    yaml file is rejected.
+
+    :param stream_or_doc: the file object to read the yaml from
+    :type stream_or_doc: Union[AnyStr, IO[AnyStr]]
+    :param blacklist_keys: Any keys which, if in the yaml file, should cause
+        an error.
+    :type blacklist_keys: Union[None, List[str]]
+    :returns: the yaml file as a python document
+    :rtype: Dict[str, str]
+    :raises: yaml.YAMLError if there is a problem with the document
+    :raises: BadPolicyYamlFile if file doesn't look right or there are
+             blacklisted keys in the file.
+    """
+    blacklist_keys = blacklist_keys or []
+    blacklist_keys.extend(POLICYD_ALWAYS_BLACKLISTED_KEYS)
+    doc = yaml.safe_load(stream_or_doc)
+    if not isinstance(doc, dict):
+        raise BadPolicyYamlFile("doesn't look like a policy file?")
+    keys = set(doc.keys())
+    blacklisted_keys_present = keys.intersection(blacklist_keys)
+    if blacklisted_keys_present:
+        raise BadPolicyYamlFile("blacklisted keys {} present."
+                                .format(", ".join(blacklisted_keys_present)))
+    if not all(isinstance(k, six.string_types) for k in keys):
+        raise BadPolicyYamlFile("keys in yaml aren't all strings?")
+    # check that the dictionary looks like a mapping of str to str
+    if not all(isinstance(v, six.string_types) for v in doc.values()):
+        raise BadPolicyYamlFile("values in yaml aren't all strings?")
+    return doc
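+
+
+# Example (illustrative): a document mapping rule names to rule strings
+# passes validation, e.g.:
+#
+#     read_and_validate_yaml('{"identity:list_users": "rule:owner"}')
+#     # -> {'identity:list_users': 'rule:owner'}
+#
+# whereas a document that redefines a key such as 'admin_required' raises
+# BadPolicyYamlFile, as that key is always blacklisted.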
+
+
+def policyd_dir_for(service):
+    """Return the policy directory for the named service.
+
+    :param service: the service name
+    :type service: str
+    :returns: the policy.d override directory.
+    :rtype: os.PathLike[str]
+    """
+    return os.path.join("/", "etc", service, "policy.d")
+
+
+def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None):
+    """Clean out the policyd directory except for items that should be kept.
+
+    The keep_paths, if used, should be set to the full path of the files that
+    should be kept in the policyd directory for the service.  Note that the
+    service name is passed in, and then the policyd_dir_for() function is used.
+    This is so that a coding error doesn't result in a sudden deletion of,
+    say, the charm directory itself.
+
+    :param service: the service name to use to construct the policy.d dir.
+    :type service: str
+    :param keep_paths: optional list of paths to not delete.
+    :type keep_paths: Union[None, List[str]]
+    :param user: The user to create/write files/directories as
+    :type user: Union[None, str]
+    :param group: the group to create/write files/directories as
+    :type group: Union[None, str]
+    """
+    _user = service if user is None else user
+    _group = service if group is None else group
+    keep_paths = keep_paths or []
+    path = policyd_dir_for(service)
+    hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG)
+    if not os.path.exists(path):
+        ch_host.mkdir(path, owner=_user, group=_group, perms=0o775)
+    _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir
+    for direntry in _scanner(path):
+        # see if the path should be kept.
+        if direntry.path in keep_paths:
+            continue
+        # we remove any directories; it's ours and there shouldn't be any
+        if direntry.is_dir():
+            shutil.rmtree(direntry.path)
+        else:
+            os.remove(direntry.path)
+
+
+def maybe_create_directory_for(path, user, group):
+    """For the filename 'path', ensure that the directory for that path exists.
+
+    Note that if the directory already exists then the permissions are NOT
+    changed.
+
+    :param path: the filename including the path to it.
+    :type path: str
+    :param user: the user to create the directory as
+    :param group: the group to create the directory as
+    """
+    _dir, _ = os.path.split(path)
+    if not os.path.exists(_dir):
+        ch_host.mkdir(_dir, owner=user, group=group, perms=0o775)
+
+
+@contextlib.contextmanager
+def _fallback_scandir(path):
+    """Fallback os.scandir implementation.
+
+    provide a fallback implementation of os.scandir if this module ever gets
+    used in a py2 or py34 charm. Uses os.listdir() to get the names in the path,
+    and then mocks the is_dir() function using os.path.isdir() to check for
+    directory.
+
+    :param path: the path to list the directories for
+    :type path: str
+    :returns: Generator that provides _FBDirectory objects
+    :rtype: ContextManager[_FBDirectory]
+    """
+    for f in os.listdir(path):
+        yield _FBDirectory(f)
+
+
+class _FBDirectory(object):
+    """Mock a scandir Directory object with enough to use in
+    clean_policyd_dir_for
+    """
+
+    def __init__(self, path):
+        self.path = path
+
+    def is_dir(self):
+        return os.path.isdir(self.path)
+
+
+def path_for_policy_file(service, name):
+    """Return the full path for a policy.d file that will be written to the
+    service's policy.d directory.
+
+    It is constructed using policyd_dir_for(), the name and the ".yaml"
+    extension.
+
+    For horizon, for example, it's a bit more complicated.  The name param is
+    actually "override_service_dir/a_name", where target_service needs to be
+    one the allowed horizon override services.  This translation and check is
+    done in the _yamlfiles() function.
+
+    :param service: the service name
+    :type service: str
+    :param name: the name for the policy override
+    :type name: str
+    :returns: the full path name for the file
+    :rtype: os.PathLike[str]
+    """
+    return os.path.join(policyd_dir_for(service), name + ".yaml")
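+
+
+# Example (illustrative): path_for_policy_file('keystone', 'override')
+# returns '/etc/keystone/policy.d/override.yaml'.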
+
+
+def _policy_success_file():
+    """Return the file name for a successful drop of policy.d overrides
+
+    :returns: the path name for the file.
+    :rtype: str
+    """
+    return os.path.join(hookenv.charm_dir(), POLICYD_SUCCESS_FILENAME)
+
+
+def remove_policy_success_file():
+    """Remove the file that indicates successful policyd override."""
+    try:
+        os.remove(_policy_success_file())
+    except Exception:
+        pass
+
+
+def set_policy_success_file():
+    """Set the file that indicates successful policyd override."""
+    open(_policy_success_file(), "w").close()
+
+
+def is_policy_success_file_set():
+    """Returns True if the policy success file has been set.
+
+    This indicates that policies are overridden and working properly.
+
+    :returns: True if the policy file is set
+    :rtype: bool
+    """
+    return os.path.isfile(_policy_success_file())
+
+
+def policyd_status_message_prefix():
+    """Return the prefix str for the status line.
+
+    "PO:" indicating that the policy overrides are in place, or "PO (broken):"
+    if the policy is supposed to be working but there is no success file.
+
+    :returns: the prefix
+    :rtype: str
+    """
+    if is_policy_success_file_set():
+        return "PO:"
+    return "PO (broken):"
+
+
+def process_policy_resource_file(resource_file,
+                                 service,
+                                 blacklist_paths=None,
+                                 blacklist_keys=None,
+                                 template_function=None,
+                                 preserve_topdir=False,
+                                 preprocess_filename=None,
+                                 user=None,
+                                 group=None):
+    """Process the resource file (which should contain at least one yaml file)
+    and write those files to the service's policy.d directory.
+
+    The optional template_function is a function that accepts a python
+    string and has an opportunity to modify the document
+    prior to it being read by the yaml.safe_load() function and written to
+    disk. Note that this function does *not* say how the templating is done -
+    this is up to the charm to implement its chosen method.
+
+    The param blacklist_paths are paths (that are in the service's policy.d
+    directory that should not be touched).
+
+    The param blacklist_keys are keys that must not appear in the yaml file.
+    If they do, then the whole policy.d file fails.
+
+    The yaml file extracted from the resource_file (which is a zipped file) has
+    its file path reconstructed.  This path, too, must not match any path in
+    the blacklist.
+
+    The yaml filename can be modified in two ways.  If the `preserve_topdir`
+    param is True, then files will be flattened to the top dir.  This allows
+    for creating sets of files that can be grouped into a single level tree
+    structure.
+
+    Secondly, if the `preprocess_filename` param is not None and callable()
+    then the name is passed to that function for preprocessing before being
+    converted to the end location.  This is to allow munging of the filename
+    prior to being tested for a blacklist path.
+
+    If any error occurs, then the policy.d directory is cleared, the error is
+    written to the log, and the status line will eventually show as failed.
+
+    :param resource_file: The zipped file to open and extract yaml files from.
+    :type resource_file: Union[AnyStr, os.PathLike[AnyStr]]
+    :param service: the service name to construct the policy.d directory for.
+    :type service: str
+    :param blacklist_paths: optional list of paths to leave alone
+    :type blacklist_paths: Union[None, List[str]]
+    :param blacklist_keys: optional list of keys that mustn't appear in the
+                           yaml files
+    :type blacklist_keys: Union[None, List[str]]
+    :param template_function: Optional function that can modify the yaml
+                              document.
+    :type template_function: Union[None, Callable[[AnyStr], AnyStr]]
+    :param preserve_topdir: Keep the toplevel subdir
+    :type preserve_topdir: bool
+    :param preprocess_filename: Optional function to use to process filenames
+                                extracted from the resource file.
+    :type preprocess_filename: Union[None, Callable[[AnyStr], AnyStr]]
+    :param user: The user to create/write files/directories as
+    :type user: Union[None, str]
+    :param group: the group to create/write files/directories as
+    :type group: Union[None, str]
+    :returns: True if the processing was successful, False if not.
+    :rtype: boolean
+    """
+    hookenv.log("Running process_policy_resource_file", level=hookenv.DEBUG)
+    blacklist_paths = blacklist_paths or []
+    completed = False
+    _preprocess = None
+    if preprocess_filename is not None and callable(preprocess_filename):
+        _preprocess = preprocess_filename
+    _user = service if user is None else user
+    _group = service if group is None else group
+    try:
+        with open_and_filter_yaml_files(
+                resource_file, preserve_topdir) as (zfp, gen):
+            # first clear out the policy.d directory and clear success
+            remove_policy_success_file()
+            clean_policyd_dir_for(service,
+                                  blacklist_paths,
+                                  user=_user,
+                                  group=_group)
+            for name, ext, filename, zipinfo in gen:
+                # See if the name should be preprocessed.
+                if _preprocess is not None:
+                    name = _preprocess(name)
+                # construct a name for the output file.
+                yaml_filename = path_for_policy_file(service, name)
+                if yaml_filename in blacklist_paths:
+                    raise BadPolicyZipFile("policy.d name {} is blacklisted"
+                                           .format(yaml_filename))
+                with zfp.open(zipinfo) as fp:
+                    doc = fp.read()
+                    # if template_function is not None, then offer the document
+                    # to the template function
+                    if ext in POLICYD_TEMPLATE_EXTS:
+                        if (template_function is None or not
+                                callable(template_function)):
+                            raise BadPolicyZipFile(
+                                "Template {} but no template_function is "
+                                "available".format(filename))
+                        doc = template_function(doc)
+                    yaml_doc = read_and_validate_yaml(doc, blacklist_keys)
+                # we may have to create the directory
+                maybe_create_directory_for(yaml_filename, _user, _group)
+                ch_host.write_file(yaml_filename,
+                                   yaml.dump(yaml_doc).encode('utf-8'),
+                                   _user,
+                                   _group)
+        # Everything worked, so we mark up a success.
+        completed = True
+    except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e:
+        hookenv.log("Processing {} failed: {}".format(resource_file, str(e)),
+                    level=POLICYD_LOG_LEVEL_DEFAULT)
+    except IOError as e:
+        # technically this shouldn't happen; it would be a programming error as
+        # the filename comes from Juju and thus should exist.
+        hookenv.log(
+            "File {} failed with IOError.  This really shouldn't happen"
+            " -- error: {}".format(resource_file, str(e)),
+            level=POLICYD_LOG_LEVEL_DEFAULT)
+    except Exception as e:
+        import traceback
+        hookenv.log("General Exception({}) during policyd processing"
+                    .format(str(e)),
+                    level=POLICYD_LOG_LEVEL_DEFAULT)
+        hookenv.log(traceback.format_exc())
+    finally:
+        if not completed:
+            hookenv.log("Processing {} failed: cleaning policy.d directory"
+                        .format(resource_file),
+                        level=POLICYD_LOG_LEVEL_DEFAULT)
+            clean_policyd_dir_for(service,
+                                  blacklist_paths,
+                                  user=_user,
+                                  group=_group)
+        else:
+            # touch the success filename
+            hookenv.log("policy.d overrides installed.",
+                        level=POLICYD_LOG_LEVEL_DEFAULT)
+            set_policy_success_file()
+        return completed
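+
+
+# A typical call from a charm hook looks roughly like this (an
+# illustrative sketch; the service name and any follow-up handling are
+# hypothetical):
+#
+#     completed = process_policy_resource_file(
+#         hookenv.resource_get('policyd-override'), 'keystone')
+#     if completed:
+#         hookenv.log('policy.d overrides in place')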
diff --git a/charmhelpers/contrib/openstack/utils.py b/charmhelpers/contrib/openstack/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f27aa6c93081bd0ef1d8b8d4a442aee45f6776c8
--- /dev/null
+++ b/charmhelpers/contrib/openstack/utils.py
@@ -0,0 +1,2420 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Common python helper functions used for OpenStack charms.
+from collections import OrderedDict, namedtuple
+from functools import wraps
+
+import subprocess
+import json
+import operator
+import os
+import sys
+import re
+import itertools
+import functools
+
+import six
+import traceback
+import uuid
+import yaml
+
+from charmhelpers import deprecate
+
+from charmhelpers.contrib.network import ip
+
+from charmhelpers.core import decorators, unitdata
+
+from charmhelpers.core.hookenv import (
+    WORKLOAD_STATES,
+    action_fail,
+    action_set,
+    config,
+    expected_peer_units,
+    expected_related_units,
+    log as juju_log,
+    charm_dir,
+    INFO,
+    ERROR,
+    metadata,
+    related_units,
+    relation_get,
+    relation_id,
+    relation_ids,
+    relation_set,
+    status_set,
+    hook_name,
+    application_version_set,
+    cached,
+    leader_set,
+    leader_get,
+    local_unit,
+)
+
+from charmhelpers.core.strutils import (
+    BasicStringComparator,
+    bool_from_string,
+)
+
+from charmhelpers.contrib.storage.linux.lvm import (
+    deactivate_lvm_volume_group,
+    is_lvm_physical_volume,
+    remove_lvm_physical_volume,
+)
+
+from charmhelpers.contrib.network.ip import (
+    get_ipv6_addr,
+    is_ipv6,
+    port_has_listener,
+)
+
+from charmhelpers.core.host import (
+    lsb_release,
+    mounts,
+    umount,
+    service_running,
+    service_pause,
+    service_resume,
+    service_stop,
+    service_start,
+    restart_on_change_helper,
+)
+
+from charmhelpers.fetch import (
+    apt_cache,
+    apt_install,
+    import_key as fetch_import_key,
+    add_source as fetch_add_source,
+    SourceConfigError,
+    GPGKeyError,
+    get_upstream_version,
+    filter_installed_packages,
+    filter_missing_packages,
+    ubuntu_apt_pkg as apt,
+)
+
+from charmhelpers.fetch.snap import (
+    snap_install,
+    snap_refresh,
+    valid_snap_channel,
+)
+
+from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
+from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
+from charmhelpers.contrib.openstack.exceptions import OSContextError
+from charmhelpers.contrib.openstack.policyd import (
+    policyd_status_message_prefix,
+    POLICYD_CONFIG_NAME,
+)
+
+from charmhelpers.contrib.openstack.ha.utils import (
+    expect_ha,
+)
+
+CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
+CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
+
+DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
+                   'restricted main multiverse universe')
+
+OPENSTACK_RELEASES = (
+    'diablo',
+    'essex',
+    'folsom',
+    'grizzly',
+    'havana',
+    'icehouse',
+    'juno',
+    'kilo',
+    'liberty',
+    'mitaka',
+    'newton',
+    'ocata',
+    'pike',
+    'queens',
+    'rocky',
+    'stein',
+    'train',
+    'ussuri',
+    'victoria',
+)
+
+UBUNTU_OPENSTACK_RELEASE = OrderedDict([
+    ('oneiric', 'diablo'),
+    ('precise', 'essex'),
+    ('quantal', 'folsom'),
+    ('raring', 'grizzly'),
+    ('saucy', 'havana'),
+    ('trusty', 'icehouse'),
+    ('utopic', 'juno'),
+    ('vivid', 'kilo'),
+    ('wily', 'liberty'),
+    ('xenial', 'mitaka'),
+    ('yakkety', 'newton'),
+    ('zesty', 'ocata'),
+    ('artful', 'pike'),
+    ('bionic', 'queens'),
+    ('cosmic', 'rocky'),
+    ('disco', 'stein'),
+    ('eoan', 'train'),
+    ('focal', 'ussuri'),
+    ('groovy', 'victoria'),
+])
+
+
+OPENSTACK_CODENAMES = OrderedDict([
+    ('2011.2', 'diablo'),
+    ('2012.1', 'essex'),
+    ('2012.2', 'folsom'),
+    ('2013.1', 'grizzly'),
+    ('2013.2', 'havana'),
+    ('2014.1', 'icehouse'),
+    ('2014.2', 'juno'),
+    ('2015.1', 'kilo'),
+    ('2015.2', 'liberty'),
+    ('2016.1', 'mitaka'),
+    ('2016.2', 'newton'),
+    ('2017.1', 'ocata'),
+    ('2017.2', 'pike'),
+    ('2018.1', 'queens'),
+    ('2018.2', 'rocky'),
+    ('2019.1', 'stein'),
+    ('2019.2', 'train'),
+    ('2020.1', 'ussuri'),
+    ('2020.2', 'victoria'),
+])
+
+# The ugly duckling - must list releases oldest to newest
+SWIFT_CODENAMES = OrderedDict([
+    ('diablo',
+        ['1.4.3']),
+    ('essex',
+        ['1.4.8']),
+    ('folsom',
+        ['1.7.4']),
+    ('grizzly',
+        ['1.7.6', '1.7.7', '1.8.0']),
+    ('havana',
+        ['1.9.0', '1.9.1', '1.10.0']),
+    ('icehouse',
+        ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
+    ('juno',
+        ['2.0.0', '2.1.0', '2.2.0']),
+    ('kilo',
+        ['2.2.1', '2.2.2']),
+    ('liberty',
+        ['2.3.0', '2.4.0', '2.5.0']),
+    ('mitaka',
+        ['2.5.0', '2.6.0', '2.7.0']),
+    ('newton',
+        ['2.8.0', '2.9.0', '2.10.0']),
+    ('ocata',
+        ['2.11.0', '2.12.0', '2.13.0']),
+    ('pike',
+        ['2.13.0', '2.15.0']),
+    ('queens',
+        ['2.16.0', '2.17.0']),
+    ('rocky',
+        ['2.18.0', '2.19.0']),
+    ('stein',
+        ['2.20.0', '2.21.0']),
+    ('train',
+        ['2.22.0', '2.23.0']),
+    ('ussuri',
+        ['2.24.0', '2.25.0']),
+    ('victoria',
+        ['2.25.0', '2.26.0']),
+])
+
+# >= Liberty version->codename mapping
+PACKAGE_CODENAMES = {
+    'nova-common': OrderedDict([
+        ('12', 'liberty'),
+        ('13', 'mitaka'),
+        ('14', 'newton'),
+        ('15', 'ocata'),
+        ('16', 'pike'),
+        ('17', 'queens'),
+        ('18', 'rocky'),
+        ('19', 'stein'),
+        ('20', 'train'),
+        ('21', 'ussuri'),
+        ('22', 'victoria'),
+    ]),
+    'neutron-common': OrderedDict([
+        ('7', 'liberty'),
+        ('8', 'mitaka'),
+        ('9', 'newton'),
+        ('10', 'ocata'),
+        ('11', 'pike'),
+        ('12', 'queens'),
+        ('13', 'rocky'),
+        ('14', 'stein'),
+        ('15', 'train'),
+        ('16', 'ussuri'),
+        ('17', 'victoria'),
+    ]),
+    'cinder-common': OrderedDict([
+        ('7', 'liberty'),
+        ('8', 'mitaka'),
+        ('9', 'newton'),
+        ('10', 'ocata'),
+        ('11', 'pike'),
+        ('12', 'queens'),
+        ('13', 'rocky'),
+        ('14', 'stein'),
+        ('15', 'train'),
+        ('16', 'ussuri'),
+        ('17', 'victoria'),
+    ]),
+    'keystone': OrderedDict([
+        ('8', 'liberty'),
+        ('9', 'mitaka'),
+        ('10', 'newton'),
+        ('11', 'ocata'),
+        ('12', 'pike'),
+        ('13', 'queens'),
+        ('14', 'rocky'),
+        ('15', 'stein'),
+        ('16', 'train'),
+        ('17', 'ussuri'),
+        ('18', 'victoria'),
+    ]),
+    'horizon-common': OrderedDict([
+        ('8', 'liberty'),
+        ('9', 'mitaka'),
+        ('10', 'newton'),
+        ('11', 'ocata'),
+        ('12', 'pike'),
+        ('13', 'queens'),
+        ('14', 'rocky'),
+        ('15', 'stein'),
+        ('16', 'train'),
+        ('18', 'ussuri'),
+        ('19', 'victoria'),
+    ]),
+    'ceilometer-common': OrderedDict([
+        ('5', 'liberty'),
+        ('6', 'mitaka'),
+        ('7', 'newton'),
+        ('8', 'ocata'),
+        ('9', 'pike'),
+        ('10', 'queens'),
+        ('11', 'rocky'),
+        ('12', 'stein'),
+        ('13', 'train'),
+        ('14', 'ussuri'),
+        ('15', 'victoria'),
+    ]),
+    'heat-common': OrderedDict([
+        ('5', 'liberty'),
+        ('6', 'mitaka'),
+        ('7', 'newton'),
+        ('8', 'ocata'),
+        ('9', 'pike'),
+        ('10', 'queens'),
+        ('11', 'rocky'),
+        ('12', 'stein'),
+        ('13', 'train'),
+        ('14', 'ussuri'),
+        ('15', 'victoria'),
+    ]),
+    'glance-common': OrderedDict([
+        ('11', 'liberty'),
+        ('12', 'mitaka'),
+        ('13', 'newton'),
+        ('14', 'ocata'),
+        ('15', 'pike'),
+        ('16', 'queens'),
+        ('17', 'rocky'),
+        ('18', 'stein'),
+        ('19', 'train'),
+        ('20', 'ussuri'),
+        ('21', 'victoria'),
+    ]),
+    'openstack-dashboard': OrderedDict([
+        ('8', 'liberty'),
+        ('9', 'mitaka'),
+        ('10', 'newton'),
+        ('11', 'ocata'),
+        ('12', 'pike'),
+        ('13', 'queens'),
+        ('14', 'rocky'),
+        ('15', 'stein'),
+        ('16', 'train'),
+        ('18', 'ussuri'),
+        ('19', 'victoria'),
+    ]),
+}
+
+DEFAULT_LOOPBACK_SIZE = '5G'
+
+DB_SERIES_UPGRADING_KEY = 'cluster-series-upgrading'
+
+DB_MAINTENANCE_KEYS = [DB_SERIES_UPGRADING_KEY]
+
+
+class CompareOpenStackReleases(BasicStringComparator):
+    """Provide comparisons of OpenStack releases.
+
+    Use in the form of
+
+    if CompareOpenStackReleases(release) > 'mitaka':
+        # do something with mitaka
+    """
+    _list = OPENSTACK_RELEASES
+
+
+def error_out(msg):
+    juju_log("FATAL ERROR: %s" % msg, level='ERROR')
+    sys.exit(1)
+
+
+def get_installed_semantic_versioned_packages():
+    '''Get a list of installed packages which have OpenStack semantic versioning
+
+    :returns: List of installed packages
+    :rtype: [pkg1, pkg2, ...]
+    '''
+    return filter_missing_packages(PACKAGE_CODENAMES.keys())
+
+
+def get_os_codename_install_source(src):
+    '''Derive OpenStack release codename from a given installation source.'''
+    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
+    rel = ''
+    if src is None:
+        return rel
+    if src in ['distro', 'distro-proposed', 'proposed']:
+        try:
+            rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
+        except KeyError:
+            e = 'Could not derive openstack release for '\
+                'this Ubuntu release: %s' % ubuntu_rel
+            error_out(e)
+        return rel
+
+    if src.startswith('cloud:'):
+        ca_rel = src.split(':')[1]
+        ca_rel = ca_rel.split('-')[1].split('/')[0]
+        return ca_rel
+
+    # Best guess match based on deb string provided
+    if (src.startswith('deb') or
+            src.startswith('ppa') or
+            src.startswith('snap')):
+        for v in OPENSTACK_CODENAMES.values():
+            if v in src:
+                return v
+
+
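+# A minimal sketch (never called) of how install source strings are parsed
+# by get_os_codename_install_source(); the example sources are hypothetical.
+def _example_install_source_parsing():
+    # 'cloud:<series>-<codename>[/pocket]' yields the codename directly:
+    assert get_os_codename_install_source('cloud:bionic-rocky') == 'rocky'
+    assert get_os_codename_install_source(
+        'cloud:bionic-rocky/proposed') == 'rocky'
+
+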
+def get_os_version_install_source(src):
+    codename = get_os_codename_install_source(src)
+    return get_os_version_codename(codename)
+
+
+def get_os_codename_version(vers):
+    '''Determine OpenStack codename from version number.'''
+    try:
+        return OPENSTACK_CODENAMES[vers]
+    except KeyError:
+        e = 'Could not determine OpenStack codename for version %s' % vers
+        error_out(e)
+
+
+def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
+    '''Determine OpenStack version number from codename.'''
+    for k, v in six.iteritems(version_map):
+        if v == codename:
+            return k
+    e = 'Could not derive OpenStack version for '\
+        'codename: %s' % codename
+    error_out(e)
+
+
+def get_os_version_codename_swift(codename):
+    '''Determine OpenStack version number of swift from codename.'''
+    for k, v in six.iteritems(SWIFT_CODENAMES):
+        if k == codename:
+            return v[-1]
+    e = 'Could not derive swift version for '\
+        'codename: %s' % codename
+    error_out(e)
+
+
+def get_swift_codename(version):
+    '''Determine OpenStack codename that corresponds to swift version.'''
+    codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
+
+    if len(codenames) > 1:
+        # If more than one release codename contains this version we determine
+        # the actual codename based on the highest available install source.
+        for codename in reversed(codenames):
+            releases = UBUNTU_OPENSTACK_RELEASE
+            release = [k for k, v in six.iteritems(releases) if codename in v]
+            ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
+            if six.PY3:
+                ret = ret.decode('UTF-8')
+            if codename in ret or release[0] in ret:
+                return codename
+    elif len(codenames) == 1:
+        return codenames[0]
+
+    # NOTE: fallback - attempt to match with just major.minor version
+    match = re.match(r'^(\d+)\.(\d+)', version)
+    if match:
+        major_minor_version = match.group(0)
+        for codename, versions in six.iteritems(SWIFT_CODENAMES):
+            for release_version in versions:
+                if release_version.startswith(major_minor_version):
+                    return codename
+
+    return None
+
+
+@deprecate("moved to charmhelpers.contrib.openstack.utils.get_installed_os_version()", "2021-01", log=juju_log)
+def get_os_codename_package(package, fatal=True):
+    '''Derive OpenStack release codename from an installed package.'''
+
+    codename = get_installed_os_version()
+    if codename:
+        return codename
+
+    if snap_install_requested():
+        cmd = ['snap', 'list', package]
+        try:
+            out = subprocess.check_output(cmd)
+            if six.PY3:
+                out = out.decode('UTF-8')
+        except subprocess.CalledProcessError:
+            return None
+        lines = out.split('\n')
+        for line in lines:
+            if package in line:
+                # Second item in list is Version
+                return line.split()[1]
+
+    cache = apt_cache()
+
+    try:
+        pkg = cache[package]
+    except Exception:
+        if not fatal:
+            return None
+        # the package is unknown to the current apt cache.
+        e = 'Could not determine version of package with no installation '\
+            'candidate: %s' % package
+        error_out(e)
+
+    if not pkg.current_ver:
+        if not fatal:
+            return None
+        # package is known, but no version is currently installed.
+        e = 'Could not determine version of uninstalled package: %s' % package
+        error_out(e)
+
+    vers = apt.upstream_version(pkg.current_ver.ver_str)
+    if 'swift' in pkg.name:
+        # Fully x.y.z match for swift versions
+        match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers)
+    else:
+        # x.y match only for 20XX.X
+        # and ignore patch level for other packages
+        match = re.match(r'^(\d+)\.(\d+)', vers)
+
+    if match:
+        vers = match.group(0)
+
+    # Generate a major version number for newer semantic
+    # versions of openstack projects
+    major_vers = vers.split('.')[0]
+    # >= Liberty independent project versions
+    if (package in PACKAGE_CODENAMES and
+            major_vers in PACKAGE_CODENAMES[package]):
+        return PACKAGE_CODENAMES[package][major_vers]
+    else:
+        # < Liberty co-ordinated project versions
+        try:
+            if 'swift' in pkg.name:
+                return get_swift_codename(vers)
+            else:
+                return OPENSTACK_CODENAMES[vers]
+        except KeyError:
+            if not fatal:
+                return None
+            e = 'Could not determine OpenStack codename for version %s' % vers
+            error_out(e)
+
+
+def get_os_version_package(pkg, fatal=True):
+    '''Derive OpenStack version number from an installed package.'''
+    codename = get_os_codename_package(pkg, fatal=fatal)
+
+    if not codename:
+        return None
+
+    if 'swift' in pkg:
+        vers_map = SWIFT_CODENAMES
+        for cname, version in six.iteritems(vers_map):
+            if cname == codename:
+                return version[-1]
+    else:
+        vers_map = OPENSTACK_CODENAMES
+        for version, cname in six.iteritems(vers_map):
+            if cname == codename:
+                return version
+    # e = "Could not determine OpenStack version for package: %s" % pkg
+    # error_out(e)
+
+
+def get_installed_os_version():
+    apt_install(filter_installed_packages(['openstack-release']), fatal=False)
+    print("OpenStack Release: {}".format(openstack_release()))
+    return openstack_release().get('OPENSTACK_CODENAME')
+
+
+@cached
+def openstack_release():
+    """Return /etc/os-release in a dict."""
+    d = {}
+    try:
+        with open('/etc/openstack-release', 'r') as lsb:
+            for l in lsb:
+                s = l.split('=')
+                if len(s) != 2:
+                    continue
+                d[s[0].strip()] = s[1].strip()
+    except FileNotFoundError:
+        pass
+    return d
+
+
+# Module local cache variable for the os_release.
+_os_rel = None
+
+
+def reset_os_release():
+    '''Unset the cached os_release version'''
+    global _os_rel
+    _os_rel = None
+
+
+def os_release(package, base=None, reset_cache=False, source_key=None):
+    """Returns OpenStack release codename from a cached global.
+
+    If reset_cache then unset the cached os_release version and return the
+    freshly determined version.
+
+    If the codename can not be determined from either an installed package or
+    the installation source, the earliest release supported by the charm should
+    be returned.
+
+    :param package: Name of package to determine release from
+    :type package: str
+    :param base: Fallback codename if attempts to determine from package fail
+    :type base: Optional[str]
+    :param reset_cache: Reset any cached codename value
+    :type reset_cache: bool
+    :param source_key: Name of source configuration option
+                       (default: 'openstack-origin')
+    :type source_key: Optional[str]
+    :returns: OpenStack release codename
+    :rtype: str
+    """
+    source_key = source_key or 'openstack-origin'
+    if not base:
+        base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']]
+    global _os_rel
+    if reset_cache:
+        reset_os_release()
+    if _os_rel:
+        return _os_rel
+    _os_rel = (
+        get_os_codename_package(package, fatal=False) or
+        get_os_codename_install_source(config(source_key)) or
+        base)
+    return _os_rel
+
+
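+# A minimal usage sketch (never called): os_release() resolves the codename
+# once and caches it; 'keystone' and the 'queens' base are illustrative.
+def _example_os_release_usage():
+    release = os_release('keystone', base='queens')
+    if CompareOpenStackReleases(release) >= 'ussuri':
+        pass  # e.g. enable behaviour only valid from ussuri onwards
+
+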
+@deprecate("moved to charmhelpers.fetch.import_key()", "2017-07", log=juju_log)
+def import_key(keyid):
+    """Import a key, either ASCII armored, or a GPG key id.
+
+    @param keyid: the key in ASCII armor format, or a GPG key id.
+    @raises SystemExit() via sys.exit() on failure.
+    """
+    try:
+        return fetch_import_key(keyid)
+    except GPGKeyError as e:
+        error_out("Could not import key: {}".format(str(e)))
+
+
+def get_source_and_pgp_key(source_and_key):
+    """Look for a pgp key ID or ascii-armor key in the given input.
+
+    :param source_and_key: String, "source_spec|keyid" where '|keyid' is
+        optional.
+    :returns (source_spec, key_id OR None) as a tuple.  Returns None for key_id
+        if there was no '|' in the source_and_key string.
+    """
+    try:
+        source, key = source_and_key.split('|', 1)
+        return source, key or None
+    except ValueError:
+        return source_and_key, None
+
+
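+# A minimal sketch (never called) of the 'source_spec|keyid' split performed
+# by get_source_and_pgp_key(); the deb line and key id are hypothetical.
+def _example_source_and_key_split():
+    assert get_source_and_pgp_key('distro') == ('distro', None)
+    assert get_source_and_pgp_key(
+        'deb http://example.com/ubuntu focal main|ABCDEF12') == (
+        'deb http://example.com/ubuntu focal main', 'ABCDEF12')
+
+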
+@deprecate("use charmhelpers.fetch.add_source() instead.",
+           "2017-07", log=juju_log)
+def configure_installation_source(source_plus_key):
+    """Configure an installation source.
+
+    The functionality is provided by charmhelpers.fetch.add_source()
+    The difference between the two functions is that add_source() signature
+    requires the key to be passed directly, whereas this function passes an
+    optional key by appending '|<key>' to the end of the source specificiation
+    'source'.
+
+    Another difference from add_source() is that this function calls
+    sys.exit(1) if the configuration fails, whereas add_source() raises
+    SourceConfigError().  Also, add_source() silently fails (with a juju_log
+    message) if there is no matching source to configure, whereas this
+    function fails with sys.exit(1).
+
+    :param source_plus_key: String of the form "source_spec|keyid"; see above.
+
+    Note that the behaviour on error is to log the error to the juju log and
+    then call sys.exit(1).
+    """
+    if source_plus_key.startswith('snap'):
+        # Do nothing for snap installs
+        return
+    # extract the key if there is one, denoted by a '|' in the rel
+    source, key = get_source_and_pgp_key(source_plus_key)
+
+    # handle the ordinary sources via add_source
+    try:
+        fetch_add_source(source, key, fail_invalid=True)
+    except SourceConfigError as se:
+        error_out(str(se))
+
+
+def config_value_changed(option):
+    """
+    Determine if config value changed since last call to this function.
+    """
+    hook_data = unitdata.HookData()
+    with hook_data():
+        db = unitdata.kv()
+        current = config(option)
+        saved = db.get(option)
+        db.set(option, current)
+        if saved is None:
+            return False
+        return current != saved
+
+
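+# A minimal usage sketch (never called): config_value_changed() records the
+# option in the unit's kv store, so it only reports True once per change.
+def _example_config_value_changed():
+    if config_value_changed('openstack-origin'):
+        pass  # e.g. re-run source configuration here
+
+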
+def get_endpoint_key(service_name, relation_id, unit_name):
+    """Return the key used to refer to an ep changed notification from a unit.
+
+    :param service_name: Service name, e.g. nova, neutron, placement, etc.
+    :type service_name: str
+    :param relation_id: The id of the relation the unit is on.
+    :type relation_id: str
+    :param unit_name: The name of the unit publishing the notification.
+    :type unit_name: str
+    :returns: The key used to refer to an ep changed notification from a unit
+    :rtype: str
+    """
+    return '{}-{}-{}'.format(
+        service_name,
+        relation_id.replace(':', '_'),
+        unit_name.replace('/', '_'))
+
+
+def get_endpoint_notifications(service_names, rel_name='identity-service'):
+    """Return all notifications for the given services.
+
+    :param service_names: List of service names.
+    :type service_names: List[str]
+    :param rel_name: Name of the relation to query
+    :type rel_name: str
+    :returns: A dict containing the source of the notification and its nonce.
+    :rtype: Dict[str, str]
+    """
+    notifications = {}
+    for rid in relation_ids(rel_name):
+        for unit in related_units(relid=rid):
+            ep_changed_json = relation_get(
+                rid=rid,
+                unit=unit,
+                attribute='ep_changed')
+            if ep_changed_json:
+                ep_changed = json.loads(ep_changed_json)
+                for service in service_names:
+                    if ep_changed.get(service):
+                        key = get_endpoint_key(service, rid, unit)
+                        notifications[key] = ep_changed[service]
+    return notifications
+
+
+def endpoint_changed(service_name, rel_name='identity-service'):
+    """Whether a new notification has been recieved for an endpoint.
+
+    :param service_name: Service name, e.g. nova, neutron, placement, etc.
+    :type service_name: str
+    :param rel_name: Name of the relation to query
+    :type rel_name: str
+    :returns: Whether endpoint has changed
+    :rtype: bool
+    """
+    changed = False
+    with unitdata.HookData()() as t:
+        db = t[0]
+        notifications = get_endpoint_notifications(
+            [service_name],
+            rel_name=rel_name)
+        for key, nonce in notifications.items():
+            if db.get(key) != nonce:
+                juju_log(('New endpoint change notification found: '
+                          '{}={}').format(key, nonce),
+                         'INFO')
+                changed = True
+                break
+    return changed
+
+
+def save_endpoint_changed_triggers(service_names, rel_name='identity-service'):
+    """Save the enpoint triggers in  db so it can be tracked if they changed.
+
+    :param service_names: List of service name.
+    :type service_name: List
+    :param rel_name: Name of the relation to query
+    :type rel_name: str
+    """
+    with unitdata.HookData()() as t:
+        db = t[0]
+        notifications = get_endpoint_notifications(
+            service_names,
+            rel_name=rel_name)
+        for key, nonce in notifications.items():
+            db.set(key, nonce)
+
+
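+# A minimal sketch (never called) of the notification flow: react to a new
+# nonce, then persist it so the same notification is not handled twice.
+# The 'placement' service name is illustrative.
+def _example_endpoint_notification_flow():
+    if endpoint_changed('placement'):
+        pass  # e.g. restart services that cache the placement endpoint
+    save_endpoint_changed_triggers(['placement'])
+
+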
+def save_script_rc(script_path="scripts/scriptrc", **env_vars):
+    """
+    Write an rc file in the charm-delivered directory containing
+    exported environment variables provided by env_vars. Any charm scripts run
+    outside the juju hook environment can source this scriptrc to obtain
+    updated config information necessary to perform health checks or
+    service changes.
+    """
+    juju_rc_path = "%s/%s" % (charm_dir(), script_path)
+    if not os.path.exists(os.path.dirname(juju_rc_path)):
+        os.mkdir(os.path.dirname(juju_rc_path))
+    with open(juju_rc_path, 'wt') as rc_script:
+        rc_script.write(
+            "#!/bin/bash\n")
+        for u, p in six.iteritems(env_vars):
+            if u != "script_path":
+                rc_script.write('export %s=%s\n' % (u, p))
+
+
+def openstack_upgrade_available(package):
+    """
+    Determines if an OpenStack upgrade is available from installation
+    source, based on version of installed package.
+
+    :param package: str: Name of installed package.
+
+    :returns: bool:    : Returns True if configured installation source offers
+                         a newer version of package.
+    """
+
+    src = config('openstack-origin')
+    cur_vers = get_os_version_package(package)
+    if not cur_vers:
+        # Package has not been installed yet; do not attempt an upgrade
+        return False
+    if "swift" in package:
+        codename = get_os_codename_install_source(src)
+        avail_vers = get_os_version_codename_swift(codename)
+    else:
+        try:
+            avail_vers = get_os_version_install_source(src)
+        except Exception:
+            avail_vers = cur_vers
+    apt.init()
+    return apt.version_compare(avail_vers, cur_vers) >= 1
+
+
+def ensure_block_device(block_device):
+    '''
+    Confirm block_device, create as loopback if necessary.
+
+    :param block_device: str: Full path of block device to ensure.
+
+    :returns: str: Full path of ensured block device.
+    '''
+    _none = ['None', 'none', None]
+    if (block_device in _none):
+        error_out('prepare_storage(): Missing required input: block_device=%s.'
+                  % block_device)
+
+    if block_device.startswith('/dev/'):
+        bdev = block_device
+    elif block_device.startswith('/'):
+        _bd = block_device.split('|')
+        if len(_bd) == 2:
+            bdev, size = _bd
+        else:
+            bdev = block_device
+            size = DEFAULT_LOOPBACK_SIZE
+        bdev = ensure_loopback_device(bdev, size)
+    else:
+        bdev = '/dev/%s' % block_device
+
+    if not is_block_device(bdev):
+        error_out('Failed to locate valid block device at %s' % bdev)
+
+    return bdev
+
+
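+# A minimal sketch (never called) of the block device specs accepted by
+# ensure_block_device(); the image path and size are hypothetical.
+def _example_ensure_block_device():
+    # '/dev/...' paths pass through; other absolute paths become loopback
+    # devices, optionally sized with a '|<size>' suffix:
+    bdev = ensure_block_device('/var/lib/demo.img|10G')
+    clean_storage(bdev)
+
+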
+def clean_storage(block_device):
+    '''
+    Ensures a block device is clean.  That is:
+        - unmounted
+        - any lvm volume groups are deactivated
+        - any lvm physical device signatures removed
+        - partition table wiped
+
+    :param block_device: str: Full path to block device to clean.
+    '''
+    for mp, d in mounts():
+        if d == block_device:
+            juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
+                     (d, mp), level=INFO)
+            umount(mp, persist=True)
+
+    if is_lvm_physical_volume(block_device):
+        deactivate_lvm_volume_group(block_device)
+        remove_lvm_physical_volume(block_device)
+    else:
+        zap_disk(block_device)
+
+
+is_ip = ip.is_ip
+ns_query = ip.ns_query
+get_host_ip = ip.get_host_ip
+get_hostname = ip.get_hostname
+
+
+def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
+    mm_map = {}
+    if os.path.isfile(mm_file):
+        with open(mm_file, 'r') as f:
+            mm_map = json.load(f)
+    return mm_map
+
+
+def sync_db_with_multi_ipv6_addresses(database, database_user,
+                                      relation_prefix=None):
+    hosts = get_ipv6_addr(dynamic_only=False)
+
+    if config('vip'):
+        vips = config('vip').split()
+        for vip in vips:
+            if vip and is_ipv6(vip):
+                hosts.append(vip)
+
+    kwargs = {'database': database,
+              'username': database_user,
+              'hostname': json.dumps(hosts)}
+
+    if relation_prefix:
+        for key in list(kwargs.keys()):
+            kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
+            del kwargs[key]
+
+    for rid in relation_ids('shared-db'):
+        relation_set(relation_id=rid, **kwargs)
+
+
+def os_requires_version(ostack_release, pkg):
+    """
+    Decorator for hook to specify minimum supported release
+    """
+    def wrap(f):
+        @wraps(f)
+        def wrapped_f(*args):
+            if os_release(pkg) < ostack_release:
+                raise Exception("This hook is not supported on releases"
+                                " before %s" % ostack_release)
+            f(*args)
+        return wrapped_f
+    return wrap
+
+
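+# A minimal usage sketch (never called): the decorated hook raises if the
+# release derived from 'keystone' predates mitaka; names are illustrative.
+@os_requires_version('mitaka', 'keystone')
+def _example_mitaka_or_later_hook():
+    pass
+
+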
+def os_workload_status(configs, required_interfaces, charm_func=None):
+    """
+    Decorator to set workload status based on complete contexts
+    """
+    def wrap(f):
+        @wraps(f)
+        def wrapped_f(*args, **kwargs):
+            # Run the original function first
+            f(*args, **kwargs)
+            # Set workload status now that contexts have been
+            # acted on
+            set_os_workload_status(configs, required_interfaces, charm_func)
+        return wrapped_f
+    return wrap
+
+
+def set_os_workload_status(configs, required_interfaces, charm_func=None,
+                           services=None, ports=None):
+    """Set the state of the workload status for the charm.
+
+    This calls _determine_os_workload_status() to get the new state, message
+    and sets the status using status_set()
+
+    @param configs: a templating.OSConfigRenderer() object
+    @param required_interfaces: {generic: [specific, specific2, ...]}
+    @param charm_func: a callable function that returns state, message. The
+                       signature is charm_func(configs) -> (state, message)
+    @param services: list of strings OR dictionary specifying services/ports
+    @param ports: OPTIONAL list of port numbers.
+    @returns state, message: the new workload status, user message
+    """
+    state, message = _determine_os_workload_status(
+        configs, required_interfaces, charm_func, services, ports)
+    status_set(state, message)
+
+
+def _determine_os_workload_status(
+        configs, required_interfaces, charm_func=None,
+        services=None, ports=None):
+    """Determine the state of the workload status for the charm.
+
+    This function returns the new workload status for the charm based
+    on the state of the interfaces, the paused state and whether the
+    services are actually running and any specified ports are open.
+
+    This checks:
+
+     1. if the unit should be paused, that it is actually paused.  If so the
+        state is 'maintenance' + message, else 'broken'.
+     2. that the interfaces/relations are complete.  If they are not then
+        it sets the state to either 'broken' or 'waiting' and an appropriate
+        message.
+     3. If all the relation data is set, then it checks that the actual
+        services really are running.  If not it sets the state to 'broken'.
+
+    If everything is okay then the state returns 'active'.
+
+    @param configs: a templating.OSConfigRenderer() object
+    @param required_interfaces: {generic: [specific, specific2, ...]}
+    @param charm_func: a callable function that returns state, message. The
+                       signature is charm_func(configs) -> (state, message)
+    @param services: list of strings OR dictionary specifying services/ports
+    @param ports: OPTIONAL list of port numbers.
+    @returns state, message: the new workload status, user message
+    """
+    state, message = _ows_check_if_paused(services, ports)
+
+    if state is None:
+        state, message = _ows_check_generic_interfaces(
+            configs, required_interfaces)
+
+    if state != 'maintenance' and charm_func:
+        # _ows_check_charm_func() may modify the state, message
+        state, message = _ows_check_charm_func(
+            state, message, lambda: charm_func(configs))
+
+    if state is None:
+        state, message = _ows_check_services_running(services, ports)
+
+    if state is None:
+        state = 'active'
+        message = "Unit is ready"
+        juju_log(message, 'INFO')
+
+    try:
+        if config(POLICYD_CONFIG_NAME):
+            message = "{} {}".format(policyd_status_message_prefix(), message)
+    except Exception:
+        pass
+
+    return state, message
+
+
+def _ows_check_if_paused(services=None, ports=None):
+    """Check if the unit is supposed to be paused, and if so check that the
+    services/ports (if passed) are actually stopped/not being listened to.
+
+    If the unit isn't supposed to be paused, just return None, None
+
+    If the unit is performing a series upgrade, return a message indicating
+    this.
+
+    @param services: OPTIONAL services spec or list of service names.
+    @param ports: OPTIONAL list of port numbers.
+    @returns state, message or None, None
+    """
+    if is_unit_upgrading_set():
+        state, message = check_actually_paused(services=services,
+                                               ports=ports)
+        if state is None:
+            # we're paused okay, so set blocked and return
+            state = "blocked"
+            message = ("Ready for do-release-upgrade and reboot. "
+                       "Set complete when finished.")
+        return state, message
+
+    if is_unit_paused_set():
+        state, message = check_actually_paused(services=services,
+                                               ports=ports)
+        if state is None:
+            # we're paused okay, so set maintenance and return
+            state = "maintenance"
+            message = "Paused. Use 'resume' action to resume normal service."
+        return state, message
+    return None, None
+
+
+def _ows_check_generic_interfaces(configs, required_interfaces):
+    """Check the complete contexts to determine the workload status.
+
+     - Checks for missing or incomplete contexts
+     - juju log details of missing required data.
+     - determines the correct workload status
+     - creates an appropriate message for status_set(...)
+
+    if there are no problems then the function returns None, None
+
+    @param configs: a templating.OSConfigRenderer() object
+    @params required_interfaces: {generic_interface: [specific_interface], }
+    @returns state, message or None, None
+    """
+    incomplete_rel_data = incomplete_relation_data(configs,
+                                                   required_interfaces)
+    state = None
+    message = None
+    missing_relations = set()
+    incomplete_relations = set()
+
+    for generic_interface, relations_states in incomplete_rel_data.items():
+        related_interface = None
+        missing_data = {}
+        # Related or not?
+        for interface, relation_state in relations_states.items():
+            if relation_state.get('related'):
+                related_interface = interface
+                missing_data = relation_state.get('missing_data')
+                break
+        # No relation ID for the generic_interface?
+        if not related_interface:
+            juju_log("{} relation is missing and must be related for "
+                     "functionality. ".format(generic_interface), 'WARN')
+            state = 'blocked'
+            missing_relations.add(generic_interface)
+        else:
+            # Relation ID exists but no related unit
+            if not missing_data:
+                # Edge case - relation ID exists but is departing/broken
+                _hook_name = hook_name()
+                if (('departed' in _hook_name or 'broken' in _hook_name) and
+                        related_interface in _hook_name):
+                    state = 'blocked'
+                    missing_relations.add(generic_interface)
+                    juju_log("{} relation's interface, {}, "
+                             "relationship is departed or broken "
+                             "and is required for functionality."
+                             "".format(generic_interface, related_interface),
+                             "WARN")
+                # Normal case relation ID exists but no related unit
+                # (joining)
+                else:
+                    juju_log("{} relations's interface, {}, is related but has"
+                             " no units in the relation."
+                             "".format(generic_interface, related_interface),
+                             "INFO")
+            # Related unit exists and data missing on the relation
+            else:
+                juju_log("{} relation's interface, {}, is related awaiting "
+                         "the following data from the relationship: {}. "
+                         "".format(generic_interface, related_interface,
+                                   ", ".join(missing_data)), "INFO")
+            if state != 'blocked':
+                state = 'waiting'
+            if generic_interface not in missing_relations:
+                incomplete_relations.add(generic_interface)
+
+    if missing_relations:
+        message = "Missing relations: {}".format(", ".join(missing_relations))
+        if incomplete_relations:
+            message += "; incomplete relations: {}" \
+                       "".format(", ".join(incomplete_relations))
+        state = 'blocked'
+    elif incomplete_relations:
+        message = "Incomplete relations: {}" \
+                  "".format(", ".join(incomplete_relations))
+        state = 'waiting'
+
+    return state, message
+
+
+def _ows_check_charm_func(state, message, charm_func_with_configs):
+    """Run a custom check function for the charm to see if it wants to
+    change the state.  This is only run if not in 'maintenance' and
+    tests to see if the new state is more important that the previous
+    one determined by the interfaces/relations check.
+
+    @param state: the previously determined state so far.
+    @param message: the user-orientated message so far.
+    @param charm_func: a callable function that returns state, message
+    @returns state, message strings.
+    """
+    if charm_func_with_configs:
+        charm_state, charm_message = charm_func_with_configs()
+        if (charm_state != 'active' and
+                charm_state != 'unknown' and
+                charm_state is not None):
+            state = workload_state_compare(state, charm_state)
+            if message:
+                charm_message = charm_message.replace("Incomplete relations: ",
+                                                      "")
+                message = "{}, {}".format(message, charm_message)
+            else:
+                message = charm_message
+    return state, message
+
+
+def _ows_check_services_running(services, ports):
+    """Check that the services that should be running are actually running
+    and that any ports specified are being listened to.
+
+    @param services: list of strings OR dictionary specifying services/ports
+    @param ports: list of ports
+    @returns state, message: strings or None, None
+    """
+    messages = []
+    state = None
+    if services is not None:
+        services = _extract_services_list_helper(services)
+        services_running, running = _check_running_services(services)
+        if not all(running):
+            messages.append(
+                "Services not running that should be: {}"
+                .format(", ".join(_filter_tuples(services_running, False))))
+            state = 'blocked'
+        # also verify that the ports that should be open are open
+        # NB: ServiceManager objects only OPTIONALLY have ports
+        map_not_open, ports_open = (
+            _check_listening_on_services_ports(services))
+        if not all(ports_open):
+            # find which service has missing ports. They are in service
+            # order which makes it a bit easier.
+            message_parts = {service: ", ".join([str(v) for v in open_ports])
+                             for service, open_ports in map_not_open.items()}
+            message = ", ".join(
+                ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
+            messages.append(
+                "Services with ports not open that should be: {}"
+                .format(message))
+            state = 'blocked'
+
+    if ports is not None:
+        # and we can also check ports which we don't know the service for
+        ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
+        if not all(ports_open_bools):
+            messages.append(
+                "Ports which should be open, but are not: {}"
+                .format(", ".join([str(p) for p, v in ports_open
+                                   if not v])))
+            state = 'blocked'
+
+    if state is not None:
+        message = "; ".join(messages)
+        return state, message
+
+    return None, None
+
+
+def _extract_services_list_helper(services):
+    """Extract a OrderedDict of {service: [ports]} of the supplied services
+    for use by the other functions.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    @param services: see above
+    @returns OrderedDict(service: [ports], ...)
+    """
+    if services is None:
+        return {}
+    if isinstance(services, dict):
+        services = services.values()
+    # either extract the list of services from the dictionary, or if
+    # it is a simple string, use that. i.e. works with mixed lists.
+    _s = OrderedDict()
+    for s in services:
+        if isinstance(s, dict) and 'service' in s:
+            _s[s['service']] = s.get('ports', [])
+        if isinstance(s, str):
+            _s[s] = []
+    return _s
+
+
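+# A minimal sketch (never called) of the service spec forms above being
+# normalised; 'apache2' is an illustrative service name.
+def _example_services_spec_forms():
+    assert _extract_services_list_helper(['apache2']) == {'apache2': []}
+    assert _extract_services_list_helper(
+        [{'service': 'apache2', 'ports': [80, 443]}]) == {
+        'apache2': [80, 443]}
+
+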
+def _check_running_services(services):
+    """Check that the services dict provided is actually running and provide
+    a list of (service, boolean) tuples for each service.
+
+    Returns both a zipped list of (service, boolean) and a list of booleans
+    in the same order as the services.
+
+    @param services: OrderedDict of strings: [ports], one for each service to
+                     check.
+    @returns [(service, boolean), ...], : results for checks
+             [boolean]                  : just the result of the service checks
+    """
+    services_running = [service_running(s) for s in services]
+    return list(zip(services, services_running)), services_running
+
+
+def _check_listening_on_services_ports(services, test=False):
+    """Check that the unit is actually listening (has the port open) on the
+    ports that the service specifies are open. If test is True then the
+    function returns the services with ports that are open rather than
+    closed.
+
+    Returns an OrderedDict of service: ports and a list of booleans
+
+    @param services: OrderedDict(service: [port, ...], ...)
+    @param test: default=False, if False, test for closed, otherwise open.
+    @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
+    """
+    test = bool(test)  # ensure test is True or False
+    all_ports = list(itertools.chain(*services.values()))
+    ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
+    map_ports = OrderedDict()
+    matched_ports = [p for p, opened in zip(all_ports, ports_states)
+                     if opened == test]  # essentially opened xor test
+    for service, ports in services.items():
+        set_ports = set(ports).intersection(matched_ports)
+        if set_ports:
+            map_ports[service] = set_ports
+    return map_ports, ports_states
+
+
+def _check_listening_on_ports_list(ports):
+    """Check that the ports list given are being listened to
+
+    Returns a list of ports being listened to and a list of the
+    booleans.
+
+    @param ports: LIST of port numbers.
+    @returns [(port_num, boolean), ...], [boolean]
+    """
+    ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
+    return zip(ports, ports_open), ports_open
+
+
+def _filter_tuples(services_states, state):
+    """Return a simple list from a list of tuples according to the condition
+
+    @param services_states: LIST of (string, boolean): service and running
+           state.
+    @param state: Boolean to match the tuple against.
+    @returns [LIST of strings] that matched the tuple RHS.
+    """
+    return [s for s, b in services_states if b == state]
+
+
+def workload_state_compare(current_workload_state, workload_state):
+    """ Return highest priority of two states"""
+    hierarchy = {'unknown': -1,
+                 'active': 0,
+                 'maintenance': 1,
+                 'waiting': 2,
+                 'blocked': 3,
+                 }
+
+    if hierarchy.get(workload_state) is None:
+        workload_state = 'unknown'
+    if hierarchy.get(current_workload_state) is None:
+        current_workload_state = 'unknown'
+
+    # Set workload_state based on hierarchy of statuses
+    if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
+        return current_workload_state
+    else:
+        return workload_state
+
+
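+# A minimal sketch (never called) of the status hierarchy used by
+# workload_state_compare().
+def _example_workload_state_compare():
+    # 'blocked' outranks 'waiting':
+    assert workload_state_compare('waiting', 'blocked') == 'blocked'
+    # unrecognised states are treated as 'unknown' (lowest priority):
+    assert workload_state_compare('bogus', 'active') == 'active'
+
+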
+def incomplete_relation_data(configs, required_interfaces):
+    """Check complete contexts against required_interfaces
+    Return dictionary of incomplete relation data.
+
+    configs is an OSConfigRenderer object with configs registered
+
+    required_interfaces is a dictionary of required general interfaces
+    with dictionary values of possible specific interfaces.
+    Example:
+    required_interfaces = {'database': ['shared-db', 'pgsql-db']}
+
+    The interface is said to be satisfied if any one of the interfaces in
+    the list has a complete context.
+
+    Return dictionary of incomplete or missing required contexts with relation
+    status of interfaces and any missing data points. Example:
+        {'message':
+             {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
+              'zeromq-configuration': {'related': False}},
+         'identity':
+             {'identity-service': {'related': False}},
+         'database':
+             {'pgsql-db': {'related': False},
+              'shared-db': {'related': True}}}
+    """
+    complete_ctxts = configs.complete_contexts()
+    incomplete_relations = [
+        svc_type
+        for svc_type, interfaces in required_interfaces.items()
+        if not set(interfaces).intersection(complete_ctxts)]
+    return {
+        i: configs.get_incomplete_context_data(required_interfaces[i])
+        for i in incomplete_relations}
+
+
+def do_action_openstack_upgrade(package, upgrade_callback, configs):
+    """Perform action-managed OpenStack upgrade.
+
+    Upgrades packages to the configured openstack-origin version and sets
+    the corresponding action status as a result.
+
+    If the charm was installed from source we cannot upgrade it.
+    For backwards compatibility a config flag (action-managed-upgrade) must
+    be set for this code to run, otherwise a full service level upgrade will
+    fire on config-changed.
+
+    @param package: package name for determining if upgrade available
+    @param upgrade_callback: function callback to charm's upgrade function
+    @param configs: templating object derived from OSConfigRenderer class
+
+    @return: True if upgrade successful; False if upgrade failed or skipped
+    """
+    ret = False
+
+    if openstack_upgrade_available(package):
+        if config('action-managed-upgrade'):
+            juju_log('Upgrading OpenStack release')
+
+            try:
+                upgrade_callback(configs=configs)
+                action_set({'outcome': 'success, upgrade completed.'})
+                ret = True
+            except Exception:
+                action_set({'outcome': 'upgrade failed, see traceback.'})
+                action_set({'traceback': traceback.format_exc()})
+                action_fail('do_openstack_upgrade resulted in an '
+                            'unexpected error')
+        else:
+            action_set({'outcome': 'action-managed-upgrade config is '
+                                   'False, skipped upgrade.'})
+    else:
+        action_set({'outcome': 'no upgrade available.'})
+
+    return ret
+
+
+def remote_restart(rel_name, remote_service=None):
+    trigger = {
+        'restart-trigger': str(uuid.uuid4()),
+    }
+    if remote_service:
+        trigger['remote-service'] = remote_service
+    for rid in relation_ids(rel_name):
+        # This subordinate can be related to two separate services using
+        # different subordinate relations so only issue the restart if
+        # the principal is connected down the relation we think it is
+        if related_units(relid=rid):
+            relation_set(relation_id=rid,
+                         relation_settings=trigger,
+                         )
+
+
+def check_actually_paused(services=None, ports=None):
+    """Check that services listed in the services object and ports
+    are actually closed (not listened to), to verify that the unit is
+    properly paused.
+
+    @param services: See _extract_services_list_helper
+    @returns status, : string for status (None if okay)
+             message : string for problem for status_set
+    """
+    state = None
+    message = None
+    messages = []
+    if services is not None:
+        services = _extract_services_list_helper(services)
+        services_running, services_states = _check_running_services(services)
+        if any(services_states):
+            # there shouldn't be any running so this is a problem
+            messages.append("these services running: {}"
+                            .format(", ".join(
+                                _filter_tuples(services_running, True))))
+            state = "blocked"
+        ports_open, ports_open_bools = (
+            _check_listening_on_services_ports(services, True))
+        if any(ports_open_bools):
+            message_parts = {service: ", ".join([str(v) for v in open_ports])
+                             for service, open_ports in ports_open.items()}
+            message = ", ".join(
+                ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
+            messages.append(
+                "these service:ports are open: {}".format(message))
+            state = 'blocked'
+    if ports is not None:
+        ports_open, bools = _check_listening_on_ports_list(ports)
+        if any(bools):
+            messages.append(
+                "these ports which should be closed, but are open: {}"
+                .format(", ".join([str(p) for p, v in ports_open if v])))
+            state = 'blocked'
+    if messages:
+        message = ("Services should be paused but {}"
+                   .format(", ".join(messages)))
+    return state, message
+
+
+def set_unit_paused():
+    """Set the unit to a paused state in the local kv() store.
+    This does NOT actually pause the unit
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-paused', True)
+
+
+def clear_unit_paused():
+    """Clear the unit from a paused state in the local kv() store
+    This does NOT actually restart any services - it only clears the
+    local state.
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-paused', False)
+
+
+def is_unit_paused_set():
+    """Return the state of the kv().get('unit-paused').
+    This does NOT verify that the unit really is paused.
+
+    To help with units that don't have HookData() (testing)
+    if it excepts, return False
+    """
+    try:
+        with unitdata.HookData()() as t:
+            kv = t[0]
+            # transform something truth-y into a Boolean.
+            return bool(kv.get('unit-paused'))
+    except Exception:
+        return False
+
+
+def manage_payload_services(action, services=None, charm_func=None):
+    """Run an action against all services.
+
+    An optional charm_func() can be called. It should raise an Exception to
+    indicate that the function failed. If it was successful it should return
+    None or an optional message.
+
+    The signature for charm_func is:
+    charm_func() -> message: str
+
+    charm_func() is executed after any services are stopped, if supplied.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    :param action: Action to run: pause, resume, start or stop.
+    :type action: str
+    :param services: See above
+    :type services: See above
+    :param charm_func: function to run for custom charm pausing.
+    :type charm_func: f()
+    :returns: Status boolean and list of messages
+    :rtype: (bool, [])
+    :raises: RuntimeError
+    """
+    actions = {
+        'pause': service_pause,
+        'resume': service_resume,
+        'start': service_start,
+        'stop': service_stop}
+    action = action.lower()
+    if action not in actions.keys():
+        raise RuntimeError(
+            "action: {} must be one of: {}".format(action,
+                                                   ', '.join(actions.keys())))
+    services = _extract_services_list_helper(services)
+    messages = []
+    success = True
+    if services:
+        for service in services.keys():
+            rc = actions[action](service)
+            if not rc:
+                success = False
+                messages.append("{} didn't {} cleanly.".format(service,
+                                                               action))
+    if charm_func:
+        try:
+            message = charm_func()
+            if message:
+                messages.append(message)
+        except Exception as e:
+            success = False
+            messages.append(str(e))
+    return success, messages
+
+
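+# A minimal usage sketch (never called); the service names are illustrative.
+def _example_manage_payload_services():
+    ok, messages = manage_payload_services(
+        'stop', services=['apache2', 'haproxy'])
+    if not ok:
+        juju_log('; '.join(messages), level='ERROR')
+
+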
+def make_wait_for_ports_barrier(ports, retry_count=5):
+    """Make a function to wait for port shutdowns.
+
+    Create a function which closes over the provided ports. The function will
+    retry probing ports until they are closed or the retry count has been reached.
+
+    """
+    @decorators.retry_on_predicate(retry_count, operator.not_, base_delay=0.1)
+    def retry_port_check():
+        _, ports_states = _check_listening_on_ports_list(ports)
+        juju_log("Probe ports {}, result: {}".format(ports, ports_states), level="DEBUG")
+        return any(ports_states)
+    return retry_port_check
+
+
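+# A minimal usage sketch (never called): the returned callable retries until
+# nothing is listening on the given (illustrative) ports.
+def _example_wait_for_ports_barrier():
+    barrier = make_wait_for_ports_barrier([80, 443], retry_count=3)
+    barrier()
+
+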
+def pause_unit(assess_status_func, services=None, ports=None,
+               charm_func=None):
+    """Pause a unit by stopping the services and setting 'unit-paused'
+    in the local kv() store.
+
+    Also checks that the services have stopped and ports are no longer
+    being listened to.
+
+    An optional charm_func() can be called that can either raise an
+    Exception or return a non-None message to indicate that the unit
+    didn't pause cleanly.
+
+    The signature for charm_func is:
+    charm_func() -> message: string
+
+    charm_func() is executed after any services are stopped, if supplied.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    @param assess_status_func: (f() -> message: string | None) or None
+    @param services: OPTIONAL see above
+    @param ports: OPTIONAL list of port
+    @param charm_func: function to run for custom charm pausing.
+    @returns None
+    @raises Exception(message) on an error for action_fail().
+    """
+    _, messages = manage_payload_services(
+        'pause',
+        services=services,
+        charm_func=charm_func)
+    set_unit_paused()
+
+    if assess_status_func:
+        message = assess_status_func()
+        if message:
+            messages.append(message)
+    if messages and not is_unit_upgrading_set():
+        raise Exception("Couldn't pause: {}".format("; ".join(messages)))
+
+
+def resume_unit(assess_status_func, services=None, ports=None,
+                charm_func=None):
+    """Resume a unit by starting the services and clearning 'unit-paused'
+    in the local kv() store.
+
+    Also checks that the services have started and ports are being listened to.
+
+    An optional charm_func() can be called that can either raise an
+    Exception or return a non-None message to indicate that the unit
+    didn't resume cleanly.
+
+    The signature for charm_func is:
+    charm_func() -> message: string
+
+    charm_func() is executed after any services are started, if supplied.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    @param assess_status_func: (f() -> message: string | None) or None
+    @param services: OPTIONAL see above
+    @param ports: OPTIONAL list of port
+    @param charm_func: function to run for custom charm resuming.
+    @returns None
+    @raises Exception(message) on an error for action_fail().
+    """
+    _, messages = manage_payload_services(
+        'resume',
+        services=services,
+        charm_func=charm_func)
+    clear_unit_paused()
+    if assess_status_func:
+        message = assess_status_func()
+        if message:
+            messages.append(message)
+    if messages:
+        raise Exception("Couldn't resume: {}".format("; ".join(messages)))
+
+
+def make_assess_status_func(*args, **kwargs):
+    """Creates an assess_status_func() suitable for handing to pause_unit()
+    and resume_unit().
+
+    This uses the _determine_os_workload_status(...) function to determine
+    what the workload_status should be for the unit.  If the unit is
+    not in maintenance or active states, then the message is returned to
+    the caller.  This is so an action that doesn't result in either a
+    complete pause or complete resume can signal failure with an
+    action_fail().
+    """
+    def _assess_status_func():
+        state, message = _determine_os_workload_status(*args, **kwargs)
+        status_set(state, message)
+        if state not in ['maintenance', 'active']:
+            return message
+        return None
+
+    return _assess_status_func
+
+
+def pausable_restart_on_change(restart_map, stopstart=False,
+                               restart_functions=None):
+    """A restart_on_change decorator that checks to see if the unit is
+    paused. If it is paused then the decorated function doesn't fire.
+
+    This is provided as a helper, as the @restart_on_change(...) decorator
+    is in core.host, yet the openstack specific helpers are in this file
+    (contrib.openstack.utils).  Thus, this needs to be an optional feature
+    for openstack charms (or charms that wish to use the openstack
+    pause/resume type features).
+
+    It is used as follows:
+
+        from contrib.openstack.utils import (
+            pausable_restart_on_change as restart_on_change)
+
+        @restart_on_change(restart_map, stopstart=<boolean>)
+        def some_hook(...):
+            pass
+
+    see core.utils.restart_on_change() for more details.
+
+    Note restart_map can be a callable, in which case, restart_map is only
+    evaluated at runtime.  This means that it is lazy and the underlying
+    function won't be called if the decorated function is never called.  Note,
+    retains backwards compatibility for passing a non-callable dictionary.
+
+    @param f: the function to decorate
+    @param restart_map: (optionally callable, which then returns the
+        restart_map) the restart map {conf_file: [services]}
+    @param stopstart: DEFAULT false; whether to stop, start or just restart
+    @returns decorator to use a restart_on_change with pausability
+    """
+    def wrap(f):
+        # py27 compatible nonlocal variable.  When py3 only, replace with
+        # nonlocal keyword
+        __restart_map_cache = {'cache': None}
+
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            if is_unit_paused_set():
+                return f(*args, **kwargs)
+            if __restart_map_cache['cache'] is None:
+                __restart_map_cache['cache'] = restart_map() \
+                    if callable(restart_map) else restart_map
+            # otherwise, normal restart_on_change functionality
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), __restart_map_cache['cache'],
+                stopstart, restart_functions)
+        return wrapped_f
+    return wrap
+
+
+def ordered(orderme):
+    """Converts the provided dictionary into a collections.OrderedDict.
+
+    The items in the returned OrderedDict will be inserted based on the
+    natural sort order of the keys. Nested dictionaries will also be sorted
+    in order to ensure fully predictable ordering.
+
+    :param orderme: the dict to order
+    :return: collections.OrderedDict
+    :raises: ValueError: if `orderme` isn't a dict instance.
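+
+    A minimal example of the resulting ordering:
+
+        ordered({'b': 1, 'a': {'d': 4, 'c': 3}})
+        # => OrderedDict([('a', OrderedDict([('c', 3), ('d', 4)])),
+        #                 ('b', 1)])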
+    """
+    if not isinstance(orderme, dict):
+        raise ValueError('argument must be a dict type')
+
+    result = OrderedDict()
+    for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]):
+        if isinstance(v, dict):
+            result[k] = ordered(v)
+        else:
+            result[k] = v
+
+    return result
+
+
+def config_flags_parser(config_flags):
+    """Parses config flags string into dict.
+
+    This parsing method supports a few different formats for the config
+    flag values to be parsed:
+
+      1. A string in the simple format of key=value pairs, with the possibility
+         of specifying multiple key value pairs within the same string. For
+         example, a string in the format of 'key1=value1, key2=value2' will
+         return a dict of:
+
+             {'key1': 'value1', 'key2': 'value2'}.
+
+      2. A string in the above format, but supporting a comma-delimited list
+         of values for the same key. For example, a string in the format of
+         'key1=value1, key2=value3,value4,value5' will return a dict of:
+
+             {'key1': 'value1', 'key2': 'value3,value4,value5'}
+
+      3. A string containing a colon character (:) prior to an equal
+         character (=) will be treated as yaml and parsed as such. This can be
+         used to specify more complex key value pairs. For example,
+         a string in the format of 'key1: subkey1=value1, subkey2=value2' will
+         return a dict of:
+
+             {'key1': 'subkey1=value1, subkey2=value2'}
+
+    The provided config_flags string may be a comma-separated list of
+    key=value pairs, where each value may itself be a comma-separated list.
+    """
+    # If we find a colon before an equals sign then treat it as yaml.
+    # Note: limit it to finding the colon first since this indicates assignment
+    # for inline yaml.
+    colon = config_flags.find(':')
+    equals = config_flags.find('=')
+    if colon > 0:
+        if colon < equals or equals < 0:
+            return ordered(yaml.safe_load(config_flags))
+
+    if config_flags.find('==') >= 0:
+        juju_log("config_flags is not in expected format (key=value)",
+                 level=ERROR)
+        raise OSContextError
+
+    # strip the following from each value.
+    post_strippers = ' ,'
+    # we strip any leading/trailing '=' or ' ' from the string then
+    # split on '='.
+    split = config_flags.strip(' =').split('=')
+    limit = len(split)
+    flags = OrderedDict()
+    for i in range(0, limit - 1):
+        current = split[i]
+        # use a name other than ``next`` to avoid shadowing the builtin
+        next_entry = split[i + 1]
+        vindex = next_entry.rfind(',')
+        if (i == limit - 2) or (vindex < 0):
+            value = next_entry
+        else:
+            value = next_entry[:vindex]
+
+        if i == 0:
+            key = current
+        else:
+            # if this is not the first entry, expect an embedded key.
+            index = current.rfind(',')
+            if index < 0:
+                juju_log("Invalid config value(s) at index %s" % (i),
+                         level=ERROR)
+                raise OSContextError
+            key = current[index + 1:]
+
+        # Add to collection.
+        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
+
+    return flags
+
+
+def os_application_version_set(package):
+    '''Set version of application for Juju 2.0 and later'''
+    application_version = get_upstream_version(package)
+    # NOTE(jamespage) if not able to figure out package version, fallback to
+    #                 openstack codename version detection.
+    if not application_version:
+        application_version_set(os_release(package))
+    else:
+        application_version_set(application_version)
+
+
+def os_application_status_set(check_function):
+    """Run the supplied function and set the application status accordingly.
+
+    :param check_function: Function to run to get app states and messages.
+    :type check_function: function
+    """
+    state, message = check_function()
+    status_set(state, message, application=True)
+
+
+def enable_memcache(source=None, release=None, package=None):
+    """Determine if memcache should be enabled on the local unit
+
+    @param source: source string for the charm, used as a fallback to
+        derive the release
+    @param release: release of OpenStack currently deployed
+    @param package: package to derive OpenStack version deployed
+    @returns boolean Whether memcache should be enabled
+    """
+    _release = None
+    if release:
+        _release = release
+    else:
+        _release = os_release(package)
+    if not _release:
+        _release = get_os_codename_install_source(source)
+
+    return CompareOpenStackReleases(_release) >= 'mitaka'
+
+
+def token_cache_pkgs(source=None, release=None):
+    """Determine additional packages needed for token caching
+
+    @param source: source string for charm
+    @param release: release of OpenStack currently deployed
+    @returns List of package to enable token caching
+    """
+    packages = []
+    if enable_memcache(source=source, release=release):
+        packages.extend(['memcached', 'python-memcache'])
+    return packages
+
+
+def update_json_file(filename, items):
+    """Updates the json `filename` with a given dict.
+    :param filename: path to json file (e.g. /etc/glance/policy.json)
+    :param items: dict of items to update
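+
+    Example (a hypothetical policy update):
+
+        update_json_file('/etc/glance/policy.json',
+                         {'context_is_admin': 'role:admin'})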
+    """
+    if not items:
+        return
+
+    with open(filename) as fd:
+        policy = json.load(fd)
+
+    # Compare before and after and if nothing has changed don't write the file
+    # since that could cause unnecessary service restarts.
+    before = json.dumps(policy, indent=4, sort_keys=True)
+    policy.update(items)
+    after = json.dumps(policy, indent=4, sort_keys=True)
+    if before == after:
+        return
+
+    with open(filename, "w") as fd:
+        fd.write(after)
+
+
+@cached
+def snap_install_requested():
+    """ Determine if installing from snaps
+
+    If openstack-origin is of the form snap:track/channel[/branch]
+    and channel is in SNAPS_CHANNELS return True.
+    """
+    origin = config('openstack-origin') or ""
+    if not origin.startswith('snap:'):
+        return False
+
+    _src = origin[5:]
+    if '/' in _src:
+        channel = _src.split('/')[1]
+    else:
+        # Handle snap:track with no channel
+        channel = 'stable'
+    return valid_snap_channel(channel)
+
+
+def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
+    """Generate a dictionary of snap install information from origin
+
+    @param snaps: List of snaps
+    @param src: String of openstack-origin or source of the form
+        snap:track/channel
+    @param mode: String classic, devmode or jailmode
+    @returns: Dictionary of snaps with channels and modes
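+
+    Example (hypothetical snap name and origin):
+
+        get_snaps_install_info_from_origin(
+            ['openstackclients'], 'snap:latest/stable')
+        # => {'openstackclients': {'channel': '--channel=latest/stable',
+        #                          'mode': 'classic'}}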
+    """
+
+    if not src.startswith('snap:'):
+        juju_log("Snap source is not a snap origin", 'WARN')
+        return {}
+
+    _src = src[5:]
+    channel = '--channel={}'.format(_src)
+
+    return {snap: {'channel': channel, 'mode': mode}
+            for snap in snaps}
+
+
+def install_os_snaps(snaps, refresh=False):
+    """Install OpenStack snaps from channel and with mode
+
+    @param snaps: Dictionary of snaps with channels and modes of the form:
+        {'snap_name': {'channel': 'snap_channel',
+                       'mode': 'snap_mode'}}
+        Where channel is a snapstore channel and mode is --classic, --devmode
+        or --jailmode.
+    @param refresh: Whether to refresh the snaps rather than install them
+    """
+
+    def _ensure_flag(flag):
+        if flag.startswith('--'):
+            return flag
+        return '--{}'.format(flag)
+
+    if refresh:
+        for snap in snaps.keys():
+            snap_refresh(snap,
+                         _ensure_flag(snaps[snap]['channel']),
+                         _ensure_flag(snaps[snap]['mode']))
+    else:
+        for snap in snaps.keys():
+            snap_install(snap,
+                         _ensure_flag(snaps[snap]['channel']),
+                         _ensure_flag(snaps[snap]['mode']))
+
+
+def set_unit_upgrading():
+    """Set the unit to a upgrading state in the local kv() store.
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-upgrading', True)
+
+
+def clear_unit_upgrading():
+    """Clear the unit from a upgrading state in the local kv() store
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-upgrading', False)
+
+
+def is_unit_upgrading_set():
+    """Return the state of the kv().get('unit-upgrading').
+
+    To help with units that don't have HookData() (testing),
+    return False if the lookup raises an exception.
+    """
+    try:
+        with unitdata.HookData()() as t:
+            kv = t[0]
+            # transform something truth-y into a Boolean.
+            return bool(kv.get('unit-upgrading'))
+    except Exception:
+        return False
+
+
+def series_upgrade_prepare(pause_unit_helper=None, configs=None):
+    """ Run common series upgrade prepare tasks.
+
+    :param pause_unit_helper: function: Function to pause unit
+    :param configs: OSConfigRenderer object: Configurations
+    :returns None:
+    """
+    set_unit_upgrading()
+    if pause_unit_helper and configs:
+        if not is_unit_paused_set():
+            pause_unit_helper(configs)
+
+
+def series_upgrade_complete(resume_unit_helper=None, configs=None):
+    """ Run common series upgrade complete tasks.
+
+    :param resume_unit_helper: function: Function to resume unit
+    :param configs: OSConfigRenderer object: Configurations
+    :returns None:
+    """
+    clear_unit_paused()
+    clear_unit_upgrading()
+    if configs:
+        configs.write_all()
+        if resume_unit_helper:
+            resume_unit_helper(configs)
+
+
+def is_db_initialised():
+    """Check leader storage to see if database has been initialised.
+
+    :returns: Whether DB has been initialised
+    :rtype: bool
+    """
+    db_initialised = None
+    if leader_get('db-initialised') is None:
+        juju_log(
+            'db-initialised key missing, assuming db is not initialised',
+            'DEBUG')
+        db_initialised = False
+    else:
+        db_initialised = bool_from_string(leader_get('db-initialised'))
+    juju_log('Database initialised: {}'.format(db_initialised), 'DEBUG')
+    return db_initialised
+
+
+def set_db_initialised():
+    """Add flag to leader storage to indicate database has been initialised.
+    """
+    juju_log('Setting db-initialised to True', 'DEBUG')
+    leader_set({'db-initialised': True})
+
+
+def is_db_maintenance_mode(relid=None):
+    """Check relation data from notifications of db in maintenance mode.
+
+    :returns: Whether db has notified it is in maintenance mode.
+    :rtype: bool
+    """
+    juju_log('Checking for maintenance notifications', 'DEBUG')
+    if relid:
+        r_ids = [relid]
+    else:
+        r_ids = relation_ids('shared-db')
+    rids_units = [(r, u) for r in r_ids for u in related_units(r)]
+    notifications = []
+    for r_id, unit in rids_units:
+        settings = relation_get(unit=unit, rid=r_id)
+        for key, value in settings.items():
+            if value and key in DB_MAINTENANCE_KEYS:
+                juju_log(
+                    'Unit: {}, Key: {}, Value: {}'.format(unit, key, value),
+                    'DEBUG')
+                try:
+                    notifications.append(bool_from_string(value))
+                except ValueError:
+                    juju_log(
+                        'Could not discern bool from {}'.format(value),
+                        'WARN')
+    return True in notifications
+
+
+@cached
+def container_scoped_relations():
+    """Get all the container scoped relations
+
+    :returns: List of relation names
+    :rtype: List
+    """
+    md = metadata()
+    relations = []
+    for relation_type in ('provides', 'requires', 'peers'):
+        for relation in md.get(relation_type, []):
+            if md[relation_type][relation].get('scope') == 'container':
+                relations.append(relation)
+    return relations
+
+
+def is_db_ready(use_current_context=False, rel_name=None):
+    """Check remote database is ready to be used.
+
+    Database relations are expected to provide a list of 'allowed' units to
+    confirm that the database is ready for use by those units.
+
+    If db relation has provided this information and local unit is a member,
+    returns True otherwise False.
+
+    :param use_current_context: Whether to limit checks to current hook
+                                context.
+    :type use_current_context: bool
+    :param rel_name: Name of relation to check
+    :type rel_name: string
+    :returns: Whether remote db is ready.
+    :rtype: bool
+    :raises: Exception
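+
+    Example 'allowed_units' value as published by a database unit
+    (the unit names are hypothetical):
+
+        'keystone/0 keystone/1 keystone/2'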
+    """
+    key = 'allowed_units'
+
+    rel_name = rel_name or 'shared-db'
+    this_unit = local_unit()
+
+    if use_current_context:
+        if relation_id() in relation_ids(rel_name):
+            rids_units = [(None, None)]
+        else:
+            raise Exception("use_current_context=True but not in {} "
+                            "rel hook contexts (currently in {})."
+                            .format(rel_name, relation_id()))
+    else:
+        rids_units = [(r_id, u)
+                      for r_id in relation_ids(rel_name)
+                      for u in related_units(r_id)]
+
+    for rid, unit in rids_units:
+        allowed_units = relation_get(rid=rid, unit=unit, attribute=key)
+        if allowed_units and this_unit in allowed_units.split():
+            juju_log("This unit ({}) is in allowed unit list from {}".format(
+                this_unit,
+                unit), 'DEBUG')
+            return True
+
+    juju_log("This unit was not found in any allowed unit list")
+    return False
+
+
+def is_expected_scale(peer_relation_name='cluster'):
+    """Query juju goal-state to determine whether our peer- and dependency-
+    relations are at the expected scale.
+
+    Useful for deferring per unit per relation housekeeping work until we are
+    ready to complete it successfully and without unnecessary repetition.
+
+    Always returns True if version of juju used does not support goal-state.
+
+    :param peer_relation_name: Name of peer relation
+    :type peer_relation_name: string
+    :returns: True or False
+    :rtype: bool
+    """
+    def _get_relation_id(rel_type):
+        return next((rid for rid in relation_ids(reltype=rel_type)), None)
+
+    Relation = namedtuple('Relation', 'rel_type rel_id')
+    peer_rid = _get_relation_id(peer_relation_name)
+    # Units with no peers should still have a peer relation.
+    if not peer_rid:
+        juju_log('Not at expected scale, no peer relation found', 'DEBUG')
+        return False
+    expected_relations = [
+        Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))]
+    if expect_ha():
+        expected_relations.append(
+            Relation(
+                rel_type='ha',
+                rel_id=_get_relation_id('ha')))
+    juju_log(
+        'Checking scale of {} relations'.format(
+            ','.join([r.rel_type for r in expected_relations])),
+        'DEBUG')
+    try:
+        if (len(related_units(relid=peer_rid)) <
+                len(list(expected_peer_units()))):
+            return False
+        for rel in expected_relations:
+            if not rel.rel_id:
+                juju_log(
+                    'Expected to find {} relation, but it is missing'.format(
+                        rel.rel_type),
+                    'DEBUG')
+                return False
+            # Goal state returns every unit even for container scoped
+            # relations but the charm only ever has a relation with
+            # the local unit.
+            if rel.rel_type in container_scoped_relations():
+                expected_count = 1
+            else:
+                expected_count = len(
+                    list(expected_related_units(reltype=rel.rel_type)))
+            if len(related_units(relid=rel.rel_id)) < expected_count:
+                juju_log(
+                    ('Not at expected scale, not enough units on {} '
+                     'relation'.format(rel.rel_type)),
+                    'DEBUG')
+                return False
+    except NotImplementedError:
+        return True
+    juju_log('All checks have passed, unit is at expected scale', 'DEBUG')
+    return True
+
+
+def get_peer_key(unit_name):
+    """Get the peer key for this unit.
+
+    The peer key is the key a unit uses to publish its status down the peer
+    relation
+
+    :param unit_name: Name of unit
+    :type unit_name: string
+    :returns: Peer key for given unit
+    :rtype: string
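+
+    Example (a hypothetical unit name):
+
+        get_peer_key('keystone/0')  # => 'unit-state-keystone-0'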
+    """
+    return 'unit-state-{}'.format(unit_name.replace('/', '-'))
+
+
+UNIT_READY = 'READY'
+UNIT_NOTREADY = 'NOTREADY'
+UNIT_UNKNOWN = 'UNKNOWN'
+UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN]
+
+
+def inform_peers_unit_state(state, relation_name='cluster'):
+    """Inform peers of the state of this unit.
+
+    :param state: State of unit to publish
+    :type state: string
+    :param relation_name: Name of relation to publish state on
+    :type relation_name: string
+    """
+    if state not in UNIT_STATES:
+        raise ValueError(
+            "Setting invalid state {} for unit".format(state))
+    this_unit = local_unit()
+    for r_id in relation_ids(relation_name):
+        juju_log('Telling peer behind relation {} that {} is {}'.format(
+            r_id, this_unit, state), 'DEBUG')
+        relation_set(relation_id=r_id,
+                     relation_settings={
+                         get_peer_key(this_unit): state})
+
+
+def get_peers_unit_state(relation_name='cluster'):
+    """Get the state of all peers.
+
+    :param relation_name: Name of relation to check peers on.
+    :type relation_name: string
+    :returns: Unit states keyed on unit name.
+    :rtype: dict
+    :raises: ValueError
+    """
+    r_ids = relation_ids(relation_name)
+    rids_units = [(r, u) for r in r_ids for u in related_units(r)]
+    unit_states = {}
+    for r_id, unit in rids_units:
+        settings = relation_get(unit=unit, rid=r_id)
+        unit_states[unit] = settings.get(get_peer_key(unit), UNIT_UNKNOWN)
+        if unit_states[unit] not in UNIT_STATES:
+            raise ValueError(
+                "Unit in unknown state {}".format(unit_states[unit]))
+    return unit_states
+
+
+def are_peers_ready(relation_name='cluster'):
+    """Check if all peers are ready.
+
+    :param relation_name: Name of relation to check peers on.
+    :type relation_name: string
+    :returns: Whether all units are ready.
+    :rtype: bool
+    """
+    unit_states = get_peers_unit_state(relation_name).values()
+    juju_log('{} peers are in the following states: {}'.format(
+        relation_name, unit_states), 'DEBUG')
+    return all(state == UNIT_READY for state in unit_states)
+
+
+def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'):
+    """Inform peers if this unit is ready.
+
+    The check function should return a tuple (is_ready, message), where
+    a truthy is_ready indicates the unit is READY.
+
+    :param check_unit_ready_func: Function to run to check readiness
+    :type check_unit_ready_func: function
+    :param relation_name: Name of relation to check peers on.
+    :type relation_name: string
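+
+    Example (illustrative):
+
+        inform_peers_if_ready(check_api_unit_ready)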
+    """
+    unit_ready, msg = check_unit_ready_func()
+    if unit_ready:
+        state = UNIT_READY
+    else:
+        state = UNIT_NOTREADY
+    juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG')
+    inform_peers_unit_state(state, relation_name)
+
+
+def check_api_unit_ready(check_db_ready=True):
+    """Check if this unit is ready.
+
+    :param check_db_ready: Include checks of database readiness.
+    :type check_db_ready: bool
+    :returns: Whether unit state is ready and status message
+    :rtype: (bool, str)
+    """
+    unit_state, msg = get_api_unit_status(check_db_ready=check_db_ready)
+    return unit_state == WORKLOAD_STATES.ACTIVE, msg
+
+
+def get_api_unit_status(check_db_ready=True):
+    """Return a workload status and message for this unit.
+
+    :param check_db_ready: Include checks of database readiness.
+    :type check_db_ready: bool
+    :returns: Workload state and message
+    :rtype: (WORKLOAD_STATES, str)
+    """
+    unit_state = WORKLOAD_STATES.ACTIVE
+    msg = 'Unit is ready'
+    if is_db_maintenance_mode():
+        unit_state = WORKLOAD_STATES.MAINTENANCE
+        msg = 'Database in maintenance mode.'
+    elif is_unit_paused_set():
+        unit_state = WORKLOAD_STATES.BLOCKED
+        msg = 'Unit paused.'
+    elif check_db_ready and not is_db_ready():
+        unit_state = WORKLOAD_STATES.WAITING
+        msg = 'Allowed_units list provided but this unit not present'
+    elif not is_db_initialised():
+        unit_state = WORKLOAD_STATES.WAITING
+        msg = 'Database not initialised'
+    elif not is_expected_scale():
+        unit_state = WORKLOAD_STATES.WAITING
+        msg = 'Charm and its dependencies not yet at expected scale'
+    juju_log(msg, 'DEBUG')
+    return unit_state, msg
+
+
+def check_api_application_ready():
+    """Check if this application is ready.
+
+    :returns: Whether application state is ready and status message
+    :rtype: (bool, str)
+    """
+    app_state, msg = get_api_application_status()
+    return app_state == WORKLOAD_STATES.ACTIVE, msg
+
+
+def get_api_application_status():
+    """Return a workload status and message for this application.
+
+    :returns: Workload state and message
+    :rtype: (WORKLOAD_STATES, str)
+    """
+    app_state, msg = get_api_unit_status()
+    if app_state == WORKLOAD_STATES.ACTIVE:
+        if are_peers_ready():
+            msg = 'Application Ready'
+        else:
+            app_state = WORKLOAD_STATES.WAITING
+            msg = 'Some units are not ready'
+    juju_log(msg, 'DEBUG')
+    return app_state, msg
diff --git a/charmhelpers/contrib/python.py b/charmhelpers/contrib/python.py
new file mode 100644
index 0000000000000000000000000000000000000000..84cba8c4eba34fdd705f4ee39628ebd33b5175a2
--- /dev/null
+++ b/charmhelpers/contrib/python.py
@@ -0,0 +1,21 @@
+# Copyright 2014-2019 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+# deprecated aliases for backwards compatibility
+from charmhelpers.fetch.python import debug  # noqa
+from charmhelpers.fetch.python import packages  # noqa
+from charmhelpers.fetch.python import rpdb  # noqa
+from charmhelpers.fetch.python import version  # noqa
diff --git a/charmhelpers/contrib/storage/__init__.py b/charmhelpers/contrib/storage/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/charmhelpers/contrib/storage/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/charmhelpers/contrib/storage/linux/__init__.py b/charmhelpers/contrib/storage/linux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/charmhelpers/contrib/storage/linux/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/charmhelpers/contrib/storage/linux/ceph.py b/charmhelpers/contrib/storage/linux/ceph.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1c617545e9beb29016f141b5e056bb04ff82e0a
--- /dev/null
+++ b/charmhelpers/contrib/storage/linux/ceph.py
@@ -0,0 +1,2381 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Copyright 2012 Canonical Ltd.
+#
+# This file is sourced from lp:openstack-charm-helpers
+#
+# Authors:
+#  James Page <james.page@ubuntu.com>
+#  Adam Gandelman <adamg@ubuntu.com>
+#
+
+import collections
+import errno
+import hashlib
+import math
+import six
+
+import os
+import shutil
+import json
+import time
+import uuid
+
+from subprocess import (
+    check_call,
+    check_output,
+    CalledProcessError,
+)
+from charmhelpers import deprecate
+from charmhelpers.core.hookenv import (
+    application_name,
+    config,
+    service_name,
+    local_unit,
+    relation_get,
+    relation_ids,
+    relation_set,
+    related_units,
+    log,
+    DEBUG,
+    INFO,
+    WARNING,
+    ERROR,
+)
+from charmhelpers.core.host import (
+    mount,
+    mounts,
+    service_start,
+    service_stop,
+    service_running,
+    umount,
+    cmp_pkgrevno,
+)
+from charmhelpers.fetch import (
+    apt_install,
+)
+from charmhelpers.core.unitdata import kv
+
+from charmhelpers.core.kernel import modprobe
+from charmhelpers.contrib.openstack.utils import config_flags_parser
+
+KEYRING = '/etc/ceph/ceph.client.{}.keyring'
+KEYFILE = '/etc/ceph/ceph.client.{}.key'
+
+CEPH_CONF = """[global]
+auth supported = {auth}
+keyring = {keyring}
+mon host = {mon_hosts}
+log to syslog = {use_syslog}
+err to syslog = {use_syslog}
+clog to syslog = {use_syslog}
+"""
+
+# The number of placement groups per OSD to target for placement group
+# calculations. This number is chosen as 100 due to the ceph PG Calc
+# documentation recommending to choose 100 for clusters which are not
+# expected to increase in the foreseeable future. Since the majority of the
+# calculations are done on deployment, target the case of non-expanding
+# clusters as the default.
+DEFAULT_PGS_PER_OSD_TARGET = 100
+DEFAULT_POOL_WEIGHT = 10.0
+LEGACY_PG_COUNT = 200
+DEFAULT_MINIMUM_PGS = 2
+AUTOSCALER_DEFAULT_PGS = 32
+
+
+class OsdPostUpgradeError(Exception):
+    """Error class for OSD post-upgrade operations."""
+    pass
+
+
+class OSDSettingConflict(Exception):
+    """Error class for conflicting osd setting requests."""
+    pass
+
+
+class OSDSettingNotAllowed(Exception):
+    """Error class for a disallowed setting."""
+    pass
+
+
+OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed)
+
+OSD_SETTING_WHITELIST = [
+    'osd heartbeat grace',
+    'osd heartbeat interval',
+]
+
+
+def _order_dict_by_key(rdict):
+    """Convert a dictionary into an OrderedDict sorted by key.
+
+    :param rdict: Dictionary to be ordered.
+    :type rdict: dict
+    :returns: Ordered Dictionary.
+    :rtype: collections.OrderedDict
+    """
+    return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0]))
+
+
+def get_osd_settings(relation_name):
+    """Consolidate requested osd settings from all clients.
+
+    Consolidate requested osd settings from all clients. Check that the
+    requested setting is on the whitelist and it does not conflict with
+    any other requested settings.
+
+    :returns: Dictionary of settings
+    :rtype: dict
+
+    :raises: OSDSettingNotAllowed
+    :raises: OSDSettingConflict
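+
+    Example 'osd-settings' relation value from a client (hypothetical
+    values; both keys are on the whitelist):
+
+        '{"osd heartbeat grace": "20", "osd heartbeat interval": "5"}'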
+    """
+    rel_ids = relation_ids(relation_name)
+    osd_settings = {}
+    for relid in rel_ids:
+        for unit in related_units(relid):
+            unit_settings = relation_get('osd-settings', unit, relid) or '{}'
+            unit_settings = json.loads(unit_settings)
+            for key, value in unit_settings.items():
+                if key not in OSD_SETTING_WHITELIST:
+                    msg = 'Illegal setting "{}"'.format(key)
+                    raise OSDSettingNotAllowed(msg)
+                if key in osd_settings:
+                    if osd_settings[key] != unit_settings[key]:
+                        msg = 'Conflicting settings for "{}"'.format(key)
+                        raise OSDSettingConflict(msg)
+                else:
+                    osd_settings[key] = value
+    return _order_dict_by_key(osd_settings)
+
+
+def send_application_name(relid=None):
+    """Send the application name down the relation.
+
+    :param relid: Relation id to set application name in.
+    :type relid: str
+    """
+    relation_set(
+        relation_id=relid,
+        relation_settings={'application-name': application_name()})
+
+
+def send_osd_settings():
+    """Pass on requested OSD settings to osd units."""
+    try:
+        settings = get_osd_settings('client')
+    except OSD_SETTING_EXCEPTIONS as e:
+        # There is a problem with the settings, so do not pass them on;
+        # update-status will notify the user.
+        log(e, level=ERROR)
+        return
+    data = {
+        'osd-settings': json.dumps(settings, sort_keys=True)}
+    for relid in relation_ids('osd'):
+        relation_set(relation_id=relid,
+                     relation_settings=data)
+
+
+def validator(value, valid_type, valid_range=None):
+    """Helper function for type validation.
+
+    Used to validate these:
+    https://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
+    https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
+
+    Example input:
+        validator(value=1,
+                  valid_type=int,
+                  valid_range=[0, 2])
+
+    This says we are testing whether value=1 is an int within the
+    inclusive range [0, 2].
+
+    :param value: The value to validate.
+    :type value: any
+    :param valid_type: The type that value should be.
+    :type valid_type: any
+    :param valid_range: A range of values that value can assume.
+    :type valid_range: Optional[Union[List,Tuple]]
+    :raises: AssertionError, ValueError
+    """
+    assert isinstance(value, valid_type), (
+        "{} is not a {}".format(value, valid_type))
+    if valid_range is not None:
+        assert isinstance(
+            valid_range, list) or isinstance(valid_range, tuple), (
+                "valid_range must be of type List or Tuple, "
+                "was given {} of type {}"
+                .format(valid_range, type(valid_range)))
+        # If we're dealing with strings
+        if isinstance(value, six.string_types):
+            assert value in valid_range, (
+                "{} is not in the list {}".format(value, valid_range))
+        # Integer, float should have a min and max
+        else:
+            if len(valid_range) != 2:
+                raise ValueError(
+                    "Invalid valid_range list of {} for {}. "
+                    "List must be [min,max]".format(valid_range, value))
+            assert value >= valid_range[0], (
+                "{} is less than minimum allowed value of {}"
+                .format(value, valid_range[0]))
+            assert value <= valid_range[1], (
+                "{} is greater than maximum allowed value of {}"
+                .format(value, valid_range[1]))
+
+
+class PoolCreationError(Exception):
+    """A custom exception to inform the caller that a pool creation failed.
+
+    Provides an error message
+    """
+
+    def __init__(self, message):
+        super(PoolCreationError, self).__init__(message)
+
+
+class BasePool(object):
+    """An object oriented approach to Ceph pool creation.
+
+    This base class is inherited by ReplicatedPool and ErasurePool. Do not call
+    create() on this base class as it will raise an exception.
+
+    Instantiate a child class and call create().
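+
+    Example (illustrative; the Ceph user and pool names are hypothetical):
+
+        ReplicatedPool('admin', name='mypool', replicas=3).create()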
+    """
+    # Dictionary that maps pool operation properties to Tuples with valid type
+    # and valid range
+    op_validation_map = {
+        'compression-algorithm': (str, ('lz4', 'snappy', 'zlib', 'zstd')),
+        'compression-mode': (str, ('none', 'passive', 'aggressive', 'force')),
+        'compression-required-ratio': (float, None),
+        'compression-min-blob-size': (int, None),
+        'compression-min-blob-size-hdd': (int, None),
+        'compression-min-blob-size-ssd': (int, None),
+        'compression-max-blob-size': (int, None),
+        'compression-max-blob-size-hdd': (int, None),
+        'compression-max-blob-size-ssd': (int, None),
+        'rbd-mirroring-mode': (str, ('image', 'pool'))
+    }
+
+    def __init__(self, service, name=None, percent_data=None, app_name=None,
+                 op=None):
+        """Initialize BasePool object.
+
+        Pool information is either initialized from individual keyword
+        arguments or from an individual CephBrokerRq operation Dict.
+
+        :param service: The Ceph user name to run commands under.
+        :type service: str
+        :param name: Name of pool to operate on.
+        :type name: str
+        :param percent_data: The expected pool size in relation to all
+                             available resources in the Ceph cluster. Will be
+                             used to set the ``target_size_ratio`` pool
+                             property. (default: 10.0)
+        :type percent_data: Optional[float]
+        :param app_name: Ceph application name, usually one of:
+                         ('cephfs', 'rbd', 'rgw') (default: 'unknown')
+        :type app_name: Optional[str]
+        :param op: Broker request Op to compile pool data from.
+        :type op: Optional[Dict[str,any]]
+        :raises: KeyError
+        """
+        # NOTE: Do not perform initialization steps that require live data from
+        # a running cluster here. The *Pool classes may be used for validation.
+        self.service = service
+        self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
+        self.op = op or {}
+
+        if op:
+            # When initializing from op the `name` attribute is required and we
+            # will fail with KeyError if it is not provided.
+            self.name = op['name']
+            self.percent_data = op.get('weight')
+            self.app_name = op.get('app-name')
+        else:
+            self.name = name
+            self.percent_data = percent_data
+            self.app_name = app_name
+
+        # Set defaults for these if they are not provided
+        self.percent_data = self.percent_data or 10.0
+        self.app_name = self.app_name or 'unknown'
+
+    def validate(self):
+        """Check that value of supplied operation parameters are valid.
+
+        :raises: ValueError
+        """
+        for op_key, op_value in self.op.items():
+            if op_key in self.op_validation_map and op_value is not None:
+                valid_type, valid_range = self.op_validation_map[op_key]
+                try:
+                    validator(op_value, valid_type, valid_range)
+                except (AssertionError, ValueError) as e:
+                    # Normalize on ValueError, also add information about which
+                    # variable we had an issue with.
+                    raise ValueError("'{}': {}".format(op_key, str(e)))
+
+    def _create(self):
+        """Perform the pool creation, method MUST be overridden by child class.
+        """
+        raise NotImplementedError
+
+    def _post_create(self):
+        """Perform common post pool creation tasks.
+
+        Note that pool properties subject to change during the lifetime of a
+        pool / deployment should go into the ``update`` method.
+
+        Do not add calls for a specific pool type here, those should go into
+        one of the pool specific classes.
+        """
+        if self.nautilus_or_later:
+            # Ensure we set the expected pool ratio
+            update_pool(
+                client=self.service,
+                pool=self.name,
+                settings={
+                    'target_size_ratio': str(
+                        self.percent_data / 100.0),
+                })
+        try:
+            set_app_name_for_pool(client=self.service,
+                                  pool=self.name,
+                                  name=self.app_name)
+        except CalledProcessError:
+            log('Could not set app name for pool {}'
+                .format(self.name),
+                level=WARNING)
+        if 'pg_autoscaler' in enabled_manager_modules():
+            try:
+                enable_pg_autoscale(self.service, self.name)
+            except CalledProcessError as e:
+                log('Could not configure auto scaling for pool {}: {}'
+                    .format(self.name, e),
+                    level=WARNING)
+
+    def create(self):
+        """Create pool and perform any post pool creation tasks.
+
+        To allow for sharing of common code among pool specific classes the
+        processing has been broken out into the private methods ``_create``
+        and ``_post_create``.
+
+        Do not add any pool type specific handling here, that should go into
+        one of the pool specific classes.
+        """
+        if not pool_exists(self.service, self.name):
+            self.validate()
+            self._create()
+            self._post_create()
+            self.update()
+
+    def set_quota(self):
+        """Set a quota if requested.
+
+        :raises: CalledProcessError
+        """
+        max_bytes = self.op.get('max-bytes')
+        max_objects = self.op.get('max-objects')
+        if max_bytes or max_objects:
+            set_pool_quota(service=self.service, pool_name=self.name,
+                           max_bytes=max_bytes, max_objects=max_objects)
+
+    def set_compression(self):
+        """Set compression properties if requested.
+
+        :raises: CalledProcessError
+        """
+        compression_properties = {
+            key.replace('-', '_'): value
+            for key, value in self.op.items()
+            if key in (
+                'compression-algorithm',
+                'compression-mode',
+                'compression-required-ratio',
+                'compression-min-blob-size',
+                'compression-min-blob-size-hdd',
+                'compression-min-blob-size-ssd',
+                'compression-max-blob-size',
+                'compression-max-blob-size-hdd',
+                'compression-max-blob-size-ssd') and value}
+        if compression_properties:
+            update_pool(self.service, self.name, compression_properties)
+
+    def update(self):
+        """Update properties for an already existing pool.
+
+        Do not add calls for a specific pool type here, those should go into
+        one of the pool specific classes.
+        """
+        self.validate()
+        self.set_quota()
+        self.set_compression()
+
+    def add_cache_tier(self, cache_pool, mode):
+        """Adds a new cache tier to an existing pool.
+
+        :param cache_pool: The cache tier pool name to add.
+        :type cache_pool: str
+        :param mode: The caching mode to use for this pool.
+                     valid range = ["readonly", "writeback"]
+        :type mode: str
+        """
+        # Check the input types and values
+        validator(value=cache_pool, valid_type=six.string_types)
+        validator(
+            value=mode, valid_type=six.string_types,
+            valid_range=["readonly", "writeback"])
+
+        check_call([
+            'ceph', '--id', self.service,
+            'osd', 'tier', 'add', self.name, cache_pool,
+        ])
+        check_call([
+            'ceph', '--id', self.service,
+            'osd', 'tier', 'cache-mode', cache_pool, mode,
+        ])
+        check_call([
+            'ceph', '--id', self.service,
+            'osd', 'tier', 'set-overlay', self.name, cache_pool,
+        ])
+        check_call([
+            'ceph', '--id', self.service,
+            'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom',
+        ])
+
+    def remove_cache_tier(self, cache_pool):
+        """Removes a cache tier from Ceph.
+
+        Flushes all dirty objects from writeback pools and waits for that to
+        complete.
+
+        :param cache_pool: The cache tier pool name to remove.
+        :type cache_pool: str
+        """
+        # read-only is easy, writeback is much harder
+        mode = get_cache_mode(self.service, cache_pool)
+        if mode == 'readonly':
+            check_call([
+                'ceph', '--id', self.service,
+                'osd', 'tier', 'cache-mode', cache_pool, 'none'
+            ])
+            check_call([
+                'ceph', '--id', self.service,
+                'osd', 'tier', 'remove', self.name, cache_pool,
+            ])
+
+        elif mode == 'writeback':
+            pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
+                                'cache-mode', cache_pool, 'forward']
+            if cmp_pkgrevno('ceph-common', '10.1') >= 0:
+                # Jewel added a mandatory flag
+                pool_forward_cmd.append('--yes-i-really-mean-it')
+
+            check_call(pool_forward_cmd)
+            # Flush the cache and wait for it to return
+            check_call([
+                'rados', '--id', self.service,
+                '-p', cache_pool, 'cache-flush-evict-all'])
+            check_call([
+                'ceph', '--id', self.service,
+                'osd', 'tier', 'remove-overlay', self.name])
+            check_call([
+                'ceph', '--id', self.service,
+                'osd', 'tier', 'remove', self.name, cache_pool])
+
+    def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT,
+                device_class=None):
+        """Return the number of placement groups to use when creating the pool.
+
+        Returns the number of placement groups which should be specified when
+        creating the pool. This is based upon the calculation guidelines
+        provided by the Ceph Placement Group Calculator (located online at
+        http://ceph.com/pgcalc/).
+
+        The number of placement groups are calculated using the following:
+
+            (Target PGs per OSD) * (OSD #) * (%Data)
+            ----------------------------------------
+                         (Pool size)
+
+        Per the upstream guidelines, the OSD # should really be considered
+        based on the number of OSDs which are eligible to be selected by the
+        pool. Since pool creation doesn't specify any CRUSH placement rules,
+        the default rule will be dependent upon the type of pool being
+        created (replicated or erasure).
+
+        This code makes no attempt to determine the number of OSDs which can be
+        selected for the specific rule; instead this is left to the user to
+        tune via the 'expected-osd-count' config option.
+
+        :param pool_size: pool_size is either the number of replicas for
+            replicated pools or the K+M sum for erasure coded pools
+        :type pool_size: int
+        :param percent_data: the percentage of data that is expected to
+            be contained in the pool for the specific OSD set. Default value
+            is to assume 10% of the data is for this pool, which is a
+            relatively low % of the data but allows for the pg_num to be
+            increased. NOTE: the default is primarily to handle the scenario
+            where related charms requiring pools has not been upgraded to
+            include an update to indicate their relative usage of the pools.
+        :type percent_data: float
+        :param device_class: class of storage to use for basis of pgs
+            calculation; ceph supports nvme, ssd and hdd by default based
+            on presence of devices of each type in the deployment.
+        :type device_class: str
+        :returns: The number of pgs to use.
+        :rtype: int
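+
+        Worked example (a hypothetical cluster of 10 eligible OSDs, the
+        default target of 100 PGs per OSD, percent_data=10, pool_size=3):
+
+            (100 * 10 * 0.10) // 3  # => 33 PGs before rounding
+            # The nearest power of two is 32; 33 - 32 = 1 is within the
+            # 25% tolerance, so 32 is returned.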
+        """
+
+        # Note: This calculation follows the approach that is provided
+        # by the Ceph PG Calculator located at http://ceph.com/pgcalc/.
+        validator(value=pool_size, valid_type=int)
+
+        # Ensure that percent data is set to something - even with a default
+        # it can be set to None, which would wreak havoc below.
+        if percent_data is None:
+            percent_data = DEFAULT_POOL_WEIGHT
+
+        # If the expected-osd-count is specified, then use the max between
+        # the expected-osd-count and the actual osd_count
+        osd_list = get_osds(self.service, device_class)
+        expected = config('expected-osd-count') or 0
+
+        if osd_list:
+            if device_class:
+                osd_count = len(osd_list)
+            else:
+                osd_count = max(expected, len(osd_list))
+
+            # Log a message to provide some insight if the calculations claim
+            # to be off because someone is setting the expected count and
+            # there are more OSDs in reality. Try to make a proper guess
+            # based upon the cluster itself.
+            if not device_class and expected and osd_count != expected:
+                log("Found more OSDs than provided expected count. "
+                    "Using the actual count instead", INFO)
+        elif expected:
+            # Use the expected-osd-count in older ceph versions to allow for
+            # a more accurate pg calculations
+            osd_count = expected
+        else:
+            # NOTE(james-page): Default to 200 for older ceph versions
+            # which don't support OSD query from cli
+            return LEGACY_PG_COUNT
+
+        percent_data /= 100.0
+        target_pgs_per_osd = config(
+            'pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET
+        num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size
+
+        # NOTE: ensure a sane minimum number of PGS otherwise we don't get any
+        #       reasonable data distribution in minimal OSD configurations
+        if num_pg < DEFAULT_MINIMUM_PGS:
+            num_pg = DEFAULT_MINIMUM_PGS
+
+        # The CRUSH algorithm has a slight optimization for placement groups
+        # with powers of 2 so find the nearest power of 2. If the nearest
+        # power of 2 is more than 25% below the original value, the next
+        # highest value is used. To do this, find the nearest power of 2 such
+        # that 2^n <= num_pg, check to see if its within the 25% tolerance.
+        exponent = math.floor(math.log(num_pg, 2))
+        nearest = 2 ** exponent
+        if (num_pg - nearest) > (num_pg * 0.25):
+            # Choose the next highest power of 2 since the nearest is more
+            # than 25% below the original value.
+            return int(nearest * 2)
+        else:
+            return int(nearest)
+
+
+class Pool(BasePool):
+    """Compability shim for any descendents external to this library."""
+
+    @deprecate(
+        'The ``Pool`` baseclass has been replaced by ``BasePool`` class.')
+    def __init__(self, service, name):
+        super(Pool, self).__init__(service, name=name)
+
+    def create(self):
+        pass
+
+
+class ReplicatedPool(BasePool):
+    def __init__(self, service, name=None, pg_num=None, replicas=None,
+                 percent_data=None, app_name=None, op=None):
+        """Initialize ReplicatedPool object.
+
+        Pool information is either initialized from individual keyword
+        arguments or from an individual CephBrokerRq operation Dict.
+
+        Please refer to the docstring of the ``BasePool`` class for
+        documentation of the common parameters.
+
+        :param pg_num: Requested number of Placement Groups (this value
+                       is validated against a running cluster prior to use
+                       to avoid creating a pool with too many PGs)
+        :type pg_num: int
+        :param replicas: Number of copies there should be of each object added
+                         to this replicated pool.
+        :type replicas: int
+        :raises: KeyError
+        """
+        # NOTE: Do not perform initialization steps that require live data from
+        # a running cluster here. The *Pool classes may be used for validation.
+
+        # The common parameters are handled in our parents initializer
+        super(ReplicatedPool, self).__init__(
+            service=service, name=name, percent_data=percent_data,
+            app_name=app_name, op=op)
+
+        if op:
+            # When initializing from op `replicas` is a required attribute, and
+            # we will fail with KeyError if it is not provided.
+            self.replicas = op['replicas']
+            self.pg_num = op.get('pg_num')
+        else:
+            self.replicas = replicas or 2
+            self.pg_num = pg_num
+
+    def _create(self):
+        # Do extra validation on pg_num with data from live cluster
+        if self.pg_num:
+            # Since the number of placement groups were specified, ensure
+            # that there aren't too many created.
+            max_pgs = self.get_pgs(self.replicas, 100.0)
+            self.pg_num = min(self.pg_num, max_pgs)
+        else:
+            self.pg_num = self.get_pgs(self.replicas, self.percent_data)
+
+        # Create it
+        if self.nautilus_or_later:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                '--pg-num-min={}'.format(
+                    min(AUTOSCALER_DEFAULT_PGS, self.pg_num)
+                ),
+                self.name, str(self.pg_num)
+            ]
+        else:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                self.name, str(self.pg_num)
+            ]
+        check_call(cmd)
+
+    def _post_create(self):
+        # Set the pool replica size
+        update_pool(client=self.service,
+                    pool=self.name,
+                    settings={'size': str(self.replicas)})
+        # Perform other common post pool creation tasks
+        super(ReplicatedPool, self)._post_create()
+
+
+class ErasurePool(BasePool):
+    """Default jerasure erasure coded pool."""
+
+    def __init__(self, service, name=None, erasure_code_profile=None,
+                 percent_data=None, app_name=None, op=None,
+                 allow_ec_overwrites=False):
+        """Initialize ReplicatedPool object.
+
+        Pool information is either initialized from individual keyword
+        arguments or from an individual CephBrokerRq operation Dict.
+
+        Please refer to the docstring of the ``BasePool`` class for
+        documentation of the common parameters.
+
+        :param erasure_code_profile: EC Profile to use (default: 'default')
+        :type erasure_code_profile: Optional[str]
+        """
+        # NOTE: Do not perform initialization steps that require live data from
+        # a running cluster here. The *Pool classes may be used for validation.
+
+        # The common parameters are handled in our parents initializer
+        super(ErasurePool, self).__init__(
+            service=service, name=name, percent_data=percent_data,
+            app_name=app_name, op=op)
+
+        if op:
+            # Note that the different default when initializing from op stems
+            # from different handling of this in the `charms.ceph` library.
+            self.erasure_code_profile = op.get('erasure-profile',
+                                               'default-canonical')
+            self.allow_ec_overwrites = op.get('allow-ec-overwrites')
+        else:
+            # We keep the class default when initialized from keyword arguments
+            # to not break the API for any other consumers.
+            self.erasure_code_profile = erasure_code_profile or 'default'
+            self.allow_ec_overwrites = allow_ec_overwrites
+
+    def _create(self):
+        # Try to find the erasure profile information in order to properly
+        # size the number of placement groups. The size of an erasure
+        # coded placement group is calculated as k+m.
+        erasure_profile = get_erasure_profile(self.service,
+                                              self.erasure_code_profile)
+
+        # Check for errors
+        if erasure_profile is None:
+            msg = ("Failed to discover erasure profile named "
+                   "{}".format(self.erasure_code_profile))
+            log(msg, level=ERROR)
+            raise PoolCreationError(msg)
+        if 'k' not in erasure_profile or 'm' not in erasure_profile:
+            # Error
+            msg = ("Unable to find k (data chunks) or m (coding chunks) "
+                   "in erasure profile {}".format(erasure_profile))
+            log(msg, level=ERROR)
+            raise PoolCreationError(msg)
+
+        k = int(erasure_profile['k'])
+        m = int(erasure_profile['m'])
+        pgs = self.get_pgs(k + m, self.percent_data)
+        self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
+        # Create it
+        if self.nautilus_or_later:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                '--pg-num-min={}'.format(
+                    min(AUTOSCALER_DEFAULT_PGS, pgs)
+                ),
+                self.name, str(pgs), str(pgs),
+                'erasure', self.erasure_code_profile
+            ]
+        else:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                self.name, str(pgs), str(pgs),
+                'erasure', self.erasure_code_profile
+            ]
+        check_call(cmd)
+
+    def _post_create(self):
+        super(ErasurePool, self)._post_create()
+        if self.allow_ec_overwrites:
+            update_pool(self.service, self.name,
+                        {'allow_ec_overwrites': 'true'})
+
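+# A minimal usage sketch for the pool classes above, assuming the
+# ``create()`` driver method provided by the ``BasePool`` parent class;
+# the user name 'admin' and profile name 'myprofile' are hypothetical:
+#
+#     pool = ErasurePool(service='admin', name='mypool',
+#                        erasure_code_profile='myprofile',
+#                        allow_ec_overwrites=True)
+#     pool.create()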
+
+def enabled_manager_modules():
+    """Return a list of enabled manager modules.
+
+    :rtype: List[str]
+    """
+    cmd = ['ceph', 'mgr', 'module', 'ls']
+    try:
+        modules = check_output(cmd)
+        if six.PY3:
+            modules = modules.decode('UTF-8')
+    except CalledProcessError as e:
+        log("Failed to list ceph modules: {}".format(e), WARNING)
+        return []
+    modules = json.loads(modules)
+    return modules['enabled_modules']
+
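+# A sketch combining the helper above with ``enable_pg_autoscale`` below:
+# only turn the autoscaler on when the mgr module is active. The user name
+# 'admin' and pool 'mypool' are hypothetical:
+#
+#     if 'pg_autoscaler' in enabled_manager_modules():
+#         enable_pg_autoscale('admin', 'mypool')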
+
+def enable_pg_autoscale(service, pool_name):
+    """Enable Ceph's PG autoscaler for the specified pool.
+
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param pool_name: The name of the pool to enable autoscaling on
+    :type pool_name: str
+    :raises: CalledProcessError if the command fails
+    """
+    check_call([
+        'ceph', '--id', service,
+        'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on'])
+
+
+def get_mon_map(service):
+    """Return the current monitor map.
+
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :returns: Dictionary with monitor map data
+    :rtype: Dict[str, Any]
+    :raises: ValueError if the monmap fails to parse, CalledProcessError if our
+             ceph command fails.
+    """
+    try:
+        mon_status = check_output(['ceph', '--id', service,
+                                   'mon_status', '--format=json'])
+        if six.PY3:
+            mon_status = mon_status.decode('UTF-8')
+        try:
+            return json.loads(mon_status)
+        except ValueError as v:
+            log("Unable to parse mon_status json: {}. Error: {}"
+                .format(mon_status, str(v)))
+            raise
+    except CalledProcessError as e:
+        log("mon_status command failed with message: {}"
+            .format(str(e)))
+        raise
+
+
+def hash_monitor_names(service):
+    """Get a sorted list of monitor hashes in ascending order.
+
+    Uses the get_mon_map() function to get information about the monitor
+    cluster. Hash the name of each monitor.
+
+    :param service: The Ceph user name to run the command under.
+    :type service: str
+    :returns: A sorted list of monitor hashes in ascending order, or None
+              if there are no monitors.
+    :rtype: Optional[List[str]]
+    :raises: CalledProcessError, ValueError
+    """
+    hash_list = []
+    monitor_list = get_mon_map(service=service)
+    if monitor_list['monmap']['mons']:
+        for mon in monitor_list['monmap']['mons']:
+            hash_list.append(
+                hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
+        return sorted(hash_list)
+    return None
+
+
+def monitor_key_delete(service, key):
+    """Delete a key and value pair from the monitor cluster.
+
+    Deletes a key value pair on the monitor cluster.
+
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param key: The key to delete.
+    :type key: str
+    :raises: CalledProcessError
+    """
+    try:
+        check_output(
+            ['ceph', '--id', service,
+             'config-key', 'del', str(key)])
+    except CalledProcessError as e:
+        log("Monitor config-key put failed with message: {}"
+            .format(e.output))
+        raise
+
+
+def monitor_key_set(service, key, value):
+    """Set a key value pair on the monitor cluster.
+
+    :param service: The Ceph user name to run the command under.
+    :type service: str
+    :param key: The key to set.
+    :type key: str
+    :param value: The value to set. This will be coerced into a string.
+    :type value: str
+    :raises: CalledProcessError
+    """
+    try:
+        check_output(
+            ['ceph', '--id', service,
+             'config-key', 'put', str(key), str(value)])
+    except CalledProcessError as e:
+        log("Monitor config-key put failed with message: {}"
+            .format(e.output))
+        raise
+
+
+def monitor_key_get(service, key):
+    """Get the value of an existing key in the monitor cluster.
+
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param key: The key to search for.
+    :type key: str
+    :return: Returns the value of that key or None if not found.
+    :rtype: Optional[str]
+    """
+    try:
+        output = check_output(
+            ['ceph', '--id', service,
+             'config-key', 'get', str(key)]).decode('UTF-8')
+        return output
+    except CalledProcessError as e:
+        log("Monitor config-key get failed with message: {}"
+            .format(e.output))
+        return None
+
+
+def monitor_key_exists(service, key):
+    """Search for existence of key in the monitor cluster.
+
+    :param service: The Ceph user name to run the command under.
+    :type service: str
+    :param key: The key to search for.
+    :type key: str
+    :return: Returns True if the key exists, False if not.
+    :rtype: bool
+    :raises: CalledProcessError if an unknown error occurs.
+    """
+    try:
+        check_call(
+            ['ceph', '--id', service,
+             'config-key', 'exists', str(key)])
+        # If check_call succeeds the key exists; Ceph exits with ENOENT
+        # when the key is not found, which is handled below.
+        return True
+    except CalledProcessError as e:
+        if e.returncode == errno.ENOENT:
+            return False
+        else:
+            log("Unknown error from ceph config-get exists: {} {}"
+                .format(e.returncode, e.output))
+            raise
+
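+# A round-trip sketch for the monitor config-key helpers above; the user
+# name 'admin' and the key/value are hypothetical:
+#
+#     monitor_key_set('admin', 'unit-state', 'bootstrapped')
+#     if monitor_key_exists('admin', 'unit-state'):
+#         state = monitor_key_get('admin', 'unit-state')
+#     monitor_key_delete('admin', 'unit-state')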
+
+def get_erasure_profile(service, name):
+    """Get an existing erasure code profile if it exists.
+
+    :param service: The Ceph user name to run the command under.
+    :type service: str
+    :param name: Name of profile.
+    :type name: str
+    :returns: Dictionary with profile data.
+    :rtype: Optional[Dict[str, Any]]
+    """
+    try:
+        out = check_output(['ceph', '--id', service,
+                            'osd', 'erasure-code-profile', 'get',
+                            name, '--format=json'])
+        if six.PY3:
+            out = out.decode('UTF-8')
+        return json.loads(out)
+    except (CalledProcessError, OSError, ValueError):
+        return None
+
+
+def pool_set(service, pool_name, key, value):
+    """Sets a value for a RADOS pool in ceph.
+
+    :param service: The Ceph user name to run the command under.
+    :type service: str
+    :param pool_name: Name of pool to set property on.
+    :type pool_name: str
+    :param key: Property key.
+    :type key: str
+    :param value: Value, will be coerced into str and shifted to lowercase.
+    :type value: str
+    :raises: CalledProcessError
+    """
+    cmd = [
+        'ceph', '--id', service,
+        'osd', 'pool', 'set', pool_name, key, str(value).lower()]
+    check_call(cmd)
+
+
+def snapshot_pool(service, pool_name, snapshot_name):
+    """Snapshots a RADOS pool in Ceph.
+
+    :param service: The Ceph user name to run the command under.
+    :type service: str
+    :param pool_name: Name of pool to snapshot.
+    :type pool_name: str
+    :param snapshot_name: Name of snapshot to create.
+    :type snapshot_name: str
+    :raises: CalledProcessError
+    """
+    cmd = [
+        'ceph', '--id', service,
+        'osd', 'pool', 'mksnap', pool_name, snapshot_name]
+    check_call(cmd)
+
+
+def remove_pool_snapshot(service, pool_name, snapshot_name):
+    """Remove a snapshot from a RADOS pool in Ceph.
+
+    :param service: The Ceph user name to run the command under.
+    :type service: str
+    :param pool_name: Name of pool to remove snapshot from.
+    :type pool_name: str
+    :param snapshot_name: Name of snapshot to remove.
+    :type snapshot_name: str
+    :raises: CalledProcessError
+    """
+    cmd = [
+        'ceph', '--id', service,
+        'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
+    check_call(cmd)
+
+
+def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
+    """Set byte quota on a RADOS pool in Ceph.
+
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param pool_name: Name of pool
+    :type pool_name: str
+    :param max_bytes: Maximum bytes quota to apply
+    :type max_bytes: Optional[int]
+    :param max_objects: Maximum objects quota to apply
+    :type max_objects: Optional[int]
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = [
+        'ceph', '--id', service,
+        'osd', 'pool', 'set-quota', pool_name]
+    if max_bytes:
+        cmd = cmd + ['max_bytes', str(max_bytes)]
+    if max_objects:
+        cmd = cmd + ['max_objects', str(max_objects)]
+    check_call(cmd)
+
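+# Example sketch: cap a pool at 1 GiB and 10000 objects; 'admin' and
+# 'mypool' are hypothetical:
+#
+#     set_pool_quota('admin', 'mypool',
+#                    max_bytes=1024 ** 3, max_objects=10000)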
+
+def remove_pool_quota(service, pool_name):
+    """Remove byte quota on a RADOS pool in Ceph.
+
+    :param service: The Ceph user name to run the command under.
+    :type service: str
+    :param pool_name: Name of pool to remove quota from.
+    :type pool_name: str
+    :raises: CalledProcessError
+    """
+    cmd = [
+        'ceph', '--id', service,
+        'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
+    check_call(cmd)
+
+
+def remove_erasure_profile(service, profile_name):
+    """Remove erasure code profile.
+
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param profile_name: Name of profile to remove.
+    :type profile_name: str
+    :raises: CalledProcessError
+    """
+    cmd = [
+        'ceph', '--id', service,
+        'osd', 'erasure-code-profile', 'rm', profile_name]
+    check_call(cmd)
+
+
+def create_erasure_profile(service, profile_name,
+                           erasure_plugin_name='jerasure',
+                           failure_domain=None,
+                           data_chunks=2, coding_chunks=1,
+                           locality=None, durability_estimator=None,
+                           helper_chunks=None,
+                           scalar_mds=None,
+                           crush_locality=None,
+                           device_class=None,
+                           erasure_plugin_technique=None):
+    """Create a new erasure code profile if one does not already exist for it.
+
+    Profiles are considered immutable so will not be updated if the named
+    profile already exists.
+
+    Please refer to [0] for more details.
+
+    0: http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
+
+    :param service: The Ceph user name to run the command under.
+    :type service: str
+    :param profile_name: Name of profile.
+    :type profile_name: str
+    :param erasure_plugin_name: Erasure code plugin.
+    :type erasure_plugin_name: str
+    :param failure_domain: Failure domain, one of:
+                           ('chassis', 'datacenter', 'host', 'osd', 'pdu',
+                            'pod', 'rack', 'region', 'room', 'root', 'row').
+    :type failure_domain: str
+    :param data_chunks: Number of data chunks.
+    :type data_chunks: int
+    :param coding_chunks: Number of coding chunks.
+    :type coding_chunks: int
+    :param locality: Locality.
+    :type locality: int
+    :param durability_estimator: Durability estimator.
+    :type durability_estimator: int
+    :param helper_chunks: Number of helper chunks (d) for the clay plugin.
+    :type helper_chunks: int
+    :param device_class: Restrict placement to devices of specific class.
+    :type device_class: str
+    :param scalar_mds: one of ['isa', 'jerasure', 'shec']
+    :type scalar_mds: str
+    :param crush_locality: LRC locality failure domain, one of:
+                           ('chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod',
+                            'rack', 'region', 'room', 'root', 'row') or unset.
+    :type crush_locality: str
+    :param erasure_plugin_technique: Coding technique for EC plugin
+    :type erasure_plugin_technique: str
+    :return: None
+    :raises: CalledProcessError, ValueError, AssertionError
+    """
+    if erasure_profile_exists(service, profile_name):
+        log('EC profile {} exists, skipping update'.format(profile_name),
+            level=WARNING)
+        return
+
+    plugin_techniques = {
+        'jerasure': [
+            'reed_sol_van',
+            'reed_sol_r6_op',
+            'cauchy_orig',
+            'cauchy_good',
+            'liberation',
+            'blaum_roth',
+            'liber8tion'
+        ],
+        'lrc': [],
+        'isa': [
+            'reed_sol_van',
+            'cauchy',
+        ],
+        'shec': [
+            'single',
+            'multiple'
+        ],
+        'clay': [],
+    }
+    failure_domains = [
+        'chassis', 'datacenter',
+        'host', 'osd',
+        'pdu', 'pod',
+        'rack', 'region',
+        'room', 'root',
+        'row',
+    ]
+    device_classes = [
+        'ssd',
+        'hdd',
+        'nvme'
+    ]
+
+    validator(erasure_plugin_name, six.string_types,
+              list(plugin_techniques.keys()))
+
+    cmd = [
+        'ceph', '--id', service,
+        'osd', 'erasure-code-profile', 'set', profile_name,
+        'plugin={}'.format(erasure_plugin_name),
+        'k={}'.format(str(data_chunks)),
+        'm={}'.format(str(coding_chunks)),
+    ]
+
+    if erasure_plugin_technique:
+        validator(erasure_plugin_technique, six.string_types,
+                  plugin_techniques[erasure_plugin_name])
+        cmd.append('technique={}'.format(erasure_plugin_technique))
+
+    luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
+
+    # Set failure domain from options if not provided in args
+    if not failure_domain and config('customize-failure-domain'):
+        # Defaults to 'host' so just need to deal with
+        # setting 'rack' if feature is enabled
+        failure_domain = 'rack'
+
+    if failure_domain:
+        validator(failure_domain, six.string_types, failure_domains)
+        # failure_domain changed in luminous
+        if luminous_or_later:
+            cmd.append('crush-failure-domain={}'.format(failure_domain))
+        else:
+            cmd.append('ruleset-failure-domain={}'.format(failure_domain))
+
+    # device class new in luminous
+    if luminous_or_later and device_class:
+        validator(device_class, six.string_types, device_classes)
+        cmd.append('crush-device-class={}'.format(device_class))
+    elif device_class:
+        log('Skipping device class configuration (ceph < 12.0.0)',
+            level=DEBUG)
+
+    # Add plugin specific information
+    if erasure_plugin_name == 'lrc':
+        # LRC mandatory configuration
+        if locality:
+            cmd.append('l={}'.format(str(locality)))
+        else:
+            raise ValueError("locality must be provided for lrc plugin")
+        # LRC optional configuration
+        if crush_locality:
+            validator(crush_locality, six.string_types, failure_domains)
+            cmd.append('crush-locality={}'.format(crush_locality))
+
+    if erasure_plugin_name == 'shec':
+        # SHEC optional configuration
+        if durability_estimator:
+            cmd.append('c={}'.format(durability_estimator))
+
+    if erasure_plugin_name == 'clay':
+        # CLAY optional configuration
+        if helper_chunks:
+            cmd.append('d={}'.format(str(helper_chunks)))
+        if scalar_mds:
+            cmd.append('scalar-mds={}'.format(scalar_mds))
+
+    check_call(cmd)
+
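+# Example sketch: a jerasure 4+2 profile restricted to SSD devices. On a
+# Luminous or later cluster, and with no 'customize-failure-domain' charm
+# option set, this is roughly equivalent to `ceph osd erasure-code-profile
+# set myprofile plugin=jerasure k=4 m=2 crush-device-class=ssd`; 'admin'
+# and 'myprofile' are hypothetical:
+#
+#     create_erasure_profile('admin', 'myprofile',
+#                            erasure_plugin_name='jerasure',
+#                            data_chunks=4, coding_chunks=2,
+#                            device_class='ssd')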
+
+def rename_pool(service, old_name, new_name):
+    """Rename a Ceph pool from old_name to new_name.
+
+    :param service: The Ceph user name to run the command under.
+    :type service: str
+    :param old_name: Name of pool subject to rename.
+    :type old_name: str
+    :param new_name: Name to rename pool to.
+    :type new_name: str
+    """
+    validator(value=old_name, valid_type=six.string_types)
+    validator(value=new_name, valid_type=six.string_types)
+
+    cmd = [
+        'ceph', '--id', service,
+        'osd', 'pool', 'rename', old_name, new_name]
+    check_call(cmd)
+
+
+def erasure_profile_exists(service, name):
+    """Check to see if an Erasure code profile already exists.
+
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param name: Name of profile to look for.
+    :type name: str
+    :returns: True if it exists, False otherwise.
+    :rtype: bool
+    """
+    validator(value=name, valid_type=six.string_types)
+    try:
+        check_call(['ceph', '--id', service,
+                    'osd', 'erasure-code-profile', 'get',
+                    name])
+        return True
+    except CalledProcessError:
+        return False
+
+
+def get_cache_mode(service, pool_name):
+    """Find the current caching mode of the pool_name given.
+
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param pool_name: Name of pool.
+    :type pool_name: str
+    :returns: Current cache mode.
+    :rtype: Optional[str]
+    """
+    validator(value=service, valid_type=six.string_types)
+    validator(value=pool_name, valid_type=six.string_types)
+    out = check_output(['ceph', '--id', service,
+                        'osd', 'dump', '--format=json'])
+    if six.PY3:
+        out = out.decode('UTF-8')
+    osd_json = json.loads(out)
+    for pool in osd_json['pools']:
+        if pool['pool_name'] == pool_name:
+            return pool['cache_mode']
+    return None
+
+
+def pool_exists(service, name):
+    """Check to see if a RADOS pool already exists."""
+    try:
+        out = check_output(['rados', '--id', service, 'lspools'])
+        if six.PY3:
+            out = out.decode('UTF-8')
+    except CalledProcessError:
+        return False
+
+    return name in out.split()
+
+
+def get_osds(service, device_class=None):
+    """Return a list of all Ceph Object Storage Daemons currently in the
+    cluster (optionally filtered by storage device class).
+
+    :param service: The Ceph user name to run the command under.
+    :type service: str
+    :param device_class: Class of storage device for OSDs.
+    :type device_class: str
+    """
+    luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
+    if luminous_or_later and device_class:
+        out = check_output(['ceph', '--id', service,
+                            'osd', 'crush', 'class',
+                            'ls-osd', device_class,
+                            '--format=json'])
+    else:
+        out = check_output(['ceph', '--id', service,
+                            'osd', 'ls',
+                            '--format=json'])
+    if six.PY3:
+        out = out.decode('UTF-8')
+    return json.loads(out)
+
+
+def install():
+    """Basic Ceph client installation."""
+    ceph_dir = "/etc/ceph"
+    if not os.path.exists(ceph_dir):
+        os.mkdir(ceph_dir)
+
+    apt_install('ceph-common', fatal=True)
+
+
+def rbd_exists(service, pool, rbd_img):
+    """Check to see if a RADOS block device exists."""
+    try:
+        out = check_output(['rbd', 'list', '--id',
+                            service, '--pool', pool])
+        if six.PY3:
+            out = out.decode('UTF-8')
+    except CalledProcessError:
+        return False
+
+    return rbd_img in out
+
+
+def create_rbd_image(service, pool, image, sizemb):
+    """Create a new RADOS block device."""
+    cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
+           '--pool', pool]
+    check_call(cmd)
+
+
+def update_pool(client, pool, settings):
+    """Update pool properties.
+
+    :param client: Client/User-name to authenticate with.
+    :type client: str
+    :param pool: Name of pool to operate on
+    :type pool: str
+    :param settings: Dictionary with key/value pairs to set.
+    :type settings: Dict[str, str]
+    :raises: CalledProcessError
+    """
+    cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
+    for k, v in six.iteritems(settings):
+        check_call(cmd + [k, v])
+
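+# Example sketch: raise the replica count and the minimum number of
+# replicas required for I/O on an existing pool; 'admin' and 'mypool'
+# are hypothetical:
+#
+#     update_pool('admin', 'mypool', {'size': '3', 'min_size': '2'})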
+
+def set_app_name_for_pool(client, pool, name):
+    """Calls `osd pool application enable` for the specified pool name
+
+    :param client: Name of the ceph client to use
+    :type client: str
+    :param pool: Pool to set app name for
+    :type pool: str
+    :param name: app name for the specified pool
+    :type name: str
+
+    :raises: CalledProcessError if ceph call fails
+    """
+    if cmp_pkgrevno('ceph-common', '12.0.0') >= 0:
+        cmd = ['ceph', '--id', client, 'osd', 'pool',
+               'application', 'enable', pool, name]
+        check_call(cmd)
+
+
+def create_pool(service, name, replicas=3, pg_num=None):
+    """Create a new RADOS pool."""
+    if pool_exists(service, name):
+        log("Ceph pool {} already exists, skipping creation".format(name),
+            level=WARNING)
+        return
+
+    if not pg_num:
+        # Calculate the number of placement groups based
+        # on upstream recommended best practices.
+        osds = get_osds(service)
+        if osds:
+            pg_num = (len(osds) * 100 // replicas)
+        else:
+            # NOTE(james-page): Default to 200 for older ceph versions
+            # which don't support OSD query from cli
+            pg_num = 200
+
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
+    check_call(cmd)
+
+    update_pool(service, name, settings={'size': str(replicas)})
+
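+# Example sketch: create_pool() above derives pg_num from the live OSD
+# count when one is not supplied, so a plain call suffices; 'admin' and
+# 'mypool' are hypothetical:
+#
+#     create_pool('admin', 'mypool', replicas=3)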
+
+def delete_pool(service, name):
+    """Delete a RADOS pool from ceph."""
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
+           '--yes-i-really-really-mean-it']
+    check_call(cmd)
+
+
+def _keyfile_path(service):
+    return KEYFILE.format(service)
+
+
+def _keyring_path(service):
+    return KEYRING.format(service)
+
+
+def add_key(service, key):
+    """Add a key to a keyring.
+
+    Creates the keyring if it doesn't already exist.
+
+    Logs and returns if the key is already in the keyring.
+    """
+    keyring = _keyring_path(service)
+    if os.path.exists(keyring):
+        with open(keyring, 'r') as ring:
+            if key in ring.read():
+                log('Ceph keyring exists at %s and has not changed.' % keyring,
+                    level=DEBUG)
+                return
+            log('Updating existing keyring %s.' % keyring, level=DEBUG)
+
+    cmd = ['ceph-authtool', keyring, '--create-keyring',
+           '--name=client.{}'.format(service), '--add-key={}'.format(key)]
+    check_call(cmd)
+    log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
+
+
+def create_keyring(service, key):
+    """Deprecated. Please use the more accurately named 'add_key'"""
+    return add_key(service, key)
+
+
+def delete_keyring(service):
+    """Delete an existing Ceph keyring."""
+    keyring = _keyring_path(service)
+    if not os.path.exists(keyring):
+        log('Keyring does not exist at %s' % keyring, level=WARNING)
+        return
+
+    os.remove(keyring)
+    log('Deleted ring at %s.' % keyring, level=INFO)
+
+
+def create_key_file(service, key):
+    """Create a file containing key."""
+    keyfile = _keyfile_path(service)
+    if os.path.exists(keyfile):
+        log('Keyfile exists at %s.' % keyfile, level=WARNING)
+        return
+
+    with open(keyfile, 'w') as fd:
+        fd.write(key)
+
+    log('Created new keyfile at %s.' % keyfile, level=INFO)
+
+
+def get_ceph_nodes(relation='ceph'):
+    """Query named relation to determine current nodes."""
+    hosts = []
+    for r_id in relation_ids(relation):
+        for unit in related_units(r_id):
+            hosts.append(relation_get('private-address', unit=unit, rid=r_id))
+
+    return hosts
+
+
+def configure(service, key, auth, use_syslog):
+    """Perform basic configuration of Ceph."""
+    add_key(service, key)
+    create_key_file(service, key)
+    hosts = get_ceph_nodes()
+    with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
+        ceph_conf.write(CEPH_CONF.format(auth=auth,
+                                         keyring=_keyring_path(service),
+                                         mon_hosts=",".join(map(str, hosts)),
+                                         use_syslog=use_syslog))
+    modprobe('rbd')
+
+
+def image_mapped(name):
+    """Determine whether a RADOS block device is mapped locally."""
+    try:
+        out = check_output(['rbd', 'showmapped'])
+        if six.PY3:
+            out = out.decode('UTF-8')
+    except CalledProcessError:
+        return False
+
+    return name in out
+
+
+def map_block_storage(service, pool, image):
+    """Map a RADOS block device for local use."""
+    cmd = [
+        'rbd',
+        'map',
+        '{}/{}'.format(pool, image),
+        '--user',
+        service,
+        '--secret',
+        _keyfile_path(service),
+    ]
+    check_call(cmd)
+
+
+def filesystem_mounted(fs):
+    """Determine whether a filesytems is already mounted."""
+    return fs in [f for f, m in mounts()]
+
+
+def make_filesystem(blk_device, fstype='ext4', timeout=10):
+    """Make a new filesystem on the specified block device."""
+    count = 0
+    e_noent = errno.ENOENT
+    while not os.path.exists(blk_device):
+        if count >= timeout:
+            log('Gave up waiting on block device %s' % blk_device,
+                level=ERROR)
+            raise IOError(e_noent, os.strerror(e_noent), blk_device)
+
+        log('Waiting for block device %s to appear' % blk_device,
+            level=DEBUG)
+        count += 1
+        time.sleep(1)
+    else:
+        log('Formatting block device %s as filesystem %s.' %
+            (blk_device, fstype), level=INFO)
+        check_call(['mkfs', '-t', fstype, blk_device])
+
+
+def place_data_on_block_device(blk_device, data_src_dst):
+    """Migrate data in data_src_dst to blk_device and then remount."""
+    # mount block device into /mnt
+    mount(blk_device, '/mnt')
+    # copy data to /mnt
+    copy_files(data_src_dst, '/mnt')
+    # umount block device
+    umount('/mnt')
+    # Grab user/group ID's from original source
+    _dir = os.stat(data_src_dst)
+    uid = _dir.st_uid
+    gid = _dir.st_gid
+    # re-mount where the data should originally be
+    # TODO: persist is currently a NO-OP in core.host
+    mount(blk_device, data_src_dst, persist=True)
+    # ensure original ownership of new mount.
+    os.chown(data_src_dst, uid, gid)
+
+
+def copy_files(src, dst, symlinks=False, ignore=None):
+    """Copy files from src to dst."""
+    for item in os.listdir(src):
+        s = os.path.join(src, item)
+        d = os.path.join(dst, item)
+        if os.path.isdir(s):
+            shutil.copytree(s, d, symlinks, ignore)
+        else:
+            shutil.copy2(s, d)
+
+
+def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
+                        blk_device, fstype, system_services=[],
+                        replicas=3):
+    """NOTE: This function must only be called from a single service unit for
+    the same rbd_img otherwise data loss will occur.
+
+    Ensures given pool and RBD image exists, is mapped to a block device,
+    and the device is formatted and mounted at the given mount_point.
+
+    If formatting a device for the first time, data existing at mount_point
+    will be migrated to the RBD device before being re-mounted.
+
+    All services listed in system_services will be stopped prior to data
+    migration and restarted when complete.
+    """
+    # Ensure pool, RBD image, RBD mappings are in place.
+    if not pool_exists(service, pool):
+        log('Creating new pool {}.'.format(pool), level=INFO)
+        create_pool(service, pool, replicas=replicas)
+
+    if not rbd_exists(service, pool, rbd_img):
+        log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
+        create_rbd_image(service, pool, rbd_img, sizemb)
+
+    if not image_mapped(rbd_img):
+        log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
+            level=INFO)
+        map_block_storage(service, pool, rbd_img)
+
+    # make file system
+    # TODO: What happens if for whatever reason this is run again and
+    # the data is already in the rbd device and/or is mounted??
+    # When it is mounted already, it will fail to make the fs
+    # XXX: This is really sketchy!  Need to at least add an fstab entry
+    #      otherwise this hook will blow away existing data if its executed
+    #      after a reboot.
+    if not filesystem_mounted(mount_point):
+        make_filesystem(blk_device, fstype)
+
+        for svc in system_services:
+            if service_running(svc):
+                log('Stopping service {} prior to migrating data.'
+                    .format(svc), level=DEBUG)
+                service_stop(svc)
+
+        place_data_on_block_device(blk_device, mount_point)
+
+        for svc in system_services:
+            log('Starting service {} after migrating data.'
+                .format(svc), level=DEBUG)
+            service_start(svc)
+
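+# Example sketch for ensure_ceph_storage() above with hypothetical values:
+# ensure a 1024 MB RBD-backed filesystem for a service, stopping mysql
+# while any existing data is migrated:
+#
+#     ensure_ceph_storage(service='mysql', pool='mysql-pool',
+#                         rbd_img='mysql-data', sizemb=1024,
+#                         mount_point='/var/lib/mysql',
+#                         blk_device='/dev/rbd0', fstype='ext4',
+#                         system_services=['mysql'])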
+
+def ensure_ceph_keyring(service, user=None, group=None,
+                        relation='ceph', key=None):
+    """Ensures a ceph keyring is created for a named service and optionally
+    ensures user and group ownership.
+
+    @returns boolean: Flag to indicate whether a key was successfully written
+                      to disk based on either relation data or a supplied key
+    """
+    if not key:
+        for rid in relation_ids(relation):
+            for unit in related_units(rid):
+                key = relation_get('key', rid=rid, unit=unit)
+                if key:
+                    break
+
+    if not key:
+        return False
+
+    add_key(service=service, key=key)
+    keyring = _keyring_path(service)
+    if user and group:
+        check_call(['chown', '%s:%s' % (user, group), keyring])
+
+    return True
+
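+# Example sketch: fetch the key from the 'ceph' relation and write a
+# keyring owned by the service user; 'glance' is a hypothetical service:
+#
+#     if not ensure_ceph_keyring(service='glance',
+#                                user='glance', group='glance'):
+#         log('ceph relation incomplete, deferring', level=DEBUG)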
+
+class CephBrokerRq(object):
+    """Ceph broker request.
+
+    Multiple operations can be added to a request and sent to the Ceph broker
+    to be executed.
+
+    Request is json-encoded for sending over the wire.
+
+    The API is versioned and defaults to version 1.
+    """
+
+    def __init__(self, api_version=1, request_id=None, raw_request_data=None):
+        """Initialize CephBrokerRq object.
+
+        Builds a new empty request or rebuilds a request from on-wire JSON
+        data.
+
+        :param api_version: API version for request (default: 1).
+        :type api_version: Optional[int]
+        :param request_id: Unique identifier for request.
+                           (default: string representation of generated UUID)
+        :type request_id: Optional[str]
+        :param raw_request_data: JSON-encoded string to build request from.
+        :type raw_request_data: Optional[str]
+        :raises: KeyError
+        """
+        if raw_request_data:
+            request_data = json.loads(raw_request_data)
+            self.api_version = request_data['api-version']
+            self.request_id = request_data['request-id']
+            self.set_ops(request_data['ops'])
+        else:
+            self.api_version = api_version
+            if request_id:
+                self.request_id = request_id
+            else:
+                self.request_id = str(uuid.uuid1())
+            self.ops = []
+
+    def add_op(self, op):
+        """Add an op if it is not already in the list.
+
+        :param op: Operation to add.
+        :type op: dict
+        """
+        if op not in self.ops:
+            self.ops.append(op)
+
+    def add_op_request_access_to_group(self, name, namespace=None,
+                                       permission=None, key_name=None,
+                                       object_prefix_permissions=None):
+        """
+        Adds the requested permissions to the current service's Ceph key,
+        allowing the key to access only the specified pools or
+        object prefixes. object_prefix_permissions should be a dictionary
+        keyed on the permission with the corresponding value being a list
+        of prefixes to apply that permission to.
+            {
+                'rwx': ['prefix1', 'prefix2'],
+                'class-read': ['prefix3'],
+            }
+        """
+        self.add_op({
+            'op': 'add-permissions-to-key', 'group': name,
+            'namespace': namespace,
+            'name': key_name or service_name(),
+            'group-permission': permission,
+            'object-prefix-permissions': object_prefix_permissions})
+
+    def add_op_create_pool(self, name, replica_count=3, pg_num=None,
+                           weight=None, group=None, namespace=None,
+                           app_name=None, max_bytes=None, max_objects=None):
+        """DEPRECATED: Use ``add_op_create_replicated_pool()`` or
+                       ``add_op_create_erasure_pool()`` instead.
+        """
+        return self.add_op_create_replicated_pool(
+            name, replica_count=replica_count, pg_num=pg_num, weight=weight,
+            group=group, namespace=namespace, app_name=app_name,
+            max_bytes=max_bytes, max_objects=max_objects)
+
+    # Use function parameters and docstring to define types in a compatible
+    # manner.
+    #
+    # NOTE: Our caller should always use a kwarg Dict when calling us so
+    # no need to maintain fixed order/position for parameters. Please keep them
+    # sorted by name when adding new ones.
+    def _partial_build_common_op_create(self,
+                                        app_name=None,
+                                        compression_algorithm=None,
+                                        compression_mode=None,
+                                        compression_required_ratio=None,
+                                        compression_min_blob_size=None,
+                                        compression_min_blob_size_hdd=None,
+                                        compression_min_blob_size_ssd=None,
+                                        compression_max_blob_size=None,
+                                        compression_max_blob_size_hdd=None,
+                                        compression_max_blob_size_ssd=None,
+                                        group=None,
+                                        max_bytes=None,
+                                        max_objects=None,
+                                        namespace=None,
+                                        rbd_mirroring_mode='pool',
+                                        weight=None):
+        """Build common part of a create pool operation.
+
+        :param app_name: Tag pool with application name. Note that certain
+                         conventions for meaningful application names are
+                         emerging upstream. Examples are 'rbd' and 'rgw'.
+        :type app_name: Optional[str]
+        :param compression_algorithm: Compressor to use, one of:
+                                      ('lz4', 'snappy', 'zlib', 'zstd')
+        :type compression_algorithm: Optional[str]
+        :param compression_mode: When to compress data, one of:
+                                 ('none', 'passive', 'aggressive', 'force')
+        :type compression_mode: Optional[str]
+        :param compression_required_ratio: Minimum compression ratio for data
+                                           chunk; if the requested ratio is
+                                           not achieved the compressed version
+                                           will be discarded and the original
+                                           stored.
+        :type compression_required_ratio: Optional[float]
+        :param compression_min_blob_size: Chunks smaller than this are never
+                                          compressed (unit: bytes).
+        :type compression_min_blob_size: Optional[int]
+        :param compression_min_blob_size_hdd: Chunks smaller than this are not
+                                              compressed when destined to
+                                              rotational media (unit: bytes).
+        :type compression_min_blob_size_hdd: Optional[int]
+        :param compression_min_blob_size_ssd: Chunks smaller than this are not
+                                              compressed when destined to flash
+                                              media (unit: bytes).
+        :type compression_min_blob_size_ssd: Optional[int]
+        :param compression_max_blob_size: Chunks larger than this are broken
+                                          into N * compression_max_blob_size
+                                          chunks before being compressed
+                                          (unit: bytes).
+        :type compression_max_blob_size: Optional[int]
+        :param compression_max_blob_size_hdd: Chunks larger than this are
+                                              broken into
+                                              N * compression_max_blob_size_hdd
+                                              chunks before being compressed
+                                              when destined for rotational
+                                              media (unit: bytes)
+        :type compression_max_blob_size_hdd: Optional[int]
+        :param compression_max_blob_size_ssd: Chunks larger than this are
+                                              broken into
+                                              N * compression_max_blob_size_ssd
+                                              chunks before being compressed
+                                              when destined for flash media
+                                              (unit: bytes).
+        :type compression_max_blob_size_ssd: Optional[int]
+        :param group: Group to add pool to
+        :type group: Optional[str]
+        :param max_bytes: Maximum bytes quota to apply
+        :type max_bytes: Optional[int]
+        :param max_objects: Maximum objects quota to apply
+        :type max_objects: Optional[int]
+        :param namespace: Group namespace
+        :type namespace: Optional[str]
+        :param rbd_mirroring_mode: Pool mirroring mode used when Ceph RBD
+                                   mirroring is enabled.
+        :type rbd_mirroring_mode: Optional[str]
+        :param weight: The percentage of data that is expected to be contained
+                       in the pool from the total available space on the OSDs.
+                       Used to calculate number of Placement Groups to create
+                       for pool.
+        :type weight: Optional[float]
+        :returns: Dictionary with kwarg name as key.
+        :rtype: Dict[str,any]
+        :raises: AssertionError
+        """
+        return {
+            'app-name': app_name,
+            'compression-algorithm': compression_algorithm,
+            'compression-mode': compression_mode,
+            'compression-required-ratio': compression_required_ratio,
+            'compression-min-blob-size': compression_min_blob_size,
+            'compression-min-blob-size-hdd': compression_min_blob_size_hdd,
+            'compression-min-blob-size-ssd': compression_min_blob_size_ssd,
+            'compression-max-blob-size': compression_max_blob_size,
+            'compression-max-blob-size-hdd': compression_max_blob_size_hdd,
+            'compression-max-blob-size-ssd': compression_max_blob_size_ssd,
+            'group': group,
+            'max-bytes': max_bytes,
+            'max-objects': max_objects,
+            'group-namespace': namespace,
+            'rbd-mirroring-mode': rbd_mirroring_mode,
+            'weight': weight,
+        }
+
+    def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None,
+                                      **kwargs):
+        """Adds an operation to create a replicated pool.
+
+        Refer to docstring for ``_partial_build_common_op_create`` for
+        documentation of keyword arguments.
+
+        :param name: Name of pool to create
+        :type name: str
+        :param replica_count: Number of copies Ceph should keep of your data.
+        :type replica_count: int
+        :param pg_num: Request specific number of Placement Groups to create
+                       for pool.
+        :type pg_num: int
+        :raises: AssertionError if provided data is of invalid type/range,
+                 ValueError if both pg_num and weight are provided.
+        """
+        if pg_num and kwargs.get('weight'):
+            raise ValueError('pg_num and weight are mutually exclusive')
+
+        op = {
+            'op': 'create-pool',
+            'name': name,
+            'replicas': replica_count,
+            'pg_num': pg_num,
+        }
+        op.update(self._partial_build_common_op_create(**kwargs))
+
+        # Initialize Pool-object to validate type and range of ops.
+        pool = ReplicatedPool('dummy-service', op=op)
+        pool.validate()
+
+        self.add_op(op)
+
+    def add_op_create_erasure_pool(self, name, erasure_profile=None,
+                                   allow_ec_overwrites=False, **kwargs):
+        """Adds an operation to create a erasure coded pool.
+
+        Refer to docstring for ``_partial_build_common_op_create`` for
+        documentation of keyword arguments.
+
+        :param name: Name of pool to create
+        :type name: str
+        :param erasure_profile: Name of erasure code profile to use.  If not
+                                set the ceph-mon unit handling the broker
+                                request will set its default value.
+        :type erasure_profile: str
+        :param allow_ec_overwrites: Allow the erasure coded pool to be
+                                    overwritten.
+        :type allow_ec_overwrites: bool
+        :raises: AssertionError if provided data is of invalid type/range
+        """
+        op = {
+            'op': 'create-pool',
+            'name': name,
+            'pool-type': 'erasure',
+            'erasure-profile': erasure_profile,
+            'allow-ec-overwrites': allow_ec_overwrites,
+        }
+        op.update(self._partial_build_common_op_create(**kwargs))
+
+        # Initialize Pool-object to validate type and range of ops.
+        pool = ErasurePool('dummy-service', op=op)
+        pool.validate()
+
+        self.add_op(op)
+
+    def add_op_create_erasure_profile(self, name,
+                                      erasure_type='jerasure',
+                                      erasure_technique=None,
+                                      k=None, m=None,
+                                      failure_domain=None,
+                                      lrc_locality=None,
+                                      shec_durability_estimator=None,
+                                      clay_helper_chunks=None,
+                                      device_class=None,
+                                      clay_scalar_mds=None,
+                                      lrc_crush_locality=None):
+        """Adds an operation to create a erasure coding profile.
+
+        :param name: Name of profile to create
+        :type name: str
+        :param erasure_type: Which of the erasure coding plugins should be used
+        :type erasure_type: string
+        :param erasure_technique: EC plugin technique to use
+        :type erasure_technique: string
+        :param k: Number of data chunks
+        :type k: int
+        :param m: Number of coding chunks
+        :type m: int
+        :param lrc_locality: Group the coding and data chunks into sets of size locality
+                             (lrc plugin)
+        :type lrc_locality: int
+        :param durability_estimator: The number of parity chuncks each of which includes
+                                     a data chunk in its calculation range (shec plugin)
+        :type durability_estimator: int
+        :param helper_chunks: The number of helper chunks to use for recovery operations
+                              (clay plugin)
+        :type: helper_chunks: int
+        :param failure_domain: Type of failure domain from Ceph bucket types
+                               to be used
+        :type failure_domain: string
+        :param device_class: Device class to use for profile (ssd, hdd)
+        :type device_class: string
+        :param clay_scalar_mds: Plugin to use for CLAY layered construction
+                                (jerasure|isa|shec)
+        :type clay_scaler_mds: string
+        :param lrc_crush_locality: Type of crush bucket in which set of chunks
+                                   defined by lrc_locality will be stored.
+        :type lrc_crush_locality: string
+        """
+        self.add_op({'op': 'create-erasure-profile',
+                     'name': name,
+                     'k': k,
+                     'm': m,
+                     'l': lrc_locality,
+                     'c': shec_durability_estimator,
+                     'd': clay_helper_chunks,
+                     'erasure-type': erasure_type,
+                     'erasure-technique': erasure_technique,
+                     'failure-domain': failure_domain,
+                     'device-class': device_class,
+                     'scalar-mds': clay_scalar_mds,
+                     'crush-locality': lrc_crush_locality})
+
+    def set_ops(self, ops):
+        """Set request ops to provided value.
+
+        Useful for injecting ops that come from a previous request
+        to allow comparisons to ensure validity.
+        """
+        self.ops = ops
+
+    @property
+    def request(self):
+        return json.dumps({'api-version': self.api_version, 'ops': self.ops,
+                           'request-id': self.request_id})
+
+    def _ops_equal(self, other):
+        keys_to_compare = [
+            'replicas', 'name', 'op', 'pg_num', 'group-permission',
+            'object-prefix-permissions',
+        ]
+        keys_to_compare += list(self._partial_build_common_op_create().keys())
+        if len(self.ops) == len(other.ops):
+            for req_no in range(0, len(self.ops)):
+                for key in keys_to_compare:
+                    if self.ops[req_no].get(key) != other.ops[req_no].get(key):
+                        return False
+        else:
+            return False
+        return True
+
+    def __eq__(self, other):
+        if not isinstance(other, self.__class__):
+            return False
+        if self.api_version == other.api_version and \
+                self._ops_equal(other):
+            return True
+        else:
+            return False
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
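+# Example sketch: build a broker request carrying both a replicated and an
+# erasure coded pool op; all names are hypothetical:
+#
+#     rq = CephBrokerRq()
+#     rq.add_op_create_replicated_pool(name='mypool', replica_count=3,
+#                                      weight=20, app_name='rbd')
+#     rq.add_op_create_erasure_pool(name='my-ec-pool',
+#                                   erasure_profile='myprofile',
+#                                   allow_ec_overwrites=True,
+#                                   app_name='rbd')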
+
+class CephBrokerRsp(object):
+    """Ceph broker response.
+
+    Response is json-decoded and contents provided as methods/properties.
+
+    The API is versioned and defaults to version 1.
+    """
+
+    def __init__(self, encoded_rsp):
+        self.api_version = None
+        self.rsp = json.loads(encoded_rsp)
+
+    @property
+    def request_id(self):
+        return self.rsp.get('request-id')
+
+    @property
+    def exit_code(self):
+        return self.rsp.get('exit-code')
+
+    @property
+    def exit_msg(self):
+        return self.rsp.get('stderr')
+
+
+# Ceph Broker Conversation:
+# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
+# and send that request to ceph via the ceph relation. The CephBrokerRq has a
+# unique id so that the client can identify which CephBrokerRsp is associated
+# with the request. Ceph will also respond to each client unit individually
+# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
+# via key broker-rsp-glance-0
+#
+# To use this the charm can just do something like:
+#
+# from charmhelpers.contrib.storage.linux.ceph import (
+#     send_request_if_needed,
+#     is_request_complete,
+#     CephBrokerRq,
+# )
+#
+# @hooks.hook('ceph-relation-changed')
+# def ceph_changed():
+#     rq = CephBrokerRq()
+#     rq.add_op_create_pool(name='poolname', replica_count=3)
+#
+#     if is_request_complete(rq):
+#         <Request complete actions>
+#     else:
+#         send_request_if_needed(get_ceph_request())
+#
+# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
+# of glance having sent a request to ceph which ceph has successfully processed
+#  'ceph:8': {
+#      'ceph/0': {
+#          'auth': 'cephx',
+#          'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
+#          'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
+#          'ceph-public-address': '10.5.44.103',
+#          'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
+#          'private-address': '10.5.44.103',
+#      },
+#      'glance/0': {
+#          'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
+#                         '"ops": [{"replicas": 3, "name": "glance", '
+#                         '"op": "create-pool"}]}'),
+#          'private-address': '10.5.44.109',
+#      },
+#  }
+
+def get_previous_request(rid):
+    """Return the last ceph broker request sent on a given relation
+
+    :param rid: Relation id to query for request
+    :type rid: str
+    :returns: CephBrokerRq object or None if relation data not found.
+    :rtype: Optional[CephBrokerRq]
+    """
+    broker_req = relation_get(attribute='broker_req', rid=rid,
+                              unit=local_unit())
+    if broker_req:
+        return CephBrokerRq(raw_request_data=broker_req)
+
+
+def get_request_states(request, relation='ceph'):
+    """Return a dict of requests per relation id with their corresponding
+       completion state.
+
+    This allows a charm, which has a request for ceph, to see whether there is
+    an equivalent request already being processed and if so what state that
+    request is in.
+
+    @param request: A CephBrokerRq object
+    """
+    requests = {}
+    for rid in relation_ids(relation):
+        complete = False
+        previous_request = get_previous_request(rid)
+        if request == previous_request:
+            sent = True
+            complete = is_request_complete_for_rid(previous_request, rid)
+        else:
+            sent = False
+            complete = False
+
+        requests[rid] = {
+            'sent': sent,
+            'complete': complete,
+        }
+
+    return requests
+
+
+def is_request_sent(request, relation='ceph'):
+    """Check to see if a functionally equivalent request has already been sent
+
+    Returns True if a similar request has been sent
+
+    @param request: A CephBrokerRq object
+    """
+    states = get_request_states(request, relation=relation)
+    for rid in states.keys():
+        if not states[rid]['sent']:
+            return False
+
+    return True
+
+
+def is_request_complete(request, relation='ceph'):
+    """Check to see if a functionally equivalent request has already been
+    completed
+
+    Returns True if a similar request has been completed
+
+    @param request: A CephBrokerRq object
+    """
+    states = get_request_states(request, relation=relation)
+    for rid in states.keys():
+        if not states[rid]['complete']:
+            return False
+
+    return True
+
+
+def is_request_complete_for_rid(request, rid):
+    """Check if a given request has been completed on the given relation
+
+    @param request: A CephBrokerRq object
+    @param rid: Relation ID
+    """
+    broker_key = get_broker_rsp_key()
+    for unit in related_units(rid):
+        rdata = relation_get(rid=rid, unit=unit)
+        if rdata.get(broker_key):
+            rsp = CephBrokerRsp(rdata.get(broker_key))
+            if rsp.request_id == request.request_id:
+                if not rsp.exit_code:
+                    return True
+        else:
+            # The remote unit sent no reply targeted at this unit so either the
+            # remote ceph cluster does not support unit targeted replies or it
+            # has not processed our request yet.
+            if rdata.get('broker_rsp'):
+                request_data = json.loads(rdata['broker_rsp'])
+                if request_data.get('request-id'):
+                    log('Ignoring legacy broker_rsp without unit key as remote '
+                        'service supports unit specific replies', level=DEBUG)
+                else:
+                    log('Using legacy broker_rsp as remote service does not '
+                        'support unit specific replies', level=DEBUG)
+                    rsp = CephBrokerRsp(rdata['broker_rsp'])
+                    if not rsp.exit_code:
+                        return True
+
+    return False
+
+
+def get_broker_rsp_key():
+    """Return broker response key for this unit
+
+    This is the key that ceph is going to use to pass request status
+    information back to this unit
+    """
+    return 'broker-rsp-' + local_unit().replace('/', '-')
+
+
+def send_request_if_needed(request, relation='ceph'):
+    """Send broker request if an equivalent request has not already been sent
+
+    @param request: A CephBrokerRq object
+    """
+    if is_request_sent(request, relation=relation):
+        log('Request already sent but not complete, not sending new request',
+            level=DEBUG)
+    else:
+        for rid in relation_ids(relation):
+            log('Sending request {}'.format(request.request_id), level=DEBUG)
+            relation_set(relation_id=rid, broker_req=request.request)
+            relation_set(relation_id=rid, relation_settings={'unit-name': local_unit()})
+
+
+def has_broker_rsp(rid=None, unit=None):
+    """Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data.
+
+    :param rid: The relation to check (default of None means current relation)
+    :type rid: Union[str, None]
+    :param unit: The remote unit to check (default of None means current unit)
+    :type unit: Union[str, None]
+    :returns: True if broker key exists and is set to something 'truthy'
+    :rtype: bool
+    """
+    rdata = relation_get(rid=rid, unit=unit) or {}
+    broker_rsp = rdata.get(get_broker_rsp_key())
+    return bool(broker_rsp)
+
+
+def is_broker_action_done(action, rid=None, unit=None):
+    """Check whether broker action has completed yet.
+
+    @param action: name of action to be performed
+    @returns True if action complete otherwise False
+    """
+    rdata = relation_get(rid=rid, unit=unit) or {}
+    broker_rsp = rdata.get(get_broker_rsp_key())
+    if not broker_rsp:
+        return False
+
+    rsp = CephBrokerRsp(broker_rsp)
+    unit_name = local_unit().partition('/')[2]
+    key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
+    kvstore = kv()
+    val = kvstore.get(key=key)
+    if val and val == rsp.request_id:
+        return True
+
+    return False
+
+
+def mark_broker_action_done(action, rid=None, unit=None):
+    """Mark action as having been completed.
+
+    @param action: name of action to be performed
+    @returns None
+    """
+    rdata = relation_get(rid=rid, unit=unit) or {}
+    broker_rsp = rdata.get(get_broker_rsp_key())
+    if not broker_rsp:
+        return
+
+    rsp = CephBrokerRsp(broker_rsp)
+    unit_name = local_unit().partition('/')[2]
+    key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
+    kvstore = kv()
+    kvstore.set(key=key, value=rsp.request_id)
+    kvstore.flush()
+
+
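+# Example (editor's illustrative sketch, not part of charm-helpers): run a
+# follow-up action exactly once per broker response. The action name and
+# the work performed are assumptions for illustration.
+def _example_restart_once():
+    if is_broker_action_done('restart-services'):
+        return
+    # ... restart services here ...
+    mark_broker_action_done('restart-services')
+
+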
+class CephConfContext(object):
+    """Ceph config (ceph.conf) context.
+
+    Supports user-provided Ceph configuration settings. Users can provide a
+    dictionary as the value for the config-flags charm option containing
+    Ceph configuration settings keyed by their section in ceph.conf.
+    """
+    def __init__(self, permitted_sections=None):
+        self.permitted_sections = permitted_sections or []
+
+    def __call__(self):
+        conf = config('config-flags')
+        if not conf:
+            return {}
+
+        conf = config_flags_parser(conf)
+        if not isinstance(conf, dict):
+            log("Provided config-flags is not a dictionary - ignoring",
+                level=WARNING)
+            return {}
+
+        permitted = self.permitted_sections
+        if permitted:
+            diff = set(conf.keys()).difference(set(permitted))
+            if diff:
+                log("Config-flags contains invalid keys '%s' - they will be "
+                    "ignored" % (', '.join(diff)), level=WARNING)
+
+        ceph_conf = {}
+        for key in conf:
+            if permitted and key not in permitted:
+                log("Ignoring key '%s'" % key, level=WARNING)
+                continue
+
+            ceph_conf[key] = conf[key]
+        return ceph_conf
+
+
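+# Example (editor's illustrative sketch, not part of charm-helpers): build a
+# template context from user-supplied config-flags, restricted to the
+# 'global' section. The section name is an assumption for illustration.
+def _example_conf_context():
+    ctxt = CephConfContext(permitted_sections=['global'])()
+    return ctxt.get('global', {})
+
+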
+class CephOSDConfContext(CephConfContext):
+    """Ceph config (ceph.conf) context.
+
+    Consolidates settings from config-flags via CephConfContext with
+    settings provided by the mons. The config-flag values are preserved in
+    conf['osd'], settings from the mons which do not clash with config-flag
+    settings are in conf['osd_from_client'] and finally settings which do
+    clash are in conf['osd_from_client_conflict']. Rather than silently
+    dropping the conflicting settings, they are provided in the context so
+    they can be rendered commented out to give some visibility to the admin.
+    """
+
+    def __init__(self, permitted_sections=None):
+        super(CephOSDConfContext, self).__init__(
+            permitted_sections=permitted_sections)
+        try:
+            self.settings_from_mons = get_osd_settings('mon')
+        except OSDSettingConflict:
+            log(
+                "OSD settings from mons are inconsistent, ignoring them",
+                level=WARNING)
+            self.settings_from_mons = {}
+
+    def filter_osd_from_mon_settings(self):
+        """Filter settings from client relation against config-flags.
+
+        :returns: A tuple (
+            config-flag values,
+            client settings which do not conflict with config-flag values,
+            client settings which conflict with config-flag values)
+        :rtype: (OrderedDict, OrderedDict, OrderedDict)
+        """
+        ceph_conf = super(CephOSDConfContext, self).__call__()
+        conflicting_entries = {}
+        clear_entries = {}
+        for key, value in self.settings_from_mons.items():
+            if key in ceph_conf.get('osd', {}):
+                if ceph_conf['osd'][key] != value:
+                    conflicting_entries[key] = value
+            else:
+                clear_entries[key] = value
+        clear_entries = _order_dict_by_key(clear_entries)
+        conflicting_entries = _order_dict_by_key(conflicting_entries)
+        return ceph_conf, clear_entries, conflicting_entries
+
+    def __call__(self):
+        """Construct OSD config context.
+
+        Standard context with two additional special keys.
+            osd_from_client_conflict: client settings which conflict with
+                                      config-flag values
+            osd_from_client: settings which do not conflict with config-flag
+                             values
+
+        :returns: OSD config context dict.
+        :rtype: dict
+        """
+        conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings()
+        conf['osd_from_client_conflict'] = osd_conflict
+        conf['osd_from_client'] = osd_clear
+        return conf
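+
+
+# Example (editor's illustrative sketch, not part of charm-helpers): render
+# the mon-supplied settings that conflict with config-flags commented out,
+# per the class docstring above, so the admin can still see them.
+def _example_osd_conflict_lines():
+    ctxt = CephOSDConfContext(permitted_sections=['osd'])()
+    return ['# {} = {}'.format(k, v)
+            for k, v in ctxt['osd_from_client_conflict'].items()]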
diff --git a/charmhelpers/contrib/storage/linux/loopback.py b/charmhelpers/contrib/storage/linux/loopback.py
new file mode 100644
index 0000000000000000000000000000000000000000..74bab40e43a978e3d9e1e2f9c8975368092145c0
--- /dev/null
+++ b/charmhelpers/contrib/storage/linux/loopback.py
@@ -0,0 +1,92 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+from subprocess import (
+    check_call,
+    check_output,
+)
+
+import six
+
+
+##################################################
+# loopback device helpers.
+##################################################
+def loopback_devices():
+    '''
+    Parse through 'losetup -a' output to determine currently mapped
+    loopback devices. Output is expected to look like:
+
+        /dev/loop0: [0807]:961814 (/tmp/my.img)
+
+    or:
+
+        /dev/loop0: [0807]:961814 (/tmp/my.img (deleted))
+
+    :returns: dict: a dict mapping {loopback_dev: backing_file}
+    '''
+    loopbacks = {}
+    cmd = ['losetup', '-a']
+    output = check_output(cmd)
+    if six.PY3:
+        output = output.decode('utf-8')
+    devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != '']
+    for dev, _, f in devs:
+        loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0]
+    return loopbacks
+
+
+def create_loopback(file_path):
+    '''
+    Create a loopback device for a given backing file.
+
+    :returns: str: Full path to new loopback device (eg, /dev/loop0)
+    '''
+    file_path = os.path.abspath(file_path)
+    check_call(['losetup', '--find', file_path])
+    for d, f in six.iteritems(loopback_devices()):
+        if f == file_path:
+            return d
+
+
+def ensure_loopback_device(path, size):
+    '''
+    Ensure a loopback device exists for a given backing file path and size.
+    If a loopback device is not already mapped to the file, a new one will
+    be created.
+
+    TODO: Confirm size of found loopback device.
+
+    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
+    '''
+    for d, f in six.iteritems(loopback_devices()):
+        if f == path:
+            return d
+
+    if not os.path.exists(path):
+        cmd = ['truncate', '--size', size, path]
+        check_call(cmd)
+
+    return create_loopback(path)
+
+
+def is_mapped_loopback_device(device):
+    """
+    Checks if a given device name is an existing/mapped loopback device.
+    :param device: str: Full path to the device (eg, /dev/loop1).
+    :returns: str: Path to the backing file if it is a loopback device,
+    empty string otherwise
+    """
+    return loopback_devices().get(device, "")
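+
+
+# Example (editor's illustrative sketch, not part of charm-helpers): back a
+# scratch block device with a 1 GiB sparse file; the path and size are
+# assumptions for illustration.
+def _example_scratch_device(path='/srv/scratch.img'):
+    dev = ensure_loopback_device(path, '1G')
+    # Repeat calls return the same device while the mapping exists.
+    return dev, is_mapped_loopback_device(dev)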
diff --git a/charmhelpers/contrib/storage/linux/lvm.py b/charmhelpers/contrib/storage/linux/lvm.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8bde69263f0e917d32d0e5d70abba1409b26012
--- /dev/null
+++ b/charmhelpers/contrib/storage/linux/lvm.py
@@ -0,0 +1,182 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+from subprocess import (
+    CalledProcessError,
+    check_call,
+    check_output,
+    Popen,
+    PIPE,
+)
+
+
+##################################################
+# LVM helpers.
+##################################################
+def deactivate_lvm_volume_group(block_device):
+    '''
+    Deactivate any volume group associated with an LVM physical volume.
+
+    :param block_device: str: Full path to LVM physical volume
+    '''
+    vg = list_lvm_volume_group(block_device)
+    if vg:
+        cmd = ['vgchange', '-an', vg]
+        check_call(cmd)
+
+
+def is_lvm_physical_volume(block_device):
+    '''
+    Determine whether a block device is initialized as an LVM PV.
+
+    :param block_device: str: Full path of block device to inspect.
+
+    :returns: boolean: True if block device is a PV, False if not.
+    '''
+    try:
+        check_output(['pvdisplay', block_device])
+        return True
+    except CalledProcessError:
+        return False
+
+
+def remove_lvm_physical_volume(block_device):
+    '''
+    Remove LVM PV signatures from a given block device.
+
+    :param block_device: str: Full path of block device to scrub.
+    '''
+    p = Popen(['pvremove', '-ff', block_device],
+              stdin=PIPE)
+    p.communicate(input='y\n')
+
+
+def list_lvm_volume_group(block_device):
+    '''
+    List LVM volume group associated with a given block device.
+
+    Assumes block device is a valid LVM PV.
+
+    :param block_device: str: Full path of block device to inspect.
+
+    :returns: str: Name of volume group associated with block device or None
+    '''
+    vg = None
+    pvd = check_output(['pvdisplay', block_device]).splitlines()
+    for lvm in pvd:
+        lvm = lvm.decode('UTF-8')
+        if lvm.strip().startswith('VG Name'):
+            vg = ' '.join(lvm.strip().split()[2:])
+    return vg
+
+
+def create_lvm_physical_volume(block_device):
+    '''
+    Initialize a block device as an LVM physical volume.
+
+    :param block_device: str: Full path of block device to initialize.
+
+    '''
+    check_call(['pvcreate', block_device])
+
+
+def create_lvm_volume_group(volume_group, block_device):
+    '''
+    Create an LVM volume group backed by a given block device.
+
+    Assumes block device has already been initialized as an LVM PV.
+
+    :param volume_group: str: Name of volume group to create.
+    :block_device: str: Full path of PV-initialized block device.
+    '''
+    check_call(['vgcreate', volume_group, block_device])
+
+
+def list_logical_volumes(select_criteria=None, path_mode=False):
+    '''
+    List logical volumes
+
+    :param select_criteria: str: Limit list to those volumes matching this
+                                 criteria (see 'lvs -S help' for more details)
+    :param path_mode: bool: return logical volume name in 'vg/lv' format;
+                            this format is required for some commands like
+                            lvextend
+    :returns: [str]: List of logical volumes
+    '''
+    lv_display_attr = 'lv_name'
+    if path_mode:
+        # Parsing output logic relies on the column order
+        lv_display_attr = 'vg_name,' + lv_display_attr
+    cmd = ['lvs', '--options', lv_display_attr, '--noheadings']
+    if select_criteria:
+        cmd.extend(['--select', select_criteria])
+    lvs = []
+    for lv in check_output(cmd).decode('UTF-8').splitlines():
+        if not lv:
+            continue
+        if path_mode:
+            lvs.append('/'.join(lv.strip().split()))
+        else:
+            lvs.append(lv.strip())
+    return lvs
+
+
+list_thin_logical_volume_pools = functools.partial(
+    list_logical_volumes,
+    select_criteria='lv_attr =~ ^t')
+
+list_thin_logical_volumes = functools.partial(
+    list_logical_volumes,
+    select_criteria='lv_attr =~ ^V')
+
+
+def extend_logical_volume_by_device(lv_name, block_device):
+    '''
+    Extends the size of logical volume lv_name by the amount of free space on
+    physical volume block_device.
+
+    :param lv_name: str: name of logical volume to be extended (vg/lv format)
+    :param block_device: str: name of block_device to be allocated to lv_name
+    '''
+    cmd = ['lvextend', lv_name, block_device]
+    check_call(cmd)
+
+
+def create_logical_volume(lv_name, volume_group, size=None):
+    '''
+    Create a new logical volume in an existing volume group
+
+    :param lv_name: str: name of logical volume to be created.
+    :param volume_group: str: Name of volume group to use for the new volume.
+    :param size: str: Size of logical volume to create (100% if not supplied)
+    :raises subprocess.CalledProcessError: in the event that the lvcreate fails.
+    '''
+    if size:
+        check_call([
+            'lvcreate',
+            '--yes',
+            '-L',
+            '{}'.format(size),
+            '-n', lv_name, volume_group
+        ])
+    # Create the LV with all the space available; lvcreate requires the
+    # extent-based '-l 100%FREE' form when no explicit size is given.
+    else:
+        check_call([
+            'lvcreate',
+            '--yes',
+            '-l',
+            '100%FREE',
+            '-n', lv_name, volume_group
+        ])
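+
+
+# Example (editor's illustrative sketch, not part of charm-helpers): turn a
+# blank disk into a volume group with one LV spanning all free space. The
+# device and names are assumptions for illustration.
+def _example_provision(dev='/dev/vdb'):
+    if not is_lvm_physical_volume(dev):
+        create_lvm_physical_volume(dev)
+        create_lvm_volume_group('data-vg', dev)
+    create_logical_volume('data-lv', 'data-vg')  # -l 100%FREE when no size
+    return list_logical_volumes(path_mode=True)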
diff --git a/charmhelpers/contrib/storage/linux/utils.py b/charmhelpers/contrib/storage/linux/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..a35617606cf52d7cffc04ac245811b770fd95e8e
--- /dev/null
+++ b/charmhelpers/contrib/storage/linux/utils.py
@@ -0,0 +1,128 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+from stat import S_ISBLK
+
+from subprocess import (
+    CalledProcessError,
+    check_call,
+    check_output,
+    call
+)
+
+
+def _luks_uuid(dev):
+    """
+    Check to see if dev is a LUKS encrypted volume, returning the UUID
+    of the volume if it is.
+
+    :param: dev: path to block device to check.
+    :returns: str. UUID of LUKS device or None if not a LUKS device
+    """
+    try:
+        cmd = ['cryptsetup', 'luksUUID', dev]
+        return check_output(cmd).decode('UTF-8').strip()
+    except CalledProcessError:
+        return None
+
+
+def is_luks_device(dev):
+    """
+    Determine if dev is a LUKS-formatted block device.
+
+    :param: dev: A full path to a block device to check for LUKS header
+    presence
+    :returns: boolean: indicates whether the device has a LUKS header.
+    """
+    return True if _luks_uuid(dev) else False
+
+
+def is_mapped_luks_device(dev):
+    """
+    Determine if dev is a mapped LUKS device
+    :param: dev: A full path to a block device to be checked
+    :returns: boolean: indicates whether a device is mapped
+    """
+    _, dirs, _ = next(os.walk(
+        '/sys/class/block/{}/holders/'
+        .format(os.path.basename(os.path.realpath(dev))))
+    )
+    is_held = len(dirs) > 0
+    return is_held and is_luks_device(dev)
+
+
+def is_block_device(path):
+    '''
+    Confirm device at path is a valid block device node.
+
+    :returns: boolean: True if path is a block device, False if not.
+    '''
+    if not os.path.exists(path):
+        return False
+    return S_ISBLK(os.stat(path).st_mode)
+
+
+def zap_disk(block_device):
+    '''
+    Clear a block device of its partition table. Relies on sgdisk, which is
+    installed as part of the 'gdisk' package in Ubuntu.
+
+    :param block_device: str: Full path of block device to clean.
+    '''
+    # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
+    # sometimes sgdisk exits non-zero; this is OK, dd will clean up
+    call(['sgdisk', '--zap-all', '--', block_device])
+    call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
+    dev_end = check_output(['blockdev', '--getsz',
+                            block_device]).decode('UTF-8')
+    gpt_end = int(dev_end.split()[0]) - 100
+    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
+                'bs=1M', 'count=1'])
+    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
+                'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
+
+
+def is_device_mounted(device):
+    '''Given a device path, return True if that device is mounted, and False
+    if it isn't.
+
+    :param device: str: Full path of the device to check.
+    :returns: boolean: True if the path represents a mounted device, False if
+        it doesn't.
+    '''
+    try:
+        out = check_output(['lsblk', '-P', device]).decode('UTF-8')
+    except Exception:
+        return False
+    return bool(re.search(r'MOUNTPOINT=".+"', out))
+
+
+def mkfs_xfs(device, force=False, inode_size=1024):
+    """Format device with XFS filesystem.
+
+    By default this should fail if the device already has a filesystem on it.
+    :param device: Full path to device to format
+    :ptype device: str
+    :param force: Force operation
+    :ptype: force: boolean
+    :param inode_size: XFS inode size in bytes
+    :ptype inode_size: int"""
+    cmd = ['mkfs.xfs']
+    if force:
+        cmd.append("-f")
+
+    cmd += ['-i', "size={}".format(inode_size), device]
+    check_call(cmd)
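+
+
+# Example (editor's illustrative sketch, not part of charm-helpers): wipe
+# and format a disk only when it is demonstrably safe to do so. The device
+# path is an assumption for illustration.
+def _example_reformat(dev='/dev/vdb'):
+    if not is_block_device(dev):
+        return False
+    if is_device_mounted(dev) or is_luks_device(dev):
+        return False
+    zap_disk(dev)
+    mkfs_xfs(dev, force=True)
+    return True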
diff --git a/charmhelpers/core/__init__.py b/charmhelpers/core/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7567b863e3a5ad2b7a7f44958b4166e0c3d346b
--- /dev/null
+++ b/charmhelpers/core/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/charmhelpers/core/decorators.py b/charmhelpers/core/decorators.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7e95d171888c9c1c804200e0997837191c923fc
--- /dev/null
+++ b/charmhelpers/core/decorators.py
@@ -0,0 +1,93 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Copyright 2014 Canonical Ltd.
+#
+# Authors:
+#  Edward Hope-Morley <opentastic@gmail.com>
+#
+
+import time
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+
+
+def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
+    """If the decorated function raises exception exc_type, allow num_retries
+    retry attempts before raise the exception.
+    """
+    def _retry_on_exception_inner_1(f):
+        def _retry_on_exception_inner_2(*args, **kwargs):
+            retries = num_retries
+            multiplier = 1
+            while True:
+                try:
+                    return f(*args, **kwargs)
+                except exc_type:
+                    if not retries:
+                        raise
+
+                delay = base_delay * multiplier
+                multiplier += 1
+                log("Retrying '%s' %d more times (delay=%s)" %
+                    (f.__name__, retries, delay), level=INFO)
+                retries -= 1
+                if delay:
+                    time.sleep(delay)
+
+        return _retry_on_exception_inner_2
+
+    return _retry_on_exception_inner_1
+
+
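+# Example (editor's illustrative sketch, not part of charm-helpers): retry a
+# transient failure up to three times with a growing delay. ValueError here
+# stands in for whatever exception the caller expects.
+@retry_on_exception(3, base_delay=5, exc_type=ValueError)
+def _example_flaky_operation():
+    raise ValueError('transient failure')  # placeholder body
+
+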
+def retry_on_predicate(num_retries, predicate_fun, base_delay=0):
+    """Retry based on return value
+
+    The return value of the decorated function is passed to the given predicate_fun. If the
+    result of the predicate is False, retry the decorated function up to num_retries times
+
+    An exponential backoff up to base_delay^num_retries seconds can be introduced by setting
+    base_delay to a nonzero value. The default is to run with a zero (i.e. no) delay
+
+    :param num_retries: Max. number of retries to perform
+    :type num_retries: int
+    :param predicate_fun: Predicate function to determine if a retry is necessary
+    :type predicate_fun: callable
+    :param base_delay: Starting value in seconds for exponential delay, defaults to 0 (no delay)
+    :type base_delay: float
+    """
+    def _retry_on_pred_inner_1(f):
+        def _retry_on_pred_inner_2(*args, **kwargs):
+            retries = num_retries
+            multiplier = 1
+            delay = base_delay
+            while True:
+                result = f(*args, **kwargs)
+                if predicate_fun(result) or retries <= 0:
+                    return result
+                delay *= multiplier
+                multiplier += 1
+                log("Result {}, retrying '{}' {} more times (delay={})".format(
+                    result, f.__name__, retries, delay), level=INFO)
+                retries -= 1
+                if delay:
+                    time.sleep(delay)
+
+        return _retry_on_pred_inner_2
+
+    return _retry_on_pred_inner_1
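+
+
+# Example (editor's illustrative sketch, not part of charm-helpers): poll
+# until a path appears, backing off exponentially from a 2 second base
+# delay. The path is an assumption for illustration.
+@retry_on_predicate(5, bool, base_delay=2)
+def _example_wait_for_path(path='/run/ready'):
+    import os
+    return os.path.exists(path)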
diff --git a/charmhelpers/core/files.py b/charmhelpers/core/files.py
new file mode 100644
index 0000000000000000000000000000000000000000..fdd82b75709c13da0d534bf4962822984a3c1867
--- /dev/null
+++ b/charmhelpers/core/files.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
+
+import os
+import subprocess
+
+
+def sed(filename, before, after, flags='g'):
+    """
+    Search and replace the given pattern in filename.
+
+    :param filename: relative or absolute file path.
+    :param before: expression to be replaced (see 'man sed')
+    :param after: expression to replace with (see 'man sed')
+    :param flags: sed-compatible regex flags. For example, to make the
+    search and replace case insensitive, specify ``flags="i"``.
+    The ``g`` flag is always specified regardless, so you do not
+    need to remember to include it when overriding this parameter.
+    :returns: If the sed command exit code was zero then return,
+    otherwise raise CalledProcessError.
+    """
+    expression = r's/{0}/{1}/{2}'.format(before,
+                                         after, flags)
+
+    return subprocess.check_call(["sed", "-i", "-r", "-e",
+                                  expression,
+                                  os.path.expanduser(filename)])
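+
+
+# Example (editor's illustrative sketch, not part of charm-helpers): flip a
+# hypothetical config key in place; the path and patterns are assumptions
+# for illustration.
+def _example_enable_debug(conf='/etc/myapp/myapp.conf'):
+    sed(conf, r'^debug[[:space:]]*=.*', 'debug = true')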
diff --git a/charmhelpers/core/fstab.py b/charmhelpers/core/fstab.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9fa9152c765c538adad3fd9bc45a46018c89b72
--- /dev/null
+++ b/charmhelpers/core/fstab.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import os
+
+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
+
+
+class Fstab(io.FileIO):
+    """This class extends file in order to implement a file reader/writer
+    for file `/etc/fstab`
+    """
+
+    class Entry(object):
+        """Entry class represents a non-comment line on the `/etc/fstab` file
+        """
+        def __init__(self, device, mountpoint, filesystem,
+                     options, d=0, p=0):
+            self.device = device
+            self.mountpoint = mountpoint
+            self.filesystem = filesystem
+
+            if not options:
+                options = "defaults"
+
+            self.options = options
+            self.d = int(d)
+            self.p = int(p)
+
+        def __eq__(self, o):
+            return str(self) == str(o)
+
+        def __str__(self):
+            return "{} {} {} {} {} {}".format(self.device,
+                                              self.mountpoint,
+                                              self.filesystem,
+                                              self.options,
+                                              self.d,
+                                              self.p)
+
+    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
+
+    def __init__(self, path=None):
+        if path:
+            self._path = path
+        else:
+            self._path = self.DEFAULT_PATH
+        super(Fstab, self).__init__(self._path, 'rb+')
+
+    def _hydrate_entry(self, line):
+        # NOTE: use split with no arguments to split on any
+        #       whitespace including tabs
+        return Fstab.Entry(*filter(
+            lambda x: x not in ('', None),
+            line.strip("\n").split()))
+
+    @property
+    def entries(self):
+        self.seek(0)
+        for line in self.readlines():
+            line = line.decode('us-ascii')
+            try:
+                if line.strip() and not line.strip().startswith("#"):
+                    yield self._hydrate_entry(line)
+            except ValueError:
+                pass
+
+    def get_entry_by_attr(self, attr, value):
+        for entry in self.entries:
+            e_attr = getattr(entry, attr)
+            if e_attr == value:
+                return entry
+        return None
+
+    def add_entry(self, entry):
+        if self.get_entry_by_attr('device', entry.device):
+            return False
+
+        self.write((str(entry) + '\n').encode('us-ascii'))
+        self.truncate()
+        return entry
+
+    def remove_entry(self, entry):
+        self.seek(0)
+
+        lines = [l.decode('us-ascii') for l in self.readlines()]
+
+        found = False
+        for index, line in enumerate(lines):
+            if line.strip() and not line.strip().startswith("#"):
+                if self._hydrate_entry(line) == entry:
+                    found = True
+                    break
+
+        if not found:
+            return False
+
+        lines.remove(line)
+
+        self.seek(0)
+        self.write(''.join(lines).encode('us-ascii'))
+        self.truncate()
+        return True
+
+    @classmethod
+    def remove_by_mountpoint(cls, mountpoint, path=None):
+        fstab = cls(path=path)
+        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
+        if entry:
+            return fstab.remove_entry(entry)
+        return False
+
+    @classmethod
+    def add(cls, device, mountpoint, filesystem, options=None, path=None):
+        return cls(path=path).add_entry(Fstab.Entry(device,
+                                                    mountpoint, filesystem,
+                                                    options=options))
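+
+
+# Example (editor's illustrative sketch, not part of charm-helpers): persist
+# a mount for an XFS data volume, then remove it again. The device and
+# mountpoint are assumptions for illustration.
+def _example_persist_and_forget_mount():
+    Fstab.add('/dev/vdb1', '/srv/data', 'xfs', options='defaults,noatime')
+    return Fstab.remove_by_mountpoint('/srv/data')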
diff --git a/charmhelpers/core/hookenv.py b/charmhelpers/core/hookenv.py
new file mode 100644
index 0000000000000000000000000000000000000000..db7ce7282b4c96c8a33abf309a340377216922ec
--- /dev/null
+++ b/charmhelpers/core/hookenv.py
@@ -0,0 +1,1613 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"Interactions with the Juju environment"
+# Copyright 2013 Canonical Ltd.
+#
+# Authors:
+#  Charm Helpers Developers <juju@lists.ubuntu.com>
+
+from __future__ import print_function
+import copy
+from distutils.version import LooseVersion
+from enum import Enum
+from functools import wraps
+from collections import namedtuple
+import glob
+import os
+import json
+import yaml
+import re
+import subprocess
+import sys
+import errno
+import tempfile
+from subprocess import CalledProcessError
+
+from charmhelpers import deprecate
+
+import six
+if not six.PY3:
+    from UserDict import UserDict
+else:
+    from collections import UserDict
+
+
+CRITICAL = "CRITICAL"
+ERROR = "ERROR"
+WARNING = "WARNING"
+INFO = "INFO"
+DEBUG = "DEBUG"
+TRACE = "TRACE"
+MARKER = object()
+SH_MAX_ARG = 131071
+
+
+RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. '
+                 'This may not be compatible with software you are '
+                 'running in your shell.')
+
+
+class WORKLOAD_STATES(Enum):
+    ACTIVE = 'active'
+    BLOCKED = 'blocked'
+    MAINTENANCE = 'maintenance'
+    WAITING = 'waiting'
+
+
+cache = {}
+
+
+def cached(func):
+    """Cache return values for multiple executions of func + args
+
+    For example::
+
+        @cached
+        def unit_get(attribute):
+            pass
+
+        unit_get('test')
+
+    will cache the result of unit_get + 'test' for future calls.
+    """
+    @wraps(func)
+    def wrapper(*args, **kwargs):
+        global cache
+        key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
+        try:
+            return cache[key]
+        except KeyError:
+            pass  # Drop out of the exception handler scope.
+        res = func(*args, **kwargs)
+        cache[key] = res
+        return res
+    wrapper._wrapped = func
+    return wrapper
+
+
+def flush(key):
+    """Flushes any entries from function cache where the
+    key is found in the function+args """
+    flush_list = []
+    for item in cache:
+        if key in item:
+            flush_list.append(item)
+    for item in flush_list:
+        del cache[item]
+
+
+def log(message, level=None):
+    """Write a message to the juju log"""
+    command = ['juju-log']
+    if level:
+        command += ['-l', level]
+    if not isinstance(message, six.string_types):
+        message = repr(message)
+    command += [message[:SH_MAX_ARG]]
+    # Missing juju-log should not cause failures in unit tests
+    # Send log output to stderr
+    try:
+        subprocess.call(command)
+    except OSError as e:
+        if e.errno == errno.ENOENT:
+            if level:
+                message = "{}: {}".format(level, message)
+            message = "juju-log: {}".format(message)
+            print(message, file=sys.stderr)
+        else:
+            raise
+
+
+def function_log(message):
+    """Write a function progress message"""
+    command = ['function-log']
+    if not isinstance(message, six.string_types):
+        message = repr(message)
+    command += [message[:SH_MAX_ARG]]
+    # Missing function-log should not cause failures in unit tests
+    # Send function_log output to stderr
+    try:
+        subprocess.call(command)
+    except OSError as e:
+        if e.errno == errno.ENOENT:
+            message = "function-log: {}".format(message)
+            print(message, file=sys.stderr)
+        else:
+            raise
+
+
+class Serializable(UserDict):
+    """Wrapper, an object that can be serialized to yaml or json"""
+
+    def __init__(self, obj):
+        # wrap the object
+        UserDict.__init__(self)
+        self.data = obj
+
+    def __getattr__(self, attr):
+        # See if this object has attribute.
+        if attr in ("json", "yaml", "data"):
+            return self.__dict__[attr]
+        # Check for attribute in wrapped object.
+        got = getattr(self.data, attr, MARKER)
+        if got is not MARKER:
+            return got
+        # Proxy to the wrapped object via dict interface.
+        try:
+            return self.data[attr]
+        except KeyError:
+            raise AttributeError(attr)
+
+    def __getstate__(self):
+        # Pickle as a standard dictionary.
+        return self.data
+
+    def __setstate__(self, state):
+        # Unpickle into our wrapper.
+        self.data = state
+
+    def json(self):
+        """Serialize the object to json"""
+        return json.dumps(self.data)
+
+    def yaml(self):
+        """Serialize the object to yaml"""
+        return yaml.dump(self.data)
+
+
+def execution_environment():
+    """A convenient bundling of the current execution context"""
+    context = {}
+    context['conf'] = config()
+    if relation_id():
+        context['reltype'] = relation_type()
+        context['relid'] = relation_id()
+        context['rel'] = relation_get()
+    context['unit'] = local_unit()
+    context['rels'] = relations()
+    context['env'] = os.environ
+    return context
+
+
+def in_relation_hook():
+    """Determine whether we're running in a relation hook"""
+    return 'JUJU_RELATION' in os.environ
+
+
+def relation_type():
+    """The scope for the current relation hook"""
+    return os.environ.get('JUJU_RELATION', None)
+
+
+@cached
+def relation_id(relation_name=None, service_or_unit=None):
+    """The relation ID for the current or a specified relation"""
+    if not relation_name and not service_or_unit:
+        return os.environ.get('JUJU_RELATION_ID', None)
+    elif relation_name and service_or_unit:
+        service_name = service_or_unit.split('/')[0]
+        for relid in relation_ids(relation_name):
+            remote_service = remote_service_name(relid)
+            if remote_service == service_name:
+                return relid
+    else:
+        raise ValueError(
+            'Must specify either both or neither of relation_name and '
+            'service_or_unit')
+
+
+def local_unit():
+    """Local unit ID"""
+    return os.environ['JUJU_UNIT_NAME']
+
+
+def remote_unit():
+    """The remote unit for the current relation hook"""
+    return os.environ.get('JUJU_REMOTE_UNIT', None)
+
+
+def application_name():
+    """
+    The name of the deployed application this unit belongs to.
+    """
+    return local_unit().split('/')[0]
+
+
+def service_name():
+    """
+    .. deprecated:: 0.19.1
+       Alias for :func:`application_name`.
+    """
+    return application_name()
+
+
+def model_name():
+    """
+    Name of the model that this unit is deployed in.
+    """
+    return os.environ['JUJU_MODEL_NAME']
+
+
+def model_uuid():
+    """
+    UUID of the model that this unit is deployed in.
+    """
+    return os.environ['JUJU_MODEL_UUID']
+
+
+def principal_unit():
+    """Returns the principal unit of this unit, otherwise None"""
+    # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
+    principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None)
+    # If it's empty, then this unit is the principal
+    if principal_unit == '':
+        return os.environ['JUJU_UNIT_NAME']
+    elif principal_unit is not None:
+        return principal_unit
+    # For Juju 2.1 and below, let's try to work out the principal unit
+    # from the various charms' metadata.yaml.
+    for reltype in relation_types():
+        for rid in relation_ids(reltype):
+            for unit in related_units(rid):
+                md = _metadata_unit(unit)
+                if not md:
+                    continue
+                subordinate = md.pop('subordinate', None)
+                if not subordinate:
+                    return unit
+    return None
+
+
+@cached
+def remote_service_name(relid=None):
+    """The remote service name for a given relation-id (or the current relation)"""
+    if relid is None:
+        unit = remote_unit()
+    else:
+        units = related_units(relid)
+        unit = units[0] if units else None
+    return unit.split('/')[0] if unit else None
+
+
+def hook_name():
+    """The name of the currently executing hook"""
+    return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
+
+
+class Config(dict):
+    """A dictionary representation of the charm's config.yaml, with some
+    extra features:
+
+    - See which values in the dictionary have changed since the previous hook.
+    - For values that have changed, see what the previous value was.
+    - Store arbitrary data for use in a later hook.
+
+    NOTE: Do not instantiate this object directly - instead call
+    ``hookenv.config()``, which will return an instance of :class:`Config`.
+
+    Example usage::
+
+        >>> # inside a hook
+        >>> from charmhelpers.core import hookenv
+        >>> config = hookenv.config()
+        >>> config['foo']
+        'bar'
+        >>> # store a new key/value for later use
+        >>> config['mykey'] = 'myval'
+
+
+        >>> # user runs `juju set mycharm foo=baz`
+        >>> # now we're inside subsequent config-changed hook
+        >>> config = hookenv.config()
+        >>> config['foo']
+        'baz'
+        >>> # test to see if this val has changed since last hook
+        >>> config.changed('foo')
+        True
+        >>> # what was the previous value?
+        >>> config.previous('foo')
+        'bar'
+        >>> # keys/values that we add are preserved across hooks
+        >>> config['mykey']
+        'myval'
+
+    """
+    CONFIG_FILE_NAME = '.juju-persistent-config'
+
+    def __init__(self, *args, **kw):
+        super(Config, self).__init__(*args, **kw)
+        self.implicit_save = True
+        self._prev_dict = None
+        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
+        if os.path.exists(self.path) and os.stat(self.path).st_size:
+            self.load_previous()
+        atexit(self._implicit_save)
+
+    def load_previous(self, path=None):
+        """Load previous copy of config from disk.
+
+        In normal usage you don't need to call this method directly - it
+        is called automatically at object initialization.
+
+        :param path:
+
+            File path from which to load the previous config. If `None`,
+            config is loaded from the default location. If `path` is
+            specified, subsequent `save()` calls will write to the same
+            path.
+
+        """
+        self.path = path or self.path
+        with open(self.path) as f:
+            try:
+                self._prev_dict = json.load(f)
+            except ValueError as e:
+                log('Found previous config data but was unable to parse it; '
+                    'ignoring it, which will report all values as changed '
+                    '- {}'.format(str(e)), level=ERROR)
+                return
+        for k, v in copy.deepcopy(self._prev_dict).items():
+            if k not in self:
+                self[k] = v
+
+    def changed(self, key):
+        """Return True if the current value for this key is different from
+        the previous value.
+
+        """
+        if self._prev_dict is None:
+            return True
+        return self.previous(key) != self.get(key)
+
+    def previous(self, key):
+        """Return previous value for this key, or None if there
+        is no previous value.
+
+        """
+        if self._prev_dict:
+            return self._prev_dict.get(key)
+        return None
+
+    def save(self):
+        """Save this config to disk.
+
+        If the charm is using the :mod:`Services Framework <services.base>`
+        or :meth:'@hook <Hooks.hook>' decorator, this
+        is called automatically at the end of successful hook execution.
+        Otherwise, it should be called directly by user code.
+
+        To disable automatic saves, set ``implicit_save=False`` on this
+        instance.
+
+        """
+        with open(self.path, 'w') as f:
+            os.fchmod(f.fileno(), 0o600)
+            json.dump(self, f)
+
+    def _implicit_save(self):
+        if self.implicit_save:
+            self.save()
+
+
+_cache_config = None
+
+
+def config(scope=None):
+    """
+    Get the juju charm configuration (scope==None) or individual key,
+    (scope=str).  The returned value is a Python data structure loaded as
+    JSON from the Juju config command.
+
+    :param scope: If set, return the value for the specified key.
+    :type scope: Optional[str]
+    :returns: Either the whole config as a Config, or a key from it.
+    :rtype: Any
+    """
+    global _cache_config
+    config_cmd_line = ['config-get', '--all', '--format=json']
+    try:
+        # JSON Decode Exception for Python3.5+
+        exc_json = json.decoder.JSONDecodeError
+    except AttributeError:
+        # JSON Decode Exception for Python2.7 through Python3.4
+        exc_json = ValueError
+    try:
+        if _cache_config is None:
+            config_data = json.loads(
+                subprocess.check_output(config_cmd_line).decode('UTF-8'))
+            _cache_config = Config(config_data)
+        if scope is not None:
+            return _cache_config.get(scope)
+        return _cache_config
+    except (exc_json, UnicodeDecodeError) as e:
+        log('Unable to parse output from config-get: config_cmd_line="{}" '
+            'message="{}"'
+            .format(config_cmd_line, str(e)), level=ERROR)
+        return None
+
+
+@cached
+def relation_get(attribute=None, unit=None, rid=None):
+    """Get relation information"""
+    _args = ['relation-get', '--format=json']
+    if rid:
+        _args.append('-r')
+        _args.append(rid)
+    _args.append(attribute or '-')
+    if unit:
+        _args.append(unit)
+    try:
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+    except ValueError:
+        return None
+    except CalledProcessError as e:
+        if e.returncode == 2:
+            return None
+        raise
+
+
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
+    """Set relation information for the current unit"""
+    relation_settings = relation_settings if relation_settings else {}
+    relation_cmd_line = ['relation-set']
+    accepts_file = "--file" in subprocess.check_output(
+        relation_cmd_line + ["--help"], universal_newlines=True)
+    if relation_id is not None:
+        relation_cmd_line.extend(('-r', relation_id))
+    settings = relation_settings.copy()
+    settings.update(kwargs)
+    for key, value in settings.items():
+        # Force value to be a string: it always should be, but some call
+        # sites pass in things like dicts or numbers.
+        if value is not None:
+            settings[key] = "{}".format(value)
+    if accepts_file:
+        # --file was introduced in Juju 1.23.2. Use it by default if
+        # available, since otherwise we'll break if the relation data is
+        # too big. Ideally we should tell relation-set to read the data from
+        # stdin, but that feature is broken in 1.23.2: Bug #1454678.
+        with tempfile.NamedTemporaryFile(delete=False) as settings_file:
+            settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
+        subprocess.check_call(
+            relation_cmd_line + ["--file", settings_file.name])
+        os.remove(settings_file.name)
+    else:
+        for key, value in settings.items():
+            if value is None:
+                relation_cmd_line.append('{}='.format(key))
+            else:
+                relation_cmd_line.append('{}={}'.format(key, value))
+        subprocess.check_call(relation_cmd_line)
+    # Flush cache of any relation-gets for local unit
+    flush(local_unit())
+
+
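+# Example (editor's illustrative sketch, not part of charm-helpers):
+# advertise this unit's name on every relation of an assumed 'ceph' type.
+def _example_advertise_unit():
+    for rid in relation_ids('ceph'):
+        relation_set(relation_id=rid,
+                     relation_settings={'unit-name': local_unit()})
+
+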
+def relation_clear(r_id=None):
+    ''' Clears any relation data already set on relation r_id '''
+    settings = relation_get(rid=r_id,
+                            unit=local_unit())
+    for setting in settings:
+        if setting not in ['public-address', 'private-address']:
+            settings[setting] = None
+    relation_set(relation_id=r_id,
+                 **settings)
+
+
+@cached
+def relation_ids(reltype=None):
+    """A list of relation_ids"""
+    reltype = reltype or relation_type()
+    relid_cmd_line = ['relation-ids', '--format=json']
+    if reltype is not None:
+        relid_cmd_line.append(reltype)
+        return json.loads(
+            subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
+    return []
+
+
+@cached
+def related_units(relid=None):
+    """A list of related units"""
+    relid = relid or relation_id()
+    units_cmd_line = ['relation-list', '--format=json']
+    if relid is not None:
+        units_cmd_line.extend(('-r', relid))
+    return json.loads(
+        subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
+
+
+def expected_peer_units():
+    """Get a generator for units we expect to join peer relation based on
+    goal-state.
+
+    The local unit is excluded from the result to make it easy to gauge
+    completion of all peers joining the relation with existing hook tools.
+
+    Example usage:
+    log('peer {} of {} joined peer relation'
+        .format(len(related_units()),
+                len(list(expected_peer_units()))))
+
+    This function will raise NotImplementedError if used with juju versions
+    without goal-state support.
+
+    :returns: iterator
+    :rtype: types.GeneratorType
+    :raises: NotImplementedError
+    """
+    if not has_juju_version("2.4.0"):
+        # goal-state first appeared in 2.4.0.
+        raise NotImplementedError("goal-state")
+    _goal_state = goal_state()
+    return (key for key in _goal_state['units']
+            if '/' in key and key != local_unit())
+
+
+def expected_related_units(reltype=None):
+    """Get a generator for units we expect to join relation based on
+    goal-state.
+
+    Note that you can not use this function for the peer relation, take a look
+    at expected_peer_units() for that.
+
+    This function will raise KeyError if you request information for a
+    relation type for which juju goal-state does not have information.  It will
+    raise NotImplementedError if used with juju versions without goal-state
+    support.
+
+    Example usage:
+    log('participant {} of {} joined relation {}'
+        .format(len(related_units()),
+                len(list(expected_related_units())),
+                relation_type()))
+
+    :param reltype: Relation type to list data for, default is to list data for
+                    the relation type we are currently executing a hook for.
+    :type reltype: str
+    :returns: iterator
+    :rtype: types.GeneratorType
+    :raises: KeyError, NotImplementedError
+    """
+    if not has_juju_version("2.4.4"):
+        # goal-state existed in 2.4.0, but did not list individual units to
+        # join a relation in 2.4.1 through 2.4.3. (LP: #1794739)
+        raise NotImplementedError("goal-state relation unit count")
+    reltype = reltype or relation_type()
+    _goal_state = goal_state()
+    return (key for key in _goal_state['relations'][reltype] if '/' in key)
+
+
+@cached
+def relation_for_unit(unit=None, rid=None):
+    """Get the json represenation of a unit's relation"""
+    unit = unit or remote_unit()
+    relation = relation_get(unit=unit, rid=rid)
+    for key in relation:
+        if key.endswith('-list'):
+            relation[key] = relation[key].split()
+    relation['__unit__'] = unit
+    return relation
+
+
+@cached
+def relations_for_id(relid=None):
+    """Get relations of a specific relation ID"""
+    relation_data = []
+    relid = relid or relation_id()
+    for unit in related_units(relid):
+        unit_data = relation_for_unit(unit, relid)
+        unit_data['__relid__'] = relid
+        relation_data.append(unit_data)
+    return relation_data
+
+
+@cached
+def relations_of_type(reltype=None):
+    """Get relations of a specific type"""
+    relation_data = []
+    reltype = reltype or relation_type()
+    for relid in relation_ids(reltype):
+        for relation in relations_for_id(relid):
+            relation['__relid__'] = relid
+            relation_data.append(relation)
+    return relation_data
+
+
+@cached
+def metadata():
+    """Get the current charm metadata.yaml contents as a python object"""
+    with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
+        return yaml.safe_load(md)
+
+
+def _metadata_unit(unit):
+    """Given the name of a unit (e.g. apache2/0), get the unit charm's
+    metadata.yaml. Very similar to metadata() but allows us to inspect
+    other units. Unit needs to be co-located, such as a subordinate or
+    principal/primary.
+
+    :returns: metadata.yaml as a python object.
+
+    """
+    basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
+    unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
+    joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')
+    if not os.path.exists(joineddir):
+        return None
+    with open(joineddir) as md:
+        return yaml.safe_load(md)
+
+
+@cached
+def relation_types():
+    """Get a list of relation types supported by this charm"""
+    rel_types = []
+    md = metadata()
+    for key in ('provides', 'requires', 'peers'):
+        section = md.get(key)
+        if section:
+            rel_types.extend(section.keys())
+    return rel_types
+
+
+@cached
+def peer_relation_id():
+    '''Get the peers relation id if a peers relation has been joined, else None.'''
+    md = metadata()
+    section = md.get('peers')
+    if section:
+        for key in section:
+            relids = relation_ids(key)
+            if relids:
+                return relids[0]
+    return None
+
+
+@cached
+def relation_to_interface(relation_name):
+    """
+    Given the name of a relation, return the interface that relation uses.
+
+    :returns: The interface name, or ``None``.
+    """
+    return relation_to_role_and_interface(relation_name)[1]
+
+
+@cached
+def relation_to_role_and_interface(relation_name):
+    """
+    Given the name of a relation, return the role and the name of the interface
+    that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
+
+    :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
+    """
+    _metadata = metadata()
+    for role in ('provides', 'requires', 'peers'):
+        interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
+        if interface:
+            return role, interface
+    return None, None
+
+
+@cached
+def role_and_interface_to_relations(role, interface_name):
+    """
+    Given a role and interface name, return a list of relation names for the
+    current charm that use that interface under that role (where role is one
+    of ``provides``, ``requires``, or ``peers``).
+
+    :returns: A list of relation names.
+    """
+    _metadata = metadata()
+    results = []
+    for relation_name, relation in _metadata.get(role, {}).items():
+        if relation['interface'] == interface_name:
+            results.append(relation_name)
+    return results
+
+
+@cached
+def interface_to_relations(interface_name):
+    """
+    Given an interface, return a list of relation names for the current
+    charm that use that interface.
+
+    :returns: A list of relation names.
+    """
+    results = []
+    for role in ('provides', 'requires', 'peers'):
+        results.extend(role_and_interface_to_relations(role, interface_name))
+    return results
+
+
+@cached
+def charm_name():
+    """Get the name of the current charm as is specified on metadata.yaml"""
+    return metadata().get('name')
+
+
+@cached
+def relations():
+    """Get a nested dictionary of relation data for all related units"""
+    rels = {}
+    for reltype in relation_types():
+        relids = {}
+        for relid in relation_ids(reltype):
+            units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
+            for unit in related_units(relid):
+                reldata = relation_get(unit=unit, rid=relid)
+                units[unit] = reldata
+            relids[relid] = units
+        rels[reltype] = relids
+    return rels
+
+
+@cached
+def is_relation_made(relation, keys='private-address'):
+    '''
+    Determine whether a relation is established by checking for
+    presence of key(s).  If a list of keys is provided, they
+    must all be present for the relation to be identified as made
+    '''
+    if isinstance(keys, str):
+        keys = [keys]
+    for r_id in relation_ids(relation):
+        for unit in related_units(r_id):
+            context = {}
+            for k in keys:
+                context[k] = relation_get(k, rid=r_id,
+                                          unit=unit)
+            if None not in context.values():
+                return True
+    return False
+
+
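+# Example (editor's illustrative sketch, not part of charm-helpers): defer
+# work until an assumed 'database' relation provides the keys we need.
+def _example_database_ready():
+    if not is_relation_made('database', keys=['host', 'password']):
+        log('database relation incomplete, deferring', level=DEBUG)
+        return False
+    return True
+
+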
+def _port_op(op_name, port, protocol="TCP"):
+    """Open or close a service network port"""
+    _args = [op_name]
+    icmp = protocol.upper() == "ICMP"
+    if icmp:
+        _args.append(protocol)
+    else:
+        _args.append('{}/{}'.format(port, protocol))
+    try:
+        subprocess.check_call(_args)
+    except subprocess.CalledProcessError:
+        # Older Juju pre 2.3 doesn't support ICMP
+        # so treat it as a no-op if it fails.
+        if not icmp:
+            raise
+
+
+def open_port(port, protocol="TCP"):
+    """Open a service network port"""
+    _port_op('open-port', port, protocol)
+
+
+def close_port(port, protocol="TCP"):
+    """Close a service network port"""
+    _port_op('close-port', port, protocol)
+
+
+def open_ports(start, end, protocol="TCP"):
+    """Opens a range of service network ports"""
+    _args = ['open-port']
+    _args.append('{}-{}/{}'.format(start, end, protocol))
+    subprocess.check_call(_args)
+
+
+def close_ports(start, end, protocol="TCP"):
+    """Close a range of service network ports"""
+    _args = ['close-port']
+    _args.append('{}-{}/{}'.format(start, end, protocol))
+    subprocess.check_call(_args)
+
+
+def opened_ports():
+    """Get the opened ports
+
+    *Note that this will only show ports opened in a previous hook*
+
+    :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']``
+    """
+    _args = ['opened-ports', '--format=json']
+    return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+
+
+@cached
+def unit_get(attribute):
+    """Get the unit ID for the remote unit"""
+    _args = ['unit-get', '--format=json', attribute]
+    try:
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+    except ValueError:
+        return None
+
+
+def unit_public_ip():
+    """Get this unit's public IP address"""
+    return unit_get('public-address')
+
+
+def unit_private_ip():
+    """Get this unit's private IP address"""
+    return unit_get('private-address')
+
+
+@cached
+def storage_get(attribute=None, storage_id=None):
+    """Get storage attributes"""
+    _args = ['storage-get', '--format=json']
+    if storage_id:
+        _args.extend(('-s', storage_id))
+    if attribute:
+        _args.append(attribute)
+    try:
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+    except ValueError:
+        return None
+
+
+@cached
+def storage_list(storage_name=None):
+    """List the storage IDs for the unit"""
+    _args = ['storage-list', '--format=json']
+    if storage_name:
+        _args.append(storage_name)
+    try:
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+    except ValueError:
+        return None
+    except OSError as e:
+        import errno
+        if e.errno == errno.ENOENT:
+            # storage-list does not exist
+            return []
+        raise
+
+
+class UnregisteredHookError(Exception):
+    """Raised when an undefined hook is called"""
+    pass
+
+
+class Hooks(object):
+    """A convenient handler for hook functions.
+
+    Example::
+
+        hooks = Hooks()
+
+        # register a hook, taking its name from the function name
+        @hooks.hook()
+        def install():
+            pass  # your code here
+
+        # register a hook, providing a custom hook name
+        @hooks.hook("config-changed")
+        def config_changed():
+            pass  # your code here
+
+        if __name__ == "__main__":
+            # execute a hook based on the name the program is called by
+            hooks.execute(sys.argv)
+    """
+
+    def __init__(self, config_save=None):
+        super(Hooks, self).__init__()
+        self._hooks = {}
+
+        # For unknown reasons, we allow the Hooks constructor to override
+        # config().implicit_save.
+        if config_save is not None:
+            config().implicit_save = config_save
+
+    def register(self, name, function):
+        """Register a hook"""
+        self._hooks[name] = function
+
+    def execute(self, args):
+        """Execute a registered hook based on args[0]"""
+        _run_atstart()
+        hook_name = os.path.basename(args[0])
+        if hook_name in self._hooks:
+            try:
+                self._hooks[hook_name]()
+            except SystemExit as x:
+                if x.code is None or x.code == 0:
+                    _run_atexit()
+                raise
+            _run_atexit()
+        else:
+            raise UnregisteredHookError(hook_name)
+
+    def hook(self, *hook_names):
+        """Decorator, registering them as hooks"""
+        def wrapper(decorated):
+            for hook_name in hook_names:
+                self.register(hook_name, decorated)
+            else:
+                self.register(decorated.__name__, decorated)
+                if '_' in decorated.__name__:
+                    self.register(
+                        decorated.__name__.replace('_', '-'), decorated)
+            return decorated
+        return wrapper
+
+
+class NoNetworkBinding(Exception):
+    pass
+
+
+def charm_dir():
+    """Return the root directory of the current charm"""
+    d = os.environ.get('JUJU_CHARM_DIR')
+    if d is not None:
+        return d
+    return os.environ.get('CHARM_DIR')
+
+
+def cmd_exists(cmd):
+    """Return True if the specified cmd exists in the path"""
+    return any(
+        os.access(os.path.join(path, cmd), os.X_OK)
+        for path in os.environ["PATH"].split(os.pathsep)
+    )
+
+
+@cached
+@deprecate("moved to function_get()", log=log)
+def action_get(key=None):
+    """
+    .. deprecated:: 0.20.7
+       Alias for :func:`function_get`.
+
+    Gets the value of an action parameter, or all key/value param pairs.
+    """
+    cmd = ['action-get']
+    if key is not None:
+        cmd.append(key)
+    cmd.append('--format=json')
+    action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+    return action_data
+
+
+@cached
+def function_get(key=None):
+    """Gets the value of an action parameter, or all key/value param pairs"""
+    cmd = ['function-get']
+    # Fallback for older charms.
+    if not cmd_exists('function-get'):
+        cmd = ['action-get']
+
+    if key is not None:
+        cmd.append(key)
+    cmd.append('--format=json')
+    function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+    return function_data
+
+
+@deprecate("moved to function_set()", log=log)
+def action_set(values):
+    """
+    .. deprecated:: 0.20.7
+       Alias for :func:`function_set`.
+
+    Sets the values to be returned after the action finishes.
+    """
+    cmd = ['action-set']
+    for k, v in list(values.items()):
+        cmd.append('{}={}'.format(k, v))
+    subprocess.check_call(cmd)
+
+
+def function_set(values):
+    """Sets the values to be returned after the function finishes"""
+    cmd = ['function-set']
+    # Fallback for older charms.
+    if not cmd_exists('function-set'):
+        cmd = ['action-set']
+
+    for k, v in list(values.items()):
+        cmd.append('{}={}'.format(k, v))
+    subprocess.check_call(cmd)
+
+
+@deprecate("moved to function_fail()", log=log)
+def action_fail(message):
+    """
+    .. deprecated:: 0.20.7
+       Alias for :func:`function_fail`.
+
+    Sets the action status to failed and sets the error message.
+
+    The results set by action_set are preserved.
+    """
+    subprocess.check_call(['action-fail', message])
+
+
+def function_fail(message):
+    """Sets the function status to failed and sets the error message.
+
+    The results set by function_set are preserved."""
+    cmd = ['function-fail']
+    # Fallback for older charms.
+    if not cmd_exists('function-fail'):
+        cmd = ['action-fail']
+    cmd.append(message)
+
+    subprocess.check_call(cmd)
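+
+# Example (a sketch of use inside an action implementation; the parameter and
+# result names are illustrative):
+#
+#     count = function_get('count')
+#     if count is None:
+#         function_fail('missing required parameter: count')
+#     else:
+#         function_set({'processed': count})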
+
+
+def action_name():
+    """Get the name of the currently executing action."""
+    return os.environ.get('JUJU_ACTION_NAME')
+
+
+def function_name():
+    """Get the name of the currently executing function."""
+    return os.environ.get('JUJU_FUNCTION_NAME') or action_name()
+
+
+def action_uuid():
+    """Get the UUID of the currently executing action."""
+    return os.environ.get('JUJU_ACTION_UUID')
+
+
+def function_id():
+    """Get the ID of the currently executing function."""
+    return os.environ.get('JUJU_FUNCTION_ID') or action_uuid()
+
+
+def action_tag():
+    """Get the tag for the currently executing action."""
+    return os.environ.get('JUJU_ACTION_TAG')
+
+
+def function_tag():
+    """Get the tag for the currently executing function."""
+    return os.environ.get('JUJU_FUNCTION_TAG') or action_tag()
+
+
+def status_set(workload_state, message, application=False):
+    """Set the workload state with a message
+
+    Use status-set to set the workload state with a message which is visible
+    to the user via juju status. If the status-set command is not found then
+    assume this is juju < 1.23 and juju-log the message instead.
+
+    workload_state   -- valid juju workload state. str or WORKLOAD_STATES
+    message          -- status update message
+    application      -- Whether this is an application state set
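+
+    Example (a minimal sketch)::
+
+        status_set('maintenance', 'installing packages')
+        status_set(WORKLOAD_STATES.ACTIVE, 'Unit is ready')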
+    """
+    bad_state_msg = '{!r} is not a valid workload state'
+
+    if isinstance(workload_state, str):
+        try:
+            # Convert string to enum.
+            workload_state = WORKLOAD_STATES[workload_state.upper()]
+        except KeyError:
+            raise ValueError(bad_state_msg.format(workload_state))
+
+    if workload_state not in WORKLOAD_STATES:
+        raise ValueError(bad_state_msg.format(workload_state))
+
+    cmd = ['status-set']
+    if application:
+        cmd.append('--application')
+    cmd.extend([workload_state.value, message])
+    try:
+        ret = subprocess.call(cmd)
+        if ret == 0:
+            return
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
+    log_message = 'status-set failed: {} {}'.format(workload_state.value,
+                                                    message)
+    log(log_message, level='INFO')
+
+
+def status_get():
+    """Retrieve the previously set juju workload state and message
+
+    If the status-get command is not found then assume this is juju < 1.23 and
+    return 'unknown', ""
+
+    """
+    cmd = ['status-get', "--format=json", "--include-data"]
+    try:
+        raw_status = subprocess.check_output(cmd)
+    except OSError as e:
+        if e.errno == errno.ENOENT:
+            return ('unknown', "")
+        else:
+            raise
+    else:
+        status = json.loads(raw_status.decode("UTF-8"))
+        return (status["status"], status["message"])
+
+
+def translate_exc(from_exc, to_exc):
+    def inner_translate_exc1(f):
+        @wraps(f)
+        def inner_translate_exc2(*args, **kwargs):
+            try:
+                return f(*args, **kwargs)
+            except from_exc:
+                raise to_exc
+
+        return inner_translate_exc2
+
+    return inner_translate_exc1
+
+
+def application_version_set(version):
+    """Charm authors may trigger this command from any hook to output what
+    version of the application is running. This could be a package version,
+    for instance postgres version 9.5. It could also be a build number or
+    version control revision identifier, for instance git sha 6fb7ba68. """
+
+    cmd = ['application-version-set']
+    cmd.append(version)
+    try:
+        subprocess.check_call(cmd)
+    except OSError:
+        log("Application Version: {}".format(version))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+@cached
+def goal_state():
+    """Juju goal state values"""
+    cmd = ['goal-state', '--format=json']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def is_leader():
+    """Does the current unit hold the juju leadership
+
+    Uses juju to determine whether the current unit is the leader of its peers
+    """
+    cmd = ['is-leader', '--format=json']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_get(attribute=None):
+    """Juju leader get value(s)"""
+    cmd = ['leader-get', '--format=json'] + [attribute or '-']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_set(settings=None, **kwargs):
+    """Juju leader set value(s)"""
+    # Don't log secrets.
+    # log("Juju leader-set '%s'" % (settings), level=DEBUG)
+    cmd = ['leader-set']
+    settings = settings or {}
+    settings.update(kwargs)
+    for k, v in settings.items():
+        if v is None:
+            cmd.append('{}='.format(k))
+        else:
+            cmd.append('{}={}'.format(k, v))
+    subprocess.check_call(cmd)
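+
+# Example (a sketch; the key and value are illustrative): only the leader
+# may write leader settings, while any unit may read them:
+#
+#     if is_leader():
+#         leader_set(settings={'shared-key': 'some-value'})
+#     shared = leader_get('shared-key')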
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_register(ptype, klass, pid):
+    """ is used while a hook is running to let Juju know that a
+        payload has been started."""
+    cmd = ['payload-register']
+    for x in [ptype, klass, pid]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_unregister(klass, pid):
+    """ is used while a hook is running to let Juju know
+    that a payload has been manually stopped. The <class> and <id> provided
+    must match a payload that has been previously registered with juju using
+    payload-register."""
+    cmd = ['payload-unregister']
+    for x in [klass, pid]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_status_set(klass, pid, status):
+    """is used to update the current status of a registered payload.
+    The <class> and <id> provided must match a payload that has been previously
+    registered with juju using payload-register. The <status> must be one of the
+    follow: starting, started, stopping, stopped"""
+    cmd = ['payload-status-set']
+    for x in [klass, pid, status]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def resource_get(name):
+    """used to fetch the resource path of the given name.
+
+    <name> must match a name of defined resource in metadata.yaml
+
+    returns either a path or False if resource not available
+    """
+    if not name:
+        return False
+
+    cmd = ['resource-get', name]
+    try:
+        return subprocess.check_output(cmd).decode('UTF-8')
+    except subprocess.CalledProcessError:
+        return False
+
+
+@cached
+def juju_version():
+    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
+    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
+    jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
+    return subprocess.check_output([jujud, 'version'],
+                                   universal_newlines=True).strip()
+
+
+def has_juju_version(minimum_version):
+    """Return True if the Juju version is at least the provided version"""
+    return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
+
+
+_atexit = []
+_atstart = []
+
+
+def atstart(callback, *args, **kwargs):
+    '''Schedule a callback to run before the main hook.
+
+    Callbacks are run in the order they were added.
+
+    This is useful for modules and classes to perform initialization
+    and inject behavior. In particular:
+
+        - Run common code before all of your hooks, such as logging
+          the hook name or interesting relation data.
+        - Defer object or module initialization that requires a hook
+          context until we know there actually is a hook context,
+          making testing easier.
+        - Rather than requiring charm authors to include boilerplate to
+          invoke your helper's behavior, have it run automatically if
+          your object is instantiated or module imported.
+
+    This is not at all useful after your hook framework has been launched.
+    '''
+    global _atstart
+    _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+    '''Schedule a callback to run on successful hook completion.
+
+    Callbacks are run in the reverse order that they were added.'''
+    _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+    '''Hook frameworks must invoke this before running the main hook body.'''
+    global _atstart
+    for callback, args, kwargs in _atstart:
+        callback(*args, **kwargs)
+    del _atstart[:]
+
+
+def _run_atexit():
+    '''Hook frameworks must invoke this after the main hook body has
+    successfully completed. Do not invoke it if the hook fails.'''
+    global _atexit
+    for callback, args, kwargs in reversed(_atexit):
+        callback(*args, **kwargs)
+    del _atexit[:]
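+
+# Example (a sketch): a helper module can defer work until a hook context
+# exists and schedule cleanup for successful completion:
+#
+#     atstart(log, 'hook starting')
+#     atexit(log, 'hook completed successfully')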
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def network_get_primary_address(binding):
+    '''
+    Deprecated since Juju 2.3; use network_get()
+
+    Retrieve the primary network address for a named binding
+
+    :param binding: string. The name of a relation or extra-binding
+    :return: string. The primary IP address for the named binding
+    :raise: NotImplementedError if run on Juju < 2.0
+    '''
+    cmd = ['network-get', '--primary-address', binding]
+    try:
+        response = subprocess.check_output(
+            cmd,
+            stderr=subprocess.STDOUT).decode('UTF-8').strip()
+    except CalledProcessError as e:
+        if 'no network config found for binding' in e.output.decode('UTF-8'):
+            raise NoNetworkBinding("No network binding for {}"
+                                   .format(binding))
+        else:
+            raise
+    return response
+
+
+def network_get(endpoint, relation_id=None):
+    """
+    Retrieve the network details for a relation endpoint
+
+    :param endpoint: string. The name of a relation endpoint
+    :param relation_id: int. The ID of the relation for the current context.
+    :return: dict. The loaded YAML output of the network-get query.
+    :raise: NotImplementedError if request not supported by the Juju version.
+    """
+    if not has_juju_version('2.2'):
+        raise NotImplementedError(juju_version())  # earlier versions require --primary-address
+    if relation_id and not has_juju_version('2.3'):
+        raise NotImplementedError  # 2.3 added the -r option
+
+    cmd = ['network-get', endpoint, '--format', 'yaml']
+    if relation_id:
+        cmd.append('-r')
+        cmd.append(relation_id)
+    response = subprocess.check_output(
+        cmd,
+        stderr=subprocess.STDOUT).decode('UTF-8').strip()
+    return yaml.safe_load(response)
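+
+# Example (a sketch; 'cluster' is an illustrative binding name, and the key
+# shown follows the network-get YAML output of recent Juju versions):
+#
+#     details = network_get('cluster')
+#     addresses = details.get('ingress-addresses', [])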
+
+
+def add_metric(*args, **kwargs):
+    """Add metric values. Values may be expressed with keyword arguments. For
+    metric names containing dashes, these may be expressed as one or more
+    'key=value' positional arguments. May only be called from the collect-metrics
+    hook."""
+    _args = ['add-metric']
+    _kvpairs = []
+    _kvpairs.extend(args)
+    _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()])
+    _args.extend(sorted(_kvpairs))
+    try:
+        subprocess.check_call(_args)
+        return
+    except EnvironmentError as e:
+        if e.errno != errno.ENOENT:
+            raise
+    log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs))
+    log(log_message, level='INFO')
+
+
+def meter_status():
+    """Get the meter status, if running in the meter-status-changed hook."""
+    return os.environ.get('JUJU_METER_STATUS')
+
+
+def meter_info():
+    """Get the meter status information, if running in the meter-status-changed
+    hook."""
+    return os.environ.get('JUJU_METER_INFO')
+
+
+def iter_units_for_relation_name(relation_name):
+    """Iterate through all units in a relation
+
+    Generator that iterates through all the units in a relation and yields
+    a named tuple with rid and unit field names.
+
+    Usage:
+    data = [(u.rid, u.unit)
+            for u in iter_units_for_relation_name(relation_name)]
+
+    :param relation_name: string relation name
+    :yield: Named Tuple with rid and unit field names
+    """
+    RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
+    for rid in relation_ids(relation_name):
+        for unit in related_units(rid):
+            yield RelatedUnit(rid, unit)
+
+
+def ingress_address(rid=None, unit=None):
+    """
+    Retrieve the ingress-address from a relation when available.
+    Otherwise, return the private-address.
+
+    When used on the consuming side of the relation (unit is a remote
+    unit), the ingress-address is the IP address that this unit needs
+    to use to reach the provided service on the remote unit.
+
+    When used on the providing side of the relation (unit == local_unit()),
+    the ingress-address is the IP address that is advertised to remote
+    units on this relation. Remote units need to use this address to
+    reach the local provided service on this unit.
+
+    Note that charms may document some other method to use in
+    preference to the ingress_address(), such as an address provided
+    on a different relation attribute or a service discovery mechanism.
+    This allows charms to redirect inbound connections to their peers
+    or different applications such as load balancers.
+
+    Usage:
+    addresses = [ingress_address(rid=u.rid, unit=u.unit)
+                 for u in iter_units_for_relation_name(relation_name)]
+
+    :param rid: string relation id
+    :param unit: string unit name
+    :side effect: calls relation_get
+    :return: string IP address
+    """
+    settings = relation_get(rid=rid, unit=unit)
+    return (settings.get('ingress-address') or
+            settings.get('private-address'))
+
+
+def egress_subnets(rid=None, unit=None):
+    """
+    Retrieve the egress-subnets from a relation.
+
+    This function is to be used on the providing side of the
+    relation, and provides the ranges of addresses that client
+    connections may come from. The result is uninteresting on
+    the consuming side of a relation (unit == local_unit()).
+
+    Returns a stable list of subnets in CIDR format.
+    eg. ['192.168.1.0/24', '2001::F00F/128']
+
+    If egress-subnets is not available, falls back to using the published
+    ingress-address, or finally private-address.
+
+    :param rid: string relation id
+    :param unit: string unit name
+    :side effect: calls relation_get
+    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
+    """
+    def _to_range(addr):
+        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
+            addr += '/32'
+        elif ':' in addr and '/' not in addr:  # IPv6
+            addr += '/128'
+        return addr
+
+    settings = relation_get(rid=rid, unit=unit)
+    if 'egress-subnets' in settings:
+        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
+    if 'ingress-address' in settings:
+        return [_to_range(settings['ingress-address'])]
+    if 'private-address' in settings:
+        return [_to_range(settings['private-address'])]
+    return []  # Should never happen
+
+
+def unit_doomed(unit=None):
+    """Determines if the unit is being removed from the model
+
+    Requires Juju 2.4.1.
+
+    :param unit: string unit name, defaults to local_unit
+    :side effect: calls goal_state
+    :side effect: calls local_unit
+    :side effect: calls has_juju_version
+    :return: True if the unit is being removed, already gone, or never existed
+    """
+    if not has_juju_version("2.4.1"):
+        # We cannot risk blindly returning False for 'we don't know',
+        # because that could cause data loss; if call sites don't
+        # need an accurate answer, they likely don't need this helper
+        # at all.
+        # goal-state existed in 2.4.0, but did not handle removals
+        # correctly until 2.4.1.
+        raise NotImplementedError("unit_doomed")
+    if unit is None:
+        unit = local_unit()
+    gs = goal_state()
+    units = gs.get('units', {})
+    if unit not in units:
+        return True
+    # I don't think 'dead' units ever show up in the goal-state, but
+    # check anyway in addition to 'dying'.
+    return units[unit]['status'] in ('dying', 'dead')
+
+
+def env_proxy_settings(selected_settings=None):
+    """Get proxy settings from process environment variables.
+
+    Get charm proxy settings from environment variables that correspond to
+    juju-http-proxy, juju-https-proxy, juju-no-proxy (available as of 2.4.2,
+    see lp:1782236) and juju-ftp-proxy in a format suitable for passing to an
+    application that reacts to proxy settings passed as environment variables.
+    Some applications support lowercase or uppercase notation (e.g. curl),
+    some support only lowercase (e.g. wget), and in rare cases only the
+    uppercase notation is supported. no_proxy CIDR and wildcard support also
+    varies between runtimes and applications as there is no enforced standard.
+
+    Some applications may connect to multiple destinations and expose config
+    options that affect proxy settings for a specific destination only; these
+    should be handled in charms in an application-specific manner.
+
+    :param selected_settings: format only a subset of possible settings
+    :type selected_settings: list
+    :rtype: Optional[dict[str, str]]
+    """
+    SUPPORTED_SETTINGS = {
+        'http': 'HTTP_PROXY',
+        'https': 'HTTPS_PROXY',
+        'no_proxy': 'NO_PROXY',
+        'ftp': 'FTP_PROXY'
+    }
+    if selected_settings is None:
+        selected_settings = SUPPORTED_SETTINGS
+
+    selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
+                     if k in selected_settings]
+    proxy_settings = {}
+    for var in selected_vars:
+        var_val = os.getenv(var)
+        if var_val:
+            proxy_settings[var] = var_val
+            proxy_settings[var.lower()] = var_val
+        # Now handle juju-prefixed environment variables. The legacy vs new
+        # environment variable usage is mutually exclusive
+        charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
+        if charm_var_val:
+            proxy_settings[var] = charm_var_val
+            proxy_settings[var.lower()] = charm_var_val
+    if 'no_proxy' in proxy_settings:
+        if _contains_range(proxy_settings['no_proxy']):
+            log(RANGE_WARNING, level=WARNING)
+    return proxy_settings if proxy_settings else None
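+
+# Example (a sketch; the url variable is illustrative): pass the charm proxy
+# settings to a child process that honours *_PROXY environment variables:
+#
+#     proxy_env = env_proxy_settings(['http', 'https']) or {}
+#     env = dict(os.environ, **proxy_env)
+#     subprocess.check_call(['curl', '-sSO', url], env=env)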
+
+
+def _contains_range(addresses):
+    """Check for cidr or wildcard domain in a string.
+
+    Given a string comprising a comma-separated list of IP addresses
+    and domain names, determine whether the string contains IP ranges
+    or wildcard domains.
+
+    :param addresses: comma-separated list of domains and IP addresses.
+    :type addresses: str
+    """
+    return (
+        # Test for cidr (e.g. 10.20.20.0/24)
+        "/" in addresses or
+        # Test for wildcard domains (*.foo.com or .foo.com)
+        "*" in addresses or
+        addresses.startswith(".") or
+        ",." in addresses or
+        " ." in addresses)
diff --git a/charmhelpers/core/host.py b/charmhelpers/core/host.py
new file mode 100644
index 0000000000000000000000000000000000000000..f826f6fe3c9f0fd1031d801689d2935a429e6e10
--- /dev/null
+++ b/charmhelpers/core/host.py
@@ -0,0 +1,1119 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tools for working with the host system"""
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+#  Nick Moffitt <nick.moffitt@canonical.com>
+#  Matthew Wedgwood <matthew.wedgwood@canonical.com>
+
+import errno
+import os
+import re
+import pwd
+import glob
+import grp
+import random
+import string
+import subprocess
+import hashlib
+import functools
+import itertools
+import six
+
+from contextlib import contextmanager
+from collections import OrderedDict
+from .hookenv import log, INFO, DEBUG, local_unit, charm_name
+from .fstab import Fstab
+from charmhelpers.osplatform import get_platform
+
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+    from charmhelpers.core.host_factory.ubuntu import (  # NOQA:F401
+        service_available,
+        add_new_group,
+        lsb_release,
+        cmp_pkgrevno,
+        CompareHostReleases,
+        get_distrib_codename,
+        arch
+    )  # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+    from charmhelpers.core.host_factory.centos import (  # NOQA:F401
+        service_available,
+        add_new_group,
+        lsb_release,
+        cmp_pkgrevno,
+        CompareHostReleases,
+    )  # flake8: noqa -- ignore F401 for this import
+
+UPDATEDB_PATH = '/etc/updatedb.conf'
+CA_CERT_DIR = '/usr/local/share/ca-certificates'
+
+
+def service_start(service_name, **kwargs):
+    """Start a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be started. The
+    following example starts the ceph-osd service for instance id=4:
+
+    service_start('ceph-osd', id=4)
+
+    :param service_name: the name of the service to start
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('start', service_name, **kwargs)
+
+
+def service_stop(service_name, **kwargs):
+    """Stop a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be stopped. The
+    following example stops the ceph-osd service for instance id=4:
+
+    service_stop('ceph-osd', id=4)
+
+    :param service_name: the name of the service to stop
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    return service('stop', service_name, **kwargs)
+
+
+def service_restart(service_name, **kwargs):
+    """Restart a system service.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be restarted. The
+    following example restarts the ceph-osd service for instance id=4:
+
+    service_restart('ceph-osd', id=4)
+
+    :param service_name: the name of the service to restart
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the  init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+    """
+    return service('restart', service_name, **kwargs)
+
+
+def service_reload(service_name, restart_on_failure=False, **kwargs):
+    """Reload a system service, optionally falling back to restart if
+    reload fails.
+
+    The specified service name is managed via the system level init system.
+    Some init systems (e.g. upstart) require that additional arguments be
+    provided in order to directly control service instances whereas other init
+    systems allow for addressing instances of a service directly by name (e.g.
+    systemd).
+
+    The kwargs allow for the additional parameters to be passed to underlying
+    init systems for those systems which require/allow for them. For example,
+    the ceph-osd upstart script requires the id parameter to be passed along
+    in order to identify which running daemon should be reloaded. The
+    following example reloads the ceph-osd service for instance id=4:
+
+    service_reload('ceph-osd', id=4)
+
+    :param service_name: the name of the service to reload
+    :param restart_on_failure: boolean indicating whether to fall back to a
+                               restart if the reload fails.
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the  init system's commandline. kwargs
+                     are ignored for init systems not allowing additional
+                     parameters via the commandline (systemd).
+    """
+    service_result = service('reload', service_name, **kwargs)
+    if not service_result and restart_on_failure:
+        service_result = service('restart', service_name, **kwargs)
+    return service_result
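+
+# Example (a minimal sketch; the service names are illustrative):
+#
+#     service_restart('apache2')
+#     service_reload('nginx', restart_on_failure=True)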
+
+
+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
+                  **kwargs):
+    """Pause a system service.
+
+    Stop it, and prevent it from starting again at boot.
+
+    :param service_name: the name of the service to pause
+    :param init_dir: path to the upstart init directory
+    :param initd_dir: path to the sysv init directory
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for init systems which do not support
+                     key=value arguments via the commandline.
+    """
+    stopped = True
+    if service_running(service_name, **kwargs):
+        stopped = service_stop(service_name, **kwargs)
+    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
+    sysv_file = os.path.join(initd_dir, service_name)
+    if init_is_systemd(service_name=service_name):
+        service('disable', service_name)
+        service('mask', service_name)
+    elif os.path.exists(upstart_file):
+        override_path = os.path.join(
+            init_dir, '{}.override'.format(service_name))
+        with open(override_path, 'w') as fh:
+            fh.write("manual\n")
+    elif os.path.exists(sysv_file):
+        subprocess.check_call(["update-rc.d", service_name, "disable"])
+    else:
+        raise ValueError(
+            "Unable to detect {0} as SystemD, Upstart {1} or"
+            " SysV {2}".format(
+                service_name, upstart_file, sysv_file))
+    return stopped
+
+
+def service_resume(service_name, init_dir="/etc/init",
+                   initd_dir="/etc/init.d", **kwargs):
+    """Resume a system service.
+
+    Reenable starting again at boot. Start the service.
+
+    :param service_name: the name of the service to resume
+    :param init_dir: the path to the init dir
+    :param initd_dir: the path to the initd dir
+    :param **kwargs: additional parameters to pass to the init system when
+                     managing services. These will be passed as key=value
+                     parameters to the init system's commandline. kwargs
+                     are ignored for systemd enabled systems.
+    """
+    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
+    sysv_file = os.path.join(initd_dir, service_name)
+    if init_is_systemd(service_name=service_name):
+        service('unmask', service_name)
+        service('enable', service_name)
+    elif os.path.exists(upstart_file):
+        override_path = os.path.join(
+            init_dir, '{}.override'.format(service_name))
+        if os.path.exists(override_path):
+            os.unlink(override_path)
+    elif os.path.exists(sysv_file):
+        subprocess.check_call(["update-rc.d", service_name, "enable"])
+    else:
+        raise ValueError(
+            "Unable to detect {0} as SystemD, Upstart {1} or"
+            " SysV {2}".format(
+                service_name, upstart_file, sysv_file))
+    started = service_running(service_name, **kwargs)
+
+    if not started:
+        started = service_start(service_name, **kwargs)
+    return started
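+
+# Example (a sketch): take a service out of rotation across reboots while
+# performing maintenance, then bring it back:
+#
+#     service_pause('ceph-osd')
+#     # ... perform maintenance ...
+#     service_resume('ceph-osd')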
+
+
+def service(action, service_name, **kwargs):
+    """Control a system service.
+
+    :param action: the action to take on the service
+    :param service_name: the name of the service to perform the action on
+    :param **kwargs: additional params to be passed to the service command in
+                    the form of key=value.
+    """
+    if init_is_systemd(service_name=service_name):
+        cmd = ['systemctl', action, service_name]
+    else:
+        cmd = ['service', service_name, action]
+        for key, value in six.iteritems(kwargs):
+            parameter = '%s=%s' % (key, value)
+            cmd.append(parameter)
+    return subprocess.call(cmd) == 0
+
+
+_UPSTART_CONF = "/etc/init/{}.conf"
+_INIT_D_CONF = "/etc/init.d/{}"
+
+
+def service_running(service_name, **kwargs):
+    """Determine whether a system service is running.
+
+    :param service_name: the name of the service
+    :param **kwargs: additional args to pass to the service command. This is
+                     used to pass additional key=value arguments to the
+                     service command line for managing specific instance
+                     units (e.g. service ceph-osd status id=2). The kwargs
+                     are ignored in systemd services.
+    """
+    if init_is_systemd(service_name=service_name):
+        return service('is-active', service_name)
+    else:
+        if os.path.exists(_UPSTART_CONF.format(service_name)):
+            try:
+                cmd = ['status', service_name]
+                for key, value in six.iteritems(kwargs):
+                    parameter = '%s=%s' % (key, value)
+                    cmd.append(parameter)
+                output = subprocess.check_output(
+                    cmd, stderr=subprocess.STDOUT).decode('UTF-8')
+            except subprocess.CalledProcessError:
+                return False
+            else:
+                # This works for upstart scripts where the 'service' command
+                # returns a consistent string to represent running
+                # 'start/running'
+                if ("start/running" in output or
+                        "is running" in output or
+                        "up and running" in output):
+                    return True
+        elif os.path.exists(_INIT_D_CONF.format(service_name)):
+            # Check System V scripts init script return codes
+            return service('status', service_name)
+        return False
+
+
+SYSTEMD_SYSTEM = '/run/systemd/system'
+
+
+def init_is_systemd(service_name=None):
+    """
+    Returns whether the host uses systemd for the specified service.
+
+    @param Optional[str] service_name: specific name of service
+    """
+    if str(service_name).startswith("snap."):
+        return True
+    if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
+        return False
+    return os.path.isdir(SYSTEMD_SYSTEM)
+
+
+def adduser(username, password=None, shell='/bin/bash',
+            system_user=False, primary_group=None,
+            secondary_groups=None, uid=None, home_dir=None):
+    """Add a user to the system.
+
+    Will log but otherwise succeed if the user already exists.
+
+    :param str username: Username to create
+    :param str password: Password for user; if ``None``, create a system user
+    :param str shell: The default shell for the user
+    :param bool system_user: Whether to create a login or system user
+    :param str primary_group: Primary group for user; defaults to username
+    :param list secondary_groups: Optional list of additional groups
+    :param int uid: UID for user being created
+    :param str home_dir: Home directory for user
+
+    :returns: The password database entry struct, as returned by `pwd.getpwnam`
+    """
+    try:
+        user_info = pwd.getpwnam(username)
+        log('user {0} already exists!'.format(username))
+        if uid:
+            user_info = pwd.getpwuid(int(uid))
+            log('user with uid {0} already exists!'.format(uid))
+    except KeyError:
+        log('creating user {0}'.format(username))
+        cmd = ['useradd']
+        if uid:
+            cmd.extend(['--uid', str(uid)])
+        if home_dir:
+            cmd.extend(['--home', str(home_dir)])
+        if system_user or password is None:
+            cmd.append('--system')
+        else:
+            cmd.extend([
+                '--create-home',
+                '--shell', shell,
+                '--password', password,
+            ])
+        if not primary_group:
+            try:
+                grp.getgrnam(username)
+                primary_group = username  # avoid "group exists" error
+            except KeyError:
+                pass
+        if primary_group:
+            cmd.extend(['-g', primary_group])
+        if secondary_groups:
+            cmd.extend(['-G', ','.join(secondary_groups)])
+        cmd.append(username)
+        subprocess.check_call(cmd)
+        user_info = pwd.getpwnam(username)
+    return user_info
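+
+# Example (a sketch; the username and home directory are illustrative):
+#
+#     adduser('ceph', system_user=True, home_dir='/var/lib/ceph')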
+
+
+def user_exists(username):
+    """Check if a user exists"""
+    try:
+        pwd.getpwnam(username)
+        user_exists = True
+    except KeyError:
+        user_exists = False
+    return user_exists
+
+
+def uid_exists(uid):
+    """Check if a uid exists"""
+    try:
+        pwd.getpwuid(uid)
+        uid_exists = True
+    except KeyError:
+        uid_exists = False
+    return uid_exists
+
+
+def group_exists(groupname):
+    """Check if a group exists"""
+    try:
+        grp.getgrnam(groupname)
+        group_exists = True
+    except KeyError:
+        group_exists = False
+    return group_exists
+
+
+def gid_exists(gid):
+    """Check if a gid exists"""
+    try:
+        grp.getgrgid(gid)
+        gid_exists = True
+    except KeyError:
+        gid_exists = False
+    return gid_exists
+
+
+def add_group(group_name, system_group=False, gid=None):
+    """Add a group to the system
+
+    Will log but otherwise succeed if the group already exists.
+
+    :param str group_name: group to create
+    :param bool system_group: Create system group
+    :param int gid: GID for user being created
+
+    :returns: The group database entry struct, as returned by `grp.getgrnam`
+    """
+    try:
+        group_info = grp.getgrnam(group_name)
+        log('group {0} already exists!'.format(group_name))
+        if gid:
+            group_info = grp.getgrgid(gid)
+            log('group with gid {0} already exists!'.format(gid))
+    except KeyError:
+        log('creating group {0}'.format(group_name))
+        add_new_group(group_name, system_group, gid)
+        group_info = grp.getgrnam(group_name)
+    return group_info
+
+
+def add_user_to_group(username, group):
+    """Add a user to a group"""
+    cmd = ['gpasswd', '-a', username, group]
+    log("Adding user {} to group {}".format(username, group))
+    subprocess.check_call(cmd)
+
+
+def chage(username, lastday=None, expiredate=None, inactive=None,
+          mindays=None, maxdays=None, root=None, warndays=None):
+    """Change user password expiry information
+
+    :param str username: User to update
+    :param str lastday: Set when password was changed in YYYY-MM-DD format
+    :param str expiredate: Set when user's account will no longer be
+                           accessible in YYYY-MM-DD format.
+                           -1 will remove an account expiration date.
+    :param str inactive: Set the number of days of inactivity after a password
+                         has expired before the account is locked.
+                         -1 will remove an account's inactivity.
+    :param str mindays: Set the minimum number of days between password
+                        changes to MIN_DAYS.
+                        0 indicates the password can be changed anytime.
+    :param str maxdays: Set the maximum number of days during which a
+                        password is valid.
+                        -1 as MAX_DAYS will remove checking maxdays
+    :param str root: Apply changes in the CHROOT_DIR directory
+    :param str warndays: Set the number of days of warning before a password
+                         change is required
+    :raises subprocess.CalledProcessError: if call to chage fails
+    """
+    cmd = ['chage']
+    if root:
+        cmd.extend(['--root', root])
+    if lastday:
+        cmd.extend(['--lastday', lastday])
+    if expiredate:
+        cmd.extend(['--expiredate', expiredate])
+    if inactive:
+        cmd.extend(['--inactive', inactive])
+    if mindays:
+        cmd.extend(['--mindays', mindays])
+    if maxdays:
+        cmd.extend(['--maxdays', maxdays])
+    if warndays:
+        cmd.extend(['--warndays', warndays])
+    cmd.append(username)
+    subprocess.check_call(cmd)
+
+
+remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
+
+
+def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
+    """Replicate the contents of a path"""
+    options = options or ['--delete', '--executability']
+    cmd = ['/usr/bin/rsync', flags]
+    if timeout:
+        cmd = ['timeout', str(timeout)] + cmd
+    cmd.extend(options)
+    cmd.append(from_path)
+    cmd.append(to_path)
+    log(" ".join(cmd))
+    return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
+
+
+def symlink(source, destination):
+    """Create a symbolic link"""
+    log("Symlinking {} as {}".format(source, destination))
+    cmd = [
+        'ln',
+        '-sf',
+        source,
+        destination,
+    ]
+    subprocess.check_call(cmd)
+
+
+def mkdir(path, owner='root', group='root', perms=0o555, force=False):
+    """Create a directory"""
+    log("Making dir {} {}:{} {:o}".format(path, owner, group,
+                                          perms))
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+    realpath = os.path.abspath(path)
+    path_exists = os.path.exists(realpath)
+    if path_exists and force:
+        if not os.path.isdir(realpath):
+            log("Removing non-directory file {} prior to mkdir()".format(path))
+            os.unlink(realpath)
+            os.makedirs(realpath, perms)
+    elif not path_exists:
+        os.makedirs(realpath, perms)
+    os.chown(realpath, uid, gid)
+    os.chmod(realpath, perms)
+
+
+def write_file(path, content, owner='root', group='root', perms=0o444):
+    """Create or overwrite a file with the contents of a byte string."""
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+    # let's see if we can grab the file and compare the content, to avoid
+    # doing a write.
+    existing_content = None
+    existing_uid, existing_gid, existing_perms = None, None, None
+    try:
+        with open(path, 'rb') as target:
+            existing_content = target.read()
+        stat = os.stat(path)
+        existing_uid, existing_gid, existing_perms = (
+            stat.st_uid, stat.st_gid, stat.st_mode
+        )
+    except Exception:
+        pass
+    if content != existing_content:
+        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
+            level=DEBUG)
+        with open(path, 'wb') as target:
+            os.fchown(target.fileno(), uid, gid)
+            os.fchmod(target.fileno(), perms)
+            if six.PY3 and isinstance(content, six.string_types):
+                content = content.encode('UTF-8')
+            target.write(content)
+        return
+    # the contents were the same, but we might still need to change the
+    # ownership or permissions.
+    if existing_uid != uid:
+        log("Changing uid on already existing content: {} -> {}"
+            .format(existing_uid, uid), level=DEBUG)
+        os.chown(path, uid, -1)
+    if existing_gid != gid:
+        log("Changing gid on already existing content: {} -> {}"
+            .format(existing_gid, gid), level=DEBUG)
+        os.chown(path, -1, gid)
+    if existing_perms != perms:
+        log("Changing permissions on existing content: {} -> {}"
+            .format(existing_perms, perms), level=DEBUG)
+        os.chmod(path, perms)
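+
+# Example (a sketch; the path, content and group are illustrative):
+#
+#     write_file('/etc/myapp/myapp.conf', rendered_config,
+#                owner='root', group='myapp', perms=0o640)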
+
+
+def fstab_remove(mp):
+    """Remove the given mountpoint entry from /etc/fstab"""
+    return Fstab.remove_by_mountpoint(mp)
+
+
+def fstab_add(dev, mp, fs, options=None):
+    """Adds the given device entry to the /etc/fstab file"""
+    return Fstab.add(dev, mp, fs, options=options)
+
+
+def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
+    """Mount a filesystem at a particular mountpoint"""
+    cmd_args = ['mount']
+    if options is not None:
+        cmd_args.extend(['-o', options])
+    cmd_args.extend([device, mountpoint])
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
+        return False
+
+    if persist:
+        return fstab_add(device, mountpoint, filesystem, options=options)
+    return True
+
+
+def umount(mountpoint, persist=False):
+    """Unmount a filesystem"""
+    cmd_args = ['umount', mountpoint]
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
+        return False
+
+    if persist:
+        return fstab_remove(mountpoint)
+    return True
+
+
+def mounts():
+    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
+    with open('/proc/mounts') as f:
+        # [['/mount/point','/dev/path'],[...]]
+        system_mounts = [m[1::-1] for m in [l.strip().split()
+                                            for l in f.readlines()]]
+    return system_mounts
+
+
+def fstab_mount(mountpoint):
+    """Mount filesystem using fstab"""
+    cmd_args = ['mount', mountpoint]
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error mounting {}\n{}'.format(mountpoint, e.output))
+        return False
+    return True
+
+
+def file_hash(path, hash_type='md5'):
+    """Generate a hash checksum of the contents of 'path' or None if not found.
+
+    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
+                          such as md5, sha1, sha256, sha512, etc.
+    """
+    if os.path.exists(path):
+        h = getattr(hashlib, hash_type)()
+        with open(path, 'rb') as source:
+            h.update(source.read())
+        return h.hexdigest()
+    else:
+        return None
+
+
+def path_hash(path):
+    """Generate a hash checksum of all files matching 'path'. Standard
+    wildcards like '*' and '?' are supported, see documentation for the 'glob'
+    module for more information.
+
+    :return: dict: A { filename: hash } dictionary for all matched files.
+                   Empty if none found.
+    """
+    return {
+        filename: file_hash(filename)
+        for filename in glob.iglob(path)
+    }
+
+
+def check_hash(path, checksum, hash_type='md5'):
+    """Validate a file using a cryptographic checksum.
+
+    :param str checksum: Value of the checksum used to validate the file.
+    :param str hash_type: Hash algorithm used to generate `checksum`.
+        Can be any hash algorithm supported by :mod:`hashlib`,
+        such as md5, sha1, sha256, sha512, etc.
+    :raises ChecksumError: If the file fails the checksum
+
+    """
+    actual_checksum = file_hash(path, hash_type)
+    if checksum != actual_checksum:
+        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
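+
+# Example (a minimal sketch; the digest shown is the md5 of an empty file):
+#
+#     check_hash('/tmp/payload.bin',
+#                'd41d8cd98f00b204e9800998ecf8427e', hash_type='md5')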
+
+
+class ChecksumError(ValueError):
+    """A class derived from Value error to indicate the checksum failed."""
+    pass
+
+
+def restart_on_change(restart_map, stopstart=False, restart_functions=None):
+    """Restart services based on configuration files changing
+
+    This function is used as a decorator, for example::
+
+        @restart_on_change({
+            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ],
+            '/etc/apache/sites-enabled/*': [ 'apache2' ]
+            })
+        def config_changed():
+            pass  # your code here
+
+    In this example, the cinder-api and cinder-volume services
+    would be restarted if /etc/ceph/ceph.conf is changed by the
+    ceph_client_changed function. The apache2 service would be
+    restarted if any file matching the pattern got changed, created
+    or removed. Standard wildcards are supported, see documentation
+    for the 'glob' module for more information.
+
+    @param restart_map: {path_file_name: [service_name, ...]}
+    @param stopstart: default False; whether to stop and start the service
+                      (True) instead of restarting it (False)
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result from decorated function
+    """
+    def wrap(f):
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), restart_map, stopstart,
+                restart_functions)
+        return wrapped_f
+    return wrap
+
+
+def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
+                             restart_functions=None):
+    """Helper function to perform the restart_on_change function.
+
+    This is provided for decorators to restart services if files described
+    in the restart_map have changed after an invocation of lambda_f().
+
+    @param lambda_f: function to call.
+    @param restart_map: {file: [service, ...]}
+    @param stopstart: whether to stop, start or restart a service
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result of lambda_f()
+    """
+    if restart_functions is None:
+        restart_functions = {}
+    checksums = {path: path_hash(path) for path in restart_map}
+    r = lambda_f()
+    # create a list of lists of the services to restart
+    restarts = [restart_map[path]
+                for path in restart_map
+                if path_hash(path) != checksums[path]]
+    # create a flat list of ordered services without duplicates from lists
+    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
+    if services_list:
+        actions = ('stop', 'start') if stopstart else ('restart',)
+        for service_name in services_list:
+            if service_name in restart_functions:
+                restart_functions[service_name](service_name)
+            else:
+                for action in actions:
+                    service(action, service_name)
+    return r
+
+
+def pwgen(length=None):
+    """Generate a random pasword."""
+    if length is None:
+        # Using a weak PRNG to pick a random length is OK
+        length = random.choice(range(35, 45))
+    alphanumeric_chars = [
+        l for l in (string.ascii_letters + string.digits)
+        if l not in 'l0QD1vAEIOUaeiou']
+    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
+    # actual password
+    random_generator = random.SystemRandom()
+    random_chars = [
+        random_generator.choice(alphanumeric_chars) for _ in range(length)]
+    return(''.join(random_chars))
+
+
+def is_phy_iface(interface):
+    """Returns True if interface is not virtual, otherwise False."""
+    if interface:
+        sys_net = '/sys/class/net'
+        if os.path.isdir(sys_net):
+            for iface in glob.glob(os.path.join(sys_net, '*')):
+                if '/virtual/' in os.path.realpath(iface):
+                    continue
+
+                if interface == os.path.basename(iface):
+                    return True
+
+    return False
+
+
+def get_bond_master(interface):
+    """Returns bond master if interface is bond slave otherwise None.
+
+    NOTE: the provided interface is expected to be physical
+    """
+    if interface:
+        iface_path = '/sys/class/net/%s' % (interface)
+        if os.path.exists(iface_path):
+            if '/virtual/' in os.path.realpath(iface_path):
+                return None
+
+            master = os.path.join(iface_path, 'master')
+            if os.path.exists(master):
+                master = os.path.realpath(master)
+                # make sure it is a bond master
+                if os.path.exists(os.path.join(master, 'bonding')):
+                    return os.path.basename(master)
+
+    return None
+
+
+def list_nics(nic_type=None):
+    """Return a list of nics of given type(s)"""
+    if isinstance(nic_type, six.string_types):
+        int_types = [nic_type]
+    else:
+        int_types = nic_type
+
+    interfaces = []
+    if nic_type:
+        for int_type in int_types:
+            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
+            ip_output = subprocess.check_output(
+                cmd).decode('UTF-8', errors='replace')
+            ip_output = ip_output.split('\n')
+            ip_output = (line for line in ip_output if line)
+            for line in ip_output:
+                if line.split()[1].startswith(int_type):
+                    matched = re.search('.*: (' + int_type +
+                                        r'[0-9]+\.[0-9]+)@.*', line)
+                    if matched:
+                        iface = matched.groups()[0]
+                    else:
+                        iface = line.split()[1].replace(":", "")
+
+                    if iface not in interfaces:
+                        interfaces.append(iface)
+    else:
+        cmd = ['ip', 'a']
+        ip_output = subprocess.check_output(
+            cmd).decode('UTF-8', errors='replace').split('\n')
+        ip_output = (line.strip() for line in ip_output if line)
+
+        key = re.compile(r'^[0-9]+:\s+(.+):')
+        for line in ip_output:
+            matched = re.search(key, line)
+            if matched:
+                iface = matched.group(1)
+                iface = iface.partition("@")[0]
+                if iface not in interfaces:
+                    interfaces.append(iface)
+
+    return interfaces
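+
+# Illustrative usage sketch (editor's note, not part of the synced upstream
+# module):
+#
+#     list_nics()                 # all interfaces, e.g. ['lo', 'eth0']
+#     list_nics('eth')            # interfaces labelled eth*
+#     list_nics(['eth', 'bond'])  # several interface types at once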
+
+
+def set_nic_mtu(nic, mtu):
+    """Set the Maximum Transmission Unit (MTU) on a network interface."""
+    cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
+    subprocess.check_call(cmd)
+
+
+def get_nic_mtu(nic):
+    """Return the Maximum Transmission Unit (MTU) for a network interface."""
+    cmd = ['ip', 'addr', 'show', nic]
+    ip_output = subprocess.check_output(
+        cmd).decode('UTF-8', errors='replace').split('\n')
+    mtu = ""
+    for line in ip_output:
+        words = line.split()
+        if 'mtu' in words:
+            mtu = words[words.index("mtu") + 1]
+    return mtu
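+
+# Illustrative usage sketch (editor's note, not part of the synced upstream
+# module); the interface name is hypothetical:
+#
+#     set_nic_mtu('eth0', '9000')
+#     assert get_nic_mtu('eth0') == '9000'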
+
+
+def get_nic_hwaddr(nic):
+    """Return the Media Access Control (MAC) for a network interface."""
+    cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
+    ip_output = subprocess.check_output(cmd).decode('UTF-8', errors='replace')
+    hwaddr = ""
+    words = ip_output.split()
+    if 'link/ether' in words:
+        hwaddr = words[words.index('link/ether') + 1]
+    return hwaddr
+
+
+@contextmanager
+def chdir(directory):
+    """Change the current working directory to a different directory for a code
+    block and return the previous directory after the block exits. Useful to
+    run commands from a specified directory.
+
+    :param str directory: The directory path to change to for this context.
+    """
+    cur = os.getcwd()
+    try:
+        os.chdir(directory)
+        yield
+    finally:
+        os.chdir(cur)
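+
+# Illustrative usage sketch (editor's note, not part of the synced upstream
+# module):
+#
+#     with chdir('/tmp'):
+#         subprocess.check_call(['make'])  # runs with cwd=/tmp
+#     # the previous working directory is restored on exit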
+
+
+def chownr(path, owner, group, follow_links=True, chowntopdir=False):
+    """Recursively change user and group ownership of files and directories
+    in given path. Doesn't chown path itself by default, only its children.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
+    :param bool follow_links: Also follow and chown links if True
+    :param bool chowntopdir: Also chown path itself if True
+    """
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+    if follow_links:
+        chown = os.chown
+    else:
+        chown = os.lchown
+
+    if chowntopdir:
+        broken_symlink = os.path.lexists(path) and not os.path.exists(path)
+        if not broken_symlink:
+            chown(path, uid, gid)
+    for root, dirs, files in os.walk(path, followlinks=follow_links):
+        for name in dirs + files:
+            full = os.path.join(root, name)
+            try:
+                chown(full, uid, gid)
+            except (IOError, OSError) as e:
+                # Intended to ignore "file not found". Catching both to be
+                # compatible with both Python 2.7 and 3.x.
+                if e.errno == errno.ENOENT:
+                    pass
+
+
+def lchownr(path, owner, group):
+    """Recursively change user and group ownership of files and directories
+    in a given path, not following symbolic links. See the documentation for
+    'os.lchown' for more information.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
+    """
+    chownr(path, owner, group, follow_links=False)
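+
+# Illustrative usage sketch (editor's note, not part of the synced upstream
+# module); the path, user and group are hypothetical:
+#
+#     chownr('/var/lib/app', 'app', 'app', chowntopdir=True)
+#     lchownr('/var/lib/app', 'app', 'app')  # do not follow symlinks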
+
+
+def owner(path):
+    """Returns a tuple containing the username & groupname owning the path.
+
+    :param str path: the string path to retrieve the ownership
+    :return tuple(str, str): A (username, groupname) tuple containing the
+                             name of the user and group owning the path.
+    :raises OSError: if the specified path does not exist
+    """
+    stat = os.stat(path)
+    username = pwd.getpwuid(stat.st_uid)[0]
+    groupname = grp.getgrgid(stat.st_gid)[0]
+    return username, groupname
+
+
+def get_total_ram():
+    """The total amount of system RAM in bytes.
+
+    This is what is reported by the OS, and may be overcommitted when
+    there are multiple containers hosted on the same machine.
+    """
+    with open('/proc/meminfo', 'r') as f:
+        for line in f.readlines():
+            if line:
+                key, value, unit = line.split()
+                if key == 'MemTotal:':
+                    assert unit == 'kB', 'Unknown unit'
+                    return int(value) * 1024  # Classic, not KiB.
+        raise NotImplementedError()
+
+
+UPSTART_CONTAINER_TYPE = '/run/container_type'
+
+
+def is_container():
+    """Determine whether unit is running in a container
+
+    @return: boolean indicating if unit is in a container
+    """
+    if init_is_systemd():
+        # Detect using systemd-detect-virt
+        return subprocess.call(['systemd-detect-virt',
+                                '--container']) == 0
+    else:
+        # Detect using upstart container file marker
+        return os.path.exists(UPSTART_CONTAINER_TYPE)
+
+
+def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
+    """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list.
+
+    This method has no effect if the path specified by updatedb_path does not
+    exist or is not a file.
+
+    @param path: string the path to add to the updatedb.conf PRUNEPATHS value
+    @param updatedb_path: the path to the updatedb.conf file
+    """
+    if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path):
+        # If the updatedb.conf file doesn't exist then don't attempt to update
+        # the file as the package providing mlocate may not be installed on
+        # the local system
+        return
+
+    with open(updatedb_path, 'r+') as f_id:
+        updatedb_text = f_id.read()
+        output = updatedb(updatedb_text, path)
+        f_id.seek(0)
+        f_id.write(output)
+        f_id.truncate()
+
+
+def updatedb(updatedb_text, new_path):
+    """Return updatedb.conf text with new_path added to the PRUNEPATHS."""
+    lines = updatedb_text.split("\n")
+    for i, line in enumerate(lines):
+        if line.startswith("PRUNEPATHS="):
+            paths_line = line.split("=")[1].replace('"', '')
+            paths = paths_line.split(" ")
+            if new_path not in paths:
+                paths.append(new_path)
+                lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
+    output = "\n".join(lines)
+    return output
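+
+# Illustrative transformation (editor's note, not part of the synced upstream
+# module):
+#
+#     updatedb('PRUNEPATHS="/tmp /var/spool"', '/srv/ceph')
+#     # -> 'PRUNEPATHS="/tmp /var/spool /srv/ceph"'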
+
+
+def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
+    """ Modulo distribution
+
+    This helper uses the unit number, a modulo value and a constant wait time
+    to produce a calculated wait time distribution. This is useful in large
+    scale deployments to distribute load during an expensive operation such as
+    service restarts.
+
+    For example, if you have 1000 nodes that need to restart 100 at a time,
+    one minute apart:
+
+      time.sleep(modulo_distribution(modulo=100, wait=60))
+      restart()
+
+    If you need restarts to happen serially, set modulo to the exact number
+    of nodes and use a high constant wait time:
+
+      time.sleep(modulo_distribution(modulo=10, wait=120))
+      restart()
+
+    @param modulo: int The modulo number creates the group distribution
+    @param wait: int The constant time wait value
+    @param non_zero_wait: boolean If True and unit % modulo == 0, return
+                          modulo * wait instead of 0. Used to avoid collisions
+                          with leader nodes, which are often given priority.
+    @return: int Calculated time to wait for unit operation
+    """
+    unit_number = int(local_unit().split('/')[1])
+    calculated_wait_time = (unit_number % modulo) * wait
+    if non_zero_wait and calculated_wait_time == 0:
+        return modulo * wait
+    else:
+        return calculated_wait_time
+
+
+def install_ca_cert(ca_cert, name=None):
+    """
+    Install the given cert as a trusted CA.
+
+    The ``name`` is the stem of the filename where the cert is written, and if
+    not provided, it will default to ``juju-{charm_name}``.
+
+    If the cert is empty or None, or is unchanged, nothing is done.
+    """
+    if not ca_cert:
+        return
+    if not isinstance(ca_cert, bytes):
+        ca_cert = ca_cert.encode('utf8')
+    if not name:
+        name = 'juju-{}'.format(charm_name())
+    cert_file = '{}/{}.crt'.format(CA_CERT_DIR, name)
+    new_hash = hashlib.md5(ca_cert).hexdigest()
+    if file_hash(cert_file) == new_hash:
+        return
+    log("Installing new CA cert at: {}".format(cert_file), level=INFO)
+    write_file(cert_file, ca_cert)
+    subprocess.check_call(['update-ca-certificates', '--fresh'])
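+
+# Illustrative usage sketch (editor's note, not part of the synced upstream
+# module); the PEM payload and name are hypothetical, and CA_CERT_DIR is
+# assumed to be the system ca-certificates drop-in directory:
+#
+#     install_ca_cert(pem_bytes, name='my-root-ca')
+#     # writes {CA_CERT_DIR}/my-root-ca.crt and refreshes the trust store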
+
+
+def get_system_env(key, default=None):
+    """Get data from system environment as represented in ``/etc/environment``.
+
+    :param key: Key to look up
+    :type key: str
+    :param default: Value to return if key is not found
+    :type default: any
+    :returns: Value for key if found or contents of default parameter
+    :rtype: any
+    :raises: subprocess.CalledProcessError
+    """
+    env_file = '/etc/environment'
+    # use the shell and env(1) to parse the global environments file.  This is
+    # done to get the correct result even if the user has shell variable
+    # substitutions or other shell logic in that file.
+    output = subprocess.check_output(
+        ['env', '-i', '/bin/bash', '-c',
+         'set -a && source {} && env'.format(env_file)],
+        universal_newlines=True)
+    for k, v in (line.split('=', 1)
+                 for line in output.splitlines() if '=' in line):
+        if k == key:
+            return v
+    return default
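+
+# Illustrative usage sketch (editor's note, not part of the synced upstream
+# module):
+#
+#     path = get_system_env('PATH')
+#     proxy = get_system_env('http_proxy', default='')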
diff --git a/charmhelpers/core/host_factory/__init__.py b/charmhelpers/core/host_factory/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/charmhelpers/core/host_factory/centos.py b/charmhelpers/core/host_factory/centos.py
new file mode 100644
index 0000000000000000000000000000000000000000..7781a3961f23ce0b161ae08b11710466af8de814
--- /dev/null
+++ b/charmhelpers/core/host_factory/centos.py
@@ -0,0 +1,72 @@
+import subprocess
+import yum
+import os
+
+from charmhelpers.core.strutils import BasicStringComparator
+
+
+class CompareHostReleases(BasicStringComparator):
+    """Provide comparisons of Host releases.
+
+    Use in the form of
+
+    if CompareHostReleases(release) > 'trusty':
+        # do something for releases newer than trusty
+    """
+
+    def __init__(self, item):
+        raise NotImplementedError(
+            "CompareHostReleases() is not implemented for CentOS")
+
+
+def service_available(service_name):
+    # """Determine whether a system service is available."""
+    if os.path.isdir('/run/systemd/system'):
+        cmd = ['systemctl', 'is-enabled', service_name]
+    else:
+        cmd = ['service', service_name, 'is-enabled']
+    return subprocess.call(cmd) == 0
+
+
+def add_new_group(group_name, system_group=False, gid=None):
+    cmd = ['groupadd']
+    if gid:
+        cmd.extend(['--gid', str(gid)])
+    if system_group:
+        cmd.append('-r')
+    cmd.append(group_name)
+    subprocess.check_call(cmd)
+
+
+def lsb_release():
+    """Return /etc/os-release in a dict."""
+    d = {}
+    with open('/etc/os-release', 'r') as lsb:
+        for l in lsb:
+            s = l.split('=')
+            if len(s) != 2:
+                continue
+            d[s[0].strip()] = s[1].strip()
+    return d
+
+
+def cmp_pkgrevno(package, revno, pkgcache=None):
+    """Compare supplied revno with the revno of the installed package.
+
+    *  1 => Installed revno is greater than supplied arg
+    *  0 => Installed revno is the same as supplied arg
+    * -1 => Installed revno is less than supplied arg
+
+    This function builds a package cache with yum.YumBase() if the
+    pkgcache argument is None.
+    """
+    if not pkgcache:
+        y = yum.YumBase()
+        packages = y.doPackageLists()
+        pkgcache = {i.Name: i.version for i in packages['installed']}
+    pkg = pkgcache[package]
+    if pkg > revno:
+        return 1
+    if pkg < revno:
+        return -1
+    return 0
diff --git a/charmhelpers/core/host_factory/ubuntu.py b/charmhelpers/core/host_factory/ubuntu.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3ec69478193df68cbf08253d8e017277c7b2b1c
--- /dev/null
+++ b/charmhelpers/core/host_factory/ubuntu.py
@@ -0,0 +1,117 @@
+import subprocess
+
+from charmhelpers.core.hookenv import cached
+from charmhelpers.core.strutils import BasicStringComparator
+
+
+UBUNTU_RELEASES = (
+    'lucid',
+    'maverick',
+    'natty',
+    'oneiric',
+    'precise',
+    'quantal',
+    'raring',
+    'saucy',
+    'trusty',
+    'utopic',
+    'vivid',
+    'wily',
+    'xenial',
+    'yakkety',
+    'zesty',
+    'artful',
+    'bionic',
+    'cosmic',
+    'disco',
+    'eoan',
+    'focal',
+    'groovy'
+)
+
+
+class CompareHostReleases(BasicStringComparator):
+    """Provide comparisons of Ubuntu releases.
+
+    Use in the form of
+
+    if CompareHostReleases(release) > 'trusty':
+        # do something for releases newer than trusty
+    """
+    _list = UBUNTU_RELEASES
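+
+# Illustrative usage sketch (editor's note, not part of the synced upstream
+# module):
+#
+#     release = lsb_release()['DISTRIB_CODENAME'].lower()
+#     if CompareHostReleases(release) >= 'xenial':
+#         pass  # systemd-era code path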
+
+
+def service_available(service_name):
+    """Determine whether a system service is available"""
+    try:
+        subprocess.check_output(
+            ['service', service_name, 'status'],
+            stderr=subprocess.STDOUT).decode('UTF-8')
+    except subprocess.CalledProcessError as e:
+        return b'unrecognized service' not in e.output
+    else:
+        return True
+
+
+def add_new_group(group_name, system_group=False, gid=None):
+    cmd = ['addgroup']
+    if gid:
+        cmd.extend(['--gid', str(gid)])
+    if system_group:
+        cmd.append('--system')
+    else:
+        cmd.extend([
+            '--group',
+        ])
+    cmd.append(group_name)
+    subprocess.check_call(cmd)
+
+
+def lsb_release():
+    """Return /etc/lsb-release in a dict"""
+    d = {}
+    with open('/etc/lsb-release', 'r') as lsb:
+        for l in lsb:
+            k, v = l.split('=')
+            d[k.strip()] = v.strip()
+    return d
+
+
+def get_distrib_codename():
+    """Return the codename of the distribution
+    :returns: The codename
+    :rtype: str
+    """
+    return lsb_release()['DISTRIB_CODENAME'].lower()
+
+
+def cmp_pkgrevno(package, revno, pkgcache=None):
+    """Compare supplied revno with the revno of the installed package.
+
+    *  1 => Installed revno is greater than supplied arg
+    *  0 => Installed revno is the same as supplied arg
+    * -1 => Installed revno is less than supplied arg
+
+    This function imports apt_cache function from charmhelpers.fetch if
+    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
+    you call this function, or pass an apt_pkg.Cache() instance.
+    """
+    from charmhelpers.fetch import apt_pkg
+    if not pkgcache:
+        from charmhelpers.fetch import apt_cache
+        pkgcache = apt_cache()
+    pkg = pkgcache[package]
+    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
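+
+# Illustrative usage sketch (editor's note, not part of the synced upstream
+# module); the package name and version are hypothetical:
+#
+#     if cmp_pkgrevno('ceph-common', '12.2.0') >= 0:
+#         pass  # installed ceph-common is at least 12.2.0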
+
+
+@cached
+def arch():
+    """Return the package architecture as a string.
+
+    :returns: the architecture
+    :rtype: str
+    :raises: subprocess.CalledProcessError if dpkg command fails
+    """
+    return subprocess.check_output(
+        ['dpkg', '--print-architecture']
+    ).rstrip().decode('UTF-8')
diff --git a/charmhelpers/core/hugepage.py b/charmhelpers/core/hugepage.py
new file mode 100644
index 0000000000000000000000000000000000000000..54b5b5e2fcf81eea5f2ebfbceb620ea68d725584
--- /dev/null
+++ b/charmhelpers/core/hugepage.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+from charmhelpers.core import fstab
+from charmhelpers.core import sysctl
+from charmhelpers.core.host import (
+    add_group,
+    add_user_to_group,
+    fstab_mount,
+    mkdir,
+)
+from charmhelpers.core.strutils import bytes_from_string
+from subprocess import check_output
+
+
+def hugepage_support(user, group='hugetlb', nr_hugepages=256,
+                     max_map_count=65536, mnt_point='/run/hugepages/kvm',
+                     pagesize='2MB', mount=True, set_shmmax=False):
+    """Enable hugepages on system.
+
+    Args:
+    user (str)  -- Username to allow access to hugepages to
+    group (str) -- Group name to own hugepages
+    nr_hugepages (int) -- Number of pages to reserve
+    max_map_count (int) -- Number of Virtual Memory Areas a process can own
+    mnt_point (str) -- Directory to mount hugepages on
+    pagesize (str) -- Size of hugepages
+    mount (bool) -- Whether to mount hugepages
+    set_shmmax (bool) -- Whether to ensure kernel.shmmax is large enough for
+                         the reserved hugepages
+    """
+    group_info = add_group(group)
+    gid = group_info.gr_gid
+    add_user_to_group(user, group)
+    if max_map_count < 2 * nr_hugepages:
+        max_map_count = 2 * nr_hugepages
+    sysctl_settings = {
+        'vm.nr_hugepages': nr_hugepages,
+        'vm.max_map_count': max_map_count,
+        'vm.hugetlb_shm_group': gid,
+    }
+    if set_shmmax:
+        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
+        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
+        if shmmax_minsize > shmmax_current:
+            sysctl_settings['kernel.shmmax'] = shmmax_minsize
+    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
+    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
+    lfstab = fstab.Fstab()
+    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
+    if fstab_entry:
+        lfstab.remove_entry(fstab_entry)
+    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
+                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
+    lfstab.add_entry(entry)
+    if mount:
+        fstab_mount(mnt_point)
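+
+# Illustrative usage sketch (editor's note, not part of the synced upstream
+# module); the username is hypothetical:
+#
+#     hugepage_support('libvirt-qemu', group='hugetlb', nr_hugepages=512,
+#                      mnt_point='/run/hugepages/kvm', pagesize='2MB')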
diff --git a/charmhelpers/core/kernel.py b/charmhelpers/core/kernel.py
new file mode 100644
index 0000000000000000000000000000000000000000..e01f4f8ba73ee0d5ab7553740c2590a50e42f96d
--- /dev/null
+++ b/charmhelpers/core/kernel.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import subprocess
+
+from charmhelpers.osplatform import get_platform
+from charmhelpers.core.hookenv import (
+    log,
+    INFO
+)
+
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+    from charmhelpers.core.kernel_factory.ubuntu import (  # NOQA:F401
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+    from charmhelpers.core.kernel_factory.centos import (  # NOQA:F401
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+
+def modprobe(module, persist=True):
+    """Load a kernel module and configure for auto-load on reboot."""
+    cmd = ['modprobe', module]
+
+    log('Loading kernel module %s' % module, level=INFO)
+
+    subprocess.check_call(cmd)
+    if persist:
+        persistent_modprobe(module)
+
+
+def rmmod(module, force=False):
+    """Remove a module from the linux kernel"""
+    cmd = ['rmmod']
+    if force:
+        cmd.append('-f')
+    cmd.append(module)
+    log('Removing kernel module %s' % module, level=INFO)
+    return subprocess.check_call(cmd)
+
+
+def lsmod():
+    """Shows what kernel modules are currently loaded"""
+    return subprocess.check_output(['lsmod'],
+                                   universal_newlines=True)
+
+
+def is_module_loaded(module):
+    """Checks if a kernel module is already loaded"""
+    matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
+    return len(matches) > 0
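+
+# Illustrative usage sketch (editor's note, not part of the synced upstream
+# module):
+#
+#     if not is_module_loaded('bonding'):
+#         modprobe('bonding')  # load now and persist across reboots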
diff --git a/charmhelpers/core/kernel_factory/__init__.py b/charmhelpers/core/kernel_factory/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/charmhelpers/core/kernel_factory/centos.py b/charmhelpers/core/kernel_factory/centos.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c402c1157900ff1ad5c6c296a409c9e8fb96d2b
--- /dev/null
+++ b/charmhelpers/core/kernel_factory/centos.py
@@ -0,0 +1,17 @@
+import subprocess
+import os
+
+
+def persistent_modprobe(module):
+    """Load a kernel module and configure for auto-load on reboot."""
+    if not os.path.exists('/etc/rc.modules'):
+        open('/etc/rc.modules', 'a').close()
+        os.chmod('/etc/rc.modules', 0o111)
+    with open('/etc/rc.modules', 'r+') as modules:
+        if module not in modules.read():
+            modules.write('modprobe %s\n' % module)
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image."""
+    return subprocess.check_call(["dracut", "-f", version])
diff --git a/charmhelpers/core/kernel_factory/ubuntu.py b/charmhelpers/core/kernel_factory/ubuntu.py
new file mode 100644
index 0000000000000000000000000000000000000000..3de372fd3df38fe151cf79243f129cb504516f22
--- /dev/null
+++ b/charmhelpers/core/kernel_factory/ubuntu.py
@@ -0,0 +1,13 @@
+import subprocess
+
+
+def persistent_modprobe(module):
+    """Load a kernel module and configure for auto-load on reboot."""
+    with open('/etc/modules', 'r+') as modules:
+        if module not in modules.read():
+            modules.write(module + "\n")
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image."""
+    return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/charmhelpers/core/services/__init__.py b/charmhelpers/core/services/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..61fd074edc09de434859e48ae1b36baef0503708
--- /dev/null
+++ b/charmhelpers/core/services/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .base import *  # NOQA
+from .helpers import *  # NOQA
diff --git a/charmhelpers/core/services/base.py b/charmhelpers/core/services/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..179ad4f0c367dd6b13c10b201c3752d1c8daf05e
--- /dev/null
+++ b/charmhelpers/core/services/base.py
@@ -0,0 +1,362 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import json
+from inspect import getargspec
+from collections import Iterable, OrderedDict
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+__all__ = ['ServiceManager', 'ManagerCallback',
+           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
+           'service_restart', 'service_stop']
+
+
+class ServiceManager(object):
+    def __init__(self, services=None):
+        """
+        Register a list of services, given their definitions.
+
+        Service definitions are dicts in the following formats (all keys except
+        'service' are optional)::
+
+            {
+                "service": <service name>,
+                "required_data": <list of required data contexts>,
+                "provided_data": <list of provided data contexts>,
+                "data_ready": <one or more callbacks>,
+                "data_lost": <one or more callbacks>,
+                "start": <one or more callbacks>,
+                "stop": <one or more callbacks>,
+                "ports": <list of ports to manage>,
+            }
+
+        The 'required_data' list should contain dicts of required data (or
+        dependency managers that act like dicts and know how to collect the data).
+        Only when all items in the 'required_data' list are populated are the list
+        of 'data_ready' and 'start' callbacks executed.  See `is_ready()` for more
+        information.
+
+        The 'provided_data' list should contain relation data providers, most likely
+        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
+        that will indicate a set of data to set on a given relation.
+
+        The 'data_ready' value should be either a single callback, or a list of
+        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
+        Each callback will be called with the service name as the only parameter.
+        After all of the 'data_ready' callbacks are called, the 'start' callbacks
+        are fired.
+
+        The 'data_lost' value should be either a single callback, or a list of
+        callbacks, to be called when a 'required_data' item no longer passes
+        `is_ready()`.  Each callback will be called with the service name as the
+        only parameter.  After all of the 'data_lost' callbacks are called,
+        the 'stop' callbacks are fired.
+
+        The 'start' value should be either a single callback, or a list of
+        callbacks, to be called when starting the service, after the 'data_ready'
+        callbacks are complete.  Each callback will be called with the service
+        name as the only parameter.  This defaults to
+        `[host.service_start, services.open_ports]`.
+
+        The 'stop' value should be either a single callback, or a list of
+        callbacks, to be called when stopping the service.  If the service is
+        being stopped because it no longer has all of its 'required_data', this
+        will be called after all of the 'data_lost' callbacks are complete.
+        Each callback will be called with the service name as the only parameter.
+        This defaults to `[services.close_ports, host.service_stop]`.
+
+        The 'ports' value should be a list of ports to manage.  The default
+        'start' handler will open the ports after the service is started,
+        and the default 'stop' handler will close the ports prior to stopping
+        the service.
+
+
+        Examples:
+
+        The following registers an Upstart service called bingod that depends on
+        a mongodb relation and which runs a custom `db_migrate` function prior to
+        restarting the service, and a Runit service called spadesd::
+
+            manager = services.ServiceManager([
+                {
+                    'service': 'bingod',
+                    'ports': [80, 443],
+                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
+                    'data_ready': [
+                        services.template(source='bingod.conf'),
+                        services.template(source='bingod.ini',
+                                          target='/etc/bingod.ini',
+                                          owner='bingo', perms=0400),
+                    ],
+                },
+                {
+                    'service': 'spadesd',
+                    'data_ready': services.template(source='spadesd_run.j2',
+                                                    target='/etc/sv/spadesd/run',
+                                                    perms=0555),
+                    'start': runit_start,
+                    'stop': runit_stop,
+                },
+            ])
+            manager.manage()
+        """
+        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
+        self._ready = None
+        self.services = OrderedDict()
+        for service in services or []:
+            service_name = service['service']
+            self.services[service_name] = service
+
+    def manage(self):
+        """
+        Handle the current hook by doing The Right Thing with the registered services.
+        """
+        hookenv._run_atstart()
+        try:
+            hook_name = hookenv.hook_name()
+            if hook_name == 'stop':
+                self.stop_services()
+            else:
+                self.reconfigure_services()
+                self.provide_data()
+        except SystemExit as x:
+            if x.code is None or x.code == 0:
+                hookenv._run_atexit()
+        hookenv._run_atexit()
+
+    def provide_data(self):
+        """
+        Set the relation data for each provider in the ``provided_data`` list.
+
+        A provider must have a `name` attribute, which indicates which relation
+        to set data on, and a `provide_data()` method, which returns a dict of
+        data to set.
+
+        The `provide_data()` method can optionally accept two parameters:
+
+          * ``remote_service`` The name of the remote service that the data will
+            be provided to.  The `provide_data()` method will be called once
+            for each connected service (not unit).  This allows the method to
+            tailor its data to the given service.
+          * ``service_ready`` Whether or not the service definition had all of
+            its requirements met, and thus the ``data_ready`` callbacks run.
+
+        Note that the ``provided_data`` methods are now called **after** the
+        ``data_ready`` callbacks are run.  This gives the ``data_ready`` callbacks
+        a chance to generate any data necessary to provide to the remote
+        services.
+        """
+        for service_name, service in self.services.items():
+            service_ready = self.is_ready(service_name)
+            for provider in service.get('provided_data', []):
+                for relid in hookenv.relation_ids(provider.name):
+                    units = hookenv.related_units(relid)
+                    if not units:
+                        continue
+                    remote_service = units[0].split('/')[0]
+                    argspec = getargspec(provider.provide_data)
+                    if len(argspec.args) > 1:
+                        data = provider.provide_data(remote_service, service_ready)
+                    else:
+                        data = provider.provide_data()
+                    if data:
+                        hookenv.relation_set(relid, data)
+
+    def reconfigure_services(self, *service_names):
+        """
+        Update all files for one or more registered services, and,
+        if ready, optionally restart them.
+
+        If no service names are given, reconfigures all registered services.
+        """
+        for service_name in service_names or self.services.keys():
+            if self.is_ready(service_name):
+                self.fire_event('data_ready', service_name)
+                self.fire_event('start', service_name, default=[
+                    service_restart,
+                    manage_ports])
+                self.save_ready(service_name)
+            else:
+                if self.was_ready(service_name):
+                    self.fire_event('data_lost', service_name)
+                self.fire_event('stop', service_name, default=[
+                    manage_ports,
+                    service_stop])
+                self.save_lost(service_name)
+
+    def stop_services(self, *service_names):
+        """
+        Stop one or more registered services, by name.
+
+        If no service names are given, stops all registered services.
+        """
+        for service_name in service_names or self.services.keys():
+            self.fire_event('stop', service_name, default=[
+                manage_ports,
+                service_stop])
+
+    def get_service(self, service_name):
+        """
+        Given the name of a registered service, return its service definition.
+        """
+        service = self.services.get(service_name)
+        if not service:
+            raise KeyError('Service not registered: %s' % service_name)
+        return service
+
+    def fire_event(self, event_name, service_name, default=None):
+        """
+        Fire a data_ready, data_lost, start, or stop event on a given service.
+        """
+        service = self.get_service(service_name)
+        callbacks = service.get(event_name, default)
+        if not callbacks:
+            return
+        if not isinstance(callbacks, Iterable):
+            callbacks = [callbacks]
+        for callback in callbacks:
+            if isinstance(callback, ManagerCallback):
+                callback(self, service_name, event_name)
+            else:
+                callback(service_name)
+
+    def is_ready(self, service_name):
+        """
+        Determine if a registered service is ready, by checking its 'required_data'.
+
+        A 'required_data' item can be any mapping type, and is considered ready
+        if `bool(item)` evaluates as True.
+        """
+        service = self.get_service(service_name)
+        reqs = service.get('required_data', [])
+        return all(bool(req) for req in reqs)
+
+    def _load_ready_file(self):
+        if self._ready is not None:
+            return
+        if os.path.exists(self._ready_file):
+            with open(self._ready_file) as fp:
+                self._ready = set(json.load(fp))
+        else:
+            self._ready = set()
+
+    def _save_ready_file(self):
+        if self._ready is None:
+            return
+        with open(self._ready_file, 'w') as fp:
+            json.dump(list(self._ready), fp)
+
+    def save_ready(self, service_name):
+        """
+        Save an indicator that the given service is now data_ready.
+        """
+        self._load_ready_file()
+        self._ready.add(service_name)
+        self._save_ready_file()
+
+    def save_lost(self, service_name):
+        """
+        Save an indicator that the given service is no longer data_ready.
+        """
+        self._load_ready_file()
+        self._ready.discard(service_name)
+        self._save_ready_file()
+
+    def was_ready(self, service_name):
+        """
+        Determine if the given service was previously data_ready.
+        """
+        self._load_ready_file()
+        return service_name in self._ready
+
+
+class ManagerCallback(object):
+    """
+    Special case of a callback that takes the `ServiceManager` instance
+    in addition to the service name.
+
+    Subclasses should implement `__call__` which should accept three parameters:
+
+        * `manager`       The `ServiceManager` instance
+        * `service_name`  The name of the service it's being triggered for
+        * `event_name`    The name of the event that this callback is handling
+    """
+    def __call__(self, manager, service_name, event_name):
+        raise NotImplementedError()
+
+
+class PortManagerCallback(ManagerCallback):
+    """
+    Callback class that will open or close ports, for use as either
+    a start or stop action.
+    """
+    def __call__(self, manager, service_name, event_name):
+        service = manager.get_service(service_name)
+        # turn this generator into a list,
+        # as we'll be going over it multiple times
+        new_ports = list(service.get('ports', []))
+        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
+        if os.path.exists(port_file):
+            with open(port_file) as fp:
+                old_ports = fp.read().split(',')
+            for old_port in old_ports:
+                if bool(old_port) and not self.ports_contains(old_port, new_ports):
+                    hookenv.close_port(old_port)
+        with open(port_file, 'w') as fp:
+            fp.write(','.join(str(port) for port in new_ports))
+        for port in new_ports:
+            # A port is either a number or 'ICMP'
+            protocol = 'TCP'
+            if str(port).upper() == 'ICMP':
+                protocol = 'ICMP'
+            if event_name == 'start':
+                hookenv.open_port(port, protocol)
+            elif event_name == 'stop':
+                hookenv.close_port(port, protocol)
+
+    def ports_contains(self, port, ports):
+        if not bool(port):
+            return False
+        if str(port).upper() != 'ICMP':
+            port = int(port)
+        return port in ports
+
+
+def service_stop(service_name):
+    """
+    Wrapper around host.service_stop to prevent spurious "unknown service"
+    messages in the logs.
+    """
+    if host.service_running(service_name):
+        host.service_stop(service_name)
+
+
+def service_restart(service_name):
+    """
+    Wrapper around host.service_restart to prevent spurious "unknown service"
+    messages in the logs.
+    """
+    if host.service_available(service_name):
+        if host.service_running(service_name):
+            host.service_restart(service_name)
+        else:
+            host.service_start(service_name)
+
+
+# Convenience aliases
+open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/charmhelpers/core/services/helpers.py b/charmhelpers/core/services/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e6e30d2fe0d9c73ffdc42d70b77e864b6379c53
--- /dev/null
+++ b/charmhelpers/core/services/helpers.py
@@ -0,0 +1,290 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import yaml
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+from charmhelpers.core import templating
+
+from charmhelpers.core.services.base import ManagerCallback
+
+
+__all__ = ['RelationContext', 'TemplateCallback',
+           'render_template', 'template']
+
+
+class RelationContext(dict):
+    """
+    Base class for a context generator that gets relation data from juju.
+
+    Subclasses must provide the attributes `name`, which is the name of the
+    interface of interest, `interface`, which is the type of the interface of
+    interest, and `required_keys`, which is the set of keys required for the
+    relation to be considered complete.  The data for all interfaces matching
+    the `name` attribute that are complete will used to populate the dictionary
+    values (see `get_data`, below).
+    The generated context will be namespaced under the relation :attr:`name`,
+    to prevent potential naming conflicts.
+
+    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+    :param list additional_required_keys: Extend the list of :attr:`required_keys`
+    """
+    name = None
+    interface = None
+
+    def __init__(self, name=None, additional_required_keys=None):
+        if not hasattr(self, 'required_keys'):
+            self.required_keys = []
+
+        if name is not None:
+            self.name = name
+        if additional_required_keys:
+            self.required_keys.extend(additional_required_keys)
+        self.get_data()
+
+    def __bool__(self):
+        """
+        Returns True if all of the required_keys are available.
+        """
+        return self.is_ready()
+
+    __nonzero__ = __bool__
+
+    def __repr__(self):
+        return super(RelationContext, self).__repr__()
+
+    def is_ready(self):
+        """
+        Returns True if all of the `required_keys` are available from any units.
+        """
+        ready = len(self.get(self.name, [])) > 0
+        if not ready:
+            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
+        return ready
+
+    def _is_ready(self, unit_data):
+        """
+        Helper method that tests a set of relation data and returns True if
+        all of the `required_keys` are present.
+        """
+        return set(unit_data.keys()).issuperset(set(self.required_keys))
+
+    def get_data(self):
+        """
+        Retrieve the relation data for each unit involved in a relation and,
+        if complete, store it in a list under `self[self.name]`.  This
+        is automatically called when the RelationContext is instantiated.
+
+        The units are sorted lexicographically first by the service ID, then by
+        the unit ID.  Thus, if an interface has two other services, 'db:1'
+        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+        set of data, the relation data for the units will be stored in the
+        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+        If you only care about a single unit on the relation, you can just
+        access it as `{{ interface[0]['key'] }}`.  However, if you can at all
+        support multiple units on a relation, you should iterate over the list,
+        like::
+
+            {% for unit in interface -%}
+                {{ unit['key'] }}{% if not loop.last %},{% endif %}
+            {%- endfor %}
+
+        Note that since all sets of relation data from all related services and
+        units are in a single list, if you need to know which service or unit a
+        set of data came from, you'll need to extend this class to preserve
+        that information.
+        """
+        if not hookenv.relation_ids(self.name):
+            return
+
+        ns = self.setdefault(self.name, [])
+        for rid in sorted(hookenv.relation_ids(self.name)):
+            for unit in sorted(hookenv.related_units(rid)):
+                reldata = hookenv.relation_get(rid=rid, unit=unit)
+                if self._is_ready(reldata):
+                    ns.append(reldata)
+
+    def provide_data(self):
+        """
+        Return data to be relation_set for this interface.
+        """
+        return {}
+
+
+class MysqlRelation(RelationContext):
+    """
+    Relation context for the `mysql` interface.
+
+    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+    :param list additional_required_keys: Extend the list of :attr:`required_keys`
+    """
+    name = 'db'
+    interface = 'mysql'
+
+    def __init__(self, *args, **kwargs):
+        self.required_keys = ['host', 'user', 'password', 'database']
+        RelationContext.__init__(self, *args, **kwargs)
+
+
+class HttpRelation(RelationContext):
+    """
+    Relation context for the `http` interface.
+
+    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+    :param list additional_required_keys: Extend the list of :attr:`required_keys`
+    """
+    name = 'website'
+    interface = 'http'
+
+    def __init__(self, *args, **kwargs):
+        self.required_keys = ['host', 'port']
+        RelationContext.__init__(self, *args, **kwargs)
+
+    def provide_data(self):
+        return {
+            'host': hookenv.unit_get('private-address'),
+            'port': 80,
+        }
+
+
+class RequiredConfig(dict):
+    """
+    Data context that loads config options with one or more mandatory options.
+
+    Once the required options have been changed from their default values, all
+    config options will be available, namespaced under `config` to prevent
+    potential naming conflicts (for example, between a config option and a
+    relation property).
+
+    :param list *args: List of options that must be changed from their default values.
+    """
+
+    def __init__(self, *args):
+        self.required_options = args
+        self['config'] = hookenv.config()
+        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
+            self.config = yaml.load(fp).get('options', {})
+
+    def __bool__(self):
+        for option in self.required_options:
+            if option not in self['config']:
+                return False
+            current_value = self['config'][option]
+            default_value = self.config[option].get('default')
+            if current_value == default_value:
+                return False
+            if current_value in (None, '') and default_value in (None, ''):
+                return False
+        return True
+
+    def __nonzero__(self):
+        return self.__bool__()
+
+
+class StoredContext(dict):
+    """
+    A data context that always returns the data that it was first created with.
+
+    This is useful to do a one-time generation of things like passwords, that
+    will thereafter use the same value that was originally generated, instead
+    of generating a new value each time it is run.
+    """
+    def __init__(self, file_name, config_data):
+        """
+        If the file exists, populate `self` with the data from the file.
+        Otherwise, populate with the given data and persist it to the file.
+        """
+        if os.path.exists(file_name):
+            self.update(self.read_context(file_name))
+        else:
+            self.store_context(file_name, config_data)
+            self.update(config_data)
+
+    def store_context(self, file_name, config_data):
+        if not os.path.isabs(file_name):
+            file_name = os.path.join(hookenv.charm_dir(), file_name)
+        with open(file_name, 'w') as file_stream:
+            os.fchmod(file_stream.fileno(), 0o600)
+            yaml.dump(config_data, file_stream)
+
+    def read_context(self, file_name):
+        if not os.path.isabs(file_name):
+            file_name = os.path.join(hookenv.charm_dir(), file_name)
+        with open(file_name, 'r') as file_stream:
+            data = yaml.load(file_stream)
+            if not data:
+                raise OSError("%s is empty" % file_name)
+            return data
+
+
+class TemplateCallback(ManagerCallback):
+    """
+    Callback class that will render a Jinja2 template, for use as a ready
+    action.
+
+    :param str source: The template source file, relative to
+        `$CHARM_DIR/templates`
+
+    :param str target: The target to write the rendered template to (or None)
+    :param str owner: The owner of the rendered file
+    :param str group: The group of the rendered file
+    :param int perms: The permissions of the rendered file
+    :param partial on_change_action: functools partial to be executed when
+                                     rendered file changes
+    :param template_loader: A jinja2 template loader
+
+    :return str: The rendered template
+    """
+    def __init__(self, source, target,
+                 owner='root', group='root', perms=0o444,
+                 on_change_action=None, template_loader=None):
+        self.source = source
+        self.target = target
+        self.owner = owner
+        self.group = group
+        self.perms = perms
+        self.on_change_action = on_change_action
+        self.template_loader = template_loader
+
+    def __call__(self, manager, service_name, event_name):
+        pre_checksum = ''
+        if self.on_change_action and os.path.isfile(self.target):
+            pre_checksum = host.file_hash(self.target)
+        service = manager.get_service(service_name)
+        context = {'ctx': {}}
+        for ctx in service.get('required_data', []):
+            context.update(ctx)
+            context['ctx'].update(ctx)
+
+        result = templating.render(self.source, self.target, context,
+                                   self.owner, self.group, self.perms,
+                                   template_loader=self.template_loader)
+        if self.on_change_action:
+            if pre_checksum == host.file_hash(self.target):
+                hookenv.log(
+                    'No change detected: {}'.format(self.target),
+                    hookenv.DEBUG)
+            else:
+                self.on_change_action()
+
+        return result
+
+
+# Convenience aliases for templates
+render_template = template = TemplateCallback
diff --git a/charmhelpers/core/strutils.py b/charmhelpers/core/strutils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8df0452f8203b53947eb137eed22d85ff62dff0
--- /dev/null
+++ b/charmhelpers/core/strutils.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import six
+import re
+
+
+def bool_from_string(value):
+    """Interpret string value as boolean.
+
+    Returns True if value translates to True otherwise False.
+    """
+    if isinstance(value, six.string_types):
+        value = six.text_type(value)
+    else:
+        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
+        raise ValueError(msg)
+
+    value = value.strip().lower()
+
+    if value in ['y', 'yes', 'true', 't', 'on']:
+        return True
+    elif value in ['n', 'no', 'false', 'f', 'off']:
+        return False
+
+    msg = "Unable to interpret string value '%s' as boolean" % (value)
+    raise ValueError(msg)
+
+
+def bytes_from_string(value):
+    """Interpret human readable string value as bytes.
+
+    Returns int
+    """
+    BYTE_POWER = {
+        'K': 1,
+        'KB': 1,
+        'M': 2,
+        'MB': 2,
+        'G': 3,
+        'GB': 3,
+        'T': 4,
+        'TB': 4,
+        'P': 5,
+        'PB': 5,
+    }
+    if isinstance(value, six.string_types):
+        value = six.text_type(value)
+    else:
+        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
+    if matches:
+        size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+    else:
+        # Assume that value passed in is bytes
+        try:
+            size = int(value)
+        except ValueError:
+            msg = "Unable to interpret string value '%s' as bytes" % (value)
+            raise ValueError(msg)
+    return size
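+
+# Illustrative conversions (editor's note, not part of the synced upstream
+# module):
+#
+#     bool_from_string('Yes')    # -> True
+#     bytes_from_string('2MB')   # -> 2097152 (2 * 1024 ** 2)
+#     bytes_from_string('1024')  # -> 1024 (bare values are taken as bytes)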
+
+
+class BasicStringComparator(object):
+    """Provides a class that will compare strings from an iterator type object.
+    Used to provide > and < comparisons on strings that may not necessarily be
+    alphanumerically ordered.  e.g. OpenStack or Ubuntu releases AFTER the
+    z-wrap.
+    """
+
+    _list = None
+
+    def __init__(self, item):
+        if self._list is None:
+            raise Exception("Must define the _list in the class definition!")
+        try:
+            self.index = self._list.index(item)
+        except Exception:
+            raise KeyError("Item '{}' is not in list '{}'"
+                           .format(item, self._list))
+
+    def __eq__(self, other):
+        assert isinstance(other, str) or isinstance(other, self.__class__)
+        return self.index == self._list.index(other)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __lt__(self, other):
+        assert isinstance(other, str) or isinstance(other, self.__class__)
+        return self.index < self._list.index(other)
+
+    def __ge__(self, other):
+        return not self.__lt__(other)
+
+    def __gt__(self, other):
+        assert isinstance(other, str) or isinstance(other, self.__class__)
+        return self.index > self._list.index(other)
+
+    def __le__(self, other):
+        return not self.__gt__(other)
+
+    def __str__(self):
+        """Always give back the item at the index so it can be used in
+        comparisons like:
+
+        s_mitaka = CompareOpenStack('mitaka')
+        s_newton = CompareOpenStack('newton')
+
+        assert s_newton > s_mitaka
+
+        @returns: <string>
+        """
+        return self._list[self.index]
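+
+# Illustrative subclass sketch (editor's note, not part of the synced upstream
+# module); the list and values are hypothetical:
+#
+#     class CompareFruit(BasicStringComparator):
+#         _list = ('apple', 'banana', 'cherry')
+#
+#     assert CompareFruit('banana') > 'apple'
+#     assert CompareFruit('banana') < 'cherry'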
diff --git a/charmhelpers/core/sysctl.py b/charmhelpers/core/sysctl.py
new file mode 100644
index 0000000000000000000000000000000000000000..386428d619bc38edf02dc088bf7ec32767c0ab94
--- /dev/null
+++ b/charmhelpers/core/sysctl.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+
+from subprocess import check_call, CalledProcessError
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    ERROR,
+    WARNING,
+)
+
+from charmhelpers.core.host import is_container
+
+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
+
+
+def create(sysctl_dict, sysctl_file, ignore=False):
+    """Creates a sysctl.conf file from a YAML associative array
+
+    :param sysctl_dict: a dict or YAML-formatted string of sysctl
+                        options eg "{ 'kernel.max_pid': 1337 }"
+    :type sysctl_dict: str or dict
+    :param sysctl_file: path to the sysctl file to be saved
+    :type sysctl_file: str or unicode
+    :param ignore: If True, ignore "unknown variable" errors.
+    :type ignore: bool
+    :returns: None
+    """
+    if type(sysctl_dict) is not dict:
+        try:
+            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+        except yaml.YAMLError:
+            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+                level=ERROR)
+            return
+    else:
+        sysctl_dict_parsed = sysctl_dict
+
+    with open(sysctl_file, "w") as fd:
+        for key, value in sysctl_dict_parsed.items():
+            fd.write("{}={}\n".format(key, value))
+
+    log("Updating sysctl_file: {} values: {}".format(sysctl_file,
+                                                     sysctl_dict_parsed),
+        level=DEBUG)
+
+    call = ["sysctl", "-p", sysctl_file]
+    if ignore:
+        call.append("-e")
+
+    try:
+        check_call(call)
+    except CalledProcessError as e:
+        if is_container():
+            log("Error setting some sysctl keys in this container: {}".format(e.output),
+                level=WARNING)
+        else:
+            raise e
diff --git a/charmhelpers/core/templating.py b/charmhelpers/core/templating.py
new file mode 100644
index 0000000000000000000000000000000000000000..9014015c14ee0b48c775562cd4f0d30884944439
--- /dev/null
+++ b/charmhelpers/core/templating.py
@@ -0,0 +1,93 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+def render(source, target, context, owner='root', group='root',
+           perms=0o444, templates_dir=None, encoding='UTF-8',
+           template_loader=None, config_template=None):
+    """
+    Render a template.
+
+    The `source` path, if not absolute, is relative to the `templates_dir`.
+
+    The `target` path should be absolute.  It can also be `None`, in which
+    case no file will be written.
+
+    The context should be a dict containing the values to be replaced in the
+    template.
+
+    config_template may be provided to render from a provided template instead
+    of loading from a file.
+
+    The `owner`, `group`, and `perms` options will be passed to `write_file`.
+
+    If omitted, `templates_dir` defaults to the `templates` folder in the charm.
+
+    The rendered template will be written to the file as well as being returned
+    as a string.
+
+    Note: Using this requires python-jinja2 or python3-jinja2; if it is not
+    installed, calling this will attempt to use charmhelpers.fetch.apt_install
+    to install it.
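+
+    Example (illustrative; the template name, target path and context values
+    are hypothetical)::
+
+        render('ceph.conf', '/etc/ceph/ceph.conf',
+               {'mon_hosts': '10.0.0.1,10.0.0.2'}, perms=0o640)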
+    """
+    try:
+        from jinja2 import FileSystemLoader, Environment, exceptions
+    except ImportError:
+        try:
+            from charmhelpers.fetch import apt_install
+        except ImportError:
+            hookenv.log('Could not import jinja2, and could not import '
+                        'charmhelpers.fetch to install it',
+                        level=hookenv.ERROR)
+            raise
+        if sys.version_info.major == 2:
+            apt_install('python-jinja2', fatal=True)
+        else:
+            apt_install('python3-jinja2', fatal=True)
+        from jinja2 import FileSystemLoader, Environment, exceptions
+
+    if template_loader:
+        template_env = Environment(loader=template_loader)
+    else:
+        if templates_dir is None:
+            templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
+        template_env = Environment(loader=FileSystemLoader(templates_dir))
+
+    # load from a string if provided explicitly
+    if config_template is not None:
+        template = template_env.from_string(config_template)
+    else:
+        try:
+            template = template_env.get_template(source)
+        except exceptions.TemplateNotFound as e:
+            hookenv.log('Could not load template %s from %s.' %
+                        (source, templates_dir),
+                        level=hookenv.ERROR)
+            raise e
+    content = template.render(context)
+    if target is not None:
+        target_dir = os.path.dirname(target)
+        if not os.path.exists(target_dir):
+            # This is a terrible default directory permission, as the file
+            # or its siblings will often contain secrets.
+            host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
+        host.write_file(target, content.encode(encoding), owner, group, perms)
+    return content
diff --git a/charmhelpers/core/unitdata.py b/charmhelpers/core/unitdata.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab554327b343f896880523fc627c1abea84be29a
--- /dev/null
+++ b/charmhelpers/core/unitdata.py
@@ -0,0 +1,525 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Authors:
+#  Kapil Thangavelu <kapil.foss@gmail.com>
+#
+"""
+Intro
+-----
+
+A simple way to store state in units. This provides a key value
+storage with support for versioned, transactional operation,
+and can calculate deltas from previous values to simplify unit logic
+when processing changes.
+
+
+Hook Integration
+----------------
+
+There are several extant frameworks for hook execution, including
+
+ - charmhelpers.core.hookenv.Hooks
+ - charmhelpers.core.services.ServiceManager
+
+The storage classes are framework agnostic; one simple integration is
+via the HookData context manager. It will record the current hook
+execution environment (including relation data, config data, etc.),
+setup a transaction and allow easy access to the changes from
+previously seen values. One consequence of the integration is the
+reservation of particular keys ('rels', 'unit', 'env', 'config',
+'charm_revisions') for their respective values.
+
+Here's a fully worked integration example using hookenv.Hooks::
+
+       from charmhelpers.core import hookenv, unitdata
+
+       hook_data = unitdata.HookData()
+       db = unitdata.kv()
+       hooks = hookenv.Hooks()
+
+       @hooks.hook
+       def config_changed():
+           # Print all changes to configuration from previously seen
+           # values.
+           for changed, (prev, cur) in hook_data.conf.items():
+               print('config changed', changed,
+                     'previous value', prev,
+                     'current value',  cur)
+
+           # Get some unit specific bookkeeping
+           if not db.get('pkg_key'):
+               key = urllib.urlopen('https://example.com/pkg_key').read()
+               db.set('pkg_key', key)
+
+           # Directly access all charm config as a mapping.
+           conf = db.getrange('config', True)
+
+           # Directly access all relation data as a mapping
+           rels = db.getrange('rels', True)
+
+       if __name__ == '__main__':
+           with hook_data():
+               hooks.execute()
+
+
+A more basic integration is via the hook_scope context manager which simply
+manages transaction scope (and records hook name, and timestamp)::
+
+  >>> from unitdata import kv
+  >>> db = kv()
+  >>> with db.hook_scope('install'):
+  ...    # do work, in transactional scope.
+  ...    db.set('x', 1)
+  >>> db.get('x')
+  1
+
+
+Usage
+-----
+
+Values are automatically json de/serialized to preserve basic typing
+and complex data structure capabilities (dicts, lists, ints, booleans, etc.).
+
+Individual values can be manipulated via get/set::
+
+   >>> kv.set('y', True)
+   >>> kv.get('y')
+   True
+
+   # We can set complex values (dicts, lists) as a single key.
+   >>> kv.set('config', {'a': 1, 'b': True})
+
+   # Also supports returning dictionaries as a record which
+   # provides attribute access.
+   >>> config = kv.get('config', record=True)
+   >>> config.b
+   True
+
+
+Groups of keys can be manipulated with update/getrange::
+
+   >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
+   >>> kv.getrange('gui.', strip=True)
+   {'z': 1, 'y': 2}
+
+When updating values, it's very helpful to understand which values
+have actually changed and how they have changed. The storage
+provides a delta method for this::
+
+   >>> data = {'debug': True, 'option': 2}
+   >>> delta = kv.delta(data, 'config.')
+   >>> delta.debug.previous
+   None
+   >>> delta.debug.current
+   True
+   >>> delta
+   {'debug': (None, True), 'option': (None, 2)}
+
+Note that the delta method does not persist the actual change; it needs to
+be explicitly saved via the 'update' method::
+
+   >>> kv.update(data, 'config.')
+
+Values modified in the context of a hook scope retain historical values
+associated with the hook name::
+
+   >>> with db.hook_scope('config-changed'):
+   ...      db.set('x', 42)
+   >>> db.gethistory('x')
+   [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
+    (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
+
+"""
+
+import collections
+import contextlib
+import datetime
+import itertools
+import json
+import os
+import pprint
+import sqlite3
+import sys
+
+__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
+
+
+class Storage(object):
+    """Simple key value database for local unit state within charms.
+
+    Modifications are not persisted unless :meth:`flush` is called.
+
+    To support dicts, lists, integers, floats, and booleans, values
+    are automatically json encoded/decoded.
+
+    Note: to facilitate unit testing, ':memory:' can be passed as the
+    path parameter which causes sqlite3 to only build the db in memory.
+    This should only be used for testing purposes.
+    """
+    def __init__(self, path=None):
+        self.db_path = path
+        if path is None:
+            if 'UNIT_STATE_DB' in os.environ:
+                self.db_path = os.environ['UNIT_STATE_DB']
+            else:
+                self.db_path = os.path.join(
+                    os.environ.get('CHARM_DIR', ''), '.unit-state.db')
+        if self.db_path != ':memory:':
+            with open(self.db_path, 'a') as f:
+                os.fchmod(f.fileno(), 0o600)
+        self.conn = sqlite3.connect('%s' % self.db_path)
+        self.cursor = self.conn.cursor()
+        self.revision = None
+        self._closed = False
+        self._init()
+
+    def close(self):
+        if self._closed:
+            return
+        self.flush(False)
+        self.cursor.close()
+        self.conn.close()
+        self._closed = True
+
+    def get(self, key, default=None, record=False):
+        self.cursor.execute('select data from kv where key=?', [key])
+        result = self.cursor.fetchone()
+        if not result:
+            return default
+        if record:
+            return Record(json.loads(result[0]))
+        return json.loads(result[0])
+
+    def getrange(self, key_prefix, strip=False):
+        """
+        Get a range of keys starting with a common prefix as a mapping of
+        keys to values.
+
+        :param str key_prefix: Common prefix among all keys
+        :param bool strip: Optionally strip the common prefix from the key
+            names in the returned dict
+        :return dict: A (possibly empty) dict of key-value mappings
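+
+        Example (mirrors the module docstring above)::
+
+            kv.update({'z': 1, 'y': 2}, prefix="gui.")
+            kv.getrange('gui.', strip=True)  # -> {'z': 1, 'y': 2}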
+        """
+        self.cursor.execute("select key, data from kv where key like ?",
+                            ['%s%%' % key_prefix])
+        result = self.cursor.fetchall()
+
+        if not result:
+            return {}
+        if not strip:
+            key_prefix = ''
+        return dict([
+            (k[len(key_prefix):], json.loads(v)) for k, v in result])
+
+    def update(self, mapping, prefix=""):
+        """
+        Set the values of multiple keys at once.
+
+        :param dict mapping: Mapping of keys to values
+        :param str prefix: Optional prefix to apply to all keys in `mapping`
+            before setting
+        """
+        for k, v in mapping.items():
+            self.set("%s%s" % (prefix, k), v)
+
+    def unset(self, key):
+        """
+        Remove a key from the database entirely.
+        """
+        self.cursor.execute('delete from kv where key=?', [key])
+        if self.revision and self.cursor.rowcount:
+            self.cursor.execute(
+                'insert into kv_revisions values (?, ?, ?)',
+                [key, self.revision, json.dumps('DELETED')])
+
+    def unsetrange(self, keys=None, prefix=""):
+        """
+        Remove a range of keys starting with a common prefix from the
+        database entirely.
+
+        :param list keys: List of keys to remove.
+        :param str prefix: Optional prefix to apply to all keys in ``keys``
+            before removing.
+        """
+        if keys is not None:
+            keys = ['%s%s' % (prefix, key) for key in keys]
+            self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
+            if self.revision and self.cursor.rowcount:
+                self.cursor.execute(
+                    'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
+                    list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
+        else:
+            self.cursor.execute('delete from kv where key like ?',
+                                ['%s%%' % prefix])
+            if self.revision and self.cursor.rowcount:
+                self.cursor.execute(
+                    'insert into kv_revisions values (?, ?, ?)',
+                    ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
+
+    def set(self, key, value):
+        """
+        Set a value in the database.
+
+        :param str key: Key to set the value for
+        :param value: Any JSON-serializable value to be set
+        """
+        serialized = json.dumps(value)
+
+        self.cursor.execute('select data from kv where key=?', [key])
+        exists = self.cursor.fetchone()
+
+        # Skip mutations to the same value
+        if exists:
+            if exists[0] == serialized:
+                return value
+
+        if not exists:
+            self.cursor.execute(
+                'insert into kv (key, data) values (?, ?)',
+                (key, serialized))
+        else:
+            self.cursor.execute('''
+            update kv
+            set data = ?
+            where key = ?''', [serialized, key])
+
+        # Save
+        if not self.revision:
+            return value
+
+        self.cursor.execute(
+            'select 1 from kv_revisions where key=? and revision=?',
+            [key, self.revision])
+        exists = self.cursor.fetchone()
+
+        if not exists:
+            self.cursor.execute(
+                '''insert into kv_revisions (
+                revision, key, data) values (?, ?, ?)''',
+                (self.revision, key, serialized))
+        else:
+            self.cursor.execute(
+                '''
+                update kv_revisions
+                set data = ?
+                where key = ?
+                and   revision = ?''',
+                [serialized, key, self.revision])
+
+        return value
+
+    def delta(self, mapping, prefix):
+        """
+        Return a DeltaSet mapping each added, removed or changed key
+        under ``prefix`` to a (previous, current) Delta tuple.
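+
+        Example (mirrors the module docstring above)::
+
+            delta = kv.delta({'debug': True, 'option': 2}, 'config.')
+            delta.debug.current  # -> True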
+        """
+        previous = self.getrange(prefix, strip=True)
+        if not previous:
+            pk = set()
+        else:
+            pk = set(previous.keys())
+        ck = set(mapping.keys())
+        delta = DeltaSet()
+
+        # added
+        for k in ck.difference(pk):
+            delta[k] = Delta(None, mapping[k])
+
+        # removed
+        for k in pk.difference(ck):
+            delta[k] = Delta(previous[k], None)
+
+        # changed
+        for k in pk.intersection(ck):
+            c = mapping[k]
+            p = previous[k]
+            if c != p:
+                delta[k] = Delta(p, c)
+
+        return delta
+
+    @contextlib.contextmanager
+    def hook_scope(self, name=""):
+        """Scope all future interactions to the current hook execution
+        revision."""
+        assert not self.revision
+        self.cursor.execute(
+            'insert into hooks (hook, date) values (?, ?)',
+            (name or sys.argv[0],
+             datetime.datetime.utcnow().isoformat()))
+        self.revision = self.cursor.lastrowid
+        try:
+            yield self.revision
+            self.revision = None
+        except Exception:
+            self.flush(False)
+            self.revision = None
+            raise
+        else:
+            self.flush()
+
+    def flush(self, save=True):
+        if save:
+            self.conn.commit()
+        elif self._closed:
+            return
+        else:
+            self.conn.rollback()
+
+    def _init(self):
+        self.cursor.execute('''
+            create table if not exists kv (
+               key text,
+               data text,
+               primary key (key)
+               )''')
+        self.cursor.execute('''
+            create table if not exists kv_revisions (
+               key text,
+               revision integer,
+               data text,
+               primary key (key, revision)
+               )''')
+        self.cursor.execute('''
+            create table if not exists hooks (
+               version integer primary key autoincrement,
+               hook text,
+               date text
+               )''')
+        self.conn.commit()
+
+    def gethistory(self, key, deserialize=False):
+        self.cursor.execute(
+            '''
+            select kv.revision, kv.key, kv.data, h.hook, h.date
+            from kv_revisions kv,
+                 hooks h
+            where kv.key=?
+             and kv.revision = h.version
+            ''', [key])
+        if deserialize is False:
+            return self.cursor.fetchall()
+        return map(_parse_history, self.cursor.fetchall())
+
+    def debug(self, fh=sys.stderr):
+        self.cursor.execute('select * from kv')
+        pprint.pprint(self.cursor.fetchall(), stream=fh)
+        self.cursor.execute('select * from kv_revisions')
+        pprint.pprint(self.cursor.fetchall(), stream=fh)
+
+
+def _parse_history(d):
+    return (d[0], d[1], json.loads(d[2]), d[3],
+            datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
+
+
+class HookData(object):
+    """Simple integration for existing hook exec frameworks.
+
+    Records all unit information, and stores deltas for processing
+    by the hook.
+
+    Sample::
+
+       from charmhelpers.core import hookenv, unitdata
+
+       changes = unitdata.HookData()
+       db = unitdata.kv()
+       hooks = hookenv.Hooks()
+
+       @hooks.hook
+       def config_changed():
+           # View all changes to configuration
+           for changed, (prev, cur) in changes.conf.items():
+               print('config changed', changed,
+                     'previous value', prev,
+                     'current value',  cur)
+
+           # Get some unit specific bookkeeping
+           if not db.get('pkg_key'):
+               key = urllib.urlopen('https://example.com/pkg_key').read()
+               db.set('pkg_key', key)
+
+       if __name__ == '__main__':
+           with changes():
+               hooks.execute()
+
+    """
+    def __init__(self):
+        self.kv = kv()
+        self.conf = None
+        self.rels = None
+
+    @contextlib.contextmanager
+    def __call__(self):
+        from charmhelpers.core import hookenv
+        hook_name = hookenv.hook_name()
+
+        with self.kv.hook_scope(hook_name):
+            self._record_charm_version(hookenv.charm_dir())
+            delta_config, delta_relation = self._record_hook(hookenv)
+            yield self.kv, delta_config, delta_relation
+
+    def _record_charm_version(self, charm_dir):
+        # Record revisions. Charm revisions are meaningless to charm
+        # authors as they don't control the revision, so logic dependent
+        # on revision is not particularly useful; however, it is useful
+        # for debugging analysis.
+        charm_rev = open(
+            os.path.join(charm_dir, 'revision')).read().strip()
+        charm_rev = charm_rev or '0'
+        revs = self.kv.get('charm_revisions', [])
+        if charm_rev not in revs:
+            revs.append(charm_rev.strip() or '0')
+            self.kv.set('charm_revisions', revs)
+
+    def _record_hook(self, hookenv):
+        data = hookenv.execution_environment()
+        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
+        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
+        self.kv.set('env', dict(data['env']))
+        self.kv.set('unit', data['unit'])
+        self.kv.set('relid', data.get('relid'))
+        return conf_delta, rels_delta
+
+
+class Record(dict):
+
+    __slots__ = ()
+
+    def __getattr__(self, k):
+        if k in self:
+            return self[k]
+        raise AttributeError(k)
+
+
+class DeltaSet(Record):
+
+    __slots__ = ()
+
+
+Delta = collections.namedtuple('Delta', ['previous', 'current'])
+
+
+_KV = None
+
+
+def kv():
+    global _KV
+    if _KV is None:
+        _KV = Storage()
+    return _KV
diff --git a/charmhelpers/fetch/__init__.py b/charmhelpers/fetch/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0cc7fc850a0632568ad78aae9716be718c9ff6b5
--- /dev/null
+++ b/charmhelpers/fetch/__init__.py
@@ -0,0 +1,209 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib
+from charmhelpers.osplatform import get_platform
+from yaml import safe_load
+from charmhelpers.core.hookenv import (
+    config,
+    log,
+)
+
+import six
+if six.PY3:
+    from urllib.parse import urlparse, urlunparse
+else:
+    from urlparse import urlparse, urlunparse
+
+
+# The order of this list is very important. Handlers should be listed from
+# least- to most-specific URL matching.
+FETCH_HANDLERS = (
+    'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
+    'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
+    'charmhelpers.fetch.giturl.GitUrlFetchHandler',
+)
+
+
+class SourceConfigError(Exception):
+    pass
+
+
+class UnhandledSource(Exception):
+    pass
+
+
+class AptLockError(Exception):
+    pass
+
+
+class GPGKeyError(Exception):
+    """Exception occurs when a GPG key cannot be fetched or used.  The message
+    indicates what the problem is.
+    """
+    pass
+
+
+class BaseFetchHandler(object):
+
+    """Base class for FetchHandler implementations in fetch plugins"""
+
+    def can_handle(self, source):
+        """Returns True if the source can be handled. Otherwise returns
+        a string explaining why it cannot"""
+        return "Wrong source type"
+
+    def install(self, source):
+        """Try to download and unpack the source. Return the path to the
+        unpacked files or raise UnhandledSource."""
+        raise UnhandledSource("Wrong source type {}".format(source))
+
+    def parse_url(self, url):
+        return urlparse(url)
+
+    def base_url(self, url):
+        """Return url without querystring or fragment"""
+        parts = list(self.parse_url(url))
+        parts[4:] = ['' for i in parts[4:]]
+        return urlunparse(parts)
+
+
+__platform__ = get_platform()
+module = "charmhelpers.fetch.%s" % __platform__
+fetch = importlib.import_module(module)
+
+filter_installed_packages = fetch.filter_installed_packages
+filter_missing_packages = fetch.filter_missing_packages
+install = fetch.apt_install
+upgrade = fetch.apt_upgrade
+update = _fetch_update = fetch.apt_update
+purge = fetch.apt_purge
+add_source = fetch.add_source
+
+if __platform__ == "ubuntu":
+    apt_cache = fetch.apt_cache
+    apt_install = fetch.apt_install
+    apt_update = fetch.apt_update
+    apt_upgrade = fetch.apt_upgrade
+    apt_purge = fetch.apt_purge
+    apt_autoremove = fetch.apt_autoremove
+    apt_mark = fetch.apt_mark
+    apt_hold = fetch.apt_hold
+    apt_unhold = fetch.apt_unhold
+    import_key = fetch.import_key
+    get_upstream_version = fetch.get_upstream_version
+    apt_pkg = fetch.ubuntu_apt_pkg
+    get_apt_dpkg_env = fetch.get_apt_dpkg_env
+elif __platform__ == "centos":
+    yum_search = fetch.yum_search
+
+
+def configure_sources(update=False,
+                      sources_var='install_sources',
+                      keys_var='install_keys'):
+    """Configure multiple sources from charm configuration.
+
+    The lists are encoded as yaml fragments in the configuration.
+    The fragment needs to be included as a string. Sources and their
+    corresponding keys are of the types supported by add_source().
+
+    Example config:
+        install_sources: |
+          - "ppa:foo"
+          - "http://example.com/repo precise main"
+        install_keys: |
+          - null
+          - "a1b2c3d4"
+
+    Note that 'null' (a.k.a. None) should not be quoted.
+    """
+    sources = safe_load((config(sources_var) or '').strip()) or []
+    keys = safe_load((config(keys_var) or '').strip()) or None
+
+    if isinstance(sources, six.string_types):
+        sources = [sources]
+
+    if keys is None:
+        for source in sources:
+            add_source(source, None)
+    else:
+        if isinstance(keys, six.string_types):
+            keys = [keys]
+
+        if len(sources) != len(keys):
+            raise SourceConfigError(
+                'Install sources and keys lists are different lengths')
+        for source, key in zip(sources, keys):
+            add_source(source, key)
+    if update:
+        _fetch_update(fatal=True)
+
+
+def install_remote(source, *args, **kwargs):
+    """Install a file tree from a remote source.
+
+    The specified source should be a url of the form:
+        scheme://[host]/path[#[option=value][&...]]
+
+    Schemes supported are based on this module's submodules.
+    Options supported are submodule-specific.
+    Additional arguments are passed through to the submodule.
+
+    For example::
+
+        dest = install_remote('http://example.com/archive.tgz',
+                              checksum='deadbeef',
+                              hash_type='sha1')
+
+    This will download `archive.tgz`, validate it using SHA1 and, if
+    the file is ok, extract it and return the directory in which it
+    was extracted.  If the checksum fails, it will raise
+    :class:`charmhelpers.core.host.ChecksumError`.
+    """
+    # We ONLY check for True here because can_handle may return a string
+    # explaining why it can't handle a given source.
+    handlers = [h for h in plugins() if h.can_handle(source) is True]
+    for handler in handlers:
+        try:
+            return handler.install(source, *args, **kwargs)
+        except UnhandledSource as e:
+            log('Install source attempt unsuccessful: {}'.format(e),
+                level='WARNING')
+    raise UnhandledSource("No handler found for source {}".format(source))
+
+
+def install_from_config(config_var_name):
+    """Install a file from config."""
+    charm_config = config()
+    source = charm_config[config_var_name]
+    return install_remote(source)
+
+
+def plugins(fetch_handlers=None):
+    if not fetch_handlers:
+        fetch_handlers = FETCH_HANDLERS
+    plugin_list = []
+    for handler_name in fetch_handlers:
+        package, classname = handler_name.rsplit('.', 1)
+        try:
+            handler_class = getattr(
+                importlib.import_module(package),
+                classname)
+            plugin_list.append(handler_class())
+        except NotImplementedError:
+            # Skip missing plugins so that they can be omitted from
+            # installation if desired
+            log("FetchHandler {} not found, skipping plugin".format(
+                handler_name))
+    return plugin_list
diff --git a/charmhelpers/fetch/archiveurl.py b/charmhelpers/fetch/archiveurl.py
new file mode 100644
index 0000000000000000000000000000000000000000..d25587adeff102c3fc9e402f98746fccbd8a3693
--- /dev/null
+++ b/charmhelpers/fetch/archiveurl.py
@@ -0,0 +1,165 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import hashlib
+import re
+
+from charmhelpers.fetch import (
+    BaseFetchHandler,
+    UnhandledSource
+)
+from charmhelpers.payload.archive import (
+    get_archive_handler,
+    extract,
+)
+from charmhelpers.core.host import mkdir, check_hash
+
+import six
+if six.PY3:
+    from urllib.request import (
+        build_opener, install_opener, urlopen, urlretrieve,
+        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+    )
+    from urllib.parse import urlparse, urlunparse, parse_qs
+    from urllib.error import URLError
+else:
+    from urllib import urlretrieve
+    from urllib2 import (
+        build_opener, install_opener, urlopen,
+        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+        URLError
+    )
+    from urlparse import urlparse, urlunparse, parse_qs
+
+
+def splituser(host):
+    '''urllib.splituser(), but six's support of this seems broken'''
+    _userprog = re.compile('^(.*)@(.*)$')
+    match = _userprog.match(host)
+    if match:
+        return match.group(1, 2)
+    return None, host
+
+
+def splitpasswd(user):
+    '''urllib.splitpasswd(), but six's support of this is missing'''
+    _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
+    match = _passwdprog.match(user)
+    if match:
+        return match.group(1, 2)
+    return user, None
+
+
+class ArchiveUrlFetchHandler(BaseFetchHandler):
+    """
+    Handler to download archive files from arbitrary URLs.
+
+    Can fetch from http, https, ftp, and file URLs.
+
+    Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
+
+    Installs the contents of the archive in $CHARM_DIR/fetched/.
+    """
+    def can_handle(self, source):
+        url_parts = self.parse_url(source)
+        if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
+            # XXX: Why is this returning a boolean and a string? It's
+            # doomed to fail since "bool(can_handle('foo://'))"  will be True.
+            return "Wrong source type"
+        if get_archive_handler(self.base_url(source)):
+            return True
+        return False
+
+    def download(self, source, dest):
+        """
+        Download an archive file.
+
+        :param str source: URL pointing to an archive file.
+        :param str dest: Local path location to download archive file to.
+        """
+        # propagate all exceptions
+        # URLError, OSError, etc
+        proto, netloc, path, params, query, fragment = urlparse(source)
+        if proto in ('http', 'https'):
+            auth, barehost = splituser(netloc)
+            if auth is not None:
+                source = urlunparse((proto, barehost, path, params, query, fragment))
+                username, password = splitpasswd(auth)
+                passman = HTTPPasswordMgrWithDefaultRealm()
+                # Realm is set to None in add_password to force the username and password
+                # to be used whatever the realm
+                passman.add_password(None, source, username, password)
+                authhandler = HTTPBasicAuthHandler(passman)
+                opener = build_opener(authhandler)
+                install_opener(opener)
+        response = urlopen(source)
+        try:
+            with open(dest, 'wb') as dest_file:
+                dest_file.write(response.read())
+        except Exception as e:
+            if os.path.isfile(dest):
+                os.unlink(dest)
+            raise e
+
+    # Mandatory file validation via SHA1 or MD5 hashing.
+    def download_and_validate(self, url, hashsum, validate="sha1"):
+        tempfile, headers = urlretrieve(url)
+        check_hash(tempfile, hashsum, validate)
+        return tempfile
+
+    def install(self, source, dest=None, checksum=None, hash_type='sha1'):
+        """
+        Download and install an archive file, with optional checksum validation.
+
+        The checksum can also be given on the `source` URL's fragment.
+        For example::
+
+            handler.install('http://example.com/file.tgz#sha1=deadbeef')
+
+        :param str source: URL pointing to an archive file.
+        :param str dest: Local destination path to install to. If not given,
+            installs to `$CHARM_DIR/archives/archive_file_name`.
+        :param str checksum: If given, validate the archive file after download.
+        :param str hash_type: Algorithm used to generate `checksum`.
+            Can be any hash algorithm supported by :mod:`hashlib`,
+            such as md5, sha1, sha256, sha512, etc.
+
+        """
+        url_parts = self.parse_url(source)
+        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
+        if not os.path.exists(dest_dir):
+            mkdir(dest_dir, perms=0o755)
+        dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
+        try:
+            self.download(source, dld_file)
+        except URLError as e:
+            raise UnhandledSource(e.reason)
+        except OSError as e:
+            raise UnhandledSource(e.strerror)
+        options = parse_qs(url_parts.fragment)
+        for key, value in options.items():
+            if not six.PY3:
+                algorithms = hashlib.algorithms
+            else:
+                algorithms = hashlib.algorithms_available
+            if key in algorithms:
+                if len(value) != 1:
+                    raise TypeError(
+                        "Expected 1 hash value, not %d" % len(value))
+                expected = value[0]
+                check_hash(dld_file, expected, key)
+        if checksum:
+            check_hash(dld_file, checksum, hash_type)
+        return extract(dld_file, dest)
diff --git a/charmhelpers/fetch/bzrurl.py b/charmhelpers/fetch/bzrurl.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4ab3ff1e6bc7dde24e8ed568a3dc0c6012ddea6
--- /dev/null
+++ b/charmhelpers/fetch/bzrurl.py
@@ -0,0 +1,76 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from subprocess import STDOUT, check_output
+from charmhelpers.fetch import (
+    BaseFetchHandler,
+    UnhandledSource,
+    filter_installed_packages,
+    install,
+)
+from charmhelpers.core.host import mkdir
+
+
+if filter_installed_packages(['bzr']) != []:
+    install(['bzr'])
+    if filter_installed_packages(['bzr']) != []:
+        raise NotImplementedError('Unable to install bzr')
+
+
+class BzrUrlFetchHandler(BaseFetchHandler):
+    """Handler for bazaar branches via generic and lp URLs."""
+
+    def can_handle(self, source):
+        url_parts = self.parse_url(source)
+        if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
+            return False
+        elif not url_parts.scheme:
+            return os.path.exists(os.path.join(source, '.bzr'))
+        else:
+            return True
+
+    def branch(self, source, dest, revno=None):
+        if not self.can_handle(source):
+            raise UnhandledSource("Cannot handle {}".format(source))
+        cmd_opts = []
+        if revno:
+            cmd_opts += ['-r', str(revno)]
+        if os.path.exists(dest):
+            cmd = ['bzr', 'pull']
+            cmd += cmd_opts
+            cmd += ['--overwrite', '-d', dest, source]
+        else:
+            cmd = ['bzr', 'branch']
+            cmd += cmd_opts
+            cmd += [source, dest]
+        check_output(cmd, stderr=STDOUT)
+
+    def install(self, source, dest=None, revno=None):
+        url_parts = self.parse_url(source)
+        branch_name = url_parts.path.strip("/").split("/")[-1]
+        if dest:
+            dest_dir = os.path.join(dest, branch_name)
+        else:
+            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+                                    branch_name)
+
+        if dest and not os.path.exists(dest):
+            mkdir(dest, perms=0o755)
+
+        try:
+            self.branch(source, dest_dir, revno)
+        except OSError as e:
+            raise UnhandledSource(e.strerror)
+        return dest_dir
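+
+
+# Illustrative usage (the branch URL is an example):
+#
+#     handler = BzrUrlFetchHandler()
+#     if handler.can_handle('lp:charm-helpers') is True:
+#         path = handler.install('lp:charm-helpers')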
diff --git a/charmhelpers/fetch/centos.py b/charmhelpers/fetch/centos.py
new file mode 100644
index 0000000000000000000000000000000000000000..a91dcff0645ed541a79cd72af3112bdff393719a
--- /dev/null
+++ b/charmhelpers/fetch/centos.py
@@ -0,0 +1,171 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+import os
+import time
+import six
+import yum
+
+from tempfile import NamedTemporaryFile
+from charmhelpers.core.hookenv import log
+
+YUM_NO_LOCK = 1  # The return code for "couldn't acquire lock" in YUM.
+YUM_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between YUM lock checks.
+YUM_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+
+
+def filter_installed_packages(packages):
+    """Return a list of packages that require installation."""
+    yb = yum.YumBase()
+    package_list = yb.doPackageLists()
+    temp_cache = {p.base_package_name: 1 for p in package_list['installed']}
+
+    _pkgs = [p for p in packages if not temp_cache.get(p, False)]
+    return _pkgs
+
+
+def install(packages, options=None, fatal=False):
+    """Install one or more packages."""
+    cmd = ['yum', '--assumeyes']
+    if options is not None:
+        cmd.extend(options)
+    cmd.append('install')
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Installing {} with options: {}".format(packages,
+                                                options))
+    _run_yum_command(cmd, fatal)
+
+
+def upgrade(options=None, fatal=False, dist=False):
+    """Upgrade all packages."""
+    cmd = ['yum', '--assumeyes']
+    if options is not None:
+        cmd.extend(options)
+    cmd.append('upgrade')
+    log("Upgrading with options: {}".format(options))
+    _run_yum_command(cmd, fatal)
+
+
+def update(fatal=False):
+    """Update local yum cache."""
+    cmd = ['yum', '--assumeyes', 'update']
+    log("Update with fatal: {}".format(fatal))
+    _run_yum_command(cmd, fatal)
+
+
+def purge(packages, fatal=False):
+    """Purge one or more packages."""
+    cmd = ['yum', '--assumeyes', 'remove']
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Purging {}".format(packages))
+    _run_yum_command(cmd, fatal)
+
+
+def yum_search(packages):
+    """Search for a package."""
+    output = {}
+    cmd = ['yum', 'search']
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Searching for {}".format(packages))
+    result = subprocess.check_output(cmd)
+    for package in list(packages):
+        output[package] = package in result
+    return output
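+
+
+# Illustrative call (package names are examples):
+#
+#     found = yum_search(['ceph', 'ceph-common'])  # dict: package -> bool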
+
+
+def add_source(source, key=None):
+    """Add a package source to this system.
+
+    @param source: a URL with a rpm package
+
+    @param key: A key to be added to the system's keyring and used
+    to verify the signatures on packages. Ideally, this should be an
+    ASCII format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver
+    placing your Juju environment at risk.
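+
+    Example (illustrative URL)::
+
+        add_source('http://mirror.example.com/centos/7/extras/x86_64')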
+    """
+    if source is None:
+        log('Source is not present. Skipping')
+        return
+
+    if source.startswith('http'):
+        directory = '/etc/yum.repos.d/'
+        for filename in os.listdir(directory):
+            with open(directory + filename, 'r') as rpm_file:
+                if source in rpm_file.read():
+                    break
+        else:
+            log("Add source: {!r}".format(source))
+            # write in the charms.repo
+            with open(directory + 'Charms.repo', 'a') as rpm_file:
+                rpm_file.write('[%s]\n' % source[7:].replace('/', '_'))
+                rpm_file.write('name=%s\n' % source[7:])
+                rpm_file.write('baseurl=%s\n\n' % source)
+    else:
+        log("Unknown source: {!r}".format(source))
+
+    if key:
+        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
+            with NamedTemporaryFile('w+') as key_file:
+                key_file.write(key)
+                key_file.flush()
+                key_file.seek(0)
+                subprocess.check_call(['rpm', '--import', key_file.name])
+        else:
+            subprocess.check_call(['rpm', '--import', key])
+
+
+def _run_yum_command(cmd, fatal=False):
+    """Run an YUM command.
+
+    Checks the output and retry if the fatal flag is set to True.
+
+    :param: cmd: str: The yum command to run.
+    :param: fatal: bool: Whether the command's output should be checked and
+        retried.
+    """
+    env = os.environ.copy()
+
+    if fatal:
+        retry_count = 0
+        result = None
+
+        # If the command is considered "fatal", we need to retry if the yum
+        # lock was not acquired.
+
+        while result is None or result == YUM_NO_LOCK:
+            try:
+                result = subprocess.check_call(cmd, env=env)
+            except subprocess.CalledProcessError as e:
+                retry_count = retry_count + 1
+                if retry_count > YUM_NO_LOCK_RETRY_COUNT:
+                    raise
+                result = e.returncode
+                log("Couldn't acquire YUM lock. Will retry in {} seconds."
+                    "".format(YUM_NO_LOCK_RETRY_DELAY))
+                time.sleep(YUM_NO_LOCK_RETRY_DELAY)
+
+    else:
+        subprocess.call(cmd, env=env)
diff --git a/charmhelpers/fetch/giturl.py b/charmhelpers/fetch/giturl.py
new file mode 100644
index 0000000000000000000000000000000000000000..070ca9bb5c1a2fdef39f88606ffcaf39bb049410
--- /dev/null
+++ b/charmhelpers/fetch/giturl.py
@@ -0,0 +1,69 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from subprocess import check_output, CalledProcessError, STDOUT
+from charmhelpers.fetch import (
+    BaseFetchHandler,
+    UnhandledSource,
+    filter_installed_packages,
+    install,
+)
+
+if filter_installed_packages(['git']) != []:
+    install(['git'])
+    if filter_installed_packages(['git']) != []:
+        raise NotImplementedError('Unable to install git')
+
+
+class GitUrlFetchHandler(BaseFetchHandler):
+    """Handler for git branches via generic and github URLs."""
+
+    def can_handle(self, source):
+        url_parts = self.parse_url(source)
+        # TODO (mattyw) no support for ssh git@ yet
+        if url_parts.scheme not in ('http', 'https', 'git', ''):
+            return False
+        elif not url_parts.scheme:
+            return os.path.exists(os.path.join(source, '.git'))
+        else:
+            return True
+
+    def clone(self, source, dest, branch="master", depth=None):
+        if not self.can_handle(source):
+            raise UnhandledSource("Cannot handle {}".format(source))
+
+        if os.path.exists(dest):
+            cmd = ['git', '-C', dest, 'pull', source, branch]
+        else:
+            cmd = ['git', 'clone', source, dest, '--branch', branch]
+            if depth:
+                cmd.extend(['--depth', str(depth)])
+        check_output(cmd, stderr=STDOUT)
+
+    def install(self, source, branch="master", dest=None, depth=None):
+        url_parts = self.parse_url(source)
+        branch_name = url_parts.path.strip("/").split("/")[-1]
+        if dest:
+            dest_dir = os.path.join(dest, branch_name)
+        else:
+            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+                                    branch_name)
+        try:
+            self.clone(source, dest_dir, branch, depth)
+        except CalledProcessError as e:
+            raise UnhandledSource(e)
+        except OSError as e:
+            raise UnhandledSource(e.strerror)
+        return dest_dir
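+
+
+# Illustrative usage (the repository URL is an example):
+#
+#     handler = GitUrlFetchHandler()
+#     if handler.can_handle('https://github.com/juju/charm-helpers') is True:
+#         path = handler.install('https://github.com/juju/charm-helpers',
+#                                branch='master')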
diff --git a/charmhelpers/fetch/python/__init__.py b/charmhelpers/fetch/python/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bff99dc93c64f80716e2d5a2b6d0d4e8a2436955
--- /dev/null
+++ b/charmhelpers/fetch/python/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2019 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/charmhelpers/fetch/python/debug.py b/charmhelpers/fetch/python/debug.py
new file mode 100644
index 0000000000000000000000000000000000000000..757135ee4cf3b5ff4c02305126f5ca3940892afc
--- /dev/null
+++ b/charmhelpers/fetch/python/debug.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import atexit
+import sys
+
+from charmhelpers.fetch.python.rpdb import Rpdb
+from charmhelpers.core.hookenv import (
+    open_port,
+    close_port,
+    ERROR,
+    log
+)
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+DEFAULT_ADDR = "0.0.0.0"
+DEFAULT_PORT = 4444
+
+
+def _error(message):
+    log(message, level=ERROR)
+
+
+def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
+    """
+    Set a trace point using the remote debugger
+    """
+    atexit.register(close_port, port)
+    try:
+        log("Starting a remote python debugger session on %s:%s" % (addr,
+                                                                    port))
+        open_port(port)
+        debugger = Rpdb(addr=addr, port=port)
+        debugger.set_trace(sys._getframe().f_back)
+    except Exception:
+        _error("Cannot start a remote debug session on %s:%s" % (addr,
+                                                                 port))
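+
+
+# Illustrative usage from within a hook (the port shown is the module
+# default); attach afterwards with e.g. `telnet <unit-ip> 4444`:
+#
+#     from charmhelpers.fetch.python.debug import set_trace
+#     set_trace(port=DEFAULT_PORT)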
diff --git a/charmhelpers/fetch/python/packages.py b/charmhelpers/fetch/python/packages.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e95028bc540aace84a2ec6c1bcc4de2663e8a87
--- /dev/null
+++ b/charmhelpers/fetch/python/packages.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import six
+import subprocess
+import sys
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import charm_dir, log
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+
+def pip_execute(*args, **kwargs):
+    """Overriden pip_execute() to stop sys.path being changed.
+
+    The act of importing main from the pip module seems to cause add wheels
+    from the /usr/share/python-wheels which are installed by various tools.
+    This function ensures that sys.path remains the same after the call is
+    executed.
+    """
+    try:
+        _path = sys.path
+        try:
+            from pip import main as _pip_execute
+        except ImportError:
+            apt_update()
+            if six.PY2:
+                apt_install('python-pip')
+            else:
+                apt_install('python3-pip')
+            from pip import main as _pip_execute
+        _pip_execute(*args, **kwargs)
+    finally:
+        sys.path = _path
+
+
+def parse_options(given, available):
+    """Given a set of options, check if available"""
+    for key, value in sorted(given.items()):
+        if not value:
+            continue
+        if key in available:
+            yield "--{0}={1}".format(key, value)
+
+
+def pip_install_requirements(requirements, constraints=None, **options):
+    """Install a requirements file.
+
+    :param constraints: Path to pip constraints file.
+    http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
+    """
+    command = ["install"]
+
+    available_options = ('proxy', 'src', 'log', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    command.append("-r {0}".format(requirements))
+    if constraints:
+        command.append("-c {0}".format(constraints))
+        log("Installing from file: {} with constraints {} "
+            "and options: {}".format(requirements, constraints, command))
+    else:
+        log("Installing from file: {} with options: {}".format(requirements,
+                                                               command))
+    pip_execute(command)
+
+
+def pip_install(package, fatal=False, upgrade=False, venv=None,
+                constraints=None, **options):
+    """Install a python package"""
+    if venv:
+        venv_python = os.path.join(venv, 'bin/pip')
+        command = [venv_python, "install"]
+    else:
+        command = ["install"]
+
+    available_options = ('proxy', 'src', 'log', 'index-url', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    if upgrade:
+        command.append('--upgrade')
+
+    if constraints:
+        command.extend(['-c', constraints])
+
+    if isinstance(package, list):
+        command.extend(package)
+    else:
+        command.append(package)
+
+    log("Installing {} package with options: {}".format(package,
+                                                        command))
+    if venv:
+        subprocess.check_call(command)
+    else:
+        pip_execute(command)
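+
+# Illustrative calls (package names and the venv path are examples):
+#
+#     pip_install('tox', upgrade=True)
+#     pip_install(['pyyaml', 'jinja2'], venv='/opt/myvenv')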
+
+
+def pip_uninstall(package, **options):
+    """Uninstall a python package"""
+    command = ["uninstall", "-q", "-y"]
+
+    available_options = ('proxy', 'log', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    if isinstance(package, list):
+        command.extend(package)
+    else:
+        command.append(package)
+
+    log("Uninstalling {} package with options: {}".format(package,
+                                                          command))
+    pip_execute(command)
+
+
+def pip_list():
+    """Returns the list of current python installed packages
+    """
+    return pip_execute(["list"])
+
+
+def pip_create_virtualenv(path=None):
+    """Create an isolated Python environment."""
+    if six.PY2:
+        apt_install('python-virtualenv')
+    else:
+        apt_install('python3-virtualenv')
+
+    if path:
+        venv_path = path
+    else:
+        venv_path = os.path.join(charm_dir(), 'venv')
+
+    if not os.path.exists(venv_path):
+        subprocess.check_call(['virtualenv', venv_path])
diff --git a/charmhelpers/fetch/python/rpdb.py b/charmhelpers/fetch/python/rpdb.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b31610c22fc2d24fe5097016cf45728f87de4ae
--- /dev/null
+++ b/charmhelpers/fetch/python/rpdb.py
@@ -0,0 +1,56 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Remote Python Debugger (pdb wrapper)."""
+
+import pdb
+import socket
+import sys
+
+__author__ = "Bertrand Janin <b@janin.com>"
+__version__ = "0.1.3"
+
+
+class Rpdb(pdb.Pdb):
+
+    def __init__(self, addr="127.0.0.1", port=4444):
+        """Initialize the socket and initialize pdb."""
+
+        # Backup stdin and stdout before replacing them by the socket handle
+        self.old_stdout = sys.stdout
+        self.old_stdin = sys.stdin
+
+        # Open a 'reusable' socket to let the webapp reload on the same port
+        self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
+        self.skt.bind((addr, port))
+        self.skt.listen(1)
+        (clientsocket, address) = self.skt.accept()
+        handle = clientsocket.makefile('rw')
+        pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)
+        sys.stdout = sys.stdin = handle
+
+    def shutdown(self):
+        """Revert stdin and stdout, close the socket."""
+        sys.stdout = self.old_stdout
+        sys.stdin = self.old_stdin
+        self.skt.close()
+        self.set_continue()
+
+    def do_continue(self, arg):
+        """Stop all operation on ``continue``."""
+        self.shutdown()
+        return 1
+
+    do_EOF = do_quit = do_exit = do_c = do_cont = do_continue
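# Editor's sketch (not part of the patch): attaching to Rpdb from another
# shell. Instantiating Rpdb blocks in accept() until a client connects,
# e.g. with ``nc 127.0.0.1 4444``; typing ``continue`` detaches and restores
# stdio via shutdown(). set_trace() is inherited from pdb.Pdb.
from charmhelpers.fetch.python.rpdb import Rpdb

def suspicious_hook():
    debugger = Rpdb(addr="127.0.0.1", port=4444)  # waits for a client here
    debugger.set_trace()                          # interactive pdb over the socket
    return 42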
diff --git a/charmhelpers/fetch/python/version.py b/charmhelpers/fetch/python/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..3eb421036ff737f8ff1684e85ff87703e30fe543
--- /dev/null
+++ b/charmhelpers/fetch/python/version.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+
+def current_version():
+    """Current system python version"""
+    return sys.version_info
+
+
+def current_version_string():
+    """Current system python version as string major.minor.micro"""
+    return "{0}.{1}.{2}".format(sys.version_info.major,
+                                sys.version_info.minor,
+                                sys.version_info.micro)
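# Editor's sketch (not part of the patch): sys.version_info compares
# naturally against tuples, so the helpers above can gate version-specific
# charm logic.
from charmhelpers.fetch.python.version import (
    current_version,
    current_version_string,
)

if current_version() < (3, 6):
    print("Python {} is older than 3.6".format(current_version_string()))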
diff --git a/charmhelpers/fetch/snap.py b/charmhelpers/fetch/snap.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc70aa941bc4f0bb5ff126237db65705b9e4a10a
--- /dev/null
+++ b/charmhelpers/fetch/snap.py
@@ -0,0 +1,150 @@
+# Copyright 2014-2017 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Charm helpers snap for classic charms.
+
+If writing reactive charms, use the snap layer:
+https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
+"""
+import subprocess
+import os
+from time import sleep
+from charmhelpers.core.hookenv import log
+
+__author__ = 'Joseph Borg <joseph.borg@canonical.com>'
+
+# The return code for "couldn't acquire lock" in Snap
+# (hopefully this will be improved).
+SNAP_NO_LOCK = 1
+SNAP_NO_LOCK_RETRY_DELAY = 10  # Wait X seconds between Snap lock checks.
+SNAP_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+SNAP_CHANNELS = [
+    'edge',
+    'beta',
+    'candidate',
+    'stable',
+]
+
+
+class CouldNotAcquireLockException(Exception):
+    pass
+
+
+class InvalidSnapChannel(Exception):
+    pass
+
+
+def _snap_exec(commands):
+    """
+    Execute snap commands.
+
+    :param commands: List of arguments to pass to the snap command
+    :return: Integer exit code
+    """
+    assert isinstance(commands, list)
+
+    retry_count = 0
+    return_code = None
+
+    while return_code is None or return_code == SNAP_NO_LOCK:
+        try:
+            return_code = subprocess.check_call(['snap'] + commands,
+                                                env=os.environ)
+        except subprocess.CalledProcessError as e:
+            retry_count += 1
+            if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
+                raise CouldNotAcquireLockException(
+                    'Could not acquire lock after {} attempts'
+                    .format(SNAP_NO_LOCK_RETRY_COUNT))
+            return_code = e.returncode
+            log('Snap failed to acquire lock, trying again in {} seconds.'
+                .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN')
+            sleep(SNAP_NO_LOCK_RETRY_DELAY)
+
+    return return_code
+
+
+def snap_install(packages, *flags):
+    """
+    Install a snap package.
+
+    :param packages: Package name(s), as a string or a list of strings
+    :param flags: Flag strings to pass to the install command
+    :return: Integer return code from snap
+    """
+    if type(packages) is not list:
+        packages = [packages]
+
+    flags = list(flags)
+
+    message = 'Installing snap(s) "%s"' % ', '.join(packages)
+    if flags:
+        message += ' with option(s) "%s"' % ', '.join(flags)
+
+    log(message, level='INFO')
+    return _snap_exec(['install'] + flags + packages)
+
+
+def snap_remove(packages, *flags):
+    """
+    Remove a snap package.
+
+    :param packages: Package name(s), as a string or a list of strings
+    :param flags: Flag strings to pass to the remove command
+    :return: Integer return code from snap
+    """
+    if type(packages) is not list:
+        packages = [packages]
+
+    flags = list(flags)
+
+    message = 'Removing snap(s) "%s"' % ', '.join(packages)
+    if flags:
+        message += ' with options "%s"' % ', '.join(flags)
+
+    log(message, level='INFO')
+    return _snap_exec(['remove'] + flags + packages)
+
+
+def snap_refresh(packages, *flags):
+    """
+    Refresh / Update snap package.
+
+    :param packages: Package name(s), as a string or a list of strings
+    :param flags: Flag strings to pass to the refresh command
+    :return: Integer return code from snap
+    """
+    if type(packages) is not list:
+        packages = [packages]
+
+    flags = list(flags)
+
+    message = 'Refreshing snap(s) "%s"' % ', '.join(packages)
+    if flags:
+        message += ' with options "%s"' % ', '.join(flags)
+
+    log(message, level='INFO')
+    return _snap_exec(['refresh'] + flags + packages)
+
+
+def valid_snap_channel(channel):
+    """ Validate snap channel exists
+
+    :raises InvalidSnapChannel: When channel does not exist
+    :return: Boolean
+    """
+    if channel.lower() in SNAP_CHANNELS:
+        return True
+    else:
+        raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel))
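# Editor's sketch (not part of the patch): typical use of the snap helpers
# above. The snap names and the '--classic' / '--channel' flags are
# illustrative; _snap_exec() retries transparently while snapd holds its lock.
from charmhelpers.fetch.snap import snap_install, snap_refresh, valid_snap_channel

valid_snap_channel('stable')                 # raises InvalidSnapChannel otherwise
snap_install(['core', 'juju'], '--classic')  # snap install --classic core juju
snap_refresh('juju', '--channel=stable')     # snap refresh --channel=stable juju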
diff --git a/charmhelpers/fetch/ubuntu.py b/charmhelpers/fetch/ubuntu.py
new file mode 100644
index 0000000000000000000000000000000000000000..b59530199644ada56321e021a87635a617ac9d8c
--- /dev/null
+++ b/charmhelpers/fetch/ubuntu.py
@@ -0,0 +1,813 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import OrderedDict
+import platform
+import re
+import six
+import subprocess
+import sys
+import time
+
+from charmhelpers.core.host import get_distrib_codename, get_system_env
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    WARNING,
+    env_proxy_settings,
+)
+from charmhelpers.fetch import SourceConfigError, GPGKeyError
+from charmhelpers.fetch import ubuntu_apt_pkg
+
+PROPOSED_POCKET = (
+    "# Proposed\n"
+    "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe "
+    "multiverse restricted\n")
+PROPOSED_PORTS_POCKET = (
+    "# Proposed\n"
+    "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe "
+    "multiverse restricted\n")
+# Only the architectures listed below are supported at the moment.
+ARCH_TO_PROPOSED_POCKET = {
+    'x86_64': PROPOSED_POCKET,
+    'ppc64le': PROPOSED_PORTS_POCKET,
+    'aarch64': PROPOSED_PORTS_POCKET,
+    's390x': PROPOSED_PORTS_POCKET,
+}
+CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
+CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
+CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
+deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
+"""
+CLOUD_ARCHIVE_POCKETS = {
+    # Folsom
+    'folsom': 'precise-updates/folsom',
+    'folsom/updates': 'precise-updates/folsom',
+    'precise-folsom': 'precise-updates/folsom',
+    'precise-folsom/updates': 'precise-updates/folsom',
+    'precise-updates/folsom': 'precise-updates/folsom',
+    'folsom/proposed': 'precise-proposed/folsom',
+    'precise-folsom/proposed': 'precise-proposed/folsom',
+    'precise-proposed/folsom': 'precise-proposed/folsom',
+    # Grizzly
+    'grizzly': 'precise-updates/grizzly',
+    'grizzly/updates': 'precise-updates/grizzly',
+    'precise-grizzly': 'precise-updates/grizzly',
+    'precise-grizzly/updates': 'precise-updates/grizzly',
+    'precise-updates/grizzly': 'precise-updates/grizzly',
+    'grizzly/proposed': 'precise-proposed/grizzly',
+    'precise-grizzly/proposed': 'precise-proposed/grizzly',
+    'precise-proposed/grizzly': 'precise-proposed/grizzly',
+    # Havana
+    'havana': 'precise-updates/havana',
+    'havana/updates': 'precise-updates/havana',
+    'precise-havana': 'precise-updates/havana',
+    'precise-havana/updates': 'precise-updates/havana',
+    'precise-updates/havana': 'precise-updates/havana',
+    'havana/proposed': 'precise-proposed/havana',
+    'precise-havana/proposed': 'precise-proposed/havana',
+    'precise-proposed/havana': 'precise-proposed/havana',
+    # Icehouse
+    'icehouse': 'precise-updates/icehouse',
+    'icehouse/updates': 'precise-updates/icehouse',
+    'precise-icehouse': 'precise-updates/icehouse',
+    'precise-icehouse/updates': 'precise-updates/icehouse',
+    'precise-updates/icehouse': 'precise-updates/icehouse',
+    'icehouse/proposed': 'precise-proposed/icehouse',
+    'precise-icehouse/proposed': 'precise-proposed/icehouse',
+    'precise-proposed/icehouse': 'precise-proposed/icehouse',
+    # Juno
+    'juno': 'trusty-updates/juno',
+    'juno/updates': 'trusty-updates/juno',
+    'trusty-juno': 'trusty-updates/juno',
+    'trusty-juno/updates': 'trusty-updates/juno',
+    'trusty-updates/juno': 'trusty-updates/juno',
+    'juno/proposed': 'trusty-proposed/juno',
+    'trusty-juno/proposed': 'trusty-proposed/juno',
+    'trusty-proposed/juno': 'trusty-proposed/juno',
+    # Kilo
+    'kilo': 'trusty-updates/kilo',
+    'kilo/updates': 'trusty-updates/kilo',
+    'trusty-kilo': 'trusty-updates/kilo',
+    'trusty-kilo/updates': 'trusty-updates/kilo',
+    'trusty-updates/kilo': 'trusty-updates/kilo',
+    'kilo/proposed': 'trusty-proposed/kilo',
+    'trusty-kilo/proposed': 'trusty-proposed/kilo',
+    'trusty-proposed/kilo': 'trusty-proposed/kilo',
+    # Liberty
+    'liberty': 'trusty-updates/liberty',
+    'liberty/updates': 'trusty-updates/liberty',
+    'trusty-liberty': 'trusty-updates/liberty',
+    'trusty-liberty/updates': 'trusty-updates/liberty',
+    'trusty-updates/liberty': 'trusty-updates/liberty',
+    'liberty/proposed': 'trusty-proposed/liberty',
+    'trusty-liberty/proposed': 'trusty-proposed/liberty',
+    'trusty-proposed/liberty': 'trusty-proposed/liberty',
+    # Mitaka
+    'mitaka': 'trusty-updates/mitaka',
+    'mitaka/updates': 'trusty-updates/mitaka',
+    'trusty-mitaka': 'trusty-updates/mitaka',
+    'trusty-mitaka/updates': 'trusty-updates/mitaka',
+    'trusty-updates/mitaka': 'trusty-updates/mitaka',
+    'mitaka/proposed': 'trusty-proposed/mitaka',
+    'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
+    'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
+    # Newton
+    'newton': 'xenial-updates/newton',
+    'newton/updates': 'xenial-updates/newton',
+    'xenial-newton': 'xenial-updates/newton',
+    'xenial-newton/updates': 'xenial-updates/newton',
+    'xenial-updates/newton': 'xenial-updates/newton',
+    'newton/proposed': 'xenial-proposed/newton',
+    'xenial-newton/proposed': 'xenial-proposed/newton',
+    'xenial-proposed/newton': 'xenial-proposed/newton',
+    # Ocata
+    'ocata': 'xenial-updates/ocata',
+    'ocata/updates': 'xenial-updates/ocata',
+    'xenial-ocata': 'xenial-updates/ocata',
+    'xenial-ocata/updates': 'xenial-updates/ocata',
+    'xenial-updates/ocata': 'xenial-updates/ocata',
+    'ocata/proposed': 'xenial-proposed/ocata',
+    'xenial-ocata/proposed': 'xenial-proposed/ocata',
+    'xenial-proposed/ocata': 'xenial-proposed/ocata',
+    # Pike
+    'pike': 'xenial-updates/pike',
+    'xenial-pike': 'xenial-updates/pike',
+    'xenial-pike/updates': 'xenial-updates/pike',
+    'xenial-updates/pike': 'xenial-updates/pike',
+    'pike/proposed': 'xenial-proposed/pike',
+    'xenial-pike/proposed': 'xenial-proposed/pike',
+    'xenial-proposed/pike': 'xenial-proposed/pike',
+    # Queens
+    'queens': 'xenial-updates/queens',
+    'xenial-queens': 'xenial-updates/queens',
+    'xenial-queens/updates': 'xenial-updates/queens',
+    'xenial-updates/queens': 'xenial-updates/queens',
+    'queens/proposed': 'xenial-proposed/queens',
+    'xenial-queens/proposed': 'xenial-proposed/queens',
+    'xenial-proposed/queens': 'xenial-proposed/queens',
+    # Rocky
+    'rocky': 'bionic-updates/rocky',
+    'bionic-rocky': 'bionic-updates/rocky',
+    'bionic-rocky/updates': 'bionic-updates/rocky',
+    'bionic-updates/rocky': 'bionic-updates/rocky',
+    'rocky/proposed': 'bionic-proposed/rocky',
+    'bionic-rocky/proposed': 'bionic-proposed/rocky',
+    'bionic-proposed/rocky': 'bionic-proposed/rocky',
+    # Stein
+    'stein': 'bionic-updates/stein',
+    'bionic-stein': 'bionic-updates/stein',
+    'bionic-stein/updates': 'bionic-updates/stein',
+    'bionic-updates/stein': 'bionic-updates/stein',
+    'stein/proposed': 'bionic-proposed/stein',
+    'bionic-stein/proposed': 'bionic-proposed/stein',
+    'bionic-proposed/stein': 'bionic-proposed/stein',
+    # Train
+    'train': 'bionic-updates/train',
+    'bionic-train': 'bionic-updates/train',
+    'bionic-train/updates': 'bionic-updates/train',
+    'bionic-updates/train': 'bionic-updates/train',
+    'train/proposed': 'bionic-proposed/train',
+    'bionic-train/proposed': 'bionic-proposed/train',
+    'bionic-proposed/train': 'bionic-proposed/train',
+    # Ussuri
+    'ussuri': 'bionic-updates/ussuri',
+    'bionic-ussuri': 'bionic-updates/ussuri',
+    'bionic-ussuri/updates': 'bionic-updates/ussuri',
+    'bionic-updates/ussuri': 'bionic-updates/ussuri',
+    'ussuri/proposed': 'bionic-proposed/ussuri',
+    'bionic-ussuri/proposed': 'bionic-proposed/ussuri',
+    'bionic-proposed/ussuri': 'bionic-proposed/ussuri',
+    # Victoria
+    'victoria': 'focal-updates/victoria',
+    'focal-victoria': 'focal-updates/victoria',
+    'focal-victoria/updates': 'focal-updates/victoria',
+    'focal-updates/victoria': 'focal-updates/victoria',
+    'victoria/proposed': 'focal-proposed/victoria',
+    'focal-victoria/proposed': 'focal-proposed/victoria',
+    'focal-proposed/victoria': 'focal-proposed/victoria',
+}
+
+
+APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
+CMD_RETRY_DELAY = 10  # Wait 10 seconds between command retries.
+CMD_RETRY_COUNT = 3  # Retry a failing fatal command X times.
+
+
+def filter_installed_packages(packages):
+    """Return a list of packages that require installation."""
+    cache = apt_cache()
+    _pkgs = []
+    for package in packages:
+        try:
+            p = cache[package]
+            if not p.current_ver:
+                _pkgs.append(package)
+        except KeyError:
+            log('Package {} has no installation candidate.'.format(package),
+                level='WARNING')
+            _pkgs.append(package)
+    return _pkgs
+
+
+def filter_missing_packages(packages):
+    """Return a list of packages that are installed.
+
+    :param packages: list of packages to evaluate.
+    :returns list: Packages that are installed.
+    """
+    return list(
+        set(packages) -
+        set(filter_installed_packages(packages))
+    )
+
+
+def apt_cache(*_, **__):
+    """Shim returning an object simulating the apt_pkg Cache.
+
+    :param _: Accept arguments for compatibility, not used.
+    :type _: any
+    :param __: Accept keyword arguments for compatibility, not used.
+    :type __: any
+    :returns: Object used to interrogate the system apt and dpkg databases.
+    :rtype: ubuntu_apt_pkg.Cache
+    """
+    if 'apt_pkg' in sys.modules:
+        # NOTE(fnordahl): When our consumers use the upstream ``apt_pkg`` module
+        # in conjunction with the apt_cache helper function, they may expect us
+        # to call ``apt_pkg.init()`` for them.
+        #
+        # Detect this situation, log a warning and make the call to
+        # ``apt_pkg.init()`` to avoid the consumer Python interpreter from
+        # crashing with a segmentation fault.
+        log('Support for use of upstream ``apt_pkg`` module in conjunction '
+            'with charm-helpers is deprecated since 2019-06-25', level=WARNING)
+        sys.modules['apt_pkg'].init()
+    return ubuntu_apt_pkg.Cache()
+
+
+def apt_install(packages, options=None, fatal=False):
+    """Install one or more packages.
+
+    :param packages: Package(s) to install
+    :type packages: Option[str, List[str]]
+    :param options: Options to pass on to apt-get
+    :type options: Option[None, List[str]]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
+    if options is None:
+        options = ['--option=Dpkg::Options::=--force-confold']
+
+    cmd = ['apt-get', '--assume-yes']
+    cmd.extend(options)
+    cmd.append('install')
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Installing {} with options: {}".format(packages,
+                                                options))
+    _run_apt_command(cmd, fatal)
+
+
+def apt_upgrade(options=None, fatal=False, dist=False):
+    """Upgrade all packages.
+
+    :param options: Options to pass on to apt-get
+    :type options: Option[None, List[str]]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :param dist: Whether ``dist-upgrade`` should be used over ``upgrade``
+    :type dist: bool
+    :raises: subprocess.CalledProcessError
+    """
+    if options is None:
+        options = ['--option=Dpkg::Options::=--force-confold']
+
+    cmd = ['apt-get', '--assume-yes']
+    cmd.extend(options)
+    if dist:
+        cmd.append('dist-upgrade')
+    else:
+        cmd.append('upgrade')
+    log("Upgrading with options: {}".format(options))
+    _run_apt_command(cmd, fatal)
+
+
+def apt_update(fatal=False):
+    """Update local apt cache."""
+    cmd = ['apt-get', 'update']
+    _run_apt_command(cmd, fatal)
+
+
+def apt_purge(packages, fatal=False):
+    """Purge one or more packages.
+
+    :param packages: Package(s) to purge
+    :type packages: Option[str, List[str]]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = ['apt-get', '--assume-yes', 'purge']
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Purging {}".format(packages))
+    _run_apt_command(cmd, fatal)
+
+
+def apt_autoremove(purge=True, fatal=False):
+    """Purge one or more packages.
+    :param purge: Whether the ``--purge`` option should be passed on or not.
+    :type purge: bool
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = ['apt-get', '--assume-yes', 'autoremove']
+    if purge:
+        cmd.append('--purge')
+    _run_apt_command(cmd, fatal)
+
+
+def apt_mark(packages, mark, fatal=False):
+    """Flag one or more packages using apt-mark."""
+    log("Marking {} as {}".format(packages, mark))
+    cmd = ['apt-mark', mark]
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+
+    if fatal:
+        subprocess.check_call(cmd, universal_newlines=True)
+    else:
+        subprocess.call(cmd, universal_newlines=True)
+
+
+def apt_hold(packages, fatal=False):
+    return apt_mark(packages, 'hold', fatal=fatal)
+
+
+def apt_unhold(packages, fatal=False):
+    return apt_mark(packages, 'unhold', fatal=fatal)
+
+
+def import_key(key):
+    """Import an ASCII Armor key.
+
+    A Radix64 format keyid is also supported for backwards
+    compatibility. In this case Ubuntu keyserver will be
+    queried for a key via HTTPS by its keyid. This method
+    is less preferable because https proxy servers may
+    require traffic decryption which is equivalent to a
+    man-in-the-middle attack (a proxy server impersonates
+    keyserver TLS certificates and has to be explicitly
+    trusted by the system).
+
+    :param key: A GPG key in ASCII armor format,
+                  including BEGIN and END markers or a keyid.
+    :type key: (bytes, str)
+    :raises: GPGKeyError if the key could not be imported
+    """
+    key = key.strip()
+    if '-' in key or '\n' in key:
+        # Send everything not obviously a keyid to GPG to import, as
+        # we trust its validation better than our own. eg. handling
+        # comments before the key.
+        log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
+        if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
+                '-----END PGP PUBLIC KEY BLOCK-----' in key):
+            log("Writing provided PGP key in the binary format", level=DEBUG)
+            if six.PY3:
+                key_bytes = key.encode('utf-8')
+            else:
+                key_bytes = key
+            key_name = _get_keyid_by_gpg_key(key_bytes)
+            key_gpg = _dearmor_gpg_key(key_bytes)
+            _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
+        else:
+            raise GPGKeyError("ASCII armor markers missing from GPG key")
+    else:
+        log("PGP key found (looks like Radix64 format)", level=WARNING)
+        log("SECURELY importing PGP key from keyserver; "
+            "full key not provided.", level=WARNING)
+        # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL
+        # to retrieve GPG keys. `apt-key adv` command is deprecated as is
+        # apt-key in general as noted in its manpage. See lp:1433761 for more
+        # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop
+        # the dearmored (binary) GPG key material.
+        key_asc = _get_key_by_keyid(key)
+        # write the key in GPG format so that apt-key list shows it
+        key_gpg = _dearmor_gpg_key(key_asc)
+        _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg)
+
+
+def _get_keyid_by_gpg_key(key_material):
+    """Get a GPG key fingerprint by GPG key material.
+    Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded
+    or binary GPG key material. Can be used, for example, to generate file
+    names for keys passed via charm options.
+
+    :param key_material: ASCII armor-encoded or binary GPG key material
+    :type key_material: bytes
+    :raises: GPGKeyError if invalid key material has been provided
+    :returns: A GPG key fingerprint
+    :rtype: str
+    """
+    # Use the same gpg command for both Xenial and Bionic
+    cmd = 'gpg --with-colons --with-fingerprint'
+    ps = subprocess.Popen(cmd.split(),
+                          stdout=subprocess.PIPE,
+                          stderr=subprocess.PIPE,
+                          stdin=subprocess.PIPE)
+    out, err = ps.communicate(input=key_material)
+    if six.PY3:
+        out = out.decode('utf-8')
+        err = err.decode('utf-8')
+    if 'gpg: no valid OpenPGP data found.' in err:
+        raise GPGKeyError('Invalid GPG key material provided')
+    # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)
+    return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1)
+
+
+def _get_key_by_keyid(keyid):
+    """Get a key via HTTPS from the Ubuntu keyserver.
+    Different key ID formats are supported by SKS keyservers (the longer ones
+    are more secure, see "dead beef attack" and https://evil32.com/). Since
+    HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will
+    impersonate keyserver.ubuntu.com and generate a certificate with
+    keyserver.ubuntu.com in the CN field or in SubjAltName fields of a
+    certificate. If such proxy behavior is expected it is necessary to add the
+    CA certificate chain containing the intermediate CA of the SSLBump proxy to
+    every machine that this code runs on via ca-certs cloud-init directive (via
+    cloudinit-userdata model-config) or via other means (such as through a
+    custom charm option). Also note that DNS resolution for the hostname in a
+    URL is done at a proxy server - not at the client side.
+
+    8-digit (32 bit) key ID
+    https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6
+    16-digit (64 bit) key ID
+    https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6
+    40-digit key ID:
+    https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6
+
+    :param keyid: An 8, 16 or 40 hex digit keyid to find a key for
+    :type keyid: (bytes, str)
+    :returns: A key material for the specified GPG key id
+    :rtype: (str, bytes)
+    :raises: subprocess.CalledProcessError
+    """
+    # options=mr - machine-readable output (disables html wrappers)
+    keyserver_url = ('https://keyserver.ubuntu.com'
+                     '/pks/lookup?op=get&options=mr&exact=on&search=0x{}')
+    curl_cmd = ['curl', keyserver_url.format(keyid)]
+    # use proxy server settings in order to retrieve the key
+    return subprocess.check_output(curl_cmd,
+                                   env=env_proxy_settings(['https']))
+
+
+def _dearmor_gpg_key(key_asc):
+    """Converts a GPG key in the ASCII armor format to the binary format.
+
+    :param key_asc: A GPG key in ASCII armor format.
+    :type key_asc: (str, bytes)
+    :returns: A GPG key in binary format
+    :rtype: (str, bytes)
+    :raises: GPGKeyError
+    """
+    ps = subprocess.Popen(['gpg', '--dearmor'],
+                          stdout=subprocess.PIPE,
+                          stderr=subprocess.PIPE,
+                          stdin=subprocess.PIPE)
+    out, err = ps.communicate(input=key_asc)
+    # no need to decode output as it is binary (invalid utf-8), only error
+    if six.PY3:
+        err = err.decode('utf-8')
+    if 'gpg: no valid OpenPGP data found.' in err:
+        raise GPGKeyError('Invalid GPG key material. Check your network setup'
+                          ' (MTU, routing, DNS) and/or proxy server settings'
+                          ' as well as destination keyserver status.')
+    else:
+        return out
+
+
+def _write_apt_gpg_keyfile(key_name, key_material):
+    """Writes GPG key material into a file at a provided path.
+
+    :param key_name: A key name to use for a key file (could be a fingerprint)
+    :type key_name: str
+    :param key_material: A GPG key material (binary)
+    :type key_material: (str, bytes)
+    """
+    with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
+              'wb') as keyf:
+        keyf.write(key_material)
+
+
+def add_source(source, key=None, fail_invalid=False):
+    """Add a package source to this system.
+
+    @param source: a URL or sources.list entry, as supported by
+    add-apt-repository(1). Examples::
+
+        ppa:charmers/example
+        deb https://stub:key@private.example.com/ubuntu trusty main
+
+    In addition:
+        'proposed:' may be used to enable the standard 'proposed'
+        pocket for the release.
+        'cloud:' may be used to activate official cloud archive pockets,
+        such as 'cloud:icehouse'
+        'distro' may be used as a noop
+
+    Full list of source specifications supported by the function are:
+
+    'distro': A NOP; i.e. it has no effect.
+    'proposed': the proposed deb spec [2] is written to
+      /etc/apt/sources.list.d/proposed.list
+    'distro-proposed': adds <version>-proposed to the debs [2]
+    'ppa:<ppa-name>': add-apt-repository --yes <ppa_name>
+    'deb <deb-spec>': add-apt-repository --yes deb <deb-spec>
+    'http://....': add-apt-repository --yes http://...
+    'cloud-archive:<spec>': add-apt-repository -yes cloud-archive:<spec>
+    'cloud:<release>[-staging]': specify a Cloud Archive pocket <release> with
+      optional staging version.  If staging is used then the staging PPA
+      will be used.  If staging is NOT used then the cloud archive [3] will be
+      added, and the 'ubuntu-cloud-keyring' package will be added for the
+      current distro.
+
+    Otherwise the source is not recognised and this is logged to the juju log.
+    However, no error is raised, unless fail_invalid is True.
+
+    [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
+        where {} is replaced with the derived pocket name.
+    [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \
+        main universe multiverse restricted
+        where {} is replaced with the lsb_release codename (e.g. xenial)
+    [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu <pocket>
+        to /etc/apt/sources.list.d/cloud-archive-list
+
+    @param key: A key to be added to the system's APT keyring and used
+    to verify the signatures on packages. Ideally, this should be an
+    ASCII format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver
+    placing your Juju environment at risk. ppa and cloud archive keys
+    are securely added automatically, so should not be provided.
+
+    @param fail_invalid: (boolean) if True, then the function raises a
+    SourceConfigError if there is no matching installation source.
+
+    @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
+    valid pocket in CLOUD_ARCHIVE_POCKETS
+    """
+    _mapping = OrderedDict([
+        (r"^distro$", lambda: None),  # This is a NOP
+        (r"^(?:proposed|distro-proposed)$", _add_proposed),
+        (r"^cloud-archive:(.*)$", _add_apt_repository),
+        (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
+        (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
+        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
+        (r"^cloud:(.*)$", _add_cloud_pocket),
+        (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
+    ])
+    if source is None:
+        source = ''
+    for r, fn in six.iteritems(_mapping):
+        m = re.match(r, source)
+        if m:
+            if key:
+                # Import key before adding the source which depends on it,
+                # as refreshing packages could fail otherwise.
+                try:
+                    import_key(key)
+                except GPGKeyError as e:
+                    raise SourceConfigError(str(e))
+            # call the associated function with the captured groups
+            # raises SourceConfigError on error.
+            fn(*m.groups())
+            break
+    else:
+        # nothing matched.  log an error and maybe sys.exit
+        err = "Unknown source: {!r}".format(source)
+        log(err)
+        if fail_invalid:
+            raise SourceConfigError(err)
+
+
+def _add_proposed():
+    """Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list
+
+    Uses get_distrib_codename to determine the correct stanza for
+    the deb line.
+
+    For Intel architectures PROPOSED_POCKET is used for the release, but for
+    other architectures PROPOSED_PORTS_POCKET is used for the release.
+    """
+    release = get_distrib_codename()
+    arch = platform.machine()
+    if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
+        raise SourceConfigError("Arch {} not supported for (distro-)proposed"
+                                .format(arch))
+    with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
+        apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release))
+
+
+def _add_apt_repository(spec):
+    """Add the spec using add_apt_repository
+
+    :param spec: the parameter to pass to add_apt_repository
+    :type spec: str
+    """
+    if '{series}' in spec:
+        series = get_distrib_codename()
+        spec = spec.replace('{series}', series)
+    # software-properties package for bionic properly reacts to proxy settings
+    # passed as environment variables (See lp:1433761). This is not the case
+    # for LTS and non-LTS releases below bionic.
+    _run_with_retries(['add-apt-repository', '--yes', spec],
+                      cmd_env=env_proxy_settings(['https', 'http']))
+
+
+def _add_cloud_pocket(pocket):
+    """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list
+
+    Note that this overwrites the existing file if there is one.
+
+    This function also converts the simple pocket into the actual pocket using
+    the CLOUD_ARCHIVE_POCKETS mapping.
+
+    :param pocket: string representing the pocket to add a deb spec for.
+    :raises: SourceConfigError if the cloud pocket doesn't exist or the
+        requested release doesn't match the current distro version.
+    """
+    apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
+                fatal=True)
+    if pocket not in CLOUD_ARCHIVE_POCKETS:
+        raise SourceConfigError(
+            'Unsupported cloud: source option %s' %
+            pocket)
+    actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
+    with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
+        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
+
+
+def _add_cloud_staging(cloud_archive_release, openstack_release):
+    """Add the cloud staging repository which is in
+    ppa:ubuntu-cloud-archive/<openstack_release>-staging
+
+    This function checks that the cloud_archive_release matches the current
+    codename for the distro that charm is being installed on.
+
+    :param cloud_archive_release: string, codename for the release.
+    :param openstack_release: String, codename for the openstack release.
+    :raises: SourceConfigError if the cloud_archive_release doesn't match the
+        current version of the os.
+    """
+    _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
+    ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release)
+    cmd = 'add-apt-repository -y {}'.format(ppa)
+    _run_with_retries(cmd.split(' '))
+
+
+def _add_cloud_distro_check(cloud_archive_release, openstack_release):
+    """Add the cloud pocket, but also check the cloud_archive_release against
+    the current distro, and use the openstack_release as the full lookup.
+
+    This just calls _add_cloud_pocket() with the openstack_release as pocket
+    to get the correct cloud-archive.list for dpkg to work with.
+
+    :param cloud_archive_release: String, codename for the distro release.
+    :param openstack_release: String, spec for the release to look up in the
+        CLOUD_ARCHIVE_POCKETS
+    :raises: SourceConfigError if this is the wrong distro, or the pocket spec
+        doesn't exist.
+    """
+    _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
+    _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release))
+
+
+def _verify_is_ubuntu_rel(release, os_release):
+    """Verify that the release is in the same as the current ubuntu release.
+
+    :param release: String, lowercase for the release.
+    :param os_release: String, the os_release being asked for
+    :raises: SourceConfigError if the release is not the same as the ubuntu
+        release.
+    """
+    ubuntu_rel = get_distrib_codename()
+    if release != ubuntu_rel:
+        raise SourceConfigError(
+            'Invalid Cloud Archive release specified: {}-{} on this Ubuntu '
+            'version ({})'.format(release, os_release, ubuntu_rel))
+
+
+def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
+                      retry_message="", cmd_env=None):
+    """Run a command and retry until success or max_retries is reached.
+
+    :param cmd: The command to run, as a list of arguments.
+    :type cmd: List[str]
+    :param max_retries: The number of retries to attempt on a fatal
+                        command. Defaults to CMD_RETRY_COUNT.
+    :type max_retries: int
+    :param retry_exitcodes: Optional additional exit codes to retry.
+                            Defaults to retry on exit code 1.
+    :type retry_exitcodes: tuple
+    :param retry_message: Optional log prefix emitted during retries.
+    :type retry_message: str
+    :param cmd_env: Environment variables to add to the command run.
+    :type cmd_env: Option[None, Dict[str, str]]
+    """
+    env = get_apt_dpkg_env()
+    if cmd_env:
+        env.update(cmd_env)
+
+    if not retry_message:
+        retry_message = "Failed executing '{}'".format(" ".join(cmd))
+    retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY)
+
+    retry_count = 0
+    result = None
+
+    retry_results = (None,) + retry_exitcodes
+    while result in retry_results:
+        try:
+            result = subprocess.check_call(cmd, env=env)
+        except subprocess.CalledProcessError as e:
+            retry_count = retry_count + 1
+            if retry_count > max_retries:
+                raise
+            result = e.returncode
+            log(retry_message)
+            time.sleep(CMD_RETRY_DELAY)
+
+
+def _run_apt_command(cmd, fatal=False):
+    """Run an apt command with optional retries.
+
+    :param cmd: The apt command to run.
+    :type cmd: List[str]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    """
+    if fatal:
+        _run_with_retries(
+            cmd, retry_exitcodes=(1, APT_NO_LOCK,),
+            retry_message="Couldn't acquire DPKG lock")
+    else:
+        subprocess.call(cmd, env=get_apt_dpkg_env())
+
+
+def get_upstream_version(package):
+    """Determine upstream version based on installed package
+
+    @returns None (if not installed) or the upstream version
+    """
+    cache = apt_cache()
+    try:
+        pkg = cache[package]
+    except Exception:
+        # the package is unknown to the current apt cache.
+        return None
+
+    if not pkg.current_ver:
+        # package is known, but no version is currently installed.
+        return None
+
+    return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str)
+
+
+def get_apt_dpkg_env():
+    """Get environment suitable for execution of APT and DPKG tools.
+
+    We keep this in a helper function instead of in a global constant to
+    avoid execution on import of the library.
+    :returns: Environment suitable for execution of APT and DPKG tools.
+    :rtype: Dict[str, str]
+    """
+    # The fallback is used in the event of ``/etc/environment`` not containing
+    # a valid PATH variable.
+    return {'DEBIAN_FRONTEND': 'noninteractive',
+            'PATH': get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')}
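# Editor's sketch (not part of the patch): the usual install-hook flow built
# from the helpers above. 'cloud:bionic-train' assumes a bionic host (see
# _verify_is_ubuntu_rel); the package name is illustrative.
from charmhelpers.fetch.ubuntu import (
    add_source,
    apt_update,
    apt_install,
    filter_installed_packages,
)

add_source('cloud:bionic-train', fail_invalid=True)  # writes cloud-archive.list
apt_update(fatal=True)
apt_install(filter_installed_packages(['ceph-common']), fatal=True)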
diff --git a/charmhelpers/fetch/ubuntu_apt_pkg.py b/charmhelpers/fetch/ubuntu_apt_pkg.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2fbe0e51079b986c96cf71f39e7c70d19c3a50b
--- /dev/null
+++ b/charmhelpers/fetch/ubuntu_apt_pkg.py
@@ -0,0 +1,312 @@
+# Copyright 2019 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Provide a subset of the ``python-apt`` module API.
+
+Data collection is done through subprocess calls to ``apt-cache`` and
+``dpkg-query`` commands.
+
+The main purpose for this module is to avoid dependency on the
+``python-apt`` python module.
+
+The indicated python module is a wrapper around the ``apt`` C++ library
+which is tightly connected to the version of the distribution it was
+shipped on.  It is not developed in a backward/forward compatible manner.
+
+This in turn makes it incredibly hard to distribute as a wheel for a piece
+of python software that supports a span of distro releases [0][1].
+
+Upstream feedback like [2] does not give confidence that this will ever
+change, so with this module we get rid of the dependency.
+
+0: https://github.com/juju-solutions/layer-basic/pull/135
+1: https://bugs.launchpad.net/charm-octavia/+bug/1824112
+2: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=845330#10
+"""
+
+import locale
+import os
+import subprocess
+import sys
+
+
+class _container(dict):
+    """Simple container for attributes."""
+    __getattr__ = dict.__getitem__
+    __setattr__ = dict.__setitem__
+
+
+class Package(_container):
+    """Simple container for package attributes."""
+
+
+class Version(_container):
+    """Simple container for version attributes."""
+
+
+class Cache(object):
+    """Simulation of ``apt_pkg`` Cache object."""
+    def __init__(self, progress=None):
+        pass
+
+    def __contains__(self, package):
+        try:
+            pkg = self.__getitem__(package)
+            return pkg is not None
+        except KeyError:
+            return False
+
+    def __getitem__(self, package):
+        """Get information about a package from apt and dpkg databases.
+
+        :param package: Name of package
+        :type package: str
+        :returns: Package object
+        :rtype: object
+        :raises: KeyError, subprocess.CalledProcessError
+        """
+        apt_result = self._apt_cache_show([package])[package]
+        apt_result['name'] = apt_result.pop('package')
+        pkg = Package(apt_result)
+        dpkg_result = self._dpkg_list([package]).get(package, {})
+        current_ver = None
+        installed_version = dpkg_result.get('version')
+        if installed_version:
+            current_ver = Version({'ver_str': installed_version})
+        pkg.current_ver = current_ver
+        pkg.architecture = dpkg_result.get('architecture')
+        return pkg
+
+    def _dpkg_list(self, packages):
+        """Get data from system dpkg database for package.
+
+        :param packages: Packages to get data from
+        :type packages: List[str]
+        :returns: Structured data about installed packages, keys like
+                  ``dpkg-query --list``
+        :rtype: dict
+        :raises: subprocess.CalledProcessError
+        """
+        pkgs = {}
+        cmd = ['dpkg-query', '--list']
+        cmd.extend(packages)
+        if locale.getlocale() == (None, None):
+            # subprocess calls out to locale.getpreferredencoding(False) to
+            # determine encoding.  Workaround for Trusty where the
+            # environment appears to not be set up correctly.
+            locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
+        try:
+            output = subprocess.check_output(cmd,
+                                             stderr=subprocess.STDOUT,
+                                             universal_newlines=True)
+        except subprocess.CalledProcessError as cp:
+            # ``dpkg-query`` may return error and at the same time have
+            # produced useful output, for example when asked for multiple
+            # packages where some are not installed
+            if cp.returncode != 1:
+                raise
+            output = cp.output
+        headings = []
+        for line in output.splitlines():
+            if line.startswith('||/'):
+                headings = line.split()
+                headings.pop(0)
+                continue
+            elif (line.startswith('|') or line.startswith('+') or
+                  line.startswith('dpkg-query:')):
+                continue
+            else:
+                data = line.split(None, 4)
+                status = data.pop(0)
+                if status not in ('ii', 'hi'):
+                    continue
+                pkg = {}
+                pkg.update({k.lower(): v for k, v in zip(headings, data)})
+                if 'name' in pkg:
+                    pkgs.update({pkg['name']: pkg})
+        return pkgs
+
+    def _apt_cache_show(self, packages):
+        """Get data from system apt cache for package.
+
+        :param packages: Packages to get data from
+        :type packages: List[str]
+        :returns: Structured data about package, keys like
+                  ``apt-cache show``
+        :rtype: dict
+        :raises: subprocess.CalledProcessError
+        """
+        pkgs = {}
+        cmd = ['apt-cache', 'show', '--no-all-versions']
+        cmd.extend(packages)
+        if locale.getlocale() == (None, None):
+            # subprocess calls out to locale.getpreferredencoding(False) to
+            # determine encoding.  Workaround for Trusty where the
+            # environment appears to not be set up correctly.
+            locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
+        try:
+            output = subprocess.check_output(cmd,
+                                             stderr=subprocess.STDOUT,
+                                             universal_newlines=True)
+            previous = None
+            pkg = {}
+            for line in output.splitlines():
+                if not line:
+                    if 'package' in pkg:
+                        pkgs.update({pkg['package']: pkg})
+                        pkg = {}
+                    continue
+                if line.startswith(' '):
+                    if previous and previous in pkg:
+                        pkg[previous] += os.linesep + line.lstrip()
+                    continue
+                if ':' in line:
+                    kv = line.split(':', 1)
+                    key = kv[0].lower()
+                    if key == 'n':
+                        continue
+                    previous = key
+                    pkg.update({key: kv[1].lstrip()})
+        except subprocess.CalledProcessError as cp:
+            # ``apt-cache`` returns 100 if none of the packages asked for
+            # exist in the apt cache.
+            if cp.returncode != 100:
+                raise
+        return pkgs
+
+
+class Config(_container):
+    def __init__(self):
+        super(Config, self).__init__(self._populate())
+
+    def _populate(self):
+        cfgs = {}
+        cmd = ['apt-config', 'dump']
+        output = subprocess.check_output(cmd,
+                                         stderr=subprocess.STDOUT,
+                                         universal_newlines=True)
+        for line in output.splitlines():
+            if not line.startswith("CommandLine"):
+                k, v = line.split(" ", 1)
+                cfgs[k] = v.strip(";").strip("\"")
+
+        return cfgs
+
+
+# Backwards compatibility with old apt_pkg module
+sys.modules[__name__].config = Config()
+
+
+def init():
+    """Compability shim that does nothing."""
+    pass
+
+
+def upstream_version(version):
+    """Extracts upstream version from a version string.
+
+    Upstream reference: https://salsa.debian.org/apt-team/apt/blob/master/
+                                apt-pkg/deb/debversion.cc#L259
+
+    :param version: Version string
+    :type version: str
+    :returns: Upstream version
+    :rtype: str
+    """
+    if version:
+        version = version.split(':')[-1]
+        version = version.split('-')[0]
+    return version
+
+
+def version_compare(a, b):
+    """Compare the given versions.
+
+    Call out to ``dpkg`` to make sure the code doing the comparison is
+    compatible with what the ``apt`` library would do.  Mimic the return
+    values.
+
+    Upstream reference:
+    https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html
+            ?highlight=version_compare#apt_pkg.version_compare
+
+    :param a: version string
+    :type a: str
+    :param b: version string
+    :type b: str
+    :returns: >0 if ``a`` is greater than ``b``, 0 if a equals b,
+              <0 if ``a`` is smaller than ``b``
+    :rtype: int
+    :raises: subprocess.CalledProcessError, RuntimeError
+    """
+    for op in ('gt', 1), ('eq', 0), ('lt', -1):
+        try:
+            subprocess.check_call(['dpkg', '--compare-versions',
+                                   a, op[0], b],
+                                  stderr=subprocess.STDOUT,
+                                  universal_newlines=True)
+            return op[1]
+        except subprocess.CalledProcessError as cp:
+            if cp.returncode == 1:
+                continue
+            raise
+    else:
+        raise RuntimeError('Unable to compare "{}" and "{}", according to '
+                           'our logic they are neither greater than, equal '
+                           'to, nor less than each other.'.format(a, b))
+
+
+class PkgVersion():
+    """Allow package versions to be compared.
+
+    For example::
+
+        >>> import charmhelpers.fetch as fetch
+        >>> (fetch.apt_pkg.PkgVersion('2:20.4.0') <
+        ...  fetch.apt_pkg.PkgVersion('2:20.5.0'))
+        True
+        >>> pkgs = [fetch.apt_pkg.PkgVersion('2:20.4.0'),
+        ...         fetch.apt_pkg.PkgVersion('2:21.4.0'),
+        ...         fetch.apt_pkg.PkgVersion('2:17.4.0')]
+        >>> pkgs.sort()
+        >>> pkgs
+        [2:17.4.0, 2:20.4.0, 2:21.4.0]
+    """
+
+    def __init__(self, version):
+        self.version = version
+
+    def __lt__(self, other):
+        return version_compare(self.version, other.version) == -1
+
+    def __le__(self, other):
+        return self.__lt__(other) or self.__eq__(other)
+
+    def __gt__(self, other):
+        return version_compare(self.version, other.version) == 1
+
+    def __ge__(self, other):
+        return self.__gt__(other) or self.__eq__(other)
+
+    def __eq__(self, other):
+        return version_compare(self.version, other.version) == 0
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __repr__(self):
+        return self.version
+
+    def __hash__(self):
+        return hash(repr(self))
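# Editor's sketch (not part of the patch): interrogating the apt/dpkg
# databases through the shim above, mirroring the python-apt calling
# convention. The package name is illustrative.
from charmhelpers.fetch import ubuntu_apt_pkg

cache = ubuntu_apt_pkg.Cache()
if 'openssh-server' in cache:
    pkg = cache['openssh-server']
    if pkg.current_ver:                       # None when not installed
        ver = pkg.current_ver.ver_str
        print(ubuntu_apt_pkg.upstream_version(ver))
        print(ubuntu_apt_pkg.version_compare(ver, '1:7.0') > 0)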
diff --git a/charmhelpers/osplatform.py b/charmhelpers/osplatform.py
new file mode 100644
index 0000000000000000000000000000000000000000..78c81af5955caee51271c830d58cacac2cab9bcc
--- /dev/null
+++ b/charmhelpers/osplatform.py
@@ -0,0 +1,46 @@
+import platform
+import os
+
+
+def get_platform():
+    """Return the current OS platform.
+
+    For example: if current os platform is Ubuntu then a string "ubuntu"
+    will be returned (which is the name of the module).
+    This string is used to decide which platform module should be imported.
+    """
+    # linux_distribution is deprecated and was removed in Python 3.8
+    # Warnings *not* disabled, as we certainly need to fix this.
+    if hasattr(platform, 'linux_distribution'):
+        tuple_platform = platform.linux_distribution()
+        current_platform = tuple_platform[0]
+    else:
+        current_platform = _get_platform_from_fs()
+
+    if "Ubuntu" in current_platform:
+        return "ubuntu"
+    elif "CentOS" in current_platform:
+        return "centos"
+    elif "debian" in current_platform:
+        # Stock Python does not detect Ubuntu and instead returns debian.
+        # Or at least it does in some build environments like Travis CI
+        return "ubuntu"
+    elif "elementary" in current_platform:
+        # ElementaryOS fails to run tests locally without this.
+        return "ubuntu"
+    else:
+        raise RuntimeError("This module is not supported on {}."
+                           .format(current_platform))
+
+
+def _get_platform_from_fs():
+    """Get Platform from /etc/os-release."""
+    with open(os.path.join(os.sep, 'etc', 'os-release')) as fin:
+        content = dict(
+            line.split('=', 1)
+            for line in fin.read().splitlines()
+            if '=' in line
+        )
+    for k, v in content.items():
+        content[k] = v.strip('"')
+    return content["NAME"]
diff --git a/charmhelpers/payload/__init__.py b/charmhelpers/payload/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee55cb3d2baddb556df910f1d41638c3c7f39c59
--- /dev/null
+++ b/charmhelpers/payload/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"Tools for working with files injected into a charm just before deployment."
diff --git a/charmhelpers/payload/execd.py b/charmhelpers/payload/execd.py
new file mode 100644
index 0000000000000000000000000000000000000000..1502aa0b596f0b1a2017ccb4543a35999774431d
--- /dev/null
+++ b/charmhelpers/payload/execd.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import subprocess
+from charmhelpers.core import hookenv
+
+
+def default_execd_dir():
+    return os.path.join(os.environ['CHARM_DIR'], 'exec.d')
+
+
+def execd_module_paths(execd_dir=None):
+    """Generate a list of full paths to modules within execd_dir."""
+    if not execd_dir:
+        execd_dir = default_execd_dir()
+
+    if not os.path.exists(execd_dir):
+        return
+
+    for subpath in os.listdir(execd_dir):
+        module = os.path.join(execd_dir, subpath)
+        if os.path.isdir(module):
+            yield module
+
+
+def execd_submodule_paths(command, execd_dir=None):
+    """Generate a list of full paths to the specified command within exec_dir.
+    """
+    for module_path in execd_module_paths(execd_dir):
+        path = os.path.join(module_path, command)
+        if os.access(path, os.X_OK) and os.path.isfile(path):
+            yield path
+
+
+def execd_run(command, execd_dir=None, die_on_error=True, stderr=subprocess.STDOUT):
+    """Run command for each module within execd_dir which defines it."""
+    for submodule_path in execd_submodule_paths(command, execd_dir):
+        try:
+            subprocess.check_output(submodule_path, stderr=stderr,
+                                    universal_newlines=True)
+        except subprocess.CalledProcessError as e:
+            hookenv.log("Error ({}) running  {}. Output: {}".format(
+                e.returncode, e.cmd, e.output))
+            if die_on_error:
+                sys.exit(e.returncode)
+
+
+def execd_preinstall(execd_dir=None):
+    """Run charm-pre-install for each module within execd_dir."""
+    execd_run('charm-pre-install', execd_dir=execd_dir)
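# Editor's sketch (not part of the patch): execd_preinstall() is meant to run
# at the top of an install hook so that any exec.d/*/charm-pre-install
# payloads execute before packages are fetched. The hook function name is
# illustrative.
from charmhelpers.payload.execd import execd_preinstall

def install():
    execd_preinstall()  # runs each executable exec.d/<module>/charm-pre-install
    # ... continue with normal package installation ...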
diff --git a/config.yaml b/config.yaml
index d8780fe9afd3decb340bf2998ec16b59053b4904..533dac62f88ac03b83ef421217759f35e005a28c 100644
--- a/config.yaml
+++ b/config.yaml
@@ -59,3 +59,18 @@ options:
       .
       Valid options are "cephx" and "none". If "none" is specified, keys will
       still be created and deployed so that it can be enabled later.
+  user-keys:
+    type: string
+    default: ""
+    description: |
+      A space-separated list of <username>:<cephx-base64-key> pairs used to
+      look up authentication keys for specific users instead of creating a
+      user and key via ceph-mon.
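+      .
+      For example (hypothetical key material):
+      user-keys: "client.glance:QVFCbEJsYWg9PQ== client.cinder:QVFEZm9vYmFyZw=="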
+  admin-user:
+    type: string
+    default: "client.admin"
+    description: |
+      A configurable admin user name. Used for scenarios where pools are
+      pre-created and the user given to charm-ceph-proxy simply needs to
+      check the existence of a given pool and error out if one does not
+      exist. Can be used in conjunction with user-keys.
diff --git a/files/nagios/check_ceph_status.py b/files/nagios/check_ceph_status.py
index cb8d1a1a0bbedb4a6a7a6b945b018d09ab268364..c70e64598eb6a189fb2390e5d23a7cec7b8eb54a 100755
--- a/files/nagios/check_ceph_status.py
+++ b/files/nagios/check_ceph_status.py
@@ -15,30 +15,37 @@ def check_ceph_status(args):
         nagios_plugin.check_file_freshness(args.status_file, 3600)
         with open(args.status_file, "r") as f:
             lines = f.readlines()
-            status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1)
+            status_data = dict(
+                line.strip().split(' ', 1) for line in lines if len(line) > 1
+            )
     else:
         lines = subprocess.check_output(["ceph", "status"]).split('\n')
-        status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1)
+        status_data = dict(
+            line.strip().split(' ', 1) for line in lines if len(line) > 1
+        )
 
-    if ('health' not in status_data
-            or 'monmap' not in status_data
-            or 'osdmap'not in status_data):
+    if ('health' not in status_data or
+            'monmap' not in status_data or
+            'osdmap' not in status_data):
         raise nagios_plugin.UnknownError('UNKNOWN: status data is incomplete')
 
     if status_data['health'] != 'HEALTH_OK':
-        msg = 'CRITICAL: ceph health status: "{}"'.format(status_data['health'])
+        msg = 'CRITICAL: ceph health status: "{}"'.format(
+            status_data['health'])
         raise nagios_plugin.CriticalError(msg)
-    osds = re.search("^.*: (\d+) osds: (\d+) up, (\d+) in", status_data['osdmap'])
+    osds = re.search(r"^.*: (\d+) osds: (\d+) up, (\d+) in",
+                     status_data['osdmap'])
     if osds.group(1) > osds.group(2):  # not all OSDs are "up"
         msg = 'CRITICAL: Some OSDs are not up. Total: {}, up: {}'.format(
             osds.group(1), osds.group(2))
         raise nagios_plugin.CriticalError(msg)
-    print "All OK"
+    print("All OK")
 
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Check ceph status')
     parser.add_argument('-f', '--file', dest='status_file',
-                        default=False, help='Optional file with "ceph status" output')
+                        default=False,
+                        help='Optional file with "ceph status" output')
     args = parser.parse_args()
     nagios_plugin.try_check(check_ceph_status, args)
diff --git a/hooks/ceph.py b/hooks/ceph.py
index 44b28497ddf32576b4ad55662cbb2c23474f856e..96d242bda12e98153563f5b67b03bd43493608cd 100644
--- a/hooks/ceph.py
+++ b/hooks/ceph.py
@@ -11,6 +11,7 @@ import time
 import os
 import re
 import sys
+import collections
 
 from charmhelpers.contrib.storage.linux.utils import (
     is_block_device,
@@ -27,10 +28,12 @@ from charmhelpers.core.host import (
 )
 from charmhelpers.core.hookenv import (
     log,
+    DEBUG,
     ERROR,
     cached,
     status_set,
     WARNING,
+    config,
 )
 from charmhelpers.fetch import (
     apt_cache
@@ -43,7 +46,8 @@ LEADER = 'leader'
 PEON = 'peon'
 QUORUM = [LEADER, PEON]
 
-PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs']
+PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs']
+PACKAGES_FOCAL = ['ceph', 'gdisk', 'ntp', 'btrfs-progs', 'xfsprogs']
 
 
 def ceph_user():
@@ -85,7 +89,7 @@ def get_version():
     package = "ceph"
     try:
         pkg = cache[package]
-    except:
+    except Exception:
         # the package is unknown to the current apt cache.
         e = 'Could not determine version of package with no installation ' \
             'candidate: %s' % package
@@ -100,7 +104,7 @@ def get_version():
 
     # x.y match only for 20XX.X
     # and ignore patch level for other packages
-    match = re.match('^(\d+)\.(\d+)', vers)
+    match = re.match(r'^(\d+)\.(\d+)', vers)
 
     if match:
         vers = match.group(0)
@@ -126,7 +130,7 @@ def is_quorum():
     ]
     if os.path.exists(asok):
         try:
-            result = json.loads(subprocess.check_output(cmd))
+            result = json.loads(subprocess.check_output(cmd).decode('utf-8'))
         except subprocess.CalledProcessError:
             return False
         except ValueError:
@@ -153,7 +157,7 @@ def is_leader():
     ]
     if os.path.exists(asok):
         try:
-            result = json.loads(subprocess.check_output(cmd))
+            result = json.loads(subprocess.check_output(cmd).decode('utf-8'))
         except subprocess.CalledProcessError:
             return False
         except ValueError:
@@ -199,7 +203,9 @@ DISK_FORMATS = [
 
 def is_osd_disk(dev):
     try:
-        info = subprocess.check_output(['sgdisk', '-i', '1', dev])
+        info = (subprocess
+                .check_output(['sgdisk', '-i', '1', dev])
+                .decode('utf-8'))
         info = info.split("\n")  # IGNORE:E1103
         for line in info:
             if line.startswith(
@@ -264,10 +270,11 @@ def generate_monitor_secret():
         '--name=mon.',
         '--gen-key'
     ]
-    res = subprocess.check_output(cmd)
+    res = subprocess.check_output(cmd).decode('utf-8')
 
     return "{}==".format(res.split('=')[1].strip())
 
+
 # OSD caps taken from ceph-create-keys
 _osd_bootstrap_caps = {
     'mon': [
@@ -305,7 +312,7 @@ def get_osd_bootstrap_key():
         # Attempt to get/create a key using the OSD bootstrap profile first
         key = get_named_key('bootstrap-osd',
                             _osd_bootstrap_caps_profile)
-    except:
+    except Exception:
         # If that fails try with the older style permissions
         key = get_named_key('bootstrap-osd',
                             _osd_bootstrap_caps)
@@ -329,6 +336,7 @@ def import_radosgw_key(key):
         ]
         subprocess.check_call(cmd)
 
+
 # OSD caps taken from ceph-create-keys
 _radosgw_caps = {
     'mon': ['allow rw'],
@@ -339,14 +347,19 @@ _upgrade_caps = {
 }
 
 
-def get_radosgw_key():
-    return get_named_key('radosgw.gateway', _radosgw_caps)
+def get_radosgw_key(name='radosgw.gateway'):
+    return get_named_key(name, _radosgw_caps)
 
 
-_default_caps = {
-    'mon': ['allow rw'],
-    'osd': ['allow rwx']
-}
+def get_mds_key(name='mds'):
+    return get_named_key(name, _radosgw_caps)
+
+
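+# NOTE: an OrderedDict keeps the cap subsystems in a stable order so the
+# 'ceph auth' command line built from them is deterministic on Python
+# versions where plain dicts do not preserve insertion order.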
+_default_caps = collections.OrderedDict([
+    ('mon', ['allow r',
+             'allow command "osd blacklist"']),
+    ('osd', ['allow rwx']),
+])
 
 admin_caps = {
     'mds': ['allow'],
@@ -354,6 +367,12 @@ admin_caps = {
     'osd': ['allow *']
 }
 
+mds_caps = collections.OrderedDict([
+    ('osd', ['allow *']),
+    ('mds', ['allow']),
+    ('mon', ['allow rwx']),
+])
+
 osd_upgrade_caps = {
     'mon': ['allow command "config-key"',
             'allow command "osd tree"',
@@ -369,30 +388,84 @@ def get_upgrade_key():
     return get_named_key('upgrade-osd', _upgrade_caps)
 
 
-def get_named_key(name, caps=None):
+def _config_user_key(name):
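+    # 'user-keys' is a space-separated list of '<user>:<key>' pairs, e.g.
+    # (hypothetical key material):
+    #   client.glance:QVFCbEJsYWg9PQ== client.cinder:QVFEZm9vYmFyZw==
+    # For name='glance' this returns the key portion of 'client.glance'.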
+    user_keys_list = config('user-keys')
+    if user_keys_list:
+        for ukpair in user_keys_list.split(' '):
+            uk = ukpair.split(':')
+            if len(uk) == 2:
+                user_type, k = uk
+                _, u = user_type.split('.')
+                if u == name:
+                    return k
+
+
+def get_named_key(name, caps=None, pool_list=None):
+    """Retrieve a specific named cephx key.
+
+    :param name: String Name of key to get.
+    :param pool_list: The list of pools to give access to
+    :param caps: dict of cephx capabilities
+    :returns: Returns a cephx key
+    """
+    key_name = 'client.{}'.format(name)
+    try:
+        # Does the key already exist?
+        output = str(subprocess.check_output(
+            [
+                'sudo',
+                '-u', ceph_user(),
+                'ceph',
+                '--name', config('admin-user'),
+                '--keyring',
+                '/var/lib/ceph/mon/ceph-{}/keyring'.format(
+                    get_unit_hostname()
+                ),
+                'auth',
+                'get',
+                key_name,
+            ]).decode('UTF-8')).strip()
+        # NOTE(jamespage):
+        # Apply any changes to key capabilities, dealing with
+        # upgrades which require new caps for operation.
+        upgrade_key_caps(key_name,
+                         caps or _default_caps,
+                         pool_list)
+        return parse_key(output)
+    except subprocess.CalledProcessError:
+        # Couldn't get the key, time to create it!
+        log("Creating new key for {}".format(name), level=DEBUG)
     caps = caps or _default_caps
     cmd = [
         "sudo",
         "-u",
         ceph_user(),
         'ceph',
-        '--name', 'client.admin',
+        '--name', config('admin-user'),
         '--keyring',
         '/var/lib/ceph/mon/ceph-{}/keyring'.format(
             get_unit_hostname()
         ),
-        'auth', 'get-or-create', 'client.{}'.format(name),
+        'auth', 'get-or-create', key_name,
     ]
     # Add capabilities
-    for subsystem, subcaps in caps.iteritems():
-        cmd.extend([
-            subsystem,
-            '; '.join(subcaps),
-        ])
-    return parse_key(subprocess.check_output(cmd).strip())  # IGNORE:E1103
+    for subsystem, subcaps in caps.items():
+        if subsystem == 'osd':
+            if pool_list:
+                # This will output a string similar to:
+                # "pool=rgw pool=rbd pool=something"
+                pools = " ".join(['pool={0}'.format(i) for i in pool_list])
+                subcaps[0] = subcaps[0] + " " + pools
+        cmd.extend([subsystem, '; '.join(subcaps)])
+
+    log("Calling check_output: {}".format(cmd), level=DEBUG)
+    return parse_key(str(subprocess
+                         .check_output(cmd)
+                         .decode('UTF-8'))
+                     .strip())  # IGNORE:E1103
 
 
-def upgrade_key_caps(key, caps):
+def upgrade_key_caps(key, caps, pool_list=None):
     """ Upgrade key to have capabilities caps """
     if not is_leader():
         # Not the MON leader OR not clustered
@@ -400,7 +473,13 @@ def upgrade_key_caps(key, caps):
     cmd = [
         "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key
     ]
-    for subsystem, subcaps in caps.iteritems():
+    for subsystem, subcaps in caps.items():
+        if subsystem == 'osd':
+            if pool_list:
+                # This will output a string similar to:
+                # "pool=rgw pool=rbd pool=something"
+                pools = " ".join(['pool={0}'.format(i) for i in pool_list])
+                subcaps[0] = subcaps[0] + " " + pools
         cmd.extend([subsystem, '; '.join(subcaps)])
     subprocess.check_call(cmd)
 
@@ -449,7 +528,7 @@ def bootstrap_monitor_cluster(secret):
                 service_restart('ceph-mon')
             else:
                 service_restart('ceph-mon-all')
-        except:
+        except Exception:
             raise
         finally:
             os.unlink(keyring)
diff --git a/hooks/ceph_hooks.py b/hooks/ceph_hooks.py
index 960eeba2f792ce9533f66129252ba647b4a4be9a..5c1435ec05b126b4be77ac909dbfc841e5175525 100755
--- a/hooks/ceph_hooks.py
+++ b/hooks/ceph_hooks.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
 
 #
 # Copyright 2012 Canonical Ltd.
@@ -13,6 +13,20 @@ import os
 import shutil
 import sys
 
+
+_path = os.path.dirname(os.path.realpath(__file__))
+_root = os.path.abspath(os.path.join(_path, '..'))
+_lib = os.path.abspath(os.path.join(_path, '../lib'))
+
+
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
+
+
+_add_path(_root)
+_add_path(_lib)
+
 import ceph
 from charmhelpers.core.hookenv import (
     log,
@@ -28,8 +42,11 @@ from charmhelpers.core.hookenv import (
     service_name,
     status_set,)
 from charmhelpers.core.host import (
+    cmp_pkgrevno,
+    CompareHostReleases,
+    lsb_release,
     mkdir,
-    cmp_pkgrevno,)
+)
 from charmhelpers.fetch import (
     apt_install,
     apt_update,
@@ -38,15 +55,21 @@ from charmhelpers.fetch import (
 )
 from charmhelpers.payload.execd import execd_preinstall
 from charmhelpers.contrib.openstack.alternatives import install_alternative
+from charmhelpers.contrib.openstack.utils import (
+    clear_unit_paused,
+    clear_unit_upgrading,
+    is_unit_upgrading_set,
+    set_unit_paused,
+    set_unit_upgrading,
+)
 
 from charmhelpers.core.templating import render
 
-from ceph_broker import (
+from charms_ceph.broker import (
     process_requests
 )
 
 from utils import get_unit_hostname
-from charmhelpers.contrib.hardening.harden import harden
 
 hooks = Hooks()
 
@@ -59,17 +82,24 @@ def install_upstart_scripts():
 
 
 @hooks.hook('install.real')
-@harden()
 def install():
     execd_preinstall()
+    package_install()
+    install_upstart_scripts()
+
+
+def package_install():
     add_source(config('source'), config('key'))
     apt_update(fatal=True)
-    apt_install(packages=ceph.PACKAGES, fatal=True)
-    install_upstart_scripts()
+    _release = lsb_release()['DISTRIB_CODENAME'].lower()
+    if CompareHostReleases(_release) >= "focal":
+        _packages = ceph.PACKAGES_FOCAL
+    else:
+        _packages = ceph.PACKAGES
+    apt_install(packages=_packages, fatal=True)
 
 
 def emit_cephconf():
-
     cephcontext = {
         'auth_supported': config('auth-supported'),
         'mon_hosts': config('monitor-hosts'),
@@ -86,11 +116,16 @@ def emit_cephconf():
     render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644)
     install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                         charm_ceph_conf, 100)
-    keyring = 'ceph.client.admin.keyring'
+
+    keyring_template = 'ceph.keyring'
+    keyring = 'ceph.{}.keyring'.format(config('admin-user'))
     keyring_path = '/etc/ceph/' + keyring
-    ctx = {'admin_key': config('admin-key')}
+    ctx = {
+        'admin_key': config('admin-key'),
+        'admin_user': config('admin-user'),
+    }
     user = ceph.ceph_user()
-    render(keyring, keyring_path, ctx, owner=user, perms=0o600)
+    render(keyring_template, keyring_path, ctx, owner=user, perms=0o600)
 
     keyring = 'keyring'
     keyring_path = (
@@ -105,8 +140,11 @@ def emit_cephconf():
 
 
 @hooks.hook('config-changed')
-@harden()
 def config_changed():
+    c = config()
+    if (c.previous('source') != config('source') or
+            c.previous('key') != config('key')):
+        package_install()
     emit_cephconf()
 
 
@@ -118,7 +156,11 @@ def notify_radosgws():
 
 def notify_client():
     for relid in relation_ids('client'):
-        client_relation_joined(relid)
+        for unit in related_units(relid):
+            client_relation_joined(relid=relid, unit=unit)
+    for relid in relation_ids('mds'):
+        for unit in related_units(relid):
+            mds_relation_joined(relid=relid, unit=unit)
 
 
 @hooks.hook('radosgw-relation-changed')
@@ -137,12 +179,20 @@ def radosgw_relation(relid=None, unit=None):
         ceph_addrs = config('monitor-hosts')
         data = {
             'fsid': config('fsid'),
-            'radosgw_key': ceph.get_radosgw_key(),
             'auth': config('auth-supported'),
             'ceph-public-address': ceph_addrs,
         }
+        key_name = relation_get('key_name', unit=unit, rid=relid)
+        if key_name:
+            # New style, per unit keys
+            data['{}_key'.format(key_name)] = (
+                ceph.get_radosgw_key(name=key_name)
+            )
+        else:
+            # Old style global radosgw key
+            data['radosgw_key'] = ceph.get_radosgw_key()
 
-        settings = relation_get(rid=relid, unit=unit)
+        settings = relation_get(rid=relid, unit=unit) or {}
         """Process broker request(s)."""
         if 'broker_req' in settings:
             rsp = process_requests(settings['broker_req'])
@@ -156,8 +206,39 @@ def radosgw_relation(relid=None, unit=None):
         log('FSID or admin key not provided, please configure them')
 
 
+@hooks.hook('mds-relation-joined')
+@hooks.hook('mds-relation-changed')
+def mds_relation_joined(relid=None, unit=None):
+    if ready():
+        log('ceph-proxy config ok - providing mds client with keys')
+        mds_name = relation_get(attribute='mds-name',
+                                rid=relid, unit=unit)
+        if not unit:
+            unit = remote_unit()
+
+        ceph_addrs = config('monitor-hosts')
+        data = {
+            'fsid': config('fsid'),
+            '{}_mds_key'.format(mds_name):
+                ceph.get_mds_key(name=mds_name),
+            'auth': config('auth-supported'),
+            'ceph-public-address': ceph_addrs,
+        }
+
+        settings = relation_get(rid=relid, unit=unit) or {}
+        if 'broker_req' in settings:
+            rsp = process_requests(settings['broker_req'])
+            unit_id = unit.replace('/', '-')
+            unit_response_key = 'broker-rsp-' + unit_id
+            data[unit_response_key] = rsp
+        log('MDS: relation_set (%s): %s' % (relid, str(data)), level=DEBUG)
+        relation_set(relation_id=relid, relation_settings=data)
+    else:
+        log('MDS: FSID or admin key not provided, please configure them')
+
+
 @hooks.hook('client-relation-joined')
-def client_relation_joined(relid=None):
+def client_relation_joined(relid=None, unit=None):
     if ready():
         service_name = None
         if relid is None:
@@ -167,13 +248,23 @@ def client_relation_joined(relid=None):
             units = related_units(relid)
             if len(units) > 0:
                 service_name = units[0].split('/')[0]
-
+        if unit is None:
+            unit = units[0]
         if service_name is not None:
             ceph_addrs = config('monitor-hosts')
             data = {'key': ceph.get_named_key(service_name),
                     'auth': config('auth-supported'),
                     'ceph-public-address': ceph_addrs}
 
+            settings = relation_get(rid=relid, unit=unit) or {}
+            data_update = {}
+            if 'broker_req' in settings:
+                rsp = process_requests(settings['broker_req'])
+                unit_id = unit.replace('/', '-')
+                unit_response_key = 'broker-rsp-' + unit_id
+                data_update[unit_response_key] = rsp
+            data.update(data_update)
+
             log('relation_set (%s): %s' % (relid, str(data)), level=DEBUG)
             relation_set(relation_id=relid,
                          relation_settings=data)
@@ -185,7 +276,7 @@ def client_relation_joined(relid=None):
 def client_relation_changed():
     """Process broker requests from ceph client relations."""
     if ready():
-        settings = relation_get()
+        settings = relation_get() or {}
         if 'broker_req' in settings:
             # the request is processed only by the leader as reported by juju
             if not is_leader():
@@ -212,6 +303,12 @@ def ready():
 
 def assess_status():
     '''Assess status of current unit'''
+    if is_unit_upgrading_set():
+        status_set("blocked",
+                   "Ready for do-release-upgrade and reboot. "
+                   "Set complete when finished.")
+        return
+
     if ready():
         status_set('active', 'Ready to proxy settings')
     else:
@@ -219,11 +316,31 @@ def assess_status():
 
 
 @hooks.hook('update-status')
-@harden()
 def update_status():
     log('Updating status.')
 
 
+@hooks.hook('pre-series-upgrade')
+def pre_series_upgrade():
+    log("Running prepare series upgrade hook", "INFO")
+    # NOTE: The Ceph packages handle the series upgrade gracefully.
+    # In order to indicate the step of the series upgrade process for
+    # administrators and automated scripts, the charm sets the paused and
+    # upgrading states.
+    set_unit_paused()
+    set_unit_upgrading()
+
+
+@hooks.hook('post-series-upgrade')
+def post_series_upgrade():
+    log("Running complete series upgrade hook", "INFO")
+    # In order to indicate the step of the series upgrade process for
+    # administrators and automated scripts, the charm clears the paused and
+    # upgrading states.
+    clear_unit_paused()
+    clear_unit_upgrading()
+
+
 if __name__ == '__main__':
     try:
         hooks.execute(sys.argv)
diff --git a/hooks/install b/hooks/install
index 29ff68948033d91316d7e01d4cbc2b44f61ba8f5..869ee2044454e4eecfd95f738627986e226ddcd3 100755
--- a/hooks/install
+++ b/hooks/install
@@ -1,8 +1,8 @@
-#!/bin/bash
+#!/bin/bash -e
 # Wrapper to deal with newer Ubuntu versions that don't have py2 installed
 # by default.
 
-declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml' 'dnspython')
+declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml')
 
 check_and_install() {
     pkg="${1}-${2}"
@@ -11,10 +11,11 @@ check_and_install() {
     fi
 }
 
-PYTHON="python"
+PYTHON="python3"
 
 for dep in ${DEPS[@]}; do
     check_and_install ${PYTHON} ${dep}
 done
 
+./hooks/install_deps
 exec ./hooks/install.real
diff --git a/hooks/install_deps b/hooks/install_deps
new file mode 100755
index 0000000000000000000000000000000000000000..c480f29e9fabdc9c4658773141ee4594853d6316
--- /dev/null
+++ b/hooks/install_deps
@@ -0,0 +1,18 @@
+#!/bin/bash -e
+# Wrapper to ensure that python dependencies are installed before we get into
+# the python part of the hook execution
+
+declare -a DEPS=('dnspython' 'pyudev')
+
+check_and_install() {
+    pkg="${1}-${2}"
+    if ! dpkg -s ${pkg} > /dev/null 2>&1; then
+        apt-get -y install ${pkg}
+    fi
+}
+
+PYTHON="python3"
+
+for dep in ${DEPS[@]}; do
+    check_and_install ${PYTHON} ${dep}
+done
diff --git a/hooks/mds-relation-changed b/hooks/mds-relation-changed
new file mode 120000
index 0000000000000000000000000000000000000000..52d966304306513a362d25eb46bdc556d9d3c33b
--- /dev/null
+++ b/hooks/mds-relation-changed
@@ -0,0 +1 @@
+ceph_hooks.py
\ No newline at end of file
diff --git a/hooks/mds-relation-joined b/hooks/mds-relation-joined
new file mode 120000
index 0000000000000000000000000000000000000000..52d966304306513a362d25eb46bdc556d9d3c33b
--- /dev/null
+++ b/hooks/mds-relation-joined
@@ -0,0 +1 @@
+ceph_hooks.py
\ No newline at end of file
diff --git a/hooks/post-series-upgrade b/hooks/post-series-upgrade
new file mode 120000
index 0000000000000000000000000000000000000000..52d966304306513a362d25eb46bdc556d9d3c33b
--- /dev/null
+++ b/hooks/post-series-upgrade
@@ -0,0 +1 @@
+ceph_hooks.py
\ No newline at end of file
diff --git a/hooks/pre-series-upgrade b/hooks/pre-series-upgrade
new file mode 120000
index 0000000000000000000000000000000000000000..52d966304306513a362d25eb46bdc556d9d3c33b
--- /dev/null
+++ b/hooks/pre-series-upgrade
@@ -0,0 +1 @@
+ceph_hooks.py
\ No newline at end of file
diff --git a/hooks/upgrade-charm b/hooks/upgrade-charm
new file mode 100755
index 0000000000000000000000000000000000000000..c32fb38ce88e19c139c9bab19ebf67221b1fbf50
--- /dev/null
+++ b/hooks/upgrade-charm
@@ -0,0 +1,6 @@
+#!/bin/bash -e
+# Wrapper to ensure that old python bytecode isn't hanging around
+# after we upgrade the charm with newer libraries
+find . -name '*.pyc' -delete
+
+./hooks/install_deps
diff --git a/hooks/utils.py b/hooks/utils.py
index 5b68a1e72e5843be58ccdcd5f4595475e0003d5a..d1cf50096a3127f3704b65f2237f054eb3c8e0b3 100644
--- a/hooks/utils.py
+++ b/hooks/utils.py
@@ -43,9 +43,9 @@ except ImportError:
 
 def enable_pocket(pocket):
     apt_sources = "/etc/apt/sources.list"
-    with open(apt_sources, "r") as sources:
+    with open(apt_sources, "rt") as sources:
         lines = sources.readlines()
-    with open(apt_sources, "w") as sources:
+    with open(apt_sources, "wt") as sources:
         for line in lines:
             if pocket in line:
                 sources.write(re.sub('^# deb', 'deb', line))
diff --git a/lib/charms_ceph/__init__.py b/lib/charms_ceph/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/charms_ceph/broker.py b/lib/charms_ceph/broker.py
new file mode 100644
index 0000000000000000000000000000000000000000..d00baedcaa25c0d447ebe662c5045ce885b1b0ac
--- /dev/null
+++ b/lib/charms_ceph/broker.py
@@ -0,0 +1,912 @@
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import json
+import os
+
+from subprocess import check_call, check_output, CalledProcessError
+from tempfile import NamedTemporaryFile
+
+from charms_ceph.utils import (
+    get_cephfs,
+    get_osd_weight
+)
+from charms_ceph.crush_utils import Crushmap
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    INFO,
+    ERROR,
+)
+from charmhelpers.contrib.storage.linux.ceph import (
+    create_erasure_profile,
+    delete_pool,
+    erasure_profile_exists,
+    get_osds,
+    monitor_key_get,
+    monitor_key_set,
+    pool_exists,
+    pool_set,
+    remove_pool_snapshot,
+    rename_pool,
+    snapshot_pool,
+    validator,
+    ErasurePool,
+    BasePool,
+    ReplicatedPool,
+)
+
+# This comes from http://docs.ceph.com/docs/master/rados/operations/pools/
+# This should do a decent job of preventing people from passing in bad values.
+# It will give a useful error message
+
+POOL_KEYS = {
+    # "Ceph Key Name": [Python type, [Valid Range]]
+    "size": [int],
+    "min_size": [int],
+    "crash_replay_interval": [int],
+    "pgp_num": [int],  # = or < pg_num
+    "crush_ruleset": [int],
+    "hashpspool": [bool],
+    "nodelete": [bool],
+    "nopgchange": [bool],
+    "nosizechange": [bool],
+    "write_fadvise_dontneed": [bool],
+    "noscrub": [bool],
+    "nodeep-scrub": [bool],
+    "hit_set_type": [str, ["bloom", "explicit_hash",
+                           "explicit_object"]],
+    "hit_set_count": [int, [1, 1]],
+    "hit_set_period": [int],
+    "hit_set_fpp": [float, [0.0, 1.0]],
+    "cache_target_dirty_ratio": [float],
+    "cache_target_dirty_high_ratio": [float],
+    "cache_target_full_ratio": [float],
+    "target_max_bytes": [int],
+    "target_max_objects": [int],
+    "cache_min_flush_age": [int],
+    "cache_min_evict_age": [int],
+    "fast_read": [bool],
+    "allow_ec_overwrites": [bool],
+    "compression_mode": [str, ["none", "passive", "aggressive", "force"]],
+    "compression_algorithm": [str, ["lz4", "snappy", "zlib", "zstd"]],
+    "compression_required_ratio": [float, [0.0, 1.0]],
+    "crush_rule": [str],
+}
+
+CEPH_BUCKET_TYPES = [
+    'osd',
+    'host',
+    'chassis',
+    'rack',
+    'row',
+    'pdu',
+    'pod',
+    'room',
+    'datacenter',
+    'region',
+    'root'
+]
+
+
+def decode_req_encode_rsp(f):
+    """Decorator to decode incoming requests and encode responses."""
+
+    def decode_inner(req):
+        return json.dumps(f(json.loads(req)))
+
+    return decode_inner
+
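+# On the wire both sides are JSON strings; an exchange might look like
+# (illustrative values):
+#   request:  '{"api-version": 1, "request-id": "abc123",
+#               "ops": [{"op": "create-pool", "name": "images",
+#                        "replicas": 3}]}'
+#   response: '{"exit-code": 0, "request-id": "abc123"}'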
+
+@decode_req_encode_rsp
+def process_requests(reqs):
+    """Process Ceph broker request(s).
+
+    This is a versioned api. API version must be supplied by the client making
+    the request.
+
+    :param reqs: dict of request parameters.
+    :returns: dict. exit-code and reason if not 0
+    """
+    request_id = reqs.get('request-id')
+    try:
+        version = reqs.get('api-version')
+        if version == 1:
+            log('Processing request {}'.format(request_id), level=DEBUG)
+            resp = process_requests_v1(reqs['ops'])
+            if request_id:
+                resp['request-id'] = request_id
+
+            return resp
+
+    except Exception as exc:
+        log(str(exc), level=ERROR)
+        msg = ("Unexpected error occurred while processing requests: %s" %
+               reqs)
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
+    msg = ("Missing or invalid api version ({})".format(version))
+    resp = {'exit-code': 1, 'stderr': msg}
+    if request_id:
+        resp['request-id'] = request_id
+
+    return resp
+
+
+def handle_create_erasure_profile(request, service):
+    """Create an erasure profile.
+
+    :param request: dict of request operations and params
+    :param service: The ceph client to run the command under.
+    :returns: dict. exit-code and reason if not 0
+    """
+    # "isa" | "lrc" | "shec" | "clay" or it defaults to "jerasure"
+    erasure_type = request.get('erasure-type')
+    # dependent on erasure coding type
+    erasure_technique = request.get('erasure-technique')
+    # "host" | "rack" | ...
+    failure_domain = request.get('failure-domain')
+    name = request.get('name')
+    # Binary Distribution Matrix (BDM) parameters
+    bdm_k = request.get('k')
+    bdm_m = request.get('m')
+    # LRC parameters
+    bdm_l = request.get('l')
+    crush_locality = request.get('crush-locality')
+    # SHEC parameters
+    bdm_c = request.get('c')
+    # CLAY parameters
+    bdm_d = request.get('d')
+    scalar_mds = request.get('scalar-mds')
+    # Device Class
+    device_class = request.get('device-class')
+
+    if failure_domain and failure_domain not in CEPH_BUCKET_TYPES:
+        msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES)
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
+    create_erasure_profile(service=service,
+                           erasure_plugin_name=erasure_type,
+                           profile_name=name,
+                           failure_domain=failure_domain,
+                           data_chunks=bdm_k,
+                           coding_chunks=bdm_m,
+                           locality=bdm_l,
+                           durability_estimator=bdm_d,
+                           helper_chunks=bdm_c,
+                           scalar_mds=scalar_mds,
+                           crush_locality=crush_locality,
+                           device_class=device_class,
+                           erasure_plugin_technique=erasure_technique)
+
+    return {'exit-code': 0}
+
+
+def handle_add_permissions_to_key(request, service):
+    """Groups are defined by the key cephx.groups.(namespace-)?-(name). This
+    key will contain a dict serialized to JSON with data about the group,
+    including pools and members.
+
+    A group can optionally have a namespace defined that will be used to
+    further restrict pool access.
+    """
+    resp = {'exit-code': 0}
+
+    service_name = request.get('name')
+    group_name = request.get('group')
+    group_namespace = request.get('group-namespace')
+    if group_namespace:
+        group_name = "{}-{}".format(group_namespace, group_name)
+    group = get_group(group_name=group_name)
+    service_obj = get_service_groups(service=service_name,
+                                     namespace=group_namespace)
+    if request.get('object-prefix-permissions'):
+        service_obj['object_prefix_perms'] = request.get(
+            'object-prefix-permissions')
+    format("Service object: {}".format(service_obj))
+    permission = request.get('group-permission') or "rwx"
+    if service_name not in group['services']:
+        group['services'].append(service_name)
+    save_group(group=group, group_name=group_name)
+    if permission not in service_obj['group_names']:
+        service_obj['group_names'][permission] = []
+    if group_name not in service_obj['group_names'][permission]:
+        service_obj['group_names'][permission].append(group_name)
+    save_service(service=service_obj, service_name=service_name)
+    service_obj['groups'] = _build_service_groups(service_obj,
+                                                  group_namespace)
+    update_service_permissions(service_name, service_obj, group_namespace)
+
+    return resp
+
+
+def handle_set_key_permissions(request, service):
+    """Ensure the key has the requested permissions."""
+    permissions = request.get('permissions')
+    client = request.get('client')
+    call = ['ceph', '--id', service, 'auth', 'caps',
+            'client.{}'.format(client)] + permissions
+    try:
+        check_call(call)
+    except CalledProcessError as e:
+        log("Error updating key capabilities: {}".format(e), level=ERROR)
+
+
+def update_service_permissions(service, service_obj=None, namespace=None):
+    """Update the key permissions for the named client in Ceph"""
+    if not service_obj:
+        service_obj = get_service_groups(service=service, namespace=namespace)
+    permissions = pool_permission_list_for_service(service_obj)
+    call = ['ceph', 'auth', 'caps', 'client.{}'.format(service)] + permissions
+    try:
+        check_call(call)
+    except CalledProcessError as e:
+        log("Error updating key capabilities: {}".format(e))
+
+
+def add_pool_to_group(pool, group, namespace=None):
+    """Add a named pool to a named group"""
+    group_name = group
+    if namespace:
+        group_name = "{}-{}".format(namespace, group_name)
+    group = get_group(group_name=group_name)
+    if pool not in group['pools']:
+        group["pools"].append(pool)
+    save_group(group, group_name=group_name)
+    for service in group['services']:
+        update_service_permissions(service, namespace=namespace)
+
+
+def pool_permission_list_for_service(service):
+    """Build the permission string for Ceph for a given service"""
+    permissions = []
+    permission_types = collections.OrderedDict()
+    for permission, group in sorted(service["group_names"].items()):
+        if permission not in permission_types:
+            permission_types[permission] = []
+        for item in group:
+            permission_types[permission].append(item)
+    for permission, groups in permission_types.items():
+        permission = "allow {}".format(permission)
+        for group in groups:
+            for pool in service['groups'][group].get('pools', []):
+                permissions.append("{} pool={}".format(permission, pool))
+    for permission, prefixes in sorted(
+            service.get("object_prefix_perms", {}).items()):
+        for prefix in prefixes:
+            permissions.append("allow {} object_prefix {}".format(permission,
+                                                                  prefix))
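+    # Illustrative result for a service holding 'rwx' on a group whose only
+    # pool is 'glance':
+    #   ['mon', 'allow r, allow command "osd blacklist"',
+    #    'osd', 'allow rwx pool=glance']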
+    return ['mon', 'allow r, allow command "osd blacklist"',
+            'osd', ', '.join(permissions)]
+
+
+def get_service_groups(service, namespace=None):
+    """Services are objects stored with some metadata, they look like (for a
+    service named "nova"):
+    {
+        group_names: {'rwx': ['images']},
+        groups: {}
+    }
+    After populating the group, it looks like:
+    {
+        group_names: {'rwx': ['images']},
+        groups: {
+            'images': {
+                pools: ['glance'],
+                services: ['nova']
+            }
+        }
+    }
+    """
+    service_json = monitor_key_get(service='admin',
+                                   key="cephx.services.{}".format(service))
+    try:
+        service = json.loads(service_json)
+    except (TypeError, ValueError):
+        service = None
+    if service:
+        service['groups'] = _build_service_groups(service, namespace)
+    else:
+        service = {'group_names': {}, 'groups': {}}
+    return service
+
+
+def _build_service_groups(service, namespace=None):
+    """Rebuild the 'groups' dict for a service group
+
+    :returns: dict: dictionary keyed by group name of the following
+                    format:
+
+                    {
+                        'images': {
+                            pools: ['glance'],
+                            services: ['nova', 'glance']
+                         },
+                         'vms':{
+                            pools: ['nova'],
+                            services: ['nova']
+                         }
+                    }
+    """
+    all_groups = {}
+    for groups in service['group_names'].values():
+        for group in groups:
+            name = group
+            if namespace:
+                name = "{}-{}".format(namespace, name)
+            all_groups[group] = get_group(group_name=name)
+    return all_groups
+
+
+def get_group(group_name):
+    """A group is a structure to hold data about a named group, structured as:
+    {
+        pools: ['glance'],
+        services: ['nova']
+    }
+    """
+    group_key = get_group_key(group_name=group_name)
+    group_json = monitor_key_get(service='admin', key=group_key)
+    try:
+        group = json.loads(group_json)
+    except (TypeError, ValueError):
+        group = None
+    if not group:
+        group = {
+            'pools': [],
+            'services': []
+        }
+    return group
+
+
+def save_service(service_name, service):
+    """Persist a service in the monitor cluster"""
+    service['groups'] = {}
+    return monitor_key_set(service='admin',
+                           key="cephx.services.{}".format(service_name),
+                           value=json.dumps(service, sort_keys=True))
+
+
+def save_group(group, group_name):
+    """Persist a group in the monitor cluster"""
+    group_key = get_group_key(group_name=group_name)
+    return monitor_key_set(service='admin',
+                           key=group_key,
+                           value=json.dumps(group, sort_keys=True))
+
+
+def get_group_key(group_name):
+    """Build group key"""
+    return 'cephx.groups.{}'.format(group_name)
+
+
+def handle_erasure_pool(request, service):
+    """Create a new erasure coded pool.
+
+    :param request: dict of request operations and params.
+    :param service: The ceph client to run the command under.
+    :returns: dict. exit-code and reason if not 0.
+    """
+    pool_name = request.get('name')
+    erasure_profile = request.get('erasure-profile')
+    group_name = request.get('group')
+
+    if erasure_profile is None:
+        erasure_profile = "default-canonical"
+
+    if group_name:
+        group_namespace = request.get('group-namespace')
+        # Add the pool to the group named "group_name"
+        add_pool_to_group(pool=pool_name,
+                          group=group_name,
+                          namespace=group_namespace)
+
+    # TODO: Default to 3/2 erasure coding. I believe this requires min 5 osds
+    if not erasure_profile_exists(service=service, name=erasure_profile):
+        # TODO: Fail and tell them to create the profile or default
+        msg = ("erasure-profile {} does not exist.  Please create it with: "
+               "create-erasure-profile".format(erasure_profile))
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
+    try:
+        pool = ErasurePool(service=service,
+                           op=request)
+    except KeyError:
+        msg = "Missing parameter."
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
+    # Ok make the erasure pool
+    if not pool_exists(service=service, name=pool_name):
+        log("Creating pool '{}' (erasure_profile={})"
+            .format(pool.name, erasure_profile), level=INFO)
+        pool.create()
+
+    # Set/update properties that are allowed to change after pool creation.
+    pool.update()
+
+
+def handle_replicated_pool(request, service):
+    """Create a new replicated pool.
+
+    :param request: dict of request operations and params.
+    :param service: The ceph client to run the command under.
+    :returns: dict. exit-code and reason if not 0.
+    """
+    pool_name = request.get('name')
+    group_name = request.get('group')
+
+    # Optional params
+    # NOTE: Check this against the handling in the Pool classes, reconcile and
+    # remove.
+    pg_num = request.get('pg_num')
+    replicas = request.get('replicas')
+    if pg_num:
+        # Cap pg_num to max allowed just in case.
+        osds = get_osds(service)
+        if osds:
+            pg_num = min(pg_num, (len(osds) * 100 // replicas))
+            request.update({'pg_num': pg_num})
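+            # e.g. with 12 OSDs and replicas=3 the cap is
+            # 12 * 100 // 3 = 400 placement groups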
+
+    if group_name:
+        group_namespace = request.get('group-namespace')
+        # Add the pool to the group named "group_name"
+        add_pool_to_group(pool=pool_name,
+                          group=group_name,
+                          namespace=group_namespace)
+
+    try:
+        pool = ReplicatedPool(service=service,
+                              op=request)
+    except KeyError:
+        msg = "Missing parameter."
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
+    if not pool_exists(service=service, name=pool_name):
+        log("Creating pool '{}' (replicas={})".format(pool.name, replicas),
+            level=INFO)
+        pool.create()
+    else:
+        log("Pool '{}' already exists - skipping create".format(pool.name),
+            level=DEBUG)
+
+    # Set/update properties that are allowed to change after pool creation.
+    pool.update()
+
+
+def handle_create_cache_tier(request, service):
+    """Create a cache tier on a cold pool.  Modes supported are
+    "writeback" and "readonly".
+
+    :param request: dict of request operations and params
+    :param service: The ceph client to run the command under.
+    :returns: dict. exit-code and reason if not 0
+    """
+    # mode = "writeback" | "readonly"
+    storage_pool = request.get('cold-pool')
+    cache_pool = request.get('hot-pool')
+    cache_mode = request.get('mode')
+
+    if cache_mode is None:
+        cache_mode = "writeback"
+
+    # cache and storage pool must exist first
+    if not pool_exists(service=service, name=storage_pool) or not pool_exists(
+            service=service, name=cache_pool):
+        msg = ("cold-pool: {} and hot-pool: {} must exist. Please create "
+               "them first".format(storage_pool, cache_pool))
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
+    p = BasePool(service=service, name=storage_pool)
+    p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode)
+
+
+def handle_remove_cache_tier(request, service):
+    """Remove a cache tier from the cold pool.
+
+    :param request: dict of request operations and params
+    :param service: The ceph client to run the command under.
+    :returns: dict. exit-code and reason if not 0
+    """
+    storage_pool = request.get('cold-pool')
+    cache_pool = request.get('hot-pool')
+    # cache and storage pool must exist first
+    if not pool_exists(service=service, name=storage_pool) or not pool_exists(
+            service=service, name=cache_pool):
+        msg = ("cold-pool: {} or hot-pool: {} doesn't exist. Not "
+               "deleting cache tier".format(storage_pool, cache_pool))
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
+    pool = BasePool(name=storage_pool, service=service)
+    pool.remove_cache_tier(cache_pool=cache_pool)
+
+
+def handle_set_pool_value(request, service, coerce=False):
+    """Sets an arbitrary pool value.
+
+    :param request: dict of request operations and params
+    :param service: The ceph client to run the command under.
+    :param coerce: Try to parse/coerce the value into the correct type.
+                   Used by the action code that only gets Str from Juju
+    :returns: dict. exit-code and reason if not 0
+    """
+    # Set arbitrary pool values
+    params = {'pool': request.get('name'),
+              'key': request.get('key'),
+              'value': request.get('value')}
+    if params['key'] not in POOL_KEYS:
+        msg = "Invalid key '{}'".format(params['key'])
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
+    # Get the validation method
+    validator_params = POOL_KEYS[params['key']]
+    # BUG: #1838650 - the function needs to try to coerce the value param to
+    # the type required for the validator to pass.  Note: if this raises,
+    # the param isn't parsable to the correct type.
+    if coerce:
+        try:
+            params['value'] = validator_params[0](params['value'])
+        except ValueError:
+            raise RuntimeError("Value {} isn't of type {}"
+                               .format(params['value'], validator_params[0]))
+    # end of BUG: #1838650
+    if len(validator_params) == 1:
+        # Validate that what the user passed is actually legal per Ceph's rules
+        validator(params['value'], validator_params[0])
+    else:
+        # Validate that what the user passed is actually legal per Ceph's rules
+        validator(params['value'], validator_params[0], validator_params[1])
+
+    # Set the value
+    pool_set(service=service, pool_name=params['pool'], key=params['key'],
+             value=params['value'])
+
+
+def handle_rgw_regionmap_update(request, service):
+    """Change the radosgw region map.
+
+    :param request: dict of request operations and params
+    :param service: The ceph client to run the command under.
+    :returns: dict. exit-code and reason if not 0
+    """
+    name = request.get('client-name')
+    if not name:
+        msg = "Missing rgw-region or client-name params"
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+    try:
+        check_output(['radosgw-admin',
+                      '--id', service,
+                      'regionmap', 'update', '--name', name])
+    except CalledProcessError as err:
+        log(err.output, level=ERROR)
+        return {'exit-code': 1, 'stderr': err.output}
+
+
+def handle_rgw_regionmap_default(request, service):
+    """Create a radosgw region map.
+
+    :param request: dict of request operations and params
+    :param service: The ceph client to run the command under.
+    :returns: dict. exit-code and reason if not 0
+    """
+    region = request.get('rgw-region')
+    name = request.get('client-name')
+    if not region or not name:
+        msg = "Missing rgw-region or client-name params"
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+    try:
+        check_output(
+            [
+                'radosgw-admin',
+                '--id', service,
+                'regionmap',
+                'default',
+                '--rgw-region', region,
+                '--name', name])
+    except CalledProcessError as err:
+        log(err.output, level=ERROR)
+        return {'exit-code': 1, 'stderr': err.output}
+
+
+def handle_rgw_zone_set(request, service):
+    """Create a radosgw zone.
+
+    :param request: dict of request operations and params
+    :param service: The ceph client to run the command under.
+    :returns: dict. exit-code and reason if not 0
+    """
+    json_file = request.get('zone-json')
+    name = request.get('client-name')
+    region_name = request.get('region-name')
+    zone_name = request.get('zone-name')
+    if not json_file or not name or not region_name or not zone_name:
+        msg = "Missing json-file or client-name params"
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+    infile = NamedTemporaryFile(delete=False)
+    with open(infile.name, 'w') as infile_handle:
+        infile_handle.write(json_file)
+    try:
+        check_output(
+            [
+                'radosgw-admin',
+                '--id', service,
+                'zone',
+                'set',
+                '--rgw-zone', zone_name,
+                '--infile', infile.name,
+                '--name', name,
+            ]
+        )
+    except CalledProcessError as err:
+        log(err.output, level=ERROR)
+        return {'exit-code': 1, 'stderr': err.output}
+    os.unlink(infile.name)
+
+
+def handle_put_osd_in_bucket(request, service):
+    """Move an osd into a specified crush bucket.
+
+    :param request: dict of request operations and params
+    :param service: The ceph client to run the command under.
+    :returns: dict. exit-code and reason if not 0
+    """
+    osd_id = request.get('osd')
+    target_bucket = request.get('bucket')
+    if not osd_id or not target_bucket:
+        msg = "Missing OSD ID or Bucket"
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+    crushmap = Crushmap()
+    try:
+        crushmap.ensure_bucket_is_present(target_bucket)
+        check_output(
+            [
+                'ceph',
+                '--id', service,
+                'osd',
+                'crush',
+                'set',
+                str(osd_id),
+                str(get_osd_weight(osd_id)),
+                "root={}".format(target_bucket)
+            ]
+        )
+
+    except Exception as exc:
+        msg = "Failed to move OSD " \
+              "{} into Bucket {} :: {}".format(osd_id, target_bucket, exc)
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
+
+def handle_rgw_create_user(request, service):
+    """Create a new rados gateway user.
+
+    :param request: dict of request operations and params
+    :param service: The ceph client to run the command under.
+    :returns: dict. exit-code and reason if not 0
+    """
+    user_id = request.get('rgw-uid')
+    display_name = request.get('display-name')
+    name = request.get('client-name')
+    if not name or not display_name or not user_id:
+        msg = "Missing client-name, display-name or rgw-uid"
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+    try:
+        create_output = check_output(
+            [
+                'radosgw-admin',
+                '--id', service,
+                'user',
+                'create',
+                '--uid', user_id,
+                '--display-name', display_name,
+                '--name', name,
+                '--system'
+            ]
+        )
+        try:
+            user_json = json.loads(str(create_output.decode('UTF-8')))
+            return {'exit-code': 0, 'user': user_json}
+        except ValueError as err:
+            log(err, level=ERROR)
+            return {'exit-code': 1, 'stderr': err}
+
+    except CalledProcessError as err:
+        log(err.output, level=ERROR)
+        return {'exit-code': 1, 'stderr': err.output}
+
+
+def handle_create_cephfs(request, service):
+    """Create a new cephfs.
+
+    :param request: The broker request
+    :param service: The ceph client to run the command under.
+    :returns: dict. exit-code and reason if not 0
+    """
+    cephfs_name = request.get('mds_name')
+    data_pool = request.get('data_pool')
+    extra_pools = request.get('extra_pools', None) or []
+    metadata_pool = request.get('metadata_pool')
+    # Check if the user params were provided
+    if not cephfs_name or not data_pool or not metadata_pool:
+        msg = "Missing mds_name, data_pool or metadata_pool params"
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
+    # Sanity check that the required pools exist
+    for pool_name in [data_pool, metadata_pool] + extra_pools:
+        if not pool_exists(service=service, name=pool_name):
+            msg = "CephFS pool {} does not exist. Cannot create CephFS".format(
+                pool_name)
+            log(msg, level=ERROR)
+            return {'exit-code': 1, 'stderr': msg}
+
+    if get_cephfs(service=service):
+        # 'ceph fs new' has already been run for this cluster
+        log("CephFS already created")
+        return
+
+    # Finally create CephFS
+    try:
+        check_output(["ceph",
+                      '--id', service,
+                      "fs", "new", cephfs_name,
+                      metadata_pool,
+                      data_pool])
+    except CalledProcessError as err:
+        if err.returncode == 22:
+            log("CephFS already created")
+            return
+        else:
+            log(err.output, level=ERROR)
+            return {'exit-code': 1, 'stderr': err.output}
+    for pool_name in extra_pools:
+        cmd = ["ceph", '--id', service, "fs", "add_data_pool", cephfs_name,
+               pool_name]
+        try:
+            check_output(cmd)
+        except CalledProcessError as err:
+            log(err.output, level=ERROR)
+            return {'exit-code': 1, 'stderr': err.output}
+
+
+def handle_rgw_region_set(request, service):
+    # radosgw-admin region set --infile us.json --name client.radosgw.us-east-1
+    """Set the rados gateway region.
+
+    :param request: dict. The broker request.
+    :param service: The ceph client to run the command under.
+    :returns: dict. exit-code and reason if not 0
+    """
+    json_file = request.get('region-json')
+    name = request.get('client-name')
+    region_name = request.get('region-name')
+    zone_name = request.get('zone-name')
+    if not json_file or not name or not region_name or not zone_name:
+        msg = "Missing json-file or client-name params"
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+    infile = NamedTemporaryFile(delete=False)
+    with open(infile.name, 'w') as infile_handle:
+        infile_handle.write(json_file)
+    try:
+        check_output(
+            [
+                'radosgw-admin',
+                '--id', service,
+                'region',
+                'set',
+                '--rgw-zone', zone_name,
+                '--infile', infile.name,
+                '--name', name,
+            ]
+        )
+    except CalledProcessError as err:
+        log(err.output, level=ERROR)
+        return {'exit-code': 1, 'stderr': err.output}
+    os.unlink(infile.name)
+
+
+def process_requests_v1(reqs):
+    """Process v1 requests.
+
+    Takes a list of requests (dicts) and processes each one. If an error is
+    found, processing stops and the client is notified in the response.
+
+    Returns a response dict containing the exit code (non-zero if any
+    operation failed along with an explanation).
+    """
+    ret = None
+    log("Processing {} ceph broker requests".format(len(reqs)), level=INFO)
+    for req in reqs:
+        op = req.get('op')
+        log("Processing op='{}'".format(op), level=DEBUG)
+        # Use admin client since we do not have other client key locations
+        # setup to use them for these operations.
+        svc = 'admin'
+        if op == "create-pool":
+            pool_type = req.get('pool-type')  # "replicated" | "erasure"
+
+            # Default to replicated if pool_type isn't given
+            if pool_type == 'erasure':
+                ret = handle_erasure_pool(request=req, service=svc)
+            else:
+                ret = handle_replicated_pool(request=req, service=svc)
+        elif op == "create-cephfs":
+            ret = handle_create_cephfs(request=req, service=svc)
+        elif op == "create-cache-tier":
+            ret = handle_create_cache_tier(request=req, service=svc)
+        elif op == "remove-cache-tier":
+            ret = handle_remove_cache_tier(request=req, service=svc)
+        elif op == "create-erasure-profile":
+            ret = handle_create_erasure_profile(request=req, service=svc)
+        elif op == "delete-pool":
+            pool = req.get('name')
+            ret = delete_pool(service=svc, name=pool)
+        elif op == "rename-pool":
+            old_name = req.get('name')
+            new_name = req.get('new-name')
+            ret = rename_pool(service=svc, old_name=old_name,
+                              new_name=new_name)
+        elif op == "snapshot-pool":
+            pool = req.get('name')
+            snapshot_name = req.get('snapshot-name')
+            ret = snapshot_pool(service=svc, pool_name=pool,
+                                snapshot_name=snapshot_name)
+        elif op == "remove-pool-snapshot":
+            pool = req.get('name')
+            snapshot_name = req.get('snapshot-name')
+            ret = remove_pool_snapshot(service=svc, pool_name=pool,
+                                       snapshot_name=snapshot_name)
+        elif op == "set-pool-value":
+            ret = handle_set_pool_value(request=req, service=svc)
+        elif op == "rgw-region-set":
+            ret = handle_rgw_region_set(request=req, service=svc)
+        elif op == "rgw-zone-set":
+            ret = handle_rgw_zone_set(request=req, service=svc)
+        elif op == "rgw-regionmap-update":
+            ret = handle_rgw_regionmap_update(request=req, service=svc)
+        elif op == "rgw-regionmap-default":
+            ret = handle_rgw_regionmap_default(request=req, service=svc)
+        elif op == "rgw-create-user":
+            ret = handle_rgw_create_user(request=req, service=svc)
+        elif op == "move-osd-to-bucket":
+            ret = handle_put_osd_in_bucket(request=req, service=svc)
+        elif op == "add-permissions-to-key":
+            ret = handle_add_permissions_to_key(request=req, service=svc)
+        elif op == 'set-key-permissions':
+            ret = handle_set_key_permissions(request=req, service=svc)
+        else:
+            msg = "Unknown operation '{}'".format(op)
+            log(msg, level=ERROR)
+            return {'exit-code': 1, 'stderr': msg}
+
+    if isinstance(ret, dict) and 'exit-code' in ret:
+        return ret
+
+    return {'exit-code': 0}
diff --git a/lib/charms_ceph/crush_utils.py b/lib/charms_ceph/crush_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8fe09fa4c7586bac05bfc2dfb0931888d1d2a9de
--- /dev/null
+++ b/lib/charms_ceph/crush_utils.py
@@ -0,0 +1,154 @@
+# Copyright 2014 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+from subprocess import check_output, CalledProcessError
+
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
+)
+
+CRUSH_BUCKET = """root {name} {{
+    id {id}    # do not change unnecessarily
+    # weight 0.000
+    alg straw2
+    hash 0  # rjenkins1
+}}
+
+rule {name} {{
+    ruleset 0
+    type replicated
+    min_size 1
+    max_size 10
+    step take {name}
+    step chooseleaf firstn 0 type host
+    step emit
+}}"""
+
+# This regular expression looks for a string like:
+# root NAME {
+# id NUMBER
+# so that we can extract NAME and ID from the crushmap
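+# e.g. "root default {\n    id -1" yields the groups ('default', '-1')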
+CRUSHMAP_BUCKETS_RE = re.compile(r"root\s+(.+)\s+\{\s*id\s+(-?\d+)")
+
+# This regular expression looks for ID strings in the crushmap like:
+# id NUMBER
+# so that we can extract the IDs from a crushmap
+CRUSHMAP_ID_RE = re.compile(r"id\s+(-?\d+)")
+
+
+class Crushmap(object):
+    """An object oriented approach to Ceph crushmap management."""
+
+    def __init__(self):
+        self._crushmap = self.load_crushmap()
+        roots = re.findall(CRUSHMAP_BUCKETS_RE, self._crushmap)
+        self._buckets = [CRUSHBucket(root[0], root[1], True)
+                         for root in roots]
+        ids = sorted(int(i)
+                     for i in re.findall(CRUSHMAP_ID_RE, self._crushmap))
+        self._ids = ids if ids else [0]
+
+    def load_crushmap(self):
+        try:
+            # getcrushmap emits a binary map; pipe it to crushtool
+            # for decompilation rather than decoding it ourselves
+            crush = check_output(['ceph', 'osd', 'getcrushmap'])
+            return check_output(['crushtool', '-d', '-'],
+                                input=crush).decode('UTF-8')
+        except CalledProcessError as e:
+            log("Error occurred while loading and decompiling CRUSH map: "
+                "{}".format(e), ERROR)
+            raise RuntimeError("Failed to read CRUSH map")
+
+    def ensure_bucket_is_present(self, bucket_name):
+        if bucket_name not in [bucket.name for bucket in self.buckets()]:
+            self.add_bucket(bucket_name)
+            self.save()
+
+    def buckets(self):
+        """Return a list of buckets that are in the Crushmap."""
+        return self._buckets
+
+    def add_bucket(self, bucket_name):
+        """Add a named bucket to Ceph"""
+        new_id = min(self._ids) - 1
+        self._ids.append(new_id)
+        self._buckets.append(CRUSHBucket(bucket_name, new_id))
+
+    def save(self):
+        """Persist Crushmap to Ceph"""
+        try:
+            crushmap = self.build_crushmap()
+            compiled = check_output(
+                ['crushtool', '-c', '/dev/stdin', '-o', '/dev/stdout'],
+                input=crushmap.encode('UTF-8'))
+            ceph_output = check_output(
+                ['ceph', 'osd', 'setcrushmap', '-i', '/dev/stdin'],
+                input=compiled).decode('UTF-8')
+            return ceph_output
+        except CalledProcessError as e:
+            log("save error: {}".format(e), ERROR)
+            raise RuntimeError("Failed to save CRUSH map.")
+
+    def build_crushmap(self):
+        """Modifies the current CRUSH map to include the new buckets"""
+        tmp_crushmap = self._crushmap
+        for bucket in self._buckets:
+            if not bucket.default:
+                tmp_crushmap = "{}\n\n{}".format(
+                    tmp_crushmap,
+                    Crushmap.bucket_string(bucket.name, bucket.id))
+
+        return tmp_crushmap
+
+    @staticmethod
+    def bucket_string(name, id):
+        return CRUSH_BUCKET.format(name=name, id=id)
+
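+# A minimal usage sketch (the bucket name is illustrative):
+#
+#     crushmap = Crushmap()
+#     crushmap.ensure_bucket_is_present('fast-ssd')
+#
+# ensure_bucket_is_present() only recompiles and uploads the CRUSH map
+# when the named bucket is absent.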
+
+class CRUSHBucket(object):
+    """CRUSH bucket description object."""
+
+    def __init__(self, name, id, default=False):
+        self.name = name
+        self.id = int(id)
+        self.default = default
+
+    def __repr__(self):
+        return "Bucket {{Name: {name}, ID: {id}}}".format(
+            name=self.name, id=self.id)
+
+    def __eq__(self, other):
+        """Override the default Equals behavior"""
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return NotImplemented
+
+    def __ne__(self, other):
+        """Define a non-equality test"""
+        if isinstance(other, self.__class__):
+            return not self.__eq__(other)
+        return NotImplemented
diff --git a/lib/charms_ceph/utils.py b/lib/charms_ceph/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..52d380b4c78881258e474cd604fc7906da251c64
--- /dev/null
+++ b/lib/charms_ceph/utils.py
@@ -0,0 +1,3388 @@
+# Copyright 2017 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import glob
+import json
+import os
+import pyudev
+import random
+import re
+import socket
+import subprocess
+import sys
+import time
+import uuid
+
+from contextlib import contextmanager
+from datetime import datetime
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import templating
+from charmhelpers.core.host import (
+    chownr,
+    cmp_pkgrevno,
+    lsb_release,
+    mkdir,
+    owner,
+    service_restart,
+    service_start,
+    service_stop,
+    CompareHostReleases,
+    write_file,
+    is_container,
+)
+from charmhelpers.core.hookenv import (
+    cached,
+    config,
+    log,
+    status_set,
+    DEBUG,
+    ERROR,
+    WARNING,
+    storage_get,
+    storage_list,
+)
+from charmhelpers.fetch import (
+    add_source,
+    apt_cache,
+    apt_install,
+    apt_purge,
+    apt_update,
+    filter_missing_packages
+)
+from charmhelpers.contrib.storage.linux.ceph import (
+    get_mon_map,
+    monitor_key_set,
+    monitor_key_exists,
+    monitor_key_get,
+)
+from charmhelpers.contrib.storage.linux.utils import (
+    is_block_device,
+    is_device_mounted,
+)
+from charmhelpers.contrib.openstack.utils import (
+    get_os_codename_install_source,
+)
+from charmhelpers.contrib.storage.linux import lvm
+from charmhelpers.core.unitdata import kv
+
+CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph')
+OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd')
+HDPARM_FILE = os.path.join(os.sep, 'etc', 'hdparm.conf')
+
+LEADER = 'leader'
+PEON = 'peon'
+QUORUM = [LEADER, PEON]
+
+PACKAGES = ['ceph', 'gdisk',
+            'radosgw', 'xfsprogs',
+            'lvm2', 'parted', 'smartmontools']
+
+REMOVE_PACKAGES = []
+CHRONY_PACKAGE = 'chrony'
+
+CEPH_KEY_MANAGER = 'ceph'
+VAULT_KEY_MANAGER = 'vault'
+KEY_MANAGERS = [
+    CEPH_KEY_MANAGER,
+    VAULT_KEY_MANAGER,
+]
+
+LinkSpeed = {
+    "BASE_10": 10,
+    "BASE_100": 100,
+    "BASE_1000": 1000,
+    "GBASE_10": 10000,
+    "GBASE_40": 40000,
+    "GBASE_100": 100000,
+    "UNKNOWN": None
+}
+
+# Mapping of adapter speed to sysctl settings
+NETWORK_ADAPTER_SYSCTLS = {
+    # 10Gb
+    LinkSpeed["GBASE_10"]: {
+        'net.core.rmem_default': 524287,
+        'net.core.wmem_default': 524287,
+        'net.core.rmem_max': 524287,
+        'net.core.wmem_max': 524287,
+        'net.core.optmem_max': 524287,
+        'net.core.netdev_max_backlog': 300000,
+        'net.ipv4.tcp_rmem': '10000000 10000000 10000000',
+        'net.ipv4.tcp_wmem': '10000000 10000000 10000000',
+        'net.ipv4.tcp_mem': '10000000 10000000 10000000'
+    },
+    # Mellanox 10/40Gb
+    LinkSpeed["GBASE_40"]: {
+        'net.ipv4.tcp_timestamps': 0,
+        'net.ipv4.tcp_sack': 1,
+        'net.core.netdev_max_backlog': 250000,
+        'net.core.rmem_max': 4194304,
+        'net.core.wmem_max': 4194304,
+        'net.core.rmem_default': 4194304,
+        'net.core.wmem_default': 4194304,
+        'net.core.optmem_max': 4194304,
+        'net.ipv4.tcp_rmem': '4096 87380 4194304',
+        'net.ipv4.tcp_wmem': '4096 65536 4194304',
+        'net.ipv4.tcp_low_latency': 1,
+        'net.ipv4.tcp_adv_win_scale': 1
+    }
+}
+
+
+class Partition(object):
+    def __init__(self, name, number, size, start, end, sectors, uuid):
+        """A block device partition.
+
+        :param name: Name of block device
+        :param number: Partition number
+        :param size: Capacity of the device
+        :param start: Starting block
+        :param end: Ending block
+        :param sectors: Number of blocks
+        :param uuid: UUID of the partition
+        """
+        self.name = name
+        self.number = number
+        self.size = size
+        self.start = start
+        self.end = end
+        self.sectors = sectors
+        self.uuid = uuid
+
+    def __str__(self):
+        return "number: {} start: {} end: {} sectors: {} size: {} " \
+               "name: {} uuid: {}".format(self.number, self.start,
+                                          self.end,
+                                          self.sectors, self.size,
+                                          self.name, self.uuid)
+
+    def __eq__(self, other):
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+def unmounted_disks():
+    """List of unmounted block devices on the current host."""
+    disks = []
+    context = pyudev.Context()
+    for device in context.list_devices(DEVTYPE='disk'):
+        if device['SUBSYSTEM'] == 'block':
+            if device.device_node is None:
+                continue
+
+            if any(block_type in device.device_node
+                   for block_type in (u'dm-', u'loop', u'ram', u'nbd')):
+                continue
+
+            disks.append(device.device_node)
+    log("Found disks: {}".format(disks))
+    return [disk for disk in disks if not is_device_mounted(disk)]
+
+
+def save_sysctls(sysctl_dict, save_location):
+    """Persist the sysctls to the hard drive.
+
+    :param sysctl_dict: dict
+    :param save_location: path to save the settings to
+    :raises: IOError if anything goes wrong with writing.
+    """
+    try:
+        # Persist the settings for reboots
+        with open(save_location, "w") as fd:
+            for key, value in sysctl_dict.items():
+                fd.write("{}={}\n".format(key, value))
+
+    except IOError as e:
+        log("Unable to persist sysctl settings to {}. Error {}".format(
+            save_location, e), level=ERROR)
+        raise
+
+
+def tune_nic(network_interface):
+    """This will set optimal sysctls for the particular network adapter.
+
+    :param network_interface: string The network adapter name.
+    """
+    speed = get_link_speed(network_interface)
+    if speed in NETWORK_ADAPTER_SYSCTLS:
+        status_set('maintenance', 'Tuning device {}'.format(
+            network_interface))
+        sysctl_file = os.path.join(
+            os.sep,
+            'etc',
+            'sysctl.d',
+            '51-ceph-osd-charm-{}.conf'.format(network_interface))
+        try:
+            log("Saving sysctl_file: {} values: {}".format(
+                sysctl_file, NETWORK_ADAPTER_SYSCTLS[speed]),
+                level=DEBUG)
+            save_sysctls(sysctl_dict=NETWORK_ADAPTER_SYSCTLS[speed],
+                         save_location=sysctl_file)
+        except IOError as e:
+            log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} "
+                "failed. {}".format(network_interface, e),
+                level=ERROR)
+
+        try:
+            # Apply the settings
+            log("Applying sysctl settings", level=DEBUG)
+            subprocess.check_output(["sysctl", "-p", sysctl_file])
+        except subprocess.CalledProcessError as err:
+            log('sysctl -p {} failed with error {}'.format(sysctl_file,
+                                                           err.output),
+                level=ERROR)
+    else:
+        log("No settings found for network adapter: {}".format(
+            network_interface), level=DEBUG)
+
+
+def get_link_speed(network_interface):
+    """This will find the link speed for a given network device. Returns None
+    if an error occurs.
+    :param network_interface: string The network adapter interface.
+    :returns: LinkSpeed
+    """
+    speed_path = os.path.join(os.sep, 'sys', 'class', 'net',
+                              network_interface, 'speed')
+    # If the sysfs entry is missing the link speed cannot be determined
+    if not os.path.exists(speed_path):
+        return LinkSpeed["UNKNOWN"]
+
+    try:
+        with open(speed_path, 'r') as sysfs:
+            nic_speed = sysfs.readlines()
+
+            # Did we actually read anything?
+            if not nic_speed:
+                return LinkSpeed["UNKNOWN"]
+
+            # Try to find a sysctl match for this particular speed
+            for name, speed in LinkSpeed.items():
+                if speed == int(nic_speed[0].strip()):
+                    return speed
+            # Default to UNKNOWN if we can't find a match
+            return LinkSpeed["UNKNOWN"]
+    except IOError as e:
+        log("Unable to open {path} because of error: {error}".format(
+            path=speed_path,
+            error=e), level=ERROR)
+        return LinkSpeed["UNKNOWN"]
+
+
+def persist_settings(settings_dict):
+    """Persist the hard drive settings to the /etc/hdparm.conf file.
+
+    The settings_dict should be in the form of
+    {"drive_settings": {"uuid": {"key": "value"}}}
+
+    :param settings_dict: dict of settings to save
+    """
+    if not settings_dict:
+        return
+
+    try:
+        templating.render(source='hdparm.conf', target=HDPARM_FILE,
+                          context=settings_dict)
+    except IOError as err:
+        log("Unable to open {path} because of error: {error}".format(
+            path=HDPARM_FILE, error=err), level=ERROR)
+    except Exception as e:
+        # The templating.render can raise a jinja2 exception if the
+        # template is not found. Rather than polluting the import
+        # space of this charm, simply catch Exception
+        log('Unable to render {path} due to error: {error}'.format(
+            path=HDPARM_FILE, error=e), level=ERROR)
+
+
+def set_max_sectors_kb(dev_name, max_sectors_size):
+    """This function sets the max_sectors_kb size of a given block device.
+
+    :param dev_name: Name of the block device to query
+    :param max_sectors_size: int of the max_sectors_size to save
+    """
+    max_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name,
+                                       'queue', 'max_sectors_kb')
+    try:
+        with open(max_sectors_kb_path, 'w') as f:
+            f.write(str(max_sectors_size))
+    except IOError as e:
+        log('Failed to write max_sectors_kb to {}. Error: {}'.format(
+            max_sectors_kb_path, e), level=ERROR)
+
+
+def get_max_sectors_kb(dev_name):
+    """This function gets the max_sectors_kb size of a given block device.
+
+    :param dev_name: Name of the block device to query
+    :returns: int which is either the max_sectors_kb or 0 on error.
+    """
+    max_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name,
+                                       'queue', 'max_sectors_kb')
+
+    # Read in what Linux has set by default
+    if os.path.exists(max_sectors_kb_path):
+        try:
+            with open(max_sectors_kb_path, 'r') as f:
+                max_sectors_kb = f.read().strip()
+                return int(max_sectors_kb)
+        except IOError as e:
+            log('Failed to read max_sectors_kb from {}. Error: {}'.format(
+                max_sectors_kb_path, e), level=ERROR)
+            # Bail.
+            return 0
+    return 0
+
+
+def get_max_hw_sectors_kb(dev_name):
+    """This function gets the max_hw_sectors_kb for a given block device.
+
+    :param dev_name: Name of the block device to query
+    :returns: int which is either the max_hw_sectors_kb or 0 on error.
+    """
+    max_hw_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name,
+                                          'queue', 'max_hw_sectors_kb')
+    # Read in what the hardware supports
+    if os.path.exists(max_hw_sectors_kb_path):
+        try:
+            with open(max_hw_sectors_kb_path, 'r') as f:
+                max_hw_sectors_kb = f.read().strip()
+                return int(max_hw_sectors_kb)
+        except IOError as e:
+            log('Failed to read max_hw_sectors_kb from {}. Error: {}'.format(
+                max_hw_sectors_kb_path, e), level=ERROR)
+            return 0
+    return 0
+
+
+def set_hdd_read_ahead(dev_name, read_ahead_sectors=256):
+    """This function sets the hard drive read ahead.
+
+    :param dev_name: Name of the block device to set read ahead on.
+    :param read_ahead_sectors: int How many sectors to read ahead.
+    """
+    try:
+        # Set the read ahead sectors to 256
+        log('Setting read ahead to {} for device {}'.format(
+            read_ahead_sectors,
+            dev_name))
+        subprocess.check_output(['hdparm',
+                                 '-a{}'.format(read_ahead_sectors),
+                                 dev_name])
+    except subprocess.CalledProcessError as e:
+        log('hdparm failed with error: {}'.format(e.output),
+            level=ERROR)
+
+
+def get_block_uuid(block_dev):
+    """This queries blkid to get the uuid for a block device.
+
+    :param block_dev: Name of the block device to query.
+    :returns: The UUID of the device or None on Error.
+    """
+    try:
+        block_info = str(subprocess
+                         .check_output(['blkid', '-o', 'export', block_dev])
+                         .decode('UTF-8'))
+        for tag in block_info.split('\n'):
+            parts = tag.split('=')
+            if parts[0] == 'UUID':
+                return parts[1]
+        return None
+    except subprocess.CalledProcessError as err:
+        log('get_block_uuid failed with error: {}'.format(err.output),
+            level=ERROR)
+        return None
+
+
+def check_max_sectors(save_settings_dict,
+                      block_dev,
+                      uuid):
+    """Tune the max_hw_sectors if needed.
+
+    make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb or at
+    least 1MB for spinning disks
+    If the box has a RAID card with cache this could go much bigger.
+
+    :param save_settings_dict: The dict used to persist settings
+    :param block_dev: A block device name: Example: /dev/sda
+    :param uuid: The uuid of the block device
+    """
+    # os.path.split always returns a 2-tuple, so check the basename
+    # component rather than the tuple length
+    dev_name = os.path.basename(block_dev)
+    if not dev_name:
+        log('Unable to determine the block device name from path: {}'.format(
+            block_dev))
+        # Play it safe and bail
+        return
+    max_sectors_kb = get_max_sectors_kb(dev_name=dev_name)
+    max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name)
+
+    if max_sectors_kb < max_hw_sectors_kb:
+        # OK we have a situation where the hardware supports more than Linux is
+        # currently requesting
+        config_max_sectors_kb = hookenv.config('max-sectors-kb')
+        if config_max_sectors_kb < max_hw_sectors_kb:
+            # Set the max_sectors_kb to the config.yaml value if it is less
+            # than the max_hw_sectors_kb
+            log('Setting max_sectors_kb for device {} to {}'.format(
+                dev_name, config_max_sectors_kb))
+            save_settings_dict[
+                "drive_settings"][uuid][
+                "read_ahead_sect"] = config_max_sectors_kb
+            set_max_sectors_kb(dev_name=dev_name,
+                               max_sectors_size=config_max_sectors_kb)
+        else:
+            # Set to the max_hw_sectors_kb
+            log('Setting max_sectors_kb for device {} to {}'.format(
+                dev_name, max_hw_sectors_kb))
+            save_settings_dict[
+                "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb
+            set_max_sectors_kb(dev_name=dev_name,
+                               max_sectors_size=max_hw_sectors_kb)
+    else:
+        log('max_sectors_kb matches max_hw_sectors_kb. No change needed for '
+            'device: {}'.format(block_dev))
+
+
+def tune_dev(block_dev):
+    """Try to make some intelligent decisions with HDD tuning. Future work will
+    include optimizing SSDs.
+
+    This function will change the read ahead sectors and the max write
+    sectors for each block device.
+
+    :param block_dev: A block device name: Example: /dev/sda
+    """
+    uuid = get_block_uuid(block_dev)
+    if uuid is None:
+        log('block device {} uuid is None. Unable to save to '
+            'hdparm.conf'.format(block_dev), level=DEBUG)
+        return
+    save_settings_dict = {}
+    log('Tuning device {}'.format(block_dev))
+    status_set('maintenance', 'Tuning device {}'.format(block_dev))
+    set_hdd_read_ahead(block_dev)
+    save_settings_dict["drive_settings"] = {}
+    save_settings_dict["drive_settings"][uuid] = {}
+    save_settings_dict["drive_settings"][uuid]['read_ahead_sect'] = 256
+
+    check_max_sectors(block_dev=block_dev,
+                      save_settings_dict=save_settings_dict,
+                      uuid=uuid)
+
+    persist_settings(settings_dict=save_settings_dict)
+    status_set('maintenance', 'Finished tuning device {}'.format(block_dev))
+
+
+def ceph_user():
+    if get_version() > 1:
+        return 'ceph'
+    else:
+        return "root"
+
+
+class CrushLocation(object):
+    def __init__(self,
+                 name,
+                 identifier,
+                 host,
+                 rack,
+                 row,
+                 datacenter,
+                 chassis,
+                 root):
+        self.name = name
+        self.identifier = identifier
+        self.host = host
+        self.rack = rack
+        self.row = row
+        self.datacenter = datacenter
+        self.chassis = chassis
+        self.root = root
+
+    def __str__(self):
+        return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \
+               "chassis :{} root: {}".format(self.name, self.identifier,
+                                             self.host, self.rack, self.row,
+                                             self.datacenter, self.chassis,
+                                             self.root)
+
+    def __eq__(self, other):
+        return self.name == other.name
+
+    def __ne__(self, other):
+        return self.name != other.name
+
+    def __gt__(self, other):
+        return self.name > other.name
+
+    def __ge__(self, other):
+        return self.name >= other.name
+
+    def __le__(self, other):
+        return self.name <= other.name
+
+
+def get_osd_weight(osd_id):
+    """Returns the weight of the specified OSD.
+
+    :returns: Float
+    :raises: ValueError if the OSD tree fails to parse.
+    :raises: CalledProcessError if our ceph command fails.
+    """
+    try:
+        tree = str(subprocess
+                   .check_output(['ceph', 'osd', 'tree', '--format=json'])
+                   .decode('UTF-8'))
+        try:
+            json_tree = json.loads(tree)
+            # Make sure children are present in the json
+            if not json_tree['nodes']:
+                return None
+            for device in json_tree['nodes']:
+                if device['type'] == 'osd' and device['name'] == osd_id:
+                    return device['crush_weight']
+        except ValueError as v:
+            log("Unable to parse ceph tree json: {}. Error: {}".format(
+                tree, v))
+            raise
+    except subprocess.CalledProcessError as e:
+        log("ceph osd tree command failed with message: {}".format(
+            e))
+        raise
+
+
+def get_osd_tree(service):
+    """Returns the current osd map in JSON.
+
+    :returns: List.
+    :raises: ValueError if the monmap fails to parse.
+             Also raises CalledProcessError if our ceph command fails
+    """
+    try:
+        tree = str(subprocess
+                   .check_output(['ceph', '--id', service,
+                                  'osd', 'tree', '--format=json'])
+                   .decode('UTF-8'))
+        try:
+            json_tree = json.loads(tree)
+            crush_list = []
+            # Make sure children are present in the json
+            if not json_tree['nodes']:
+                return None
+            host_nodes = [
+                node for node in json_tree['nodes']
+                if node['type'] == 'host'
+            ]
+            for host in host_nodes:
+                crush_list.append(
+                    CrushLocation(
+                        name=host.get('name'),
+                        identifier=host['id'],
+                        host=host.get('host'),
+                        rack=host.get('rack'),
+                        row=host.get('row'),
+                        datacenter=host.get('datacenter'),
+                        chassis=host.get('chassis'),
+                        root=host.get('root')
+                    )
+                )
+            return crush_list
+        except ValueError as v:
+            log("Unable to parse ceph tree json: {}. Error: {}".format(
+                tree, v))
+            raise
+    except subprocess.CalledProcessError as e:
+        log("ceph osd tree command failed with message: {}".format(
+            e))
+        raise
+
+
+def _get_child_dirs(path):
+    """Returns a list of directory names in the specified path.
+
+    :param path: a full path listing of the parent directory to return child
+                 directory names
+    :returns: list. A list of full paths of child directories under the
+              parent directory
+    :raises: ValueError if the specified path does not exist or is not a
+             directory,
+             OSError if an error occurs reading the directory listing
+    """
+    if not os.path.exists(path):
+        raise ValueError('Specified path "%s" does not exist' % path)
+    if not os.path.isdir(path):
+        raise ValueError('Specified path "%s" is not a directory' % path)
+
+    files_in_dir = [os.path.join(path, f) for f in os.listdir(path)]
+    return list(filter(os.path.isdir, files_in_dir))
+
+
+def _get_osd_num_from_dirname(dirname):
+    """Parses the dirname and returns the OSD id.
+
+    Parses a string in the form of 'ceph-{osd#}' and returns the osd number
+    from the directory name.
+
+    :param dirname: the directory name to return the OSD number from
+    :return str: the osd number the directory name corresponds to
+    :raises ValueError: if the osd number cannot be parsed from the provided
+                        directory name.
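+
+    For example, a dirname of 'ceph-42' yields '42'.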
+    """
+    match = re.search(r'ceph-(?P<osd_id>\d+)', dirname)
+    if not match:
+        raise ValueError("dirname not in correct format: {}".format(dirname))
+
+    return match.group('osd_id')
+
+
+def get_local_osd_ids():
+    """This will list the /var/lib/ceph/osd/* directories and try
+    to split the ID off of the directory name and return it in
+    a list.
+
+    :returns: list. A list of osd identifiers
+    :raises: OSError if something goes wrong with listing the directory.
+    """
+    osd_ids = []
+    osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd')
+    if os.path.exists(osd_path):
+        try:
+            dirs = os.listdir(osd_path)
+            for osd_dir in dirs:
+                osd_id = osd_dir.split('-')[1]
+                if _is_int(osd_id):
+                    osd_ids.append(osd_id)
+        except OSError:
+            raise
+    return osd_ids
+
+
+def get_local_mon_ids():
+    """This will list the /var/lib/ceph/mon/* directories and try
+    to split the ID off of the directory name and return it in
+    a list.
+
+    :returns: list. A list of monitor identifiers
+    :raises: OSError if something goes wrong with listing the directory.
+    """
+    mon_ids = []
+    mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon')
+    if os.path.exists(mon_path):
+        try:
+            dirs = os.listdir(mon_path)
+            for mon_dir in dirs:
+                # Basically this takes everything after ceph- as the monitor ID
+                match = re.search('ceph-(?P<mon_id>.*)', mon_dir)
+                if match:
+                    mon_ids.append(match.group('mon_id'))
+        except OSError:
+            raise
+    return mon_ids
+
+
+def _is_int(v):
+    """Return True if the object v can be turned into an integer."""
+    try:
+        int(v)
+        return True
+    except ValueError:
+        return False
+
+
+def get_version():
+    """Derive Ceph release from an installed package."""
+    import apt_pkg as apt
+
+    cache = apt_cache()
+    package = "ceph"
+    try:
+        pkg = cache[package]
+    except KeyError:
+        # the package is unknown to the current apt cache.
+        e = 'Could not determine version of package with no installation ' \
+            'candidate: %s' % package
+        error_out(e)
+
+    if not pkg.current_ver:
+        # package is known, but no version is currently installed.
+        e = 'Could not determine version of uninstalled package: %s' % package
+        error_out(e)
+
+    vers = apt.upstream_version(pkg.current_ver.ver_str)
+
+    # Match only the major.minor components of the version,
+    # ignoring any patch level
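+    # e.g. an installed ceph version of '12.2.13' yields 12.2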
+    match = re.match(r'^(\d+)\.(\d+)', vers)
+
+    if match:
+        vers = match.group(0)
+    return float(vers)
+
+
+def error_out(msg):
+    log("FATAL ERROR: {}".format(msg),
+        level=ERROR)
+    sys.exit(1)
+
+
+def is_quorum():
+    asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname())
+    cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
+        "ceph",
+        "--admin-daemon",
+        asok,
+        "mon_status"
+    ]
+    if os.path.exists(asok):
+        try:
+            result = json.loads(str(subprocess
+                                    .check_output(cmd)
+                                    .decode('UTF-8')))
+        except subprocess.CalledProcessError:
+            return False
+        except ValueError:
+            # Non JSON response from mon_status
+            return False
+        return result['state'] in QUORUM
+    else:
+        return False
+
+
+def is_leader():
+    asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname())
+    cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
+        "ceph",
+        "--admin-daemon",
+        asok,
+        "mon_status"
+    ]
+    if os.path.exists(asok):
+        try:
+            result = json.loads(str(subprocess
+                                    .check_output(cmd)
+                                    .decode('UTF-8')))
+        except subprocess.CalledProcessError:
+            return False
+        except ValueError:
+            # Non JSON response from mon_status
+            return False
+        return result['state'] == LEADER
+    else:
+        return False
+
+
+def manager_available():
+    # The mgr daemon only exists from Kraken (11.0.0) onwards; report it
+    # as available on older releases
+    if cmp_pkgrevno('ceph', '11.0.0') < 0:
+        return True
+    cmd = ["sudo", "-u", "ceph", "ceph", "mgr", "dump", "-f", "json"]
+    try:
+        result = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+        return result['available']
+    except subprocess.CalledProcessError as e:
+        log("'{}' failed: {}".format(" ".join(cmd), str(e)))
+        return False
+    except Exception:
+        return False
+
+
+def wait_for_quorum():
+    while not is_quorum():
+        log("Waiting for quorum to be reached")
+        time.sleep(3)
+
+
+def wait_for_manager():
+    while not manager_available():
+        log("Waiting for manager to be available")
+        time.sleep(5)
+
+
+def add_bootstrap_hint(peer):
+    asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname())
+    cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
+        "ceph",
+        "--admin-daemon",
+        asok,
+        "add_bootstrap_peer_hint",
+        peer
+    ]
+    if os.path.exists(asok):
+        # Ignore any errors for this call
+        subprocess.call(cmd)
+
+
+DISK_FORMATS = [
+    'xfs',
+    'ext4',
+    'btrfs'
+]
+
+CEPH_PARTITIONS = [
+    '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE',  # ceph encrypted disk in creation
+    '45B0969E-9B03-4F30-B4C6-5EC00CEFF106',  # ceph encrypted journal
+    '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D',  # ceph encrypted osd data
+    '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D',  # ceph osd data
+    '45B0969E-9B03-4F30-B4C6-B4B80CEFF106',  # ceph osd journal
+    '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE',  # ceph disk in creation
+]
+
+
+def get_partition_list(dev):
+    """Lists the partitions of a block device.
+
+    :param dev: Path to a block device. ex: /dev/sda
+    :returns: Returns a list of Partition objects.
+    :raises: subprocess.CalledProcessError if partx fails
+    """
+    partitions_list = []
+    try:
+        partitions = get_partitions(dev)
+        # For each line of output
+        for partition in partitions:
+            parts = partition.split()
+            try:
+                partitions_list.append(
+                    Partition(number=parts[0],
+                              start=parts[1],
+                              end=parts[2],
+                              sectors=parts[3],
+                              size=parts[4],
+                              name=parts[5],
+                              uuid=parts[6])
+                )
+            except IndexError:
+                partitions_list.append(
+                    Partition(number=parts[0],
+                              start=parts[1],
+                              end=parts[2],
+                              sectors=parts[3],
+                              size=parts[4],
+                              name="",
+                              uuid=parts[5])
+                )
+
+        return partitions_list
+    except subprocess.CalledProcessError:
+        raise
+
+
+def is_pristine_disk(dev):
+    """
+    Read first 2048 bytes (LBA 0 - 3) of block device to determine whether it
+    is actually all zeros and safe for us to use.
+
+    Existing partitioning tools do not distinguish between a failure to read
+    from a block device, a failure to understand a partition table and the
+    fact that a block device has no partition table.  Since we need to be
+    positive about which is which we read the device directly and confirm
+    ourselves.
+
+    :param dev: Path to block device
+    :type dev: str
+    :returns: True if all 2048 bytes == 0x0, False if not
+    :rtype: bool
+    """
+    want_bytes = 2048
+
+    try:
+        with open(dev, 'rb') as f:
+            data = f.read(want_bytes)
+    except OSError as e:
+        log(e)
+        return False
+
+    read_bytes = len(data)
+    if read_bytes != want_bytes:
+        log('{}: short read, got {} bytes expected {}.'
+            .format(dev, read_bytes, want_bytes), level=WARNING)
+        return False
+
+    return all(byte == 0x0 for byte in data)
+
+
+def is_osd_disk(dev):
+    db = kv()
+    osd_devices = db.get('osd-devices', [])
+    if dev in osd_devices:
+        log('Device {} already processed by charm,'
+            ' skipping'.format(dev))
+        return True
+
+    partitions = get_partition_list(dev)
+    for partition in partitions:
+        try:
+            info = str(subprocess
+                       .check_output(['sgdisk', '-i', partition.number, dev])
+                       .decode('UTF-8'))
+            info = info.split("\n")  # IGNORE:E1103
+            for line in info:
+                for ptype in CEPH_PARTITIONS:
+                    sig = 'Partition GUID code: {}'.format(ptype)
+                    if line.startswith(sig):
+                        return True
+        except subprocess.CalledProcessError as e:
+            log("sgdisk inspection of partition {} on {} failed with "
+                "error: {}. Skipping".format(partition.minor, dev, e),
+                level=ERROR)
+    return False
+
+
+def start_osds(devices):
+    # Scan for ceph block devices
+    rescan_osd_devices()
+    if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and
+            cmp_pkgrevno('ceph', '14.2.0') < 0):
+        # Use ceph-disk activate for directory based OSDs
+        for dev_or_path in devices:
+            if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path):
+                subprocess.check_call(
+                    ['ceph-disk', 'activate', dev_or_path])
+
+
+def udevadm_settle():
+    cmd = ['udevadm', 'settle']
+    subprocess.call(cmd)
+
+
+def rescan_osd_devices():
+    cmd = [
+        'udevadm', 'trigger',
+        '--subsystem-match=block', '--action=add'
+    ]
+
+    subprocess.call(cmd)
+
+    udevadm_settle()
+
+
+_client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring'
+
+
+def is_bootstrapped():
+    return os.path.exists(
+        '/var/lib/ceph/mon/ceph-{}/done'.format(socket.gethostname()))
+
+
+def wait_for_bootstrap():
+    while not is_bootstrapped():
+        time.sleep(3)
+
+
+def generate_monitor_secret():
+    cmd = [
+        'ceph-authtool',
+        '/dev/stdout',
+        '--name=mon.',
+        '--gen-key'
+    ]
+    res = str(subprocess.check_output(cmd).decode('UTF-8'))
+
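+    # ceph-authtool prints a keyring section such as "[mon.]\n\tkey = <key>";
+    # splitting on '=' also strips the key's trailing base64 '==' padding,
+    # hence it is re-appended below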
+    return "{}==".format(res.split('=')[1].strip())
+
+
+# OSD caps taken from ceph-create-keys
+_osd_bootstrap_caps = {
+    'mon': [
+        'allow command osd create ...',
+        'allow command osd crush set ...',
+        r'allow command auth add * osd allow\ * mon allow\ rwx',
+        'allow command mon getmap'
+    ]
+}
+
+_osd_bootstrap_caps_profile = {
+    'mon': [
+        'allow profile bootstrap-osd'
+    ]
+}
+
+
+def parse_key(raw_key):
+    # get-or-create appears to have different output depending
+    # on whether it's 'get' or 'create'
+    # 'create' just returns the key, 'get' is more verbose and
+    # needs parsing
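+    # Illustrative 'get' output that the loop below parses:
+    #     [client.foo]
+    #         key = AQBzN3Vc...==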
+    key = None
+    if len(raw_key.splitlines()) == 1:
+        key = raw_key
+    else:
+        for element in raw_key.splitlines():
+            if 'key' in element:
+                return element.split(' = ')[1].strip()  # IGNORE:E1103
+    return key
+
+
+def get_osd_bootstrap_key():
+    try:
+        # Attempt to get/create a key using the OSD bootstrap profile first
+        key = get_named_key('bootstrap-osd',
+                            _osd_bootstrap_caps_profile)
+    except Exception:
+        # If that fails try with the older style permissions
+        key = get_named_key('bootstrap-osd',
+                            _osd_bootstrap_caps)
+    return key
+
+
+_radosgw_keyring = "/etc/ceph/keyring.rados.gateway"
+
+
+def import_radosgw_key(key):
+    if not os.path.exists(_radosgw_keyring):
+        cmd = [
+            "sudo",
+            "-u",
+            ceph_user(),
+            'ceph-authtool',
+            _radosgw_keyring,
+            '--create-keyring',
+            '--name=client.radosgw.gateway',
+            '--add-key={}'.format(key)
+        ]
+        subprocess.check_call(cmd)
+
+
+# OSD caps taken from ceph-create-keys
+_radosgw_caps = {
+    'mon': ['allow rw'],
+    'osd': ['allow rwx']
+}
+_upgrade_caps = {
+    'mon': ['allow rwx']
+}
+
+
+def get_radosgw_key(pool_list=None, name=None):
+    return get_named_key(name=name or 'radosgw.gateway',
+                         caps=_radosgw_caps,
+                         pool_list=pool_list)
+
+
+def get_mds_key(name):
+    return create_named_keyring(entity='mds',
+                                name=name,
+                                caps=mds_caps)
+
+
+_mds_bootstrap_caps_profile = {
+    'mon': [
+        'allow profile bootstrap-mds'
+    ]
+}
+
+
+def get_mds_bootstrap_key():
+    return get_named_key('bootstrap-mds',
+                         _mds_bootstrap_caps_profile)
+
+
+_default_caps = collections.OrderedDict([
+    ('mon', ['allow r',
+             'allow command "osd blacklist"']),
+    ('osd', ['allow rwx']),
+])
+
+admin_caps = collections.OrderedDict([
+    ('mds', ['allow *']),
+    ('mgr', ['allow *']),
+    ('mon', ['allow *']),
+    ('osd', ['allow *'])
+])
+
+mds_caps = collections.OrderedDict([
+    ('osd', ['allow *']),
+    ('mds', ['allow']),
+    ('mon', ['allow rwx']),
+])
+
+osd_upgrade_caps = collections.OrderedDict([
+    ('mon', ['allow command "config-key"',
+             'allow command "osd tree"',
+             'allow command "config-key list"',
+             'allow command "config-key put"',
+             'allow command "config-key get"',
+             'allow command "config-key exists"',
+             'allow command "osd out"',
+             'allow command "osd in"',
+             'allow command "osd rm"',
+             'allow command "auth del"',
+             ])
+])
+
+rbd_mirror_caps = collections.OrderedDict([
+    ('mon', ['profile rbd; allow r']),
+    ('osd', ['profile rbd']),
+    ('mgr', ['allow r']),
+])
+
+
+def get_rbd_mirror_key(name):
+    return get_named_key(name=name, caps=rbd_mirror_caps)
+
+
+def create_named_keyring(entity, name, caps=None):
+    caps = caps or _default_caps
+    cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
+        'ceph',
+        '--name', 'mon.',
+        '--keyring',
+        '/var/lib/ceph/mon/ceph-{}/keyring'.format(
+            socket.gethostname()
+        ),
+        'auth', 'get-or-create', '{entity}.{name}'.format(entity=entity,
+                                                          name=name),
+    ]
+    for subsystem, subcaps in caps.items():
+        cmd.extend([subsystem, '; '.join(subcaps)])
+    log("Calling check_output: {}".format(cmd), level=DEBUG)
+    return (parse_key(str(subprocess
+                          .check_output(cmd)
+                          .decode('UTF-8'))
+                      .strip()))  # IGNORE:E1103
+
+
+def get_upgrade_key():
+    return get_named_key('upgrade-osd', _upgrade_caps)
+
+
+def get_named_key(name, caps=None, pool_list=None):
+    """Retrieve a specific named cephx key.
+
+    :param name: String Name of key to get.
+    :param pool_list: The list of pools to give access to
+    :param caps: dict of cephx capabilities
+    :returns: Returns a cephx key
+    """
+    key_name = 'client.{}'.format(name)
+    try:
+        # Does the key already exist?
+        output = str(subprocess.check_output(
+            [
+                'sudo',
+                '-u', ceph_user(),
+                'ceph',
+                '--name', 'mon.',
+                '--keyring',
+                '/var/lib/ceph/mon/ceph-{}/keyring'.format(
+                    socket.gethostname()
+                ),
+                'auth',
+                'get',
+                key_name,
+            ]).decode('UTF-8')).strip()
+        # NOTE(jamespage):
+        # Apply any changes to key capabilities, dealing with
+        # upgrades which require new caps for operation.
+        upgrade_key_caps(key_name,
+                         caps or _default_caps,
+                         pool_list)
+        return parse_key(output)
+    except subprocess.CalledProcessError:
+        # Couldn't get the key, time to create it!
+        log("Creating new key for {}".format(name), level=DEBUG)
+    caps = caps or _default_caps
+    cmd = [
+        "sudo",
+        "-u",
+        ceph_user(),
+        'ceph',
+        '--name', 'mon.',
+        '--keyring',
+        '/var/lib/ceph/mon/ceph-{}/keyring'.format(
+            socket.gethostname()
+        ),
+        'auth', 'get-or-create', key_name,
+    ]
+    # Add capabilities
+    for subsystem, subcaps in caps.items():
+        if subsystem == 'osd':
+            if pool_list:
+                # This will output a string similar to:
+                # "pool=rgw pool=rbd pool=something"
+                pools = " ".join(['pool={0}'.format(i) for i in pool_list])
+                subcaps[0] = subcaps[0] + " " + pools
+        cmd.extend([subsystem, '; '.join(subcaps)])
+
+    log("Calling check_output: {}".format(cmd), level=DEBUG)
+    return parse_key(str(subprocess
+                         .check_output(cmd)
+                         .decode('UTF-8'))
+                     .strip())  # IGNORE:E1103
+
+
+def upgrade_key_caps(key, caps, pool_list=None):
+    """ Upgrade key to have capabilities caps """
+    if not is_leader():
+        # Not the MON leader OR not clustered
+        return
+    cmd = [
+        "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key
+    ]
+    for subsystem, subcaps in caps.items():
+        if subsystem == 'osd':
+            if pool_list:
+                # This will output a string similar to:
+                # "pool=rgw pool=rbd pool=something"
+                pools = " ".join(['pool={0}'.format(i) for i in pool_list])
+                subcaps[0] = subcaps[0] + " " + pools
+        cmd.extend([subsystem, '; '.join(subcaps)])
+    subprocess.check_call(cmd)
+
+
+@cached
+def systemd():
+    return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid'
+
+
+def use_bluestore():
+    """Determine whether bluestore should be used for OSD's
+
+    :returns: whether bluestore disk format should be used
+    :rtype: bool"""
+    if cmp_pkgrevno('ceph', '12.2.0') < 0:
+        return False
+    return config('bluestore')
+
+
+def bootstrap_monitor_cluster(secret):
+    """Bootstrap local ceph mon into the ceph cluster
+
+    :param secret: cephx secret to use for monitor authentication
+    :type secret: str
+    :raises: Exception if ceph mon cannot be bootstrapped
+    """
+    hostname = socket.gethostname()
+    path = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
+    done = '{}/done'.format(path)
+    if systemd():
+        init_marker = '{}/systemd'.format(path)
+    else:
+        init_marker = '{}/upstart'.format(path)
+
+    keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname)
+
+    if os.path.exists(done):
+        log('bootstrap_monitor_cluster: mon already initialized.')
+    else:
+        # Ceph >= 0.61.3 needs this for ceph-mon fs creation
+        mkdir('/var/run/ceph', owner=ceph_user(),
+              group=ceph_user(), perms=0o755)
+        mkdir(path, owner=ceph_user(), group=ceph_user(),
+              perms=0o755)
+        # end changes for Ceph >= 0.61.3
+        try:
+            _create_monitor(keyring,
+                            secret,
+                            hostname,
+                            path,
+                            done,
+                            init_marker)
+        finally:
+            os.unlink(keyring)
+
+
+def _create_monitor(keyring, secret, hostname, path, done, init_marker):
+    """Create monitor filesystem and enable and start ceph-mon process
+
+    :param keyring: path to temporary keyring on disk
+    :type keyring: str
+    :param secret: cephx secret to use for monitor authentication
+    :type: secret: str
+    :param hostname: hostname of the local unit
+    :type hostname: str
+    :param path: full path to ceph mon directory
+    :type path: str
+    :param done: full path to 'done' marker for ceph mon
+    :type done: str
+    :param init_marker: full path to 'init' marker for ceph mon
+    :type init_marker: str
+    """
+    subprocess.check_call(['ceph-authtool', keyring,
+                           '--create-keyring', '--name=mon.',
+                           '--add-key={}'.format(secret),
+                           '--cap', 'mon', 'allow *'])
+    subprocess.check_call(['ceph-mon', '--mkfs',
+                           '-i', hostname,
+                           '--keyring', keyring])
+    chownr('/var/log/ceph', ceph_user(), ceph_user())
+    chownr(path, ceph_user(), ceph_user())
+    with open(done, 'w'):
+        pass
+    with open(init_marker, 'w'):
+        pass
+
+    if systemd():
+        if cmp_pkgrevno('ceph', '14.0.0') >= 0:
+            systemd_unit = 'ceph-mon@{}'.format(socket.gethostname())
+        else:
+            systemd_unit = 'ceph-mon'
+        subprocess.check_call(['systemctl', 'enable', systemd_unit])
+        service_restart(systemd_unit)
+    else:
+        service_restart('ceph-mon-all')
+
+
+def create_keyrings():
+    """Create keyrings for operation of ceph-mon units
+
+    NOTE: Quorum must be reached before executing this function.
+
+    :raises: Exception if keyrings cannot be created
+    """
+    if cmp_pkgrevno('ceph', '14.0.0') >= 0:
+        # NOTE(jamespage): At Nautilus, keys are created by the
+        #                  monitors automatically and just need
+        #                  exporting.
+        output = str(subprocess.check_output(
+            [
+                'sudo',
+                '-u', ceph_user(),
+                'ceph',
+                '--name', 'mon.',
+                '--keyring',
+                '/var/lib/ceph/mon/ceph-{}/keyring'.format(
+                    socket.gethostname()
+                ),
+                'auth', 'get', 'client.admin',
+            ]).decode('UTF-8')).strip()
+        if not output:
+            # NOTE: key not yet created, raise exception and retry
+            raise Exception
+        # NOTE: octopus wants newline at end of file LP: #1864706
+        output += '\n'
+        write_file(_client_admin_keyring, output,
+                   owner=ceph_user(), group=ceph_user(),
+                   perms=0o400)
+    else:
+        # NOTE(jamespage): Later ceph releases require explicit
+        #                  call to ceph-create-keys to setup the
+        #                  admin keys for the cluster; this command
+        #                  will wait for quorum in the cluster before
+        #                  returning.
+        # NOTE(fnordahl): Explicitly run `ceph-create-keys` for older
+        #                 ceph releases too.  This improves bootstrap
+        #                 resilience as the charm will wait for
+        #                 presence of peer units before attempting
+        #                 to bootstrap.  Note that charms deploying
+        #                 ceph-mon service should disable running of
+        #                 `ceph-create-keys` service in init system.
+        cmd = ['ceph-create-keys', '--id', socket.gethostname()]
+        if cmp_pkgrevno('ceph', '12.0.0') >= 0:
+            # NOTE(fnordahl): The default timeout in ceph-create-keys of 600
+            #                 seconds is not adequate.  Increase timeout when
+            #                 timeout parameter available.  For older releases
+            #                 we rely on retry_on_exception decorator.
+            #                 LP#1719436
+            cmd.extend(['--timeout', '1800'])
+        subprocess.check_call(cmd)
+        osstat = os.stat(_client_admin_keyring)
+        if not osstat.st_size:
+            # NOTE(fnordahl): Retry will fail as long as this file exists.
+            #                 LP#1719436
+            os.remove(_client_admin_keyring)
+            raise Exception
+
+
+def update_monfs():
+    hostname = socket.gethostname()
+    monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
+    if systemd():
+        init_marker = '{}/systemd'.format(monfs)
+    else:
+        init_marker = '{}/upstart'.format(monfs)
+    if os.path.exists(monfs) and not os.path.exists(init_marker):
+        # Mark mon as managed by upstart so that
+        # it gets start correctly on reboots
+        with open(init_marker, 'w'):
+            pass
+
+
+def get_partitions(dev):
+    cmd = ['partx', '--raw', '--noheadings', dev]
+    try:
+        out = str(subprocess.check_output(cmd).decode('UTF-8')).splitlines()
+        log("get partitions: {}".format(out), level=DEBUG)
+        return out
+    except subprocess.CalledProcessError as e:
+        log("Can't get info for {0}: {1}".format(dev, e.output))
+        return []
+
+
+def get_lvs(dev):
+    """
+    List logical volumes for the provided block device
+
+    :param: dev: Full path to block device.
+    :raises subprocess.CalledProcessError: in the event that any supporting
+                                           operation failed.
+    :returns: list: List of logical volumes provided by the block device
+    """
+    if not lvm.is_lvm_physical_volume(dev):
+        return []
+    vg_name = lvm.list_lvm_volume_group(dev)
+    return lvm.list_logical_volumes('vg_name={}'.format(vg_name))
+
+
+def find_least_used_utility_device(utility_devices, lvs=False):
+    """
+    Find a utility device which has the smallest number of partitions
+    among other devices in the supplied list.
+
+    :utility_devices: A list of devices to be used for filestore journal
+    or bluestore wal or db.
+    :lvs: flag to indicate whether inspection should be based on LVM LV's
+    :return: string device name
+    """
+    if lvs:
+        usages = map(lambda a: (len(get_lvs(a)), a), utility_devices)
+    else:
+        usages = map(lambda a: (len(get_partitions(a)), a), utility_devices)
+    least = min(usages, key=lambda t: t[0])
+    return least[1]
+
+
+def get_devices(name):
+    """ Merge config and juju storage based devices
+
+    :name: The name of the device type, eg: wal, osd, journal
+    :returns: Set(device names), which are strings
+    """
+    if config(name):
+        devices = [dev.strip() for dev in config(name).split(' ')]
+    else:
+        devices = []
+    storage_ids = storage_list(name)
+    devices.extend((storage_get('location', sid) for sid in storage_ids))
+    devices = filter(os.path.exists, devices)
+
+    return set(devices)
+
+
+def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False,
+           bluestore=False, key_manager=CEPH_KEY_MANAGER):
+    if dev.startswith('/dev'):
+        osdize_dev(dev, osd_format, osd_journal,
+                   ignore_errors, encrypt,
+                   bluestore, key_manager)
+    else:
+        if cmp_pkgrevno('ceph', '14.0.0') >= 0:
+            log("Directory backed OSDs can not be created on Nautilus",
+                level=WARNING)
+            return
+        osdize_dir(dev, encrypt, bluestore)
+
+
+def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False,
+               encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER):
+    """
+    Prepare a block device for use as a Ceph OSD
+
+    A block device will only be prepared once during the lifetime
+    of the calling charm unit; future executions will be skipped.
+
+    :param: dev: Full path to block device to use
+    :param: osd_format: Format for OSD filesystem
+    :param: osd_journal: List of block devices to use for OSD journals
+    :param: ignore_errors: Don't fail in the event of any errors during
+                           processing
+    :param: encrypt: Encrypt block devices using 'key_manager'
+    :param: bluestore: Use bluestore native ceph block device format
+    :param: key_manager: Key management approach for encryption keys
+    :raises subprocess.CalledProcessError: in the event that any supporting
+                                           subprocess operation failed
+    :raises ValueError: if an invalid key_manager is provided
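+
+    Illustrative call (device path is an assumption)::
+
+        osdize_dev('/dev/sdb', osd_format='xfs', osd_journal=[],
+                   bluestore=True)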
+    """
+    if key_manager not in KEY_MANAGERS:
+        raise ValueError('Unsupported key manager: {}'.format(key_manager))
+
+    db = kv()
+    osd_devices = db.get('osd-devices', [])
+    try:
+        if dev in osd_devices:
+            log('Device {} already processed by charm,'
+                ' skipping'.format(dev))
+            return
+
+        if not os.path.exists(dev):
+            log('Path {} does not exist - bailing'.format(dev))
+            return
+
+        if not is_block_device(dev):
+            log('Path {} is not a block device - bailing'.format(dev))
+            return
+
+        if is_osd_disk(dev):
+            log('Looks like {} is already an'
+                ' OSD data or journal, skipping.'.format(dev))
+            if is_device_mounted(dev):
+                osd_devices.append(dev)
+            return
+
+        if is_device_mounted(dev):
+            log('Looks like {} is in use, skipping.'.format(dev))
+            return
+
+        if is_active_bluestore_device(dev):
+            log('{} is in use as an active bluestore block device,'
+                ' skipping.'.format(dev))
+            osd_devices.append(dev)
+            return
+
+        if is_mapped_luks_device(dev):
+            log('{} is a mapped LUKS device,'
+                ' skipping.'.format(dev))
+            return
+
+        if cmp_pkgrevno('ceph', '12.2.4') >= 0:
+            cmd = _ceph_volume(dev,
+                               osd_journal,
+                               encrypt,
+                               bluestore,
+                               key_manager)
+        else:
+            cmd = _ceph_disk(dev,
+                             osd_format,
+                             osd_journal,
+                             encrypt,
+                             bluestore)
+
+        try:
+            status_set('maintenance', 'Initializing device {}'.format(dev))
+            log("osdize cmd: {}".format(cmd))
+            subprocess.check_call(cmd)
+        except subprocess.CalledProcessError:
+            # Initialize so the references below are safe even when the
+            # lsblk call itself fails.
+            lsblk_output = None
+            try:
+                lsblk_output = subprocess.check_output(
+                    ['lsblk', '-P']).decode('UTF-8')
+            except subprocess.CalledProcessError as e:
+                log("Couldn't get lsblk output: {}".format(e), ERROR)
+            if ignore_errors:
+                log('Unable to initialize device: {}'.format(dev), WARNING)
+                if lsblk_output:
+                    log('lsblk output: {}'.format(lsblk_output), DEBUG)
+            else:
+                log('Unable to initialize device: {}'.format(dev), ERROR)
+                if lsblk_output:
+                    log('lsblk output: {}'.format(lsblk_output), WARNING)
+                raise
+
+        # NOTE: Record processing of device only on success to ensure that
+        #       the charm only tries to initialize a device of OSD usage
+        #       once during its lifetime.
+        osd_devices.append(dev)
+    finally:
+        db.set('osd-devices', osd_devices)
+        db.flush()
+
+
+def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False):
+    """
+    Prepare a device for usage as a Ceph OSD using ceph-disk
+
+    :param: dev: Full path to use for OSD block device setup,
+                 The function looks up realpath of the device
+    :param: osd_format: Format for OSD filesystem (filestore only)
+    :param: osd_journal: List of block devices to use for OSD journals
+    :param: encrypt: Use block device encryption (unsupported)
+    :param: bluestore: Use bluestore storage for OSD
+    :returns: list. 'ceph-disk' command and required parameters for
+                    execution by check_call
+    """
+    cmd = ['ceph-disk', 'prepare']
+
+    if encrypt:
+        cmd.append('--dmcrypt')
+
+    if osd_format and not bluestore:
+        cmd.append('--fs-type')
+        cmd.append(osd_format)
+
+    # NOTE(jamespage): enable experimental bluestore support
+    if use_bluestore():
+        cmd.append('--bluestore')
+        wal = get_devices('bluestore-wal')
+        if wal:
+            cmd.append('--block.wal')
+            least_used_wal = find_least_used_utility_device(wal)
+            cmd.append(least_used_wal)
+        db = get_devices('bluestore-db')
+        if db:
+            cmd.append('--block.db')
+            least_used_db = find_least_used_utility_device(db)
+            cmd.append(least_used_db)
+    elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore:
+        cmd.append('--filestore')
+
+    cmd.append(os.path.realpath(dev))
+
+    if osd_journal:
+        least_used = find_least_used_utility_device(osd_journal)
+        cmd.append(least_used)
+
+    return cmd
+
+
+def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
+                 key_manager=CEPH_KEY_MANAGER):
+    """
+    Prepare and activate a device for usage as a Ceph OSD using ceph-volume.
+
+    This also includes creation of all PVs, VGs and LVs required to
+    support the initialization of the OSD.
+
+    :param: dev: Full path to use for OSD block device setup
+    :param: osd_journal: List of block devices to use for OSD journals
+    :param: encrypt: Use block device encryption
+    :param: bluestore: Use bluestore storage for OSD
+    :param: key_manager: dm-crypt Key Manager to use
+    :raises subprocess.CalledProcessError: in the event that any supporting
+                                           LVM operation failed.
+    :returns: list. 'ceph-volume' command and required parameters for
+                    execution by check_call
+    """
+    cmd = ['ceph-volume', 'lvm', 'create']
+
+    osd_fsid = str(uuid.uuid4())
+    cmd.append('--osd-fsid')
+    cmd.append(osd_fsid)
+
+    if bluestore:
+        cmd.append('--bluestore')
+        main_device_type = 'block'
+    else:
+        cmd.append('--filestore')
+        main_device_type = 'data'
+
+    if encrypt and key_manager == CEPH_KEY_MANAGER:
+        cmd.append('--dmcrypt')
+
+    # On-disk journal volume creation
+    if not osd_journal and not bluestore:
+        journal_lv_type = 'journal'
+        cmd.append('--journal')
+        cmd.append(_allocate_logical_volume(
+            dev=dev,
+            lv_type=journal_lv_type,
+            osd_fsid=osd_fsid,
+            size='{}M'.format(calculate_volume_size('journal')),
+            encrypt=encrypt,
+            key_manager=key_manager)
+        )
+
+    cmd.append('--data')
+    cmd.append(_allocate_logical_volume(dev=dev,
+                                        lv_type=main_device_type,
+                                        osd_fsid=osd_fsid,
+                                        encrypt=encrypt,
+                                        key_manager=key_manager))
+
+    if bluestore:
+        for extra_volume in ('wal', 'db'):
+            devices = get_devices('bluestore-{}'.format(extra_volume))
+            if devices:
+                cmd.append('--block.{}'.format(extra_volume))
+                least_used = find_least_used_utility_device(devices,
+                                                            lvs=True)
+                cmd.append(_allocate_logical_volume(
+                    dev=least_used,
+                    lv_type=extra_volume,
+                    osd_fsid=osd_fsid,
+                    size='{}M'.format(calculate_volume_size(extra_volume)),
+                    shared=True,
+                    encrypt=encrypt,
+                    key_manager=key_manager)
+                )
+
+    elif osd_journal:
+        cmd.append('--journal')
+        least_used = find_least_used_utility_device(osd_journal,
+                                                    lvs=True)
+        cmd.append(_allocate_logical_volume(
+            dev=least_used,
+            lv_type='journal',
+            osd_fsid=osd_fsid,
+            size='{}M'.format(calculate_volume_size('journal')),
+            shared=True,
+            encrypt=encrypt,
+            key_manager=key_manager)
+        )
+
+    return cmd
+
+
+def _partition_name(dev):
+    """
+    Derive the first partition name for a block device
+
+    :param: dev: Full path to block device.
+    :returns: str: Full path to first partition on block device.
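+
+    Illustrative examples::
+
+        _partition_name('/dev/sdb')      # -> '/dev/sdb1'
+        _partition_name('/dev/nvme0n1')  # -> '/dev/nvme0n1p1'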
+    """
+    if dev[-1].isdigit():
+        return '{}p1'.format(dev)
+    else:
+        return '{}1'.format(dev)
+
+
+def is_active_bluestore_device(dev):
+    """
+    Determine whether provided device is part of an active
+    bluestore based OSD (as its block component).
+
+    :param: dev: Full path to block device to check for Bluestore usage.
+    :returns: boolean: indicating whether device is in active use.
+    """
+    if not lvm.is_lvm_physical_volume(dev):
+        return False
+
+    vg_name = lvm.list_lvm_volume_group(dev)
+    try:
+        lv_name = lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0]
+    except IndexError:
+        return False
+
+    block_symlinks = glob.glob('/var/lib/ceph/osd/ceph-*/block')
+    for block_candidate in block_symlinks:
+        if os.path.islink(block_candidate):
+            target = os.readlink(block_candidate)
+            if target.endswith(lv_name):
+                return True
+
+    return False
+
+
+def is_luks_device(dev):
+    """
+    Determine if dev is a LUKS-formatted block device.
+
+    :param: dev: A full path to a block device to check for LUKS header
+    presence
+    :returns: boolean: indicates whether the device carries a LUKS header.
+    """
+    return bool(_luks_uuid(dev))
+
+
+def is_mapped_luks_device(dev):
+    """
+    Determine if dev is a mapped LUKS device
+    :param: dev: A full path to a block device to be checked
+    :returns: boolean: indicates whether a device is mapped
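+
+    Illustrative example (device path is an assumption)::
+
+        is_mapped_luks_device('/dev/sdb')  # True if held open via dm-crypt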
+    """
+    _, dirs, _ = next(os.walk(
+        '/sys/class/block/{}/holders/'
+        .format(os.path.basename(os.path.realpath(dev))))
+    )
+    is_held = len(dirs) > 0
+    return is_held and is_luks_device(dev)
+
+
+def get_conf(variable):
+    """
+    Get the value of the given configuration variable from the
+    cluster.
+
+    :param variable: ceph configuration variable
+    :returns: str. configured value for provided variable
+
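+    Illustrative example (the returned value is an assumption)::
+
+        get_conf('osd_journal_size')  # -> b'1024' (bytes, not decoded)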
+    """
+    return subprocess.check_output([
+        'ceph-osd',
+        '--show-config-value={}'.format(variable),
+        '--no-mon-config',
+    ]).strip()
+
+
+def calculate_volume_size(lv_type):
+    """
+    Determine the configured size for Bluestore DB/WAL or
+    Filestore Journal devices
+
+    :param lv_type: volume type (db, wal or journal)
+    :raises KeyError: if invalid lv_type is supplied
+    :returns: int. Configured size in megabytes for volume type
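+
+    Illustrative example (assumes bluestore_block_db_size is unset)::
+
+        calculate_volume_size('db')  # -> 1024 (default, in MB)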
+    """
+    # lv_type -> ceph configuration option
+    _config_map = {
+        'db': 'bluestore_block_db_size',
+        'wal': 'bluestore_block_wal_size',
+        'journal': 'osd_journal_size',
+    }
+
+    # default sizes in MB
+    _default_size = {
+        'db': 1024,
+        'wal': 576,
+        'journal': 1024,
+    }
+
+    # conversion of ceph config units to MB
+    _units = {
+        'db': 1048576,  # Bytes -> MB
+        'wal': 1048576,  # Bytes -> MB
+        'journal': 1,  # Already in MB
+    }
+
+    configured_size = get_conf(_config_map[lv_type])
+
+    if configured_size is None or int(configured_size) == 0:
+        return _default_size[lv_type]
+    else:
+        return int(configured_size) // _units[lv_type]
+
+
+def _luks_uuid(dev):
+    """
+    Check to see if dev is a LUKS encrypted volume, returning the UUID
+    of volume if it is.
+
+    :param: dev: path to block device to check.
+    :returns: str. UUID of LUKS device or None if not a LUKS device
+    """
+    try:
+        cmd = ['cryptsetup', 'luksUUID', dev]
+        return subprocess.check_output(cmd).decode('UTF-8').strip()
+    except subprocess.CalledProcessError:
+        return None
+
+
+def _initialize_disk(dev, dev_uuid, encrypt=False,
+                     key_manager=CEPH_KEY_MANAGER):
+    """
+    Initialize a raw block device consuming 100% of the available
+    disk space.
+
+    Function assumes that block device has already been wiped.
+
+    :param: dev: path to block device to initialize
+    :param: dev_uuid: UUID to use for any dm-crypt operations
+    :param: encrypt: Encrypt OSD devices using dm-crypt
+    :param: key_manager: Key management approach for dm-crypt keys
+    :raises: subprocess.CalledProcessError: if any parted calls fail
+    :returns: str: Full path to new partition.
+    """
+    use_vaultlocker = encrypt and key_manager == VAULT_KEY_MANAGER
+
+    if use_vaultlocker:
+        # NOTE(jamespage): Check to see if already initialized as a LUKS
+        #                  volume, which indicates this is a shared block
+        #                  device for journal, db or wal volumes.
+        luks_uuid = _luks_uuid(dev)
+        if luks_uuid:
+            return '/dev/mapper/crypt-{}'.format(luks_uuid)
+
+    dm_crypt = '/dev/mapper/crypt-{}'.format(dev_uuid)
+
+    if use_vaultlocker and not os.path.exists(dm_crypt):
+        subprocess.check_call([
+            'vaultlocker',
+            'encrypt',
+            '--uuid', dev_uuid,
+            dev,
+        ])
+        subprocess.check_call([
+            'dd',
+            'if=/dev/zero',
+            'of={}'.format(dm_crypt),
+            'bs=512',
+            'count=1',
+        ])
+
+    if use_vaultlocker:
+        return dm_crypt
+    else:
+        return dev
+
+
+def _allocate_logical_volume(dev, lv_type, osd_fsid,
+                             size=None, shared=False,
+                             encrypt=False,
+                             key_manager=CEPH_KEY_MANAGER):
+    """
+    Allocate a logical volume from a block device, ensuring any
+    required initialization and setup of PV's and VG's to support
+    the LV.
+
+    :param: dev: path to block device to allocate from.
+    :param: lv_type: logical volume type to create
+                     (data, block, journal, wal, db)
+    :param: osd_fsid: UUID of the OSD associate with the LV
+    :param: size: Size in LVM format for the device;
+                  if unset 100% of VG
+    :param: shared: Shared volume group (journal, wal, db)
+    :param: encrypt: Encrypt OSD devices using dm-crypt
+    :param: key_manager: dm-crypt Key Manager to use
+    :raises subprocess.CalledProcessError: in the event that any supporting
+                                           LVM or parted operation fails.
+    :returns: str: String in the format 'vg_name/lv_name'.
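+
+    Illustrative example (osd_fsid is an assumed UUID string)::
+
+        _allocate_logical_volume('/dev/sdb', 'block', osd_fsid)
+        # -> 'ceph-<osd_fsid>/osd-block-<osd_fsid>'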
+    """
+    lv_name = "osd-{}-{}".format(lv_type, osd_fsid)
+    current_volumes = lvm.list_logical_volumes()
+    if shared:
+        dev_uuid = str(uuid.uuid4())
+    else:
+        dev_uuid = osd_fsid
+    pv_dev = _initialize_disk(dev, dev_uuid, encrypt, key_manager)
+
+    vg_name = None
+    if not lvm.is_lvm_physical_volume(pv_dev):
+        lvm.create_lvm_physical_volume(pv_dev)
+        if not os.path.exists(pv_dev):
+            # NOTE: trigger rescan to work around bug 1878752
+            rescan_osd_devices()
+        if shared:
+            vg_name = 'ceph-{}-{}'.format(lv_type,
+                                          str(uuid.uuid4()))
+        else:
+            vg_name = 'ceph-{}'.format(osd_fsid)
+        lvm.create_lvm_volume_group(vg_name, pv_dev)
+    else:
+        vg_name = lvm.list_lvm_volume_group(pv_dev)
+
+    if lv_name not in current_volumes:
+        lvm.create_logical_volume(lv_name, vg_name, size)
+
+    return "{}/{}".format(vg_name, lv_name)
+
+
+def osdize_dir(path, encrypt=False, bluestore=False):
+    """Ask ceph-disk to prepare a directory to become an osd.
+
+    :param path: str. The directory to osdize
+    :param encrypt: bool. Should the OSD directory be encrypted at rest
+    :returns: None
+    """
+
+    db = kv()
+    osd_devices = db.get('osd-devices', [])
+    if path in osd_devices:
+        log('Device {} already processed by charm,'
+            ' skipping'.format(path))
+        return
+
+    for t in ['upstart', 'systemd']:
+        if os.path.exists(os.path.join(path, t)):
+            log('Path {} is already used as an OSD dir - bailing'.format(path))
+            return
+
+    if cmp_pkgrevno('ceph', "0.56.6") < 0:
+        log('Unable to use directories for OSDs with ceph < 0.56.6',
+            level=ERROR)
+        return
+
+    mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755)
+    chownr('/var/lib/ceph', ceph_user(), ceph_user())
+    cmd = [
+        'sudo', '-u', ceph_user(),
+        'ceph-disk',
+        'prepare',
+        '--data-dir',
+        path
+    ]
+    if cmp_pkgrevno('ceph', '0.60') >= 0:
+        if encrypt:
+            cmd.append('--dmcrypt')
+
+    # NOTE(icey): enable experimental bluestore support
+    if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore:
+        cmd.append('--bluestore')
+    elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore:
+        cmd.append('--filestore')
+    log("osdize dir cmd: {}".format(cmd))
+    subprocess.check_call(cmd)
+
+    # NOTE: Record processing of device only on success to ensure that
+    #       the charm only tries to initialize a device of OSD usage
+    #       once during its lifetime.
+    osd_devices.append(path)
+    db.set('osd-devices', osd_devices)
+    db.flush()
+
+
+def filesystem_mounted(fs):
+    return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
+
+
+def get_running_osds():
+    """Returns a list of the pids of the current running OSD daemons"""
+    cmd = ['pgrep', 'ceph-osd']
+    try:
+        result = str(subprocess.check_output(cmd).decode('UTF-8'))
+        return result.split()
+    except subprocess.CalledProcessError:
+        return []
+
+
+def get_cephfs(service):
+    """List the Ceph Filesystems that exist.
+
+    :param service: The service name to run the ceph command under
+    :returns: list. Returns a list of the ceph filesystems
+    """
+    if get_version() < 0.86:
+        # This command wasn't introduced until ceph 0.86
+        return []
+    try:
+        output = str(subprocess
+                     .check_output(["ceph", '--id', service, "fs", "ls"])
+                     .decode('UTF-8'))
+        if not output:
+            return []
+        """
+        Example subprocess output:
+        'name: ip-172-31-23-165, metadata pool: ip-172-31-23-165_metadata,
+         data pools: [ip-172-31-23-165_data ]\n'
+        output: filesystems: ['ip-172-31-23-165']
+        """
+        filesystems = []
+        for line in output.splitlines():
+            parts = line.split(',')
+            for part in parts:
+                if "name" in part:
+                    filesystems.append(part.split(' ')[1])
+        return filesystems
+    except subprocess.CalledProcessError:
+        return []
+
+
+def wait_for_all_monitors_to_upgrade(new_version, upgrade_key):
+    """Fairly self explanatory name. This function will wait
+    for all monitors in the cluster to upgrade or it will
+    return after a timeout period has expired.
+
+    :param new_version: str of the version to watch
+    :param upgrade_key: the cephx key name to use
+    """
+    done = False
+    start_time = time.time()
+    monitor_list = []
+
+    mon_map = get_mon_map('admin')
+    if mon_map['monmap']['mons']:
+        for mon in mon_map['monmap']['mons']:
+            monitor_list.append(mon['name'])
+    while not done:
+        try:
+            done = all(monitor_key_exists(upgrade_key, "{}_{}_{}_done".format(
+                "mon", mon, new_version
+            )) for mon in monitor_list)
+            current_time = time.time()
+            if current_time > (start_time + 10 * 60):
+                raise Exception(
+                    "Timed out waiting for all monitors to upgrade to"
+                    " {}".format(new_version))
+            else:
+                # Wait 30 seconds and test again if all monitors are upgraded
+                time.sleep(30)
+        except subprocess.CalledProcessError:
+            raise
+
+
+# Edge cases:
+# 1. Previous node dies on upgrade, can we retry?
+def roll_monitor_cluster(new_version, upgrade_key):
+    """This is tricky to get right so here's what we're going to do.
+
+    There are two possible cases: either I'm first in line or I'm not.
+    If I'm not first in line I'll wait a random time between 5-30 seconds
+    and test to see if the previous monitor is upgraded yet.
+
+    :param new_version: str of the version to upgrade to
+    :param upgrade_key: the cephx key name to use when upgrading
+    """
+    log('roll_monitor_cluster called with {}'.format(new_version))
+    my_name = socket.gethostname()
+    monitor_list = []
+    mon_map = get_mon_map('admin')
+    if mon_map['monmap']['mons']:
+        for mon in mon_map['monmap']['mons']:
+            monitor_list.append(mon['name'])
+    else:
+        status_set('blocked', 'Unable to get monitor cluster information')
+        sys.exit(1)
+    log('monitor_list: {}'.format(monitor_list))
+
+    # A sorted list of monitor unit names
+    mon_sorted_list = sorted(monitor_list)
+
+    # Install packages immediately but defer restarts to when it's our time.
+    upgrade_monitor(new_version, restart_daemons=False)
+    try:
+        position = mon_sorted_list.index(my_name)
+        log("upgrade position: {}".format(position))
+        if position == 0:
+            # I'm first!  Roll
+            # First set a key to inform others I'm about to roll
+            lock_and_roll(upgrade_key=upgrade_key,
+                          service='mon',
+                          my_name=my_name,
+                          version=new_version)
+        else:
+            # Check if the previous node has finished
+            status_set('waiting',
+                       'Waiting on {} to finish upgrading'.format(
+                           mon_sorted_list[position - 1]))
+            wait_on_previous_node(upgrade_key=upgrade_key,
+                                  service='mon',
+                                  previous_node=mon_sorted_list[position - 1],
+                                  version=new_version)
+            lock_and_roll(upgrade_key=upgrade_key,
+                          service='mon',
+                          my_name=my_name,
+                          version=new_version)
+        # NOTE(jamespage):
+        # Wait until all monitors have upgraded before bootstrapping
+        # the ceph-mgr daemons due to use of new mgr keyring profiles
+        if new_version == 'luminous':
+            wait_for_all_monitors_to_upgrade(new_version=new_version,
+                                             upgrade_key=upgrade_key)
+            bootstrap_manager()
+    except ValueError:
+        log("Failed to find {} in list {}.".format(
+            my_name, mon_sorted_list))
+        status_set('blocked', 'failed to upgrade monitor')
+
+
+# For E731 we can't assign a lambda, therefore, instead pass this.
+def noop():
+    pass
+
+
+def upgrade_monitor(new_version, kick_function=None, restart_daemons=True):
+    """Upgrade the current ceph monitor to the new version
+
+    :param new_version: String version to upgrade to.
+    :param kick_function: Callable invoked periodically to signal liveness
+        during long operations; defaults to a no-op.
+    :param restart_daemons: Whether to restart the daemons after the
+        package upgrade; if False only packages are upgraded.
+    """
+    if kick_function is None:
+        kick_function = noop
+    current_version = get_version()
+    status_set("maintenance", "Upgrading monitor")
+    log("Current ceph version is {}".format(current_version))
+    log("Upgrading to: {}".format(new_version))
+
+    # Needed to determine whether to stop/start ceph-mgr
+    luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0
+
+    kick_function()
+    try:
+        add_source(config('source'), config('key'))
+        apt_update(fatal=True)
+    except subprocess.CalledProcessError as err:
+        log("Adding the ceph source failed with message: {}".format(
+            err))
+        status_set("blocked", "Upgrade to {} failed".format(new_version))
+        sys.exit(1)
+    kick_function()
+
+    try:
+        apt_install(packages=determine_packages(), fatal=True)
+        rm_packages = determine_packages_to_remove()
+        if rm_packages:
+            apt_purge(packages=rm_packages, fatal=True)
+    except subprocess.CalledProcessError as err:
+        log("Upgrading packages failed "
+            "with message: {}".format(err))
+        status_set("blocked", "Upgrade to {} failed".format(new_version))
+        sys.exit(1)
+
+    if not restart_daemons:
+        log("Packages upgraded but not restarting daemons yet.")
+        return
+
+    try:
+        if systemd():
+            service_stop('ceph-mon')
+            log("restarting ceph-mgr.target maybe: {}"
+                .format(luminous_or_later))
+            if luminous_or_later:
+                service_stop('ceph-mgr.target')
+        else:
+            service_stop('ceph-mon-all')
+
+        kick_function()
+
+        owner = ceph_user()
+
+        # Ensure the files and directories under /var/lib/ceph are chowned
+        # properly as part of the move to the Jewel release, which moved the
+        # ceph daemons to running as ceph:ceph instead of root:root.
+        if new_version == 'jewel':
+            # Ensure the ownership of Ceph's directories is correct
+            chownr(path=os.path.join(os.sep, "var", "lib", "ceph"),
+                   owner=owner,
+                   group=owner,
+                   follow_links=True)
+
+        kick_function()
+
+        # Ensure that mon directory is user writable
+        hostname = socket.gethostname()
+        path = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
+        mkdir(path, owner=ceph_user(), group=ceph_user(),
+              perms=0o755)
+
+        if systemd():
+            service_restart('ceph-mon')
+            log("starting ceph-mgr.target maybe: {}".format(luminous_or_later))
+            if luminous_or_later:
+                # due to BUG: #1849874 we have to force a restart to get it to
+                # drop the previous version of ceph-manager and start the new
+                # one.
+                service_restart('ceph-mgr.target')
+        else:
+            service_start('ceph-mon-all')
+    except subprocess.CalledProcessError as err:
+        log("Stopping ceph and upgrading packages failed "
+            "with message: {}".format(err))
+        status_set("blocked", "Upgrade to {} failed".format(new_version))
+        sys.exit(1)
+
+
+def lock_and_roll(upgrade_key, service, my_name, version):
+    """Create a lock on the ceph monitor cluster and upgrade.
+
+    :param upgrade_key: str. The cephx key to use
+    :param service: str. The cephx id to use
+    :param my_name: str. The current hostname
+    :param version: str. The version we are upgrading to
+    """
+    start_timestamp = time.time()
+
+    log('monitor_key_set {}_{}_{}_start {}'.format(
+        service,
+        my_name,
+        version,
+        start_timestamp))
+    monitor_key_set(upgrade_key, "{}_{}_{}_start".format(
+        service, my_name, version), start_timestamp)
+
+    # alive indication:
+    alive_function = (
+        lambda: monitor_key_set(
+            upgrade_key, "{}_{}_{}_alive"
+            .format(service, my_name, version), time.time()))
+    dog = WatchDog(kick_interval=3 * 60,
+                   kick_function=alive_function)
+
+    log("Rolling")
+
+    # This should be quick
+    if service == 'osd':
+        upgrade_osd(version, kick_function=dog.kick_the_dog)
+    elif service == 'mon':
+        upgrade_monitor(version, kick_function=dog.kick_the_dog)
+    else:
+        log("Unknown service {}. Unable to upgrade".format(service),
+            level=ERROR)
+    log("Done")
+
+    stop_timestamp = time.time()
+    # Set a key to inform others I am finished
+    log('monitor_key_set {}_{}_{}_done {}'.format(service,
+                                                  my_name,
+                                                  version,
+                                                  stop_timestamp))
+    status_set('maintenance', 'Finishing upgrade')
+    monitor_key_set(upgrade_key, "{}_{}_{}_done".format(service,
+                                                        my_name,
+                                                        version),
+                    stop_timestamp)
+
+
+def wait_on_previous_node(upgrade_key, service, previous_node, version):
+    """A lock that sleeps the current thread while waiting for the previous
+    node to finish upgrading.
+
+    :param upgrade_key: str. The cephx key to use
+    :param service: str. the cephx id to use
+    :param previous_node: str. The name of the previous node to wait on
+    :param version: str. The version we are upgrading to
+    :returns: None
+    """
+    log("Previous node is: {}".format(previous_node))
+
+    previous_node_started_f = (
+        lambda: monitor_key_exists(
+            upgrade_key,
+            "{}_{}_{}_start".format(service, previous_node, version)))
+    previous_node_finished_f = (
+        lambda: monitor_key_exists(
+            upgrade_key,
+            "{}_{}_{}_done".format(service, previous_node, version)))
+    previous_node_alive_time_f = (
+        lambda: monitor_key_get(
+            upgrade_key,
+            "{}_{}_{}_alive".format(service, previous_node, version)))
+
+    # wait for 30 minutes until the previous node starts.  We don't proceed
+    # unless we get a start condition.
+    try:
+        WatchDog.wait_until(previous_node_started_f, timeout=30 * 60)
+    except WatchDog.WatchDogTimeoutException:
+        log("Waited for previous node to start for 30 minutes. "
+            "It didn't start, so may have a serious issue. Continuing with "
+            "upgrade of this node.",
+            level=WARNING)
+        return
+
+    # keep the time it started from this nodes' perspective.
+    previous_node_started_at = time.time()
+    log("Detected that previous node {} has started.  Time now: {}"
+        .format(previous_node, previous_node_started_at))
+
+    # Now wait for the node to complete.  The node may optionally be kicking
+    # with the *_alive key, which allows this node to wait longer as it 'knows'
+    # the other node is proceeding.
+    try:
+        WatchDog.timed_wait(kicked_at_function=previous_node_alive_time_f,
+                            complete_function=previous_node_finished_f,
+                            wait_time=30 * 60,
+                            compatibility_wait_time=10 * 60,
+                            max_kick_interval=5 * 60)
+    except WatchDog.WatchDogDeadException:
+        # previous node was kicking, but timed out; log this condition and move
+        # on.
+        now = time.time()
+        waited = int((now - previous_node_started_at) / 60)
+        log("Previous node started, but has now not ticked for 5 minutes. "
+            "Waited total of {} mins on node {}. current time: {} > "
+            "previous node start time: {}. "
+            "Continuing with upgrade of this node."
+            .format(waited, previous_node, now, previous_node_started_at),
+            level=WARNING)
+    except WatchDog.WatchDogTimeoutException:
+        # previous node never kicked, or simply took too long; log this
+        # condition and move on.
+        now = time.time()
+        waited = int((now - previous_node_started_at) / 60)
+        log("Previous node is taking too long; assuming it has died."
+            "Waited {} mins on node {}. current time: {} > "
+            "previous node start time: {}. "
+            "Continuing with upgrade of this node."
+            .format(waited, previous_node, now, previous_node_started_at),
+            level=WARNING)
+
+
+class WatchDog(object):
+    """Watch a dog; basically a kickable timer with a timeout between two async
+    units.
+
+    The idea is that you have an overall timeout and then can kick that timeout
+    with intermediary hits, with a max time between those kicks allowed.
+
+    Note that this watchdog doesn't rely on the clock of the other side; just
+    roughly when it detects when the other side started.  All timings are based
+    on the local clock.
+
+    The kicker will not 'kick' more often than a set interval, regardless of
+    how often the kick_the_dog() function is called.  The kicker provides a
+    function (lambda: -> None) that is called when the kick interval is
+    reached.
+
+    The waiter calls the static method with a check function
+    (lambda: -> Boolean) that indicates when the wait should be over and the
+    maximum interval to wait.  e.g. 30 minutes with a 5 minute kick interval.
+
+    So the waiter calls wait(f, 30, 3) and the kicker sets up a 3 minute kick
+    interval, or however long it is expected for the key to propagate and to
+    allow for other delays.
+
+    There is a compatibility mode where, if the other side never kicks, it
+    simply waits for the compatibility timer.
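+
+    Illustrative pairing (the key name is an assumption)::
+
+        # Upgrading side: kick at most every 3 minutes
+        dog = WatchDog(kick_interval=3 * 60,
+                       kick_function=lambda: monitor_key_set(
+                           'admin', 'mon_host_ver_alive', time.time()))
+        dog.kick_the_dog()  # safe to call as often as convenient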
+    """
+
+    class WatchDogDeadException(Exception):
+        pass
+
+    class WatchDogTimeoutException(Exception):
+        pass
+
+    def __init__(self, kick_interval=3 * 60, kick_function=None):
+        """Initialise a new WatchDog
+
+        :param kick_interval: the interval when this side kicks the other in
+            seconds.
+        :type kick_interval: Int
+        :param kick_function: The function to call that does the kick.
+        :type kick_function: Callable[]
+        """
+        self.start_time = time.time()
+        self.last_run_func = None
+        self.last_kick_at = None
+        self.kick_interval = kick_interval
+        self.kick_f = kick_function
+
+    def kick_the_dog(self):
+        """Might call the kick_function if it's time.
+
+        This function can be called as frequently as needed, but will run the
+        self.kick_function after kick_interval seconds have passed.
+        """
+        now = time.time()
+        if (self.last_run_func is None or
+                (now - self.last_run_func > self.kick_interval)):
+            if self.kick_f is not None:
+                self.kick_f()
+            self.last_run_func = now
+        self.last_kick_at = now
+
+    @staticmethod
+    def wait_until(wait_f, timeout=10 * 60):
+        """Wait for timeout seconds until the passed function return True.
+
+        :param wait_f: The function to call that will end the wait.
+        :type wait_f: Callable[[], Boolean]
+        :param timeout: The time to wait in seconds.
+        :type timeout: int
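+
+        Illustrative use (the predicate is an assumption)::
+
+            WatchDog.wait_until(
+                lambda: monitor_key_exists('admin', 'some_key'),
+                timeout=10 * 60)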
+        """
+        start_time = time.time()
+        while not wait_f():
+            now = time.time()
+            if now > start_time + timeout:
+                raise WatchDog.WatchDogTimeoutException()
+            wait_time = random.randrange(5, 30)
+            log('wait_until: waiting for {} seconds'.format(wait_time))
+            time.sleep(wait_time)
+
+    @staticmethod
+    def timed_wait(kicked_at_function,
+                   complete_function,
+                   wait_time=30 * 60,
+                   compatibility_wait_time=10 * 60,
+                   max_kick_interval=5 * 60):
+        """Wait a maximum time with an intermediate 'kick' time.
+
+        This function will wait for max_kick_interval seconds unless the
+        kicked_at_function() call returns a time that is not older than
+        max_kick_interval (in seconds).  i.e. the other side can signal that it
+        is still doing things during the max_kick_interval as long as it kicks
+        at least every max_kick_interval seconds.
+
+        The maximum wait is "wait_time", but the other side must keep kicking
+        during this period.
+
+        The "compatibility_wait_time" is used if the other side never kicks
+        (i.e. the kicked_at_function() always returns None.  In this case the
+        function wait up to "compatibility_wait_time".
+
+        Note that the type of the return from the kicked_at_function is an
+        Optional[str], not a Float.  The function will coerce this to a float
+        for the comparison.  This represents the return value of
+        time.time() at the "other side".  It's a string to simplify the
+        function obtaining the time value from the other side.
+
+        The function raises WatchDogTimeoutException if either the
+        compatibility_wait_time or the wait_time are exceeded.
+
+        The function raises WatchDogDeadException if the max_kick_interval is
+        exceeded.
+
+        Note that it is possible that the first kick interval is extended to
+        compatibility_wait_time if the "other side" doesn't kick immediately.
+        The best solution is for the other side to kick early and often.
+
+        :param kicked_at_function: The function to call to retrieve the time
+            that the other side 'kicked' at.  None if the other side hasn't
+            kicked.
+        :type kicked_at_function: Callable[[], Optional[str]]
+        :param complete_function: The callable that returns True when done.
+        :type complete_function: Callable[[], Boolean]
+        :param wait_time: the maximum time to wait, even with kicks, in
+            seconds.
+        :type wait_time: int
+        :param compatibility_wait_time: The time to wait if no kicks are
+            received, in seconds.
+        :type compatibility_wait_time: int
+        :param max_kick_interval: The maximum time allowed between kicks before
+            the wait is over, in seconds.
+        :type max_kick_interval: int
+        :raises: WatchDog.WatchDogTimeoutException,
+                 WatchDog.WatchDogDeadException
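+
+        Illustrative use (the three callables are assumptions)::
+
+            WatchDog.timed_wait(kicked_at_function=get_alive_timestamp,
+                                complete_function=previous_node_done,
+                                wait_time=30 * 60,
+                                compatibility_wait_time=10 * 60,
+                                max_kick_interval=5 * 60)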
+        """
+        start_time = time.time()
+        while True:
+            if complete_function():
+                break
+            # the time when the waiting for unit last kicked.
+            kicked_at = kicked_at_function()
+            now = time.time()
+            if kicked_at is None:
+                # assume other end doesn't do alive kicks
+                if (now - start_time > compatibility_wait_time):
+                    raise WatchDog.WatchDogTimeoutException()
+            else:
+                # other side is participating in kicks; must kick at least
+                # every 'max_kick_interval' to stay alive.
+                if (now - float(kicked_at) > max_kick_interval):
+                    raise WatchDog.WatchDogDeadException()
+            if (now - start_time > wait_time):
+                raise WatchDog.WatchDogTimeoutException()
+            delay_time = random.randrange(5, 30)
+            log('waiting for {} seconds'.format(delay_time))
+            time.sleep(delay_time)
+
+
+def get_upgrade_position(osd_sorted_list, match_name):
+    """Return the upgrade position for the given osd.
+
+    :param osd_sorted_list: Osds sorted
+    :type osd_sorted_list: [str]
+    :param match_name: The osd name to match
+    :type match_name: str
+    :returns: The position of the name
+    :rtype: int
+    :raises: ValueError if name is not found
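+
+    Illustrative example (items are assumed to expose a ``name``
+    attribute, mirroring the call in roll_osd_cluster)::
+
+        get_upgrade_position(sorted(osd_tree), socket.gethostname())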
+    """
+    for index, item in enumerate(osd_sorted_list):
+        if item.name == match_name:
+            return index
+    raise ValueError("osd name '{}' not found in get_upgrade_position list"
+                     .format(match_name))
+
+
+# Edge cases:
+# 1. Previous node dies on upgrade, can we retry?
+# 2. This assumes that the osd failure domain is not set to osd.
+#    It rolls an entire server at a time.
+def roll_osd_cluster(new_version, upgrade_key):
+    """This is tricky to get right so here's what we're going to do.
+
+    There are two possible cases: either I'm first in line or I'm not.
+    If I'm not first in line I'll wait a random time between 5-30 seconds
+    and test to see if the previous osd is upgraded yet.
+
+    TODO: If you're not in the same failure domain it's safe to upgrade
+     1. Examine all pools and adopt the most strict failure domain policy
+        Example: Pool 1: Failure domain = rack
+        Pool 2: Failure domain = host
+        Pool 3: Failure domain = row
+
+        outcome: Failure domain = host
+
+    :param new_version: str of the version to upgrade to
+    :param upgrade_key: the cephx key name to use when upgrading
+    """
+    log('roll_osd_cluster called with {}'.format(new_version))
+    my_name = socket.gethostname()
+    osd_tree = get_osd_tree(service=upgrade_key)
+    # A sorted list of osd unit names
+    osd_sorted_list = sorted(osd_tree)
+    log("osd_sorted_list: {}".format(osd_sorted_list))
+
+    try:
+        position = get_upgrade_position(osd_sorted_list, my_name)
+        log("upgrade position: {}".format(position))
+        if position == 0:
+            # I'm first!  Roll
+            # First set a key to inform others I'm about to roll
+            lock_and_roll(upgrade_key=upgrade_key,
+                          service='osd',
+                          my_name=my_name,
+                          version=new_version)
+        else:
+            # Check if the previous node has finished
+            status_set('waiting',
+                       'Waiting on {} to finish upgrading'.format(
+                           osd_sorted_list[position - 1].name))
+            wait_on_previous_node(
+                upgrade_key=upgrade_key,
+                service='osd',
+                previous_node=osd_sorted_list[position - 1].name,
+                version=new_version)
+            lock_and_roll(upgrade_key=upgrade_key,
+                          service='osd',
+                          my_name=my_name,
+                          version=new_version)
+    except ValueError:
+        log("Failed to find name {} in list {}".format(
+            my_name, osd_sorted_list))
+        status_set('blocked', 'failed to upgrade osd')
+
+
+def upgrade_osd(new_version, kick_function=None):
+    """Upgrades the current osd
+
+    :param new_version: str. The new version to upgrade to
+    :param kick_function: Callable invoked periodically to signal liveness
+        during long operations; defaults to a no-op.
+    """
+    if kick_function is None:
+        kick_function = noop
+
+    current_version = get_version()
+    status_set("maintenance", "Upgrading osd")
+    log("Current ceph version is {}".format(current_version))
+    log("Upgrading to: {}".format(new_version))
+
+    try:
+        add_source(config('source'), config('key'))
+        apt_update(fatal=True)
+    except subprocess.CalledProcessError as err:
+        log("Adding the ceph sources failed with message: {}".format(
+            err))
+        status_set("blocked", "Upgrade to {} failed".format(new_version))
+        sys.exit(1)
+
+    kick_function()
+
+    try:
+        # Upgrade the packages before restarting the daemons.
+        status_set('maintenance', 'Upgrading packages to %s' % new_version)
+        apt_install(packages=determine_packages(), fatal=True)
+        kick_function()
+
+        # If the upgrade does not need an ownership update of any of the
+        # directories in the osd service directory, then simply restart
+        # all of the OSDs at the same time as this will be the fastest
+        # way to update the code on the node.
+        if not dirs_need_ownership_update('osd'):
+            log('Restarting all OSDs to load new binaries', DEBUG)
+            with maintain_all_osd_states():
+                if systemd():
+                    service_restart('ceph-osd.target')
+                else:
+                    service_restart('ceph-osd-all')
+            return
+
+        # Need to change the ownership of all directories which are not OSD
+        # directories as well.
+        # TODO - this should probably be moved to the general upgrade function
+        #        and done before mon/osd.
+        update_owner(CEPH_BASE_DIR, recurse_dirs=False)
+        non_osd_dirs = filter(lambda x: not x == 'osd',
+                              os.listdir(CEPH_BASE_DIR))
+        non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x),
+                           non_osd_dirs)
+        for i, path in enumerate(non_osd_dirs):
+            if i % 100 == 0:
+                kick_function()
+            update_owner(path)
+
+        # Fast service restart wasn't an option because each of the OSD
+        # directories need the ownership updated for all the files on
+        # the OSD. Walk through the OSDs one-by-one upgrading the OSD.
+        for osd_dir in _get_child_dirs(OSD_BASE_DIR):
+            kick_function()
+            try:
+                osd_num = _get_osd_num_from_dirname(osd_dir)
+                _upgrade_single_osd(osd_num, osd_dir)
+            except ValueError as ex:
+                # Directory could not be parsed - junk directory?
+                log('Could not parse osd directory %s: %s' % (osd_dir, ex),
+                    WARNING)
+                continue
+
+    except (subprocess.CalledProcessError, IOError) as err:
+        log("Stopping ceph and upgrading packages failed "
+            "with message: {}".format(err))
+        status_set("blocked", "Upgrade to {} failed".format(new_version))
+        sys.exit(1)
+
+
+def _upgrade_single_osd(osd_num, osd_dir):
+    """Upgrades the single OSD directory.
+
+    :param osd_num: the num of the OSD
+    :param osd_dir: the directory of the OSD to upgrade
+    :raises CalledProcessError: if an error occurs in a command issued as part
+                                of the upgrade process
+    :raises IOError: if an error occurs reading/writing to a file as part
+                     of the upgrade process
+    """
+    with maintain_osd_state(osd_num):
+        stop_osd(osd_num)
+        disable_osd(osd_num)
+        update_owner(osd_dir)
+        enable_osd(osd_num)
+        start_osd(osd_num)
+
+
+def stop_osd(osd_num):
+    """Stops the specified OSD number.
+
+    :param osd_num: the osd number to stop
+    """
+    if systemd():
+        service_stop('ceph-osd@{}'.format(osd_num))
+    else:
+        service_stop('ceph-osd', id=osd_num)
+
+
+def start_osd(osd_num):
+    """Starts the specified OSD number.
+
+    :param osd_num: the osd number to start.
+    """
+    if systemd():
+        service_start('ceph-osd@{}'.format(osd_num))
+    else:
+        service_start('ceph-osd', id=osd_num)
+
+
+def disable_osd(osd_num):
+    """Disables the specified OSD number.
+
+    Ensures that the specified osd will not be automatically started at the
+    next reboot of the system. Due to differences between init systems,
+    this method cannot make any guarantees that the specified osd cannot be
+    started manually.
+
+    :param osd_num: the osd id which should be disabled.
+    :raises CalledProcessError: if an error occurs invoking the systemd cmd
+                                to disable the OSD
+    :raises IOError, OSError: if the attempt to read/remove the ready file in
+                              an upstart enabled system fails
+    """
+    if systemd():
+        # When running under systemd, the individual ceph-osd daemons run as
+        # templated units and can be directly addressed by referring to the
+        # templated service name ceph-osd@<osd_num>. Additionally, systemd
+        # allows one to disable a specific templated unit by running the
+        # 'systemctl disable ceph-osd@<osd_num>' command. When disabled, the
+        # OSD should remain disabled until re-enabled via systemd.
+        # Note: disabling an already disabled service in systemd returns 0, so
+        # no need to check whether it is enabled or not.
+        cmd = ['systemctl', 'disable', 'ceph-osd@{}'.format(osd_num)]
+        subprocess.check_call(cmd)
+    else:
+        # Neither upstart nor the ceph-osd upstart script provides for
+        # disabling the starting of an OSD automatically. The specific OSD
+        # cannot be prevented from running manually, however it can be
+        # prevented from running automatically on reboot by removing the
+        # 'ready' file in the OSD's root directory. This is due to the
+        # ceph-osd-all upstart script checking for the presence of this file
+        # before starting the OSD.
+        ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num),
+                                  'ready')
+        if os.path.exists(ready_file):
+            os.unlink(ready_file)
+
+
+def enable_osd(osd_num):
+    """Enables the specified OSD number.
+
+    Ensures that the specified osd_num will be enabled and ready to start
+    automatically in the event of a reboot.
+
+    :param osd_num: the osd id which should be enabled.
+    :raises CalledProcessError: if the call to the systemd command issued
+                                fails when enabling the service
+    :raises IOError: if the attempt to write the ready file in an upstart
+                     enabled system fails
+    """
+    if systemd():
+        cmd = ['systemctl', 'enable', 'ceph-osd@{}'.format(osd_num)]
+        subprocess.check_call(cmd)
+    else:
+        # When running on upstart, the OSDs are started via the ceph-osd-all
+        # upstart script which will only start the osd if it has a 'ready'
+        # file. Make sure that file exists.
+        ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num),
+                                  'ready')
+        with open(ready_file, 'w') as f:
+            f.write('ready')
+
+        # Make sure the correct user owns the file. It shouldn't be necessary
+        # as the upstart script should run with root privileges, but it's
+        # better to have all the files matching ownership.
+        update_owner(ready_file)
+
+
+def update_owner(path, recurse_dirs=True):
+    """Changes the ownership of the specified path.
+
+    Changes the ownership of the specified path to the new ceph daemon user
+    using the system's native chown functionality. This may take a while,
+    so this method will issue a set_status for any changes of ownership which
+    recurses into directory structures.
+
+    :param path: the path to recursively change ownership for
+    :param recurse_dirs: boolean indicating whether to recursively change the
+                         ownership of all the files in a path's subtree or to
+                         simply change the ownership of the path.
+    :raises CalledProcessError: if an error occurs issuing the chown system
+                                command
+    """
+    user = ceph_user()
+    user_group = '{ceph_user}:{ceph_user}'.format(ceph_user=user)
+    cmd = ['chown', user_group, path]
+    if os.path.isdir(path) and recurse_dirs:
+        status_set('maintenance', ('Updating ownership of %s to %s' %
+                                   (path, user)))
+        cmd.insert(1, '-R')
+
+    log('Changing ownership of {path} to {user}'.format(
+        path=path, user=user_group), DEBUG)
+    start = datetime.now()
+    subprocess.check_call(cmd)
+    elapsed_time = (datetime.now() - start)
+
+    log('Took {secs} seconds to change the ownership of path: {path}'.format(
+        secs=elapsed_time.total_seconds(), path=path), DEBUG)
+
+
+def get_osd_state(osd_num, osd_goal_state=None):
+    """Get OSD state or loop until OSD state matches OSD goal state.
+
+    If osd_goal_state is None, just return the current OSD state.
+    If osd_goal_state is not None, loop until the current OSD state matches
+    the OSD goal state.
+
+    :param osd_num: the osd id to get state for
+    :param osd_goal_state: (Optional) string indicating state to wait for
+                           Defaults to None
+    :returns: Returns a str, the OSD state.
+    :rtype: str
+    """
+    while True:
+        asok = "/var/run/ceph/ceph-osd.{}.asok".format(osd_num)
+        cmd = [
+            'ceph',
+            'daemon',
+            asok,
+            'status'
+        ]
+        try:
+            result = json.loads(str(subprocess
+                                    .check_output(cmd)
+                                    .decode('UTF-8')))
+        except (subprocess.CalledProcessError, ValueError) as e:
+            log("{}".format(e), level=DEBUG)
+            # Avoid busy-looping while the admin socket is unavailable
+            time.sleep(3)
+            continue
+        osd_state = result['state']
+        log("OSD {} state: {}, goal state: {}".format(
+            osd_num, osd_state, osd_goal_state), level=DEBUG)
+        if not osd_goal_state:
+            return osd_state
+        if osd_state == osd_goal_state:
+            return osd_state
+        time.sleep(3)
+
+
+def get_all_osd_states(osd_goal_states=None):
+    """Get all OSD states or loop until all OSD states match OSD goal states.
+
+    If osd_goal_states is None, just return a dictionary of current OSD states.
+    If osd_goal_states is not None, loop until the current OSD states match
+    the OSD goal states.
+
+    :param osd_goal_states: (Optional) dict indicating states to wait for
+                            Defaults to None
+    :returns: Returns a dictionary of current OSD states.
+    :rtype: dict
+    """
+    osd_states = {}
+    for osd_num in get_local_osd_ids():
+        if not osd_goal_states:
+            osd_states[osd_num] = get_osd_state(osd_num)
+        else:
+            osd_states[osd_num] = get_osd_state(
+                osd_num,
+                osd_goal_state=osd_goal_states[osd_num])
+    return osd_states
+
+
+@contextmanager
+def maintain_osd_state(osd_num):
+    """Ensure the state of an OSD is maintained.
+
+    Ensures the state of an OSD is the same at the end of a block nested
+    in a with statement as it was at the beginning of the block.
+
+    :param osd_num: the osd id to maintain state for
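+
+    Illustrative use::
+
+        with maintain_osd_state(0):
+            service_restart('ceph-osd@0')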
+    """
+    osd_state = get_osd_state(osd_num)
+    try:
+        yield
+    finally:
+        get_osd_state(osd_num, osd_goal_state=osd_state)
+
+
+@contextmanager
+def maintain_all_osd_states():
+    """Ensure all local OSD states are maintained.
+
+    Ensures the states of all local OSDs are the same at the end of a
+    block nested in a with statement as they were at the beginning of
+    the block.
+    """
+    osd_states = get_all_osd_states()
+    try:
+        yield
+    finally:
+        get_all_osd_states(osd_goal_states=osd_states)
+
+
+def list_pools(client='admin'):
+    """This will list the current pools that Ceph has
+
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Returns a list of available pools.
+    :rtype: list
+    :raises: subprocess.CalledProcessError if the subprocess fails to run.
+    """
+    try:
+        pool_list = []
+        pools = subprocess.check_output(['rados', '--id', client, 'lspools'],
+                                        universal_newlines=True,
+                                        stderr=subprocess.STDOUT)
+        for pool in pools.splitlines():
+            pool_list.append(pool)
+        return pool_list
+    except subprocess.CalledProcessError as err:
+        log("rados lspools failed with error: {}".format(err.output))
+        raise
+
+
+def get_pool_param(pool, param, client='admin'):
+    """Get parameter from pool.
+
+    :param pool: Name of pool to get variable from
+    :type pool: str
+    :param param: Name of variable to get
+    :type param: str
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Value of variable on pool or None
+    :rtype: str or None
+    :raises: subprocess.CalledProcessError
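+
+    Illustrative example (pool name and value are assumptions)::
+
+        get_pool_param('rbd', 'size')  # -> '3'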
+    """
+    try:
+        output = subprocess.check_output(
+            ['ceph', '--id', client, 'osd', 'pool', 'get', pool, param],
+            universal_newlines=True, stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError as cp:
+        if cp.returncode == 2 and 'ENOENT: option' in cp.output:
+            return None
+        raise
+    if ':' in output:
+        return output.split(':')[1].lstrip().rstrip()
+
+
+def get_pool_erasure_profile(pool, client='admin'):
+    """Get erasure code profile for pool.
+
+    :param pool: Name of pool to get variable from
+    :type pool: str
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Erasure code profile of pool or None
+    :rtype: str or None
+    :raises: subprocess.CalledProcessError
+    """
+    try:
+        return get_pool_param(pool, 'erasure_code_profile', client=client)
+    except subprocess.CalledProcessError as cp:
+        if cp.returncode == 13 and 'EACCES: pool' in cp.output:
+            # Not an erasure coded pool
+            return None
+        raise
+
+
+def get_pool_quota(pool, client='admin'):
+    """Get pool quota.
+
+    :param pool: Name of pool to get variable from
+    :type pool: str
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Dictionary with quota variables
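+              e.g. {'max_bytes': '1000', 'max_objects': '10'}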
+    :rtype: dict
+    :raises: subprocess.CalledProcessError
+    """
+    output = subprocess.check_output(
+        ['ceph', '--id', client, 'osd', 'pool', 'get-quota', pool],
+        universal_newlines=True, stderr=subprocess.STDOUT)
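+    # Match quota lines such as '  max objects: 10' (illustrative); only
+    # numeric values match, so unset quotas are skipped.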
+    rc = re.compile(r'\s+max\s+(\S+)\s*:\s+(\d+)')
+    result = {}
+    for line in output.splitlines():
+        m = rc.match(line)
+        if m:
+            result.update({'max_{}'.format(m.group(1)): m.group(2)})
+    return result
+
+
+def get_pool_applications(pool='', client='admin'):
+    """Get pool applications.
+
+    :param pool: (Optional) Name of pool to get applications for
+                 Defaults to get for all pools
+    :type pool: str
+    :param client: (Optional) client id for ceph key to use
+                   Defaults to ``admin``
+    :type client: str
+    :returns: Dictionary with pool name as key
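+              e.g. {'pool_name': {'application': {}}}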
+    :rtype: dict
+    :raises: subprocess.CalledProcessError
+    """
+
+    cmd = ['ceph', '--id', client, 'osd', 'pool', 'application', 'get']
+    if pool:
+        cmd.append(pool)
+    try:
+        output = subprocess.check_output(cmd,
+                                         universal_newlines=True,
+                                         stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError as cp:
+        if cp.returncode == 2 and 'ENOENT' in cp.output:
+            return {}
+        raise
+    return json.loads(output)
+
+
+def list_pools_detail():
+    """Get detailed information about pools.
+
+    Structure:
+    {'pool_name_1': {'applications': {'application': {}},
+                     'parameters': {'pg_num': '42', 'size': '42'},
+                     'quota': {'max_bytes': '1000',
+                               'max_objects': '10'},
+                     },
+     'pool_name_2': ...
+     }
+
+    :returns: Dictionary with detailed pool information.
+    :rtype: dict
+    :raises: subprocess.CalledProcessError
+    """
+    get_params = ['pg_num', 'size']
+    result = {}
+    applications = get_pool_applications()
+    for pool in list_pools():
+        result[pool] = {
+            'applications': applications.get(pool, {}),
+            'parameters': {},
+            'quota': get_pool_quota(pool),
+        }
+        for param in get_params:
+            result[pool]['parameters'].update({
+                param: get_pool_param(pool, param)})
+        erasure_profile = get_pool_erasure_profile(pool)
+        if erasure_profile:
+            result[pool]['parameters'].update({
+                'erasure_code_profile': erasure_profile})
+    return result
+
+
+def dirs_need_ownership_update(service):
+    """Determines if directories still need change of ownership.
+
+    Examines the set of directories under the /var/lib/ceph/{service} directory
+    and determines if they have the correct ownership or not. This is
+    necessary due to the upgrade from Hammer to Jewel where the daemon user
+    changes from root: to ceph:.
+
+    :param service: the name of the service folder to check (e.g. osd, mon)
+    :returns: boolean. True if the directories need a change of ownership,
+             False otherwise.
+    :raises IOError: if an error occurs reading the file stats from one of
+                     the child directories.
+    :raises OSError: if the specified path does not exist or some other error
+    """
+    expected_owner = expected_group = ceph_user()
+    path = os.path.join(CEPH_BASE_DIR, service)
+    for child in _get_child_dirs(path):
+        curr_owner, curr_group = owner(child)
+
+        if (curr_owner == expected_owner) and (curr_group == expected_group):
+            continue
+
+        # NOTE(lathiat): when config_changed runs on reboot, the OSD might not
+        # yet be mounted or started, and the underlying directory the OSD is
+        # mounted to is expected to be owned by root. So skip the check. This
+        # may also happen for OSD directories for OSDs that were removed.
+        if (service == 'osd' and
+                not os.path.exists(os.path.join(child, 'magic'))):
+            continue
+
+        log('Directory "%s" needs its ownership updated' % child, DEBUG)
+        return True
+
+    # All child directories had the expected ownership
+    return False
+
+
+# A dict of valid ceph upgrade paths. Mapping is old -> new
+UPGRADE_PATHS = collections.OrderedDict([
+    ('firefly', 'hammer'),
+    ('hammer', 'jewel'),
+    ('jewel', 'luminous'),
+    ('luminous', 'mimic'),
+    ('mimic', 'nautilus'),
+    ('nautilus', 'octopus'),
+])
+
+# Map UCA codenames to ceph codenames
+UCA_CODENAME_MAP = {
+    'icehouse': 'firefly',
+    'juno': 'firefly',
+    'kilo': 'hammer',
+    'liberty': 'hammer',
+    'mitaka': 'jewel',
+    'newton': 'jewel',
+    'ocata': 'jewel',
+    'pike': 'luminous',
+    'queens': 'luminous',
+    'rocky': 'mimic',
+    'stein': 'mimic',
+    'train': 'nautilus',
+    'ussuri': 'octopus',
+}
+
+
+def pretty_print_upgrade_paths():
+    """Pretty print supported upgrade paths for ceph"""
+    return ["{} -> {}".format(key, value)
+            for key, value in UPGRADE_PATHS.items()]
+
+
+def resolve_ceph_version(source):
+    """Resolves a version of ceph based on source configuration
+    based on Ubuntu Cloud Archive pockets.
+
+    @param: source: source configuration option of charm
+    :returns: ceph release codename or None if not resolvable
+    """
+    os_release = get_os_codename_install_source(source)
+    return UCA_CODENAME_MAP.get(os_release)
+
+
+def get_ceph_pg_stat():
+    """Returns the result of ceph pg stat.
+
+    :returns: dict
+    """
+    try:
+        tree = str(subprocess
+                   .check_output(['ceph', 'pg', 'stat', '--format=json'])
+                   .decode('UTF-8'))
+        try:
+            json_tree = json.loads(tree)
+            if not json_tree['num_pg_by_state']:
+                return None
+            return json_tree
+        except ValueError as v:
+            log("Unable to parse ceph pg stat json: {}. Error: {}".format(
+                tree, v))
+            raise
+    except subprocess.CalledProcessError as e:
+        log("ceph pg stat command failed with message: {}".format(e))
+        raise
+
+
+def get_ceph_health():
+    """Returns the health of the cluster from a 'ceph status'
+
+    :returns: dict tree of ceph status
+    :raises: CalledProcessError if our ceph command fails to get the overall
+             status, use get_ceph_health()['overall_status'].
+    """
+    try:
+        tree = str(subprocess
+                   .check_output(['ceph', 'status', '--format=json'])
+                   .decode('UTF-8'))
+        try:
+            json_tree = json.loads(tree)
+            # Make sure children are present in the json
+            if not json_tree['overall_status']:
+                return None
+
+            return json_tree
+        except ValueError as v:
+            log("Unable to parse ceph tree json: {}. Error: {}".format(
+                tree, v))
+            raise
+    except subprocess.CalledProcessError as e:
+        log("ceph status command failed with message: {}".format(e))
+        raise
+
+
+def reweight_osd(osd_num, new_weight):
+    """Changes the crush weight of an OSD to the value specified.
+
+    :param osd_num: the osd id which should be changed
+    :param new_weight: the new weight for the OSD
+    :returns: bool. True if the reweight succeeded, else False.
+    :raises CalledProcessError: if an error occurs invoking the ceph command
+    """
+    try:
+        cmd_result = str(subprocess
+                         .check_output(['ceph', 'osd', 'crush',
+                                        'reweight', "osd.{}".format(osd_num),
+                                        new_weight],
+                                       stderr=subprocess.STDOUT)
+                         .decode('UTF-8'))
+        expected_result = "reweighted item id {ID} name \'osd.{ID}\'".format(
+                          ID=osd_num) + " to {}".format(new_weight)
+        log(cmd_result)
+        return expected_result in cmd_result
+    except subprocess.CalledProcessError as e:
+        log("ceph osd crush reweight command failed"
+            " with message: {}".format(e))
+        raise
+
+
+def determine_packages():
+    """Determines packages for installation.
+
+    :returns: list of ceph packages
+    """
+    packages = PACKAGES.copy()
+    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan':
+        btrfs_package = 'btrfs-progs'
+    else:
+        btrfs_package = 'btrfs-tools'
+    packages.append(btrfs_package)
+    return packages
+
+
+def determine_packages_to_remove():
+    """Determines packages for removal
+
+    :returns: list of packages to be removed
+    """
+    rm_packages = REMOVE_PACKAGES.copy()
+    if is_container():
+        install_list = filter_missing_packages(CHRONY_PACKAGE)
+        if not install_list:
+            rm_packages.append(CHRONY_PACKAGE)
+    return rm_packages
+
+
+def bootstrap_manager():
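+    """Bootstrap the ceph-mgr daemon for this host.
+
+    Creates the mgr keyring via 'ceph auth get-or-create' if one does not
+    already exist, then enables and restarts the ceph-mgr systemd unit.
+    """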
+    hostname = socket.gethostname()
+    path = '/var/lib/ceph/mgr/ceph-{}'.format(hostname)
+    keyring = os.path.join(path, 'keyring')
+
+    if os.path.exists(keyring):
+        log('bootstrap_manager: mgr already initialized.')
+    else:
+        mkdir(path, owner=ceph_user(), group=ceph_user())
+        subprocess.check_call(['ceph', 'auth', 'get-or-create',
+                               'mgr.{}'.format(hostname), 'mon',
+                               'allow profile mgr', 'osd', 'allow *',
+                               'mds', 'allow *', '--out-file',
+                               keyring])
+        chownr(path, ceph_user(), ceph_user())
+
+        unit = 'ceph-mgr@{}'.format(hostname)
+        subprocess.check_call(['systemctl', 'enable', unit])
+        service_restart(unit)
+
+
+def osd_noout(enable):
+    """Sets or unsets 'noout'
+
+    :param enable: bool. True to set noout, False to unset.
+    :returns: bool. True if the command ran successfully.
+    :raises CalledProcessError: if an error occurs invoking the ceph command
+    """
+    operation = {
+        True: 'set',
+        False: 'unset',
+    }
+    try:
+        subprocess.check_call(['ceph', '--id', 'admin',
+                               'osd', operation[enable],
+                               'noout'])
+        log('running ceph osd {} noout'.format(operation[enable]))
+        return True
+    except subprocess.CalledProcessError as e:
+        log(e)
+        raise
+
+
+class OSDConfigSetError(Exception):
+    """Error occured applying OSD settings."""
+    pass
+
+
+def apply_osd_settings(settings):
+    """Applies the provided osd settings
+
+    Apply the provided settings to all local OSD unless settings are already
+    present. Settings stop being applied on encountering an error.
+
+    :param settings: dict. Dictionary of settings to apply.
+    :returns: bool. True if commands ran successfully.
+    :raises: OSDConfigSetError
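+
+    Example (illustrative)::
+
+        apply_osd_settings({'osd max backfills': '1',
+                            'osd recovery max active': '1'})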
+    """
+    current_settings = {}
+    base_cmd = 'ceph daemon osd.{osd_id} config --format=json'
+    get_cmd = base_cmd + ' get {key}'
+    set_cmd = base_cmd + ' set {key} {value}'
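+    # 'ceph daemon' talks to the local admin socket, so these commands only
+    # reach OSDs running on this unit (hence get_local_osd_ids() below).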
+
+    def _get_cli_key(key):
+        return key.replace(' ', '_')
+    # Retrieve the current values to check keys are correct and to make this
+    # a noop if the settings are already applied.
+    for osd_id in get_local_osd_ids():
+        for key, value in sorted(settings.items()):
+            cli_key = _get_cli_key(key)
+            cmd = get_cmd.format(osd_id=osd_id, key=cli_key)
+            out = json.loads(
+                subprocess.check_output(cmd.split()).decode('UTF-8'))
+            if 'error' in out:
+                log("Error retrieving osd setting: {}".format(out['error']),
+                    level=ERROR)
+                return False
+            current_settings[key] = out[cli_key]
+        settings_diff = {
+            k: v
+            for k, v in settings.items()
+            if str(v) != str(current_settings[k])}
+        for key, value in sorted(settings_diff.items()):
+            log("Setting {} to {}".format(key, value), level=DEBUG)
+            cmd = set_cmd.format(
+                osd_id=osd_id,
+                key=_get_cli_key(key),
+                value=value)
+            out = json.loads(
+                subprocess.check_output(cmd.split()).decode('UTF-8'))
+            if 'error' in out:
+                log("Error applying osd setting: {}".format(out['error']),
+                    level=ERROR)
+                raise OSDConfigSetError
+    return True
diff --git a/metadata.yaml b/metadata.yaml
index 7462ead1a7fee8b07c97a785f3c2479abb248ae6..ed8d9f37fcf492d5f00a00f1568efb7a78937f1e 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -2,18 +2,19 @@ name: ceph-proxy
 summary: Proxy to Juju external Ceph cluster
 maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
 description: |
- Ceph is a distributed storage and network file system designed to provide
- excellent performance, reliability, and scalability.
+  Ceph is a distributed storage and network file system designed to provide
+  excellent performance, reliability, and scalability.
 tags:
-  - openstack
-  - storage
-  - file-servers
-  - misc
+- openstack
+- storage
+- file-servers
+- misc
 series:
-  - xenial
-  - zesty
-  - trusty
-  - yakkety
+- xenial
+- bionic
+- focal
+- groovy
+- hirsute
 extra-bindings:
   public:
   cluster:
@@ -22,3 +23,5 @@ provides:
     interface: ceph-client
   radosgw:
     interface: ceph-radosgw
+  mds:
+    interface: ceph-mds
diff --git a/osci.yaml b/osci.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c63b99b5e9519fb66ef85877575f0b6c723efad3
--- /dev/null
+++ b/osci.yaml
@@ -0,0 +1,34 @@
+- project:
+    templates:
+      - charm-unit-jobs
+    check:
+      jobs:
+      - bionic-queens # luminous
+      - bionic-stein
+      - bionic-train
+      - bionic-ussuri
+      - focal-ussuri
+      - focal-ussuri-ec
+      - focal-victoria
+      - focal-victoria-ec
+      - groovy-victoria
+      - groovy-victoria-ec
+- job:
+    name: focal-ussuri-ec
+    parent: func-target
+    dependencies: &smoke-jobs
+      - bionic-ussuri
+    vars:
+      tox_extra_args: erasure-coded:focal-ussuri-ec
+- job:
+    name: focal-victoria-ec
+    parent: func-target
+    dependencies: *smoke-jobs
+    vars:
+      tox_extra_args: erasure-coded:focal-victoria-ec
+- job:
+    name: groovy-victoria-ec
+    parent: func-target
+    dependencies: *smoke-jobs
+    vars:
+      tox_extra_args: erasure-coded:groovy-victoria-ec
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 6a3271b078b3af5f2b3cff572172d78317acbe48..360ecbaa155da5d99fbcdb45f8d92bf4ddbfb0f4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,12 +1,24 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
+# This file is managed centrally by release-tools and should not be modified
+# within individual charm repos.  See the 'global' dir contents for available
+# choices of *requirements.txt files for OpenStack Charms:
+#     https://github.com/openstack-charmers/release-tools
+#
+# TODO: Distill the func test requirements from the lint/unit test
+#       requirements.  They are intertwined.  Also, Zaza itself should specify
+#       all of its own requirements and if it doesn't, fix it there.
+#
 pbr>=1.8.0,<1.9.0
-PyYAML>=3.1.0
 simplejson>=2.2.0
 netifaces>=0.10.4
-netaddr>=0.7.12,!=0.7.16
+
+# Strange import error with newer netaddr:
+netaddr>0.7.16,<0.8.0
+
 Jinja2>=2.6  # BSD License (3 clause)
 six>=1.9.0
-dnspython>=1.12.0
+
+# dnspython 2.0.0 dropped py3.5 support
+dnspython<2.0.0; python_version < '3.6'
+dnspython; python_version >= '3.6'
+
 psutil>=1.1.1,<2.0.0
diff --git a/templates/ceph.keyring b/templates/ceph.keyring
new file mode 100644
index 0000000000000000000000000000000000000000..30832f94381d1db9dd359eb1fabf264a9233b2a1
--- /dev/null
+++ b/templates/ceph.keyring
@@ -0,0 +1,3 @@
+[{{ admin_user }}]
+	key = {{admin_key}}
+
diff --git a/templates/mon.keyring b/templates/mon.keyring
index 567c2ead3ab13ed8c81553019344044e0cc435f6..b8aa5bc404e3630c2c02298b571090ca838edfaa 100644
--- a/templates/mon.keyring
+++ b/templates/mon.keyring
@@ -1,3 +1,3 @@
-[client.admin]
+[{{ admin_user }}]
   key = {{admin_key}}
 
diff --git a/test-requirements.txt b/test-requirements.txt
index 9edd4bbf9725ef482235f9d4424f4d9c35f62658..9aea716be87299bc2ce8c52448f5852d2cee309e 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,27 +1,52 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-coverage>=3.6
-mock>=1.2
-flake8>=2.2.4,<=2.4.1
-os-testr>=0.4.1
-charm-tools>=2.0.0
-requests==2.6.0
-# BEGIN: Amulet OpenStack Charm Helper Requirements
-# Liberty client lower constraints
-amulet>=1.14.3,<2.0
-bundletester>=0.6.1,<1.0
-python-ceilometerclient>=1.5.0
-python-cinderclient>=1.4.0
-python-glanceclient>=1.1.0
-python-heatclient>=0.8.0
-python-keystoneclient>=1.7.1
-python-neutronclient>=3.1.0
-python-novaclient>=2.30.1
-python-openstackclient>=1.7.0
-python-swiftclient>=2.6.0
-pika>=0.10.0,<1.0
-distro-info
-# END: Amulet OpenStack Charm Helper Requirements
-# NOTE: workaround for 14.04 pip/tox
-pytz
+# This file is managed centrally by release-tools and should not be modified
+# within individual charm repos.  See the 'global' dir contents for available
+# choices of *requirements.txt files for OpenStack Charms:
+#     https://github.com/openstack-charmers/release-tools
+#
+# TODO: Distill the func test requirements from the lint/unit test
+#       requirements.  They are intertwined.  Also, Zaza itself should specify
+#       all of its own requirements and if it doesn't, fix it there.
+#
+setuptools<50.0.0  # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
+charm-tools>=2.4.4
+
+# Workaround until https://github.com/juju/charm-tools/pull/589 gets
+# published
+keyring<21
+
+requests>=2.18.4
+
+# Newer mock seems to have some syntax which is newer than python3.5 (e.g.
+# f'{something}'
+mock>=1.2,<4.0.0; python_version < '3.6'
+mock>=1.2; python_version >= '3.6'
+
+flake8>=2.2.4
+stestr>=2.2.0
+
+# Dependency of stestr. Workaround for
+# https://github.com/mtreinish/stestr/issues/145
+cliff<3.0.0
+
+# Dependencies of stestr. Newer versions use keywords that didn't exist in
+# python 3.5 yet (e.g. "ModuleNotFoundError")
+importlib-metadata<3.0.0; python_version < '3.6'
+importlib-resources<3.0.0; python_version < '3.6'
+
+# Some Zuul nodes sometimes pull newer versions of these dependencies which
+# dropped support for python 3.5:
+osprofiler<2.7.0;python_version<'3.6'
+stevedore<1.31.0;python_version<'3.6'
+debtcollector<1.22.0;python_version<'3.6'
+oslo.utils<=3.41.0;python_version<'3.6'
+
+coverage>=4.5.2
+pyudev              # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)
+git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0'
+git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
+
+# Needed for charm-glance:
+git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.6'
+tempest<24.0.0;python_version<'3.6'
+
+croniter            # needed for charm-rabbitmq-server unit tests
diff --git a/tests/bundles/bionic-queens.yaml b/tests/bundles/bionic-queens.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c6ec26e9fbdde56abde77ebc0b2c2ac045801433
--- /dev/null
+++ b/tests/bundles/bionic-queens.yaml
@@ -0,0 +1,82 @@
+series: bionic
+applications:
+  ceph-mon:
+    charm: 'cs:~openstack-charmers-next/ceph-mon'
+    num_units: 3
+    options:
+      expected-osd-count: 3
+  ceph-osd:
+    charm: 'cs:~openstack-charmers-next/ceph-osd'
+    num_units: 3
+    storage:
+      osd-devices: 10G
+  ceph-proxy:
+    charm: 'ceph-proxy'
+    num_units: 1
+  ceph-radosgw:
+    charm: 'cs:~openstack-charmers-next/ceph-radosgw'
+    num_units: 1
+  cinder:
+    charm: 'cs:~openstack-charmers-next/cinder'
+    num_units: 1
+    options:
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+  cinder-ceph:
+    charm: 'cs:~openstack-charmers-next/cinder-ceph'
+    options:
+      restrict-ceph-pools: True
+  keystone:
+    charm: 'cs:~openstack-charmers-next/keystone'
+    num_units: 1
+    constraints: mem=1024
+  percona-cluster:
+    charm: 'cs:~openstack-charmers-next/percona-cluster'
+    num_units: 1
+    options:
+      dataset-size: 50%
+      max-connections: 1000
+      innodb-buffer-pool-size: 256M
+      root-password: ChangeMe123
+      sst-password: ChangeMe123
+    constraints: mem=4096
+  rabbitmq-server:
+    charm: 'cs:~openstack-charmers-next/rabbitmq-server'
+    num_units: 1
+    constraints: mem=1024
+relations:
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+  - - 'cinder:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'keystone:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+  - - 'glance:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
diff --git a/tests/bundles/bionic-rocky.yaml b/tests/bundles/bionic-rocky.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a71711c7d286b6068283e4c8371318737fd19009
--- /dev/null
+++ b/tests/bundles/bionic-rocky.yaml
@@ -0,0 +1,99 @@
+series: bionic
+applications:
+  ceph-mon:
+    charm: 'cs:~openstack-charmers-next/ceph-mon'
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: cloud:bionic-rocky
+  ceph-osd:
+    charm: 'cs:~openstack-charmers-next/ceph-osd'
+    num_units: 3
+    storage:
+      osd-devices: 10G
+    options:
+      source: cloud:bionic-rocky
+  ceph-proxy:
+    charm: 'ceph-proxy'
+    num_units: 1
+    options:
+      source: cloud:bionic-rocky
+  ceph-radosgw:
+    charm: 'cs:~openstack-charmers-next/ceph-radosgw'
+    num_units: 1
+    options:
+      source: cloud:bionic-rocky
+  cinder:
+    charm: 'cs:~openstack-charmers-next/cinder'
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-rocky
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+  cinder-ceph:
+    charm: 'cs:~openstack-charmers-next/cinder-ceph'
+    options:
+      restrict-ceph-pools: True
+  keystone:
+    charm: 'cs:~openstack-charmers-next/keystone'
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-rocky
+    constraints: mem=1024
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-rocky
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-rocky
+  percona-cluster:
+    charm: 'cs:~openstack-charmers-next/percona-cluster'
+    num_units: 1
+    options:
+      source: cloud:bionic-rocky
+      dataset-size: 50%
+      max-connections: 1000
+      innodb-buffer-pool-size: 256M
+      root-password: ChangeMe123
+      sst-password: ChangeMe123
+    constraints: mem=4096
+  rabbitmq-server:
+    charm: 'cs:~openstack-charmers-next/rabbitmq-server'
+    num_units: 1
+    constraints: mem=1024
+    options:
+      source: cloud:bionic-rocky
+relations:
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+  - - 'cinder:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'keystone:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+  - - 'glance:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
diff --git a/tests/bundles/bionic-stein.yaml b/tests/bundles/bionic-stein.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2c1f5359b915d11e1491f888663d141b75521687
--- /dev/null
+++ b/tests/bundles/bionic-stein.yaml
@@ -0,0 +1,99 @@
+series: bionic
+applications:
+  ceph-mon:
+    charm: 'cs:~openstack-charmers-next/ceph-mon'
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: cloud:bionic-stein
+  ceph-osd:
+    charm: 'cs:~openstack-charmers-next/ceph-osd'
+    num_units: 3
+    storage:
+      osd-devices: 10G
+    options:
+      source: cloud:bionic-stein
+  ceph-proxy:
+    charm: 'ceph-proxy'
+    num_units: 1
+    options:
+      source: cloud:bionic-stein
+  ceph-radosgw:
+    charm: 'cs:~openstack-charmers-next/ceph-radosgw'
+    num_units: 1
+    options:
+      source: cloud:bionic-stein
+  cinder:
+    charm: 'cs:~openstack-charmers-next/cinder'
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-stein
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+  cinder-ceph:
+    charm: 'cs:~openstack-charmers-next/cinder-ceph'
+    options:
+      restrict-ceph-pools: True
+  keystone:
+    charm: 'cs:~openstack-charmers-next/keystone'
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-stein
+    constraints: mem=1024
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-stein
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-stein
+  percona-cluster:
+    charm: 'cs:~openstack-charmers-next/percona-cluster'
+    num_units: 1
+    options:
+      source: cloud:bionic-stein
+      dataset-size: 50%
+      max-connections: 1000
+      innodb-buffer-pool-size: 256M
+      root-password: ChangeMe123
+      sst-password: ChangeMe123
+    constraints: mem=4096
+  rabbitmq-server:
+    charm: 'cs:~openstack-charmers-next/rabbitmq-server'
+    num_units: 1
+    constraints: mem=1024
+    options:
+      source: cloud:bionic-stein
+relations:
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+  - - 'cinder:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'keystone:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+  - - 'glance:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
diff --git a/tests/bundles/bionic-train.yaml b/tests/bundles/bionic-train.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fd891bd65af094a232d0ee81017823cbe082dcad
--- /dev/null
+++ b/tests/bundles/bionic-train.yaml
@@ -0,0 +1,99 @@
+series: bionic
+applications:
+  ceph-mon:
+    charm: 'cs:~openstack-charmers-next/ceph-mon'
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: cloud:bionic-train
+  ceph-osd:
+    charm: 'cs:~openstack-charmers-next/ceph-osd'
+    num_units: 3
+    storage:
+      osd-devices: 10G
+    options:
+      source: cloud:bionic-train
+  ceph-proxy:
+    charm: 'ceph-proxy'
+    num_units: 1
+    options:
+      source: cloud:bionic-train
+  ceph-radosgw:
+    charm: 'cs:~openstack-charmers-next/ceph-radosgw'
+    num_units: 1
+    options:
+      source: cloud:bionic-train
+  cinder:
+    charm: 'cs:~openstack-charmers-next/cinder'
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-train
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+  cinder-ceph:
+    charm: 'cs:~openstack-charmers-next/cinder-ceph'
+    options:
+      restrict-ceph-pools: True
+  keystone:
+    charm: 'cs:~openstack-charmers-next/keystone'
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-train
+    constraints: mem=1024
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-train
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-train
+  percona-cluster:
+    charm: 'cs:~openstack-charmers-next/percona-cluster'
+    num_units: 1
+    options:
+      source: cloud:bionic-train
+      dataset-size: 50%
+      max-connections: 1000
+      innodb-buffer-pool-size: 256M
+      root-password: ChangeMe123
+      sst-password: ChangeMe123
+    constraints: mem=4096
+  rabbitmq-server:
+    charm: 'cs:~openstack-charmers-next/rabbitmq-server'
+    num_units: 1
+    constraints: mem=1024
+    options:
+      source: cloud:bionic-train
+relations:
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+  - - 'cinder:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'keystone:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+  - - 'glance:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
diff --git a/tests/bundles/bionic-ussuri.yaml b/tests/bundles/bionic-ussuri.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..33e2c0e998f8546eeb2475c201445fd8749723f0
--- /dev/null
+++ b/tests/bundles/bionic-ussuri.yaml
@@ -0,0 +1,100 @@
+series: bionic
+applications:
+  ceph-mon:
+    charm: 'cs:~openstack-charmers-next/ceph-mon'
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: cloud:bionic-ussuri
+  ceph-osd:
+    charm: 'cs:~openstack-charmers-next/ceph-osd'
+    num_units: 3
+    storage:
+      osd-devices: 10G
+    options:
+      source: cloud:bionic-ussuri
+  ceph-proxy:
+    charm: 'ceph-proxy'
+    num_units: 1
+    options:
+      source: cloud:bionic-ussuri
+  ceph-radosgw:
+    charm: 'cs:~openstack-charmers-next/ceph-radosgw'
+    num_units: 1
+    options:
+      source: cloud:bionic-ussuri
+  cinder:
+    charm: 'cs:~openstack-charmers-next/cinder'
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-ussuri
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+  cinder-ceph:
+    charm: 'cs:~openstack-charmers-next/cinder-ceph'
+    options:
+      restrict-ceph-pools: True
+  keystone:
+    charm: 'cs:~openstack-charmers-next/keystone'
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-ussuri
+      admin-password: openstack
+    constraints: mem=1024
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-ussuri
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: cloud:bionic-ussuri
+  percona-cluster:
+    charm: 'cs:~openstack-charmers-next/percona-cluster'
+    num_units: 1
+    options:
+      source: cloud:bionic-ussuri
+      dataset-size: 50%
+      max-connections: 1000
+      innodb-buffer-pool-size: 256M
+      root-password: ChangeMe123
+      sst-password: ChangeMe123
+    constraints: mem=4096
+  rabbitmq-server:
+    charm: 'cs:~openstack-charmers-next/rabbitmq-server'
+    num_units: 1
+    constraints: mem=1024
+    options:
+      source: cloud:bionic-ussuri
+relations:
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+  - - 'cinder:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'keystone:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+  - - 'glance:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
diff --git a/tests/bundles/focal-ussuri-ec.yaml b/tests/bundles/focal-ussuri-ec.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..100fe81c3713def3d05bd79cb2d65853327a555a
--- /dev/null
+++ b/tests/bundles/focal-ussuri-ec.yaml
@@ -0,0 +1,215 @@
+variables:
+  openstack-origin: &openstack-origin distro
+
+series: focal
+
+comment:
+- 'machines section to decide order of deployment. database sooner = faster'
+machines:
+  '0':
+    constraints: mem=3072M
+  '1':
+    constraints: mem=3072M
+  '2':
+    constraints: mem=3072M
+  '3':
+  '4':
+  '5':
+  '6':
+  '7':
+  '8':
+  '9':
+  '10':
+  '11':
+  '12':
+  '13':
+  '14':
+  '15':
+  '16':
+  '17':
+  '18':
+
+applications:
+
+  cinder-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  glance-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  keystone-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+
+  mysql-innodb-cluster:
+    charm: cs:~openstack-charmers-next/mysql-innodb-cluster
+    num_units: 3
+    options:
+      source: *openstack-origin
+    to:
+      - '0'
+      - '1'
+      - '2'
+
+  ceph-mon:
+    charm: cs:~openstack-charmers-next/ceph-mon
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: *openstack-origin
+    to:
+      - '3'
+      - '4'
+      - '5'
+
+  ceph-osd:
+    charm: cs:~openstack-charmers-next/ceph-osd
+    num_units: 6
+    storage:
+      osd-devices: 10G
+    options:
+      source: *openstack-origin
+    to:
+      - '6'
+      - '7'
+      - '8'
+      - '16'
+      - '17'
+      - '18'
+
+  ceph-proxy:
+    charm: ceph-proxy
+    num_units: 1
+    options:
+      source: *openstack-origin
+    to:
+      - '9'
+
+  ceph-radosgw:
+    charm: cs:~openstack-charmers-next/ceph-radosgw
+    num_units: 1
+    options:
+      source: *openstack-origin
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
+    to:
+      - '10'
+
+  cinder:
+    charm: cs:~openstack-charmers-next/cinder
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+    to:
+      - '11'
+
+  cinder-ceph:
+    charm: cs:~openstack-charmers-next/cinder-ceph
+    options:
+      restrict-ceph-pools: True
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
+      ec-profile-plugin: lrc
+      ec-profile-locality: 3
+
+  keystone:
+    charm: cs:~openstack-charmers-next/keystone
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      admin-password: openstack
+    constraints: mem=1024
+    to:
+      - '12'
+
+  rabbitmq-server:
+    charm: cs:~openstack-charmers-next/rabbitmq-server
+    num_units: 1
+    constraints: mem=1024
+    options:
+      source: *openstack-origin
+    to:
+      - '13'
+
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
+      ec-profile-plugin: jerasure
+    to:
+      - '14'
+
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
+      ec-profile-plugin: isa
+      libvirt-image-backend: rbd
+    to:
+      - '15'
+
+
+relations:
+
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'cinder:shared-db'
+    - 'cinder-mysql-router:shared-db'
+  - - 'cinder-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'keystone:shared-db'
+    - 'keystone-mysql-router:shared-db'
+  - - 'keystone-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'glance:shared-db'
+    - 'glance-mysql-router:shared-db'
+  - - 'glance-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'glance:ceph'
+    - 'ceph-proxy:client'
+
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'nova-compute:ceph'
+    - 'ceph-proxy:client'
diff --git a/tests/bundles/focal-ussuri.yaml b/tests/bundles/focal-ussuri.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d917b1c92639f1be1c0446aa306a53e2dcaed223
--- /dev/null
+++ b/tests/bundles/focal-ussuri.yaml
@@ -0,0 +1,186 @@
+variables:
+  openstack-origin: &openstack-origin distro
+
+series: focal
+
+comment:
+- 'machines section to decide order of deployment. database sooner = faster'
+machines:
+  '0':
+    constraints: mem=3072M
+  '1':
+    constraints: mem=3072M
+  '2':
+    constraints: mem=3072M
+  '3':
+  '4':
+  '5':
+  '6':
+  '7':
+  '8':
+  '9':
+  '10':
+  '11':
+  '12':
+  '13':
+  '14':
+  '15':
+
+applications:
+
+  cinder-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  glance-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  keystone-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+
+  mysql-innodb-cluster:
+    charm: cs:~openstack-charmers-next/mysql-innodb-cluster
+    num_units: 3
+    options:
+      source: *openstack-origin
+    to:
+      - '0'
+      - '1'
+      - '2'
+
+  ceph-mon:
+    charm: cs:~openstack-charmers-next/ceph-mon
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: *openstack-origin
+    to:
+      - '3'
+      - '4'
+      - '5'
+
+  ceph-osd:
+    charm: cs:~openstack-charmers-next/ceph-osd
+    num_units: 3
+    storage:
+      osd-devices: 10G
+    options:
+      source: *openstack-origin
+    to:
+      - '6'
+      - '7'
+      - '8'
+
+  ceph-proxy:
+    charm: ceph-proxy
+    num_units: 1
+    options:
+      source: *openstack-origin
+    to:
+      - '9'
+
+  ceph-radosgw:
+    charm: cs:~openstack-charmers-next/ceph-radosgw
+    num_units: 1
+    options:
+      source: *openstack-origin
+    to:
+      - '10'
+
+  cinder:
+    charm: cs:~openstack-charmers-next/cinder
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+    to:
+      - '11'
+
+  cinder-ceph:
+    charm: cs:~openstack-charmers-next/cinder-ceph
+    options:
+      restrict-ceph-pools: True
+
+  keystone:
+    charm: cs:~openstack-charmers-next/keystone
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      admin-password: openstack
+    constraints: mem=1024
+    to:
+      - '12'
+
+  rabbitmq-server:
+    charm: cs:~openstack-charmers-next/rabbitmq-server
+    num_units: 1
+    constraints: mem=1024
+    options:
+      source: *openstack-origin
+    to:
+      - '13'
+
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+    to:
+      - '14'
+
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+    to:
+      - '15'
+
+
+relations:
+
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'cinder:shared-db'
+    - 'cinder-mysql-router:shared-db'
+  - - 'cinder-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'keystone:shared-db'
+    - 'keystone-mysql-router:shared-db'
+  - - 'keystone-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'glance:shared-db'
+    - 'glance-mysql-router:shared-db'
+  - - 'glance-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
diff --git a/tests/bundles/focal-victoria-ec.yaml b/tests/bundles/focal-victoria-ec.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..25f015fd4b86d74545aeb90d4315d1348ad2f099
--- /dev/null
+++ b/tests/bundles/focal-victoria-ec.yaml
@@ -0,0 +1,215 @@
+variables:
+  openstack-origin: &openstack-origin cloud:focal-victoria
+
+series: focal
+
+comment:
+- 'machines section to decide order of deployment. database sooner = faster'
+machines:
+  '0':
+    constraints: mem=3072M
+  '1':
+    constraints: mem=3072M
+  '2':
+    constraints: mem=3072M
+  '3':
+  '4':
+  '5':
+  '6':
+  '7':
+  '8':
+  '9':
+  '10':
+  '11':
+  '12':
+  '13':
+  '14':
+  '15':
+  '16':
+  '17':
+  '18':
+
+applications:
+
+  cinder-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  glance-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  keystone-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+
+  mysql-innodb-cluster:
+    charm: cs:~openstack-charmers-next/mysql-innodb-cluster
+    num_units: 3
+    options:
+      source: *openstack-origin
+    to:
+      - '0'
+      - '1'
+      - '2'
+
+  ceph-mon:
+    charm: cs:~openstack-charmers-next/ceph-mon
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: *openstack-origin
+    to:
+      - '3'
+      - '4'
+      - '5'
+
+  ceph-osd:
+    charm: cs:~openstack-charmers-next/ceph-osd
+    num_units: 6
+    storage:
+      osd-devices: 10G
+    options:
+      source: *openstack-origin
+    to:
+      - '6'
+      - '7'
+      - '8'
+      - '16'
+      - '17'
+      - '18'
+
+  ceph-proxy:
+    charm: ceph-proxy
+    num_units: 1
+    options:
+      source: *openstack-origin
+    to:
+      - '9'
+
+  ceph-radosgw:
+    charm: cs:~openstack-charmers-next/ceph-radosgw
+    num_units: 1
+    options:
+      source: *openstack-origin
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
+    to:
+      - '10'
+
+  cinder:
+    charm: cs:~openstack-charmers-next/cinder
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+    to:
+      - '11'
+
+  cinder-ceph:
+    charm: cs:~openstack-charmers-next/cinder-ceph
+    options:
+      restrict-ceph-pools: True
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
+      ec-profile-plugin: lrc
+      ec-profile-locality: 3
+
+  keystone:
+    charm: cs:~openstack-charmers-next/keystone
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      admin-password: openstack
+    constraints: mem=1024
+    to:
+      - '12'
+
+  rabbitmq-server:
+    charm: cs:~openstack-charmers-next/rabbitmq-server
+    num_units: 1
+    constraints: mem=1024
+    options:
+      source: *openstack-origin
+    to:
+      - '13'
+
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
+      ec-profile-plugin: jerasure
+    to:
+      - '14'
+
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
+      ec-profile-plugin: isa
+      libvirt-image-backend: rbd
+    to:
+      - '15'
+
+
+relations:
+
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'cinder:shared-db'
+    - 'cinder-mysql-router:shared-db'
+  - - 'cinder-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'keystone:shared-db'
+    - 'keystone-mysql-router:shared-db'
+  - - 'keystone-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'glance:shared-db'
+    - 'glance-mysql-router:shared-db'
+  - - 'glance-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'glance:ceph'
+    - 'ceph-proxy:client'
+
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'nova-compute:ceph'
+    - 'ceph-proxy:client'
diff --git a/tests/bundles/focal-victoria.yaml b/tests/bundles/focal-victoria.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..da9782f4157e2dd30b78902d95f149166dc89a79
--- /dev/null
+++ b/tests/bundles/focal-victoria.yaml
@@ -0,0 +1,186 @@
+variables:
+  openstack-origin: &openstack-origin cloud:focal-victoria
+
+series: focal
+
+comment:
+- 'machines section to decide order of deployment. database sooner = faster'
+machines:
+  '0':
+    constraints: mem=3072M
+  '1':
+    constraints: mem=3072M
+  '2':
+    constraints: mem=3072M
+  '3':
+  '4':
+  '5':
+  '6':
+  '7':
+  '8':
+  '9':
+  '10':
+  '11':
+  '12':
+  '13':
+  '14':
+  '15':
+
+applications:
+
+  cinder-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  glance-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  keystone-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+
+  mysql-innodb-cluster:
+    charm: cs:~openstack-charmers-next/mysql-innodb-cluster
+    num_units: 3
+    options:
+      source: *openstack-origin
+    to:
+      - '0'
+      - '1'
+      - '2'
+
+  ceph-mon:
+    charm: cs:~openstack-charmers-next/ceph-mon
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: *openstack-origin
+    to:
+      - '3'
+      - '4'
+      - '5'
+
+  ceph-osd:
+    charm: cs:~openstack-charmers-next/ceph-osd
+    num_units: 3
+    storage:
+      osd-devices: 10G
+    options:
+      source: *openstack-origin
+    to:
+      - '6'
+      - '7'
+      - '8'
+
+  ceph-proxy:
+    charm: ceph-proxy
+    num_units: 1
+    options:
+      source: *openstack-origin
+    to:
+      - '9'
+
+  ceph-radosgw:
+    charm: cs:~openstack-charmers-next/ceph-radosgw
+    num_units: 1
+    options:
+      source: *openstack-origin
+    to:
+      - '10'
+
+  cinder:
+    charm: cs:~openstack-charmers-next/cinder
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+    to:
+      - '11'
+
+  cinder-ceph:
+    charm: cs:~openstack-charmers-next/cinder-ceph
+    options:
+      restrict-ceph-pools: True
+
+  keystone:
+    charm: cs:~openstack-charmers-next/keystone
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      admin-password: openstack
+    constraints: mem=1024
+    to:
+      - '12'
+
+  rabbitmq-server:
+    charm: cs:~openstack-charmers-next/rabbitmq-server
+    num_units: 1
+    constraints: mem=1024
+    options:
+      source: *openstack-origin
+    to:
+      - '13'
+
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+    to:
+      - '14'
+
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+    to:
+      - '15'
+
+
+relations:
+
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'cinder:shared-db'
+    - 'cinder-mysql-router:shared-db'
+  - - 'cinder-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'keystone:shared-db'
+    - 'keystone-mysql-router:shared-db'
+  - - 'keystone-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'glance:shared-db'
+    - 'glance-mysql-router:shared-db'
+  - - 'glance-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
diff --git a/tests/bundles/groovy-victoria-ec.yaml b/tests/bundles/groovy-victoria-ec.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b0b04d8f9a718a68289f2d19ec49be0453506194
--- /dev/null
+++ b/tests/bundles/groovy-victoria-ec.yaml
@@ -0,0 +1,215 @@
+variables:
+  openstack-origin: &openstack-origin distro
+
+series: groovy
+
+comment:
+- 'machines section to decide order of deployment. database sooner = faster'
+machines:
+  '0':
+    constraints: mem=3072M
+  '1':
+    constraints: mem=3072M
+  '2':
+    constraints: mem=3072M
+  '3':
+  '4':
+  '5':
+  '6':
+  '7':
+  '8':
+  '9':
+  '10':
+  '11':
+  '12':
+  '13':
+  '14':
+  '15':
+  '16':
+  '17':
+  '18':
+
+applications:
+
+  cinder-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  glance-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  keystone-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+
+  mysql-innodb-cluster:
+    charm: cs:~openstack-charmers-next/mysql-innodb-cluster
+    num_units: 3
+    options:
+      source: *openstack-origin
+    to:
+      - '0'
+      - '1'
+      - '2'
+
+  ceph-mon:
+    charm: cs:~openstack-charmers-next/ceph-mon
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: *openstack-origin
+    to:
+      - '3'
+      - '4'
+      - '5'
+
+  ceph-osd:
+    charm: cs:~openstack-charmers-next/ceph-osd
+    num_units: 6
+    storage:
+      osd-devices: 10G
+    options:
+      source: *openstack-origin
+    to:
+      - '6'
+      - '7'
+      - '8'
+      - '16'
+      - '17'
+      - '18'
+
+  ceph-proxy:
+    charm: ceph-proxy
+    num_units: 1
+    options:
+      source: *openstack-origin
+    to:
+      - '9'
+
+  ceph-radosgw:
+    charm: cs:~openstack-charmers-next/ceph-radosgw
+    num_units: 1
+    options:
+      source: *openstack-origin
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
+    to:
+      - '10'
+
+  cinder:
+    charm: cs:~openstack-charmers-next/cinder
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+    to:
+      - '11'
+
+  cinder-ceph:
+    charm: cs:~openstack-charmers-next/cinder-ceph
+    options:
+      restrict-ceph-pools: True
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
+      ec-profile-plugin: lrc
+      ec-profile-locality: 3
+
+  keystone:
+    charm: cs:~openstack-charmers-next/keystone
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      admin-password: openstack
+    constraints: mem=1024
+    to:
+      - '12'
+
+  rabbitmq-server:
+    charm: cs:~openstack-charmers-next/rabbitmq-server
+    num_units: 1
+    constraints: mem=1024
+    options:
+      source: *openstack-origin
+    to:
+      - '13'
+
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
+      ec-profile-plugin: jerasure
+    to:
+      - '14'
+
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      pool-type: erasure-coded
+      ec-profile-k: 4
+      ec-profile-m: 2
+      ec-profile-plugin: isa
+      libvirt-image-backend: rbd
+    to:
+      - '15'
+
+
+relations:
+
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'cinder:shared-db'
+    - 'cinder-mysql-router:shared-db'
+  - - 'cinder-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'keystone:shared-db'
+    - 'keystone-mysql-router:shared-db'
+  - - 'keystone-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'glance:shared-db'
+    - 'glance-mysql-router:shared-db'
+  - - 'glance-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'glance:ceph'
+    - 'ceph-proxy:client'
+
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'nova-compute:ceph'
+    - 'ceph-proxy:client'
diff --git a/tests/bundles/groovy-victoria.yaml b/tests/bundles/groovy-victoria.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..74a29970853941ef106ab20ba34476bd02afb8c1
--- /dev/null
+++ b/tests/bundles/groovy-victoria.yaml
@@ -0,0 +1,188 @@
+variables:
+  openstack-origin: &openstack-origin distro
+
+series: groovy
+
+comment:
+- 'machines section to decide order of deployment. database sooner = faster'
+machines:
+  '0':
+    constraints: mem=3072M
+  '1':
+    constraints: mem=3072M
+  '2':
+    constraints: mem=3072M
+  '3':
+  '4':
+  '5':
+  '6':
+  '7':
+  '8':
+  '9':
+  '10':
+  '11':
+  '12':
+  '13':
+  '14':
+  '15':
+
+applications:
+
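+  # mysql-router is a subordinate charm: a unit is spawned per related
+  # principal application, so no num_units or placement is given here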
+  cinder-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  glance-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  keystone-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+
+  mysql-innodb-cluster:
+    charm: cs:~openstack-charmers-next/mysql-innodb-cluster
+    num_units: 3
+    options:
+      source: *openstack-origin
+    to:
+      - '0'
+      - '1'
+      - '2'
+
+  ceph-mon:
+    charm: cs:~openstack-charmers-next/ceph-mon
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: *openstack-origin
+    to:
+      - '3'
+      - '4'
+      - '5'
+
+  ceph-osd:
+    charm: cs:~openstack-charmers-next/ceph-osd
+    num_units: 3
+    storage:
+      osd-devices: 10G
+    options:
+      source: *openstack-origin
+    to:
+      - '6'
+      - '7'
+      - '8'
+
+  ceph-proxy:
+    charm: ceph-proxy
+    num_units: 1
+    options:
+      source: *openstack-origin
+    to:
+      - '9'
+
+  ceph-radosgw:
+    charm: cs:~openstack-charmers-next/ceph-radosgw
+    num_units: 1
+    options:
+      source: *openstack-origin
+    to:
+      - '10'
+
+  cinder:
+    charm: cs:~openstack-charmers-next/cinder
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+    to:
+      - '11'
+
+  cinder-ceph:
+    charm: cs:~openstack-charmers-next/cinder-ceph
+    options:
+      restrict-ceph-pools: True
+
+  keystone:
+    charm: cs:~openstack-charmers-next/keystone
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+      admin-password: openstack
+    constraints: mem=1024
+    to:
+      - '12'
+
+  rabbitmq-server:
+    charm: cs:~openstack-charmers-next/rabbitmq-server
+    num_units: 1
+    constraints: mem=1024
+    options:
+      source: *openstack-origin
+    to:
+      - '13'
+
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+    to:
+      - '14'
+
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+    to:
+      - '15'
+
+
+relations:
+
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'cinder:shared-db'
+    - 'cinder-mysql-router:shared-db'
+  - - 'cinder-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'keystone:shared-db'
+    - 'keystone-mysql-router:shared-db'
+  - - 'keystone-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'glance:shared-db'
+    - 'glance-mysql-router:shared-db'
+  - - 'glance-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
diff --git a/tests/bundles/trusty-mitaka.yaml b/tests/bundles/trusty-mitaka.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7dbef7a88e4511349e3ceb33e659cde683156339
--- /dev/null
+++ b/tests/bundles/trusty-mitaka.yaml
@@ -0,0 +1,115 @@
+series: trusty
+applications:
+  ceph-mon:
+    charm: 'cs:~openstack-charmers-next/ceph-mon'
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: cloud:trusty-mitaka
+  ceph-osd:
+    charm: 'cs:~openstack-charmers-next/ceph-osd'
+    num_units: 3
+    storage:
+      osd-devices: 10G
+    options:
+      source: cloud:trusty-mitaka
+  ceph-proxy:
+    charm: 'ceph-proxy'
+    num_units: 1
+    options:
+      source: cloud:trusty-mitaka
+  ceph-radosgw:
+    charm: 'cs:~openstack-charmers-next/ceph-radosgw'
+    num_units: 1
+    options:
+      source: cloud:trusty-mitaka
+  cinder:
+    charm: 'cs:~openstack-charmers-next/cinder'
+    num_units: 1
+    options:
+      openstack-origin: cloud:trusty-mitaka
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: cloud:trusty-mitaka
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: cloud:trusty-mitaka
+  nova-cloud-controller:
+    charm: cs:~openstack-charmers-next/nova-cloud-controller
+    num_units: 1
+    options:
+      openstack-origin: cloud:trusty-mitaka
+  cinder-ceph:
+    charm: 'cs:~openstack-charmers-next/cinder-ceph'
+    options:
+      restrict-ceph-pools: True
+  keystone:
+    charm: 'cs:~openstack-charmers-next/keystone'
+    num_units: 1
+    options:
+      openstack-origin: cloud:trusty-mitaka
+    constraints: mem=1024
+  percona-cluster:
+    charm: 'cs:trusty/percona-cluster'
+    num_units: 1
+    options:
+      source: cloud:trusty-mitaka
+      dataset-size: 50%
+      max-connections: 1000
+      innodb-buffer-pool-size: 256M
+      root-password: ChangeMe123
+      sst-password: ChangeMe123
+    constraints: mem=4096
+  rabbitmq-server:
+    charm: 'cs:~openstack-charmers-next/rabbitmq-server'
+    num_units: 1
+    constraints: mem=1024
+    options:
+      source: cloud:trusty-mitaka
+relations:
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+  - - 'cinder:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'keystone:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+  - - 'glance:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
+  - - 'glance:image-service'
+    - 'nova-cloud-controller:image-service'
+  - - 'keystone:identity-service'
+    - 'nova-cloud-controller:identity-service'
+  - - 'nova-compute:cloud-compute'
+    - 'nova-cloud-controller:cloud-compute'
+  - - 'percona-cluster:shared-db'
+    - 'nova-cloud-controller:shared-db'
+  - - 'rabbitmq-server:amqp'
+    - 'nova-cloud-controller:amqp'
+
diff --git a/tests/bundles/xenial-mitaka.yaml b/tests/bundles/xenial-mitaka.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0c4ee4d4ccf946266e6e4ccfc8cfc8cdcea8a1eb
--- /dev/null
+++ b/tests/bundles/xenial-mitaka.yaml
@@ -0,0 +1,82 @@
+series: xenial
+applications:
+  ceph-mon:
+    charm: 'cs:~openstack-charmers-next/ceph-mon'
+    num_units: 3
+    options:
+      expected-osd-count: 3
+  ceph-osd:
+    charm: 'cs:~openstack-charmers-next/ceph-osd'
+    num_units: 3
+    storage:
+      osd-devices: 10G
+  ceph-proxy:
+    charm: 'ceph-proxy'
+    num_units: 1
+  ceph-radosgw:
+    charm: 'cs:~openstack-charmers-next/ceph-radosgw'
+    num_units: 1
+  cinder:
+    charm: 'cs:~openstack-charmers-next/cinder'
+    num_units: 1
+    options:
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+  cinder-ceph:
+    charm: 'cs:~openstack-charmers-next/cinder-ceph'
+    options:
+      restrict-ceph-pools: True
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+  keystone:
+    charm: 'cs:~openstack-charmers-next/keystone'
+    num_units: 1
+    constraints: mem=1024
+  percona-cluster:
+    charm: 'cs:~openstack-charmers-next/percona-cluster'
+    num_units: 1
+    options:
+      dataset-size: 50%
+      max-connections: 1000
+      innodb-buffer-pool-size: 256M
+      root-password: ChangeMe123
+      sst-password: ChangeMe123
+    constraints: mem=4096
+  rabbitmq-server:
+    charm: 'cs:~openstack-charmers-next/rabbitmq-server'
+    num_units: 1
+    constraints: mem=1024
+relations:
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+  - - 'cinder:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'keystone:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+  - - 'glance:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
diff --git a/tests/bundles/xenial-ocata.yaml b/tests/bundles/xenial-ocata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d7aa8bd75cd4a127d0bbb3af17bfd1b3ec3d8120
--- /dev/null
+++ b/tests/bundles/xenial-ocata.yaml
@@ -0,0 +1,99 @@
+series: xenial
+applications:
+  ceph-mon:
+    charm: 'cs:~openstack-charmers-next/ceph-mon'
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: cloud:xenial-ocata
+  ceph-osd:
+    charm: 'cs:~openstack-charmers-next/ceph-osd'
+    num_units: 3
+    storage:
+      osd-devices: 10G
+    options:
+      source: cloud:xenial-ocata
+  ceph-proxy:
+    charm: 'ceph-proxy'
+    num_units: 1
+    options:
+      source: cloud:xenial-ocata
+  ceph-radosgw:
+    charm: 'cs:~openstack-charmers-next/ceph-radosgw'
+    num_units: 1
+    options:
+      source: cloud:xenial-ocata
+  cinder:
+    charm: 'cs:~openstack-charmers-next/cinder'
+    num_units: 1
+    options:
+      openstack-origin: cloud:xenial-ocata
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+  cinder-ceph:
+    charm: 'cs:~openstack-charmers-next/cinder-ceph'
+    options:
+      restrict-ceph-pools: True
+  keystone:
+    charm: 'cs:~openstack-charmers-next/keystone'
+    num_units: 1
+    options:
+      openstack-origin: cloud:xenial-ocata
+    constraints: mem=1024
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: cloud:xenial-ocata
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: cloud:xenial-ocata
+  percona-cluster:
+    charm: 'cs:~openstack-charmers-next/percona-cluster'
+    num_units: 1
+    options:
+      source: cloud:xenial-ocata
+      dataset-size: 50%
+      max-connections: 1000
+      innodb-buffer-pool-size: 256M
+      root-password: ChangeMe123
+      sst-password: ChangeMe123
+    constraints: mem=4096
+  rabbitmq-server:
+    charm: 'cs:~openstack-charmers-next/rabbitmq-server'
+    num_units: 1
+    constraints: mem=1024
+    options:
+      source: cloud:xenial-ocata
+relations:
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+  - - 'cinder:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'keystone:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+  - - 'glance:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
diff --git a/tests/bundles/xenial-pike.yaml b/tests/bundles/xenial-pike.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0c48f98680393c70faeb861f20e34f90afee6875
--- /dev/null
+++ b/tests/bundles/xenial-pike.yaml
@@ -0,0 +1,99 @@
+series: xenial
+applications:
+  ceph-mon:
+    charm: 'cs:~openstack-charmers-next/ceph-mon'
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: cloud:xenial-pike
+  ceph-osd:
+    charm: 'cs:~openstack-charmers-next/ceph-osd'
+    num_units: 3
+    storage:
+      osd-devices: 10G
+    options:
+      source: cloud:xenial-pike
+  ceph-proxy:
+    charm: 'ceph-proxy'
+    num_units: 1
+    options:
+      source: cloud:xenial-pike
+  ceph-radosgw:
+    charm: 'cs:~openstack-charmers-next/ceph-radosgw'
+    num_units: 1
+    options:
+      source: cloud:xenial-pike
+  cinder:
+    charm: 'cs:~openstack-charmers-next/cinder'
+    num_units: 1
+    options:
+      openstack-origin: cloud:xenial-pike
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+  cinder-ceph:
+    charm: 'cs:~openstack-charmers-next/cinder-ceph'
+    options:
+      restrict-ceph-pools: True
+  keystone:
+    charm: 'cs:~openstack-charmers-next/keystone'
+    num_units: 1
+    options:
+      openstack-origin: cloud:xenial-pike
+    constraints: mem=1024
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: cloud:xenial-pike
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: cloud:xenial-pike
+  percona-cluster:
+    charm: 'cs:~openstack-charmers-next/percona-cluster'
+    num_units: 1
+    options:
+      source: cloud:xenial-pike
+      dataset-size: 50%
+      max-connections: 1000
+      innodb-buffer-pool-size: 256M
+      root-password: ChangeMe123
+      sst-password: ChangeMe123
+    constraints: mem=4096
+  rabbitmq-server:
+    charm: 'cs:~openstack-charmers-next/rabbitmq-server'
+    num_units: 1
+    constraints: mem=1024
+    options:
+      source: cloud:xenial-pike
+relations:
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+  - - 'cinder:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'keystone:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+  - - 'glance:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
diff --git a/tests/bundles/xenial-queens.yaml b/tests/bundles/xenial-queens.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9fd00d56e1de1e8479617d7fcbcef922e676bbe2
--- /dev/null
+++ b/tests/bundles/xenial-queens.yaml
@@ -0,0 +1,99 @@
+series: xenial
+applications:
+  ceph-mon:
+    charm: 'cs:~openstack-charmers-next/ceph-mon'
+    num_units: 3
+    options:
+      expected-osd-count: 3
+      source: cloud:xenial-queens
+  ceph-osd:
+    charm: 'cs:~openstack-charmers-next/ceph-osd'
+    num_units: 3
+    storage:
+      osd-devices: 10G
+    options:
+      source: cloud:xenial-queens
+  ceph-proxy:
+    charm: 'ceph-proxy'
+    num_units: 1
+    options:
+      source: cloud:xenial-queens
+  ceph-radosgw:
+    charm: 'cs:~openstack-charmers-next/ceph-radosgw'
+    num_units: 1
+    options:
+      source: cloud:xenial-queens
+  cinder:
+    charm: 'cs:~openstack-charmers-next/cinder'
+    num_units: 1
+    options:
+      openstack-origin: cloud:xenial-queens
+      block-device: ""
+      ephemeral-unmount: ""
+      glance-api-version: 2
+      overwrite: "false"
+    constraints: mem=2048
+  cinder-ceph:
+    charm: 'cs:~openstack-charmers-next/cinder-ceph'
+    options:
+      restrict-ceph-pools: True
+  keystone:
+    charm: 'cs:~openstack-charmers-next/keystone'
+    num_units: 1
+    options:
+      openstack-origin: cloud:xenial-queens
+    constraints: mem=1024
+  glance:
+    charm: cs:~openstack-charmers-next/glance
+    num_units: 1
+    options:
+      openstack-origin: cloud:xenial-queens
+  nova-compute:
+    charm: cs:~openstack-charmers-next/nova-compute
+    num_units: 1
+    options:
+      openstack-origin: cloud:xenial-queens
+  percona-cluster:
+    charm: 'cs:~openstack-charmers-next/percona-cluster'
+    num_units: 1
+    options:
+      source: cloud:xenial-queens
+      dataset-size: 50%
+      max-connections: 1000
+      innodb-buffer-pool-size: 256M
+      root-password: ChangeMe123
+      sst-password: ChangeMe123
+    constraints: mem=4096
+  rabbitmq-server:
+    charm: 'cs:~openstack-charmers-next/rabbitmq-server'
+    num_units: 1
+    constraints: mem=1024
+    options:
+      source: cloud:xenial-queens
+relations:
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+  - - 'ceph-proxy:radosgw'
+    - 'ceph-radosgw:mon'
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+  - - 'cinder:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'keystone:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+  - - 'cinder-ceph:storage-backend'
+    - 'cinder:storage-backend'
+  - - 'cinder-ceph:ceph'
+    - 'ceph-proxy:client'
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+  - - 'glance:shared-db'
+    - 'percona-cluster:shared-db'
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
diff --git a/tests/tests.yaml b/tests/tests.yaml
index 4cf93d016eb1f974c6a3479a9b2ed95e3c09921e..9849fca1595f5e41f2f50f690a0808670c415dbe 100644
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -1,17 +1,67 @@
-# Bootstrap the model if necessary.
-bootstrap: True
-# Re-use bootstrap node.
-reset: True
-# Use tox/requirements to drive the venv instead of bundletester's venv feature.
-virtualenv: False
-# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet.
-makefile: []
-# Do not specify juju PPA sources.  Juju is presumed to be pre-installed
-# and configured in all test runner environments.
-#sources:
-# Do not specify or rely on system packages.
-#packages:
-# Do not specify python packages here.  Use test-requirements.txt
-# and tox instead.  ie. The venv is constructed before bundletester
-# is invoked.
-#python-packages:
+charm_name: ceph-proxy
+
+configure:
+  - zaza.openstack.configure.ceph_proxy.setup_ceph_proxy
+  - erasure-coded:
+    - zaza.openstack.configure.ceph_proxy.setup_ceph_proxy
+
+tests:
+  - zaza.openstack.charm_tests.ceph.tests.CephProxyTest
+  - erasure-coded:
+    - zaza.openstack.charm_tests.ceph.tests.CephProxyTest
+    - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes
+
+gate_bundles:
+  - xenial-mitaka # jewel
+  - bionic-queens # luminous
+  - bionic-stein
+  - bionic-train
+  - bionic-ussuri
+  - focal-ussuri
+  - erasure-coded: focal-ussuri-ec
+  - focal-victoria
+  - erasure-coded: focal-victoria-ec
+  - groovy-victoria
+  - erasure-coded: groovy-victoria-ec
+
+dev_bundles:
+  # Icehouse
+  - trusty-icehouse
+  # Jewel
+  - trusty-mitaka
+  - xenial-ocata
+  # Pike
+  - xenial-pike
+  - xenial-queens # luminous
+  - bionic-rocky  # mimic
+
+smoke_bundles:
+  - focal-ussuri
+
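+# Steady-state workload statuses zaza waits for after deployment; ceph-proxy
+# intentionally sits 'blocked' until an FSID and admin key are configured.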
+target_deploy_status:
+  ceph-proxy:
+    workload-status: blocked
+    workload-status-message: Ensure FSID and admin-key are set
+  ceph-radosgw:
+    workload-status: waiting
+    workload-status-message: "Incomplete relations: mon"
+  keystone:
+    workload-status: active
+    workload-status-message: "Unit is ready"
+  nova-compute:
+    workload-status: waiting
+    workload-status-message: "Incomplete relations: storage-backend"
+  cinder-ceph:
+    workload-status: waiting
+    workload-status-message: "Ceph broker request incomplete"
+  glance:
+    workload-status: waiting
+    workload-status-message: "Incomplete relations: storage-backend"
+
+tests_options:
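+  # deployed with juju's --force: the charms do not declare groovy support yet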
+  force_deploy:
+    - groovy-victoria
+    - groovy-victoria-ec
diff --git a/tox.ini b/tox.ini
index 7c2936e37d7b9825c70054578413f70060dedf2a..ab9593f3d7b38a1248659bc8e6edd0b07bc68641 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,85 +1,134 @@
-# Classic charm: ./tox.ini
+# Classic charm (with zaza): ./tox.ini
 # This file is managed centrally by release-tools and should not be modified
-# within individual charm repos.
+# within individual charm repos.  See the 'global' dir contents for available
+# choices of tox.ini for OpenStack Charms:
+#     https://github.com/openstack-charmers/release-tools
+#
+# TODO: Distill the func test requirements from the lint/unit test
+#       requirements.  They are intertwined.  Also, Zaza itself should specify
+#       all of its own requirements and if it doesn't, fix it there.
 [tox]
-envlist = pep8,py27
+envlist = pep8,py3
 skipsdist = True
+# NOTE: Avoid build/test env pollution by not enabling sitepackages.
+sitepackages = False
+# NOTE: Avoid false positives by not skipping missing interpreters.
+skip_missing_interpreters = False
+# NOTES:
+# * We avoid the new dependency resolver by pinning pip < 20.3, see
+#   https://github.com/pypa/pip/issues/9187
+# * Pinning dependencies requires tox >= 3.2.0, see
+#   https://tox.readthedocs.io/en/latest/config.html#conf-requires
+# * It is also necessary to pin virtualenv as a newer virtualenv would still
+#   lead to fetching the latest pip in the func* tox targets, see
+#   https://stackoverflow.com/a/38133283
+requires = pip < 20.3
+           virtualenv < 20.0
+# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci
+minversion = 3.2.0
 
 [testenv]
 setenv = VIRTUAL_ENV={envdir}
          PYTHONHASHSEED=0
          CHARM_DIR={envdir}
-         AMULET_SETUP_TIMEOUT=2700
 install_command =
-  pip install --allow-unverified python-apt {opts} {packages}
-commands = ostestr {posargs}
+  pip install {opts} {packages}
+commands = stestr run --slowest {posargs}
 whitelist_externals = juju
-passenv = HOME TERM AMULET_* CS_API_*
-
-[testenv:py27]
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
+passenv = HOME TERM CS_* OS_* TEST_*
+deps = -r{toxinidir}/test-requirements.txt
 
 [testenv:py35]
 basepython = python3.5
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 
-[testenv:pep8]
-basepython = python2.7
+[testenv:py36]
+basepython = python3.6
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
-commands = flake8 {posargs} hooks unit_tests tests actions lib
-           charm-proof
-
-[testenv:venv]
-commands = {posargs}
 
-[testenv:func27-noop]
-# DRY RUN - For Debug
-basepython = python2.7
+[testenv:py37]
+basepython = python3.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
-commands =
-    bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy
 
-[testenv:func27]
-# Charm Functional Test
-# Run all gate tests which are +x (expected to always pass)
-basepython = python2.7
+[testenv:py38]
+basepython = python3.8
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
-commands =
-    bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy
 
-[testenv:func27-smoke]
-# Charm Functional Test
-# Run a specific test as an Amulet smoke test (expected to always pass)
-basepython = python2.7
+[testenv:py3]
+basepython = python3
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
-commands =
-    bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy
 
-[testenv:func27-dfs]
-# Charm Functional Test
-# Run all deploy-from-source tests which are +x (may not always pass!)
-basepython = python2.7
+[testenv:pep8]
+basepython = python3
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
-commands =
-    bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy
+commands = flake8 {posargs} hooks unit_tests tests actions lib files
+           charm-proof
 
-[testenv:func27-dev]
-# Charm Functional Test
-# Run all development test targets which are +x (may not always pass!)
-basepython = python2.7
+[testenv:cover]
+# Technique based heavily upon
+# https://github.com/openstack/nova/blob/master/tox.ini
+basepython = python3
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
+setenv =
+    {[testenv]setenv}
+    PYTHON=coverage run
+commands =
+    coverage erase
+    stestr run --slowest {posargs}
+    coverage combine
+    coverage html -d cover
+    coverage xml -o cover/coverage.xml
+    coverage report
+
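+# Consumed by 'coverage run' in the cover target above; stestr runs parallel
+# workers, hence the multiprocessing concurrency and the combine step.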
+[coverage:run]
+branch = True
+concurrency = multiprocessing
+parallel = True
+source =
+    .
+omit =
+    .tox/*
+    */charmhelpers/*
+    unit_tests/*
+
+[testenv:venv]
+basepython = python3
+commands = {posargs}
+
+[testenv:func-noop]
+basepython = python3
+commands =
+    functest-run-suite --help
+
+[testenv:func]
+basepython = python3
+commands =
+    functest-run-suite --keep-model
+
+[testenv:func-smoke]
+basepython = python3
+commands =
+    functest-run-suite --keep-model --smoke
+
+[testenv:func-dev]
+basepython = python3
+commands =
+    functest-run-suite --keep-model --dev
+
+[testenv:func-target]
+basepython = python3
 commands =
-    bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy
+    functest-run-suite --keep-model --bundle {posargs}
 
 [flake8]
-ignore = E402,E226
+ignore = E402,E226,W503,W504
 exclude = */charmhelpers
diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py
index f80aab3d112c7cc461e0990054fcdb3e09fe8c59..34acae80a6f0dced8e80dfbd5867137781959065 100644
--- a/unit_tests/__init__.py
+++ b/unit_tests/__init__.py
@@ -1,2 +1,22 @@
+import os
 import sys
-sys.path.append('hooks')
+
+
+_path = os.path.dirname(os.path.realpath(__file__))
+_actions = os.path.abspath(os.path.join(_path, '../actions'))
+_hooks = os.path.abspath(os.path.join(_path, '../hooks'))
+_charmhelpers = os.path.abspath(os.path.join(_path, '../charmhelpers'))
+_unit_tests = os.path.abspath(os.path.join(_path, '../unit_tests'))
+
+
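+# Insert at index 1 so sys.path[0] (the interpreter's script directory) keeps
+# precedence while the charm's own trees shadow any system-installed copies.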
+def _add_path(path):
+    if path not in sys.path:
+        sys.path.insert(1, path)
+
+
+_add_path(_actions)
+_add_path(_hooks)
+_add_path(_charmhelpers)
+_add_path(_unit_tests)
diff --git a/unit_tests/test_ceph.py b/unit_tests/test_ceph.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd8d7097d38ce108a97578442f382cb1e6bae39d
--- /dev/null
+++ b/unit_tests/test_ceph.py
@@ -0,0 +1,99 @@
+import collections
+import subprocess
+import unittest
+
+import mock
+
+import ceph
+
+
+class CephTestCase(unittest.TestCase):
+    def setUp(self):
+        super(CephTestCase, self).setUp()
+
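+    # Stand-ins for the charm's config(): 'user-keys' is a whitespace-
+    # separated list of '<entity>:<base64 key>' pairs.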
+    @staticmethod
+    def populated_config_side_effect(key):
+        return {
+            'user-keys':
+            'client.cinder-ceph:AQAij2tbMNjMOhAAqInpXQLFrltDgmYid6KXbg== '
+            'client.glance:AQCnjmtbuEACMxAA7joUmgLIGI4/3LKkPzUy8g== '
+            'client.gnocchi:AQDk7qJb0csAFRAAQqPU6HchVW3PT6ymgXdI/A== '
+            'client.nova-compute-kvm:'
+            'AQBkjmtb1hWxLxAA3UhxSblgFSCtHVoZ8W6rNQ== '
+            'client.radosgw.gateway:'
+            'AQBljmtb65mrHhAAGy9VRkfsatWVLb9EpoWDfw==',
+            'admin-user': 'client.myadmin'
+        }[key]
+
+    @staticmethod
+    def empty_config_side_effect(key):
+        return {
+            'user-keys': '',
+            'admin-user': 'client.myadmin'
+        }[key]
+
+    @mock.patch('ceph.config')
+    def test_config_user_key_populated(self, mock_config):
+        user_name = 'glance'
+        user_key = 'AQCnjmtbuEACMxAA7joUmgLIGI4/3LKkPzUy8g=='
+
+        mock_config.side_effect = self.populated_config_side_effect
+        named_key = ceph._config_user_key(user_name)
+        self.assertEqual(user_key, named_key)
+
+    @mock.patch('ceph.config')
+    def test_config_empty_user_key(self, mock_config):
+        user_name = 'cinder-ceph'
+
+        mock_config.side_effect = self.empty_config_side_effect
+        named_key = ceph._config_user_key(user_name)
+        self.assertIsNone(named_key)
+
+    @mock.patch.object(ceph, 'ceph_user')
+    @mock.patch('subprocess.check_output')
+    @mock.patch('ceph.config')
+    def test_get_named_key_new(self, mock_config, mock_check_output,
+                               mock_ceph_user):
+        mock_ceph_user.return_value = 'ceph'
+        user_name = 'cinder-ceph'
+        expected_key = 'AQCnjmtbuEACMxAA7joUmgLIGI4/3LKkPzUy8g=='
+        expected_output = ('[client.testuser]\n        key = {}'
+                           .format(expected_key))
+
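+        # only 'get-or-create' succeeds; every other ceph call fails, forcing
+        # get_named_key() down its key-creation path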
+        def check_output_side_effect(cmd):
+            if 'get-or-create' in cmd:
+                return expected_output.encode('utf-8')
+            else:
+                raise subprocess.CalledProcessError(1, "")
+
+        mock_config.side_effect = self.empty_config_side_effect
+        mock_check_output.side_effect = check_output_side_effect
+        named_key = ceph.get_named_key(user_name)
+
+        self.assertEqual(expected_key, named_key)
+
+    @mock.patch('subprocess.check_output')
+    @mock.patch('ceph.get_unit_hostname')
+    @mock.patch('ceph.ceph_user')
+    @mock.patch('ceph.config')
+    def test_get_named_key_existing(self, mock_config, mock_ceph_user,
+                                    mock_get_unit_hostname, mock_check_output):
+        user_name = 'cinder-ceph'
+        expected_key = 'AQCnjmtbuEACMxAA7joUmgLIGI4/3LKkPzUy8g=='
+        expected_output = ('[client.testuser]\n        key = {}'
+                           .format(expected_key))
+        caps = collections.OrderedDict([('mon', ['allow rw']),
+                                        ('osd', ['allow rwx'])])
+        ceph_user = 'ceph'
+        ceph_proxy_host = 'cephproxy'
+        mock_get_unit_hostname.return_value = ceph_proxy_host
+
+        mock_check_output.return_value = expected_output.encode('utf-8')
+        mock_config.side_effect = self.empty_config_side_effect
+        mock_ceph_user.return_value = ceph_user
+        named_key = ceph.get_named_key(user_name, caps)
+        self.assertEqual(named_key, expected_key)
diff --git a/unit_tests/test_ceph_hooks.py b/unit_tests/test_ceph_hooks.py
index 802fce98915dcaf4f39533d9c5c9f7489630bb6e..63dccdb5e307b8a51933ea7a0ea353e469d7a195 100644
--- a/unit_tests/test_ceph_hooks.py
+++ b/unit_tests/test_ceph_hooks.py
@@ -50,15 +50,20 @@ class TestHooks(test_utils.CharmTestCase):
         self.remote_unit.return_value = 'client/0'
         self.log.side_effect = fake_log
 
+    @mock.patch.object(hooks.ceph, 'ceph_user')
+    @mock.patch.object(hooks, 'filter_installed_packages')
     @mock.patch('subprocess.check_output')
-    def test_radosgw_realtion(self, mock_check_output):
-
+    @mock.patch('ceph_hooks.apt_install')
+    def test_radosgw_relation(self, mock_apt_install, mock_check_output,
+                              mock_filter_installed_packages, mock_ceph_user):
+        mock_filter_installed_packages.return_value = []
+        mock_ceph_user.return_value = 'ceph'
         settings = {'ceph-public-address': '127.0.0.1:1234 [::1]:4321',
                     'radosgw_key': CEPH_KEY,
                     'auth': 'cephx',
                     'fsid': 'some-fsid'}
 
-        mock_check_output.return_value = CEPH_GET_KEY
+        mock_check_output.return_value = CEPH_GET_KEY.encode()
         self.relation_get.return_value = {}
         self.test_config.set('monitor-hosts', settings['ceph-public-address'])
         self.test_config.set('fsid', settings['fsid'])
@@ -66,6 +71,7 @@ class TestHooks(test_utils.CharmTestCase):
         hooks.radosgw_relation()
         self.relation_set.assert_called_with(relation_id=None,
                                              relation_settings=settings)
+        mock_apt_install.assert_called_with(packages=[])
 
     @mock.patch('ceph.ceph_user')
     @mock.patch.object(hooks, 'radosgw_relation')
@@ -76,11 +82,14 @@ class TestHooks(test_utils.CharmTestCase):
         self.test_config.set('monitor-hosts', '127.0.0.1:1234')
         self.test_config.set('fsid', 'abc123')
         self.test_config.set('admin-key', 'key123')
+        self.test_config.set('admin-user', 'client.myadmin')
 
         def c(k):
             x = {'radosgw': ['rados:1'],
                  'client': ['client:1'],
-                 'rados:1': ['rados/1']}
+                 'rados:1': ['rados/1'],
+                 'client:1': ['client/1'],
+                 }
             return x[k]
 
         self.relation_ids.side_effect = c
@@ -105,18 +114,25 @@ class TestHooks(test_utils.CharmTestCase):
                                                     '/etc/ceph/ceph.conf',
                                                     '%s/ceph.conf' % dirname,
                                                     100)
-        keyring = 'ceph.client.admin.keyring'
-        context = {'admin_key': self.test_config.get('admin-key')}
-        self.render.assert_any_call(keyring,
-                                    '/etc/ceph/' + keyring,
+        keyring_template = 'ceph.keyring'
+        keyring_name = 'ceph.{}.keyring'.format(
+            self.test_config.get('admin-user'))
+        context = {
+            'admin_key': self.test_config.get('admin-key'),
+            'admin_user': self.test_config.get('admin-user'),
+        }
+        self.render.assert_any_call(keyring_template,
+                                    '/etc/ceph/' + keyring_name,
                                     context, owner='ceph-user', perms=0o600)
 
         mock_rgw_rel.assert_called_with(relid='rados:1', unit='rados/1')
-        mock_client_rel.assert_called_with('client:1')
+        mock_client_rel.assert_called_with(relid='client:1', unit='client/1')
 
+    @mock.patch.object(hooks.ceph, 'ceph_user')
     @mock.patch('subprocess.check_output')
-    def test_client_relation_joined(self, mock_check_output):
-        mock_check_output.return_value = CEPH_GET_KEY
+    def test_client_relation_joined(self, mock_check_output, mock_ceph_user):
+        mock_check_output.return_value = CEPH_GET_KEY.encode()
+        mock_ceph_user.return_value = 'ceph'
         self.test_config.set('monitor-hosts', '127.0.0.1:1234')
         self.test_config.set('fsid', 'abc123')
         self.test_config.set('admin-key', 'some-admin-key')
@@ -130,3 +146,36 @@
 
         self.relation_set.assert_called_with(relation_id='client:1',
                                              relation_settings=data)
+
+    @mock.patch('ceph_hooks.emit_cephconf')
+    @mock.patch('ceph_hooks.package_install')
+    def test_config_get_skips_package_update(self,
+                                             mock_package_install,
+                                             mock_emit_cephconf):
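+        # previous() mirrors the live config values, i.e. nothing changed,
+        # so config_changed() must not touch packages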
+        previous_test_config = test_utils.TestConfig()
+        previous_test_config.set('source', 'distro')
+        previous_test_config.set('key', '')
+        previous = mock.MagicMock().return_value
+        previous.previous.side_effect = lambda x: previous_test_config.get(x)
+        self.config.side_effect = [previous, "distro", ""]
+        hooks.config_changed()
+        mock_package_install.assert_not_called()
+        mock_emit_cephconf.assert_any_call()
+
+    @mock.patch('ceph_hooks.emit_cephconf')
+    @mock.patch('ceph_hooks.package_install')
+    def test_update_apt_source(self, mock_package_install, mock_emit_cephconf):
+
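+        # 'source' flips from 'distro' to a cloud archive, so config_changed()
+        # must reinstall packages and re-render ceph.conf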
+        previous_test_config = test_utils.TestConfig()
+        previous_test_config.set('source', 'distro')
+        previous_test_config.set('key', '')
+        previous = mock.MagicMock().return_value
+        previous.previous.side_effect = lambda x: previous_test_config.get(x)
+        self.config.side_effect = [previous, "cloud:cosmic-mimic", ""]
+        hooks.config_changed()
+        mock_package_install.assert_called_with()
+        mock_emit_cephconf.assert_called_with()
diff --git a/unit_tests/test_utils.py b/unit_tests/test_utils.py
index 663a0488a4289a6b686e0804d28f1f230b77c939..ed0e7a1e44370d58bae2e773ab7acc1bc8b2850d 100644
--- a/unit_tests/test_utils.py
+++ b/unit_tests/test_utils.py
@@ -36,7 +36,7 @@ def get_default_config():
     '''
     default_config = {}
     config = load_config()
-    for k, v in config.iteritems():
+    for k, v in config.items():
         if 'default' in v:
             default_config[k] = v['default']
         else:
@@ -110,7 +110,8 @@
 
     Yields the mock for "open" and "file", respectively.'''
     mock_open = MagicMock(spec=open)
-    mock_file = MagicMock(spec=file)
+    import io  # py3 has no 'file' builtin to use as a spec; io.FileIO instead
+    mock_file = MagicMock(spec=io.FileIO)
 
     @contextmanager
     def stub_open(*args, **kwargs):