From 9322139d0a3e73da1de28ffab06f6ebd9bc4e9a6 Mon Sep 17 00:00:00 2001
From: Chris MacNaughton <chris.macnaughton@canonical.com>
Date: Thu, 4 Apr 2019 10:11:13 +0200
Subject: [PATCH] Sync charm-helpers for Stein release

As part of the Stein release, we need to ensure
that charmhelpers is up to date.

Change-Id: I0a713373aaa7d36474e575667c91856f6690ba76
---
 charmhelpers/cli/unitdata.py                  |   9 +
 .../contrib/openstack/audits/__init__.py      |  96 +++++++--
 .../audits/openstack_security_guide.py        | 113 ++++-------
 charmhelpers/contrib/openstack/cert_utils.py  |  18 +-
 charmhelpers/contrib/openstack/context.py     |  34 +++-
 charmhelpers/contrib/openstack/ip.py          |   2 +-
 .../contrib/openstack/templates/logrotate     |   9 +
 charmhelpers/contrib/openstack/utils.py       |   4 +-
 charmhelpers/contrib/storage/linux/ceph.py    | 116 ++++++++---
 charmhelpers/contrib/storage/linux/utils.py   |  41 ++++
 charmhelpers/core/hookenv.py                  |  74 +++++++
 charmhelpers/core/host.py                     |   1 +
 charmhelpers/core/host_factory/ubuntu.py      |  14 ++
 charmhelpers/core/sysctl.py                   |  13 +-
 charmhelpers/fetch/ubuntu.py                  | 183 ++++++++++++++----
 15 files changed, 562 insertions(+), 165 deletions(-)
 create mode 100644 charmhelpers/contrib/openstack/templates/logrotate

diff --git a/charmhelpers/cli/unitdata.py b/charmhelpers/cli/unitdata.py
index c572858..acce846 100644
--- a/charmhelpers/cli/unitdata.py
+++ b/charmhelpers/cli/unitdata.py
@@ -19,9 +19,16 @@ from charmhelpers.core import unitdata
 @cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
 def unitdata_cmd(subparser):
     nested = subparser.add_subparsers()
+
     get_cmd = nested.add_parser('get', help='Retrieve data')
     get_cmd.add_argument('key', help='Key to retrieve the value of')
     get_cmd.set_defaults(action='get', value=None)
+
+    getrange_cmd = nested.add_parser(
+        'getrange', help='Retrieve data for keys matching a prefix')
+    getrange_cmd.add_argument('key', metavar='prefix',
+                              help='Prefix of the keys to retrieve')
+    getrange_cmd.set_defaults(action='getrange', value=None)
+
     set_cmd = nested.add_parser('set', help='Store data')
     set_cmd.add_argument('key', help='Key to set')
     set_cmd.add_argument('value', help='Value to store')
@@ -30,6 +37,8 @@ def unitdata_cmd(subparser):
     def _unitdata_cmd(action, key, value):
         if action == 'get':
             return unitdata.kv().get(key)
+        elif action == 'getrange':
+            return unitdata.kv().getrange(key)
         elif action == 'set':
             unitdata.kv().set(key, value)
             unitdata.kv().flush()
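
A minimal sketch of the behaviour the new 'getrange' action exposes,
assuming a populated unitdata store (the keys below are hypothetical):

    from charmhelpers.core import unitdata

    kv = unitdata.kv()
    kv.set('pool.default.size', 3)
    kv.set('pool.default.pg_num', 128)
    kv.flush()
    # Returns a dict of every key/value pair sharing the given prefix.
    print(kv.getrange('pool.default.'))
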
diff --git a/charmhelpers/contrib/openstack/audits/__init__.py b/charmhelpers/contrib/openstack/audits/__init__.py
index 12b01b3..7f7e5f7 100644
--- a/charmhelpers/contrib/openstack/audits/__init__.py
+++ b/charmhelpers/contrib/openstack/audits/__init__.py
@@ -19,7 +19,7 @@ from enum import Enum
 import traceback
 
 from charmhelpers.core.host import cmp_pkgrevno
-
+import charmhelpers.contrib.openstack.utils as openstack_utils
 import charmhelpers.core.hookenv as hookenv
 
 
@@ -39,7 +39,7 @@ def audit(*args):
     deployed system that matches the given configuration
 
     :param args: List of functions to filter tests against
-    :type args: List[Callable(Config)]
+    :type args: List[Callable[Dict]]
     """
     def wrapper(f):
         test_name = f.__name__
@@ -58,28 +58,92 @@ def audit(*args):
 
 
 def is_audit_type(*args):
-    """This audit is included in the specified kinds of audits."""
-    def should_run(audit_options):
+    """This audit is included in the specified kinds of audits.
+
+    :param args: List of AuditTypes to include this audit in
+    :type args: List[AuditType]
+    :rtype: Callable[Dict]
+    """
+    def _is_audit_type(audit_options):
         if audit_options.get('audit_type') in args:
             return True
         else:
             return False
-    return should_run
+    return _is_audit_type
 
 
 def since_package(pkg, pkg_version):
-    """This audit should be run after the specified package version (incl)."""
-    return lambda audit_options=None: cmp_pkgrevno(pkg, pkg_version) >= 0
+    """This audit should be run after the specified package version (incl).
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param pkg_version: The package version
+    :type release: str
+    :rtype: Callable[Dict]
+    """
+    def _since_package(audit_options=None):
+        return cmp_pkgrevno(pkg, pkg_version) >= 0
+
+    return _since_package
 
 
 def before_package(pkg, pkg_version):
-    """This audit should be run before the specified package version (excl)."""
-    return lambda audit_options=None: not since_package(pkg, pkg_version)()
+    """This audit should be run before the specified package version (excl).
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param pkg_version: The package version
+    :type release: str
+    :rtype: Callable[Dict]
+    """
+    def _before_package(audit_options=None):
+        return not since_package(pkg, pkg_version)()
+
+    return _before_package
+
+
+def since_openstack_release(pkg, release):
+    """This audit should run after the specified OpenStack version (incl).
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param release: The OpenStack release codename
+    :type release: str
+    :rtype: Callable[Dict]
+    """
+    def _since_openstack_release(audit_options=None):
+        _release = openstack_utils.get_os_codename_package(pkg)
+        return openstack_utils.CompareOpenStackReleases(_release) >= release
+
+    return _since_openstack_release
+
+
+def before_openstack_release(pkg, release):
+    """This audit should run before the specified OpenStack version (excl).
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param release: The OpenStack release codename
+    :type release: str
+    :rtype: Callable[Dict]
+    """
+    def _before_openstack_release(audit_options=None):
+        return not since_openstack_release(pkg, release)()
+
+    return _before_openstack_release
 
 
 def it_has_config(config_key):
-    """This audit should be run based on specified config keys."""
-    return lambda audit_options: audit_options.get(config_key) is not None
+    """This audit should be run based on specified config keys.
+
+    :param config_key: Config key to look for
+    :type config_key: str
+    :rtype: Callable[Dict]
+    """
+    def _it_has_config(audit_options):
+        return audit_options.get(config_key) is not None
+
+    return _it_has_config
 
 
 def run(audit_options):
@@ -87,6 +151,8 @@ def run(audit_options):
 
     :param audit_options: Configuration for the audit
     :type audit_options: Config
+
+    :rtype: Dict[str, str]
     """
     errors = {}
     results = {}
@@ -127,7 +193,13 @@ def run(audit_options):
 
 
 def action_parse_results(result):
-    """Parse the result of `run` in the context of an action."""
+    """Parse the result of `run` in the context of an action.
+
+    :param result: The result of running the security-checklist
+        action on a unit
+    :type result: Dict[str, Dict[str, str]]
+    :rtype: int
+    """
     passed = True
     for test, result in result.items():
         if result['success']:
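
A sketch of how the new release-based filters compose with the audit
decorator; the audit body and config key here are hypothetical:

    from charmhelpers.contrib.openstack.audits import (
        audit,
        is_audit_type,
        since_openstack_release,
        AuditType,
    )

    @audit(is_audit_type(AuditType.OpenStackSecurityGuide),
           since_openstack_release('keystone', 'stein'))
    def validate_example_setting(audit_options):
        # Hypothetical check; an AssertionError marks the audit failed.
        assert audit_options.get('example-key') is not None
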
diff --git a/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
index ba5e248..e5b7ac1 100644
--- a/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
+++ b/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
@@ -30,14 +30,20 @@ from charmhelpers.core.hookenv import (
     cached,
 )
 
+"""
+The Security Guide suggests a specific list of files inside the
+config directory for the service having 640 specifically, but
+by ensuring the containing directory is 750, only the owner can
+write, and only the group can read files within the directory.
 
+By  restricting access to the containing directory, we can more
+effectively ensure that there is no accidental leakage if a new
+file is added to the service without being added to the security
+guide, and to this check.
+"""
 FILE_ASSERTIONS = {
     'barbican': {
-        # From security guide
-        '/etc/barbican/barbican.conf': {'group': 'barbican', 'mode': '640'},
-        '/etc/barbican/barbican-api-paste.ini':
-            {'group': 'barbican', 'mode': '640'},
-        '/etc/barbican/policy.json': {'group': 'barbican', 'mode': '640'},
+        '/etc/barbican': {'group': 'barbican', 'mode': '750'},
     },
     'ceph-mon': {
         '/var/lib/charm/ceph-mon/ceph.conf':
@@ -60,82 +66,29 @@ FILE_ASSERTIONS = {
             {'owner': 'ceph', 'group': 'ceph', 'mode': '755'},
     },
     'cinder': {
-        # From security guide
-        '/etc/cinder/cinder.conf': {'group': 'cinder', 'mode': '640'},
-        '/etc/cinder/api-paste.conf': {'group': 'cinder', 'mode': '640'},
-        '/etc/cinder/rootwrap.conf': {'group': 'cinder', 'mode': '640'},
+        '/etc/cinder': {'group': 'cinder', 'mode': '750'},
     },
     'glance': {
-        # From security guide
-        '/etc/glance/glance-api-paste.ini': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/glance-api.conf': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/glance-cache.conf': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/glance-manage.conf': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/glance-registry-paste.ini':
-            {'group': 'glance', 'mode': '640'},
-        '/etc/glance/glance-registry.conf': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/glance-scrubber.conf': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/glance-swift-store.conf':
-            {'group': 'glance', 'mode': '640'},
-        '/etc/glance/policy.json': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/schema-image.json': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/schema.json': {'group': 'glance', 'mode': '640'},
+        '/etc/glance': {'group': 'glance', 'mode': '750'},
     },
     'keystone': {
-        # From security guide
-        '/etc/keystone/keystone.conf': {'group': 'keystone', 'mode': '640'},
-        '/etc/keystone/keystone-paste.ini':
-            {'group': 'keystone', 'mode': '640'},
-        '/etc/keystone/policy.json': {'group': 'keystone', 'mode': '640'},
-        '/etc/keystone/logging.conf': {'group': 'keystone', 'mode': '640'},
-        '/etc/keystone/ssl/certs/signing_cert.pem':
-            {'group': 'keystone', 'mode': '640'},
-        '/etc/keystone/ssl/private/signing_key.pem':
-            {'group': 'keystone', 'mode': '640'},
-        '/etc/keystone/ssl/certs/ca.pem': {'group': 'keystone', 'mode': '640'},
+        '/etc/keystone':
+            {'owner': 'keystone', 'group': 'keystone', 'mode': '750'},
     },
     'manilla': {
-        # From security guide
-        '/etc/manila/manila.conf': {'group': 'manilla', 'mode': '640'},
-        '/etc/manila/api-paste.ini': {'group': 'manilla', 'mode': '640'},
-        '/etc/manila/policy.json': {'group': 'manilla', 'mode': '640'},
-        '/etc/manila/rootwrap.conf': {'group': 'manilla', 'mode': '640'},
+        '/etc/manila': {'group': 'manilla', 'mode': '750'},
     },
     'neutron-gateway': {
-        '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'},
-        '/etc/neutron/rootwrap.conf': {'mode': '640'},
-        '/etc/neutron/rootwrap.d': {'mode': '755'},
-        '/etc/neutron/*': {'group': 'neutron', 'mode': '644'},
+        '/etc/neutron': {'group': 'neutron', 'mode': '750'},
     },
     'neutron-api': {
-        # From security guide
-        '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'},
-        '/etc/nova/api-paste.ini': {'group': 'neutron', 'mode': '640'},
-        '/etc/neutron/rootwrap.conf': {'group': 'neutron', 'mode': '640'},
-        # Additional validations
-        '/etc/neutron/rootwrap.d': {'mode': '755'},
-        '/etc/neutron/neutron_lbaas.conf': {'mode': '644'},
-        '/etc/neutron/neutron_vpnaas.conf': {'mode': '644'},
-        '/etc/neutron/*': {'group': 'neutron', 'mode': '644'},
+        '/etc/neutron/': {'group': 'neutron', 'mode': '750'},
     },
     'nova-cloud-controller': {
-        # From security guide
-        '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'},
-        '/etc/nova/nova.conf': {'group': 'nova', 'mode': '750'},
-        '/etc/nova/*': {'group': 'nova', 'mode': '640'},
-        # Additional validations
-        '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'},
+        '/etc/nova': {'group': 'nova', 'mode': '750'},
     },
     'nova-compute': {
-        # From security guide
-        '/etc/nova/nova.conf': {'group': 'nova', 'mode': '640'},
-        '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'},
-        '/etc/nova/rootwrap.conf': {'group': 'nova', 'mode': '640'},
-        # Additional Validations
-        '/etc/nova/nova-compute.conf': {'group': 'nova', 'mode': '640'},
-        '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'},
-        '/etc/nova/nm.conf': {'mode': '644'},
-        '/etc/nova/*': {'group': 'nova', 'mode': '640'},
+        '/etc/nova/': {'group': 'nova', 'mode': '750'},
     },
     'openstack-dashboard': {
         # From security guide
@@ -178,7 +131,7 @@ def _config_ini(path):
     return dict(conf)
 
 
-def _validate_file_ownership(owner, group, file_name):
+def _validate_file_ownership(owner, group, file_name, optional=False):
     """
     Validate that a specified file is owned by `owner:group`.
 
@@ -188,12 +141,16 @@ def _validate_file_ownership(owner, group, file_name):
     :type group: str
     :param file_name: Path to the file to verify
     :type file_name: str
+    :param optional: Whether the file is optional; if True, a missing
+                     file does not fail the audit
+    :type optional: bool
     """
     try:
         ownership = _stat(file_name)
     except subprocess.CalledProcessError as e:
         print("Error reading file: {}".format(e))
-        assert False, "Specified file does not exist: {}".format(file_name)
+        if not optional:
+            assert False, "Specified file does not exist: {}".format(file_name)
+        # File is missing but optional: skip the remaining ownership
+        # checks, which would otherwise reference an unbound result.
+        return
     assert owner == ownership.owner, \
         "{} has an incorrect owner: {} should be {}".format(
             file_name, ownership.owner, owner)
@@ -203,7 +160,7 @@ def _validate_file_ownership(owner, group, file_name):
     print("Validate ownership of {}: PASS".format(file_name))
 
 
-def _validate_file_mode(mode, file_name):
+def _validate_file_mode(mode, file_name, optional=False):
     """
     Validate that a specified file has the specified permissions.
 
@@ -211,12 +168,16 @@ def _validate_file_mode(mode, file_name):
     :type owner: str
     :param file_name: Path to the file to verify
     :type file_name: str
+    :param optional: Whether the file is optional; if True, a missing
+                     file does not fail the audit
+    :type optional: bool
     """
     try:
         ownership = _stat(file_name)
     except subprocess.CalledProcessError as e:
         print("Error reading file: {}".format(e))
-        assert False, "Specified file does not exist: {}".format(file_name)
+        if not optional:
+            assert False, "Specified file does not exist: {}".format(file_name)
+        # File is missing but optional: skip the remaining mode checks,
+        # which would otherwise reference an unbound result.
+        return
     assert mode == ownership.mode, \
         "{} has an incorrect mode: {} should be {}".format(
             file_name, ownership.mode, mode)
@@ -243,14 +204,15 @@ def validate_file_ownership(config):
                     "Invalid ownership configuration: {}".format(key))
         owner = options.get('owner', config.get('owner', 'root'))
         group = options.get('group', config.get('group', 'root'))
+        optional = options.get('optional', config.get('optional', False))
         if '*' in file_name:
             for file in glob.glob(file_name):
                 if file not in files.keys():
                     if os.path.isfile(file):
-                        _validate_file_ownership(owner, group, file)
+                        _validate_file_ownership(owner, group, file, optional)
         else:
             if os.path.isfile(file_name):
-                _validate_file_ownership(owner, group, file_name)
+                _validate_file_ownership(owner, group, file_name, optional)
 
 
 @audit(is_audit_type(AuditType.OpenStackSecurityGuide),
@@ -264,14 +226,15 @@ def validate_file_permissions(config):
                 raise RuntimeError(
                     "Invalid ownership configuration: {}".format(key))
         mode = options.get('mode', config.get('permissions', '600'))
+        optional = options.get('optional', config.get('optional', False))
         if '*' in file_name:
             for file in glob.glob(file_name):
                 if file not in files.keys():
                     if os.path.isfile(file):
-                        _validate_file_mode(mode, file)
+                        _validate_file_mode(mode, file, optional)
         else:
             if os.path.isfile(file_name):
-                _validate_file_mode(mode, file_name)
+                _validate_file_mode(mode, file_name, optional)
 
 
 @audit(is_audit_type(AuditType.OpenStackSecurityGuide))
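
With the new 'optional' flag, an assertion can mark a file that may
legitimately be absent; a hypothetical entry:

    FILE_ASSERTIONS['keystone'].update({
        '/etc/keystone/logging.conf':
            {'group': 'keystone', 'mode': '640', 'optional': True},
    })
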
diff --git a/charmhelpers/contrib/openstack/cert_utils.py b/charmhelpers/contrib/openstack/cert_utils.py
index 3a3c6de..47b8603 100644
--- a/charmhelpers/contrib/openstack/cert_utils.py
+++ b/charmhelpers/contrib/openstack/cert_utils.py
@@ -180,13 +180,17 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
             os.symlink(hostname_key, custom_key)
 
 
-def install_certs(ssl_dir, certs, chain=None):
+def install_certs(ssl_dir, certs, chain=None, user='root', group='root'):
     """Install the certs passed into the ssl dir and append the chain if
        provided.
 
     :param ssl_dir: str Directory to create symlinks in
     :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}}
     :param chain: str Chain to be appended to certs
+    :param user: (Optional) Owner of certificate files. Defaults to 'root'
+    :type user: str
+    :param group: (Optional) Group of certificate files. Defaults to 'root'
+    :type group: str
     """
     for cn, bundle in certs.items():
         cert_filename = 'cert_{}'.format(cn)
@@ -197,21 +201,25 @@ def install_certs(ssl_dir, certs, chain=None):
             # trust certs signed by an intermediate in the chain
             cert_data = cert_data + os.linesep + chain
         write_file(
-            path=os.path.join(ssl_dir, cert_filename),
+            path=os.path.join(ssl_dir, cert_filename), owner=user, group=group,
             content=cert_data, perms=0o640)
         write_file(
-            path=os.path.join(ssl_dir, key_filename),
+            path=os.path.join(ssl_dir, key_filename), owner=user, group=group,
             content=bundle['key'], perms=0o640)
 
 
 def process_certificates(service_name, relation_id, unit,
-                         custom_hostname_link=None):
+                         custom_hostname_link=None, user='root', group='root'):
     """Process the certificates supplied down the relation
 
     :param service_name: str Name of service the certificates are for.
     :param relation_id: str Relation id providing the certs
     :param unit: str Unit providing the certs
     :param custom_hostname_link: str Name of custom link to create
+    :param user: (Optional) Owner of certificate files. Defaults to 'root'
+    :type user: str
+    :param group: (Optional) Group of certificate files. Defaults to 'root'
+    :type group: str
     """
     data = relation_get(rid=relation_id, unit=unit)
     ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
@@ -223,7 +231,7 @@ def process_certificates(service_name, relation_id, unit,
     if certs:
         certs = json.loads(certs)
         install_ca_cert(ca.encode())
-        install_certs(ssl_dir, certs, chain)
+        install_certs(ssl_dir, certs, chain, user=user, group=group)
         create_ip_cert_links(
             ssl_dir,
             custom_hostname_link=custom_hostname_link)
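
A sketch of a charm passing ownership through to the installed
certificate files; the relation id and unit name are placeholders:

    from charmhelpers.contrib.openstack.cert_utils import (
        process_certificates,
    )

    process_certificates('keystone', 'certificates:1', 'vault/0',
                         user='keystone', group='keystone')
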
diff --git a/charmhelpers/contrib/openstack/context.py b/charmhelpers/contrib/openstack/context.py
index fc634cc..d513371 100644
--- a/charmhelpers/contrib/openstack/context.py
+++ b/charmhelpers/contrib/openstack/context.py
@@ -792,6 +792,7 @@ class ApacheSSLContext(OSContextGenerator):
     # and service namespace accordingly.
     external_ports = []
     service_namespace = None
+    user = group = 'root'
 
     def enable_modules(self):
         cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers']
@@ -810,9 +811,11 @@ class ApacheSSLContext(OSContextGenerator):
                 key_filename = 'key'
 
             write_file(path=os.path.join(ssl_dir, cert_filename),
-                       content=b64decode(cert), perms=0o640)
+                       content=b64decode(cert), owner=self.user,
+                       group=self.group, perms=0o640)
             write_file(path=os.path.join(ssl_dir, key_filename),
-                       content=b64decode(key), perms=0o640)
+                       content=b64decode(key), owner=self.user,
+                       group=self.group, perms=0o640)
 
     def configure_ca(self):
         ca_cert = get_ca_cert()
@@ -1932,3 +1935,30 @@ class VersionsContext(OSContextGenerator):
         return {
             'openstack_release': ostack,
             'operating_system_release': osystem}
+
+
+class LogrotateContext(OSContextGenerator):
+    """Common context generator for logrotate."""
+
+    def __init__(self, location, interval, count):
+        """
+        :param location: Directory under /var/log that holds the logs
+                         to rotate; rendered into the logrotate template
+        :type location: str
+        :param interval: The interval for the rotations. Valid values are
+                         'daily', 'weekly', 'monthly', 'yearly'
+        :type interval: str
+        :param count: Number of rotations to keep before old log files
+                      are removed (rendered as logrotate's 'rotate' option)
+        :type count: int
+        """
+        self.location = location
+        self.interval = interval
+        self.count = 'rotate {}'.format(count)
+
+    def __call__(self):
+        ctxt = {
+            'logrotate_logs_location': self.location,
+            'logrotate_interval': self.interval,
+            'logrotate_count': self.count,
+        }
+        return ctxt
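
A sketch of what the new context generator yields; the values are
illustrative:

    from charmhelpers.contrib.openstack.context import LogrotateContext

    ctxt = LogrotateContext(location='nova', interval='weekly', count=4)()
    # {'logrotate_logs_location': 'nova',
    #  'logrotate_interval': 'weekly',
    #  'logrotate_count': 'rotate 4'}
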
diff --git a/charmhelpers/contrib/openstack/ip.py b/charmhelpers/contrib/openstack/ip.py
index df83b91..723aebc 100644
--- a/charmhelpers/contrib/openstack/ip.py
+++ b/charmhelpers/contrib/openstack/ip.py
@@ -159,7 +159,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
                     if is_address_in_network(bound_cidr, vip):
                         resolved_address = vip
                         break
-            except NotImplementedError:
+            except (NotImplementedError, NoNetworkBinding):
                # No net-splits configured and no support for extra
                # bindings/network spaces, so we expect a single vip
                 resolved_address = vips[0]
diff --git a/charmhelpers/contrib/openstack/templates/logrotate b/charmhelpers/contrib/openstack/templates/logrotate
new file mode 100644
index 0000000..b2900d0
--- /dev/null
+++ b/charmhelpers/contrib/openstack/templates/logrotate
@@ -0,0 +1,9 @@
+/var/log/{{ logrotate_logs_location }}/*.log {
+    {{ logrotate_interval }}
+    {{ logrotate_count }}
+    compress
+    delaycompress
+    missingok
+    notifempty
+    copytruncate
+}
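
Rendered with the context sketched above, the template produces a
logrotate stanza along these lines:

    /var/log/nova/*.log {
        weekly
        rotate 4
        compress
        delaycompress
        missingok
        notifempty
        copytruncate
    }
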
diff --git a/charmhelpers/contrib/openstack/utils.py b/charmhelpers/contrib/openstack/utils.py
index 86b011b..e5e2536 100644
--- a/charmhelpers/contrib/openstack/utils.py
+++ b/charmhelpers/contrib/openstack/utils.py
@@ -194,7 +194,7 @@ SWIFT_CODENAMES = OrderedDict([
     ('rocky',
         ['2.18.0', '2.19.0']),
     ('stein',
-        ['2.19.0']),
+        ['2.20.0']),
 ])
 
 # >= Liberty version->codename mapping
@@ -656,7 +656,7 @@ def openstack_upgrade_available(package):
     else:
         avail_vers = get_os_version_install_source(src)
     apt.init()
-    return apt.version_compare(avail_vers, cur_vers) == 1
+    return apt.version_compare(avail_vers, cur_vers) >= 1
 
 
 def ensure_block_device(block_device):
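
The switch from '== 1' to '>= 1' tolerates comparison backends that
return any positive integer for "newer" rather than exactly 1; for
illustration, with python-apt only the sign is significant:

    import apt_pkg

    apt_pkg.init()
    # Positive when the first version is newer, zero when equal,
    # negative when older.
    assert apt_pkg.version_compare('2:13.0.0', '2:12.0.0') > 0
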
diff --git a/charmhelpers/contrib/storage/linux/ceph.py b/charmhelpers/contrib/storage/linux/ceph.py
index 22aa978..2c62092 100644
--- a/charmhelpers/contrib/storage/linux/ceph.py
+++ b/charmhelpers/contrib/storage/linux/ceph.py
@@ -186,7 +186,7 @@ class Pool(object):
         elif mode == 'writeback':
             pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
                                 'cache-mode', cache_pool, 'forward']
-            if cmp_pkgrevno('ceph', '10.1') >= 0:
+            if cmp_pkgrevno('ceph-common', '10.1') >= 0:
                 # Jewel added a mandatory flag
                 pool_forward_cmd.append('--yes-i-really-mean-it')
 
@@ -582,21 +582,24 @@ def remove_pool_snapshot(service, pool_name, snapshot_name):
         raise
 
 
-# max_bytes should be an int or long
-def set_pool_quota(service, pool_name, max_bytes):
+def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
     """
-    :param service: six.string_types. The Ceph user name to run the command under
-    :param pool_name: six.string_types
-    :param max_bytes: int or long
-    :return: None.  Can raise CalledProcessError
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param pool_name: Name of pool
+    :type pool_name: str
+    :param max_bytes: Maximum bytes quota to apply
+    :type max_bytes: int
+    :param max_objects: Maximum objects quota to apply
+    :type max_objects: int
+    :raises: subprocess.CalledProcessError
     """
-    # Set a byte quota on a RADOS pool in ceph.
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
-           'max_bytes', str(max_bytes)]
-    try:
-        check_call(cmd)
-    except CalledProcessError:
-        raise
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
+    if max_bytes:
+        cmd = cmd + ['max_bytes', str(max_bytes)]
+    if max_objects:
+        cmd = cmd + ['max_objects', str(max_objects)]
+    check_call(cmd)
 
 
 def remove_pool_quota(service, pool_name):
@@ -661,7 +664,7 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
     if locality is not None and durability_estimator is not None:
         raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
 
-    luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
+    luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
     # failure_domain changed in luminous
     if luminous_or_later:
         cmd.append('crush-failure-domain=' + failure_domain)
@@ -766,7 +769,7 @@ def get_osds(service, device_class=None):
     :param device_class: Class of storage device for OSD's
     :type device_class: str
     """
-    luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
+    luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
     if luminous_or_later and device_class:
         out = check_output(['ceph', '--id', service,
                             'osd', 'crush', 'class',
@@ -832,7 +835,7 @@ def set_app_name_for_pool(client, pool, name):
 
     :raises: CalledProcessError if ceph call fails
     """
-    if cmp_pkgrevno('ceph', '12.0.0') >= 0:
+    if cmp_pkgrevno('ceph-common', '12.0.0') >= 0:
         cmd = ['ceph', '--id', client, 'osd', 'pool',
                'application', 'enable', pool, name]
         check_call(cmd)
@@ -1153,19 +1156,46 @@ class CephBrokerRq(object):
 
     def add_op_create_pool(self, name, replica_count=3, pg_num=None,
                            weight=None, group=None, namespace=None,
-                           app_name=None):
-        """Adds an operation to create a pool.
-
-        @param pg_num setting:  optional setting. If not provided, this value
-        will be calculated by the broker based on how many OSDs are in the
-        cluster at the time of creation. Note that, if provided, this value
-        will be capped at the current available maximum.
-        @param weight: the percentage of data the pool makes up
+                           app_name=None, max_bytes=None, max_objects=None):
+        """DEPRECATED: Use ``add_op_create_replicated_pool()`` or
+                       ``add_op_create_erasure_pool()`` instead.
+        """
+        return self.add_op_create_replicated_pool(
+            name, replica_count=replica_count, pg_num=pg_num, weight=weight,
+            group=group, namespace=namespace, app_name=app_name,
+            max_bytes=max_bytes, max_objects=max_objects)
+
+    def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None,
+                                      weight=None, group=None, namespace=None,
+                                      app_name=None, max_bytes=None,
+                                      max_objects=None):
+        """Adds an operation to create a replicated pool.
+
+        :param name: Name of pool to create
+        :type name: str
+        :param replica_count: Number of copies Ceph should keep of your data.
+        :type replica_count: int
+        :param pg_num: Request specific number of Placement Groups to create
+                       for pool.
+        :type pg_num: int
+        :param weight: The percentage of data that is expected to be contained
+                       in the pool from the total available space on the OSDs.
+                       Used to calculate number of Placement Groups to create
+                       for pool.
+        :type weight: float
+        :param group: Group to add pool to
+        :type group: str
+        :param namespace: Group namespace
+        :type namespace: str
         :param app_name: (Optional) Tag pool with application name.  Note that
                          there are certain protocols emerging upstream with
                          regard to meaningful application names to use.
                          Examples are ``rbd`` and ``rgw``.
         :type app_name: str
+        :param max_bytes: Maximum bytes quota to apply
+        :type max_bytes: int
+        :param max_objects: Maximum objects quota to apply
+        :type max_objects: int
         """
         if pg_num and weight:
             raise ValueError('pg_num and weight are mutually exclusive')
@@ -1173,7 +1203,41 @@ class CephBrokerRq(object):
         self.ops.append({'op': 'create-pool', 'name': name,
                          'replicas': replica_count, 'pg_num': pg_num,
                          'weight': weight, 'group': group,
-                         'group-namespace': namespace, 'app-name': app_name})
+                         'group-namespace': namespace, 'app-name': app_name,
+                         'max-bytes': max_bytes, 'max-objects': max_objects})
+
+    def add_op_create_erasure_pool(self, name, erasure_profile=None,
+                                   weight=None, group=None, app_name=None,
+                                   max_bytes=None, max_objects=None):
+        """Adds an operation to create a erasure coded pool.
+
+        :param name: Name of pool to create
+        :type name: str
+        :param erasure_profile: Name of erasure code profile to use.  If not
+                                set the ceph-mon unit handling the broker
+                                request will set its default value.
+        :type erasure_profile: str
+        :param weight: The percentage of data that is expected to be contained
+                       in the pool from the total available space on the OSDs.
+        :type weight: float
+        :param group: Group to add pool to
+        :type group: str
+        :param app_name: (Optional) Tag pool with application name.  Note that
+                         there are certain protocols emerging upstream with
+                         regard to meaningful application names to use.
+                         Examples are ``rbd`` and ``rgw``.
+        :type app_name: str
+        :param max_bytes: Maximum bytes quota to apply
+        :type max_bytes: int
+        :param max_objects: Maximum objects quota to apply
+        :type max_objects: int
+        """
+        self.ops.append({'op': 'create-pool', 'name': name,
+                         'pool-type': 'erasure',
+                         'erasure-profile': erasure_profile,
+                         'weight': weight,
+                         'group': group, 'app-name': app_name,
+                         'max-bytes': max_bytes, 'max-objects': max_objects})
 
     def set_ops(self, ops):
         """Set request ops to provided value.
diff --git a/charmhelpers/contrib/storage/linux/utils.py b/charmhelpers/contrib/storage/linux/utils.py
index 6f846b0..c57aaf3 100644
--- a/charmhelpers/contrib/storage/linux/utils.py
+++ b/charmhelpers/contrib/storage/linux/utils.py
@@ -17,12 +17,53 @@ import re
 from stat import S_ISBLK
 
 from subprocess import (
+    CalledProcessError,
     check_call,
     check_output,
     call
 )
 
 
+def _luks_uuid(dev):
+    """
+    Check to see if dev is a LUKS encrypted volume, returning the UUID
+    of the volume if it is.
+
+    :param dev: path to block device to check.
+    :returns: str. UUID of LUKS device or None if not a LUKS device
+    """
+    try:
+        cmd = ['cryptsetup', 'luksUUID', dev]
+        return check_output(cmd).decode('UTF-8').strip()
+    except CalledProcessError:
+        return None
+
+
+def is_luks_device(dev):
+    """
+    Determine if dev is a LUKS-formatted block device.
+
+    :param dev: A full path to a block device to check for LUKS header
+                presence
+    :returns: boolean: indicates whether the device has a LUKS header.
+    """
+    return bool(_luks_uuid(dev))
+
+
+def is_mapped_luks_device(dev):
+    """
+    Determine if dev is a mapped LUKS device.
+
+    :param dev: A full path to a block device to be checked
+    :returns: boolean: indicates whether the device is mapped
+    """
+    _, dirs, _ = next(os.walk(
+        '/sys/class/block/{}/holders/'
+        .format(os.path.basename(os.path.realpath(dev))))
+    )
+    is_held = len(dirs) > 0
+    return is_held and is_luks_device(dev)
+
+
 def is_block_device(path):
     '''
     Confirm device at path is a valid block device node.
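
A sketch of how the new LUKS helpers might combine when preparing a
device; the device path is hypothetical:

    from charmhelpers.contrib.storage.linux.utils import (
        is_luks_device,
        is_mapped_luks_device,
    )

    dev = '/dev/vdb'
    if is_luks_device(dev) and not is_mapped_luks_device(dev):
        # Encrypted but not yet opened; a charm would unlock it here.
        pass
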
diff --git a/charmhelpers/core/hookenv.py b/charmhelpers/core/hookenv.py
index 2e28765..4744eb4 100644
--- a/charmhelpers/core/hookenv.py
+++ b/charmhelpers/core/hookenv.py
@@ -50,6 +50,11 @@ TRACE = "TRACE"
 MARKER = object()
 SH_MAX_ARG = 131071
 
+
+RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. '
+                 'This may not be compatible with software you are '
+                 'running in your shell.')
+
 cache = {}
 
 
@@ -1414,3 +1419,72 @@ def unit_doomed(unit=None):
     # I don't think 'dead' units ever show up in the goal-state, but
     # check anyway in addition to 'dying'.
     return units[unit]['status'] in ('dying', 'dead')
+
+
+def env_proxy_settings(selected_settings=None):
+    """Get proxy settings from process environment variables.
+
+    Get charm proxy settings from environment variables that correspond to
+    juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
+    see lp:1782236) in a format suitable for passing to an application that
+    reacts to proxy settings passed as environment variables. Some applications
+    support lowercase or uppercase notation (e.g. curl), some support only
+    lowercase (e.g. wget); there are also subjectively rare cases of only
+    uppercase notation support. no_proxy CIDR and wildcard support also varies
+    between runtimes and applications as there is no enforced standard.
+
+    Some applications may connect to multiple destinations and expose config
+    options that would affect only proxy settings for a specific destination;
+    these should be handled in charms in an application-specific manner.
+
+    :param selected_settings: format only a subset of possible settings
+    :type selected_settings: list
+    :rtype: Optional[Dict[str, str]]
+    """
+    SUPPORTED_SETTINGS = {
+        'http': 'HTTP_PROXY',
+        'https': 'HTTPS_PROXY',
+        'no_proxy': 'NO_PROXY',
+        'ftp': 'FTP_PROXY'
+    }
+    if selected_settings is None:
+        selected_settings = SUPPORTED_SETTINGS
+
+    selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
+                     if k in selected_settings]
+    proxy_settings = {}
+    for var in selected_vars:
+        var_val = os.getenv(var)
+        if var_val:
+            proxy_settings[var] = var_val
+            proxy_settings[var.lower()] = var_val
+        # Now handle juju-prefixed environment variables. The legacy vs new
+        # environment variable usage is mutually exclusive
+        charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
+        if charm_var_val:
+            proxy_settings[var] = charm_var_val
+            proxy_settings[var.lower()] = charm_var_val
+    if 'no_proxy' in proxy_settings:
+        if _contains_range(proxy_settings['no_proxy']):
+            log(RANGE_WARNING, level=WARNING)
+    return proxy_settings if proxy_settings else None
+
+
+def _contains_range(addresses):
+    """Check for cidr or wildcard domain in a string.
+
+    Given a string comprising a comma separated list of ip addresses
+    and domain names, determine whether the string contains IP ranges
+    or wildcard domains.
+
+    :param addresses: comma separated list of domains and ip addresses.
+    :type addresses: str
+    """
+    return (
+        # Test for cidr (e.g. 10.20.20.0/24)
+        "/" in addresses or
+        # Test for wildcard domains (*.foo.com or .foo.com)
+        "*" in addresses or
+        addresses.startswith(".") or
+        ",." in addresses or
+        " ." in addresses)
diff --git a/charmhelpers/core/host.py b/charmhelpers/core/host.py
index 47c1fc3..32754ff 100644
--- a/charmhelpers/core/host.py
+++ b/charmhelpers/core/host.py
@@ -47,6 +47,7 @@ if __platform__ == "ubuntu":
         cmp_pkgrevno,
         CompareHostReleases,
         get_distrib_codename,
+        arch
     )  # flake8: noqa -- ignore F401 for this import
 elif __platform__ == "centos":
     from charmhelpers.core.host_factory.centos import (  # NOQA:F401
diff --git a/charmhelpers/core/host_factory/ubuntu.py b/charmhelpers/core/host_factory/ubuntu.py
index d7e920e..a3162fa 100644
--- a/charmhelpers/core/host_factory/ubuntu.py
+++ b/charmhelpers/core/host_factory/ubuntu.py
@@ -1,5 +1,6 @@
 import subprocess
 
+from charmhelpers.core.hookenv import cached
 from charmhelpers.core.strutils import BasicStringComparator
 
 
@@ -97,3 +98,16 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
         pkgcache = apt_cache()
     pkg = pkgcache[package]
     return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
+
+
+@cached
+def arch():
+    """Return the package architecture as a string.
+
+    :returns: the architecture
+    :rtype: str
+    :raises: subprocess.CalledProcessError if dpkg command fails
+    """
+    return subprocess.check_output(
+        ['dpkg', '--print-architecture']
+    ).rstrip().decode('UTF-8')
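
The helper returns the dpkg architecture string ('amd64', 'arm64',
...), e.g. for gating architecture-specific behaviour:

    from charmhelpers.core.host import arch

    if arch() not in ('amd64', 'i386'):
        # e.g. fall back to the ports archive on non-Intel machines
        pass
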
diff --git a/charmhelpers/core/sysctl.py b/charmhelpers/core/sysctl.py
index 1f188d8..f1f4a28 100644
--- a/charmhelpers/core/sysctl.py
+++ b/charmhelpers/core/sysctl.py
@@ -28,7 +28,7 @@ from charmhelpers.core.hookenv import (
 __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
 
 
-def create(sysctl_dict, sysctl_file):
+def create(sysctl_dict, sysctl_file, ignore=False):
     """Creates a sysctl.conf file from a YAML associative array
 
     :param sysctl_dict: a dict or YAML-formatted string of sysctl
@@ -36,6 +36,8 @@ def create(sysctl_dict, sysctl_file):
     :type sysctl_dict: str
     :param sysctl_file: path to the sysctl file to be saved
     :type sysctl_file: str or unicode
+    :param ignore: If True, ignore "unknown variable" errors.
+    :type ignore: bool
     :returns: None
     """
     if type(sysctl_dict) is not dict:
@@ -52,7 +54,12 @@ def create(sysctl_dict, sysctl_file):
         for key, value in sysctl_dict_parsed.items():
             fd.write("{}={}\n".format(key, value))
 
-    log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
+    log("Updating sysctl_file: {} values: {}".format(sysctl_file,
+                                                     sysctl_dict_parsed),
         level=DEBUG)
 
-    check_call(["sysctl", "-p", sysctl_file])
+    call = ["sysctl", "-p", sysctl_file]
+    if ignore:
+        call.append("-e")
+
+    check_call(call)
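
A sketch of the new 'ignore' flag, useful when a key may not exist on
every kernel; the key and file path are illustrative:

    from charmhelpers.core.sysctl import create

    create({'net.ipv6.conf.all.disable_ipv6': 1},
           '/etc/sysctl.d/50-example.conf',
           ignore=True)  # passes -e so unknown variables are skipped
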
diff --git a/charmhelpers/fetch/ubuntu.py b/charmhelpers/fetch/ubuntu.py
index 8a5cadf..c6d9341 100644
--- a/charmhelpers/fetch/ubuntu.py
+++ b/charmhelpers/fetch/ubuntu.py
@@ -19,15 +19,14 @@ import re
 import six
 import time
 import subprocess
-from tempfile import NamedTemporaryFile
 
-from charmhelpers.core.host import (
-    lsb_release
-)
+from charmhelpers.core.host import get_distrib_codename
+
 from charmhelpers.core.hookenv import (
     log,
     DEBUG,
     WARNING,
+    env_proxy_settings,
 )
 from charmhelpers.fetch import SourceConfigError, GPGKeyError
 
@@ -303,12 +302,17 @@ def import_key(key):
     """Import an ASCII Armor key.
 
     A Radix64 format keyid is also supported for backwards
-    compatibility, but should never be used; the key retrieval
-    mechanism is insecure and subject to man-in-the-middle attacks
-    voiding all signature checks using that key.
-
-    :param keyid: The key in ASCII armor format,
-                  including BEGIN and END markers.
+    compatibility. In this case the Ubuntu keyserver will be
+    queried for the key via HTTPS by its keyid. This method
+    is less preferable because HTTPS proxy servers may
+    require traffic decryption which is equivalent to a
+    man-in-the-middle attack (a proxy server impersonates
+    keyserver TLS certificates and has to be explicitly
+    trusted by the system).
+
+    :param key: A GPG key in ASCII armor format,
+                including BEGIN and END markers, or a keyid.
+    :type key: (bytes, str)
     :raises: GPGKeyError if the key could not be imported
     """
     key = key.strip()
@@ -319,35 +323,131 @@ def import_key(key):
         log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
         if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
                 '-----END PGP PUBLIC KEY BLOCK-----' in key):
-            log("Importing ASCII Armor PGP key", level=DEBUG)
-            with NamedTemporaryFile() as keyfile:
-                with open(keyfile.name, 'w') as fd:
-                    fd.write(key)
-                    fd.write("\n")
-                cmd = ['apt-key', 'add', keyfile.name]
-                try:
-                    subprocess.check_call(cmd)
-                except subprocess.CalledProcessError:
-                    error = "Error importing PGP key '{}'".format(key)
-                    log(error)
-                    raise GPGKeyError(error)
+            log("Writing provided PGP key in the binary format", level=DEBUG)
+            if six.PY3:
+                key_bytes = key.encode('utf-8')
+            else:
+                key_bytes = key
+            key_name = _get_keyid_by_gpg_key(key_bytes)
+            key_gpg = _dearmor_gpg_key(key_bytes)
+            _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
         else:
             raise GPGKeyError("ASCII armor markers missing from GPG key")
     else:
-        # We should only send things obviously not a keyid offsite
-        # via this unsecured protocol, as it may be a secret or part
-        # of one.
         log("PGP key found (looks like Radix64 format)", level=WARNING)
-        log("INSECURLY importing PGP key from keyserver; "
+        log("SECURELY importing PGP key from keyserver; "
             "full key not provided.", level=WARNING)
-        cmd = ['apt-key', 'adv', '--keyserver',
-               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
-        try:
-            _run_with_retries(cmd)
-        except subprocess.CalledProcessError:
-            error = "Error importing PGP key '{}'".format(key)
-            log(error)
-            raise GPGKeyError(error)
+        # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL
+        # to retrieve GPG keys. `apt-key adv` command is deprecated as is
+        # apt-key in general as noted in its manpage. See lp:1433761 for more
+        # history. Instead, /etc/apt/trusted.gpg.d is used directly to
+        # drop the dearmored gpg key material.
+        key_asc = _get_key_by_keyid(key)
+        # write the key in GPG format so that apt-key list shows it
+        key_gpg = _dearmor_gpg_key(key_asc)
+        _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg)
+
+
+def _get_keyid_by_gpg_key(key_material):
+    """Get a GPG key fingerprint by GPG key material.
+    Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded
+    or binary GPG key material. Can be used, for example, to generate file
+    names for keys passed via charm options.
+
+    :param key_material: ASCII armor-encoded or binary GPG key material
+    :type key_material: bytes
+    :raises: GPGKeyError if invalid key material has been provided
+    :returns: A GPG key fingerprint
+    :rtype: str
+    """
+    # Use the same gpg command for both Xenial and Bionic
+    cmd = 'gpg --with-colons --with-fingerprint'
+    ps = subprocess.Popen(cmd.split(),
+                          stdout=subprocess.PIPE,
+                          stderr=subprocess.PIPE,
+                          stdin=subprocess.PIPE)
+    out, err = ps.communicate(input=key_material)
+    if six.PY3:
+        out = out.decode('utf-8')
+        err = err.decode('utf-8')
+    if 'gpg: no valid OpenPGP data found.' in err:
+        raise GPGKeyError('Invalid GPG key material provided')
+    # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)
+    return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1)
+
+
+def _get_key_by_keyid(keyid):
+    """Get a key via HTTPS from the Ubuntu keyserver.
+    Different key ID formats are supported by SKS keyservers (the longer ones
+    are more secure, see "dead beef attack" and https://evil32.com/). Since
+    HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will
+    impersonate keyserver.ubuntu.com and generate a certificate with
+    keyserver.ubuntu.com in the CN field or in SubjAltName fields of a
+    certificate. If such proxy behavior is expected it is necessary to add the
+    CA certificate chain containing the intermediate CA of the SSLBump proxy to
+    every machine that this code runs on via ca-certs cloud-init directive (via
+    cloudinit-userdata model-config) or via other means (such as through a
+    custom charm option). Also note that DNS resolution for the hostname in a
+    URL is done at a proxy server - not at the client side.
+
+    8-digit (32 bit) key ID
+    https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6
+    16-digit (64 bit) key ID
+    https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6
+    40-digit key ID:
+    https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6
+
+    :param keyid: An 8, 16 or 40 hex digit keyid to find a key for
+    :type keyid: (bytes, str)
+    :returns: A key material for the specified GPG key id
+    :rtype: (str, bytes)
+    :raises: subprocess.CalledProcessError
+    """
+    # options=mr - machine-readable output (disables html wrappers)
+    keyserver_url = ('https://keyserver.ubuntu.com'
+                     '/pks/lookup?op=get&options=mr&exact=on&search=0x{}')
+    curl_cmd = ['curl', keyserver_url.format(keyid)]
+    # use proxy server settings in order to retrieve the key
+    return subprocess.check_output(curl_cmd,
+                                   env=env_proxy_settings(['https']))
+
+
+def _dearmor_gpg_key(key_asc):
+    """Converts a GPG key in the ASCII armor format to the binary format.
+
+    :param key_asc: A GPG key in ASCII armor format.
+    :type key_asc: (str, bytes)
+    :returns: A GPG key in binary format
+    :rtype: (str, bytes)
+    :raises: GPGKeyError
+    """
+    ps = subprocess.Popen(['gpg', '--dearmor'],
+                          stdout=subprocess.PIPE,
+                          stderr=subprocess.PIPE,
+                          stdin=subprocess.PIPE)
+    out, err = ps.communicate(input=key_asc)
+    # no need to decode output as it is binary (invalid utf-8), only error
+    if six.PY3:
+        err = err.decode('utf-8')
+    if 'gpg: no valid OpenPGP data found.' in err:
+        raise GPGKeyError('Invalid GPG key material. Check your network setup'
+                          ' (MTU, routing, DNS) and/or proxy server settings'
+                          ' as well as destination keyserver status.')
+    else:
+        return out
+
+
+def _write_apt_gpg_keyfile(key_name, key_material):
+    """Writes GPG key material into a file at a provided path.
+
+    :param key_name: A key name to use for a key file (could be a fingerprint)
+    :type key_name: str
+    :param key_material: A GPG key material (binary)
+    :type key_material: (str, bytes)
+    """
+    with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
+              'wb') as keyf:
+        keyf.write(key_material)
 
 
 def add_source(source, key=None, fail_invalid=False):
@@ -442,13 +542,13 @@ def add_source(source, key=None, fail_invalid=False):
 def _add_proposed():
     """Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list
 
-    Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct staza for
+    Uses get_distrib_codename to determine the correct stanza for
     the deb line.
 
     For Intel architectures PROPOSED_POCKET is used for the release, but for
     other architectures PROPOSED_PORTS_POCKET is used for the release.
     """
-    release = lsb_release()['DISTRIB_CODENAME']
+    release = get_distrib_codename()
     arch = platform.machine()
     if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
         raise SourceConfigError("Arch {} not supported for (distro-)proposed"
@@ -461,11 +561,16 @@ def _add_apt_repository(spec):
     """Add the spec using add_apt_repository
 
     :param spec: the parameter to pass to add_apt_repository
+    :type spec: str
     """
     if '{series}' in spec:
-        series = lsb_release()['DISTRIB_CODENAME']
+        series = get_distrib_codename()
         spec = spec.replace('{series}', series)
-    _run_with_retries(['add-apt-repository', '--yes', spec])
+    # software-properties package for bionic properly reacts to proxy settings
+    # passed as environment variables (See lp:1433761). This is not the case
+    # for LTS and non-LTS releases below bionic.
+    _run_with_retries(['add-apt-repository', '--yes', spec],
+                      cmd_env=env_proxy_settings(['https']))
 
 
 def _add_cloud_pocket(pocket):
@@ -534,7 +639,7 @@ def _verify_is_ubuntu_rel(release, os_release):
     :raises: SourceConfigError if the release is not the same as the ubuntu
         release.
     """
-    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
+    ubuntu_rel = get_distrib_codename()
     if release != ubuntu_rel:
         raise SourceConfigError(
             'Invalid Cloud Archive release specified: {}-{} on this Ubuntu'
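
A sketch of the keyid code path: the key is now fetched from the
keyserver over HTTPS, dearmored, and written under
/etc/apt/trusted.gpg.d rather than piped through 'apt-key adv'; the
keyid is illustrative:

    from charmhelpers.fetch.ubuntu import import_key

    import_key('6E85A86E4652B4E6')
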
-- 
GitLab