diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 3e345e683537accd90fb395cc747fe7bac1adc4a..0000000000000000000000000000000000000000 --- a/.coveragerc +++ /dev/null @@ -1,7 +0,0 @@ -[report] -# Regexes for lines to exclude from consideration -exclude_lines = - if __name__ == .__main__.: -include= - hooks/keystone_* - actions/actions.py diff --git a/.gitignore b/.gitignore index a958ad5be3dfaf47805657190295356b2caa6a70..5c68cf0fb5b4723fbc5dbbe93dd2f512d3845ca5 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ func-results.json .local __pycache__ .stestr +.idea diff --git a/.gitreview b/.gitreview index 6d1fcc95f0bae630fd2a1b337f2fa924d4f7c494..899b3ed445d8bfa115c4bcb41ec2d4b9370db7d7 100644 --- a/.gitreview +++ b/.gitreview @@ -1,6 +1,6 @@ [gerrit] -host=review.openstack.org +host=review.opendev.org port=29418 project=openstack/charm-keystone.git -defaultbranch=stable/17.11 +defaultbranch=stable/19.10 diff --git a/.pydevproject b/.pydevproject index 9ebe13fe3885a24679f937e9e7496dfd93df2b09..66f5d7bc397e171565ea11a49ebcb2f0895c1d82 100644 --- a/.pydevproject +++ b/.pydevproject @@ -1,9 +1,14 @@ <?xml version="1.0" encoding="UTF-8" standalone="no"?> <?eclipse-pydev version="1.0"?><pydev_project> -<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property> -<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property> -<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH"> -<path>/keystone/hooks</path> -<path>/keystone/unit_tests</path> -</pydev_pathproperty> + + <pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property> + + <pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property> + + <pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH"> + <path>/keystone/hooks</path> + <path>/keystone/unit_tests</path> + <path>/${PROJECT_DIR_NAME}</path> + </pydev_pathproperty> + </pydev_project> diff --git a/.stestr.conf b/.stestr.conf new file mode 100644 index 0000000000000000000000000000000000000000..5fcccaca861e750fcf37f412c13e560614c8b3b4 --- /dev/null +++ b/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./unit_tests +top_dir=./ diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index 801646bb85613d0f544705a9f4e6459442204692..0000000000000000000000000000000000000000 --- a/.testr.conf +++ /dev/null @@ -1,8 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION - -test_id_option=--load-list $IDFILE -test_list_option=--list diff --git a/.zuul.yaml b/.zuul.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7332a874885ab55cabbce643a377541157fe6e5b --- /dev/null +++ b/.zuul.yaml @@ -0,0 +1,5 @@ +- project: + templates: + - python35-charm-jobs + - openstack-python3-train-jobs + - openstack-cover-jobs diff --git a/Makefile b/Makefile index acf5891741ce688487c9705ba2a6a85db70f5a61..77aff5deb3539043869af2fa1b1bc05cd55439ab 100644 --- a/Makefile +++ b/Makefile @@ -12,8 +12,8 @@ test: @tox -e py27 functional_test: - @echo Starting Amulet tests... - @tox -e func27 + @echo Starting Zaza functional tests... 
+ @tox -e func bin/charm_helpers_sync.py: @mkdir -p bin @@ -22,7 +22,6 @@ bin/charm_helpers_sync.py: sync: bin/charm_helpers_sync.py @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml - @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml publish: lint test export OUTPUT=`charm push . cs:~$(USER)/$(NAME)`; echo $$OUTPUT diff --git a/README.md b/README.md index 04ad9a0e629aa21096ee12aeeaa59cc29322c489..5aac7c22b3743afc179cf6664eb9e0efd893181b 100644 --- a/README.md +++ b/README.md @@ -1,175 +1,153 @@ Overview ======== -This charm provides Keystone, the Openstack identity service. It's target -platform is (ideally) Ubuntu LTS + Openstack. - -This is a modified version, which adds support for Identity -Federations, based on SAML or OIDC. +This charm provides Keystone, the OpenStack identity service. Its target +platform is (ideally) Ubuntu LTS + OpenStack. Usage ===== The following interfaces are provided: - - nrpe-external-master: Used to generate Nagios checks. - - - identity-service: Openstack API endpoints request an entry in the - Keystone service catalog + endpoint template catalog. When a relation - is established, Keystone receives: service name, region, public_url, - admin_url and internal_url. It first checks that the requested service - is listed as a supported service. This list should stay updated to - support current Openstack core services. If the service is supported, - an entry in the service catalog is created, an endpoint template is - created and a admin token is generated. The other end of the relation - receives the token as well as info on which ports Keystone is listening - on. - - - keystone-service: This is currently only used by Horizon/dashboard - as its interaction with Keystone is different from other Openstack API - services. That is, Horizon requests a Keystone role and token exists. - During a relation, Horizon requests its configured default role and - Keystone responds with a token and the auth + admin ports on which - Keystone is listening. - - - identity-admin: Charms use this relation to obtain the credentials - for the admin user. This is intended for charms that automatically - provision users, tenants, etc. or that otherwise automate using the - Openstack cluster deployment. - - - identity-notifications: Used to broadcast messages to any services - listening on the interface. - - - identity-credentials: Charms use this relation to obtain keystone - credentials without creating a service catalog entry. Set 'username' - only on the relation and keystone will set defaults and return - authentication details. Possible relation settings: - username: Username to be created. - project: Project (tenant) name to be created. Defaults to services - project. - requested_roles: Comma delimited list of roles to be created - requested_grants: Comma delimited list of roles to be granted. - Defaults to Admin role. - domain: Keystone v3 domain the user will be created in. Defaults - to the Default domain. +- nrpe-external-master: Used to generate Nagios checks. + +- identity-service: OpenStack API endpoints request an entry in the + Keystone service catalog + endpoint template catalog. When a relation is + established, Keystone receives: `service_name`, `region`, `public_url`, + `admin_url` and `internal_url`. It first checks that the requested service is + listed as a supported service. This list should stay updated to support + current OpenStack core services. 
If the service is supported, an entry in the + service catalog is created, an endpoint template is created and an admin token + is generated. The other end of the relation receives the token as well as + info on which ports Keystone is listening on. + +- keystone-service: This is currently only used by Horizon/dashboard + as its interaction with Keystone is different from other OpenStack API + services. That is, Horizon requests a Keystone role and token exists. During + a relation, Horizon requests its configured default role and Keystone + responds with a token and the auth + admin ports on which Keystone is + listening. + +- identity-admin: Charms use this relation to obtain the credentials + for the admin user. This is intended for charms that automatically provision + users, tenants, etc. or that otherwise automate using the OpenStack cluster + deployment. + +- identity-notifications: Used to broadcast messages to any services + listening on the interface. + +- identity-credentials: Charms use this relation to obtain keystone + credentials without creating a service catalog entry. Set 'username' + only on the relation and keystone will set defaults and return + authentication details. Possible relation settings: + - `username` Username to be created. + - `project` Project (tenant) name to be created. Defaults to services + project. + - `requested_roles` Comma delimited list of roles to be created. + - `requested_grants` Comma delimited list of roles to be granted. + Defaults to Admin role. + - `domain` Keystone v3 domain the user will be created in. Defaults + to the Default domain. Database -------- -Keystone requires a database. By default, a local sqlite database is used. -The charm supports relations to a shared-db via mysql-shared interface. When -a new data store is configured, the charm ensures the minimum administrator -credentials exist (as configured via charm configuration) +Keystone requires a database. The charm supports relation to a shared database +server through the `mysql-shared` interface. When a new data store is +configured, the charm ensures the minimum administrator credentials exist (as +configured in charm configuration) HA/Clustering ------------- There are two mutually exclusive high availability options: using virtual IP(s) or DNS. In both cases, a relationship to hacluster is required which -provides the corosync back end HA functionality. +provides the corosync backend HA functionality. -To use virtual IP(s) the clustered nodes must be on the same subnet such that +To use virtual IP(s), the clustered nodes must be on the same subnet such that the VIP is a valid IP on the subnet for one of the node's interfaces and each node has an interface in said subnet. The VIP becomes a highly-available API endpoint. -At a minimum, the config option 'vip' must be set in order to use virtual IP +At a minimum, the config option `vip` must be set in order to use virtual IP HA. If multiple networks are being used, a VIP should be provided for each -network, separated by spaces. Optionally, vip_iface or vip_cidr may be +network, separated by spaces. Optionally, `vip_iface` or `vip_cidr` may be specified. -To use DNS high availability there are several prerequisites. However, DNS HA +To use DNS high availability, there are several prerequisites. However, DNS HA does not require the clustered nodes to be on the same subnet. 
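For the virtual IP case described above, a minimal sketch of the required steps
might look like the following (the VIP value and the hacluster application name
are hypothetical; the DNS HA alternative is described next):

    juju config keystone vip="10.5.100.10"
    juju deploy hacluster keystone-hacluster
    juju add-relation keystone keystone-hacluster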
-Currently the DNS HA feature is only available for MAAS 2.0 or greater +Currently, the DNS HA feature is only available for MAAS 2.0 or greater environments. MAAS 2.0 requires Juju 2.0 or greater. The clustered nodes must have static or "reserved" IP addresses registered in MAAS. The DNS hostname(s) must be pre-registered in MAAS before use with DNS HA. -At a minimum, the config option 'dns-ha' must be set to true and at least one -of 'os-public-hostname', 'os-internal-hostname' or 'os-internal-hostname' must -be set in order to use DNS HA. One or more of the above hostnames may be set. +At a minimum, the configuration option `dns-ha` must be set to true and at +least one of `os-public-hostname`, `os-internal-hostname` or +`os-admin-hostname` must be set in order to use DNS HA. One or more of the +above hostnames may be set. The charm will throw an exception in the following circumstances: -If neither 'vip' nor 'dns-ha' is set and the charm is related to hacluster -If both 'vip' and 'dns-ha' are set as they are mutually exclusive -If 'dns-ha' is set and none of the os-{admin,internal,public}-hostname(s) are set - -SSL/HTTPS ---------- -Support for SSL and https endpoint is provided via a set of configuration -options on the charm. There are two types supported; +- If neither `vip` nor `dns-ha` is set and the charm is related to hacluster -use-https - if enabled this option tells Keystone to configure the identity -endpoint as https. Under this model the keystone charm will either use the CA -as provided by the user (see ssl_* options below) or will generate its own and -sync across peers. The cert will be distributed to all service endpoints which -will be configured to use https. +- If both `vip` and `dns-ha` are set as they are mutually exclusive -https-service-endpoints - if enabled this option tells Keystone to configure -ALL endpoints as https. Under this model the keystone charm will either use the -CA as provided by the user (see ssl_* options below) or will generate its own -and sync across peers. The cert will be distributed to all service endpoints -which will be configured to use https as well as configuring themselves to be -used as https. +- If `dns-ha` is set and none of the `os-{admin,internal,public}-hostname` + configuration options are set -When configuring the charms to use SSL there are three charm config options as -ssl_ca, ssl_cert and ssl_key. - -- The user can provide their own CA, SSL cert and key using the options ssl_ca, - ssl_cert, ssl_key. +TLS/HTTPS +--------- -- The user can provide SSL cert and key using ssl_cert and ssl_key when the cert - is signed by a trusted CA. +Support for TLS and HTTPS endpoints can be enabled through configuration +options. -- If not provided, the keystone charm will automatically generate a CA and certs - to distribute to endpoints. +To enable TLS and HTTPS endpoints with a certificate signed by your own +Certificate Authority, set the following configuration options: -When the charm configures itself as a CA (generally only recommended for test -purposes) it will elect an "ssl-cert-master" whose duty is to generate the CA -and certs and ensure they are distributed across all peers. This leader is -distinct from the charm leader as elected by Juju so that if the Juju leader -switches we still have the ability to know which unit held the last-known-good -copy of CA/cert data. If the Juju leader switches the charm should eventually -work it out and migrate the ssl-cert-master to the new leader unit. 
+- `ssl_ca` -One side-effect of this is that if the unit currently elected as -ssl-cert-master goes down, the remaining peer units or indeed any new units -will not be able to sync the ssl data of the master or re-elect a new master. -This does currently require manual intervention to resolve. If no action is -taken, it will be assumed that this unit may come back at some point and -therefore must be known to be in-sync with the rest before continuing. +- `ssl_cert` -It is possible to check which unit is the ssl-cert-master with: +- `ssl_key` -~$ juju run --unit keystone/0 "relation-ids cluster" -cluster:6 -~$ juju run --unit keystone/0 "relation-get -r cluster:6 ssl-cert-master keystone/0" -keystone/0 +Example bundle usage: -If the master unit goes down and you want to manually migrate it to another -unit (that you are 100% sure holds an authoritative copy of the ssl certs) -you can do: + keystone: + charm: cs:keystone + num_units: 1 + options: + ssl_ca: include-base64://path-to-PEM-formatted-ca-data + ssl_cert: include-base64://path-to-PEM-formatted-certificate-data + ssl_key: include-base64://path-to-PEM-formatted-key-data -~$ juju run --unit keystone/0 "relation-set -r cluster:6 ssl-cert-master=keystone/1" +> **Note**: If your certificate is signed by a Certificate Authority present in + the CA Certificate Store in operating systems used in your deployment, you do + not need to provide the `ssl_ca` configuration option. -Where keystone/1 is known to hold a good copy of the CA/cert info and is -preferrably also the cluster leader. +> **Note**: The `include-base64` bundle keyword tells Juju to source a file and + Base64 encode it before storing it as a configuration option value. The path + can be absolute or relative to the location of the bundle file. -Network Space support +Network Space Support --------------------- -This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above. +This charm supports the use of Juju Network Spaces, allowing the charm to be +bound to network space configurations managed directly by Juju. This is only +supported with Juju 2.0 and above. -API endpoints can be bound to distinct network spaces supporting the network separation of public, internal and admin endpoints. +API endpoints can be bound to distinct network spaces supporting the network +separation of public, internal and admin endpoints. -Access to the underlying MySQL instance can also be bound to a specific space using the shared-db relation. +Access to the underlying MySQL instance can also be bound to a specific space +using the shared-db relation. To use this feature, use the --bind option when deploying the charm: - juju deploy keystone --bind "public=public-space internal=internal-space admin=admin-space shared-db=internal-space" + juju deploy keystone --bind \ + "public=public-space internal=internal-space admin=admin-space shared-db=internal-space" -alternatively these can also be provided as part of a juju native bundle configuration: +Alternatively, these can also be provided as part of a juju native bundle +configuration: keystone: charm: cs:xenial/keystone @@ -180,59 +158,150 @@ alternatively these can also be provided as part of a juju native bundle configu internal: internal-space shared-db: internal-space -NOTE: Spaces must be configured in the underlying provider prior to attempting to use them. 
+NOTE: Spaces must be configured in the underlying provider prior to attempting +to use them. -NOTE: Existing deployments using os-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set. +NOTE: Existing deployments using `os\-\*-network` configuration options will +continue to function; these options are preferred over any network space +binding provided if set. -Federated Authentication ------------------------- +Policy Overrides +---------------- -Support for federated authentication is provided according to [this guide](https://cloud.garr.it/support/kb/cloud/federated_auth/). +This feature allows for policy overrides using the `policy.d` directory. This +is an **advanced** feature and the policies that the OpenStack service supports +should be clearly and unambiguously understood before trying to override, or +add to, the default policies that the service uses. The charm also has some +policy defaults. They should also be understood before being overridden. -The saml-*/shibsp-* and oidc-* charm parameters are used to configure and enable support -for SAML2 and OIDC authentication. +> **Caution**: It is possible to break the system (for tenants and other + services) if policies are incorrectly applied to the service. -The following parameters must be provided: +Policy overrides are YAML files that contain rules that will add to, or +override, existing policy rules in the service. The `policy.d` directory is +a place to put the YAML override files. This charm owns the +`/etc/keystone/policy.d` directory, and as such, any manual changes to it will +be overwritten on charm upgrades. - - common: - - trusted-dashboard +Overrides are provided to the charm using a Juju resource called +`policyd-override`. The resource is a ZIP file. This file, say +`overrides.zip`, is attached to the charm by: -N.B. Trusted dashboard must contain the full path e.g. "https://dashboard.cloud.garr.it/auth/websso" - - SAML2/SHIBSP: - - enable-saml2 - - saml2-metadata-url - - saml2-metadata-signer-url - - saml2-fed-users-domain - - saml2-mapping - - shibsp-cert - - shibsp-key - - shibsp-identity-provider - - shibsp-idp-remote-ids - - shibsp-idp-remote-ids-fetch - - shibsp-discovery-url - - shibsp-discovery-eds-tar - - shibsp-support-contact + juju attach-resource keystone policyd-override=overrides.zip -When configuring a single IdP: - - shibsp-idp-metadata-file / shibsp-idp-metadata-url +The policy override is enabled in the charm using: - - OIDC: - - enable-oidc - - oidc-client-id - - oidc-client-secret - - oidc-crypto-passphrase - - oidc-identity-provider - - oidc-idp-remote-id - - oidc-provider-metadata-url - - oidc-mapping - - oidc2-fed-users-domain - - oidc-redirect-uri + juju config keystone use-policyd-override=true -Additionally, to provide an undestandable error message, when a user -tries to login to the dashboard and authentication succeeds, but no -mapping between the federated user and an OpenStack user exists, the -following parameter must be set to an appropriate error message URL: +When `use-policyd-override` is `True` the status line of the charm will be +prefixed with `PO:` indicating that policies have been overridden. If the +installation of the policy override YAML files failed for any reason then the +status line will be prefixed with `PO (broken):`. The log file for the charm +will indicate the reason. No policy override files are installed if the `PO +(broken):` is shown. 
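As an illustrative sketch of the workflow just described (the override file
name and the policy rule shown are hypothetical, and any rule used must be
valid for the deployed Keystone release), an override file might contain:

    # override.yaml
    "identity:list_services": "rule:admin_required"

It would then be zipped, attached and enabled with:

    zip overrides.zip override.yaml
    juju attach-resource keystone policyd-override=overrides.zip
    juju config keystone use-policyd-override=true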
Note that the status line indicates that the overrides are broken,
+not that the policy for the service has failed. The policy will be the defaults
+for the charm and service.
- no-user-mapping-url
+Policy overrides on one service may affect the functionality of another
+service. Therefore, it may be necessary to provide policy overrides for
+multiple service charms to achieve a consistent set of policies across the
+OpenStack system. The charms for the other services that may need overrides
+should be checked to ensure that they support overrides before proceeding.
+
+Token Support
+-------------
+As the keystone charm supports multiple releases of the OpenStack software, it
+also supports two keystone token systems: UUID and Fernet. The capabilities are:
+
+- pre 'ocata': UUID tokens only.
+- ocata and pike: UUID or Fernet tokens, configured via the 'token-provider'
+  configuration parameter.
+- rocky and later: Fernet tokens only.
+
+Fernet tokens were added to OpenStack to solve the problem of keystone being
+required to persist tokens to a common database (cluster) like UUID tokens,
+and to solve the problem of size for PKI or PKIZ tokens.
+
+For further information, please see [Fernet - Frequently Asked
+Questions](https://docs.openstack.org/keystone/pike/admin/identity-fernet-token-faq.html).
+
+### Theory of Operation
+
+Fernet keys are used to generate tokens; they are generated by keystone
+and have an expiration date. The key repository is a directory, and each
+key is an integer number, with the highest number being the primary key. Key
+'0' is the staged key, which will be the next primary. Other keys are secondary
+keys.
+
+New tokens are only ever generated from the primary key, whilst the secondary
+keys are used to validate existing tokens. The staging key is not used to
+generate tokens but can be used to validate tokens, as the staging key might be
+the new primary key on the master due to a rotation and the keys have not yet
+been synchronised across all the units.
+
+Fernet keys need to be rotated at periodic intervals, and the keys need to be
+synchronised to each of the other keystone units. Keys should only be rotated
+on the master keystone unit and must be synchronised *before* they are rotated
+again. *Over rotation* occurs if a unit rotates its keys such that there is
+no suitable decoding key on another unit that can decode a token that has been
+generated on the master. This happens if two key rotations are done on the
+master before a synchronisation has been successfully performed. This should
+be avoided. Over rotation can also cause validation keys to be removed
+*before* a token's expiration, which would result in failed validations.
+
+There are 3 parts to the **Key Rotation Strategy**:
+
+1. The rotation frequency
+2. The token lifespan
+3. The number of active keys
+
+There must be at least 3 keys as a minimum. The actual number of keys is
+determined by the *token lifespan* and the *rotation frequency*: the
+*max_active_keys* must be two greater than the *token lifespan* / *rotation
+frequency*.
+
+To quote from the [FAQ](https://docs.openstack.org/keystone/queens/admin/identity-fernet-token-faq.html):
+
+    The number of max_active_keys for a deployment can be determined by
+    dividing the token lifetime, in hours, by the frequency of rotation in
+    hours and adding two.
+    Better illustrated as:
+
+        max_active_keys = (token_lifetime / rotation_frequency) + 2
+
+### Configuring Key Lifetime
+
+In the keystone charm, the _rotation frequency_ is calculated
+automatically from the `token-expiration` and the `fernet-max-active-keys`
+configuration parameters. For example, with an expiration of 24 hours and
+6 active keys, the rotation frequency is calculated as:
+
+```python
+token_expiration = 24       # hours; the charm option itself is set in seconds
+max_active_keys = 6
+rotation_frequency = token_expiration / (max_active_keys - 2)
+```
+
+Thus, `fernet-max-active-keys` can never be less than 3 (which is enforced in
+the charm); at 3 keys the rotation frequency is the *same* as the token
+expiration time.
+
+NOTE: To increase the rotation frequency, _either_ increase
+`fernet-max-active-keys` or reduce `token-expiration`, and, to decrease the
+rotation frequency, do the opposite.
+
+NOTE: If the configuration parameters are used to significantly reduce the
+key lifetime, then it is possible to over-rotate the verification keys
+such that services will hold tokens that cannot be verified but haven't
+yet expired. This should be avoided by only making small changes and
+verifying that current tokens will still be able to be verified. In
+particular, `fernet-max-active-keys` affects the rotation time.
+
+### Upgrades
+
+When an older keystone charm is upgraded to this version, NO change will
+occur to the token system. That is, an ocata system will continue to use
+UUID tokens. In order to change the token system to Fernet, change the
+`token-provider` configuration item to `fernet`. This will switch the
+token system over. There may be a small outage in the _control plane_,
+but the running instances will be unaffected.
diff --git a/actions.yaml b/actions.yaml
index 767bc8abaa8cdd3cfd50094ae41c1499354ca761..edcf721e0faa17588c740cd790cbd3d409dfbce9 100644
--- a/actions.yaml
+++ b/actions.yaml
@@ -13,3 +13,5 @@ openstack-upgrade:
   description: |
     Perform openstack upgrades. Config option action-managed-upgrade must be
     set to True.
+security-checklist: + description: Validate the running configuration against the OpenStack security guides checklist diff --git a/actions/actions.py b/actions/actions.py index 37cdb2517d005370c330cbff1450883bc15b8e49..083d262aa1591c5236c883804a00af21a1d6f328 100755 --- a/actions/actions.py +++ b/actions/actions.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # @@ -17,9 +17,21 @@ import sys import os +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) + from charmhelpers.core.hookenv import action_fail -from hooks.keystone_utils import ( +from keystone_utils import ( pause_unit_helper, resume_unit_helper, register_configs, @@ -52,7 +64,7 @@ def main(args): try: action = ACTIONS[action_name] except KeyError: - return "Action %s undefined" % action_name + return "Action {} undefined".format(action_name) else: try: action(args) diff --git a/actions/charmhelpers b/actions/charmhelpers deleted file mode 120000 index 702de734b0c015b34565dfbd7ba8c48ace8cb262..0000000000000000000000000000000000000000 --- a/actions/charmhelpers +++ /dev/null @@ -1 +0,0 @@ -../charmhelpers \ No newline at end of file diff --git a/actions/hooks b/actions/hooks deleted file mode 120000 index f631275e19cd320f570733cb0ce1f287d6f02702..0000000000000000000000000000000000000000 --- a/actions/hooks +++ /dev/null @@ -1 +0,0 @@ -../hooks \ No newline at end of file diff --git a/actions/openstack_upgrade.py b/actions/openstack_upgrade.py index fde3284c8598c1f636717ca23c03431a45ec7842..aca6b6994f6b134629ebaae33dcd161f03b63c28 100755 --- a/actions/openstack_upgrade.py +++ b/actions/openstack_upgrade.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # @@ -17,7 +17,17 @@ import os import sys -sys.path.append('hooks/') +_path = os.path.dirname(os.path.realpath(__file__)) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_hooks) +_add_path(_root) from charmhelpers.contrib.openstack.utils import ( do_action_openstack_upgrade, @@ -43,7 +53,8 @@ def openstack_upgrade(): if (do_action_openstack_upgrade('keystone', do_openstack_upgrade, register_configs())): - os.execl('./hooks/config-changed-postupgrade', '') + os.execl('./hooks/config-changed-postupgrade', + 'config-changed-postupgrade') if __name__ == '__main__': openstack_upgrade() diff --git a/actions/security-checklist b/actions/security-checklist new file mode 120000 index 0000000000000000000000000000000000000000..474649707b395a3558bb0f8e36eb441f9ffcdb6a --- /dev/null +++ b/actions/security-checklist @@ -0,0 +1 @@ +security_checklist.py \ No newline at end of file diff --git a/actions/security_checklist.py b/actions/security_checklist.py new file mode 100755 index 0000000000000000000000000000000000000000..d7a6c27fdd8d73fff1b2f98b7abf7e296d859d64 --- /dev/null +++ b/actions/security_checklist.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python3 +# +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import configparser
+import os
+import sys
+
+sys.path.append('.')
+
+import charmhelpers.contrib.openstack.audits as audits
+from charmhelpers.contrib.openstack.audits import (
+    openstack_security_guide,
+)
+
+
+# Via the openstack_security_guide above, we are running the following
+# security assertions automatically:
+#
+# - Check-Identity-01 - validate-file-ownership
+# - Check-Identity-02 - validate-file-permissions
+
+
+@audits.audit(audits.is_audit_type(audits.AuditType.OpenStackSecurityGuide),)
+def uses_sha256_for_hashing_tokens(audit_options):
+    """Validate that SHA256 is used to hash tokens.
+
+    Security Guide Check Name: Check-Identity-04
+
+    :param audit_options: Dictionary of options for audit configuration
+    :type audit_options: Dict
+    :raises: AssertionError if the assertion fails.
+    """
+    section = audit_options['keystone-conf'].get('token')
+    assert section is not None, "Missing section 'token'"
+    provider = section.get('provider')
+    algorithm = section.get("hash_algorithm")
+    if provider and "pki" in provider:
+        assert "SHA256" == algorithm, \
+            "Weak hash algorithm used with PKI provider: {}".format(
+                algorithm)
+
+
+@audits.audit(audits.is_audit_type(audits.AuditType.OpenStackSecurityGuide),
+              audits.since_openstack_release('keystone', 'juno'))
+def check_max_request_body_size(audit_options):
+    """Validate that a sane max_request_body_size is set.
+
+    Security Guide Check Name: Check-Identity-05
+
+    :param audit_options: Dictionary of options for audit configuration
+    :type audit_options: Dict
+    :raises: AssertionError if the assertion fails.
+    """
+    default = audit_options['keystone-conf'].get('DEFAULT', {})
+    oslo_middleware = audit_options['keystone-conf'] \
+        .get('oslo_middleware', {})
+    assert (default.get('max_request_body_size') or
+            oslo_middleware.get('max_request_body_size') is not None), \
+        "max_request_body_size should be set"
+
+
+@audits.audit(audits.is_audit_type(audits.AuditType.OpenStackSecurityGuide))
+def disable_admin_token(audit_options):
+    """Validate that the admin token is disabled.
+
+    Security Guide Check Name: Check-Identity-06
+
+    :param audit_options: Dictionary of options for audit configuration
+    :type audit_options: Dict
+    :raises: AssertionError if the assertion fails.
+    """
+    default = audit_options['keystone-conf'].get('DEFAULT')
+    assert default is not None, "Missing section 'DEFAULT'"
+    assert default.get('admin_token') is None, \
+        "admin_token should be unset"
+    # NOTE: _config_file() expects a list of path segments
+    keystone_paste = _config_file(['/etc/keystone/keystone-paste.ini'])
+    section = keystone_paste.get('filter:admin_token_auth')
+    if section is not None:
+        assert section.get('AdminTokenAuthMiddleware') is None, \
+            'AdminTokenAuthMiddleware should be unset in keystone-paste.ini'
+
+
+@audits.audit(audits.is_audit_type(audits.AuditType.OpenStackSecurityGuide))
+def insecure_debug_is_false(audit_options):
+    """Validate that insecure_debug is false.
+ + Security Guide Check Name: Check-Identity-07 + + :param audit_options: Dictionary of options for audit configuration + :type audit_options: Dict + :raises: AssertionError if the assertion fails. + """ + section = audit_options['keystone-conf'].get('DEFAULT') + assert section is not None, "Missing section 'DEFAULT'" + insecure_debug = section.get('insecure_debug') + if insecure_debug is not None: + assert insecure_debug == "false", \ + "insecure_debug should be false" + + +@audits.audit(audits.is_audit_type(audits.AuditType.OpenStackSecurityGuide), + audits.since_openstack_release('keystone', 'pike'), + audits.before_openstack_release('keystone', 'rocky')) +def uses_fernet_token(audit_options): + """Validate that fernet tokens are used. + + Security Guide Check Name: Check-Identity-08 + + :param audit_options: Dictionary of options for audit configuration + :type audit_options: Dict + :raises: AssertionError if the assertion fails. + """ + section = audit_options['keystone-conf'].get('token') + assert section is not None, "Missing section 'token'" + assert "fernet" == section.get("provider"), \ + "Fernet tokens are not used" + + +@audits.audit(audits.is_audit_type(audits.AuditType.OpenStackSecurityGuide), + audits.since_openstack_release('keystone', 'rocky')) +def uses_fernet_token_after_default(audit_options): + """Validate that fernet tokens are used. + + :param audit_options: Dictionary of options for audit configuration + :type audit_options: Dict + :raises: AssertionError if the assertion fails. + """ + section = audit_options['keystone-conf'].get('token') + assert section is not None, "Missing section 'token'" + provider = section.get("provider") + if provider: + assert "fernet" == provider, "Fernet tokens are not used" + + +def _config_file(path): + """Read and parse config file at `path` as an ini file. 
+ + :param path: Path of the file + :type path: List[str] + :returns: Parsed contents of the file at path + :rtype Dict: + """ + conf = configparser.ConfigParser() + conf.read(os.path.join(*path)) + return dict(conf) + + +def main(): + config = { + 'config_path': '/etc/keystone', + 'config_file': 'keystone.conf', + 'audit_type': audits.AuditType.OpenStackSecurityGuide, + 'files': openstack_security_guide.FILE_ASSERTIONS['keystone'], + 'excludes': [ + 'validate-uses-keystone', + 'validate-uses-tls-for-glance', + 'validate-uses-tls-for-keystone', + ], + } + config['keystone-conf'] = _config_file( + [config['config_path'], config['config_file']]) + return audits.action_parse_results(audits.run(config)) + +if __name__ == "__main__": + sys.exit(main()) diff --git a/charm-helpers-hooks.yaml b/charm-helpers-hooks.yaml index 2d86fd25e0dc40c78d0669b8b278899cd9e6ddcb..ba873f5c4bdc3c8eda9cad56e177a35ea307c1a6 100644 --- a/charm-helpers-hooks.yaml +++ b/charm-helpers-hooks.yaml @@ -1,4 +1,4 @@ -repo: https://github.com/juju/charm-helpers +repo: https://github.com/juju/charm-helpers@stable/19.10 destination: charmhelpers include: - core @@ -15,6 +15,7 @@ include: - payload - contrib.peerstorage - contrib.network.ip - - contrib.python.packages + - contrib.python - contrib.charmsupport - contrib.hardening|inc=* + - contrib.openstack.policyd diff --git a/charm-helpers-tests.yaml b/charm-helpers-tests.yaml deleted file mode 100644 index f64f0dde9a873ea77c6fa18b3b37bccd23c1ae4c..0000000000000000000000000000000000000000 --- a/charm-helpers-tests.yaml +++ /dev/null @@ -1,7 +0,0 @@ -repo: https://github.com/juju/charm-helpers -destination: tests/charmhelpers -include: - - contrib.amulet - - contrib.openstack.amulet - - core - - osplatform diff --git a/charmhelpers/__init__.py b/charmhelpers/__init__.py index e7aa471541a8a5871df11684ca8c579a30203a80..61ef90719b5d5d759de1a6b80a1ea748d8bb0911 100644 --- a/charmhelpers/__init__.py +++ b/charmhelpers/__init__.py @@ -23,22 +23,22 @@ import subprocess import sys try: - import six # flake8: noqa + import six # NOQA:F401 except ImportError: if sys.version_info.major == 2: subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) else: subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa + import six # NOQA:F401 try: - import yaml # flake8: noqa + import yaml # NOQA:F401 except ImportError: if sys.version_info.major == 2: subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) else: subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa + import yaml # NOQA:F401 # Holds a list of mapping of mangled function names that have been deprecated diff --git a/charmhelpers/cli/unitdata.py b/charmhelpers/cli/unitdata.py index c57285822f37e77ba928c0db66d4f7710435278d..acce846f84ef32ed0b5829cf08e67ad33f0eb5d1 100644 --- a/charmhelpers/cli/unitdata.py +++ b/charmhelpers/cli/unitdata.py @@ -19,9 +19,16 @@ from charmhelpers.core import unitdata @cmdline.subcommand_builder('unitdata', description="Store and retrieve data") def unitdata_cmd(subparser): nested = subparser.add_subparsers() + get_cmd = nested.add_parser('get', help='Retrieve data') get_cmd.add_argument('key', help='Key to retrieve the value of') get_cmd.set_defaults(action='get', value=None) + + getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data') + getrange_cmd.add_argument('key', metavar='prefix', + help='Prefix of the keys to retrieve') + getrange_cmd.set_defaults(action='getrange', value=None) + 
set_cmd = nested.add_parser('set', help='Store data') set_cmd.add_argument('key', help='Key to set') set_cmd.add_argument('value', help='Value to store') @@ -30,6 +37,8 @@ def unitdata_cmd(subparser): def _unitdata_cmd(action, key, value): if action == 'get': return unitdata.kv().get(key) + elif action == 'getrange': + return unitdata.kv().getrange(key) elif action == 'set': unitdata.kv().set(key, value) unitdata.kv().flush() diff --git a/charmhelpers/contrib/charmsupport/nrpe.py b/charmhelpers/contrib/charmsupport/nrpe.py index 1c55b30f259d3ac16c2974486334430507bfc5ac..a3d89936d8b1b9966571e7248379800a7bb8190c 100644 --- a/charmhelpers/contrib/charmsupport/nrpe.py +++ b/charmhelpers/contrib/charmsupport/nrpe.py @@ -33,6 +33,7 @@ from charmhelpers.core.hookenv import ( hook_name, local_unit, log, + relation_get, relation_ids, relation_set, relations_of_type, @@ -126,7 +127,7 @@ class CheckException(Exception): class Check(object): - shortname_re = '[A-Za-z0-9-_.]+$' + shortname_re = '[A-Za-z0-9-_.@]+$' service_template = (""" #--------------------------------------------------- # This file is Juju managed @@ -260,11 +261,23 @@ class NRPE(object): relation = relation_ids('nrpe-external-master') if relation: log("Setting charm primary status {}".format(primary)) - for rid in relation_ids('nrpe-external-master'): + for rid in relation: relation_set(relation_id=rid, relation_settings={'primary': self.primary}) + self.remove_check_queue = set() def add_check(self, *args, **kwargs): + shortname = None + if kwargs.get('shortname') is None: + if len(args) > 0: + shortname = args[0] + else: + shortname = kwargs['shortname'] + self.checks.append(Check(*args, **kwargs)) + try: + self.remove_check_queue.remove(shortname) + except KeyError: + pass def remove_check(self, *args, **kwargs): if kwargs.get('shortname') is None: @@ -281,6 +294,7 @@ class NRPE(object): check = Check(*args, **kwargs) check.remove(self.hostname) + self.remove_check_queue.add(kwargs['shortname']) def write(self): try: @@ -305,7 +319,7 @@ class NRPE(object): # update-status hooks are configured to firing every 5 minutes by # default. When nagios-nrpe-server is restarted, the nagios server - # reports checks failing causing unneccessary alerts. Let's not restart + # reports checks failing causing unnecessary alerts. Let's not restart # on update-status hooks. if not hook_name() == 'update-status': service('restart', 'nagios-nrpe-server') @@ -313,7 +327,24 @@ class NRPE(object): monitor_ids = relation_ids("local-monitors") + \ relation_ids("nrpe-external-master") for rid in monitor_ids: - relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + reldata = relation_get(unit=local_unit(), rid=rid) + if 'monitors' in reldata: + # update the existing set of monitors with the new data + old_monitors = yaml.safe_load(reldata['monitors']) + old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe'] + # remove keys that are in the remove_check_queue + old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items() + if k not in self.remove_check_queue} + # update/add nrpe_monitors + old_nrpe_monitors.update(nrpe_monitors) + old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors + # write back to the relation + relation_set(relation_id=rid, monitors=yaml.dump(old_monitors)) + else: + # write a brand new set of monitors, as no existing ones. 
+ relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + + self.remove_check_queue.clear() def get_nagios_hostcontext(relation_name='nrpe-external-master'): @@ -410,16 +441,26 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True): os.chmod(checkpath, 0o644) -def copy_nrpe_checks(): +def copy_nrpe_checks(nrpe_files_dir=None): """ Copy the nrpe checks into place """ NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' - nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', - 'charmhelpers', 'contrib', 'openstack', - 'files') - + if nrpe_files_dir is None: + # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks + for segment in ['.', 'hooks']: + nrpe_files_dir = os.path.abspath(os.path.join( + os.getenv('CHARM_DIR'), + segment, + 'charmhelpers', + 'contrib', + 'openstack', + 'files')) + if os.path.isdir(nrpe_files_dir): + break + else: + raise RuntimeError("Couldn't find charmhelpers directory") if not os.path.exists(NAGIOS_PLUGINS): os.makedirs(NAGIOS_PLUGINS) for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): diff --git a/charmhelpers/contrib/hahelpers/apache.py b/charmhelpers/contrib/hahelpers/apache.py index 22acb683e6cec8648451543d235286be151dbf2a..94af50934d1086e3360c7cba96aeaa5919c70085 100644 --- a/charmhelpers/contrib/hahelpers/apache.py +++ b/charmhelpers/contrib/hahelpers/apache.py @@ -23,8 +23,8 @@ # import os -import subprocess +from charmhelpers.core import host from charmhelpers.core.hookenv import ( config as config_get, relation_get, @@ -82,14 +82,4 @@ def retrieve_ca_cert(cert_file): def install_ca_cert(ca_cert): - if ca_cert: - cert_file = ('/usr/local/share/ca-certificates/' - 'keystone_juju_ca_cert.crt') - old_cert = retrieve_ca_cert(cert_file) - if old_cert and old_cert == ca_cert: - log("CA cert is the same as installed version", level=INFO) - else: - log("Installing new CA cert", level=INFO) - with open(cert_file, 'wb') as crt: - crt.write(ca_cert) - subprocess.check_call(['update-ca-certificates', '--fresh']) + host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert') diff --git a/charmhelpers/contrib/hardening/apache/checks/config.py b/charmhelpers/contrib/hardening/apache/checks/config.py index 06482aac1a8efb2bc6ae3e84da355d210287fc88..341da9eee10f73cbe3d7e7e5cf91b57b4d2a89b4 100644 --- a/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/charmhelpers/contrib/hardening/apache/checks/config.py @@ -14,6 +14,7 @@ import os import re +import six import subprocess @@ -95,6 +96,8 @@ class ApacheConfContext(object): ctxt = settings['hardening'] out = subprocess.check_output(['apache2', '-v']) + if six.PY3: + out = out.decode('utf-8') ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1) ctxt['apache_icondir'] = '/usr/share/apache2/icons/' diff --git a/charmhelpers/contrib/hardening/audits/apache.py b/charmhelpers/contrib/hardening/audits/apache.py index d32bf44e8fec1afe3cbd0c0d0114a6591ebcbd9c..04825f5ada0c5b0bb9fc0955baa9a10fa199184d 100644 --- a/charmhelpers/contrib/hardening/audits/apache.py +++ b/charmhelpers/contrib/hardening/audits/apache.py @@ -15,7 +15,7 @@ import re import subprocess -from six import string_types +import six from charmhelpers.core.hookenv import ( log, @@ -35,7 +35,7 @@ class DisabledModuleAudit(BaseAudit): def __init__(self, modules): if modules is None: self.modules = [] - elif isinstance(modules, string_types): + elif isinstance(modules, six.string_types): self.modules = [modules] else: self.modules = modules @@ -69,6 +69,8 @@ class 
DisabledModuleAudit(BaseAudit): def _get_loaded_modules(): """Returns the modules which are enabled in Apache.""" output = subprocess.check_output(['apache2ctl', '-M']) + if six.PY3: + output = output.decode('utf-8') modules = [] for line in output.splitlines(): # Each line of the enabled module output looks like: diff --git a/charmhelpers/contrib/hardening/audits/apt.py b/charmhelpers/contrib/hardening/audits/apt.py index 3dc14e3c6f26a98ebd19bf747acc5001cf91765e..67521e172942078ec6538b851c440a10247320d0 100644 --- a/charmhelpers/contrib/hardening/audits/apt.py +++ b/charmhelpers/contrib/hardening/audits/apt.py @@ -13,7 +13,6 @@ # limitations under the License. from __future__ import absolute_import # required for external apt import -from apt import apt_pkg from six import string_types from charmhelpers.fetch import ( @@ -26,6 +25,7 @@ from charmhelpers.core.hookenv import ( WARNING, ) from charmhelpers.contrib.hardening.audits import BaseAudit +from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg class AptConfig(BaseAudit): diff --git a/charmhelpers/contrib/hardening/harden.py b/charmhelpers/contrib/hardening/harden.py index b55764cdf55e4c94d4adda85e0f3e907c4593a8d..63f21b9c9855065da3be875c01a2c94db7df47b4 100644 --- a/charmhelpers/contrib/hardening/harden.py +++ b/charmhelpers/contrib/hardening/harden.py @@ -27,6 +27,8 @@ from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks from charmhelpers.contrib.hardening.apache.checks import run_apache_checks +_DISABLE_HARDENING_FOR_UNIT_TEST = False + def harden(overrides=None): """Hardening decorator. @@ -47,16 +49,28 @@ def harden(overrides=None): provided with 'harden' config. :returns: Returns value returned by decorated function once executed. """ + if overrides is None: + overrides = [] + def _harden_inner1(f): - log("Hardening function '%s'" % (f.__name__), level=DEBUG) + # As this has to be py2.7 compat, we can't use nonlocal. Use a trick + # to capture the dictionary that can then be updated. + _logged = {'done': False} def _harden_inner2(*args, **kwargs): + # knock out hardening via a config var; normally it won't get + # disabled. 
+ if _DISABLE_HARDENING_FOR_UNIT_TEST: + return f(*args, **kwargs) + if not _logged['done']: + log("Hardening function '%s'" % (f.__name__), level=DEBUG) + _logged['done'] = True RUN_CATALOG = OrderedDict([('os', run_os_checks), ('ssh', run_ssh_checks), ('mysql', run_mysql_checks), ('apache', run_apache_checks)]) - enabled = overrides or (config("harden") or "").split() + enabled = overrides[:] or (config("harden") or "").split() if enabled: modules_to_run = [] # modules will always be performed in the following order diff --git a/charmhelpers/contrib/openstack/amulet/deployment.py b/charmhelpers/contrib/openstack/amulet/deployment.py index 5afbbd87c13e2b168b088c4da51b3b63ab4d07a2..77925cc23932ccba0c3732100669a2405c4915e3 100644 --- a/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/charmhelpers/contrib/openstack/amulet/deployment.py @@ -21,6 +21,9 @@ from collections import OrderedDict from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) DEBUG = logging.DEBUG ERROR = logging.ERROR @@ -165,7 +168,8 @@ class OpenStackAmuletDeployment(AmuletDeployment): 'nrpe', 'openvswitch-odl', 'neutron-api-odl', 'odl-controller', 'cinder-backup', 'nexentaedge-data', 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'])) + 'cinder-nexentaedge', 'nexentaedge-mgmt', + 'ceilometer-agent'])) if self.openstack: for svc in services: @@ -271,11 +275,8 @@ class OpenStackAmuletDeployment(AmuletDeployment): release. """ # Must be ordered by OpenStack release (not by Ubuntu release): - (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, - self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike, self.xenial_queens, - self.bionic_queens,) = range(13) + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) releases = { ('trusty', None): self.trusty_icehouse, @@ -291,6 +292,12 @@ class OpenStackAmuletDeployment(AmuletDeployment): ('zesty', None): self.zesty_ocata, ('artful', None): self.artful_pike, ('bionic', None): self.bionic_queens, + ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, + ('bionic', 'cloud:bionic-stein'): self.bionic_stein, + ('bionic', 'cloud:bionic-train'): self.bionic_train, + ('cosmic', None): self.cosmic_rocky, + ('disco', None): self.disco_stein, + ('eoan', None): self.eoan_train, } return releases[(self.series, self.openstack)] @@ -306,6 +313,9 @@ class OpenStackAmuletDeployment(AmuletDeployment): ('zesty', 'ocata'), ('artful', 'pike'), ('bionic', 'queens'), + ('cosmic', 'rocky'), + ('disco', 'stein'), + ('eoan', 'train'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] @@ -313,6 +323,23 @@ class OpenStackAmuletDeployment(AmuletDeployment): else: return releases[self.series] + def get_percona_service_entry(self, memory_constraint=None): + """Return a amulet service entry for percona cluster. + + :param memory_constraint: Override the default memory constraint + in the service entry. + :type memory_constraint: str + :returns: Amulet service entry. 
+ :rtype: dict + """ + memory_constraint = memory_constraint or '3072M' + svc_entry = { + 'name': 'percona-cluster', + 'constraints': {'mem': memory_constraint}} + if self._get_openstack_release() <= self.trusty_mitaka: + svc_entry['location'] = 'cs:trusty/percona-cluster' + return svc_entry + def get_ceph_expected_pools(self, radosgw=False): """Return a list of expected ceph pools in a ceph + cinder + glance test scenario, based on OpenStack release and whether ceph radosgw diff --git a/charmhelpers/contrib/openstack/amulet/utils.py b/charmhelpers/contrib/openstack/amulet/utils.py index b71b2b1910959f5dbe7860ff3d14d45b8e9e2d90..7d95a59047b236a691f81b56d5c19fb314cb70f4 100644 --- a/charmhelpers/contrib/openstack/amulet/utils.py +++ b/charmhelpers/contrib/openstack/amulet/utils.py @@ -24,7 +24,8 @@ import urlparse import cinderclient.v1.client as cinder_client import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1.client as glance_client +import glanceclient.v1 as glance_client +import glanceclient.v2 as glance_clientv2 import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client from keystoneauth1.identity import ( @@ -40,6 +41,7 @@ import novaclient import pika import swiftclient +from charmhelpers.core.decorators import retry_on_exception from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -50,6 +52,18 @@ ERROR = logging.ERROR NOVA_CLIENT_VERSION = "2" +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', + 'xenial_newton', 'yakkety_newton', + 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', + 'xenial_queens', 'bionic_queens', + 'bionic_rocky', 'cosmic_rocky', + 'bionic_stein', 'disco_stein', + 'bionic_train', 'eoan_train', +] + class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. @@ -63,7 +77,34 @@ class OpenStackAmuletUtils(AmuletUtils): super(OpenStackAmuletUtils, self).__init__(log_level) def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, openstack_release=None): + """Validate endpoint data. Pick the correct validator based on + OpenStack release. Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): """Validate endpoint data. Validate actual endpoint data vs expected endpoint data. The ports @@ -92,7 +133,7 @@ class OpenStackAmuletUtils(AmuletUtils): return 'endpoint not found' def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): + public_port, expected, expected_num_eps=3): """Validate keystone v3 endpoint data. Validate the v3 endpoint data which has changed from v2. 
The @@ -138,10 +179,89 @@ class OpenStackAmuletUtils(AmuletUtils): if ret: return 'unexpected endpoint data - {}'.format(ret) - if len(found) != 3: + if len(found) != expected_num_eps: return 'Unexpected number of endpoints found' - def validate_svc_catalog_endpoint_data(self, expected, actual): + def convert_svc_catalog_endpoint_data_to_v3(self, ep_data): + """Convert v2 endpoint data into v3. + + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + """ + self.log.warn("Endpoint ID and Region ID validation is limited to not " + "null checks after v2 to v3 conversion") + for svc in ep_data.keys(): + assert len(ep_data[svc]) == 1, "Unknown data format" + svc_ep_data = ep_data[svc][0] + ep_data[svc] = [ + { + 'url': svc_ep_data['adminURL'], + 'interface': 'admin', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['publicURL'], + 'interface': 'public', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}, + { + 'url': svc_ep_data['internalURL'], + 'interface': 'internal', + 'region': svc_ep_data['region'], + 'region_id': self.not_null, + 'id': self.not_null}] + return ep_data + + def validate_svc_catalog_endpoint_data(self, expected, actual, + openstack_release=None): + """Validate service catalog endpoint data. Pick the correct validator + for the OpenStack version. Expected data should be in the v2 format: + { + 'service_name1': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + 'service_name2': [ + { + 'adminURL': adminURL, + 'id': id, + 'region': region. + 'publicURL': publicURL, + 'internalURL': internalURL + }], + } + + """ + validation_function = self.validate_v2_svc_catalog_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_svc_catalog_endpoint_data + expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) + return validation_function(expected, actual) + + def validate_v2_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. Validate a list of actual service catalog endpoints vs a list of @@ -310,6 +430,7 @@ class OpenStackAmuletUtils(AmuletUtils): self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + @retry_on_exception(num_retries=5, base_delay=1) def keystone_wait_for_propagation(self, sentry_relation_pairs, api_version): """Iterate over list of sentry and relation tuples and verify that @@ -328,7 +449,7 @@ class OpenStackAmuletUtils(AmuletUtils): if rel.get('api_version') != str(api_version): raise Exception("api_version not propagated through relation" " data yet ('{}' != '{}')." 
- "".format(rel['api_version'], api_version)) + "".format(rel.get('api_version'), api_version)) def keystone_configure_api_version(self, sentry_relation_pairs, deployment, api_version): @@ -350,16 +471,13 @@ class OpenStackAmuletUtils(AmuletUtils): deployment._auto_wait_for_status() self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant, api_version=2): + def authenticate_cinder_admin(self, keystone, api_version=2): """Authenticates admin user with cinder.""" - # NOTE(beisner): cinder python client doesn't accept tokens. - keystone_ip = keystone_sentry.info['public-address'] - ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) + self.log.debug('Authenticating cinder admin...') _clients = { 1: cinder_client.Client, 2: cinder_clientv2.Client} - return _clients[api_version](username, password, tenant, ept) + return _clients[api_version](session=keystone.session) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -367,13 +485,36 @@ class OpenStackAmuletUtils(AmuletUtils): project_domain_name=None, project_name=None): """Authenticate with Keystone""" self.log.debug('Authenticating with keystone...') - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" + if not api_version: + api_version = 2 + sess, auth = self.get_keystone_session( + keystone_ip=keystone_ip, + username=username, + password=password, + api_version=api_version, + admin_port=admin_port, + user_domain_name=user_domain_name, + domain_name=domain_name, + project_domain_name=project_domain_name, + project_name=project_name + ) + if api_version == 2: + client = keystone_client.Client(session=sess) + else: + client = keystone_client_v3.Client(session=sess) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(sess) + return client + + def get_keystone_session(self, keystone_ip, username, password, + api_version=False, admin_port=False, + user_domain_name=None, domain_name=None, + project_domain_name=None, project_name=None): + """Return a keystone session object""" + ep = self.get_keystone_endpoint(keystone_ip, + api_version=api_version, + admin_port=admin_port) + if api_version == 2: auth = v2.Password( username=username, password=password, @@ -381,12 +522,7 @@ class OpenStackAmuletUtils(AmuletUtils): auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client else: - ep = base_ep + "/v3" auth = v3.Password( user_domain_name=user_domain_name, username=username, @@ -397,10 +533,57 @@ class OpenStackAmuletUtils(AmuletUtils): auth_url=ep ) sess = keystone_session.Session(auth=auth) - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client + return (sess, auth) + + def get_keystone_endpoint(self, keystone_ip, api_version=None, + admin_port=False): + """Return keystone endpoint""" + port = 5000 + if admin_port: + port = 35357 + base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), + port) + if api_version == 2: + ep = base_ep + "/v2.0" + else: + ep = base_ep + "/v3" + return ep + + def get_default_keystone_session(self, keystone_sentry, + 
openstack_release=None, api_version=2): + """Return a keystone session object and client object assuming standard + default settings + + Example call in amulet tests: + self.keystone_session, self.keystone = u.get_default_keystone_session( + self.keystone_sentry, + openstack_release=self._get_openstack_release()) + + The session can then be used to auth other clients: + neutronclient.Client(session=session) + aodh_client.Client(session=session) + eyc + """ + self.log.debug('Authenticating keystone admin...') + # 11 => xenial_queens + if api_version == 3 or (openstack_release and openstack_release >= 11): + client_class = keystone_client_v3.Client + api_version = 3 + else: + client_class = keystone_client.Client + keystone_ip = keystone_sentry.info['public-address'] + session, auth = self.get_keystone_session( + keystone_ip, + api_version=api_version, + username='admin', + password='openstack', + project_name='admin', + user_domain_name='admin_domain', + project_domain_name='admin_domain') + client = client_class(session=session) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(session) + return session, client def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, @@ -440,13 +623,13 @@ class OpenStackAmuletUtils(AmuletUtils): return self.authenticate_keystone(keystone_ip, user, password, project_name=tenant) - def authenticate_glance_admin(self, keystone): + def authenticate_glance_admin(self, keystone, force_v1_client=False): """Authenticates admin user with glance.""" self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', interface='adminURL') - if keystone.session: - return glance_client.Client(ep, session=keystone.session) + if not force_v1_client and keystone.session: + return glance_clientv2.Client("2", session=keystone.session) else: return glance_client.Client(ep, token=keystone.auth_token) @@ -502,42 +685,66 @@ class OpenStackAmuletUtils(AmuletUtils): nova.flavors.create(name, ram, vcpus, disk, flavorid, ephemeral, swap, rxtx_factor, is_public) - def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance, - validate and return a resource pointer. - - :param glance: pointer to authenticated glance connection + def glance_create_image(self, glance, image_name, image_url, + download_dir='tests', + hypervisor_type=None, + disk_format='qcow2', + architecture='x86_64', + container_format='bare'): + """Download an image and upload it to glance, validate its status + and return an image object pointer. KVM defaults, can override for + LXD. 
+ + :param glance: pointer to authenticated glance api connection :param image_name: display name for new image + :param image_url: url to retrieve + :param download_dir: directory to store downloaded image file + :param hypervisor_type: glance image hypervisor property + :param disk_format: glance image disk format + :param architecture: glance image architecture property + :param container_format: glance image container format :returns: glance image pointer """ - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) + self.log.debug('Creating glance image ({}) from ' + '{}...'.format(image_name, image_url)) - # Download cirros image - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + # Download image + http_proxy = os.getenv('OS_TEST_HTTP_PROXY') + self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: proxies = {'http': http_proxy} opener = urllib.FancyURLopener(proxies) else: opener = urllib.FancyURLopener() - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - local_path = os.path.join('tests', cirros_img) - - if not os.path.exists(local_path): - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - opener.retrieve(cirros_url, local_path) - f.close() + abs_file_name = os.path.join(download_dir, image_name) + if not os.path.exists(abs_file_name): + opener.retrieve(image_url, abs_file_name) # Create glance image - with open(local_path) as f: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', data=f) + glance_properties = { + 'architecture': architecture, + } + if hypervisor_type: + glance_properties['hypervisor_type'] = hypervisor_type + # Create glance image + if float(glance.version) < 2.0: + with open(abs_file_name) as f: + image = glance.images.create( + name=image_name, + is_public=True, + disk_format=disk_format, + container_format=container_format, + properties=glance_properties, + data=f) + else: + image = glance.images.create( + name=image_name, + visibility="public", + disk_format=disk_format, + container_format=container_format) + glance.images.upload(image.id, open(abs_file_name, 'rb')) + glance.images.update(image.id, **glance_properties) # Wait for image to reach active status img_id = image.id @@ -552,24 +759,68 @@ class OpenStackAmuletUtils(AmuletUtils): self.log.debug('Validating image attributes...') val_img_name = glance.images.get(img_id).name val_img_stat = glance.images.get(img_id).status - val_img_pub = glance.images.get(img_id).is_public val_img_cfmt = glance.images.get(img_id).container_format val_img_dfmt = glance.images.get(img_id).disk_format + + if float(glance.version) < 2.0: + val_img_pub = glance.images.get(img_id).is_public + else: + val_img_pub = glance.images.get(img_id).visibility == "public" + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' 'container fmt:{} disk fmt:{}'.format( val_img_name, val_img_pub, img_id, val_img_stat, val_img_cfmt, val_img_dfmt)) if val_img_name == image_name and val_img_stat == 'active' \ - and val_img_pub is True and val_img_cfmt == 'bare' \ - and val_img_dfmt == 'qcow2': + and val_img_pub is True and val_img_cfmt == container_format \ + and val_img_dfmt == disk_format: self.log.debug(msg_attr) else: - msg = ('Volume validation failed, {}'.format(msg_attr)) + msg = ('Image validation failed, 
{}'.format(msg_attr)) amulet.raise_status(amulet.FAIL, msg=msg) return image + def create_cirros_image(self, glance, image_name, hypervisor_type=None): + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. + + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :param hypervisor_type: glance image hypervisor property + :returns: glance image pointer + """ + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'glance_create_image instead of ' + 'create_cirros_image.') + + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Get cirros image URL + http_proxy = os.getenv('OS_TEST_HTTP_PROXY') + self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open('http://download.cirros-cloud.net/version/released') + version = f.read().strip() + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', + version, cirros_img) + f.close() + + return self.glance_create_image( + glance, + image_name, + cirros_url, + hypervisor_type=hypervisor_type) + def delete_image(self, glance, image): """Delete the specified image.""" @@ -821,6 +1072,9 @@ class OpenStackAmuletUtils(AmuletUtils): cmd, code, output)) amulet.raise_status(amulet.FAIL, msg=msg) + # For mimic ceph osd lspools output + output = output.replace("\n", ",") + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, for pool in str(output).split(','): pool_id_name = pool.split(' ') @@ -858,9 +1112,12 @@ class OpenStackAmuletUtils(AmuletUtils): :returns: List of pool name, object count, kb disk space used """ df = self.get_ceph_df(sentry_unit) - pool_name = df['pools'][pool_id]['name'] - obj_count = df['pools'][pool_id]['stats']['objects'] - kb_used = df['pools'][pool_id]['stats']['kb_used'] + for pool in df['pools']: + if pool['id'] == pool_id: + pool_name = pool['name'] + obj_count = pool['stats']['objects'] + kb_used = pool['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' '{} kb used'.format(pool_name, pool_id, obj_count, kb_used)) diff --git a/charmhelpers/contrib/openstack/audits/__init__.py b/charmhelpers/contrib/openstack/audits/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7f7e5f79a5d5fe3cb374814e32ea16f6060f4f27 --- /dev/null +++ b/charmhelpers/contrib/openstack/audits/__init__.py @@ -0,0 +1,212 @@ +# Copyright 2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""OpenStack Security Audit code""" + +import collections +from enum import Enum +import traceback + +from charmhelpers.core.host import cmp_pkgrevno +import charmhelpers.contrib.openstack.utils as openstack_utils +import charmhelpers.core.hookenv as hookenv + + +class AuditType(Enum): + OpenStackSecurityGuide = 1 + + +_audits = {} + +Audit = collections.namedtuple('Audit', 'func filters') + + +def audit(*args): + """Decorator to register an audit. + + These are used to generate audits that can be run on a + deployed system that matches the given configuration + + :param args: List of functions to filter tests against + :type args: List[Callable[Dict]] + """ + def wrapper(f): + test_name = f.__name__ + if _audits.get(test_name): + raise RuntimeError( + "Test name '{}' used more than once" + .format(test_name)) + non_callables = [fn for fn in args if not callable(fn)] + if non_callables: + raise RuntimeError( + "Configuration includes non-callable filters: {}" + .format(non_callables)) + _audits[test_name] = Audit(func=f, filters=args) + return f + return wrapper + + +def is_audit_type(*args): + """This audit is included in the specified kinds of audits. + + :param *args: List of AuditTypes to include this audit in + :type args: List[AuditType] + :rtype: Callable[Dict] + """ + def _is_audit_type(audit_options): + if audit_options.get('audit_type') in args: + return True + else: + return False + return _is_audit_type + + +def since_package(pkg, pkg_version): + """This audit should be run after the specified package version (incl). + + :param pkg: Package name to compare + :type pkg: str + :param release: The package version + :type release: str + :rtype: Callable[Dict] + """ + def _since_package(audit_options=None): + return cmp_pkgrevno(pkg, pkg_version) >= 0 + + return _since_package + + +def before_package(pkg, pkg_version): + """This audit should be run before the specified package version (excl). + + :param pkg: Package name to compare + :type pkg: str + :param release: The package version + :type release: str + :rtype: Callable[Dict] + """ + def _before_package(audit_options=None): + return not since_package(pkg, pkg_version)() + + return _before_package + + +def since_openstack_release(pkg, release): + """This audit should run after the specified OpenStack version (incl). + + :param pkg: Package name to compare + :type pkg: str + :param release: The OpenStack release codename + :type release: str + :rtype: Callable[Dict] + """ + def _since_openstack_release(audit_options=None): + _release = openstack_utils.get_os_codename_package(pkg) + return openstack_utils.CompareOpenStackReleases(_release) >= release + + return _since_openstack_release + + +def before_openstack_release(pkg, release): + """This audit should run before the specified OpenStack version (excl). + + :param pkg: Package name to compare + :type pkg: str + :param release: The OpenStack release codename + :type release: str + :rtype: Callable[Dict] + """ + def _before_openstack_release(audit_options=None): + return not since_openstack_release(pkg, release)() + + return _before_openstack_release + + +def it_has_config(config_key): + """This audit should be run based on specified config keys. + + :param config_key: Config key to look for + :type config_key: str + :rtype: Callable[Dict] + """ + def _it_has_config(audit_options): + return audit_options.get(config_key) is not None + + return _it_has_config + + +def run(audit_options): + """Run the configured audits with the specified audit_options. 
+ + :param audit_options: Configuration for the audit + :type audit_options: Config + + :rtype: Dict[str, str] + """ + errors = {} + results = {} + for name, audit in sorted(_audits.items()): + result_name = name.replace('_', '-') + if result_name in audit_options.get('excludes', []): + print( + "Skipping {} because it is" + "excluded in audit config" + .format(result_name)) + continue + if all(p(audit_options) for p in audit.filters): + try: + audit.func(audit_options) + print("{}: PASS".format(name)) + results[result_name] = { + 'success': True, + } + except AssertionError as e: + print("{}: FAIL ({})".format(name, e)) + results[result_name] = { + 'success': False, + 'message': e, + } + except Exception as e: + print("{}: ERROR ({})".format(name, e)) + errors[name] = e + results[result_name] = { + 'success': False, + 'message': e, + } + for name, error in errors.items(): + print("=" * 20) + print("Error in {}: ".format(name)) + traceback.print_tb(error.__traceback__) + print() + return results + + +def action_parse_results(result): + """Parse the result of `run` in the context of an action. + + :param result: The result of running the security-checklist + action on a unit + :type result: Dict[str, Dict[str, str]] + :rtype: int + """ + passed = True + for test, result in result.items(): + if result['success']: + hookenv.action_set({test: 'PASS'}) + else: + hookenv.action_set({test: 'FAIL - {}'.format(result['message'])}) + passed = False + if not passed: + hookenv.action_fail("One or more tests failed") + return 0 if passed else 1 diff --git a/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/charmhelpers/contrib/openstack/audits/openstack_security_guide.py new file mode 100644 index 0000000000000000000000000000000000000000..b7b8a60f56641a693acc695ef3f077f9a9d5477e --- /dev/null +++ b/charmhelpers/contrib/openstack/audits/openstack_security_guide.py @@ -0,0 +1,270 @@ +# Copyright 2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import configparser +import glob +import os.path +import subprocess + +from charmhelpers.contrib.openstack.audits import ( + audit, + AuditType, + # filters + is_audit_type, + it_has_config, +) + +from charmhelpers.core.hookenv import ( + cached, +) + +""" +The Security Guide suggests a specific list of files inside the +config directory for the service having 640 specifically, but +by ensuring the containing directory is 750, only the owner can +write, and only the group can read files within the directory. + +By restricting access to the containing directory, we can more +effectively ensure that there is no accidental leakage if a new +file is added to the service without being added to the security +guide, and to this check. 
+""" +FILE_ASSERTIONS = { + 'barbican': { + '/etc/barbican': {'group': 'barbican', 'mode': '750'}, + }, + 'ceph-mon': { + '/var/lib/charm/ceph-mon/ceph.conf': + {'owner': 'root', 'group': 'root', 'mode': '644'}, + '/etc/ceph/ceph.client.admin.keyring': + {'owner': 'ceph', 'group': 'ceph'}, + '/etc/ceph/rbdmap': {'mode': '644'}, + '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, + '/var/lib/ceph/bootstrap-*/ceph.keyring': + {'owner': 'ceph', 'group': 'ceph', 'mode': '600'} + }, + 'ceph-osd': { + '/var/lib/charm/ceph-osd/ceph.conf': + {'owner': 'ceph', 'group': 'ceph', 'mode': '644'}, + '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, + '/var/lib/ceph/*': {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, + '/var/lib/ceph/bootstrap-*/ceph.keyring': + {'owner': 'ceph', 'group': 'ceph', 'mode': '600'}, + '/var/lib/ceph/radosgw': + {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, + }, + 'cinder': { + '/etc/cinder': {'group': 'cinder', 'mode': '750'}, + }, + 'glance': { + '/etc/glance': {'group': 'glance', 'mode': '750'}, + }, + 'keystone': { + '/etc/keystone': + {'owner': 'keystone', 'group': 'keystone', 'mode': '750'}, + }, + 'manilla': { + '/etc/manila': {'group': 'manilla', 'mode': '750'}, + }, + 'neutron-gateway': { + '/etc/neutron': {'group': 'neutron', 'mode': '750'}, + }, + 'neutron-api': { + '/etc/neutron/': {'group': 'neutron', 'mode': '750'}, + }, + 'nova-cloud-controller': { + '/etc/nova': {'group': 'nova', 'mode': '750'}, + }, + 'nova-compute': { + '/etc/nova/': {'group': 'nova', 'mode': '750'}, + }, + 'openstack-dashboard': { + # From security guide + '/etc/openstack-dashboard/local_settings.py': + {'group': 'horizon', 'mode': '640'}, + }, +} + +Ownership = collections.namedtuple('Ownership', 'owner group mode') + + +@cached +def _stat(file): + """ + Get the Ownership information from a file. + + :param file: The path to a file to stat + :type file: str + :returns: owner, group, and mode of the specified file + :rtype: Ownership + :raises subprocess.CalledProcessError: If the underlying stat fails + """ + out = subprocess.check_output( + ['stat', '-c', '%U %G %a', file]).decode('utf-8') + return Ownership(*out.strip().split(' ')) + + +@cached +def _config_ini(path): + """ + Parse an ini file + + :param path: The path to a file to parse + :type file: str + :returns: Configuration contained in path + :rtype: Dict + """ + # When strict is enabled, duplicate options are not allowed in the + # parsed INI; however, Oslo allows duplicate values. This change + # causes us to ignore the duplicate values which is acceptable as + # long as we don't validate any multi-value options + conf = configparser.ConfigParser(strict=False) + conf.read(path) + return dict(conf) + + +def _validate_file_ownership(owner, group, file_name, optional=False): + """ + Validate that a specified file is owned by `owner:group`. 
+ + :param owner: Name of the owner + :type owner: str + :param group: Name of the group + :type group: str + :param file_name: Path to the file to verify + :type file_name: str + :param optional: Is this file optional, + ie: Should this test fail when it's missing + :type optional: bool + """ + try: + ownership = _stat(file_name) + except subprocess.CalledProcessError as e: + print("Error reading file: {}".format(e)) + if not optional: + assert False, "Specified file does not exist: {}".format(file_name) + assert owner == ownership.owner, \ + "{} has an incorrect owner: {} should be {}".format( + file_name, ownership.owner, owner) + assert group == ownership.group, \ + "{} has an incorrect group: {} should be {}".format( + file_name, ownership.group, group) + print("Validate ownership of {}: PASS".format(file_name)) + + +def _validate_file_mode(mode, file_name, optional=False): + """ + Validate that a specified file has the specified permissions. + + :param mode: file mode that is desires + :type owner: str + :param file_name: Path to the file to verify + :type file_name: str + :param optional: Is this file optional, + ie: Should this test fail when it's missing + :type optional: bool + """ + try: + ownership = _stat(file_name) + except subprocess.CalledProcessError as e: + print("Error reading file: {}".format(e)) + if not optional: + assert False, "Specified file does not exist: {}".format(file_name) + assert mode == ownership.mode, \ + "{} has an incorrect mode: {} should be {}".format( + file_name, ownership.mode, mode) + print("Validate mode of {}: PASS".format(file_name)) + + +@cached +def _config_section(config, section): + """Read the configuration file and return a section.""" + path = os.path.join(config.get('config_path'), config.get('config_file')) + conf = _config_ini(path) + return conf.get(section) + + +@audit(is_audit_type(AuditType.OpenStackSecurityGuide), + it_has_config('files')) +def validate_file_ownership(config): + """Verify that configuration files are owned by the correct user/group.""" + files = config.get('files', {}) + for file_name, options in files.items(): + for key in options.keys(): + if key not in ["owner", "group", "mode"]: + raise RuntimeError( + "Invalid ownership configuration: {}".format(key)) + owner = options.get('owner', config.get('owner', 'root')) + group = options.get('group', config.get('group', 'root')) + optional = options.get('optional', config.get('optional', False)) + if '*' in file_name: + for file in glob.glob(file_name): + if file not in files.keys(): + if os.path.isfile(file): + _validate_file_ownership(owner, group, file, optional) + else: + if os.path.isfile(file_name): + _validate_file_ownership(owner, group, file_name, optional) + + +@audit(is_audit_type(AuditType.OpenStackSecurityGuide), + it_has_config('files')) +def validate_file_permissions(config): + """Verify that permissions on configuration files are secure enough.""" + files = config.get('files', {}) + for file_name, options in files.items(): + for key in options.keys(): + if key not in ["owner", "group", "mode"]: + raise RuntimeError( + "Invalid ownership configuration: {}".format(key)) + mode = options.get('mode', config.get('permissions', '600')) + optional = options.get('optional', config.get('optional', False)) + if '*' in file_name: + for file in glob.glob(file_name): + if file not in files.keys(): + if os.path.isfile(file): + _validate_file_mode(mode, file, optional) + else: + if os.path.isfile(file_name): + _validate_file_mode(mode, file_name, optional) + + 
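The file-ownership and file-mode checks above are not invoked directly; they are discovered and executed by the audit runner added in charmhelpers/contrib/openstack/audits/__init__.py. A minimal sketch of how a consuming charm's security-checklist action might drive them is shown below; the 'keystone' FILE_ASSERTIONS key, the /etc/keystone path and the keystone.conf file name are illustrative assumptions, not calling code taken from this diff.

    import charmhelpers.contrib.openstack.audits as audits
    from charmhelpers.contrib.openstack.audits import openstack_security_guide

    def security_checklist():
        # audit_options keys consumed by this module: 'files' feeds the
        # ownership/mode checks, 'config_path' + 'config_file' feed
        # _config_section(), and 'excludes' skips named audits.
        audit_options = {
            'audit_type': audits.AuditType.OpenStackSecurityGuide,
            'config_path': '/etc/keystone',   # assumed service config dir
            'config_file': 'keystone.conf',   # assumed service config file
            'files': openstack_security_guide.FILE_ASSERTIONS['keystone'],
            'excludes': [],
        }
        results = audits.run(audit_options)
        return audits.action_parse_results(results)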
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) +def validate_uses_keystone(audit_options): + """Validate that the service uses Keystone for authentication.""" + section = _config_section(audit_options, 'DEFAULT') + assert section is not None, "Missing section 'DEFAULT'" + assert section.get('auth_strategy') == "keystone", \ + "Application is not using Keystone" + + +@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) +def validate_uses_tls_for_keystone(audit_options): + """Verify that TLS is used to communicate with Keystone.""" + section = _config_section(audit_options, 'keystone_authtoken') + assert section is not None, "Missing section 'keystone_authtoken'" + assert not section.get('insecure') and \ + "https://" in section.get("auth_uri"), \ + "TLS is not used for Keystone" + + +@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) +def validate_uses_tls_for_glance(audit_options): + """Verify that TLS is used to communicate with Glance.""" + section = _config_section(audit_options, 'glance') + assert section is not None, "Missing section 'glance'" + assert not section.get('insecure') and \ + "https://" in section.get("api_servers"), \ + "TLS is not used for Glance" diff --git a/charmhelpers/contrib/openstack/cert_utils.py b/charmhelpers/contrib/openstack/cert_utils.py index de853b5371e915fb58bd6b29c1ea16a90ce7d08f..b494af64aeae55db44b669990725de81705104b2 100644 --- a/charmhelpers/contrib/openstack/cert_utils.py +++ b/charmhelpers/contrib/openstack/cert_utils.py @@ -25,7 +25,9 @@ from charmhelpers.core.hookenv import ( local_unit, network_get_primary_address, config, + related_units, relation_get, + relation_ids, unit_get, NoNetworkBinding, log, @@ -104,9 +106,11 @@ class CertRequest(object): sans = sorted(list(set(entry['addresses']))) request[entry['cn']] = {'sans': sans} if self.json_encode: - return {'cert_requests': json.dumps(request, sort_keys=True)} + req = {'cert_requests': json.dumps(request, sort_keys=True)} else: - return {'cert_requests': request} + req = {'cert_requests': request} + req['unit_name'] = local_unit().replace('/', '_') + return req def get_certificate_request(json_encode=True): @@ -178,13 +182,17 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None): os.symlink(hostname_key, custom_key) -def install_certs(ssl_dir, certs, chain=None): +def install_certs(ssl_dir, certs, chain=None, user='root', group='root'): """Install the certs passed into the ssl dir and append the chain if provided. :param ssl_dir: str Directory to create symlinks in :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}} :param chain: str Chain to be appended to certs + :param user: (Optional) Owner of certificate files. Defaults to 'root' + :type user: str + :param group: (Optional) Group of certificate files. 
Defaults to 'root' + :type group: str """ for cn, bundle in certs.items(): cert_filename = 'cert_{}'.format(cn) @@ -193,23 +201,29 @@ def install_certs(ssl_dir, certs, chain=None): if chain: # Append chain file so that clients that trust the root CA will # trust certs signed by an intermediate in the chain - cert_data = cert_data + chain + cert_data = cert_data + os.linesep + chain write_file( - path=os.path.join(ssl_dir, cert_filename), + path=os.path.join(ssl_dir, cert_filename), owner=user, group=group, content=cert_data, perms=0o640) write_file( - path=os.path.join(ssl_dir, key_filename), + path=os.path.join(ssl_dir, key_filename), owner=user, group=group, content=bundle['key'], perms=0o640) def process_certificates(service_name, relation_id, unit, - custom_hostname_link=None): + custom_hostname_link=None, user='root', group='root'): """Process the certificates supplied down the relation :param service_name: str Name of service the certifcates are for. :param relation_id: str Relation id providing the certs :param unit: str Unit providing the certs :param custom_hostname_link: str Name of custom link to create + :param user: (Optional) Owner of certificate files. Defaults to 'root' + :type user: str + :param group: (Optional) Group of certificate files. Defaults to 'root' + :type group: str + :returns: True if certificates processed for local unit or False + :rtype: bool """ data = relation_get(rid=relation_id, unit=unit) ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) @@ -221,7 +235,55 @@ def process_certificates(service_name, relation_id, unit, if certs: certs = json.loads(certs) install_ca_cert(ca.encode()) - install_certs(ssl_dir, certs, chain) + install_certs(ssl_dir, certs, chain, user=user, group=group) create_ip_cert_links( ssl_dir, custom_hostname_link=custom_hostname_link) + return True + return False + + +def get_requests_for_local_unit(relation_name=None): + """Extract any certificates data targeted at this unit down relation_name. + + :param relation_name: str Name of relation to check for data. + :returns: List of bundles of certificates. + :rtype: List of dicts + """ + local_name = local_unit().replace('/', '_') + raw_certs_key = '{}.processed_requests'.format(local_name) + relation_name = relation_name or 'certificates' + bundles = [] + for rid in relation_ids(relation_name): + for unit in related_units(rid): + data = relation_get(rid=rid, unit=unit) + if data.get(raw_certs_key): + bundles.append({ + 'ca': data['ca'], + 'chain': data.get('chain'), + 'certs': json.loads(data[raw_certs_key])}) + return bundles + + +def get_bundle_for_cn(cn, relation_name=None): + """Extract certificates for the given cn. + + :param cn: str Canonical Name on certificate. + :param relation_name: str Relation to check for certificates down. + :returns: Dictionary of certificate data, + :rtype: dict. 
+ """ + entries = get_requests_for_local_unit(relation_name) + cert_bundle = {} + for entry in entries: + for _cn, bundle in entry['certs'].items(): + if _cn == cn: + cert_bundle = { + 'cert': bundle['cert'], + 'key': bundle['key'], + 'chain': entry['chain'], + 'ca': entry['ca']} + break + if cert_bundle: + break + return cert_bundle diff --git a/charmhelpers/contrib/openstack/context.py b/charmhelpers/contrib/openstack/context.py index e6c0e9feb4de9efdb97397b85979f7a4ffcbcfd0..940fb6c98056d1ee35fc04ed33ba71349d923d96 100644 --- a/charmhelpers/contrib/openstack/context.py +++ b/charmhelpers/contrib/openstack/context.py @@ -18,6 +18,7 @@ import json import math import os import re +import socket import time from base64 import b64decode from subprocess import check_call, CalledProcessError @@ -29,6 +30,7 @@ from charmhelpers.fetch import ( filter_installed_packages, ) from charmhelpers.core.hookenv import ( + NoNetworkBinding, config, is_relation_made, local_unit, @@ -101,6 +103,10 @@ from charmhelpers.contrib.openstack.utils import ( git_determine_python_path, enable_memcache, snap_install_requested, + get_os_codename_install_source, + enable_memcache, + CompareOpenStackReleases, + os_release, ) from charmhelpers.core.unitdata import kv @@ -116,6 +122,7 @@ except ImportError: CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' ADDRESS_TYPES = ['admin', 'internal', 'public'] HAPROXY_RUN_DIR = '/var/run/haproxy/' +DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2" def ensure_packages(packages): @@ -246,13 +253,10 @@ class SharedDBContext(OSContextGenerator): 'database': self.database, 'database_user': self.user, 'database_password': rdata.get(password_setting), - 'database_type': 'mysql' + 'database_type': 'mysql+pymysql' } - # Note(coreycb): We can drop mysql+pymysql if we want when the - # following review lands, though it seems mysql+pymysql would - # be preferred. https://review.openstack.org/#/c/462190/ - if snap_install_requested(): - ctxt['database_type'] = 'mysql+pymysql' + if CompareOpenStackReleases(rel) < 'queens': + ctxt['database_type'] = 'mysql' if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) return ctxt @@ -345,6 +349,75 @@ class IdentityServiceContext(OSContextGenerator): mkdir(path=cachedir, owner=self.service_user, group=self.service_user, perms=0o700) + return cachedir + return None + + def _get_pkg_name(self, python_name='keystonemiddleware'): + """Get corresponding distro installed package for python + package name. + + :param python_name: nameof the python package + :type: string + """ + pkg_names = map(lambda x: x + python_name, ('python3-', 'python-')) + + for pkg in pkg_names: + if not filter_installed_packages((pkg,)): + return pkg + + return None + + def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel): + """Build Jinja2 context for full rendering of [keystone_authtoken] + section with variable names included. Re-constructed from former + template 'section-keystone-auth-mitaka'. 
+ + :param ctxt: Jinja2 context returned from self.__call__() + :type: dict + :param keystonemiddleware_os_rel: OpenStack release name of + keystonemiddleware package installed + """ + c = collections.OrderedDict((('auth_type', 'password'),)) + + # 'www_authenticate_uri' replaced 'auth_uri' since Stein, + # see keystonemiddleware upstream sources for more info + if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein': + c.update(( + ('www_authenticate_uri', "{}://{}:{}/v3".format( + ctxt.get('service_protocol', ''), + ctxt.get('service_host', ''), + ctxt.get('service_port', ''))),)) + else: + c.update(( + ('auth_uri', "{}://{}:{}/v3".format( + ctxt.get('service_protocol', ''), + ctxt.get('service_host', ''), + ctxt.get('service_port', ''))),)) + + c.update(( + ('auth_url', "{}://{}:{}/v3".format( + ctxt.get('auth_protocol', ''), + ctxt.get('auth_host', ''), + ctxt.get('auth_port', ''))), + ('project_domain_name', ctxt.get('admin_domain_name', '')), + ('user_domain_name', ctxt.get('admin_domain_name', '')), + ('project_name', ctxt.get('admin_tenant_name', '')), + ('username', ctxt.get('admin_user', '')), + ('password', ctxt.get('admin_password', '')), + ('signing_dir', ctxt.get('signing_dir', '')),)) + + return c + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + keystonemiddleware_os_release = None + if self._get_pkg_name(): + keystonemiddleware_os_release = os_release(self._get_pkg_name()) + + cachedir = self._setup_pki_cache() + if cachedir: ctxt['signing_dir'] = cachedir for rid in relation_ids(self.rel_name): @@ -370,8 +443,18 @@ class IdentityServiceContext(OSContextGenerator): 'api_version': api_version}) if float(api_version) > 2: - ctxt.update({'admin_domain_name': - rdata.get('service_domain')}) + ctxt.update({ + 'admin_domain_name': rdata.get('service_domain'), + 'service_project_id': rdata.get('service_tenant_id'), + 'service_domain_id': rdata.get('service_domain_id')}) + + # we keep all veriables in ctxt for compatibility and + # add nested dictionary for keystone_authtoken generic + # templating + if keystonemiddleware_os_release: + ctxt['keystone_authtoken'] = \ + self._get_keystone_authtoken_ctxt( + ctxt, keystonemiddleware_os_release) if self.context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse @@ -383,6 +466,86 @@ class IdentityServiceContext(OSContextGenerator): return {} +class NovaVendorMetadataContext(OSContextGenerator): + """Context used for configuring nova vendor metadata on nova.conf file.""" + + def __init__(self, os_release_pkg, interfaces=None): + """Initialize the NovaVendorMetadataContext object. + + :param os_release_pkg: the package name to extract the OpenStack + release codename from. + :type os_release_pkg: str + :param interfaces: list of string values to be used as the Context's + relation interfaces. + :type interfaces: List[str] + """ + self.os_release_pkg = os_release_pkg + if interfaces is not None: + self.interfaces = interfaces + + def __call__(self): + cmp_os_release = CompareOpenStackReleases( + os_release(self.os_release_pkg)) + ctxt = {'vendor_data': False} + + vdata_providers = [] + vdata = config('vendor-data') + vdata_url = config('vendor-data-url') + + if vdata: + try: + # validate the JSON. If invalid, we do not set anything here + json.loads(vdata) + except (TypeError, ValueError) as e: + log('Error decoding vendor-data. 
{}'.format(e), level=ERROR) + else: + ctxt['vendor_data'] = True + # Mitaka does not support DynamicJSON + # so vendordata_providers is not needed + if cmp_os_release > 'mitaka': + vdata_providers.append('StaticJSON') + + if vdata_url: + if cmp_os_release > 'mitaka': + ctxt['vendor_data_url'] = vdata_url + vdata_providers.append('DynamicJSON') + else: + log('Dynamic vendor data unsupported' + ' for {}.'.format(cmp_os_release), level=ERROR) + if vdata_providers: + ctxt['vendordata_providers'] = ','.join(vdata_providers) + + return ctxt + + +class NovaVendorMetadataJSONContext(OSContextGenerator): + """Context used for writing nova vendor metadata json file.""" + + def __init__(self, os_release_pkg): + """Initialize the NovaVendorMetadataJSONContext object. + + :param os_release_pkg: the package name to extract the OpenStack + release codename from. + :type os_release_pkg: str + """ + self.os_release_pkg = os_release_pkg + + def __call__(self): + ctxt = {'vendor_data_json': '{}'} + + vdata = config('vendor-data') + if vdata: + try: + # validate the JSON. If invalid, we return empty. + json.loads(vdata) + except (TypeError, ValueError) as e: + log('Error decoding vendor-data. {}'.format(e), level=ERROR) + else: + ctxt['vendor_data_json'] = vdata + + return ctxt + + class AMQPContext(OSContextGenerator): def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): @@ -494,6 +657,19 @@ class AMQPContext(OSContextGenerator): ctxt['oslo_messaging_flags'] = config_flags_parser( oslo_messaging_flags) + oslo_messaging_driver = conf.get( + 'oslo-messaging-driver', DEFAULT_OSLO_MESSAGING_DRIVER) + if oslo_messaging_driver: + ctxt['oslo_messaging_driver'] = oslo_messaging_driver + + notification_format = conf.get('notification-format', None) + if notification_format: + ctxt['notification_format'] = notification_format + + send_notifications_to_logs = conf.get('send-notifications-to-logs', None) + if send_notifications_to_logs: + ctxt['send_notifications_to_logs'] = send_notifications_to_logs + if not self.complete: return {} @@ -545,6 +721,25 @@ class CephContext(OSContextGenerator): ensure_packages(['ceph-common']) return ctxt + def context_complete(self, ctxt): + """Overridden here to ensure the context is actually complete. + + We set `key` and `auth` to None here, by default, to ensure + that the context will always evaluate to incomplete until the + Ceph relation has actually sent these details; otherwise, + there is a potential race condition between the relation + appearing and the first unit actually setting this data on the + relation. + + :param ctxt: The current context members + :type ctxt: Dict[str, ANY] + :returns: True if the context is complete + :rtype: bool + """ + if 'auth' not in ctxt or 'key' not in ctxt: + return False + return super(CephContext, self).context_complete(ctxt) + class HAProxyContext(OSContextGenerator): """Provides half a context for the haproxy template, which describes @@ -570,7 +765,7 @@ class HAProxyContext(OSContextGenerator): addr = get_host_ip(unit_get('private-address')) l_unit = local_unit().replace('/', '-') - cluster_hosts = {} + cluster_hosts = collections.OrderedDict() # NOTE(jamespage): build out map of configured network endpoints # and associated backends @@ -706,6 +901,7 @@ class ApacheSSLContext(OSContextGenerator): # and service namespace accordingly. 
external_ports = [] service_namespace = None + user = group = 'root' def enable_modules(self): cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers'] @@ -782,7 +978,7 @@ class ApacheSSLContext(OSContextGenerator): addr = network_get_primary_address( ADDRESS_MAP[net_type]['binding'] ) - except NotImplementedError: + except (NotImplementedError, NoNetworkBinding): addr = fallback endpoint = resolve_address(net_type) @@ -1012,7 +1208,9 @@ class NeutronPortContext(OSContextGenerator): hwaddr_to_nic = {} hwaddr_to_ip = {} - for nic in list_nics(): + extant_nics = list_nics() + + for nic in extant_nics: # Ignore virtual interfaces (bond masters will be identified from # their slaves) if not is_phy_iface(nic): @@ -1043,10 +1241,11 @@ class NeutronPortContext(OSContextGenerator): # Entry is a MAC address for a valid interface that doesn't # have an IP address assigned yet. resolved.append(hwaddr_to_nic[entry]) - else: - # If the passed entry is not a MAC address, assume it's a valid - # interface, and that the user put it there on purpose (we can - # trust it to be the real external network). + elif entry in extant_nics: + # If the passed entry is not a MAC address and the interface + # exists, assume it's a valid interface, and that the user put + # it there on purpose (we can trust it to be the real external + # network). resolved.append(entry) # Ensure no duplicates @@ -1112,7 +1311,7 @@ class SubordinateConfigContext(OSContextGenerator): The subordinate interface allows subordinates to export their configuration requirements to the principle for multiple config - files and multiple serivces. Ie, a subordinate that has interfaces + files and multiple services. Ie, a subordinate that has interfaces to both glance and nova may export to following yaml blob as json:: glance: @@ -1294,11 +1493,12 @@ class WorkerConfigContext(OSContextGenerator): class WSGIWorkerConfigContext(WorkerConfigContext): def __init__(self, name=None, script=None, admin_script=None, - public_script=None, process_weight=1.00, + public_script=None, user=None, group=None, + process_weight=1.00, admin_process_weight=0.25, public_process_weight=0.75): self.service_name = name - self.user = name - self.group = name + self.user = user or name + self.group = group or name self.script = script self.admin_script = admin_script self.public_script = public_script @@ -1334,11 +1534,11 @@ class ZeroMQContext(OSContextGenerator): ctxt = {} if is_relation_made('zeromq-configuration', 'host'): for rid in relation_ids('zeromq-configuration'): - for unit in related_units(rid): - ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) - ctxt['zmq_host'] = relation_get('host', unit, rid) - ctxt['zmq_redis_address'] = relation_get( - 'zmq_redis_address', unit, rid) + for unit in related_units(rid): + ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) + ctxt['zmq_host'] = relation_get('host', unit, rid) + ctxt['zmq_redis_address'] = relation_get( + 'zmq_redis_address', unit, rid) return ctxt @@ -1425,6 +1625,26 @@ class NeutronAPIContext(OSContextGenerator): 'rel_key': 'enable-qos', 'default': False, }, + 'enable_nsg_logging': { + 'rel_key': 'enable-nsg-logging', + 'default': False, + }, + 'enable_nfg_logging': { + 'rel_key': 'enable-nfg-logging', + 'default': False, + }, + 'enable_port_forwarding': { + 'rel_key': 'enable-port-forwarding', + 'default': False, + }, + 'global_physnet_mtu': { + 'rel_key': 'global-physnet-mtu', + 'default': 1500, + }, + 'physical_network_mtus': { + 'rel_key': 'physical-network-mtus', + 'default': None, + }, } ctxt 
= self.get_neutron_options({}) for rid in relation_ids('neutron-plugin-api'): @@ -1436,10 +1656,22 @@ class NeutronAPIContext(OSContextGenerator): if 'l2-population' in rdata: ctxt.update(self.get_neutron_options(rdata)) + extension_drivers = [] + if ctxt['enable_qos']: - ctxt['extension_drivers'] = 'qos' - else: - ctxt['extension_drivers'] = '' + extension_drivers.append('qos') + + if ctxt['enable_nsg_logging']: + extension_drivers.append('log') + + ctxt['extension_drivers'] = ','.join(extension_drivers) + + l3_extension_plugins = [] + + if ctxt['enable_port_forwarding']: + l3_extension_plugins.append('port_forwarding') + + ctxt['l3_extension_plugins'] = l3_extension_plugins return ctxt @@ -1481,13 +1713,13 @@ class DataPortContext(NeutronPortContext): def __call__(self): ports = config('data-port') if ports: - # Map of {port/mac:bridge} + # Map of {bridge:port/mac} portmap = parse_data_port_mappings(ports) ports = portmap.keys() # Resolve provided ports or mac addresses and filter out those # already attached to a bridge. resolved = self.resolve_ports(ports) - # FIXME: is this necessary? + # Rebuild port index using resolved and filtered ports. normalized = {get_nic_hwaddr(port): port for port in resolved if port not in ports} normalized.update({port: port for port in resolved @@ -1570,6 +1802,82 @@ class InternalEndpointContext(OSContextGenerator): return {'use_internal_endpoints': config('use-internal-endpoints')} +class VolumeAPIContext(InternalEndpointContext): + """Volume API context. + + This context provides information regarding the volume endpoint to use + when communicating between services. It determines which version of the + API is appropriate for use. + + This value will be determined in the resulting context dictionary + returned from calling the VolumeAPIContext object. Information provided + by this context is as follows: + + volume_api_version: the volume api version to use, currently + 'v2' or 'v3' + volume_catalog_info: the information to use for a cinder client + configuration that consumes API endpoints from the keystone + catalog. This is defined as the type:name:endpoint_type string. + """ + # FIXME(wolsen) This implementation is based on the provider being able + # to specify the package version to check but does not guarantee that the + # volume service api version selected is available. In practice, it is + # quite likely the volume service *is* providing the v3 volume service. + # This should be resolved when the service-discovery spec is implemented. + def __init__(self, pkg): + """ + Creates a new VolumeAPIContext for use in determining which version + of the Volume API should be used for communication. A package codename + should be supplied for determining the currently installed OpenStack + version. + + :param pkg: the package codename to use in order to determine the + component version (e.g. nova-common). See + charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more. + """ + super(VolumeAPIContext, self).__init__() + self._ctxt = None + if not pkg: + raise ValueError('package name must be provided in order to ' + 'determine current OpenStack version.') + self.pkg = pkg + + @property + def ctxt(self): + if self._ctxt is not None: + return self._ctxt + self._ctxt = self._determine_ctxt() + return self._ctxt + + def _determine_ctxt(self): + """Determines the Volume API endpoint information. + + Determines the appropriate version of the API that should be used + as well as the catalog_info string that would be supplied. 
Returns + a dict containing the volume_api_version and the volume_catalog_info. + """ + rel = os_release(self.pkg) + version = '2' + if CompareOpenStackReleases(rel) >= 'pike': + version = '3' + + service_type = 'volumev{version}'.format(version=version) + service_name = 'cinderv{version}'.format(version=version) + endpoint_type = 'publicURL' + if config('use-internal-endpoints'): + endpoint_type = 'internalURL' + catalog_info = '{type}:{name}:{endpoint}'.format( + type=service_type, name=service_name, endpoint=endpoint_type) + + return { + 'volume_api_version': version, + 'volume_catalog_info': catalog_info, + } + + def __call__(self): + return self.ctxt + + class AppArmorContext(OSContextGenerator): """Base class for apparmor contexts.""" @@ -1705,3 +2013,89 @@ class MemcacheContext(OSContextGenerator): ctxt['memcache_server_formatted'], ctxt['memcache_port']) return ctxt + + +class EnsureDirContext(OSContextGenerator): + ''' + Serves as a generic context to create a directory as a side-effect. + + Useful for software that supports drop-in files (.d) in conjunction + with config option-based templates. Examples include: + * OpenStack oslo.policy drop-in files; + * systemd drop-in config files; + * other software that supports overriding defaults with .d files + + Another use-case is when a subordinate generates a configuration for + primary to render in a separate directory. + + Some software requires a user to create a target directory to be + scanned for drop-in files with a specific format. This is why this + context is needed to do that before rendering a template. + ''' + + def __init__(self, dirname, **kwargs): + '''Used merely to ensure that a given directory exists.''' + self.dirname = dirname + self.kwargs = kwargs + + def __call__(self): + mkdir(self.dirname, **self.kwargs) + return {} + + +class VersionsContext(OSContextGenerator): + """Context to return the openstack and operating system versions. + + """ + def __init__(self, pkg='python-keystone'): + """Initialise context. + + :param pkg: Package to extrapolate openstack version from. + :type pkg: str + """ + self.pkg = pkg + + def __call__(self): + ostack = os_release(self.pkg) + osystem = lsb_release()['DISTRIB_CODENAME'].lower() + return { + 'openstack_release': ostack, + 'operating_system_release': osystem} + + +class LogrotateContext(OSContextGenerator): + """Common context generator for logrotate.""" + + def __init__(self, location, interval, count): + """ + :param location: Absolute path for the logrotate config file + :type location: str + :param interval: The interval for the rotations. 
Valid values are + 'daily', 'weekly', 'monthly', 'yearly' + :type interval: str + :param count: The logrotate count option configures the 'count' times + the log files are being rotated before being + :type count: int + """ + self.location = location + self.interval = interval + self.count = 'rotate {}'.format(count) + + def __call__(self): + ctxt = { + 'logrotate_logs_location': self.location, + 'logrotate_interval': self.interval, + 'logrotate_count': self.count, + } + return ctxt + + +class HostInfoContext(OSContextGenerator): + """Context to provide host information.""" + + def __call__(self): + ctxt = { + 'host_fqdn': socket.getfqdn(), + 'host': socket.gethostname(), + } + return ctxt diff --git a/charmhelpers/contrib/openstack/ha/utils.py b/charmhelpers/contrib/openstack/ha/utils.py index 9a4d79c12fb9768eefd3930276696ba4ee987250..b4b69c82303c6d17eb84273e43be99e866616dca 100644 --- a/charmhelpers/contrib/openstack/ha/utils.py +++ b/charmhelpers/contrib/openstack/ha/utils.py @@ -23,9 +23,12 @@ Helpers for high availability. """ +import hashlib +import json import re from charmhelpers.core.hookenv import ( + expected_related_units, log, relation_set, charm_name, @@ -42,6 +45,9 @@ from charmhelpers.contrib.openstack.ip import ( resolve_address, ) +VIP_GROUP_NAME = 'grp_{service}_vips' +DNSHA_GROUP_NAME = 'grp_{service}_hostnames' + class DNSHAException(Exception): """Raised when an error occurs setting up DNS HA @@ -64,6 +70,101 @@ def update_dns_ha_resource_params(resources, resource_params, @param crm_ocf: Corosync Open Cluster Framework resource agent to use for DNS HA """ + _relation_data = {'resources': {}, 'resource_params': {}} + update_hacluster_dns_ha(charm_name(), + _relation_data, + crm_ocf) + resources.update(_relation_data['resources']) + resource_params.update(_relation_data['resource_params']) + relation_set(relation_id=relation_id, groups=_relation_data['groups']) + + +def assert_charm_supports_dns_ha(): + """Validate prerequisites for DNS HA + The MAAS client is only available on Xenial or greater + + :raises DNSHAException: if release is < 16.04 + """ + if lsb_release().get('DISTRIB_RELEASE') < '16.04': + msg = ('DNS HA is only supported on 16.04 and greater ' + 'versions of Ubuntu.') + status_set('blocked', msg) + raise DNSHAException(msg) + return True + + +def expect_ha(): + """ Determine if the unit expects to be in HA + + Check juju goal-state if ha relation is expected, check for VIP or dns-ha + settings which indicate the unit should expect to be related to hacluster. + + @returns boolean + """ + ha_related_units = [] + try: + ha_related_units = list(expected_related_units(reltype='ha')) + except (NotImplementedError, KeyError): + pass + return len(ha_related_units) > 0 or config('vip') or config('dns-ha') + + +def generate_ha_relation_data(service, + extra_settings=None, + haproxy_enabled=True): + """ Generate relation data for ha relation + + Based on configuration options and unit interfaces, generate a json + encoded dict of relation data items for the hacluster relation, + providing configuration for DNS HA or VIP's + haproxy clone sets. 
+ + Example of supplying additional settings:: + + COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips' + AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth' + AGENT_CA_PARAMS = 'op monitor interval="5s"' + + ha_console_settings = { + 'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH}, + 'init_services': {'res_nova_consoleauth': 'nova-consoleauth'}, + 'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH}, + 'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}) + generate_ha_relation_data('nova', extra_settings=ha_console_settings) + + + @param service: Name of the service being configured + @param extra_settings: Dict of additional resource data + @returns dict: json encoded data for use with relation_set + """ + _relation_data = {'resources': {}, 'resource_params': {}} + + if haproxy_enabled: + _haproxy_res = 'res_{}_haproxy'.format(service) + _relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'} + _relation_data['resource_params'] = { + _haproxy_res: 'op monitor interval="5s"' + } + _relation_data['init_services'] = {_haproxy_res: 'haproxy'} + _relation_data['clones'] = { + 'cl_{}_haproxy'.format(service): _haproxy_res + } + + if extra_settings: + for k, v in extra_settings.items(): + if _relation_data.get(k): + _relation_data[k].update(v) + else: + _relation_data[k] = v + + if config('dns-ha'): + update_hacluster_dns_ha(service, _relation_data) + else: + update_hacluster_vip(service, _relation_data) + + return { + 'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS) + for k, v in _relation_data.items() if v + } # Validate the charm environment for DNS HA assert_charm_supports_dns_ha() @@ -113,6 +214,9 @@ def update_dns_ha_resource_params(resources, resource_params, DEBUG) relation_set(relation_id=relation_id, groups={ 'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)}) + relation_data['groups'] = { + DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group) + } else: msg = 'DNS HA: Hostname group has no members.' status_set('blocked', msg) @@ -140,3 +244,100 @@ def expect_ha(): @returns boolean """ return config('vip') or config('dns-ha') + +def get_vip_settings(vip): + """Calculate which nic is on the correct network for the given vip. + + If nic or netmask discovery fail then fallback to using charm supplied + config. If fallback is used this is indicated via the fallback variable. + + @param vip: VIP to lookup nic and cidr for. + @returns (str, str, bool): eg (iface, netmask, fallback) + """ + iface = get_iface_for_address(vip) + netmask = get_netmask_for_address(vip) + fallback = False + if iface is None: + iface = config('vip_iface') + fallback = True + if netmask is None: + netmask = config('vip_cidr') + fallback = True + return iface, netmask, fallback + + +def update_hacluster_vip(service, relation_data): + """ Configure VIP resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. 
+ """ + cluster_config = get_hacluster_config() + vip_group = [] + vips_to_delete = [] + for vip in cluster_config['vip'].split(): + if is_ipv6(vip): + res_vip = 'ocf:heartbeat:IPv6addr' + vip_params = 'ipv6addr' + else: + res_vip = 'ocf:heartbeat:IPaddr2' + vip_params = 'ip' + + iface, netmask, fallback = get_vip_settings(vip) + + vip_monitoring = 'op monitor timeout="20s" interval="10s" depth="0"' + if iface is not None: + # NOTE(jamespage): Delete old VIP resources + # Old style naming encoding iface in name + # does not work well in environments where + # interface/subnet wiring is not consistent + vip_key = 'res_{}_{}_vip'.format(service, iface) + if vip_key in vips_to_delete: + vip_key = '{}_{}'.format(vip_key, vip_params) + vips_to_delete.append(vip_key) + + vip_key = 'res_{}_{}_vip'.format( + service, + hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7]) + + relation_data['resources'][vip_key] = res_vip + # NOTE(jamespage): + # Use option provided vip params if these where used + # instead of auto-detected values + if fallback: + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" cidr_netmask="{netmask}" ' + 'nic="{iface}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask, + vip_monitoring=vip_monitoring)) + else: + # NOTE(jamespage): + # let heartbeat figure out which interface and + # netmask to configure, which works nicely + # when network interface naming is not + # consistent across units. + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + vip_monitoring=vip_monitoring)) + + vip_group.append(vip_key) + + if vips_to_delete: + try: + relation_data['delete_resources'].extend(vips_to_delete) + except KeyError: + relation_data['delete_resources'] = vips_to_delete + + if len(vip_group) >= 1: + key = VIP_GROUP_NAME.format(service=service) + try: + relation_data['groups'][key] = ' '.join(vip_group) + except KeyError: + relation_data['groups'] = { + key: ' '.join(vip_group) + } diff --git a/charmhelpers/contrib/openstack/ip.py b/charmhelpers/contrib/openstack/ip.py index d1476b1ab21d40934db6eb0cc0d2174d41b1df72..987331736bad088c1799b91b36560ac7ee14fa4a 100644 --- a/charmhelpers/contrib/openstack/ip.py +++ b/charmhelpers/contrib/openstack/ip.py @@ -13,6 +13,7 @@ # limitations under the License. 
from charmhelpers.core.hookenv import ( + NoNetworkBinding, config, unit_get, service_name, @@ -158,7 +159,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True): if is_address_in_network(bound_cidr, vip): resolved_address = vip break - except NotImplementedError: + except (NotImplementedError, NoNetworkBinding): # If no net-splits configured and no support for extra # bindings/network spaces so we expect a single vip resolved_address = vips[0] @@ -175,7 +176,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True): # configuration is not in use try: resolved_address = network_get_primary_address(binding) - except NotImplementedError: + except (NotImplementedError, NoNetworkBinding): resolved_address = fallback_addr if resolved_address is None: diff --git a/charmhelpers/contrib/openstack/neutron.py b/charmhelpers/contrib/openstack/neutron.py index 0f847f566e5cd35d48060714890c0108d35bfc8b..fb5607f3e73159d90236b2d7a4051aa82119e889 100644 --- a/charmhelpers/contrib/openstack/neutron.py +++ b/charmhelpers/contrib/openstack/neutron.py @@ -217,6 +217,11 @@ def neutron_plugins(): plugins['nsx']['config'] = '/etc/neutron/nsx.ini' plugins['vsp']['driver'] = ( 'nuage_neutron.plugins.nuage.plugin.NuagePlugin') + if CompareOpenStackReleases(release) >= 'newton': + plugins['vsp']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' + plugins['vsp']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' + plugins['vsp']['server_packages'] = ['neutron-server', + 'neutron-plugin-ml2'] return plugins diff --git a/charmhelpers/contrib/openstack/policyd.py b/charmhelpers/contrib/openstack/policyd.py new file mode 100644 index 0000000000000000000000000000000000000000..6541146f87806e8382f3e4e702dc67500c7cb074 --- /dev/null +++ b/charmhelpers/contrib/openstack/policyd.py @@ -0,0 +1,716 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import contextlib +import os +import six +import shutil +import yaml +import zipfile + +import charmhelpers.core.hookenv as hookenv +import charmhelpers.core.host as ch_host + +# Features provided by this module: + +""" +Policy.d helper functions +========================= + +The functions in this module are designed, as a set, to provide an easy-to-use +set of hooks for classic charms to add in /etc/<service-name>/policy.d/ +directory override YAML files. + +(For charms.openstack charms, a mixin class is provided for this +functionality). + +In order to "hook" this functionality into a (classic) charm, two functions are +provided: + + maybe_do_policyd_overrides(openstack_release, + service, + blacklist_paths=none, + blacklist_keys=none, + template_function=none, + restart_handler=none) + + maybe_do_policyd_overrides_on_config_changed(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None + +(See the docstrings for details on the parameters) + +The functions should be called from the install and upgrade hooks in the charm. 
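Relating back to the resolve_address() change in ip.py above, a hedged sketch of typical charm usage that now degrades gracefully on providers without network bindings; the INTERNAL constant is assumed to exist alongside PUBLIC in the same module:

    from charmhelpers.contrib.openstack.ip import (
        PUBLIC,
        INTERNAL,
        resolve_address,
    )

    # With this patch, a missing binding raises NoNetworkBinding internally
    # and resolve_address() falls back to the configured/unit address instead
    # of erroring out of the hook.
    public_address = resolve_address(endpoint_type=PUBLIC)
    internal_address = resolve_address(endpoint_type=INTERNAL)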
+The `maybe_do_policyd_overrides_on_config_changed` function is designed to be +called on the config-changed hook, in that it does an additional check to +ensure that an already overriden policy.d in an upgrade or install hooks isn't +repeated. + +In order the *enable* this functionality, the charm's install, config_changed, +and upgrade_charm hooks need to be modified, and a new config option (see +below) needs to be added. The README for the charm should also be updated. + +Examples from the keystone charm are: + +@hooks.hook('install.real') +@harden() +def install(): + ... + # call the policy overrides handler which will install any policy overrides + maybe_do_policyd_overrides(os_release('keystone'), 'keystone') + + +@hooks.hook('config-changed') +@restart_on_change(restart_map(), restart_functions=restart_function_map()) +@harden() +def config_changed(): + ... + # call the policy overrides handler which will install any policy overrides + maybe_do_policyd_overrides_on_config_changed(os_release('keystone'), + 'keystone') + +@hooks.hook('upgrade-charm') +@restart_on_change(restart_map(), stopstart=True) +@harden() +def upgrade_charm(): + ... + # call the policy overrides handler which will install any policy overrides + maybe_do_policyd_overrides(os_release('keystone'), 'keystone') + +Status Line +=========== + +The workload status code in charm-helpers has been modified to detect if +policy.d override code has been incorporated into the charm by checking for the +new config variable (in the config.yaml). If it has been, then the workload +status line will automatically show "PO:" at the beginning of the workload +status for that unit/service if the config option is set. If the policy +override is broken, the "PO (broken):" will be shown. No changes to the charm +(apart from those already mentioned) are needed to enable this functionality. +(charms.openstack charms also get this functionality, but please see that +library for further details). +""" + +# The config.yaml for the charm should contain the following for the config +# option: + +""" + use-policyd-override: + type: boolean + default: False + description: | + If True then use the resource file named 'policyd-override' to install + override yaml files in the service's policy.d directory. The resource + file should be a zip file containing at least one yaml file with a .yaml + or .yml extension. If False then remove the overrides. +""" + +# The metadata.yaml for the charm should contain the following: +""" +resources: + policyd-override: + type: file + filename: policyd-override.zip + description: The policy.d overrides file +""" + +# The README for the charm should contain the following: +""" +Policy Overrides +---------------- + +This service allows for policy overrides using the `policy.d` directory. This +is an **advanced** feature and the policies that the service supports should be +clearly and unambiguously understood before trying to override, or add to, the +default policies that the service uses. + +The charm also has some policy defaults. They should also be understood before +being overridden. It is possible to break the system (for tenants and other +services) if policies are incorrectly applied to the service. + +Policy overrides are YAML files that contain rules that will add to, or +override, existing policy rules in the service. The `policy.d` directory is +a place to put the YAML override files. 
This charm owns the +`/etc/keystone/policy.d` directory, and as such, any manual changes to it will +be overwritten on charm upgrades. + +Policy overrides are provided to the charm using a resource file called +`policyd-override`. This is attached to the charm using (for example): + + juju attach-resource <charm-name> policyd-override=<some-file> + +The `<charm-name>` is the name that this charm is deployed as, with +`<some-file>` being the resource file containing the policy overrides. + +The format of the resource file is a ZIP file (.zip extension) containing at +least one YAML file with an extension of `.yaml` or `.yml`. Note that any +directories in the ZIP file are ignored; all of the files are flattened into a +single directory. There must not be any duplicated filenames; this will cause +an error and nothing in the resource file will be applied. + +(ed. next part is optional is the charm supports some form of +template/substitution on a read file) + +If a (ed. "one or more of") [`.j2`, `.tmpl`, `.tpl`] file is found in the +resource file then the charm will perform a substitution with charm variables +taken from the config or relations. (ed. edit as appropriate to include the +variable). + +To enable the policy overrides the config option `use-policyd-override` must be +set to `True`. + +When `use-policyd-override` is `True` the status line of the charm will be +prefixed with `PO:` indicating that policies have been overridden. If the +installation of the policy override YAML files failed for any reason then the +status line will be prefixed with `PO (broken):`. The log file for the charm +will indicate the reason. No policy override files are installed if the `PO +(broken):` is shown. The status line indicates that the overrides are broken, +not that the policy for the service has failed - they will be the defaults for +the charm and service. + +If the policy overrides did not install then *either* attach a new, corrected, +resource file *or* disable the policy overrides by setting +`use-policyd-override` to False. + +Policy overrides on one service may affect the functionality of another +service. Therefore, it may be necessary to provide policy overrides for +multiple service charms to achieve a consistent set of policies across the +OpenStack system. The charms for the other services that may need overrides +should be checked to ensure that they support overrides before proceeding. +""" + +POLICYD_VALID_EXTS = ['.yaml', '.yml', '.j2', '.tmpl', '.tpl'] +POLICYD_TEMPLATE_EXTS = ['.j2', '.tmpl', '.tpl'] +POLICYD_RESOURCE_NAME = "policyd-override" +POLICYD_CONFIG_NAME = "use-policyd-override" +POLICYD_SUCCESS_FILENAME = "policyd-override-success" +POLICYD_LOG_LEVEL_DEFAULT = hookenv.INFO +POLICYD_ALWAYS_BLACKLISTED_KEYS = ("admin_required", "cloud_admin") + + +class BadPolicyZipFile(Exception): + + def __init__(self, log_message): + self.log_message = log_message + + def __str__(self): + return self.log_message + + +class BadPolicyYamlFile(Exception): + + def __init__(self, log_message): + self.log_message = log_message + + def __str__(self): + return self.log_message + + +if six.PY2: + BadZipFile = zipfile.BadZipfile +else: + BadZipFile = zipfile.BadZipFile + + +def is_policyd_override_valid_on_this_release(openstack_release): + """Check that the charm is running on at least Ubuntu Xenial, and at + least the queens release. + + :param openstack_release: the release codename that is installed. 
+ :type openstack_release: str + :returns: True if okay + :rtype: bool + """ + # NOTE(ajkavanagh) circular import! This is because the status message + # generation code in utils has to call into this module, but this function + # needs the CompareOpenStackReleases() function. The only way to solve + # this is either to put ALL of this module into utils, or refactor one or + # other of the CompareOpenStackReleases or status message generation code + # into a 3rd module. + import charmhelpers.contrib.openstack.utils as ch_utils + return ch_utils.CompareOpenStackReleases(openstack_release) >= 'queens' + + +def maybe_do_policyd_overrides(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None): + """If the config option is set, get the resource file and process it to + enable the policy.d overrides for the service passed. + + The param `openstack_release` is required as the policyd overrides feature + is only supported on openstack_release "queens" or later, and on ubuntu + "xenial" or later. Prior to these versions, this feature is a NOP. + + The optional template_function is a function that accepts a string and has + an opportunity to modify the loaded file prior to it being read by + yaml.safe_load(). This allows the charm to perform "templating" using + charm derived data. + + The param blacklist_paths are paths (that are in the service's policy.d + directory that should not be touched). + + The param blacklist_keys are keys that must not appear in the yaml file. + If they do, then the whole policy.d file fails. + + The yaml file extracted from the resource_file (which is a zipped file) has + its file path reconstructed. This, also, must not match any path in the + black list. + + The param restart_handler is an optional Callable that is called to perform + the service restart if the policy.d file is changed. This should normally + be None as oslo.policy automatically picks up changes in the policy.d + directory. However, for any services where this is buggy then a + restart_handler can be used to force the policy.d files to be read. + + :param openstack_release: The openstack release that is installed. + :type openstack_release: str + :param service: the service name to construct the policy.d directory for. + :type service: str + :param blacklist_paths: optional list of paths to leave alone + :type blacklist_paths: Union[None, List[str]] + :param blacklist_keys: optional list of keys that mustn't appear in the + yaml file's + :type blacklist_keys: Union[None, List[str]] + :param template_function: Optional function that can modify the string + prior to being processed as a Yaml document. + :type template_function: Union[None, Callable[[str], str]] + :param restart_handler: The function to call if the service should be + restarted. + :type restart_handler: Union[None, Callable[]] + """ + config = hookenv.config() + try: + if not config.get(POLICYD_CONFIG_NAME, False): + clean_policyd_dir_for(service, blacklist_paths) + if (os.path.isfile(_policy_success_file()) and + restart_handler is not None and + callable(restart_handler)): + restart_handler() + remove_policy_success_file() + return + except Exception as e: + print("Exception is: ", str(e)) + import traceback + traceback.print_exc() + return + if not is_policyd_override_valid_on_this_release(openstack_release): + return + # from now on it should succeed; if it doesn't then status line will show + # broken. 
+ resource_filename = get_policy_resource_filename() + restart = process_policy_resource_file( + resource_filename, service, blacklist_paths, blacklist_keys, + template_function) + if restart and restart_handler is not None and callable(restart_handler): + restart_handler() + + +def maybe_do_policyd_overrides_on_config_changed(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None): + """This function is designed to be called from the config changed hook + handler. It will only perform the policyd overrides if the config is True + and the success file doesn't exist. Otherwise, it does nothing as the + resource file has already been processed. + + See maybe_do_policyd_overrides() for more details on the params. + + :param openstack_release: The openstack release that is installed. + :type openstack_release: str + :param service: the service name to construct the policy.d directory for. + :type service: str + :param blacklist_paths: optional list of paths to leave alone + :type blacklist_paths: Union[None, List[str]] + :param blacklist_keys: optional list of keys that mustn't appear in the + yaml file's + :type blacklist_keys: Union[None, List[str]] + :param template_function: Optional function that can modify the string + prior to being processed as a Yaml document. + :type template_function: Union[None, Callable[[str], str]] + :param restart_handler: The function to call if the service should be + restarted. + :type restart_handler: Union[None, Callable[]] + """ + config = hookenv.config() + try: + if not config.get(POLICYD_CONFIG_NAME, False): + clean_policyd_dir_for(service, blacklist_paths) + if (os.path.isfile(_policy_success_file()) and + restart_handler is not None and + callable(restart_handler)): + restart_handler() + remove_policy_success_file() + return + except Exception: + return + # if the policyd overrides have been performed just return + if os.path.isfile(_policy_success_file()): + return + maybe_do_policyd_overrides( + openstack_release, service, blacklist_paths, blacklist_keys, + template_function, restart_handler) + + +def get_policy_resource_filename(): + """Function to extract the policy resource filename + + :returns: The filename of the resource, if set, otherwise, if an error + occurs, then None is returned. + :rtype: Union[str, None] + """ + try: + return hookenv.resource_get(POLICYD_RESOURCE_NAME) + except Exception: + return None + + +@contextlib.contextmanager +def open_and_filter_yaml_files(filepath): + """Validate that the filepath provided is a zip file and contains at least + one (.yaml|.yml) file, and that the files are not duplicated when the zip + file is flattened. Note that the yaml files are not checked. This is the + first stage in validating the policy zipfile; individual yaml files are not + checked for validity or black listed keys. + + An example of use is: + + with open_and_filter_yaml_files(some_path) as zfp, g: + for zipinfo in g: + # do something with zipinfo ... + + :param filepath: a filepath object that can be opened by zipfile + :type filepath: Union[AnyStr, os.PathLike[AntStr]] + :returns: (zfp handle, + a generator of the (name, filename, ZipInfo object) tuples) as a + tuple. 
+ :rtype: ContextManager[(zipfile.ZipFile, + Generator[(name, str, str, zipfile.ZipInfo)])] + :raises: zipfile.BadZipFile + :raises: BadPolicyZipFile if duplicated yaml or missing + :raises: IOError if the filepath is not found + """ + with zipfile.ZipFile(filepath, 'r') as zfp: + # first pass through; check for duplicates and at least one yaml file. + names = collections.defaultdict(int) + yamlfiles = _yamlfiles(zfp) + for name, _, _, _ in yamlfiles: + names[name] += 1 + # There must be at least 1 yaml file. + if len(names.keys()) == 0: + raise BadPolicyZipFile("contains no yaml files with {} extensions." + .format(", ".join(POLICYD_VALID_EXTS))) + # There must be no duplicates + duplicates = [n for n, c in names.items() if c > 1] + if duplicates: + raise BadPolicyZipFile("{} have duplicates in the zip file." + .format(", ".join(duplicates))) + # Finally, let's yield the generator + yield (zfp, yamlfiles) + + +def _yamlfiles(zipfile): + """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions) + and the infolist item from a zipfile. + + :param zipfile: the zipfile to read zipinfo items from + :type zipfile: zipfile.ZipFile + :returns: generator of (name, ext, filename, info item) for each self-identified + yaml file. + :rtype: List[(str, str, str, zipfile.ZipInfo)] + """ + l = [] + for infolist_item in zipfile.infolist(): + try: + if infolist_item.is_dir(): + continue + except AttributeError: + # fallback to "old" way to determine dir entry for pre-py36 + if infolist_item.filename.endswith('/'): + continue + _, name_ext = os.path.split(infolist_item.filename) + name, ext = os.path.splitext(name_ext) + ext = ext.lower() + if ext and ext in POLICYD_VALID_EXTS: + l.append((name, ext, name_ext, infolist_item)) + return l + + +def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): + """Read, validate and return the (first) yaml document from the stream. + + The doc is read, and checked for a yaml file. The the top-level keys are + checked against the blacklist_keys provided. If there are problems then an + Exception is raised. Otherwise the yaml document is returned as a Python + object that can be dumped back as a yaml file on the system. + + The yaml file must only consist of a str:str mapping, and if not then the + yaml file is rejected. + + :param stream_or_doc: the file object to read the yaml from + :type stream_or_doc: Union[AnyStr, IO[AnyStr]] + :param blacklist_keys: Any keys, which if in the yaml file, should cause + and error. + :type blacklisted_keys: Union[None, List[str]] + :returns: the yaml file as a python document + :rtype: Dict[str, str] + :raises: yaml.YAMLError if there is a problem with the document + :raises: BadPolicyYamlFile if file doesn't look right or there are + blacklisted keys in the file. + """ + blacklist_keys = blacklist_keys or [] + blacklist_keys.append(POLICYD_ALWAYS_BLACKLISTED_KEYS) + doc = yaml.safe_load(stream_or_doc) + if not isinstance(doc, dict): + raise BadPolicyYamlFile("doesn't look like a policy file?") + keys = set(doc.keys()) + blacklisted_keys_present = keys.intersection(blacklist_keys) + if blacklisted_keys_present: + raise BadPolicyYamlFile("blacklisted keys {} present." 
+ .format(", ".join(blacklisted_keys_present))) + if not all(isinstance(k, six.string_types) for k in keys): + raise BadPolicyYamlFile("keys in yaml aren't all strings?") + # check that the dictionary looks like a mapping of str to str + if not all(isinstance(v, six.string_types) for v in doc.values()): + raise BadPolicyYamlFile("values in yaml aren't all strings?") + return doc + + +def policyd_dir_for(service): + """Return the policy directory for the named service. + + This assumes the default name of "policy.d" which is kept across all + charms. + + :param service: str + :returns: the policy.d override directory. + :rtype: os.PathLike[str] + """ + return os.path.join("/", "etc", service, "policy.d") + + +def clean_policyd_dir_for(service, keep_paths=None): + """Clean out the policyd directory except for items that should be kept. + + The keep_paths, if used, should be set to the full path of the files that + should be kept in the policyd directory for the service. Note that the + service name is passed in, and then the policyd_dir_for() function is used. + This is so that a coding error doesn't result in a sudden deletion of the + charm (say). + + :param service: the service name to use to construct the policy.d dir. + :type service: str + :param keep_paths: optional list of paths to not delete. + :type keep_paths: Union[None, List[str]] + """ + keep_paths = keep_paths or [] + path = policyd_dir_for(service) + if not os.path.exists(path): + ch_host.mkdir(path, owner=service, group=service, perms=0o775) + _scanner = os.scandir if six.PY3 else _py2_scandir + for direntry in _scanner(path): + # see if the path should be kept. + if direntry.path in keep_paths: + continue + # we remove any directories; it's ours and there shouldn't be any + if direntry.is_dir(): + shutil.rmtree(direntry.path) + else: + os.remove(direntry.path) + + +@contextlib.contextmanager +def _py2_scandir(path): + """provide a py2 implementation of os.scandir if this module ever gets used + in a py2 charm (unlikely). uses os.listdir() to get the names in the path, + and then mocks the is_dir() function using os.path.isdir() to check for a + directory. + + :param path: the path to list the directories for + :type path: str + :returns: Generator that provides _P27Direntry objects + :rtype: ContextManager[_P27Direntry] + """ + for f in os.listdir(path): + yield _P27Direntry(f) + + +class _P27Direntry(object): + """Mock a scandir Direntry object with enough to use in + clean_policyd_dir_for + """ + + def __init__(self, path): + self.path = path + + def is_dir(self): + return os.path.isdir(self.path) + + +def path_for_policy_file(service, name): + """Return the full path for a policy.d file that will be written to the + service's policy.d directory. + + It is constructed using policyd_dir_for(), the name and the ".yaml" + extension. + + :param service: the service name + :type service: str + :param name: the name for the policy override + :type name: str + :returns: the full path name for the file + :rtype: os.PathLike[str] + """ + return os.path.join(policyd_dir_for(service), name + ".yaml") + + +def _policy_success_file(): + """Return the file name for a successful drop of policy.d overrides + + :returns: the path name for the file. 
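A hedged sketch of read_and_validate_yaml() on an in-memory document; the policy rule names and the blacklisted key are invented for illustration:

    from charmhelpers.contrib.openstack.policyd import read_and_validate_yaml

    doc = read_and_validate_yaml(
        'identity:list_projects: "role:admin"\n'
        'identity:get_project: "role:admin"\n',
        blacklist_keys=['secret_rule'])
    # doc == {'identity:list_projects': 'role:admin',
    #         'identity:get_project': 'role:admin'}
    # A top-level 'secret_rule' key, or any value that is not a plain string,
    # would raise BadPolicyYamlFile instead of returning a document.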
+ :rtype: str + """ + return os.path.join(hookenv.charm_dir(), POLICYD_SUCCESS_FILENAME) + + +def remove_policy_success_file(): + """Remove the file that indicates successful policyd override.""" + try: + os.remove(_policy_success_file()) + except Exception: + pass + + +def policyd_status_message_prefix(): + """Return the prefix str for the status line. + + "PO:" indicating that the policy overrides are in place, or "PO (broken):" + if the policy is supposed to be working but there is no success file. + + :returns: the prefix + :rtype: str + """ + if os.path.isfile(_policy_success_file()): + return "PO:" + return "PO (broken):" + + +def process_policy_resource_file(resource_file, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None): + """Process the resource file (which should contain at least one yaml file) + and write those files to the service's policy.d directory. + + The optional template_function is a function that accepts a python + string and has an opportunity to modify the document + prior to it being read by the yaml.safe_load() function and written to + disk. Note that this function does *not* say how the templating is done - + this is up to the charm to implement its chosen method. + + The param blacklist_paths are paths (that are in the service's policy.d + directory that should not be touched). + + The param blacklist_keys are keys that must not appear in the yaml file. + If they do, then the whole policy.d file fails. + + The yaml file extracted from the resource_file (which is a zipped file) has + its file path reconstructed. This, also, must not match any path in the + black list. + + If any error occurs, then the policy.d directory is cleared, the error is + written to the log, and the status line will eventually show as failed. + + :param resource_file: The zipped file to open and extract yaml files form. + :type resource_file: Union[AnyStr, os.PathLike[AnyStr]] + :param service: the service name to construct the policy.d directory for. + :type service: str + :param blacklist_paths: optional list of paths to leave alone + :type blacklist_paths: Union[None, List[str]] + :param blacklist_keys: optional list of keys that mustn't appear in the + yaml file's + :type blacklist_keys: Union[None, List[str]] + :param template_function: Optional function that can modify the yaml + document. + :type template_function: Union[None, Callable[[AnyStr], AnyStr]] + :returns: True if the processing was successful, False if not. + :rtype: boolean + """ + blacklist_paths = blacklist_paths or [] + completed = False + try: + with open_and_filter_yaml_files(resource_file) as (zfp, gen): + # first clear out the policy.d directory and clear success + remove_policy_success_file() + clean_policyd_dir_for(service, blacklist_paths) + for name, ext, filename, zipinfo in gen: + # construct a name for the output file. 
+ yaml_filename = path_for_policy_file(service, name) + if yaml_filename in blacklist_paths: + raise BadPolicyZipFile("policy.d name {} is blacklisted" + .format(yaml_filename)) + with zfp.open(zipinfo) as fp: + doc = fp.read() + # if template_function is not None, then offer the document + # to the template function + if ext in POLICYD_TEMPLATE_EXTS: + if (template_function is None or not + callable(template_function)): + raise BadPolicyZipFile( + "Template {} but no template_function is " + "available".format(filename)) + doc = template_function(doc) + yaml_doc = read_and_validate_yaml(doc, blacklist_keys) + with open(yaml_filename, "wt") as f: + yaml.dump(yaml_doc, f) + # Every thing worked, so we mark up a success. + completed = True + except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: + hookenv.log("Processing {} failed: {}".format(resource_file, str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + except IOError as e: + # technically this shouldn't happen; it would be a programming error as + # the filename comes from Juju and thus, should exist. + hookenv.log( + "File {} failed with IOError. This really shouldn't happen" + " -- error: {}".format(resource_file, str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + except Exception as e: + import traceback + hookenv.log("General Exception({}) during policyd processing" + .format(str(e)), + level=POLICYD_LOG_LEVEL_DEFAULT) + hookenv.log(traceback.format_exc()) + finally: + if not completed: + hookenv.log("Processing {} failed: cleaning policy.d directory" + .format(resource_file), + level=POLICYD_LOG_LEVEL_DEFAULT) + clean_policyd_dir_for(service, blacklist_paths) + else: + # touch the success filename + hookenv.log("policy.d overrides installed.", + level=POLICYD_LOG_LEVEL_DEFAULT) + open(_policy_success_file(), "w").close() + return completed diff --git a/charmhelpers/contrib/openstack/ssh_migrations.py b/charmhelpers/contrib/openstack/ssh_migrations.py new file mode 100644 index 0000000000000000000000000000000000000000..96b9f71d42d1c81539f78b8e1c4761f81d84c304 --- /dev/null +++ b/charmhelpers/contrib/openstack/ssh_migrations.py @@ -0,0 +1,412 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import subprocess + +from charmhelpers.core.hookenv import ( + ERROR, + log, + relation_get, +) +from charmhelpers.contrib.network.ip import ( + is_ipv6, + ns_query, +) +from charmhelpers.contrib.openstack.utils import ( + get_hostname, + get_host_ip, + is_ip, +) + +NOVA_SSH_DIR = '/etc/nova/compute_ssh/' + + +def ssh_directory_for_unit(application_name, user=None): + """Return the directory used to store ssh assets for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified directory path. 
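A hedged sketch of passing the optional blacklist_paths and restart_handler arguments through maybe_do_policyd_overrides(); the release, service name and blacklisted filename are illustrative:

    from charmhelpers.core.host import service_restart
    from charmhelpers.contrib.openstack.policyd import (
        maybe_do_policyd_overrides,
    )

    # Keep one charm-managed override out of reach of operator uploads, and
    # force a restart for a service that does not re-read policy.d on its own.
    maybe_do_policyd_overrides(
        'rocky',
        'keystone',
        blacklist_paths=['/etc/keystone/policy.d/charm-owned.yaml'],
        restart_handler=lambda: service_restart('apache2'))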
+ :rtype: str + """ + if user: + application_name = "{}_{}".format(application_name, user) + _dir = os.path.join(NOVA_SSH_DIR, application_name) + for d in [NOVA_SSH_DIR, _dir]: + if not os.path.isdir(d): + os.mkdir(d) + for f in ['authorized_keys', 'known_hosts']: + f = os.path.join(_dir, f) + if not os.path.isfile(f): + open(f, 'w').close() + return _dir + + +def known_hosts(application_name, user=None): + """Return the known hosts file for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified path to file. + :rtype: str + """ + return os.path.join( + ssh_directory_for_unit(application_name, user), + 'known_hosts') + + +def authorized_keys(application_name, user=None): + """Return the authorized keys file for the application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Fully qualified path to file. + :rtype: str + """ + return os.path.join( + ssh_directory_for_unit(application_name, user), + 'authorized_keys') + + +def ssh_known_host_key(host, application_name, user=None): + """Return the first entry in known_hosts for host. + + :param host: hostname to lookup in file. + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Host key + :rtype: str or None + """ + cmd = [ + 'ssh-keygen', + '-f', known_hosts(application_name, user), + '-H', + '-F', + host] + try: + # The first line of output is like '# Host xx found: line 1 type RSA', + # which should be excluded. + output = subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + # RC of 1 seems to be legitimate for most ssh-keygen -F calls. + if e.returncode == 1: + output = e.output + else: + raise + output = output.strip() + + if output: + # Bug #1500589 cmd has 0 rc on precise if entry not present + lines = output.split('\n') + if len(lines) >= 1: + return lines[0] + + return None + + +def remove_known_host(host, application_name, user=None): + """Remove the entry in known_hosts for host. + + :param host: hostname to lookup in file. + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + log('Removing SSH known host entry for compute host at %s' % host) + cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host] + subprocess.check_call(cmd) + + +def is_same_key(key_1, key_2): + """Extract the key from two host entries and compare them. + + :param key_1: Host key + :type key_1: str + :param key_2: Host key + :type key_2: str + """ + # The key format get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp' + # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', we only need to compare + # the part start with 'ssh-rsa' followed with '= ', because the hash + # value in the beginning will change each time. + k_1 = key_1.split('= ')[1] + k_2 = key_2.split('= ')[1] + return k_1 == k_2 + + +def add_known_host(host, application_name, user=None): + """Add the given host key to the known hosts file. 
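For orientation, the on-disk layout these path helpers produce; the application name is illustrative and the paths follow NOVA_SSH_DIR above:

    from charmhelpers.contrib.openstack.ssh_migrations import (
        known_hosts,
        ssh_directory_for_unit,
    )

    ssh_directory_for_unit('nova-compute-kvm', user='nova')
    # -> '/etc/nova/compute_ssh/nova-compute-kvm_nova', created on first use
    #    with empty 'authorized_keys' and 'known_hosts' files inside.
    known_hosts('nova-compute-kvm', user='nova')
    # -> '/etc/nova/compute_ssh/nova-compute-kvm_nova/known_hosts'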
+ + :param host: host name + :type host: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] + try: + remote_key = subprocess.check_output(cmd).strip() + except Exception as e: + log('Could not obtain SSH host key from %s' % host, level=ERROR) + raise e + + current_key = ssh_known_host_key(host, application_name, user) + if current_key and remote_key: + if is_same_key(remote_key, current_key): + log('Known host key for compute host %s up to date.' % host) + return + else: + remove_known_host(host, application_name, user) + + log('Adding SSH host key to known hosts for compute node at %s.' % host) + with open(known_hosts(application_name, user), 'a') as out: + out.write("{}\n".format(remote_key)) + + +def ssh_authorized_key_exists(public_key, application_name, user=None): + """Check if given key is in the authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + :returns: Whether given key is in the authorized_key file. + :rtype: boolean + """ + with open(authorized_keys(application_name, user)) as keys: + return ('%s' % public_key) in keys.read() + + +def add_authorized_key(public_key, application_name, user=None): + """Add given key to the authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + with open(authorized_keys(application_name, user), 'a') as keys: + keys.write("{}\n".format(public_key)) + + +def ssh_compute_add_host_and_key(public_key, hostname, private_address, + application_name, user=None): + """Add a compute nodes ssh details to local cache. + + Collect various hostname variations and add the corresponding host keys to + the local known hosts file. Finally, add the supplied public key to the + authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param hostname: Hostname to collect host keys from. + :type hostname: str + :param private_address:aCorresponding private address for hostname + :type private_address: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + # If remote compute node hands us a hostname, ensure we have a + # known hosts entry for its IP, hostname and FQDN. + hosts = [private_address] + + if not is_ipv6(private_address): + if hostname: + hosts.append(hostname) + + if is_ip(private_address): + hn = get_hostname(private_address) + if hn: + hosts.append(hn) + short = hn.split('.')[0] + if ns_query(short): + hosts.append(short) + else: + hosts.append(get_host_ip(private_address)) + short = private_address.split('.')[0] + if ns_query(short): + hosts.append(short) + + for host in list(set(hosts)): + add_known_host(host, application_name, user) + + if not ssh_authorized_key_exists(public_key, application_name, user): + log('Saving SSH authorized key for compute host at %s.' 
% + private_address) + add_authorized_key(public_key, application_name, user) + + +def ssh_compute_add(public_key, application_name, rid=None, unit=None, + user=None): + """Add a compute nodes ssh details to local cache. + + Collect various hostname variations and add the corresponding host keys to + the local known hosts file. Finally, add the supplied public key to the + authorized_key file. + + :param public_key: Public key. + :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param rid: Relation id of the relation between this charm and the app. If + none is supplied it is assumed its the relation relating to + the current hook context. + :type rid: str + :param unit: Unit to add ssh asserts for if none is supplied it is assumed + its the unit relating to the current hook context. + :type unit: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + relation_data = relation_get(rid=rid, unit=unit) + ssh_compute_add_host_and_key( + public_key, + relation_data.get('hostname'), + relation_data.get('private-address'), + application_name, + user=user) + + +def ssh_known_hosts_lines(application_name, user=None): + """Return contents of known_hosts file for given application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + known_hosts_list = [] + with open(known_hosts(application_name, user)) as hosts: + for hosts_line in hosts: + if hosts_line.rstrip(): + known_hosts_list.append(hosts_line.rstrip()) + return(known_hosts_list) + + +def ssh_authorized_keys_lines(application_name, user=None): + """Return contents of authorized_keys file for given application. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + authorized_keys_list = [] + + with open(authorized_keys(application_name, user)) as keys: + for authkey_line in keys: + if authkey_line.rstrip(): + authorized_keys_list.append(authkey_line.rstrip()) + return(authorized_keys_list) + + +def ssh_compute_remove(public_key, application_name, user=None): + """Remove given public key from authorized_keys file. + + :param public_key: Public key. + :type public_key: str + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. + :type user: str + """ + if not (os.path.isfile(authorized_keys(application_name, user)) or + os.path.isfile(known_hosts(application_name, user))): + return + + keys = ssh_authorized_keys_lines(application_name, user=None) + keys = [k.strip() for k in keys] + + if public_key not in keys: + return + + [keys.remove(key) for key in keys if key == public_key] + + with open(authorized_keys(application_name, user), 'w') as _keys: + keys = '\n'.join(keys) + if not keys.endswith('\n'): + keys += '\n' + _keys.write(keys) + + +def get_ssh_settings(application_name, user=None): + """Retrieve the known host entries and public keys for application + + Retrieve the known host entries and public keys for application for all + units of the given application related to this application for the + app + user combination. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :param user: The user that the ssh asserts are for. 
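A hedged sketch of a relation hook wiring these helpers together; the relation key 'ssh_public_key' and the application name are illustrative, not taken from this patch:

    from charmhelpers.core.hookenv import relation_get, relation_set
    from charmhelpers.contrib.openstack.ssh_migrations import (
        get_ssh_settings,
        ssh_compute_add,
    )

    def cloud_compute_relation_changed(rid=None, unit=None):
        public_key = relation_get('ssh_public_key', rid=rid, unit=unit)
        if public_key:
            ssh_compute_add(public_key, 'nova-compute', rid=rid, unit=unit)
            # Publish the accumulated known_hosts/authorized_keys entries so
            # the remote units can pick them up.
            relation_set(relation_id=rid, **get_ssh_settings('nova-compute'))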
+ :type user: str + :returns: Public keys + host keys for all units for app + user combination. + :rtype: dict + """ + settings = {} + keys = {} + prefix = '' + if user: + prefix = '{}_'.format(user) + + for i, line in enumerate(ssh_known_hosts_lines( + application_name=application_name, user=user)): + settings['{}known_hosts_{}'.format(prefix, i)] = line + if settings: + settings['{}known_hosts_max_index'.format(prefix)] = len( + settings.keys()) + + for i, line in enumerate(ssh_authorized_keys_lines( + application_name=application_name, user=user)): + keys['{}authorized_keys_{}'.format(prefix, i)] = line + if keys: + keys['{}authorized_keys_max_index'.format(prefix)] = len(keys.keys()) + settings.update(keys) + return settings + + +def get_all_user_ssh_settings(application_name): + """Retrieve the known host entries and public keys for application + + Retrieve the known host entries and public keys for application for all + units of the given application related to this application for root user + and nova user. + + :param application_name: Name of application eg nova-compute-something + :type application_name: str + :returns: Public keys + host keys for all units for app + user combination. + :rtype: dict + """ + settings = get_ssh_settings(application_name) + settings.update(get_ssh_settings(application_name, user='nova')) + return settings diff --git a/charmhelpers/contrib/openstack/templates/logrotate b/charmhelpers/contrib/openstack/templates/logrotate new file mode 100644 index 0000000000000000000000000000000000000000..b2900d09a4ec2d04152ed7ce25bdc7346c349675 --- /dev/null +++ b/charmhelpers/contrib/openstack/templates/logrotate @@ -0,0 +1,9 @@ +/var/log/{{ logrotate_logs_location }}/*.log { + {{ logrotate_interval }} + {{ logrotate_count }} + compress + delaycompress + missingok + notifempty + copytruncate +} diff --git a/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka index 8e6889e0221e7bbb3db6c8bbb8f5b697e6d40b8d..c281868b16a885cd01a234984974af0349a5d242 100644 --- a/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka +++ b/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka @@ -1,12 +1,14 @@ {% if auth_host -%} [keystone_authtoken] -auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} -auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} auth_type = password {% if api_version == "3" -%} +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v3 +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v3 project_domain_name = {{ admin_domain_name }} user_domain_name = {{ admin_domain_name }} {% else -%} +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} project_domain_name = default user_domain_name = default {% endif -%} diff --git a/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only b/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only new file mode 100644 index 0000000000000000000000000000000000000000..d26a91fe1f00e2b12d094be727a53eddc87b829d --- /dev/null +++ b/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only @@ -0,0 +1,9 @@ +{% if auth_host -%} +[keystone_authtoken] +{% for option_name, option_value in keystone_authtoken.items() -%} +{{ option_name }} = {{ option_value }} +{% endfor -%} +{% if use_memcache 
== true %} +memcached_servers = {{ memcache_url }} +{% endif -%} +{% endif -%} diff --git a/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit b/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit new file mode 100644 index 0000000000000000000000000000000000000000..bed2216aba7217022ded17dec4cdb0871f513b40 --- /dev/null +++ b/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit @@ -0,0 +1,10 @@ +[oslo_messaging_rabbit] +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +{% endif -%} +{% if rabbit_ssl_port -%} +ssl = True +{% endif -%} +{% if rabbit_ssl_ca -%} +ssl_ca_file = {{ rabbit_ssl_ca }} +{% endif -%} diff --git a/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata b/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata new file mode 100644 index 0000000000000000000000000000000000000000..365f43757719b2de3c601ea6a9752dac8a8b3545 --- /dev/null +++ b/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata @@ -0,0 +1,10 @@ +[oslo_messaging_rabbit] +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +{% endif -%} +{% if rabbit_ssl_port -%} +rabbit_use_ssl = True +{% endif -%} +{% if rabbit_ssl_ca -%} +ssl_ca_file = {{ rabbit_ssl_ca }} +{% endif -%} diff --git a/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/charmhelpers/contrib/openstack/templates/section-oslo-notifications index 5dccd4bb3943ff209bd820908baf7e77cb44649a..71c7eb068eace94e8986d7a868bded236f75128c 100644 --- a/charmhelpers/contrib/openstack/templates/section-oslo-notifications +++ b/charmhelpers/contrib/openstack/templates/section-oslo-notifications @@ -1,8 +1,15 @@ {% if transport_url -%} [oslo_messaging_notifications] -driver = messagingv2 +driver = {{ oslo_messaging_driver }} transport_url = {{ transport_url }} +{% if send_notifications_to_logs %} +driver = log +{% endif %} {% if notification_topics -%} topics = {{ notification_topics }} {% endif -%} +{% if notification_format -%} +[notifications] +notification_format = {{ notification_format }} +{% endif -%} {% endif -%} diff --git a/charmhelpers/contrib/openstack/templates/vendor_data.json b/charmhelpers/contrib/openstack/templates/vendor_data.json new file mode 100644 index 0000000000000000000000000000000000000000..904f612a7f74490d4508920400d18d493d056477 --- /dev/null +++ b/charmhelpers/contrib/openstack/templates/vendor_data.json @@ -0,0 +1 @@ +{{ vendor_data_json }} \ No newline at end of file diff --git a/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf index a3841ea6dcb50a16b204d88c1e944f6e678e52ee..6c25f258c57c95c1565a62c1ab82776c7dcfbd75 100644 --- a/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf +++ b/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf @@ -21,7 +21,7 @@ Listen {{ public_port }} {% if port -%} <VirtualHost *:{{ port }}> - WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ + WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \ {% if python_path -%} python-path={{ python_path }} \ {% endif -%} @@ -50,7 +50,7 @@ Listen {{ public_port }} {% if admin_port -%} <VirtualHost *:{{ admin_port }}> - WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} 
\ + WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ {% if python_path -%} python-path={{ python_path }} \ {% endif -%} @@ -85,7 +85,7 @@ Listen {{ public_port }} DocumentRoot /var/www/html Alias /eds /var/www/html/eds - WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \ + WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ {% if python_path -%} python-path={{ python_path }} \ {% endif -%} diff --git a/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf b/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf new file mode 100644 index 0000000000000000000000000000000000000000..23b62a385283e6b8f1a6af0fcebccac747031b26 --- /dev/null +++ b/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf @@ -0,0 +1,91 @@ +# Configuration file maintained by Juju. Local changes may be overwritten. + +{% if port -%} +Listen {{ port }} +{% endif -%} + +{% if admin_port -%} +Listen {{ admin_port }} +{% endif -%} + +{% if public_port -%} +Listen {{ public_port }} +{% endif -%} + +{% if port -%} +<VirtualHost *:{{ port }}> + WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \ + display-name=%{GROUP} + WSGIProcessGroup {{ service_name }} + WSGIScriptAlias / {{ script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog /var/log/apache2/{{ service_name }}_error.log + CustomLog /var/log/apache2/{{ service_name }}_access.log combined + + <Directory /usr/bin> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> +{% endif -%} + +{% if admin_port -%} +<VirtualHost *:{{ admin_port }}> + WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ + display-name=%{GROUP} + WSGIProcessGroup {{ service_name }}-admin + WSGIScriptAlias / {{ admin_script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog /var/log/apache2/{{ service_name }}_error.log + CustomLog /var/log/apache2/{{ service_name }}_access.log combined + + <Directory /usr/bin> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> +{% endif -%} + +{% if public_port -%} +<VirtualHost *:{{ public_port }}> + WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ + display-name=%{GROUP} + WSGIProcessGroup {{ service_name }}-public + WSGIScriptAlias / {{ public_script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog /var/log/apache2/{{ service_name }}_error.log + CustomLog /var/log/apache2/{{ service_name }}_access.log combined + + <Directory /usr/bin> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> +{% endif -%} diff --git 
a/charmhelpers/contrib/openstack/templating.py b/charmhelpers/contrib/openstack/templating.py index edefcfe900eef85f9441428259bd9997a9801af9..1a5f37e8ac1b28def47d24ac8f6aefd140ffe60f 100644 --- a/charmhelpers/contrib/openstack/templating.py +++ b/charmhelpers/contrib/openstack/templating.py @@ -172,7 +172,7 @@ class OSConfigRenderer(object): /tmp/templates/grizzly/api-paste.ini /tmp/templates/havana/api-paste.ini - Since it was registered with the grizzly release, it first seraches + Since it was registered with the grizzly release, it first searches the grizzly directory for nova.conf, then the templates dir. When writing api-paste.ini, it will find the template in the grizzly diff --git a/charmhelpers/contrib/openstack/utils.py b/charmhelpers/contrib/openstack/utils.py index 8a541d4087662120d26f480262181413380374b8..32911d2c3772cfd0f8024d03f2f5fc7b0699908d 100644 --- a/charmhelpers/contrib/openstack/utils.py +++ b/charmhelpers/contrib/openstack/utils.py @@ -80,6 +80,8 @@ from charmhelpers.core.host import ( service_running, service_pause, service_resume, + service_stop, + service_start, restart_on_change_helper, ) from charmhelpers.fetch import ( @@ -89,7 +91,9 @@ from charmhelpers.fetch import ( add_source as fetch_add_source, SourceConfigError, GPGKeyError, - get_upstream_version + get_upstream_version, + filter_missing_packages, + ubuntu_apt_pkg as apt, ) from charmhelpers.fetch.snap import ( @@ -101,6 +105,10 @@ from charmhelpers.fetch.snap import ( from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device from charmhelpers.contrib.openstack.exceptions import OSContextError +from charmhelpers.contrib.openstack.policyd import ( + policyd_status_message_prefix, + POLICYD_CONFIG_NAME, +) CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' @@ -124,6 +132,8 @@ OPENSTACK_RELEASES = ( 'pike', 'queens', 'rocky', + 'stein', + 'train', ) UBUNTU_OPENSTACK_RELEASE = OrderedDict([ @@ -141,6 +151,9 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('zesty', 'ocata'), ('artful', 'pike'), ('bionic', 'queens'), + ('cosmic', 'rocky'), + ('disco', 'stein'), + ('eoan', 'train'), ]) @@ -159,6 +172,9 @@ OPENSTACK_CODENAMES = OrderedDict([ ('2017.1', 'ocata'), ('2017.2', 'pike'), ('2018.1', 'queens'), + ('2018.2', 'rocky'), + ('2019.1', 'stein'), + ('2019.2', 'train'), ]) # The ugly duckling - must list releases oldest to newest @@ -190,7 +206,13 @@ SWIFT_CODENAMES = OrderedDict([ ('pike', ['2.13.0', '2.15.0']), ('queens', - ['2.16.0']), + ['2.16.0', '2.17.0']), + ('rocky', + ['2.18.0', '2.19.0']), + ('stein', + ['2.20.0', '2.21.0']), + ('train', + ['2.22.0']), ]) # >= Liberty version->codename mapping @@ -203,6 +225,8 @@ PACKAGE_CODENAMES = { ('16', 'pike'), ('17', 'queens'), ('18', 'rocky'), + ('19', 'stein'), + ('20', 'train'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -212,6 +236,8 @@ PACKAGE_CODENAMES = { ('11', 'pike'), ('12', 'queens'), ('13', 'rocky'), + ('14', 'stein'), + ('15', 'train'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -221,6 +247,8 @@ PACKAGE_CODENAMES = { ('11', 'pike'), ('12', 'queens'), ('13', 'rocky'), + ('14', 'stein'), + ('15', 'train'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -230,6 +258,8 @@ PACKAGE_CODENAMES = { ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), + ('16', 'train'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -239,6 +269,8 @@ 
PACKAGE_CODENAMES = { ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), + ('16', 'train'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -248,6 +280,8 @@ PACKAGE_CODENAMES = { ('9', 'pike'), ('10', 'queens'), ('11', 'rocky'), + ('12', 'stein'), + ('13', 'train'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -257,6 +291,8 @@ PACKAGE_CODENAMES = { ('9', 'pike'), ('10', 'queens'), ('11', 'rocky'), + ('12', 'stein'), + ('13', 'train'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -266,6 +302,8 @@ PACKAGE_CODENAMES = { ('15', 'pike'), ('16', 'queens'), ('17', 'rocky'), + ('18', 'stein'), + ('19', 'train'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -275,6 +313,8 @@ PACKAGE_CODENAMES = { ('12', 'pike'), ('13', 'queens'), ('14', 'rocky'), + ('15', 'stein'), + ('16', 'train'), ]), } @@ -318,13 +358,22 @@ def error_out(msg): sys.exit(1) +def get_installed_semantic_versioned_packages(): + '''Get a list of installed packages which have OpenStack semantic versioning + + :returns List of installed packages + :rtype: [pkg1, pkg2, ...] + ''' + return filter_missing_packages(PACKAGE_CODENAMES.keys()) + + def get_os_codename_install_source(src): '''Derive OpenStack release codename from a given installation source.''' ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] rel = '' if src is None: return rel - if src in ['distro', 'distro-proposed']: + if src in ['distro', 'distro-proposed', 'proposed']: try: rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] except KeyError: @@ -398,7 +447,7 @@ def get_swift_codename(version): return codenames[0] # NOTE: fallback - attempt to match with just major.minor version - match = re.match('^(\d+)\.(\d+)', version) + match = re.match(r'^(\d+)\.(\d+)', version) if match: major_minor_version = match.group(0) for codename, versions in six.iteritems(SWIFT_CODENAMES): @@ -418,7 +467,7 @@ def get_os_codename_package(package, fatal=True): out = subprocess.check_output(cmd) if six.PY3: out = out.decode('UTF-8') - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: return None lines = out.split('\n') for line in lines: @@ -426,8 +475,6 @@ def get_os_codename_package(package, fatal=True): # Second item in list is Version return line.split()[1] - import apt_pkg as apt - cache = apt_cache() try: @@ -450,11 +497,11 @@ def get_os_codename_package(package, fatal=True): vers = apt.upstream_version(pkg.current_ver.ver_str) if 'swift' in pkg.name: # Fully x.y.z match for swift versions - match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) + match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers) else: # x.y match only for 20XX.X # and ignore patch level for other packages - match = re.match('^(\d+)\.(\d+)', vers) + match = re.match(r'^(\d+)\.(\d+)', vers) if match: vers = match.group(0) @@ -511,7 +558,7 @@ def reset_os_release(): _os_rel = None -def os_release(package, base='essex', reset_cache=False): +def os_release(package, base=None, reset_cache=False): ''' Returns OpenStack release codename from a cached global. @@ -522,6 +569,8 @@ def os_release(package, base='essex', reset_cache=False): the installation source, the earliest release supported by the charm should be returned. ''' + if not base: + base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']] global _os_rel if reset_cache: reset_os_release() @@ -642,7 +691,6 @@ def openstack_upgrade_available(package): a newer version of package. 
""" - import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) if not cur_vers: @@ -652,14 +700,12 @@ def openstack_upgrade_available(package): codename = get_os_codename_install_source(src) avail_vers = get_os_version_codename_swift(codename) else: - avail_vers = get_os_version_install_source(src) + try: + avail_vers = get_os_version_install_source(src) + except: + avail_vers = cur_vers apt.init() - if "swift" in package: - major_cur_vers = cur_vers.split('.', 1)[0] - major_avail_vers = avail_vers.split('.', 1)[0] - major_diff = apt.version_compare(major_avail_vers, major_cur_vers) - return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0) - return apt.version_compare(avail_vers, cur_vers) == 1 + return apt.version_compare(avail_vers, cur_vers) >= 1 def ensure_block_device(block_device): @@ -1264,6 +1310,12 @@ def _determine_os_workload_status( message = "Unit is ready" juju_log(message, 'INFO') + try: + if config(POLICYD_CONFIG_NAME): + message = "{} {}".format(policyd_status_message_prefix(), message) + except Exception: + pass + return state, message @@ -1271,12 +1323,25 @@ def _ows_check_if_paused(services=None, ports=None): """Check if the unit is supposed to be paused, and if so check that the services/ports (if passed) are actually stopped/not being listened to. - if the unit isn't supposed to be paused, just return None, None + If the unit isn't supposed to be paused, just return None, None + + If the unit is performing a series upgrade, return a message indicating + this. @param services: OPTIONAL services spec or list of service names. @param ports: OPTIONAL list of port numbers. @returns state, message or None, None """ + if is_unit_upgrading_set(): + state, message = check_actually_paused(services=services, + ports=ports) + if state is None: + # we're paused okay, so set maintenance and return + state = "blocked" + message = ("Ready for do-release-upgrade and reboot. " + "Set complete when finished.") + return state, message + if is_unit_paused_set(): state, message = check_actually_paused(services=services, ports=ports) @@ -1383,7 +1448,9 @@ def _ows_check_charm_func(state, message, charm_func_with_configs): """ if charm_func_with_configs: charm_state, charm_message = charm_func_with_configs() - if charm_state != 'active' and charm_state != 'unknown': + if (charm_state != 'active' and + charm_state != 'unknown' and + charm_state is not None): state = workload_state_compare(state, charm_state) if message: charm_message = charm_message.replace("Incomplete relations: ", @@ -1655,7 +1722,7 @@ def remote_restart(rel_name, remote_service=None): def check_actually_paused(services=None, ports=None): - """Check that services listed in the services object and and ports + """Check that services listed in the services object and ports are actually closed (not listened to), to verify that the unit is properly paused. @@ -1733,6 +1800,65 @@ def is_unit_paused_set(): return False +def manage_payload_services(action, services=None, charm_func=None): + """Run an action against all services. + + An optional charm_func() can be called. It should raise an Exception to + indicate that the function failed. If it was succesfull it should return + None or an optional message. + + The signature for charm_func is: + charm_func() -> message: str + + charm_func() is executed after any services are stopped, if supplied. 
+ + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + :param action: Action to run: pause, resume, start or stop. + :type action: str + :param services: See above + :type services: See above + :param charm_func: function to run for custom charm pausing. + :type charm_func: f() + :returns: Status boolean and list of messages + :rtype: (bool, []) + :raises: RuntimeError + """ + actions = { + 'pause': service_pause, + 'resume': service_resume, + 'start': service_start, + 'stop': service_stop} + action = action.lower() + if action not in actions.keys(): + raise RuntimeError( + "action: {} must be one of: {}".format(action, + ', '.join(actions.keys()))) + services = _extract_services_list_helper(services) + messages = [] + success = True + if services: + for service in services.keys(): + rc = actions[action](service) + if not rc: + success = False + messages.append("{} didn't {} cleanly.".format(service, + action)) + if charm_func: + try: + message = charm_func() + if message: + messages.append(message) + except Exception as e: + success = False + messages.append(str(e)) + return success, messages + + def pause_unit(assess_status_func, services=None, ports=None, charm_func=None): """Pause a unit by stopping the services and setting 'unit-paused' @@ -1763,26 +1889,16 @@ def pause_unit(assess_status_func, services=None, ports=None, @returns None @raises Exception(message) on an error for action_fail(). """ - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - stopped = service_pause(service) - if not stopped: - messages.append("{} didn't stop cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) + _, messages = manage_payload_services( + 'pause', + services=services, + charm_func=charm_func) set_unit_paused() if assess_status_func: message = assess_status_func() if message: messages.append(message) - if messages: + if messages and not is_unit_upgrading_set(): raise Exception("Couldn't pause: {}".format("; ".join(messages))) @@ -1815,20 +1931,10 @@ def resume_unit(assess_status_func, services=None, ports=None, @returns None @raises Exception(message) on an error for action_fail(). """ - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - started = service_resume(service) - if not started: - messages.append("{} didn't start cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) + _, messages = manage_payload_services( + 'resume', + services=services, + charm_func=charm_func) clear_unit_paused() if assess_status_func: message = assess_status_func() @@ -1880,20 +1986,33 @@ def pausable_restart_on_change(restart_map, stopstart=False, see core.utils.restart_on_change() for more details. + Note restart_map can be a callable, in which case, restart_map is only + evaluated at runtime. This means that it is lazy and the underlying + function won't be called if the decorated function is never called. Note, + retains backwards compatibility for passing a non-callable dictionary. 
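A small hedged sketch of calling the new manage_payload_services() helper directly; the service names are illustrative:

    from charmhelpers.contrib.openstack.utils import manage_payload_services

    # Stop two (illustrative) payload services; returns overall success plus
    # one message per service that failed to stop cleanly.
    success, messages = manage_payload_services(
        'stop', services=['apache2', 'haproxy'])
    if not success:
        raise RuntimeError('; '.join(messages))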
+ @param f: the function to decorate - @param restart_map: the restart map {conf_file: [services]} + @param restart_map: (optionally callable, which then returns the + restart_map) the restart map {conf_file: [services]} @param stopstart: DEFAULT false; whether to stop, start or just restart @returns decorator to use a restart_on_change with pausability """ def wrap(f): + # py27 compatible nonlocal variable. When py3 only, replace with + # nonlocal keyword + __restart_map_cache = {'cache': None} + @functools.wraps(f) def wrapped_f(*args, **kwargs): if is_unit_paused_set(): return f(*args, **kwargs) + if __restart_map_cache['cache'] is None: + __restart_map_cache['cache'] = restart_map() \ + if callable(restart_map) else restart_map # otherwise, normal restart_on_change functionality return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart, - restart_functions) + (lambda: f(*args, **kwargs)), __restart_map_cache['cache'], + stopstart, restart_functions) return wrapped_f return wrap @@ -2021,7 +2140,7 @@ def enable_memcache(source=None, release=None, package=None): if release: _release = release else: - _release = os_release(package, base='icehouse') + _release = os_release(package) if not _release: _release = get_os_codename_install_source(source) @@ -2121,3 +2240,62 @@ def install_os_snaps(snaps, refresh=False): snap_install(snap, _ensure_flag(snaps[snap]['channel']), _ensure_flag(snaps[snap]['mode'])) + + +def set_unit_upgrading(): + """Set the unit to a upgrading state in the local kv() store. + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-upgrading', True) + + +def clear_unit_upgrading(): + """Clear the unit from a upgrading state in the local kv() store + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-upgrading', False) + + +def is_unit_upgrading_set(): + """Return the state of the kv().get('unit-upgrading'). + + To help with units that don't have HookData() (testing) + if it excepts, return False + """ + try: + with unitdata.HookData()() as t: + kv = t[0] + # transform something truth-y into a Boolean. + return not(not(kv.get('unit-upgrading'))) + except Exception: + return False + + +def series_upgrade_prepare(pause_unit_helper=None, configs=None): + """ Run common series upgrade prepare tasks. + + :param pause_unit_helper: function: Function to pause unit + :param configs: OSConfigRenderer object: Configurations + :returns None: + """ + set_unit_upgrading() + if pause_unit_helper and configs: + if not is_unit_paused_set(): + pause_unit_helper(configs) + + +def series_upgrade_complete(resume_unit_helper=None, configs=None): + """ Run common series upgrade complete tasks. + + :param resume_unit_helper: function: Function to resume unit + :param configs: OSConfigRenderer object: Configurations + :returns None: + """ + clear_unit_paused() + clear_unit_upgrading() + if configs: + configs.write_all() + if resume_unit_helper: + resume_unit_helper(configs) diff --git a/tests/gate-basic-xenial-mitaka b/charmhelpers/contrib/python.py old mode 100755 new mode 100644 similarity index 61% rename from tests/gate-basic-xenial-mitaka rename to charmhelpers/contrib/python.py index f8f11663a3ba47e2b8765dbd977ce05ba9b241e0..84cba8c4eba34fdd705f4ee39628ebd33b5175a2 --- a/tests/gate-basic-xenial-mitaka +++ b/charmhelpers/contrib/python.py @@ -1,6 +1,4 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd +# Copyright 2014-2019 Canonical Limited. 
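A rough sketch of wiring the new series-upgrade helpers into hook handlers; in a real charm a pause/resume helper and a configs object would normally be passed in, but both are omitted here to keep the sketch minimal:

    from charmhelpers.contrib.openstack.utils import (
        series_upgrade_prepare,
        series_upgrade_complete,
    )

    def pre_series_upgrade():
        # Flags the unit as upgrading; with no pause helper or configs
        # supplied, pausing is skipped in this minimal sketch.
        series_upgrade_prepare()

    def post_series_upgrade():
        # Clears the paused/upgrading flags; nothing to resume or rewrite here.
        series_upgrade_complete()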
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Amulet tests on a basic keystone deployment on xenial-mitaka.""" - -from basic_deployment import KeystoneBasicDeployment +from __future__ import absolute_import -if __name__ == '__main__': - deployment = KeystoneBasicDeployment(series='xenial') - deployment.run_tests() +# deprecated aliases for backwards compatibility +from charmhelpers.fetch.python import debug # noqa +from charmhelpers.fetch.python import packages # noqa +from charmhelpers.fetch.python import rpdb # noqa +from charmhelpers.fetch.python import version # noqa diff --git a/charmhelpers/contrib/storage/linux/ceph.py b/charmhelpers/contrib/storage/linux/ceph.py index 392316126b3799796c786aead740f88e87b8a71e..3eb0d241aad24ec881b0d3ed541d82f8f582e411 100644 --- a/charmhelpers/contrib/storage/linux/ceph.py +++ b/charmhelpers/contrib/storage/linux/ceph.py @@ -59,6 +59,7 @@ from charmhelpers.core.host import ( service_stop, service_running, umount, + cmp_pkgrevno, ) from charmhelpers.fetch import ( apt_install, @@ -178,7 +179,6 @@ class Pool(object): """ # read-only is easy, writeback is much harder mode = get_cache_mode(self.service, cache_pool) - version = ceph_version() if mode == 'readonly': check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) @@ -186,7 +186,7 @@ class Pool(object): elif mode == 'writeback': pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'] - if version >= '10.1': + if cmp_pkgrevno('ceph-common', '10.1') >= 0: # Jewel added a mandatory flag pool_forward_cmd.append('--yes-i-really-mean-it') @@ -196,7 +196,8 @@ class Pool(object): check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) - def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT): + def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, + device_class=None): """Return the number of placement groups to use when creating the pool. Returns the number of placement groups which should be specified when @@ -229,6 +230,9 @@ class Pool(object): increased. NOTE: the default is primarily to handle the scenario where related charms requiring pools has not been upgraded to include an update to indicate their relative usage of the pools. + :param device_class: str. class of storage to use for basis of pgs + calculation; ceph supports nvme, ssd and hdd by default based + on presence of devices of each type in the deployment. :return: int. The number of pgs to use. """ @@ -243,17 +247,20 @@ class Pool(object): # If the expected-osd-count is specified, then use the max between # the expected-osd-count and the actual osd_count - osd_list = get_osds(self.service) + osd_list = get_osds(self.service, device_class) expected = config('expected-osd-count') or 0 if osd_list: - osd_count = max(expected, len(osd_list)) + if device_class: + osd_count = len(osd_list) + else: + osd_count = max(expected, len(osd_list)) # Log a message to provide some insight if the calculations claim # to be off because someone is setting the expected count and # there are more OSDs in reality. 
Try to make a proper guess # based upon the cluster itself. - if expected and osd_count != expected: + if not device_class and expected and osd_count != expected: log("Found more OSDs than provided expected count. " "Using the actual count instead", INFO) elif expected: @@ -294,6 +301,7 @@ class ReplicatedPool(Pool): percent_data=10.0): super(ReplicatedPool, self).__init__(service=service, name=name) self.replicas = replicas + self.percent_data = percent_data if pg_num: # Since the number of placement groups were specified, ensure # that there aren't too many created. @@ -313,6 +321,24 @@ class ReplicatedPool(Pool): update_pool(client=self.service, pool=self.name, settings={'size': str(self.replicas)}) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + if nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool(client=self.service, + pool=self.name, + settings={'target_size_ratio': str(self.percent_data / 100.0)}) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}'.format( + self.name, e, level=WARNING)) except CalledProcessError: raise @@ -355,6 +381,24 @@ class ErasurePool(Pool): 'erasure', self.erasure_code_profile] try: check_call(cmd) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}'.format(self.name, level=WARNING)) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + if nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool(client=self.service, + pool=self.name, + settings={'target_size_ratio': str(self.percent_data / 100.0)}) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}'.format( + self.name, e, level=WARNING)) except CalledProcessError: raise @@ -362,6 +406,32 @@ class ErasurePool(Pool): Returns json formatted output""" +def enabled_manager_modules(): + """Return a list of enabled manager modules. + + :rtype: List[str] + """ + cmd = ['ceph', 'mgr', 'module', 'ls'] + try: + modules = check_output(cmd) + except CalledProcessError as e: + log("Failed to list ceph modules: {}".format(e), WARNING) + return [] + modules = json.loads(modules) + return modules['enabled_modules'] + + +def enable_pg_autoscale(service, pool_name): + """ + Enable Ceph's PG autoscaler for the specified pool. + + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types. The name of the pool to enable sutoscaling on + :raise: CalledProcessError if the command fails + """ + check_call(['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on']) + + def get_mon_map(service): """ Returns the current monitor map. @@ -554,21 +624,24 @@ def remove_pool_snapshot(service, pool_name, snapshot_name): raise -# max_bytes should be an int or long -def set_pool_quota(service, pool_name, max_bytes): +def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): """ - :param service: six.string_types. 
The Ceph user name to run the command under - :param pool_name: six.string_types - :param max_bytes: int or long - :return: None. Can raise CalledProcessError + :param service: The Ceph user name to run the command under + :type service: str + :param pool_name: Name of pool + :type pool_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int + :raises: subprocess.CalledProcessError """ - # Set a byte quota on a RADOS pool in ceph. - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, - 'max_bytes', str(max_bytes)] - try: - check_call(cmd) - except CalledProcessError: - raise + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name] + if max_bytes: + cmd = cmd + ['max_bytes', str(max_bytes)] + if max_objects: + cmd = cmd + ['max_objects', str(max_objects)] + check_call(cmd) def remove_pool_quota(service, pool_name): @@ -605,7 +678,8 @@ def remove_erasure_profile(service, profile_name): def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', data_chunks=2, coding_chunks=1, - locality=None, durability_estimator=None): + locality=None, durability_estimator=None, + device_class=None): """ Create a new erasure code profile if one does not already exist for it. Updates the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ @@ -619,6 +693,7 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' :param coding_chunks: int :param locality: int :param durability_estimator: int + :param device_class: six.string_types :return: None. Can raise CalledProcessError """ # Ensure this failure_domain is allowed by Ceph @@ -631,6 +706,20 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure' if locality is not None and durability_estimator is not None: raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 + # failure_domain changed in luminous + if luminous_or_later: + cmd.append('crush-failure-domain=' + failure_domain) + else: + cmd.append('ruleset-failure-domain=' + failure_domain) + + # device class new in luminous + if luminous_or_later and device_class: + cmd.append('crush-device-class={}'.format(device_class)) + else: + log('Skipping device class configuration (ceph < 12.0.0)', + level=DEBUG) + # Add plugin specific information if locality is not None: # For local erasure codes @@ -715,20 +804,26 @@ def pool_exists(service, name): return name in out.split() -def get_osds(service): +def get_osds(service, device_class=None): """Return a list of all Ceph Object Storage Daemons currently in the - cluster. + cluster (optionally filtered by storage device class). 
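For illustration, the reworked set_pool_quota() can now apply byte and object quotas in a single call; the 'admin' client and 'glance' pool names below are hypothetical:

    from charmhelpers.contrib.storage.linux.ceph import set_pool_quota

    # Apply both quota types to a hypothetical 'glance' pool as client 'admin'.
    set_pool_quota('admin', 'glance',
                   max_bytes=100 * 1024 ** 3,   # 100 GiB
                   max_objects=500000)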
+ + :param device_class: Class of storage device for OSD's + :type device_class: str """ - version = ceph_version() - if version and version >= '0.56': + luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 + if luminous_or_later and device_class: + out = check_output(['ceph', '--id', service, + 'osd', 'crush', 'class', + 'ls-osd', device_class, + '--format=json']) + else: out = check_output(['ceph', '--id', service, 'osd', 'ls', '--format=json']) - if six.PY3: - out = out.decode('UTF-8') - return json.loads(out) - - return None + if six.PY3: + out = out.decode('UTF-8') + return json.loads(out) def install(): @@ -769,6 +864,25 @@ def update_pool(client, pool, settings): check_call(cmd) +def set_app_name_for_pool(client, pool, name): + """ + Calls `osd pool application enable` for the specified pool name + + :param client: Name of the ceph client to use + :type client: str + :param pool: Pool to set app name for + :type pool: str + :param name: app name for the specified pool + :type name: str + + :raises: CalledProcessError if ceph call fails + """ + if cmp_pkgrevno('ceph-common', '12.0.0') >= 0: + cmd = ['ceph', '--id', client, 'osd', 'pool', + 'application', 'enable', pool, name] + check_call(cmd) + + def create_pool(service, name, replicas=3, pg_num=None): """Create a new RADOS pool.""" if pool_exists(service, name): @@ -808,12 +922,22 @@ def _keyring_path(service): return KEYRING.format(service) -def create_keyring(service, key): - """Create a new Ceph keyring containing key.""" +def add_key(service, key): + """ + Add a key to a keyring. + + Creates the keyring if it doesn't already exist. + + Logs and returns if the key is already in the keyring. + """ keyring = _keyring_path(service) if os.path.exists(keyring): - log('Ceph keyring exists at %s.' % keyring, level=WARNING) - return + with open(keyring, 'r') as ring: + if key in ring.read(): + log('Ceph keyring exists at %s and has not changed.' % keyring, + level=DEBUG) + return + log('Updating existing keyring %s.' % keyring, level=DEBUG) cmd = ['ceph-authtool', keyring, '--create-keyring', '--name=client.{}'.format(service), '--add-key={}'.format(key)] @@ -821,6 +945,11 @@ def create_keyring(service, key): log('Created new ceph keyring at %s.' % keyring, level=DEBUG) +def create_keyring(service, key): + """Deprecated. 
Please use the more accurately named 'add_key'""" + return add_key(service, key) + + def delete_keyring(service): """Delete an existing Ceph keyring.""" keyring = _keyring_path(service) @@ -857,7 +986,7 @@ def get_ceph_nodes(relation='ceph'): def configure(service, key, auth, use_syslog): """Perform basic configuration of Ceph.""" - create_keyring(service, key) + add_key(service, key) create_key_file(service, key) hosts = get_ceph_nodes() with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: @@ -1020,7 +1149,7 @@ def ensure_ceph_keyring(service, user=None, group=None, if not key: return False - create_keyring(service=service, key=key) + add_key(service=service, key=key) keyring = _keyring_path(service) if user and group: check_call(['chown', '%s.%s' % (user, group), keyring]) @@ -1028,22 +1157,6 @@ def ensure_ceph_keyring(service, user=None, group=None, return True -def ceph_version(): - """Retrieve the local version of ceph.""" - if os.path.exists('/usr/bin/ceph'): - cmd = ['ceph', '-v'] - output = check_output(cmd) - if six.PY3: - output = output.decode('UTF-8') - output = output.split() - if len(output) > 3: - return output[2] - else: - return None - else: - return None - - class CephBrokerRq(object): """Ceph broker request. @@ -1063,6 +1176,15 @@ class CephBrokerRq(object): self.request_id = str(uuid.uuid1()) self.ops = [] + def add_op(self, op): + """Add an op if it is not already in the list. + + :param op: Operation to add. + :type op: dict + """ + if op not in self.ops: + self.ops.append(op) + def add_op_request_access_to_group(self, name, namespace=None, permission=None, key_name=None): """ @@ -1074,22 +1196,89 @@ class CephBrokerRq(object): 'group-permission': permission}) def add_op_create_pool(self, name, replica_count=3, pg_num=None, - weight=None, group=None, namespace=None): - """Adds an operation to create a pool. - - @param pg_num setting: optional setting. If not provided, this value - will be calculated by the broker based on how many OSDs are in the - cluster at the time of creation. Note that, if provided, this value - will be capped at the current available maximum. - @param weight: the percentage of data the pool makes up + weight=None, group=None, namespace=None, + app_name=None, max_bytes=None, max_objects=None): + """DEPRECATED: Use ``add_op_create_replicated_pool()`` or + ``add_op_create_erasure_pool()`` instead. + """ + return self.add_op_create_replicated_pool( + name, replica_count=replica_count, pg_num=pg_num, weight=weight, + group=group, namespace=namespace, app_name=app_name, + max_bytes=max_bytes, max_objects=max_objects) + + def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, + weight=None, group=None, namespace=None, + app_name=None, max_bytes=None, + max_objects=None): + """Adds an operation to create a replicated pool. + + :param name: Name of pool to create + :type name: str + :param replica_count: Number of copies Ceph should keep of your data. + :type replica_count: int + :param pg_num: Request specific number of Placement Groups to create + for pool. + :type pg_num: int + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + Used to calculate number of Placement Groups to create + for pool. + :type weight: float + :param group: Group to add pool to + :type group: str + :param namespace: Group namespace + :type namespace: str + :param app_name: (Optional) Tag pool with application name. 
Note that + there is certain protocols emerging upstream with + regard to meaningful application names to use. + Examples are ``rbd`` and ``rgw``. + :type app_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int """ if pg_num and weight: raise ValueError('pg_num and weight are mutually exclusive') - self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight, 'group': group, - 'group-namespace': namespace}) + self.add_op({'op': 'create-pool', 'name': name, + 'replicas': replica_count, 'pg_num': pg_num, + 'weight': weight, 'group': group, + 'group-namespace': namespace, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) + + def add_op_create_erasure_pool(self, name, erasure_profile=None, + weight=None, group=None, app_name=None, + max_bytes=None, max_objects=None): + """Adds an operation to create a erasure coded pool. + + :param name: Name of pool to create + :type name: str + :param erasure_profile: Name of erasure code profile to use. If not + set the ceph-mon unit handling the broker + request will set its default value. + :type erasure_profile: str + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + :type weight: float + :param group: Group to add pool to + :type group: str + :param app_name: (Optional) Tag pool with application name. Note that + there is certain protocols emerging upstream with + regard to meaningful application names to use. + Examples are ``rbd`` and ``rgw``. + :type app_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int + """ + self.add_op({'op': 'create-pool', 'name': name, + 'pool-type': 'erasure', + 'erasure-profile': erasure_profile, + 'weight': weight, + 'group': group, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) def set_ops(self, ops): """Set request ops to provided value. @@ -1331,13 +1520,28 @@ def send_request_if_needed(request, relation='ceph'): relation_set(relation_id=rid, broker_req=request.request) +def has_broker_rsp(rid=None, unit=None): + """Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data. + + :param rid: The relation to check (default of None means current relation) + :type rid: Union[str, None] + :param unit: The remote unit to check (default of None means current unit) + :type unit: Union[str, None] + :returns: True if broker key exists and is set to something 'truthy' + :rtype: bool + """ + rdata = relation_get(rid=rid, unit=unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + return True if broker_rsp else False + + def is_broker_action_done(action, rid=None, unit=None): """Check whether broker action has completed yet. 
@param action: name of action to be performed @returns True if action complete otherwise False """ - rdata = relation_get(rid, unit) or {} + rdata = relation_get(rid=rid, unit=unit) or {} broker_rsp = rdata.get(get_broker_rsp_key()) if not broker_rsp: return False @@ -1359,7 +1563,7 @@ def mark_broker_action_done(action, rid=None, unit=None): @param action: name of action to be performed @returns None """ - rdata = relation_get(rid, unit) or {} + rdata = relation_get(rid=rid, unit=unit) or {} broker_rsp = rdata.get(get_broker_rsp_key()) if not broker_rsp: return diff --git a/charmhelpers/contrib/storage/linux/loopback.py b/charmhelpers/contrib/storage/linux/loopback.py index 1d6ae6f056bd08660abc0cbc664b326d74d88041..82472ff1b6d21eb8fba07bbcf93edd9d5f0960c3 100644 --- a/charmhelpers/contrib/storage/linux/loopback.py +++ b/charmhelpers/contrib/storage/linux/loopback.py @@ -36,10 +36,12 @@ def loopback_devices(): ''' loopbacks = {} cmd = ['losetup', '-a'] - devs = [d.strip().split(' ') for d in - check_output(cmd).splitlines() if d != ''] + output = check_output(cmd) + if six.PY3: + output = output.decode('utf-8') + devs = [d.strip().split(' ') for d in output.splitlines() if d != ''] for dev, _, f in devs: - loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] + loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0] return loopbacks diff --git a/charmhelpers/contrib/storage/linux/utils.py b/charmhelpers/contrib/storage/linux/utils.py index c9428894317a3285c870a08e8f3b53846739c552..a35617606cf52d7cffc04ac245811b770fd95e8e 100644 --- a/charmhelpers/contrib/storage/linux/utils.py +++ b/charmhelpers/contrib/storage/linux/utils.py @@ -17,12 +17,53 @@ import re from stat import S_ISBLK from subprocess import ( + CalledProcessError, check_call, check_output, call ) +def _luks_uuid(dev): + """ + Check to see if dev is a LUKS encrypted volume, returning the UUID + of volume if it is. + + :param: dev: path to block device to check. + :returns: str. UUID of LUKS device or None if not a LUKS device + """ + try: + cmd = ['cryptsetup', 'luksUUID', dev] + return check_output(cmd).decode('UTF-8').strip() + except CalledProcessError: + return None + + +def is_luks_device(dev): + """ + Determine if dev is a LUKS-formatted block device. + + :param: dev: A full path to a block device to check for LUKS header + presence + :returns: boolean: indicates whether a device is used based on LUKS header. + """ + return True if _luks_uuid(dev) else False + + +def is_mapped_luks_device(dev): + """ + Determine if dev is a mapped LUKS device + :param: dev: A full path to a block device to be checked + :returns: boolean: indicates whether a device is mapped + """ + _, dirs, _ = next(os.walk( + '/sys/class/block/{}/holders/' + .format(os.path.basename(os.path.realpath(dev)))) + ) + is_held = len(dirs) > 0 + return is_held and is_luks_device(dev) + + def is_block_device(path): ''' Confirm device at path is a valid block device node. @@ -67,3 +108,21 @@ def is_device_mounted(device): except Exception: return False return bool(re.search(r'MOUNTPOINT=".+"', out)) + + +def mkfs_xfs(device, force=False, inode_size=1024): + """Format device with XFS filesystem. + + By default this should fail if the device already has a filesystem on it. 
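As a quick, hedged sketch of the new storage utils (the device path is hypothetical, and the calls need root plus cryptsetup/xfsprogs on the unit):

    from charmhelpers.contrib.storage.linux.utils import (
        is_luks_device,
        is_mapped_luks_device,
        mkfs_xfs,
    )

    dev = '/dev/vdb'  # hypothetical block device
    if not (is_luks_device(dev) or is_mapped_luks_device(dev)):
        # force=False: refuse to overwrite an existing filesystem.
        mkfs_xfs(dev, force=False, inode_size=1024)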
+ :param device: Full path to device to format + :ptype device: tr + :param force: Force operation + :ptype: force: boolean + :param inode_size: XFS inode size in bytes + :ptype inode_size: int""" + cmd = ['mkfs.xfs'] + if force: + cmd.append("-f") + + cmd += ['-i', "size={}".format(inode_size), device] + check_call(cmd) diff --git a/charmhelpers/core/hookenv.py b/charmhelpers/core/hookenv.py index b2d0cc758cf63ed36028bc77aa59f937fb52e806..e69836295dab179393808c062c1fb6428384130b 100644 --- a/charmhelpers/core/hookenv.py +++ b/charmhelpers/core/hookenv.py @@ -46,6 +46,12 @@ INFO = "INFO" DEBUG = "DEBUG" TRACE = "TRACE" MARKER = object() +SH_MAX_ARG = 131071 + + +RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. ' + 'This may not be compatible with software you are ' + 'running in your shell.') cache = {} @@ -96,7 +102,7 @@ def log(message, level=None): command += ['-l', level] if not isinstance(message, six.string_types): message = repr(message) - command += [message] + command += [message[:SH_MAX_ARG]] # Missing juju-log should not cause failures in unit tests # Send log output to stderr try: @@ -199,11 +205,35 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) -def service_name(): - """The name service group this unit belongs to""" +def application_name(): + """ + The name of the deployed application this unit belongs to. + """ return local_unit().split('/')[0] +def service_name(): + """ + .. deprecated:: 0.19.1 + Alias for :func:`application_name`. + """ + return application_name() + + +def model_name(): + """ + Name of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_NAME'] + + +def model_uuid(): + """ + UUID of the model that this unit is deployed in. + """ + return os.environ['JUJU_MODEL_UUID'] + + def principal_unit(): """Returns the principal unit of this unit, otherwise None""" # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT @@ -461,6 +491,67 @@ def related_units(relid=None): subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] +def expected_peer_units(): + """Get a generator for units we expect to join peer relation based on + goal-state. + + The local unit is excluded from the result to make it easy to gauge + completion of all peers joining the relation with existing hook tools. + + Example usage: + log('peer {} of {} joined peer relation' + .format(len(related_units()), + len(list(expected_peer_units())))) + + This function will raise NotImplementedError if used with juju versions + without goal-state support. + + :returns: iterator + :rtype: types.GeneratorType + :raises: NotImplementedError + """ + if not has_juju_version("2.4.0"): + # goal-state first appeared in 2.4.0. + raise NotImplementedError("goal-state") + _goal_state = goal_state() + return (key for key in _goal_state['units'] + if '/' in key and key != local_unit()) + + +def expected_related_units(reltype=None): + """Get a generator for units we expect to join relation based on + goal-state. + + Note that you can not use this function for the peer relation, take a look + at expected_peer_units() for that. + + This function will raise KeyError if you request information for a + relation type for which juju goal-state does not have information. It will + raise NotImplementedError if used with juju versions without goal-state + support. 
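A minor hedged sketch combining the new model/application helpers; the tag format is purely illustrative and the environment variables are only set inside a hook:

    from charmhelpers.core.hookenv import application_name, model_name, model_uuid

    # Illustrative identifier built from the new helpers.
    tag = '{}:{}/{}'.format(model_uuid(), model_name(), application_name())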
+ + Example usage: + log('participant {} of {} joined relation {}' + .format(len(related_units()), + len(list(expected_related_units())), + relation_type())) + + :param reltype: Relation type to list data for, default is to list data for + the realtion type we are currently executing a hook for. + :type reltype: str + :returns: iterator + :rtype: types.GeneratorType + :raises: KeyError, NotImplementedError + """ + if not has_juju_version("2.4.4"): + # goal-state existed in 2.4.0, but did not list individual units to + # join a relation in 2.4.1 through 2.4.3. (LP: #1794739) + raise NotImplementedError("goal-state relation unit count") + reltype = reltype or relation_type() + _goal_state = goal_state() + return (key for key in _goal_state['relations'][reltype] if '/' in key) + + @cached def relation_for_unit(unit=None, rid=None): """Get the json represenation of a unit's relation""" @@ -944,6 +1035,14 @@ def application_version_set(version): log("Application Version: {}".format(version)) +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +@cached +def goal_state(): + """Juju goal state values""" + cmd = ['goal-state', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def is_leader(): """Does the current unit hold the juju leadership @@ -1205,3 +1304,139 @@ def ingress_address(rid=None, unit=None): settings = relation_get(rid=rid, unit=unit) return (settings.get('ingress-address') or settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. + + This function is to be used on the providing side of the + relation, and provides the ranges of addresses that client + connections may come from. The result is uninteresting on + the consuming side of a relation (unit == local_unit()). + + Returns a stable list of subnets in CIDR format. + eg. ['192.168.1.0/24', '2001::F00F/128'] + + If egress-subnets is not available, falls back to using the published + ingress-address, or finally private-address. + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128'] + """ + def _to_range(addr): + if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None: + addr += '/32' + elif ':' in addr and '/' not in addr: # IPv6 + addr += '/128' + return addr + + settings = relation_get(rid=rid, unit=unit) + if 'egress-subnets' in settings: + return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()] + if 'ingress-address' in settings: + return [_to_range(settings['ingress-address'])] + if 'private-address' in settings: + return [_to_range(settings['private-address'])] + return [] # Should never happen + + +def unit_doomed(unit=None): + """Determines if the unit is being removed from the model + + Requires Juju 2.4.1. + + :param unit: string unit name, defaults to local_unit + :side effect: calls goal_state + :side effect: calls local_unit + :side effect: calls has_juju_version + :return: True if the unit is being removed, already gone, or never existed + """ + if not has_juju_version("2.4.1"): + # We cannot risk blindly returning False for 'we don't know', + # because that could cause data loss; if call sites don't + # need an accurate answer, they likely don't need this helper + # at all. + # goal-state existed in 2.4.0, but did not handle removals + # correctly until 2.4.1. 
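On the providing side of a relation, a hedged sketch of reading the client's egress ranges; the relation id and unit below are placeholders:

    from charmhelpers.core.hookenv import egress_subnets, log

    # Returns CIDRs such as ['10.5.0.0/16'], falling back to
    # ingress-address/private-address when egress-subnets is unset.
    for cidr in egress_subnets(rid='identity-service:3', unit='glance/0'):
        log('allowing connections from {}'.format(cidr))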
+ raise NotImplementedError("is_doomed") + if unit is None: + unit = local_unit() + gs = goal_state() + units = gs.get('units', {}) + if unit not in units: + return True + # I don't think 'dead' units ever show up in the goal-state, but + # check anyway in addition to 'dying'. + return units[unit]['status'] in ('dying', 'dead') + + +def env_proxy_settings(selected_settings=None): + """Get proxy settings from process environment variables. + + Get charm proxy settings from environment variables that correspond to + juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2, + see lp:1782236) in a format suitable for passing to an application that + reacts to proxy settings passed as environment variables. Some applications + support lowercase or uppercase notation (e.g. curl), some support only + lowercase (e.g. wget), there are also subjectively rare cases of only + uppercase notation support. no_proxy CIDR and wildcard support also varies + between runtimes and applications as there is no enforced standard. + + Some applications may connect to multiple destinations and expose config + options that would affect only proxy settings for a specific destination + these should be handled in charms in an application-specific manner. + + :param selected_settings: format only a subset of possible settings + :type selected_settings: list + :rtype: Option(None, dict[str, str]) + """ + SUPPORTED_SETTINGS = { + 'http': 'HTTP_PROXY', + 'https': 'HTTPS_PROXY', + 'no_proxy': 'NO_PROXY', + 'ftp': 'FTP_PROXY' + } + if selected_settings is None: + selected_settings = SUPPORTED_SETTINGS + + selected_vars = [v for k, v in SUPPORTED_SETTINGS.items() + if k in selected_settings] + proxy_settings = {} + for var in selected_vars: + var_val = os.getenv(var) + if var_val: + proxy_settings[var] = var_val + proxy_settings[var.lower()] = var_val + # Now handle juju-prefixed environment variables. The legacy vs new + # environment variable usage is mutually exclusive + charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var)) + if charm_var_val: + proxy_settings[var] = charm_var_val + proxy_settings[var.lower()] = charm_var_val + if 'no_proxy' in proxy_settings: + if _contains_range(proxy_settings['no_proxy']): + log(RANGE_WARNING, level=WARNING) + return proxy_settings if proxy_settings else None + + +def _contains_range(addresses): + """Check for cidr or wildcard domain in a string. + + Given a string comprising a comma seperated list of ip addresses + and domain names, determine whether the string contains IP ranges + or wildcard domains. + + :param addresses: comma seperated list of domains and ip addresses. + :type addresses: str + """ + return ( + # Test for cidr (e.g. 10.20.20.0/24) + "/" in addresses or + # Test for wildcard domains (*.foo.com or .foo.com) + "*" in addresses or + addresses.startswith(".") or + ",." in addresses or + " ." 
in addresses) diff --git a/charmhelpers/core/host.py b/charmhelpers/core/host.py index 5cc5c86b701fc5375f387eb01a0d2b76c184c263..1fb789e4c8e1d9f002249604ae47f15e0a491a1b 100644 --- a/charmhelpers/core/host.py +++ b/charmhelpers/core/host.py @@ -34,21 +34,23 @@ import six from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log, DEBUG, local_unit +from .hookenv import log, INFO, DEBUG, local_unit, charm_name from .fstab import Fstab from charmhelpers.osplatform import get_platform __platform__ = get_platform() if __platform__ == "ubuntu": - from charmhelpers.core.host_factory.ubuntu import ( + from charmhelpers.core.host_factory.ubuntu import ( # NOQA:F401 service_available, add_new_group, lsb_release, cmp_pkgrevno, CompareHostReleases, + get_distrib_codename, + arch ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": - from charmhelpers.core.host_factory.centos import ( + from charmhelpers.core.host_factory.centos import ( # NOQA:F401 service_available, add_new_group, lsb_release, @@ -58,6 +60,7 @@ elif __platform__ == "centos": UPDATEDB_PATH = '/etc/updatedb.conf' + def service_start(service_name, **kwargs): """Start a system service. @@ -287,8 +290,8 @@ def service_running(service_name, **kwargs): for key, value in six.iteritems(kwargs): parameter = '%s=%s' % (key, value) cmd.append(parameter) - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT).decode('UTF-8') + output = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -442,7 +445,7 @@ def add_user_to_group(username, group): def chage(username, lastday=None, expiredate=None, inactive=None, - mindays=None, maxdays=None, root=None, warndays=None): + mindays=None, maxdays=None, root=None, warndays=None): """Change user password expiry information :param str username: User to update @@ -482,8 +485,10 @@ def chage(username, lastday=None, expiredate=None, inactive=None, cmd.append(username) subprocess.check_call(cmd) + remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] @@ -535,13 +540,15 @@ def write_file(path, content, owner='root', group='root', perms=0o444): # lets see if we can grab the file and compare the context, to avoid doing # a write. existing_content = None - existing_uid, existing_gid = None, None + existing_uid, existing_gid, existing_perms = None, None, None try: with open(path, 'rb') as target: existing_content = target.read() stat = os.stat(path) - existing_uid, existing_gid = stat.st_uid, stat.st_gid - except: + existing_uid, existing_gid, existing_perms = ( + stat.st_uid, stat.st_gid, stat.st_mode + ) + except Exception: pass if content != existing_content: log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), @@ -552,7 +559,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444): target.write(content) return # the contents were the same, but we might still need to change the - # ownership. + # ownership or permissions. 
if existing_uid != uid: log("Changing uid on already existing content: {} -> {}" .format(existing_uid, uid), level=DEBUG) @@ -561,6 +568,10 @@ def write_file(path, content, owner='root', group='root', perms=0o444): log("Changing gid on already existing content: {} -> {}" .format(existing_gid, gid), level=DEBUG) os.chown(path, -1, gid) + if existing_perms != perms: + log("Changing permissions on existing content: {} -> {}" + .format(existing_perms, perms), level=DEBUG) + os.chmod(path, perms) def fstab_remove(mp): @@ -825,7 +836,7 @@ def list_nics(nic_type=None): ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') ip_output = (line.strip() for line in ip_output if line) - key = re.compile('^[0-9]+:\s+(.+):') + key = re.compile(r'^[0-9]+:\s+(.+):') for line in ip_output: matched = re.search(key, line) if matched: @@ -970,6 +981,20 @@ def is_container(): def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): + """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list. + + This method has no effect if the path specified by updatedb_path does not + exist or is not a file. + + @param path: string the path to add to the updatedb.conf PRUNEPATHS value + @param updatedb_path: the path the updatedb.conf file + """ + if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): + # If the updatedb.conf file doesn't exist then don't attempt to update + # the file as the package providing mlocate may not be installed on + # the local system + return + with open(updatedb_path, 'r+') as f_id: updatedb_text = f_id.read() output = updatedb(updatedb_text, path) @@ -1017,3 +1042,54 @@ def modulo_distribution(modulo=3, wait=30): """ unit_number = int(local_unit().split('/')[1]) return (unit_number % modulo) * wait + + +def install_ca_cert(ca_cert, name=None): + """ + Install the given cert as a trusted CA. + + The ``name`` is the stem of the filename where the cert is written, and if + not provided, it will default to ``juju-{charm_name}``. + + If the cert is empty or None, or is unchanged, nothing is done. + """ + if not ca_cert: + return + if not isinstance(ca_cert, bytes): + ca_cert = ca_cert.encode('utf8') + if not name: + name = 'juju-{}'.format(charm_name()) + cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name) + new_hash = hashlib.md5(ca_cert).hexdigest() + if file_hash(cert_file) == new_hash: + return + log("Installing new CA cert at: {}".format(cert_file), level=INFO) + write_file(cert_file, ca_cert) + subprocess.check_call(['update-ca-certificates', '--fresh']) + + +def get_system_env(key, default=None): + """Get data from system environment as represented in ``/etc/environment``. + + :param key: Key to look up + :type key: str + :param default: Value to return if key is not found + :type default: any + :returns: Value for key if found or contents of default parameter + :rtype: any + :raises: subprocess.CalledProcessError + """ + env_file = '/etc/environment' + # use the shell and env(1) to parse the global environments file. This is + # done to get the correct result even if the user has shell variable + # substitutions or other shell logic in that file. 
+ output = subprocess.check_output( + ['env', '-i', '/bin/bash', '-c', + 'set -a && source {} && env'.format(env_file)], + universal_newlines=True) + for k, v in (line.split('=', 1) + for line in output.splitlines() if '=' in line): + if k == key: + return v + else: + return default diff --git a/charmhelpers/core/host_factory/ubuntu.py b/charmhelpers/core/host_factory/ubuntu.py index d8dc378a5dad29c271a89289e4b815e2c2c99060..1b57e2ce2311cefb1185eaee3f9435f58a775543 100644 --- a/charmhelpers/core/host_factory/ubuntu.py +++ b/charmhelpers/core/host_factory/ubuntu.py @@ -1,5 +1,6 @@ import subprocess +from charmhelpers.core.hookenv import cached from charmhelpers.core.strutils import BasicStringComparator @@ -20,6 +21,10 @@ UBUNTU_RELEASES = ( 'yakkety', 'zesty', 'artful', + 'bionic', + 'cosmic', + 'disco', + 'eoan', ) @@ -70,6 +75,14 @@ def lsb_release(): return d +def get_distrib_codename(): + """Return the codename of the distribution + :returns: The codename + :rtype: str + """ + return lsb_release()['DISTRIB_CODENAME'].lower() + + def cmp_pkgrevno(package, revno, pkgcache=None): """Compare supplied revno with the revno of the installed package. @@ -81,9 +94,22 @@ def cmp_pkgrevno(package, revno, pkgcache=None): the pkgcache argument is None. Be sure to add charmhelpers.fetch if you call this function, or pass an apt_pkg.Cache() instance. """ - import apt_pkg + from charmhelpers.fetch import apt_pkg if not pkgcache: from charmhelpers.fetch import apt_cache pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@cached +def arch(): + """Return the package architecture as a string. + + :returns: the architecture + :rtype: str + :raises: subprocess.CalledProcessError if dpkg command fails + """ + return subprocess.check_output( + ['dpkg', '--print-architecture'] + ).rstrip().decode('UTF-8') diff --git a/charmhelpers/core/kernel.py b/charmhelpers/core/kernel.py index 2d404528348e57df4cebee58ff11f6574f334fe0..e01f4f8ba73ee0d5ab7553740c2590a50e42f96d 100644 --- a/charmhelpers/core/kernel.py +++ b/charmhelpers/core/kernel.py @@ -26,12 +26,12 @@ from charmhelpers.core.hookenv import ( __platform__ = get_platform() if __platform__ == "ubuntu": - from charmhelpers.core.kernel_factory.ubuntu import ( + from charmhelpers.core.kernel_factory.ubuntu import ( # NOQA:F401 persistent_modprobe, update_initramfs, ) # flake8: noqa -- ignore F401 for this import elif __platform__ == "centos": - from charmhelpers.core.kernel_factory.centos import ( + from charmhelpers.core.kernel_factory.centos import ( # NOQA:F401 persistent_modprobe, update_initramfs, ) # flake8: noqa -- ignore F401 for this import diff --git a/charmhelpers/core/sysctl.py b/charmhelpers/core/sysctl.py index 6e413e31480e5fb4bcb703d58b1e87f98adc53af..145161e23dfa3d2ed9e108d1e9c8bc44606ad751 100644 --- a/charmhelpers/core/sysctl.py +++ b/charmhelpers/core/sysctl.py @@ -28,13 +28,15 @@ from charmhelpers.core.hookenv import ( __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' -def create(sysctl_dict, sysctl_file): +def create(sysctl_dict, sysctl_file, ignore=False): """Creates a sysctl.conf file from a YAML associative array :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode + :param ignore: If True, ignore "unknown variable" errors. 
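For illustration, the new ignore flag maps to `sysctl -e`, letting a charm skip keys the running kernel (for example in a container) does not expose; the target file path is only an example:

    from charmhelpers.core.sysctl import create

    create("{net.ipv4.ip_forward: 1, vm.swappiness: 10}",
           '/etc/sysctl.d/50-charm-example.conf',
           ignore=True)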
+ :type ignore: bool :returns: None """ try: @@ -48,7 +50,12 @@ def create(sysctl_dict, sysctl_file): for key, value in sysctl_dict_parsed.items(): fd.write("{}={}\n".format(key, value)) - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), + log("Updating sysctl_file: {} values: {}".format(sysctl_file, + sysctl_dict_parsed), level=DEBUG) - check_call(["sysctl", "-p", sysctl_file]) + call = ["sysctl", "-p", sysctl_file] + if ignore: + call.append("-e") + + check_call(call) diff --git a/charmhelpers/fetch/__init__.py b/charmhelpers/fetch/__init__.py index 480a627636611b9e8e4011231b423dfa9ad42e7f..0cc7fc850a0632568ad78aae9716be718c9ff6b5 100644 --- a/charmhelpers/fetch/__init__.py +++ b/charmhelpers/fetch/__init__.py @@ -84,6 +84,7 @@ module = "charmhelpers.fetch.%s" % __platform__ fetch = importlib.import_module(module) filter_installed_packages = fetch.filter_installed_packages +filter_missing_packages = fetch.filter_missing_packages install = fetch.apt_install upgrade = fetch.apt_upgrade update = _fetch_update = fetch.apt_update @@ -96,11 +97,14 @@ if __platform__ == "ubuntu": apt_update = fetch.apt_update apt_upgrade = fetch.apt_upgrade apt_purge = fetch.apt_purge + apt_autoremove = fetch.apt_autoremove apt_mark = fetch.apt_mark apt_hold = fetch.apt_hold apt_unhold = fetch.apt_unhold import_key = fetch.import_key get_upstream_version = fetch.get_upstream_version + apt_pkg = fetch.ubuntu_apt_pkg + get_apt_dpkg_env = fetch.get_apt_dpkg_env elif __platform__ == "centos": yum_search = fetch.yum_search diff --git a/charmhelpers/fetch/archiveurl.py b/charmhelpers/fetch/archiveurl.py index dd24f9eca94789e7a7bcf463f412988ddc9a91d9..d25587adeff102c3fc9e402f98746fccbd8a3693 100644 --- a/charmhelpers/fetch/archiveurl.py +++ b/charmhelpers/fetch/archiveurl.py @@ -89,7 +89,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): :param str source: URL pointing to an archive file. :param str dest: Local path location to download archive file to. """ - # propogate all exceptions + # propagate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse(source) if proto in ('http', 'https'): diff --git a/charmhelpers/fetch/bzrurl.py b/charmhelpers/fetch/bzrurl.py index 07cd0293971df9700f3b687bb974dbbac69773a5..c4ab3ff1e6bc7dde24e8ed568a3dc0c6012ddea6 100644 --- a/charmhelpers/fetch/bzrurl.py +++ b/charmhelpers/fetch/bzrurl.py @@ -13,7 +13,7 @@ # limitations under the License. import os -from subprocess import check_call +from subprocess import STDOUT, check_output from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource, @@ -55,7 +55,7 @@ class BzrUrlFetchHandler(BaseFetchHandler): cmd = ['bzr', 'branch'] cmd += cmd_opts cmd += [source, dest] - check_call(cmd) + check_output(cmd, stderr=STDOUT) def install(self, source, dest=None, revno=None): url_parts = self.parse_url(source) diff --git a/charmhelpers/fetch/giturl.py b/charmhelpers/fetch/giturl.py index 4cf21bc29e83f254fb6cb097e0444427ed5e1949..070ca9bb5c1a2fdef39f88606ffcaf39bb049410 100644 --- a/charmhelpers/fetch/giturl.py +++ b/charmhelpers/fetch/giturl.py @@ -13,7 +13,7 @@ # limitations under the License. 
import os -from subprocess import check_call, CalledProcessError +from subprocess import check_output, CalledProcessError, STDOUT from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource, @@ -50,7 +50,7 @@ class GitUrlFetchHandler(BaseFetchHandler): cmd = ['git', 'clone', source, dest, '--branch', branch] if depth: cmd.extend(['--depth', depth]) - check_call(cmd) + check_output(cmd, stderr=STDOUT) def install(self, source, branch="master", dest=None, depth=None): url_parts = self.parse_url(source) diff --git a/charmhelpers/contrib/python/__init__.py b/charmhelpers/fetch/python/__init__.py similarity index 92% rename from charmhelpers/contrib/python/__init__.py rename to charmhelpers/fetch/python/__init__.py index d7567b863e3a5ad2b7a7f44958b4166e0c3d346b..bff99dc93c64f80716e2d5a2b6d0d4e8a2436955 100644 --- a/charmhelpers/contrib/python/__init__.py +++ b/charmhelpers/fetch/python/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2019 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/charmhelpers/contrib/python/debug.py b/charmhelpers/fetch/python/debug.py similarity index 96% rename from charmhelpers/contrib/python/debug.py rename to charmhelpers/fetch/python/debug.py index d2142c75299046b491444b0ae2fcb264e87e2843..757135ee4cf3b5ff4c02305126f5ca3940892afc 100644 --- a/charmhelpers/contrib/python/debug.py +++ b/charmhelpers/fetch/python/debug.py @@ -20,7 +20,7 @@ from __future__ import print_function import atexit import sys -from charmhelpers.contrib.python.rpdb import Rpdb +from charmhelpers.fetch.python.rpdb import Rpdb from charmhelpers.core.hookenv import ( open_port, close_port, diff --git a/charmhelpers/contrib/python/packages.py b/charmhelpers/fetch/python/packages.py similarity index 100% rename from charmhelpers/contrib/python/packages.py rename to charmhelpers/fetch/python/packages.py diff --git a/charmhelpers/contrib/python/rpdb.py b/charmhelpers/fetch/python/rpdb.py similarity index 100% rename from charmhelpers/contrib/python/rpdb.py rename to charmhelpers/fetch/python/rpdb.py diff --git a/charmhelpers/contrib/python/version.py b/charmhelpers/fetch/python/version.py similarity index 100% rename from charmhelpers/contrib/python/version.py rename to charmhelpers/fetch/python/version.py diff --git a/charmhelpers/fetch/ubuntu.py b/charmhelpers/fetch/ubuntu.py index 910e96a66f45b8a173147141e7c5108d685b7b7a..b08f91f4fab2d598d25ab2e94cf1b2475fd15773 100644 --- a/charmhelpers/fetch/ubuntu.py +++ b/charmhelpers/fetch/ubuntu.py @@ -13,23 +13,23 @@ # limitations under the License. 
from collections import OrderedDict -import os import platform import re import six -import time import subprocess -from tempfile import NamedTemporaryFile +import sys +import time + +from charmhelpers.core.host import get_distrib_codename, get_system_env -from charmhelpers.core.host import ( - lsb_release -) from charmhelpers.core.hookenv import ( log, DEBUG, WARNING, + env_proxy_settings, ) from charmhelpers.fetch import SourceConfigError, GPGKeyError +from charmhelpers.fetch import ubuntu_apt_pkg PROPOSED_POCKET = ( "# Proposed\n" @@ -157,6 +157,30 @@ CLOUD_ARCHIVE_POCKETS = { 'queens/proposed': 'xenial-proposed/queens', 'xenial-queens/proposed': 'xenial-proposed/queens', 'xenial-proposed/queens': 'xenial-proposed/queens', + # Rocky + 'rocky': 'bionic-updates/rocky', + 'bionic-rocky': 'bionic-updates/rocky', + 'bionic-rocky/updates': 'bionic-updates/rocky', + 'bionic-updates/rocky': 'bionic-updates/rocky', + 'rocky/proposed': 'bionic-proposed/rocky', + 'bionic-rocky/proposed': 'bionic-proposed/rocky', + 'bionic-proposed/rocky': 'bionic-proposed/rocky', + # Stein + 'stein': 'bionic-updates/stein', + 'bionic-stein': 'bionic-updates/stein', + 'bionic-stein/updates': 'bionic-updates/stein', + 'bionic-updates/stein': 'bionic-updates/stein', + 'stein/proposed': 'bionic-proposed/stein', + 'bionic-stein/proposed': 'bionic-proposed/stein', + 'bionic-proposed/stein': 'bionic-proposed/stein', + # Train + 'train': 'bionic-updates/train', + 'bionic-train': 'bionic-updates/train', + 'bionic-train/updates': 'bionic-updates/train', + 'bionic-updates/train': 'bionic-updates/train', + 'train/proposed': 'bionic-proposed/train', + 'bionic-train/proposed': 'bionic-proposed/train', + 'bionic-proposed/train': 'bionic-proposed/train', } @@ -180,18 +204,54 @@ def filter_installed_packages(packages): return _pkgs -def apt_cache(in_memory=True, progress=None): - """Build and return an apt cache.""" - from apt import apt_pkg - apt_pkg.init() - if in_memory: - apt_pkg.config.set("Dir::Cache::pkgcache", "") - apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - return apt_pkg.Cache(progress) +def filter_missing_packages(packages): + """Return a list of packages that are installed. + + :param packages: list of packages to evaluate. + :returns list: Packages that are installed. + """ + return list( + set(packages) - + set(filter_installed_packages(packages)) + ) + + +def apt_cache(*_, **__): + """Shim returning an object simulating the apt_pkg Cache. + + :param _: Accept arguments for compability, not used. + :type _: any + :param __: Accept keyword arguments for compability, not used. + :type __: any + :returns:Object used to interrogate the system apt and dpkg databases. + :rtype:ubuntu_apt_pkg.Cache + """ + if 'apt_pkg' in sys.modules: + # NOTE(fnordahl): When our consumer use the upstream ``apt_pkg`` module + # in conjunction with the apt_cache helper function, they may expect us + # to call ``apt_pkg.init()`` for them. + # + # Detect this situation, log a warning and make the call to + # ``apt_pkg.init()`` to avoid the consumer Python interpreter from + # crashing with a segmentation fault. + log('Support for use of upstream ``apt_pkg`` module in conjunction' + 'with charm-helpers is deprecated since 2019-06-25', level=WARNING) + sys.modules['apt_pkg'].init() + return ubuntu_apt_pkg.Cache() def apt_install(packages, options=None, fatal=False): - """Install one or more packages.""" + """Install one or more packages. 
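# --- illustrative sketch (annotation, not part of the patch) -----------------
# Despite its name, ``filter_missing_packages`` is the complement of
# ``filter_installed_packages``: it returns the subset of the given packages
# that *is* currently installed, which is what the purge paths later in this
# patch need. Package names are examples only.
from charmhelpers.fetch import (
    filter_installed_packages,
    filter_missing_packages,
)

wanted = ['keystone', 'python3-keystone', 'python-keystone']
to_install = filter_installed_packages(wanted)   # not yet installed
to_purge = filter_missing_packages(wanted)       # already installed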
+ + :param packages: Package(s) to install + :type packages: Option[str, List[str]] + :param options: Options to pass on to apt-get + :type options: Option[None, List[str]] + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :raises: subprocess.CalledProcessError + """ if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -208,7 +268,17 @@ def apt_install(packages, options=None, fatal=False): def apt_upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages.""" + """Upgrade all packages. + + :param options: Options to pass on to apt-get + :type options: Option[None, List[str]] + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :param dist: Whether ``dist-upgrade`` should be used over ``upgrade`` + :type dist: bool + :raises: subprocess.CalledProcessError + """ if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -229,7 +299,15 @@ def apt_update(fatal=False): def apt_purge(packages, fatal=False): - """Purge one or more packages.""" + """Purge one or more packages. + + :param packages: Package(s) to install + :type packages: Option[str, List[str]] + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :raises: subprocess.CalledProcessError + """ cmd = ['apt-get', '--assume-yes', 'purge'] if isinstance(packages, six.string_types): cmd.append(packages) @@ -239,6 +317,21 @@ def apt_purge(packages, fatal=False): _run_apt_command(cmd, fatal) +def apt_autoremove(purge=True, fatal=False): + """Purge one or more packages. + :param purge: Whether the ``--purge`` option should be passed on or not. + :type purge: bool + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :raises: subprocess.CalledProcessError + """ + cmd = ['apt-get', '--assume-yes', 'autoremove'] + if purge: + cmd.append('--purge') + _run_apt_command(cmd, fatal) + + def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark.""" log("Marking {} as {}".format(packages, mark)) @@ -265,13 +358,18 @@ def apt_unhold(packages, fatal=False): def import_key(key): """Import an ASCII Armor key. - /!\ A Radix64 format keyid is also supported for backwards - compatibility, but should never be used; the key retrieval - mechanism is insecure and subject to man-in-the-middle attacks - voiding all signature checks using that key. - - :param keyid: The key in ASCII armor format, - including BEGIN and END markers. + A Radix64 format keyid is also supported for backwards + compatibility. In this case Ubuntu keyserver will be + queried for a key via HTTPS by its keyid. This method + is less preferrable because https proxy servers may + require traffic decryption which is equivalent to a + man-in-the-middle attack (a proxy server impersonates + keyserver TLS certificates and has to be explicitly + trusted by the system). + + :param key: A GPG key in ASCII armor format, + including BEGIN and END markers or a keyid. 
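# --- illustrative sketch (annotation, not part of the patch) -----------------
# ``apt_autoremove`` is new; with purge=True it runs
# ``apt-get --assume-yes autoremove --purge`` and pairs naturally with
# ``apt_purge`` when dropping obsolete packages. Package names are examples.
from charmhelpers.fetch import apt_purge, apt_autoremove

apt_purge(['python-keystone', 'python-memcache'], fatal=True)
apt_autoremove(purge=True, fatal=True)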
+ :type key: (bytes, str) :raises: GPGKeyError if the key could not be imported """ key = key.strip() @@ -282,35 +380,131 @@ def import_key(key): log("PGP key found (looks like ASCII Armor format)", level=DEBUG) if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and '-----END PGP PUBLIC KEY BLOCK-----' in key): - log("Importing ASCII Armor PGP key", level=DEBUG) - with NamedTemporaryFile() as keyfile: - with open(keyfile.name, 'w') as fd: - fd.write(key) - fd.write("\n") - cmd = ['apt-key', 'add', keyfile.name] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - error = "Error importing PGP key '{}'".format(key) - log(error) - raise GPGKeyError(error) + log("Writing provided PGP key in the binary format", level=DEBUG) + if six.PY3: + key_bytes = key.encode('utf-8') + else: + key_bytes = key + key_name = _get_keyid_by_gpg_key(key_bytes) + key_gpg = _dearmor_gpg_key(key_bytes) + _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg) else: raise GPGKeyError("ASCII armor markers missing from GPG key") else: - # We should only send things obviously not a keyid offsite - # via this unsecured protocol, as it may be a secret or part - # of one. log("PGP key found (looks like Radix64 format)", level=WARNING) - log("INSECURLY importing PGP key from keyserver; " + log("SECURELY importing PGP key from keyserver; " "full key not provided.", level=WARNING) - cmd = ['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - error = "Error importing PGP key '{}'".format(key) - log(error) - raise GPGKeyError(error) + # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL + # to retrieve GPG keys. `apt-key adv` command is deprecated as is + # apt-key in general as noted in its manpage. See lp:1433761 for more + # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop + # gpg + key_asc = _get_key_by_keyid(key) + # write the key in GPG format so that apt-key list shows it + key_gpg = _dearmor_gpg_key(key_asc) + _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg) + + +def _get_keyid_by_gpg_key(key_material): + """Get a GPG key fingerprint by GPG key material. + Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded + or binary GPG key material. Can be used, for example, to generate file + names for keys passed via charm options. + + :param key_material: ASCII armor-encoded or binary GPG key material + :type key_material: bytes + :raises: GPGKeyError if invalid key material has been provided + :returns: A GPG key fingerprint + :rtype: str + """ + # Use the same gpg command for both Xenial and Bionic + cmd = 'gpg --with-colons --with-fingerprint' + ps = subprocess.Popen(cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_material) + if six.PY3: + out = out.decode('utf-8') + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material provided') + # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) + return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1) + + +def _get_key_by_keyid(keyid): + """Get a key via HTTPS from the Ubuntu keyserver. + Different key ID formats are supported by SKS keyservers (the longer ones + are more secure, see "dead beef attack" and https://evil32.com/). 
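# --- illustrative sketch (annotation, not part of the patch) -----------------
# ``import_key`` no longer shells out to ``apt-key``: armored key material is
# de-armored with ``gpg --dearmor`` and written to /etc/apt/trusted.gpg.d/,
# and a bare keyid is now fetched from keyserver.ubuntu.com over HTTPS
# (honouring configured proxies) rather than via hkp:// on port 80. The key
# text below is a placeholder, not real key material.
from charmhelpers.fetch import import_key

key_material = """-----BEGIN PGP PUBLIC KEY BLOCK-----
... full armored key body ...
-----END PGP PUBLIC KEY BLOCK-----"""
import_key(key_material)          # preferred: pass the complete armored key
# import_key('35F77D63B5CEC106C577ED856E85A86E4652B4E6')   # a keyid also works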
Since + HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will + impersonate keyserver.ubuntu.com and generate a certificate with + keyserver.ubuntu.com in the CN field or in SubjAltName fields of a + certificate. If such proxy behavior is expected it is necessary to add the + CA certificate chain containing the intermediate CA of the SSLBump proxy to + every machine that this code runs on via ca-certs cloud-init directive (via + cloudinit-userdata model-config) or via other means (such as through a + custom charm option). Also note that DNS resolution for the hostname in a + URL is done at a proxy server - not at the client side. + + 8-digit (32 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6 + 16-digit (64 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6 + 40-digit key ID: + https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6 + + :param keyid: An 8, 16 or 40 hex digit keyid to find a key for + :type keyid: (bytes, str) + :returns: A key material for the specified GPG key id + :rtype: (str, bytes) + :raises: subprocess.CalledProcessError + """ + # options=mr - machine-readable output (disables html wrappers) + keyserver_url = ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr&exact=on&search=0x{}') + curl_cmd = ['curl', keyserver_url.format(keyid)] + # use proxy server settings in order to retrieve the key + return subprocess.check_output(curl_cmd, + env=env_proxy_settings(['https'])) + + +def _dearmor_gpg_key(key_asc): + """Converts a GPG key in the ASCII armor format to the binary format. + + :param key_asc: A GPG key in ASCII armor format. + :type key_asc: (str, bytes) + :returns: A GPG key in binary format + :rtype: (str, bytes) + :raises: GPGKeyError + """ + ps = subprocess.Popen(['gpg', '--dearmor'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_asc) + # no need to decode output as it is binary (invalid utf-8), only error + if six.PY3: + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material. Check your network setup' + ' (MTU, routing, DNS) and/or proxy server settings' + ' as well as destination keyserver status.') + else: + return out + + +def _write_apt_gpg_keyfile(key_name, key_material): + """Writes GPG key material into a file at a provided path. + + :param key_name: A key name to use for a key file (could be a fingerprint) + :type key_name: str + :param key_material: A GPG key material (binary) + :type key_material: (str, bytes) + """ + with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name), + 'wb') as keyf: + keyf.write(key_material) def add_source(source, key=None, fail_invalid=False): @@ -385,14 +579,16 @@ def add_source(source, key=None, fail_invalid=False): for r, fn in six.iteritems(_mapping): m = re.match(r, source) if m: - # call the assoicated function with the captured groups - # raises SourceConfigError on error. - fn(*m.groups()) if key: + # Import key before adding the source which depends on it, + # as refreshing packages could fail otherwise. try: import_key(key) except GPGKeyError as e: raise SourceConfigError(str(e)) + # call the associated function with the captured groups + # raises SourceConfigError on error. + fn(*m.groups()) break else: # nothing matched. 
log an error and maybe sys.exit @@ -405,13 +601,13 @@ def add_source(source, key=None, fail_invalid=False): def _add_proposed(): """Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list - Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct staza for + Uses get_distrib_codename to determine the correct stanza for the deb line. For intel architecutres PROPOSED_POCKET is used for the release, but for other architectures PROPOSED_PORTS_POCKET is used for the release. """ - release = lsb_release()['DISTRIB_CODENAME'] + release = get_distrib_codename() arch = platform.machine() if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): raise SourceConfigError("Arch {} not supported for (distro-)proposed" @@ -424,8 +620,16 @@ def _add_apt_repository(spec): """Add the spec using add_apt_repository :param spec: the parameter to pass to add_apt_repository + :type spec: str """ - _run_with_retries(['add-apt-repository', '--yes', spec]) + if '{series}' in spec: + series = get_distrib_codename() + spec = spec.replace('{series}', series) + # software-properties package for bionic properly reacts to proxy settings + # passed as environment variables (See lp:1433761). This is not the case + # LTS and non-LTS releases below bionic. + _run_with_retries(['add-apt-repository', '--yes', spec], + cmd_env=env_proxy_settings(['https'])) def _add_cloud_pocket(pocket): @@ -494,7 +698,7 @@ def _verify_is_ubuntu_rel(release, os_release): :raises: SourceConfigError if the release is not the same as the ubuntu release. """ - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + ubuntu_rel = get_distrib_codename() if release != ubuntu_rel: raise SourceConfigError( 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu' @@ -505,21 +709,22 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_message="", cmd_env=None): """Run a command and retry until success or max_retries is reached. - :param: cmd: str: The apt command to run. - :param: max_retries: int: The number of retries to attempt on a fatal - command. Defaults to CMD_RETRY_COUNT. - :param: retry_exitcodes: tuple: Optional additional exit codes to retry. - Defaults to retry on exit code 1. - :param: retry_message: str: Optional log prefix emitted during retries. - :param: cmd_env: dict: Environment variables to add to the command run. + :param cmd: The apt command to run. + :type cmd: str + :param max_retries: The number of retries to attempt on a fatal + command. Defaults to CMD_RETRY_COUNT. + :type max_retries: int + :param retry_exitcodes: Optional additional exit codes to retry. + Defaults to retry on exit code 1. + :type retry_exitcodes: tuple + :param retry_message: Optional log prefix emitted during retries. + :type retry_message: str + :param: cmd_env: Environment variables to add to the command run. 
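# --- illustrative sketch (annotation, not part of the patch) -----------------
# Repository specs handed to add-apt-repository may now carry a ``{series}``
# placeholder, expanded to the running Ubuntu codename so a single config
# value works across releases. The mirror URL is an example only.
from charmhelpers.core.host import get_distrib_codename

spec = 'deb http://mirror.example.com/ubuntu {series}-updates main'
spec = spec.replace('{series}', get_distrib_codename())   # e.g. 'bionic'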
+ :type cmd_env: Option[None, Dict[str, str]] """ - - env = None - kwargs = {} + env = get_apt_dpkg_env() if cmd_env: - env = os.environ.copy() env.update(cmd_env) - kwargs['env'] = env if not retry_message: retry_message = "Failed executing '{}'".format(" ".join(cmd)) @@ -531,8 +736,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_results = (None,) + retry_exitcodes while result in retry_results: try: - # result = subprocess.check_call(cmd, env=env) - result = subprocess.check_call(cmd, **kwargs) + result = subprocess.check_call(cmd, env=env) except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > max_retries: @@ -545,22 +749,18 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), def _run_apt_command(cmd, fatal=False): """Run an apt command with optional retries. - :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. + :param cmd: The apt command to run. + :type cmd: str + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool """ - # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment. - cmd_env = { - 'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')} - if fatal: _run_with_retries( - cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,), + cmd, retry_exitcodes=(1, APT_NO_LOCK,), retry_message="Couldn't acquire DPKG lock") else: - env = os.environ.copy() - env.update(cmd_env) - subprocess.call(cmd, env=env) + subprocess.call(cmd, env=get_apt_dpkg_env()) def get_upstream_version(package): @@ -568,7 +768,6 @@ def get_upstream_version(package): @returns None (if not installed) or the upstream version """ - import apt_pkg cache = apt_cache() try: pkg = cache[package] @@ -580,4 +779,18 @@ def get_upstream_version(package): # package is known, but no version is currently installed. return None - return apt_pkg.upstream_version(pkg.current_ver.ver_str) + return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str) + + +def get_apt_dpkg_env(): + """Get environment suitable for execution of APT and DPKG tools. + + We keep this in a helper function instead of in a global constant to + avoid execution on import of the library. + :returns: Environment suitable for execution of APT and DPKG tools. + :rtype: Dict[str, str] + """ + # The fallback is used in the event of ``/etc/environment`` not containing + # avalid PATH variable. + return {'DEBIAN_FRONTEND': 'noninteractive', + 'PATH': get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')} diff --git a/charmhelpers/fetch/ubuntu_apt_pkg.py b/charmhelpers/fetch/ubuntu_apt_pkg.py new file mode 100644 index 0000000000000000000000000000000000000000..104f91f133c2220e3e2fecd3a31cebe47d48bfb3 --- /dev/null +++ b/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -0,0 +1,237 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provide a subset of the ``python-apt`` module API. 
+ +Data collection is done through subprocess calls to ``apt-cache`` and +``dpkg-query`` commands. + +The main purpose for this module is to avoid dependency on the +``python-apt`` python module. + +The indicated python module is a wrapper around the ``apt`` C++ library +which is tightly connected to the version of the distribution it was +shipped on. It is not developed in a backward/forward compatible manner. + +This in turn makes it incredibly hard to distribute as a wheel for a piece +of python software that supports a span of distro releases [0][1]. + +Upstream feedback like [2] does not give confidence in this ever changing, +so with this we get rid of the dependency. + +0: https://github.com/juju-solutions/layer-basic/pull/135 +1: https://bugs.launchpad.net/charm-octavia/+bug/1824112 +2: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=845330#10 +""" + +import locale +import os +import subprocess + + +class _container(dict): + """Simple container for attributes.""" + __getattr__ = dict.__getitem__ + __setattr__ = dict.__setitem__ + + +class Package(_container): + """Simple container for package attributes.""" + + +class Version(_container): + """Simple container for version attributes.""" + + +class Cache(object): + """Simulation of ``apt_pkg`` Cache object.""" + def __init__(self, progress=None): + pass + + def __getitem__(self, package): + """Get information about a package from apt and dpkg databases. + + :param package: Name of package + :type package: str + :returns: Package object + :rtype: object + :raises: KeyError, subprocess.CalledProcessError + """ + apt_result = self._apt_cache_show([package])[package] + apt_result['name'] = apt_result.pop('package') + pkg = Package(apt_result) + dpkg_result = self._dpkg_list([package]).get(package, {}) + current_ver = None + installed_version = dpkg_result.get('version') + if installed_version: + current_ver = Version({'ver_str': installed_version}) + pkg.current_ver = current_ver + pkg.architecture = dpkg_result.get('architecture') + return pkg + + def _dpkg_list(self, packages): + """Get data from system dpkg database for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about installed packages, keys like + ``dpkg-query --list`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['dpkg-query', '--list'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. 
+ locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') + try: + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + except subprocess.CalledProcessError as cp: + # ``dpkg-query`` may return error and at the same time have + # produced useful output, for example when asked for multiple + # packages where some are not installed + if cp.returncode != 1: + raise + output = cp.output + headings = [] + for line in output.splitlines(): + if line.startswith('||/'): + headings = line.split() + headings.pop(0) + continue + elif (line.startswith('|') or line.startswith('+') or + line.startswith('dpkg-query:')): + continue + else: + data = line.split(None, 4) + status = data.pop(0) + if status != 'ii': + continue + pkg = {} + pkg.update({k.lower(): v for k, v in zip(headings, data)}) + if 'name' in pkg: + pkgs.update({pkg['name']: pkg}) + return pkgs + + def _apt_cache_show(self, packages): + """Get data from system apt cache for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about package, keys like + ``apt-cache show`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['apt-cache', 'show', '--no-all-versions'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. + locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') + try: + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + previous = None + pkg = {} + for line in output.splitlines(): + if not line: + if 'package' in pkg: + pkgs.update({pkg['package']: pkg}) + pkg = {} + continue + if line.startswith(' '): + if previous and previous in pkg: + pkg[previous] += os.linesep + line.lstrip() + continue + if ':' in line: + kv = line.split(':', 1) + key = kv[0].lower() + if key == 'n': + continue + previous = key + pkg.update({key: kv[1].lstrip()}) + except subprocess.CalledProcessError as cp: + # ``apt-cache`` returns 100 if none of the packages asked for + # exist in the apt cache. + if cp.returncode != 100: + raise + return pkgs + + +def init(): + """Compability shim that does nothing.""" + pass + + +def upstream_version(version): + """Extracts upstream version from a version string. + + Upstream reference: https://salsa.debian.org/apt-team/apt/blob/master/ + apt-pkg/deb/debversion.cc#L259 + + :param version: Version string + :type version: str + :returns: Upstream version + :rtype: str + """ + if version: + version = version.split(':')[-1] + version = version.split('-')[0] + return version + + +def version_compare(a, b): + """Compare the given versions. + + Call out to ``dpkg`` to make sure the code doing the comparison is + compatible with what the ``apt`` library would do. Mimic the return + values. 
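# --- illustrative sketch (annotation, not part of the patch) -----------------
# With the shim, existing ``apt_pkg``-style call sites keep working without
# python-apt being importable. The package name and versions are examples only.
from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg

cache = apt_pkg.Cache()
pkg = cache['keystone']              # KeyError if apt does not know the package
if pkg.current_ver:                  # None when the package is not installed
    print(apt_pkg.upstream_version(pkg.current_ver.ver_str))   # e.g. '13.0.2'
print(apt_pkg.version_compare('2:13.0.2-0ubuntu1', '2:13.0.1-0ubuntu1'))  # -> 1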
+ + Upstream reference: + https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html + ?highlight=version_compare#apt_pkg.version_compare + + :param a: version string + :type a: str + :param b: version string + :type b: str + :returns: >0 if ``a`` is greater than ``b``, 0 if a equals b, + <0 if ``a`` is smaller than ``b`` + :rtype: int + :raises: subprocess.CalledProcessError, RuntimeError + """ + for op in ('gt', 1), ('eq', 0), ('lt', -1): + try: + subprocess.check_call(['dpkg', '--compare-versions', + a, op[0], b], + stderr=subprocess.STDOUT, + universal_newlines=True) + return op[1] + except subprocess.CalledProcessError as cp: + if cp.returncode == 1: + continue + raise + else: + raise RuntimeError('Unable to compare "{}" and "{}", according to ' + 'our logic they are neither greater, equal nor ' + 'less than each other.') diff --git a/config.yaml b/config.yaml index f85c8f8e314c1c6f558d059deb78590d0fd622c5..e34317b3e4ff7f70fc2914f3a5db80a51c3de574 100644 --- a/config.yaml +++ b/config.yaml @@ -76,10 +76,6 @@ options: description: | Apply system hardening. Supports a space-delimited list of modules to run. Supported modules currently include os, ssh, apache and mysql. - config-file: - type: string - default: "/etc/keystone/keystone.conf" - description: "Location of keystone configuration file" service-port: type: int default: 5000 @@ -116,10 +112,37 @@ options: type: string default: 'Admin' description: Admin role to be associated with admin and service users. + token-provider: + type: string + default: + description: | + Transitional configuration option to enable migration to Fernet tokens + prior to upgrade to OpenStack Rocky. + . + Supported values are 'uuid' and 'fernet'. + . + NOTE: This configuration option is honoured on OpenStack versions Ocata + through Queens. For OpenStack Rocky it is a unconfigurable default. + Silently ignored for all other versions. token-expiration: type: int default: 3600 description: Amount of time (in seconds) a token should remain valid. + fernet-max-active-keys: + type: int + default: 3 + description: | + This is the maximum number of active keys when `token-provider` is set to + "fernet". If has a minimum of 3, which includes the spare and staging + keys. When set to 3, the rotation time for the keys is the same as the + token expiration time. When set to a higher value, the rotation time is + less than the `token-expiration` time as calculated by: + . + rotation-time = token-expiration / (fernet-max-active-keys - 2) + . + Please see the charm documentation for further details about how to use + the Fernet token parameters to achieve a key strategy appropriate for the + system in question. service-tenant: type: string default: "services" @@ -600,4 +623,11 @@ options: description: | A comma-separated list of nagios servicegroups. If left empty, the nagios_context will be used as the servicegroup - + use-policyd-override: + type: boolean + default: False + description: | + If True then use the resource file named 'policyd-override' to install + override YAML files in the service's policy.d directory. The resource + file should be a ZIP file containing at least one yaml file with a .yaml + or .yml extension. If False then remove the overrides. 
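# --- illustrative arithmetic (annotation, not part of the patch) -------------
# Worked example for the fernet options above: with the defaults
# (token-expiration=3600, fernet-max-active-keys=3) keys rotate exactly as
# often as tokens expire; adding keys shortens the rotation interval.
token_expiration = 3600
fernet_max_active_keys = 5
rotation_time = token_expiration / (fernet_max_active_keys - 2)   # 1200 seconds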
diff --git a/tests/charmhelpers/core/host_factory/__init__.py b/files/.gitkeep similarity index 100% rename from tests/charmhelpers/core/host_factory/__init__.py rename to files/.gitkeep diff --git a/hooks/charmhelpers b/hooks/charmhelpers deleted file mode 120000 index 702de734b0c015b34565dfbd7ba8c48ace8cb262..0000000000000000000000000000000000000000 --- a/hooks/charmhelpers +++ /dev/null @@ -1 +0,0 @@ -../charmhelpers \ No newline at end of file diff --git a/hooks/install b/hooks/install index 29ff68948033d91316d7e01d4cbc2b44f61ba8f5..eb058242b43a001fade1061e2c5d815dcc0218d6 100755 --- a/hooks/install +++ b/hooks/install @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -e # Wrapper to deal with newer Ubuntu versions that don't have py2 installed # by default. @@ -11,7 +11,7 @@ check_and_install() { fi } -PYTHON="python" +PYTHON="python3" for dep in ${DEPS[@]}; do check_and_install ${PYTHON} ${dep} diff --git a/hooks/keystone-middleware-relation-broken b/hooks/keystone-middleware-relation-broken new file mode 120000 index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0 --- /dev/null +++ b/hooks/keystone-middleware-relation-broken @@ -0,0 +1 @@ +keystone_hooks.py \ No newline at end of file diff --git a/hooks/keystone-middleware-relation-changed b/hooks/keystone-middleware-relation-changed new file mode 120000 index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0 --- /dev/null +++ b/hooks/keystone-middleware-relation-changed @@ -0,0 +1 @@ +keystone_hooks.py \ No newline at end of file diff --git a/hooks/keystone-middleware-relation-departed b/hooks/keystone-middleware-relation-departed new file mode 120000 index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0 --- /dev/null +++ b/hooks/keystone-middleware-relation-departed @@ -0,0 +1 @@ +keystone_hooks.py \ No newline at end of file diff --git a/hooks/keystone-middleware-relation-joined b/hooks/keystone-middleware-relation-joined new file mode 120000 index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0 --- /dev/null +++ b/hooks/keystone-middleware-relation-joined @@ -0,0 +1 @@ +keystone_hooks.py \ No newline at end of file diff --git a/hooks/keystone_context.py b/hooks/keystone_context.py index f4de3dc5ad4687718014d6ed4ce3c061f6938732..265e44aefb7c3271ffd713648135963c2bf462cf 100644 --- a/hooks/keystone_context.py +++ b/hooks/keystone_context.py @@ -34,14 +34,13 @@ from charmhelpers.core.host import ( from charmhelpers.contrib.openstack import context from charmhelpers.contrib.hahelpers.cluster import ( - DC_RESOURCE_NAME, determine_apache_port, determine_api_port, - is_elected_leader, https, ) from charmhelpers.core.hookenv import ( + charm_dir, config, log, leader_get, @@ -151,6 +150,38 @@ class SSLContext(context.ApacheSSLContext): class ApacheSSLContext(SSLContext): + is_leader, + local_unit, + related_units, + relation_ids, + relation_get, +) + +from charmhelpers.contrib.openstack.utils import ( + CompareOpenStackReleases, + os_release, +) + + +class MiddlewareContext(context.OSContextGenerator): + interfaces = ['keystone-middleware'] + + def __call__(self): + + middlewares = [] + + for rid in relation_ids('keystone-middleware'): + if related_units(rid): + for unit in related_units(rid): + middleware_name = relation_get('middleware_name', + rid=rid, + unit=unit) + if middleware_name: + middlewares.append(middleware_name) + return { + 'middlewares': ",".join(middlewares) + } + interfaces = ['https'] 
external_ports = [] @@ -324,6 +355,8 @@ class KeystoneContext(context.OSContextGenerator): ctxt['identity_backend'] = config('identity-backend') ctxt['assignment_backend'] = config('assignment-backend') + ctxt['token_provider'] = config('token-provider') + ctxt['fernet_max_active_keys'] = config('fernet-max-active-keys') if config('identity-backend') == 'ldap': ctxt['ldap_server'] = config('ldap-server') ctxt['ldap_user'] = config('ldap-user') @@ -563,12 +596,41 @@ class TokenFlushContext(context.OSContextGenerator): def __call__(self): ctxt = { - 'token_flush': is_elected_leader(DC_RESOURCE_NAME) + 'token_flush': (not fernet_enabled() and is_leader()) } return ctxt -class IdpFetchContext(context.OSContextGenerator): +class FernetCronContext(context.OSContextGenerator): + + def __call__(self): + token_expiration = int(config('token-expiration')) + ctxt = { + 'enabled': (fernet_enabled() and is_leader()), + 'unit_name': local_unit(), + 'charm_dir': charm_dir(), + 'minute': ('*/5' if token_expiration > 300 else '*') + } + return ctxt + + +def fernet_enabled(): + """Helper function for determinining whether Fernet tokens are enabled. + + :returns: True if the fernet keys should be configured. + :rtype: bool + """ + cmp_release = CompareOpenStackReleases(os_release('keystone')) + if cmp_release < 'ocata': + return False + elif cmp_release >= 'ocata' and cmp_release < 'rocky': + return config('token-provider') == 'fernet' + else: + return True + + +class KeystoneFIDServiceProviderContext(context.OSContextGenerator): + interfaces = ['keystone-fid-service-provider'] def __call__(self): from keystone_utils import (get_api_suffix, @@ -584,3 +646,26 @@ class IdpFetchContext(context.OSContextGenerator): get_api_suffix()) } return ctxt + + +class AuthMethods(context.OSContextGenerator): + + auth_methods = ["external", "password", "token", "oauth1", + "openid", "totp", "application_credential"] + + def __call__(self): + + _external = "external" + _protocol_name = "" + for rid in relation_ids("keystone-fid-service-provider"): + for unit in related_units(rid): + rdata = relation_get(unit=unit, rid=rid) + _protocol_name = rdata.get('protocol-name').strip('"') + if _protocol_name and _protocol_name not in self.auth_methods: + self.auth_methods.append(_protocol_name) + # We are federated so remove the external method + if _external in self.auth_methods: + self.auth_methods.remove(_external) + + ctxt = {"auth_methods": ",".join(self.auth_methods)} + return ctxt diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py index 505db8a7b867de2c46c8ef6e79bc57c1a0294ca1..6037ffaf17329c7ab25338db876512dd439afe08 100755 --- a/hooks/keystone_hooks.py +++ b/hooks/keystone_hooks.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # @@ -19,6 +19,17 @@ import json import os import sys +_path = os.path.dirname(os.path.realpath(__file__)) +_root = os.path.abspath(os.path.join(_path, '..')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + + +_add_path(_root) + from subprocess import check_call from charmhelpers.contrib import unison @@ -42,6 +53,8 @@ from charmhelpers.core.hookenv import ( status_set, open_port, is_leader, + relation_id, + leader_set, ) from charmhelpers.core.host import ( @@ -75,8 +88,12 @@ from charmhelpers.contrib.openstack.utils import ( install_os_snaps, get_snaps_install_info_from_origin, enable_memcache, + series_upgrade_prepare, + series_upgrade_complete, ) +from keystone_context import fernet_enabled + from 
keystone_utils import ( add_service_to_keystone, add_credentials_to_keystone, @@ -112,6 +129,7 @@ from keystone_utils import ( ensure_pki_dir_permissions, ensure_permissions, force_ssl_sync, + is_expected_scale, filter_null, ensure_ssl_dirs, ensure_pki_cert_paths, @@ -131,6 +149,14 @@ from keystone_utils import ( ADMIN_PROJECT, create_or_show_domain, keystone_service, + restart_keystone, + key_leader_set, + key_setup, + key_write, + pause_unit_helper, + resume_unit_helper, + remove_old_packages, + stop_manager_instance, ) from charmhelpers.contrib.hahelpers.cluster import ( @@ -142,7 +168,7 @@ from charmhelpers.contrib.hahelpers.cluster import ( ) from charmhelpers.contrib.openstack.ha.utils import ( - update_dns_ha_resource_params, + generate_ha_relation_data, expect_ha, ) @@ -154,12 +180,10 @@ from charmhelpers.contrib.peerstorage import ( ) from charmhelpers.contrib.openstack.ip import ( ADMIN, + PUBLIC, resolve_address, ) from charmhelpers.contrib.network.ip import ( - get_iface_for_address, - get_netmask_for_address, - is_ipv6, get_relation_ip, ) from charmhelpers.contrib.openstack.context import ADDRESS_TYPES @@ -168,6 +192,17 @@ from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.hardening.harden import harden +from charmhelpers.contrib.openstack.cert_utils import ( + get_certificate_request, + process_certificates, +) + +from charmhelpers.contrib.openstack.policyd import ( + maybe_do_policyd_overrides, + maybe_do_policyd_overrides_on_config_changed, +) + + hooks = Hooks() CONFIGS = register_configs() @@ -215,6 +250,12 @@ def install(): @synchronize_ca_if_changed(fatal=True) @harden() def config_changed(): + # if we are paused, delay doing any config changed hooks. + # It is forced on the resume. + if is_unit_paused_set(): + log("Unit is pause or upgrading. Skipping config_changed", "WARN") + return + if config('prefer-ipv6'): status_set('maintenance', 'configuring ipv6') setup_ipv6() @@ -239,6 +280,10 @@ def config_changed(): for r_id in relation_ids('cluster'): cluster_joined(rid=r_id, ssl_sync_request=False) + # call the policy overrides handler which will install any policy overrides + maybe_do_policyd_overrides_on_config_changed(os_release('keystone'), + 'keystone') + config_changed_postupgrade() @@ -271,6 +316,7 @@ def config_changed_postupgrade(): CONFIGS.write(WSGI_KEYSTONE_API_CONF) if not is_unit_paused_set(): restart_pid_check('apache2') + stop_manager_instance() if config('enable-oidc'): CONFIGS.write(OIDC_MAPPING_FILE) configure_oidc() @@ -289,6 +335,10 @@ def config_changed_postupgrade(): # packages may have changed so ensure they are installed. 
apt_install(filter_installed_packages(determine_packages())) + if is_leader() and fernet_enabled(): + key_setup() + key_leader_set() + configure_https() open_port(config('service-port')) @@ -298,6 +348,14 @@ def config_changed_postupgrade(): if snap_install_requested() and not is_unit_paused_set(): service_restart('snap.keystone.*') + stop_manager_instance() + + if (is_db_initialised() and is_elected_leader(CLUSTER_RES) and not + is_unit_paused_set()): + ensure_initial_admin(config) + if CompareOpenStackReleases( + os_release('keystone')) >= 'liberty': + CONFIGS.write(POLICY_JSON) initialise_pki() @@ -310,6 +368,8 @@ def config_changed_postupgrade(): for r_id in relation_ids('ha'): ha_joined(relation_id=r_id) + notify_middleware_with_release_version() + @synchronize_ca_if_changed(fatal=True) def initialise_pki(): @@ -402,9 +462,10 @@ def update_all_identity_relation_units(check_db_ready=True): log("Database not yet initialised - deferring identity-relation " "updates", level=INFO) return - - if is_elected_leader(CLUSTER_RES): - ensure_initial_admin(config) + if not is_expected_scale(): + log("Keystone charm and it's dependencies not yet at expected scale " + "- deferring identity-relation updates", level=INFO) + return log('Firing identity_changed hook for all related services.') for rid in relation_ids('identity-service'): @@ -431,6 +492,17 @@ def update_all_domain_backends(): domain_backend_changed(relation_id=rid, unit=unit) +def update_all_fid_backends(): + if CompareOpenStackReleases(os_release('keystone')) < 'ocata': + log('Ignoring keystone-fid-service-provider relation as it is' + ' not supported on releases older than Ocata') + return + """If there are any config changes, e.g. for domain or service port + make sure to update those for all relation-level buckets""" + for rid in relation_ids('keystone-fid-service-provider'): + update_keystone_fid_service_provider(relation_id=rid) + + def leader_init_db_if_ready(use_current_context=False): """ Initialise the keystone db if it is ready and mark it as initialised. @@ -454,6 +526,10 @@ def leader_init_db_if_ready(use_current_context=False): return migrate_database() + ensure_initial_admin(config) + if CompareOpenStackReleases( + os_release('keystone')) >= 'liberty': + CONFIGS.write(POLICY_JSON) # Ensure any existing service entries are updated in the # new database backend. Also avoid duplicate db ready check. update_all_identity_relation_units(check_db_ready=False) @@ -470,7 +546,7 @@ def db_changed(): CONFIGS.write(KEYSTONE_CONF) leader_init_db_if_ready(use_current_context=True) if CompareOpenStackReleases( - os_release('keystone-common')) >= 'liberty': + os_release('keystone')) >= 'liberty': CONFIGS.write(POLICY_JSON) @@ -488,6 +564,13 @@ def pgsql_db_changed(): CONFIGS.write(POLICY_JSON) +@hooks.hook('shared-db-relation-departed', + 'shared-db-relation-broken') +def db_departed_or_broken(): + if is_leader(): + leader_set({'db-initialised': None}) + + @hooks.hook('identity-service-relation-changed') @restart_on_change(restart_map(), restart_functions=restart_function_map()) @synchronize_ca_if_changed() @@ -522,9 +605,9 @@ def identity_changed(relation_id=None, remote_unit=None): # We base the decision to notify on whether these parameters have # changed (if csum is unchanged from previous notify, relation will # not fire). 
- csum.update(settings.get('public_url', None)) - csum.update(settings.get('admin_url', None)) - csum.update(settings.get('internal_url', None)) + csum.update(settings.get('public_url', None).encode('utf-8')) + csum.update(settings.get('admin_url', None).encode('utf-8')) + csum.update(settings.get('internal_url', None).encode('utf-8')) notifications['%s-endpoint-changed' % (service)] = csum.hexdigest() else: # Each unit needs to set the db information otherwise if the unit @@ -657,7 +740,7 @@ def cluster_changed(): if not is_leader(): echo_whitelist.append('ssl-cert-master') - log("Peer echo whitelist: %s" % (echo_whitelist), level=DEBUG) + log("Peer echo whitelist: {}".format(echo_whitelist), level=DEBUG) peer_echo(includes=echo_whitelist, force=True) check_peer_actions() @@ -707,80 +790,33 @@ def leader_elected(): @hooks.hook('leader-settings-changed') @restart_on_change(restart_map(), stopstart=True) def leader_settings_changed(): + + # if we are paused, delay doing any config changed hooks. + # It is forced on the resume. + if is_unit_paused_set(): + log("Unit is pause or upgrading. Skipping config_changed", "WARN") + return + # Since minions are notified of a regime change via the # leader-settings-changed hook, rewrite the token flush cron job to make # sure only the leader is running the cron job. CONFIGS.write(TOKEN_FLUSH_CRON_FILE) + # Make sure we keep domain and/or project ids used in templates up to date + if CompareOpenStackReleases( + os_release('keystone')) >= 'liberty': + CONFIGS.write(POLICY_JSON) + + if fernet_enabled(): + key_write() + update_all_identity_relation_units() @hooks.hook('ha-relation-joined') def ha_joined(relation_id=None): - cluster_config = get_hacluster_config() - resources = { - 'res_ks_haproxy': 'lsb:haproxy', - } - resource_params = { - 'res_ks_haproxy': 'op monitor interval="5s"' - } - - if config('dns-ha'): - update_dns_ha_resource_params(relation_id=relation_id, - resources=resources, - resource_params=resource_params) - else: - vip_group = [] - for vip in cluster_config['vip'].split(): - if is_ipv6(vip): - res_ks_vip = 'ocf:heartbeat:IPv6addr' - vip_params = 'ipv6addr' - else: - res_ks_vip = 'ocf:heartbeat:IPaddr2' - vip_params = 'ip' - - iface = (get_iface_for_address(vip) or - config('vip_iface')) - netmask = (get_netmask_for_address(vip) or - config('vip_cidr')) - - if iface is not None: - vip_key = 'res_ks_{}_vip'.format(iface) - if vip_key in vip_group: - if vip not in resource_params[vip_key]: - vip_key = '{}_{}'.format(vip_key, vip_params) - else: - log("Resource '%s' (vip='%s') already exists in " - "vip group - skipping" % (vip_key, vip), WARNING) - continue - - vip_group.append(vip_key) - resources[vip_key] = res_ks_vip - resource_params[vip_key] = ( - 'params {ip}="{vip}" cidr_netmask="{netmask}"' - ' nic="{iface}"'.format(ip=vip_params, - vip=vip, - iface=iface, - netmask=netmask) - ) - - if len(vip_group) >= 1: - relation_set(relation_id=relation_id, - groups={CLUSTER_RES: ' '.join(vip_group)}) - - init_services = { - 'res_ks_haproxy': 'haproxy' - } - clones = { - 'cl_ks_haproxy': 'res_ks_haproxy' - } - relation_set(relation_id=relation_id, - init_services=init_services, - corosync_bindiface=cluster_config['ha-bindiface'], - corosync_mcastport=cluster_config['ha-mcastport'], - resources=resources, - resource_params=resource_params, - clones=clones) + settings = generate_ha_relation_data('ks') + relation_set(relation_id=relation_id, **settings) @hooks.hook('ha-relation-changed') @@ -797,6 +833,12 @@ def ha_changed(): 
update_all_identity_relation_units_force_sync() else: update_all_identity_relation_units() + if (is_db_initialised() and is_elected_leader(CLUSTER_RES) and not + is_unit_paused_set()): + ensure_initial_admin(config) + update_all_identity_relation_units() + update_all_domain_backends() + update_all_fid_backends() @hooks.hook('identity-admin-relation-changed') @@ -1048,10 +1090,18 @@ def upgrade_charm(): ensure_local_user=True) ensure_ssl_dirs() + packages_to_install = filter_installed_packages(determine_packages()) + if packages_to_install: + log('Installing apt packages') + status_set('maintenance', 'Installing apt packages') + apt_install(packages_to_install) + packages_removed = remove_old_packages() if run_in_apache(): disable_unused_apache_sites() + log('Regenerating configuration files') + status_set('maintenance', 'Regenerating configuration files') CONFIGS.write_all() # See LP bug 1519035 @@ -1059,11 +1109,21 @@ def upgrade_charm(): update_nrpe_config() + if packages_removed: + status_set('maintenance', 'Restarting services') + log("Package purge detected, restarting services", "INFO") + for s in services(): + service_restart(s) + stop_manager_instance() + if is_elected_leader(CLUSTER_RES): log('Cluster leader - ensuring endpoint configuration is up to ' 'date', level=DEBUG) update_all_identity_relation_units() + # call the policy overrides handler which will install any policy overrides + maybe_do_policyd_overrides(os_release('keystone'), 'keystone') + @hooks.hook('update-status') @harden() @@ -1075,6 +1135,8 @@ def update_status(): 'nrpe-external-master-relation-changed') def update_nrpe_config(): # python-dbus is used by check_upstart_job + log('Updating NRPE configuration') + status_set('maintenance', 'Updating NRPE configuration') apt_install('python-dbus') hostname = nrpe.get_nagios_hostname() current_unit = nrpe.get_nagios_unit_name() @@ -1090,6 +1152,157 @@ def update_nrpe_config(): nrpe_setup.write() +@hooks.hook('keystone-fid-service-provider-relation-joined', + 'keystone-fid-service-provider-relation-changed') +def keystone_fid_service_provider_changed(): + if get_api_version() < 3: + log('Identity federation is only supported with keystone v3') + return + if CompareOpenStackReleases(os_release('keystone')) < 'ocata': + log('Ignoring keystone-fid-service-provider relation as it is' + ' not supported on releases older than Ocata') + return + # for the join case a keystone public-facing hostname and service + # port need to be set + update_keystone_fid_service_provider(relation_id=relation_id()) + + # handle relation data updates (if any), e.g. 
remote_id_attribute + # and a restart will be handled via a nonce, not restart_on_change + CONFIGS.write(KEYSTONE_CONF) + + # The relation is container-scoped so this keystone unit's unitdata + # will only contain a nonce of a single fid subordinate for a given + # fid backend (relation id) + restart_nonce = relation_get('restart-nonce') + if restart_nonce: + nonce = json.loads(restart_nonce) + # multiplex by relation id for multiple federated identity + # provider charms + fid_nonce_key = 'fid-restart-nonce-{}'.format(relation_id()) + db = unitdata.kv() + if restart_nonce != db.get(fid_nonce_key): + restart_keystone() + db.set(fid_nonce_key, nonce) + db.flush() + + +@hooks.hook('keystone-fid-service-provider-relation-broken') +def keystone_fid_service_provider_broken(): + if CompareOpenStackReleases(os_release('keystone')) < 'ocata': + log('Ignoring keystone-fid-service-provider relation as it is' + ' not supported on releases older than Ocata') + return + + restart_keystone() + + +@hooks.hook('websso-trusted-dashboard-relation-joined', + 'websso-trusted-dashboard-relation-changed', + 'websso-trusted-dashboard-relation-broken') +@restart_on_change(restart_map(), restart_functions=restart_function_map()) +def websso_trusted_dashboard_changed(): + if get_api_version() < 3: + log('WebSSO is only supported with keystone v3') + return + if CompareOpenStackReleases(os_release('keystone')) < 'ocata': + log('Ignoring WebSSO relation as it is not supported on' + ' releases older than Ocata') + return + CONFIGS.write(KEYSTONE_CONF) + + +def update_keystone_fid_service_provider(relation_id=None): + if relation_ids('certificates'): + tls_enabled = True + else: + tls_enabled = (config('ssl_cert') is not None and + config('ssl_key') is not None) + # NOTE: thedac Use resolve_address which checks host name, VIP and + # network bindings. Use PUBLIC for now. Possible TODO make this + # configurable? + hostname = resolve_address(endpoint_type=PUBLIC, override=True) + # reactive endpoints implementation on the other side, hence + # json-encoded values + fid_settings = { + 'hostname': json.dumps(hostname), + 'port': json.dumps(config('service-port')), + 'tls-enabled': json.dumps(tls_enabled), + } + + relation_set(relation_id=relation_id, + relation_settings=fid_settings) + + +@hooks.hook('certificates-relation-joined') +def certs_joined(relation_id=None): + relation_set( + relation_id=relation_id, + relation_settings=get_certificate_request()) + + +@hooks.hook('certificates-relation-changed') +@restart_on_change(restart_map(), stopstart=True) +def certs_changed(relation_id=None, unit=None): + # update_all_identity_relation_units calls the keystone API + # so configs need to be written and services restarted + # before + @restart_on_change(restart_map(), stopstart=True) + def write_certs_and_config(): + if process_certificates('keystone', relation_id, unit): + configure_https() + return True + return False + if not write_certs_and_config(): + log('no certificates for us on the relation yet, deferring.', + level=INFO) + return + # If enabling https the identity endpoints need updating. 
+ if (is_db_initialised() and is_elected_leader(CLUSTER_RES) and not + is_unit_paused_set()): + ensure_initial_admin(config) + update_all_identity_relation_units() + update_all_domain_backends() + update_all_fid_backends() + + +def notify_middleware_with_release_version(): + for rid in relation_ids('keystone-middleware'): + relation_set(relation_id=rid, release=os_release('keystone')) + + +@hooks.hook('keystone-middleware-relation-joined') +def keystone_middleware_joined(): + notify_middleware_with_release_version() + + +@hooks.hook('keystone-middleware-relation-changed', + 'keystone-middleware-relation-broken', + 'keystone-middleware-relation-departed') +@restart_on_change(restart_map()) +def keystone_middleware_changed(): + CONFIGS.write(KEYSTONE_CONF) + + +@hooks.hook('pre-series-upgrade') +def pre_series_upgrade(): + log("Running prepare series upgrade hook", "INFO") + series_upgrade_prepare( + pause_unit_helper, CONFIGS) + + +@hooks.hook('post-series-upgrade') +def post_series_upgrade(): + log("Running complete series upgrade hook", "INFO") + # if we just upgraded from non systemd then ensure that the new packages of + # keystone definitely don't run the keystone service if we are a wsgi + # configured system. + if run_in_apache(): + disable_unused_apache_sites() + service_pause('keystone') + series_upgrade_complete( + resume_unit_helper, CONFIGS) + + def main(): try: hooks.execute(sys.argv) diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py index c3e817b8f3e951939918ece611b1f2b73d744f1d..7b7df703ff0ada4b4eaa3d724dc77c4dc731defd 100644 --- a/hooks/keystone_utils.py +++ b/hooks/keystone_utils.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 # # Copyright 2016 Canonical Ltd # @@ -25,10 +25,10 @@ import shutil import subprocess import tarfile import threading +import tempfile import time -import urlparse +import urllib.parse import uuid -import sys from itertools import chain from base64 import b64encode @@ -49,6 +49,10 @@ from charmhelpers.contrib.network.ip import ( get_ipv6_addr ) +from charmhelpers.contrib.openstack.ha.utils import ( + expect_ha, +) + from charmhelpers.contrib.openstack.ip import ( resolve_address, PUBLIC, @@ -98,11 +102,17 @@ from charmhelpers.core.decorators import ( from charmhelpers.core.hookenv import ( charm_dir, + atexit, + cached, config, + expected_peer_units, + expected_related_units, + is_leader, leader_get, leader_set, log, local_unit, + metadata, relation_get, relation_set, relation_id, @@ -113,13 +123,19 @@ from charmhelpers.core.hookenv import ( WARNING, ERROR, is_leader, + ERROR, + WARNING, + status_set, ) from charmhelpers.fetch import ( apt_install, apt_update, apt_upgrade, + apt_purge, + apt_autoremove, add_source, + filter_missing_packages, ) from charmhelpers.core.host import ( @@ -127,6 +143,8 @@ from charmhelpers.core.host import ( add_group, add_user_to_group, mkdir, + mkdir, + service_restart, service_stop, service_start, service_restart, @@ -134,6 +152,7 @@ from charmhelpers.core.host import ( lsb_release, write_file, CompareHostReleases, + write_file, ) from charmhelpers.contrib.peerstorage import ( @@ -148,6 +167,7 @@ from charmhelpers.core.templating import render import keystone_context import keystone_ssl as ssl +import uds_comms as uds TEMPLATES = 'templates/' @@ -162,15 +182,24 @@ BASE_PACKAGES = [ 'python-psycopg2', 'python-requests', 'python-six', + 'python3-six', 'pwgen', 'unison', 'uuid', ] +PY3_PACKAGES = [ + 'python3-keystone', + 'python3-keystoneclient', + 'python3-memcache', + 'python3-six', + 
'libapache2-mod-wsgi-py3', +] + BASE_PACKAGES_SNAP = [ 'haproxy', 'openssl', - 'python-six', + 'python3-six', 'pwgen', 'unison', 'uuid', @@ -286,6 +315,10 @@ ADMIN_PROJECT = 'admin' DEFAULT_DOMAIN = 'default' SERVICE_DOMAIN = 'service_domain' TOKEN_FLUSH_CRON_FILE = '/etc/cron.d/keystone-token-flush' +KEY_SETUP_FILE = '/etc/keystone/key-setup' +CREDENTIAL_KEY_REPOSITORY = '/etc/keystone/credential-keys/' +FERNET_KEY_REPOSITORY = '/etc/keystone/fernet-keys/' +FERNET_KEY_ROTATE_SYNC_CRON_FILE = '/etc/cron.d/keystone-fernet-rotate-sync' WSGI_KEYSTONE_API_CONF = '/etc/apache2/sites-enabled/wsgi-openstack-api.conf' UNUSED_APACHE_SITE_FILES = ['/etc/apache2/sites-enabled/keystone.conf', '/etc/apache2/sites-enabled/wsgi-keystone.conf'] @@ -300,7 +333,15 @@ BASE_RESOURCE_MAP = OrderedDict([ keystone_context.HAProxyContext(), context.BindHostContext(), context.WorkerConfigContext(), - context.MemcacheContext(package='keystone')], + context.MemcacheContext(package='keystone'), + keystone_context.KeystoneFIDServiceProviderContext(), + keystone_context.WebSSOTrustedDashboardContext(), + keystone_context.context.SubordinateConfigContext( + interface=['keystone-middleware'], + service='keystone', + config_file=KEYSTONE_CONF), + keystone_context.MiddlewareContext(), + keystone_context.AuthMethods()] }), (KEYSTONE_LOGGER_CONF, { 'contexts': [keystone_context.KeystoneLoggingContext()], @@ -350,6 +391,11 @@ BASE_RESOURCE_MAP = OrderedDict([ context.SyslogContext()], 'services': [], }), + (FERNET_KEY_ROTATE_SYNC_CRON_FILE, { + 'contexts': [keystone_context.FernetCronContext(), + context.SyslogContext()], + 'services': [], + }), ]) valid_services = { @@ -381,6 +427,10 @@ valid_services = { "type": "OpServer", "desc": "Contrail Analytics Service" }, + "dmapi": { + "type": "datamover", + "desc": "Trilio DataMover API Service" + }, "ec2": { "type": "ec2", "desc": "EC2 Compatibility Layer" @@ -493,6 +543,14 @@ valid_services = { "type": "placement", "desc": "Nova Placement Service" }, + "octavia": { + "type": "load-balancer", + "desc": "Octavia Load Balancer as a Service for OpenStack", + }, + "masakari": { + "type": "instance-ha", + "desc": "Masakari instance HA for Openstack" + }, } # The interface is said to be satisfied if anyone of the interfaces in the @@ -512,7 +570,7 @@ def filter_null(settings, null='__null__'): so that the value is actually unset. """ filtered = {} - for k, v in settings.iteritems(): + for k, v in settings.items(): if v == null: filtered[k] = None else: @@ -620,7 +678,8 @@ def restart_pid_check(service_name, ptable_string=None): @retry_on_exception(5, base_delay=3, exc_type=AssertionError) def check_pids_gone(svc_string): log("Checking no pids for {} exist".format(svc_string), level=INFO) - assert(subprocess.call(["pgrep", svc_string]) == 1) + assert(subprocess.call(["pgrep", svc_string, "--nslist", "pid", + "--ns", str(os.getpid())]) == 1) if not ptable_string: ptable_string = service_name @@ -632,14 +691,38 @@ def restart_pid_check(service_name, ptable_string=None): def restart_function_map(): """Return a dict of services with any custom functions that should be used to restart that service - @returns dict of {'svc1': restart_func, 'svc2', other_func, ...} + + :returns: dict of {'svc1': restart_func, 'svc2', other_func, ...} + :rtype: Dict[str, Callable] """ rfunc_map = {} - if run_in_apache(): - rfunc_map['apache2'] = restart_pid_check + rfunc_map[keystone_service()] = restart_keystone return rfunc_map +def restart_keystone(*args): + """Restart the keystone process. 
+ + This will either keystone or apache2 depending on OpenStack version. + Also stop the ManagerServer (and thus manager.py script) which will + reconnect to keystone on next usage of the ManagerServer. + + Note, as restart_keystone is used in the restart_functions map, when it is + called it is passed the service name. However, this function determines + the actual service name to call, so that is discarded, hence the *args in + the function signature. + """ + if not is_unit_paused_set(): + if snap_install_requested(): + service_restart('snap.keystone.*') + else: + if run_in_apache(): + restart_pid_check(keystone_service()) + else: + service_restart(keystone_service()) + stop_manager_instance() + + def run_in_apache(release=None): """Return true if keystone API is run under apache2 with mod_wsgi in this release. @@ -658,6 +741,8 @@ def disable_unused_apache_sites(): if os.path.exists(apache_site_file): try: # Try it cleanly + log('Disabling unused apache configs') + status_set('maintenance', 'Disabling unused apache configs') subprocess.check_call(['a2dissite', apache_site]) except subprocess.CalledProcessError: # Remove the file @@ -668,15 +753,18 @@ def register_configs(): release = os_release('keystone') configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, openstack_release=release) - for cfg, rscs in resource_map().iteritems(): + for cfg, rscs in resource_map().items(): configs.register(cfg, rscs['contexts']) return configs def restart_map(): - return OrderedDict([(cfg, v['services']) - for cfg, v in resource_map().iteritems() - if v['services']]) + restart_map = OrderedDict([(cfg, v['services']) + for cfg, v in resource_map().items() + if v['services']]) + if os.path.isdir(APACHE_SSL_DIR): + restart_map['{}/*'.format(APACHE_SSL_DIR)] = ['apache2'] + return restart_map def services(): @@ -687,7 +775,7 @@ def services(): def determine_ports(): """Assemble a list of API ports for services we are managing""" ports = [config('admin-port'), config('service-port')] - return list(set(ports)) + return sorted(list(set(ports))) def api_port(service): @@ -698,6 +786,8 @@ def api_port(service): def determine_packages(): + release = CompareOpenStackReleases(os_release('keystone')) + # currently all packages match service names if snap_install_requested(): pkgs = deepcopy(BASE_PACKAGES_SNAP) @@ -709,7 +799,10 @@ def determine_packages(): if git_install_requested(): packages |= set(BASE_GIT_PACKAGES) packages -= set(GIT_PACKAGE_BLACKLIST) - if run_in_apache(): + if release >= 'rocky': + packages = [p for p in packages if not p.startswith('python-')] + packages.extend(PY3_PACKAGES) + elif run_in_apache(): packages.add('libapache2-mod-wsgi') if config('enable-oidc'): packages.add('libapache2-mod-auth-openidc') @@ -718,6 +811,35 @@ def determine_packages(): return sorted(packages) +def determine_purge_packages(): + ''' + Determine list of packages that where previously installed which are no + longer needed. + + :returns: list of package names + ''' + release = CompareOpenStackReleases(os_release('keystone')) + if release >= 'rocky': + pkgs = [p for p in BASE_PACKAGES if p.startswith('python-')] + pkgs.extend(['python-keystone', 'python-memcache']) + return pkgs + return [] + + +def remove_old_packages(): + '''Purge any packages that need to be removed. + + :returns: bool Whether packages were removed. 
+ ''' + installed_packages = filter_missing_packages(determine_purge_packages()) + if installed_packages: + log('Removing apt packages') + status_set('maintenance', 'Removing apt packages') + apt_purge(installed_packages, fatal=True) + apt_autoremove(purge=True, fatal=True) + return bool(installed_packages) + + def save_script_rc(): env_vars = {'OPENSTACK_SERVICE_KEYSTONE': 'keystone', 'OPENSTACK_PORT_ADMIN': determine_api_port( @@ -731,7 +853,7 @@ def save_script_rc(): def do_openstack_upgrade_reexec(configs): do_openstack_upgrade(configs) log("Re-execing hook to pickup upgraded packages", level=INFO) - os.execl('./hooks/config-changed-postupgrade', '') + os.execl('/usr/bin/env', 'python3', './hooks/config-changed-postupgrade') def do_openstack_upgrade(configs): @@ -750,6 +872,8 @@ def do_openstack_upgrade(configs): reset_os_release() apt_install(packages=determine_packages(), options=dpkg_opts, fatal=True) + + remove_old_packages() else: # TODO: Add support for upgrade from deb->snap # NOTE(thedac): Setting devmode until LP#1719636 is fixed @@ -778,11 +902,9 @@ def do_openstack_upgrade(configs): def is_db_initialised(): - if relation_ids('cluster'): - inited = peer_retrieve('db-initialised') - if inited and bool_from_string(inited): - log("Database is initialised", level=DEBUG) - return True + if leader_get('db-initialised'): + log("Database is initialised", level=DEBUG) + return True log("Database is NOT initialised", level=DEBUG) return False @@ -798,6 +920,7 @@ def keystone_service(): def migrate_database(): """Runs keystone-manage to initialize a new database or migrate existing""" log('Migrating the keystone database.', level=INFO) + status_set('maintenance', 'Migrating the keystone database') if snap_install_requested(): service_stop('snap.keystone.*') else: @@ -819,7 +942,8 @@ def migrate_database(): else: service_start(keystone_service()) time.sleep(10) - peer_store('db-initialised', 'True') + leader_set({'db-initialised': True}) + stop_manager_instance() # OLD @@ -898,29 +1022,31 @@ def delete_service_entry(service_name, service_type): manager = get_manager() service_id = manager.resolve_service_id(service_name, service_type) if service_id: - manager.api.services.delete(service_id) - log("Deleted service entry '%s'" % service_name, level=DEBUG) + manager.delete_service_by_id(service_id) + log("Deleted service entry '{}'".format(service_name), level=DEBUG) def create_service_entry(service_name, service_type, service_desc, owner=None): """ Add a new service entry to keystone if one does not already exist """ manager = get_manager() - for service in [s._info for s in manager.api.services.list()]: + for service in manager.list_services(): if service['name'] == service_name: - log("Service entry for '%s' already exists." % service_name, + log("Service entry for '{}' already exists.".format(service_name), level=DEBUG) return - manager.api.services.create(service_name, - service_type, - description=service_desc) - log("Created new service entry '%s'" % service_name, level=DEBUG) + manager.create_service(service_name, service_type, + description=service_desc) + + log("Created new service entry '{}'".format(service_name), level=DEBUG) def create_endpoint_template(region, service, publicurl, adminurl, internalurl): manager = get_manager() - if manager.api_version == 2: + # this needs to be a round-trip to the manager.py script to discover what + # the "current" api_version might be, as it can't just be asserted. 
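+    # (Illustrative note: the attribute access below is intercepted by
+    # KeystoneManagerProxy and serialised over the Unix domain socket as
+    # roughly {"path": ["resolved_api_version"], "args": [], "kwargs": {}}
+    # plus the local endpoint and admin token, with manager.py replying
+    # {"result": <api version>}.)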
+ if manager.resolved_api_version() == 2: create_endpoint_template_v2(manager, region, service, publicurl, adminurl, internalurl) else: @@ -933,7 +1059,7 @@ def create_endpoint_template_v2(manager, region, service, publicurl, adminurl, """ Create a new endpoint template for service if one does not already exist matching name *and* region """ service_id = manager.resolve_service_id(service) - for ep in [e._info for e in manager.api.endpoints.list()]: + for ep in manager.list_endpoints(): if ep['service_id'] == service_id and ep['region'] == region: log("Endpoint template already exists for '%s' in '%s'" % (service, region)) @@ -948,15 +1074,15 @@ def create_endpoint_template_v2(manager, region, service, publicurl, adminurl, else: # delete endpoint and recreate if endpoint urls need updating. log("Updating endpoint template with new endpoint urls.") - manager.api.endpoints.delete(ep['id']) + manager.delete_endpoint_by_id(ep['id']) manager.create_endpoints(region=region, service_id=service_id, publicurl=publicurl, adminurl=adminurl, internalurl=internalurl) - log("Created new endpoint template for '%s' in '%s'" % (region, service), - level=DEBUG) + log("Created new endpoint template for '{}' in '{}'" + .format(region, service), level=DEBUG) def create_endpoint_template_v3(manager, region, service, publicurl, adminurl, @@ -981,11 +1107,11 @@ def create_endpoint_template_v3(manager, region, service, publicurl, adminurl, region ) if ep_deleted or not ep_exists: - manager.api.endpoints.create( - service_id, - endpoints[ep_type], + manager.create_endpoint_by_type( + region=region, + service_id=service_id, interface=ep_type, - region=region + endpoint=endpoints[ep_type], ) @@ -997,11 +1123,11 @@ def create_tenant(name, domain): manager.create_tenant(tenant_name=name, domain=domain, description='Created by Juju') - log("Created new tenant '%s' in domain '%s'" % (name, domain), + log("Created new tenant '{}' in domain '{}'".format(name, domain), level=DEBUG) return - log("Tenant '%s' already exists." % name, level=DEBUG) + log("Tenant '{}' already exists.".format(name), level=DEBUG) def create_or_show_domain(name): @@ -1009,88 +1135,238 @@ def create_or_show_domain(name): manager = get_manager() domain_id = manager.resolve_domain_id(name) if domain_id: - log("Domain '%s' already exists." 
% name, level=DEBUG) + log("Domain '{}' already exists.".format(name), level=DEBUG) else: manager.create_domain(domain_name=name, description='Created by Juju') - log("Created new domain: %s" % name, level=DEBUG) + log("Created new domain: {}".format(name), level=DEBUG) domain_id = manager.resolve_domain_id(name) return domain_id def user_exists(name, domain=None): manager = get_manager() - domain_id = None - if domain: - domain_id = manager.resolve_domain_id(domain) - if not domain_id: - error_out('Could not resolve domain_id for {} when checking if ' - ' user {} exists'.format(domain, name)) - if manager.resolve_user_id(name, user_domain=domain): - if manager.api_version == 2: - users = manager.api.users.list() - else: - users = manager.api.users.list(domain=domain_id) - for user in users: - if user.name.lower() == name.lower(): - # In v3 Domains are seperate user namespaces so need to check - # that the domain matched if provided - if domain: - if domain_id == user.domain_id: - return True - else: - return True - - return False + return manager.user_exists(name, domain=domain) def create_user(name, password, tenant=None, domain=None): """Creates a user if it doesn't already exist, as a member of tenant""" manager = get_manager() if user_exists(name, domain=domain): - log("A user named '%s' already exists in domain '%s'" % (name, domain), - level=DEBUG) + log("A user named '{}' already exists in domain '{}'" + .format(name, domain), level=DEBUG) return tenant_id = None if tenant: tenant_id = manager.resolve_tenant_id(tenant, domain=domain) if not tenant_id: - error_out("Could not resolve tenant_id for tenant '%s' in domain " - "'%s'" % (tenant, domain)) + error_out("Could not resolve tenant_id for tenant '{}' in domain " + "'{}'".format(tenant, domain)) domain_id = None if domain: domain_id = manager.resolve_domain_id(domain) if not domain_id: - error_out('Could not resolve domain_id for domain %s when creating' - ' user %s' % (domain, name)) + error_out('Could not resolve domain_id for domain {} when creating' + ' user {}'.format(domain, name)) manager.create_user(name=name, password=password, email='juju@localhost', tenant_id=tenant_id, domain_id=domain_id) - log("Created new user '%s' tenant: '%s' domain: '%s'" % (name, tenant_id, - domain_id), level=DEBUG) + log("Created new user '{}' tenant: '{}' domain: '{}'" + .format(name, tenant_id, domain_id), level=DEBUG) def get_manager(api_version=None): - """Return a keystonemanager for the correct API version""" - set_python_path() - from manager import get_keystone_manager - return get_keystone_manager(get_local_endpoint(), get_admin_token(), - api_version) + return KeystoneManagerProxy(api_version=api_version) + + +class KeystoneManagerProxy(object): + + def __init__(self, api_version=None, path=None): + self._path = path or [] + self.api_version = api_version + + def __getattribute__(self, attr): + if attr in ['__class__', '_path', 'api_version']: + return super().__getattribute__(attr) + return self.__class__(api_version=self.api_version, + path=self._path + [attr]) + + def __call__(self, *args, **kwargs): + # Following line retained commented-out for future debugging + # print("Called: {} ({}, {})".format(self._path, args, kwargs)) + return _proxy_manager_call(self._path, self.api_version, args, kwargs) + + +JSON_ENCODE_OPTIONS = dict( + sort_keys=True, + allow_nan=False, + indent=None, + separators=(',', ':'), +) + + +class RetryProxyManagerCall(Exception): + pass + + +@retry_on_exception(5, base_delay=3, 
exc_type=RetryProxyManagerCall) +def _proxy_manager_call(path, api_version, args, kwargs): + package = dict(path=path, + api_version=api_version, + api_local_endpoint=get_local_endpoint(), + admin_token=get_admin_token(), + args=args, + kwargs=kwargs) + serialized = json.dumps(package, **JSON_ENCODE_OPTIONS) + server = _get_server_instance() + try: + server.send(serialized) + # wait for the reply + result_str = server.receive() + result = json.loads(result_str) + if 'error' in result: + s = ("The call within manager.py failed with the error: '{}'. " + "The call was: path={}, args={}, kwargs={}, api_version={}" + .format(result['error'], path, args, kwargs, api_version)) + log(s, level=ERROR) + if result.get('retry'): + stop_manager_instance() + raise RetryProxyManagerCall() + raise RuntimeError(s) + return json.loads(result_str)['result'] + except RetryProxyManagerCall: + # cause a retry + raise + except RuntimeError as e: + raise e + except Exception as e: + s = ("Decoding the result from the call to manager.py resulted in " + "error '{}' (command: path={}, args={}, kwargs={}" + .format(str(e), path, args, kwargs)) + log(s, level=ERROR) + raise RuntimeError(s) + + +# singleton to ensure that there's only one manager instance. +_the_manager_instance = None + + +def _get_server_instance(): + """Get a SockServer instance and run up the manager to connect to it. + Ensure that the manager.py is running and is ready to receive messages (i.e + do the handshake. Check that it is still running, and if not, start it + again. In that instance, restart the SockServer + """ + global _the_manager_instance + if _the_manager_instance is None: + _the_manager_instance = ManagerServer() + return _the_manager_instance.server + + +def stop_manager_instance(): + """If a ManagerServer instance exists, then try to kill it, clean-up the + environment and reset the global singleton for it. + """ + global _the_manager_instance + if _the_manager_instance is not None: + _the_manager_instance.clean_up() + _the_manager_instance = None + + +# If a ManagerServer is still running at the end of the charm hook execution +# then kill it off: +atexit(stop_manager_instance) + + +class ManagerServer(): + """This is a singleton server that launches and kills the manager.py script + that is used to allow 'calling' into Keystone when it is in a completely + different process. + + The server() method also ensures that the manager.py script is still + running, and if not, relaunches it. This is to try to make the using the + manager.py methods as transparent, and speedy, as possible. 
+ """ + + def __init__(self): + self.pvar = None + self._server = None + self.socket_file = os.path.join(tempfile.gettempdir(), "keystone-uds") + + @property + def server(self): + self._ensure_running() + return self._server + + def _ensure_running(self): + if self.pvar is None or self.pvar.poll() is not None: + if self._server is not None: + self._server.close() + self._server = uds.UDSServer(self.socket_file) + self._launch_manager() + self._server.wait_for_connection() + + def _launch_manager(self): + script = os.path.abspath(os.path.join(os.path.dirname(__file__), + 'manager.py')) + release = CompareOpenStackReleases(os_release('keystone')) + # need to set the environment variable PYTHONPATH to include the + # payload's directory for the manager.py to find the various keystone + # clients + env = os.environ + _python_path = determine_python_path() + if _python_path: + if _python_path not in os.environ.get('PYTHONPATH', ''): + env['PYTHONPATH'] = ':'.join( + os.environ.get('PYTHONPATH', '').split(':') + + [_python_path]) + # also ensure that the python executable is available if snap + # installed. + if snap_install_requested(): + _bin_path = os.path.join(SNAP_BASE_DIR, 'usr/bin') + if _bin_path not in os.environ.get('PATH', ''): + env['PATH'] = ':'.join( + os.environ.get('PATH', '').split(':') + + [_bin_path]) + # ensure python interpreter matches python version of OpenStack + if release >= 'rocky': + python = 'python3' + else: + python = 'python2' + # launch the process and return immediately + self.pvar = subprocess.Popen([python, script, self.socket_file], + env=env, close_fds=True) + + def clean_up(self): + if self.pvar is not None and self.pvar.poll() is None: + self._server.send("QUIT") + try: + self.pvar.wait(timeout=10) + except subprocess.TimeoutExpired: + self.pvar.kill() + self.pvar = None + if self._server is not None: + self._server.close() + self._server = None + try: + os.remove(self.socket_file) + except OSError: + pass def create_role(name, user=None, tenant=None, domain=None): """Creates a role if it doesn't already exist. 
grants role to user""" manager = get_manager() if not manager.resolve_role_id(name): - manager.api.roles.create(name=name) - log("Created new role '%s'" % name, level=DEBUG) + manager.create_role(name=name) + log("Created new role '{}'".format(name), level=DEBUG) else: - log("A role named '%s' already exists" % name, level=DEBUG) + log("A role named '{}' already exists".format(name), level=DEBUG) if not user and not tenant: return @@ -1129,8 +1405,8 @@ def grant_role(user, role, tenant=None, domain=None, user_domain=None, if tenant: tenant_id = manager.resolve_tenant_id(tenant, domain=project_domain) if not tenant_id: - error_out("Could not resolve tenant_id for tenant '%s' in domain " - "'%s'" % (tenant, domain)) + error_out("Could not resolve tenant_id for tenant '{}' in domain " + "'{}'".format(tenant, domain)) domain_id = None if domain: @@ -1140,7 +1416,7 @@ def grant_role(user, role, tenant=None, domain=None, user_domain=None, cur_roles = manager.roles_for_user(user_id, tenant_id=tenant_id, domain_id=domain_id) - if not cur_roles or role_id not in [r.id for r in cur_roles]: + if not cur_roles or role_id not in [r['id'] for r in cur_roles]: manager.add_user_role(user=user_id, role=role_id, tenant=tenant_id, @@ -1162,37 +1438,33 @@ def grant_role(user, role, tenant=None, domain=None, user_domain=None, def store_data(backing_file, data): with open(backing_file, 'w+') as fd: - fd.writelines("%s\n" % data) + fd.writelines("{}\n".format(data)) def get_admin_passwd(user=None): passwd = config("admin-password") if passwd and passwd.lower() != "none": - # Previous charm versions did not always store on leader setting so do - # this now to avoid an initial update on install/upgrade - if (is_elected_leader(CLUSTER_RES) and - peer_retrieve('{}_passwd'.format(user)) is None): - set_admin_passwd(passwd, user=user) - return passwd + if user is None: + user = config('admin-user') + _migrate_admin_password() - passwd = peer_retrieve('{}_passwd'.format(user)) + passwd = leader_get('{}_passwd'.format(user)) - if not passwd and is_elected_leader(CLUSTER_RES): - log("Generating new passwd for user: %s" % - config("admin-user")) + if not passwd and is_leader(): + log("Generating new passwd for user: %s" % user) cmd = ['pwgen', '-c', '16', '1'] - passwd = str(subprocess.check_output(cmd)).strip() + passwd = str(subprocess.check_output(cmd).decode('UTF-8')).strip() return passwd def set_admin_passwd(passwd, user=None): if user is None: - user = 'admin' + user = config('admin-user') - peer_store('{}_passwd'.format(user), passwd) + leader_set({'{}_passwd'.format(user): passwd}) def get_api_version(): @@ -1202,31 +1474,11 @@ def get_api_version(): return api_version -def set_python_path(): - """ Set the Python path to include snap installed python libraries - - The charm itself requires access to the python client. When installed as a - snap the client libraries are in /snap/$SNAP/common/lib/python2.7. This - function sets the python path to allow clients to be imported from snap - installs. - """ - if snap_install_requested(): - sys.path.append(determine_python_path()) - - def ensure_initial_admin(config): # Allow retry on fail since leader may not be ready yet. # NOTE(hopem): ks client may not be installed at module import time so we # use this wrapped approach instead. 
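+    # Failures inside the manager.py helper are re-raised by
+    # _proxy_manager_call as RuntimeError, hence the exception type used in
+    # the retry decorator below.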
- set_python_path() - try: - from keystoneclient.apiclient.exceptions import InternalServerError - except: - # Backwards-compatibility for earlier versions of keystoneclient (< I) - from keystoneclient.exceptions import (ClientException as - InternalServerError) - - @retry_on_exception(3, base_delay=3, exc_type=InternalServerError) + @retry_on_exception(3, base_delay=3, exc_type=RuntimeError) def _ensure_initial_admin(config): """Ensures the minimum admin stuff exists in whatever database we're using. @@ -1307,9 +1559,9 @@ def endpoint_url(ip, port, suffix=None): if is_ipv6(ip): ip = "[{}]".format(ip) if suffix: - ep = "%s://%s:%s/%s" % (proto, ip, port, suffix) + ep = "{}://{}:{}/{}".format(proto, ip, port, suffix) else: - ep = "%s://%s:%s" % (proto, ip, port) + ep = "{}://{}:{}".format(proto, ip, port) return ep @@ -1326,15 +1578,14 @@ def create_keystone_endpoint(public_ip, service_port, def update_user_password(username, password, domain): manager = get_manager() - log("Updating password for user '%s'" % username) + log("Updating password for user '{}'".format(username)) user_id = manager.resolve_user_id(username, user_domain=domain) if user_id is None: - error_out("Could not resolve user id for '%s'" % username) + error_out("Could not resolve user id for '{}'".format(username)) manager.update_password(user=user_id, password=password) - log("Successfully updated password for user '%s'" % - username) + log("Successfully updated password for user '{}'".format(username)) def load_stored_passwords(path=SERVICE_PASSWD_PATH): @@ -1350,29 +1601,28 @@ def load_stored_passwords(path=SERVICE_PASSWD_PATH): def _migrate_admin_password(): - """Migrate on-disk admin passwords to peer storage""" - if os.path.exists(STORED_PASSWD): - log('Migrating on-disk stored passwords to peer storage') + """Migrate on-disk admin passwords to leader storage""" + if is_leader() and os.path.exists(STORED_PASSWD): + log('Migrating on-disk stored passwords to leader storage') with open(STORED_PASSWD) as fd: - peer_store("admin_passwd", fd.readline().strip('\n')) + leader_set({"admin_passwd": fd.readline().strip('\n')}) os.unlink(STORED_PASSWD) def _migrate_service_passwords(): - """Migrate on-disk service passwords to peer storage""" - if os.path.exists(SERVICE_PASSWD_PATH): - log('Migrating on-disk stored passwords to peer storage') + """Migrate on-disk service passwords to leader storage""" + if is_leader() and os.path.exists(SERVICE_PASSWD_PATH): + log('Migrating on-disk stored passwords to leader storage') creds = load_stored_passwords() - for k, v in creds.iteritems(): - peer_store(key="{}_passwd".format(k), value=v) + for k, v in creds.items(): + leader_set({"{}_passwd".format(k): v}) os.unlink(SERVICE_PASSWD_PATH) def get_service_password(service_username): _migrate_service_passwords() - peer_key = "{}_passwd".format(service_username) - passwd = peer_retrieve(peer_key) + passwd = leader_get("{}_passwd".format(service_username)) if passwd is None: passwd = pwgen(length=64) @@ -1380,13 +1630,11 @@ def get_service_password(service_username): def set_service_password(passwd, user): - peer_key = "{}_passwd".format(user) - peer_store(key=peer_key, value=passwd) + leader_set({"{}_passwd".format(user): passwd}) def is_password_changed(username, passwd): - peer_key = "{}_passwd".format(username) - _passwd = peer_retrieve(peer_key) + _passwd = leader_get("{}_passwd".format(username)) return (_passwd is None or passwd != _passwd) @@ -2001,9 +2249,9 @@ def create_user_credentials(user, passwd_get_callback, 
passwd_set_callback, level=INFO) return - log("Creating service credentials for '%s'" % user, level=DEBUG) + log("Creating service credentials for '{}'".format(user), level=DEBUG) if user_exists(user, domain=domain): - log("User '%s' already exists" % (user), level=DEBUG) + log("User '{}' already exists".format(user), level=DEBUG) # NOTE(dosaboy): see LP #1648677 if is_password_changed(user, passwd): update_user_password(user, passwd, domain) @@ -2018,13 +2266,13 @@ def create_user_credentials(user, passwd_get_callback, passwd_set_callback, grant_role(user, role, tenant=tenant, user_domain=domain, project_domain=domain) else: - log("No role grants requested for user '%s'" % (user), level=DEBUG) + log("No role grants requested for user '{}'".format(user), level=DEBUG) if new_roles: # Allow the remote service to request creation of any additional roles. # Currently used by Swift and Ceilometer. for role in new_roles: - log("Creating requested role '%s'" % role, level=DEBUG) + log("Creating requested role '{}'".format(role), level=DEBUG) create_role(role, user=user, tenant=tenant, domain=domain) return passwd @@ -2047,22 +2295,25 @@ def create_service_credentials(user, new_roles=None): if not tenant: raise Exception("No service tenant provided in config") - domain = None - if get_api_version() > 2: - domain = DEFAULT_DOMAIN - passwd = create_user_credentials(user, get_service_password, - set_service_password, - tenant=tenant, new_roles=new_roles, - grants=[config('admin-role')], - domain=domain) - if get_api_version() > 2: + if get_api_version() < 3: + passwd = create_user_credentials(user, get_service_password, + set_service_password, + tenant=tenant, new_roles=new_roles, + grants=[config('admin-role')], + domain=None) + else: + # api version 3 or above + create_user_credentials(user, get_service_password, + set_service_password, + tenant=tenant, new_roles=new_roles, + grants=[config('admin-role')], + domain=DEFAULT_DOMAIN) # Create account in SERVICE_DOMAIN as well using same password - domain = SERVICE_DOMAIN passwd = create_user_credentials(user, get_service_password, set_service_password, tenant=tenant, new_roles=new_roles, grants=[config('admin-role')], - domain=domain) + domain=SERVICE_DOMAIN) return passwd @@ -2070,15 +2321,14 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): manager = get_manager() settings = relation_get(rid=relation_id, unit=remote_unit) # the minimum settings needed per endpoint - single = set(['service', 'region', 'public_url', 'admin_url', - 'internal_url']) + single = {'service', 'region', 'public_url', 'admin_url', 'internal_url'} https_cns = [] protocol = get_protocol() if single.issubset(settings): # other end of relation advertised only one endpoint - if 'None' in settings.itervalues(): + if 'None' in settings.values(): # Some backend services advertise no endpoint but require a # hook execution to update auth strategy. relation_data = {} @@ -2100,7 +2350,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): # Allow the remote service to request creation of any additional # roles. 
Currently used by Horizon for role in get_requested_roles(settings): - log("Creating requested role: %s" % role) + log("Creating requested role: {}".format(role)) create_role(role) peer_store_and_set(relation_id=relation_id, **relation_data) @@ -2118,14 +2368,16 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): service_username = settings['service'] prefix = config('service-admin-prefix') if prefix: - service_username = "%s%s" % (prefix, service_username) + service_username = "{}{}".format(prefix, service_username) # NOTE(jamespage) internal IP for backwards compat for SSL certs - internal_cn = urlparse.urlparse(settings['internal_url']).hostname + internal_cn = (urllib.parse + .urlparse(settings['internal_url']).hostname) https_cns.append(internal_cn) - public_cn = urlparse.urlparse(settings['public_url']).hostname + public_cn = urllib.parse.urlparse(settings['public_url']).hostname https_cns.append(public_cn) - https_cns.append(urlparse.urlparse(settings['admin_url']).hostname) + https_cns.append( + urllib.parse.urlparse(settings['admin_url']).hostname) else: # assemble multiple endpoints from relation data. service name # should be prepended to setting name, ie: @@ -2143,8 +2395,8 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): # 'public_url': $foo # } # } - endpoints = {} - for k, v in settings.iteritems(): + endpoints = OrderedDict() # for Python3 we need a consistent order + for k, v in settings.items(): ep = k.split('_')[0] x = '_'.join(k.split('_')[1:]) if ep not in endpoints: @@ -2167,19 +2419,22 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): services.append(ep['service']) # NOTE(jamespage) internal IP for backwards compat for # SSL certs - internal_cn = urlparse.urlparse(ep['internal_url']).hostname + internal_cn = (urllib.parse + .urlparse(ep['internal_url']).hostname) https_cns.append(internal_cn) - https_cns.append(urlparse.urlparse(ep['public_url']).hostname) - https_cns.append(urlparse.urlparse(ep['admin_url']).hostname) + https_cns.append( + urllib.parse.urlparse(ep['public_url']).hostname) + https_cns.append( + urllib.parse.urlparse(ep['admin_url']).hostname) service_username = '_'.join(services) # If an admin username prefix is provided, ensure all services use it. prefix = config('service-admin-prefix') if service_username and prefix: - service_username = "%s%s" % (prefix, service_username) + service_username = "{}{}".format(prefix, service_username) - if 'None' in settings.itervalues(): + if 'None' in settings.values(): return if not service_username: @@ -2303,8 +2558,15 @@ def add_credentials_to_keystone(relation_id=None, remote_unit=None): } if domain: relation_data['domain'] = domain - # Get and pass CA bundle settings - relation_data.update(get_ssl_ca_settings()) + # Get and pass CA bundle settings + relation_data.update(get_ssl_ca_settings()) + # The same domain is used for project and user creation. However, in + # the future they may not be. 
+ domain_id = manager.resolve_domain_id(domain) + relation_data['credentials_user_domain_name'] = domain + relation_data['credentials_user_domain_id'] = domain_id + relation_data['credentials_project_domain_name'] = domain + relation_data['credentials_project_domain_id'] = domain_id peer_store_and_set(relation_id=relation_id, **relation_data) @@ -2341,12 +2603,15 @@ def get_protocol(): def ensure_valid_service(service): if service not in valid_services.keys(): - log("Invalid service requested: '%s'" % service) + log("Invalid service requested: '{}'".format(service)) relation_set(admin_token=-1) return def add_endpoint(region, service, publicurl, adminurl, internalurl): + status_message = 'Updating endpoint for {}'.format(service) + log(status_message) + status_set('maintenance', status_message) desc = valid_services[service]["desc"] service_type = valid_services[service]["type"] create_service_entry(service, service_type, desc) @@ -2428,14 +2693,14 @@ def send_notifications(data, force=False): for rid in rel_ids: rs = relation_get(unit=local_unit(), rid=rid) if rs: - keys += rs.keys() + keys += list(rs.keys()) # Don't bother checking if we have already identified a diff if diff: continue # Work out if this notification changes anything - for k, v in data.iteritems(): + for k, v in data.items(): if rs.get(k, None) != v: diff = True break @@ -2449,14 +2714,14 @@ def send_notifications(data, force=False): _notifications = {k: None for k in set(keys)} # Set new values - for k, v in data.iteritems(): + for k, v in data.items(): _notifications[k] = v if force: _notifications['trigger'] = str(uuid.uuid4()) # Broadcast - log("Sending identity-service notifications (trigger=%s)" % (force), + log("Sending identity-service notifications (trigger={})".format(force), level=DEBUG) for rid in rel_ids: relation_set(relation_id=rid, relation_settings=_notifications) @@ -2478,16 +2743,16 @@ def is_db_ready(use_current_context=False, db_rel=None): if use_current_context: if not any([relation_id() in relation_ids(r) for r in db_rels]): - raise Exception("use_current_context=True but not in one of %s " - "rel hook contexts (currently in %s)." % - (', '.join(db_rels), relation_id())) + raise Exception("use_current_context=True but not in one of {} " + "rel hook contexts (currently in {})." + .format(', '.join(db_rels), relation_id())) allowed_units = relation_get(attribute=key) if allowed_units and local_unit() in allowed_units.split(): return True # We are in shared-db rel but don't yet have permissions - log("%s does not yet have db permissions" % (local_unit()), + log("{} does not yet have db permissions".format(local_unit()), level=DEBUG) return False else: @@ -2774,3 +3039,228 @@ def post_snap_install(): if os.path.exists(PASTE_SRC): log("Perfoming post snap install tasks", INFO) shutil.copy(PASTE_SRC, PASTE_DST) + + +def key_setup(): + """Initialize Fernet and Credential encryption key repositories + + To setup the key repositories, calls (as user "keystone"): + + keystone-manage fernet_setup + keystone-manage credential_setup + + In addition we migrate any credentials currently stored in database using + the null key to be encrypted by the new credential key: + + keystone-manage credential_migrate + + Note that we only want to do this once, so we store success in the leader + settings (which we should be). + + :raises: `:class:subprocess.CallProcessError` if either of the commands + fails. 
+ """ + if os.path.exists(KEY_SETUP_FILE) or not is_leader(): + return + base_cmd = ['sudo', '-u', 'keystone', 'keystone-manage'] + try: + log("Setting up key repositories for Fernet tokens and Credential " + "encryption", level=DEBUG) + subprocess.check_call(base_cmd + ['fernet_setup']) + subprocess.check_call(base_cmd + ['credential_setup']) + subprocess.check_call(base_cmd + ['credential_migrate']) + # touch the file to create + open(KEY_SETUP_FILE, "w").close() + except subprocess.CalledProcessError as e: + log("Key repository setup failed, will retry in config-changed hook: " + "{}".format(e), level=ERROR) + + +def fernet_rotate(): + """Rotate Fernet keys + + To rotate the Fernet tokens, and create a new staging key, it calls (as the + "keystone" user): + + keystone-manage fernet_rotate + + Note that we do not rotate the Credential encryption keys. + + Note that this does NOT synchronise the keys between the units. This is + performed in `:function:`hooks.keystone_utils.fernet_leader_set` + + :raises: `:class:subprocess.CallProcessError` if the command fails. + """ + log("Rotating Fernet tokens", level=DEBUG) + cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'fernet_rotate'] + subprocess.check_call(cmd) + + +def key_leader_set(): + """Read current key sets and update leader storage + + The keys are read from the `FERNET_KEY_REPOSITORY` and + `CREDENTIAL_KEY_REPOSITORY` directories. Note that this function will fail + if it is called on the unit that is not the leader. + + :raises: :class:`subprocess.CalledProcessError` if the leader_set fails. + """ + disk_keys = {} + for key_repository in [FERNET_KEY_REPOSITORY, CREDENTIAL_KEY_REPOSITORY]: + disk_keys[key_repository] = {} + for key_number in os.listdir(key_repository): + with open(os.path.join(key_repository, key_number), + 'r') as f: + disk_keys[key_repository][key_number] = f.read() + leader_set({'key_repository': json.dumps(disk_keys)}) + + +def key_write(): + """Get keys from leader storage and write out to disk + + The keys are written to the `FERNET_KEY_REPOSITORY` and + `CREDENTIAL_KEY_REPOSITORY` directories. Note that the keys are first + written to a tmp file and then moved to the key to avoid any races. Any + 'excess' keys are deleted, which may occur if the "number of keys" has been + reduced on the leader. + """ + leader_keys = leader_get('key_repository') + if not leader_keys: + log('"key_repository" not in leader settings yet...', level=DEBUG) + return + leader_keys = json.loads(leader_keys) + for key_repository in [FERNET_KEY_REPOSITORY, CREDENTIAL_KEY_REPOSITORY]: + mkdir(key_repository, + owner=KEYSTONE_USER, + group=KEYSTONE_USER, + perms=0o700) + for key_number, key in leader_keys[key_repository].items(): + tmp_filename = os.path.join(key_repository, + ".{}".format(key_number)) + key_filename = os.path.join(key_repository, key_number) + # write to tmp file first, move the key into place in an atomic + # operation avoiding any races with consumers of the key files + write_file(tmp_filename, + key, + owner=KEYSTONE_USER, + group=KEYSTONE_USER, + perms=0o600) + os.rename(tmp_filename, key_filename) + # now delete any keys that shouldn't be there + for key_number in os.listdir(key_repository): + if key_number not in leader_keys[key_repository].keys(): + # ignore if it is not a file + if os.path.isfile(os.path.join(key_repository, key_number)): + os.remove(os.path.join(key_repository, key_number)) + + # also say that keys have been setup for this system. 
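+    # (This is the same marker file that key_setup() creates on the leader;
+    # its presence prevents the key repositories being re-initialised.)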
+ open(KEY_SETUP_FILE, "w").close() + + +def fernet_keys_rotate_and_sync(log_func=log): + """Rotate and sync the keys if the unit is the leader and the primary key + has expired. + + The modification time of the staging key (key with index '0') is used, + along with the config setting "token_expiration" to determine whether to + rotate the keys, along with the function `fernet_enabled()` to test + whether to do it at all. + + Note that the reason for using modification time and not change time is + that the former can be set by the operator as part of restoring the key + from backup. + + The rotation time = token-expiration / (max-active-keys - 2) + + where max-active-keys has a minumum of 3. + + :param log_func: Function to use for logging + :type log_func: func + """ + if not keystone_context.fernet_enabled() or not is_leader(): + return + if is_unit_paused_set(): + log_func("Fernet key rotation requested but unit is paused", + level=INFO) + return + # now see if the keys need to be rotated + try: + last_rotation = os.stat( + os.path.join(FERNET_KEY_REPOSITORY, '0')).st_mtime + except OSError: + log_func("Fernet key rotation requested but key repository not " + "initialized yet", level=WARNING) + return + max_keys = max(config('fernet-max-active-keys'), 3) + expiration = config('token-expiration') + rotation_time = expiration // (max_keys - 2) + now = time.time() + if last_rotation + rotation_time > now: + # Nothing to do as not reached rotation time + log_func("No rotation until at least {}" + .format( + time.asctime(time.gmtime(last_rotation + rotation_time))), + level=DEBUG) + return + # now rotate the keys and sync them + fernet_rotate() + key_leader_set() + log_func("Rotated and started sync (via leader settings) of fernet keys", + level=INFO) + + +@cached +def container_scoped_relations(): + '''Get all the container scoped relations''' + md = metadata() + relations = [] + for relation_type in ('provides', 'requires', 'peers'): + for relation in md.get(relation_type, []): + if md[relation_type][relation].get('scope') == 'container': + relations.append(relation) + return relations + + +def is_expected_scale(): + """Query juju goal-state to determine whether our peer- and dependency- + relations are at the expected scale. + + Useful for deferring per unit per relation housekeeping work until we are + ready to complete it successfully and without unnecessary repetiton. + + Always returns True if version of juju used does not support goal-state. + + :returns: True or False + :rtype: bool + """ + peer_type = 'cluster' + peer_rid = next((rid for rid in relation_ids(reltype=peer_type)), None) + if not peer_rid: + return False + deps = [ + ('shared-db', + next((rid for rid in relation_ids(reltype='shared-db')), None)), + ] + if expect_ha(): + deps.append(('ha', + next((rid for rid in relation_ids(reltype='ha')), None))) + try: + if (len(related_units(relid=peer_rid)) < + len(list(expected_peer_units()))): + return False + for dep in deps: + if not dep[1]: + return False + # Goal state returns every unit even for container scoped + # relations but the charm only ever has a relation with + # the local unit. 
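+            # (For this charm that typically means subordinate relations
+            # such as 'ha', where only a single related unit is expected.)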
+ if dep[0] in container_scoped_relations(): + expected_count = 1 + else: + expected_count = len( + list(expected_related_units(reltype=dep[0]))) + if len(related_units(relid=dep[1])) < expected_count: + return False + except NotImplementedError: + return True + return True diff --git a/hooks/manager.py b/hooks/manager.py old mode 100644 new mode 100755 index 489018e47da5d936f924a0da64dffb1cfb4b7ba5..6a2159686d9d97883f8b1a4dddee9e66afce0c22 --- a/hooks/manager.py +++ b/hooks/manager.py @@ -1,5 +1,3 @@ -#!/usr/bin/python -# # Copyright 2016 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,11 +12,60 @@ # See the License for the specific language governing permissions and # limitations under the License. +# NOTE(tinwood): This file needs to remain Python2 as it uses keystoneclient +# from the payload software to do it's work. + +from __future__ import print_function + +import json +import os +import stat +import sys +import time + from keystoneclient.v2_0 import client from keystoneclient.v3 import client as keystoneclient_v3 from keystoneclient.auth import token_endpoint from keystoneclient import session, exceptions -from charmhelpers.core.decorators import retry_on_exception + +import uds_comms as uds + + +_usage = """This file is called from the keystone_utils.py file to implement +various keystone calls and functions. It is called with one parameter which is +the path to a Unix Domain Socket file. + +The messages passed to the this process from the keystone_utils.py includes the +following keys: + +{ + 'path': The api path on the keystone manager object. + 'api_version': the keystone API version to use. + 'api_local_endpoint': the local endpoint to connect to. + 'admin_token': the admin token to use with keystone. + 'args': the non-keyword argument to supply to the keystone manager call. + 'kwargs': any keyword args to supply to the keystone manager call. +} + +The result of the call, or an error, is returned as a json encoded result in +the same file that sent the arguments. + +{ + 'result': <whatever the result of the function call was> + 'error': <if an error occured, the text of the error +} + +This system is currently needed to decouple the majority of the charm from the +underlying package being used for keystone. +""" + +JSON_ENCODE_OPTIONS = dict( + sort_keys=True, + allow_nan=False, + indent=None, + separators=(',', ':'), +) + # Early versions of keystoneclient lib do not have an explicit # ConnectionRefused @@ -42,6 +89,34 @@ def _get_keystone_manager_class(endpoint, token, api_version): raise ValueError('No manager found for api version {}'.format(api_version)) +def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): + """If the decorated function raises exception exc_type, allow num_retries + retry attempts before raise the exception. 
+ """ + def _retry_on_exception_inner_1(f): + def _retry_on_exception_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + while True: + try: + return f(*args, **kwargs) + except exc_type: + if not retries: + raise + + delay = base_delay * multiplier + multiplier += 1 + print("Retrying '{0}' {1} more times (delay={2})" + .format(f.__name__, retries, delay)) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_exception_inner_2 + + return _retry_on_exception_inner_1 + + @retry_on_exception(5, base_delay=3, exc_type=econnrefused) def get_keystone_manager(endpoint, token, api_version=None): """Return a keystonemanager for the correct API version @@ -75,10 +150,12 @@ def get_keystone_manager(endpoint, token, api_version=None): for svc in manager.api.services.list(): if svc.type == 'identity': svc_id = svc.id + break version = None for ep in manager.api.endpoints.list(): if ep.service_id == svc_id and hasattr(ep, 'adminurl'): version = ep.adminurl.split('/')[-1] + break if version and version == 'v2.0': new_ep = base_ep + "/" + 'v2.0' return _get_keystone_manager_class(new_ep, token, 2) @@ -91,6 +168,16 @@ def get_keystone_manager(endpoint, token, api_version=None): class KeystoneManager(object): + def resolved_api_version(self): + """Used by keystone_utils.py to determine which endpoint template + to create based on the current endpoint which needs to actually be done + in get_keystone_manager() in this file. + + :returns: the current api version + :rtype: int + """ + return self.api_version + def resolve_domain_id(self, name): pass @@ -120,6 +207,28 @@ class KeystoneManager(object): if type == s['type']: return s['id'] + def delete_service_by_id(self, service_id): + """Delete a service by the service id""" + self.api.services.delete(service_id) + + def list_services(self): + """Return a list of services (dictionary items)""" + return [s.to_dict() for s in self.api.services.list()] + + def create_service(self, service_name, service_type, description): + """Create a service using the api""" + self.api.services.create(service_name, + service_type, + description=description) + + def list_endpoints(self): + """Return a list of endpoints (dictionary items)""" + return [e.to_dict() for e in self.api.endpoints.list()] + + def create_role(self, name): + """Create the role by name.""" + self.api.roles.create(name=name) + class KeystoneManager2(KeystoneManager): @@ -140,6 +249,10 @@ class KeystoneManager2(KeystoneManager): publicurl=publicurl, adminurl=adminurl, internalurl=internalurl) + def delete_endpoint_by_id(self, endpoint_id): + """Delete an endpoint by the endpoint_id""" + self.api.endpoints.delete(endpoint_id) + def tenants_list(self): return self.api.tenants.list() @@ -164,11 +277,22 @@ class KeystoneManager2(KeystoneManager): email=email, tenant_id=tenant_id) + def user_exists(self, name, domain=None): + if domain is not None: + raise ValueError("For keystone v2, domain cannot be set") + if self.resolve_user_id(name): + users = manager.api.users.list() + for user in users: + if user.name.lower() == name.lower(): + return True + return False + def update_password(self, user, password): self.api.users.update_password(user=user, password=password) def roles_for_user(self, user_id, tenant_id=None, domain_id=None): - return self.api.roles.roles_for_user(user_id, tenant_id) + roles = self.api.roles.roles_for_user(user_id, tenant_id) + return [r.to_dict() for r in roles] def add_user_role(self, user, role, tenant, domain): self.api.roles.add_user_role(user=user, 
role=role, tenant=tenant) @@ -221,6 +345,13 @@ class KeystoneManager3(KeystoneManager): self.api.endpoints.create(service_id, internalurl, interface='internal', region=region) + def create_endpoint_by_type(self, service_id, endpoint, interface, region): + """Create an endpoint by interface (type), where _interface is + 'internal', 'admin' or 'public'. + """ + self.api.endpoints.create( + service_id, endpoint, interface=interface, region=region) + def tenants_list(self): return self.api.projects.list() @@ -251,15 +382,37 @@ class KeystoneManager3(KeystoneManager): password=password, email=email) + def user_exists(self, name, domain=None): + domain_id = None + if domain: + domain_id = manager.resolve_domain_id(domain) + if not domain_id: + raise ValueError( + 'Could not resolve domain_id for {} when checking if ' + ' user {} exists'.format(domain, name)) + if manager.resolve_user_id(name, user_domain=domain): + users = manager.api.users.list(domain=domain_id) + for user in users: + if user.name.lower() == name.lower(): + # In v3 Domains are seperate user namespaces so need to + # check that the domain matched if provided + if domain: + if domain_id == user.domain_id: + return True + else: + return True + return False + def update_password(self, user, password): self.api.users.update(user, password=password) def roles_for_user(self, user_id, tenant_id=None, domain_id=None): # Specify either a domain or project, not both if domain_id: - return self.api.roles.list(user_id, domain=domain_id) + roles = self.api.roles.list(user_id, domain=domain_id) else: - return self.api.roles.list(user_id, project=tenant_id) + roles = self.api.roles.list(user_id, project=tenant_id) + return [r.to_dict() for r in roles] def add_user_role(self, user, role, tenant, domain): # Specify either a domain or project, not both @@ -274,12 +427,168 @@ class KeystoneManager3(KeystoneManager): if ep.service_id == service_id and ep.region == region and \ ep.interface == interface: found_eps.append(ep) - return found_eps + return [e.to_dict() for e in found_eps] def delete_old_endpoint_v3(self, interface, service_id, region, url): eps = self.find_endpoint_v3(interface, service_id, region) for ep in eps: - if getattr(ep, 'url') != url: - self.api.endpoints.delete(ep.id) + # if getattr(ep, 'url') != url: + if ep.get('url', None) != url: + # self.api.endpoints.delete(ep.id) + self.api.endpoints.delete(ep['id']) return True return False + + +# the following functions are proxied from keystone_utils, so that a Python3 +# charm can work with a Python2 keystone_client (i.e. in the case of a snap +# installed payload + +# used to provide a singleton if the credentials for the keystone_manager +# haven't changed. +_keystone_manager = dict( + api_version=None, + api_local_endpoint=None, + admin_token=None, + manager=None) + + +def get_manager(api_version=None, api_local_endpoint=None, admin_token=None): + """Return a keystonemanager for the correct API version + + This function actually returns a singleton of the right kind of + KeystoneManager (v2 or v3). If the api_version, api_local_endpoint and + admin_token haven't changed then the current _keystone_manager object is + returned, otherwise a new one is created (and thus the old one goes out of + scope and is closed). This is to that repeated calls to get_manager(...) + only results in a single authorisation request if the details don't change. + This is to speed up calls from the keystone charm into keystone and make + the charm more performant. 
It's hoped that the complexity/performance + trade-off is a good choice. + + :param api_verion: The version of the api to use or None. if None then the + version is determined from the api_local_enpoint variable. + :param api_local_endpoint: where to find the keystone API + :param admin_token: the token used for authentication. + :raises: RuntimeError if api_local_endpoint or admin_token is not set. + :returns: a KeystoneManager derived class (possibly the singleton). + """ + if api_local_endpoint is None: + raise RuntimeError("get_manager(): api_local_endpoint is not set") + if admin_token is None: + raise RuntimeError("get_manager(): admin_token is not set") + global _keystone_manager + if (api_version == _keystone_manager['api_version'] and + api_local_endpoint == _keystone_manager['api_local_endpoint'] and + admin_token == _keystone_manager['admin_token']): + return _keystone_manager['manager'] + # only retain the params IF getting the manager actually works + _keystone_manager['manager'] = get_keystone_manager( + api_local_endpoint, admin_token, api_version) + _keystone_manager['api_version'] = api_version + _keystone_manager['api_local_endpoint'] = api_local_endpoint + _keystone_manager['admin_token'] = admin_token + return _keystone_manager['manager'] + + +class ManagerException(Exception): + pass + + +""" +In the following code, there is a slightly unusual construction: + + _callable = manager + for attr in spec['path']: + _callable = getattr(_callable, attr) + +What this does is allow the calling file to make it look like it was just +calling a deeply nested function in a class hierarchy. + +So in the calling file, you get something like this: + + manager = get_manager() + manager.some_function(a, b, c, y=10) + +And that gets translated by the calling code into a json structure +that looks like: + +{ + "path": ['some_function'], + "args": [1, 2, 3], + "kwargs": {'y': 10}, + ... other bits for tokens, etc ... +} + +If it was `manager.some_class.some_function(a, b, c, y=10)` then the "path" +would equal ['some_class', 'some_function']. + +So what these three lines do is replicate the call on the KeystoneManager class +in this file, but successively grabbing attributes down/into the class using +the path as the attributes at each level. +""" + +if __name__ == '__main__': + # This script needs 1 argument which is the unix domain socket though which + # it communicates with the caller. The program stays running until it is + # sent a 'STOP' command by the caller, or is just killed. 
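+    # (The stop message actually sent by ManagerServer.clean_up() in
+    # keystone_utils.py is "QUIT"; the receive loop below checks for it.)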
+    if len(sys.argv) != 2:
+        raise RuntimeError(
+            "{} must be called with the filename of the Unix domain socket"
+            .format(__file__))
+    filename = sys.argv[1]
+    if not stat.S_ISSOCK(os.stat(filename).st_mode):
+        raise RuntimeError(
+            "{} called with {} but it is not a Unix domain socket"
+            .format(__file__, filename))
+
+    uds_client = uds.UDSClient(filename)
+    uds_client.connect()
+    # endless loop whilst we process messages from the caller
+    while True:
+        try:
+            data = uds_client.receive()
+            if data == "QUIT" or data is None:
+                break
+            spec = json.loads(data)
+            manager = get_manager(
+                api_version=spec['api_version'],
+                api_local_endpoint=spec['api_local_endpoint'],
+                admin_token=spec['admin_token'])
+            _callable = manager
+            for attr in spec['path']:
+                _callable = getattr(_callable, attr)
+            # now make the call and return the result
+            result = {'result': _callable(*spec['args'], **spec['kwargs'])}
+        except exceptions.InternalServerError as e:
+            # we've hit a 500 error, which is bad, and really we want the
+            # parent process to restart us to try again.
+            print(str(e))
+            result = {'error': str(e),
+                      'retry': True}
+        except uds.UDSException as e:
+            print(str(e))
+            import traceback
+            traceback.print_exc()
+            try:
+                uds_client.close()
+            except Exception:
+                pass
+            sys.exit(1)
+        except ManagerException as e:
+            # deal with sending an error back.
+            print(str(e))
+            import traceback
+            traceback.print_exc()
+            result = {'error': str(e)}
+        except Exception as e:
+            print("{}: something went wrong: {}".format(__file__, str(e)))
+            import traceback
+            traceback.print_exc()
+            result = {'error': str(e)}
+        finally:
+            result_json = json.dumps(result, **JSON_ENCODE_OPTIONS)
+            uds_client.send(result_json)
+
+    # normal exit
+    exit(0)
diff --git a/hooks/post-series-upgrade b/hooks/post-series-upgrade
new file mode 120000
index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0
--- /dev/null
+++ b/hooks/post-series-upgrade
@@ -0,0 +1 @@
+keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/pre-series-upgrade b/hooks/pre-series-upgrade
new file mode 120000
index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0
--- /dev/null
+++ b/hooks/pre-series-upgrade
@@ -0,0 +1 @@
+keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/shared-db-relation-broken b/hooks/shared-db-relation-broken
new file mode 120000
index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0
--- /dev/null
+++ b/hooks/shared-db-relation-broken
@@ -0,0 +1 @@
+keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/shared-db-relation-departed b/hooks/shared-db-relation-departed
new file mode 120000
index 0000000000000000000000000000000000000000..dd3b3eff4b7109293b4cfd9b81f5fc49643432a0
--- /dev/null
+++ b/hooks/shared-db-relation-departed
@@ -0,0 +1 @@
+keystone_hooks.py
\ No newline at end of file
diff --git a/hooks/uds_comms.py b/hooks/uds_comms.py
new file mode 100644
index 0000000000000000000000000000000000000000..94438d1e6cdb61c26aecfc0844f45713cc9bfe84
--- /dev/null
+++ b/hooks/uds_comms.py
@@ -0,0 +1,356 @@
+import base64
+import os
+import socket
+
+import six
+
+# for matching in the Codec class
+if six.PY3:
+    START_CHAR = 37  # 37 == %
+    END_CHAR = 36  # 36 == $
+else:
+    START_CHAR = '%'
+    END_CHAR = '$'
+
+
+class Codec():
+    """A very simple codec that bounds messages with a start char of '%' and an
+    end char of '$'. The message itself mustn't contain either of these
+    characters, and this is ensured by encoding the message using base64 (which
+    doesn't contain either of those characters).
+
+    This is for sending over a unix domain socket which has interesting
+    buffering -- this makes sure we can reconstruct entire messages between two
+    processes.
+    """
+
+    def __init__(self):
+        self.found_start = -1
+        self.message = None
+        self.buffer = b''
+
+    def _add(self, bites):
+        """Add some bytes to the buffer: called from receive()
+
+        It looks for the beginning and end of a message, and if found returns
+        the encoded buffer without the '%' and '$' markers.
+
+        :param bites: the bytes to add to the buffer and search for a message
+        :type bites: bytes
+        :returns: Either a b64encoded message, or None
+        :rtype: Option[bytes, None]
+        """
+        # current = len(self.buffer)
+        self.buffer += bites
+        if self.found_start < 0:
+            # skip till we found a '%'
+            for i, b in enumerate(self.buffer):
+                if b == START_CHAR:
+                    self.found_start = i
+                    break
+        if self.found_start > -1:
+            # see if the end of the message is available
+            for i, b in enumerate(self.buffer):
+                if i > self.found_start + 1 and b == END_CHAR:
+                    # found the end
+                    start = self.found_start + 1
+                    self.message = (base64
+                                    .b64decode(self.buffer[start:i])
+                                    .decode('UTF-8'))
+                    self.buffer = self.buffer[i + 1:]
+                    self.found_start = -1
+                    return self.message
+        return None
+
+    def receive(self, _callable):
+        """Continuously calls the param _callable() until it returns None or a
+        full message is received.
+
+        If the message is already in the buffer, then it grabs it and doesn't
+        call the _callable().
+
+        _callable() should return bytes until it wants receive() to terminate,
+        when it should return None. receive() also returns when a message is
+        complete.
+
+        receive() will return a decoded UTF-8 string when a complete message is
+        received.
+
+        Any left over bytes are retained in the Codec object, and further calls
+        to receive() will consume these first.
+
+        :param _callable: A function that returns None or bytes
+        :type _callable: Callable()
+        :returns: None or a UTF-8 decoded string
+        :rtype: Option[None, str]
+        """
+        # first see if the message is already in the buffer?
+        message = self._add(b'')
+        if message:
+            return message
+        while True:
+            # receive the data in chunks
+            data = _callable()
+            if data:
+                message = self._add(data)
+                if message:
+                    return message
+            else:
+                break
+        return None
+
+    def encode(self, message):
+        """Encode a message for sending on a channel with inconsistent
+        buffering (e.g. like a unix domain socket).
+
+        Encodes the message by UTF-8, then base64 and finally adds '%' and '$'
+        to the start and end of the message. This is so the message can be
+        recovered by searching through a receiving buffer.
+
+        :param message: The string that needs encoding.
+        :type message: str
+        :returns: the encoded message
+        :rtype: bytes
+        """
+        buffer = base64.b64encode(message.encode('UTF-8'))
+        return b"%" + buffer + b"$"
+
+
+# client and socket classes for the channel
+#
+# The Client connects to the server, and performs a READY handshake as part of
+# the connect(). The server has to respond 'OK'. Once this is done the client
+# and server are synchronised. Note that it is a one-to-one, synchronised
+# connection with client and server exchanging messages. The theory is that
+# the server initiates the Server, to bind to the socket, launches the script
+# and then waits for the connection. There is no race as the client will wait
+# until the server calls wait_for_connection() which can be after the client
+# has connected to the socket.
+#
+# The server then sends a "QUIT" to the client to get it to clean up and exit
+# (but this is outside of the protocol in the Client() and Server() classes).
+
+class UDSException(Exception):
+    """Used to gather up all exceptions and return a single one so that the
+    client/server can error out on comms failures.
+    """
+
+
+class UDSClient():
+    """Unix Domain Socket Client class.
+
+    Provides a synchronised message/receive client for connecting to the
+    equivalent UDSServer() running in a different process.
+
+    The client/server is backwards, as the UDSClient() is expecting to receive
+    a message, which its user will then reply with a result. i.e. the Client
+    is implemented in a process that expects to get commands from the server.
+    This is so that the server can launch a child script, communicate with it,
+    and then terminate it when finished.
+
+    Example use:
+
+        client = UDSClient(server_address)
+        client.connect()
+
+        message = client.receive()
+        if message == "DONE":
+            client.close()
+            return
+        client.send("OK")
+        # etc.
+    """
+
+    BUFFER_SIZE = 256
+
+    def __init__(self, socket_path):
+        """Initialise the Client.
+
+        :param socket_path: the file to use as a Unix Domain Socket
+        :type socket_path: str
+        :raises: UDSException on Error
+        """
+        self.socket_path = socket_path
+        try:
+            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        except Exception as e:
+            raise UDSException(str(e))
+        self.codec = Codec()
+
+    def connect(self):
+        """Attempt to connect to the other side.
+        When the connection is made, automatically calls _ready() to indicate
+        that the client is ready as part of the handshake. When connect()
+        completes the user should call receive() to receive the first message
+        from the server.
+
+        :raises: UDSException on Error
+        """
+        try:
+            self.sock.connect(self.socket_path)
+            self._ready()
+        except Exception as e:
+            raise UDSException(str(e))
+
+    def _ready(self):
+        """Internal method to provide a handshake to the server"""
+        self.sock.sendall(self.codec.encode("READY"))
+        message = self.receive()
+        if message != "OK":
+            raise RuntimeError("Handshake failed")
+
+    def receive(self):
+        """Receives a message from the Server() in the other process on the
+        other end of the UDS. Uses the Codec() class to ensure that the
+        messages are properly received and sent.
+
+        :returns: the string sent by the Server.send() method.
+        :rtype: str
+        :raises: UDSException on Error
+        """
+        try:
+            return self.codec.receive(
+                lambda: self.sock.recv(self.BUFFER_SIZE))
+        except Exception as e:
+            raise UDSException(str(e))
+
+    def send(self, buffer):
+        """Send a message to the Server() in the other process.
+
+        :param buffer: the string to send
+        :type buffer: str
+        :raises: UDSException on Error
+        """
+        try:
+            self.sock.sendall(self.codec.encode(buffer))
+        except Exception as e:
+            raise UDSException(str(e))
+
+    def close(self):
+        """Close the socket -- good housekeeping, so should do it at the end of
+        the process.
+        :raises: UDSException on Error
+        """
+        try:
+            self.sock.close()
+        except Exception as e:
+            raise UDSException(str(e))
+
+
+class UDSServer():
+    """The Server (or listening) end of the Unix Domain Socket chat protocol.
+    Uses Codec() to encode and decode messages on the channel.
+
+    The Server listens for a connection, performs a handshake, and then is in
+    control of the conversation. The user of Server() should then send a
+    message and wait for a response. It's up to the client to disconnect, so a
+    protocol-level message should be used (e.g. QUIT) that the user of Client()
+    will use to close the connection.
+
+    Example use:
+
+        server = UDSServer(server_address)
+        input("Press enter to continue ....")
+        server.wait_for_connection()
+        try:
+            # send some data
+            server.send(data)
+            # and await the reply
+            message = server.receive()
+        finally:
+            # clean up
+            server.send("DONE")
+            message = server.receive()
+            server.close()
+    """
+
+    BUFFER_SIZE = 256
+
+    def __init__(self, socket_path):
+        """Initialise the listener on the UDS. This binds to the socket and
+        ensures that a client can connect. The conversation doesn't get
+        started until the wait_for_connection() method is called.
+
+        The caller can initialise the Server, then ask the client to connect,
+        and then at any point later call wait_for_connection() to get the
+        conversation going.
+
+        :param socket_path: the filename for the UDS.
+        :type socket_path: str
+        :raises: UDSException on Error
+        """
+        self.socket_path = socket_path
+        self.sock = None
+        # Make sure the socket does not already exist
+        try:
+            os.unlink(socket_path)
+        except OSError:
+            if os.path.exists(socket_path):
+                raise
+        try:
+            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+            # ensure the socket is created with 600 permissions
+            _mask = os.umask(0o177)
+            self.sock.bind(socket_path)
+            os.umask(_mask)
+            self.sock.listen(1)
+        except Exception as e:
+            raise UDSException(str(e))
+        self.codec = Codec()
+
+    def wait_for_connection(self):
+        """Blocking method to wait for a connection from the client.
+
+        Performs the handshake to ensure that both ends are in sync.
+        :raises: UDSException on Error
+        """
+        try:
+            self.connection, self.client_address = self.sock.accept()
+            self._handshake()
+        except Exception as e:
+            raise UDSException(str(e))
+
+    def _handshake(self):
+        """Internal method to sync up the client and server"""
+        while True:
+            message = self.receive()
+            if message == 'READY':
+                self.send('OK')
+                break
+
+    def receive(self):
+        """Receives a message from the Client() in the other process on the
+        other end of the UDS. Uses the Codec() class to ensure that the
+        messages are properly received and sent.
+
+        :returns: the string sent by the Client.send() method.
+        :rtype: str
+        :raises: UDSException on Error
+        """
+        try:
+            return self.codec.receive(
+                lambda: self.connection.recv(self.BUFFER_SIZE))
+        except Exception as e:
+            raise UDSException(str(e))
+
+    def send(self, buffer):
+        """Send a message to the Client() in the other process.
+
+        :param buffer: the string to send
+        :type buffer: str
+        :raises: UDSException on Error
+        """
+        try:
+            self.connection.sendall(self.codec.encode(buffer))
+        except Exception as e:
+            raise UDSException(str(e))
+
+    def close(self):
+        """Close the socket -- good housekeeping, so should do it at the end of
+        the process.
+ :raises: UDSException on Error + """ + try: + self.connection.close() + except Exception as e: + raise UDSException(str(e)) diff --git a/metadata.yaml b/metadata.yaml index 9317c3e6ecdc94776140101a95e1ba406f2583e2..e9f4d88d2a336dd005168df0b1d7479dec534d72 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -12,7 +12,8 @@ tags: series: - xenial - bionic - - cosmic + - disco + - eoan - trusty extra-bindings: public: @@ -41,6 +42,21 @@ requires: domain-backend: interface: keystone-domain-backend scope: container + keystone-fid-service-provider: + interface: keystone-fid-service-provider + scope: container + websso-trusted-dashboard: + interface: websso-trusted-dashboard + certificates: + interface: tls-certificates + keystone-middleware: + interface: keystone-middleware + scope: container peers: cluster: interface: keystone-ha +resources: + policyd-override: + type: file + filename: policyd-override.zip + description: The policy.d overrides file diff --git a/requirements.txt b/requirements.txt index 6a3271b078b3af5f2b3cff572172d78317acbe48..343beed192f73f18408852856fc8718ccb9b75be 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,13 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. +# pbr>=1.8.0,<1.9.0 -PyYAML>=3.1.0 simplejson>=2.2.0 netifaces>=0.10.4 netaddr>=0.7.12,!=0.7.16 diff --git a/scripts/fernet_rotate_and_sync.py b/scripts/fernet_rotate_and_sync.py new file mode 100755 index 0000000000000000000000000000000000000000..c948c16edff20ea24b578d8685d14567f4b0ba49 --- /dev/null +++ b/scripts/fernet_rotate_and_sync.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 +# Copyright 2018 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
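+# This script is run on a unit outside of a normal hook context (the
+# keystone-fernet-rotate-sync cron job invokes it via juju-run), so it adds
+# the charm's hooks/ directory to sys.path before importing keystone_utils,
+# and then simply delegates to keystone_utils.fernet_keys_rotate_and_sync(),
+# which checks for leadership and works out whether a rotation is actually due.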
+from __future__ import print_function + +import os +import sys +import time + +dir_path = os.path.dirname(os.path.realpath(__file__)) +hooks_path = os.path.abspath(os.path.join(dir_path, "..", "hooks")) +root_path = os.path.abspath(os.path.join(dir_path, "..")) + +for p in [hooks_path, root_path]: + if p not in sys.path: + sys.path.append(p) + +# now we can import charm related items +import charmhelpers.core.hookenv + +import keystone_utils + + +def cli_log(msg, level=charmhelpers.core.hookenv.INFO): + """Helper function to write log message to stdout/stderr for CLI usage.""" + if level == charmhelpers.core.hookenv.DEBUG: + return charmhelpers.core.hookenv.log(msg, level=level) + elif level in [charmhelpers.core.hookenv.ERROR, + charmhelpers.core.hookenv.WARNING]: + output = sys.stderr + else: + output = sys.stdout + + print('{}: {}'.format(time.ctime(), msg), file=output) + + +# the rotate_and_sync_keys() function checks for leadership AND whether to +# rotate the keys or not. +if __name__ == "__main__": + keystone_utils.fernet_keys_rotate_and_sync(log_func=cli_log) diff --git a/setup.cfg b/setup.cfg index 37083b62aab87aba1931e298f4bef8484481dab0..3121bb4c510e617fb719721c1ab4374f8a1ed0d3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,3 +1,21 @@ +[metadata] +name = charm-keystone +summary = Charm module for OpenStack Keystone +description-file = + README.md +author = OpenStack +author-email = openstack-discuss@lists.openstack.org +home-page = https://docs.openstack.org/charm-guide/latest/ +classifier = + Intended Audience :: Developers + Intended Audience :: System Administrators + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python :: 3 + Programming Language :: Python :: 3.5 + Programming Language :: Python :: 3.6 + Programming Language :: Python :: 3.7 + [nosetests] verbosity=2 with-coverage=1 diff --git a/templates/essex/keystone.conf b/templates/essex/keystone.conf deleted file mode 100644 index f514d9bbe3139f47fe0d45b694380aadb3ffdd4f..0000000000000000000000000000000000000000 --- a/templates/essex/keystone.conf +++ /dev/null @@ -1,93 +0,0 @@ -# essex -############################################################################### -# [ WARNING ] -# Configuration file maintained by Juju. Local changes may be overwritten. 
-############################################################################### -[DEFAULT] -admin_token = {{ token }} -admin_port = {{ admin_port }} -public_port = {{ public_port }} -use_syslog = {{ use_syslog }} -log_config = /etc/keystone/logging.conf -debug = {{ debug }} -verbose = {{ verbose }} - -[sql] -{% if database_host -%} -connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %} -{% else -%} -connection = sqlite:////var/lib/keystone/keystone.db -{% endif -%} -idle_timeout = 200 - -[identity] -driver = keystone.identity.backends.sql.Identity - -[catalog] -driver = keystone.catalog.backends.sql.Catalog - -[token] -driver = keystone.token.backends.sql.Token -expiration = 86400 - -[policy] -driver = keystone.policy.backends.rules.Policy - -[ec2] -driver = keystone.contrib.ec2.backends.sql.Ec2 - -[filter:debug] -paste.filter_factory = keystone.common.wsgi:Debug.factory - -[filter:token_auth] -paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory - -[filter:admin_token_auth] -paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory - -[filter:xml_body] -paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory - -[filter:json_body] -paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory - -[filter:crud_extension] -paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory - -[filter:ec2_extension] -paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory - -[app:public_service] -paste.app_factory = keystone.service:public_app_factory - -[app:admin_service] -paste.app_factory = keystone.service:admin_app_factory - -[pipeline:public_api] -pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension public_service - -[pipeline:admin_api] -pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension crud_extension admin_service - -[app:public_version_service] -paste.app_factory = keystone.service:public_version_app_factory - -[app:admin_version_service] -paste.app_factory = keystone.service:admin_version_app_factory - -[pipeline:public_version_api] -pipeline = xml_body public_version_service - -[pipeline:admin_version_api] -pipeline = xml_body admin_version_service - -[composite:main] -use = egg:Paste#urlmap -/v2.0 = public_api -/ = public_version_api - -[composite:admin] -use = egg:Paste#urlmap -/v2.0 = admin_api -/ = admin_version_api - diff --git a/templates/essex/logging.conf b/templates/essex/logging.conf deleted file mode 100644 index 7a538ae8f1ee5ebc27504b2a7719f5ac0f865119..0000000000000000000000000000000000000000 --- a/templates/essex/logging.conf +++ /dev/null @@ -1,39 +0,0 @@ -[loggers] -keys=root - -[formatters] -keys=normal,normal_with_name,debug - -[handlers] -keys=production,file,devel - -[logger_root] -level=WARNING -handlers=file - -[handler_production] -class=handlers.SysLogHandler -level=ERROR -formatter=normal_with_name -args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) - -[handler_file] -class=FileHandler -level=DEBUG -formatter=normal_with_name -args=('/var/log/keystone/keystone.log', 'a') - -[handler_devel] -class=StreamHandler -level=NOTSET -formatter=debug -args=(sys.stdout,) - -[formatter_normal] -format=%(asctime)s %(levelname)s %(message)s - -[formatter_normal_with_name] -format=(%(name)s): 
%(asctime)s %(levelname)s %(message)s - -[formatter_debug] -format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s diff --git a/templates/folsom/keystone.conf b/templates/folsom/keystone.conf deleted file mode 100644 index 5c09801f2d752cea999730a8afda03d31796d22a..0000000000000000000000000000000000000000 --- a/templates/folsom/keystone.conf +++ /dev/null @@ -1,112 +0,0 @@ -# folsom -############################################################################### -# [ WARNING ] -# Configuration file maintained by Juju. Local changes may be overwritten. -############################################################################### -[DEFAULT] -admin_token = {{ token }} -admin_port = {{ admin_port }} -public_port = {{ public_port }} -use_syslog = {{ use_syslog }} -log_config = /etc/keystone/logging.conf -debug = {{ debug }} -verbose = {{ verbose }} - -[sql] -{% if database_host -%} -connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %} -{% else -%} -connection = sqlite:////var/lib/keystone/keystone.db -{% endif -%} -idle_timeout = 200 - -[identity] -driver = keystone.identity.backends.sql.Identity - -[catalog] -driver = keystone.catalog.backends.sql.Catalog - -[token] -driver = keystone.token.backends.sql.Token -expiration = 86400 - -[policy] -driver = keystone.policy.backends.rules.Policy - -[ec2] -driver = keystone.contrib.ec2.backends.sql.Ec2 - -[signing] -token_format = UUID -key_size = 2048 -valid_days = 3650 - -[filter:debug] -paste.filter_factory = keystone.common.wsgi:Debug.factory - -[filter:token_auth] -paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory - -[filter:admin_token_auth] -paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory - -[filter:xml_body] -paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory - -[filter:json_body] -paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory - -[filter:user_crud_extension] -paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory - -[filter:crud_extension] -paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory - -[filter:ec2_extension] -paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory - -[filter:s3_extension] -paste.filter_factory = keystone.contrib.s3:S3Extension.factory - -[filter:url_normalize] -paste.filter_factory = keystone.middleware:NormalizingFilter.factory - -[filter:stats_monitoring] -paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory - -[filter:stats_reporting] -paste.filter_factory = keystone.contrib.stats:StatsExtension.factory - -[app:public_service] -paste.app_factory = keystone.service:public_app_factory - -[app:admin_service] -paste.app_factory = keystone.service:admin_app_factory - -[pipeline:public_api] -pipeline = stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service - -[pipeline:admin_api] -pipeline = stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service - -[app:public_version_service] -paste.app_factory = keystone.service:public_version_app_factory - -[app:admin_version_service] -paste.app_factory = keystone.service:admin_version_app_factory - 
-[pipeline:public_version_api] -pipeline = stats_monitoring url_normalize xml_body public_version_service - -[pipeline:admin_version_api] -pipeline = stats_monitoring url_normalize xml_body admin_version_service - -[composite:main] -use = egg:Paste#urlmap -/v2.0 = public_api -/ = public_version_api - -[composite:admin] -use = egg:Paste#urlmap -/v2.0 = admin_api -/ = admin_version_api diff --git a/templates/grizzly/keystone.conf b/templates/grizzly/keystone.conf deleted file mode 100644 index ffa746441d2e0477abcf67cca195471950fdb276..0000000000000000000000000000000000000000 --- a/templates/grizzly/keystone.conf +++ /dev/null @@ -1,131 +0,0 @@ -# grizzly -############################################################################### -# [ WARNING ] -# Configuration file maintained by Juju. Local changes may be overwritten. -############################################################################### -[DEFAULT] -admin_token = {{ token }} -admin_port = {{ admin_port }} -public_port = {{ public_port }} -use_syslog = {{ use_syslog }} -log_config = /etc/keystone/logging.conf -debug = {{ debug }} -verbose = {{ verbose }} - -[sql] -{% if database_host -%} -connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %} -{% else -%} -connection = sqlite:////var/lib/keystone/keystone.db -{% endif -%} -idle_timeout = 200 - -[identity] -driver = keystone.identity.backends.sql.Identity - -[trust] -driver = keystone.trust.backends.sql.Trust - -[catalog] -driver = keystone.catalog.backends.sql.Catalog - -[token] -driver = keystone.token.backends.sql.Token - -[policy] -driver = keystone.policy.backends.sql.Policy - -[ec2] -driver = keystone.contrib.ec2.backends.sql.Ec2 - -[signing] -token_format = UUID - -[auth] -methods = password,token -password = keystone.auth.plugins.password.Password -token = keystone.auth.plugins.token.Token - -[filter:debug] -paste.filter_factory = keystone.common.wsgi:Debug.factory - -[filter:token_auth] -paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory - -[filter:admin_token_auth] -paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory - -[filter:xml_body] -paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory - -[filter:json_body] -paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory - -[filter:user_crud_extension] -paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory - -[filter:crud_extension] -paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory - -[filter:ec2_extension] -paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory - -[filter:s3_extension] -paste.filter_factory = keystone.contrib.s3:S3Extension.factory - -[filter:url_normalize] -paste.filter_factory = keystone.middleware:NormalizingFilter.factory - -[filter:sizelimit] -paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory - -[filter:stats_monitoring] -paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory - -[filter:stats_reporting] -paste.filter_factory = keystone.contrib.stats:StatsExtension.factory - -[filter:access_log] -paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory - -[app:public_service] -paste.app_factory = keystone.service:public_app_factory - -[app:service_v3] -paste.app_factory = 
keystone.service:v3_app_factory - -[app:admin_service] -paste.app_factory = keystone.service:admin_app_factory - -[pipeline:public_api] -pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service - -[pipeline:admin_api] -pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service - -[pipeline:api_v3] -pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension service_v3 - -[app:public_version_service] -paste.app_factory = keystone.service:public_version_app_factory - -[app:admin_version_service] -paste.app_factory = keystone.service:admin_version_app_factory - -[pipeline:public_version_api] -pipeline = access_log sizelimit stats_monitoring url_normalize xml_body public_version_service - -[pipeline:admin_version_api] -pipeline = access_log sizelimit stats_monitoring url_normalize xml_body admin_version_service - -[composite:main] -use = egg:Paste#urlmap -/v2.0 = public_api -/v3 = api_v3 -/ = public_version_api - -[composite:admin] -use = egg:Paste#urlmap -/v2.0 = admin_api -/v3 = api_v3 -/ = admin_version_api diff --git a/templates/havana/keystone.conf b/templates/havana/keystone.conf deleted file mode 100644 index 2da8958df4a09f37548f72beaa27caaa9633f81f..0000000000000000000000000000000000000000 --- a/templates/havana/keystone.conf +++ /dev/null @@ -1,64 +0,0 @@ -# havana -############################################################################### -# [ WARNING ] -# Configuration file maintained by Juju. Local changes may be overwritten. 
-############################################################################### -[DEFAULT] -admin_token = {{ token }} -admin_port = {{ admin_port }} -public_port = {{ public_port }} -use_syslog = {{ use_syslog }} -log_config = /etc/keystone/logging.conf -debug = {{ debug }} -verbose = {{ verbose }} - -[sql] -{% if database_host -%} -connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %} -{% else -%} -connection = sqlite:////var/lib/keystone/keystone.db -{% endif -%} -idle_timeout = 200 - -[identity] -driver = keystone.identity.backends.sql.Identity - -[credential] -driver = keystone.credential.backends.sql.Credential - -[trust] -driver = keystone.trust.backends.sql.Trust - -[os_inherit] - -[catalog] -driver = keystone.catalog.backends.sql.Catalog - -[endpoint_filter] - -[token] -driver = keystone.token.backends.sql.Token -provider = keystone.token.providers.uuid.Provider - -[cache] - -[policy] -driver = keystone.policy.backends.sql.Policy - -[ec2] -driver = keystone.contrib.ec2.backends.sql.Ec2 - -[assignment] - -[oauth1] - -[signing] - -[auth] -methods = external,password,token,oauth1 -password = keystone.auth.plugins.password.Password -token = keystone.auth.plugins.token.Token -oauth1 = keystone.auth.plugins.oauth1.OAuth - -[paste_deploy] -config_file = keystone-paste.ini diff --git a/templates/icehouse/keystone.conf b/templates/icehouse/keystone.conf index f92e9262055f348f9f8bb86ece4910da366731a5..9f071c4f898f9ff63df40307ab824fdddaed1f25 100644 --- a/templates/icehouse/keystone.conf +++ b/templates/icehouse/keystone.conf @@ -23,7 +23,7 @@ connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{ {% else -%} connection = sqlite:////var/lib/keystone/keystone.db {% endif -%} -idle_timeout = 200 +connection_recycle_time = 200 [identity] driver = keystone.identity.backends.{{ identity_backend }}.Identity @@ -43,13 +43,7 @@ driver = keystone.catalog.backends.sql.Catalog [token] driver = keystone.token.backends.sql.Token -{% if token_provider == 'pki' -%} -provider = keystone.token.providers.pki.Provider -{% elif token_provider == 'pkiz' -%} -provider = keystone.token.providers.pkiz.Provider -{% else -%} provider = keystone.token.providers.uuid.Provider -{% endif -%} expiration = {{ token_expiration }} {% include "parts/section-signing" %} @@ -87,7 +81,7 @@ password = {{ ldap_password }} suffix = {{ ldap_suffix }} {% if ldap_config_flags -%} -{% for key, value in ldap_config_flags.iteritems() -%} +{% for key, value in ldap_config_flags.items() -%} {{ key }} = {{ value }} {% endfor -%} {% endif -%} diff --git a/templates/keystone-fernet-rotate-sync b/templates/keystone-fernet-rotate-sync new file mode 100644 index 0000000000000000000000000000000000000000..184c0e88a876bfdd80a450e9612488051ee8d00f --- /dev/null +++ b/templates/keystone-fernet-rotate-sync @@ -0,0 +1,9 @@ +# call the rotate and sync function at 5 min intervals. The actual function +# works out when to do the rotate and sync of the keys. 
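+#
+# Illustration only (the unit name, charm path and minute value below are
+# examples, not values rendered by the charm): with minute='*/5' and syslog
+# disabled, the resulting entry would look roughly like:
+#   */5 * * * * root /usr/bin/juju-run keystone/0 /var/lib/juju/agents/unit-keystone-0/charm/scripts/fernet_rotate_and_sync.py >> /var/log/keystone/keystone-fernet-rotate-sync.log 2>&1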
+{% if enabled -%} +{% if use_syslog -%} +{{ minute }} * * * * root /usr/bin/juju-run {{ unit_name }} {{ charm_dir }}/scripts/fernet_rotate_and_sync.py 2>&1 | logger -t keystone-fernet-rotate-sync +{% else -%} +{{ minute }} * * * * root /usr/bin/juju-run {{ unit_name }} {{ charm_dir }}/scripts/fernet_rotate_and_sync.py >> /var/log/keystone/keystone-fernet-rotate-sync.log 2>&1 +{% endif -%} +{% endif -%} diff --git a/templates/kilo/keystone.conf b/templates/kilo/keystone.conf index 2b1d3f72768163308be2d86b9cce05fdf954d4fc..fec774b8b2e2826d422ced273d23ce69810a9302 100644 --- a/templates/kilo/keystone.conf +++ b/templates/kilo/keystone.conf @@ -26,7 +26,7 @@ connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{ {% else -%} connection = sqlite:////var/lib/keystone/keystone.db {% endif -%} -idle_timeout = 200 +connection_recycle_time = 200 [identity] driver = keystone.identity.backends.{{ identity_backend }}.Identity @@ -54,13 +54,7 @@ driver = keystone.catalog.backends.sql.Catalog [token] driver = keystone.token.persistence.backends.sql.Token -{% if token_provider == 'pki' -%} -provider = keystone.token.providers.pki.Provider -{% elif token_provider == 'pkiz' -%} -provider = keystone.token.providers.pkiz.Provider -{% else -%} provider = keystone.token.providers.uuid.Provider -{% endif -%} expiration = {{ token_expiration }} {% include "parts/section-signing" %} @@ -98,7 +92,7 @@ password = {{ ldap_password }} suffix = {{ ldap_suffix }} {% if ldap_config_flags -%} -{% for key, value in ldap_config_flags.iteritems() -%} +{% for key, value in ldap_config_flags.items() -%} {{ key }} = {{ value }} {% endfor -%} {% endif -%} @@ -121,3 +115,7 @@ group_allow_update = False group_allow_delete = False {% endif -%} {% endif -%} + +[oslo_middleware] +# Bug #1819134 +max_request_body_size = 114688 \ No newline at end of file diff --git a/templates/mitaka/keystone.conf b/templates/mitaka/keystone.conf index edf46e0c2fee86e8e08fdecfdafc3fc315e84842..648548e063a83537e50a5ef23698fa881799f9cb 100644 --- a/templates/mitaka/keystone.conf +++ b/templates/mitaka/keystone.conf @@ -17,7 +17,7 @@ connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{ {% else -%} connection = sqlite:////var/lib/keystone/keystone.db {% endif -%} -idle_timeout = 200 +connection_recycle_time = 200 [identity] driver = {{ identity_backend }} @@ -45,13 +45,7 @@ driver = sql [token] driver = sql -{% if token_provider == 'pki' -%} -provider = keystone.token.providers.pki.Provider -{% elif token_provider == 'pkiz' -%} -provider = keystone.token.providers.pkiz.Provider -{% else -%} provider = uuid -{% endif -%} expiration = {{ token_expiration }} {% include "parts/section-signing" %} @@ -107,7 +101,7 @@ password = {{ ldap_password }} suffix = {{ ldap_suffix }} {% if ldap_config_flags -%} -{% for key, value in ldap_config_flags.iteritems() -%} +{% for key, value in ldap_config_flags.items() -%} {{ key }} = {{ value }} {% endfor -%} {% endif -%} @@ -138,3 +132,6 @@ admin_project_name = admin {% endif -%} {% include "section-oslo-middleware" %} +# This goes in the section above, selectively +# Bug #1819134 +max_request_body_size = 114688 diff --git a/templates/ocata/keystone.conf b/templates/ocata/keystone.conf index a0306e5db122b072332330f3642283ac95b1f08e..fb87db408ce65bafb2a5596ad7a852297fd44339 100644 --- a/templates/ocata/keystone.conf +++ b/templates/ocata/keystone.conf @@ -17,7 +17,7 @@ connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{ {% else -%} connection 
= sqlite:////var/lib/keystone/keystone.db {% endif -%} -idle_timeout = 200 +connection_recycle_time = 200 [identity] driver = {{ identity_backend }} @@ -44,16 +44,19 @@ driver = sql [endpoint_filter] [token] -driver = sql -{% if token_provider == 'pki' -%} -provider = keystone.token.providers.pki.Provider -{% elif token_provider == 'pkiz' -%} -provider = keystone.token.providers.pkiz.Provider +{% if token_provider == 'fernet' -%} +provider = fernet {% else -%} +driver = sql provider = uuid {% endif -%} expiration = {{ token_expiration }} +{% if token_provider == 'fernet' -%} +[fernet_tokens] +max_active_keys = {{ fernet_max_active_keys }} +{% endif -%} + {% include "parts/section-signing" %} {% include "section-oslo-cache" %} @@ -66,6 +69,9 @@ driver = {{ assignment_backend }} [oauth1] +{% if middlewares -%} +{% include "parts/section-middleware" %} +{% else %} [auth] methods = external,password,token,oauth1,mapped,openid,totp,application_credential{% if enable_oidc %},oidc{% endif %}{% if enable_saml2 %},saml2{% endif %} password = keystone.auth.plugins.password.Password @@ -92,7 +98,7 @@ password = {{ ldap_password }} suffix = {{ ldap_suffix }} {% if ldap_config_flags -%} -{% for key, value in ldap_config_flags.iteritems() -%} +{% for key, value in ldap_config_flags.items() -%} {{ key }} = {{ value }} {% endfor -%} {% endif -%} @@ -125,3 +131,6 @@ admin_project_name = admin {% include "parts/section-federation" %} {% include "section-oslo-middleware" %} +# This goes in the section above, selectively +# Bug #1819134 +max_request_body_size = 114688 diff --git a/templates/parts/section-middleware b/templates/parts/section-middleware new file mode 100644 index 0000000000000000000000000000000000000000..e65f1d98a4390b41ee9410db5a992d1570d81e94 --- /dev/null +++ b/templates/parts/section-middleware @@ -0,0 +1,6 @@ +{% for section in sections -%} +[{{section}}] +{% for key, value in sections[section].items() -%} +{{ key }} = {{ value }} +{% endfor %} +{%- endfor %} diff --git a/templates/queens/keystone.conf b/templates/queens/keystone.conf index a0306e5db122b072332330f3642283ac95b1f08e..072c4e874d2823c1d9359d6b3a437e0cec5161c5 100644 --- a/templates/queens/keystone.conf +++ b/templates/queens/keystone.conf @@ -18,6 +18,7 @@ connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{ connection = sqlite:////var/lib/keystone/keystone.db {% endif -%} idle_timeout = 200 +connection_recycle_time = 200 [identity] driver = {{ identity_backend }} @@ -49,11 +50,18 @@ driver = sql provider = keystone.token.providers.pki.Provider {% elif token_provider == 'pkiz' -%} provider = keystone.token.providers.pkiz.Provider +{% if token_provider == 'fernet' -%} +provider = fernet {% else -%} provider = uuid {% endif -%} expiration = {{ token_expiration }} +{% if token_provider == 'fernet' -%} +[fernet_tokens] +max_active_keys = {{ fernet_max_active_keys }} +{% endif -%} + {% include "parts/section-signing" %} {% include "section-oslo-cache" %} @@ -77,6 +85,13 @@ oidc = keystone.auth.plugins.mapped.Mapped {% if enable_saml2 -%} saml2 = keystone.auth.plugins.mapped.Mapped {% endif -%} +{% if middlewares -%} +{% include "parts/section-middleware" %} +{% else %} +[auth] +methods = {{ auth_methods }} +password = keystone.auth.plugins.password.Password +{% endif %} [paste_deploy] config_file = {{ paste_config_file }} @@ -92,7 +107,7 @@ password = {{ ldap_password }} suffix = {{ ldap_suffix }} {% if ldap_config_flags -%} -{% for key, value in ldap_config_flags.iteritems() -%} +{% for key, value in 
ldap_config_flags.items() -%} {{ key }} = {{ value }} {% endfor -%} {% endif -%} @@ -125,3 +140,6 @@ admin_project_name = admin {% include "parts/section-federation" %} {% include "section-oslo-middleware" %} +# This goes in the section above, selectively +# Bug #1819134 +max_request_body_size = 114688 diff --git a/templates/rocky/keystone.conf b/templates/rocky/keystone.conf new file mode 100644 index 0000000000000000000000000000000000000000..4b8ce500b1e7786fd09399754c6fd356eda702c8 --- /dev/null +++ b/templates/rocky/keystone.conf @@ -0,0 +1,112 @@ +# rocky +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. +############################################################################### +[DEFAULT] +admin_token = {{ token }} +use_syslog = {{ use_syslog }} +log_config_append = {{ log_config }} +debug = {{ debug }} +public_endpoint = {{ public_endpoint }} +admin_endpoint = {{ admin_endpoint }} + +[database] +{% if database_host -%} +connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %} +{% else -%} +connection = sqlite:////var/lib/keystone/keystone.db +{% endif -%} +connection_recycle_time = 200 + +[identity] +driver = {{ identity_backend }} +{% if default_domain_id -%} +default_domain_id = {{ default_domain_id }} +{% endif -%} + +{% if api_version == 3 -%} +domain_specific_drivers_enabled = True +domain_config_dir = {{ domain_config_dir }} +{% endif -%} + +[credential] +driver = sql + +[trust] +driver = sql + +[catalog] +driver = sql + +[endpoint_filter] + +[token] +expiration = {{ token_expiration }} + +[fernet_tokens] +max_active_keys = {{ fernet_max_active_keys }} + +{% include "parts/section-signing" %} + +{% include "section-oslo-cache" %} + +[policy] +driver = sql + +[assignment] +driver = {{ assignment_backend }} + +[auth] +methods = {{ auth_methods }} + +[paste_deploy] +config_file = {{ paste_config_file }} + +[extra_headers] +Distribution = Ubuntu + +[ldap] +{% if identity_backend == 'ldap' -%} +url = {{ ldap_server }} +user = {{ ldap_user }} +password = {{ ldap_password }} +suffix = {{ ldap_suffix }} + +{% if ldap_config_flags -%} +{% for key, value in ldap_config_flags.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} + +{% if ldap_readonly -%} +user_allow_create = False +user_allow_update = False +user_allow_delete = False + +tenant_allow_create = False +tenant_allow_update = False +tenant_allow_delete = False + +role_allow_create = False +role_allow_update = False +role_allow_delete = False + +group_allow_create = False +group_allow_update = False +group_allow_delete = False +{% endif -%} +{% endif -%} + +{% if api_version == 3 -%} +[resource] +admin_project_domain_name = {{ admin_domain_name }} +admin_project_name = admin +{% endif -%} + +{% include "parts/section-federation" %} + +{% include "section-oslo-middleware" %} +# This goes in the section above, selectively +# Bug #1819134 +max_request_body_size = 114688 diff --git a/templates/rocky/policy.json b/templates/rocky/policy.json new file mode 100644 index 0000000000000000000000000000000000000000..58b2a81b598bcfe514902db809be3827354eea6a --- /dev/null +++ b/templates/rocky/policy.json @@ -0,0 +1,261 @@ +{ + "admin_required": "role:{{ admin_role }}", + 
"cloud_admin": "rule:admin_required and (is_admin_project:True or domain_id:{{ admin_domain_id }} or project_id:{{ service_tenant_id }})", + "service_role": "role:service", + "service_or_admin": "rule:admin_required or rule:service_role", + "owner": "user_id:%(user_id)s or user_id:%(target.token.user_id)s", + "admin_or_owner": "(rule:admin_required and domain_id:%(target.token.user.domain.id)s) or rule:owner", + "admin_and_matching_domain_id": "rule:admin_required and domain_id:%(domain_id)s", + "service_admin_or_owner": "rule:service_or_admin or rule:owner", + + "default": "rule:admin_required", + + "identity:get_region": "", + "identity:list_regions": "", + "identity:create_region": "rule:cloud_admin", + "identity:update_region": "rule:cloud_admin", + "identity:delete_region": "rule:cloud_admin", + + "identity:get_service": "rule:admin_required", + "identity:list_services": "rule:admin_required", + "identity:create_service": "rule:cloud_admin", + "identity:update_service": "rule:cloud_admin", + "identity:delete_service": "rule:cloud_admin", + + "identity:get_endpoint": "rule:admin_required", + "identity:list_endpoints": "rule:admin_required", + "identity:create_endpoint": "rule:cloud_admin", + "identity:update_endpoint": "rule:cloud_admin", + "identity:delete_endpoint": "rule:cloud_admin", + + "identity:get_registered_limit": "", + "identity:list_registered_limits": "", + "identity:create_registered_limits": "rule:admin_required", + "identity:update_registered_limit": "rule:admin_required", + "identity:delete_registered_limit": "rule:admin_required", + + "identity:get_limit_model": "", + "identity:get_limit": "", + "identity:list_limits": "", + "identity:create_limits": "rule:admin_required", + "identity:update_limit": "rule:admin_required", + "identity:delete_limit": "rule:admin_required", + + "identity:get_domain": "rule:cloud_admin or rule:admin_and_matching_domain_id or token.project.domain.id:%(target.domain.id)s", + "identity:list_domains": "rule:cloud_admin", + "identity:create_domain": "rule:cloud_admin", + "identity:update_domain": "rule:cloud_admin", + "identity:delete_domain": "rule:cloud_admin", + + "admin_and_matching_target_project_domain_id": "rule:admin_required and domain_id:%(target.project.domain_id)s", + "admin_and_matching_project_domain_id": "rule:admin_required and domain_id:%(project.domain_id)s", + "identity:get_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id or project_id:%(target.project.id)s", + "identity:list_projects": "rule:cloud_admin or rule:admin_and_matching_domain_id", + "identity:list_user_projects": "rule:owner or rule:admin_and_matching_domain_id", + "identity:create_project": "rule:cloud_admin or rule:admin_and_matching_project_domain_id", + "identity:update_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id", + "identity:delete_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id", + "identity:create_project_tag": "rule:admin_required", + "identity:delete_project_tag": "rule:admin_required", + "identity:get_project_tag": "rule:admin_required", + "identity:list_project_tags": "rule:admin_required", + "identity:delete_project_tags": "rule:admin_required", + "identity:update_project_tags": "rule:admin_required", + + "admin_and_matching_target_user_domain_id": "rule:admin_required and domain_id:%(target.user.domain_id)s", + "admin_and_matching_user_domain_id": "rule:admin_required and domain_id:%(user.domain_id)s", + "identity:get_user": "rule:cloud_admin or 
rule:admin_and_matching_target_user_domain_id or rule:owner", + "identity:list_users": "rule:cloud_admin or rule:admin_and_matching_domain_id", + "identity:create_user": "rule:cloud_admin or rule:admin_and_matching_user_domain_id", + "identity:update_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id", + "identity:delete_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id", + + "admin_and_matching_target_group_domain_id": "rule:admin_required and domain_id:%(target.group.domain_id)s", + "admin_and_matching_group_domain_id": "rule:admin_required and domain_id:%(group.domain_id)s", + "identity:get_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", + "identity:list_groups": "rule:cloud_admin or rule:admin_and_matching_domain_id", + "identity:list_groups_for_user": "rule:owner or rule:admin_and_matching_target_user_domain_id", + "identity:create_group": "rule:cloud_admin or rule:admin_and_matching_group_domain_id", + "identity:update_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", + "identity:delete_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", + "identity:list_users_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", + "identity:remove_user_from_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", + "identity:check_user_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", + "identity:add_user_to_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", + + "identity:get_credential": "rule:admin_required", + "identity:list_credentials": "rule:admin_required or user_id:%(user_id)s", + "identity:create_credential": "rule:admin_required", + "identity:update_credential": "rule:admin_required", + "identity:delete_credential": "rule:admin_required", + + "identity:ec2_get_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", + "identity:ec2_list_credentials": "rule:admin_required or rule:owner", + "identity:ec2_create_credential": "rule:admin_required or rule:owner", + "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", + + "identity:get_role": "rule:admin_required", + "identity:list_roles": "rule:admin_required", + "identity:create_role": "rule:cloud_admin", + "identity:update_role": "rule:cloud_admin", + "identity:delete_role": "rule:cloud_admin", + + "identity:get_domain_role": "rule:cloud_admin or rule:get_domain_roles", + "identity:list_domain_roles": "rule:cloud_admin or rule:list_domain_roles", + "identity:create_domain_role": "rule:cloud_admin or rule:domain_admin_matches_domain_role", + "identity:update_domain_role": "rule:cloud_admin or rule:domain_admin_matches_target_domain_role", + "identity:delete_domain_role": "rule:cloud_admin or rule:domain_admin_matches_target_domain_role", + "domain_admin_matches_domain_role": "rule:admin_required and domain_id:%(role.domain_id)s", + "get_domain_roles": "rule:domain_admin_matches_target_domain_role or rule:project_admin_matches_target_domain_role", + "domain_admin_matches_target_domain_role": "rule:admin_required and domain_id:%(target.role.domain_id)s", + "project_admin_matches_target_domain_role": "rule:admin_required and project_domain_id:%(target.role.domain_id)s", + "list_domain_roles": "rule:domain_admin_matches_filter_on_list_domain_roles or rule:project_admin_matches_filter_on_list_domain_roles", + 
"domain_admin_matches_filter_on_list_domain_roles": "rule:admin_required and domain_id:%(domain_id)s", + "project_admin_matches_filter_on_list_domain_roles": "rule:admin_required and project_domain_id:%(domain_id)s", + "admin_and_matching_prior_role_domain_id": "rule:admin_required and domain_id:%(target.prior_role.domain_id)s", + "implied_role_matches_prior_role_domain_or_global": "(domain_id:%(target.implied_role.domain_id)s or None:%(target.implied_role.domain_id)s)", + + "identity:get_implied_role": "rule:cloud_admin or rule:admin_and_matching_prior_role_domain_id", + "identity:list_implied_roles": "rule:cloud_admin or rule:admin_and_matching_prior_role_domain_id", + "identity:create_implied_role": "rule:cloud_admin or (rule:admin_and_matching_prior_role_domain_id and rule:implied_role_matches_prior_role_domain_or_global)", + "identity:delete_implied_role": "rule:cloud_admin or rule:admin_and_matching_prior_role_domain_id", + "identity:list_role_inference_rules": "rule:cloud_admin", + "identity:check_implied_role": "rule:cloud_admin or rule:admin_and_matching_prior_role_domain_id", + + "identity:list_system_grants_for_user": "rule:admin_required", + "identity:check_system_grant_for_user": "rule:admin_required", + "identity:create_system_grant_for_user": "rule:admin_required", + "identity:revoke_system_grant_for_user": "rule:admin_required", + + "identity:list_system_grants_for_group": "rule:admin_required", + "identity:check_system_grant_for_group": "rule:admin_required", + "identity:create_system_grant_for_group": "rule:admin_required", + "identity:revoke_system_grant_for_group": "rule:admin_required", + + "identity:check_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", + "identity:list_grants": "rule:cloud_admin or rule:domain_admin_for_list_grants or rule:project_admin_for_list_grants", + "identity:create_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", + "identity:revoke_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", + "domain_admin_for_grants": "rule:domain_admin_for_global_role_grants or rule:domain_admin_for_domain_role_grants", + "domain_admin_for_global_role_grants": "rule:admin_required and None:%(target.role.domain_id)s and rule:domain_admin_grant_match", + "domain_admin_for_domain_role_grants": "rule:admin_required and domain_id:%(target.role.domain_id)s and rule:domain_admin_grant_match", + "domain_admin_grant_match": "domain_id:%(domain_id)s or domain_id:%(target.project.domain_id)s", + "project_admin_for_grants": "rule:project_admin_for_global_role_grants or rule:project_admin_for_domain_role_grants", + "project_admin_for_global_role_grants": "rule:admin_required and None:%(target.role.domain_id)s and project_id:%(project_id)s", + "project_admin_for_domain_role_grants": "rule:admin_required and project_domain_id:%(target.role.domain_id)s and project_id:%(project_id)s", + "domain_admin_for_list_grants": "rule:admin_required and rule:domain_admin_grant_match", + "project_admin_for_list_grants": "rule:admin_required and project_id:%(project_id)s", + + "admin_on_domain_filter": "rule:admin_required and domain_id:%(scope.domain.id)s", + "admin_on_project_filter": "rule:admin_required and project_id:%(scope.project.id)s", + "admin_on_domain_of_project_filter": "rule:admin_required and domain_id:%(target.project.domain_id)s", + "identity:list_role_assignments": "rule:cloud_admin or rule:admin_on_domain_filter or rule:admin_on_project_filter", + 
"identity:list_role_assignments_for_tree": "rule:cloud_admin or rule:admin_on_domain_of_project_filter", + "identity:get_policy": "rule:cloud_admin", + "identity:list_policies": "rule:cloud_admin", + "identity:create_policy": "rule:cloud_admin", + "identity:update_policy": "rule:cloud_admin", + "identity:delete_policy": "rule:cloud_admin", + + "identity:check_token": "rule:admin_or_owner", + "identity:validate_token": "rule:service_admin_or_owner", + "identity:validate_token_head": "rule:service_or_admin", + "identity:revocation_list": "rule:service_or_admin", + "identity:revoke_token": "rule:admin_or_owner", + + "identity:create_trust": "user_id:%(trust.trustor_user_id)s", + "identity:list_trusts": "", + "identity:list_roles_for_trust": "", + "identity:get_role_for_trust": "", + "identity:delete_trust": "", + "identity:get_trust": "", + + "identity:create_consumer": "rule:admin_required", + "identity:get_consumer": "rule:admin_required", + "identity:list_consumers": "rule:admin_required", + "identity:delete_consumer": "rule:admin_required", + "identity:update_consumer": "rule:admin_required", + + "identity:authorize_request_token": "rule:admin_required", + "identity:list_access_token_roles": "rule:admin_required", + "identity:get_access_token_role": "rule:admin_required", + "identity:list_access_tokens": "rule:admin_required", + "identity:get_access_token": "rule:admin_required", + "identity:delete_access_token": "rule:admin_required", + + "identity:list_projects_for_endpoint": "rule:admin_required", + "identity:add_endpoint_to_project": "rule:admin_required", + "identity:check_endpoint_in_project": "rule:admin_required", + "identity:list_endpoints_for_project": "rule:admin_required", + "identity:remove_endpoint_from_project": "rule:admin_required", + + "identity:create_endpoint_group": "rule:admin_required", + "identity:list_endpoint_groups": "rule:admin_required", + "identity:get_endpoint_group": "rule:admin_required", + "identity:update_endpoint_group": "rule:admin_required", + "identity:delete_endpoint_group": "rule:admin_required", + "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", + "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", + "identity:get_endpoint_group_in_project": "rule:admin_required", + "identity:list_endpoint_groups_for_project": "rule:admin_required", + "identity:add_endpoint_group_to_project": "rule:admin_required", + "identity:remove_endpoint_group_from_project": "rule:admin_required", + + "identity:create_identity_provider": "rule:cloud_admin", + "identity:list_identity_providers": "rule:cloud_admin", + "identity:get_identity_provider": "rule:cloud_admin", + "identity:update_identity_provider": "rule:cloud_admin", + "identity:delete_identity_provider": "rule:cloud_admin", + + "identity:create_protocol": "rule:cloud_admin", + "identity:update_protocol": "rule:cloud_admin", + "identity:get_protocol": "rule:cloud_admin", + "identity:list_protocols": "rule:cloud_admin", + "identity:delete_protocol": "rule:cloud_admin", + + "identity:create_mapping": "rule:cloud_admin", + "identity:get_mapping": "rule:cloud_admin", + "identity:list_mappings": "rule:cloud_admin", + "identity:delete_mapping": "rule:cloud_admin", + "identity:update_mapping": "rule:cloud_admin", + + "identity:create_service_provider": "rule:cloud_admin", + "identity:list_service_providers": "rule:cloud_admin", + "identity:get_service_provider": "rule:cloud_admin", + "identity:update_service_provider": "rule:cloud_admin", + 
"identity:delete_service_provider": "rule:cloud_admin", + + "identity:get_auth_catalog": "", + "identity:get_auth_projects": "", + "identity:get_auth_domains": "", + "identity:get_auth_system": "", + + "identity:list_projects_for_user": "", + "identity:list_domains_for_user": "", + + "identity:list_revoke_events": "rule:service_or_admin", + + "identity:create_policy_association_for_endpoint": "rule:cloud_admin", + "identity:check_policy_association_for_endpoint": "rule:cloud_admin", + "identity:delete_policy_association_for_endpoint": "rule:cloud_admin", + "identity:create_policy_association_for_service": "rule:cloud_admin", + "identity:check_policy_association_for_service": "rule:cloud_admin", + "identity:delete_policy_association_for_service": "rule:cloud_admin", + "identity:create_policy_association_for_region_and_service": "rule:cloud_admin", + "identity:check_policy_association_for_region_and_service": "rule:cloud_admin", + "identity:delete_policy_association_for_region_and_service": "rule:cloud_admin", + "identity:get_policy_for_endpoint": "rule:cloud_admin", + "identity:list_endpoints_for_policy": "rule:cloud_admin", + + "identity:create_domain_config": "rule:cloud_admin", + "identity:get_domain_config": "rule:cloud_admin", + "identity:get_security_compliance_domain_config": "", + "identity:update_domain_config": "rule:cloud_admin", + "identity:delete_domain_config": "rule:cloud_admin", + "identity:get_domain_config_default": "rule:cloud_admin", + + "identity:get_application_credential": "rule:admin_or_owner", + "identity:list_application_credentials": "rule:admin_or_owner", + "identity:create_application_credential": "rule:admin_or_owner", + "identity:delete_application_credential": "rule:admin_or_owner" +} diff --git a/test-requirements.txt b/test-requirements.txt index 382489493b1249590ed377ae2afaaecd5d6bdeb9..7d9c2587dd5b31746261023d7028c66b1bbbe3a0 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,30 +1,18 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. -coverage>=3.6 +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. See the 'global' dir contents for available +# choices of *requirements.txt files for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. 
+# +charm-tools>=2.4.4 +requests>=2.18.4 mock>=1.2 flake8>=2.2.4,<=2.4.1 -os-testr>=0.4.1 -charm-tools>=2.0.0 -requests==2.6.0 -# BEGIN: Amulet OpenStack Charm Helper Requirements -# Liberty client lower constraints -# The websocket-client issue should be resolved in the jujulib/theblues -# Temporarily work around it -websocket-client<=0.40.0 -amulet>=1.14.3,<2.0 -bundletester>=0.6.1,<1.0 -python-ceilometerclient>=1.5.0 -python-cinderclient>=1.4.0 -python-glanceclient>=1.1.0 -python-heatclient>=0.8.0 -python-keystoneclient>=1.7.1 -python-neutronclient>=3.1.0 -python-novaclient>=2.30.1 -python-openstackclient>=1.7.0 -python-swiftclient>=2.6.0 -pika>=0.10.0,<1.0 -distro-info -# END: Amulet OpenStack Charm Helper Requirements -# NOTE: workaround for 14.04 pip/tox -pytz +stestr>=2.2.0 +coverage>=4.5.2 +pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) +git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' +git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack diff --git a/tests/README.md b/tests/README.md deleted file mode 100644 index 046be7fbe653e475ce7b8d92d64f17ac6325f314..0000000000000000000000000000000000000000 --- a/tests/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Overview - -This directory provides Amulet tests to verify basic deployment functionality -from the perspective of this charm, its requirements and its features, as -exercised in a subset of the full OpenStack deployment test bundle topology. - -For full details on functional testing of OpenStack charms please refer to -the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing) -section of the OpenStack Charm Guide. diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py deleted file mode 100644 index 7216dafb75f6974fc80fe20576c44ddf82c2506e..0000000000000000000000000000000000000000 --- a/tests/basic_deployment.py +++ /dev/null @@ -1,1007 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Basic keystone amulet functional tests. 
-""" - -import amulet -import json -import os -import yaml - -from charmhelpers.contrib.openstack.amulet.deployment import ( - OpenStackAmuletDeployment -) - -from charmhelpers.contrib.openstack.amulet.utils import ( - OpenStackAmuletUtils, - DEBUG, - # ERROR -) -import keystoneclient -from keystoneauth1 import exceptions as ksauth1_exceptions - -# Use DEBUG to turn on debug logging -u = OpenStackAmuletUtils(DEBUG) - - -class KeystoneBasicDeployment(OpenStackAmuletDeployment): - """Amulet tests on a basic keystone deployment.""" - - DEFAULT_DOMAIN = 'default' - - def __init__(self, series=None, openstack=None, - source=None, git=False, stable=True, snap_source=None): - """Deploy the entire test environment.""" - super(KeystoneBasicDeployment, self).__init__(series, openstack, - source, stable) - self.keystone_num_units = 3 - self.keystone_api_version = 2 - self.git = git - - self._setup_test_object(snap_source) - self._add_services() - self._add_relations() - self._configure_services() - self._deploy() - - u.log.info('Waiting on extended status checks...') - self.exclude_services = [] - self._auto_wait_for_status(exclude_services=self.exclude_services) - - self.d.sentry.wait() - self._initialize_tests() - - def _setup_test_object(self, snap_source): - self.snap_source = snap_source - if self.snap_source: - self.config_base = '/var/snap/keystone/common' - self.keystone_conf = ('{}/etc/keystone/keystone.conf.d/' - 'keystone.conf'.format(self.config_base)) - self.process_services = ["haproxy", "nginx", "uwsgi"] - self.init_services = ["snap.keystone.nginx", - "snap.keystone.uwsgi"] - self.no_origin = ['keystone'] - self.keystone_config = {'openstack-origin': self.snap_source} - self.pymysql = '+pymysql' - self.policy_json = ('{}/etc/keystone/keystone.conf.d/policy.json' - ''.format(self.config_base)) - self.logging_config = ('{}/etc/keystone/logging.conf' - ''.format(self.config_base)) - self.log_file = '{}/log/keystone.log'.format(self.config_base) - self.services_to_configs = {'uwsgi': self.keystone_conf} - else: - self.config_base = '' - self.keystone_conf = '/etc/keystone/keystone.conf' - self.no_origin = [] - self.keystone_config = {} - self.pymysql = '' - self.policy_json = ('{}/etc/keystone/policy.json' - ''.format(self.config_base)) - self.logging_config = ('{}/etc/keystone/logging.conf' - ''.format(self.config_base)) - self.log_file = '/var/log/keystone/keystone.log' - - if self.is_liberty_or_newer(): - self.process_services = ["apache2", "haproxy"] - self.init_services = ['apache2'] - self.services_to_configs = {'apache2': self.keystone_conf} - else: - self.process_services = ["keystone-all", "apache2", "haproxy"] - self.init_services = ['keystone'] - self.services_to_configs = {'keystone-all': self.keystone_conf} - - def _assert_services(self, should_run): - for unit in self.keystone_sentries: - u.get_unit_process_ids( - {unit: self.process_services}, expect_success=should_run) - - def _add_services(self): - """Add services - - Add the services that we're testing, where keystone is local, - and the rest of the service are from lp branches that are - compatible with the local charm (e.g. stable or next). 
- """ - this_service = {'name': 'keystone', 'units': self.keystone_num_units} - other_services = [ - {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}}, - {'name': 'rabbitmq-server'}, # satisfy wrkload stat - {'name': 'cinder'}, - ] - super(KeystoneBasicDeployment, self)._add_services( - this_service, other_services, no_origin=self.no_origin) - - def _add_relations(self): - """Add all of the relations for the services.""" - relations = {'keystone:shared-db': 'percona-cluster:shared-db', - 'cinder:shared-db': 'percona-cluster:shared-db', - 'cinder:amqp': 'rabbitmq-server:amqp', - 'cinder:identity-service': 'keystone:identity-service'} - super(KeystoneBasicDeployment, self)._add_relations(relations) - - def _configure_services(self): - """Configure all of the services.""" - self.keystone_config.update({ - 'admin-password': 'openstack', - 'admin-token': 'ubuntutesting', - 'preferred-api-version': self.keystone_api_version, - }) - - if self.git: - amulet_http_proxy = os.environ.get('AMULET_HTTP_PROXY') - - reqs_repo = 'git://github.com/openstack/requirements' - keystone_repo = 'git://github.com/openstack/keystone' - if self._get_openstack_release() == self.trusty_icehouse: - reqs_repo = 'git://github.com/coreycb/requirements' - keystone_repo = 'git://github.com/coreycb/keystone' - - branch = 'stable/' + self._get_openstack_release_string() - - openstack_origin_git = { - 'repositories': [ - {'name': 'requirements', - 'repository': reqs_repo, - 'branch': branch}, - {'name': 'keystone', - 'repository': keystone_repo, - 'branch': branch}, - ], - 'directory': '/mnt/openstack-git', - 'http_proxy': amulet_http_proxy, - 'https_proxy': amulet_http_proxy, - } - self.keystone_config['openstack-origin-git'] = \ - yaml.dump(openstack_origin_git) - - pxc_config = { - 'dataset-size': '25%', - 'max-connections': 1000, - 'root-password': 'ChangeMe123', - 'sst-password': 'ChangeMe123', - } - cinder_config = {'block-device': 'vdb', - 'glance-api-version': '2', - 'overwrite': 'true', - 'ephemeral-unmount': '/mnt'} - configs = { - 'keystone': self.keystone_config, - 'percona-cluster': pxc_config, - 'cinder': cinder_config, - } - super(KeystoneBasicDeployment, self)._configure_services(configs) - - def api_change_required(self, api_version): - if api_version == 2: - try: - self.keystone_v2.service_catalog.get_urls() - u.log.debug('Already at required api version {}' - ''.format(api_version)) - return False - except (AttributeError, ksauth1_exceptions.http.Unauthorized): - u.log.debug('Change to api version {} required' - ''.format(api_version)) - return True - else: - try: - self.keystone_v3.service_catalog.get_urls() - u.log.debug('Already at required api version {}' - ''.format(api_version)) - return False - except (AttributeError, ksauth1_exceptions.http.Unauthorized): - u.log.debug('Change to api version {} required' - ''.format(api_version)) - return True - - def set_api_version(self, api_version): - # Avoid costly settings if we are already at the correct api_version - if not self.api_change_required(api_version): - return True - u.log.debug('Setting preferred-api-version={}'.format(api_version)) - se_rels = [] - for i in range(0, self.keystone_num_units): - se_rels.append( - (self.keystone_sentries[i], 'cinder:identity-service'), - ) - # Make config change, wait for propagation - u.keystone_configure_api_version(se_rels, self, api_version) - - # Success if we get here, get and store client. 
- if api_version == 2: - self.keystone_v2 = self.get_keystone_client(api_version=2) - else: - self.keystone_v3 = self.get_keystone_client(api_version=3) - self.keystone_api_version = api_version - - def get_keystone_client(self, api_version=None, keystone_ip=None): - if keystone_ip is None: - keystone_ip = self.keystone_ip - if api_version == 2: - return u.authenticate_keystone_admin(self.keystone_sentries[0], - user='admin', - password='openstack', - tenant='admin', - api_version=api_version, - keystone_ip=keystone_ip) - else: - return u.authenticate_keystone_admin(self.keystone_sentries[0], - user='admin', - password='openstack', - api_version=api_version, - keystone_ip=keystone_ip) - - def create_users_v2(self): - # Create a demo tenant/role/user - self.demo_tenant = 'demoTenant' - self.demo_role = 'demoRole' - self.demo_user = 'demoUser' - if not u.tenant_exists(self.keystone_v2, self.demo_tenant): - tenant = self.keystone_v2.tenants.create( - tenant_name=self.demo_tenant, - description='demo tenant', - enabled=True) - self.keystone_v2.roles.create(name=self.demo_role) - self.keystone_v2.users.create(name=self.demo_user, - password='password', - tenant_id=tenant.id, - email='demo@demo.com') - - # Authenticate keystone demo - self.keystone_demo = u.authenticate_keystone_user( - self.keystone_v2, user=self.demo_user, - password='password', tenant=self.demo_tenant) - - def create_users_v3(self): - # Create a demo tenant/role/user - self.demo_project = 'demoProject' - self.demo_user_v3 = 'demoUserV3' - self.demo_domain_admin = 'demoDomainAdminV3' - self.demo_domain = 'demoDomain' - try: - domain = self.keystone_v3.domains.find(name=self.demo_domain) - except keystoneclient.exceptions.NotFound: - domain = self.keystone_v3.domains.create( - self.demo_domain, - description='Demo Domain', - enabled=True - ) - - try: - self.keystone_v3.projects.find(name=self.demo_project) - except keystoneclient.exceptions.NotFound: - self.keystone_v3.projects.create( - self.demo_project, - domain, - description='Demo Project', - enabled=True, - ) - - try: - self.keystone_v3.roles.find(name=self.demo_role) - except keystoneclient.exceptions.NotFound: - self.keystone_v3.roles.create(name=self.demo_role) - - if not self.find_keystone_v3_user(self.keystone_v3, - self.demo_user_v3, - self.demo_domain): - self.keystone_v3.users.create( - self.demo_user_v3, - domain=domain.id, - project=self.demo_project, - password='password', - email='demov3@demo.com', - description='Demo', - enabled=True) - - try: - self.keystone_v3.roles.find(name='Admin') - except keystoneclient.exceptions.NotFound: - self.keystone_v3.roles.create(name='Admin') - - if not self.find_keystone_v3_user(self.keystone_v3, - self.demo_domain_admin, - self.demo_domain): - user = self.keystone_v3.users.create( - self.demo_domain_admin, - domain=domain.id, - project=self.demo_project, - password='password', - email='demoadminv3@demo.com', - description='Demo Admin', - enabled=True) - - role = self.keystone_v3.roles.find(name='Admin') - u.log.debug("self.keystone_v3.roles.grant('{}', user='{}', " - "domain='{}')".format(role.id, user.id, domain.id)) - self.keystone_v3.roles.grant( - role.id, - user=user.id, - domain=domain.id) - - def _initialize_tests(self): - """Perform final initialization before tests get run.""" - # Access the sentries for inspecting service units - self.pxc_sentry = self.d.sentry['percona-cluster'][0] - self.keystone_sentries = [] - for i in range(0, self.keystone_num_units): - 
self.keystone_sentries.append(self.d.sentry['keystone'][i]) - self.cinder_sentry = self.d.sentry['cinder'][0] - u.log.debug('openstack release val: {}'.format( - self._get_openstack_release())) - u.log.debug('openstack release str: {}'.format( - self._get_openstack_release_string())) - self.keystone_ip = self.keystone_sentries[0].relation( - 'shared-db', - 'percona-cluster:shared-db')['private-address'] - self.set_api_version(2) - self.create_users_v2() - - def test_100_services(self): - """Verify the expected services are running on the corresponding - service units.""" - services = { - self.cinder_sentry: ['cinder-scheduler', - 'cinder-volume'] - } - if self._get_openstack_release() >= self.xenial_ocata: - services.update({self.cinder_sentry: ['apache2']}) - else: - services.update({self.cinder_sentry: ['cinder-api']}) - - for i in range(0, self.keystone_num_units): - services.update({self.keystone_sentries[i]: self.init_services}) - - ret = u.validate_services_by_name(services) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def validate_keystone_tenants(self, client): - """Verify all existing tenants.""" - u.log.debug('Checking keystone tenants...') - expected = [ - {'name': 'services', - 'enabled': True, - 'description': 'Created by Juju', - 'id': u.not_null}, - {'name': 'demoTenant', - 'enabled': True, - 'description': 'demo tenant', - 'id': u.not_null}, - {'name': 'admin', - 'enabled': True, - 'description': 'Created by Juju', - 'id': u.not_null} - ] - if self.keystone_api_version == 2: - actual = client.tenants.list() - else: - actual = client.projects.list() - - ret = u.validate_tenant_data(expected, actual) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_102_keystone_tenants(self): - self.set_api_version(2) - self.validate_keystone_tenants(self.keystone_v2) - - def validate_keystone_roles(self, client): - """Verify all existing roles.""" - u.log.debug('Checking keystone roles...') - expected = [ - {'name': 'demoRole', - 'id': u.not_null}, - {'name': 'Admin', - 'id': u.not_null} - ] - actual = client.roles.list() - - ret = u.validate_role_data(expected, actual) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_104_keystone_roles(self): - self.set_api_version(2) - self.validate_keystone_roles(self.keystone_v2) - - def validate_keystone_users(self, client): - """Verify all existing roles.""" - u.log.debug('Checking keystone users...') - - if self._get_openstack_release() < self.xenial_pike: - cinder_user = 'cinder_cinderv2' - else: - cinder_user = 'cinderv3_cinderv2' - base = [ - {'name': 'demoUser', - 'enabled': True, - 'id': u.not_null, - 'email': 'demo@demo.com'}, - {'name': 'admin', - 'enabled': True, - 'id': u.not_null, - 'email': 'juju@localhost'}, - {'name': cinder_user, - 'enabled': True, - 'id': u.not_null, - 'email': u'juju@localhost'} - ] - expected = [] - for user_info in base: - if self.keystone_api_version == 2: - user_info['tenantId'] = u.not_null - else: - user_info['default_project_id'] = u.not_null - expected.append(user_info) - if self.keystone_api_version == 2: - actual = client.users.list() - else: - # Ensure list is scoped to the default domain - # when checking v3 users (v2->v3 upgrade check) - actual = client.users.list( - domain=client.domains.find(name=self.DEFAULT_DOMAIN).id - ) - ret = u.validate_user_data(expected, actual, - api_version=self.keystone_api_version) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def find_keystone_v3_user(self, client, username, domain): - """Find a user within a 
specified keystone v3 domain""" - domain_users = client.users.list( - domain=client.domains.find(name=domain).id - ) - for user in domain_users: - if username.lower() == user.name.lower(): - return user - return None - - def test_106_keystone_users(self): - self.set_api_version(2) - self.validate_keystone_users(self.keystone_v2) - - def is_liberty_or_newer(self): - # os_release = self._get_openstack_release_string() - os_release = self._get_openstack_release() - # if os_release >= 'liberty': - if os_release >= self.trusty_liberty: - return True - else: - u.log.info('Skipping test, {} < liberty'.format(os_release)) - return False - - def is_mitaka_or_newer(self): - # os_release = self._get_openstack_release_string() - os_release = self._get_openstack_release() - # if os_release >= 'mitaka': - if os_release >= self.xenial_mitaka: - return True - else: - u.log.info('Skipping test, {} < mitaka'.format(os_release)) - return False - - def test_112_keystone_list_resources(self): - if self.is_mitaka_or_newer(): - self.set_api_version(3) - self.validate_keystone_tenants(self.keystone_v3) - self.validate_keystone_roles(self.keystone_v3) - self.validate_keystone_users(self.keystone_v3) - - def test_118_keystone_create_users(self): - if self.is_mitaka_or_newer(): - self.set_api_version(3) - self.create_users_v3() - actual_user = self.find_keystone_v3_user(self.keystone_v3, - self.demo_user_v3, - self.demo_domain) - assert actual_user is not None - expect = { - 'default_project_id': self.demo_project, - 'email': 'demov3@demo.com', - 'name': self.demo_user_v3, - } - for key in expect.keys(): - u.log.debug('Checking user {} {} is {}'.format( - self.demo_user_v3, - key, - expect[key]) - ) - assert expect[key] == getattr(actual_user, key) - - def test_120_keystone_domains(self): - if self.is_mitaka_or_newer(): - self.set_api_version(3) - self.create_users_v3() - actual_domain = self.keystone_v3.domains.find( - name=self.demo_domain - ) - expect = { - 'name': self.demo_domain, - } - for key in expect.keys(): - u.log.debug('Checking domain {} {} is {}'.format( - self.demo_domain, - key, - expect[key]) - ) - assert expect[key] == getattr(actual_domain, key) - - def test_121_keystone_demo_domain_admin_access(self): - """Verify that end-user domain admin does not have elevated - privileges. Catch regressions like LP#1651989""" - if self.is_mitaka_or_newer(): - u.log.debug('Checking keystone end-user domain admin access...') - self.set_api_version(3) - # Authenticate as end-user domain admin and verify that we have - # appropriate access. 
- client = u.authenticate_keystone( - self.keystone_sentries[0].info['public-address'], - username=self.demo_domain_admin, - password='password', - api_version=3, - user_domain_name=self.demo_domain, - domain_name=self.demo_domain, - ) - - try: - # Expect failure - client.domains.list() - except Exception as e: - message = ('Retrieve domain list as end-user domain admin ' - 'NOT allowed...OK ({})'.format(e)) - u.log.debug(message) - pass - else: - message = ('Retrieve domain list as end-user domain admin ' - 'allowed') - amulet.raise_status(amulet.FAIL, msg=message) - - def test_122_keystone_project_scoped_admin_access(self): - """Verify that user admin in domain admin_domain has access to - identity-calls guarded by rule:cloud_admin when using project - scoped token.""" - if self.is_mitaka_or_newer(): - u.log.debug('Checking keystone project scoped admin access...') - self.set_api_version(3) - # Authenticate as end-user domain admin and verify that we have - # appropriate access. - client = u.authenticate_keystone( - self.keystone_sentries[0].info['public-address'], - username='admin', - password='openstack', - api_version=3, - admin_port=True, - user_domain_name='admin_domain', - project_domain_name='admin_domain', - project_name='admin', - ) - - try: - client.domains.list() - u.log.debug('OK') - except Exception as e: - message = ('Retrieve domain list as admin with project scoped ' - 'token FAILED. ({})'.format(e)) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_138_service_catalog(self): - """Verify that the service catalog endpoint data is valid.""" - u.log.debug('Checking keystone service catalog...') - self.set_api_version(2) - endpoint_check = { - 'adminURL': u.valid_url, - 'id': u.not_null, - 'region': 'RegionOne', - 'publicURL': u.valid_url, - 'internalURL': u.valid_url - } - expected = { - 'volume': [endpoint_check], - 'identity': [endpoint_check] - } - if self._get_openstack_release() >= self.xenial_pike: - expected.pop('volume') - expected['volumev2'] = [endpoint_check] - actual = self.keystone_v2.service_catalog.get_endpoints() - - ret = u.validate_svc_catalog_endpoint_data(expected, actual) - if ret: - amulet.raise_status(amulet.FAIL, msg=ret) - - def test_140_keystone_endpoint(self): - """Verify the keystone endpoint data.""" - u.log.debug('Checking keystone api endpoint data...') - endpoints = self.keystone_v2.endpoints.list() - admin_port = '35357' - internal_port = public_port = '5000' - expected = { - 'id': u.not_null, - 'region': 'RegionOne', - 'adminurl': u.valid_url, - 'internalurl': u.valid_url, - 'publicurl': u.valid_url, - 'service_id': u.not_null - } - ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, - public_port, expected) - if ret: - amulet.raise_status(amulet.FAIL, - msg='keystone endpoint: {}'.format(ret)) - - def test_142_cinder_endpoint(self): - """Verify the cinder endpoint data.""" - u.log.debug('Checking cinder endpoint...') - endpoints = self.keystone_v2.endpoints.list() - admin_port = internal_port = public_port = '8776' - expected = { - 'id': u.not_null, - 'region': 'RegionOne', - 'adminurl': u.valid_url, - 'internalurl': u.valid_url, - 'publicurl': u.valid_url, - 'service_id': u.not_null - } - - ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, - public_port, expected) - if ret: - amulet.raise_status(amulet.FAIL, - msg='cinder endpoint: {}'.format(ret)) - - def test_200_keystone_mysql_shared_db_relation(self): - """Verify the keystone shared-db relation data""" - u.log.debug('Checking keystone to 
mysql db relation data...') - relation = ['shared-db', 'percona-cluster:shared-db'] - expected = { - 'username': 'keystone', - 'private-address': u.valid_ip, - 'hostname': u.valid_ip, - 'database': 'keystone' - } - for unit in self.keystone_sentries: - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('keystone shared-db', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_201_mysql_keystone_shared_db_relation(self): - """Verify the mysql shared-db relation data""" - u.log.debug('Checking mysql to keystone db relation data...') - unit = self.pxc_sentry - relation = ['shared-db', 'keystone:shared-db'] - expected_data = { - 'private-address': u.valid_ip, - 'password': u.not_null, - 'db_host': u.valid_ip - } - ret = u.validate_relation_data(unit, relation, expected_data) - if ret: - message = u.relation_error('mysql shared-db', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_202_keystone_cinder_identity_service_relation(self): - """Verify the keystone identity-service relation data""" - u.log.debug('Checking keystone to cinder id relation data...') - relation = ['identity-service', 'cinder:identity-service'] - expected = { - 'service_protocol': 'http', - 'service_tenant': 'services', - 'admin_token': 'ubuntutesting', - 'service_password': u.not_null, - 'service_port': '5000', - 'auth_port': '35357', - 'auth_protocol': 'http', - 'private-address': u.valid_ip, - 'auth_host': u.valid_ip, - 'service_username': 'cinder_cinderv2', - 'service_tenant_id': u.not_null, - 'service_host': u.valid_ip - } - if self._get_openstack_release() >= self.xenial_pike: - expected['service_username'] = 'cinderv3_cinderv2' - for unit in self.keystone_sentries: - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('keystone identity-service', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_203_cinder_keystone_identity_service_relation(self): - """Verify the cinder identity-service relation data""" - u.log.debug('Checking cinder to keystone id relation data...') - unit = self.cinder_sentry - relation = ['identity-service', 'keystone:identity-service'] - expected = { - 'cinder_service': 'cinder', - 'cinder_region': 'RegionOne', - 'cinder_public_url': u.valid_url, - 'cinder_internal_url': u.valid_url, - 'cinder_admin_url': u.valid_url, - 'cinderv2_service': 'cinderv2', - 'cinderv2_region': 'RegionOne', - 'cinderv2_public_url': u.valid_url, - 'cinderv2_internal_url': u.valid_url, - 'cinderv2_admin_url': u.valid_url, - 'private-address': u.valid_ip, - } - - if self._get_openstack_release() >= self.xenial_pike: - expected.pop('cinder_region') - expected.pop('cinder_service') - expected.pop('cinder_public_url') - expected.pop('cinder_admin_url') - expected.pop('cinder_internal_url') - expected.update({ - 'cinderv2_region': 'RegionOne', - 'cinderv3_region': 'RegionOne', - 'cinderv3_service': 'cinderv3', - 'cinderv3_region': 'RegionOne', - 'cinderv3_public_url': u.valid_url, - 'cinderv3_internal_url': u.valid_url, - 'cinderv3_admin_url': u.valid_url}) - - ret = u.validate_relation_data(unit, relation, expected) - if ret: - message = u.relation_error('cinder identity-service', ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_300_keystone_default_config(self): - """Verify the data in the keystone config file, - comparing some of the variables vs relation data.""" - u.log.debug('Checking keystone config file...') - ks_ci_rel = self.keystone_sentries[0].relation( - 
'identity-service', - 'cinder:identity-service') - my_ks_rel = self.pxc_sentry.relation('shared-db', - 'keystone:shared-db') - db_uri = "mysql{}://{}:{}@{}/{}".format(self.pymysql, - 'keystone', - my_ks_rel['password'], - my_ks_rel['db_host'], - 'keystone') - expected = { - 'DEFAULT': { - 'debug': 'False', - 'admin_token': ks_ci_rel['admin_token'], - 'use_syslog': 'False', - 'log_config_append': (self.logging_config), - 'public_endpoint': u.valid_url, # get specific - 'admin_endpoint': u.valid_url, # get specific - }, - 'extra_headers': { - 'Distribution': 'Ubuntu' - }, - 'database': { - 'connection': db_uri, - 'idle_timeout': '200' - } - } - - if self._get_openstack_release() < self.trusty_mitaka: - expected['DEFAULT']['verbose'] = 'False' - expected['DEFAULT']['log_config'] = \ - expected['DEFAULT']['log_config_append'] - del expected['DEFAULT']['log_config_append'] - - if self._get_openstack_release() >= self.trusty_kilo and \ - self._get_openstack_release() < self.trusty_mitaka: - # Kilo and Liberty - expected['eventlet_server'] = { - 'admin_bind_host': '0.0.0.0', - 'public_bind_host': '0.0.0.0', - 'admin_port': '35347', - 'public_port': '4990', - } - elif self._get_openstack_release() <= self.trusty_icehouse: - # Juno and earlier - expected['DEFAULT'].update({ - 'admin_port': '35347', - 'public_port': '4990', - 'bind_host': '0.0.0.0', - }) - - for unit in self.keystone_sentries: - for section, pairs in expected.iteritems(): - ret = u.validate_config_data(unit, self.keystone_conf, section, - pairs) - if ret: - message = "keystone config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_301_keystone_default_policy(self): - """Verify the data in the keystone policy.json file, - comparing some of the variables vs relation data.""" - if not self.is_liberty_or_newer(): - return - u.log.debug('Checking keystone v3 policy.json file') - self.set_api_version(3) - ks_ci_rel = self.keystone_sentries[0].relation( - 'identity-service', - 'cinder:identity-service') - if self._get_openstack_release() >= self.xenial_ocata: - expected = { - 'admin_required': 'role:Admin', - 'cloud_admin': - 'rule:admin_required and ' - '(is_admin_project:True or ' - 'domain_id:{admin_domain_id} or ' - 'project_id:{service_tenant_id})'.format( - admin_domain_id=ks_ci_rel['admin_domain_id'], - service_tenant_id=ks_ci_rel['service_tenant_id']), - } - elif self._get_openstack_release() >= self.trusty_mitaka: - expected = { - 'admin_required': 'role:Admin', - 'cloud_admin': - 'rule:admin_required and ' - '(token.is_admin_project:True or ' - 'domain_id:{admin_domain_id} or ' - 'project_id:{service_tenant_id})'.format( - admin_domain_id=ks_ci_rel['admin_domain_id'], - service_tenant_id=ks_ci_rel['service_tenant_id']), - } - else: - expected = { - 'admin_required': 'role:Admin', - 'cloud_admin': - 'rule:admin_required and ' - 'domain_id:{admin_domain_id}'.format( - admin_domain_id=ks_ci_rel['admin_domain_id']), - } - - for unit in self.keystone_sentries: - data = json.loads(unit.file_contents(self.policy_json)) - ret = u._validate_dict_data(expected, data) - if ret: - message = "keystone policy.json error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - u.log.debug('OK') - - def test_302_keystone_logging_config(self): - """Verify the data in the keystone logging config file""" - u.log.debug('Checking keystone config file...') - expected = { - 'logger_root': { - 'level': 'WARNING', - 'handlers': 'file,production', - }, - 'handlers': { - 'keys': 'production,file,devel' - }, - 
'handler_file': { - 'level': 'DEBUG', - 'args': "('{}', 'a')".format(self.log_file) - } - } - - for unit in self.keystone_sentries: - for section, pairs in expected.iteritems(): - ret = u.validate_config_data(unit, self.logging_config, - section, pairs) - if ret: - message = "keystone logging config error: {}".format(ret) - amulet.raise_status(amulet.FAIL, msg=message) - - def test_900_keystone_restart_on_config_change(self): - """Verify that the specified services are restarted when the config - is changed.""" - sentry = self.keystone_sentries[0] - juju_service = 'keystone' - - # Expected default and alternate values - set_default = {'use-syslog': 'False'} - set_alternate = {'use-syslog': 'True'} - - # Make config change, check for service restarts - u.log.debug('Making config change on {}...'.format(juju_service)) - mtime = u.get_sentry_time(sentry) - self.d.configure(juju_service, set_alternate) - - sleep_time = 30 - for s, conf_file in self.services_to_configs.iteritems(): - u.log.debug("Checking that service restarted: {}".format(s)) - if not u.validate_service_config_changed(sentry, mtime, s, - conf_file, - sleep_time=sleep_time): - - self.d.configure(juju_service, set_default) - msg = "service {} didn't restart after config change".format(s) - amulet.raise_status(amulet.FAIL, msg=msg) - - self.d.configure(juju_service, set_default) - - u.log.debug('OK') - - def test_901_pause_resume(self): - """Test pause and resume actions. - - NOTE: Toggle setting when service is paused to check config-changed - hook respects pause Bug #1648016 - """ - # Expected default and alternate values - set_default = {'use-syslog': 'False'} - set_alternate = {'use-syslog': 'True'} - self._assert_services(should_run=True) - for unit in self.keystone_sentries: - action_id = u.run_action(unit, "pause") - assert u.wait_on_action(action_id), "Pause action failed." - - self._assert_services(should_run=False) - self.d.configure('keystone', set_alternate) - for unit in self.keystone_sentries: - action_id = u.run_action(unit, "resume") - assert u.wait_on_action(action_id), "Resume action failed" - self._assert_services(should_run=True) - self.d.configure('keystone', set_default) - self._auto_wait_for_status(message="Unit is ready", - include_only=['keystone']) - - def test_910_test_user_password_reset(self): - """Test that the admin v3 users password is set during - shared-db-relation-changed. Bug #1644606 - - NOTE: The amulet tests setup v2 and v3 credentials which means - that the troublesome update_user_password executes cleanly but - updates the v2 admin user in error. So, to catch this bug change - the admin password and ensure that it is changed back when - shared-db-relation-changed hook runs. - """ - # NOTE(dosaboy): skipping this test so that we can land fix for - # LP: #1648677. Currently, if the admin password is - # changed outside the charm e.g. cli, the charm has no - # way to detect or retreive that password. The user - # would not need to update the admin-password config - # option to fix this. 
- return - - if self.is_mitaka_or_newer(): - timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900)) - self.set_api_version(3) - self._auto_wait_for_status( - message="Unit is ready", - timeout=timeout, - include_only=['keystone']) - domain = self.keystone_v3.domains.find(name='admin_domain') - v3_admin_user = self.keystone_v3.users.list(domain=domain)[0] - u.log.debug(v3_admin_user) - self.keystone_v3.users.update(user=v3_admin_user, - password='wrongpass') - u.log.debug('Removing keystone percona-cluster relation') - self.d.unrelate('keystone:shared-db', 'percona-cluster:shared-db') - self.d.sentry.wait(timeout=timeout) - u.log.debug('Adding keystone percona-cluster relation') - self.d.sentry.wait(timeout=timeout) - self.d.relate('keystone:shared-db', 'percona-cluster:shared-db') - self.set_api_version(3) - self._auto_wait_for_status( - message="Unit is ready", - timeout=timeout, - include_only=['keystone']) - re_auth = u.authenticate_keystone_admin( - self.keystone_sentries[0], - user='admin', - password='openstack', - api_version=3, - keystone_ip=self.keystone_ip) - try: - re_auth.users.list() - except ksauth1_exceptions.http.Unauthorized: - amulet.raise_status( - amulet.FAIL, - msg="Admin user password not reset") - u.log.debug('OK') diff --git a/tests/bundles/bionic-queens.yaml b/tests/bundles/bionic-queens.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5555c9e6909c66a0a6db8c7d9eb427c48a8793b8 --- /dev/null +++ b/tests/bundles/bionic-queens.yaml @@ -0,0 +1,39 @@ +series: bionic +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + '2': + '3': + '4': +relations: +- - keystone:shared-db + - mysql:shared-db +- - glance:shared-db + - mysql:shared-db +- - glance:identity-service + - keystone:identity-service +applications: + mysql: + charm: cs:percona-cluster + num_units: 1 + to: + - '0' + keystone: + series: bionic + charm: ../../../keystone + num_units: 3 + options: + token-provider: 'fernet' + token-expiration: 60 + to: + - '1' + - '2' + - '3' + glance: + charm: cs:glance + num_units: 1 + to: + - '4' diff --git a/tests/bundles/bionic-rocky.yaml b/tests/bundles/bionic-rocky.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ed39520d68bd3506ed79b5f438d31e3e44fc325 --- /dev/null +++ b/tests/bundles/bionic-rocky.yaml @@ -0,0 +1,41 @@ +series: bionic +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + '2': + '3': + '4': +relations: +- - keystone:shared-db + - mysql:shared-db +- - glance:shared-db + - mysql:shared-db +- - glance:identity-service + - keystone:identity-service +applications: + mysql: + charm: cs:percona-cluster + num_units: 1 + to: + - '0' + keystone: + series: bionic + charm: ../../../keystone + num_units: 3 + options: + openstack-origin: cloud:bionic-rocky + token-expiration: 60 + to: + - '1' + - '2' + - '3' + glance: + charm: cs:glance + num_units: 1 + options: + openstack-origin: cloud:bionic-rocky + to: + - '4' diff --git a/tests/bundles/bionic-stein.yaml b/tests/bundles/bionic-stein.yaml new file mode 100644 index 0000000000000000000000000000000000000000..595882e4943c33653e44fe67d6665805df4f8ca3 --- /dev/null +++ b/tests/bundles/bionic-stein.yaml @@ -0,0 +1,41 @@ +series: bionic +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + '2': + '3': + '4': +relations: +- - keystone:shared-db + - mysql:shared-db +- - glance:shared-db + - mysql:shared-db +- - glance:identity-service + - keystone:identity-service +applications: + mysql: + charm: cs:percona-cluster + num_units: 1 + to: + - '0' + keystone: + series: bionic + charm: ../../../keystone + num_units: 3 + options: + openstack-origin: cloud:bionic-stein + token-expiration: 60 + to: + - '1' + - '2' + - '3' + glance: + charm: cs:glance + num_units: 1 + options: + openstack-origin: cloud:bionic-stein + to: + - '4' diff --git a/tests/bundles/bionic-train.yaml b/tests/bundles/bionic-train.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f892df0a91f7b7becd225ca9d810bc58b7f7b57e --- /dev/null +++ b/tests/bundles/bionic-train.yaml @@ -0,0 +1,41 @@ +series: bionic +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + '2': + '3': + '4': +relations: +- - keystone:shared-db + - mysql:shared-db +- - glance:shared-db + - mysql:shared-db +- - glance:identity-service + - keystone:identity-service +applications: + mysql: + charm: cs:percona-cluster + num_units: 1 + to: + - '0' + keystone: + series: bionic + charm: ../../../keystone + num_units: 3 + options: + openstack-origin: cloud:bionic-train + token-expiration: 60 + to: + - '1' + - '2' + - '3' + glance: + charm: cs:glance + num_units: 1 + options: + openstack-origin: cloud:bionic-train + to: + - '4' diff --git a/tests/bundles/disco-stein.yaml b/tests/bundles/disco-stein.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e7337f79f9aece91aefea347b024739b10e28f37 --- /dev/null +++ b/tests/bundles/disco-stein.yaml @@ -0,0 +1,38 @@ +series: disco +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + '2': + '3': + '4': +relations: +- - keystone:shared-db + - mysql:shared-db +- - glance:shared-db + - mysql:shared-db +- - glance:identity-service + - keystone:identity-service +applications: + mysql: + charm: cs:percona-cluster + num_units: 1 + to: + - '0' + keystone: + series: disco + charm: ../../../keystone + num_units: 3 + options: + token-expiration: 60 + to: + - '1' + - '2' + - '3' + glance: + charm: cs:glance + num_units: 1 + to: + - '4' diff --git a/tests/bundles/overlays/local-charm-overlay.yaml.j2 b/tests/bundles/overlays/local-charm-overlay.yaml.j2 new file mode 100644 index 0000000000000000000000000000000000000000..3aeba971472b094bab6be87f0a63a858af00a241 --- /dev/null +++ b/tests/bundles/overlays/local-charm-overlay.yaml.j2 @@ -0,0 +1,11 @@ +# Add True HA +applications: + keystone: + options: + vip: '{{ OS_VIP00 }}' + hacluster: + charm: cs:hacluster + num_units: 0 +relations: +- - keystone + - hacluster diff --git a/tests/bundles/trusty-mitaka.yaml b/tests/bundles/trusty-mitaka.yaml new file mode 100644 index 0000000000000000000000000000000000000000..99cb7bb41b7e3b2fc07a6b80eecbca2c0e9dbf28 --- /dev/null +++ b/tests/bundles/trusty-mitaka.yaml @@ -0,0 +1,45 @@ +series: trusty +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +- 'virt-type=kvm is workaround while awaiting new release of python-libjuju' +machines: + '0': + constraints: virt-type=kvm mem=3072M + '1': + constraints: virt-type=kvm + '2': + constraints: virt-type=kvm + '3': + constraints: virt-type=kvm + '4': + constraints: virt-type=kvm +relations: +- - keystone:shared-db + - mysql:shared-db +- - glance:shared-db + - mysql:shared-db +- - glance:identity-service + - keystone:identity-service +applications: + mysql: + charm: cs:trusty/percona-cluster + num_units: 1 + to: + - '0' + keystone: + series: trusty + charm: ../../../keystone + num_units: 3 + options: + openstack-origin: cloud:trusty-mitaka + to: + - '1' + - '2' + - '3' + glance: + charm: cs:glance + num_units: 1 + options: + openstack-origin: cloud:trusty-mitaka + to: + - '4' diff --git a/tests/bundles/xenial-mitaka.yaml b/tests/bundles/xenial-mitaka.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3152451532768ae59f190a6d457e4f41c3e0409f --- /dev/null +++ b/tests/bundles/xenial-mitaka.yaml @@ -0,0 +1,36 @@ +series: xenial +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + '2': + '3': + '4': +relations: +- - keystone:shared-db + - mysql:shared-db +- - glance:shared-db + - mysql:shared-db +- - glance:identity-service + - keystone:identity-service +applications: + mysql: + charm: cs:percona-cluster + num_units: 1 + to: + - '0' + keystone: + series: xenial + charm: ../../../keystone + num_units: 3 + to: + - '1' + - '2' + - '3' + glance: + charm: cs:glance + num_units: 1 + to: + - '4' diff --git a/tests/bundles/xenial-ocata.yaml b/tests/bundles/xenial-ocata.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5d0ce45a45dc788d65d96985d5cd658d06b21d6f --- /dev/null +++ b/tests/bundles/xenial-ocata.yaml @@ -0,0 +1,42 @@ +series: xenial +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + '2': + '3': + '4': +relations: +- - keystone:shared-db + - mysql:shared-db +- - glance:shared-db + - mysql:shared-db +- - glance:identity-service + - keystone:identity-service +applications: + mysql: + charm: cs:percona-cluster + num_units: 1 + to: + - '0' + keystone: + series: xenial + charm: ../../../keystone + num_units: 3 + options: + openstack-origin: cloud:xenial-ocata + token-provider: 'fernet' + token-expiration: 60 + to: + - '1' + - '2' + - '3' + glance: + charm: cs:glance + num_units: 1 + options: + openstack-origin: cloud:xenial-ocata + to: + - '4' diff --git a/tests/bundles/xenial-pike.yaml b/tests/bundles/xenial-pike.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9dc4822c8f7795878697fcbe88fdd3f03a9294d6 --- /dev/null +++ b/tests/bundles/xenial-pike.yaml @@ -0,0 +1,42 @@ +series: xenial +comment: +- 'machines section to decide order of deployment. 
database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + '2': + '3': + '4': +relations: +- - keystone:shared-db + - mysql:shared-db +- - glance:shared-db + - mysql:shared-db +- - glance:identity-service + - keystone:identity-service +applications: + mysql: + charm: cs:percona-cluster + num_units: 1 + to: + - '0' + keystone: + series: xenial + charm: ../../../keystone + num_units: 3 + options: + openstack-origin: cloud:xenial-pike + token-provider: 'fernet' + token-expiration: 60 + to: + - '1' + - '2' + - '3' + glance: + charm: cs:glance + num_units: 1 + options: + openstack-origin: cloud:xenial-pike + to: + - '4' diff --git a/tests/bundles/xenial-queens.yaml b/tests/bundles/xenial-queens.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4060a3d00569bb41eeba9143b2b3b73ccf3148ac --- /dev/null +++ b/tests/bundles/xenial-queens.yaml @@ -0,0 +1,42 @@ +series: xenial +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + '2': + '3': + '4': +relations: +- - keystone:shared-db + - mysql:shared-db +- - glance:shared-db + - mysql:shared-db +- - glance:identity-service + - keystone:identity-service +applications: + mysql: + charm: cs:percona-cluster + num_units: 1 + to: + - '0' + keystone: + series: xenial + charm: ../../../keystone + num_units: 3 + options: + openstack-origin: cloud:xenial-queens + token-provider: 'fernet' + token-expiration: 60 + to: + - '1' + - '2' + - '3' + glance: + charm: cs:glance + num_units: 1 + options: + openstack-origin: cloud:xenial-queens + to: + - '4' diff --git a/tests/charmhelpers/__init__.py b/tests/charmhelpers/__init__.py deleted file mode 100644 index e7aa471541a8a5871df11684ca8c579a30203a80..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/__init__.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Bootstrap charm-helpers, installing its dependencies if necessary using -# only standard libraries. -from __future__ import print_function -from __future__ import absolute_import - -import functools -import inspect -import subprocess -import sys - -try: - import six # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa - -try: - import yaml # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa - - -# Holds a list of mapping of mangled function names that have been deprecated -# using the @deprecate decorator below. This is so that the warning is only -# printed once for each usage of the function. 
-__deprecated_functions = {} - - -def deprecate(warning, date=None, log=None): - """Add a deprecation warning the first time the function is used. - The date, which is a string in semi-ISO8660 format indicate the year-month - that the function is officially going to be removed. - - usage: - - @deprecate('use core/fetch/add_source() instead', '2017-04') - def contributed_add_source_thing(...): - ... - - And it then prints to the log ONCE that the function is deprecated. - The reason for passing the logging function (log) is so that hookenv.log - can be used for a charm if needed. - - :param warning: String to indicat where it has moved ot. - :param date: optional sting, in YYYY-MM format to indicate when the - function will definitely (probably) be removed. - :param log: The log function to call to log. If not, logs to stdout - """ - def wrap(f): - - @functools.wraps(f) - def wrapped_f(*args, **kwargs): - try: - module = inspect.getmodule(f) - file = inspect.getsourcefile(f) - lines = inspect.getsourcelines(f) - f_name = "{}-{}-{}..{}-{}".format( - module.__name__, file, lines[0], lines[-1], f.__name__) - except (IOError, TypeError): - # assume it was local, so just use the name of the function - f_name = f.__name__ - if f_name not in __deprecated_functions: - __deprecated_functions[f_name] = True - s = "DEPRECATION WARNING: Function {} is being removed".format( - f.__name__) - if date: - s = "{} on/around {}".format(s, date) - if warning: - s = "{} : {}".format(s, warning) - if log: - log(s) - else: - print(s) - return f(*args, **kwargs) - return wrapped_f - return wrap diff --git a/tests/charmhelpers/contrib/__init__.py b/tests/charmhelpers/contrib/__init__.py deleted file mode 100644 index d7567b863e3a5ad2b7a7f44958b4166e0c3d346b..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/contrib/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/tests/charmhelpers/contrib/amulet/__init__.py b/tests/charmhelpers/contrib/amulet/__init__.py deleted file mode 100644 index d7567b863e3a5ad2b7a7f44958b4166e0c3d346b..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/contrib/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
diff --git a/tests/charmhelpers/contrib/amulet/deployment.py b/tests/charmhelpers/contrib/amulet/deployment.py deleted file mode 100644 index 9c65518e1c4c6ff6f508ff7e046ce2b91f961f4c..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/contrib/amulet/deployment.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import os -import six - - -class AmuletDeployment(object): - """Amulet deployment. - - This class provides generic Amulet deployment and test runner - methods. - """ - - def __init__(self, series=None): - """Initialize the deployment environment.""" - self.series = None - - if series: - self.series = series - self.d = amulet.Deployment(series=self.series) - else: - self.d = amulet.Deployment() - - def _add_services(self, this_service, other_services): - """Add services. - - Add services to the deployment where this_service is the local charm - that we're testing and other_services are the other services that - are being used in the local amulet tests. - """ - if this_service['name'] != os.path.basename(os.getcwd()): - s = this_service['name'] - msg = "The charm's root directory name needs to be {}".format(s) - amulet.raise_status(amulet.FAIL, msg=msg) - - if 'units' not in this_service: - this_service['units'] = 1 - - self.d.add(this_service['name'], units=this_service['units'], - constraints=this_service.get('constraints')) - - for svc in other_services: - if 'location' in svc: - branch_location = svc['location'] - elif self.series: - branch_location = 'cs:{}/{}'.format(self.series, svc['name']), - else: - branch_location = None - - if 'units' not in svc: - svc['units'] = 1 - - self.d.add(svc['name'], charm=branch_location, units=svc['units'], - constraints=svc.get('constraints')) - - def _add_relations(self, relations): - """Add all of the relations for the services.""" - for k, v in six.iteritems(relations): - self.d.relate(k, v) - - def _configure_services(self, configs): - """Configure all of the services.""" - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _deploy(self): - """Deploy environment and wait for all hooks to finish executing.""" - timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900)) - try: - self.d.setup(timeout=timeout) - self.d.sentry.wait(timeout=timeout) - except amulet.helpers.TimeoutError: - amulet.raise_status( - amulet.FAIL, - msg="Deployment timed out ({}s)".format(timeout) - ) - except Exception: - raise - - def run_tests(self): - """Run all of the methods that are prefixed with 'test_'.""" - for test in dir(self): - if test.startswith('test_'): - getattr(self, test)() diff --git a/tests/charmhelpers/contrib/amulet/utils.py b/tests/charmhelpers/contrib/amulet/utils.py deleted file mode 100644 index 8a6b7644b4fcb7ea35201552f86ef3b86177c8c4..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/contrib/amulet/utils.py +++ /dev/null @@ -1,821 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import json -import logging -import os -import re -import socket -import subprocess -import sys -import time -import uuid - -import amulet -import distro_info -import six -from six.moves import configparser -if six.PY3: - from urllib import parse as urlparse -else: - import urlparse - - -class AmuletUtils(object): - """Amulet utilities. - - This class provides common utility functions that are used by Amulet - tests. - """ - - def __init__(self, log_level=logging.ERROR): - self.log = self.get_logger(level=log_level) - self.ubuntu_releases = self.get_ubuntu_releases() - - def get_logger(self, name="amulet-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def valid_ip(self, ip): - if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): - return True - else: - return False - - def valid_url(self, url): - p = re.compile( - r'^(?:http|ftp)s?://' - r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa - r'localhost|' - r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' - r'(?::\d+)?' - r'(?:/?|[/?]\S+)$', - re.IGNORECASE) - if p.match(url): - return True - else: - return False - - def get_ubuntu_release_from_sentry(self, sentry_unit): - """Get Ubuntu release codename from sentry unit. - - :param sentry_unit: amulet sentry/service unit pointer - :returns: list of strings - release codename, failure message - """ - msg = None - cmd = 'lsb_release -cs' - release, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} lsb_release: {}'.format( - sentry_unit.info['unit_name'], release)) - else: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, release, code)) - if release not in self.ubuntu_releases: - msg = ("Release ({}) not found in Ubuntu releases " - "({})".format(release, self.ubuntu_releases)) - return release, msg - - def validate_services(self, commands): - """Validate that lists of commands succeed on service units. Can be - used to verify system services are running on the corresponding - service units. - - :param commands: dict with sentry keys and arbitrary command list vals - :returns: None if successful, Failure string message otherwise - """ - self.log.debug('Checking status of system services...') - - # /!\ DEPRECATION WARNING (beisner): - # New and existing tests should be rewritten to use - # validate_services_by_name() as it is aware of init systems. 
- self.log.warn('DEPRECATION WARNING: use ' - 'validate_services_by_name instead of validate_services ' - 'due to init system differences.') - - for k, v in six.iteritems(commands): - for cmd in v: - output, code = k.run(cmd) - self.log.debug('{} `{}` returned ' - '{}'.format(k.info['unit_name'], - cmd, code)) - if code != 0: - return "command `{}` returned {}".format(cmd, str(code)) - return None - - def validate_services_by_name(self, sentry_services): - """Validate system service status by service name, automatically - detecting init system based on Ubuntu release codename. - - :param sentry_services: dict with sentry keys and svc list values - :returns: None if successful, Failure string message otherwise - """ - self.log.debug('Checking status of system services...') - - # Point at which systemd became a thing - systemd_switch = self.ubuntu_releases.index('vivid') - - for sentry_unit, services_list in six.iteritems(sentry_services): - # Get lsb_release codename from unit - release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) - if ret: - return ret - - for service_name in services_list: - if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name in ['rabbitmq-server', 'apache2', - 'memcached']): - # init is systemd (or regular sysv) - cmd = 'sudo service {} status'.format(service_name) - output, code = sentry_unit.run(cmd) - service_running = code == 0 - elif self.ubuntu_releases.index(release) < systemd_switch: - # init is upstart - cmd = 'sudo status {}'.format(service_name) - output, code = sentry_unit.run(cmd) - service_running = code == 0 and "start/running" in output - - self.log.debug('{} `{}` returned ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code)) - if not service_running: - return u"command `{}` returned {} {}".format( - cmd, output, str(code)) - return None - - def _get_config(self, unit, filename): - """Get a ConfigParser object for parsing a unit's config file.""" - file_contents = unit.file_contents(filename) - - # NOTE(beisner): by default, ConfigParser does not handle options - # with no value, such as the flags used in the mysql my.cnf file. - # https://bugs.python.org/issue7005 - config = configparser.ConfigParser(allow_no_value=True) - config.readfp(io.StringIO(file_contents)) - return config - - def validate_config_data(self, sentry_unit, config_file, section, - expected): - """Validate config file data. - - Verify that the specified section of the config file contains - the expected option key:value pairs. - - Compare expected dictionary data vs actual dictionary data. - The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluates a variable and returns a - bool. 
- """ - self.log.debug('Validating config file data ({} in {} on {})' - '...'.format(section, config_file, - sentry_unit.info['unit_name'])) - config = self._get_config(sentry_unit, config_file) - - if section != 'DEFAULT' and not config.has_section(section): - return "section [{}] does not exist".format(section) - - for k in expected.keys(): - if not config.has_option(section, k): - return "section [{}] is missing option {}".format(section, k) - - actual = config.get(section, k) - v = expected[k] - if (isinstance(v, six.string_types) or - isinstance(v, bool) or - isinstance(v, six.integer_types)): - # handle explicit values - if actual != v: - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) - # handle function pointers, such as not_null or valid_ip - elif not v(actual): - return "section [{}] {}:{} != expected {}:{}".format( - section, k, actual, k, expected[k]) - return None - - def _validate_dict_data(self, expected, actual): - """Validate dictionary data. - - Compare expected dictionary data vs actual dictionary data. - The values in the 'expected' dictionary can be strings, bools, ints, - longs, or can be a function that evaluates a variable and returns a - bool. - """ - self.log.debug('actual: {}'.format(repr(actual))) - self.log.debug('expected: {}'.format(repr(expected))) - - for k, v in six.iteritems(expected): - if k in actual: - if (isinstance(v, six.string_types) or - isinstance(v, bool) or - isinstance(v, six.integer_types)): - # handle explicit values - if v != actual[k]: - return "{}:{}".format(k, actual[k]) - # handle function pointers, such as not_null or valid_ip - elif not v(actual[k]): - return "{}:{}".format(k, actual[k]) - else: - return "key '{}' does not exist".format(k) - return None - - def validate_relation_data(self, sentry_unit, relation, expected): - """Validate actual relation data based on expected relation data.""" - actual = sentry_unit.relation(relation[0], relation[1]) - return self._validate_dict_data(expected, actual) - - def _validate_list_data(self, expected, actual): - """Compare expected list vs actual list data.""" - for e in expected: - if e not in actual: - return "expected item {} not found in actual list".format(e) - return None - - def not_null(self, string): - if string is not None: - return True - else: - return False - - def _get_file_mtime(self, sentry_unit, filename): - """Get last modification time of file.""" - return sentry_unit.file_stat(filename)['mtime'] - - def _get_dir_mtime(self, sentry_unit, directory): - """Get last modification time of directory.""" - return sentry_unit.directory_stat(directory)['mtime'] - - def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): - """Get start time of a process based on the last modification time - of the /proc/pid directory. - - :sentry_unit: The sentry unit to check for the service on - :service: service name to look for in process table - :pgrep_full: [Deprecated] Use full command line search mode with pgrep - :returns: epoch time of service process start - :param commands: list of bash commands - :param sentry_units: list of sentry unit pointers - :returns: None if successful; Failure message otherwise - """ - if pgrep_full is not None: - # /!\ DEPRECATION WARNING (beisner): - # No longer implemented, as pidof is now used instead of pgrep. 
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' - 'longer implemented re: lp 1474030.') - - pid_list = self.get_process_id_list(sentry_unit, service) - pid = pid_list[0] - proc_dir = '/proc/{}'.format(pid) - self.log.debug('Pid for {} on {}: {}'.format( - service, sentry_unit.info['unit_name'], pid)) - - return self._get_dir_mtime(sentry_unit, proc_dir) - - def service_restarted(self, sentry_unit, service, filename, - pgrep_full=None, sleep_time=20): - """Check if service was restarted. - - Compare a service's start time vs a file's last modification time - (such as a config file for that service) to determine if the service - has been restarted. - """ - # /!\ DEPRECATION WARNING (beisner): - # This method is prone to races in that no before-time is known. - # Use validate_service_config_changed instead. - - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. lp1474030 - self.log.warn('DEPRECATION WARNING: use ' - 'validate_service_config_changed instead of ' - 'service_restarted due to known races.') - - time.sleep(sleep_time) - if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= - self._get_file_mtime(sentry_unit, filename)): - return True - else: - return False - - def service_restarted_since(self, sentry_unit, mtime, service, - pgrep_full=None, sleep_time=20, - retry_count=30, retry_sleep_time=10): - """Check if service was been started after a given time. - - Args: - sentry_unit (sentry): The sentry unit to check for the service on - mtime (float): The epoch time to check against - service (string): service name to look for in process table - pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Initial sleep time (s) before looking for file - retry_sleep_time (int): Time (s) to sleep between retries - retry_count (int): If file is not found, how many times to retry - - Returns: - bool: True if service found and its start time it newer than mtime, - False if service is older than mtime or if service was - not found. - """ - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. lp1474030 - - unit_name = sentry_unit.info['unit_name'] - self.log.debug('Checking that %s service restarted since %s on ' - '%s' % (service, mtime, unit_name)) - time.sleep(sleep_time) - proc_start_time = None - tries = 0 - while tries <= retry_count and not proc_start_time: - try: - proc_start_time = self._get_proc_start_time(sentry_unit, - service, - pgrep_full) - self.log.debug('Attempt {} to get {} proc start time on {} ' - 'OK'.format(tries, service, unit_name)) - except IOError as e: - # NOTE(beisner) - race avoidance, proc may not exist yet. 
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.debug('Attempt {} to get {} proc start time on {} ' - 'failed\n{}'.format(tries, service, - unit_name, e)) - time.sleep(retry_sleep_time) - tries += 1 - - if not proc_start_time: - self.log.warn('No proc start time found, assuming service did ' - 'not start') - return False - if proc_start_time >= mtime: - self.log.debug('Proc start time is newer than provided mtime' - '(%s >= %s) on %s (OK)' % (proc_start_time, - mtime, unit_name)) - return True - else: - self.log.warn('Proc start time (%s) is older than provided mtime ' - '(%s) on %s, service did not ' - 'restart' % (proc_start_time, mtime, unit_name)) - return False - - def config_updated_since(self, sentry_unit, filename, mtime, - sleep_time=20, retry_count=30, - retry_sleep_time=10): - """Check if file was modified after a given time. - - Args: - sentry_unit (sentry): The sentry unit to check the file mtime on - filename (string): The file to check mtime of - mtime (float): The epoch time to check against - sleep_time (int): Initial sleep time (s) before looking for file - retry_sleep_time (int): Time (s) to sleep between retries - retry_count (int): If file is not found, how many times to retry - - Returns: - bool: True if file was modified more recently than mtime, False if - file was modified before mtime, or if file not found. - """ - unit_name = sentry_unit.info['unit_name'] - self.log.debug('Checking that %s updated since %s on ' - '%s' % (filename, mtime, unit_name)) - time.sleep(sleep_time) - file_mtime = None - tries = 0 - while tries <= retry_count and not file_mtime: - try: - file_mtime = self._get_file_mtime(sentry_unit, filename) - self.log.debug('Attempt {} to get {} file mtime on {} ' - 'OK'.format(tries, filename, unit_name)) - except IOError as e: - # NOTE(beisner) - race avoidance, file may not exist yet. - # https://bugs.launchpad.net/charm-helpers/+bug/1474030 - self.log.debug('Attempt {} to get {} file mtime on {} ' - 'failed\n{}'.format(tries, filename, - unit_name, e)) - time.sleep(retry_sleep_time) - tries += 1 - - if not file_mtime: - self.log.warn('Could not determine file mtime, assuming ' - 'file does not exist') - return False - - if file_mtime >= mtime: - self.log.debug('File mtime is newer than provided mtime ' - '(%s >= %s) on %s (OK)' % (file_mtime, - mtime, unit_name)) - return True - else: - self.log.warn('File mtime is older than provided mtime' - '(%s < on %s) on %s' % (file_mtime, - mtime, unit_name)) - return False - - def validate_service_config_changed(self, sentry_unit, mtime, service, - filename, pgrep_full=None, - sleep_time=20, retry_count=30, - retry_sleep_time=10): - """Check service and file were updated after mtime - - Args: - sentry_unit (sentry): The sentry unit to check for the service on - mtime (float): The epoch time to check against - service (string): service name to look for in process table - filename (string): The file to check mtime of - pgrep_full: [Deprecated] Use full command line search mode with pgrep - sleep_time (int): Initial sleep in seconds to pass to test helpers - retry_count (int): If service is not found, how many times to retry - retry_sleep_time (int): Time in seconds to wait between retries - - Typical Usage: - u = OpenStackAmuletUtils(ERROR) - ... 
- mtime = u.get_sentry_time(self.cinder_sentry) - self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) - if not u.validate_service_config_changed(self.cinder_sentry, - mtime, - 'cinder-api', - '/etc/cinder/cinder.conf') - amulet.raise_status(amulet.FAIL, msg='update failed') - Returns: - bool: True if both service and file where updated/restarted after - mtime, False if service is older than mtime or if service was - not found or if filename was modified before mtime. - """ - - # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now - # used instead of pgrep. pgrep_full is still passed through to ensure - # deprecation WARNS. lp1474030 - - service_restart = self.service_restarted_since( - sentry_unit, mtime, - service, - pgrep_full=pgrep_full, - sleep_time=sleep_time, - retry_count=retry_count, - retry_sleep_time=retry_sleep_time) - - config_update = self.config_updated_since( - sentry_unit, - filename, - mtime, - sleep_time=sleep_time, - retry_count=retry_count, - retry_sleep_time=retry_sleep_time) - - return service_restart and config_update - - def get_sentry_time(self, sentry_unit): - """Return current epoch time on a sentry""" - cmd = "date +'%s'" - return float(sentry_unit.run(cmd)[0]) - - def relation_error(self, name, data): - return 'unexpected relation data in {} - {}'.format(name, data) - - def endpoint_error(self, name, data): - return 'unexpected endpoint data in {} - {}'.format(name, data) - - def get_ubuntu_releases(self): - """Return a list of all Ubuntu releases in order of release.""" - _d = distro_info.UbuntuDistroInfo() - _release_list = _d.all - return _release_list - - def file_to_url(self, file_rel_path): - """Convert a relative file path to a file URL.""" - _abs_path = os.path.abspath(file_rel_path) - return urlparse.urlparse(_abs_path, scheme='file').geturl() - - def check_commands_on_units(self, commands, sentry_units): - """Check that all commands in a list exit zero on all - sentry units in a list. - - :param commands: list of bash commands - :param sentry_units: list of sentry unit pointers - :returns: None if successful; Failure message otherwise - """ - self.log.debug('Checking exit codes for {} commands on {} ' - 'sentry units...'.format(len(commands), - len(sentry_units))) - for sentry_unit in sentry_units: - for cmd in commands: - output, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} `{}` returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - else: - return ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - return None - - def get_process_id_list(self, sentry_unit, process_name, - expect_success=True): - """Get a list of process ID(s) from a single sentry juju unit - for a single process name. - - :param sentry_unit: Amulet sentry instance (juju unit) - :param process_name: Process name - :param expect_success: If False, expect the PID to be missing, - raise if it is present. - :returns: List of process IDs - """ - cmd = 'pidof -x "{}"'.format(process_name) - if not expect_success: - cmd += " || exit 0 && exit 1" - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return str(output).split() - - def get_unit_process_ids(self, unit_processes, expect_success=True): - """Construct a dict containing unit sentries, process names, and - process IDs. 
- - :param unit_processes: A dictionary of Amulet sentry instance - to list of process names. - :param expect_success: if False expect the processes to not be - running, raise if they are. - :returns: Dictionary of Amulet sentry instance to dictionary - of process names to PIDs. - """ - pid_dict = {} - for sentry_unit, process_list in six.iteritems(unit_processes): - pid_dict[sentry_unit] = {} - for process in process_list: - pids = self.get_process_id_list( - sentry_unit, process, expect_success=expect_success) - pid_dict[sentry_unit].update({process: pids}) - return pid_dict - - def validate_unit_process_ids(self, expected, actual): - """Validate process id quantities for services on units.""" - self.log.debug('Checking units for running processes...') - self.log.debug('Expected PIDs: {}'.format(expected)) - self.log.debug('Actual PIDs: {}'.format(actual)) - - if len(actual) != len(expected): - return ('Unit count mismatch. expected, actual: {}, ' - '{} '.format(len(expected), len(actual))) - - for (e_sentry, e_proc_names) in six.iteritems(expected): - e_sentry_name = e_sentry.info['unit_name'] - if e_sentry in actual.keys(): - a_proc_names = actual[e_sentry] - else: - return ('Expected sentry ({}) not found in actual dict data.' - '{}'.format(e_sentry_name, e_sentry)) - - if len(e_proc_names.keys()) != len(a_proc_names.keys()): - return ('Process name count mismatch. expected, actual: {}, ' - '{}'.format(len(expected), len(actual))) - - for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ - zip(e_proc_names.items(), a_proc_names.items()): - if e_proc_name != a_proc_name: - return ('Process name mismatch. expected, actual: {}, ' - '{}'.format(e_proc_name, a_proc_name)) - - a_pids_length = len(a_pids) - fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' - '{}, {} ({})'.format(e_sentry_name, e_proc_name, - e_pids, a_pids_length, - a_pids)) - - # If expected is a list, ensure at least one PID quantity match - if isinstance(e_pids, list) and \ - a_pids_length not in e_pids: - return fail_msg - # If expected is not bool and not list, - # ensure PID quantities match - elif not isinstance(e_pids, bool) and \ - not isinstance(e_pids, list) and \ - a_pids_length != e_pids: - return fail_msg - # If expected is bool True, ensure 1 or more PIDs exist - elif isinstance(e_pids, bool) and \ - e_pids is True and a_pids_length < 1: - return fail_msg - # If expected is bool False, ensure 0 PIDs exist - elif isinstance(e_pids, bool) and \ - e_pids is False and a_pids_length != 0: - return fail_msg - else: - self.log.debug('PID check OK: {} {} {}: ' - '{}'.format(e_sentry_name, e_proc_name, - e_pids, a_pids)) - return None - - def validate_list_of_identical_dicts(self, list_of_dicts): - """Check that all dicts within a list are identical.""" - hashes = [] - for _dict in list_of_dicts: - hashes.append(hash(frozenset(_dict.items()))) - - self.log.debug('Hashes: {}'.format(hashes)) - if len(set(hashes)) == 1: - self.log.debug('Dicts within list are identical') - else: - return 'Dicts within list are not identical' - - return None - - def validate_sectionless_conf(self, file_contents, expected): - """A crude conf parser. Useful to inspect configuration files which - do not have section headers (as would be necessary in order to use - the configparser). 
Such as openstack-dashboard or rabbitmq confs.""" - for line in file_contents.split('\n'): - if '=' in line: - args = line.split('=') - if len(args) <= 1: - continue - key = args[0].strip() - value = args[1].strip() - if key in expected.keys(): - if expected[key] != value: - msg = ('Config mismatch. Expected, actual: {}, ' - '{}'.format(expected[key], value)) - amulet.raise_status(amulet.FAIL, msg=msg) - - def get_unit_hostnames(self, units): - """Return a dict of juju unit names to hostnames.""" - host_names = {} - for unit in units: - host_names[unit.info['unit_name']] = \ - str(unit.file_contents('/etc/hostname').strip()) - self.log.debug('Unit host names: {}'.format(host_names)) - return host_names - - def run_cmd_unit(self, sentry_unit, cmd): - """Run a command on a unit, return the output and exit code.""" - output, code = sentry_unit.run(cmd) - if code == 0: - self.log.debug('{} `{}` command returned {} ' - '(OK)'.format(sentry_unit.info['unit_name'], - cmd, code)) - else: - msg = ('{} `{}` command returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return str(output), code - - def file_exists_on_unit(self, sentry_unit, file_name): - """Check if a file exists on a unit.""" - try: - sentry_unit.file_stat(file_name) - return True - except IOError: - return False - except Exception as e: - msg = 'Error checking file {}: {}'.format(file_name, e) - amulet.raise_status(amulet.FAIL, msg=msg) - - def file_contents_safe(self, sentry_unit, file_name, - max_wait=60, fatal=False): - """Get file contents from a sentry unit. Wrap amulet file_contents - with retry logic to address races where a file checks as existing, - but no longer exists by the time file_contents is called. - Return None if file not found. Optionally raise if fatal is True.""" - unit_name = sentry_unit.info['unit_name'] - file_contents = False - tries = 0 - while not file_contents and tries < (max_wait / 4): - try: - file_contents = sentry_unit.file_contents(file_name) - except IOError: - self.log.debug('Attempt {} to open file {} from {} ' - 'failed'.format(tries, file_name, - unit_name)) - time.sleep(4) - tries += 1 - - if file_contents: - return file_contents - elif not fatal: - return None - elif fatal: - msg = 'Failed to get file contents from unit.' - amulet.raise_status(amulet.FAIL, msg) - - def port_knock_tcp(self, host="localhost", port=22, timeout=15): - """Open a TCP socket to check for a listening sevice on a host. 
- - :param host: host name or IP address, default to localhost - :param port: TCP port number, default to 22 - :param timeout: Connect timeout, default to 15 seconds - :returns: True if successful, False if connect failed - """ - - # Resolve host name if possible - try: - connect_host = socket.gethostbyname(host) - host_human = "{} ({})".format(connect_host, host) - except socket.error as e: - self.log.warn('Unable to resolve address: ' - '{} ({}) Trying anyway!'.format(host, e)) - connect_host = host - host_human = connect_host - - # Attempt socket connection - try: - knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - knock.settimeout(timeout) - knock.connect((connect_host, port)) - knock.close() - self.log.debug('Socket connect OK for host ' - '{} on port {}.'.format(host_human, port)) - return True - except socket.error as e: - self.log.debug('Socket connect FAIL for' - ' {} port {} ({})'.format(host_human, port, e)) - return False - - def port_knock_units(self, sentry_units, port=22, - timeout=15, expect_success=True): - """Open a TCP socket to check for a listening sevice on each - listed juju unit. - - :param sentry_units: list of sentry unit pointers - :param port: TCP port number, default to 22 - :param timeout: Connect timeout, default to 15 seconds - :expect_success: True by default, set False to invert logic - :returns: None if successful, Failure message otherwise - """ - for unit in sentry_units: - host = unit.info['public-address'] - connected = self.port_knock_tcp(host, port, timeout) - if not connected and expect_success: - return 'Socket connect failed.' - elif connected and not expect_success: - return 'Socket connected unexpectedly.' - - def get_uuid_epoch_stamp(self): - """Returns a stamp string based on uuid4 and epoch time. Useful in - generating test messages which need to be unique-ish.""" - return '[{}-{}]'.format(uuid.uuid4(), time.time()) - - # amulet juju action helpers: - def run_action(self, unit_sentry, action, - _check_output=subprocess.check_output, - params=None): - """Translate to amulet's built in run_action(). Deprecated. - - Run the named action on a given unit sentry. - - params a dict of parameters to use - _check_output parameter is no longer used - - @return action_id. - """ - self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been ' - 'deprecated for amulet.run_action') - return unit_sentry.run_action(action, action_args=params) - - def wait_on_action(self, action_id, _check_output=subprocess.check_output): - """Wait for a given action, returning if it completed or not. - - action_id a string action uuid - _check_output parameter is no longer used - """ - data = amulet.actions.get_action_output(action_id, full_output=True) - return data.get(u"status") == "completed" - - def status_get(self, unit): - """Return the current service status of this unit.""" - raw_status, return_code = unit.run( - "status-get --format=json --include-data") - if return_code != 0: - return ("unknown", "") - status = json.loads(raw_status) - return (status["status"], status["message"]) diff --git a/tests/charmhelpers/contrib/openstack/__init__.py b/tests/charmhelpers/contrib/openstack/__init__.py deleted file mode 100644 index d7567b863e3a5ad2b7a7f44958b4166e0c3d346b..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/contrib/openstack/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/tests/charmhelpers/contrib/openstack/amulet/__init__.py deleted file mode 100644 index d7567b863e3a5ad2b7a7f44958b4166e0c3d346b..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/contrib/openstack/amulet/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py deleted file mode 100644 index 5afbbd87c13e2b168b088c4da51b3b63ab4d07a2..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ /dev/null @@ -1,354 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import os -import re -import sys -import six -from collections import OrderedDict -from charmhelpers.contrib.amulet.deployment import ( - AmuletDeployment -) - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - - -class OpenStackAmuletDeployment(AmuletDeployment): - """OpenStack amulet deployment. - - This class inherits from AmuletDeployment and has additional support - that is specifically for use by OpenStack charms. 
- """ - - def __init__(self, series=None, openstack=None, source=None, - stable=True, log_level=DEBUG): - """Initialize the deployment environment.""" - super(OpenStackAmuletDeployment, self).__init__(series) - self.log = self.get_logger(level=log_level) - self.log.info('OpenStackAmuletDeployment: init') - self.openstack = openstack - self.source = source - self.stable = stable - - def get_logger(self, name="deployment-logger", level=logging.DEBUG): - """Get a logger object that will log to stdout.""" - log = logging - logger = log.getLogger(name) - fmt = log.Formatter("%(asctime)s %(funcName)s " - "%(levelname)s: %(message)s") - - handler = log.StreamHandler(stream=sys.stdout) - handler.setLevel(level) - handler.setFormatter(fmt) - - logger.addHandler(handler) - logger.setLevel(level) - - return logger - - def _determine_branch_locations(self, other_services): - """Determine the branch locations for the other services. - - Determine if the local branch being tested is derived from its - stable or next (dev) branch, and based on this, use the corresonding - stable or next branches for the other_services.""" - - self.log.info('OpenStackAmuletDeployment: determine branch locations') - - # Charms outside the ~openstack-charmers - base_charms = { - 'mysql': ['trusty'], - 'mongodb': ['trusty'], - 'nrpe': ['trusty', 'xenial'], - } - - for svc in other_services: - # If a location has been explicitly set, use it - if svc.get('location'): - continue - if svc['name'] in base_charms: - # NOTE: not all charms have support for all series we - # want/need to test against, so fix to most recent - # that each base charm supports - target_series = self.series - if self.series not in base_charms[svc['name']]: - target_series = base_charms[svc['name']][-1] - svc['location'] = 'cs:{}/{}'.format(target_series, - svc['name']) - elif self.stable: - svc['location'] = 'cs:{}/{}'.format(self.series, - svc['name']) - else: - svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format( - self.series, - svc['name'] - ) - - return other_services - - def _add_services(self, this_service, other_services, use_source=None, - no_origin=None): - """Add services to the deployment and optionally set - openstack-origin/source. - - :param this_service dict: Service dictionary describing the service - whose amulet tests are being run - :param other_services dict: List of service dictionaries describing - the services needed to support the target - service - :param use_source list: List of services which use the 'source' config - option rather than 'openstack-origin' - :param no_origin list: List of services which do not support setting - the Cloud Archive. 
- Service Dict: - { - 'name': str charm-name, - 'units': int number of units, - 'constraints': dict of juju constraints, - 'location': str location of charm, - } - eg - this_service = { - 'name': 'openvswitch-odl', - 'constraints': {'mem': '8G'}, - } - other_services = [ - { - 'name': 'nova-compute', - 'units': 2, - 'constraints': {'mem': '4G'}, - 'location': cs:~bob/xenial/nova-compute - }, - { - 'name': 'mysql', - 'constraints': {'mem': '2G'}, - }, - {'neutron-api-odl'}] - use_source = ['mysql'] - no_origin = ['neutron-api-odl'] - """ - self.log.info('OpenStackAmuletDeployment: adding services') - - other_services = self._determine_branch_locations(other_services) - - super(OpenStackAmuletDeployment, self)._add_services(this_service, - other_services) - - services = other_services - services.append(this_service) - - use_source = use_source or [] - no_origin = no_origin or [] - - # Charms which should use the source config option - use_source = list(set( - use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw', 'ceph-mon', - 'ceph-proxy', 'percona-cluster', 'lxd'])) - - # Charms which can not use openstack-origin, ie. many subordinates - no_origin = list(set( - no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch', - 'nrpe', 'openvswitch-odl', 'neutron-api-odl', - 'odl-controller', 'cinder-backup', 'nexentaedge-data', - 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw', - 'cinder-nexentaedge', 'nexentaedge-mgmt'])) - - if self.openstack: - for svc in services: - if svc['name'] not in use_source + no_origin: - config = {'openstack-origin': self.openstack} - self.d.configure(svc['name'], config) - - if self.source: - for svc in services: - if svc['name'] in use_source and svc['name'] not in no_origin: - config = {'source': self.source} - self.d.configure(svc['name'], config) - - def _configure_services(self, configs): - """Configure all of the services.""" - self.log.info('OpenStackAmuletDeployment: configure services') - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _auto_wait_for_status(self, message=None, exclude_services=None, - include_only=None, timeout=None): - """Wait for all units to have a specific extended status, except - for any defined as excluded. Unless specified via message, any - status containing any case of 'ready' will be considered a match. - - Examples of message usage: - - Wait for all unit status to CONTAIN any case of 'ready' or 'ok': - message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) - - Wait for all units to reach this status (exact match): - message = re.compile('^Unit is ready and clustered$') - - Wait for all units to reach any one of these (exact match): - message = re.compile('Unit is ready|OK|Ready') - - Wait for at least one unit to reach this status (exact match): - message = {'ready'} - - See Amulet's sentry.wait_for_messages() for message usage detail. - https://github.com/juju/amulet/blob/master/amulet/sentry.py - - :param message: Expected status match - :param exclude_services: List of juju service names to ignore, - not to be used in conjuction with include_only. - :param include_only: List of juju service names to exclusively check, - not to be used in conjuction with exclude_services. - :param timeout: Maximum time in seconds to wait for status match - :returns: None. Raises if timeout is hit. - """ - if not timeout: - timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) - self.log.info('Waiting for extended status on units for {}s...' 
- ''.format(timeout)) - - all_services = self.d.services.keys() - - if exclude_services and include_only: - raise ValueError('exclude_services can not be used ' - 'with include_only') - - if message: - if isinstance(message, re._pattern_type): - match = message.pattern - else: - match = message - - self.log.debug('Custom extended status wait match: ' - '{}'.format(match)) - else: - self.log.debug('Default extended status wait match: contains ' - 'READY (case-insensitive)') - message = re.compile('.*ready.*', re.IGNORECASE) - - if exclude_services: - self.log.debug('Excluding services from extended status match: ' - '{}'.format(exclude_services)) - else: - exclude_services = [] - - if include_only: - services = include_only - else: - services = list(set(all_services) - set(exclude_services)) - - self.log.debug('Waiting up to {}s for extended status on services: ' - '{}'.format(timeout, services)) - service_messages = {service: message for service in services} - - # Check for idleness - self.d.sentry.wait(timeout=timeout) - # Check for error states and bail early - self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) - # Check for ready messages - self.d.sentry.wait_for_messages(service_messages, timeout=timeout) - - self.log.info('OK') - - def _get_openstack_release(self): - """Get openstack release. - - Return an integer representing the enum value of the openstack - release. - """ - # Must be ordered by OpenStack release (not by Ubuntu release): - (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, - self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, - self.yakkety_newton, self.xenial_ocata, self.zesty_ocata, - self.xenial_pike, self.artful_pike, self.xenial_queens, - self.bionic_queens,) = range(13) - - releases = { - ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, - ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, - ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, - ('xenial', None): self.xenial_mitaka, - ('xenial', 'cloud:xenial-newton'): self.xenial_newton, - ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, - ('xenial', 'cloud:xenial-pike'): self.xenial_pike, - ('xenial', 'cloud:xenial-queens'): self.xenial_queens, - ('yakkety', None): self.yakkety_newton, - ('zesty', None): self.zesty_ocata, - ('artful', None): self.artful_pike, - ('bionic', None): self.bionic_queens, - } - return releases[(self.series, self.openstack)] - - def _get_openstack_release_string(self): - """Get openstack release string. - - Return a string representing the openstack release. 
- """ - releases = OrderedDict([ - ('trusty', 'icehouse'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ]) - if self.openstack: - os_origin = self.openstack.split(':')[1] - return os_origin.split('%s-' % self.series)[1].split('/')[0] - else: - return releases[self.series] - - def get_ceph_expected_pools(self, radosgw=False): - """Return a list of expected ceph pools in a ceph + cinder + glance - test scenario, based on OpenStack release and whether ceph radosgw - is flagged as present or not.""" - - if self._get_openstack_release() == self.trusty_icehouse: - # Icehouse - pools = [ - 'data', - 'metadata', - 'rbd', - 'cinder-ceph', - 'glance' - ] - elif (self.trusty_kilo <= self._get_openstack_release() <= - self.zesty_ocata): - # Kilo through Ocata - pools = [ - 'rbd', - 'cinder-ceph', - 'glance' - ] - else: - # Pike and later - pools = [ - 'cinder-ceph', - 'glance' - ] - - if radosgw: - pools.extend([ - '.rgw.root', - '.rgw.control', - '.rgw', - '.rgw.gc', - '.users.uid' - ]) - - return pools diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py deleted file mode 100644 index b71b2b1910959f5dbe7860ff3d14d45b8e9e2d90..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ /dev/null @@ -1,1335 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import amulet -import json -import logging -import os -import re -import six -import time -import urllib -import urlparse - -import cinderclient.v1.client as cinder_client -import cinderclient.v2.client as cinder_clientv2 -import glanceclient.v1.client as glance_client -import heatclient.v1.client as heat_client -from keystoneclient.v2_0 import client as keystone_client -from keystoneauth1.identity import ( - v3, - v2, -) -from keystoneauth1 import session as keystone_session -from keystoneclient.v3 import client as keystone_client_v3 -from novaclient import exceptions - -import novaclient.client as nova_client -import novaclient -import pika -import swiftclient - -from charmhelpers.contrib.amulet.utils import ( - AmuletUtils -) -from charmhelpers.core.host import CompareHostReleases - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - -NOVA_CLIENT_VERSION = "2" - - -class OpenStackAmuletUtils(AmuletUtils): - """OpenStack amulet utilities. - - This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charm tests. - """ - - def __init__(self, log_level=ERROR): - """Initialize the deployment environment.""" - super(OpenStackAmuletUtils, self).__init__(log_level) - - def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): - """Validate endpoint data. - - Validate actual endpoint data vs expected endpoint data. The ports - are used to find the matching endpoint. 
- """ - self.log.debug('Validating endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = False - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if (admin_port in ep.adminurl and - internal_port in ep.internalurl and - public_port in ep.publicurl): - found = True - actual = {'id': ep.id, - 'region': ep.region, - 'adminurl': ep.adminurl, - 'internalurl': ep.internalurl, - 'publicurl': ep.publicurl, - 'service_id': ep.service_id} - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if not found: - return 'endpoint not found' - - def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): - """Validate keystone v3 endpoint data. - - Validate the v3 endpoint data which has changed from v2. The - ports are used to find the matching endpoint. - - The new v3 endpoint data looks like: - - [<Endpoint enabled=True, - id=0432655fc2f74d1e9fa17bdaa6f6e60b, - interface=admin, - links={u'self': u'<RESTful URL of this endpoint>'}, - region=RegionOne, - region_id=RegionOne, - service_id=17f842a0dc084b928e476fafe67e4095, - url=http://10.5.6.5:9312>, - <Endpoint enabled=True, - id=6536cb6cb92f4f41bf22b079935c7707, - interface=admin, - links={u'self': u'<RESTful url of this endpoint>'}, - region=RegionOne, - region_id=RegionOne, - service_id=72fc8736fb41435e8b3584205bb2cfa3, - url=http://10.5.6.6:35357/v3>, - ... ] - """ - self.log.debug('Validating v3 endpoint data...') - self.log.debug('actual: {}'.format(repr(endpoints))) - found = [] - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if ((admin_port in ep.url and ep.interface == 'admin') or - (internal_port in ep.url and ep.interface == 'internal') or - (public_port in ep.url and ep.interface == 'public')): - found.append(ep.interface) - # note we ignore the links member. - actual = {'id': ep.id, - 'region': ep.region, - 'region_id': ep.region_id, - 'interface': self.not_null, - 'url': ep.url, - 'service_id': ep.service_id, } - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if len(found) != 3: - return 'Unexpected number of endpoints found' - - def validate_svc_catalog_endpoint_data(self, expected, actual): - """Validate service catalog endpoint data. - - Validate a list of actual service catalog endpoints vs a list of - expected service catalog endpoints. - """ - self.log.debug('Validating service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - ret = self._validate_dict_data(expected[k][0], actual[k][0]) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_v3_svc_catalog_endpoint_data(self, expected, actual): - """Validate the keystone v3 catalog endpoint data. - - Validate a list of dictinaries that make up the keystone v3 service - catalogue. 
- - It is in the form of: - - - {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:35357/v3'}, - {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}, - {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.224:5000/v3'}], - u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', - u'interface': u'public', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', - u'interface': u'internal', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9311'}, - {u'id': u'f629388955bc407f8b11d8b7ca168086', - u'interface': u'admin', - u'region': u'RegionOne', - u'region_id': u'RegionOne', - u'url': u'http://10.5.5.223:9312'}]} - - Note, that an added complication is that the order of admin, public, - internal against 'interface' in each region. - - Thus, the function sorts the expected and actual lists using the - interface key as a sort key, prior to the comparison. - """ - self.log.debug('Validating v3 service catalog endpoint data...') - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - l_expected = sorted(v, key=lambda x: x['interface']) - l_actual = sorted(actual[k], key=lambda x: x['interface']) - if len(l_actual) != len(l_expected): - return ("endpoint {} has differing number of interfaces " - " - expected({}), actual({})" - .format(k, len(l_expected), len(l_actual))) - for i_expected, i_actual in zip(l_expected, l_actual): - self.log.debug("checking interface {}" - .format(i_expected['interface'])) - ret = self._validate_dict_data(i_expected, i_actual) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_tenant_data(self, expected, actual): - """Validate tenant data. - - Validate a list of actual tenant data vs list of expected tenant - data. - """ - self.log.debug('Validating tenant data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'enabled': act.enabled, 'description': act.description, - 'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected tenant data - {}".format(ret) - if not found: - return "tenant {} does not exist".format(e['name']) - return ret - - def validate_role_data(self, expected, actual): - """Validate role data. - - Validate a list of actual role data vs a list of expected role - data. - """ - self.log.debug('Validating role data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected role data - {}".format(ret) - if not found: - return "role {} does not exist".format(e['name']) - return ret - - def validate_user_data(self, expected, actual, api_version=None): - """Validate user data. - - Validate a list of actual user data vs a list of expected user - data. 
- """ - self.log.debug('Validating user data...') - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - if e['name'] == act.name: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'id': act.id} - if api_version == 3: - a['default_project_id'] = getattr(act, - 'default_project_id', - 'none') - else: - a['tenantId'] = act.tenantId - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected user data - {}".format(ret) - if not found: - return "user {} does not exist".format(e['name']) - return ret - - def validate_flavor_data(self, expected, actual): - """Validate flavor data. - - Validate a list of actual flavors vs a list of expected flavors. - """ - self.log.debug('Validating flavor data...') - self.log.debug('actual: {}'.format(repr(actual))) - act = [a.name for a in actual] - return self._validate_list_data(expected, act) - - def tenant_exists(self, keystone, tenant): - """Return True if tenant exists.""" - self.log.debug('Checking if tenant exists ({})...'.format(tenant)) - return tenant in [t.name for t in keystone.tenants.list()] - - def keystone_wait_for_propagation(self, sentry_relation_pairs, - api_version): - """Iterate over list of sentry and relation tuples and verify that - api_version has the expected value. - - :param sentry_relation_pairs: list of sentry, relation name tuples used - for monitoring propagation of relation - data - :param api_version: api_version to expect in relation data - :returns: None if successful. Raise on error. - """ - for (sentry, relation_name) in sentry_relation_pairs: - rel = sentry.relation('identity-service', - relation_name) - self.log.debug('keystone relation data: {}'.format(rel)) - if rel.get('api_version') != str(api_version): - raise Exception("api_version not propagated through relation" - " data yet ('{}' != '{}')." - "".format(rel['api_version'], api_version)) - - def keystone_configure_api_version(self, sentry_relation_pairs, deployment, - api_version): - """Configure preferred-api-version of keystone in deployment and - monitor provided list of relation objects for propagation - before returning to caller. - - :param sentry_relation_pairs: list of sentry, relation tuples used for - monitoring propagation of relation data - :param deployment: deployment to configure - :param api_version: value preferred-api-version will be set to - :returns: None if successful. Raise on error. - """ - self.log.debug("Setting keystone preferred-api-version: '{}'" - "".format(api_version)) - - config = {'preferred-api-version': api_version} - deployment.d.configure('keystone', config) - deployment._auto_wait_for_status() - self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) - - def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant, api_version=2): - """Authenticates admin user with cinder.""" - # NOTE(beisner): cinder python client doesn't accept tokens. 
- keystone_ip = keystone_sentry.info['public-address'] - ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) - _clients = { - 1: cinder_client.Client, - 2: cinder_clientv2.Client} - return _clients[api_version](username, password, tenant, ept) - - def authenticate_keystone(self, keystone_ip, username, password, - api_version=False, admin_port=False, - user_domain_name=None, domain_name=None, - project_domain_name=None, project_name=None): - """Authenticate with Keystone""" - self.log.debug('Authenticating with keystone...') - port = 5000 - if admin_port: - port = 35357 - base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), - port) - if not api_version or api_version == 2: - ep = base_ep + "/v2.0" - auth = v2.Password( - username=username, - password=password, - tenant_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - client = keystone_client.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client - else: - ep = base_ep + "/v3" - auth = v3.Password( - user_domain_name=user_domain_name, - username=username, - password=password, - domain_name=domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - auth_url=ep - ) - sess = keystone_session.Session(auth=auth) - client = keystone_client_v3.Client(session=sess) - # This populates the client.service_catalog - client.auth_ref = auth.get_access(sess) - return client - - def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant=None, api_version=None, - keystone_ip=None, user_domain_name=None, - project_domain_name=None, - project_name=None): - """Authenticates admin user with the keystone admin endpoint.""" - self.log.debug('Authenticating keystone admin...') - if not keystone_ip: - keystone_ip = keystone_sentry.info['public-address'] - - # To support backward compatibility usage of this function - if not project_name: - project_name = tenant - if api_version == 3 and not user_domain_name: - user_domain_name = 'admin_domain' - if api_version == 3 and not project_domain_name: - project_domain_name = 'admin_domain' - if api_version == 3 and not project_name: - project_name = 'admin' - - return self.authenticate_keystone( - keystone_ip, user, password, - api_version=api_version, - user_domain_name=user_domain_name, - project_domain_name=project_domain_name, - project_name=project_name, - admin_port=True) - - def authenticate_keystone_user(self, keystone, user, password, tenant): - """Authenticates a regular user with the keystone public endpoint.""" - self.log.debug('Authenticating keystone user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - keystone_ip = urlparse.urlparse(ep).hostname - - return self.authenticate_keystone(keystone_ip, user, password, - project_name=tenant) - - def authenticate_glance_admin(self, keystone): - """Authenticates admin user with glance.""" - self.log.debug('Authenticating glance admin...') - ep = keystone.service_catalog.url_for(service_type='image', - interface='adminURL') - if keystone.session: - return glance_client.Client(ep, session=keystone.session) - else: - return glance_client.Client(ep, token=keystone.auth_token) - - def authenticate_heat_admin(self, keystone): - """Authenticates the admin user with heat.""" - self.log.debug('Authenticating heat admin...') - ep = keystone.service_catalog.url_for(service_type='orchestration', - interface='publicURL') - if 
keystone.session: - return heat_client.Client(endpoint=ep, session=keystone.session) - else: - return heat_client.Client(endpoint=ep, token=keystone.auth_token) - - def authenticate_nova_user(self, keystone, user, password, tenant): - """Authenticates a regular user with nova-api.""" - self.log.debug('Authenticating nova user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return nova_client.Client(NOVA_CLIENT_VERSION, - session=keystone.session, - auth_url=ep) - elif novaclient.__version__[0] >= "7": - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, password=password, - project_name=tenant, auth_url=ep) - else: - return nova_client.Client(NOVA_CLIENT_VERSION, - username=user, api_key=password, - project_id=tenant, auth_url=ep) - - def authenticate_swift_user(self, keystone, user, password, tenant): - """Authenticates a regular user with swift api.""" - self.log.debug('Authenticating swift user ({})...'.format(user)) - ep = keystone.service_catalog.url_for(service_type='identity', - interface='publicURL') - if keystone.session: - return swiftclient.Connection(session=keystone.session) - else: - return swiftclient.Connection(authurl=ep, - user=user, - key=password, - tenant_name=tenant, - auth_version='2.0') - - def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create the specified flavor.""" - try: - nova.flavors.find(name=name) - except (exceptions.NotFound, exceptions.NoUniqueMatch): - self.log.debug('Creating flavor ({})'.format(name)) - nova.flavors.create(name, ram, vcpus, disk, flavorid, - ephemeral, swap, rxtx_factor, is_public) - - def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance, - validate and return a resource pointer. - - :param glance: pointer to authenticated glance connection - :param image_name: display name for new image - :returns: glance image pointer - """ - self.log.debug('Creating glance cirros image ' - '({})...'.format(image_name)) - - # Download cirros image - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - f = opener.open('http://download.cirros-cloud.net/version/released') - version = f.read().strip() - cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) - local_path = os.path.join('tests', cirros_img) - - if not os.path.exists(local_path): - cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', - version, cirros_img) - opener.retrieve(cirros_url, local_path) - f.close() - - # Create glance image - with open(local_path) as f: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', data=f) - - # Wait for image to reach active status - img_id = image.id - ret = self.resource_reaches_status(glance.images, img_id, - expected_stat='active', - msg='Image status wait') - if not ret: - msg = 'Glance image failed to reach expected state.' 
- amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new image - self.log.debug('Validating image attributes...') - val_img_name = glance.images.get(img_id).name - val_img_stat = glance.images.get(img_id).status - val_img_pub = glance.images.get(img_id).is_public - val_img_cfmt = glance.images.get(img_id).container_format - val_img_dfmt = glance.images.get(img_id).disk_format - msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' - 'container fmt:{} disk fmt:{}'.format( - val_img_name, val_img_pub, img_id, - val_img_stat, val_img_cfmt, val_img_dfmt)) - - if val_img_name == image_name and val_img_stat == 'active' \ - and val_img_pub is True and val_img_cfmt == 'bare' \ - and val_img_dfmt == 'qcow2': - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return image - - def delete_image(self, glance, image): - """Delete the specified image.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_image.') - self.log.debug('Deleting glance image ({})...'.format(image)) - return self.delete_resource(glance.images, image, msg='glance image') - - def create_instance(self, nova, image_name, instance_name, flavor): - """Create the specified instance.""" - self.log.debug('Creating instance ' - '({}|{}|{})'.format(instance_name, image_name, flavor)) - image = nova.glance.find_image(image_name) - flavor = nova.flavors.find(name=flavor) - instance = nova.servers.create(name=instance_name, image=image, - flavor=flavor) - - count = 1 - status = instance.status - while status != 'ACTIVE' and count < 60: - time.sleep(3) - instance = nova.servers.get(instance.id) - status = instance.status - self.log.debug('instance status: {}'.format(status)) - count += 1 - - if status != 'ACTIVE': - self.log.error('instance creation timed out') - return None - - return instance - - def delete_instance(self, nova, instance): - """Delete the specified instance.""" - - # /!\ DEPRECATION WARNING - self.log.warn('/!\\ DEPRECATION WARNING: use ' - 'delete_resource instead of delete_instance.') - self.log.debug('Deleting instance ({})...'.format(instance)) - return self.delete_resource(nova.servers, instance, - msg='nova instance') - - def create_or_get_keypair(self, nova, keypair_name="testkey"): - """Create a new keypair, or return pointer if it already exists.""" - try: - _keypair = nova.keypairs.get(keypair_name) - self.log.debug('Keypair ({}) already exists, ' - 'using it.'.format(keypair_name)) - return _keypair - except Exception: - self.log.debug('Keypair ({}) does not exist, ' - 'creating it.'.format(keypair_name)) - - _keypair = nova.keypairs.create(name=keypair_name) - return _keypair - - def _get_cinder_obj_name(self, cinder_object): - """Retrieve name of cinder object. - - :param cinder_object: cinder snapshot or volume object - :returns: str cinder object name - """ - # v1 objects store name in 'display_name' attr but v2+ use 'name' - try: - return cinder_object.display_name - except AttributeError: - return cinder_object.name - - def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, - img_id=None, src_vol_id=None, snap_id=None): - """Create cinder volume, optionally from a glance image, OR - optionally as a clone of an existing volume, OR optionally - from a snapshot. Wait for the new volume status to reach - the expected status, validate and return a resource pointer. 
- - :param vol_name: cinder volume display name - :param vol_size: size in gigabytes - :param img_id: optional glance image id - :param src_vol_id: optional source volume id to clone - :param snap_id: optional snapshot id to use - :returns: cinder volume pointer - """ - # Handle parameter input and avoid impossible combinations - if img_id and not src_vol_id and not snap_id: - # Create volume from image - self.log.debug('Creating cinder volume from glance image...') - bootable = 'true' - elif src_vol_id and not img_id and not snap_id: - # Clone an existing volume - self.log.debug('Cloning cinder volume...') - bootable = cinder.volumes.get(src_vol_id).bootable - elif snap_id and not src_vol_id and not img_id: - # Create volume from snapshot - self.log.debug('Creating cinder volume from snapshot...') - snap = cinder.volume_snapshots.find(id=snap_id) - vol_size = snap.size - snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id - bootable = cinder.volumes.get(snap_vol_id).bootable - elif not img_id and not src_vol_id and not snap_id: - # Create volume - self.log.debug('Creating cinder volume...') - bootable = 'false' - else: - # Impossible combination of parameters - msg = ('Invalid method use - name:{} size:{} img_id:{} ' - 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, - img_id, src_vol_id, - snap_id)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Create new volume - try: - vol_new = cinder.volumes.create(display_name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except TypeError: - vol_new = cinder.volumes.create(name=vol_name, - imageRef=img_id, - size=vol_size, - source_volid=src_vol_id, - snapshot_id=snap_id) - vol_id = vol_new.id - except Exception as e: - msg = 'Failed to create volume: {}'.format(e) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Wait for volume to reach available status - ret = self.resource_reaches_status(cinder.volumes, vol_id, - expected_stat="available", - msg="Volume status wait") - if not ret: - msg = 'Cinder volume failed to reach expected state.' - amulet.raise_status(amulet.FAIL, msg=msg) - - # Re-validate new volume - self.log.debug('Validating volume attributes...') - val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id)) - val_vol_boot = cinder.volumes.get(vol_id).bootable - val_vol_stat = cinder.volumes.get(vol_id).status - val_vol_size = cinder.volumes.get(vol_id).size - msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' - '{} size:{}'.format(val_vol_name, vol_id, - val_vol_stat, val_vol_boot, - val_vol_size)) - - if val_vol_boot == bootable and val_vol_stat == 'available' \ - and val_vol_name == vol_name and val_vol_size == vol_size: - self.log.debug(msg_attr) - else: - msg = ('Volume validation failed, {}'.format(msg_attr)) - amulet.raise_status(amulet.FAIL, msg=msg) - - return vol_new - - def delete_resource(self, resource, resource_id, - msg="resource", max_wait=120): - """Delete one openstack resource, such as one instance, keypair, - image, volume, stack, etc., and confirm deletion within max wait time. 
- - :param resource: pointer to os resource type, ex:glance_client.images - :param resource_id: unique name or id for the openstack resource - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, otherwise False - """ - self.log.debug('Deleting OpenStack resource ' - '{} ({})'.format(resource_id, msg)) - num_before = len(list(resource.list())) - resource.delete(resource_id) - - tries = 0 - num_after = len(list(resource.list())) - while num_after != (num_before - 1) and tries < (max_wait / 4): - self.log.debug('{} delete check: ' - '{} [{}:{}] {}'.format(msg, tries, - num_before, - num_after, - resource_id)) - time.sleep(4) - num_after = len(list(resource.list())) - tries += 1 - - self.log.debug('{}: expected, actual count = {}, ' - '{}'.format(msg, num_before - 1, num_after)) - - if num_after == (num_before - 1): - return True - else: - self.log.error('{} delete timed out'.format(msg)) - return False - - def resource_reaches_status(self, resource, resource_id, - expected_stat='available', - msg='resource', max_wait=120): - """Wait for an openstack resources status to reach an - expected status within a specified time. Useful to confirm that - nova instances, cinder vols, snapshots, glance images, heat stacks - and other resources eventually reach the expected status. - - :param resource: pointer to os resource type, ex: heat_client.stacks - :param resource_id: unique id for the openstack resource - :param expected_stat: status to expect resource to reach - :param msg: text to identify purpose in logging - :param max_wait: maximum wait time in seconds - :returns: True if successful, False if status is not reached - """ - - tries = 0 - resource_stat = resource.get(resource_id).status - while resource_stat != expected_stat and tries < (max_wait / 4): - self.log.debug('{} status check: ' - '{} [{}:{}] {}'.format(msg, tries, - resource_stat, - expected_stat, - resource_id)) - time.sleep(4) - resource_stat = resource.get(resource_id).status - tries += 1 - - self.log.debug('{}: expected, actual status = {}, ' - '{}'.format(msg, resource_stat, expected_stat)) - - if resource_stat == expected_stat: - return True - else: - self.log.debug('{} never reached expected status: ' - '{}'.format(resource_id, expected_stat)) - return False - - def get_ceph_osd_id_cmd(self, index): - """Produce a shell command that will return a ceph-osd id.""" - return ("`initctl list | grep 'ceph-osd ' | " - "awk 'NR=={} {{ print $2 }}' | " - "grep -o '[0-9]*'`".format(index + 1)) - - def get_ceph_pools(self, sentry_unit): - """Return a dict of ceph pools from a single ceph unit, with - pool name as keys, pool id as vals.""" - pools = {} - cmd = 'sudo ceph osd lspools' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - - # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, - for pool in str(output).split(','): - pool_id_name = pool.split(' ') - if len(pool_id_name) == 2: - pool_id = pool_id_name[0] - pool_name = pool_id_name[1] - pools[pool_name] = int(pool_id) - - self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], - pools)) - return pools - - def get_ceph_df(self, sentry_unit): - """Return dict of ceph df json output, including ceph pool state. 
- - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :returns: Dict of ceph df output - """ - cmd = 'sudo ceph df --format=json' - output, code = sentry_unit.run(cmd) - if code != 0: - msg = ('{} `{}` returned {} ' - '{}'.format(sentry_unit.info['unit_name'], - cmd, code, output)) - amulet.raise_status(amulet.FAIL, msg=msg) - return json.loads(output) - - def get_ceph_pool_sample(self, sentry_unit, pool_id=0): - """Take a sample of attributes of a ceph pool, returning ceph - pool name, object count and disk space used for the specified - pool ID number. - - :param sentry_unit: Pointer to amulet sentry instance (juju unit) - :param pool_id: Ceph pool ID - :returns: List of pool name, object count, kb disk space used - """ - df = self.get_ceph_df(sentry_unit) - pool_name = df['pools'][pool_id]['name'] - obj_count = df['pools'][pool_id]['stats']['objects'] - kb_used = df['pools'][pool_id]['stats']['kb_used'] - self.log.debug('Ceph {} pool (ID {}): {} objects, ' - '{} kb used'.format(pool_name, pool_id, - obj_count, kb_used)) - return pool_name, obj_count, kb_used - - def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): - """Validate ceph pool samples taken over time, such as pool - object counts or pool kb used, before adding, after adding, and - after deleting items which affect those pool attributes. The - 2nd element is expected to be greater than the 1st; 3rd is expected - to be less than the 2nd. - - :param samples: List containing 3 data samples - :param sample_type: String for logging and usage context - :returns: None if successful, Failure message otherwise - """ - original, created, deleted = range(3) - if samples[created] <= samples[original] or \ - samples[deleted] >= samples[created]: - return ('Ceph {} samples ({}) ' - 'unexpected.'.format(sample_type, samples)) - else: - self.log.debug('Ceph {} samples (OK): ' - '{}'.format(sample_type, samples)) - return None - - # rabbitmq/amqp specific helpers: - - def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): - """Wait for rmq units extended status to show cluster readiness, - after an optional initial sleep period. Initial sleep is likely - necessary to be effective following a config change, as status - message may not instantly update to non-ready.""" - - if init_sleep: - time.sleep(init_sleep) - - message = re.compile('^Unit is ready and clustered$') - deployment._auto_wait_for_status(message=message, - timeout=timeout, - include_only=['rabbitmq-server']) - - def add_rmq_test_user(self, sentry_units, - username="testuser1", password="changeme"): - """Add a test user via the first rmq juju unit, check connection as - the new user against all sentry units. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful. Raise on error. 
- """ - self.log.debug('Adding rmq user ({})...'.format(username)) - - # Check that user does not already exist - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - if username in output: - self.log.warning('User ({}) already exists, returning ' - 'gracefully.'.format(username)) - return - - perms = '".*" ".*" ".*"' - cmds = ['rabbitmqctl add_user {} {}'.format(username, password), - 'rabbitmqctl set_permissions {} {}'.format(username, perms)] - - # Add user via first unit - for cmd in cmds: - output, _ = self.run_cmd_unit(sentry_units[0], cmd) - - # Check connection against the other sentry_units - self.log.debug('Checking user connect against units...') - for sentry_unit in sentry_units: - connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, - username=username, - password=password) - connection.close() - - def delete_rmq_test_user(self, sentry_units, username="testuser1"): - """Delete a rabbitmq user via the first rmq juju unit. - - :param sentry_units: list of sentry unit pointers - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: None if successful or no such user. - """ - self.log.debug('Deleting rmq user ({})...'.format(username)) - - # Check that the user exists - cmd_user_list = 'rabbitmqctl list_users' - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) - - if username not in output: - self.log.warning('User ({}) does not exist, returning ' - 'gracefully.'.format(username)) - return - - # Delete the user - cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) - output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) - - def get_rmq_cluster_status(self, sentry_unit): - """Execute rabbitmq cluster status command on a unit and return - the full output. - - :param unit: sentry unit - :returns: String containing console output of cluster status command - """ - cmd = 'rabbitmqctl cluster_status' - output, _ = self.run_cmd_unit(sentry_unit, cmd) - self.log.debug('{} cluster_status:\n{}'.format( - sentry_unit.info['unit_name'], output)) - return str(output) - - def get_rmq_cluster_running_nodes(self, sentry_unit): - """Parse rabbitmqctl cluster_status output string, return list of - running rabbitmq cluster nodes. - - :param unit: sentry unit - :returns: List containing node names of running nodes - """ - # NOTE(beisner): rabbitmqctl cluster_status output is not - # json-parsable, do string chop foo, then json.loads that. - str_stat = self.get_rmq_cluster_status(sentry_unit) - if 'running_nodes' in str_stat: - pos_start = str_stat.find("{running_nodes,") + 15 - pos_end = str_stat.find("]},", pos_start) + 1 - str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') - run_nodes = json.loads(str_run_nodes) - return run_nodes - else: - return [] - - def validate_rmq_cluster_running_nodes(self, sentry_units): - """Check that all rmq unit hostnames are represented in the - cluster_status output of all units. 
- - :param host_names: dict of juju unit names to host names - :param units: list of sentry unit pointers (all rmq units) - :returns: None if successful, otherwise return error message - """ - host_names = self.get_unit_hostnames(sentry_units) - errors = [] - - # Query every unit for cluster_status running nodes - for query_unit in sentry_units: - query_unit_name = query_unit.info['unit_name'] - running_nodes = self.get_rmq_cluster_running_nodes(query_unit) - - # Confirm that every unit is represented in the queried unit's - # cluster_status running nodes output. - for validate_unit in sentry_units: - val_host_name = host_names[validate_unit.info['unit_name']] - val_node_name = 'rabbit@{}'.format(val_host_name) - - if val_node_name not in running_nodes: - errors.append('Cluster member check failed on {}: {} not ' - 'in {}\n'.format(query_unit_name, - val_node_name, - running_nodes)) - if errors: - return ''.join(errors) - - def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): - """Check a single juju rmq unit for ssl and port in the config file.""" - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - conf_file = '/etc/rabbitmq/rabbitmq.config' - conf_contents = str(self.file_contents_safe(sentry_unit, - conf_file, max_wait=16)) - # Checks - conf_ssl = 'ssl' in conf_contents - conf_port = str(port) in conf_contents - - # Port explicitly checked in config - if port and conf_port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif port and not conf_port and conf_ssl: - self.log.debug('SSL is enabled @{} but not on port {} ' - '({})'.format(host, port, unit_name)) - return False - # Port not checked (useful when checking that ssl is disabled) - elif not port and conf_ssl: - self.log.debug('SSL is enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return True - elif not conf_ssl: - self.log.debug('SSL not enabled @{}:{} ' - '({})'.format(host, port, unit_name)) - return False - else: - msg = ('Unknown condition when checking SSL status @{}:{} ' - '({})'.format(host, port, unit_name)) - amulet.raise_status(amulet.FAIL, msg) - - def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): - """Check that ssl is enabled on rmq juju sentry units. - - :param sentry_units: list of all rmq sentry units - :param port: optional ssl port override to validate - :returns: None if successful, otherwise return error message - """ - for sentry_unit in sentry_units: - if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): - return ('Unexpected condition: ssl is disabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def validate_rmq_ssl_disabled_units(self, sentry_units): - """Check that ssl is enabled on listed rmq juju sentry units. - - :param sentry_units: list of all rmq sentry units - :returns: True if successful. Raise on error. - """ - for sentry_unit in sentry_units: - if self.rmq_ssl_is_enabled_on_unit(sentry_unit): - return ('Unexpected condition: ssl is enabled on unit ' - '({})'.format(sentry_unit.info['unit_name'])) - return None - - def configure_rmq_ssl_on(self, sentry_units, deployment, - port=None, max_wait=60): - """Turn ssl charm config option on, with optional non-default - ssl port specification. Confirm that it is enabled on every - unit. 
- - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param port: amqp port, use defaults if None - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: on') - - # Enable RMQ SSL - config = {'ssl': 'on'} - if port: - config['ssl_port'] = port - - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): - """Turn ssl charm config option off, confirm that it is disabled - on every unit. - - :param sentry_units: list of sentry units - :param deployment: amulet deployment object pointer - :param max_wait: maximum time to wait in seconds to confirm - :returns: None if successful. Raise on error. - """ - self.log.debug('Setting ssl charm config option: off') - - # Disable RMQ SSL - config = {'ssl': 'off'} - deployment.d.configure('rabbitmq-server', config) - - # Wait for unit status - self.rmq_wait_for_cluster(deployment) - - # Confirm - tries = 0 - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - while ret and tries < (max_wait / 4): - time.sleep(4) - self.log.debug('Attempt {}: {}'.format(tries, ret)) - ret = self.validate_rmq_ssl_disabled_units(sentry_units) - tries += 1 - - if ret: - amulet.raise_status(amulet.FAIL, ret) - - def connect_amqp_by_unit(self, sentry_unit, ssl=False, - port=None, fatal=True, - username="testuser1", password="changeme"): - """Establish and return a pika amqp connection to the rabbitmq service - running on a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :param fatal: boolean, default to True (raises on connect error) - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :returns: pika amqp connection pointer or None if failed and non-fatal - """ - host = sentry_unit.info['public-address'] - unit_name = sentry_unit.info['unit_name'] - - # Default port logic if port is not specified - if ssl and not port: - port = 5671 - elif not ssl and not port: - port = 5672 - - self.log.debug('Connecting to amqp on {}:{} ({}) as ' - '{}...'.format(host, port, unit_name, username)) - - try: - credentials = pika.PlainCredentials(username, password) - parameters = pika.ConnectionParameters(host=host, port=port, - credentials=credentials, - ssl=ssl, - connection_attempts=3, - retry_delay=5, - socket_timeout=1) - connection = pika.BlockingConnection(parameters) - assert connection.is_open is True - assert connection.is_closing is False - self.log.debug('Connect OK') - return connection - except Exception as e: - msg = ('amqp connection failed to {}:{} as ' - '{} ({})'.format(host, port, username, str(e))) - if fatal: - amulet.raise_status(amulet.FAIL, msg) - else: - self.log.warn(msg) - return None - - def publish_amqp_message_by_unit(self, sentry_unit, message, - queue="test", ssl=False, - username="testuser1", - password="changeme", - port=None): - """Publish an amqp message to a rmq juju unit. 
- - :param sentry_unit: sentry unit pointer - :param message: amqp message string - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: None. Raises exception if publish failed. - """ - self.log.debug('Publishing message to {} queue:\n{}'.format(queue, - message)) - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - - # NOTE(beisner): extra debug here re: pika hang potential: - # https://github.com/pika/pika/issues/297 - # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw - self.log.debug('Defining channel...') - channel = connection.channel() - self.log.debug('Declaring queue...') - channel.queue_declare(queue=queue, auto_delete=False, durable=True) - self.log.debug('Publishing message...') - channel.basic_publish(exchange='', routing_key=queue, body=message) - self.log.debug('Closing channel...') - channel.close() - self.log.debug('Closing connection...') - connection.close() - - def get_amqp_message_by_unit(self, sentry_unit, queue="test", - username="testuser1", - password="changeme", - ssl=False, port=None): - """Get an amqp message from a rmq juju unit. - - :param sentry_unit: sentry unit pointer - :param queue: message queue, default to test - :param username: amqp user name, default to testuser1 - :param password: amqp user password - :param ssl: boolean, default to False - :param port: amqp port, use defaults if None - :returns: amqp message body as string. Raise if get fails. - """ - connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, - port=port, - username=username, - password=password) - channel = connection.channel() - method_frame, _, body = channel.basic_get(queue) - - if method_frame: - self.log.debug('Retreived message from {} queue:\n{}'.format(queue, - body)) - channel.basic_ack(method_frame.delivery_tag) - channel.close() - connection.close() - return body - else: - msg = 'No message retrieved.' - amulet.raise_status(amulet.FAIL, msg) - - def validate_memcache(self, sentry_unit, conf, os_release, - earliest_release=5, section='keystone_authtoken', - check_kvs=None): - """Check Memcache is running and is configured to be used - - Example call from Amulet test: - - def test_110_memcache(self): - u.validate_memcache(self.neutron_api_sentry, - '/etc/neutron/neutron.conf', - self._get_openstack_release()) - - :param sentry_unit: sentry unit - :param conf: OpenStack config file to check memcache settings - :param os_release: Current OpenStack release int code - :param earliest_release: Earliest Openstack release to check int code - :param section: OpenStack config file section to check - :param check_kvs: Dict of settings to check in config file - :returns: None - """ - if os_release < earliest_release: - self.log.debug('Skipping memcache checks for deployment. 
{} <' - 'mitaka'.format(os_release)) - return - _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'} - self.log.debug('Checking memcached is running') - ret = self.validate_services_by_name({sentry_unit: ['memcached']}) - if ret: - amulet.raise_status(amulet.FAIL, msg='Memcache running check' - 'failed {}'.format(ret)) - else: - self.log.debug('OK') - self.log.debug('Checking memcache url is configured in {}'.format( - conf)) - if self.validate_config_data(sentry_unit, conf, section, _kvs): - message = "Memcache config error in: {}".format(conf) - amulet.raise_status(amulet.FAIL, msg=message) - else: - self.log.debug('OK') - self.log.debug('Checking memcache configuration in ' - '/etc/memcached.conf') - contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf', - fatal=True) - ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs') - if CompareHostReleases(ubuntu_release) <= 'trusty': - memcache_listen_addr = 'ip6-localhost' - else: - memcache_listen_addr = '::1' - expected = { - '-p': '11211', - '-l': memcache_listen_addr} - found = [] - for key, value in expected.items(): - for line in contents.split('\n'): - if line.startswith(key): - self.log.debug('Checking {} is set to {}'.format( - key, - value)) - assert value == line.split()[-1] - self.log.debug(line.split()[-1]) - found.append(key) - if sorted(found) == sorted(expected.keys()): - self.log.debug('OK') - else: - message = "Memcache config error in: /etc/memcached.conf" - amulet.raise_status(amulet.FAIL, msg=message) diff --git a/tests/charmhelpers/core/__init__.py b/tests/charmhelpers/core/__init__.py deleted file mode 100644 index d7567b863e3a5ad2b7a7f44958b4166e0c3d346b..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/tests/charmhelpers/core/decorators.py b/tests/charmhelpers/core/decorators.py deleted file mode 100644 index 6ad41ee4121f4c0816935f8b16cd84f972aff22b..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/decorators.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Copyright 2014 Canonical Ltd. 
-# -# Authors: -# Edward Hope-Morley <opentastic@gmail.com> -# - -import time - -from charmhelpers.core.hookenv import ( - log, - INFO, -) - - -def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): - """If the decorated function raises exception exc_type, allow num_retries - retry attempts before raise the exception. - """ - def _retry_on_exception_inner_1(f): - def _retry_on_exception_inner_2(*args, **kwargs): - retries = num_retries - multiplier = 1 - while True: - try: - return f(*args, **kwargs) - except exc_type: - if not retries: - raise - - delay = base_delay * multiplier - multiplier += 1 - log("Retrying '%s' %d more times (delay=%s)" % - (f.__name__, retries, delay), level=INFO) - retries -= 1 - if delay: - time.sleep(delay) - - return _retry_on_exception_inner_2 - - return _retry_on_exception_inner_1 diff --git a/tests/charmhelpers/core/files.py b/tests/charmhelpers/core/files.py deleted file mode 100644 index fdd82b75709c13da0d534bf4962822984a3c1867..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/files.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>' - -import os -import subprocess - - -def sed(filename, before, after, flags='g'): - """ - Search and replaces the given pattern on filename. - - :param filename: relative or absolute file path. - :param before: expression to be replaced (see 'man sed') - :param after: expression to replace with (see 'man sed') - :param flags: sed-compatible regex flags in example, to make - the search and replace case insensitive, specify ``flags="i"``. - The ``g`` flag is always specified regardless, so you do not - need to remember to include it when overriding this parameter. - :returns: If the sed command exit code was zero then return, - otherwise raise CalledProcessError. - """ - expression = r's/{0}/{1}/{2}'.format(before, - after, flags) - - return subprocess.check_call(["sed", "-i", "-r", "-e", - expression, - os.path.expanduser(filename)]) diff --git a/tests/charmhelpers/core/fstab.py b/tests/charmhelpers/core/fstab.py deleted file mode 100644 index d9fa9152c765c538adad3fd9bc45a46018c89b72..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/fstab.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import os - -__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' - - -class Fstab(io.FileIO): - """This class extends file in order to implement a file reader/writer - for file `/etc/fstab` - """ - - class Entry(object): - """Entry class represents a non-comment line on the `/etc/fstab` file - """ - def __init__(self, device, mountpoint, filesystem, - options, d=0, p=0): - self.device = device - self.mountpoint = mountpoint - self.filesystem = filesystem - - if not options: - options = "defaults" - - self.options = options - self.d = int(d) - self.p = int(p) - - def __eq__(self, o): - return str(self) == str(o) - - def __str__(self): - return "{} {} {} {} {} {}".format(self.device, - self.mountpoint, - self.filesystem, - self.options, - self.d, - self.p) - - DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') - - def __init__(self, path=None): - if path: - self._path = path - else: - self._path = self.DEFAULT_PATH - super(Fstab, self).__init__(self._path, 'rb+') - - def _hydrate_entry(self, line): - # NOTE: use split with no arguments to split on any - # whitespace including tabs - return Fstab.Entry(*filter( - lambda x: x not in ('', None), - line.strip("\n").split())) - - @property - def entries(self): - self.seek(0) - for line in self.readlines(): - line = line.decode('us-ascii') - try: - if line.strip() and not line.strip().startswith("#"): - yield self._hydrate_entry(line) - except ValueError: - pass - - def get_entry_by_attr(self, attr, value): - for entry in self.entries: - e_attr = getattr(entry, attr) - if e_attr == value: - return entry - return None - - def add_entry(self, entry): - if self.get_entry_by_attr('device', entry.device): - return False - - self.write((str(entry) + '\n').encode('us-ascii')) - self.truncate() - return entry - - def remove_entry(self, entry): - self.seek(0) - - lines = [l.decode('us-ascii') for l in self.readlines()] - - found = False - for index, line in enumerate(lines): - if line.strip() and not line.strip().startswith("#"): - if self._hydrate_entry(line) == entry: - found = True - break - - if not found: - return False - - lines.remove(line) - - self.seek(0) - self.write(''.join(lines).encode('us-ascii')) - self.truncate() - return True - - @classmethod - def remove_by_mountpoint(cls, mountpoint, path=None): - fstab = cls(path=path) - entry = fstab.get_entry_by_attr('mountpoint', mountpoint) - if entry: - return fstab.remove_entry(entry) - return False - - @classmethod - def add(cls, device, mountpoint, filesystem, options=None, path=None): - return cls(path=path).add_entry(Fstab.Entry(device, - mountpoint, filesystem, - options=options)) diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py deleted file mode 100644 index 5a88f798e89546ad9128cb7d4a1cb8bd6e69a644..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/hookenv.py +++ /dev/null @@ -1,1206 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"Interactions with the Juju environment" -# Copyright 2013 Canonical Ltd. -# -# Authors: -# Charm Helpers Developers <juju@lists.ubuntu.com> - -from __future__ import print_function -import copy -from distutils.version import LooseVersion -from functools import wraps -from collections import namedtuple -import glob -import os -import json -import yaml -import subprocess -import sys -import errno -import tempfile -from subprocess import CalledProcessError - -import six -if not six.PY3: - from UserDict import UserDict -else: - from collections import UserDict - -CRITICAL = "CRITICAL" -ERROR = "ERROR" -WARNING = "WARNING" -INFO = "INFO" -DEBUG = "DEBUG" -TRACE = "TRACE" -MARKER = object() - -cache = {} - - -def cached(func): - """Cache return values for multiple executions of func + args - - For example:: - - @cached - def unit_get(attribute): - pass - - unit_get('test') - - will cache the result of unit_get + 'test' for future calls. - """ - @wraps(func) - def wrapper(*args, **kwargs): - global cache - key = str((func, args, kwargs)) - try: - return cache[key] - except KeyError: - pass # Drop out of the exception handler scope. - res = func(*args, **kwargs) - cache[key] = res - return res - wrapper._wrapped = func - return wrapper - - -def flush(key): - """Flushes any entries from function cache where the - key is found in the function+args """ - flush_list = [] - for item in cache: - if key in item: - flush_list.append(item) - for item in flush_list: - del cache[item] - - -def log(message, level=None): - """Write a message to the juju log""" - command = ['juju-log'] - if level: - command += ['-l', level] - if not isinstance(message, six.string_types): - message = repr(message) - command += [message] - # Missing juju-log should not cause failures in unit tests - # Send log output to stderr - try: - subprocess.call(command) - except OSError as e: - if e.errno == errno.ENOENT: - if level: - message = "{}: {}".format(level, message) - message = "juju-log: {}".format(message) - print(message, file=sys.stderr) - else: - raise - - -class Serializable(UserDict): - """Wrapper, an object that can be serialized to yaml or json""" - - def __init__(self, obj): - # wrap the object - UserDict.__init__(self) - self.data = obj - - def __getattr__(self, attr): - # See if this object has attribute. - if attr in ("json", "yaml", "data"): - return self.__dict__[attr] - # Check for attribute in wrapped object. - got = getattr(self.data, attr, MARKER) - if got is not MARKER: - return got - # Proxy to the wrapped object via dict interface. - try: - return self.data[attr] - except KeyError: - raise AttributeError(attr) - - def __getstate__(self): - # Pickle as a standard dictionary. - return self.data - - def __setstate__(self, state): - # Unpickle into our wrapper. 
- self.data = state - - def json(self): - """Serialize the object to json""" - return json.dumps(self.data) - - def yaml(self): - """Serialize the object to yaml""" - return yaml.dump(self.data) - - -def execution_environment(): - """A convenient bundling of the current execution context""" - context = {} - context['conf'] = config() - if relation_id(): - context['reltype'] = relation_type() - context['relid'] = relation_id() - context['rel'] = relation_get() - context['unit'] = local_unit() - context['rels'] = relations() - context['env'] = os.environ - return context - - -def in_relation_hook(): - """Determine whether we're running in a relation hook""" - return 'JUJU_RELATION' in os.environ - - -def relation_type(): - """The scope for the current relation hook""" - return os.environ.get('JUJU_RELATION', None) - - -@cached -def relation_id(relation_name=None, service_or_unit=None): - """The relation ID for the current or a specified relation""" - if not relation_name and not service_or_unit: - return os.environ.get('JUJU_RELATION_ID', None) - elif relation_name and service_or_unit: - service_name = service_or_unit.split('/')[0] - for relid in relation_ids(relation_name): - remote_service = remote_service_name(relid) - if remote_service == service_name: - return relid - else: - raise ValueError('Must specify neither or both of relation_name and service_or_unit') - - -def local_unit(): - """Local unit ID""" - return os.environ['JUJU_UNIT_NAME'] - - -def remote_unit(): - """The remote unit for the current relation hook""" - return os.environ.get('JUJU_REMOTE_UNIT', None) - - -def service_name(): - """The name service group this unit belongs to""" - return local_unit().split('/')[0] - - -def principal_unit(): - """Returns the principal unit of this unit, otherwise None""" - # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT - principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) - # If it's empty, then this unit is the principal - if principal_unit == '': - return os.environ['JUJU_UNIT_NAME'] - elif principal_unit is not None: - return principal_unit - # For Juju 2.1 and below, let's try work out the principle unit by - # the various charms' metadata.yaml. - for reltype in relation_types(): - for rid in relation_ids(reltype): - for unit in related_units(rid): - md = _metadata_unit(unit) - if not md: - continue - subordinate = md.pop('subordinate', None) - if not subordinate: - return unit - return None - - -@cached -def remote_service_name(relid=None): - """The remote service name for a given relation-id (or the current relation)""" - if relid is None: - unit = remote_unit() - else: - units = related_units(relid) - unit = units[0] if units else None - return unit.split('/')[0] if unit else None - - -def hook_name(): - """The name of the currently executing hook""" - return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) - - -class Config(dict): - """A dictionary representation of the charm's config.yaml, with some - extra features: - - - See which values in the dictionary have changed since the previous hook. - - For values that have changed, see what the previous value was. - - Store arbitrary data for use in a later hook. - - NOTE: Do not instantiate this object directly - instead call - ``hookenv.config()``, which will return an instance of :class:`Config`. 
- - Example usage:: - - >>> # inside a hook - >>> from charmhelpers.core import hookenv - >>> config = hookenv.config() - >>> config['foo'] - 'bar' - >>> # store a new key/value for later use - >>> config['mykey'] = 'myval' - - - >>> # user runs `juju set mycharm foo=baz` - >>> # now we're inside subsequent config-changed hook - >>> config = hookenv.config() - >>> config['foo'] - 'baz' - >>> # test to see if this val has changed since last hook - >>> config.changed('foo') - True - >>> # what was the previous value? - >>> config.previous('foo') - 'bar' - >>> # keys/values that we add are preserved across hooks - >>> config['mykey'] - 'myval' - - """ - CONFIG_FILE_NAME = '.juju-persistent-config' - - def __init__(self, *args, **kw): - super(Config, self).__init__(*args, **kw) - self.implicit_save = True - self._prev_dict = None - self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path): - self.load_previous() - atexit(self._implicit_save) - - def load_previous(self, path=None): - """Load previous copy of config from disk. - - In normal usage you don't need to call this method directly - it - is called automatically at object initialization. - - :param path: - - File path from which to load the previous config. If `None`, - config is loaded from the default location. If `path` is - specified, subsequent `save()` calls will write to the same - path. - - """ - self.path = path or self.path - with open(self.path) as f: - self._prev_dict = json.load(f) - for k, v in copy.deepcopy(self._prev_dict).items(): - if k not in self: - self[k] = v - - def changed(self, key): - """Return True if the current value for this key is different from - the previous value. - - """ - if self._prev_dict is None: - return True - return self.previous(key) != self.get(key) - - def previous(self, key): - """Return previous value for this key, or None if there - is no previous value. - - """ - if self._prev_dict: - return self._prev_dict.get(key) - return None - - def save(self): - """Save this config to disk. - - If the charm is using the :mod:`Services Framework <services.base>` - or :meth:'@hook <Hooks.hook>' decorator, this - is called automatically at the end of successful hook execution. - Otherwise, it should be called directly by user code. - - To disable automatic saves, set ``implicit_save=False`` on this - instance. 
- - """ - with open(self.path, 'w') as f: - json.dump(self, f) - - def _implicit_save(self): - if self.implicit_save: - self.save() - - -@cached -def config(scope=None): - """Juju charm configuration""" - config_cmd_line = ['config-get'] - if scope is not None: - config_cmd_line.append(scope) - else: - config_cmd_line.append('--all') - config_cmd_line.append('--format=json') - try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) - if scope is not None: - return config_data - return Config(config_data) - except ValueError: - return None - - -@cached -def relation_get(attribute=None, unit=None, rid=None): - """Get relation information""" - _args = ['relation-get', '--format=json'] - if rid: - _args.append('-r') - _args.append(rid) - _args.append(attribute or '-') - if unit: - _args.append(unit) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except CalledProcessError as e: - if e.returncode == 2: - return None - raise - - -def relation_set(relation_id=None, relation_settings=None, **kwargs): - """Set relation information for the current unit""" - relation_settings = relation_settings if relation_settings else {} - relation_cmd_line = ['relation-set'] - accepts_file = "--file" in subprocess.check_output( - relation_cmd_line + ["--help"], universal_newlines=True) - if relation_id is not None: - relation_cmd_line.extend(('-r', relation_id)) - settings = relation_settings.copy() - settings.update(kwargs) - for key, value in settings.items(): - # Force value to be a string: it always should, but some call - # sites pass in things like dicts or numbers. - if value is not None: - settings[key] = "{}".format(value) - if accepts_file: - # --file was introduced in Juju 1.23.2. Use it by default if - # available, since otherwise we'll break if the relation data is - # too big. Ideally we should tell relation-set to read the data from - # stdin, but that feature is broken in 1.23.2: Bug #1454678. 
- with tempfile.NamedTemporaryFile(delete=False) as settings_file: - settings_file.write(yaml.safe_dump(settings).encode("utf-8")) - subprocess.check_call( - relation_cmd_line + ["--file", settings_file.name]) - os.remove(settings_file.name) - else: - for key, value in settings.items(): - if value is None: - relation_cmd_line.append('{}='.format(key)) - else: - relation_cmd_line.append('{}={}'.format(key, value)) - subprocess.check_call(relation_cmd_line) - # Flush cache of any relation-gets for local unit - flush(local_unit()) - - -def relation_clear(r_id=None): - ''' Clears any relation data already set on relation r_id ''' - settings = relation_get(rid=r_id, - unit=local_unit()) - for setting in settings: - if setting not in ['public-address', 'private-address']: - settings[setting] = None - relation_set(relation_id=r_id, - **settings) - - -@cached -def relation_ids(reltype=None): - """A list of relation_ids""" - reltype = reltype or relation_type() - relid_cmd_line = ['relation-ids', '--format=json'] - if reltype is not None: - relid_cmd_line.append(reltype) - return json.loads( - subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] - return [] - - -@cached -def related_units(relid=None): - """A list of related units""" - relid = relid or relation_id() - units_cmd_line = ['relation-list', '--format=json'] - if relid is not None: - units_cmd_line.extend(('-r', relid)) - return json.loads( - subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] - - -@cached -def relation_for_unit(unit=None, rid=None): - """Get the json represenation of a unit's relation""" - unit = unit or remote_unit() - relation = relation_get(unit=unit, rid=rid) - for key in relation: - if key.endswith('-list'): - relation[key] = relation[key].split() - relation['__unit__'] = unit - return relation - - -@cached -def relations_for_id(relid=None): - """Get relations of a specific relation ID""" - relation_data = [] - relid = relid or relation_ids() - for unit in related_units(relid): - unit_data = relation_for_unit(unit, relid) - unit_data['__relid__'] = relid - relation_data.append(unit_data) - return relation_data - - -@cached -def relations_of_type(reltype=None): - """Get relations of a specific type""" - relation_data = [] - reltype = reltype or relation_type() - for relid in relation_ids(reltype): - for relation in relations_for_id(relid): - relation['__relid__'] = relid - relation_data.append(relation) - return relation_data - - -@cached -def metadata(): - """Get the current charm metadata.yaml contents as a python object""" - with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: - return yaml.safe_load(md) - - -def _metadata_unit(unit): - """Given the name of a unit (e.g. apache2/0), get the unit charm's - metadata.yaml. Very similar to metadata() but allows us to inspect - other units. Unit needs to be co-located, such as a subordinate or - principal/primary. - - :returns: metadata.yaml as a python object. 
- - """ - basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) - unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') - if not os.path.exists(joineddir): - return None - with open(joineddir) as md: - return yaml.safe_load(md) - - -@cached -def relation_types(): - """Get a list of relation types supported by this charm""" - rel_types = [] - md = metadata() - for key in ('provides', 'requires', 'peers'): - section = md.get(key) - if section: - rel_types.extend(section.keys()) - return rel_types - - -@cached -def peer_relation_id(): - '''Get the peers relation id if a peers relation has been joined, else None.''' - md = metadata() - section = md.get('peers') - if section: - for key in section: - relids = relation_ids(key) - if relids: - return relids[0] - return None - - -@cached -def relation_to_interface(relation_name): - """ - Given the name of a relation, return the interface that relation uses. - - :returns: The interface name, or ``None``. - """ - return relation_to_role_and_interface(relation_name)[1] - - -@cached -def relation_to_role_and_interface(relation_name): - """ - Given the name of a relation, return the role and the name of the interface - that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). - - :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. - """ - _metadata = metadata() - for role in ('provides', 'requires', 'peers'): - interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') - if interface: - return role, interface - return None, None - - -@cached -def role_and_interface_to_relations(role, interface_name): - """ - Given a role and interface name, return a list of relation names for the - current charm that use that interface under that role (where role is one - of ``provides``, ``requires``, or ``peers``). - - :returns: A list of relation names. - """ - _metadata = metadata() - results = [] - for relation_name, relation in _metadata.get(role, {}).items(): - if relation['interface'] == interface_name: - results.append(relation_name) - return results - - -@cached -def interface_to_relations(interface_name): - """ - Given an interface, return a list of relation names for the current - charm that use that interface. - - :returns: A list of relation names. - """ - results = [] - for role in ('provides', 'requires', 'peers'): - results.extend(role_and_interface_to_relations(role, interface_name)) - return results - - -@cached -def charm_name(): - """Get the name of the current charm as is specified on metadata.yaml""" - return metadata().get('name') - - -@cached -def relations(): - """Get a nested dictionary of relation data for all related units""" - rels = {} - for reltype in relation_types(): - relids = {} - for relid in relation_ids(reltype): - units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} - for unit in related_units(relid): - reldata = relation_get(unit=unit, rid=relid) - units[unit] = reldata - relids[relid] = units - rels[reltype] = relids - return rels - - -@cached -def is_relation_made(relation, keys='private-address'): - ''' - Determine whether a relation is established by checking for - presence of key(s). 
If a list of keys is provided, they - must all be present for the relation to be identified as made - ''' - if isinstance(keys, str): - keys = [keys] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - context = {} - for k in keys: - context[k] = relation_get(k, rid=r_id, - unit=unit) - if None not in context.values(): - return True - return False - - -def _port_op(op_name, port, protocol="TCP"): - """Open or close a service network port""" - _args = [op_name] - icmp = protocol.upper() == "ICMP" - if icmp: - _args.append(protocol) - else: - _args.append('{}/{}'.format(port, protocol)) - try: - subprocess.check_call(_args) - except subprocess.CalledProcessError: - # Older Juju pre 2.3 doesn't support ICMP - # so treat it as a no-op if it fails. - if not icmp: - raise - - -def open_port(port, protocol="TCP"): - """Open a service network port""" - _port_op('open-port', port, protocol) - - -def close_port(port, protocol="TCP"): - """Close a service network port""" - _port_op('close-port', port, protocol) - - -def open_ports(start, end, protocol="TCP"): - """Opens a range of service network ports""" - _args = ['open-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def close_ports(start, end, protocol="TCP"): - """Close a range of service network ports""" - _args = ['close-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def opened_ports(): - """Get the opened ports - - *Note that this will only show ports opened in a previous hook* - - :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` - """ - _args = ['opened-ports', '--format=json'] - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - - -@cached -def unit_get(attribute): - """Get the unit ID for the remote unit""" - _args = ['unit-get', '--format=json', attribute] - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -def unit_public_ip(): - """Get this unit's public IP address""" - return unit_get('public-address') - - -def unit_private_ip(): - """Get this unit's private IP address""" - return unit_get('private-address') - - -@cached -def storage_get(attribute=None, storage_id=None): - """Get storage attributes""" - _args = ['storage-get', '--format=json'] - if storage_id: - _args.extend(('-s', storage_id)) - if attribute: - _args.append(attribute) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -@cached -def storage_list(storage_name=None): - """List the storage IDs for the unit""" - _args = ['storage-list', '--format=json'] - if storage_name: - _args.append(storage_name) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except OSError as e: - import errno - if e.errno == errno.ENOENT: - # storage-list does not exist - return [] - raise - - -class UnregisteredHookError(Exception): - """Raised when an undefined hook is called""" - pass - - -class Hooks(object): - """A convenient handler for hook functions. 
- - Example:: - - hooks = Hooks() - - # register a hook, taking its name from the function name - @hooks.hook() - def install(): - pass # your code here - - # register a hook, providing a custom hook name - @hooks.hook("config-changed") - def config_changed(): - pass # your code here - - if __name__ == "__main__": - # execute a hook based on the name the program is called by - hooks.execute(sys.argv) - """ - - def __init__(self, config_save=None): - super(Hooks, self).__init__() - self._hooks = {} - - # For unknown reasons, we allow the Hooks constructor to override - # config().implicit_save. - if config_save is not None: - config().implicit_save = config_save - - def register(self, name, function): - """Register a hook""" - self._hooks[name] = function - - def execute(self, args): - """Execute a registered hook based on args[0]""" - _run_atstart() - hook_name = os.path.basename(args[0]) - if hook_name in self._hooks: - try: - self._hooks[hook_name]() - except SystemExit as x: - if x.code is None or x.code == 0: - _run_atexit() - raise - _run_atexit() - else: - raise UnregisteredHookError(hook_name) - - def hook(self, *hook_names): - """Decorator, registering them as hooks""" - def wrapper(decorated): - for hook_name in hook_names: - self.register(hook_name, decorated) - else: - self.register(decorated.__name__, decorated) - if '_' in decorated.__name__: - self.register( - decorated.__name__.replace('_', '-'), decorated) - return decorated - return wrapper - - -def charm_dir(): - """Return the root directory of the current charm""" - d = os.environ.get('JUJU_CHARM_DIR') - if d is not None: - return d - return os.environ.get('CHARM_DIR') - - -@cached -def action_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" - cmd = ['action-get'] - if key is not None: - cmd.append(key) - cmd.append('--format=json') - action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) - return action_data - - -def action_set(values): - """Sets the values to be returned after the action finishes""" - cmd = ['action-set'] - for k, v in list(values.items()): - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -def action_fail(message): - """Sets the action status to failed and sets the error message. - - The results set by action_set are preserved.""" - subprocess.check_call(['action-fail', message]) - - -def action_name(): - """Get the name of the currently executing action.""" - return os.environ.get('JUJU_ACTION_NAME') - - -def action_uuid(): - """Get the UUID of the currently executing action.""" - return os.environ.get('JUJU_ACTION_UUID') - - -def action_tag(): - """Get the tag for the currently executing action.""" - return os.environ.get('JUJU_ACTION_TAG') - - -def status_set(workload_state, message): - """Set the workload state with a message - - Use status-set to set the workload state with a message which is visible - to the user via juju status. If the status-set command is not found then - assume this is juju < 1.23 and juju-log the message unstead. - - workload_state -- valid juju workload state. 
- message -- status update message - """ - valid_states = ['maintenance', 'blocked', 'waiting', 'active'] - if workload_state not in valid_states: - raise ValueError( - '{!r} is not a valid workload state'.format(workload_state) - ) - cmd = ['status-set', workload_state, message] - try: - ret = subprocess.call(cmd) - if ret == 0: - return - except OSError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'status-set failed: {} {}'.format(workload_state, - message) - log(log_message, level='INFO') - - -def status_get(): - """Retrieve the previously set juju workload state and message - - If the status-get command is not found then assume this is juju < 1.23 and - return 'unknown', "" - - """ - cmd = ['status-get', "--format=json", "--include-data"] - try: - raw_status = subprocess.check_output(cmd) - except OSError as e: - if e.errno == errno.ENOENT: - return ('unknown', "") - else: - raise - else: - status = json.loads(raw_status.decode("UTF-8")) - return (status["status"], status["message"]) - - -def translate_exc(from_exc, to_exc): - def inner_translate_exc1(f): - @wraps(f) - def inner_translate_exc2(*args, **kwargs): - try: - return f(*args, **kwargs) - except from_exc: - raise to_exc - - return inner_translate_exc2 - - return inner_translate_exc1 - - -def application_version_set(version): - """Charm authors may trigger this command from any hook to output what - version of the application is running. This could be a package version, - for instance postgres version 9.5. It could also be a build number or - version control revision identifier, for instance git sha 6fb7ba68. """ - - cmd = ['application-version-set'] - cmd.append(version) - try: - subprocess.check_call(cmd) - except OSError: - log("Application Version: {}".format(version)) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def is_leader(): - """Does the current unit hold the juju leadership - - Uses juju to determine whether the current unit is the leader of its peers - """ - cmd = ['is-leader', '--format=json'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_get(attribute=None): - """Juju leader get value(s)""" - cmd = ['leader-get', '--format=json'] + [attribute or '-'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_set(settings=None, **kwargs): - """Juju leader set value(s)""" - # Don't log secrets. - # log("Juju leader-set '%s'" % (settings), level=DEBUG) - cmd = ['leader-set'] - settings = settings or {} - settings.update(kwargs) - for k, v in settings.items(): - if v is None: - cmd.append('{}='.format(k)) - else: - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_register(ptype, klass, pid): - """ is used while a hook is running to let Juju know that a - payload has been started.""" - cmd = ['payload-register'] - for x in [ptype, klass, pid]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_unregister(klass, pid): - """ is used while a hook is running to let Juju know - that a payload has been manually stopped. 
The <class> and <id> provided - must match a payload that has been previously registered with juju using - payload-register.""" - cmd = ['payload-unregister'] - for x in [klass, pid]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_status_set(klass, pid, status): - """is used to update the current status of a registered payload. - The <class> and <id> provided must match a payload that has been previously - registered with juju using payload-register. The <status> must be one of the - follow: starting, started, stopping, stopped""" - cmd = ['payload-status-set'] - for x in [klass, pid, status]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def resource_get(name): - """used to fetch the resource path of the given name. - - <name> must match a name of defined resource in metadata.yaml - - returns either a path or False if resource not available - """ - if not name: - return False - - cmd = ['resource-get', name] - try: - return subprocess.check_output(cmd).decode('UTF-8') - except subprocess.CalledProcessError: - return False - - -@cached -def juju_version(): - """Full version string (eg. '1.23.3.1-trusty-amd64')""" - # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 - jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] - return subprocess.check_output([jujud, 'version'], - universal_newlines=True).strip() - - -@cached -def has_juju_version(minimum_version): - """Return True if the Juju version is at least the provided version""" - return LooseVersion(juju_version()) >= LooseVersion(minimum_version) - - -_atexit = [] -_atstart = [] - - -def atstart(callback, *args, **kwargs): - '''Schedule a callback to run before the main hook. - - Callbacks are run in the order they were added. - - This is useful for modules and classes to perform initialization - and inject behavior. In particular: - - - Run common code before all of your hooks, such as logging - the hook name or interesting relation data. - - Defer object or module initialization that requires a hook - context until we know there actually is a hook context, - making testing easier. - - Rather than requiring charm authors to include boilerplate to - invoke your helper's behavior, have it run automatically if - your object is instantiated or module imported. - - This is not at all useful after your hook framework as been launched. - ''' - global _atstart - _atstart.append((callback, args, kwargs)) - - -def atexit(callback, *args, **kwargs): - '''Schedule a callback to run on successful hook completion. - - Callbacks are run in the reverse order that they were added.''' - _atexit.append((callback, args, kwargs)) - - -def _run_atstart(): - '''Hook frameworks must invoke this before running the main hook body.''' - global _atstart - for callback, args, kwargs in _atstart: - callback(*args, **kwargs) - del _atstart[:] - - -def _run_atexit(): - '''Hook frameworks must invoke this after the main hook body has - successfully completed. Do not invoke it if the hook fails.''' - global _atexit - for callback, args, kwargs in reversed(_atexit): - callback(*args, **kwargs) - del _atexit[:] - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def network_get_primary_address(binding): - ''' - Retrieve the primary network address for a named binding - - :param binding: string. The name of a relation of extra-binding - :return: string. 
The primary IP address for the named binding - :raise: NotImplementedError if run on Juju < 2.0 - ''' - cmd = ['network-get', '--primary-address', binding] - return subprocess.check_output(cmd).decode('UTF-8').strip() - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def network_get(endpoint, relation_id=None): - """ - Retrieve the network details for a relation endpoint - - :param endpoint: string. The name of a relation endpoint - :param relation_id: int. The ID of the relation for the current context. - :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if run on Juju < 2.1 - """ - cmd = ['network-get', endpoint, '--format', 'yaml'] - if relation_id: - cmd.append('-r') - cmd.append(relation_id) - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - # Early versions of Juju 2.0.x required the --primary-address argument. - # We catch that condition here and raise NotImplementedError since - # the requested semantics are not available - the caller can then - # use the network_get_primary_address() method instead. - if '--primary-address is currently required' in e.output.decode('UTF-8'): - raise NotImplementedError - raise - return yaml.safe_load(response) - - -def add_metric(*args, **kwargs): - """Add metric values. Values may be expressed with keyword arguments. For - metric names containing dashes, these may be expressed as one or more - 'key=value' positional arguments. May only be called from the collect-metrics - hook.""" - _args = ['add-metric'] - _kvpairs = [] - _kvpairs.extend(args) - _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) - _args.extend(sorted(_kvpairs)) - try: - subprocess.check_call(_args) - return - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) - log(log_message, level='INFO') - - -def meter_status(): - """Get the meter status, if running in the meter-status-changed hook.""" - return os.environ.get('JUJU_METER_STATUS') - - -def meter_info(): - """Get the meter status information, if running in the meter-status-changed - hook.""" - return os.environ.get('JUJU_METER_INFO') - - -def iter_units_for_relation_name(relation_name): - """Iterate through all units in a relation - - Generator that iterates through all the units in a relation and yields - a named tuple with rid and unit field names. - - Usage: - data = [(u.rid, u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param relation_name: string relation name - :yield: Named Tuple with rid and unit field names - """ - RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') - for rid in relation_ids(relation_name): - for unit in related_units(rid): - yield RelatedUnit(rid, unit) - - -def ingress_address(rid=None, unit=None): - """ - Retrieve the ingress-address from a relation when available. Otherwise, - return the private-address. This function is to be used on the consuming - side of the relation. 
- - Usage: - addresses = [ingress_address(rid=u.rid, unit=u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param rid: string relation id - :param unit: string unit name - :side effect: calls relation_get - :return: string IP address - """ - settings = relation_get(rid=rid, unit=unit) - return (settings.get('ingress-address') or - settings.get('private-address')) diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py deleted file mode 100644 index 5cc5c86b701fc5375f387eb01a0d2b76c184c263..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/host.py +++ /dev/null @@ -1,1019 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tools for working with the host system""" -# Copyright 2012 Canonical Ltd. -# -# Authors: -# Nick Moffitt <nick.moffitt@canonical.com> -# Matthew Wedgwood <matthew.wedgwood@canonical.com> - -import os -import re -import pwd -import glob -import grp -import random -import string -import subprocess -import hashlib -import functools -import itertools -import six - -from contextlib import contextmanager -from collections import OrderedDict -from .hookenv import log, DEBUG, local_unit -from .fstab import Fstab -from charmhelpers.osplatform import get_platform - -__platform__ = get_platform() -if __platform__ == "ubuntu": - from charmhelpers.core.host_factory.ubuntu import ( - service_available, - add_new_group, - lsb_release, - cmp_pkgrevno, - CompareHostReleases, - ) # flake8: noqa -- ignore F401 for this import -elif __platform__ == "centos": - from charmhelpers.core.host_factory.centos import ( - service_available, - add_new_group, - lsb_release, - cmp_pkgrevno, - CompareHostReleases, - ) # flake8: noqa -- ignore F401 for this import - -UPDATEDB_PATH = '/etc/updatedb.conf' - -def service_start(service_name, **kwargs): - """Start a system service. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be reloaded. The follow- - ing example stops the ceph-osd service for instance id=4: - - service_stop('ceph-osd', id=4) - - :param service_name: the name of the service to stop - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - return service('start', service_name, **kwargs) - - -def service_stop(service_name, **kwargs): - """Stop a system service. 
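The `ingress_address()` helper removed just above prefers `ingress-address` and falls back to `private-address` on the consuming side of a relation. A sketch of that preference on plain dictionaries; `pick_ingress_address` and the sample settings are illustrative, since the real helper reads its data via `relation-get`:

```python
def pick_ingress_address(settings):
    """Prefer 'ingress-address', fall back to 'private-address',
    mirroring the consuming-side helper removed above."""
    return settings.get('ingress-address') or settings.get('private-address')


# Illustrative relation settings only; real data comes from relation-get.
units = [
    {'ingress-address': '10.0.0.4', 'private-address': '192.168.1.4'},
    {'private-address': '192.168.1.5'},
]
print([pick_ingress_address(u) for u in units])
# -> ['10.0.0.4', '192.168.1.5']
```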
- - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be reloaded. The follow- - ing example stops the ceph-osd service for instance id=4: - - service_stop('ceph-osd', id=4) - - :param service_name: the name of the service to stop - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - return service('stop', service_name, **kwargs) - - -def service_restart(service_name, **kwargs): - """Restart a system service. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be restarted. The follow- - ing example restarts the ceph-osd service for instance id=4: - - service_restart('ceph-osd', id=4) - - :param service_name: the name of the service to restart - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for init systems not allowing additional - parameters via the commandline (systemd). - """ - return service('restart', service_name) - - -def service_reload(service_name, restart_on_failure=False, **kwargs): - """Reload a system service, optionally falling back to restart if - reload fails. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be reloaded. The follow- - ing example restarts the ceph-osd service for instance id=4: - - service_reload('ceph-osd', id=4) - - :param service_name: the name of the service to reload - :param restart_on_failure: boolean indicating whether to fallback to a - restart if the reload fails. - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. 
kwargs - are ignored for init systems not allowing additional - parameters via the commandline (systemd). - """ - service_result = service('reload', service_name, **kwargs) - if not service_result and restart_on_failure: - service_result = service('restart', service_name, **kwargs) - return service_result - - -def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", - **kwargs): - """Pause a system service. - - Stop it, and prevent it from starting again at boot. - - :param service_name: the name of the service to pause - :param init_dir: path to the upstart init directory - :param initd_dir: path to the sysv init directory - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for init systems which do not support - key=value arguments via the commandline. - """ - stopped = True - if service_running(service_name, **kwargs): - stopped = service_stop(service_name, **kwargs) - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): - service('disable', service_name) - service('mask', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "disable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - return stopped - - -def service_resume(service_name, init_dir="/etc/init", - initd_dir="/etc/init.d", **kwargs): - """Resume a system service. - - Reenable starting again at boot. Start the service. - - :param service_name: the name of the service to resume - :param init_dir: the path to the init dir - :param initd dir: the path to the initd dir - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): - service('unmask', service_name) - service('enable', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "enable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - started = service_running(service_name, **kwargs) - - if not started: - started = service_start(service_name, **kwargs) - return started - - -def service(action, service_name, **kwargs): - """Control a system service. - - :param action: the action to take on the service - :param service_name: the name of the service to perform th action on - :param **kwargs: additional params to be passed to the service command in - the form of key=value. 
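The `service()` helper whose docstring ends here chooses between a `systemctl` and a SysV `service` invocation and, for non-systemd init systems, forwards extra key=value instance arguments. A sketch that only assembles the command list so it can run anywhere; `build_service_cmd` and the explicit `use_systemd` flag are illustrative, whereas the removed helper detects systemd by checking `/run/systemd/system` and executes the command with `subprocess`:

```python
def build_service_cmd(action, service_name, use_systemd, **kwargs):
    """Assemble the command list the removed service() helper would run."""
    if use_systemd:
        # systemd units do not take per-instance key=value arguments here.
        cmd = ['systemctl', action, service_name]
    else:
        cmd = ['service', service_name, action]
        # Extra instance arguments (e.g. id=4 for ceph-osd under upstart).
        cmd.extend('{}={}'.format(key, value) for key, value in kwargs.items())
    return cmd


print(build_service_cmd('restart', 'ceph-osd', use_systemd=False, id=4))
# -> ['service', 'ceph-osd', 'restart', 'id=4']
```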
- """ - if init_is_systemd(): - cmd = ['systemctl', action, service_name] - else: - cmd = ['service', service_name, action] - for key, value in six.iteritems(kwargs): - parameter = '%s=%s' % (key, value) - cmd.append(parameter) - return subprocess.call(cmd) == 0 - - -_UPSTART_CONF = "/etc/init/{}.conf" -_INIT_D_CONF = "/etc/init.d/{}" - - -def service_running(service_name, **kwargs): - """Determine whether a system service is running. - - :param service_name: the name of the service - :param **kwargs: additional args to pass to the service command. This is - used to pass additional key=value arguments to the - service command line for managing specific instance - units (e.g. service ceph-osd status id=2). The kwargs - are ignored in systemd services. - """ - if init_is_systemd(): - return service('is-active', service_name) - else: - if os.path.exists(_UPSTART_CONF.format(service_name)): - try: - cmd = ['status', service_name] - for key, value in six.iteritems(kwargs): - parameter = '%s=%s' % (key, value) - cmd.append(parameter) - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False - else: - # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running - # 'start/running' - if ("start/running" in output or - "is running" in output or - "up and running" in output): - return True - elif os.path.exists(_INIT_D_CONF.format(service_name)): - # Check System V scripts init script return codes - return service('status', service_name) - return False - - -SYSTEMD_SYSTEM = '/run/systemd/system' - - -def init_is_systemd(): - """Return True if the host system uses systemd, False otherwise.""" - if lsb_release()['DISTRIB_CODENAME'] == 'trusty': - return False - return os.path.isdir(SYSTEMD_SYSTEM) - - -def adduser(username, password=None, shell='/bin/bash', - system_user=False, primary_group=None, - secondary_groups=None, uid=None, home_dir=None): - """Add a user to the system. - - Will log but otherwise succeed if the user already exists. 
- - :param str username: Username to create - :param str password: Password for user; if ``None``, create a system user - :param str shell: The default shell for the user - :param bool system_user: Whether to create a login or system user - :param str primary_group: Primary group for user; defaults to username - :param list secondary_groups: Optional list of additional groups - :param int uid: UID for user being created - :param str home_dir: Home directory for user - - :returns: The password database entry struct, as returned by `pwd.getpwnam` - """ - try: - user_info = pwd.getpwnam(username) - log('user {0} already exists!'.format(username)) - if uid: - user_info = pwd.getpwuid(int(uid)) - log('user with uid {0} already exists!'.format(uid)) - except KeyError: - log('creating user {0}'.format(username)) - cmd = ['useradd'] - if uid: - cmd.extend(['--uid', str(uid)]) - if home_dir: - cmd.extend(['--home', str(home_dir)]) - if system_user or password is None: - cmd.append('--system') - else: - cmd.extend([ - '--create-home', - '--shell', shell, - '--password', password, - ]) - if not primary_group: - try: - grp.getgrnam(username) - primary_group = username # avoid "group exists" error - except KeyError: - pass - if primary_group: - cmd.extend(['-g', primary_group]) - if secondary_groups: - cmd.extend(['-G', ','.join(secondary_groups)]) - cmd.append(username) - subprocess.check_call(cmd) - user_info = pwd.getpwnam(username) - return user_info - - -def user_exists(username): - """Check if a user exists""" - try: - pwd.getpwnam(username) - user_exists = True - except KeyError: - user_exists = False - return user_exists - - -def uid_exists(uid): - """Check if a uid exists""" - try: - pwd.getpwuid(uid) - uid_exists = True - except KeyError: - uid_exists = False - return uid_exists - - -def group_exists(groupname): - """Check if a group exists""" - try: - grp.getgrnam(groupname) - group_exists = True - except KeyError: - group_exists = False - return group_exists - - -def gid_exists(gid): - """Check if a gid exists""" - try: - grp.getgrgid(gid) - gid_exists = True - except KeyError: - gid_exists = False - return gid_exists - - -def add_group(group_name, system_group=False, gid=None): - """Add a group to the system - - Will log but otherwise succeed if the group already exists. - - :param str group_name: group to create - :param bool system_group: Create system group - :param int gid: GID for user being created - - :returns: The password database entry struct, as returned by `grp.getgrnam` - """ - try: - group_info = grp.getgrnam(group_name) - log('group {0} already exists!'.format(group_name)) - if gid: - group_info = grp.getgrgid(gid) - log('group with gid {0} already exists!'.format(gid)) - except KeyError: - log('creating group {0}'.format(group_name)) - add_new_group(group_name, system_group, gid) - group_info = grp.getgrnam(group_name) - return group_info - - -def add_user_to_group(username, group): - """Add a user to a group""" - cmd = ['gpasswd', '-a', username, group] - log("Adding user {} to group {}".format(username, group)) - subprocess.check_call(cmd) - - -def chage(username, lastday=None, expiredate=None, inactive=None, - mindays=None, maxdays=None, root=None, warndays=None): - """Change user password expiry information - - :param str username: User to update - :param str lastday: Set when password was changed in YYYY-MM-DD format - :param str expiredate: Set when user's account will no longer be - accessible in YYYY-MM-DD format. - -1 will remove an account expiration date. 
- :param str inactive: Set the number of days of inactivity after a password - has expired before the account is locked. - -1 will remove an account's inactivity. - :param str mindays: Set the minimum number of days between password - changes to MIN_DAYS. - 0 indicates the password can be changed anytime. - :param str maxdays: Set the maximum number of days during which a - password is valid. - -1 as MAX_DAYS will remove checking maxdays - :param str root: Apply changes in the CHROOT_DIR directory - :param str warndays: Set the number of days of warning before a password - change is required - :raises subprocess.CalledProcessError: if call to chage fails - """ - cmd = ['chage'] - if root: - cmd.extend(['--root', root]) - if lastday: - cmd.extend(['--lastday', lastday]) - if expiredate: - cmd.extend(['--expiredate', expiredate]) - if inactive: - cmd.extend(['--inactive', inactive]) - if mindays: - cmd.extend(['--mindays', mindays]) - if maxdays: - cmd.extend(['--maxdays', maxdays]) - if warndays: - cmd.extend(['--warndays', warndays]) - cmd.append(username) - subprocess.check_call(cmd) - -remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') - -def rsync(from_path, to_path, flags='-r', options=None, timeout=None): - """Replicate the contents of a path""" - options = options or ['--delete', '--executability'] - cmd = ['/usr/bin/rsync', flags] - if timeout: - cmd = ['timeout', str(timeout)] + cmd - cmd.extend(options) - cmd.append(from_path) - cmd.append(to_path) - log(" ".join(cmd)) - return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() - - -def symlink(source, destination): - """Create a symbolic link""" - log("Symlinking {} as {}".format(source, destination)) - cmd = [ - 'ln', - '-sf', - source, - destination, - ] - subprocess.check_call(cmd) - - -def mkdir(path, owner='root', group='root', perms=0o555, force=False): - """Create a directory""" - log("Making dir {} {}:{} {:o}".format(path, owner, group, - perms)) - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - realpath = os.path.abspath(path) - path_exists = os.path.exists(realpath) - if path_exists and force: - if not os.path.isdir(realpath): - log("Removing non-directory file {} prior to mkdir()".format(path)) - os.unlink(realpath) - os.makedirs(realpath, perms) - elif not path_exists: - os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) - os.chmod(realpath, perms) - - -def write_file(path, content, owner='root', group='root', perms=0o444): - """Create or overwrite a file with the contents of a byte string.""" - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - # lets see if we can grab the file and compare the context, to avoid doing - # a write. - existing_content = None - existing_uid, existing_gid = None, None - try: - with open(path, 'rb') as target: - existing_content = target.read() - stat = os.stat(path) - existing_uid, existing_gid = stat.st_uid, stat.st_gid - except: - pass - if content != existing_content: - log("Writing file {} {}:{} {:o}".format(path, owner, group, perms), - level=DEBUG) - with open(path, 'wb') as target: - os.fchown(target.fileno(), uid, gid) - os.fchmod(target.fileno(), perms) - target.write(content) - return - # the contents were the same, but we might still need to change the - # ownership. 
- if existing_uid != uid: - log("Changing uid on already existing content: {} -> {}" - .format(existing_uid, uid), level=DEBUG) - os.chown(path, uid, -1) - if existing_gid != gid: - log("Changing gid on already existing content: {} -> {}" - .format(existing_gid, gid), level=DEBUG) - os.chown(path, -1, gid) - - -def fstab_remove(mp): - """Remove the given mountpoint entry from /etc/fstab""" - return Fstab.remove_by_mountpoint(mp) - - -def fstab_add(dev, mp, fs, options=None): - """Adds the given device entry to the /etc/fstab file""" - return Fstab.add(dev, mp, fs, options=options) - - -def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): - """Mount a filesystem at a particular mountpoint""" - cmd_args = ['mount'] - if options is not None: - cmd_args.extend(['-o', options]) - cmd_args.extend([device, mountpoint]) - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) - return False - - if persist: - return fstab_add(device, mountpoint, filesystem, options=options) - return True - - -def umount(mountpoint, persist=False): - """Unmount a filesystem""" - cmd_args = ['umount', mountpoint] - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error unmounting {}\n{}'.format(mountpoint, e.output)) - return False - - if persist: - return fstab_remove(mountpoint) - return True - - -def mounts(): - """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" - with open('/proc/mounts') as f: - # [['/mount/point','/dev/path'],[...]] - system_mounts = [m[1::-1] for m in [l.strip().split() - for l in f.readlines()]] - return system_mounts - - -def fstab_mount(mountpoint): - """Mount filesystem using fstab""" - cmd_args = ['mount', mountpoint] - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error unmounting {}\n{}'.format(mountpoint, e.output)) - return False - return True - - -def file_hash(path, hash_type='md5'): - """Generate a hash checksum of the contents of 'path' or None if not found. - - :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. - """ - if os.path.exists(path): - h = getattr(hashlib, hash_type)() - with open(path, 'rb') as source: - h.update(source.read()) - return h.hexdigest() - else: - return None - - -def path_hash(path): - """Generate a hash checksum of all files matching 'path'. Standard - wildcards like '*' and '?' are supported, see documentation for the 'glob' - module for more information. - - :return: dict: A { filename: hash } dictionary for all matched files. - Empty if none found. - """ - return { - filename: file_hash(filename) - for filename in glob.iglob(path) - } - - -def check_hash(path, checksum, hash_type='md5'): - """Validate a file using a cryptographic checksum. - - :param str checksum: Value of the checksum used to validate the file. - :param str hash_type: Hash algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. 
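The `file_hash()`/`check_hash()` pair removed here feeds the `restart_on_change` machinery below by hashing configuration files with `hashlib`. A compressed, runnable sketch of the hashing side, exercised against a throwaway temporary file:

```python
import hashlib
import os
import tempfile


def file_hash(path, hash_type='md5'):
    """Return the hex digest of a file, or None if it does not exist."""
    if not os.path.exists(path):
        return None
    h = getattr(hashlib, hash_type)()
    with open(path, 'rb') as source:
        h.update(source.read())
    return h.hexdigest()


# Round-trip check against a throwaway file.
with tempfile.NamedTemporaryFile(delete=False) as fh:
    fh.write(b'hello\n')
expected = hashlib.sha256(b'hello\n').hexdigest()
assert file_hash(fh.name, 'sha256') == expected
os.unlink(fh.name)
```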
- :raises ChecksumError: If the file fails the checksum - - """ - actual_checksum = file_hash(path, hash_type) - if checksum != actual_checksum: - raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) - - -class ChecksumError(ValueError): - """A class derived from Value error to indicate the checksum failed.""" - pass - - -def restart_on_change(restart_map, stopstart=False, restart_functions=None): - """Restart services based on configuration files changing - - This function is used a decorator, for example:: - - @restart_on_change({ - '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] - '/etc/apache/sites-enabled/*': [ 'apache2' ] - }) - def config_changed(): - pass # your code here - - In this example, the cinder-api and cinder-volume services - would be restarted if /etc/ceph/ceph.conf is changed by the - ceph_client_changed function. The apache2 service would be - restarted if any file matching the pattern got changed, created - or removed. Standard wildcards are supported, see documentation - for the 'glob' module for more information. - - @param restart_map: {path_file_name: [service_name, ...] - @param stopstart: DEFAULT false; whether to stop, start OR restart - @param restart_functions: nonstandard functions to use to restart services - {svc: func, ...} - @returns result from decorated function - """ - def wrap(f): - @functools.wraps(f) - def wrapped_f(*args, **kwargs): - return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart, - restart_functions) - return wrapped_f - return wrap - - -def restart_on_change_helper(lambda_f, restart_map, stopstart=False, - restart_functions=None): - """Helper function to perform the restart_on_change function. - - This is provided for decorators to restart services if files described - in the restart_map have changed after an invocation of lambda_f(). - - @param lambda_f: function to call. - @param restart_map: {file: [service, ...]} - @param stopstart: whether to stop, start or restart a service - @param restart_functions: nonstandard functions to use to restart services - {svc: func, ...} - @returns result of lambda_f() - """ - if restart_functions is None: - restart_functions = {} - checksums = {path: path_hash(path) for path in restart_map} - r = lambda_f() - # create a list of lists of the services to restart - restarts = [restart_map[path] - for path in restart_map - if path_hash(path) != checksums[path]] - # create a flat list of ordered services without duplicates from lists - services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) - if services_list: - actions = ('stop', 'start') if stopstart else ('restart',) - for service_name in services_list: - if service_name in restart_functions: - restart_functions[service_name](service_name) - else: - for action in actions: - service(action, service_name) - return r - - -def pwgen(length=None): - """Generate a random pasword.""" - if length is None: - # A random length is ok to use a weak PRNG - length = random.choice(range(35, 45)) - alphanumeric_chars = [ - l for l in (string.ascii_letters + string.digits) - if l not in 'l0QD1vAEIOUaeiou'] - # Use a crypto-friendly PRNG (e.g. 
/dev/urandom) for making the - # actual password - random_generator = random.SystemRandom() - random_chars = [ - random_generator.choice(alphanumeric_chars) for _ in range(length)] - return(''.join(random_chars)) - - -def is_phy_iface(interface): - """Returns True if interface is not virtual, otherwise False.""" - if interface: - sys_net = '/sys/class/net' - if os.path.isdir(sys_net): - for iface in glob.glob(os.path.join(sys_net, '*')): - if '/virtual/' in os.path.realpath(iface): - continue - - if interface == os.path.basename(iface): - return True - - return False - - -def get_bond_master(interface): - """Returns bond master if interface is bond slave otherwise None. - - NOTE: the provided interface is expected to be physical - """ - if interface: - iface_path = '/sys/class/net/%s' % (interface) - if os.path.exists(iface_path): - if '/virtual/' in os.path.realpath(iface_path): - return None - - master = os.path.join(iface_path, 'master') - if os.path.exists(master): - master = os.path.realpath(master) - # make sure it is a bond master - if os.path.exists(os.path.join(master, 'bonding')): - return os.path.basename(master) - - return None - - -def list_nics(nic_type=None): - """Return a list of nics of given type(s)""" - if isinstance(nic_type, six.string_types): - int_types = [nic_type] - else: - int_types = nic_type - - interfaces = [] - if nic_type: - for int_type in int_types: - cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).decode('UTF-8') - ip_output = ip_output.split('\n') - ip_output = (line for line in ip_output if line) - for line in ip_output: - if line.split()[1].startswith(int_type): - matched = re.search('.*: (' + int_type + - r'[0-9]+\.[0-9]+)@.*', line) - if matched: - iface = matched.groups()[0] - else: - iface = line.split()[1].replace(":", "") - - if iface not in interfaces: - interfaces.append(iface) - else: - cmd = ['ip', 'a'] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - ip_output = (line.strip() for line in ip_output if line) - - key = re.compile('^[0-9]+:\s+(.+):') - for line in ip_output: - matched = re.search(key, line) - if matched: - iface = matched.group(1) - iface = iface.partition("@")[0] - if iface not in interfaces: - interfaces.append(iface) - - return interfaces - - -def set_nic_mtu(nic, mtu): - """Set the Maximum Transmission Unit (MTU) on a network interface.""" - cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] - subprocess.check_call(cmd) - - -def get_nic_mtu(nic): - """Return the Maximum Transmission Unit (MTU) for a network interface.""" - cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - mtu = "" - for line in ip_output: - words = line.split() - if 'mtu' in words: - mtu = words[words.index("mtu") + 1] - return mtu - - -def get_nic_hwaddr(nic): - """Return the Media Access Control (MAC) for a network interface.""" - cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8') - hwaddr = "" - words = ip_output.split() - if 'link/ether' in words: - hwaddr = words[words.index('link/ether') + 1] - return hwaddr - - -@contextmanager -def chdir(directory): - """Change the current working directory to a different directory for a code - block and return the previous directory after the block exits. Useful to - run commands from a specificed directory. - - :param str directory: The directory path to change to for this context. 
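`restart_on_change_helper()` above snapshots the hash of every watched path, runs the wrapped function, and then restarts only the services whose files actually changed. A simplified sketch of that flow; `changed_services` is an illustrative name and it merely reports the affected services rather than restarting them:

```python
import glob
import hashlib
import os
import tempfile


def path_hash(pattern):
    """{filename: digest} for every file matching a glob pattern."""
    result = {}
    for filename in glob.iglob(pattern):
        with open(filename, 'rb') as source:
            result[filename] = hashlib.md5(source.read()).hexdigest()
    return result


def changed_services(restart_map, body):
    """Run `body`, then report which mapped services saw file changes."""
    before = {path: path_hash(path) for path in restart_map}
    body()
    return sorted({svc
                   for path, svcs in restart_map.items()
                   if path_hash(path) != before[path]
                   for svc in svcs})


workdir = tempfile.mkdtemp()
conf = os.path.join(workdir, 'demo.conf')
open(conf, 'w').close()  # start with an empty config file


def rewrite_conf():
    with open(conf, 'w') as fh:
        fh.write('changed\n')


print(changed_services({conf: ['demo-service']}, rewrite_conf))
# -> ['demo-service']
```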
- """ - cur = os.getcwd() - try: - yield os.chdir(directory) - finally: - os.chdir(cur) - - -def chownr(path, owner, group, follow_links=True, chowntopdir=False): - """Recursively change user and group ownership of files and directories - in given path. Doesn't chown path itself by default, only its children. - - :param str path: The string path to start changing ownership. - :param str owner: The owner string to use when looking up the uid. - :param str group: The group string to use when looking up the gid. - :param bool follow_links: Also follow and chown links if True - :param bool chowntopdir: Also chown path itself if True - """ - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - if follow_links: - chown = os.chown - else: - chown = os.lchown - - if chowntopdir: - broken_symlink = os.path.lexists(path) and not os.path.exists(path) - if not broken_symlink: - chown(path, uid, gid) - for root, dirs, files in os.walk(path, followlinks=follow_links): - for name in dirs + files: - full = os.path.join(root, name) - broken_symlink = os.path.lexists(full) and not os.path.exists(full) - if not broken_symlink: - chown(full, uid, gid) - - -def lchownr(path, owner, group): - """Recursively change user and group ownership of files and directories - in a given path, not following symbolic links. See the documentation for - 'os.lchown' for more information. - - :param str path: The string path to start changing ownership. - :param str owner: The owner string to use when looking up the uid. - :param str group: The group string to use when looking up the gid. - """ - chownr(path, owner, group, follow_links=False) - - -def owner(path): - """Returns a tuple containing the username & groupname owning the path. - - :param str path: the string path to retrieve the ownership - :return tuple(str, str): A (username, groupname) tuple containing the - name of the user and group owning the path. - :raises OSError: if the specified path does not exist - """ - stat = os.stat(path) - username = pwd.getpwuid(stat.st_uid)[0] - groupname = grp.getgrgid(stat.st_gid)[0] - return username, groupname - - -def get_total_ram(): - """The total amount of system RAM in bytes. - - This is what is reported by the OS, and may be overcommitted when - there are multiple containers hosted on the same machine. - """ - with open('/proc/meminfo', 'r') as f: - for line in f.readlines(): - if line: - key, value, unit = line.split() - if key == 'MemTotal:': - assert unit == 'kB', 'Unknown unit' - return int(value) * 1024 # Classic, not KiB. 
- raise NotImplementedError() - - -UPSTART_CONTAINER_TYPE = '/run/container_type' - - -def is_container(): - """Determine whether unit is running in a container - - @return: boolean indicating if unit is in a container - """ - if init_is_systemd(): - # Detect using systemd-detect-virt - return subprocess.call(['systemd-detect-virt', - '--container']) == 0 - else: - # Detect using upstart container file marker - return os.path.exists(UPSTART_CONTAINER_TYPE) - - -def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH): - with open(updatedb_path, 'r+') as f_id: - updatedb_text = f_id.read() - output = updatedb(updatedb_text, path) - f_id.seek(0) - f_id.write(output) - f_id.truncate() - - -def updatedb(updatedb_text, new_path): - lines = [line for line in updatedb_text.split("\n")] - for i, line in enumerate(lines): - if line.startswith("PRUNEPATHS="): - paths_line = line.split("=")[1].replace('"', '') - paths = paths_line.split(" ") - if new_path not in paths: - paths.append(new_path) - lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) - output = "\n".join(lines) - return output - - -def modulo_distribution(modulo=3, wait=30): - """ Modulo distribution - - This helper uses the unit number, a modulo value and a constant wait time - to produce a calculated wait time distribution. This is useful in large - scale deployments to distribute load during an expensive operation such as - service restarts. - - If you have 1000 nodes that need to restart 100 at a time 1 minute at a - time: - - time.wait(modulo_distribution(modulo=100, wait=60)) - restart() - - If you need restarts to happen serially set modulo to the exact number of - nodes and set a high constant wait time: - - time.wait(modulo_distribution(modulo=10, wait=120)) - restart() - - @param modulo: int The modulo number creates the group distribution - @param wait: int The constant time wait value - @return: int Calculated time to wait for unit operation - """ - unit_number = int(local_unit().split('/')[1]) - return (unit_number % modulo) * wait diff --git a/tests/charmhelpers/core/host_factory/centos.py b/tests/charmhelpers/core/host_factory/centos.py deleted file mode 100644 index 7781a3961f23ce0b161ae08b11710466af8de814..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/host_factory/centos.py +++ /dev/null @@ -1,72 +0,0 @@ -import subprocess -import yum -import os - -from charmhelpers.core.strutils import BasicStringComparator - - -class CompareHostReleases(BasicStringComparator): - """Provide comparisons of Host releases. 
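`modulo_distribution()` above staggers expensive operations (such as service restarts) across a large deployment by deriving a wait time from the unit number. The same arithmetic as a standalone sketch; the unit number is passed in directly here, whereas the removed helper parses it out of `local_unit()`:

```python
def modulo_distribution(unit_number, modulo=3, wait=30):
    """Staggered wait: units in the same modulo group share a delay slot."""
    return (unit_number % modulo) * wait


# With modulo=3 and a 30 second base wait, units 0..5 get:
print([modulo_distribution(n, modulo=3, wait=30) for n in range(6)])
# -> [0, 30, 60, 0, 30, 60]
```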
- - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something with mitaka - """ - - def __init__(self, item): - raise NotImplementedError( - "CompareHostReleases() is not implemented for CentOS") - - -def service_available(service_name): - # """Determine whether a system service is available.""" - if os.path.isdir('/run/systemd/system'): - cmd = ['systemctl', 'is-enabled', service_name] - else: - cmd = ['service', service_name, 'is-enabled'] - return subprocess.call(cmd) == 0 - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['groupadd'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('-r') - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/os-release in a dict.""" - d = {} - with open('/etc/os-release', 'r') as lsb: - for l in lsb: - s = l.split('=') - if len(s) != 2: - continue - d[s[0].strip()] = s[1].strip() - return d - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports YumBase function if the pkgcache argument - is None. - """ - if not pkgcache: - y = yum.YumBase() - packages = y.doPackageLists() - pkgcache = {i.Name: i.version for i in packages['installed']} - pkg = pkgcache[package] - if pkg > revno: - return 1 - if pkg < revno: - return -1 - return 0 diff --git a/tests/charmhelpers/core/host_factory/ubuntu.py b/tests/charmhelpers/core/host_factory/ubuntu.py deleted file mode 100644 index d8dc378a5dad29c271a89289e4b815e2c2c99060..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/host_factory/ubuntu.py +++ /dev/null @@ -1,89 +0,0 @@ -import subprocess - -from charmhelpers.core.strutils import BasicStringComparator - - -UBUNTU_RELEASES = ( - 'lucid', - 'maverick', - 'natty', - 'oneiric', - 'precise', - 'quantal', - 'raring', - 'saucy', - 'trusty', - 'utopic', - 'vivid', - 'wily', - 'xenial', - 'yakkety', - 'zesty', - 'artful', -) - - -class CompareHostReleases(BasicStringComparator): - """Provide comparisons of Ubuntu releases. - - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something with mitaka - """ - _list = UBUNTU_RELEASES - - -def service_available(service_name): - """Determine whether a system service is available""" - try: - subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError as e: - return b'unrecognized service' not in e.output - else: - return True - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['addgroup'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('--system') - else: - cmd.extend([ - '--group', - ]) - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/lsb-release in a dict""" - d = {} - with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. 
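Both removed `lsb_release()` variants (reading `/etc/os-release` on CentOS and `/etc/lsb-release` on Ubuntu) reduce to parsing KEY=VALUE lines into a dict. A sketch of that parsing against an inline sample; stripping surrounding quotes is a small convenience added here, not behaviour of the removed helpers:

```python
def parse_release_file(text):
    """Parse KEY=VALUE lines, as in /etc/lsb-release or /etc/os-release."""
    release = {}
    for line in text.splitlines():
        key, sep, value = line.partition('=')
        if sep:  # ignore lines without a '=' separator
            release[key.strip()] = value.strip().strip('"')
    return release


sample = 'DISTRIB_ID=Ubuntu\nDISTRIB_CODENAME=bionic\nDISTRIB_RELEASE=18.04\n'
print(parse_release_file(sample)['DISTRIB_CODENAME'])
# -> bionic
```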
- - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports apt_cache function from charmhelpers.fetch if - the pkgcache argument is None. Be sure to add charmhelpers.fetch if - you call this function, or pass an apt_pkg.Cache() instance. - """ - import apt_pkg - if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/tests/charmhelpers/core/hugepage.py b/tests/charmhelpers/core/hugepage.py deleted file mode 100644 index 54b5b5e2fcf81eea5f2ebfbceb620ea68d725584..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/hugepage.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import yaml -from charmhelpers.core import fstab -from charmhelpers.core import sysctl -from charmhelpers.core.host import ( - add_group, - add_user_to_group, - fstab_mount, - mkdir, -) -from charmhelpers.core.strutils import bytes_from_string -from subprocess import check_output - - -def hugepage_support(user, group='hugetlb', nr_hugepages=256, - max_map_count=65536, mnt_point='/run/hugepages/kvm', - pagesize='2MB', mount=True, set_shmmax=False): - """Enable hugepages on system. 
- - Args: - user (str) -- Username to allow access to hugepages to - group (str) -- Group name to own hugepages - nr_hugepages (int) -- Number of pages to reserve - max_map_count (int) -- Number of Virtual Memory Areas a process can own - mnt_point (str) -- Directory to mount hugepages on - pagesize (str) -- Size of hugepages - mount (bool) -- Whether to Mount hugepages - """ - group_info = add_group(group) - gid = group_info.gr_gid - add_user_to_group(user, group) - if max_map_count < 2 * nr_hugepages: - max_map_count = 2 * nr_hugepages - sysctl_settings = { - 'vm.nr_hugepages': nr_hugepages, - 'vm.max_map_count': max_map_count, - 'vm.hugetlb_shm_group': gid, - } - if set_shmmax: - shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) - shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages - if shmmax_minsize > shmmax_current: - sysctl_settings['kernel.shmmax'] = shmmax_minsize - sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') - mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) - lfstab = fstab.Fstab() - fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) - if fstab_entry: - lfstab.remove_entry(fstab_entry) - entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', - 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) - lfstab.add_entry(entry) - if mount: - fstab_mount(mnt_point) diff --git a/tests/charmhelpers/core/kernel.py b/tests/charmhelpers/core/kernel.py deleted file mode 100644 index 2d404528348e57df4cebee58ff11f6574f334fe0..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/kernel.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
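`hugepage_support()` above derives a set of sysctl values from the requested number of huge pages: it raises `vm.max_map_count` to at least twice the page count and, optionally, lifts `kernel.shmmax` to cover the reserved pages. A sketch of just that arithmetic; the `gid`, `pagesize_bytes` and `shmmax_current` parameters are illustrative stand-ins for values the removed helper obtains from `add_group()`, `bytes_from_string()` and `sysctl -n`:

```python
def hugepage_sysctls(nr_hugepages=256, max_map_count=65536,
                     gid=1001, pagesize_bytes=2 * 1024 * 1024,
                     shmmax_current=0, set_shmmax=False):
    """Derive the sysctl values the removed hugepage_support() would write."""
    # Each huge page needs two VMAs, so keep max_map_count at least 2x pages.
    max_map_count = max(max_map_count, 2 * nr_hugepages)
    settings = {
        'vm.nr_hugepages': nr_hugepages,
        'vm.max_map_count': max_map_count,
        'vm.hugetlb_shm_group': gid,  # illustrative gid
    }
    if set_shmmax:
        shmmax_minsize = pagesize_bytes * nr_hugepages
        if shmmax_minsize > shmmax_current:
            settings['kernel.shmmax'] = shmmax_minsize
    return settings


print(hugepage_sysctls(nr_hugepages=512, set_shmmax=True))
```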
- -import re -import subprocess - -from charmhelpers.osplatform import get_platform -from charmhelpers.core.hookenv import ( - log, - INFO -) - -__platform__ = get_platform() -if __platform__ == "ubuntu": - from charmhelpers.core.kernel_factory.ubuntu import ( - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import -elif __platform__ == "centos": - from charmhelpers.core.kernel_factory.centos import ( - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import - -__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>" - - -def modprobe(module, persist=True): - """Load a kernel module and configure for auto-load on reboot.""" - cmd = ['modprobe', module] - - log('Loading kernel module %s' % module, level=INFO) - - subprocess.check_call(cmd) - if persist: - persistent_modprobe(module) - - -def rmmod(module, force=False): - """Remove a module from the linux kernel""" - cmd = ['rmmod'] - if force: - cmd.append('-f') - cmd.append(module) - log('Removing kernel module %s' % module, level=INFO) - return subprocess.check_call(cmd) - - -def lsmod(): - """Shows what kernel modules are currently loaded""" - return subprocess.check_output(['lsmod'], - universal_newlines=True) - - -def is_module_loaded(module): - """Checks if a kernel module is already loaded""" - matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) - return len(matches) > 0 diff --git a/tests/charmhelpers/core/kernel_factory/__init__.py b/tests/charmhelpers/core/kernel_factory/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/tests/charmhelpers/core/kernel_factory/centos.py b/tests/charmhelpers/core/kernel_factory/centos.py deleted file mode 100644 index 1c402c1157900ff1ad5c6c296a409c9e8fb96d2b..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/kernel_factory/centos.py +++ /dev/null @@ -1,17 +0,0 @@ -import subprocess -import os - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - if not os.path.exists('/etc/rc.modules'): - open('/etc/rc.modules', 'a') - os.chmod('/etc/rc.modules', 111) - with open('/etc/rc.modules', 'r+') as modules: - if module not in modules.read(): - modules.write('modprobe %s\n' % module) - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["dracut", "-f", version]) diff --git a/tests/charmhelpers/core/kernel_factory/ubuntu.py b/tests/charmhelpers/core/kernel_factory/ubuntu.py deleted file mode 100644 index 3de372fd3df38fe151cf79243f129cb504516f22..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/kernel_factory/ubuntu.py +++ /dev/null @@ -1,13 +0,0 @@ -import subprocess - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module + "\n") - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/tests/charmhelpers/core/services/__init__.py b/tests/charmhelpers/core/services/__init__.py deleted file mode 100644 index 61fd074edc09de434859e48ae1b36baef0503708..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/services/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .base import * # NOQA -from .helpers import * # NOQA diff --git a/tests/charmhelpers/core/services/base.py b/tests/charmhelpers/core/services/base.py deleted file mode 100644 index ca9dc996bd7d7fc2a18b7d9a9ee51adff171bda9..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/services/base.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import json -from inspect import getargspec -from collections import Iterable, OrderedDict - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -__all__ = ['ServiceManager', 'ManagerCallback', - 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', - 'service_restart', 'service_stop'] - - -class ServiceManager(object): - def __init__(self, services=None): - """ - Register a list of services, given their definitions. - - Service definitions are dicts in the following formats (all keys except - 'service' are optional):: - - { - "service": <service name>, - "required_data": <list of required data contexts>, - "provided_data": <list of provided data contexts>, - "data_ready": <one or more callbacks>, - "data_lost": <one or more callbacks>, - "start": <one or more callbacks>, - "stop": <one or more callbacks>, - "ports": <list of ports to manage>, - } - - The 'required_data' list should contain dicts of required data (or - dependency managers that act like dicts and know how to collect the data). - Only when all items in the 'required_data' list are populated are the list - of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more - information. - - The 'provided_data' list should contain relation data providers, most likely - a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, - that will indicate a set of data to set on a given relation. - - The 'data_ready' value should be either a single callback, or a list of - callbacks, to be called when all items in 'required_data' pass `is_ready()`. - Each callback will be called with the service name as the only parameter. - After all of the 'data_ready' callbacks are called, the 'start' callbacks - are fired. - - The 'data_lost' value should be either a single callback, or a list of - callbacks, to be called when a 'required_data' item no longer passes - `is_ready()`. Each callback will be called with the service name as the - only parameter. 
After all of the 'data_lost' callbacks are called, - the 'stop' callbacks are fired. - - The 'start' value should be either a single callback, or a list of - callbacks, to be called when starting the service, after the 'data_ready' - callbacks are complete. Each callback will be called with the service - name as the only parameter. This defaults to - `[host.service_start, services.open_ports]`. - - The 'stop' value should be either a single callback, or a list of - callbacks, to be called when stopping the service. If the service is - being stopped because it no longer has all of its 'required_data', this - will be called after all of the 'data_lost' callbacks are complete. - Each callback will be called with the service name as the only parameter. - This defaults to `[services.close_ports, host.service_stop]`. - - The 'ports' value should be a list of ports to manage. The default - 'start' handler will open the ports after the service is started, - and the default 'stop' handler will close the ports prior to stopping - the service. - - - Examples: - - The following registers an Upstart service called bingod that depends on - a mongodb relation and which runs a custom `db_migrate` function prior to - restarting the service, and a Runit service called spadesd:: - - manager = services.ServiceManager([ - { - 'service': 'bingod', - 'ports': [80, 443], - 'required_data': [MongoRelation(), config(), {'my': 'data'}], - 'data_ready': [ - services.template(source='bingod.conf'), - services.template(source='bingod.ini', - target='/etc/bingod.ini', - owner='bingo', perms=0400), - ], - }, - { - 'service': 'spadesd', - 'data_ready': services.template(source='spadesd_run.j2', - target='/etc/sv/spadesd/run', - perms=0555), - 'start': runit_start, - 'stop': runit_stop, - }, - ]) - manager.manage() - """ - self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') - self._ready = None - self.services = OrderedDict() - for service in services or []: - service_name = service['service'] - self.services[service_name] = service - - def manage(self): - """ - Handle the current hook by doing The Right Thing with the registered services. - """ - hookenv._run_atstart() - try: - hook_name = hookenv.hook_name() - if hook_name == 'stop': - self.stop_services() - else: - self.reconfigure_services() - self.provide_data() - except SystemExit as x: - if x.code is None or x.code == 0: - hookenv._run_atexit() - hookenv._run_atexit() - - def provide_data(self): - """ - Set the relation data for each provider in the ``provided_data`` list. - - A provider must have a `name` attribute, which indicates which relation - to set data on, and a `provide_data()` method, which returns a dict of - data to set. - - The `provide_data()` method can optionally accept two parameters: - - * ``remote_service`` The name of the remote service that the data will - be provided to. The `provide_data()` method will be called once - for each connected service (not unit). This allows the method to - tailor its data to the given service. - * ``service_ready`` Whether or not the service definition had all of - its requirements met, and thus the ``data_ready`` callbacks run. - - Note that the ``provided_data`` methods are now called **after** the - ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks - a chance to generate any data necessary for the providing to the remote - services. 
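The `ServiceManager` docstring above establishes the convention that a service's `data_ready`/`start` callbacks only fire once every item in its `required_data` list is populated, i.e. truthy. A sketch of that readiness test with illustrative service definitions:

```python
def is_ready(service_definition):
    """A service is ready when every 'required_data' item is truthy,
    matching the convention of the removed ServiceManager.is_ready()."""
    return all(bool(req) for req in service_definition.get('required_data', []))


# Illustrative definitions only; real contexts are relation/config dicts.
ready_svc = {'service': 'bingod',
             'required_data': [{'db_host': '10.0.0.2'}, {'my': 'data'}]}
waiting_svc = {'service': 'bingod',
               'required_data': [{}, {'my': 'data'}]}  # empty context => not ready
print(is_ready(ready_svc), is_ready(waiting_svc))
# -> True False
```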
- """ - for service_name, service in self.services.items(): - service_ready = self.is_ready(service_name) - for provider in service.get('provided_data', []): - for relid in hookenv.relation_ids(provider.name): - units = hookenv.related_units(relid) - if not units: - continue - remote_service = units[0].split('/')[0] - argspec = getargspec(provider.provide_data) - if len(argspec.args) > 1: - data = provider.provide_data(remote_service, service_ready) - else: - data = provider.provide_data() - if data: - hookenv.relation_set(relid, data) - - def reconfigure_services(self, *service_names): - """ - Update all files for one or more registered services, and, - if ready, optionally restart them. - - If no service names are given, reconfigures all registered services. - """ - for service_name in service_names or self.services.keys(): - if self.is_ready(service_name): - self.fire_event('data_ready', service_name) - self.fire_event('start', service_name, default=[ - service_restart, - manage_ports]) - self.save_ready(service_name) - else: - if self.was_ready(service_name): - self.fire_event('data_lost', service_name) - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - self.save_lost(service_name) - - def stop_services(self, *service_names): - """ - Stop one or more registered services, by name. - - If no service names are given, stops all registered services. - """ - for service_name in service_names or self.services.keys(): - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - - def get_service(self, service_name): - """ - Given the name of a registered service, return its service definition. - """ - service = self.services.get(service_name) - if not service: - raise KeyError('Service not registered: %s' % service_name) - return service - - def fire_event(self, event_name, service_name, default=None): - """ - Fire a data_ready, data_lost, start, or stop event on a given service. - """ - service = self.get_service(service_name) - callbacks = service.get(event_name, default) - if not callbacks: - return - if not isinstance(callbacks, Iterable): - callbacks = [callbacks] - for callback in callbacks: - if isinstance(callback, ManagerCallback): - callback(self, service_name, event_name) - else: - callback(service_name) - - def is_ready(self, service_name): - """ - Determine if a registered service is ready, by checking its 'required_data'. - - A 'required_data' item can be any mapping type, and is considered ready - if `bool(item)` evaluates as True. - """ - service = self.get_service(service_name) - reqs = service.get('required_data', []) - return all(bool(req) for req in reqs) - - def _load_ready_file(self): - if self._ready is not None: - return - if os.path.exists(self._ready_file): - with open(self._ready_file) as fp: - self._ready = set(json.load(fp)) - else: - self._ready = set() - - def _save_ready_file(self): - if self._ready is None: - return - with open(self._ready_file, 'w') as fp: - json.dump(list(self._ready), fp) - - def save_ready(self, service_name): - """ - Save an indicator that the given service is now data_ready. - """ - self._load_ready_file() - self._ready.add(service_name) - self._save_ready_file() - - def save_lost(self, service_name): - """ - Save an indicator that the given service is no longer data_ready. - """ - self._load_ready_file() - self._ready.discard(service_name) - self._save_ready_file() - - def was_ready(self, service_name): - """ - Determine if the given service was previously data_ready. 
- """ - self._load_ready_file() - return service_name in self._ready - - -class ManagerCallback(object): - """ - Special case of a callback that takes the `ServiceManager` instance - in addition to the service name. - - Subclasses should implement `__call__` which should accept three parameters: - - * `manager` The `ServiceManager` instance - * `service_name` The name of the service it's being triggered for - * `event_name` The name of the event that this callback is handling - """ - def __call__(self, manager, service_name, event_name): - raise NotImplementedError() - - -class PortManagerCallback(ManagerCallback): - """ - Callback class that will open or close ports, for use as either - a start or stop action. - """ - def __call__(self, manager, service_name, event_name): - service = manager.get_service(service_name) - new_ports = service.get('ports', []) - port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) - if os.path.exists(port_file): - with open(port_file) as fp: - old_ports = fp.read().split(',') - for old_port in old_ports: - if bool(old_port): - old_port = int(old_port) - if old_port not in new_ports: - hookenv.close_port(old_port) - with open(port_file, 'w') as fp: - fp.write(','.join(str(port) for port in new_ports)) - for port in new_ports: - if event_name == 'start': - hookenv.open_port(port) - elif event_name == 'stop': - hookenv.close_port(port) - - -def service_stop(service_name): - """ - Wrapper around host.service_stop to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_running(service_name): - host.service_stop(service_name) - - -def service_restart(service_name): - """ - Wrapper around host.service_restart to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_available(service_name): - if host.service_running(service_name): - host.service_restart(service_name) - else: - host.service_start(service_name) - - -# Convenience aliases -open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/tests/charmhelpers/core/services/helpers.py b/tests/charmhelpers/core/services/helpers.py deleted file mode 100644 index 3e6e30d2fe0d9c73ffdc42d70b77e864b6379c53..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/services/helpers.py +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import yaml - -from charmhelpers.core import hookenv -from charmhelpers.core import host -from charmhelpers.core import templating - -from charmhelpers.core.services.base import ManagerCallback - - -__all__ = ['RelationContext', 'TemplateCallback', - 'render_template', 'template'] - - -class RelationContext(dict): - """ - Base class for a context generator that gets relation data from juju. 
- - Subclasses must provide the attributes `name`, which is the name of the - interface of interest, `interface`, which is the type of the interface of - interest, and `required_keys`, which is the set of keys required for the - relation to be considered complete. The data for all interfaces matching - the `name` attribute that are complete will used to populate the dictionary - values (see `get_data`, below). - - The generated context will be namespaced under the relation :attr:`name`, - to prevent potential naming conflicts. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = None - interface = None - - def __init__(self, name=None, additional_required_keys=None): - if not hasattr(self, 'required_keys'): - self.required_keys = [] - - if name is not None: - self.name = name - if additional_required_keys: - self.required_keys.extend(additional_required_keys) - self.get_data() - - def __bool__(self): - """ - Returns True if all of the required_keys are available. - """ - return self.is_ready() - - __nonzero__ = __bool__ - - def __repr__(self): - return super(RelationContext, self).__repr__() - - def is_ready(self): - """ - Returns True if all of the `required_keys` are available from any units. - """ - ready = len(self.get(self.name, [])) > 0 - if not ready: - hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) - return ready - - def _is_ready(self, unit_data): - """ - Helper method that tests a set of relation data and returns True if - all of the `required_keys` are present. - """ - return set(unit_data.keys()).issuperset(set(self.required_keys)) - - def get_data(self): - """ - Retrieve the relation data for each unit involved in a relation and, - if complete, store it in a list under `self[self.name]`. This - is automatically called when the RelationContext is instantiated. - - The units are sorted lexographically first by the service ID, then by - the unit ID. Thus, if an interface has two other services, 'db:1' - and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', - and 'db:2' having one unit, 'mediawiki/0', all of which have a complete - set of data, the relation data for the units will be stored in the - order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. - - If you only care about a single unit on the relation, you can just - access it as `{{ interface[0]['key'] }}`. However, if you can at all - support multiple units on a relation, you should iterate over the list, - like:: - - {% for unit in interface -%} - {{ unit['key'] }}{% if not loop.last %},{% endif %} - {%- endfor %} - - Note that since all sets of relation data from all related services and - units are in a single list, if you need to know which service or unit a - set of data came from, you'll need to extend this class to preserve - that information. - """ - if not hookenv.relation_ids(self.name): - return - - ns = self.setdefault(self.name, []) - for rid in sorted(hookenv.relation_ids(self.name)): - for unit in sorted(hookenv.related_units(rid)): - reldata = hookenv.relation_get(rid=rid, unit=unit) - if self._is_ready(reldata): - ns.append(reldata) - - def provide_data(self): - """ - Return data to be relation_set for this interface. - """ - return {} - - -class MysqlRelation(RelationContext): - """ - Relation context for the `mysql` interface. 
- - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'db' - interface = 'mysql' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'user', 'password', 'database'] - RelationContext.__init__(self, *args, **kwargs) - - -class HttpRelation(RelationContext): - """ - Relation context for the `http` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'website' - interface = 'http' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'port'] - RelationContext.__init__(self, *args, **kwargs) - - def provide_data(self): - return { - 'host': hookenv.unit_get('private-address'), - 'port': 80, - } - - -class RequiredConfig(dict): - """ - Data context that loads config options with one or more mandatory options. - - Once the required options have been changed from their default values, all - config options will be available, namespaced under `config` to prevent - potential naming conflicts (for example, between a config option and a - relation property). - - :param list *args: List of options that must be changed from their default values. - """ - - def __init__(self, *args): - self.required_options = args - self['config'] = hookenv.config() - with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.load(fp).get('options', {}) - - def __bool__(self): - for option in self.required_options: - if option not in self['config']: - return False - current_value = self['config'][option] - default_value = self.config[option].get('default') - if current_value == default_value: - return False - if current_value in (None, '') and default_value in (None, ''): - return False - return True - - def __nonzero__(self): - return self.__bool__() - - -class StoredContext(dict): - """ - A data context that always returns the data that it was first created with. - - This is useful to do a one-time generation of things like passwords, that - will thereafter use the same value that was originally generated, instead - of generating a new value each time it is run. - """ - def __init__(self, file_name, config_data): - """ - If the file exists, populate `self` with the data from the file. - Otherwise, populate with the given data and persist it to the file. - """ - if os.path.exists(file_name): - self.update(self.read_context(file_name)) - else: - self.store_context(file_name, config_data) - self.update(config_data) - - def store_context(self, file_name, config_data): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0o600) - yaml.dump(config_data, file_stream) - - def read_context(self, file_name): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'r') as file_stream: - data = yaml.load(file_stream) - if not data: - raise OSError("%s is empty" % file_name) - return data - - -class TemplateCallback(ManagerCallback): - """ - Callback class that will render a Jinja2 template, for use as a ready - action. 
- - :param str source: The template source file, relative to - `$CHARM_DIR/templates` - - :param str target: The target to write the rendered template to (or None) - :param str owner: The owner of the rendered file - :param str group: The group of the rendered file - :param int perms: The permissions of the rendered file - :param partial on_change_action: functools partial to be executed when - rendered file changes - :param jinja2 loader template_loader: A jinja2 template loader - - :return str: The rendered template - """ - def __init__(self, source, target, - owner='root', group='root', perms=0o444, - on_change_action=None, template_loader=None): - self.source = source - self.target = target - self.owner = owner - self.group = group - self.perms = perms - self.on_change_action = on_change_action - self.template_loader = template_loader - - def __call__(self, manager, service_name, event_name): - pre_checksum = '' - if self.on_change_action and os.path.isfile(self.target): - pre_checksum = host.file_hash(self.target) - service = manager.get_service(service_name) - context = {'ctx': {}} - for ctx in service.get('required_data', []): - context.update(ctx) - context['ctx'].update(ctx) - - result = templating.render(self.source, self.target, context, - self.owner, self.group, self.perms, - template_loader=self.template_loader) - if self.on_change_action: - if pre_checksum == host.file_hash(self.target): - hookenv.log( - 'No change detected: {}'.format(self.target), - hookenv.DEBUG) - else: - self.on_change_action() - - return result - - -# Convenience aliases for templates -render_template = template = TemplateCallback diff --git a/tests/charmhelpers/core/strutils.py b/tests/charmhelpers/core/strutils.py deleted file mode 100644 index e8df0452f8203b53947eb137eed22d85ff62dff0..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/strutils.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import six -import re - - -def bool_from_string(value): - """Interpret string value as boolean. - - Returns True if value translates to True otherwise False. - """ - if isinstance(value, six.string_types): - value = six.text_type(value) - else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) - raise ValueError(msg) - - value = value.strip().lower() - - if value in ['y', 'yes', 'true', 't', 'on']: - return True - elif value in ['n', 'no', 'false', 'f', 'off']: - return False - - msg = "Unable to interpret string value '%s' as boolean" % (value) - raise ValueError(msg) - - -def bytes_from_string(value): - """Interpret human readable string value as bytes. 
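A few sample conversions, assuming the 1024-based suffix table defined below (values chosen arbitrarily)::

    >>> bytes_from_string('512M')     # 512 * 1024 ** 2
    536870912
    >>> bytes_from_string('3GB')      # 3 * 1024 ** 3
    3221225472
    >>> bytes_from_string('1048576')  # bare integers are taken as bytes
    1048576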
- - Returns int - """ - BYTE_POWER = { - 'K': 1, - 'KB': 1, - 'M': 2, - 'MB': 2, - 'G': 3, - 'GB': 3, - 'T': 4, - 'TB': 4, - 'P': 5, - 'PB': 5, - } - if isinstance(value, six.string_types): - value = six.text_type(value) - else: - msg = "Unable to interpret non-string value '%s' as bytes" % (value) - raise ValueError(msg) - matches = re.match("([0-9]+)([a-zA-Z]+)", value) - if matches: - size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) - else: - # Assume that value passed in is bytes - try: - size = int(value) - except ValueError: - msg = "Unable to interpret string value '%s' as bytes" % (value) - raise ValueError(msg) - return size - - -class BasicStringComparator(object): - """Provides a class that will compare strings from an iterator type object. - Used to provide > and < comparisons on strings that may not necessarily be - alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the - z-wrap. - """ - - _list = None - - def __init__(self, item): - if self._list is None: - raise Exception("Must define the _list in the class definition!") - try: - self.index = self._list.index(item) - except Exception: - raise KeyError("Item '{}' is not in list '{}'" - .format(item, self._list)) - - def __eq__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index == self._list.index(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __lt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index < self._list.index(other) - - def __ge__(self, other): - return not self.__lt__(other) - - def __gt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index > self._list.index(other) - - def __le__(self, other): - return not self.__gt__(other) - - def __str__(self): - """Always give back the item at the index so it can be used in - comparisons like: - - s_mitaka = CompareOpenStack('mitaka') - s_newton = CompareOpenstack('newton') - - assert s_newton > s_mitaka - - @returns: <string> - """ - return self._list[self.index] diff --git a/tests/charmhelpers/core/sysctl.py b/tests/charmhelpers/core/sysctl.py deleted file mode 100644 index 6e413e31480e5fb4bcb703d58b1e87f98adc53af..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/sysctl.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import yaml - -from subprocess import check_call - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - ERROR, -) - -__author__ = 'Jorge Niedbalski R. 
<jorge.niedbalski@canonical.com>' - - -def create(sysctl_dict, sysctl_file): - """Creates a sysctl.conf file from a YAML associative array - - :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" - :type sysctl_dict: str - :param sysctl_file: path to the sysctl file to be saved - :type sysctl_file: str or unicode - :returns: None - """ - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return - - with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict_parsed.items(): - fd.write("{}={}\n".format(key, value)) - - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), - level=DEBUG) - - check_call(["sysctl", "-p", sysctl_file]) diff --git a/tests/charmhelpers/core/templating.py b/tests/charmhelpers/core/templating.py deleted file mode 100644 index 7b801a34a5e6585485347f7a97bc18a10a093d03..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/templating.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): - """ - Render a template. - - The `source` path, if not absolute, is relative to the `templates_dir`. - - The `target` path should be absolute. It can also be `None`, in which - case no file will be written. - - The context should be a dict containing the values to be replaced in the - template. - - The `owner`, `group`, and `perms` options will be passed to `write_file`. - - If omitted, `templates_dir` defaults to the `templates` folder in the charm. - - The rendered template will be written to the file as well as being returned - as a string. - - Note: Using this requires python-jinja2 or python3-jinja2; if it is not - installed, calling this will attempt to use charmhelpers.fetch.apt_install - to install it. 
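A typical call, using hypothetical template and target names, might look like::

    render('myservice.conf.j2', '/etc/myservice/myservice.conf',
           context={'bind_port': 8080},
           owner='root', group='root', perms=0o640)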
- """ - try: - from jinja2 import FileSystemLoader, Environment, exceptions - except ImportError: - try: - from charmhelpers.fetch import apt_install - except ImportError: - hookenv.log('Could not import jinja2, and could not import ' - 'charmhelpers.fetch to install it', - level=hookenv.ERROR) - raise - if sys.version_info.major == 2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) - from jinja2 import FileSystemLoader, Environment, exceptions - - if template_loader: - template_env = Environment(loader=template_loader) - else: - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - template_env = Environment(loader=FileSystemLoader(templates_dir)) - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e - content = template.render(context) - if target is not None: - target_dir = os.path.dirname(target) - if not os.path.exists(target_dir): - # This is a terrible default directory permission, as the file - # or its siblings will often contain secrets. - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) - host.write_file(target, content.encode(encoding), owner, group, perms) - return content diff --git a/tests/charmhelpers/core/unitdata.py b/tests/charmhelpers/core/unitdata.py deleted file mode 100644 index 7af875c2fcc1e2e38f9267bfdc60ab5a2a499c18..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/core/unitdata.py +++ /dev/null @@ -1,518 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Authors: -# Kapil Thangavelu <kapil.foss@gmail.com> -# -""" -Intro ------ - -A simple way to store state in units. This provides a key value -storage with support for versioned, transactional operation, -and can calculate deltas from previous values to simplify unit logic -when processing changes. - - -Hook Integration ----------------- - -There are several extant frameworks for hook execution, including - - - charmhelpers.core.hookenv.Hooks - - charmhelpers.core.services.ServiceManager - -The storage classes are framework agnostic, one simple integration is -via the HookData contextmanager. It will record the current hook -execution environment (including relation data, config data, etc.), -setup a transaction and allow easy access to the changes from -previously seen values. One consequence of the integration is the -reservation of particular keys ('rels', 'unit', 'env', 'config', -'charm_revisions') for their respective values. 
- -Here's a fully worked integration example using hookenv.Hooks:: - - from charmhelper.core import hookenv, unitdata - - hook_data = unitdata.HookData() - db = unitdata.kv() - hooks = hookenv.Hooks() - - @hooks.hook - def config_changed(): - # Print all changes to configuration from previously seen - # values. - for changed, (prev, cur) in hook_data.conf.items(): - print('config changed', changed, - 'previous value', prev, - 'current value', cur) - - # Get some unit specific bookeeping - if not db.get('pkg_key'): - key = urllib.urlopen('https://example.com/pkg_key').read() - db.set('pkg_key', key) - - # Directly access all charm config as a mapping. - conf = db.getrange('config', True) - - # Directly access all relation data as a mapping - rels = db.getrange('rels', True) - - if __name__ == '__main__': - with hook_data(): - hook.execute() - - -A more basic integration is via the hook_scope context manager which simply -manages transaction scope (and records hook name, and timestamp):: - - >>> from unitdata import kv - >>> db = kv() - >>> with db.hook_scope('install'): - ... # do work, in transactional scope. - ... db.set('x', 1) - >>> db.get('x') - 1 - - -Usage ------ - -Values are automatically json de/serialized to preserve basic typing -and complex data struct capabilities (dicts, lists, ints, booleans, etc). - -Individual values can be manipulated via get/set:: - - >>> kv.set('y', True) - >>> kv.get('y') - True - - # We can set complex values (dicts, lists) as a single key. - >>> kv.set('config', {'a': 1, 'b': True'}) - - # Also supports returning dictionaries as a record which - # provides attribute access. - >>> config = kv.get('config', record=True) - >>> config.b - True - - -Groups of keys can be manipulated with update/getrange:: - - >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") - >>> kv.getrange('gui.', strip=True) - {'z': 1, 'y': 2} - -When updating values, its very helpful to understand which values -have actually changed and how have they changed. The storage -provides a delta method to provide for this:: - - >>> data = {'debug': True, 'option': 2} - >>> delta = kv.delta(data, 'config.') - >>> delta.debug.previous - None - >>> delta.debug.current - True - >>> delta - {'debug': (None, True), 'option': (None, 2)} - -Note the delta method does not persist the actual change, it needs to -be explicitly saved via 'update' method:: - - >>> kv.update(data, 'config.') - -Values modified in the context of a hook scope retain historical values -associated to the hookname. - - >>> with db.hook_scope('config-changed'): - ... db.set('x', 42) - >>> db.gethistory('x') - [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), - (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] - -""" - -import collections -import contextlib -import datetime -import itertools -import json -import os -import pprint -import sqlite3 -import sys - -__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>' - - -class Storage(object): - """Simple key value database for local unit state within charms. - - Modifications are not persisted unless :meth:`flush` is called. - - To support dicts, lists, integer, floats, and booleans values - are automatically json encoded/decoded. 
- """ - def __init__(self, path=None): - self.db_path = path - if path is None: - if 'UNIT_STATE_DB' in os.environ: - self.db_path = os.environ['UNIT_STATE_DB'] - else: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') - self.conn = sqlite3.connect('%s' % self.db_path) - self.cursor = self.conn.cursor() - self.revision = None - self._closed = False - self._init() - - def close(self): - if self._closed: - return - self.flush(False) - self.cursor.close() - self.conn.close() - self._closed = True - - def get(self, key, default=None, record=False): - self.cursor.execute('select data from kv where key=?', [key]) - result = self.cursor.fetchone() - if not result: - return default - if record: - return Record(json.loads(result[0])) - return json.loads(result[0]) - - def getrange(self, key_prefix, strip=False): - """ - Get a range of keys starting with a common prefix as a mapping of - keys to values. - - :param str key_prefix: Common prefix among all keys - :param bool strip: Optionally strip the common prefix from the key - names in the returned dict - :return dict: A (possibly empty) dict of key-value mappings - """ - self.cursor.execute("select key, data from kv where key like ?", - ['%s%%' % key_prefix]) - result = self.cursor.fetchall() - - if not result: - return {} - if not strip: - key_prefix = '' - return dict([ - (k[len(key_prefix):], json.loads(v)) for k, v in result]) - - def update(self, mapping, prefix=""): - """ - Set the values of multiple keys at once. - - :param dict mapping: Mapping of keys to values - :param str prefix: Optional prefix to apply to all keys in `mapping` - before setting - """ - for k, v in mapping.items(): - self.set("%s%s" % (prefix, k), v) - - def unset(self, key): - """ - Remove a key from the database entirely. - """ - self.cursor.execute('delete from kv where key=?', [key]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - [key, self.revision, json.dumps('DELETED')]) - - def unsetrange(self, keys=None, prefix=""): - """ - Remove a range of keys starting with a common prefix, from the database - entirely. - - :param list keys: List of keys to remove. - :param str prefix: Optional prefix to apply to all keys in ``keys`` - before removing. - """ - if keys is not None: - keys = ['%s%s' % (prefix, key) for key in keys] - self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), - list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) - else: - self.cursor.execute('delete from kv where key like ?', - ['%s%%' % prefix]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) - - def set(self, key, value): - """ - Set a value in the database. 
- - :param str key: Key to set the value for - :param value: Any JSON-serializable value to be set - """ - serialized = json.dumps(value) - - self.cursor.execute('select data from kv where key=?', [key]) - exists = self.cursor.fetchone() - - # Skip mutations to the same value - if exists: - if exists[0] == serialized: - return value - - if not exists: - self.cursor.execute( - 'insert into kv (key, data) values (?, ?)', - (key, serialized)) - else: - self.cursor.execute(''' - update kv - set data = ? - where key = ?''', [serialized, key]) - - # Save - if not self.revision: - return value - - self.cursor.execute( - 'select 1 from kv_revisions where key=? and revision=?', - [key, self.revision]) - exists = self.cursor.fetchone() - - if not exists: - self.cursor.execute( - '''insert into kv_revisions ( - revision, key, data) values (?, ?, ?)''', - (self.revision, key, serialized)) - else: - self.cursor.execute( - ''' - update kv_revisions - set data = ? - where key = ? - and revision = ?''', - [serialized, key, self.revision]) - - return value - - def delta(self, mapping, prefix): - """ - return a delta containing values that have changed. - """ - previous = self.getrange(prefix, strip=True) - if not previous: - pk = set() - else: - pk = set(previous.keys()) - ck = set(mapping.keys()) - delta = DeltaSet() - - # added - for k in ck.difference(pk): - delta[k] = Delta(None, mapping[k]) - - # removed - for k in pk.difference(ck): - delta[k] = Delta(previous[k], None) - - # changed - for k in pk.intersection(ck): - c = mapping[k] - p = previous[k] - if c != p: - delta[k] = Delta(p, c) - - return delta - - @contextlib.contextmanager - def hook_scope(self, name=""): - """Scope all future interactions to the current hook execution - revision.""" - assert not self.revision - self.cursor.execute( - 'insert into hooks (hook, date) values (?, ?)', - (name or sys.argv[0], - datetime.datetime.utcnow().isoformat())) - self.revision = self.cursor.lastrowid - try: - yield self.revision - self.revision = None - except Exception: - self.flush(False) - self.revision = None - raise - else: - self.flush() - - def flush(self, save=True): - if save: - self.conn.commit() - elif self._closed: - return - else: - self.conn.rollback() - - def _init(self): - self.cursor.execute(''' - create table if not exists kv ( - key text, - data text, - primary key (key) - )''') - self.cursor.execute(''' - create table if not exists kv_revisions ( - key text, - revision integer, - data text, - primary key (key, revision) - )''') - self.cursor.execute(''' - create table if not exists hooks ( - version integer primary key autoincrement, - hook text, - date text - )''') - self.conn.commit() - - def gethistory(self, key, deserialize=False): - self.cursor.execute( - ''' - select kv.revision, kv.key, kv.data, h.hook, h.date - from kv_revisions kv, - hooks h - where kv.key=? - and kv.revision = h.version - ''', [key]) - if deserialize is False: - return self.cursor.fetchall() - return map(_parse_history, self.cursor.fetchall()) - - def debug(self, fh=sys.stderr): - self.cursor.execute('select * from kv') - pprint.pprint(self.cursor.fetchall(), stream=fh) - self.cursor.execute('select * from kv_revisions') - pprint.pprint(self.cursor.fetchall(), stream=fh) - - -def _parse_history(d): - return (d[0], d[1], json.loads(d[2]), d[3], - datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) - - -class HookData(object): - """Simple integration for existing hook exec frameworks. 
- - Records all unit information, and stores deltas for processing - by the hook. - - Sample:: - - from charmhelper.core import hookenv, unitdata - - changes = unitdata.HookData() - db = unitdata.kv() - hooks = hookenv.Hooks() - - @hooks.hook - def config_changed(): - # View all changes to configuration - for changed, (prev, cur) in changes.conf.items(): - print('config changed', changed, - 'previous value', prev, - 'current value', cur) - - # Get some unit specific bookeeping - if not db.get('pkg_key'): - key = urllib.urlopen('https://example.com/pkg_key').read() - db.set('pkg_key', key) - - if __name__ == '__main__': - with changes(): - hook.execute() - - """ - def __init__(self): - self.kv = kv() - self.conf = None - self.rels = None - - @contextlib.contextmanager - def __call__(self): - from charmhelpers.core import hookenv - hook_name = hookenv.hook_name() - - with self.kv.hook_scope(hook_name): - self._record_charm_version(hookenv.charm_dir()) - delta_config, delta_relation = self._record_hook(hookenv) - yield self.kv, delta_config, delta_relation - - def _record_charm_version(self, charm_dir): - # Record revisions.. charm revisions are meaningless - # to charm authors as they don't control the revision. - # so logic dependnent on revision is not particularly - # useful, however it is useful for debugging analysis. - charm_rev = open( - os.path.join(charm_dir, 'revision')).read().strip() - charm_rev = charm_rev or '0' - revs = self.kv.get('charm_revisions', []) - if charm_rev not in revs: - revs.append(charm_rev.strip() or '0') - self.kv.set('charm_revisions', revs) - - def _record_hook(self, hookenv): - data = hookenv.execution_environment() - self.conf = conf_delta = self.kv.delta(data['conf'], 'config') - self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') - self.kv.set('env', dict(data['env'])) - self.kv.set('unit', data['unit']) - self.kv.set('relid', data.get('relid')) - return conf_delta, rels_delta - - -class Record(dict): - - __slots__ = () - - def __getattr__(self, k): - if k in self: - return self[k] - raise AttributeError(k) - - -class DeltaSet(Record): - - __slots__ = () - - -Delta = collections.namedtuple('Delta', ['previous', 'current']) - - -_KV = None - - -def kv(): - global _KV - if _KV is None: - _KV = Storage() - return _KV diff --git a/tests/charmhelpers/osplatform.py b/tests/charmhelpers/osplatform.py deleted file mode 100644 index d9a4d5c02ea77ac4679026d619afa80c227b8eca..0000000000000000000000000000000000000000 --- a/tests/charmhelpers/osplatform.py +++ /dev/null @@ -1,25 +0,0 @@ -import platform - - -def get_platform(): - """Return the current OS platform. - - For example: if current os platform is Ubuntu then a string "ubuntu" - will be returned (which is the name of the module). - This string is used to decide which platform module should be imported. - """ - # linux_distribution is deprecated and will be removed in Python 3.7 - # Warings *not* disabled, as we certainly need to fix this. - tuple_platform = platform.linux_distribution() - current_platform = tuple_platform[0] - if "Ubuntu" in current_platform: - return "ubuntu" - elif "CentOS" in current_platform: - return "centos" - elif "debian" in current_platform: - # Stock Python does not detect Ubuntu and instead returns debian. - # Or at least it does in some build environments like Travis CI - return "ubuntu" - else: - raise RuntimeError("This module is not supported on {}." 
- .format(current_platform)) diff --git a/tests/dev-basic-bionic-rocky b/tests/dev-basic-bionic-rocky deleted file mode 100755 index 696c35a97e0bcc0ad8f8d89bc5ee1617ef578ec6..0000000000000000000000000000000000000000 --- a/tests/dev-basic-bionic-rocky +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic keystone deployment on bionic-rocky.""" - -from basic_deployment import KeystoneV3Deployment - -if __name__ == '__main__': - deployment = KeystoneV3Deployment(series='bionic', - openstack='cloud:bionic-rocky', - source='cloud:bionic-updates/rocky') - deployment.run_tests() diff --git a/tests/dev-basic-cosmic-rocky b/tests/dev-basic-cosmic-rocky deleted file mode 100755 index c14ca6c46e8f3dc96f4ec6d937a1a074ba7df89d..0000000000000000000000000000000000000000 --- a/tests/dev-basic-cosmic-rocky +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic keystone deployment on cosmic-rocky.""" - -from basic_deployment import KeystoneV3Deployment - -if __name__ == '__main__': - deployment = KeystoneV3Deployment(series='cosmic') - deployment.run_tests() diff --git a/tests/gate-basic-bionic-queens b/tests/gate-basic-bionic-queens deleted file mode 100755 index df46adc8c5f979d82f7a81f19d60d04e34043eb9..0000000000000000000000000000000000000000 --- a/tests/gate-basic-bionic-queens +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Amulet tests on a basic keystone deployment on bionic-queens.""" - -from basic_deployment import KeystoneV3Deployment - -if __name__ == '__main__': - deployment = KeystoneV3Deployment(series='bionic') - deployment.run_tests() diff --git a/tests/gate-basic-trusty-icehouse b/tests/gate-basic-trusty-icehouse deleted file mode 100755 index 8108a554b6d4c2f8ac406c5f9f300661058edf03..0000000000000000000000000000000000000000 --- a/tests/gate-basic-trusty-icehouse +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic keystone deployment on trusty-icehouse.""" - -from basic_deployment import KeystoneBasicDeployment - -if __name__ == '__main__': - deployment = KeystoneBasicDeployment(series='trusty') - deployment.run_tests() diff --git a/tests/gate-basic-trusty-mitaka b/tests/gate-basic-trusty-mitaka deleted file mode 100755 index 64e4418bcad5ad9febd14447f1496a1358d6aa91..0000000000000000000000000000000000000000 --- a/tests/gate-basic-trusty-mitaka +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic keystone deployment on trusty-mitaka.""" - -from basic_deployment import KeystoneBasicDeployment - -if __name__ == '__main__': - deployment = KeystoneBasicDeployment(series='trusty', - openstack='cloud:trusty-mitaka', - source='cloud:trusty-updates/mitaka') - deployment.run_tests() diff --git a/tests/gate-basic-xenial-ocata b/tests/gate-basic-xenial-ocata deleted file mode 100755 index 0de993a1743aa06c93438b1714af02fb04ca3ea7..0000000000000000000000000000000000000000 --- a/tests/gate-basic-xenial-ocata +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Amulet tests on a basic keystone deployment on xenial-ocata.""" - -from basic_deployment import KeystoneBasicDeployment - -if __name__ == '__main__': - deployment = KeystoneBasicDeployment(series='xenial', - openstack='cloud:xenial-ocata', - source='cloud:xenial-updates/ocata') - deployment.run_tests() diff --git a/tests/gate-basic-xenial-pike b/tests/gate-basic-xenial-pike deleted file mode 100755 index a63193d0d1d490c60d12cd48ea18337bdc124754..0000000000000000000000000000000000000000 --- a/tests/gate-basic-xenial-pike +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic keystone deployment on xenial-pike.""" - -from basic_deployment import KeystoneBasicDeployment - -if __name__ == '__main__': - deployment = KeystoneBasicDeployment(series='xenial', - openstack='cloud:xenial-pike', - source='cloud:xenial-updates/pike') - deployment.run_tests() diff --git a/tests/gate-basic-xenial-queens b/tests/gate-basic-xenial-queens deleted file mode 100755 index 1aa6bbc73d57215120cd8736e22ecc78c13bbdd5..0000000000000000000000000000000000000000 --- a/tests/gate-basic-xenial-queens +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Amulet tests on a basic keystone deployment on xenial-queens.""" - -from basic_deployment import KeystoneV3Deployment - -if __name__ == '__main__': - deployment = KeystoneV3Deployment(series='xenial', - openstack='cloud:xenial-queens', - source='cloud:xenial-updates/queens') - deployment.run_tests() diff --git a/tests/tests.yaml b/tests/tests.yaml index a03e7badbcc04ab2b3e06162f5d8f2c1dabefdfa..6ffefdd8f2bc5192f16e470e55678c35c5fc9230 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -1,18 +1,28 @@ -# Bootstrap the model if necessary. -bootstrap: True -# Re-use bootstrap node. -reset: True -# Use tox/requirements to drive the venv instead of bundletester's venv feature. -virtualenv: False -# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet. -makefile: [] -# Do not specify juju PPA sources. Juju is presumed to be pre-installed -# and configured in all test runner environments. -#sources: -# Do not specify or rely on system packages. -#packages: -# Do not specify python packages here. Use test-requirements.txt -# and tox instead. ie. The venv is constructed before bundletester -# is invoked. 
-#python-packages: -reset_timeout: 600 +charm_name: keystone +smoke_bundles: +- bionic-train +gate_bundles: +- bionic-train +- disco-stein +- bionic-stein +- bionic-rocky +- bionic-queens +- xenial-queens +- xenial-pike +- xenial-ocata +- xenial-mitaka +- trusty-mitaka +comment: | + the glance configure job validates operation of identity-service relation. + The policyd test is generic and validates the policy.d overrides work +configure: +- zaza.openstack.charm_tests.glance.setup.add_lts_image +- zaza.openstack.charm_tests.keystone.setup.add_demo_user +tests: +- zaza.openstack.charm_tests.keystone.tests.AuthenticationAuthorizationTest +- zaza.openstack.charm_tests.keystone.tests.CharmOperationTest +- zaza.openstack.charm_tests.keystone.tests.SecurityTests +- zaza.openstack.charm_tests.policyd.tests.KeystoneTests +tests_options: + policyd: + service: keystone diff --git a/tox.ini b/tox.ini index 6d44f4b9affa6fd79582a8d52d93f057011eefcc..332cff5073185d13df19a8c16b473863979c1d87 100644 --- a/tox.ini +++ b/tox.ini @@ -1,9 +1,19 @@ -# Classic charm: ./tox.ini +# Classic charm (with zaza): ./tox.ini # This file is managed centrally by release-tools and should not be modified -# within individual charm repos. +# within individual charm repos. See the 'global' dir contents for available +# choices of tox.ini for OpenStack Charms: +# https://github.com/openstack-charmers/release-tools +# +# TODO: Distill the func test requirements from the lint/unit test +# requirements. They are intertwined. Also, Zaza itself should specify +# all of its own requirements and if it doesn't, fix it there. [tox] -envlist = pep8,py27 +envlist = pep8,py3 skipsdist = True +# NOTE: Avoid build/test env pollution by not enabling sitepackages. +sitepackages = False +# NOTE: Avoid false positives by not skipping missing interpreters. 
+skip_missing_interpreters = False [testenv] setenv = VIRTUAL_ENV={envdir} @@ -13,72 +23,96 @@ setenv = VIRTUAL_ENV={envdir} install_command = pip install --allow-unverified python-apt {opts} {packages} commands = ostestr {posargs} +install_command = + pip install {opts} {packages} +commands = stestr run --slowest {posargs} whitelist_externals = juju -passenv = HOME TERM AMULET_* CS_API_* +passenv = HOME TERM CS_* OS_* TEST_* +deps = -r{toxinidir}/test-requirements.txt -[testenv:py27] -basepython = python2.7 +[testenv:py35] +basepython = python3.5 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -[testenv:py35] -basepython = python3.5 +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py3] +basepython = python3 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt [testenv:pep8] -basepython = python2.7 +basepython = python3 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} hooks unit_tests tests actions lib +commands = flake8 {posargs} hooks unit_tests tests actions lib files charm-proof +[testenv:cover] +# Technique based heavily upon +# https://github.com/openstack/nova/blob/master/tox.ini +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + stestr run --slowest {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . +omit = + .tox/* + */charmhelpers/* + unit_tests/* + [testenv:venv] +basepython = python3 commands = {posargs} -[testenv:func27-noop] -# DRY RUN - For Debug -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func-noop] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy + functest-run-suite --help -[testenv:func27] -# Charm Functional Test -# Run all gate tests which are +x (expected to always pass) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy + functest-run-suite --keep-model -[testenv:func27-smoke] -# Charm Functional Test -# Run a specific test as an Amulet smoke test (expected to always pass) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func-smoke] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy + functest-run-suite --keep-model --smoke -[testenv:func27-dfs] -# Charm Functional Test -# Run all deploy-from-source tests which are +x (may not always pass!) 
-basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func-dev] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy + functest-run-suite --keep-model --dev -[testenv:func27-dev] -# Charm Functional Test -# Run all development test targets which are +x (may not always pass!) -basepython = python2.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +[testenv:func-target] +basepython = python3 commands = - bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy + functest-run-suite --keep-model --bundle {posargs} [flake8] ignore = E402,E226 diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py index 184cf3d885bf860ccfc74146e412bf88b31b2a75..c7dc9d98af2551118bdc64eb1d39665e6be2e8f8 100644 --- a/unit_tests/__init__.py +++ b/unit_tests/__init__.py @@ -12,7 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import sys -sys.path.append('actions/') -sys.path.append('hooks/') +_path = os.path.dirname(os.path.realpath(__file__)) +_actions = os.path.abspath(os.path.join(_path, '../actions')) +_hooks = os.path.abspath(os.path.join(_path, '../hooks')) +_charmhelpers = os.path.abspath(os.path.join(_path, '../charmhelpers')) +_unit_tests = os.path.abspath(os.path.join(_path, '../unit_tests')) +_scripts = os.path.abspath(os.path.join(_path, '../scripts')) + + +def _add_path(path): + if path not in sys.path: + sys.path.insert(1, path) + +_add_path(_actions) +_add_path(_hooks) +_add_path(_charmhelpers) +_add_path(_unit_tests) +_add_path(_scripts) diff --git a/unit_tests/test_actions.py b/unit_tests/test_actions.py index a8083899065cf1b2fcdd2e5bb061ec804e0e006f..61e9ce77a55338b8cb0fef0a826b576dbb6fd997 100644 --- a/unit_tests/test_actions.py +++ b/unit_tests/test_actions.py @@ -17,9 +17,9 @@ from mock import patch from test_utils import CharmTestCase -with patch('actions.hooks.charmhelpers.contrib.openstack.utils.' +with patch('charmhelpers.contrib.openstack.utils.' 
'snap_install_requested') as snap_install_requested, \ - patch('actions.hooks.keystone_utils.register_configs') as configs: + patch('keystone_utils.register_configs') as configs: snap_install_requested.return_value = False configs.return_value = 'test-config' import actions.actions diff --git a/unit_tests/test_actions_openstack_upgrade.py b/unit_tests/test_actions_openstack_upgrade.py index 2e6bee02e9baef0f87622247bea3cce10315fd52..ea1ddac44362c14b5d5765fa3bace08935f8b05a 100644 --- a/unit_tests/test_actions_openstack_upgrade.py +++ b/unit_tests/test_actions_openstack_upgrade.py @@ -20,7 +20,7 @@ os.environ['JUJU_UNIT_NAME'] = 'keystone' with patch('charmhelpers.contrib.openstack.utils' '.snap_install_requested') as snap_install_requested: snap_install_requested.return_value = False - import openstack_upgrade + import openstack_upgrade as openstack_upgrade from test_utils import ( CharmTestCase @@ -58,7 +58,7 @@ class TestKeystoneUpgradeActions(CharmTestCase): self.assertTrue(self.do_openstack_upgrade.called) self.os.execl.assert_called_with('./hooks/config-changed-postupgrade', - '') + 'config-changed-postupgrade') @patch.object(openstack_upgrade, 'register_configs') @patch('charmhelpers.contrib.openstack.utils.config') diff --git a/unit_tests/test_keystone_contexts.py b/unit_tests/test_keystone_contexts.py index e245b11b1ff1826e4316f6ea4e1c760e44fd95a4..c665464b309623dfa1ad5b85cceae8383d4a30ef 100644 --- a/unit_tests/test_keystone_contexts.py +++ b/unit_tests/test_keystone_contexts.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import collections +import importlib import os from mock import patch, MagicMock @@ -20,6 +22,7 @@ with patch('charmhelpers.contrib.openstack.' snap_install_requested.return_value = False import keystone_utils # noqa import keystone_context as context + importlib.reload(keystone_utils) from test_utils import ( CharmTestCase @@ -29,6 +32,7 @@ TO_PATCH = [ 'config', 'determine_apache_port', 'determine_api_port', + 'os_release', ] @@ -36,6 +40,7 @@ class TestKeystoneContexts(CharmTestCase): def setUp(self): super(TestKeystoneContexts, self).setUp(context, TO_PATCH) + self.config.side_effect = self.test_config.get def test_is_cert_provided_in_config(self): config = {'ssl_cert': 'somecert', 'ssl_key': 'greatkey'} @@ -142,6 +147,8 @@ class TestKeystoneContexts(CharmTestCase): self.assertTrue(mock_https.called) mock_unit_get.assert_called_with('private-address') + @patch('charmhelpers.contrib.openstack.context.is_ipv6_disabled') + @patch('charmhelpers.contrib.openstack.context.get_relation_ip') @patch('charmhelpers.contrib.openstack.context.mkdir') @patch('keystone_utils.api_port') @patch('charmhelpers.contrib.openstack.context.get_netmask_for_address') @@ -153,7 +160,7 @@ class TestKeystoneContexts(CharmTestCase): @patch('charmhelpers.contrib.openstack.context.relation_get') @patch('charmhelpers.contrib.openstack.context.log') @patch('charmhelpers.contrib.openstack.context.kv') - @patch('__builtin__.open') + @patch('builtins.open') def test_haproxy_context_service_enabled( self, mock_open, mock_kv, mock_log, mock_relation_get, mock_related_units, mock_unit_get, mock_relation_ids, mock_config, @@ -171,31 +178,39 @@ class TestKeystoneContexts(CharmTestCase): self.determine_apache_port.return_value = '34' mock_api_port.return_value = '12' mock_kv().get.return_value = 'abcdefghijklmnopqrstuvwxyz123456' - + is_ipv6_disabled.return_value = False ctxt = context.HAProxyContext() self.maxDiff = None - 
self.assertEqual( - ctxt(), - {'listen_ports': {'admin_port': '12', - 'public_port': '12'}, - 'local_host': '127.0.0.1', - 'haproxy_host': '0.0.0.0', - 'stat_port': '8888', - 'stat_password': 'abcdefghijklmnopqrstuvwxyz123456', - 'service_ports': {'admin-port': ['12', '34'], - 'public-port': ['12', '34']}, - 'default_backend': '1.2.3.4', - 'ipv6_enabled': True, - 'frontends': {'1.2.3.4': { - 'network': '1.2.3.4/255.255.255.0', - 'backends': { - 'keystone': '1.2.3.4', - 'unit-0': '10.0.0.0' - } - }} - } - ) + _ctxt = ctxt() + test_ctxt = { + 'listen_ports': { + 'admin_port': '12', + 'public_port': '12' + }, + 'ipv6_enabled': True, + 'local_host': '127.0.0.1', + 'haproxy_host': '0.0.0.0', + 'stat_port': '8888', + 'stat_password': 'abcdefghijklmnopqrstuvwxyz123456', + 'service_ports': { + 'admin-port': ['12', '34'], + 'public-port': ['12', '34'] + }, + 'default_backend': '1.2.3.4', + 'frontends': { + '1.2.3.4': { + 'network': '1.2.3.4/255.255.255.0', + 'backends': collections.OrderedDict([ + ('keystone', '1.2.3.4'), + ('unit-0', '10.0.0.0') + ]), + } + } + } + self.assertEqual(sorted(list(_ctxt.keys())), + sorted(list(test_ctxt.keys()))) + self.assertEqual(_ctxt, test_ctxt) @patch.object(context, 'config') def test_keystone_logger_context(self, mock_config): @@ -206,12 +221,344 @@ class TestKeystoneContexts(CharmTestCase): 'log_file': '/var/log/keystone/keystone.log'}, ctxt()) - @patch.object(context, 'is_elected_leader') - def test_token_flush_context(self, mock_is_elected_leader): + @patch.object(context, 'is_leader') + @patch.object(context, 'fernet_enabled') + def test_token_flush_context( + self, mock_fernet_enabled, mock_is_leader): ctxt = context.TokenFlushContext() - mock_is_elected_leader.return_value = False + mock_fernet_enabled.return_value = False + mock_is_leader.return_value = False self.assertEqual({'token_flush': False}, ctxt()) - mock_is_elected_leader.return_value = True + mock_is_leader.return_value = True self.assertEqual({'token_flush': True}, ctxt()) + + mock_fernet_enabled.return_value = True + self.assertEqual({'token_flush': False}, ctxt()) + + @patch.object(context, 'charm_dir') + @patch.object(context, 'local_unit') + @patch.object(context, 'is_leader') + @patch.object(context, 'fernet_enabled') + def test_fernet_cron_context( + self, mock_fernet_enabled, mock_is_leader, mock_local_unit, + mock_charm_dir): + ctxt = context.FernetCronContext() + + mock_charm_dir.return_value = "my-dir" + mock_local_unit.return_value = "the-local-unit" + + expected = { + 'enabled': False, + 'unit_name': 'the-local-unit', + 'charm_dir': 'my-dir', + 'minute': '*/5', + } + + mock_fernet_enabled.return_value = False + mock_is_leader.return_value = False + self.assertEqual(expected, ctxt()) + + mock_is_leader.return_value = True + self.assertEqual(expected, ctxt()) + + mock_fernet_enabled.return_value = True + expected['enabled'] = True + self.assertEqual(expected, ctxt()) + + def test_fernet_enabled_no_config(self): + self.os_release.return_value = 'ocata' + self.test_config.set('token-provider', 'uuid') + result = context.fernet_enabled() + self.assertFalse(result) + + def test_fernet_enabled_yes_config(self): + self.os_release.return_value = 'ocata' + self.test_config.set('token-provider', 'fernet') + result = context.fernet_enabled() + self.assertTrue(result) + + def test_fernet_enabled_no_release_override_config(self): + self.os_release.return_value = 'mitaka' + self.test_config.set('token-provider', 'fernet') + result = context.fernet_enabled() + self.assertFalse(result) + + def 
test_fernet_enabled_yes_release(self): + self.os_release.return_value = 'rocky' + result = context.fernet_enabled() + self.assertTrue(result) + + def test_fernet_enabled_yes_release_override_config(self): + self.os_release.return_value = 'rocky' + self.test_config.set('token-provider', 'uuid') + result = context.fernet_enabled() + self.assertTrue(result) + + @patch.object(context, 'relation_ids') + @patch.object(context, 'related_units') + @patch.object(context, 'relation_get') + def test_keystone_fid_service_provider_rdata( + self, mock_relation_get, mock_related_units, + mock_relation_ids): + os.environ['JUJU_UNIT_NAME'] = 'keystone' + + def relation_ids_side_effect(rname): + return { + 'keystone-fid-service-provider': { + 'keystone-fid-service-provider:0', + 'keystone-fid-service-provider:1', + 'keystone-fid-service-provider:2' + } + }[rname] + + mock_relation_ids.side_effect = relation_ids_side_effect + + def related_units_side_effect(rid): + return { + 'keystone-fid-service-provider:0': ['sp-mellon/0'], + 'keystone-fid-service-provider:1': ['sp-shib/0'], + 'keystone-fid-service-provider:2': ['sp-oidc/0'], + }[rid] + mock_related_units.side_effect = related_units_side_effect + + def relation_get_side_effect(unit, rid): + # one unit only as the relation is container-scoped + return { + "keystone-fid-service-provider:0": { + "sp-mellon/0": { + "ingress-address": '10.0.0.10', + "protocol-name": '"saml2"', + "remote-id-attribute": '"MELLON_IDP"', + }, + }, + "keystone-fid-service-provider:1": { + "sp-shib/0": { + "ingress-address": '10.0.0.10', + "protocol-name": '"mapped"', + "remote-id-attribute": '"Shib-Identity-Provider"', + }, + }, + "keystone-fid-service-provider:2": { + "sp-oidc/0": { + "ingress-address": '10.0.0.10', + "protocol-name": '"oidc"', + "remote-id-attribute": '"HTTP_OIDC_ISS"', + }, + }, + }[rid][unit] + + mock_relation_get.side_effect = relation_get_side_effect + ctxt = context.KeystoneFIDServiceProviderContext() + + self.maxDiff = None + self.assertCountEqual( + ctxt(), + { + "fid_sps": [ + { + "protocol-name": "saml2", + "remote-id-attribute": "MELLON_IDP", + }, + { + "protocol-name": "mapped", + "remote-id-attribute": "Shib-Identity-Provider", + }, + { + "protocol-name": "oidc", + "remote-id-attribute": "HTTP_OIDC_ISS", + }, + ] + } + ) + + @patch.object(context, 'relation_ids') + def test_keystone_fid_service_provider_empty( + self, mock_relation_ids): + os.environ['JUJU_UNIT_NAME'] = 'keystone' + + def relation_ids_side_effect(rname): + return { + 'keystone-fid-service-provider': {} + }[rname] + + mock_relation_ids.side_effect = relation_ids_side_effect + ctxt = context.KeystoneFIDServiceProviderContext() + + self.maxDiff = None + self.assertCountEqual(ctxt(), {}) + + @patch.object(context, 'relation_ids') + @patch.object(context, 'related_units') + @patch.object(context, 'relation_get') + def test_websso_trusted_dashboard_urls_generated( + self, mock_relation_get, mock_related_units, + mock_relation_ids): + os.environ['JUJU_UNIT_NAME'] = 'keystone' + + def relation_ids_side_effect(rname): + return { + 'websso-trusted-dashboard': { + 'websso-trusted-dashboard:0', + 'websso-trusted-dashboard:1', + 'websso-trusted-dashboard:2' + } + }[rname] + + mock_relation_ids.side_effect = relation_ids_side_effect + + def related_units_side_effect(rid): + return { + 'websso-trusted-dashboard:0': ['dashboard-blue/0', + 'dashboard-blue/1'], + 'websso-trusted-dashboard:1': ['dashboard-red/0', + 'dashboard-red/1'], + 'websso-trusted-dashboard:2': ['dashboard-green/0', + 
'dashboard-green/1'] + }[rid] + mock_related_units.side_effect = related_units_side_effect + + def relation_get_side_effect(unit, rid): + return { + "websso-trusted-dashboard:0": { + "dashboard-blue/0": { # dns-ha + "ingress-address": '10.0.0.10', + "scheme": "https://", + "hostname": "horizon.intranet.test", + "path": "/auth/websso/", + }, + "dashboard-blue/1": { # dns-ha + "ingress-address": '10.0.0.11', + "scheme": "https://", + "hostname": "horizon.intranet.test", + "path": "/auth/websso/", + }, + }, + "websso-trusted-dashboard:1": { + "dashboard-red/0": { # vip + "ingress-address": '10.0.0.12', + "scheme": "https://", + "hostname": "10.0.0.100", + "path": "/auth/websso/", + }, + "dashboard-red/1": { # vip + "ingress-address": '10.0.0.13', + "scheme": "https://", + "hostname": "10.0.0.100", + "path": "/auth/websso/", + }, + }, + "websso-trusted-dashboard:2": { + "dashboard-green/0": { # vip-less, dns-ha-less + "ingress-address": '10.0.0.14', + "scheme": "http://", + "hostname": "10.0.0.14", + "path": "/auth/websso/", + }, + "dashboard-green/1": { + "ingress-address": '10.0.0.15', + "scheme": "http://", + "hostname": "10.0.0.15", + "path": "/auth/websso/", + }, + }, + }[rid][unit] + + mock_relation_get.side_effect = relation_get_side_effect + ctxt = context.WebSSOTrustedDashboardContext() + + self.maxDiff = None + self.assertEqual( + ctxt(), + { + 'trusted_dashboards': set([ + 'https://horizon.intranet.test/auth/websso/', + 'https://10.0.0.100/auth/websso/', + 'http://10.0.0.14/auth/websso/', + 'http://10.0.0.15/auth/websso/', + ]) + } + ) + + @patch.object(context, 'relation_ids') + def test_websso_trusted_dashboard_empty( + self, mock_relation_ids): + os.environ['JUJU_UNIT_NAME'] = 'keystone' + + def relation_ids_side_effect(rname): + return { + 'websso-trusted-dashboard': {} + }[rname] + + mock_relation_ids.side_effect = relation_ids_side_effect + ctxt = context.WebSSOTrustedDashboardContext() + + self.maxDiff = None + self.assertCountEqual(ctxt(), {}) + + @patch.object(context, 'relation_ids') + def test_middleware_no_related_units(self, mock_relation_ids): + os.environ['JUJU_UNIT_NAME'] = 'keystone' + + def relation_ids_side_effect(rname): + return { + 'keystone-middleware': {} + }[rname] + + mock_relation_ids.side_effect = relation_ids_side_effect + ctxt = context.MiddlewareContext() + + self.assertEqual(ctxt(), {'middlewares': ''}) + + @patch('charmhelpers.contrib.openstack.context.relation_ids') + @patch('charmhelpers.contrib.openstack.context.related_units') + @patch('charmhelpers.contrib.openstack.context.relation_get') + def test_middleware_related_units( + self, mock_relation_get, mock_related_units, mock_relation_ids): + mock_relation_ids.return_value = ['keystone-middleware:0'] + mock_related_units.return_value = ['keystone-ico/0'] + settings = \ + { + 'middleware_name': 'keystone-ico', + 'subordinate_configuration': + '{"keystone":' + '{"/etc/keystone/keystone.conf":' + '{"sections":' + '{"authentication":' + '[["simple_token_header", "SimpleToken"],' + '["simple_token_secret", "foobar"]],' + '"auth":' + '[["methods", "external,password,token,oauth1"],' + '["external", "keystone.auth.plugins.external.Domain"],' + '["password", "keystone.auth.plugins.password.Password"],' + '["token", "keystone.auth.plugins.token.Token"],' + '["oauth1", "keystone.auth.plugins.oauth1.OAuth"]]' + '}}}}' + + } + + def fake_rel_get(attribute=None, unit=None, rid=None): + return settings[attribute] + + mock_relation_get.side_effect = fake_rel_get + ctxt = 
context.context.SubordinateConfigContext( + interface=['keystone-middleware'], + service='keystone', + config_file='/etc/keystone/keystone.conf') + + exp = {'sections': { + u'auth': [[u'methods', + u'external,password,token,oauth1'], + [u'external', + u'keystone.auth.plugins.external.Domain'], + [u'password', + u'keystone.auth.plugins.password.Password'], + [u'token', + u'keystone.auth.plugins.token.Token'], + [u'oauth1', + u'keystone.auth.plugins.oauth1.OAuth']], + u'authentication': [[u'simple_token_header', u'SimpleToken'], + [u'simple_token_secret', u'foobar']]}} + + self.assertEqual(ctxt(), exp) diff --git a/unit_tests/test_keystone_hooks.py b/unit_tests/test_keystone_hooks.py index b7bd442b2ed48a003562481eb7ce5ea383f96719..b699ccb22f652ed2f4c444ba952fa6ae8600c138 100644 --- a/unit_tests/test_keystone_hooks.py +++ b/unit_tests/test_keystone_hooks.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +import importlib import os import uuid import yaml import sys -from mock import call, patch, MagicMock +from mock import call, patch, MagicMock, ANY from test_utils import CharmTestCase # python-apt is not installed as part of test-requirements but is imported by @@ -31,9 +32,17 @@ with patch('charmhelpers.core.hookenv.config') as config, \ snap_install_requested.return_value = False config.return_value = 'keystone' import keystone_utils as utils - -_reg = utils.register_configs -_map = utils.restart_map + importlib.reload(utils) + + with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec, \ + patch('keystone_utils.register_configs'), \ + patch('keystone_utils.restart_map'): + mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: + lambda *args, **kwargs: f(*args, **kwargs)) + with patch.object(utils, 'run_in_apache') as mock_run_in_apache: + mock_run_in_apache.return_value = True + import keystone_hooks as hooks + importlib.reload(hooks) utils.register_configs = MagicMock() utils.restart_map = MagicMock() @@ -76,11 +85,9 @@ TO_PATCH = [ # charmhelpers.contrib.openstack.ip 'resolve_address', # charmhelpers.contrib.openstack.ha.utils - 'update_dns_ha_resource_params', 'expect_ha', # charmhelpers.contrib.hahelpers.cluster_utils 'is_elected_leader', - 'get_hacluster_config', 'is_clustered', 'enable_memcache', # keystone_utils @@ -99,15 +106,26 @@ TO_PATCH = [ 'keystone_service', 'create_or_show_domain', 'get_api_version', + 'fernet_enabled', + 'key_leader_set', + 'key_setup', + 'key_write', + 'remove_old_packages', + 'services', # other 'check_call', 'execd_preinstall', +<<<<<<< HEAD 'mkdir', 'os', # ip 'get_iface_for_address', 'get_netmask_for_address', 'git_install', +======= + 'generate_ha_relation_data', + # ip +>>>>>>> stable/19.10 'is_service_present', 'delete_service_entry', 'os_release', @@ -127,12 +145,19 @@ class KeystoneRelationTests(CharmTestCase): self.ssh_user = 'juju_keystone' self.snap_install_requested.return_value = False + @patch.object(hooks, 'maybe_do_policyd_overrides') @patch.object(utils, 'os_release') @patch.object(utils, 'git_install_requested') @patch.object(unison, 'ensure_user') @patch.object(hooks, 'service_stop', lambda *args: None) @patch.object(hooks, 'service_start', lambda *args: None) +<<<<<<< HEAD def test_install_hook(self, ensure_user, git_requested, os_release): +======= + def test_install_hook(self, + os_release, + mock_maybe_do_policyd_overrides): +>>>>>>> stable/19.10 os_release.return_value = 'havana' git_requested.return_value = False 
self.run_in_apache.return_value = False @@ -146,17 +171,30 @@ class KeystoneRelationTests(CharmTestCase): self.apt_install.assert_called_with( ['apache2', 'haproxy', 'keystone', 'openssl', 'pwgen', 'python-keystoneclient', 'python-mysqldb', 'python-psycopg2', +<<<<<<< HEAD 'python-requests', 'python-six', 'unison', 'uuid'], fatal=True) self.git_install.assert_called_with(None) +======= + 'python3-six', 'uuid'], fatal=True) +>>>>>>> stable/19.10 self.disable_unused_apache_sites.assert_not_called() + mock_maybe_do_policyd_overrides.assert_called_once_with( + ANY, "keystone") + @patch.object(hooks, 'maybe_do_policyd_overrides') @patch.object(utils, 'os_release') @patch.object(utils, 'git_install_requested') @patch.object(unison, 'ensure_user') @patch.object(hooks, 'service_stop', lambda *args: None) @patch.object(hooks, 'service_start', lambda *args: None) +<<<<<<< HEAD def test_install_hook_apache2(self, ensure_user, git_requested, os_release): +======= + def test_install_hook_apache2(self, + os_release, + mock_maybe_do_policyd_overrides): +>>>>>>> stable/19.10 os_release.return_value = 'havana' git_requested.return_value = False self.run_in_apache.return_value = True @@ -171,9 +209,15 @@ class KeystoneRelationTests(CharmTestCase): self.apt_install.assert_called_with( ['apache2', 'haproxy', 'keystone', 'openssl', 'pwgen', 'python-keystoneclient', 'python-mysqldb', 'python-psycopg2', +<<<<<<< HEAD 'python-requests', 'python-six', 'unison', 'uuid'], fatal=True) self.git_install.assert_called_with(None) +======= + 'python3-six', 'uuid'], fatal=True) +>>>>>>> stable/19.10 self.disable_unused_apache_sites.assert_called_with() + mock_maybe_do_policyd_overrides.assert_called_once_with( + ANY, "keystone") @patch.object(utils, 'os_release') @patch.object(utils, 'git_install_requested') @@ -325,6 +369,7 @@ class KeystoneRelationTests(CharmTestCase): configs.write.call_args_list) self.assertTrue(leader_init.called) +<<<<<<< HEAD @patch.object(hooks, 'leader_init_db_if_ready') @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'CONFIGS') @@ -338,6 +383,10 @@ class KeystoneRelationTests(CharmTestCase): configs.write.call_args_list) self.assertTrue(leader_init.called) +======= + @patch.object(hooks, 'maybe_do_policyd_overrides_on_config_changed') + @patch.object(hooks, 'notify_middleware_with_release_version') +>>>>>>> stable/19.10 @patch.object(hooks, 'update_all_domain_backends') @patch.object(hooks, 'update_all_identity_relation_units') @patch.object(hooks, 'run_in_apache') @@ -360,6 +409,7 @@ class KeystoneRelationTests(CharmTestCase): @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') @patch.object(hooks, 'configure_https') +<<<<<<< HEAD def test_config_changed_no_upgrade_leader(self, configure_https, identity_changed, configs, get_homedir, @@ -380,6 +430,23 @@ class KeystoneRelationTests(CharmTestCase): mock_run_in_apache, update, mock_update_domains): +======= + def test_config_changed_no_upgrade_leader( + self, + configure_https, + identity_changed, + configs, + mock_cluster_joined, + admin_relation_changed, + mock_log, + mock_is_db_initialised, + mock_run_in_apache, + update, + mock_update_domains, + mock_notify_middleware, + mock_maybe_do_policyd_overrides_on_config_changed + ): +>>>>>>> stable/19.10 def fake_relation_ids(relation): rids = {'cluster': ['cluster:1'], 'identity-service': ['identity-service:0']} @@ -413,7 +480,13 @@ class KeystoneRelationTests(CharmTestCase): self.assertTrue(mock_cluster_joined.called) self.assertTrue(update.called) 
self.assertTrue(mock_update_domains.called) + self.assertTrue(mock_notify_middleware.called_once) + (mock_maybe_do_policyd_overrides_on_config_changed + .assert_called_once_with(ANY, "keystone")) + + @patch.object(hooks, 'maybe_do_policyd_overrides_on_config_changed') + @patch.object(hooks, 'is_db_initialised') @patch.object(hooks, 'update_all_domain_backends') @patch.object(hooks, 'update_all_identity_relation_units') @patch.object(hooks, 'run_in_apache') @@ -433,6 +506,7 @@ class KeystoneRelationTests(CharmTestCase): @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') @patch.object(hooks, 'configure_https') +<<<<<<< HEAD def test_config_changed_no_upgrade_not_leader(self, configure_https, identity_changed, configs, get_homedir, @@ -449,6 +523,20 @@ class KeystoneRelationTests(CharmTestCase): mock_log, git_requested, mock_run_in_apache, update, mock_update_domains): +======= + def test_config_changed_no_upgrade_not_leader( + self, + configure_https, + identity_changed, + configs, + mock_cluster_joined, + mock_log, + mock_run_in_apache, update, + mock_update_domains, + mock_is_db_initialised, + mock_maybe_do_policyd_overrides_on_config_changed + ): +>>>>>>> stable/19.10 def fake_relation_ids(relation): rids = {} @@ -463,8 +551,12 @@ class KeystoneRelationTests(CharmTestCase): mock_is_ssl_cert_master.return_value = True mock_peer_units.return_value = [] self.openstack_upgrade_available.return_value = False +<<<<<<< HEAD self.is_elected_leader.return_value = False mock_ensure_ssl_cert_master.return_value = False +======= + mock_is_db_initialised.return_value = True +>>>>>>> stable/19.10 hooks.config_changed() ensure_user.assert_called_with(user=self.ssh_user, group='keystone') @@ -479,6 +571,10 @@ class KeystoneRelationTests(CharmTestCase): self.assertTrue(update.called) self.assertTrue(mock_update_domains.called) + (mock_maybe_do_policyd_overrides_on_config_changed + .assert_called_once_with(ANY, "keystone")) + + @patch.object(hooks, 'maybe_do_policyd_overrides_on_config_changed') @patch.object(hooks, 'update_all_domain_backends') @patch.object(hooks, 'update_all_identity_relation_units') @patch.object(hooks, 'run_in_apache') @@ -501,6 +597,7 @@ class KeystoneRelationTests(CharmTestCase): @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') @patch.object(hooks, 'configure_https') +<<<<<<< HEAD def test_config_changed_with_openstack_upgrade(self, configure_https, identity_changed, configs, get_homedir, @@ -520,6 +617,22 @@ class KeystoneRelationTests(CharmTestCase): mock_run_in_apache, update, mock_update_domains): +======= + def test_config_changed_with_openstack_upgrade( + self, + configure_https, + identity_changed, + configs, + cluster_joined, + admin_relation_changed, + mock_log, + mock_is_db_initialised, + mock_run_in_apache, + update, + mock_update_domains, + mock_maybe_do_policyd_overrides_on_config_changed + ): +>>>>>>> stable/19.10 def fake_relation_ids(relation): rids = {'identity-service': ['identity-service:0']} return rids.get(relation, []) @@ -553,8 +666,17 @@ class KeystoneRelationTests(CharmTestCase): self.assertTrue(update.called) self.assertTrue(mock_update_domains.called) +<<<<<<< HEAD @patch.object(hooks, 'update_all_domain_backends') @patch.object(hooks, 'update_all_identity_relation_units') +======= + (mock_maybe_do_policyd_overrides_on_config_changed + .assert_called_once_with(ANY, "keystone")) + + @patch.object(hooks, 'maybe_do_policyd_overrides_on_config_changed') + @patch.object(hooks, 'is_expected_scale') + 
@patch.object(hooks, 'os_release') +>>>>>>> stable/19.10 @patch.object(hooks, 'run_in_apache') @patch.object(hooks, 'initialise_pki') @patch.object(hooks, 'git_install_requested') @@ -573,6 +695,7 @@ class KeystoneRelationTests(CharmTestCase): @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') @patch.object(hooks, 'configure_https') +<<<<<<< HEAD def test_config_changed_git_updated(self, configure_https, identity_changed, configs, get_homedir, ensure_user, @@ -649,6 +772,21 @@ class KeystoneRelationTests(CharmTestCase): mock_run_in_apache.return_value = False ensure_ssl_cert.return_value = False peer_units.return_value = [] +======= + def test_config_changed_with_openstack_upgrade_action( + self, + config_https, + mock_db_init, + mock_run_in_apache, + os_release, + is_expected_scale, + mock_maybe_do_policyd_overrides_on_config_changed + ): + os_release.return_value = 'ocata' + self.enable_memcache.return_value = False + mock_run_in_apache.return_value = False + is_expected_scale.return_value = True +>>>>>>> stable/19.10 git_requested.return_value = False self.openstack_upgrade_available.return_value = True @@ -658,6 +796,9 @@ class KeystoneRelationTests(CharmTestCase): self.assertFalse(self.do_openstack_upgrade_reexec.called) + (mock_maybe_do_policyd_overrides_on_config_changed + .assert_called_once_with(ANY, "keystone")) + @patch.object(hooks, 'is_db_initialised') @patch('keystone_utils.log') @patch('keystone_utils.ensure_ssl_cert_master') @@ -784,8 +925,12 @@ class KeystoneRelationTests(CharmTestCase): mock_config.return_value = None hooks.cluster_changed() +<<<<<<< HEAD whitelist = ['_passwd', 'identity-service:', 'db-initialised', 'ssl-cert-available-updates', 'ssl-cert-master'] +======= + whitelist = ['_passwd', 'identity-service:'] +>>>>>>> stable/19.10 self.peer_echo.assert_called_with(force=True, includes=whitelist) ssh_authorized_peers.assert_called_with( user=self.ssh_user, group='juju_keystone', @@ -802,60 +947,24 @@ class KeystoneRelationTests(CharmTestCase): @patch.object(hooks, 'update_all_identity_relation_units') @patch.object(hooks.CONFIGS, 'write') def test_leader_settings_changed(self, mock_write, update): + self.os_release.return_value = 'mitaka' self.relation_ids.return_value = ['identity:1'] self.related_units.return_value = ['keystone/1'] hooks.leader_settings_changed() - mock_write.assert_has_calls([call(utils.TOKEN_FLUSH_CRON_FILE)]) + mock_write.assert_has_calls( + [ + call(utils.TOKEN_FLUSH_CRON_FILE), + call(utils.POLICY_JSON), + ]) self.assertTrue(update.called) def test_ha_joined(self): - self.get_hacluster_config.return_value = { - 'vip': '10.10.10.10', - 'ha-bindiface': 'em0', - 'ha-mcastport': '8080' - } - self.get_iface_for_address.return_value = 'em1' - self.get_netmask_for_address.return_value = '255.255.255.0' - hooks.ha_joined() - args = { - 'relation_id': None, - 'corosync_bindiface': 'em0', - 'corosync_mcastport': '8080', - 'init_services': {'res_ks_haproxy': 'haproxy'}, - 'resources': {'res_ks_em1_vip': 'ocf:heartbeat:IPaddr2', - 'res_ks_haproxy': 'lsb:haproxy'}, - 'resource_params': { - 'res_ks_em1_vip': 'params ip="10.10.10.10"' - ' cidr_netmask="255.255.255.0" nic="em1"', - 'res_ks_haproxy': 'op monitor interval="5s"'}, - 'clones': {'cl_ks_haproxy': 'res_ks_haproxy'} - } - self.relation_set.assert_called_with(**args) - - def test_ha_joined_duplicate_vip_key(self): - self.get_hacluster_config.return_value = { - 'vip': '10.10.10.10 10.10.10.10', - 'ha-bindiface': 'em0', - 'ha-mcastport': '8080' - } - 
self.get_iface_for_address.return_value = 'em1' - self.get_netmask_for_address.return_value = '255.255.255.0' - hooks.ha_joined() - args = { - 'relation_id': None, - 'corosync_bindiface': 'em0', - 'corosync_mcastport': '8080', - 'init_services': {'res_ks_haproxy': 'haproxy'}, - 'resources': {'res_ks_em1_vip': 'ocf:heartbeat:IPaddr2', - 'res_ks_haproxy': 'lsb:haproxy'}, - 'resource_params': { - 'res_ks_em1_vip': 'params ip="10.10.10.10"' - ' cidr_netmask="255.255.255.0" nic="em1"', - 'res_ks_haproxy': 'op monitor interval="5s"'}, - 'clones': {'cl_ks_haproxy': 'res_ks_haproxy'} - } - self.relation_set.assert_called_with(**args) + self.generate_ha_relation_data.return_value = {'rel_data': 'data'} + hooks.ha_joined(relation_id='rid:23') + self.relation_set.assert_called_once_with( + relation_id='rid:23', rel_data='data') +<<<<<<< HEAD def test_ha_joined_dual_stack_vips(self): self.get_hacluster_config.return_value = { 'vip': '10.10.10.10 2001:db8::abc', @@ -969,15 +1078,21 @@ class KeystoneRelationTests(CharmTestCase): self.assertTrue(self.update_dns_ha_resource_params.called) self.relation_set.assert_called_with(**args) +======= +>>>>>>> stable/19.10 @patch('keystone_utils.log') @patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.synchronize_ca') @patch.object(hooks, 'CONFIGS') def test_ha_relation_changed_not_clustered_not_leader(self, configs, +<<<<<<< HEAD mock_synchronize_ca, mock_is_master, mock_log): mock_is_master.return_value = False +======= + mock_log): +>>>>>>> stable/19.10 self.relation_get.return_value = False self.is_elected_leader.return_value = False @@ -985,8 +1100,14 @@ class KeystoneRelationTests(CharmTestCase): self.assertTrue(configs.write_all.called) self.assertFalse(mock_synchronize_ca.called) +<<<<<<< HEAD @patch.object(hooks, 'is_ssl_cert_master') @patch.object(hooks, 'update_all_identity_relation_units_force_sync') +======= + @patch.object(hooks, 'update_all_fid_backends') + @patch.object(hooks, 'update_all_domain_backends') + @patch.object(hooks, 'update_all_identity_relation_units') +>>>>>>> stable/19.10 @patch.object(hooks, 'is_db_initialised') @patch('keystone_utils.log') @patch('keystone_utils.ensure_ssl_cert_master') @@ -997,7 +1118,13 @@ class KeystoneRelationTests(CharmTestCase): mock_ensure_ssl_cert_master, mock_log, mock_is_db_initialised, +<<<<<<< HEAD update, cert_master): +======= + update_ids, + update_domains, + update_fids): +>>>>>>> stable/19.10 mock_is_db_initialised.return_value = True self.is_db_ready.return_value = True mock_ensure_ssl_cert_master.return_value = False @@ -1009,7 +1136,9 @@ class KeystoneRelationTests(CharmTestCase): hooks.ha_changed() self.assertTrue(configs.write_all.called) - self.assertTrue(update.called) + update_ids.assert_called_once_with() + update_domains.assert_called_once_with() + update_fids.assert_called_once_with() @patch('keystone_utils.log') @patch('keystone_utils.ensure_ssl_cert_master') @@ -1042,6 +1171,7 @@ class KeystoneRelationTests(CharmTestCase): cmd = ['a2dissite', 'openstack_https_frontend'] self.check_call.assert_called_with(cmd) + @patch.object(hooks, 'maybe_do_policyd_overrides') @patch.object(hooks, 'update_all_identity_relation_units') @patch.object(utils, 'os_release') @patch.object(utils, 'git_install_requested') @@ -1049,6 +1179,7 @@ class KeystoneRelationTests(CharmTestCase): @patch.object(hooks, 'is_db_initialised') @patch('keystone_utils.log') @patch('keystone_utils.relation_ids') +<<<<<<< HEAD @patch('keystone_utils.is_elected_leader') 
@patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.update_hash_from_path') @@ -1059,18 +1190,25 @@ class KeystoneRelationTests(CharmTestCase): mock_update_hash_from_path, mock_ensure_ssl_cert_master, mock_is_elected_leader, +======= + @patch.object(hooks, 'stop_manager_instance') + def test_upgrade_charm_leader(self, + mock_stop_manager_instance, +>>>>>>> stable/19.10 mock_relation_ids, mock_log, mock_is_db_initialised, mock_is_db_ready, git_requested, os_release, - update): + update, + mock_maybe_do_policyd_overrides): os_release.return_value = 'havana' mock_is_db_initialised.return_value = True mock_is_db_ready.return_value = True mock_is_elected_leader.return_value = False mock_relation_ids.return_value = [] +<<<<<<< HEAD mock_ensure_ssl_cert_master.return_value = True # Ensure always returns diff mock_update_hash_from_path.side_effect = \ @@ -1079,6 +1217,12 @@ class KeystoneRelationTests(CharmTestCase): self.is_elected_leader.return_value = True self.filter_installed_packages.return_value = [] git_requested.return_value = False +======= + self.remove_old_packages.return_value = True + self.services.return_value = ['apache2'] + + self.filter_installed_packages.return_value = ['something'] +>>>>>>> stable/19.10 hooks.upgrade_charm() self.assertTrue(self.apt_install.called) ssh_authorized_peers.assert_called_with( @@ -1086,6 +1230,45 @@ class KeystoneRelationTests(CharmTestCase): peer_interface='cluster', ensure_local_user=True) self.assertTrue(mock_synchronize_ca.called) self.assertTrue(update.called) + self.remove_old_packages.assert_called_once_with() + self.service_restart.assert_called_with('apache2') + mock_stop_manager_instance.assert_called_once_with() + mock_maybe_do_policyd_overrides.assert_called_once_with( + ANY, "keystone") + + @patch.object(hooks, 'maybe_do_policyd_overrides') + @patch.object(hooks, 'update_all_identity_relation_units') + @patch.object(utils, 'os_release') + @patch.object(hooks, 'is_db_ready') + @patch.object(hooks, 'is_db_initialised') + @patch('keystone_utils.log') + @patch('keystone_utils.relation_ids') + @patch.object(hooks, 'stop_manager_instance') + def test_upgrade_charm_leader_no_packages(self, + mock_stop_manager_instance, + mock_relation_ids, + mock_log, + mock_is_db_initialised, + mock_is_db_ready, + os_release, + update, + mock_maybe_do_policyd_overrides): + os_release.return_value = 'havana' + mock_is_db_initialised.return_value = True + mock_is_db_ready.return_value = True + mock_relation_ids.return_value = [] + self.remove_old_packages.return_value = True + self.services.return_value = ['apache2'] + + self.filter_installed_packages.return_value = [] + hooks.upgrade_charm() + self.assertFalse(self.apt_install.called) + self.assertTrue(update.called) + self.remove_old_packages.assert_called_once_with() + self.service_restart.assert_called_with('apache2') + mock_stop_manager_instance.assert_called_once_with() + mock_maybe_do_policyd_overrides.assert_called_once_with( + ANY, "keystone") @patch.object(hooks, 'update_all_identity_relation_units') @patch.object(hooks, 'is_db_initialised') @@ -1095,6 +1278,7 @@ class KeystoneRelationTests(CharmTestCase): self.is_elected_leader.return_value = True is_db_initialized.return_value = False self.is_db_ready.return_value = True + self.os_release.return_value = 'mitaka' hooks.leader_init_db_if_ready() self.is_db_ready.assert_called_with(use_current_context=False) self.migrate_database.assert_called_with() @@ -1137,6 +1321,7 @@ class KeystoneRelationTests(CharmTestCase): 
self.assertFalse(self.migrate_database.called) self.assertFalse(update.called) + @patch.object(hooks, 'is_expected_scale') @patch.object(hooks, 'configure_https') @patch.object(hooks, 'admin_relation_changed') @patch.object(hooks, 'identity_credentials_changed') @@ -1148,9 +1333,11 @@ class KeystoneRelationTests(CharmTestCase): identity_changed, identity_credentials_changed, admin_relation_changed, - configure_https): + configure_https, + is_expected_scale): """ Verify all identity relations are updated """ is_db_initialized.return_value = True + is_expected_scale.return_value = True self.relation_ids.return_value = ['identity-relation:0'] self.related_units.return_value = ['unit/0'] log_calls = [call('Firing identity_changed hook for all related ' @@ -1197,39 +1384,51 @@ class KeystoneRelationTests(CharmTestCase): level='INFO') self.assertFalse(self.relation_ids.called) + @patch.object(hooks, 'is_expected_scale') @patch.object(hooks, 'configure_https') @patch.object(hooks, 'is_db_initialised') @patch.object(hooks, 'CONFIGS') def test_update_all_leader(self, configs, is_db_initialized, - configure_https): + configure_https, is_expected_scale): """ Verify update identity relations when the leader""" self.is_elected_leader.return_value = True is_db_initialized.return_value = True + is_expected_scale.return_value = True hooks.update_all_identity_relation_units(check_db_ready=False) +<<<<<<< HEAD self.assertTrue(configs.write_all.called) self.assertTrue(self.ensure_initial_admin.called) +======= +>>>>>>> stable/19.10 # Still updates relations self.assertTrue(self.relation_ids.called) + @patch.object(hooks, 'is_expected_scale') @patch.object(hooks, 'configure_https') @patch.object(hooks, 'is_db_initialised') @patch.object(hooks, 'CONFIGS') def test_update_all_not_leader(self, configs, is_db_initialized, - configure_https): + configure_https, is_expected_scale): """ Verify update identity relations when not the leader""" self.is_elected_leader.return_value = False is_db_initialized.return_value = True + is_expected_scale.return_value = True hooks.update_all_identity_relation_units(check_db_ready=False) self.assertTrue(configs.write_all.called) self.assertFalse(self.ensure_initial_admin.called) # Still updates relations self.assertTrue(self.relation_ids.called) +<<<<<<< HEAD +======= + @patch.object(hooks, 'maybe_do_policyd_overrides') +>>>>>>> stable/19.10 @patch.object(hooks, 'update_all_identity_relation_units') @patch.object(utils, 'os_release') @patch.object(utils, 'git_install_requested') @patch('keystone_utils.log') @patch('keystone_utils.relation_ids') +<<<<<<< HEAD @patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.update_hash_from_path') @patch.object(unison, 'ssh_authorized_peers') @@ -1239,6 +1438,15 @@ class KeystoneRelationTests(CharmTestCase): mock_relation_ids, mock_log, git_requested, os_release, update): +======= + @patch.object(hooks, 'stop_manager_instance') + def test_upgrade_charm_not_leader(self, + mock_stop_manager_instance, + mock_relation_ids, + mock_log, + os_release, update, + mock_maybe_do_policyd_overrides): +>>>>>>> stable/19.10 os_release.return_value = 'havana' mock_relation_ids.return_value = [] mock_ensure_ssl_cert_master.return_value = False @@ -1246,6 +1454,10 @@ class KeystoneRelationTests(CharmTestCase): mock_update_hash_from_path.side_effect = \ lambda hash, *args, **kwargs: hash.update(str(uuid.uuid4())) +<<<<<<< HEAD +======= + self.filter_installed_packages.return_value = ['something'] +>>>>>>> stable/19.10 
self.is_elected_leader.return_value = False self.filter_installed_packages.return_value = [] git_requested.return_value = False @@ -1256,6 +1468,36 @@ class KeystoneRelationTests(CharmTestCase): peer_interface='cluster', ensure_local_user=True) self.assertTrue(self.log.called) self.assertFalse(update.called) + mock_stop_manager_instance.assert_called_once() + mock_maybe_do_policyd_overrides.assert_called_once_with( + ANY, "keystone") + + @patch.object(hooks, 'maybe_do_policyd_overrides') + @patch.object(hooks, 'update_all_identity_relation_units') + @patch.object(utils, 'os_release') + @patch('keystone_utils.log') + @patch('keystone_utils.relation_ids') + @patch.object(hooks, 'stop_manager_instance') + def test_upgrade_charm_not_leader_no_packages( + self, + mock_stop_manager_instance, + mock_relation_ids, + mock_log, + os_release, + update, + mock_maybe_do_policyd_overrides + ): + os_release.return_value = 'havana' + + self.filter_installed_packages.return_value = [] + self.is_elected_leader.return_value = False + hooks.upgrade_charm() + self.assertFalse(self.apt_install.called) + self.assertTrue(self.log.called) + self.assertFalse(update.called) + mock_stop_manager_instance.assert_called_once() + mock_maybe_do_policyd_overrides.assert_called_once_with( + ANY, "keystone") def test_domain_backend_changed_v2(self): self.get_api_version.return_value = 2 @@ -1277,7 +1519,15 @@ class KeystoneRelationTests(CharmTestCase): @patch.object(hooks, 'is_unit_paused_set') @patch.object(hooks, 'is_db_initialised') +<<<<<<< HEAD + def test_domain_backend_changed_complete(self, +======= + @patch.object(utils, 'run_in_apache') + @patch.object(utils, 'restart_pid_check') def test_domain_backend_changed_complete(self, + restart_pid_check, + run_in_apache, +>>>>>>> stable/19.10 is_db_initialised, is_unit_paused_set): self.get_api_version.return_value = 3 @@ -1303,14 +1553,26 @@ class KeystoneRelationTests(CharmTestCase): rid=None), ]) self.create_or_show_domain.assert_called_with('mydomain') +<<<<<<< HEAD self.service_restart.assert_called_with('apache2') +======= + restart_pid_check.assert_called_with('apache2') +>>>>>>> stable/19.10 mock_kv.set.assert_called_with('domain-restart-nonce-mydomain', 'nonce2') self.assertTrue(mock_kv.flush.called) @patch.object(hooks, 'is_unit_paused_set') @patch.object(hooks, 'is_db_initialised') +<<<<<<< HEAD def test_domain_backend_changed_complete_follower(self, +======= + @patch.object(utils, 'run_in_apache') + @patch.object(utils, 'restart_pid_check') + def test_domain_backend_changed_complete_follower(self, + restart_pid_check, + run_in_apache, +>>>>>>> stable/19.10 is_db_initialised, is_unit_paused_set): self.get_api_version.return_value = 3 @@ -1337,7 +1599,211 @@ class KeystoneRelationTests(CharmTestCase): ]) # Only lead unit will create the domain self.assertFalse(self.create_or_show_domain.called) +<<<<<<< HEAD self.service_restart.assert_called_with('apache2') mock_kv.set.assert_called_with('domain-restart-nonce-mydomain', 'nonce2') self.assertTrue(mock_kv.flush.called) +======= + restart_pid_check.assert_called_with('apache2') + mock_kv.set.assert_called_with('domain-restart-nonce-mydomain', + 'nonce2') + self.assertTrue(mock_kv.flush.called) + + @patch.object(hooks, 'os_release') + @patch.object(hooks, 'relation_id') + @patch.object(hooks, 'is_unit_paused_set') + @patch.object(hooks, 'is_db_initialised') + @patch.object(utils, 'run_in_apache') + @patch.object(utils, 'restart_pid_check') + def test_fid_service_provider_changed_complete( + self, + restart_pid_check, 
+ run_in_apache, + is_db_initialised, + is_unit_paused_set, + relation_id, os_release): + os_release.return_value = 'ocata' + rel = 'keystone-fid-service-provider:0' + relation_id.return_value = rel + run_in_apache.return_value = True + self.get_api_version.return_value = 3 + self.relation_get.side_effect = ['"nonce2"'] + self.is_leader.return_value = True + self.is_db_ready.return_value = True + is_db_initialised.return_value = True + self.resolve_address.return_value = "10.0.0.10" + mock_kv = MagicMock() + mock_kv.get.return_value = None + self.unitdata.kv.return_value = mock_kv + is_unit_paused_set.return_value = False + + hooks.keystone_fid_service_provider_changed() + + self.assertTrue(self.get_api_version.called) + self.relation_get.assert_has_calls([ + call('restart-nonce'), + ]) + restart_pid_check.assert_called_with('apache2') + mock_kv.set.assert_called_with( + 'fid-restart-nonce-{}'.format(rel), 'nonce2') + self.assertTrue(mock_kv.flush.called) + + @patch.object(hooks, 'os_release') + @patch.object(hooks, 'relation_id') + @patch.object(hooks, 'is_unit_paused_set') + @patch.object(hooks, 'is_db_initialised') + @patch.object(utils, 'run_in_apache') + @patch.object(utils, 'restart_pid_check') + def test_fid_service_provider_changed_complete_follower( + self, + restart_pid_check, + run_in_apache, + is_db_initialised, + is_unit_paused_set, + relation_id, os_release): + os_release.return_value = 'ocata' + rel = 'keystone-fid-service-provider:0' + relation_id.return_value = rel + run_in_apache.return_value = True + self.get_api_version.return_value = 3 + self.relation_get.side_effect = ['"nonce2"'] + self.is_leader.return_value = False + self.is_db_ready.return_value = True + is_db_initialised.return_value = True + mock_kv = MagicMock() + mock_kv.get.return_value = None + self.unitdata.kv.return_value = mock_kv + is_unit_paused_set.return_value = False + self.resolve_address.return_value = "10.0.0.10" + + hooks.keystone_fid_service_provider_changed() + + self.assertTrue(self.get_api_version.called) + self.relation_get.assert_has_calls([ + call('restart-nonce'), + ]) + restart_pid_check.assert_called_with('apache2') + mock_kv.set.assert_called_with( + 'fid-restart-nonce-{}'.format(rel), + 'nonce2') + self.assertTrue(mock_kv.flush.called) + + def test_update_keystone_fid_service_provider_no_tls(self): + self.relation_ids.return_value = [] + public_addr = "10.0.0.10" + self.resolve_address.return_value = public_addr + relation_id = "keystone-fid-service-provider-certificates:5" + relation_settings = { + 'hostname': '"{}"'.format(public_addr), + 'port': '5000', + 'tls-enabled': 'false' + } + hooks.update_keystone_fid_service_provider(relation_id=relation_id) + self.relation_set.assert_called_once_with( + relation_id=relation_id, relation_settings=relation_settings) + + def test_update_keystone_fid_service_provider_tls_certificates_relation( + self): + self.relation_ids.return_value = ["certficates:9"] + public_addr = "10.0.0.10" + self.resolve_address.return_value = public_addr + relation_id = "keystone-fid-service-provider-certificates:5" + relation_settings = { + 'hostname': '"{}"'.format(public_addr), + 'port': '5000', + 'tls-enabled': 'true' + } + hooks.update_keystone_fid_service_provider(relation_id=relation_id) + self.relation_set.assert_called_once_with( + relation_id=relation_id, relation_settings=relation_settings) + + def test_update_keystone_fid_service_provider_ssl_config(self): + self.test_config.set("ssl_cert", "CERTIFICATE") + self.test_config.set("ssl_key", "KEY") + 
self.relation_ids.return_value = [] + public_addr = "10.0.0.10" + self.resolve_address.return_value = public_addr + relation_id = "keystone-fid-service-provider-certificates:5" + relation_settings = { + 'hostname': '"{}"'.format(public_addr), + 'port': '5000', + 'tls-enabled': 'true' + } + hooks.update_keystone_fid_service_provider(relation_id=relation_id) + self.relation_set.assert_called_once_with( + relation_id=relation_id, relation_settings=relation_settings) + + @patch.object(hooks, 'relation_set') + @patch.object(hooks, 'get_certificate_request') + def test_certs_joined(self, get_certificate_request, relation_set): + get_certificate_request.return_value = {'cn': 'this-unit'} + hooks.certs_joined(relation_id='rid:23') + relation_set.assert_called_once_with( + relation_id='rid:23', + relation_settings={'cn': 'this-unit'}) + + @patch.object(hooks, 'update_all_fid_backends') + @patch.object(hooks, 'config') + @patch.object(hooks, 'update_all_domain_backends') + @patch.object(hooks, 'update_all_identity_relation_units') + @patch.object(hooks, 'ensure_initial_admin') + @patch.object(hooks, 'is_unit_paused_set') + @patch.object(hooks, 'is_elected_leader') + @patch.object(hooks, 'is_db_initialised') + @patch.object(hooks, 'configure_https') + @patch.object(hooks, 'process_certificates') + def test_certs_changed(self, process_certificates, configure_https, + is_db_initialised, + is_elected_leader, is_unit_paused_set, + ensure_initial_admin, + update_all_identity_relation_units, + update_all_domain_backends, config, + update_all_fid_backends): + is_db_initialised.return_value = True + is_elected_leader.return_value = True + is_unit_paused_set.return_value = False + process_certificates.return_value = False + hooks.certs_changed() + process_certificates.assert_called_once_with('keystone', None, None) + self.assertFalse(configure_https.called) + self.assertFalse(ensure_initial_admin.called) + process_certificates.reset_mock() + process_certificates.return_value = True + hooks.certs_changed() + configure_https.assert_called_once_with() + is_db_initialised.assert_called_once_with() + is_elected_leader.assert_called_once_with('grp_ks_vips') + is_unit_paused_set.assert_called_once_with() + ensure_initial_admin.assert_called_once_with(config) + update_all_identity_relation_units.assert_called_once_with() + update_all_domain_backends.assert_called_once_with() + update_all_fid_backends.assert_called_once_with() + + ensure_initial_admin.reset_mock() + is_db_initialised.return_value = False + hooks.certs_changed() + self.assertFalse(ensure_initial_admin.called) + + @patch.object(hooks, 'relation_set') + @patch.object(hooks, 'os_release') + def test_keystone_middleware_notify_release( + self, + os_release, + relation_set): + self.relation_ids.return_value = ['keystone-middleware:0'] + os_release.return_value = 'Pike' + hooks.keystone_middleware_joined() + relation_set.assert_called_once_with( + relation_id='keystone-middleware:0', release='Pike') + + @patch.object(hooks, 'notify_middleware_with_release_version') + @patch.object(hooks, 'CONFIGS') + def test_keystone_middleware_config_changed( + self, + configs, + notify_middleware_with_release_version): + hooks.keystone_middleware_changed() + self.assertTrue(configs.write.called) + self.assertFalse(notify_middleware_with_release_version.called) +>>>>>>> stable/19.10 diff --git a/unit_tests/test_keystone_utils.py b/unit_tests/test_keystone_utils.py index ebfd5fddd19757cfa77dec9cbd18abbe3cef2f19..301f63c24f5183cde2f7d2f5c351acb221fd17f0 100644 --- 
a/unit_tests/test_keystone_utils.py +++ b/unit_tests/test_keystone_utils.py @@ -12,18 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import patch, call, MagicMock -from test_utils import CharmTestCase +import builtins +import collections +from mock import patch, call, MagicMock, mock_open, Mock +import json import os from base64 import b64encode import subprocess +import time + +from test_utils import CharmTestCase os.environ['JUJU_UNIT_NAME'] = 'keystone' with patch('charmhelpers.core.hookenv.config') as config, \ patch('charmhelpers.contrib.openstack.' - 'utils.snap_install_requested') as snap_install_requested: - snap_install_requested.return_value = False + 'utils.snap_install_requested', + Mock(return_value=False)): + import importlib import keystone_utils as utils + # we have to force utils to reload as another test module may already have + # pulled it in, and thus all this fancy patching will just fail + importlib.reload(utils) TO_PATCH = [ 'api_port', @@ -63,6 +72,8 @@ TO_PATCH = [ 'https', 'peer_store', 'pip_install', + 'mkdir', + 'write_file', # generic 'apt_update', 'apt_upgrade', @@ -132,7 +143,35 @@ class TestKeystoneUtils(CharmTestCase): '/etc/apache2/sites-available/openstack_https_frontend.conf', [self.ctxt]), ] - self.assertEqual(fake_renderer.register.call_args_list, ex_reg) + fake_renderer.register.assert_has_calls(ex_reg, any_order=True) + + def test_resource_map_exclude_policy_json_before_liberty(self): + resources = self._test_resource_map(os_release='kilo') + self.assertFalse('/etc/keystone/policy.json' in resources.keys()) + + def test_resource_map_include_policy_json_from_liberty(self): + resources = self._test_resource_map(os_release='liberty') + self.assertTrue('/etc/keystone/policy.json' in resources.keys()) + + def test_resource_map_apache24_conf_present_if_conf_avail_present(self): + resources = self._test_resource_map(os_path_return_value=True) + self.assertTrue( + '/etc/apache2/sites-available/openstack_https_frontend.conf' + in resources.keys()) + + def test_resource_map_apache24_conf_absent_if_conf_avail_absent(self): + resources = self._test_resource_map(os_path_return_value=False) + self.assertFalse( + '/etc/apache2/sites-available/openstack_https_frontend.conf' + in resources.keys()) + + def test_resource_map_excludes_apache_files_if_using_snap(self): + resources = self._test_resource_map(use_snap=True) + for config_file in ( + '/etc/apache2/sites-available/openstack_https_frontend', + '/etc/apache2/sites-available/openstack_https_frontend.conf', + ): + self.assertFalse(config_file in resources.keys()) @patch.object(utils, 'git_determine_usr_bin') @patch.object(utils, 'snap_install_requested') @@ -145,6 +184,21 @@ class TestKeystoneUtils(CharmTestCase): git_determine_usr_bin.return_value = '/usr/bin' mock_os.path.exists.return_value = True self.assertTrue('/etc/memcached.conf' in utils.resource_map().keys()) + def test_resource_map_ensure_snap_includes_nginx_and_uwsgi(self): + resources = self._test_resource_map(use_snap=True) + required_services = ('snap.keystone.nginx', 'snap.keystone.uwsgi') + for cfile in resources: + services = resources[cfile]['services'] + self.assertTrue(all(service in services) + for service in required_services) + + def test_resource_map_enable_memcache_mitaka(self): + resources = self._test_resource_map(os_release='mitaka') + self.assertTrue('/etc/memcached.conf' in resources.keys()) + + def test_resource_map_enable_memcache_liberty(self): + 
resources = self._test_resource_map(os_release='liberty') + self.assertFalse('/etc/memcached.conf' in resources.keys()) @patch.object(utils, 'git_determine_usr_bin') @patch.object(utils, 'snap_install_requested') @@ -157,6 +211,15 @@ class TestKeystoneUtils(CharmTestCase): git_determine_usr_bin.return_value = '/usr/bin' mock_os.path.exists.return_value = True self.assertFalse('/etc/memcached.conf' in utils.resource_map().keys()) + def _test_resource_map(self, mock_os, snap_install_requested, + os_release='mitaka', + use_snap=False, + os_path_return_value=False): + self.os_release.return_value = os_release + snap_install_requested.return_value = use_snap + mock_os.path.exists.return_value = os_path_return_value + resource_map = utils.resource_map() + return resource_map def test_determine_ports(self): self.test_config.set('admin-port', '80') @@ -174,14 +237,26 @@ class TestKeystoneUtils(CharmTestCase): self.assertEqual(set(ex), set(result)) @patch('charmhelpers.contrib.openstack.utils.config') - def test_determine_packages_mitaka(self, _config): - self.os_release.return_value = 'mitaka' + def test_determine_packages_queens(self, _config): + self.os_release.return_value = 'queens' self.snap_install_requested.return_value = False _config.return_value = None result = utils.determine_packages() ex = utils.BASE_PACKAGES + [ - 'keystone', 'python-keystoneclient', 'libapache2-mod-wsgi', - 'memcached'] + 'keystone', 'python-keystoneclient', 'memcached', + 'libapache2-mod-wsgi' + ] + self.assertEqual(set(ex), set(result)) + + @patch('charmhelpers.contrib.openstack.utils.config') + def test_determine_packages_rocky(self, _config): + self.os_release.return_value = 'rocky' + self.snap_install_requested.return_value = False + _config.return_value = None + result = utils.determine_packages() + ex = list(set( + [p for p in utils.BASE_PACKAGES if not p.startswith('python-')] + + ['memcached'] + utils.PY3_PACKAGES)) self.assertEqual(set(ex), set(result)) @patch('charmhelpers.contrib.openstack.utils.config') @@ -203,6 +278,20 @@ class TestKeystoneUtils(CharmTestCase): ex = utils.BASE_PACKAGES_SNAP + ['memcached'] self.assertEqual(set(ex), set(result)) + def test_determine_purge_packages(self): + 'Ensure no packages are identified for purge prior to rocky' + self.os_release.return_value = 'queens' + self.assertEqual(utils.determine_purge_packages(), []) + + def test_determine_purge_packages_rocky(self): + 'Ensure python packages are identified for purge at rocky' + self.os_release.return_value = 'rocky' + self.assertEqual(utils.determine_purge_packages(), + [p for p in utils.BASE_PACKAGES + if p.startswith('python-')] + + ['python-keystone', 'python-memcache']) + + @patch.object(utils, 'is_elected_leader') @patch.object(utils, 'disable_unused_apache_sites') @patch('os.path.exists') @patch.object(utils, 'run_in_apache') @@ -213,6 +302,7 @@ class TestKeystoneUtils(CharmTestCase): run_in_apache, os_path_exists, disable_unused_apache_sites): configs = MagicMock() self.test_config.set('openstack-origin', 'cloud:xenial-newton') + self.os_release.return_value = 'ocata' determine_packages.return_value = [] self.is_elected_leader.return_value = True os_path_exists.return_value = True @@ -247,7 +337,24 @@ class TestKeystoneUtils(CharmTestCase): disable_unused_apache_sites.assert_called_with() self.reset_os_release.assert_called() - def test_migrate_database(self): + @patch.object(utils, 'leader_get') + def test_is_db_initialised_true_string(self, _leader_get): + _leader_get.return_value = "True" + 
self.assertTrue(utils.is_db_initialised()) + + @patch.object(utils, 'leader_get') + def test_is_db_initialised_true_bool(self, _leader_get): + _leader_get.return_value = True + self.assertTrue(utils.is_db_initialised()) + + @patch.object(utils, 'leader_get') + def test_is_db_initialised_not_set(self, _leader_get): + _leader_get.return_value = None + self.assertFalse(utils.is_db_initialised()) + + @patch.object(utils, 'stop_manager_instance') + @patch.object(utils, 'leader_set') + def test_migrate_database(self, _leader_set, mock_stop_manager_instance): self.os_release.return_value = 'havana' utils.migrate_database() @@ -255,6 +362,8 @@ class TestKeystoneUtils(CharmTestCase): cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'db_sync'] self.subprocess.check_output.assert_called_with(cmd) self.service_start.assert_called_with('keystone') + _leader_set.assert_called_with({'db-initialised': True}) + mock_stop_manager_instance.assert_called_once_with() @patch.object(utils, 'leader_get') @patch.object(utils, 'get_api_version') @@ -303,6 +412,7 @@ class TestKeystoneUtils(CharmTestCase): self.peer_store_and_set.assert_called_with(relation_id=relation_id, **relation_data) + @patch.object(utils, 'leader_set') @patch.object(utils, 'leader_get') @patch.object(utils, 'get_api_version') @patch.object(utils, 'create_user') @@ -313,7 +423,7 @@ class TestKeystoneUtils(CharmTestCase): def test_add_service_to_keystone_no_clustered_no_https_complete_values( self, KeystoneManager, add_endpoint, ensure_valid_service, _resolve_address, create_user, get_api_version, leader_get, - test_api_version=2): + leader_set, test_api_version=2): get_api_version.return_value = test_api_version leader_get.return_value = None relation_id = 'identity-service:0' @@ -327,7 +437,6 @@ class TestKeystoneUtils(CharmTestCase): self.test_config.set('admin-port', 80) self.test_config.set('service-port', 81) self.https.return_value = False - self.test_config.set('https-service-endpoints', 'False') self.get_local_endpoint.return_value = 'http://localhost:80/v2.0/' self.relation_ids.return_value = ['cluster/0'] @@ -346,6 +455,7 @@ class TestKeystoneUtils(CharmTestCase): 'admin_url': '10.0.0.2', 'internal_url': '192.168.1.2'} + mock_keystone.user_exists.return_value = False utils.add_service_to_keystone( relation_id=relation_id, remote_unit=remote_unit) @@ -382,8 +492,8 @@ class TestKeystoneUtils(CharmTestCase): 'service_tenant_id': 'tenant_id', 'api_version': test_api_version} - filtered = {} - for k, v in relation_data.iteritems(): + filtered = collections.OrderedDict() + for k, v in relation_data.items(): if v == '__null__': filtered[k] = None else: @@ -401,6 +511,8 @@ class TestKeystoneUtils(CharmTestCase): test_add_service_to_keystone_no_clustered_no_https_complete_values( test_api_version=3) + @patch.object(utils, 'leader_set') + @patch.object(utils, 'is_leader') @patch.object(utils, 'leader_get') @patch('charmhelpers.contrib.openstack.ip.config') @patch.object(utils, 'ensure_valid_service') @@ -408,7 +520,7 @@ class TestKeystoneUtils(CharmTestCase): @patch.object(utils, 'get_manager') def test_add_service_to_keystone_nosubset( self, KeystoneManager, add_endpoint, ensure_valid_service, - ip_config, leader_get): + ip_config, leader_get, is_leader, leader_set): relation_id = 'identity-service:0' remote_unit = 'unit/0' @@ -419,6 +531,7 @@ class TestKeystoneUtils(CharmTestCase): 'ec2_internal_url': '192.168.1.2'} self.get_local_endpoint.return_value = 'http://localhost:80/v2.0/' KeystoneManager.resolve_tenant_id.return_value = 
'tenant_id' + KeystoneManager.user_exists.return_value = False leader_get.return_value = None utils.add_service_to_keystone( @@ -520,43 +633,6 @@ class TestKeystoneUtils(CharmTestCase): def test_create_user_credentials_user_exists_v3(self): self.test_create_user_credentials_user_exists(test_api_version=3) - @patch.object(utils, 'get_manager') - def test_create_user_case_sensitivity(self, KeystoneManager): - """ Test case sensitivity of check for existence in - the user creation process """ - mock_keystone = MagicMock() - KeystoneManager.return_value = mock_keystone - - mock_user = MagicMock() - mock_keystone.resolve_user_id.return_value = mock_user - mock_keystone.api.users.list.return_value = [mock_user] - - # User found is the same i.e. userA == userA - mock_user.name = 'userA' - utils.create_user('userA', 'passA') - mock_keystone.resolve_user_id.assert_called_with('userA', - user_domain=None) - mock_keystone.create_user.assert_not_called() - - # User found has different case but is the same - # i.e. Usera != userA - mock_user.name = 'Usera' - utils.create_user('userA', 'passA') - mock_keystone.resolve_user_id.assert_called_with('userA', - user_domain=None) - mock_keystone.create_user.assert_not_called() - - # User is different i.e. UserB != userA - mock_user.name = 'UserB' - utils.create_user('userA', 'passA') - mock_keystone.resolve_user_id.assert_called_with('userA', - user_domain=None) - mock_keystone.create_user.assert_called_with(name='userA', - password='passA', - tenant_id=None, - domain_id=None, - email='juju@localhost') - @patch.object(utils, 'set_service_password') @patch.object(utils, 'get_service_password') @patch.object(utils, 'create_user_credentials') @@ -629,19 +705,59 @@ class TestKeystoneUtils(CharmTestCase): mock_relation_set.assert_called_once_with(relation_id=relation_id, relation_settings=settings) - @patch.object(utils, 'peer_retrieve') - @patch.object(utils, 'peer_store') - def test_get_admin_passwd_pwd_set(self, mock_peer_store, - mock_peer_retrieve): - mock_peer_retrieve.return_value = None + def test_get_admin_passwd_pwd_set(self): self.test_config.set('admin-password', 'supersecret') self.assertEqual(utils.get_admin_passwd(), 'supersecret') - mock_peer_store.assert_called_once_with('admin_passwd', 'supersecret') - @patch.object(utils, 'peer_retrieve') + @patch.object(utils, 'is_leader') + @patch.object(utils, 'leader_get') + def test_get_admin_passwd_leader_set(self, leader_get, is_leader): + is_leader.return_value = False + leader_get.return_value = 'admin' + self.assertEqual(utils.get_admin_passwd(), 'admin') + leader_get.assert_called_with('admin_passwd') + + @patch.object(utils, 'is_leader') + @patch.object(utils, 'leader_get') + def test_get_admin_passwd_leader_set_user_specified(self, leader_get, + is_leader): + is_leader.return_value = False + leader_get.return_value = 'admin' + self.assertEqual(utils.get_admin_passwd(user='test'), 'admin') + leader_get.assert_called_with('test_passwd') + + @patch.object(utils, 'is_leader') + @patch.object(utils, 'leader_get') + def test_get_admin_passwd_leader_set_user_config(self, leader_get, + is_leader): + is_leader.return_value = False + leader_get.return_value = 'admin' + self.test_config.set('admin-user', 'test') + self.assertEqual(utils.get_admin_passwd(), 'admin') + leader_get.assert_called_with('test_passwd') + + @patch.object(utils, 'leader_set') + def test_set_admin_password(self, leader_set): + utils.set_admin_passwd('secret') + leader_set.assert_called_once_with({'admin_passwd': 'secret'}) + + 
@patch.object(utils, 'leader_set') + def test_set_admin_password_config_username(self, leader_set): + self.test_config.set('admin-user', 'username') + utils.set_admin_passwd('secret') + leader_set.assert_called_once_with({'username_passwd': 'secret'}) + + @patch.object(utils, 'leader_set') + def test_set_admin_password_username(self, leader_set): + utils.set_admin_passwd('secret', user='username') + leader_set.assert_called_once_with({'username_passwd': 'secret'}) + + @patch.object(utils, 'is_leader') + @patch.object(utils, 'leader_get') @patch('os.path.isfile') - def test_get_admin_passwd_genpass(self, isfile, peer_retrieve): - peer_retrieve.return_value = 'supersecretgen' + def test_get_admin_passwd_genpass(self, isfile, leader_get, is_leader): + is_leader.return_value = True + leader_get.return_value = 'supersecretgen' self.test_config.set('admin-password', '') isfile.return_value = False self.subprocess.check_output.return_value = 'supersecretgen' @@ -669,7 +785,8 @@ class TestKeystoneUtils(CharmTestCase): self.assertFalse(utils.is_db_ready(use_current_context=True)) self.relation_ids.return_value = ['acme:0'] - self.assertRaises(utils.is_db_ready, use_current_context=True) + with self.assertRaises(Exception): + utils.is_db_ready(use_current_context=True) allowed_units = 'unit/0' self.related_units.return_value = ['unit/0'] @@ -929,19 +1046,13 @@ class TestKeystoneUtils(CharmTestCase): mock_keystone.resolve_service_id.return_value = 'sid1' KeystoneManager.return_value = mock_keystone utils.delete_service_entry('bob', 'bill') - mock_keystone.api.services.delete.assert_called_with('sid1') + mock_keystone.delete_service_by_id.assert_called_once_with('sid1') @patch('os.path.isfile') def test_get_file_stored_domain_id(self, isfile_mock): isfile_mock.return_value = False x = utils.get_file_stored_domain_id('/a/file') assert x is None - from sys import version_info - if version_info.major == 2: - import __builtin__ as builtins - else: - import builtins - from mock import mock_open with patch.object(builtins, 'open', mock_open( read_data="some_data\n")): isfile_mock.return_value = True @@ -1003,30 +1114,92 @@ class TestKeystoneUtils(CharmTestCase): f.assert_called_once_with('assessor', services='s1', ports='p1') @patch.object(utils, 'run_in_apache') - @patch.object(utils, 'restart_pid_check') - def test_restart_function_map(self, restart_pid_check, run_in_apache): + @patch.object(utils, 'restart_keystone') + def test_restart_function_map(self, restart_keystone, run_in_apache): run_in_apache.return_value = True self.assertEqual(utils.restart_function_map(), - {'apache2': restart_pid_check}) + {'apache2': restart_keystone}) + + @patch.object(utils, 'stop_manager_instance') + @patch.object(utils, 'is_unit_paused_set') + def test_restart_keystone_unit_paused(self, + mock_is_unit_paused_set, + mock_stop_manager_instance): + mock_is_unit_paused_set.return_value = True + utils.restart_keystone() + mock_stop_manager_instance.assert_not_called() + + @patch.object(utils, 'snap_install_requested') + @patch.object(utils, 'service_restart') + @patch.object(utils, 'stop_manager_instance') + @patch.object(utils, 'is_unit_paused_set') + def test_restart_keystone_unit_not_paused_snap_install( + self, + mock_is_unit_paused_set, + mock_stop_manager_instance, + mock_service_restart, + mock_snap_install_requested): + mock_is_unit_paused_set.return_value = False + mock_snap_install_requested.return_value = True + utils.restart_keystone() + mock_service_restart.assert_called_once_with('snap.keystone.*') + 
mock_stop_manager_instance.assert_called_once_with() @patch.object(utils, 'run_in_apache') - def test_restart_function_map_legacy(self, run_in_apache): - run_in_apache.return_value = False - self.assertEqual(utils.restart_function_map(), {}) + @patch.object(utils, 'snap_install_requested') + @patch.object(utils, 'service_restart') + @patch.object(utils, 'stop_manager_instance') + @patch.object(utils, 'is_unit_paused_set') + def test_restart_keystone_unit_not_paused_legacy( + self, + mock_is_unit_paused_set, + mock_stop_manager_instance, + mock_service_restart, + mock_snap_install_requested, + mock_run_in_apache): + mock_is_unit_paused_set.return_value = False + mock_snap_install_requested.return_value = False + mock_run_in_apache.return_value = False + utils.restart_keystone() + mock_service_restart.assert_called_once_with('keystone') + mock_stop_manager_instance.assert_called_once_with() + + @patch.object(utils, 'run_in_apache') + @patch.object(utils, 'snap_install_requested') + @patch.object(utils, 'restart_pid_check') + @patch.object(utils, 'stop_manager_instance') + @patch.object(utils, 'is_unit_paused_set') + def test_restart_keystone_unit_not_paused( + self, + mock_is_unit_paused_set, + mock_stop_manager_instance, + mock_restart_pid_check, + mock_snap_install_requested, + mock_run_in_apache): + mock_is_unit_paused_set.return_value = False + mock_snap_install_requested.return_value = False + mock_run_in_apache.return_value = True + utils.restart_keystone() + mock_restart_pid_check.assert_called_once_with('apache2') + mock_stop_manager_instance.assert_called_once_with() def test_restart_pid_check(self): self.subprocess.call.return_value = 1 utils.restart_pid_check('apache2') self.service_stop.assert_called_once_with('apache2') self.service_start.assert_called_once_with('apache2') - self.subprocess.call.assert_called_once_with(['pgrep', 'apache2']) + self.subprocess.call.assert_called_once_with( + ['pgrep', 'apache2', '--nslist', 'pid', '--ns', str(os.getpid())] + ) def test_restart_pid_check_ptable_string(self): self.subprocess.call.return_value = 1 utils.restart_pid_check('apache2', ptable_string='httpd') self.service_stop.assert_called_once_with('apache2') self.service_start.assert_called_once_with('apache2') - self.subprocess.call.assert_called_once_with(['pgrep', 'httpd']) + self.subprocess.call.assert_called_once_with( + ['pgrep', 'httpd', '--nslist', 'pid', '--ns', str(os.getpid())] + ) # Do not sleep() to speed up manual runs. 
@patch('charmhelpers.core.decorators.time') @@ -1038,9 +1211,12 @@ class TestKeystoneUtils(CharmTestCase): self.service_start.assert_called_once_with('apache2') # self.subprocess.call.assert_called_once_with(['pgrep', 'httpd']) expected = [ - call(['pgrep', 'httpd']), - call(['pgrep', 'httpd']), - call(['pgrep', 'httpd']), + call(['pgrep', 'httpd', '--nslist', 'pid', '--ns', + str(os.getpid())]), + call(['pgrep', 'httpd', '--nslist', 'pid', '--ns', + str(os.getpid())]), + call(['pgrep', 'httpd', '--nslist', 'pid', '--ns', + str(os.getpid())]) ] self.assertEqual(self.subprocess.call.call_args_list, expected) @@ -1160,6 +1336,7 @@ class TestKeystoneUtils(CharmTestCase): """ Verify add_credentials with Keystone V3 """ manager = MagicMock() manager.resolve_tenant_id.return_value = 'abcdef0123456789' + manager.resolve_domain_id.return_value = 'a-domain-id' get_manager.return_value = manager remote_unit = 'unit/0' relation_id = 'identity-credentials:0' @@ -1183,6 +1360,10 @@ class TestKeystoneUtils(CharmTestCase): 'credentials_password': 'password', 'credentials_project': 'services', 'credentials_project_id': 'abcdef0123456789', + 'credentials_user_domain_id': 'a-domain-id', + 'credentials_project_domain_id': 'a-domain-id', + 'credentials_project_domain_name': 'Non-Default', + 'credentials_user_domain_name': 'Non-Default', 'region': 'RegionOne', 'domain': 'Non-Default', 'api_version': 3} @@ -1357,3 +1538,266 @@ class TestKeystoneUtils(CharmTestCase): def test_run_in_apache_set_release(self): self.os_release.return_value = 'kilo' self.assertTrue(utils.run_in_apache(release='liberty')) + + def test_get_api_version_icehouse(self): + self.assertEqual(utils.get_api_version(), 2) + + def test_get_api_version_queens(self): + self.get_os_codename_install_source.return_value = 'queens' + self.assertEqual(utils.get_api_version(), 3) + + def test_get_api_version_invalid_option_value(self): + self.test_config.set('preferred-api-version', 4) + with self.assertRaises(ValueError): + utils.get_api_version() + + def test_get_api_version_queens_invalid_option_value(self): + self.test_config.set('preferred-api-version', 2) + self.get_os_codename_install_source.return_value = 'queens' + with self.assertRaises(ValueError): + utils.get_api_version() + + @patch.object(utils, 'is_leader') + @patch('os.path.exists') + def test_key_setup(self, mock_path_exists, mock_is_leader): + base_cmd = ['sudo', '-u', 'keystone', 'keystone-manage'] + mock_is_leader.return_value = True + mock_path_exists.return_value = False + with patch.object(builtins, 'open', mock_open()) as m: + utils.key_setup() + m.assert_called_once_with(utils.KEY_SETUP_FILE, "w") + self.subprocess.check_output.has_calls( + [ + base_cmd + ['fernet_setup'], + base_cmd + ['credential_setup'], + base_cmd + ['credential_migrate'], + ]) + mock_path_exists.assert_called_once_with(utils.KEY_SETUP_FILE) + mock_is_leader.assert_called_once_with() + + def test_fernet_rotate(self): + cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'fernet_rotate'] + utils.fernet_rotate() + self.subprocess.check_output.called_with(cmd) + + @patch.object(utils, 'leader_set') + @patch('os.listdir') + def test_key_leader_set(self, listdir, leader_set): + listdir.return_value = ['0', '1'] + self.time.time.return_value = "the-time" + with patch.object(builtins, 'open', mock_open( + read_data="some_data")): + utils.key_leader_set() + listdir.has_calls([ + call(utils.FERNET_KEY_REPOSITORY), + call(utils.CREDENTIAL_KEY_REPOSITORY)]) + leader_set.assert_called_with( + {'key_repository': 
json.dumps( + {utils.FERNET_KEY_REPOSITORY: + {'0': 'some_data', '1': 'some_data'}, + utils.CREDENTIAL_KEY_REPOSITORY: + {'0': 'some_data', '1': 'some_data'}}) + }) + + @patch('os.rename') + @patch.object(utils, 'leader_get') + @patch('os.listdir') + @patch('os.remove') + def test_key_write(self, remove, listdir, leader_get, rename): + leader_get.return_value = json.dumps( + {utils.FERNET_KEY_REPOSITORY: + {'0': 'key0', '1': 'key1'}, + utils.CREDENTIAL_KEY_REPOSITORY: + {'0': 'key0', '1': 'key1'}}) + listdir.return_value = ['0', '1', '2'] + with patch.object(builtins, 'open', mock_open()) as m: + utils.key_write() + m.assert_called_with(utils.KEY_SETUP_FILE, "w") + self.mkdir.has_calls([call(utils.CREDENTIAL_KEY_REPOSITORY, + owner='keystone', group='keystone', + perms=0o700), + call(utils.FERNET_KEY_REPOSITORY, + owner='keystone', group='keystone', + perms=0o700)]) + # note 'any_order=True' as we are dealing with dictionaries in Py27 + self.write_file.assert_has_calls( + [ + call(os.path.join(utils.CREDENTIAL_KEY_REPOSITORY, '.0'), + u'key0', owner='keystone', group='keystone', perms=0o600), + call(os.path.join(utils.CREDENTIAL_KEY_REPOSITORY, '.1'), + u'key1', owner='keystone', group='keystone', perms=0o600), + call(os.path.join(utils.FERNET_KEY_REPOSITORY, '.0'), u'key0', + owner='keystone', group='keystone', perms=0o600), + call(os.path.join(utils.FERNET_KEY_REPOSITORY, '.1'), u'key1', + owner='keystone', group='keystone', perms=0o600), + ], any_order=True) + rename.assert_has_calls( + [ + call(os.path.join(utils.CREDENTIAL_KEY_REPOSITORY, '.0'), + os.path.join(utils.CREDENTIAL_KEY_REPOSITORY, '0')), + call(os.path.join(utils.CREDENTIAL_KEY_REPOSITORY, '.1'), + os.path.join(utils.CREDENTIAL_KEY_REPOSITORY, '1')), + call(os.path.join(utils.FERNET_KEY_REPOSITORY, '.0'), + os.path.join(utils.FERNET_KEY_REPOSITORY, '0')), + call(os.path.join(utils.FERNET_KEY_REPOSITORY, '.1'), + os.path.join(utils.FERNET_KEY_REPOSITORY, '1')), + ], any_order=True) + + @patch.object(utils, 'keystone_context') + @patch.object(utils, 'fernet_rotate') + @patch.object(utils, 'key_leader_set') + @patch.object(utils, 'os') + @patch.object(utils, 'is_leader') + def test_fernet_keys_rotate_and_sync(self, mock_is_leader, mock_os, + mock_key_leader_set, + mock_fernet_rotate, + mock_keystone_context): + self.test_config.set('fernet-max-active-keys', 3) + self.test_config.set('token-expiration', 60) + self.time.time.return_value = 0 + + # if not leader shouldn't do anything + mock_is_leader.return_value = False + utils.fernet_keys_rotate_and_sync() + mock_os.stat.assert_not_called() + # shouldn't do anything as the token provider is wrong + mock_keystone_context.fernet_enabled.return_value = False + mock_is_leader.return_value = True + utils.fernet_keys_rotate_and_sync() + mock_os.stat.assert_not_called() + # fail gracefully if key repository is not initialized + mock_keystone_context.fernet_enabled.return_value = True + mock_os.stat.side_effect = Exception() + with self.assertRaises(Exception): + utils.fernet_keys_rotate_and_sync() + self.time.time.assert_not_called() + mock_os.stat.side_effect = None + # now set up the times, so that it still shouldn't be called. 
+ self.time.time.return_value = 30 + self.time.gmtime = time.gmtime + self.time.asctime = time.asctime + _stat = MagicMock() + _stat.st_mtime = 10 + mock_os.stat.return_value = _stat + utils.fernet_keys_rotate_and_sync(log_func=self.log) + self.log.assert_called_once_with( + 'No rotation until at least Thu Jan 1 00:01:10 1970', + level='DEBUG') + mock_key_leader_set.assert_not_called() + # finally, set it up so that the rotation and sync occur + self.time.time.return_value = 71 + utils.fernet_keys_rotate_and_sync() + mock_fernet_rotate.assert_called_once_with() + mock_key_leader_set.assert_called_once_with() + + @patch.object(utils, 'container_scoped_relations') + @patch.object(utils, 'expected_related_units') + @patch.object(utils, 'expected_peer_units') + @patch.object(utils, 'related_units') + @patch.object(utils, 'expect_ha') + @patch.object(utils, 'relation_ids') + def test_is_expected_scale(self, relation_ids, expect_ha, related_units, + expected_peer_units, expected_related_units, + container_scoped_relations): + container_scoped_relations.return_value = ['ha'] + relation_ids.return_value = ['FAKE_RID'] + expect_ha.return_value = False + related_units.return_value = ['unit/0', 'unit/1', 'unit/2'] + expected_peer_units.return_value = iter(related_units.return_value) + expected_related_units.return_value = iter(related_units.return_value) + self.assertTrue(utils.is_expected_scale()) + relation_ids.assert_has_calls([ + call(reltype='cluster'), + call(reltype='shared-db')]) + related_units.assert_called_with(relid='FAKE_RID') + + @patch.object(utils, 'container_scoped_relations') + @patch.object(utils, 'expected_related_units') + @patch.object(utils, 'expected_peer_units') + @patch.object(utils, 'related_units') + @patch.object(utils, 'expect_ha') + @patch.object(utils, 'relation_ids') + def test_is_expected_scale_ha(self, relation_ids, expect_ha, related_units, + expected_peer_units, expected_related_units, + container_scoped_relations): + container_scoped_relations.return_value = ['ha'] + relation_ids.return_value = ['FAKE_RID'] + expect_ha.return_value = True + related_units.return_value = ['unit/0', 'unit/1', 'unit/2'] + expected_peer_units.return_value = iter(related_units.return_value) + expected_related_units.return_value = iter(related_units.return_value) + self.assertTrue(utils.is_expected_scale()) + relation_ids.assert_has_calls([ + call(reltype='cluster'), + call(reltype='shared-db'), + call(reltype='ha')]) + related_units.assert_called_with(relid='FAKE_RID') + + @patch.object(utils, 'expected_related_units') + @patch.object(utils, 'expected_peer_units') + @patch.object(utils, 'related_units') + @patch.object(utils, 'expect_ha') + @patch.object(utils, 'relation_ids') + def test_not_is_expected_scale(self, relation_ids, expect_ha, + related_units, expected_peer_units, + expected_related_units): + relation_ids.return_value = ['FAKE_RID'] + expect_ha.return_value = False + related_units.return_value = ['unit/0', 'unit/1'] + expected_peer_units.return_value = iter(['unit/0', 'unit/1', 'unit/2']) + expected_related_units.return_value = iter( + ['unit/0', 'unit/1', 'unit/2']) + self.assertFalse(utils.is_expected_scale()) + relation_ids.assert_has_calls([ + call(reltype='cluster'), + call(reltype='shared-db')]) + related_units.assert_called_with(relid='FAKE_RID') + + @patch.object(utils, 'expected_related_units') + @patch.object(utils, 'expected_peer_units') + @patch.object(utils, 'related_units') + @patch.object(utils, 'expect_ha') + @patch.object(utils, 'relation_ids') + def 
test_is_expected_scale_no_goal_state_support(self, relation_ids, + expect_ha, related_units, + expected_peer_units, + expected_related_units): + relation_ids.return_value = ['FAKE_RID'] + related_units.return_value = ['unit/0', 'unit/1', 'unit/2'] + expected_peer_units.side_effect = NotImplementedError + self.assertTrue(utils.is_expected_scale()) + expected_related_units.assert_not_called() + + @patch.object(utils, 'metadata') + def test_container_scoped_relations(self, metadata): + _metadata = { + 'provides': { + 'amqp': {'interface': 'rabbitmq'}, + 'identity-service': {'interface': 'keystone'}, + 'ha': { + 'interface': 'hacluster', + 'scope': 'container'}}, + 'peers': { + 'cluster': {'interface': 'openstack-ha'}}} + metadata.return_value = _metadata + self.assertEqual(utils.container_scoped_relations(), ['ha']) + + @patch.object(utils, 'resource_map') + @patch.object(utils.os.path, 'isdir') + def test_restart_map(self, osp_isdir, resource_map): + rsc_map = collections.OrderedDict([ + ('file1', { + 'services': ['svc1'], + 'contexts': ['ctxt1']})]) + resource_map.return_value = rsc_map + osp_isdir.return_value = False + self.assertEqual( + utils.restart_map(), + collections.OrderedDict([ + ('file1', ['svc1'])])) + osp_isdir.return_value = True + self.assertEqual( + utils.restart_map(), + collections.OrderedDict([ + ('file1', ['svc1']), + ('/etc/apache2/ssl/keystone/*', ['apache2'])])) diff --git a/unit_tests/test_scripts_fernet_rotate_and_sync.py b/unit_tests/test_scripts_fernet_rotate_and_sync.py new file mode 100644 index 0000000000000000000000000000000000000000..153daf44566dff4c0ce1b5e5703871c8015e5e99 --- /dev/null +++ b/unit_tests/test_scripts_fernet_rotate_and_sync.py @@ -0,0 +1,42 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys + +from mock import patch + +from test_utils import CharmTestCase + +import fernet_rotate_and_sync as script + + +class FernetRotateAndSync(CharmTestCase): + + def setUp(self): + super(FernetRotateAndSync, self).setUp( + script, []) + + @patch('charmhelpers.core.hookenv.log') + @patch('time.ctime') + @patch('builtins.print') + def test_cli_log(self, mock_print, mock_ctime, mock_ch_log): + mock_ctime.return_value = 'FAKE_TIMESTAMP' + script.cli_log('message', level='DEBUG') + mock_ch_log.assert_called_with('message', level='DEBUG') + script.cli_log('message', level='WARNING') + mock_print.assert_called_with('FAKE_TIMESTAMP: message', + file=sys.stderr) + script.cli_log('message', level='INFO') + mock_print.assert_called_with('FAKE_TIMESTAMP: message', + file=sys.stdout) diff --git a/unit_tests/test_utils.py b/unit_tests/test_utils.py index 81bc3acdd3cf7ff1f4b00a45d1bf030e8d92e5cb..d0d354932cbca3bdb8103678d7ad16c0d5f077c0 100644 --- a/unit_tests/test_utils.py +++ b/unit_tests/test_utils.py @@ -17,8 +17,7 @@ import os import unittest import yaml -from contextlib import contextmanager -from mock import patch, MagicMock +from mock import patch patch('charmhelpers.contrib.openstack.utils.set_os_workload_status').start() patch('charmhelpers.core.hookenv.status_set').start() @@ -39,7 +38,7 @@ def load_config(): if not config: logging.error('Could not find config.yaml in any parent directory ' - 'of %s. ' % file) + 'of %s. ' % __file__) raise Exception with open(config) as f: @@ -52,7 +51,7 @@ def get_default_config(): ''' default_config = {} config = load_config() - for k, v in config.iteritems(): + for k, v in config.items(): if 'default' in v: default_config[k] = v['default'] else: @@ -117,21 +116,3 @@ class TestRelation(object): elif attr in self.relation_data: return self.relation_data[attr] return None - - -@contextmanager -def patch_open(): - '''Patch open() to allow mocking both open() itself and the file that is - yielded. - Yields the mock for "open" and "file", respectively. - ''' - mock_open = MagicMock(spec=open) - mock_file = MagicMock(spec=file) - - @contextmanager - def stub_open(*args, **kwargs): - mock_open(*args, **kwargs) - yield mock_file - - with patch('__builtin__.open', stub_open): - yield mock_open, mock_file
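
Note on the restart changes above: `restart_pid_check` now calls `pgrep <pattern> --nslist pid --ns <pid-of-the-hook-process>` instead of a bare `pgrep <pattern>`, so only processes in the charm's own PID namespace count when deciding whether the old daemon is gone. The snippet below is an illustrative sketch only, not the charm's `restart_pid_check()`; `systemctl` stands in for the charm-helpers `service_stop`/`service_start` calls.

    # Illustrative sketch only -- not the charm's restart_pid_check().
    # '--nslist pid --ns <our-pid>' restricts pgrep to the charm's own PID
    # namespace, so an identically named process inside a container or snap
    # on the same host does not make the check think the daemon is still up.
    import os
    import subprocess
    import time


    def stop_start_when_gone(service, ptable_string, attempts=3, delay=1):
        """Stop a service, wait for its processes to exit, then start it."""
        cmd = ['pgrep', ptable_string,
               '--nslist', 'pid', '--ns', str(os.getpid())]
        subprocess.call(['systemctl', 'stop', service])
        for _ in range(attempts):
            if subprocess.call(cmd) != 0:   # non-zero: no matching process left
                break
            time.sleep(delay)
        subprocess.call(['systemctl', 'start', service])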
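
The new `key_leader_set`/`key_write` tests pin down how fernet and credential keys are replicated between units: the leader stores a single `key_repository` JSON blob mapping each repository path to an `{index: key-contents}` dict, and followers write each key to a dot-prefixed temporary file before renaming it into place. A rough, hypothetical sketch of that round trip follows; the repository paths and function names are assumptions rather than the charm's code, and the real implementation also sets keystone ownership and 0600/0700 permissions.

    # Hypothetical sketch of the leader-storage payload asserted above;
    # paths and helper names are illustrative assumptions.
    import json
    import os

    FERNET_KEY_REPOSITORY = '/etc/keystone/fernet-keys/'
    CREDENTIAL_KEY_REPOSITORY = '/etc/keystone/credential-keys/'


    def serialise_key_repositories():
        """Build the JSON blob stored under the 'key_repository' leader key."""
        payload = {}
        for repo in (FERNET_KEY_REPOSITORY, CREDENTIAL_KEY_REPOSITORY):
            payload[repo] = {
                name: open(os.path.join(repo, name)).read()
                for name in os.listdir(repo)
            }
        return json.dumps(payload)


    def write_key_repositories(blob):
        """Write keys from the leader blob, using write-then-rename."""
        for repo, keys in json.loads(blob).items():
            for name, contents in keys.items():
                tmp = os.path.join(repo, '.' + name)
                with open(tmp, 'w') as f:   # real code also sets owner/perms
                    f.write(contents)
                os.rename(tmp, os.path.join(repo, name))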
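
Finally, the `fernet_keys_rotate_and_sync` tests encode a simple scheduling rule: do nothing on non-leaders or when fernet tokens are disabled, fail loudly if the key repository is uninitialized, and otherwise rotate only once a full rotation interval has passed since key 0 was written. With `fernet-max-active-keys=3` and `token-expiration=60` the window works out to 60 seconds (key 0 has mtime 10, so nothing happens at t=30 and a rotate-and-sync happens at t=71). The arithmetic below is a hypothetical sketch using the conventional fernet sizing relationship, not a claim about the charm's exact formula.

    # Hypothetical sketch of the rotation window the tests above assert.
    # Conventional fernet sizing: max-active-keys = lifetime / interval + 2,
    # i.e. rotation interval = token-expiration / (max-active-keys - 2);
    # with max-active-keys=3 and token-expiration=60 that is 60 seconds.
    def next_rotation_due(staged_key_mtime, max_active_keys, token_expiration):
        """Earliest time (epoch seconds) at which a fernet rotation is due."""
        rotation_interval = token_expiration / (max_active_keys - 2)
        return staged_key_mtime + rotation_interval


    def should_rotate(now, staged_key_mtime, max_active_keys, token_expiration):
        return now >= next_rotation_due(
            staged_key_mtime, max_active_keys, token_expiration)


    # Values from test_fernet_keys_rotate_and_sync: key 0 written at t=10.
    assert not should_rotate(30, 10, 3, 60)   # "No rotation until ... 00:01:10"
    assert should_rotate(71, 10, 3, 60)       # fernet_rotate() + key_leader_set()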