35 files changed, 1583 insertions, 86 deletions
diff --git a/doc/mgr/ansible.rst b/doc/mgr/ansible.rst
new file mode 100644
index 00000000000..e92c22b6365
--- /dev/null
+++ b/doc/mgr/ansible.rst
@@ -0,0 +1,122 @@
+
+.. _ansible-module:
+
+====================
+Ansible Orchestrator
+====================
+
+This module is a :ref:`Ceph orchestrator <orchestrator-modules>` module that
+uses the `Ansible Runner Service <https://github.com/pcuzner/ansible-runner-service>`_
+(a RESTful API server) to execute Ansible playbooks that implement the
+supported operations.
+
+For the moment, these operations are:
+
+- Get an inventory of the Ceph cluster nodes and all the storage devices present in each node
+- ...
+- ...
+
+
+Usage
+=====
+
+Enable the module:
+
+::
+
+  # ceph mgr module enable ansible
+
+Disable the module:
+
+::
+
+  # ceph mgr module disable ansible
+
+
+Enable the Ansible orchestrator module and use it with the :ref:`CLI <orchestrator-cli-module>`:
+
+::
+
+  ceph mgr module enable orchestrator_cli
+  ceph mgr module enable ansible
+  ceph orchestrator set backend ansible
+
+
+Configuration
+=============
+
+Configuration must be set after the module is enabled for the first time.
+
+This can be done on one monitor node via the configuration key facility on a
+cluster-wide level (so the settings apply to all manager instances) as follows::
+
+  # ceph config set mgr mgr/ansible/server_url <ip_address/server_name>:<port>
+  # ceph config set mgr mgr/ansible/username <username>
+  # ceph config set mgr mgr/ansible/password <password>
+  # ceph config set mgr mgr/ansible/verify_server <verify_server_value>
+
+Where:
+
+  * <ip_address/server_name>:<port>: The IP address or hostname of the server where the Ansible Runner Service is available, and the port number it is listening on.
+  * <username>: The username of an authorized user of the Ansible Runner Service.
+  * <password>: The password of the authorized user.
+  * <verify_server_value>: Either a boolean, in which case it controls whether the server's TLS certificate is verified, or a string, in which case it must be a path to a CA bundle to use in the verification. Defaults to ``True``.
+
+
+Debugging
+=========
+
+Any incident with this orchestrator module can be debugged using the Ceph
+manager logs.
+
+Set the right log level in order to debug properly. Remember that the Python
+log levels debug, info, warn and err are mapped to the Ceph severities 20, 4,
+1 and 0 respectively.
+
+Use the "active" manager node (the ``ceph -s`` command on one monitor gives
+you this information):
+
+* Check the current debug level::
+
+    [@mgr0 ~]# ceph daemon mgr.mgr0 config show | grep debug_mgr
+    "debug_mgr": "1/5",
+    "debug_mgrc": "1/5",
+
+* Change the log level to "debug"::
+
+    [mgr0 ~]# ceph daemon mgr.mgr0 config set debug_mgr 20/5
+    {
+        "success": ""
+    }
+
+* Restore the "info" log level::
+
+    [mgr0 ~]# ceph daemon mgr.mgr0 config set debug_mgr 1/5
+    {
+        "success": ""
+    }
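
If the module cannot reach the external server, it can also help to query the
same login endpoint the module uses (``api/v1/login``) directly from the active
manager node. A minimal sketch in Python; the address and credentials are
placeholders that must match the configuration values set above::

    import requests

    # <server>:<port>, <username> and <password> are the values configured above
    response = requests.get("https://<server>:<port>/api/v1/login",
                            auth=("<username>", "<password>"),
                            verify=False)  # or a path to a CA bundle
    print(response.status_code, response.text)  # expect 200 and a token

+
+Operations
+==========
+
+**Inventory:**
+
+Get the list of storage devices installed in all the cluster nodes.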
The output format is:: + + [host: + device_name (type_of_device , size_in_bytes)] + +Example:: + + [root@mon0 ~]# ceph orchestrator device ls + 192.168.121.160: + vda (hdd, 44023414784b) + sda (hdd, 53687091200b) + sdb (hdd, 53687091200b) + sdc (hdd, 53687091200b) + 192.168.121.36: + vda (hdd, 44023414784b) + 192.168.121.201: + vda (hdd, 44023414784b) + 192.168.121.70: + vda (hdd, 44023414784b) + sda (hdd, 53687091200b) + sdb (hdd, 53687091200b) + sdc (hdd, 53687091200b) diff --git a/doc/mgr/index.rst b/doc/mgr/index.rst index 0b40cb89809..337127c05fc 100644 --- a/doc/mgr/index.rst +++ b/doc/mgr/index.rst @@ -44,3 +44,4 @@ sensible. Rook plugin <rook> DeepSea plugin <deepsea> Insights plugin <insights> + Ansible plugin <ansible> diff --git a/doc/rbd/rbd-config-ref.rst b/doc/rbd/rbd-config-ref.rst index e408c0d5c5a..8fdc8a322fc 100644 --- a/doc/rbd/rbd-config-ref.rst +++ b/doc/rbd/rbd-config-ref.rst @@ -136,8 +136,8 @@ Read-ahead is automatically disabled if caching is disabled. :Default: ``50 MiB`` -RBD Default Features -==================== +RBD Features +============ RBD supports advanced features which can be specified via the command line when creating images or the default features can be specified via Ceph config file via 'rbd_default_features = <sum of feature numeric values>' or 'rbd_default_features = <comma-delimited list of CLI values>' @@ -146,24 +146,36 @@ RBD supports advanced features which can be specified via the command line when :Description: Layering enables you to use cloning. :Internal value: 1 :CLI value: layering +:Added in: v0.70 (Emperor) +:KRBD support: since v3.10 +:Default: yes ``Striping v2`` :Description: Striping spreads data across multiple objects. Striping helps with parallelism for sequential read/write workloads. :Internal value: 2 :CLI value: striping +:Added in: v0.70 (Emperor) +:KRBD support: since v3.10 +:Default: yes ``Exclusive locking`` :Description: When enabled, it requires a client to get a lock on an object before making a write. Exclusive lock should only be enabled when a single client is accessing an image at the same time. :Internal value: 4 :CLI value: exclusive-lock +:Added in: v0.92 (Hammer) +:KRBD support: since v4.9 +:Default: yes ``Object map`` :Description: Object map support depends on exclusive lock support. Block devices are thin provisioned—meaning, they only store data that actually exists. Object map support helps track which objects actually exist (have data stored on a drive). Enabling object map support speeds up I/O operations for cloning; importing and exporting a sparsely populated image; and deleting. :Internal value: 8 :CLI value: object-map +:Added in: v0.93 (Hammer) +:KRBD support: no +:Default: yes ``Fast-diff`` @@ -171,6 +183,9 @@ RBD supports advanced features which can be specified via the command line when :Description: Fast-diff support depends on object map support and exclusive lock support. It adds another property to the object map, which makes it much faster to generate diffs between snapshots of an image, and the actual data usage of a snapshot much faster. :Internal value: 16 :CLI value: fast-diff +:Added in: v9.0.1 (Infernalis) +:KRBD support: no +:Default: yes ``Deep-flatten`` @@ -178,6 +193,9 @@ RBD supports advanced features which can be specified via the command line when :Description: Deep-flatten makes rbd flatten work on all the snapshots of an image, in addition to the image itself. 
Without it, snapshots of an image will still rely on the parent, so the parent
will not be deletable until the snapshots are deleted. Deep-flatten makes a
parent independent of its clones, even if they have snapshots.
:Internal value: 32
:CLI value: deep-flatten
+:Added in: v9.0.2 (Infernalis)
+:KRBD support: no
+:Default: yes

``Journaling``

@@ -185,6 +203,34 @@ RBD supports advanced features which can be specified via the command line when
:Description: Journaling support depends on exclusive lock support. Journaling records all modifications to an image in the order they occur. RBD mirroring utilizes the journal to replicate a crash-consistent image to a remote cluster.
:Internal value: 64
:CLI value: journaling
+:Added in: v10.0.1 (Jewel)
+:KRBD support: no
+:Default: no
+
+
+``Data pool``
+
+:Description: On erasure-coded pools, the image data block objects need to be stored on a separate pool from the image metadata.
+:Internal value: 128
+:Added in: v11.1.0 (Kraken)
+:KRBD support: since v4.11
+:Default: no
+
+
+``Operations``
+
+:Description: Used to restrict older clients from performing certain maintenance operations against an image (e.g. clone, snap create).
+:Internal value: 256
+:Added in: v13.0.2 (Mimic)
+:KRBD support: since v4.16
+
+
+``Migrating``
+
+:Description: Used to restrict older clients from opening an image when it is in migration state.
+:Internal value: 512
+:Added in: v14.0.1 (Nautilus)
+:KRBD support: no
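
Since each internal value is a power of two, a feature set is just the sum of
the values of its members. A quick illustration of how the numeric and CLI
forms of ``rbd_default_features`` relate (the subset chosen here is only an
example)::

    # Internal values taken from the feature descriptions above
    features = {"layering": 1, "striping": 2, "exclusive-lock": 4,
                "object-map": 8, "fast-diff": 16, "deep-flatten": 32,
                "journaling": 64, "data-pool": 128}

    wanted = ["layering", "exclusive-lock", "object-map",
              "fast-diff", "deep-flatten"]
    print(sum(features[f] for f in wanted))
    # 61 -> 'rbd_default_features = 61' is equivalent to
    # 'rbd_default_features = layering,exclusive-lock,object-map,fast-diff,deep-flatten'

RBD QOS Settings
diff --git a/doc/releases/releases.yml b/doc/releases/releases.yml
index b0d658ad40d..c22fca0417c 100644
--- a/doc/releases/releases.yml
+++ b/doc/releases/releases.yml
@@ -23,6 +23,10 @@ releases:
   luminous:
     releases:
+    - version: 12.2.10
+      released: 2018-11-27
+    - version: 12.2.9
+      released: 2018-11-01
     - version: 12.2.8
       released: 2018-09-01
     - version: 12.2.7
diff --git a/doc/releases/schedule.rst b/doc/releases/schedule.rst
index ac141a9cf7d..9a3b0f78ef7 100644
--- a/doc/releases/schedule.rst
+++ b/doc/releases/schedule.rst
@@ -18,6 +18,8 @@ Timeline
 .. _13.2.0: ../mimic#v13-2-0-mimic
 .. _Luminous: ../luminous#v12-2-0-luminous
+.. _12.2.10: ../luminous#v12-2-10-luminous
+.. _12.2.9: ../luminous#v12-2-9-luminous
 .. _12.2.8: ../luminous#v12-2-8-luminous
 .. _12.2.7: ../luminous#v12-2-7-luminous
 ..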
_12.2.6: ../luminous#v12-2-6-luminous diff --git a/qa/tasks/radosgw_admin_rest.py b/qa/tasks/radosgw_admin_rest.py index 263dd3dc0df..b77a73cde3d 100644 --- a/qa/tasks/radosgw_admin_rest.py +++ b/qa/tasks/radosgw_admin_rest.py @@ -203,7 +203,7 @@ def task(ctx, config): assert ret == 200 # TESTCASE 'list-no-user','user','list','list user keys','user list object' - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'list'], {'max-entries' : 0}) + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'list'], {'list' : '', 'max-entries' : 0}) assert ret == 200 assert out['count'] == 0 assert out['truncated'] == True @@ -211,7 +211,7 @@ def task(ctx, config): assert len(out['marker']) > 0 # TESTCASE 'list-user-without-marker','user','list','list user keys','user list object' - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'list'], {'max-entries' : 1}) + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'list'], {'list' : '', 'max-entries' : 1}) assert ret == 200 assert out['count'] == 1 assert out['truncated'] == True @@ -220,15 +220,12 @@ def task(ctx, config): marker = out['marker'] # TESTCASE 'list-user-with-marker','user','list','list user keys','user list object' - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'list'], {'max-entries' : 1, 'marker': marker}) + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'list'], {'list' : '', 'max-entries' : 1, 'marker': marker}) assert ret == 200 assert out['count'] == 1 assert out['truncated'] == False assert len(out['keys']) == 1 - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'list'], {'max-entries' : 1, - 'marker': }) - # TESTCASE 'info-existing','user','info','existing user','returns correct info' (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) diff --git a/src/common/cmdparse.cc b/src/common/cmdparse.cc index 518112facf4..3380178934d 100644 --- a/src/common/cmdparse.cc +++ b/src/common/cmdparse.cc @@ -93,6 +93,7 @@ dump_cmd_to_json(Formatter *f, uint64_t features, const string& cmd) f->open_object_section(string(desckv["name"]).c_str()); // Compatibility for pre-nautilus clients that don't know about CephBool + std::string val; if (!HAVE_FEATURE(features, SERVER_NAUTILUS)) { auto i = desckv.find("type"); if (i != desckv.end() && i->second == "CephBool") { @@ -100,7 +101,7 @@ dump_cmd_to_json(Formatter *f, uint64_t features, const string& cmd) // of a 'true'/'false' value std::ostringstream oss; oss << std::string("--") << desckv["name"]; - std::string val = oss.str(); + val = oss.str(); std::replace(val.begin(), val.end(), '_', '-'); desckv["type"] = "CephChoices"; diff --git a/src/crush/CrushWrapper.cc b/src/crush/CrushWrapper.cc index 744d5c9c38b..0fda4ee89d4 100644 --- a/src/crush/CrushWrapper.cc +++ b/src/crush/CrushWrapper.cc @@ -2044,7 +2044,7 @@ int CrushWrapper::get_new_bucket_id() crush->buckets, sizeof(crush->buckets[0]) * crush->max_buckets); for (auto& i : choose_args) { - assert(i.second.size == crush->max_buckets - 1); + assert(i.second.size == (__u32)crush->max_buckets - 1); ++i.second.size; i.second.args = (struct crush_choose_arg*)realloc( i.second.args, diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc index c38d03324a6..246731c302b 100644 --- a/src/mon/OSDMonitor.cc +++ b/src/mon/OSDMonitor.cc @@ -3326,7 +3326,6 @@ bool OSDMonitor::prepare_pg_ready_to_merge(MonOpRequestRef op) } else { wait_for_finished_proposal(op, new C_ReplyMap(this, op, m->version)); } - mon->no_reply(op); return true; } diff --git a/src/msg/async/Protocol.cc b/src/msg/async/Protocol.cc index 0fb8bb389ce..5e202412502 100644 
--- a/src/msg/async/Protocol.cc +++ b/src/msg/async/Protocol.cc @@ -182,20 +182,9 @@ void ProtocolV1::fault() { connection->write_lock.unlock(); - if (state != START_CONNECT && state != CONNECTING && state != WAIT) { - // policy maybe empty when state is in accept - if (connection->policy.server) { - ldout(cct, 0) << __func__ << " server, going to standby" << dendl; - state = STANDBY; - } else { - ldout(cct, 0) << __func__ << " initiating reconnect" << dendl; - connect_seq++; - state = START_CONNECT; - connection->state = AsyncConnection::STATE_CONNECTING; - } - backoff = utime_t(); - connection->center->dispatch_event_external(connection->read_handler); - } else { + if ((state >= START_CONNECT && state <= CONNECTING_SEND_CONNECT_MSG) || + state == WAIT) { + // backoff! if (state == WAIT) { backoff.set_from_double(cct->_conf->ms_max_backoff); } else if (backoff == utime_t()) { @@ -213,6 +202,19 @@ void ProtocolV1::fault() { connection->register_time_events.insert( connection->center->create_time_event(backoff.to_nsec() / 1000, connection->wakeup_handler)); + } else { + // policy maybe empty when state is in accept + if (connection->policy.server) { + ldout(cct, 0) << __func__ << " server, going to standby" << dendl; + state = STANDBY; + } else { + ldout(cct, 0) << __func__ << " initiating reconnect" << dendl; + connect_seq++; + state = START_CONNECT; + connection->state = AsyncConnection::STATE_CONNECTING; + } + backoff = utime_t(); + connection->center->dispatch_event_external(connection->read_handler); } } diff --git a/src/os/bluestore/BlueStore.cc b/src/os/bluestore/BlueStore.cc index 0ba4ebc9095..47d9ff94c67 100644 --- a/src/os/bluestore/BlueStore.cc +++ b/src/os/bluestore/BlueStore.cc @@ -5313,9 +5313,13 @@ int BlueStore::allocate_bluefs_freespace(uint64_t size) 0, 0, &extents); if (alloc_len < (int64_t)gift) { - dout(0) << __func__ << " no allocate on 0x" << std::hex << gift - << " min_alloc_size 0x" << cct->_conf->bluefs_alloc_size - << std::dec << dendl; + derr << __func__ + << " failed to allocate on 0x" << std::hex << gift + << " bluefs_alloc_size 0x" << cct->_conf->bluefs_alloc_size + << " allocated 0x" << alloc_len + << " available 0x " << alloc->get_free() + << std::dec << dendl; + alloc->dump(); alloc->release(extents); return -ENOSPC; @@ -5427,15 +5431,19 @@ int BlueStore::_balance_bluefs_freespace(PExtentVector *extents) 0, 0, extents); if (alloc_len <= 0) { - dout(0) << __func__ << " no allocate on 0x" << std::hex << gift - << " bluefs_alloc_size 0x" << cct->_conf->bluefs_alloc_size - << std::dec << dendl; + derr << __func__ + << " failed to allocate 0x" << std::hex << gift + << " min_alloc_size 0x" << cct->_conf->bluefs_alloc_size + << " available 0x " << alloc->get_free() + << std::dec << dendl; _dump_alloc_on_rebalance_failure(); return 0; } else if (alloc_len < (int64_t)gift) { - dout(0) << __func__ << " insufficient allocate on 0x" << std::hex << gift + dout(0) << __func__ + << " insufficient allocate on 0x" << std::hex << gift << " bluefs_alloc_size 0x" << cct->_conf->bluefs_alloc_size << " allocated 0x" << alloc_len + << " available 0x " << alloc->get_free() << std::dec << dendl; _dump_alloc_on_rebalance_failure(); } @@ -7081,8 +7089,12 @@ int BlueStore::_fsck(bool deep, bool repair) int64_t alloc_len = alloc->allocate(e->length, min_alloc_size, 0, 0, &exts); if (alloc_len < (int64_t)e->length) { - derr << __func__ << " allocate failed on 0x" << std::hex << e->length - << " min_alloc_size 0x" << min_alloc_size << std::dec << dendl; + derr << __func__ + << " failed 
to allocate 0x" << std::hex << e->length + << " allocated 0x " << alloc_len + << " min_alloc_size 0x" << min_alloc_size + << " available 0x " << alloc->get_free() + << std::dec << dendl; if (alloc_len > 0) { alloc->release(exts); } @@ -11422,16 +11434,21 @@ int BlueStore::_do_alloc_write( } PExtentVector prealloc; prealloc.reserve(2 * wctx->writes.size());; - int prealloc_left = 0; + int64_t prealloc_left = 0; prealloc_left = alloc->allocate( need, min_alloc_size, need, 0, &prealloc); - if (prealloc_left < 0) { - derr << __func__ << " failed to allocate 0x" << std::hex << need << std::dec - << dendl; + if (prealloc_left < (int64_t)need) { + derr << __func__ << " failed to allocate 0x" << std::hex << need + << " allocated 0x " << prealloc_left + << " min_alloc_size 0x" << min_alloc_size + << " available 0x " << alloc->get_free() + << std::dec << dendl; + if (prealloc.size()) { + alloc->release(prealloc); + } return -ENOSPC; } - ceph_assert(prealloc_left == (int64_t)need); dout(20) << __func__ << " prealloc " << prealloc << dendl; auto prealloc_pos = prealloc.begin(); diff --git a/src/pybind/mgr/CMakeLists.txt b/src/pybind/mgr/CMakeLists.txt index 916c672b9ba..f66bba7e4be 100644 --- a/src/pybind/mgr/CMakeLists.txt +++ b/src/pybind/mgr/CMakeLists.txt @@ -1,2 +1,3 @@ add_subdirectory(dashboard) add_subdirectory(insights) +add_subdirectory(ansible) diff --git a/src/pybind/mgr/ansible/CMakeLists.txt b/src/pybind/mgr/ansible/CMakeLists.txt new file mode 100644 index 00000000000..c706aa8cbe5 --- /dev/null +++ b/src/pybind/mgr/ansible/CMakeLists.txt @@ -0,0 +1,7 @@ +set(MGR_ANSIBLE_VIRTUALENV ${CEPH_BUILD_VIRTUALENV}/mgr-ansible-virtualenv) + +add_custom_target(mgr-ansible-test-venv + COMMAND ${CMAKE_SOURCE_DIR}/src/tools/setup-virtualenv.sh --python=${MGR_PYTHON_EXECUTABLE} ${MGR_ANSIBLE_VIRTUALENV} + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/pybind/mgr/ansible + COMMENT "ansible tests virtualenv is being created") +add_dependencies(tests mgr-ansible-test-venv) diff --git a/src/pybind/mgr/ansible/__init__.py b/src/pybind/mgr/ansible/__init__.py new file mode 100644 index 00000000000..ea61a12fd7e --- /dev/null +++ b/src/pybind/mgr/ansible/__init__.py @@ -0,0 +1,9 @@ +from __future__ import absolute_import +import os + +if 'UNITTEST' not in os.environ: + from .module import Module +else: + import sys + import mock + sys.modules['ceph_module'] = mock.Mock() diff --git a/src/pybind/mgr/ansible/ansible_runner_svc.py b/src/pybind/mgr/ansible/ansible_runner_svc.py new file mode 100644 index 00000000000..2989c89ce05 --- /dev/null +++ b/src/pybind/mgr/ansible/ansible_runner_svc.py @@ -0,0 +1,274 @@ +""" +Tool module to interact with the Ansible Runner Service +""" +import requests +import json +import re + +# Ansible Runner events finished + + +# Ansible Runner service API endpoints +API_URL = "api" +LOGIN_URL = "api/v1/login" +PLAYBOOK_EXEC_URL = "api/v1/playbooks" +PLAYBOOK_EVENTS = "api/v1/jobs/%s/events" +EVENT_DATA_URL = "api/v1/jobs/%s/events/%s" + +class ExecutionStatusCode(object): + """Execution status of playbooks ( 'msg' field in playbook status request) + """ + + SUCCESS = 0 # Playbook has been executed succesfully" msg = successful + ERROR = 1 # Playbook has finished with error msg = failed + ON_GOING = 2 # Playbook is being executed msg = running + NOT_LAUNCHED = 3 # Not initialized + + +class PlayBookExecution(object): + """Object to provide all the results of a Playbook execution + """ + + def __init__(self, rest_client, playbook, logger, result_pattern="", the_params={}): + + 
self.rest_client = rest_client + + # Identifier of the playbook execution + self.play_uuid = "-" + + # Pattern used to extract the result from the events + self.result_task_pattern = result_pattern + + # Playbook name + self.playbook = playbook + + # Params used in the playbook + self.params = the_params + + # Logger + self.log = logger + + def launch(self): + """ Launch the playbook execution + """ + + endpoint = "%s/%s" % (PLAYBOOK_EXEC_URL, self.playbook) + + response = self.rest_client.http_post(endpoint, self.params) + + if response: + self.play_uuid = json.loads(response.text)["data"]["play_uuid"] + self.log.info("Playbook execution launched succesfuly") + else: + # An error launching the execution implies play_uuid empty + self.play_uuid = "" + self.log.error("Playbook launch error. \ + Check <endpoint> request result") + + def get_status(self): + """ Return the status of the execution + + In the msg field of the respons we can find: + "msg": "successful" + "msg": "running" + "msg": "failed" + """ + + status_value = ExecutionStatusCode.NOT_LAUNCHED + + if self.play_uuid == '-': # Initialized + status_value = ExecutionStatusCode.NOT_LAUNCHED + elif self.play_uuid == '': # Error launching playbook + status_value = ExecutionStatusCode.ERROR + else: + endpoint = "%s/%s" % (PLAYBOOK_EXEC_URL, self.play_uuid) + response = self.rest_client.http_get(endpoint) + + if response: + the_status = json.loads(response.text)["msg"] + if the_status == 'successful': + status_value = ExecutionStatusCode.SUCCESS + elif the_status == 'failed': + status_value = ExecutionStatusCode.ERROR + else: + status_value = ExecutionStatusCode.ON_GOING + else: + status_value = ExecutionStatusCode.ERROR + + self.log.info("Requested playbook execution status is: %s", status_value) + return status_value + + def get_result(self, event_filter=""): + """Get the data of the events filtered by a task pattern and + a event filter + + @returns: the events that matches with the patterns provided + """ + + if not self.result_task_pattern or not self.play_uuid: + result_events = {} + + response = self.rest_client.http_get(PLAYBOOK_EVENTS % self.play_uuid) + + if not response: + result_events = {} + else: + events = json.loads(response.text)["data"]["events"] + result_events = {event:data for event,data in events.items() + if "task" in data and + re.match(self.result_task_pattern, data["task"])} + if event_filter: + result_events = {event:data for event,data in result_events.items() + if re.match(event_filter, data['event'])} + + self.log.info("Requested playbook result is: %s", json.dumps(result_events)) + return result_events + +class Client(object): + """An utility object that allows to connect with the Ansible runner service + and execute easily playbooks + """ + + def __init__(self, server_url, user, password, verify_server, logger): + """Provide an https client to make easy interact with the Ansible + Runner Service" + + @param servers_url: The base URL >server>:<port> of the Ansible Runner Service + @param user: User name of the authorized user + @param password: Password of the authotized user + @param verify_server: Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use. Defaults to ``True``. 
+ @param logger: Log file + """ + self.server_url = server_url + self.user = user + self.password = password + self.log = logger + self.auth = (self.user, self.password) + if not verify_server: + self.verify_server = True + elif verify_server.lower().strip() == 'false': + self.verify_server = False + else: + self.verify_server = verify_server + + # Once authenticated this token will be used in all the requests + self.token = "" + + self.server_url = "https://{0}".format(self.server_url) + + # Log in the server and get a token + self.login() + + def login(self): + """ Login with user credentials to obtain a valid token + """ + + response = None + + the_url = "%s/%s" % (self.server_url, LOGIN_URL) + response = requests.get(the_url, + auth = self.auth, + verify = self.verify_server) + + if response.status_code != requests.codes.ok: + self.log.error("login error <<%s>> (%s):%s", + the_url, response.status_code, response.text) + else: + self.log.info("login succesful <<%s>> (%s):%s", + the_url, response.status_code, response.text) + + if response: + self.token = json.loads(response.text)["data"]["token"] + self.log.info("Connection with Ansible Runner Service is operative") + + + def is_operative(self): + """Indicates if the connection with the Ansible runner Server is ok + """ + + # No Token... this means we haven't used yet the service. + if not self.token: + return False + + # Check the service + response = self.http_get(API_URL) + + if response: + return response.status_code == requests.codes.ok + else: + return False + + def http_get(self, endpoint): + """Execute an http get request + + @param endpoint: Ansible Runner service RESTful API endpoint + + @returns: A requests object + """ + + response = None + + try: + the_url = "%s/%s" % (self.server_url, endpoint) + r = requests.get(the_url, + verify = self.verify_server, + headers = {"Authorization": self.token}) + + if r.status_code != requests.codes.ok: + self.log.error("http GET %s <--> (%s - %s)\n%s", + the_url, r.status_code, r.reason, r.text) + else: + self.log.info("http GET %s <--> (%s - %s)", + the_url, r.status_code, r.text) + + response = r + + except Exception: + self.log.exception("Ansible runner service(GET %s)", the_url) + + return response + + def http_post(self, endpoint, payload): + """Execute an http post request + + @param endpoint: Ansible Runner service RESTful API endpoint + @param payload: Dictionary with the data used in the post request + + @returns: A requests object + """ + + response = None + + try: + the_url = "%s/%s" % (self.server_url, endpoint) + r = requests.post(the_url, + verify = self.verify_server, + headers = {"Authorization": self.token, + "Content-type": "application/json"}, + data = payload) + + if r.status_code != requests.codes.ok: + self.log.error("http POST %s [%s] <--> (%s - %s)\n%s", + the_url, payload, r.status_code, r.reason, r.text) + else: + self.log.info("http POST %s <--> (%s - %s)", + the_url, r.status_code, r.text) + response = r + + except Exception: + self.log.exception("Ansible runner service(POST %s)", the_url) + + return response + + def http_put(self, endpoint, payload): + """Execute an http put request + + @param endpoint: Ansible Runner service RESTful API endpoint + @param payload: Dictionary with the data used in the put request + + @returns: A requests object + """ + # TODO + raise NotImplementedError("TODO") diff --git a/src/pybind/mgr/ansible/module.py b/src/pybind/mgr/ansible/module.py new file mode 100644 index 00000000000..862e53aebf2 --- /dev/null +++ 
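
A minimal sketch of how these two classes are meant to work together; the
server address and credentials below are illustrative, while the playbook name
and result pattern match the ones the orchestrator module uses further down::

    import logging

    from ansible_runner_svc import (Client, PlayBookExecution,
                                    ExecutionStatusCode)

    log = logging.getLogger(__name__)

    # Logs in against the Ansible Runner Service and stores a token
    client = Client("ars-server:5001", "admin", "secret",
                    verify_server="false", logger=log)

    # Launch a playbook and poll its execution status
    pb = PlayBookExecution(client, "host-disks.yml", log,
                           result_pattern="RESULTS")
    pb.launch()
    while pb.get_status() == ExecutionStatusCode.ON_GOING:
        pass  # a real caller would sleep or reschedule instead of spinning

    if pb.get_status() == ExecutionStatusCode.SUCCESS:
        events = pb.get_result(event_filter="runner_on_ok")

diff --git a/src/pybind/mgr/ansible/module.py b/src/pybind/mgr/ansible/module.py
new file mode 100644
index 00000000000..862e53aebf2
--- /dev/null
+++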
b/src/pybind/mgr/ansible/module.py
@@ -0,0 +1,382 @@
+"""
+ceph-mgr Ansible orchestrator module
+
+The external Orchestrator is the Ansible Runner Service (a RESTful HTTPS service)
+"""
+
+# Python stuff
+from threading import Event
+import errno
+import json
+
+# Ceph stuff
+from mgr_module import MgrModule
+import orchestrator
+
+# Orchestrator stuff
+# A Client is used to communicate with the Ansible Runner service
+from ansible_runner_svc import Client, PlayBookExecution, ExecutionStatusCode,\
+                               EVENT_DATA_URL
+
+# Constants section
+
+# Time to clean the completions list
+WAIT_PERIOD = 10
+
+
+# Names of the playbooks used
+
+# Name of the playbook used in the "get_inventory" method.
+# This playbook is expected to provide a list of storage devices in the host
+# where the playbook is executed.
+GET_STORAGE_DEVICES_CATALOG_PLAYBOOK = "host-disks.yml"
+
+
+
+class AnsibleReadOperation(orchestrator.ReadCompletion):
+    """ A read operation means to obtain information from the cluster.
+    """
+
+    def __init__(self, client, playbook, logger, result_pattern, params):
+        super(AnsibleReadOperation, self).__init__()
+
+
+        # Private attributes
+        self.playbook = playbook
+        self._is_complete = False
+        self._is_errored = False
+        self._result = []
+
+        # Error description in operation
+        self.error = ""
+
+        # Ansible Runner Service client
+        self.ar_client = client
+
+        # Logger
+        self.log = logger
+
+        # An additional filter of result events based on the event
+        self.event_filter = ""
+
+        # Function assigned dynamically to process the result
+        self.process_output = None
+
+        # Playbook execution object
+        self.pb_execution = PlayBookExecution(client,
+                                              playbook,
+                                              logger,
+                                              result_pattern,
+                                              params)
+
+    @property
+    def is_complete(self):
+        return self._is_complete
+
+    @property
+    def is_errored(self):
+        return self._is_errored
+
+    @property
+    def result(self):
+        return self._result
+
+    @property
+    def status(self):
+        """Return the status code of the operation,
+        updating conceptually 'linked' attributes
+        """
+        current_status = self.pb_execution.get_status()
+
+        self._is_complete = (current_status == ExecutionStatusCode.SUCCESS) or \
+                            (current_status == ExecutionStatusCode.ERROR)
+
+        self._is_errored = (current_status == ExecutionStatusCode.ERROR)
+
+        return current_status
+
+    def execute_playbook(self):
+        """Execute the playbook with the provided params.
+        """
+
+        self.pb_execution.launch()
+
+    def update_result(self):
+        """Output of the read operation
+
+        The result of the playbook execution can be customized through the
+        function provided as 'process_output' attribute
+
+        @return string: Result of the operation, formatted if possible
+        """
+
+        processed_result = []
+
+        if self._is_complete:
+            raw_result = self.pb_execution.get_result(self.event_filter)
+
+            if self.process_output:
+                processed_result = self.process_output(
+                    raw_result,
+                    self.ar_client,
+                    self.pb_execution.play_uuid)
+            else:
+                processed_result = raw_result
+
+        self._result = processed_result
+
+
+class AnsibleChangeOperation(orchestrator.WriteCompletion):
+    """Operations that change the "cluster" state
+
+    Modifications/Changes (writes) are a two-phase thing: first execute
+    the playbook that is going to change elements in the Ceph Cluster.
+    When the playbook finishes execution (independently of the result),
+    the modification/change operation has finished.
+    """
+    def __init__(self):
+        super(AnsibleChangeOperation, self).__init__()
+
+        self.error = False
+    @property
+    def status(self):
+        """Return the status code of the operation
+        """
+        #TODO
+        return 0
+
+    @property
+    def is_persistent(self):
+        """
+        Has the operation updated the orchestrator's configuration
+        persistently? Typically this would indicate that an update
+        had been written to a manifest, but that the update
+        had not necessarily been pushed out to the cluster.
+
+        In the case of Ansible this is always False,
+        because an initiated playbook execution will always need to be
+        relaunched if it fails.
+        """
+
+        return False
+
+    @property
+    def is_effective(self):
+        """Has the operation taken effect on the cluster?
+        For example, if we were adding a service, has it come up and appeared
+        in Ceph's cluster maps?
+
+        In the case of Ansible, this will be True if the playbook has been
+        executed successfully.
+
+        @return Boolean: if the playbook has been executed successfully
+        """
+
+        return self.status == ExecutionStatusCode.SUCCESS
+
+    @property
+    def is_errored(self):
+        return self.error
+
+    @property
+    def is_complete(self):
+        return self.is_errored or (self.is_persistent and self.is_effective)
+
+
+class Module(MgrModule, orchestrator.Orchestrator):
+    """An Orchestrator that uses an external Ansible Runner Service to
+    perform operations
+    """
+
+    OPTIONS = [
+        {'name': 'server_url'},
+        {'name': 'username'},
+        {'name': 'password'},
+        {'name': 'verify_server'}  # Check server identity (Boolean/path to CA bundle)
+
+    ]
+
+    def __init__(self, *args, **kwargs):
+        """
+        """
+        super(Module, self).__init__(*args, **kwargs)
+
+        self.run = False
+
+        self.all_completions = []
+
+        self.ar_client = None
+
+    def available(self):
+        """ Check if Ansible Runner service is working
+        """
+        # TODO
+        return (True, "Everything ready")
+
+    def wait(self, completions):
+        """Given a list of Completion instances, progress any which are
+        incomplete.
+
+        @param completions: list of Completion instances
+        @returns: List with the completion operations still pending
+        """
+
+        # Check progress and update status in each operation
+        for operation in completions:
+            self.log.info("playbook <%s> status:%s", operation.playbook, operation.status)
+            if operation.is_complete:
+                operation.update_result()
+
+        completions = [operation for operation in completions
+                       if not operation.is_complete]
+
+        self.log.info("Operations pending: %s", len(completions))
+
+        return completions
+
+    def serve(self):
+        """ Mandatory for standby modules
+        """
+        self.log.info("Starting Ansible Orchestrator module ...")
+
+        # Verify config options (Just that settings are available)
+        self.verify_config()
+
+        # Ansible runner service client
+        try:
+            self.ar_client = Client(server_url = self.get_config('server_url', ''),
+                                    user = self.get_config('username', ''),
+                                    password = self.get_config('password', ''),
+                                    verify_server = self.get_config('verify_server', True),
+                                    logger = self.log)
+        except Exception:
+            self.log.exception("Ansible Runner Service not available. "
+                               "Check external server status/TLS identity or "
+                               "connection options. If configuration options "
+                               "changed, try to disable/enable the module.")
+            self.shutdown()
+            return
+
+        self.run = True
+
+    def shutdown(self):
+        self.log.info('Stopping Ansible orchestrator module')
+        self.run = False
+
+    def get_inventory(self, node_filter=None):
+        """
+
+        @param node_filter: node filter instance
+        @returns: An AnsibleReadOperation instance (Completion Object)
+        """
+
+        # Create a new read completion object to execute the playbook
+        ansible_operation = AnsibleReadOperation(client = self.ar_client,
+                                                 playbook = GET_STORAGE_DEVICES_CATALOG_PLAYBOOK,
+                                                 logger = self.log,
+                                                 result_pattern = "RESULTS",
+                                                 params = "{}")
+
+        # Assign the process_output function
+        ansible_operation.process_output = process_inventory_json
+        ansible_operation.event_filter = "runner_on_ok"
+
+        # Execute the playbook to obtain data
+        ansible_operation.execute_playbook()
+
+        self.all_completions.append(ansible_operation)
+
+        return ansible_operation
+
+    def create_osds(self, osd_spec):
+        """
+        Create one or more OSDs within a single Drive Group.
+
+        The principal argument here is the drive_group member
+        of OsdSpec: other fields are advisory/extensible for any
+        finer-grained OSD feature enablement (choice of backing store,
+        compression/encryption, etc).
+
+        :param osd_spec: OsdCreationSpec
+        """
+
+    def verify_config(self):
+
+        if not self.get_config('server_url', ''):
+            self.log.error(
+                "No Ansible Runner Service base URL <server_name>:<port>. "
+                "Try 'ceph config set mgr mgr/%s/server_url <server name/ip>:<port>'",
+                self.module_name)
+
+        if not self.get_config('username', ''):
+            self.log.error(
+                "No Ansible Runner Service user. "
+                "Try 'ceph config set mgr mgr/%s/username <string value>'",
+                self.module_name)
+
+        if not self.get_config('password', ''):
+            self.log.error(
+                "No Ansible Runner Service user password. "
+                "Try 'ceph config set mgr mgr/%s/password <string value>'",
+                self.module_name)
+
+        if not self.get_config('verify_server', ''):
+            self.log.error(
+                "TLS server identity verification is enabled by default. "
+                "Use 'ceph config set mgr mgr/{0}/verify_server False' to disable it. "
+                "Use 'ceph config set mgr mgr/{0}/verify_server <path>' to "
+                "point to the CA bundle used for verification".format(self.module_name))
+
+
+# Auxiliary functions
+#==============================================================================
+
+def process_inventory_json(inventory_events, ar_client, playbook_uuid):
+    """ Adapt the output of the playbook used in 'get_inventory'
+    to the Orchestrator expected output (list of InventoryNode)
+
+    @param inventory_events: events dict with the results
+
+        Example:
+        inventory_events =
+        {'37-100564f1-9fed-48c2-bd62-4ae8636dfcdb': {'host': '192.168.121.254',
+                                                     'task': 'RESULTS',
+                                                     'event': 'runner_on_ok'},
+         '36-2016b900-e38f-7dcd-a2e7-00000000000e': {'host': '192.168.121.252',
+                                                     'task': 'RESULTS',
+                                                     'event': 'runner_on_ok'}}
+    @param ar_client: Ansible Runner Service client
+    @param playbook_uuid: Playbook identifier
+
+    @return : list of InventoryNode
+    """
+
+    # Obtain the needed data for each result event
+    inventory_nodes = []
+
+    # Loop over the result events and request the event data
+    for event_key, data in inventory_events.items():
+        event_response = ar_client.http_get(EVENT_DATA_URL % (playbook_uuid,
+                                                              event_key))
+
+        # Process the data for each event
+        if event_response:
+            event_data = json.loads(event_response.text)["data"]["event_data"]
+
+            free_disks = event_data["res"]["disks_catalog"]
+            for item, data in free_disks.items():
+                if item not in [host.name for host in inventory_nodes]:
+
+                    devs = []
+                    for dev_key, dev_data in data.items():
+                        if dev_key not in [device.id for device in devs]:
+                            dev = orchestrator.InventoryDevice()
+                            dev.id = dev_key
+                            dev.type = 'hdd' if dev_data["rotational"] else "ssd/nvme"
+                            dev.size = dev_data["sectorsize"] * dev_data["sectors"]
+                            devs.append(dev)
+
+                    inventory_nodes.append(
+                        orchestrator.InventoryNode(item, devs))
+
+
+    return inventory_nodes
+unset PYTHONPATH +export CEPH_BUILD_DIR=$CEPH_BUILD_DIR + +source ${MGR_ANSIBLE_VIRTUALENV}/bin/activate + +if [ "$WITH_PYTHON2" = "ON" ]; then + ENV_LIST+="py27" +fi +if [ "$WITH_PYTHON3" = "ON" ]; then + ENV_LIST+="py3" +fi + +tox -c ${TOX_PATH} -e ${ENV_LIST} diff --git a/src/pybind/mgr/ansible/tests/__init__.py b/src/pybind/mgr/ansible/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d --- /dev/null +++ b/src/pybind/mgr/ansible/tests/__init__.py diff --git a/src/pybind/mgr/ansible/tests/pb_execution_events.data b/src/pybind/mgr/ansible/tests/pb_execution_events.data new file mode 100644 index 00000000000..248134a3961 --- /dev/null +++ b/src/pybind/mgr/ansible/tests/pb_execution_events.data @@ -0,0 +1,183 @@ +{ + "status": "OK", + "msg": "", + "data": { + "events": { + "2-6edf768f-2923-44e1-b884-f0227b811cfc": { + "event": "playbook_on_start" + }, + "3-2016b900-e38f-7dcd-a2e7-000000000008": { + "event": "playbook_on_play_start" + }, + "4-2016b900-e38f-7dcd-a2e7-000000000012": { + "event": "playbook_on_task_start", + "task": "Gathering Facts" + }, + "5-19ae1e5e-aa2d-479e-845a-ef0253cc1f99": { + "event": "runner_on_ok", + "host": "192.168.121.245", + "task": "Gathering Facts" + }, + "6-aad3acc4-06a3-4c97-82ff-31e9e484b1f5": { + "event": "runner_on_ok", + "host": "192.168.121.61", + "task": "Gathering Facts" + }, + "7-55298017-3e7d-4734-b316-bbe13ce1da5e": { + "event": "runner_on_ok", + "host": "192.168.121.254", + "task": "Gathering Facts" + }, + "8-2016b900-e38f-7dcd-a2e7-00000000000a": { + "event": "playbook_on_task_start", + "task": "setup" + }, + "9-2085ccb6-e337-4b9f-bc38-1d8bbf9b973f": { + "event": "runner_on_ok", + "host": "192.168.121.254", + "task": "setup" + }, + "10-e14cdbbc-4883-436c-a41c-a8194ec69075": { + "event": "runner_on_ok", + "host": "192.168.121.245", + "task": "setup" + }, + "11-6d815a26-df53-4240-b8b6-2484e88e4f48": { + "event": "runner_on_ok", + "host": "192.168.121.61", + "task": "setup" + }, + "12-2016b900-e38f-7dcd-a2e7-00000000000b": { + "event": "playbook_on_task_start", + "task": "Get a list of block devices (excludes loop and child devices)" + }, + "13-799b0119-ccab-4eca-b30b-a37b0bafa02c": { + "event": "runner_on_ok", + "host": "192.168.121.245", + "task": "Get a list of block devices (excludes loop and child devices)" + }, + "14-6beb6958-4bfd-4a9c-bd2c-d20d00248605": { + "event": "runner_on_ok", + "host": "192.168.121.61", + "task": "Get a list of block devices (excludes loop and child devices)" + }, + "15-3ca99cc8-98ea-4967-8f2d-115426d00b6a": { + "event": "runner_on_ok", + "host": "192.168.121.254", + "task": "Get a list of block devices (excludes loop and child devices)" + }, + "16-2016b900-e38f-7dcd-a2e7-00000000000c": { + "event": "playbook_on_task_start", + "task": "check if disk {{ item }} is free" + }, + "17-8c88141a-08d1-411f-a855-9f7702a49c4e": { + "event": "runner_item_on_failed", + "host": "192.168.121.245", + "task": "check if disk vda is free" + }, + "18-4457db98-6f18-4f63-bfaa-584db5eea05b": { + "event": "runner_on_failed", + "host": "192.168.121.245", + "task": "check if disk {{ item }} is free" + }, + "19-ac3c72cd-1fbb-495a-be69-53fa6029f356": { + "event": "runner_item_on_failed", + "host": "192.168.121.61", + "task": "check if disk vda is free" + }, + "20-d161cb70-ba2e-4571-b029-c6428a566fef": { + "event": "runner_on_failed", + "host": "192.168.121.61", + "task": "check if disk {{ item }} is free" + }, + "21-65f1ce5c-2d86-4cc3-8e10-cff6bf6cbd82": { + "event": "runner_item_on_failed", + "host": "192.168.121.254", + 
"task": "check if disk sda is free" + }, + "22-7f86dcd4-4ef7-4f5a-9db3-c3780b67cc4b": { + "event": "runner_item_on_failed", + "host": "192.168.121.254", + "task": "check if disk sdb is free" + }, + "23-837bf4f6-a912-46a8-b94b-55aa66a935c4": { + "event": "runner_item_on_ok", + "host": "192.168.121.254", + "task": "check if disk sdc is free" + }, + "24-adf6238d-723f-4783-9226-8475419d466e": { + "event": "runner_item_on_failed", + "host": "192.168.121.254", + "task": "check if disk vda is free" + }, + "25-554661d8-bc34-4885-a589-4960d6b8a487": { + "event": "runner_on_failed", + "host": "192.168.121.254", + "task": "check if disk {{ item }} is free" + }, + "26-2016b900-e38f-7dcd-a2e7-00000000000d": { + "event": "playbook_on_task_start", + "task": "Update hosts freedisk list" + }, + "27-52df484c-30a0-4e3b-9057-02ca345c5790": { + "event": "runner_item_on_skipped", + "host": "192.168.121.254", + "task": "Update hosts freedisk list" + }, + "28-083616ad-3c1f-4fb8-a06c-5d64e670e362": { + "event": "runner_item_on_skipped", + "host": "192.168.121.254", + "task": "Update hosts freedisk list" + }, + "29-bffc68d3-5448-491f-8780-07858285f5cd": { + "event": "runner_item_on_skipped", + "host": "192.168.121.245", + "task": "Update hosts freedisk list" + }, + "30-cca2dfd9-16e9-4fcb-8bf7-c4da7dab5668": { + "event": "runner_on_skipped", + "host": "192.168.121.245", + "task": "Update hosts freedisk list" + }, + "31-158a98ac-7e8d-4ebb-8c53-4467351a2d3a": { + "event": "runner_item_on_ok", + "host": "192.168.121.254", + "task": "Update hosts freedisk list" + }, + "32-06a7e809-8d82-41df-b01d-45d94e519cb7": { + "event": "runner_item_on_skipped", + "host": "192.168.121.254", + "task": "Update hosts freedisk list" + }, + "33-d5cdbb58-728a-4be5-abf1-4a051146e727": { + "event": "runner_item_on_skipped", + "host": "192.168.121.61", + "task": "Update hosts freedisk list" + }, + "34-9b3c570b-22d8-4539-8c94-d0c1cbed8633": { + "event": "runner_on_ok", + "host": "192.168.121.254", + "task": "Update hosts freedisk list" + }, + "35-93336830-03cd-43ff-be87-a7e063ca7547": { + "event": "runner_on_skipped", + "host": "192.168.121.61", + "task": "Update hosts freedisk list" + }, + "36-2016b900-e38f-7dcd-a2e7-00000000000e": { + "event": "playbook_on_task_start", + "task": "RESULTS" + }, + "37-100564f1-9fed-48c2-bd62-4ae8636dfcdb": { + "event": "runner_on_ok", + "host": "192.168.121.254", + "task": "RESULTS" + }, + "38-20a64160-30a1-481f-a3ee-36e491bc7869": { + "event": "playbook_on_stats" + } + }, + "total_events": 37 + } +} + diff --git a/src/pybind/mgr/ansible/tests/test_client_playbooks.py b/src/pybind/mgr/ansible/tests/test_client_playbooks.py new file mode 100644 index 00000000000..4a75b5c7a0f --- /dev/null +++ b/src/pybind/mgr/ansible/tests/test_client_playbooks.py @@ -0,0 +1,266 @@ +import logging +import unittest +import mock +import json + +import requests_mock + +from requests.exceptions import ConnectionError + +from ..ansible_runner_svc import Client, PlayBookExecution, ExecutionStatusCode, \ + LOGIN_URL, API_URL, PLAYBOOK_EXEC_URL, \ + PLAYBOOK_EVENTS + + +SERVER_URL = "ars:5001" +USER = "admin" +PASSWORD = "admin" +CERTIFICATE = "" + +# Playbook attributes +PB_NAME = "test_playbook" +PB_UUID = "1733c3ac" + +# Playbook execution data file +PB_EVENTS_FILE = "./tests/pb_execution_events.data" + +# create console handler and set level to info +logger = logging.getLogger() +handler = logging.StreamHandler() +handler.setLevel(logging.INFO) +formatter = logging.Formatter("%(levelname)s - %(message)s") 
+handler.setFormatter(formatter) +logger.addHandler(handler) + + +def mock_login(mock_server): + + the_login_url = "https://%s/%s" % (SERVER_URL,LOGIN_URL) + + mock_server.register_uri("GET", + the_login_url, + json={"status": "OK", + "msg": "Token returned", + "data": {"token": "dummy_token"}}, + status_code=200) + + the_api_url = "https://%s/%s" % (SERVER_URL,API_URL) + mock_server.register_uri("GET", + the_api_url, + text="<!DOCTYPE html>api</html>", + status_code=200) + +def mock_get_pb(mock_server, playbook_name, return_code): + + mock_login(mock_server) + + ars_client = Client(SERVER_URL, USER, PASSWORD, + CERTIFICATE, logger) + + the_pb_url = "https://%s/%s/%s" % (SERVER_URL, PLAYBOOK_EXEC_URL, playbook_name) + + if return_code == 404: + mock_server.register_uri("POST", + the_pb_url, + json={ "status": "NOTFOUND", + "msg": "playbook file not found", + "data": {}}, + status_code=return_code) + elif return_code == 202: + mock_server.register_uri("POST", + the_pb_url, + json={ "status": "STARTED", + "msg": "starting", + "data": { "play_uuid": "1733c3ac" }}, + status_code=return_code) + + return PlayBookExecution(ars_client, playbook_name, logger, + result_pattern = "RESULTS") + +class ARSclientTest(unittest.TestCase): + + def test_server_not_reachable(self): + + with self.assertRaises(ConnectionError): + ars_client = Client(SERVER_URL, USER, PASSWORD, + CERTIFICATE, logger) + + def test_server_wrong_USER(self): + + with requests_mock.Mocker() as mock_server: + the_login_url = "https://%s/%s" % (SERVER_URL,LOGIN_URL) + mock_server.get(the_login_url, + json={"status": "NOAUTH", + "msg": "Access denied invalid login: unknown USER", + "data": {}}, + status_code=401) + + + ars_client = Client(SERVER_URL, USER, PASSWORD, + CERTIFICATE, logger) + + self.assertFalse(ars_client.is_operative(), + "Operative attribute expected to be False") + + def test_server_connection_ok(self): + + with requests_mock.Mocker() as mock_server: + + mock_login(mock_server) + + ars_client = Client(SERVER_URL, USER, PASSWORD, + CERTIFICATE, logger) + + self.assertTrue(ars_client.is_operative(), + "Operative attribute expected to be True") + +class PlayBookExecutionTests(unittest.TestCase): + + + def test_playbook_execution_ok(self): + """Check playbook id is set when the playbook is launched + """ + with requests_mock.Mocker() as mock_server: + + test_pb = mock_get_pb(mock_server, PB_NAME, 202) + + test_pb.launch() + + self.assertEqual(test_pb.play_uuid, PB_UUID, + "Found Unexpected playbook uuid") + + + + def test_playbook_execution_error(self): + """Check playbook id is not set when the playbook is not present + """ + + with requests_mock.Mocker() as mock_server: + + test_pb = mock_get_pb(mock_server, "unknown_playbook", 404) + + test_pb.launch() + + self.assertEqual(test_pb.play_uuid, "", + "Playbook uuid not empty") + + def test_playbook_not_launched(self): + """Check right status code when Playbook execution has not been launched + """ + + with requests_mock.Mocker() as mock_server: + + test_pb = mock_get_pb(mock_server, PB_NAME, 202) + + # Check playbook not launched + self.assertEqual(test_pb.get_status(), + ExecutionStatusCode.NOT_LAUNCHED, + "Wrong status code for playbook not launched") + + def test_playbook_launched(self): + """Check right status code when Playbook execution has been launched + """ + + with requests_mock.Mocker() as mock_server: + + test_pb = mock_get_pb(mock_server, PB_NAME, 202) + + test_pb.launch() + + the_status_url = "https://%s/%s/%s" % (SERVER_URL, + PLAYBOOK_EXEC_URL, + PB_UUID) + 
mock_server.register_uri("GET", + the_status_url, + json={"status": "OK", + "msg": "running", + "data": {"task": "Step 2", + "last_task_num": 6} + }, + status_code=200) + + self.assertEqual(test_pb.get_status(), + ExecutionStatusCode.ON_GOING, + "Wrong status code for a running playbook") + + self.assertEqual(test_pb.play_uuid, PB_UUID, + "Unexpected playbook uuid") + + def test_playbook_finish_ok(self): + """Check right status code when Playbook execution is succesful + """ + with requests_mock.Mocker() as mock_server: + + test_pb = mock_get_pb(mock_server, PB_NAME, 202) + + test_pb.launch() + + the_status_url = "https://%s/%s/%s" % (SERVER_URL, + PLAYBOOK_EXEC_URL, + PB_UUID) + mock_server.register_uri("GET", + the_status_url, + json={"status": "OK", + "msg": "successful", + "data": {} + }, + status_code=200) + + self.assertEqual(test_pb.get_status(), + ExecutionStatusCode.SUCCESS, + "Wrong status code for a playbook executed succesfully") + + def test_playbook_finish_error(self): + """Check right status code when Playbook execution has failed + """ + with requests_mock.Mocker() as mock_server: + + test_pb = mock_get_pb(mock_server, PB_NAME, 202) + + test_pb.launch() + + the_status_url = "https://%s/%s/%s" % (SERVER_URL, + PLAYBOOK_EXEC_URL, + PB_UUID) + mock_server.register_uri("GET", + the_status_url, + json={"status": "OK", + "msg": "failed", + "data": {} + }, + status_code=200) + + self.assertEqual(test_pb.get_status(), + ExecutionStatusCode.ERROR, + "Wrong status code for a playbook with error") + + def test_playbook_get_result(self): + """ Find the right result event in a set of different events + """ + with requests_mock.Mocker() as mock_server: + + test_pb = mock_get_pb(mock_server, PB_NAME, 202) + + test_pb.launch() + + the_events_url = "https://%s/%s" % (SERVER_URL, + PLAYBOOK_EVENTS % PB_UUID) + + # Get the events stored in a file + pb_events = {} + with open(PB_EVENTS_FILE) as events_file: + pb_events = json.loads(events_file.read()) + + mock_server.register_uri("GET", + the_events_url, + json=pb_events, + status_code=200) + + result = test_pb.get_result("runner_on_ok") + + self.assertEqual(len(result.keys()), 1, + "Unique result event not found") + + self.assertIn("37-100564f1-9fed-48c2-bd62-4ae8636dfcdb", + result.keys(), + "Predefined result event not found") diff --git a/src/pybind/mgr/ansible/tox.ini b/src/pybind/mgr/ansible/tox.ini new file mode 100644 index 00000000000..1df28401be7 --- /dev/null +++ b/src/pybind/mgr/ansible/tox.ini @@ -0,0 +1,18 @@ +[tox] +envlist = py27,py3 +skipsdist = true +toxworkdir = {env:CEPH_BUILD_DIR}/ansible +minversion = 2.8.1 + +[testenv] +deps = + pytest + mock + requests-mock +setenv= + UNITTEST = true + py27: PYTHONPATH = {toxinidir}/../../../../build/lib/cython_modules/lib.2 + py3: PYTHONPATH = {toxinidir}/../../../../build/lib/cython_modules/lib.3 + +commands= + {envbindir}/py.test tests/ diff --git a/src/pybind/mgr/dashboard/HACKING.rst b/src/pybind/mgr/dashboard/HACKING.rst index b2d8d8a069e..db77b09d7d9 100644 --- a/src/pybind/mgr/dashboard/HACKING.rst +++ b/src/pybind/mgr/dashboard/HACKING.rst @@ -725,6 +725,44 @@ same applies to other request types: | DELETE | Yes | delete | 204 | +--------------+------------+----------------+-------------+ +How to use a custom API endpoint in a RESTController? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you don't have any access restriction you can use ``@Endpoint``. 
If you
+have set a permission scope to restrict access to your endpoints,
+``@Endpoint`` will fail, as it doesn't know which permission property should be
+used. To use a custom endpoint inside a restricted ``RESTController``, use
+``@RESTController.Collection`` instead. You can also choose
+``@RESTController.Resource`` if you have set a ``RESOURCE_ID`` in your
+``RESTController`` class.
+
+.. code-block:: python
+
+  import cherrypy
+  from ..tools import ApiController, RESTController
+
+  @ApiController('ping', Scope.Ping)
+  class Ping(RESTController):
+    RESOURCE_ID = 'ping'
+
+    @RESTController.Resource('GET')
+    def some_get_endpoint(self):
+      return {"msg": "Hello"}
+
+    @RESTController.Collection('POST')
+    def some_post_endpoint(self, **data):
+      return {"msg": data}
+
+Both decorators also support four parameters to customize the
+endpoint:
+
+* ``method="GET"``: the HTTP method allowed to access this endpoint.
+* ``path="/<method_name>"``: the URL path of the endpoint, excluding the
+  controller URL path prefix.
+* ``status=200``: the HTTP response status code.
+* ``query_params=[]``: list of method parameter names that correspond to URL
+  query parameters.
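
For example, these parameters can be combined on one decorator; a sketch based
on the parameter list above, reusing the ``Ping`` controller (the paths and
method names here are illustrative):

.. code-block:: python

  @ApiController('ping', Scope.Ping)
  class Ping(RESTController):
    RESOURCE_ID = 'ping'

    # POST /api/ping/custom-path responds with 201 instead of 200
    @RESTController.Collection('POST', path='/custom-path', status=201)
    def create_something(self, **data):
      return {"msg": data}

    # GET /api/ping/query?limit=10 reads 'limit' from the query string
    @RESTController.Collection('GET', path='/query', query_params=['limit'])
    def query_something(self, limit=None):
      return {"limit": limit}

How to restrict access to a controller?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/src/pybind/mgr/dashboard/controllers/__init__.py b/src/pybind/mgr/dashboard/controllers/__init__.py
index ce2b40a4452..ea978126687 100644
--- a/src/pybind/mgr/dashboard/controllers/__init__.py
+++ b/src/pybind/mgr/dashboard/controllers/__init__.py
@@ -602,7 +602,7 @@ class RESTController(BaseController):
     # should be overridden by subclasses.
     # to specify a composite id (two parameters) use '/'. e.g., "param1/param2".
     # If subclasses don't override this property we try to infer the structure
-    # of the resourse ID.
+    # of the resource ID.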
RESOURCE_ID = None _permission_map = { diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.html b/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.html index 54d9e40bd15..a5f9014bd75 100644 --- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.html +++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.html @@ -19,8 +19,7 @@ *ngIf="healthData.health?.status"> <ng-container *ngIf="healthData.health?.checks?.length > 0"> <ng-template #healthChecks> - <p class="logs-link" - i18n>→ See <a routerLink="/logs">Logs</a> for more details.</p> + <ng-container *ngTemplateOutlet="logsLink"></ng-container> <ul> <li *ngFor="let check of healthData.health.checks"> <span [ngStyle]="check.severity | healthColor">{{ check.type }}</span>: {{ check.summary.message }} @@ -39,7 +38,7 @@ {{ healthData.health.status }} </div> </ng-container> - <ng-container *ngIf="healthData.health?.checks?.length == 0"> + <ng-container *ngIf="!healthData.health?.checks?.length"> <div [ngStyle]="healthData.health.status | healthColor"> {{ healthData.health.status }} </div> @@ -237,8 +236,7 @@ (click)="pgStatusTarget.toggle()" *ngIf="healthData.pg_info"> <ng-template #pgStatus> - <p class="logs-link" - i18n>→ See <a routerLink="/logs">Logs</a> for more details.</p> + <ng-container *ngTemplateOutlet="logsLink"></ng-container> <ul> <li *ngFor="let pgStatesText of healthData.pg_info.statuses | keyvalue"> {{ pgStatesText.key }}: {{ pgStatesText.value }} @@ -260,4 +258,11 @@ </cd-info-card> </div> </cd-info-group> + + <ng-template #logsLink> + <ng-container *ngIf="permissions.log.read"> + <p class="logs-link" + i18n><i class="fa fa-info-circle"></i> See <a routerLink="/logs">Logs</a> for more details.</p> + </ng-container> + </ng-template> </div> diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.spec.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.spec.ts index f4de0412581..847121617ae 100644 --- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.spec.ts +++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.spec.ts @@ -1,6 +1,7 @@ import { HttpClientTestingModule } from '@angular/common/http/testing'; import { NO_ERRORS_SCHEMA } from '@angular/core'; import { ComponentFixture, TestBed } from '@angular/core/testing'; +import { By } from '@angular/platform-browser'; import * as _ from 'lodash'; import { PopoverModule } from 'ngx-bootstrap/popover'; @@ -8,6 +9,8 @@ import { of } from 'rxjs'; import { configureTestBed, i18nProviders } from '../../../../testing/unit-test-helper'; import { HealthService } from '../../../shared/api/health.service'; +import { Permissions } from '../../../shared/models/permissions'; +import { AuthStorageService } from '../../../shared/services/auth-storage.service'; import { SharedModule } from '../../../shared/shared.module'; import { MdsSummaryPipe } from '../mds-summary.pipe'; import { MgrSummaryPipe } from '../mgr-summary.pipe'; @@ -36,6 +39,11 @@ describe('HealthComponent', () => { df: { stats: { total_objects: 0 } }, pg_info: {} }; + const fakeAuthStorageService = { + getPermissions: () => { + return new Permissions({ log: ['read'] }); + } + }; configureTestBed({ imports: [SharedModule, HttpClientTestingModule, PopoverModule.forRoot()], @@ -49,7 +57,7 @@ describe('HealthComponent', () => { PgStatusPipe ], schemas: 
[NO_ERRORS_SCHEMA], - providers: i18nProviders + providers: [i18nProviders, { provide: AuthStorageService, useValue: fakeAuthStorageService }] }); beforeEach(() => { @@ -138,4 +146,35 @@ describe('HealthComponent', () => { expect(infoGroup.querySelectorAll('cd-info-card').length).toBe(1); }); }); + + it('should render "Cluster Status" card text that is not clickable', () => { + getHealthSpy.and.returnValue(of(healthPayload)); + fixture.detectChanges(); + + const clusterStatusCard = fixture.debugElement.query( + By.css('cd-info-card[cardTitle="Cluster Status"]') + ); + const clickableContent = clusterStatusCard.query(By.css('.info-card-content-clickable')); + expect(clickableContent).toBeNull(); + expect(clusterStatusCard.nativeElement.textContent).toEqual(` ${healthPayload.health.status} `); + }); + + it('should render "Cluster Status" card text that is clickable (popover)', () => { + const payload = _.cloneDeep(healthPayload); + payload.health['status'] = 'HEALTH_WARN'; + payload.health['checks'] = [ + { severity: 'HEALTH_WARN', type: 'WRN', summary: { message: 'fake warning' } } + ]; + + getHealthSpy.and.returnValue(of(payload)); + fixture.detectChanges(); + + expect(component.permissions.log.read).toBeTruthy(); + + const clusterStatusCard = fixture.debugElement.query( + By.css('cd-info-card[cardTitle="Cluster Status"]') + ); + const clickableContent = clusterStatusCard.query(By.css('.info-card-content-clickable')); + expect(clickableContent.nativeElement.textContent).toEqual(` ${payload.health.status} `); + }); }); diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.ts index 5ec12fee396..31ce19f494a 100644 --- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.ts +++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard/health/health.component.ts @@ -4,6 +4,8 @@ import { I18n } from '@ngx-translate/i18n-polyfill'; import * as _ from 'lodash'; import { HealthService } from '../../../shared/api/health.service'; +import { Permissions } from '../../../shared/models/permissions'; +import { AuthStorageService } from '../../../shared/services/auth-storage.service'; @Component({ selector: 'cd-health', @@ -13,8 +15,15 @@ import { HealthService } from '../../../shared/api/health.service'; export class HealthComponent implements OnInit, OnDestroy { healthData: any; interval: number; - - constructor(private healthService: HealthService, private i18n: I18n) {} + permissions: Permissions; + + constructor( + private healthService: HealthService, + private i18n: I18n, + private authStorageService: AuthStorageService + ) { + this.permissions = this.authStorageService.getPermissions(); + } ngOnInit() { this.getHealth(); diff --git a/src/pybind/mgr/dashboard/frontend/src/locale/messages.xlf b/src/pybind/mgr/dashboard/frontend/src/locale/messages.xlf index 890b142d3f1..11343488773 100644 --- a/src/pybind/mgr/dashboard/frontend/src/locale/messages.xlf +++ b/src/pybind/mgr/dashboard/frontend/src/locale/messages.xlf @@ -28,7 +28,7 @@ </context-group> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">82</context> + <context context-type="linenumber">81</context> </context-group> </trans-unit><trans-unit id="624f596cc3320f5e0a0d7c7346c364e5af9bdd8c" datatype="html"> <source>Monitors</source> @@ -38,7 +38,7 @@ </context-group> 
<context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">49</context> + <context context-type="linenumber">48</context> </context-group> </trans-unit><trans-unit id="1a9183778f2c6473d7ccb080f651caa01faaf70c" datatype="html"> <source>OSDs</source> @@ -48,7 +48,7 @@ </context-group> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">58</context> + <context context-type="linenumber">57</context> </context-group> </trans-unit><trans-unit id="4a41f824a35ba01d5bd7be61aa06b3e8145209d0" datatype="html"> <source>Configuration</source> @@ -80,7 +80,7 @@ </context-group> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">191</context> + <context context-type="linenumber">190</context> </context-group> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/cephfs/cephfs-detail/cephfs-detail.component.html</context> @@ -2537,16 +2537,6 @@ <context context-type="sourcefile">app/ceph/cluster/configuration/configuration-details/configuration-details.component.html</context> <context context-type="linenumber">102</context> </context-group> - </trans-unit><trans-unit id="4f951c2d3472dda85872e7b09fbab383463aa4b5" datatype="html"> - <source>→ See <x id="START_LINK" ctype="x-a" equiv-text="<a>"/>Logs<x id="CLOSE_LINK" ctype="x-a" equiv-text="</a>"/> for more details.</source> - <context-group purpose="location"> - <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">23</context> - </context-group> - <context-group purpose="location"> - <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">241</context> - </context-group> </trans-unit><trans-unit id="73caac4265ea7314ff061e5a1d78a6361a6dd3b8" datatype="html"> <source>Cluster Status</source> <context-group purpose="location"> @@ -2557,73 +2547,73 @@ <source>Manager Daemons</source> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">70</context> + <context context-type="linenumber">69</context> </context-group> </trans-unit><trans-unit id="946ac5dea9921dc09d7b0a63b89535371f283b19" datatype="html"> <source>Object Gateways</source> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">91</context> + <context context-type="linenumber">90</context> </context-group> </trans-unit><trans-unit id="ff03fa5bcf37c4da46ad736c1f7d03f959e8ba9a" datatype="html"> <source>Metadata Servers</source> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">99</context> + <context context-type="linenumber">98</context> </context-group> </trans-unit><trans-unit id="d817609ba4993eba859409ab71e566168f4d5f5a" datatype="html"> <source>iSCSI Gateways</source> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">110</context> + <context 
context-type="linenumber">109</context> </context-group> </trans-unit><trans-unit id="42c13e50391250ea9379bdf55d5d6c0228c0c8bc" datatype="html"> <source>Client IOPS</source> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">126</context> + <context context-type="linenumber">125</context> </context-group> </trans-unit><trans-unit id="52213660b2454d139ada3079a42ec6caf3c3c01e" datatype="html"> <source>Client Throughput</source> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">135</context> + <context context-type="linenumber">134</context> </context-group> </trans-unit><trans-unit id="32efd1c3f70e3c5244239de97a2cc95d98534a14" datatype="html"> <source>Client Read/Write</source> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">144</context> + <context context-type="linenumber">143</context> </context-group> </trans-unit><trans-unit id="5277e7546d03a767761199b70deb8c77a921b390" datatype="html"> <source>Client Recovery</source> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">162</context> + <context context-type="linenumber">161</context> </context-group> </trans-unit><trans-unit id="6d9a9f55046891733ef71170e7652063765eb542" datatype="html"> <source>Scrub</source> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">171</context> + <context context-type="linenumber">170</context> </context-group> </trans-unit><trans-unit id="3cc9c2ae277393b3946b38c088dabff671b1ee1b" datatype="html"> <source>Performance</source> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">120</context> + <context context-type="linenumber">119</context> </context-group> </trans-unit><trans-unit id="88f383269db2d32cccee9e936fe549dccb9fdbf4" datatype="html"> <source>Raw Capacity</source> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">201</context> + <context context-type="linenumber">200</context> </context-group> </trans-unit><trans-unit id="afdb601c16162f2c798b16a2920955f1cc6a20aa" datatype="html"> <source>Objects</source> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">214</context> + <context context-type="linenumber">213</context> </context-group> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/block/rbd-details/rbd-details.component.html</context> @@ -2633,19 +2623,25 @@ <source>PGs per OSD</source> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">223</context> + <context context-type="linenumber">222</context> </context-group> </trans-unit><trans-unit id="498a109c6e9e94f1966de01aa0326f7f0ac6fb52" datatype="html"> <source>PG Status</source> <context-group 
purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">232</context> + <context context-type="linenumber">231</context> </context-group> </trans-unit><trans-unit id="ce9dfdc6dccb28dc75a78c704e09dc18fb02dcfa" datatype="html"> <source>Capacity</source> <context-group purpose="location"> <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> - <context context-type="linenumber">182</context> + <context context-type="linenumber">181</context> + </context-group> + </trans-unit><trans-unit id="44ecac93d67c6a671198091c2270354f80322327" datatype="html"> + <source><x id="START_ITALIC_TEXT" ctype="x-i" equiv-text="<i>"/><x id="CLOSE_ITALIC_TEXT" ctype="x-i" equiv-text="</i>"/> See <x id="START_LINK" ctype="x-a" equiv-text="<a>"/>Logs<x id="CLOSE_LINK" ctype="x-a" equiv-text="</a>"/> for more details.</source> + <context-group purpose="location"> + <context context-type="sourcefile">app/ceph/dashboard/health/health.component.html</context> + <context context-type="linenumber">265</context> </context-group> </trans-unit><trans-unit id="f0b5d789d42c0e69348e5fe0037fcbf5b5fbbdcc" datatype="html"> <source>Move an image to trash</source> diff --git a/src/pybind/mgr/dashboard/run-backend-api-tests.sh b/src/pybind/mgr/dashboard/run-backend-api-tests.sh index 070a7b7e6ac..c9efb2d5bc1 100755 --- a/src/pybind/mgr/dashboard/run-backend-api-tests.sh +++ b/src/pybind/mgr/dashboard/run-backend-api-tests.sh @@ -115,7 +115,7 @@ run_teuthology_tests() { export PATH=$BUILD_DIR/bin:$PATH export LD_LIBRARY_PATH=$BUILD_DIR/lib/cython_modules/lib.${CEPH_PY_VERSION_MAJOR}/:$BUILD_DIR/lib - export PYTHONPATH=$TEMP_DIR/teuthology:$BUILD_DIR/../qa:$BUILD_DIR/lib/cython_modules/lib.${CEPH_PY_VERSION_MAJOR}/ + export PYTHONPATH=$TEMP_DIR/teuthology:$BUILD_DIR/../qa:$BUILD_DIR/lib/cython_modules/lib.${CEPH_PY_VERSION_MAJOR}/:$BUILD_DIR/../src/pybind eval python ../qa/tasks/vstart_runner.py $TEST_CASES deactivate diff --git a/src/pybind/mgr/dashboard/tests/test_controllers.py b/src/pybind/mgr/dashboard/tests/test_controllers.py index 333f7a37742..c6fcb25b0d0 100644 --- a/src/pybind/mgr/dashboard/tests/test_controllers.py +++ b/src/pybind/mgr/dashboard/tests/test_controllers.py @@ -175,7 +175,7 @@ class ControllersTest(ControllerTestCase): self.assertStatus(200) self.assertJsonBody({'key': '300', 'data': 30}) - def test_rest_resourse(self): + def test_rest_resource(self): self._get('/test/api/rtest/{}/{}/{}/rmethod?opt=4'.format(300, 2, 3)) self.assertStatus(200) self.assertJsonBody({'key': '300', 'skey': '2', 'ekey': '3', diff --git a/src/pybind/mgr/orchestrator_cli/module.py b/src/pybind/mgr/orchestrator_cli/module.py index 59934bec931..756f9326366 100644 --- a/src/pybind/mgr/orchestrator_cli/module.py +++ b/src/pybind/mgr/orchestrator_cli/module.py @@ -42,6 +42,13 @@ class OrchestratorCli(MgrModule): "perm": "rw" }, { + 'cmd': "orchestrator service rm " + "name=svc_type,type=CephString " + "name=svc_id,type=CephString ", + "desc": "Remove a service", + "perm": "rw" + }, + { 'cmd': "orchestrator set backend " "name=module,type=CephString,req=true", "desc": "Select orchestrator module backend", @@ -78,7 +85,7 @@ class OrchestratorCli(MgrModule): done = False while done is False: - done = self._oremote("wait", completions) + done = self._oremote("wait", completions) == [] if not done: any_nonpersistent = False @@ -249,6 +256,14 @@ class OrchestratorCli(MgrModule): else: raise 
NotImplementedError(svc_type) + def _service_rm(self, cmd): + svc_type = cmd['svc_type'] + svc_id = cmd['svc_id'] + + completion = self._oremote("remove_stateless_service", svc_type, svc_id) + self._wait([completion]) + return HandleCommandResult(rs="Success.") + def _set_backend(self, cmd): """ We implement a setter command instead of just having the user @@ -330,6 +345,8 @@ class OrchestratorCli(MgrModule): return self._service_status(cmd) elif cmd['prefix'] == "orchestrator service add": return self._service_add(cmd) + elif cmd['prefix'] == "orchestrator service rm": + return self._service_rm(cmd) elif cmd['prefix'] == "orchestrator set backend": return self._set_backend(cmd) elif cmd['prefix'] == "orchestrator status": diff --git a/src/pybind/mgr/rook/module.py b/src/pybind/mgr/rook/module.py index 90390a50cc6..d01576d21d1 100644 --- a/src/pybind/mgr/rook/module.py +++ b/src/pybind/mgr/rook/module.py @@ -376,6 +376,11 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator): # TODO: RGW, NFS raise NotImplementedError(service_type) + def remove_stateless_service(self, service_type, service_id): + return RookWriteCompletion( + lambda: self.rook_cluster.rm_service(service_type, service_id), None, + "Removing {0} services for {1}".format(service_type, service_id)) + def create_osds(self, spec): # Validate spec.node if not self.rook_cluster.node_exists(spec.node): diff --git a/src/pybind/mgr/rook/rook_cluster.py b/src/pybind/mgr/rook/rook_cluster.py index ccd2bcb6bb8..48df3428dff 100644 --- a/src/pybind/mgr/rook/rook_cluster.py +++ b/src/pybind/mgr/rook/rook_cluster.py @@ -92,6 +92,9 @@ class RookCluster(object): def rook_api_get(self, path, **kwargs): return self.rook_api_call("GET", path, **kwargs) + def rook_api_delete(self, path): + return self.rook_api_call("DELETE", path) + def rook_api_patch(self, path, **kwargs): return self.rook_api_call("PATCH", path, header_params={"Content-Type": "application/json-patch+json"}, @@ -266,6 +269,25 @@ class RookCluster(object): else: raise + def rm_service(self, service_type, service_id): + assert service_type in ("mds", "rgw") + + if service_type == "mds": + rooktype = "filesystems" + elif service_type == "rgw": + rooktype = "objectstores" + + objpath = "{0}/{1}".format(rooktype, service_id) + + try: + self.rook_api_delete(objpath) + except ApiException as e: + if e.status == 404: + log.info("{0} service '{1}' does not exist".format(service_type, service_id)) + # Idempotent, succeed. 
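+                # A 404 simply means the object is already absent from the
+                # Rook API, so repeating the removal succeeds rather than
+                # raising; any other API error is re-raised below.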
+ else: + raise + def can_create_osd(self): current_cluster = self.rook_api_get( "clusters/{0}".format(self.cluster_name)) diff --git a/src/pybind/mgr/volumes/__init__.py b/src/pybind/mgr/volumes/__init__.py index f99ece4a8ca..4c5b97ce82b 100644 --- a/src/pybind/mgr/volumes/__init__.py +++ b/src/pybind/mgr/volumes/__init__.py @@ -1,2 +1,2 @@ -from module import Module +from .module import Module diff --git a/src/pybind/mgr/volumes/module.py b/src/pybind/mgr/volumes/module.py index d366b3f5989..78d6f6b2d30 100644 --- a/src/pybind/mgr/volumes/module.py +++ b/src/pybind/mgr/volumes/module.py @@ -1,7 +1,10 @@ from threading import Event import errno import json -import Queue +try: + import queue as Queue +except ImportError: + import Queue from mgr_module import MgrModule import orchestrator @@ -283,13 +286,11 @@ class Module(MgrModule): # Tear down MDS daemons # ===================== - spec = orchestrator.StatelessServiceSpec() - spec.name = vol_name try: completion = self._oremote( - "rm_stateless_service", + "remove_stateless_service", "mds", - spec + vol_name ) self._orchestrator_wait([completion]) except ImportError: diff --git a/src/test/CMakeLists.txt b/src/test/CMakeLists.txt index 1e47535e984..ec250fc7214 100644 --- a/src/test/CMakeLists.txt +++ b/src/test/CMakeLists.txt @@ -559,6 +559,11 @@ if(WITH_MGR) list(APPEND tox_tests run-tox-mgr-insights) set(MGR_INSIGHTS_VIRTUALENV ${CEPH_BUILD_VIRTUALENV}/mgr-insights-virtualenv) list(APPEND env_vars_for_tox_tests MGR_INSIGHTS_VIRTUALENV=${MGR_INSIGHTS_VIRTUALENV}) + + add_test(NAME run-tox-mgr-ansible COMMAND bash ${CMAKE_SOURCE_DIR}/src/pybind/mgr/ansible/run-tox.sh) + list(APPEND tox_tests run-tox-mgr-ansible) + set(MGR_ANSIBLE_VIRTUALENV ${CEPH_BUILD_VIRTUALENV}/mgr-ansible-virtualenv) + list(APPEND env_vars_for_tox_tests MGR_ANSIBLE_VIRTUALENV=${MGR_ANSIBLE_VIRTUALENV}) endif() set_property( |