diff --git a/environments/template/group_vars/mongo_servers.yml b/environments/template/group_vars/mongo_servers.yml index 70bb40871..d9f3e10cc 100644 --- a/environments/template/group_vars/mongo_servers.yml +++ b/environments/template/group_vars/mongo_servers.yml @@ -1,5 +1,5 @@ --- -replica_set_name: my_mongo_cluster +mongo_replica_set_name: my_mongo_cluster mongo_cluster_members: - host: "mongo3.example.com:{{ mongo_port }}" # arbiter first or change mongo_arbiter_index diff --git a/environments/template/secrets/secret_example.yml b/environments/template/secrets/secret_example.yml index 6dc112821..da8690cfa 100644 --- a/environments/template/secrets/secret_example.yml +++ b/environments/template/secrets/secret_example.yml @@ -13,7 +13,7 @@ mongo_passwords: oidcng: secret myconext: secret -mongo_admin_password: secret +mongo_admin_password: secret # this works for a first-time install, if you change it later you will have to do it manually mongo_ca_passphrase: secret engine_api_metadata_push_password: secret diff --git a/roles/mongo/README.md b/roles/mongo/README.md index 9e96770e5..035dd0f2d 100644 --- a/roles/mongo/README.md +++ b/roles/mongo/README.md @@ -14,6 +14,31 @@ Set the mongo_cluster_private_key variable encrypted in host_vars Please review the official Mongo documentation for more information. +# Mongo deployment + +To avoid surprises you can enable or disable cluster configuration with the boolean option mongo_configure_cluster. The role will only initiate or reconfigure the cluster if this is true (safest option is to use -e mongo_configure_cluster=true with your deployment when cluster configuration is necessary). +Another issue is the serial value, it is safest to set it to 1 in your playbook; if it is higher multiple mongo nodes will be restarted at once and it can break your cluster. However when you want to initialise a new cluster you need to run the tasks in parallel and serial needs to be as high as the amount of nodes. 
We handled this with a variable serial with the name serial_number in our playbook with a default 1. If cluster initialisation or reconfiguration is necessary use -e "serial_number=<number of cluster nodes>" + + +See also https://docs.ansible.com/projects/ansible/latest/playbook_guide/playbooks_strategies.html#setting-the-batch-size-with-serial + +# Cluster reconfiguration + +Warning: the cluster reconfiguration option in the mongodb_replicaset module is experimental, and you can only add or remove one node at a time. + + # Todo -- [ ] Add the possibility for adding and removing cluster members -- [ ] Add the possibility for a standalone mongo server +- [x] Check mongo_replication_roles and give a clear fail message when not set +- [ ] Add option to change the already existing admin user, for now change the password manually and change it in the ansible config accordingly +- [x] Add the possibility for adding and removing cluster members +- [x] Add the possibility for a standalone mongo server +- [x] Cluster changes can be enabled or disabled +- [ ] Reconfigure cluster always reports changed +- [ ] Initialise cluster always reports changed +- [ ] check mode for writeconcern change tasks does not report change, same for any other mongodb_shell task: "remote module (community.mongodb.mongodb_shell) does not support check mode" +- [X] Clearer error messaging for even number of votes +- [X] Role refuses to add users when a new cluster is built (3 nodes) (cannot add users on a broken cluster) +- [X] it would be helpful if role (for example primary) is not defined in host_vars but in the mongo_cluster_members array +- [X] removing primary from the cluster will not work but the error is unclear, this is related to the todo above +- [ ] is it necessary to make votes configurable? 
+- [X] preflight check: are cluster members in the inventory and mongo_servers group +- [ ] Standalone mongo also requires cluster certificates, not logical although it doesn't hurt diff --git a/roles/mongo/defaults/main.yml b/roles/mongo/defaults/main.yml index a58b2a320..ce2f16235 100644 --- a/roles/mongo/defaults/main.yml +++ b/roles/mongo/defaults/main.yml @@ -13,35 +13,37 @@ mongo_servers: [] # Set this in group_vars # Not all mongo servers in the inventory are cluster members, so we use a separate list for this. # Set this in group_vars of your environment(s). The arbiter should go first, or change the mongo_arbiter_index. # mongo_cluster_members: -# - host: "mongoarbiter.example.com:27017" +# - host: "mongoarbiter.example.com" # priority: 1 # can vote, cannot become primary -# - host: "mongo2.example.com:27017" +# port: 27017 +# - host: "mongo2.example.com" # priority: 2 -# - host: "mongo1.example.com:27017" +# port: 27017 +# - host: "mongo1.example.com" # priority: 3 -# mongo_arbiter_index: 0 - -# The replication role -# mongo_replication_role: # Set this in host_vars, it can have the values: "primary", "secondary" or arbiter +# port: 27017 # Todo: there is a link between mongo_replication_role and priority (arbiter is priority 1, primary the highest) so # setting them separately is not ideal. # The port for mongo server -mongod_port: 27017 +mongo_port: 27017 # The password for admin user -mongo_admin_pass: "{{ mongo_admin_password }}" # Set this in secrets +# mongo_admin_password: # set this in secrets + +# Are we using a cluster? 
+mongo_mode: "cluster" # cluster or standalone # The name of the replication set -replica_set_name: "{{ instance_name }}" # Set this in group_vars +mongo_replica_set_name: "{{ instance_name }}" # Set this in group_vars # Add a database mongo: users: - - { name: managerw, db_name: metadata, password: "{{ mongo_passwords.manage }}" } - - { name: oidcsrw, db_name: oidc, password: "{{ mongo_passwords.oidcng }}" } - - { name: myconextrw, db_name: myconext, password: "{{ mongo_passwords.myconext }}" } + - { name: managerw, db_name: metadata, password: "{{ mongo_passwords.manage }}", role: "readWrite" } + - { name: oidcsrw, db_name: oidc, password: "{{ mongo_passwords.oidcng }}", role: "readWrite"} + - { name: myconextrw, db_name: myconext, password: "{{ mongo_passwords.myconext }}", role: "readWrite" } # Listen on all addresses by default mongo_bind_listen_address: "0.0.0.0" @@ -53,3 +55,31 @@ mongo_pki_dir: "/etc/pki/mongo" # Users and groups mongo_group: "mongod" + +# Paths +mongo_config_file: "/etc/mongod.conf" +mongo_data_path: "/var/lib/mongo" +mongo_pymongo_version: 4.16.0 + +# cluster members +# set in group_vars +# mongo_cluster_members: +# - host: mongo1.example.com +# priority: 3 +# votes: 1 +# port: 27017 +# - host: mongo2.example.com +# priority: 2 +# votes: 1 +# port: 27017 +# - host: mongo3.example.com +# priority: 1 +# votes: 1 +# port: 27017 +# arbiterOnly: true + +mongo_cluster_write_concern: "majority" +mongo_cluster_write_timeout: 5000 + +# to avoid surprises only initiate or reconfigure cluster if this is true (safest option is to use -e mongo_configure_cluster=true with your deployment when cluster configuration is necessary) +mongo_configure_cluster: false diff --git a/roles/mongo/tasks/clusterconfig.yml b/roles/mongo/tasks/clusterconfig.yml index ced96e9b9..dd819d3da 100644 --- a/roles/mongo/tasks/clusterconfig.yml +++ b/roles/mongo/tasks/clusterconfig.yml @@ -1,60 +1,191 @@ --- -# todo this weorks only for new deployments -# rewrite so mongo 
config can be changed and cluster members can be added or removed -- name: Check if hosts are in clustered - ansible.builtin.command: mongosh --port {{ mongod_port }} --quiet --eval 'db.isMaster().hosts' - register: check_cluster - changed_when: false - check_mode: false - -- name: Debug check_cluster variable +# In this task file the cluster is configured + +# priority should match the replication role, or should the replication role be derived from the cluster members? +# todo set the write concern + +# Do some preflight checks +- name: Check some cluster related variables + when: mongo_mode == "cluster" + block: + - name: Fail on undefined mongo_replica_set_name + when: mongo_replica_set_name is not defined + ansible.builtin.fail: + msg: "Something is wrong, mongo_mode was set to cluster but mongo_replica_set_name is undefined." + +- name: Debug replica settings ansible.builtin.debug: - msg: "{{ check_cluster }}" + msg: "Replica set name {{ mongo_replica_set_name }}" verbosity: 2 -- name: Debug mongo_cluster_members variable +# Loop over cluster members and check their presence in mongo_servers group and their mode (not standalone) + +- name: Check if mongo_cluster_members exist in inventory group + ansible.builtin.assert: + that: + - item.host in groups['mongo_servers'] + fail_msg: "Server '{{ item.host }}' is not in the mongo_servers inventory group" + success_msg: "Server '{{ item.host }}' found in mongo_servers inventory group" + run_once: true + loop: "{{ mongo_cluster_members }}" + +# Loop over cluster members and check for primary + +- name: Set primary host fact + ansible.builtin.set_fact: + mongo_primary_host: "{{ (mongo_cluster_members | max(attribute='priority')).host }}" + +- name: Debug primary settings ansible.builtin.debug: - msg: "{{ mongo_cluster_members }}" + msg: "Primary is {{ mongo_primary_host }}" verbosity: 2 -- name: Debug mongo_replication_role variable +# What is the replication role of the current host +- name: Debug replication role settings 
ansible.builtin.debug: - msg: "{{ mongo_replication_role }}" + msg: "This nodes replication role is {{ mongo_replication_role }}" verbosity: 2 -- name: Initial cluster initialisation - community.mongodb.mongodb_replicaset: - login_host: localhost - login_user: admin - login_port: "{{ mongod_port }}" - login_password: "{{ mongo_admin_password }}" - replica_set: "{{ replica_set_name }}" - members: "{{ mongo_cluster_members }}" - arbiter_at_index: "{{ mongo_arbiter_index | default(0) }}" - validate: false - run_once: true - when: mongo_replication_role == 'primary' +# Cannot initialise a cluster without starting....... +- name: Enable and start mongod + ansible.builtin.service: + name: mongod.service + enabled: true + state: started -- name: Wait until cluster health is ok - community.mongodb.mongodb_status: - login_user: admin - login_password: "{{ mongo_admin_password }}" - login_database: admin - login_port: "{{ mongod_port }}" - validate: default - poll: 5 - interval: 12 - replica_set: "{{ replica_set_name }}" +# Initialise cluster block +- name: Initialise or reconfigure cluster block when: mongo_replication_role == 'primary' + block: + - name: Check if replica set is already initialised + community.mongodb.mongodb_shell: + login_host: localhost + login_user: admin + login_port: "{{ mongo_port }}" + login_password: "{{ mongo_admin_password }}" + eval: "rs.status().ok" + db: admin + register: rs_already_init + ignore_errors: true -- name: Add the admin user - community.mongodb.mongodb_user: - database: admin - name: admin - password: "{{ mongo_admin_password }}" - login_port: "{{ mongod_port }}" - roles: root - state: present - when: check_cluster.stdout == "" - no_log: true - run_once: true + - name: Debug cluster initialization check + ansible.builtin.debug: + msg: "{{ rs_already_init }}" + verbosity: 2 + + # This should be possible with community.mongodb.mongodb_replicaset + # But we keep getting authenticatione error so leave it like this for now + - name: 
Initialise replica set if necessary + community.mongodb.mongodb_shell: + login_host: localhost + login_user: admin + login_port: "{{ mongo_port }}" + login_password: "{{ mongo_admin_password }}" + eval: | + rs.initiate({ + _id: "{{ mongo_replica_set_name }}", + members: [ + {% for m in mongo_cluster_members %} + { _id: {{ loop.index0 }}, host: "{{ m.host }}:{{ m.port }}", priority: {{ m.priority }}, votes: {{ m.votes }}{% if m.arbiterOnly is defined and m.arbiterOnly and m.arbiterOnly == true %}, arbiterOnly: true {% endif %} }{{ "," if not loop.last else "" }} + {% endfor %} + ] + }) + db: admin + when: rs_already_init.failed + register: rs_init + + - name: Debug cluster initialization + ansible.builtin.debug: + msg: "{{ rs_init }}" + verbosity: 2 + + - name: Format members list + ansible.builtin.set_fact: + mongo_cluster_members_formatted: "{{ mongo_cluster_members_formatted | default([]) + [m | combine({'host': m.host ~ ':' ~ (m.port | string)}) | dict2items | rejectattr('key', 'eq', 'port') | list | items2dict] }}" + loop: "{{ mongo_cluster_members }}" + loop_control: + loop_var: m + + - name: Debug members list + ansible.builtin.debug: + msg: "{{ mongo_cluster_members }}" + verbosity: 2 + + - name: Debug formatted members list + ansible.builtin.debug: + msg: "{{ mongo_cluster_members_formatted }}" + verbosity: 2 + + # Reconfigure cluster + # todo: this always returns changed even when nothing changes + - name: Reconfigure cluster if necessary + community.mongodb.mongodb_replicaset: + login_host: localhost + login_user: admin + login_password: "{{ mongo_admin_password }}" + login_port: "{{ mongo_port }}" + reconfigure: true + replica_set: "{{ mongo_replica_set_name }}" + members: "{{ mongo_cluster_members_formatted }}" + register: rs_reconfigure + + - name: Debug cluster reconfiguration + ansible.builtin.debug: + msg: "{{ rs_reconfigure }}" + verbosity: 2 + + - name: Wait for the replicaset to stabilise + community.mongodb.mongodb_status: + replica_set: "{{ 
mongo_replica_set_name }}" + login_host: localhost + login_user: admin + login_password: "{{ mongo_admin_password }}" + login_port: "{{ mongo_port }}" + poll: 5 + interval: 30 + validate: minimal # default fails on even number of servers and although this is not a great situation, it is sometimes the temporary situation because we can onlye add or remove 1 node at a time + + # Cluster settings that cannot be changed with mongodb_replicaset + + - name: Get current default write concern + community.mongodb.mongodb_shell: + login_host: localhost + login_port: 27017 + login_user: admin + login_password: "{{ mongo_admin_password }}" + eval: "db.adminCommand({ getDefaultRWConcern: 1 })" + register: current_write_concern + changed_when: false + + - name: Debug write concern check + ansible.builtin.debug: + msg: "{{ current_write_concern.transformed_output.defaultWriteConcern }}" + verbosity: 2 + when: current_write_concern.transformed_output.defaultWriteConcern is defined + + - name: Set default write concern + when: > + current_write_concern.transformed_output.defaultWriteConcern is defined + and + (current_write_concern.transformed_output.defaultWriteConcern.w | string != mongo_cluster_write_concern | default('majority') | string + or + current_write_concern.transformed_output.defaultWriteConcern.wtimeout | int != mongo_cluster_write_timeout | default(5000) | int) + or current_write_concern.transformed_output.defaultWriteConcern is not defined + block: + - name: "set write concern majority" + when: mongo_cluster_write_concern == "majority" + community.mongodb.mongodb_shell: + login_host: localhost + login_user: admin + login_password: "{{ mongo_admin_password }}" + login_port: "{{ mongo_port }}" + eval: "db.adminCommand({ setDefaultRWConcern: 1, defaultWriteConcern: { w: \"{{ mongo_cluster_write_concern | default('majority') }}\", wtimeout: {{ mongo_cluster_write_timeout | default(5000) }} } })" + # could not get this to work with either majority with quotes or number 
without quotes so for now an ugly fix + - name: "set write concern numeric" + when: mongo_cluster_write_concern != "majority" + community.mongodb.mongodb_shell: + login_host: localhost + login_user: admin + login_password: "{{ mongo_admin_password }}" + login_port: "{{ mongo_port }}" + eval: "db.adminCommand({ setDefaultRWConcern: 1, defaultWriteConcern: { w: {{ mongo_cluster_write_concern | default('majority') }}, wtimeout: {{ mongo_cluster_write_timeout | default(5000) }} } })" diff --git a/roles/mongo/tasks/clusterhealthcheck.yml b/roles/mongo/tasks/clusterhealthcheck.yml new file mode 100644 index 000000000..aa129ed5b --- /dev/null +++ b/roles/mongo/tasks/clusterhealthcheck.yml @@ -0,0 +1,88 @@ +--- +# task file to check if cluster is up and running + +- name: Cluster check when we are not in cluster config mode + when: not mongo_configure_cluster | bool + block: + # Get the replicaset status and fail on minimal (everything but even number of nodes) + - name: Check replicaset status + community.mongodb.mongodb_status: + login_host: localhost + login_user: admin + login_port: "{{ mongo_port }}" + login_password: "{{ mongo_admin_password }}" + replica_set: "{{ mongo_replica_set_name }}" + poll: 3 + interval: 10 + register: replica_status + ignore_errors: true + + - name: Debug replica set status + ansible.builtin.debug: + msg: "{{ replica_status }}" + verbosity: 2 + + # Message for non cluster config mode + - name: Fail when there is no cluster reconfiguration options set + ansible.builtin.fail: + msg: "Your mongo cluster is broken or non existent and mongo_configure_cluster is disabled, consider enabling it and fix your cluster. The error: {{ replica_status.msg }}." 
+ when: + - replica_status.failed + +- name: Cluster check when we are in cluster config mode + when: mongo_configure_cluster | bool + block: + # Get the replicaset status and fail on minimal (everything but even number of nodes) + - name: Check replicaset status minimal + community.mongodb.mongodb_status: + login_host: localhost + login_user: admin + login_port: "{{ mongo_port }}" + login_password: "{{ mongo_admin_password }}" + replica_set: "{{ mongo_replica_set_name }}" + poll: 3 + interval: 10 + validate: minimal + register: replica_status_minimal + ignore_errors: true + + # Message for cluster config mode gone wrong + - name: Fail when you misconfigured your replica cluster + ansible.builtin.fail: + msg: "Your mongo cluster is broken, error: {{ replica_status_minimal.msg }}." + when: + - replica_status_minimal.failed + + # Get the replicaset status votes + - name: Check replicaset status votes + community.mongodb.mongodb_status: + login_host: localhost + login_user: admin + login_port: "{{ mongo_port }}" + login_password: "{{ mongo_admin_password }}" + replica_set: "{{ mongo_replica_set_name }}" + poll: 3 + interval: 10 + validate: votes + register: replica_status_votes + ignore_errors: true + + # Message for cluster config mode wrong amount of votes + - name: Fail when you misconfigured your replica cluster + ansible.builtin.fail: + msg: | + Your mongo cluster doesn't have the right amount of members, + perhaps you are adding new nodes one by one, + in that case add the next node to cluster_members and run the play again. + The error message is: {{ replica_status_votes.msg }}." 
+ when: + - replica_status_votes.failed + + # In non cluster config mode we use replica_status + # and here replica_status_votes and replica_status_minimal + # for better error messages, but we need a general replica_status + # for for example users.yml, so lets set it here + - name: Set a value for replica_status + ansible.builtin.set_fact: + replica_status: + failed: False diff --git a/roles/mongo/tasks/generalconfig.yml b/roles/mongo/tasks/generalconfig.yml new file mode 100644 index 000000000..7bc094d4f --- /dev/null +++ b/roles/mongo/tasks/generalconfig.yml @@ -0,0 +1,52 @@ +--- + +- name: Enable and start mongod for the first time + ansible.builtin.service: + name: mongod.service + enabled: true + state: started + +- name: Check if mongodb authentication is activated + ansible.builtin.shell: + cmd: "mongosh 'mongodb://127.0.0.1:{{ mongo_port }}/admin' --eval 'db.runCommand({ usersInfo: 1 })'" + register: mongo_authentication_disabled + changed_when: false + ignore_errors: true + check_mode: false # This can safely run in check mode because it is not changing anything + failed_when: mongo_authentication_disabled.rc > 1 # rc=1 means command failed because authentication is enabled, we need to know that but we don't need to see an error + +- name: Debug mongodb authentication check + ansible.builtin.debug: + msg: "{{ mongo_authentication_disabled }}" + verbosity: 2 + +- name: configure primary or standalone + when: mongo_mode == "standalone" or mongo_replication_role == "primary" + block: + # first run add admin user without logging in + - name: Add the admin user + community.mongodb.mongodb_user: + login_database: admin + database: admin + name: admin + password: "{{ mongo_admin_password }}" + login_port: "{{ mongo_port }}" + roles: root + state: present + # todo enable no_log: true + when: mongo_authentication_disabled.rc == 0 + +# Config for standalone and replication server +- name: Install mongodb.conf file + ansible.builtin.template: + src: 
"mongod.conf.j2" + dest: "/etc/mongod.conf" + owner: root + group: root + mode: "0644" + backup: true + notify: Restart mongod + +# restart mongo right away with authentication enabled +- name: Flush handlers + ansible.builtin.meta: flush_handlers \ No newline at end of file diff --git a/roles/mongo/tasks/install.yml b/roles/mongo/tasks/install.yml index 673d465e3..9103a2ca8 100644 --- a/roles/mongo/tasks/install.yml +++ b/roles/mongo/tasks/install.yml @@ -1,6 +1,5 @@ --- - name: Create the repository for mongodb - when: ansible_os_family == 'RedHat' ansible.builtin.template: src: "mongo.repo.j2" dest: "/etc/yum.repos.d/mongo.repo" @@ -8,7 +7,6 @@ mode: "0640" - name: Install the mongodb package and some helper packages - when: ansible_os_family == 'RedHat' ansible.builtin.yum: name: - mongodb-org @@ -17,7 +15,7 @@ - name: Install pymongo ansible.builtin.pip: - name: pymongo + name: pymongo=={{ mongo_pymongo_version }} - name: Install kernel settings script ansible.builtin.copy: @@ -52,17 +50,3 @@ value: 128000 state: present -- name: Install mongodb.conf file - ansible.builtin.template: - src: "mongod.conf.j2" - dest: "/etc/mongod.conf" - owner: root - group: root - mode: "0644" - notify: Restart mongod - -- name: Enable and start mongod - ansible.builtin.service: - name: mongod.service - enabled: true - state: started diff --git a/roles/mongo/tasks/main.yml b/roles/mongo/tasks/main.yml index b0a30b31f..d55d8fa8a 100644 --- a/roles/mongo/tasks/main.yml +++ b/roles/mongo/tasks/main.yml @@ -1,8 +1,26 @@ --- +# Main task file mongo role + +- name: Message for non redhat family servers + when: ansible_facts['os_family'] != 'RedHat' + ansible.builtin.fail: + msg: "Sorry, this role only works on RedHat family servers" + - name: Install and configure mongo on redhat family servers - when: ansible_os_family == 'RedHat' + when: + - ansible_facts['os_family'] == 'RedHat' block: - - name: Use temporarily python3 as remote interpreter, this fixes pymongo + - name: Debug 
standalone or cluster mode + ansible.builtin.debug: + msg: "{{ mongo_mode }}" + verbosity: 2 + + - name: Debug cluster reconfiguration is allowed + ansible.builtin.debug: + msg: "{{ mongo_configure_cluster }}" + verbosity: 2 + + - name: Use temporarily python3 as remote interpreter, this fixes pymongo # todo is this still necessary? ansible.builtin.set_fact: ansible_python_interpreter: "/usr/bin/python3" tags: mongo_users @@ -11,30 +29,47 @@ ansible.builtin.include_tasks: file: install.yml - - ansible.builtin.meta: flush_handlers - - name: Include Certificate tasks ansible.builtin.include_tasks: file: certs.yml - # - name: Include cluster installation tasks - # ansible.builtin.include_tasks: - # file: clusterconfig.yml + - name: Include General config tasks + ansible.builtin.include_tasks: + file: generalconfig.yml + + - name: Include cluster configuration tasks + ansible.builtin.include_tasks: + file: clusterconfig.yml + when: + - mongo_mode == "cluster" + - mongo_configure_cluster | bool # safest option is to set this to false and enable with -e mongo_configure_cluster=true + + - name: Include cluster health check tasks + ansible.builtin.include_tasks: + file: clusterhealthcheck.yml + when: + - mongo_mode == "cluster" + - mongo_replication_role == 'primary' + + - name: Include Service tasks + ansible.builtin.include_tasks: + file: services.yml - name: Include user creation ansible.builtin.include_tasks: file: users.yml + # Cannot add users on a broken cluster + when: > + (mongo_mode == 'cluster' and mongo_replication_role is defined and mongo_replication_role == 'primary' and not replica_status.failed) + or mongo_mode == 'standalone' - name: Include postinstallation tasks ansible.builtin.include_tasks: file: postinstall.yml - - name: Use python2 again as remote interpreter + - name: Use python2 again as remote interpreter on centos 7 ansible.builtin.set_fact: ansible_python_interpreter: "/usr/bin/python" - when: ansible_distribution == 'CentOS' and 
ansible_distribution_major_version == '7' - -- name: Message for non redhat family servers - when: ansible_os_family != 'RedHat' - ansible.builtin.debug: - msg: "Sorry, this role only works on RedHat family servers" + when: + - ansible_facts['distribution'] == 'CentOS' + - ansible_facts['distribution_major_version'] == '7' diff --git a/roles/mongo/tasks/postinstall.yml b/roles/mongo/tasks/postinstall.yml index e474a0b1e..027d05f1c 100644 --- a/roles/mongo/tasks/postinstall.yml +++ b/roles/mongo/tasks/postinstall.yml @@ -15,29 +15,33 @@ group: root mode: "0700" -- name: Install the backup script - ansible.builtin.template: - src: "backup_mongo.pl.j2" - dest: "/usr/local/sbin/backup_mongo.pl" - mode: "0700" - owner: root - group: root - when: mongo_replication_role != 'arbiter' +- name: Configure backup + when: mongo_replication_role is not defined or mongo_replication_role != 'arbiter' + block: + - name: Install the backup script + ansible.builtin.template: + src: "backup_mongo.pl.j2" + dest: "/usr/local/sbin/backup_mongo.pl" + mode: "0700" + owner: root + group: root + - name: Create cron symlink for backup script + ansible.builtin.file: + src: "/usr/local/sbin/backup_mongo.pl" + dest: "/etc/cron.daily/mongodb_backup" + state: link + mode: "0700" + owner: root -- name: Create cron symlink for backup script - ansible.builtin.file: - src: "/usr/local/sbin/backup_mongo.pl" - dest: "/etc/cron.daily/mongodb_backup" - state: link - mode: "0700" - owner: root - when: mongo_replication_role != 'arbiter' +- name: Debug mongo_cluster_members + debug: + msg: "{{ item.host }}" + verbosity: 2 + loop: + "{{ mongo_cluster_members }}" + when: mongo_mode == "cluster" -# TODO: this template gets mongo_servers from -# the inventory, maybe change that to group vars -# this is not on an per app basis. These are mongoservers -# in the same cluster. 
- name: Create mongosh config file ansible.builtin.template: src: mongoshrc.js.j2 diff --git a/roles/mongo/tasks/services.yml b/roles/mongo/tasks/services.yml new file mode 100644 index 000000000..5caa4c54c --- /dev/null +++ b/roles/mongo/tasks/services.yml @@ -0,0 +1,6 @@ +--- +- name: Enable and start mongod + ansible.builtin.service: + name: mongod.service + enabled: true + state: started \ No newline at end of file diff --git a/roles/mongo/tasks/standaloneconfig.yml b/roles/mongo/tasks/standaloneconfig.yml new file mode 100644 index 000000000..a8ec00126 --- /dev/null +++ b/roles/mongo/tasks/standaloneconfig.yml @@ -0,0 +1 @@ +# todo mag weg? \ No newline at end of file diff --git a/roles/mongo/tasks/users.yml b/roles/mongo/tasks/users.yml index a218bac46..5afa45151 100644 --- a/roles/mongo/tasks/users.yml +++ b/roles/mongo/tasks/users.yml @@ -1,13 +1,32 @@ -- name: Create mongo database users # requires pymongo 4+ +- name: Create mongo database users cluster # requires pymongo 4+ + when: + - mongo_mode == "cluster" + - mongo_replication_role == "primary" community.mongodb.mongodb_user: login_database: admin database: "{{ item.db_name }}" login_user: admin - login_password: "{{ mongo_admin_pass }}" + login_password: "{{ mongo_admin_password }}" name: "{{ item.name }}" password: "{{ item.password }}" - roles: readWrite - replica_set: "{{ replica_set_name }}" + roles: "{{ item.role | default('readWrite')}}" + replica_set: "{{ mongo_replica_set_name }}" + no_log: true + run_once: true + with_items: "{{ mongo.users }}" + changed_when: false + tags: mongo_users + +- name: Create mongo database users single server # requires pymongo 4+ + when: mongo_mode != "cluster" + community.mongodb.mongodb_user: + login_database: admin + database: "{{ item.db_name }}" + login_user: admin + login_password: "{{ mongo_admin_password }}" + name: "{{ item.name }}" + password: "{{ item.password }}" + roles: "{{ item.role | default('readWrite') }}" no_log: true run_once: true 
with_items: "{{ mongo.users }}" diff --git a/roles/mongo/templates/mongod.conf.j2 b/roles/mongo/templates/mongod.conf.j2 index f5e990add..ebd13a88f 100644 --- a/roles/mongo/templates/mongod.conf.j2 +++ b/roles/mongo/templates/mongod.conf.j2 @@ -14,10 +14,12 @@ net: allowConnectionsWithoutCertificates: true storage: - dbPath: /var/lib/mongo + dbPath: {{ mongo_data_path }} +{% if mongo_replica_set_name is defined and mongo_mode == "cluster" %} replication: - replSetName: {{ replica_set_name }} + replSetName: {{ mongo_replica_set_name }} +{% endif %} security: authorization: enabled diff --git a/roles/mongo/templates/mongoshrc.js.j2 b/roles/mongo/templates/mongoshrc.js.j2 index 9faf2cdb0..c8b4c92f4 100644 --- a/roles/mongo/templates/mongoshrc.js.j2 +++ b/roles/mongo/templates/mongoshrc.js.j2 @@ -1,2 +1,5 @@ -db = connect("mongodb://admin:{{ mongo_admin_password }}@{% for mongo_server in mongo_servers %}{{ mongo_server }}:{{ mongod_port }}{% if not loop.last %},{% endif %}{% endfor %}?ssl=true&tlsCAFile=/etc/pki/mongo/mongoca.pem") - +{% if mongo_mode == "cluster" %} +db = connect("mongodb://admin:{{ mongo_admin_password }}@{% for mongo_server in mongo_cluster_members %}{{ mongo_server.host }}:{{ mongo_server.port }}{% if not loop.last %},{% endif %}{% endfor %}?ssl=true&tlsCAFile=/etc/pki/mongo/mongoca.pem") +{% else %} +db = connect("mongodb://admin:{{ mongo_admin_password }}@{{ ansible_facts['fqdn'] }}:{{ mongo_port }}?ssl=true&tlsCAFile=/etc/pki/mongo/mongoca.pem") +{% endif %}