diff --git a/.travis.yml b/.travis.yml index 2c97aa7603..3c9b24d3a5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,6 +5,9 @@ dist: trusty rvm: 2.1.9 +before_install: + - rvm @global do gem install bundler -v '< 2.0.0' + matrix: include: - env: SYNTAXCHECK @@ -13,3 +16,5 @@ matrix: - env: SPEC_TESTS script: - bundle exec rake spec + - name: "Databag testing" + script: bundle exec crowbar-validate-databags chef/data_bags/crowbar diff --git a/Gemfile b/Gemfile index 2835337bbf..137f680b63 100644 --- a/Gemfile +++ b/Gemfile @@ -18,6 +18,7 @@ source "https://rubygems.org" group :development do + gem "crowbar-validate-databags" gem "rake", "< 12.0.0" gem "uglifier", "~> 2.7.2" gem "sass", "~> 3.2.19" diff --git a/bin/prepare-mariadb b/bin/prepare-mariadb new file mode 100755 index 0000000000..f04166bdbe --- /dev/null +++ b/bin/prepare-mariadb @@ -0,0 +1,143 @@ +#!/usr/bin/env ruby +# +# Copyright 2018, SUSE +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +$LOAD_PATH.unshift(File.expand_path("../../crowbar_framework/lib", __FILE__)) +$LOAD_PATH.unshift(File.expand_path("../../chef/cookbooks/database/libraries", __FILE__)) + +require "chef" +require "erb" +require "crowbar" + +RECIPE = "recipe[database::pg2mariadb_preparation]" +LOGDIR = "/var/log/crowbar" + +def chef_init + Chef::Config.node_name "crowbar" + Chef::Config.client_key "/opt/dell/crowbar_framework/config/client.pem" + Chef::Config.chef_server_url "http://localhost:4000" +end + +def node_for_role(role) + nodes = [] + Chef::Search::Query.new.search "node", "roles:#{role}" do |n| + nodes << n + end + nodes.sort_by! { |n| n.name } + nodes.empty? ? nil : nodes.first +end + +def mysql_node + node_for_role "mysql-server" +end + +# Select nodes which represent all services that use the database; +# they might be standalone nodes or cluster members. In the simplest +# case there will be only one node used for all services. +def selected_nodes(first_mysql_node) + nodes_and_roles = { + first_mysql_node.name => { + node: first_mysql_node, roles: ["mysql-server"] + } + } + CrowbarDatabaseHelper.roles_using_database.each do |role| + node = node_for_role(role) + next if node.nil? + if nodes_and_roles.include? 
node.name + nodes_and_roles[node.name][:roles] << role + else + nodes_and_roles[node.name] = { node: node, roles: [role] } + end + end + nodes_and_roles +end + +def node_role(node) + Chef::Role.load("crowbar-"+node.name.gsub(".", "_")) +end + +def add_recipe(node) + role = node_role(node) + role.run_list << RECIPE + role.save +end + +def remove_recipe(node) + role = node_role(node) + role.run_list.remove(RECIPE) + role.save +end + +# based on code from crowbar_framework/app/models/node.rb +def run_ssh_cmd(node, cmd, log_suffix = nil) + log_file = "/var/log/crowbar/db-prepare.#{log_suffix}.log" if log_suffix + log_redirect = "> #{log_file} 2>&1" if log_file + start_time = Time.now + args = ["sudo", "-i", "-u", "root", "--", + "ssh", "-o", "ConnectTimeout=10", + "root@#{node.name}", + %("#{cmd.gsub('"', '\\"')} #{log_redirect}") + ].join(" ") + log "Log: #{log_file} on #{node.name}" + Open3.popen2e(args) do |stdin, stdout_and_stderr, wait_thr| + { + stdout_and_stderr: stdout_and_stderr.gets(nil), + exit_code: wait_thr.value.exitstatus, + run_time: Time.now - start_time + } + end +end + +def log(msg) + print "#{msg}\n" +end + +def prepare_node(node, roles) + log "Preparing node #{node.name}" + log "Adding #{RECIPE} to run_list" + add_recipe node + log "Running chef-client on #{node.name}..." + res = run_ssh_cmd(node, "chef-client", "chef-client") + log "Run time: #{res[:run_time]}s" + log "Removing #{RECIPE} from run_list" + remove_recipe node + unless res[:exit_code].zero? + log "ERROR: Chef-client failed with code: #{res[:exit_code]}" + return -2 + end + log "Prepare completed for #{node.name}" + 0 +end + +def main + ret = 0 + chef_init + first_mysql_node = mysql_node + if first_mysql_node.nil? + log "ERROR: MySQL server not found. Please assign mysql-server role " \ + "to some node or cluster and re-apply database proposal." + return -1 + end + selected_nodes(first_mysql_node).values.each do |node_data| + node_ret = prepare_node(node_data[:node], node_data[:roles]) + ret = node_ret if ret.zero? && !node_ret.zero? + log "Summary of used databases: /etc/pg2mysql/databases.yaml on " \ + "#{first_mysql_node.name}" if node_data[:node] == first_mysql_node && ret.zero? 
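# Aside (illustrative sketch, not part of the patch): run_ssh_cmd above
# relies on Open3.popen2e, which merges stdout and stderr into one stream
# and exposes the child's exit status through a waiter thread. The same
# capture pattern in plain Ruby:
#
#   require "open3"
#
#   def capture(cmd)
#     Open3.popen2e(cmd) do |_stdin, out_err, wait_thr|
#       # the block's return value becomes popen2e's return value
#       { output: out_err.read, exit_code: wait_thr.value.exitstatus }
#     end
#   end
#
#   capture("echo hello") #=> { output: "hello\n", exit_code: 0 }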
+ end + return ret +end + +exit(main) diff --git a/chef/cookbooks/aodh/attributes/default.rb b/chef/cookbooks/aodh/attributes/default.rb index 8a151751a0..cd3e89f18e 100644 --- a/chef/cookbooks/aodh/attributes/default.rb +++ b/chef/cookbooks/aodh/attributes/default.rb @@ -55,6 +55,7 @@ default[:aodh][:notifier][:service_name] = notifier_service_name default[:aodh][:listener][:service_name] = listener_service_name default[:aodh][:evaluation_interval] = 600 +default[:aodh][:alarm_history_ttl] = -1 default[:aodh][:debug] = false default[:aodh][:verbose] = false diff --git a/chef/cookbooks/aodh/recipes/aodh.rb b/chef/cookbooks/aodh/recipes/aodh.rb index f39a48aa56..a9f0ad3cdf 100644 --- a/chef/cookbooks/aodh/recipes/aodh.rb +++ b/chef/cookbooks/aodh/recipes/aodh.rb @@ -145,9 +145,6 @@ bind_port = node[:aodh][:api][:port] end -node.normal[:apache][:listen_ports_crowbar] ||= {} -node.normal[:apache][:listen_ports_crowbar][:aodh] = { plain: [bind_port] } - template node[:aodh][:config_file] do source "aodh.conf.erb" owner "root" @@ -163,7 +160,8 @@ database_connection: db_connection, node_hostname: node["hostname"], aodh_ssl: node[:aodh][:ssl], - evaluation_interval: node[:aodh][:evaluation_interval] + evaluation_interval: node[:aodh][:evaluation_interval], + alarm_history_ttl: node[:aodh][:alarm_history_ttl] ) notifies :reload, resources(service: "apache2") end diff --git a/chef/cookbooks/aodh/templates/default/aodh.conf.erb b/chef/cookbooks/aodh/templates/default/aodh.conf.erb index 3c702255cf..dcdec2fbf2 100644 --- a/chef/cookbooks/aodh/templates/default/aodh.conf.erb +++ b/chef/cookbooks/aodh/templates/default/aodh.conf.erb @@ -9,6 +9,7 @@ transport_url = <%= @rabbit_settings[:url] %> workers = <%= [node["cpu"]["total"], 2, 4].sort[1] %> [database] +alarm_history_time_to_live = <%= @alarm_history_ttl %> connection = <%= @database_connection %> [keystone_authtoken] diff --git a/chef/cookbooks/barbican/recipes/api.rb b/chef/cookbooks/barbican/recipes/api.rb index 75bd1d98dd..4aa6d429ad 100644 --- a/chef/cookbooks/barbican/recipes/api.rb +++ b/chef/cookbooks/barbican/recipes/api.rb @@ -112,7 +112,7 @@ action :add_user end -keystone_register "give barbican user access" do +keystone_register "give barbican user access as admin" do protocol keystone_settings["protocol"] insecure keystone_settings["insecure"] host keystone_settings["internal_url_host"] @@ -124,6 +124,94 @@ action :add_access end +keystone_register "add key-manager:service-admin role for barbican" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + role_name "key-manager:service-admin" + action :add_role +end + +keystone_register "give barbican user access as key-manager:service-admin" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + user_name keystone_settings["service_user"] + tenant_name keystone_settings["service_tenant"] + role_name "key-manager:service-admin" + action :add_access +end + +keystone_register "add creator role for barbican" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + role_name "creator" + action :add_role +end + +keystone_register "give barbican user access as creator" do + 
protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + user_name keystone_settings["service_user"] + tenant_name keystone_settings["service_tenant"] + role_name "creator" + action :add_access +end + +keystone_register "add observer role for barbican" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + role_name "observer" + action :add_role +end + +keystone_register "give barbican user access as observer" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + user_name keystone_settings["service_user"] + tenant_name keystone_settings["service_tenant"] + role_name "observer" + action :add_access +end + +keystone_register "add audit role for barbican" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + role_name "audit" + action :add_role +end + +keystone_register "give barbican user access as audit" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + user_name keystone_settings["service_user"] + tenant_name keystone_settings["service_tenant"] + role_name "audit" + action :add_access +end + crowbar_pacemaker_sync_mark "create-barbican_register" if ha_enabled if node[:barbican][:ha][:enabled] diff --git a/chef/cookbooks/ceilometer/attributes/default.rb b/chef/cookbooks/ceilometer/attributes/default.rb index 016bb300dd..410f77b884 100644 --- a/chef/cookbooks/ceilometer/attributes/default.rb +++ b/chef/cookbooks/ceilometer/attributes/default.rb @@ -78,6 +78,7 @@ default[:ceilometer][:ha][:agent_notification][:agent] = "systemd:#{agent_notification_service_name}" default[:ceilometer][:ha][:agent_notification][:op][:monitor][:interval] = "10s" + default[:ceilometer][:ha][:central][:enabled] = false default[:ceilometer][:ha][:central][:agent] = "systemd:#{central_service_name}" default[:ceilometer][:ha][:central][:op][:monitor][:interval] = "10s" @@ -90,3 +91,7 @@ # this establishes which node is used for mongo client connections that # we use to initialize the replica set default[:ceilometer][:ha][:mongodb][:replica_set][:controller] = false + +# Pacemaker ceilometer expirer cronjob link +default[:ceilometer][:ha][:expirer][:cronjob][:agent] = "ocf:heartbeat:symlink" +default[:ceilometer][:ha][:expirer][:cronjob][:op][:monitor][:interval] = "10s" diff --git a/chef/cookbooks/ceilometer/recipes/server.rb b/chef/cookbooks/ceilometer/recipes/server.rb index 6f7df194c6..211ec916eb 100644 --- a/chef/cookbooks/ceilometer/recipes/server.rb +++ b/chef/cookbooks/ceilometer/recipes/server.rb @@ -207,9 +207,6 @@ bind_port = node[:ceilometer][:api][:port] end -node.normal[:apache][:listen_ports_crowbar] ||= {} -node.normal[:apache][:listen_ports_crowbar][:ceilometer] = { plain: [bind_port] } - if ceilometer_protocol == "https" ssl_setup "setting up ssl for ceilometer" do generate_certs node[:ceilometer][:ssl][:generate_certs] diff --git a/chef/cookbooks/ceilometer/recipes/server_ha.rb 
b/chef/cookbooks/ceilometer/recipes/server_ha.rb index df4257ab21..493b55481b 100644 --- a/chef/cookbooks/ceilometer/recipes/server_ha.rb +++ b/chef/cookbooks/ceilometer/recipes/server_ha.rb @@ -23,6 +23,43 @@ action :nothing end.run_action(:create) +# install openstack-ceilometer-collector - the package contains the cron file +# /usr/share/ceilometer/openstack-ceilometer-expirer.cron +package "openstack-ceilometer-collector" + +# setup the expirer cronjob only on a single node to not +# run into DB deadlocks (bsc#1113107) +crowbar_pacemaker_sync_mark "wait-ceilometer_expirer_cron" + +expirer_transaction_objects = [] + +ceilometer_expirer_cron_primitive = "ceilometer-expirer-cron" +pacemaker_primitive ceilometer_expirer_cron_primitive do + agent node[:ceilometer][:ha][:expirer][:cronjob][:agent] + params( + # target is from the RPM package openstack-ceilometer + "target" => "/usr/share/ceilometer/openstack-ceilometer-expirer.cron", + "link" => "/etc/cron.daily/openstack-ceilometer-expirer.cron", + "backup_suffix" => ".orig" + ) + op node[:ceilometer][:ha][:expirer][:cronjob][:op] + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } +end +expirer_transaction_objects << "pacemaker_primitive[#{ceilometer_expirer_cron_primitive}]" + +ceilometer_expirer_cron_loc = openstack_pacemaker_controller_only_location_for ceilometer_expirer_cron_primitive +expirer_transaction_objects << "pacemaker_location[#{ceilometer_expirer_cron_loc}]" + +pacemaker_transaction "ceilometer-expirer cron" do + cib_objects expirer_transaction_objects + # note that this will also automatically start the resources + action :commit_new + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } +end + +crowbar_pacemaker_sync_mark "create-ceilometer_expirer_cron" + if node[:pacemaker][:clone_stateless_services] # Wait for all nodes to reach this point so we know that they will have # all the required packages installed and configuration files updated diff --git a/chef/cookbooks/cinder/recipes/api.rb b/chef/cookbooks/cinder/recipes/api.rb index 867da60c69..f39dba5f30 100644 --- a/chef/cookbooks/cinder/recipes/api.rb +++ b/chef/cookbooks/cinder/recipes/api.rb @@ -31,6 +31,18 @@ my_admin_host = CrowbarHelper.get_host_for_admin_url(node, ha_enabled) my_public_host = CrowbarHelper.get_host_for_public_url(node, node[:cinder][:api][:protocol] == "https", ha_enabled) +if node[:cinder][:api][:protocol] == "https" + ssl_setup "setting up ssl for cinder" do + generate_certs node[:cinder][:ssl][:generate_certs] + certfile node[:cinder][:ssl][:certfile] + keyfile node[:cinder][:ssl][:keyfile] + group node[:cinder][:group] + fqdn node[:fqdn] + cert_required node[:cinder][:ssl][:cert_required] + ca_certs node[:cinder][:ssl][:ca_certs] + end +end + crowbar_pacemaker_sync_mark "wait-cinder_register" register_auth_hash = { user: keystone_settings["admin_user"], diff --git a/chef/cookbooks/cinder/recipes/common.rb b/chef/cookbooks/cinder/recipes/common.rb index a4f6819b2b..896b2c0a94 100644 --- a/chef/cookbooks/cinder/recipes/common.rb +++ b/chef/cookbooks/cinder/recipes/common.rb @@ -84,18 +84,6 @@ node.save if dirty -if node[:cinder][:api][:protocol] == "https" - ssl_setup "setting up ssl for cinder" do - generate_certs node[:cinder][:ssl][:generate_certs] - certfile node[:cinder][:ssl][:certfile] - keyfile node[:cinder][:ssl][:keyfile] - group node[:cinder][:group] - fqdn node[:fqdn] - cert_required node[:cinder][:ssl][:cert_required] - ca_certs node[:cinder][:ssl][:ca_certs] - end -end - availability_zone = nil 
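# Aside (illustrative sketch, not part of the patch): the
# ceilometer-expirer-cron primitive defined in server_ha.rb above manages
# the cron file as a cluster resource through the ocf:heartbeat:symlink
# agent, so the daily expirer runs on exactly one node. Assuming the usual
# crowbar-pacemaker cookbook rendering, it corresponds roughly to this crm
# shell configuration:
#
#   primitive ceilometer-expirer-cron ocf:heartbeat:symlink \
#     params target="/usr/share/ceilometer/openstack-ceilometer-expirer.cron" \
#            link="/etc/cron.daily/openstack-ceilometer-expirer.cron" \
#            backup_suffix=".orig" \
#     op monitor interval=10s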
unless node[:crowbar_wall].nil? or node[:crowbar_wall][:openstack].nil? if node[:crowbar_wall][:openstack][:availability_zone] != "" diff --git a/chef/cookbooks/cinder/templates/default/cinder.conf.erb b/chef/cookbooks/cinder/templates/default/cinder.conf.erb index 3a4decda66..663521a7b9 100644 --- a/chef/cookbooks/cinder/templates/default/cinder.conf.erb +++ b/chef/cookbooks/cinder/templates/default/cinder.conf.erb @@ -12,6 +12,12 @@ wsgi_keep_alive = false state_path = /var/lib/cinder my_ip = <%= node[:cinder][:my_ip] %> +# os_privileged_* values are required for migrations of attached volumes +# See bsc#1079763 +os_privileged_user_name = <%= @keystone_settings['service_user'] %> +os_privileged_user_password = <%= @keystone_settings['service_password'] %> +os_privileged_user_tenant = <%= @keystone_settings['service_tenant'] %> + glance_api_servers = <%= @glance_server_protocol %>://<%= @glance_server_host %>:<%= @glance_server_port %> glance_api_version = 2 <% unless @glance_server_insecure.nil? -%> @@ -307,6 +313,11 @@ lock_path = /var/run/openstack lock_path = /var/run/cinder <% end -%> +<% if @rabbit_settings[:enable_notifications] -%> +[oslo_messaging_notifications] +driver = messaging +<% end -%> + [oslo_messaging_rabbit] rabbit_use_ssl = <%= @rabbit_settings[:use_ssl] %> <% if @rabbit_settings[:cluster] -%> diff --git a/chef/cookbooks/crowbar-openstack/definitions/openstack_pacemaker_drbd_controller_only_location_for.rb b/chef/cookbooks/crowbar-openstack/definitions/openstack_pacemaker_drbd_controller_only_location_for.rb new file mode 100644 index 0000000000..754d590e43 --- /dev/null +++ b/chef/cookbooks/crowbar-openstack/definitions/openstack_pacemaker_drbd_controller_only_location_for.rb @@ -0,0 +1,37 @@ +# +# Copyright 2016, SUSE +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +define :openstack_pacemaker_drbd_controller_only_location_for do + # ensure attributes are set + include_recipe "crowbar-pacemaker::attributes" + + resource = params[:name] + location_name = "l-#{resource}-controller" + + # Make sure drbd nodes are known so that drbd-controller constraint makes sense + location_def = if node[:pacemaker][:drbd].fetch("nodes", []).any? 
+ OpenStackHAHelper.drbd_controller_only_location(location_name, resource) + else + OpenStackHAHelper.controller_only_location(location_name, resource) + end + + pacemaker_location location_name do + definition location_def + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + location_name +end diff --git a/chef/cookbooks/crowbar-openstack/libraries/ha_helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/ha_helpers.rb index 413a515e73..394f47a6a8 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/ha_helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/ha_helpers.rb @@ -40,6 +40,11 @@ def self.controller_only_location_ignoring_upgrade(location, service) "rule 0: OpenStack-role eq controller" end + def self.drbd_controller_only_location(location, service) + "location #{location} #{service} resource-discovery=exclusive " \ + "rule 0: OpenStack-role eq controller and drbd-controller eq true" + end + def self.no_compute_location(location, service) "location #{location} #{service} resource-discovery=exclusive " \ "rule 0: OpenStack-role ne compute" diff --git a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb index 73c76c14ee..03ef06f575 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/helpers.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/helpers.rb @@ -72,23 +72,45 @@ def self.database_settings(node, barclamp) "on behalf of #{barclamp}") end @database_settings = nil + @sql_engine_cache = nil @database_settings_cache_time = node[:ohai_time] end - if @database_settings && @database_settings.include?(instance) - Chef::Log.info("Database server found at #{@database_settings[instance][:address]} [cached]") + if ["mysql", "postgresql"].include? barclamp + sql_engine = barclamp + elsif @sql_engine_cache && @sql_engine_cache.include?(instance) + sql_engine = @sql_engine_cache[instance] + else + db_roles, = Chef::Search::Query.new.search( + :role, + "name:database-config-#{instance}" + ) + db_proposal_role = db_roles.first unless db_roles.empty? + # TODO(jhesketh): What if db_roles is empty here? + sql_engine = db_proposal_role.default_attributes["database"]["sql_engine"] + + @sql_engine_cache ||= {} + @sql_engine_cache[instance] = sql_engine + end + + if @database_settings && @database_settings.include?(instance) && @database_settings[instance].include?(sql_engine) + Chef::Log.info("Database server found at #{@database_settings[instance][sql_engine][:address]} [cached]") else @database_settings ||= Hash.new - database = get_node(node, "database-server", "database", instance) + db_role = if sql_engine == "postgresql" + "database-server" + else + "mysql-server" + end + database = get_node(node, db_role, "database", instance) if database.nil? 
Chef::Log.warn("No database server found!") else - address = CrowbarDatabaseHelper.get_listen_address(database) - backend_name = DatabaseLibrary::Database::Util.get_backend_name(database) + address = CrowbarDatabaseHelper.get_listen_address(database, sql_engine) ssl_opts = {} - if backend_name == "mysql" + if sql_engine == "mysql" ssl_opts = { enabled: database["database"]["mysql"]["ssl"]["enabled"], ca_certs: database["database"]["mysql"]["ssl"]["ca_certs"], @@ -96,13 +118,14 @@ def self.database_settings(node, barclamp) database["database"]["mysql"]["ssl"]["insecure"] } end - @database_settings[instance] = { + @database_settings[instance] ||= {} + @database_settings[instance][sql_engine] = { address: address, - url_scheme: backend_name, - backend_name: backend_name, - provider: DatabaseLibrary::Database::Util.get_database_provider(database), - user_provider: DatabaseLibrary::Database::Util.get_user_provider(database), - privs: DatabaseLibrary::Database::Util.get_default_priviledges(database), + url_scheme: sql_engine, + backend_name: sql_engine, + provider: DatabaseLibrary::Database::Util.get_database_provider(database, sql_engine), + user_provider: DatabaseLibrary::Database::Util.get_user_provider(database, sql_engine), + privs: DatabaseLibrary::Database::Util.get_default_priviledges(database, sql_engine), connection: { host: address, username: "db_maker", @@ -111,11 +134,11 @@ def self.database_settings(node, barclamp) } } - Chef::Log.info("Database server found at #{@database_settings[instance][:address]}") + Chef::Log.info("Database server found at #{@database_settings[instance][sql_engine][:address]}") end end - @database_settings[instance] + @database_settings[instance][sql_engine] end def self.database_connection_string(db_settings, db_auth_attr) @@ -170,6 +193,8 @@ def self.rabbitmq_settings(node, barclamp) else rabbit = rabbits.first + address = CrowbarRabbitmqHelper.get_listen_address(rabbit) + port = if rabbit[:rabbitmq][:ssl][:enabled] rabbit[:rabbitmq][:ssl][:port] else @@ -181,33 +206,36 @@ def self.rabbitmq_settings(node, barclamp) rabbit[:rabbitmq][:ssl][:client_ca_certs] end - single_rabbit_settings = { - # backwards compatible attributes, remove in cloud8? - address: rabbit[:rabbitmq][:address], - port: port, - user: rabbit[:rabbitmq][:user], - password: rabbit[:rabbitmq][:password], - vhost: rabbit[:rabbitmq][:vhost], - # end backwards comatible attrs + common_rabbit_settings = { use_ssl: rabbit[:rabbitmq][:ssl][:enabled], client_ca_certs: client_ca_certs, - url: "rabbit://#{rabbit[:rabbitmq][:user]}:" \ - "#{rabbit[:rabbitmq][:password]}@" \ - "#{rabbit[:rabbitmq][:address]}:#{port}/" \ - "#{rabbit[:rabbitmq][:vhost]}", - trove_url: "rabbit://#{rabbit[:rabbitmq][:trove][:user]}:" \ - "#{rabbit[:rabbitmq][:trove][:password]}@" \ - "#{rabbit[:rabbitmq][:address]}:#{port}/" \ - "#{rabbit[:rabbitmq][:trove][:vhost]}", - cluster: false, - durable_queues: false, - ha_queues: false, - heartbeat_timeout: rabbit[:rabbitmq][:client][:heartbeat_timeout], - pacemaker_resource: "rabbitmq" + enable_notifications: rabbit[:rabbitmq][:client][:enable_notifications], + heartbeat_timeout: rabbit[:rabbitmq][:client][:heartbeat_timeout] } if !rabbit[:rabbitmq][:cluster] - @rabbitmq_settings[instance] = single_rabbit_settings + @rabbitmq_settings[instance] = { + # backwards compatible attributes, remove in cloud8? 
+ address: address, + port: port, + user: rabbit[:rabbitmq][:user], + password: rabbit[:rabbitmq][:password], + vhost: rabbit[:rabbitmq][:vhost], + # end backwards compatible attrs + url: "rabbit://#{rabbit[:rabbitmq][:user]}:" \ + "#{rabbit[:rabbitmq][:password]}@" \ + "#{address}:#{port}/" \ + "#{rabbit[:rabbitmq][:vhost]}", + trove_url: "rabbit://#{rabbit[:rabbitmq][:trove][:user]}:" \ + "#{rabbit[:rabbitmq][:trove][:password]}@" \ + "#{address}:#{port}/" \ + "#{rabbit[:rabbitmq][:trove][:vhost]}", + cluster: false, + durable_queues: false, + ha_queues: false, + pacemaker_resource: "rabbitmq" + }.merge(common_rabbit_settings) + Chef::Log.info("RabbitMQ server found") else # transport_url format: @@ -220,7 +248,7 @@ end url = "#{rabbit[:rabbitmq][:user]}:" url << "#{rabbit[:rabbitmq][:password]}@" - url << "#{rabbit[:rabbitmq][:address]}:#{port}" + url << "#{CrowbarRabbitmqHelper.get_listen_address(rabbit)}:#{port}" url << "/#{rabbit[:rabbitmq][:vhost]}" if rabbit.equal? rabbits.last url.prepend("rabbit://") if rabbit.equal? rabbits.first @@ -236,7 +264,7 @@ url = "#{rabbit[:rabbitmq][:trove][:user]}:" url << "#{rabbit[:rabbitmq][:trove][:password]}@" - url << "#{rabbit[:rabbitmq][:address]}:#{port}" + url << "#{CrowbarRabbitmqHelper.get_listen_address(rabbit)}:#{port}" url << "/#{rabbit[:rabbitmq][:trove][:vhost]}" unless rabbit.equal? rabbits.first url.prepend("rabbit://") if rabbit.equal? rabbits.first @@ -244,16 +272,13 @@ end @rabbitmq_settings[instance] = { - use_ssl: rabbit[:rabbitmq][:ssl][:enabled], - client_ca_certs: client_ca_certs, url: rabbit_hosts.join(","), trove_url: trove_rabbit_hosts.join(","), cluster: true, durable_queues: true, ha_queues: true, - heartbeat_timeout: rabbit[:rabbitmq][:client][:heartbeat_timeout], pacemaker_resource: "ms-rabbitmq" - } + }.merge(common_rabbit_settings) Chef::Log.info("RabbitMQ cluster found") end end @@ -280,8 +305,6 @@ def self.insecure(attributes) use_ssl && attributes["ssl"]["insecure"] end - private - def self.get_node(node, role, barclamp, instance) result = nil @@ -299,12 +322,13 @@ def self.get_node(node, role, barclamp, instance) result end + private + def self.get_nodes(node, role, barclamp, instance) nodes, = Chef::Search::Query.new.search(:node, "roles:#{role} AND " \ "#{barclamp}_config_environment:#{barclamp}-config-#{instance}") nodes end - private_class_method :get_node private_class_method :get_nodes end diff --git a/chef/cookbooks/crowbar-openstack/libraries/provider_ssl_setup.rb b/chef/cookbooks/crowbar-openstack/libraries/provider_ssl_setup.rb index 45f2fe2970..6a1f4ae3a8 100644 --- a/chef/cookbooks/crowbar-openstack/libraries/provider_ssl_setup.rb +++ b/chef/cookbooks/crowbar-openstack/libraries/provider_ssl_setup.rb @@ -72,6 +72,15 @@ def action_setup end # We do not check for existence of keyfile, as the private key is # allowed to be in the certfile + + # If we do not generate the certificate, we need to be sure + # that it is readable by the user. In some configurations we + # need to share the same certificate for multiple services, + # so it needs to be readable by multiple different users and + # groups (for example, if we share the apache certificate + # for Nova and the Dashboard) + _fix_acl @current_resource.certfile, @current_resource.group + _fix_acl @current_resource.keyfile, @current_resource.group end # if generate_certs if @current_resource.cert_required && ! 
::File.size?(@current_resource.ca_certs) @@ -80,6 +89,33 @@ def action_setup raise message end end + + def _fix_acl(certificate, group) + partial = "/" + certificate.split(::File::SEPARATOR).each do |entry| + next if entry.empty? + + partial = ::File.join(partial, entry) + # If the file is readable by all users, and the directory is + # readable and executable (we can list the contents) we can + # avoid an ACL modification + if ::File.world_readable?(partial) + next if ::File.file?(partial) + next if _world_executable?(partial) && ::File.directory?(partial) + end + + mask = if ::File.directory?(partial) + "group:#{group}:r-x" + else + "group:#{group}:r--" + end + system "setfacl -m #{mask} #{partial}" + end + end + + def _world_executable?(path) + ::File.stat(path).mode & 1 == 1 + end end end end diff --git a/chef/cookbooks/crowbar-openstack/metadata.rb b/chef/cookbooks/crowbar-openstack/metadata.rb index 87aee4dcff..6dfa9ae566 100644 --- a/chef/cookbooks/crowbar-openstack/metadata.rb +++ b/chef/cookbooks/crowbar-openstack/metadata.rb @@ -8,3 +8,4 @@ depends "crowbar-pacemaker" depends "database" +depends "rabbitmq" diff --git a/chef/cookbooks/crowbar-openstack/templates/default/vhost-wsgi.conf.erb b/chef/cookbooks/crowbar-openstack/templates/default/vhost-wsgi.conf.erb index 563f2b7ee1..63d0638014 100644 --- a/chef/cookbooks/crowbar-openstack/templates/default/vhost-wsgi.conf.erb +++ b/chef/cookbooks/crowbar-openstack/templates/default/vhost-wsgi.conf.erb @@ -5,6 +5,7 @@ Listen <%= @bind_host %>:<%= @bind_port %> WSGIProcessGroup <%= @process_group %> WSGIScriptAlias / <%= @script_alias %> WSGIApplicationGroup %{GLOBAL} + WSGIScriptReloading Off <% if @pass_authorization %> WSGIPassAuthorization On <% end %> diff --git a/chef/cookbooks/database/attributes/default.rb b/chef/cookbooks/database/attributes/default.rb index 32f3e09f79..0440382476 100644 --- a/chef/cookbooks/database/attributes/default.rb +++ b/chef/cookbooks/database/attributes/default.rb @@ -18,5 +18,7 @@ # # ha -default[:database][:ha][:enabled] = false -default[:database][:ha][:storage][:mode] = nil +default[:database][:postgresql][:ha][:enabled] = false +default[:database][:postgresql][:ha][:storage][:mode] = nil + +default[:database][:mysql][:ha][:enabled] = false diff --git a/chef/cookbooks/database/libraries/crowbar.rb b/chef/cookbooks/database/libraries/crowbar.rb index ed13e9e4fd..2120c3f495 100644 --- a/chef/cookbooks/database/libraries/crowbar.rb +++ b/chef/cookbooks/database/libraries/crowbar.rb @@ -1,10 +1,10 @@ module CrowbarDatabaseHelper - def self.get_ha_vhostname(node) - if node[:database][:ha][:enabled] + def self.get_ha_vhostname(node, sql_engine = node[:database][:sql_engine]) + if node["database"][sql_engine]["ha"]["enabled"] cluster_name = CrowbarPacemakerHelper.cluster_name(node) # Any change in the generation of the vhostname here must be reflected in # apply_role_pre_chef_call of the database barclamp model - if node[:database][:sql_engine] == "postgresql" + if sql_engine == "postgresql" "#{node[:database][:config][:environment].gsub("-config", "")}-#{cluster_name}".tr("_", "-") else "cluster-#{cluster_name}".tr("_", "-") @@ -14,14 +14,112 @@ def self.get_ha_vhostname(node) end end - def self.get_listen_address(node) + def self.get_listen_address(node, sql_engine = node[:database][:sql_engine]) # For SSL we prefer a cluster hostname (for certificate validation) - use_ssl = node[:database][:sql_engine] == "mysql" && node[:database][:mysql][:ssl][:enabled] - if node[:database][:ha][:enabled] - vhostname 
= get_ha_vhostname(node) + use_ssl = sql_engine == "mysql" && node[:database][:mysql][:ssl][:enabled] + if node["database"][sql_engine]["ha"]["enabled"] + vhostname = get_ha_vhostname(node, sql_engine) use_ssl ? "#{vhostname}.#{node[:domain]}" : CrowbarPacemakerHelper.cluster_vip(node, "admin", vhostname) else use_ssl ? node[:fqdn] : Chef::Recipe::Barclamp::Inventory.get_network_by_type(node, "admin").address end end + + def self.roles_using_database + migration_data.keys + end + + def self.role_migration_data(role) + migration_data[role] + end + + def self.migration_data + { + "keystone-server" => { + "barclamp" => "keystone", + "db_sync_cmd" => "keystone-manage --config-dir /etc/keystone/keystone.conf.d/ " \ + "--config-dir <%=db_override_conf%> db_sync" + }, + "glance-server" => { + "barclamp" => "glance", + "db_sync_cmd" => "glance-manage --config-dir /etc/glance/glance.conf.d/ " \ + "--config-dir <%=db_override_conf%> db_sync" + }, + "cinder-controller" => { + "barclamp" => "cinder", + "db_sync_cmd" => "cinder-manage --config-dir /etc/cinder/cinder.conf.d/ " \ + "--config-dir <%=db_override_conf%> db sync" + }, + "manila-server" => { + "barclamp" => "manila", + "db_sync_cmd" => "manila-manage --config-dir /etc/manila/manila.conf.d/ " \ + "--config-dir <%=db_override_conf%> db sync" + }, + "neutron-server" => { + "barclamp" => "neutron", + "db_sync_cmd" => "neutron-db-manage --config-dir /etc/neutron/neutron.conf.d/ " \ + "--config-dir <%=db_override_conf%> upgrade head" + }, + "nova-controller" => { + "barclamp" => "nova", + "db_sync_cmd" => [ + "nova-manage --config-dir /etc/nova/nova.conf.d/ " \ + "--config-dir <%=db_override_conf%> api_db sync", + "nova-manage --config-dir /etc/nova/nova.conf.d/ " \ + "--config-dir <%=db_override_conf%> db sync" + ] + }, + # ec2 is special in that its attributes are part of the nova barclamp + "ec2-api" => { + "barclamp" => "nova", + "db_sync_cmd" => "ec2-api-manage --config-dir /etc/ec2api/ec2api.conf.d/ " \ + "--config-dir <%=db_override_conf%> db_sync" + }, + # django migration tool uses db settings from + # /srv/www/openstack-dashboard/openstack_dashboard/local/local.settings.d/_100_local_settings.py + "horizon-server" => { + "barclamp" => "horizon", + "db_sync_cmd" => "python /srv/www/openstack-dashboard/manage.py migrate --database mysql" + }, + "ceilometer-server" => { + "barclamp" => "ceilometer", + "db_sync_cmd" => "ceilometer-dbsync --config-dir /etc/ceilometer/ceilometer.conf.d/ " \ + "--config-dir <%=db_override_conf%>" + }, + "heat-server" => { + "barclamp" => "heat", + "db_sync_cmd" => "heat-manage --config-dir /etc/heat/heat.conf.d/ " \ + "--config-dir <%=db_override_conf%> db_sync" + }, + "aodh-server" => { + "barclamp" => "aodh", + "db_sync_cmd" => "aodh-dbsync --config-dir /etc/aodh/aodh.conf.d/ " \ + "--config-dir <%=db_override_conf%>" + }, + "barbican-controller" => { + "barclamp" => "barbican", + # this doesn't work because of a bug in barbican-manage handling of oslo_config + # "db_sync_cmd" => "barbican-manage --config-dir /etc/barbican/barbican.conf.d/ " \ + # "--config-dir <%=db_override_conf%> db upgrade" + "db_sync_cmd" => "barbican-manage db upgrade --db-url <%=db_conf_sections['DEFAULT']%>" + }, + "magnum-server" => { + "barclamp" => "magnum", + "db_sync_cmd" => "magnum-db-manage --config-dir /etc/magnum/magnum.conf.d/ " \ + "--config-dir <%=db_override_conf%> upgrade" + }, + "sahara-server" => { + "barclamp" => "sahara", + "db_sync_cmd" => "sahara-db-manage --config-dir /etc/sahara/sahara.conf.d/ " \ + "--config-dir 
<%=db_override_conf%> upgrade head" + }, + "trove-server" => { + "barclamp" => "trove", + "db_sync_cmd" => "trove-manage --config-dir /etc/trove/trove.conf.d/ " \ + "--config-dir <%=db_override_conf%> db_sync" + } + } + end + + private_class_method :migration_data end diff --git a/chef/cookbooks/database/libraries/database_library.rb b/chef/cookbooks/database/libraries/database_library.rb index 2e343db085..8164f42373 100644 --- a/chef/cookbooks/database/libraries/database_library.rb +++ b/chef/cookbooks/database/libraries/database_library.rb @@ -19,8 +19,7 @@ module DatabaseLibrary class Database class Util - def self.get_database_provider(node) - backend = node[:database][:sql_engine] + def self.get_database_provider(node, backend = node[:database][:sql_engine]) db_provider = nil case backend when "postgresql" @@ -33,8 +32,7 @@ def self.get_database_provider(node) db_provider end - def self.get_user_provider(node) - backend = node[:database][:sql_engine] + def self.get_user_provider(node, backend = node[:database][:sql_engine]) db_provider = nil case backend when "postgresql" @@ -51,8 +49,7 @@ def self.get_backend_name(node) node[:database][:sql_engine] end - def self.get_default_priviledges(node) - backend = node[:database][:sql_engine] + def self.get_default_priviledges(node, backend = node[:database][:sql_engine]) privs = nil case backend when "postgresql" diff --git a/chef/cookbooks/database/recipes/pg2mariadb_preparation.rb b/chef/cookbooks/database/recipes/pg2mariadb_preparation.rb new file mode 100644 index 0000000000..73d14ac5e8 --- /dev/null +++ b/chef/cookbooks/database/recipes/pg2mariadb_preparation.rb @@ -0,0 +1,148 @@ +databases = [] +commands = [] +# The "barclamp" parameter doesn't really matter here, we want to use the same +# instance for all databases. +db_settings = CrowbarOpenStackHelper.database_settings(node, "mysql") +psql_settings = CrowbarOpenStackHelper.database_settings(node, "postgresql") +CrowbarDatabaseHelper.roles_using_database.each do |role| + role_migration_data = CrowbarDatabaseHelper.role_migration_data(role) + barclamp = role_migration_data["barclamp"] + + # Find a node with this role even if the recipe was executed from another one + # e.g. one of the database nodes. + role_node = CrowbarOpenStackHelper.get_node(node, role, barclamp, "default") + + # Role not found on any node? Skip it completely. + next if role_node.nil? 
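# Aside (illustrative sketch, not part of the patch): database_connection_string
# composes a SQLAlchemy-style URL from the settings hash and the service's db
# attributes; the helper's exact internals are assumed here, and the host and
# credential values are made up for illustration:
#
#   def sketch_connection_string(settings, db)
#     "#{settings[:url_scheme]}://#{db["user"]}:#{db["password"]}@" \
#       "#{settings[:address]}/#{db["database"]}"
#   end
#   # => e.g. "mysql://glance:secret@cluster-data.example.com/glance"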
+ + db = if role == "ec2-api" + role_node[barclamp]["ec2-api"]["db"] + else + role_node[barclamp]["db"] + end + db_conf_sections = {} + db_connection_key = "connection" + connection = CrowbarOpenStackHelper.database_connection_string(db_settings, db) + databases << { db: db, url: connection } + Chef::Log.info("connection string: #{connection}") + db_conf_sections["database"] = connection + + # The nova-controller role creates more than one database + if role == "nova-controller" + connection = CrowbarOpenStackHelper.database_connection_string(db_settings, + role_node[barclamp]["api_db"]) + databases << { db: role_node[barclamp]["api_db"], url: connection } + Chef::Log.info("connection string: #{connection}") + db_conf_sections["api_database"] = connection + connection = CrowbarOpenStackHelper.database_connection_string(db_settings, + role_node[barclamp]["placement_db"]) + databases << { db: role_node[barclamp]["placement_db"], url: connection } + Chef::Log.info("connection string: #{connection}") + db_conf_sections["placement_database"] = connection + end + # Barbican uses non-standard db config structure + if role == "barbican-controller" + db_conf_sections = { "DEFAULT" => connection } + db_connection_key = "sql_connection" + end + + directory "/etc/pg2mysql/" do + mode 0750 + owner "root" + group "root" + end + + # Remaining part of the loop should only be executed on the controller node with this role + next unless node.roles.include? role + + db_override_conf = "/etc/pg2mysql/#{role}.mariadb-conf.d/" + + cmds = role_migration_data["db_sync_cmd"] + cmds = [cmds] unless cmds.is_a?(Array) + + idx = 0 + cmds.each do |cmd| + suffix = idx.zero? ? "" : "-#{idx}" + log_file = "/var/log/crowbar/db-prepare.#{role}#{suffix}.log" + log_redirect = "> #{log_file} 2>&1" + commands << { cmd: ERB.new("#{cmd} #{log_redirect}").result(binding), role: role + suffix } + idx += 1 + end + + directory db_override_conf do + mode 0750 + owner "root" + group "root" + end + + template "#{db_override_conf}/999-db.conf" do + source "mariadb-override.conf.erb" + mode 0640 + owner "root" + group "root" + variables( + db_conf_sections: db_conf_sections, + db_connection_key: db_connection_key + ) + end +end + +include_recipe "database::client" +include_recipe "#{db_settings[:backend_name]}::client" +include_recipe "#{db_settings[:backend_name]}::python-client" + +databases.each do |dbdata| + db = dbdata[:db] + # fill psql url for databases.yaml + dbdata[:psql_url] = CrowbarOpenStackHelper.database_connection_string(psql_settings, db) + Chef::Log.info("creating database #{db["database"]}") + Chef::Log.info("creating database user #{db["user"]} with password #{db["password"]}") + Chef::Log.info("db settings: #{db_settings.inspect}") + + database "create #{db[:database]} database (pg2my)" do + connection db_settings[:connection] + database_name db[:database] + provider db_settings[:provider] + action :create + end + + database_user "create #{db[:user]} database user (pg2my)" do + connection db_settings[:connection] + username db[:user] + password db[:password] + host "%" + provider db_settings[:user_provider] + action :create + end + + database_user "grant database access for #{db[:user]} database user (pg2my)" do + connection db_settings[:connection] + username db[:user] + password db[:password] + database_name db[:database] + host "%" + privileges db_settings[:privs] + provider db_settings[:user_provider] + require_ssl db_settings[:connection][:ssl][:enabled] + action :grant + end + +end + +commands.each do |command| + 
execute "dbsync-role-#{command[:role]}" do + command command[:cmd] + end +end + +# Write the index only on database node +template "/etc/pg2mysql/databases.yaml" do + source "mariadb-databases.yaml.erb" + mode 0640 + owner "root" + group "root" + variables( + databases: databases + ) + only_if { node.roles.include? "mysql-server" } +end diff --git a/chef/cookbooks/database/recipes/role_database_server.rb b/chef/cookbooks/database/recipes/role_database_server.rb index 65a6817a08..a71952ff35 100644 --- a/chef/cookbooks/database/recipes/role_database_server.rb +++ b/chef/cookbooks/database/recipes/role_database_server.rb @@ -1,5 +1,5 @@ # -# Copyright 2016, SUSE LINUX GmbH +# Copyright 2018, SUSE LINUX GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # - if CrowbarRoleRecipe.node_state_valid_for_role?(node, "database", "database-server") include_recipe "database::crowbar" - include_recipe "database::server" + Chef::Log.info("Running database::server for PostgreSQL") + include_recipe "postgresql::server" end diff --git a/chef/cookbooks/database/recipes/role_mysql_server.rb b/chef/cookbooks/database/recipes/role_mysql_server.rb new file mode 100644 index 0000000000..ef6113fe87 --- /dev/null +++ b/chef/cookbooks/database/recipes/role_mysql_server.rb @@ -0,0 +1,18 @@ +# +# Copyright 2018, SUSE LINUX GmbH +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +if CrowbarRoleRecipe.node_state_valid_for_role?(node, "database", "mysql-server") + include_recipe "mysql::server" +end diff --git a/chef/cookbooks/database/templates/default/mariadb-databases.yaml.erb b/chef/cookbooks/database/templates/default/mariadb-databases.yaml.erb new file mode 100644 index 0000000000..51effc79c7 --- /dev/null +++ b/chef/cookbooks/database/templates/default/mariadb-databases.yaml.erb @@ -0,0 +1,5 @@ +<% @databases.each do |dbdata| -%> +<%= dbdata[:db][:database] %>: + source: <%= dbdata[:psql_url] %> + target: <%= dbdata[:url] %> +<% end %> diff --git a/chef/cookbooks/database/templates/default/mariadb-override.conf.erb b/chef/cookbooks/database/templates/default/mariadb-override.conf.erb new file mode 100644 index 0000000000..e787748c8a --- /dev/null +++ b/chef/cookbooks/database/templates/default/mariadb-override.conf.erb @@ -0,0 +1,5 @@ +<% @db_conf_sections.keys.each do |section| -%> +[<%= section -%>] +<%= @db_connection_key -%> = <%= @db_conf_sections[section] -%> + +<% end %> diff --git a/chef/cookbooks/glance/recipes/api.rb b/chef/cookbooks/glance/recipes/api.rb index ce41cfe2b4..99f1d1f9af 100644 --- a/chef/cookbooks/glance/recipes/api.rb +++ b/chef/cookbooks/glance/recipes/api.rb @@ -95,25 +95,29 @@ # ensure swift tempurl key only if some agent_* drivers are enabled in ironic if !swift_config.empty? && node[:glance][:default_store] == "swift" && \ ironics.any? 
&& ironics.first[:ironic][:enabled_drivers].any? { |d| d.start_with?("agent_") } - swift_command = "swift " + swift_command = "swift" swift_command << (swift_insecure ? " --insecure" : "") env = { "OS_USERNAME" => keystone_settings["service_user"], "OS_PASSWORD" => keystone_settings["service_password"], "OS_PROJECT_NAME" => keystone_settings["service_tenant"], "OS_AUTH_URL" => keystone_settings["public_auth_url"], - "OS_IDENTITY_API_VERSION" => 3 + "OS_IDENTITY_API_VERSION" => "3" } get_tempurl_key = "#{swift_command} stat | grep -m1 'Meta Temp-Url-Key:' | awk '{print $3}'" tempurl_key = Mixlib::ShellOut.new(get_tempurl_key, environment: env).run_command.stdout.chomp # no tempurl key set, set a random one if tempurl_key.empty? + # include the secure_password code + ::Chef::Recipe.send(:include, Opscode::OpenSSL::Password) + tempurl_key = secure_password execute "set-glance-tempurl-key" do command "#{swift_command} post -m 'Temp-Url-Key:#{tempurl_key}'" user node[:glance][:user] group node[:glance][:group] + environment env end end end diff --git a/chef/cookbooks/glance/recipes/ha.rb b/chef/cookbooks/glance/recipes/ha.rb index 90ecf2e08d..120b227539 100644 --- a/chef/cookbooks/glance/recipes/ha.rb +++ b/chef/cookbooks/glance/recipes/ha.rb @@ -29,6 +29,7 @@ port network_settings[:api][:ha_bind_port] use_ssl (node[:glance][:api][:protocol] == "https") servers CrowbarPacemakerHelper.haproxy_servers_for_service(node, "glance", "glance-server", "api") + rate_limit node[:glance][:ha_rate_limit]["glance-api"] action :nothing end.run_action(:create) diff --git a/chef/cookbooks/glance/templates/default/glance-api.conf.erb b/chef/cookbooks/glance/templates/default/glance-api.conf.erb index 047ba4e5f5..47aa247786 100644 --- a/chef/cookbooks/glance/templates/default/glance-api.conf.erb +++ b/chef/cookbooks/glance/templates/default/glance-api.conf.erb @@ -81,8 +81,10 @@ auth_type = password [oslo_concurrency] lock_path = /var/run/glance +<% if @rabbit_settings[:enable_notifications] -%> [oslo_messaging_notifications] driver = messaging +<% end -%> [oslo_messaging_rabbit] <% if @rabbit_settings[:cluster] -%> diff --git a/chef/cookbooks/glance/templates/default/glance-registry.conf.erb b/chef/cookbooks/glance/templates/default/glance-registry.conf.erb index 496f461966..031ab8145f 100644 --- a/chef/cookbooks/glance/templates/default/glance-registry.conf.erb +++ b/chef/cookbooks/glance/templates/default/glance-registry.conf.erb @@ -33,8 +33,10 @@ user_domain_name = <%= @keystone_settings["admin_domain"] %> auth_url = <%= @keystone_settings['admin_auth_url'] %> auth_type = password +<% if @rabbit_settings[:enable_notifications] -%> [oslo_messaging_notifications] driver = messaging +<% end -%> [oslo_messaging_rabbit] <% if @rabbit_settings[:cluster] -%> diff --git a/chef/cookbooks/heat/recipes/server.rb b/chef/cookbooks/heat/recipes/server.rb index b21ff4eeb2..846923b9d2 100644 --- a/chef/cookbooks/heat/recipes/server.rb +++ b/chef/cookbooks/heat/recipes/server.rb @@ -125,6 +125,7 @@ port keystone_settings["admin_port"] auth register_auth_hash action :wakeup + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end keystone_register "register heat user" do @@ -137,6 +138,7 @@ user_password keystone_settings["service_password"] tenant_name keystone_settings["service_tenant"] action :add_user + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end keystone_register "give heat user access" do @@ -149,6 +151,7 @@ tenant_name keystone_settings["service_tenant"] 
role_name "admin" action :add_access + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end keystone_register "add heat stack user role" do @@ -161,6 +164,7 @@ tenant_name keystone_settings["service_tenant"] role_name "heat_stack_user" action :add_role + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end node[:heat][:trusts_delegated_roles].each do |role| @@ -174,6 +178,7 @@ tenant_name keystone_settings["service_tenant"] role_name role action :add_role + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end keystone_register "give admin access to stack owner role #{role}" do @@ -186,6 +191,7 @@ tenant_name keystone_settings["default_tenant"] role_name role action :add_access + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end end @@ -286,6 +292,7 @@ "OS_REGION_NAME" => keystone_settings["endpoint_region"], "OS_IDENTITY_API_VERSION" => "3" }) + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end # Create Heat CloudFormation service @@ -299,6 +306,7 @@ service_type "cloudformation" service_description "Heat CloudFormation Service" action :add_service + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end keystone_register "register heat Cfn endpoint" do @@ -315,6 +323,7 @@ # endpoint_global true # endpoint_enabled true action :add_endpoint_template + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end # Create Heat service @@ -328,6 +337,7 @@ service_type "orchestration" service_description "Heat Service" action :add_service + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end keystone_register "register heat endpoint" do @@ -350,6 +360,7 @@ # endpoint_global true # endpoint_enabled true action :add_endpoint_template + only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } end crowbar_pacemaker_sync_mark "create-heat_register" if ha_enabled diff --git a/chef/cookbooks/horizon/libraries/helper.rb b/chef/cookbooks/horizon/libraries/helper.rb index e502fddc9e..7e33d9f095 100644 --- a/chef/cookbooks/horizon/libraries/helper.rb +++ b/chef/cookbooks/horizon/libraries/helper.rb @@ -19,10 +19,6 @@ def self.monasca_public_host(node) CrowbarHelper.get_host_for_public_url(node, ssl_enabled, ha_enabled) end - def self.monasca_admin_host(node) - CrowbarHelper.get_host_for_admin_url(node, node[:monasca][:ha][:enabled]) - end - def self.api_public_url(node) host = monasca_public_host(node) # SSL is not supported at this moment diff --git a/chef/cookbooks/horizon/recipes/ha.rb b/chef/cookbooks/horizon/recipes/ha.rb index 2de52b7754..ad88be77cc 100644 --- a/chef/cookbooks/horizon/recipes/ha.rb +++ b/chef/cookbooks/horizon/recipes/ha.rb @@ -33,10 +33,21 @@ end.run_action(:create) end +# Once HAProxy is taking care of :80 and :443 we need to remove this +# from Apache realm. 
This requires updating the node information for +# Apache, and maybe the listen.conf file +if node[:apache][:listen_ports].include?("80") || node[:apache][:listen_ports].include?("443") + node.set[:apache][:listen_ports] = [] + node.save + include_recipe "apache2::default" +end + # Wait for all nodes to reach this point so we know that all nodes will have # all the required packages installed before we create the pacemaker # resources -crowbar_pacemaker_sync_mark "sync-horizon_before_ha" +crowbar_pacemaker_sync_mark "sync-horizon_before_ha" do + timeout 150 +end # no wait/create sync mark as it's done in crowbar-pacemaker itself diff --git a/chef/cookbooks/horizon/recipes/monasca_ui.rb b/chef/cookbooks/horizon/recipes/monasca_ui.rb index b56f953e22..141f8478c4 100644 --- a/chef/cookbooks/horizon/recipes/monasca_ui.rb +++ b/chef/cookbooks/horizon/recipes/monasca_ui.rb @@ -14,9 +14,12 @@ keystone_settings = KeystoneHelper.keystone_settings(node, @cookbook_name) monasca_server = node_search_with_cache("roles:monasca-server").first -monasca_master = node_search_with_cache("roles:monasca-master").first -monasca_host = MonascaUiHelper.monasca_admin_host(monasca_server) -grafana_password = monasca_master[:monasca][:master][:database_grafana_password] +if monasca_server.nil? + Chef::Log.warn("No monasca-server found.") + return +end +monasca_cfg = Barclamp::Config.load("openstack", "monasca") +grafana_password = monasca_cfg["master"]["database_grafana_password"] # Used for creating data source grafana_base_url = ::File.join(MonascaUiHelper.dashboard_local_url(node), "/grafana") @@ -26,16 +29,6 @@ ha_enabled = node[:horizon][:ha][:enabled] -if monasca_server.nil? - Chef::Log.warn("No monasca-server found.") - return -end - -if monasca_master.nil? - Chef::Log.warn("No monasca-master found.") - return -end - template "/srv/www/openstack-dashboard/openstack_dashboard/"\ "local/local_settings.d/_80_monasca_ui_settings.py" do source "_80_monasca_ui_settings.py.erb" @@ -57,7 +50,9 @@ template "/etc/grafana/grafana.ini" do source "grafana.ini.erb" variables( - database_host: monasca_host, + database_host: CrowbarHelper.get_host_for_admin_url( + monasca_server, monasca_cfg["ha"]["enabled"] + ), grafana_password: grafana_password ) owner "root" diff --git a/chef/cookbooks/horizon/recipes/server.rb b/chef/cookbooks/horizon/recipes/server.rb index edb8c98f1d..a5e19837c7 100644 --- a/chef/cookbooks/horizon/recipes/server.rb +++ b/chef/cookbooks/horizon/recipes/server.rb @@ -335,6 +335,19 @@ "default-character-set" => "'utf8'" } +mysql_settings = fetch_database_settings "mysql" +if mysql_settings + package "python-mysql" + django_mysql_settings = { + "ENGINE" => "'django.db.backends.mysql'", + "NAME" => "'#{node[:horizon][:db][:database]}'", + "USER" => "'#{node[:horizon][:db][:user]}'", + "PASSWORD" => "'#{node[:horizon][:db][:password]}'", + "HOST" => "'#{mysql_settings[:address]}'", + "default-character-set" => "'utf8'" + } +end + db_ca_certs = database_ssl ? 
db_settings[:connection][:ssl][:ca_certs] : "" glance_insecure = CrowbarOpenStackHelper.insecure(Barclamp::Config.load("openstack", "glance")) @@ -454,6 +467,7 @@ || manila_insecure \ || ceilometer_insecure, db_settings: django_db_settings, + mysql_settings: django_mysql_settings, db_ca_certs: db_ca_certs, enable_lb: neutron_use_lbaas, enable_vpn: neutron_use_vpnaas, @@ -492,32 +506,50 @@ bind_port_ssl = 443 end -node.normal[:apache][:listen_ports_crowbar] ||= {} - -if node[:horizon][:apache][:ssl] - node.normal[:apache][:listen_ports_crowbar][:horizon] = { plain: [bind_port], ssl: [bind_port_ssl] } -else - node.normal[:apache][:listen_ports_crowbar][:horizon] = { plain: [bind_port] } -end - -# we can only include the recipe after having defined the listen_ports_crowbar attribute include_recipe "horizon::ha" if ha_enabled -# Override what the apache2 cookbook does since it enforces the ports -resource = resources(template: "#{node[:apache][:dir]}/ports.conf") -resource.variables({apache_listen_ports: node.normal[:apache][:listen_ports_crowbar].values.map{ |p| p.values }.flatten.uniq.sort}) +# Type 1 synchronizarion. Only one node of the cluser will create the +# certificates that will be transferred to the rest of the nodes +crowbar_pacemaker_sync_mark "wait-horizon_ssl_sync" do + # Generate the certificate is a slow process, can timeout in the + # other nodes of the cluster + timeout 60 * 5 +end if ha_enabled if node[:horizon][:apache][:ssl] && node[:horizon][:apache][:generate_certs] package "apache2-utils" bash "Generate Apache certificate" do code <<-EOH - (umask 377 ; /usr/bin/gensslcert -C openstack-dashboard ) + (umask 377 ; /usr/bin/gensslcert -C openstack-dashboard -n openstack-dashboard) EOH - not_if { File.size?(node[:horizon][:apache][:ssl_crt_file]) } + only_if do + !File.size?(node[:horizon][:apache][:ssl_crt_file]) && ( + !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node)) + end + end + + if ha_enabled && CrowbarPacemakerHelper.is_cluster_founder?(node) + cluster_nodes = CrowbarPacemakerHelper.cluster_nodes(node, "horizon-server") + cluster_nodes.map do |n| + next if node.name == n.name + node_address = Chef::Recipe::Barclamp::Inventory.get_network_by_type(n, "admin").address + bash "Synchronize SSL cetificates" do + code <<-EOH + rsync -a /etc/apache2/ssl.key/ #{node_address}:/etc/apache2/ssl.key/ + rsync -a /etc/apache2/ssl.crt/ #{node_address}:/etc/apache2/ssl.crt/ + rsync -a /etc/apache2/ssl.csr/ #{node_address}:/etc/apache2/ssl.csr/ + rsync -a /srv/www/htdocs/ #{node_address}:/srv/www/htdocs/ + EOH + timeout 120 + ignore_failure true + end + end end end +crowbar_pacemaker_sync_mark "create-horizon_ssl_sync" if ha_enabled + template "#{node[:apache][:dir]}/sites-available/openstack-dashboard.conf" do if node[:platform_family] == "suse" path "#{node[:apache][:dir]}/vhosts.d/openstack-dashboard.conf" diff --git a/chef/cookbooks/horizon/templates/default/local_settings.py.erb b/chef/cookbooks/horizon/templates/default/local_settings.py.erb index ab77b33aee..b14619b449 100644 --- a/chef/cookbooks/horizon/templates/default/local_settings.py.erb +++ b/chef/cookbooks/horizon/templates/default/local_settings.py.erb @@ -253,6 +253,13 @@ DATABASES = { } <% end %> }, +<% if @mysql_settings -%> + 'mysql': { + <% @mysql_settings.sort_by { |key, value| key }.each do |key,value| -%> + '<%= key %>': <%= value %>, + <% end -%> + }, +<% end -%> } SITE_BRANDING = "<%= @site_branding %>" diff --git a/chef/cookbooks/horizon/templates/default/openstack-dashboard.conf.erb 
diff --git a/chef/cookbooks/horizon/templates/default/openstack-dashboard.conf.erb b/chef/cookbooks/horizon/templates/default/openstack-dashboard.conf.erb
index 919cebadeb..4b5b05faf8 100644
--- a/chef/cookbooks/horizon/templates/default/openstack-dashboard.conf.erb
+++ b/chef/cookbooks/horizon/templates/default/openstack-dashboard.conf.erb
@@ -6,6 +6,9 @@
 RewriteEngine On
 RewriteCond %{SERVER_PORT} ^<%= @bind_port %>$
 RewriteRule / https://%{HTTP_HOST}%{REQUEST_URI} [L,R]
+Listen <%= @bind_host %>:<%= @bind_port %>
+Listen <%= @bind_host %>:<%= @bind_port_ssl %>
+
 <VirtualHost *:<%= @bind_port_ssl %>>
 SSLEngine On
 SSLCipherSuite EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH
@@ -17,6 +20,8 @@ RewriteRule / https://%{HTTP_HOST}%{REQUEST_URI} [L,R]
 <% end %>

 <% else %>
+Listen <%= @bind_host %>:<%= @bind_port %>
+
 <VirtualHost *:<%= @bind_port %>>
 <% end %>

 WSGIDaemonProcess horizon user=<%= @user %> group=<%= @group %> processes=3 threads=10 home=<%= @horizon_dir %> display-name=%{GROUP}
diff --git a/chef/cookbooks/horizon/templates/default/openstack-dashboard.logrotate.erb b/chef/cookbooks/horizon/templates/default/openstack-dashboard.logrotate.erb
index 40a2e8c05d..17d863a5e6 100644
--- a/chef/cookbooks/horizon/templates/default/openstack-dashboard.logrotate.erb
+++ b/chef/cookbooks/horizon/templates/default/openstack-dashboard.logrotate.erb
@@ -1,27 +1,21 @@
 /var/log/apache2/openstack-dashboard-access_log {
     compress
+    copytruncate
     dateext
     maxage 365
     rotate 99
     size=+4096k
     notifempty
     missingok
-    create 644 root root
-    postrotate
-        /etc/init.d/apache2 reload
-    endscript
 }

 /var/log/apache2/openstack-dashboard-error_log {
     compress
+    copytruncate
     dateext
     maxage 365
     rotate 99
     size=+1024k
     notifempty
     missingok
-    create 644 root root
-    postrotate
-        /etc/init.d/apache2 reload
-    endscript
 }
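Both vhost templates (the default one above and the SUSE one below) now emit
their own Listen directives instead of relying on the apache2 cookbook's
ports.conf handling, so the recipe only needs to hand the bind address and
ports to the template. Roughly like this (a sketch; the variable names match
this patch, the values shown are illustrative defaults):

    template "#{node[:apache][:dir]}/vhosts.d/openstack-dashboard.conf" do
      source "openstack-dashboard.conf.erb"
      variables(
        bind_host: "*",        # or the address horizon should bind to
        bind_port: 80,
        bind_port_ssl: 443
      )
      notifies :reload, "service[apache2]"
    end
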
diff --git a/chef/cookbooks/horizon/templates/suse/openstack-dashboard.conf.erb b/chef/cookbooks/horizon/templates/suse/openstack-dashboard.conf.erb
index 81dbed809a..6595a61c6c 100644
--- a/chef/cookbooks/horizon/templates/suse/openstack-dashboard.conf.erb
+++ b/chef/cookbooks/horizon/templates/suse/openstack-dashboard.conf.erb
@@ -2,6 +2,8 @@

+Listen <%= @bind_host %>:<%= @bind_port %>
+
 # Redirect non-SSL traffic to SSL
 <VirtualHost *:<%= @bind_port %>>
 RewriteEngine On
@@ -24,6 +26,8 @@
 RewriteRule / https://%1%{REQUEST_URI} [L,R]

+Listen <%= @bind_host %>:<%= @bind_port_ssl %>
+
 <VirtualHost *:<%= @bind_port_ssl %>>
 SSLEngine On
 SSLCipherSuite DEFAULT_SUSE
@@ -31,10 +35,12 @@
 SSLCertificateFile <%= @ssl_crt_file %>
 SSLCertificateKeyFile <%= @ssl_key_file %>
 <% unless @ssl_crt_chain_file.nil? or @ssl_crt_chain_file.empty? %>
-    SSLCertificateChainFile <%= @ssl_crt_chain_file %>
+    SSLCACertificateFile <%= @ssl_crt_chain_file %>
 <% end %>

 <% else %>
+Listen <%= @bind_host %>:<%= @bind_port %>
+
 <VirtualHost *:<%= @bind_port %>>
 <% end %>

 WSGIScriptAlias / <%= @horizon_dir %>/openstack_dashboard/wsgi/django.wsgi
diff --git a/chef/cookbooks/ironic/libraries/helpers.rb b/chef/cookbooks/ironic/libraries/helpers.rb
index d1b3d529ca..1a1c45a863 100644
--- a/chef/cookbooks/ironic/libraries/helpers.rb
+++ b/chef/cookbooks/ironic/libraries/helpers.rb
@@ -39,9 +39,9 @@ def swift_settings(node, glance)

   env = {
     "OS_USERNAME" => glance_keystone_settings["service_user"],
-    "OS_PASSWORD" => keystone_settings["service_password"],
-    "OS_PROJECT_NAME" => keystone_settings["service_tenant"],
-    "OS_AUTH_URL" => auth_url(keystone_settings),
+    "OS_PASSWORD" => glance_keystone_settings["service_password"],
+    "OS_PROJECT_NAME" => glance_keystone_settings["service_tenant"],
+    "OS_AUTH_URL" => auth_url(glance_keystone_settings),
     "OS_IDENTITY_API_VERSION" => "3"
   }
   insecure = swift[:swift][:ssl][:insecure] ? " --insecure" : ""
diff --git a/chef/cookbooks/keystone/providers/fernet.rb b/chef/cookbooks/keystone/providers/fernet.rb
new file mode 100644
index 0000000000..19baee3f34
--- /dev/null
+++ b/chef/cookbooks/keystone/providers/fernet.rb
@@ -0,0 +1,21 @@
+action :setup do
+  execute "keystone-manage fernet_setup" do
+    command "keystone-manage fernet_setup \
+      --keystone-user #{node[:keystone][:user]} \
+      --keystone-group #{node[:keystone][:group]}"
+    action :run
+  end
+end
+
+# attribute :rsync_command, kind_of: String, default: ""
+action :rotate_script do
+  template "/var/lib/keystone/keystone-fernet-rotate" do
+    source "keystone-fernet-rotate.erb"
+    owner "root"
+    group node[:keystone][:group]
+    mode "0750"
+    variables(
+      rsync_command: new_resource.rsync_command
+    )
+  end
+end
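The matching resource declaration for this provider appears further down
(chef/cookbooks/keystone/resources/fernet.rb), and the keystone recipes in this
patch call it roughly like this (a usage sketch; the rsync command line is
illustrative):

    # One-time creation of /etc/keystone/fernet-keys on the founder
    keystone_fernet "keystone-fernet-setup" do
      action :setup
    end

    # Install the rotation script; rsync_command distributes rotated keys
    keystone_fernet "keystone-fernet-rotate" do
      action :rotate_script
      rsync_command "/usr/bin/keystone-fernet-keys-push.sh 192.168.124.82; "
    end
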
diff --git a/chef/cookbooks/keystone/providers/register.rb b/chef/cookbooks/keystone/providers/register.rb
index 4178486c6f..17b3be9738 100644
--- a/chef/cookbooks/keystone/providers/register.rb
+++ b/chef/cookbooks/keystone/providers/register.rb
@@ -27,10 +27,13 @@
   # Lets verify that the service does not exist yet
   count = 0
   error = true
-  while error and count < 50 do
+  loop do
     count = count + 1
     item_id, error = _find_id(http, headers, "fred", path, dir)
-    sleep 1 if error
+    break unless error && count < 50
+    sleep 1
+    next unless new_resource.reissue_token_on_error
+    http, headers = _build_connection(new_resource)
   end

   raise "Failed to validate keystone is wake" if error
@@ -384,9 +387,8 @@
       endpoint_template["endpoint"]["url"] = new_url
       endpoint_template["endpoint"]["endpoint_id"] = endpoints[interface]["id"]
       endpoint_template["endpoint"]["service_id"] = endpoints[interface]["service_id"]
-      resp = http.send_request("PATCH",
-                               "#{path}/#{endpoints[interface]["id"]}",
-                               JSON.generate(endpoint_template), headers)
+      fullpath = "#{path}/#{endpoints[interface]["id"]}"
+      resp = retry_request(http, "PATCH", fullpath, endpoint_template, headers)
       if resp.is_a?(Net::HTTPOK)
         Chef::Log.info("Successfully updated endpoint URL #{interface} #{new_url}")
       else
@@ -404,6 +406,54 @@
   end
 end

+action :update_one_endpoint do
+  http, headers = _build_connection(new_resource)
+
+  path = "/v3/services"
+  dir = "services"
+  my_service_id, error = _find_id(http, headers, new_resource.endpoint_service, path, dir)
+  unless my_service_id
+    msg = "Couldn't find service #{new_resource.endpoint_service} in keystone"
+    _raise_error(nil, msg, "update_endpoint")
+  end
+
+  path = "/v3/endpoints"
+
+  resp = http.request_get(path, headers)
+  if resp.is_a?(Net::HTTPOK)
+    data = JSON.parse(resp.read_body)
+    endpoints = {}
+    data["endpoints"].each do |endpoint|
+      if endpoint["service_id"].to_s == my_service_id.to_s
+        endpoints[endpoint["interface"]] = endpoint
+      end
+    end
+    interface = new_resource.endpoint_interface
+    new_url = new_resource.endpoint_url
+    endpoint_template = {}
+    endpoint_template["endpoint"] = {}
+    endpoint_template["endpoint"]["interface"] = interface
+    endpoint_template["endpoint"]["url"] = new_url
+    endpoint_template["endpoint"]["endpoint_id"] = endpoints[interface]["id"]
+    endpoint_template["endpoint"]["service_id"] = endpoints[interface]["service_id"]
+    fullpath = "#{path}/#{endpoints[interface]["id"]}"
+    resp = retry_request(http, "PATCH", fullpath, endpoint_template, headers)
+    if resp.is_a?(Net::HTTPOK)
+      Chef::Log.info("Successfully updated endpoint URL #{interface} #{new_url}")
+    else
+      Chef::Log.error("Unknown response code: #{resp.code}")
+      new_resource.updated_by_last_action(false)
+      raise "Failed to talk to keystone in update_one_endpoint"
+    end
+  else
+    Chef::Log.error "Unknown response from Keystone Server"
+    Chef::Log.error("Response Code: #{resp.code}")
+    Chef::Log.error("Response Message: #{resp.message}")
+    new_resource.updated_by_last_action(false)
+    raise "Failed to talk to keystone in update_one_endpoint"
+  end
+end
+
 # Return true on success
 private

 def _create_item(http, headers, path, body, name)
@@ -547,15 +597,18 @@ def _get_token(http, user_name, password, tenant = "")
   path = "/v2.0/tokens"
   headers = _build_headers
   body = _build_auth(user_name, password, tenant)
-  resp = http.send_request("POST", path, JSON.generate(body), headers)
-  if resp.is_a?(Net::HTTPCreated) || resp.is_a?(Net::HTTPOK)
-    data = JSON.parse(resp.read_body)
-    data["access"]["token"]["id"]
-  else
+
+  resp = retry_request(http, "POST", path, body, headers)
+  error = !resp.is_a?(Net::HTTPSuccess)
+
+  if error
     Chef::Log.info "Failed to get token for User '#{user_name}' Tenant '#{tenant}'"
     Chef::Log.info "Response Code: #{resp.code}"
     Chef::Log.info "Response Message: #{resp.message}"
     nil
+  else
+    data = JSON.parse(resp.read_body)
+    data["access"]["token"]["id"]
   end
 end

@@ -658,3 +711,14 @@ def endpoint_needs_update(endpoint, new_resource)
     return true
   end
 end
+
+def retry_request(http, method, path, body, headers)
+  resp = nil
+  10.times do |count|
+    resp = http.send_request(method, path, JSON.generate(body), headers)
+    break unless resp.is_a?(Net::HTTPServerError)
+    Chef::Log.debug("Retrying request #{method} #{path} : #{count}")
+    sleep 5
+  end
+  resp
+end
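The new retry_request helper is what lets the PATCH/POST calls above survive a
keystone restart: it retries only on 5xx responses, up to 10 times with a 5
second pause, and returns the last response either way. Standalone usage looks
roughly like this (a sketch; host, token and endpoint id are placeholders):

    require "net/http"
    require "json"

    http = Net::HTTP.new("keystone.example.com", 5000)
    headers = { "Content-Type" => "application/json",
                "X-Auth-Token" => "<token>" }
    body = { "endpoint" => { "interface" => "internal",
                             "url" => "https://keystone.example.com:5000/v3" } }
    resp = retry_request(http, "PATCH", "/v3/endpoints/<endpoint-id>", body, headers)
    raise "endpoint update failed: #{resp.code}" unless resp.is_a?(Net::HTTPOK)
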
diff --git a/chef/cookbooks/keystone/recipes/ha.rb b/chef/cookbooks/keystone/recipes/ha.rb
index e6feae1e5f..7290a0c742 100644
--- a/chef/cookbooks/keystone/recipes/ha.rb
+++ b/chef/cookbooks/keystone/recipes/ha.rb
@@ -31,6 +31,134 @@
   action :nothing
 end.run_action(:create)

+# Configure Keystone token fernet backend provider
+if node[:keystone][:signing][:token_format] == "fernet"
+  template "/usr/bin/keystone-fernet-keys-push.sh" do
+    source "keystone-fernet-keys-push.sh"
+    owner "root"
+    group "root"
+    mode "0755"
+  end
+
+  # Make sure the rsync package is installed
+  package "rsync"
+  crowbar_pacemaker_sync_mark "sync-keystone_install_rsync"
+
+  rsync_command = ""
+  initial_rsync_command = ""
+
+  # can't use CrowbarPacemakerHelper.cluster_nodes() here as it will sometimes not return
+  # nodes which will be added to the cluster in current chef-client run.
+  cluster_nodes = node[:pacemaker][:elements]["pacemaker-cluster-member"]
+  cluster_nodes = cluster_nodes.map { |n| Chef::Node.load(n) }
+  cluster_nodes.sort_by! { |n| n[:hostname] }
+  cluster_nodes.each do |n|
+    next if node.name == n.name
+    node_address = Chef::Recipe::Barclamp::Inventory.get_network_by_type(n, "admin").address
+    node_rsync_command = "/usr/bin/keystone-fernet-keys-push.sh #{node_address}; "
+    rsync_command += node_rsync_command
+    # initial rsync only for (new) nodes which didn't get the keys yet
+    next if n.include?(:keystone) &&
+        n[:keystone].include?(:signing) &&
+        n[:keystone][:signing][:initial_keys_sync]
+    initial_rsync_command += node_rsync_command
+  end
+  raise "No other cluster members found" if rsync_command.empty?
+
+  # Rotate primary key, which is used for new tokens
+  keystone_fernet "keystone-fernet-rotate-ha" do
+    action :rotate_script
+    rsync_command rsync_command
+  end
+
+  crowbar_pacemaker_sync_mark "wait-keystone_fernet_rotate"
+
+  if File.exist?("/etc/keystone/fernet-keys/0")
+    # Mark node to avoid unneeded future rsyncs
+    unless node[:keystone][:signing][:initial_keys_sync]
+      node[:keystone][:signing][:initial_keys_sync] = true
+      node.save
+    end
+  else
+    keystone_fernet "keystone-fernet-setup-ha" do
+      action :setup
+      only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) }
+    end
+  end
+
+  # We would like to propagate fernet keys to all (new) nodes in the cluster
+  execute "propagate fernet keys to all nodes in the cluster" do
+    command initial_rsync_command
+    action :run
+    only_if do
+      CrowbarPacemakerHelper.is_cluster_founder?(node) &&
+        !initial_rsync_command.empty?
+    end
+  end
+
+  service_transaction_objects = []
+
+  keystone_fernet_primitive = "keystone-fernet-rotate"
+  pacemaker_primitive keystone_fernet_primitive do
+    agent node[:keystone][:ha][:fernet][:agent]
+    params(
+      "target" => "/var/lib/keystone/keystone-fernet-rotate",
+      "link" => "/etc/cron.hourly/openstack-keystone-fernet",
+      "backup_suffix" => ".orig"
+    )
+    op node[:keystone][:ha][:fernet][:op]
+    action :update
+    only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) }
+  end
+  service_transaction_objects << "pacemaker_primitive[#{keystone_fernet_primitive}]"
+
+  fernet_rotate_loc = openstack_pacemaker_controller_only_location_for keystone_fernet_primitive
+  service_transaction_objects << "pacemaker_location[#{fernet_rotate_loc}]"
+
+  pacemaker_transaction "keystone-fernet-rotate cron" do
+    cib_objects service_transaction_objects
+    # note that this will also automatically start the resources
+    action :commit_new
+    only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) }
+  end
+
+  crowbar_pacemaker_sync_mark "create-keystone_fernet_rotate"
+end
+
+# note(jtomasiak): We don't need new syncmarks for the fernet-keys-sync part.
+# This is because the deployment and configuration of this feature will be done
+# once during keystone installation and it will not be used until some keystone
+# node is reinstalled. We assume that the time between keystone installation and
+# a possible node reinstallation is long enough to run this safely without
+# syncmarks.
+fernet_resources_action = node[:keystone][:signing][:token_format] == "fernet" ?
:create : :delete + +template "/usr/bin/keystone-fernet-keys-sync.sh" do + source "keystone-fernet-keys-sync.sh" + owner "root" + group "root" + mode "0755" + action fernet_resources_action +end + +# handler scripts are run by hacluster user so sudo configuration is needed +# if the handler needs to rsync to other nodes using root's keys +template "/etc/sudoers.d/keystone-fernet-keys-sync" do + source "hacluster_sudoers.erb" + owner "root" + group "root" + mode "0440" + action fernet_resources_action +end + +# on founder: create/delete pacemaker alert +pacemaker_alert "keystone-fernet-keys-sync" do + handler "/usr/bin/keystone-fernet-keys-sync.sh" + action fernet_resources_action + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } +end + +# Create/update apache resources after fernet keys setup to make sure everything is ready. if node[:keystone][:frontend] == "apache" && node[:pacemaker][:clone_stateless_services] include_recipe "crowbar-pacemaker::apache" @@ -71,26 +199,3 @@ crowbar_pacemaker_sync_mark "create-keystone_ha_resources" end - -template "/usr/bin/keystone-fernet-keys-sync.sh" do - source "keystone-fernet-keys-sync.sh" - owner "root" - group "root" - mode "0755" -end - -# handler scripts are run by hacluster user so sudo configuration is needed -# if the handler needs to rsync to other nodes using root's keys -template "/etc/sudoers.d/keystone-fernet-keys-sync" do - source "hacluster_sudoers.erb" - owner "root" - group "root" - mode "0440" -end - -# on founder: create/delete pacemaker alert -pacemaker_alert "keystone-fernet-keys-sync" do - handler "/usr/bin/keystone-fernet-keys-sync.sh" - action node[:keystone][:signing][:token_format] == "fernet" ? :create : :delete - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } -end diff --git a/chef/cookbooks/keystone/recipes/server.rb b/chef/cookbooks/keystone/recipes/server.rb index d9837b4871..3c1de63629 100644 --- a/chef/cookbooks/keystone/recipes/server.rb +++ b/chef/cookbooks/keystone/recipes/server.rb @@ -48,9 +48,6 @@ bind_service_port = node[:keystone][:api][:service_port] end -node.normal[:apache][:listen_ports_crowbar] ||= {} -node.normal[:apache][:listen_ports_crowbar][:keystone] = { admin: [bind_admin_port], service: [bind_service_port] } - # Ideally this would be called admin_host, but that's already being # misleadingly used to store a value which actually represents the # service bind address. @@ -138,7 +135,9 @@ ssl_enable node[:keystone][:api][:protocol] == "https" ssl_certfile node[:keystone][:ssl][:certfile] ssl_keyfile node[:keystone][:ssl][:keyfile] - ssl_cacert node[:keystone][:ssl][:ca_certs] + ssl_cacert node[:keystone][:ssl][:ca_certs] unless node[:keystone][:ssl][:insecure] + # LDAP backend can be slow.. + timeout 600 end apache_site "keystone-public.conf" do @@ -159,7 +158,9 @@ ssl_enable node[:keystone][:api][:protocol] == "https" ssl_certfile node[:keystone][:ssl][:certfile] ssl_keyfile node[:keystone][:ssl][:keyfile] - ssl_cacert node[:keystone][:ssl][:ca_certs] + ssl_cacert node[:keystone][:ssl][:ca_certs] unless node[:keystone][:ssl][:insecure] + # LDAP backend can be slow.. + timeout 600 end apache_site "keystone-admin.conf" do @@ -234,47 +235,6 @@ password: node[:keystone][:admin][:password], tenant: node[:keystone][:admin][:tenant] } -if node[:keystone].key?(:endpoint) - endpoint_protocol = node[:keystone][:endpoint][:protocol] - endpoint_insecure = node[:keystone][:endpoint][:insecure] - # In order to update keystone's endpoints we need the old internal endpoint. 
- endpoint_port = node[:keystone][:endpoint][:port] -else - endpoint_protocol = node[:keystone][:api][:protocol] - endpoint_insecure = node[:keystone][:ssl][:insecure] - endpoint_port = node[:keystone][:api][:admin_port] -end - -endpoint_host = my_admin_host - -# Update keystone endpoints (in case we switch http/https this will update the -# endpoints to the correct ones). This needs to be done _before_ we switch -# protocols on the keystone api. -keystone_register "update keystone endpoint" do - protocol endpoint_protocol - insecure endpoint_insecure - host endpoint_host - port endpoint_port - auth register_auth_hash - endpoint_service "keystone" - endpoint_region node[:keystone][:api][:region] - endpoint_adminURL KeystoneHelper.admin_auth_url(node, my_admin_host) - endpoint_publicURL KeystoneHelper.public_auth_url(node, my_public_host) - endpoint_internalURL KeystoneHelper.internal_auth_url(node, my_admin_host) - action :update_endpoint - # Do not try to update keystone endpoint during upgrade, when keystone is not running yet - # ("done_os_upgrade" is present when first chef-client run is executed at the end of upgrade) - not_if { node["crowbar_upgrade_step"] == "done_os_upgrade" } - only_if do - node[:keystone][:bootstrap] && - (!ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node)) && - node[:keystone].key?(:endpoint) && - (node[:keystone][:endpoint][:protocol] != node[:keystone][:api][:protocol] || - node[:keystone][:endpoint][:insecure] != node[:keystone][:ssl][:insecure] || - node[:keystone][:endpoint][:port] != node[:keystone][:api][:admin_port]) - end -end - template node[:keystone][:config_file] do source "keystone.conf.erb" owner "root" @@ -387,9 +347,9 @@ ruby_block "synchronize signing keys for founder and remember them for non-HA case" do only_if { (!ha_enabled || (ha_enabled && CrowbarPacemakerHelper.is_cluster_founder?(node))) } block do - ca = File.open("/etc/keystone/ssl/certs/ca.pem", "rb", &:read) - signing_cert = File.open("/etc/keystone/ssl/certs/signing_cert.pem", "rb", &:read) - signing_key = File.open("/etc/keystone/ssl/private/signing_key.pem", "rb", &:read) + ca = File.open(node[:keystone][:signing][:ca_certs], "rb", &:read) + signing_cert = File.open(node[:keystone][:signing][:certfile], "rb", &:read) + signing_key = File.open(node[:keystone][:signing][:keyfile], "rb", &:read) node[:keystone][:certificates] ||= {} node[:keystone][:certificates][:content] ||= {} @@ -416,9 +376,9 @@ ruby_block "synchronize signing keys for non-founder" do only_if { ha_enabled && !CrowbarPacemakerHelper.is_cluster_founder?(node) } block do - ca = File.open("/etc/keystone/ssl/certs/ca.pem", "rb", &:read) - signing_cert = File.open("/etc/keystone/ssl/certs/signing_cert.pem", "rb", &:read) - signing_key = File.open("/etc/keystone/ssl/private/signing_key.pem", "rb", &:read) + ca = File.open(node[:keystone][:signing][:ca_certs], "rb", &:read) + signing_cert = File.open(node[:keystone][:signing][:certfile], "rb", &:read) + signing_key = File.open(node[:keystone][:signing][:keyfile], "rb", &:read) founder = CrowbarPacemakerHelper.cluster_founder(node) @@ -430,19 +390,19 @@ # the code below dirty = false if ca != cluster_ca - File.open("/etc/keystone/ssl/certs/ca.pem", "w") { |f| + File.open(node[:keystone][:signing][:ca_certs], "w") { |f| f.write(cluster_ca) } dirty = true end if signing_cert != cluster_signing_cert - File.open("/etc/keystone/ssl/certs/signing_cert.pem", "w") { |f| + File.open(node[:keystone][:signing][:certfile], "w") { |f| f.write(cluster_signing_cert) } 
dirty = true end if signing_key != cluster_signing_key - File.open("/etc/keystone/ssl/private/signing_key.pem", "w") { |f| + File.open(node[:keystone][:signing][:keyfile], "w") { |f| f.write(cluster_signing_key) } dirty = true @@ -471,100 +431,28 @@ end end -if ha_enabled - include_recipe "keystone::ha" -end - -# Configure Keystone token fernet backend provider -if node[:keystone][:signing][:token_format] == "fernet" - # To be sure that rsync package is installed - package "rsync" - crowbar_pacemaker_sync_mark "sync-keystone_install_rsync" if ha_enabled - - template "/usr/bin/keystone-fernet-keys-push.sh" do - source "keystone-fernet-keys-push.sh" - owner "root" - group "root" - mode "0755" - end - - rsync_command = "" - if ha_enabled - cluster_nodes = CrowbarPacemakerHelper.cluster_nodes(node) - cluster_nodes.map do |n| - next if node.name == n.name - node_address = Chef::Recipe::Barclamp::Inventory.get_network_by_type(n, "admin").address - rsync_command += "/usr/bin/keystone-fernet-keys-push.sh #{node_address}; " - end - raise "No other cluster members found" if rsync_command.empty? - end - +# Configure Keystone token fernet backend provider (non-HA case) +if !ha_enabled && node[:keystone][:signing][:token_format] == "fernet" # Rotate primary key, which is used for new tokens - template "/var/lib/keystone/keystone-fernet-rotate" do - source "keystone-fernet-rotate.erb" - owner "root" - group node[:keystone][:group] - mode "0750" - variables( - rsync_command: rsync_command - ) + keystone_fernet "keystone-fernet-rotate-non-ha" do + action :rotate_script end - unless ha_enabled - link "/etc/cron.hourly/openstack-keystone-fernet" do - to "/var/lib/keystone/keystone-fernet-rotate" - end + link "/etc/cron.hourly/openstack-keystone-fernet" do + to "/var/lib/keystone/keystone-fernet-rotate" end - crowbar_pacemaker_sync_mark "wait-keystone_fernet_rotate" if ha_enabled - unless File.exist?("/etc/keystone/fernet-keys/0") # Setup a key repository for fernet tokens - execute "keystone-manage fernet_setup" do - command "keystone-manage fernet_setup \ - --keystone-user #{node[:keystone][:user]} \ - --keystone-group #{node[:keystone][:group]}" - action :run - only_if { !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) } + keystone_fernet "keystone-fernet-setup-non-ha" do + action :setup end - - # We would like to propagate fernet keys to all nodes in the cluster - execute "propagate fernet keys to all nodes in the cluster" do - command rsync_command - action :run - only_if { ha_enabled && CrowbarPacemakerHelper.is_cluster_founder?(node) } - end - end - - service_transaction_objects = [] - - keystone_fernet_primitive = "keystone-fernet-rotate" - pacemaker_primitive keystone_fernet_primitive do - agent node[:keystone][:ha][:fernet][:agent] - params({ - "target" => "/var/lib/keystone/keystone-fernet-rotate", - "link" => "/etc/cron.hourly/openstack-keystone-fernet", - "backup_suffix" => ".orig" - }) - op node[:keystone][:ha][:fernet][:op] - action :update - only_if { ha_enabled && CrowbarPacemakerHelper.is_cluster_founder?(node) } - end - service_transaction_objects << "pacemaker_primitive[#{keystone_fernet_primitive}]" - - fernet_rotate_loc = openstack_pacemaker_controller_only_location_for keystone_fernet_primitive - service_transaction_objects << "pacemaker_location[#{fernet_rotate_loc}]" - - pacemaker_transaction "keystone-fernet-rotate cron" do - cib_objects service_transaction_objects - # note that this will also automatically start the resources - action :commit_new - only_if { 
ha_enabled && CrowbarPacemakerHelper.is_cluster_founder?(node) }
-  end
-
-  crowbar_pacemaker_sync_mark "create-keystone_fernet_rotate" if ha_enabled
 end

+# This also includes fernet setup for the HA case.
+include_recipe "keystone::ha" if ha_enabled
+
 # Wait for all nodes to reach this point so we know that all nodes will have
 # all the required services correctly configured and running before we create
 # the keystone resources
@@ -574,6 +462,38 @@
 keystone_insecure = node["keystone"]["api"]["protocol"] == "https" &&
     node[:keystone][:ssl][:insecure]

+register_auth_hash = { user: node[:keystone][:admin][:username],
+                       password: node[:keystone][:admin][:password],
+                       tenant: node[:keystone][:admin][:tenant] }
+
+old_password = node[:keystone][:admin][:old_password]
+old_register_auth_hash = register_auth_hash.clone
+old_register_auth_hash[:password] = old_password
+update_admin_password = node[:keystone][:bootstrap] &&
+    (!ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node)) &&
+    old_password && !old_password.empty? &&
+    old_password != node[:keystone][:admin][:password]
+
+keystone_register "update admin password" do
+  protocol node[:keystone][:api][:protocol]
+  insecure keystone_insecure
+  host my_admin_host
+  port node[:keystone][:api][:admin_port]
+  auth old_register_auth_hash
+  user_name node[:keystone][:admin][:username]
+  user_password node[:keystone][:admin][:password]
+  tenant_name node[:keystone][:admin][:tenant]
+  action :add_user
+  only_if { update_admin_password }
+end
+
+ruby_block "backup current admin password on node attributes" do
+  block do
+    node.set[:keystone][:admin][:old_password] = node[:keystone][:admin][:password]
+    node.save
+  end
+end
+
 # Creates admin user, admin role and admin project
 execute "keystone-manage bootstrap" do
   command "keystone-manage bootstrap \
@@ -593,40 +513,6 @@
   end
 end

-register_auth_hash = { user: node[:keystone][:admin][:username],
-                       password: node[:keystone][:admin][:password],
-                       tenant: node[:keystone][:admin][:tenant] }
-
-updated_password = node[:keystone][:admin][:updated_password]
-
-unless updated_password.nil? ||
-    updated_password.empty?
|| - updated_password == node[:keystone][:admin][:password] - - if !ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node) - keystone_register "update admin password" do - protocol node[:keystone][:api][:protocol] - insecure keystone_insecure - host my_admin_host - port node[:keystone][:api][:admin_port] - auth register_auth_hash - user_name node[:keystone][:admin][:username] - user_password updated_password - tenant_name node[:keystone][:admin][:tenant] - action :nothing - end.run_action(:add_user) - end - - ruby_block "update admin password on node attributes" do - block do - node.set[:keystone][:admin][:password] = updated_password - node.save - register_auth_hash[:password] = updated_password - end - action :nothing - end.run_action(:create) -end - # Silly wake-up call - this is a hack; we use retries because the server was # just (re)started, and might not answer on the first try keystone_register "wakeup keystone" do @@ -637,6 +523,7 @@ auth register_auth_hash retries 5 retry_delay 10 + reissue_token_on_error update_admin_password action :wakeup end @@ -772,6 +659,8 @@ crowbar_pacemaker_sync_mark "create-keystone_register" if ha_enabled +include_recipe "keystone::update_endpoint" + keystone_settings = KeystoneHelper.keystone_settings(node, @cookbook_name) template "/root/.openrc" do diff --git a/chef/cookbooks/keystone/recipes/update_endpoint.rb b/chef/cookbooks/keystone/recipes/update_endpoint.rb new file mode 100644 index 0000000000..0375c6f410 --- /dev/null +++ b/chef/cookbooks/keystone/recipes/update_endpoint.rb @@ -0,0 +1,123 @@ +# Copyright 2018 SUSE Linux GmbH +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +ha_enabled = node[:keystone][:ha][:enabled] + +if node[:keystone].key?(:endpoint) + endpoint_protocol = node[:keystone][:endpoint][:protocol] + endpoint_insecure = node[:keystone][:endpoint][:insecure] + endpoint_port = node[:keystone][:endpoint][:port] + + endpoint_changed = endpoint_protocol != node[:keystone][:api][:protocol] || + endpoint_insecure != node[:keystone][:ssl][:insecure] || + endpoint_port != node[:keystone][:api][:admin_port] + + # Will be reset on next chef run + node.default[:keystone][:endpoint_changed] = endpoint_changed + + endpoint_needs_update = endpoint_changed && + node[:keystone][:bootstrap] && + # Do not try to update keystone endpoint during upgrade, when keystone is not + # running yet ("done_os_upgrade" is present when first chef-client run is + # executed at the end of upgrade) + node["crowbar_upgrade_step"] != "done_os_upgrade" +else + endpoint_needs_update = false +end +endpoint_host = CrowbarHelper.get_host_for_admin_url(node, ha_enabled) + +use_ssl = node[:keystone][:api][:protocol] == "https" +public_host = CrowbarHelper.get_host_for_public_url(node, use_ssl, ha_enabled) +register_auth_hash = { user: node[:keystone][:admin][:username], + password: node[:keystone][:admin][:password], + tenant: node[:keystone][:admin][:tenant] } + +# In compile phase, update the internal keystone endpoint if necessary. 
+# Do this before the haproxy and apache configs are updated, otherwise the old +# endpoint will become invalid too early. +keystone_register "update keystone internal endpoint" do + protocol endpoint_protocol + insecure endpoint_insecure + host endpoint_host + port endpoint_port + auth register_auth_hash + endpoint_service "keystone" + endpoint_region node[:keystone][:api][:region] + endpoint_url KeystoneHelper.internal_auth_url(node, endpoint_host) + endpoint_interface "internal" + action :nothing + only_if do + endpoint_needs_update && + (!ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node)) + end +end.run_action(:update_one_endpoint) + +# Update variables for use in converge-phase endpoint updates +endpoint_protocol = node[:keystone][:api][:protocol] +endpoint_insecure = node[:keystone][:ssl][:insecure] +endpoint_port = node[:keystone][:api][:admin_port] + +ruby_block "Prepare haproxy and apache2 for new keystone endpoints" do + block {} + if ha_enabled + notifies :create, resources(template: node[:haproxy][:platform][:config_file]), :immediately + notifies :reload, resources(service: "haproxy"), :immediately + end + notifies :create, resources(ruby_block: "set origin for apache2 restart"), :immediately + notifies :reload, resources(service: "apache2"), :immediately + only_if { endpoint_needs_update } +end + +keystone_register "wakeup keystone after service reload" do + protocol endpoint_protocol + insecure endpoint_insecure + host endpoint_host + port endpoint_port + auth register_auth_hash + retries 10 + retry_delay 10 + action :wakeup +end + +# Wait until all nodes have refreshed haproxy and apache before trying to use +# the new internal endpoint to update the rest of the endpoints +crowbar_pacemaker_sync_mark "sync-keystone_update_endpoints" if ha_enabled + +crowbar_pacemaker_sync_mark "wait-keystone_update_endpoints" if ha_enabled + +# Update keystone endpoints (in case we switch http/https this will update the +# endpoints to the correct ones). This needs to be done _before_ we switch +# protocols on the keystone api. 
+keystone_register "update keystone endpoint" do + protocol endpoint_protocol + insecure endpoint_insecure + host endpoint_host + port endpoint_port + auth register_auth_hash + endpoint_service "keystone" + endpoint_region node[:keystone][:api][:region] + endpoint_adminURL KeystoneHelper.admin_auth_url(node, endpoint_host) + endpoint_publicURL KeystoneHelper.public_auth_url(node, public_host) + endpoint_internalURL KeystoneHelper.internal_auth_url(node, endpoint_host) + action :update_endpoint + # Do not try to update keystone endpoint during upgrade, when keystone is not running yet + # ("done_os_upgrade" is present when first chef-client run is executed at the end of upgrade) + only_if do + endpoint_needs_update && + (!ha_enabled || CrowbarPacemakerHelper.is_cluster_founder?(node)) + end +end + +crowbar_pacemaker_sync_mark "create-keystone_services" if ha_enabled diff --git a/chef/cookbooks/database/recipes/server.rb b/chef/cookbooks/keystone/resources/fernet.rb similarity index 71% rename from chef/cookbooks/database/recipes/server.rb rename to chef/cookbooks/keystone/resources/fernet.rb index 97ea5c1ce0..ba6f434807 100644 --- a/chef/cookbooks/database/recipes/server.rb +++ b/chef/cookbooks/keystone/resources/fernet.rb @@ -1,8 +1,8 @@ # -# Cookbook Name:: database -# Recipe:: server +# Cookbook Name:: keystone +# Resource:: fernet # -# Copyright 2012, SUSE Linux Products GmbH +# Copyright:: 2018, SUSE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,8 +17,7 @@ # limitations under the License. # -backend = node[:database][:sql_engine] +actions :setup, :rotate_script -Chef::Log.info("Running database::server for #{backend}") - -include_recipe "#{backend}::server" +# :rotate_script specific attributes +attribute :rsync_command, kind_of: String, default: "" diff --git a/chef/cookbooks/keystone/resources/register.rb b/chef/cookbooks/keystone/resources/register.rb index dc3a4f6044..ea75eed825 100644 --- a/chef/cookbooks/keystone/resources/register.rb +++ b/chef/cookbooks/keystone/resources/register.rb @@ -18,7 +18,7 @@ # actions :add_service, :add_endpoint_template, :add_tenant, :add_domain, :add_domain_role, :add_user, - :add_role, :add_access, :add_ec2, :wakeup, :update_endpoint + :add_role, :add_access, :add_ec2, :wakeup, :update_endpoint, :update_one_endpoint attribute :protocol, kind_of: String attribute :insecure, kind_of: [TrueClass, FalseClass], default: false @@ -43,6 +43,10 @@ attribute :endpoint_global, default: true attribute :endpoint_enabled, default: true +# :update_one_endpoint specific attributes +attribute :endpoint_interface, kind_of: String +attribute :endpoint_url, kind_of: String + # :add_tenant specific attributes attribute :tenant_name, kind_of: String @@ -64,3 +68,6 @@ # :add_ec2 specific attributes attribute :user_name, kind_of: String attribute :tenant_name, kind_of: String + +# :wakeup specific attributes +attribute :reissue_token_on_error, kind_of: [TrueClass, FalseClass], default: false diff --git a/chef/cookbooks/magnum/recipes/post_install.rb b/chef/cookbooks/magnum/recipes/post_install.rb index e58bed49fb..cbd42694f7 100644 --- a/chef/cookbooks/magnum/recipes/post_install.rb +++ b/chef/cookbooks/magnum/recipes/post_install.rb @@ -60,6 +60,8 @@ --container-format bare --public --property os_distro=opensuse \ #{service_sles_image_name}" not_if "#{openstack_cmd} #{openstack_args_glance} image list -f value -c Name | grep -q #{service_sles_image_name}" + retries 5 + 
retry_delay 10 action :nothing end @@ -67,6 +69,8 @@ command "#{openstack_cmd} #{openstack_args_nova} flavor create --ram 1024 --disk 10 \ --vcpus 1 m1.magnum" not_if "#{openstack_cmd} #{openstack_args_nova} flavor list --all | grep -q m1.magnum" + retries 5 + retry_delay 10 action :nothing end diff --git a/chef/cookbooks/magnum/templates/default/magnum.conf.erb b/chef/cookbooks/magnum/templates/default/magnum.conf.erb index 1fe68357e4..5108220acb 100644 --- a/chef/cookbooks/magnum/templates/default/magnum.conf.erb +++ b/chef/cookbooks/magnum/templates/default/magnum.conf.erb @@ -41,6 +41,8 @@ project_name = <%= @keystone_settings['service_tenant'] %> username = <%= @keystone_settings['service_user'] %> password = <%= @keystone_settings['service_password'] %> insecure = <%= @keystone_settings['insecure'] %> +project_domain_name = <%= @keystone_settings["admin_domain"]%> +user_domain_name = <%= @keystone_settings["admin_domain"] %> [keystone_authtoken] auth_uri = <%= @keystone_settings['public_auth_url'] %> @@ -70,8 +72,10 @@ insecure = <%= @keystone_settings['insecure'] %> [oslo_concurrency] lock_path = /var/run/magnum +<% if @rabbit_settings[:enable_notifications] -%> [oslo_messaging_notifications] driver = messaging +<% end -%> [oslo_messaging_rabbit] <% if @rabbit_settings[:cluster] -%> diff --git a/chef/cookbooks/memcached/templates/default/memcached.sysconfig.erb b/chef/cookbooks/memcached/templates/default/memcached.sysconfig.erb index 8a5451d7f7..917b6c976e 100644 --- a/chef/cookbooks/memcached/templates/default/memcached.sysconfig.erb +++ b/chef/cookbooks/memcached/templates/default/memcached.sysconfig.erb @@ -10,7 +10,7 @@ # # see man 1 memcached for more # -MEMCACHED_PARAMS="<%= @daemonize ? "-d" : "" %> -m <%= @memory %> -l <%= @listen %> -p <%= @port %> -c <%= @max_connections %>" +MEMCACHED_PARAMS="<%= @daemonize ? "-d" : "" %> -U 0 -m <%= @memory %> -l <%= @listen %> -p <%= @port %> -c <%= @max_connections %>" ## Path: Network/WWW/Memcached ## Description: username memcached should run as diff --git a/chef/cookbooks/monasca/recipes/master.rb b/chef/cookbooks/monasca/recipes/master.rb index 9cdf2bfd85..8ebae5285f 100644 --- a/chef/cookbooks/monasca/recipes/master.rb +++ b/chef/cookbooks/monasca/recipes/master.rb @@ -26,7 +26,7 @@ package "ansible" package "monasca-installer" do - notifies :run, "execute[run ansible]", :delayed + notifies :run, "execute[remove lock file]", :immediately end cookbook_file "/etc/ansible/ansible.cfg" do @@ -66,7 +66,7 @@ ansible_ssh_user: "root", keystone_host: keystone_settings["internal_url_host"] ) - notifies :run, "execute[run ansible]", :delayed + notifies :run, "execute[remove lock file]", :immediately end monasca_net_ip = MonascaHelper.get_host_for_monitoring_url(monasca_node) @@ -124,10 +124,11 @@ curator_cron_config: [curator_cron_config].to_yaml.split("\n")[1..-1], curator_excluded_index: curator_excluded_index.to_yaml.split("\n")[1..-1], elasticsearch_repo_dir: node[:monasca][:elasticsearch][:repo_dir].to_yaml.split("\n")[1..-1], + elasticsearch_tunables: node[:monasca][:elasticsearch][:tunables], monitor_libvirt: node[:monasca][:agent][:monitor_libvirt], delegate_role: node[:monasca][:delegate_role] ) - notifies :run, "execute[run ansible]", :delayed + notifies :run, "execute[remove lock file]", :immediately end # This file is used to mark that ansible installer run successfully. @@ -137,15 +138,6 @@ # and monasca-installer. If they change re-execute ansible installer. 
lock_file = "/opt/monasca-installer/.installed" -previous_versions = if Pathname.new(lock_file).file? - File.read(lock_file).gsub(/^$\n/, "") - else - "" - end - -get_versions = "rpm -qa | grep -e crowbar-openstack -e monasca-installer | sort" -actual_versions = IO.popen(get_versions, &:read).gsub(/^$\n/, "") - cookbook_file "/etc/logrotate.d/monasca-installer" do owner "root" group "root" @@ -154,15 +146,26 @@ source "monasca-installer.logrotate" end -ansible_cmd = - "rm -f #{lock_file} " \ - "&& ansible-playbook " \ - "-i monasca-hosts -e '@/opt/monasca-installer/crowbar_vars.yml' " \ - "monasca.yml -vvv >> /var/log/monasca-installer.log 2>&1 " \ - "&& echo '#{actual_versions}' > #{lock_file}" +template "/usr/sbin/run-monasca-installer" do + source "run-monasca-installer.erb" + owner "root" + group "root" + mode "0555" + variables( + lock_file: lock_file + ) + notifies :run, "execute[remove lock file]", :immediately +end + +# Remove lock file. This gets notified if parameters change and ensures the +# version check in run-monasca-installer fails. +execute "remove lock file" do + command "rm -f #{lock_file}" + action :nothing +end execute "run ansible" do - command ansible_cmd - cwd "/opt/monasca-installer" - action :nothing unless actual_versions != previous_versions + command "/usr/sbin/run-monasca-installer 2>&1"\ + " | awk '{ print strftime(\"[%Y-%m-%d %H:%M:%S]\"), $0 }'"\ + " >> /var/log/monasca-installer.log" end diff --git a/chef/cookbooks/monasca/templates/default/crowbar_vars.yml.erb b/chef/cookbooks/monasca/templates/default/crowbar_vars.yml.erb index c331da07b8..edc5f34b8d 100644 --- a/chef/cookbooks/monasca/templates/default/crowbar_vars.yml.erb +++ b/chef/cookbooks/monasca/templates/default/crowbar_vars.yml.erb @@ -58,6 +58,9 @@ monasca_log_api_url: "http://<%= @pub_net_ip %>:<%= @log_api_settings['bind_port memcached_nodes: ["<%= @monasca_net_ip %>:11211"] elasticsearch_nodes: ["<%= @monasca_net_ip %>"] elasticsearch_hosts: <%= @monasca_net_ip %> +<%- @elasticsearch_tunables.each_key do |t| %> +elasticsearch_<%= t %>: <%= @elasticsearch_tunables[t] %> +<%- end %> monasca_api_log_level: <%= @api_settings['log_level'] %> log_api_log_level: <%= @log_api_settings['log_level'] %> diff --git a/chef/cookbooks/monasca/templates/default/run-monasca-installer.erb b/chef/cookbooks/monasca/templates/default/run-monasca-installer.erb new file mode 100644 index 0000000000..19fc3ed235 --- /dev/null +++ b/chef/cookbooks/monasca/templates/default/run-monasca-installer.erb @@ -0,0 +1,23 @@ +#!/bin/sh + +set -e + +actual_versions=$(LC_ALL=C rpm -qa crowbar-openstack openstack-monasca-installer | sort) +previous_versions=$(cat <%= @lock_file %> 2> /dev/null || echo) + +# No need to run if versions match (Crowbar will ensure a mismatch by deleting +# <%= @lock_file %> if any parameters change. +if [ "$actual_versions" = "$previous_versions" ]; then + echo "No package version changes, skipping monasca-installer run" + exit 0 +fi + +cd "/opt/monasca-installer" +rm -f <%= @lock_file %> + /usr/bin/ansible-playbook \ + -i monasca-hosts \ + -e '@/opt/monasca-installer/crowbar_vars.yml' \ + monasca.yml -vvv + +# Record version information to indicate a successful run. 
diff --git a/chef/cookbooks/mysql/attributes/server.rb b/chef/cookbooks/mysql/attributes/server.rb
index 8709dd3782..ff48db8ed2 100644
--- a/chef/cookbooks/mysql/attributes/server.rb
+++ b/chef/cookbooks/mysql/attributes/server.rb
@@ -26,16 +26,27 @@
   default[:database][:mysql][:ebs_vol_size] = 50
 end

-default[:database][:mysql][:tunable][:max_allowed_packet] = "16M"
-default[:database][:mysql][:tunable][:thread_cache_size] = 8
-
 # Ports to bind to when haproxy is used
 default[:mysql][:ha][:ports][:admin_port] = 3306

 # Default operation setting for the galera resource
 # in pacemamker
-default[:mysql][:ha][:op][:monitor][:interval] = "20s"
-default[:mysql][:ha][:op][:monitor][:role] = "Master"
+default[:mysql][:ha][:op][:monitor] = [
+  { interval: "23s" }, { interval: "20s", role: "Master" }
+]
+
+# If needed we can enhance this to set the mariadb version
+# depending on "platform" and "platform_version". But currently
+# this should be enough
+default[:mysql][:mariadb][:version] = "10.2"
+default[:mysql][:galera_packages] = [
+  "galera-3-wsrep-provider",
+  "mariadb-tools",
+  "socat",
+  "galera-python-clustercheck"
+]

-# Let users override this if galera-python-clustercheck is available to them
-default[:mysql][:ha][:clustercheck] = false
+# newer versions need an additional package on SLES
+unless node[:mysql][:mariadb][:version] == "10.1"
+  default[:mysql][:galera_packages] << "mariadb-galera"
+end
diff --git a/chef/cookbooks/mysql/recipes/client.rb b/chef/cookbooks/mysql/recipes/client.rb
index 435a6f295f..7728175d0a 100644
--- a/chef/cookbooks/mysql/recipes/client.rb
+++ b/chef/cookbooks/mysql/recipes/client.rb
@@ -30,7 +30,7 @@
 package "mysql-ruby" do
   package_name value_for_platform_family(
     ["rhel", "fedora"] => "ruby-mysql",
-    "suse" => "ruby#{node["languages"]["ruby"]["version"].to_f}-rubygem-mysql2",
+    "suse" => "ruby2.1-rubygem-mysql2",
     "default" => "libmysql-ruby"
   )
   action :install
diff --git a/chef/cookbooks/mysql/recipes/ha_galera.rb b/chef/cookbooks/mysql/recipes/ha_galera.rb
index 0689ac6028..5f04b62551 100644
--- a/chef/cookbooks/mysql/recipes/ha_galera.rb
+++ b/chef/cookbooks/mysql/recipes/ha_galera.rb
@@ -19,34 +19,39 @@

 resource_agent = "ocf:heartbeat:galera"

-["galera-3-wsrep-provider", "mariadb-tools", "xtrabackup", "socat"].each do |p|
+node[:mysql][:galera_packages].each do |p|
   package p
 end
-package "galera-python-clustercheck" if node[:mysql][:ha][:clustercheck]

-unless node[:database][:galera_bootstrapped]
-  directory "/var/run/mysql/" do
-    owner "mysql"
-    group "root"
-    mode "0755"
-    action :create
-  end
+directory "/var/run/mysql/" do
+  owner "mysql"
+  group "root"
+  mode "0755"
+  action :create
+end

-  execute "mysql_install_db" do
-    command "mysql_install_db"
-    action :run
-  end
+directory "/var/lib/mysql/" do
+  owner "mysql"
+  group "root"
+  mode "0700"
+  action :create
 end

 node_address = Chef::Recipe::Barclamp::Inventory.get_network_by_type(node, "admin").address

 unless node[:database][:galera_bootstrapped]
   if CrowbarPacemakerHelper.is_cluster_founder?(node)
+
+    execute "mysql_install_db" do
+      command "mysql_install_db"
+      action :run
+    end
+
     # To bootstrap for the first time, start galera on one node
     # to set up the seed sst and monitoring users.
- template "temporary bootstrap /etc/my.cnf.d/galera.cnf" do - path "/etc/my.cnf.d/galera.cnf" + template "temporary bootstrap /etc/my.cnf.d/75-galera.cnf" do + path "/etc/my.cnf.d/75-galera.cnf" source "galera.cnf.erb" owner "root" group "mysql" @@ -57,7 +62,10 @@ sstuser_password: "", expire_logs_days: node[:database][:mysql][:expire_logs_days], node_address: node_address, - wsrep_slave_threads: node[:database][:mysql][:wsrep_slave_threads] + wsrep_slave_threads: node[:database][:mysql][:wsrep_slave_threads], + gcs_fc_limit_multiplier: node[:database][:mysql][:gcs_fc_limit_multiplier], + gcs_fc_factor: node[:database][:mysql][:gcs_fc_factor], + wsrep_provider_options_custom: node[:database][:mysql][:wsrep_provider_options_custom].join(";") ) end @@ -72,7 +80,7 @@ # unauthenticated root user is later removed in server.rb after the # bootstraping. Once the cluster has started other nodes will pick up on # the sstuser and we are able to use these credentails. - db_settings = fetch_database_settings + db_settings = fetch_database_settings(@cookbook_name) db_connection = db_settings[:connection].dup db_connection[:host] = "localhost" db_connection[:username] = "root" @@ -127,7 +135,7 @@ cluster_addresses = "gcomm://" + nodes_names.join(",") -template "/etc/my.cnf.d/galera.cnf" do +template "/etc/my.cnf.d/75-galera.cnf" do source "galera.cnf.erb" owner "root" group "mysql" @@ -138,10 +146,39 @@ sstuser_password: node[:database][:mysql][:sstuser_password], expire_logs_days: node[:database][:mysql][:expire_logs_days], node_address: node_address, - wsrep_slave_threads: node[:database][:mysql][:wsrep_slave_threads] + wsrep_slave_threads: node[:database][:mysql][:wsrep_slave_threads], + gcs_fc_limit_multiplier: node[:database][:mysql][:gcs_fc_limit_multiplier], + gcs_fc_factor: node[:database][:mysql][:gcs_fc_factor], + wsrep_provider_options_custom: node[:database][:mysql][:wsrep_provider_options_custom].join(";") + ) +end + +# Configuration files for galera-python-clustercheck +template "/etc/galera-python-clustercheck/galera-python-clustercheck.conf" do + source "galera-python-clustercheck.conf.erb" + owner "galera-python-clustercheck" + group "mysql" + mode "0640" + variables( + node_address: node_address ) end +template "/etc/galera-python-clustercheck/my.cnf" do + source "galera-python-clustercheck-my.cnf.erb" + owner "galera-python-clustercheck" + group "mysql" + mode "0640" + variables( + node_address: node_address + ) +end + +file "/etc/my.cnf.d/galera.cnf" do + action :delete + notifies :restart, "service[mysql]" +end + # Wait for all nodes to reach this point so we know that all nodes will have # all the required packages and configurations installed before we create the # pacemaker resources @@ -156,6 +193,12 @@ revision node[:database]["crowbar-revision"] end +# some of the op attributes are now in the proposal, so we need to merge the +# default attributes and the proposal attributes (that actually completely +# override the default attributes, even the ones not defined in the proposal) +primitive_op = node.default_attrs[:mysql][:ha][:op].to_hash +primitive_op.merge!(node[:database][:mysql][:ha][:op].to_hash) + pacemaker_primitive service_name do agent resource_agent params({ @@ -164,9 +207,9 @@ "check_user" => "monitoring", "socket" => "/var/run/mysql/mysql.sock", "datadir" => node[:database][:mysql][:datadir], - "log" => "/var/log/mysql/mysql_error.log" + "log" => "/var/log/mysql/mysqld.log" }) - op node[:database][:mysql][:ha][:op] + op primitive_op action :update only_if { 
CrowbarPacemakerHelper.is_cluster_founder?(node) } end @@ -212,7 +255,7 @@ block do require "timeout" begin - cmd = "mysql -u '' -N -B " \ + cmd = "mysql -u 'monitoring' -N -B " \ "-e \"SHOW STATUS WHERE Variable_name='wsrep_local_state_comment';\" | cut -f 2" sync_state = "" Timeout.timeout(seconds) do @@ -252,72 +295,54 @@ password \"#{node[:database][:mysql][:server_root_password]}\"" action :run only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - only_if "/usr/bin/mysql -u root -e 'show databases;'" + only_if "/usr/bin/mysql --no-defaults -u root -e 'select (1);'" end crowbar_pacemaker_sync_mark "sync-database_root_password" do revision node[:database]["crowbar-revision"] end -if node[:mysql][:ha][:clustercheck] - # Configuration files for galera-python-clustercheck - template "/etc/galera-python-clustercheck/galera-python-clustercheck.conf" do - source "galera-python-clustercheck.conf.erb" - owner "galera-python-clustercheck" - group "mysql" - mode "0640" - variables( - node_address: node_address - ) - end - - template "/etc/galera-python-clustercheck/my.cnf" do - source "galera-python-clustercheck-my.cnf.erb" - owner "galera-python-clustercheck" - group "mysql" - mode "0640" - variables( - node_address: node_address - ) - end +# Start galera-clustercheck which serves the cluster state as http return codes +# on port 5555 +transaction_objects = [] +service_name = "galera-python-clustercheck" - # Start galera-clustercheck which serves the cluster state as http return codes - # on port 5555 - transaction_objects = [] - service_name = "galera-python-clustercheck" +clustercheck_op = {} +clustercheck_op["monitor"] = {} +clustercheck_op["monitor"]["interval"] = "10s" - pacemaker_primitive service_name do - agent "systemd:#{service_name}" - action :update - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end +pacemaker_primitive service_name do + agent "systemd:#{service_name}" + op clustercheck_op + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } +end - transaction_objects.push("pacemaker_primitive[#{service_name}]") +transaction_objects.push("pacemaker_primitive[#{service_name}]") - clone_name = "cl-#{service_name}" - pacemaker_clone clone_name do - rsc service_name - meta CrowbarPacemakerHelper.clone_meta(node, remote: false) - action :update - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end +clone_name = "cl-#{service_name}" +pacemaker_clone clone_name do + rsc service_name + meta CrowbarPacemakerHelper.clone_meta(node, remote: false) + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } +end - transaction_objects.push("pacemaker_clone[#{clone_name}]") +transaction_objects.push("pacemaker_clone[#{clone_name}]") - clone_location_name = openstack_pacemaker_controller_only_location_for clone_name - transaction_objects << "pacemaker_location[#{clone_location_name}]" +clone_location_name = openstack_pacemaker_controller_only_location_for clone_name +transaction_objects << "pacemaker_location[#{clone_location_name}]" - pacemaker_transaction "clustercheck" do - cib_objects transaction_objects - action :commit_new - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end -end # if node[:mysql][:ha][:clustercheck] +pacemaker_transaction "clustercheck" do + cib_objects transaction_objects + action :commit_new + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } +end include_recipe "crowbar-pacemaker::haproxy" ha_servers = CrowbarPacemakerHelper.haproxy_servers_for_service( - 
node, "mysql", "database-server", "admin_port" + node, "mysql", "mysql-server", "admin_port" ) # Let all nodes but one act as backup (standby) servers. @@ -331,6 +356,8 @@ n["fall"] = 2 # lower the interval checking after first failure is found n["fastinter"] = 1000 + # shutdown connection when backend is marked down + n["on_marked_down_shutdown"] = true end haproxy_loadbalancer "galera" do @@ -339,12 +366,8 @@ mode "tcp" # leave some room for pacemaker health checks max_connections node[:database][:mysql][:max_connections] - 10 - if node[:mysql][:ha][:clustercheck] - options ["httpchk", "clitcpka"] - default_server "port 5555" - else - options ["mysql-check user monitoring", "clitcpka"] - end + options ["httpchk", "clitcpka"] + default_server "port 5555" stick ({ "on" => "dst" }) servers ha_servers action :nothing diff --git a/chef/cookbooks/mysql/recipes/server.rb b/chef/cookbooks/mysql/recipes/server.rb index 1c43f3603f..20f1fc4218 100644 --- a/chef/cookbooks/mysql/recipes/server.rb +++ b/chef/cookbooks/mysql/recipes/server.rb @@ -20,7 +20,7 @@ include_recipe "mysql::client" include_recipe "database::client" -ha_enabled = node[:database][:ha][:enabled] +ha_enabled = node[:database][:mysql][:ha][:enabled] # For Crowbar, we need to set the address to bind - default to admin node. addr = node[:database][:mysql][:bind_address] || "" @@ -91,11 +91,11 @@ node[:database][:mysql][:ssl][:generate_certs] || node[:database][:mysql][:ssl][:insecure]) group "mysql" - fqdn CrowbarDatabaseHelper.get_listen_address(node) + fqdn CrowbarDatabaseHelper.get_listen_address(node, "mysql") end end -template "/etc/my.cnf.d/openstack.cnf" do +template "/etc/my.cnf.d/72-openstack.cnf" do source "my.cnf.erb" owner "root" group "mysql" @@ -103,7 +103,12 @@ notifies :restart, "service[mysql]", :immediately end -template "/etc/my.cnf.d/ssl.cnf" do +file "/etc/my.cnf.d/openstack.cnf" do + action :delete + notifies :restart, "service[mysql]" +end + +template "/etc/my.cnf.d/73-ssl.cnf" do source "ssl.cnf.erb" owner "root" group "mysql" @@ -111,7 +116,12 @@ notifies :restart, "service[mysql]", :immediately end -template "/etc/my.cnf.d/logging.cnf" do +file "/etc/my.cnf.d/ssl.cnf" do + action :delete + notifies :restart, "service[mysql]" +end + +template "/etc/my.cnf.d/71-logging.cnf" do source "logging.cnf.erb" owner "root" group "mysql" @@ -122,7 +132,12 @@ notifies :restart, "service[mysql]", :immediately end -template "/etc/my.cnf.d/tuning.cnf" do +file "/etc/my.cnf.d/logging.cnf" do + action :delete + notifies :restart, "service[mysql]" +end + +template "/etc/my.cnf.d/74-tuning.cnf" do source "tuning.cnf.erb" owner "root" group "mysql" @@ -138,6 +153,11 @@ notifies :restart, "service[mysql]", :immediately end +file "/etc/my.cnf.d/tuning.cnf" do + action :delete + notifies :restart, "service[mysql]" +end + unless Chef::Config[:solo] ruby_block "save node data" do block do @@ -160,10 +180,10 @@ command "/usr/bin/mysqladmin -u root password \"#{server_root_password}\"" action :run not_if { ha_enabled } # password already set as part of the ha bootstrap - only_if "/usr/bin/mysql -u root -e 'show databases;'" + only_if "/usr/bin/mysql --no-defaults -u root -e 'select (1);'" end -db_settings = fetch_database_settings +db_settings = fetch_database_settings(@cookbook_name) db_connection = db_settings[:connection].dup db_connection[:host] = "localhost" db_connection[:username] = "root" @@ -259,3 +279,13 @@ mode "0755" action :create end + +template "/root/.my.cnf" do + source "root-my.cnf.erb" + owner "root" + group "root" + 
mode "0600" + variables( + password: node[:database][:mysql][:server_root_password] + ) +end diff --git a/chef/cookbooks/mysql/templates/default/galera.cnf.erb b/chef/cookbooks/mysql/templates/default/galera.cnf.erb index a8aa1d9ca5..8b282fe786 100644 --- a/chef/cookbooks/mysql/templates/default/galera.cnf.erb +++ b/chef/cookbooks/mysql/templates/default/galera.cnf.erb @@ -3,9 +3,19 @@ wsrep_on = ON wsrep_provider = /usr/lib64/galera-3/libgalera_smm.so wsrep_cluster_address = "<%= @cluster_addresses %>" # values recommended by mysqltuner.pl -wsrep_provider_options = "gmcast.listen_addr=tcp://<%= @node_address %>:4567;gcs.fc_limit = <%= @wsrep_slave_threads * 5 %>;gcs.fc_factor = 0.8" +wsrep_provider_options = "gmcast.listen_addr=tcp://<%= @node_address %>:4567;gcs.fc_limit = <%= @wsrep_slave_threads * @gcs_fc_limit_multiplier %>;gcs.fc_factor = <%= @gcs_fc_factor %>;<%= @wsrep_provider_options_custom %>" wsrep_slave_threads = <%= @wsrep_slave_threads %> +# Maximum number of rows in write set +# "0" (unlimited) is the upstream default, but the default configuration in the +# rpm package overwrites that +wsrep_max_ws_rows=0 + +# Maximum size of write set +# "2147483647" (2GB) is the upstream default, but the default configuration in +# the rpm package overwrites that +wsrep_max_ws_size=2147483647 + # to enable debug level logging, set this to 1 wsrep_debug = 0 @@ -22,5 +32,5 @@ query_cache_type = 0 expire_logs_days = <%= @expire_logs_days %> # SST method -wsrep_sst_method = xtrabackup-v2 +wsrep_sst_method = mariabackup wsrep_sst_auth = <%= @sstuser %>:<%= @sstuser_password %> diff --git a/chef/cookbooks/mysql/templates/default/logging.cnf.erb b/chef/cookbooks/mysql/templates/default/logging.cnf.erb index 7f360264e6..a3ed954fcb 100644 --- a/chef/cookbooks/mysql/templates/default/logging.cnf.erb +++ b/chef/cookbooks/mysql/templates/default/logging.cnf.erb @@ -1,6 +1,4 @@ [mysqld] -log_error=/var/log/mysql/mysql_error.log - <% if @slow_query_logging_enabled -%> slow_query_log = 1 slow_query_log_file = /var/log/mysql/mysql_slow.log diff --git a/chef/cookbooks/mysql/templates/default/my.cnf.erb b/chef/cookbooks/mysql/templates/default/my.cnf.erb index 669d02690d..a659b1b087 100644 --- a/chef/cookbooks/mysql/templates/default/my.cnf.erb +++ b/chef/cookbooks/mysql/templates/default/my.cnf.erb @@ -13,7 +13,3 @@ tmpdir = <%= node[:database][:mysql][:tmpdir] %> # Instead of skip-networking the default is now to listen only on # localhost which is more compatible and is not less secure. 
bind-address = <%= node[:database][:mysql][:bind_address] %> - -[mysqldump] -# FIXME: Remove after MariaDB 10.2.X switch (new default is 16777216) -max_allowed_packet = <%= node[:database][:mysql][:tunable][:max_allowed_packet] %> diff --git a/chef/cookbooks/mysql/templates/default/root-my.cnf.erb b/chef/cookbooks/mysql/templates/default/root-my.cnf.erb new file mode 100644 index 0000000000..714e83417d --- /dev/null +++ b/chef/cookbooks/mysql/templates/default/root-my.cnf.erb @@ -0,0 +1,5 @@ +# Managed by Crowbar +[client] +socket = /var/run/mysql/mysql.sock +user = root +password = <%= @password %> diff --git a/chef/cookbooks/mysql/templates/default/tuning.cnf.erb b/chef/cookbooks/mysql/templates/default/tuning.cnf.erb index f32f63a420..7634b64be6 100644 --- a/chef/cookbooks/mysql/templates/default/tuning.cnf.erb +++ b/chef/cookbooks/mysql/templates/default/tuning.cnf.erb @@ -15,8 +15,3 @@ tmp_table_size = <%= @tmp_table_size %>M max_heap_table_size = <%= @max_heap_table_size %>M skip_name_resolve = 1 - -# FIXME: Remove after MariaDB 10.2.X switch (new default is auto) -thread_cache_size = <%= node[:database][:mysql][:tunable][:thread_cache_size] %> -# FIXME: Remove after MariaDB 10.2.X switch (new default is 16777216) -max_allowed_packet = <%= node[:database][:mysql][:tunable][:max_allowed_packet] %> diff --git a/chef/cookbooks/neutron/attributes/default.rb b/chef/cookbooks/neutron/attributes/default.rb index a56f484dd2..e58ecf78e8 100644 --- a/chef/cookbooks/neutron/attributes/default.rb +++ b/chef/cookbooks/neutron/attributes/default.rb @@ -32,6 +32,9 @@ default[:neutron][:metadata_agent_config_file] = "/etc/neutron/neutron-metadata-agent.conf.d/100-metadata_agent.conf" default[:neutron][:ml2_config_file] = "/etc/neutron/neutron.conf.d/110-ml2.conf" default[:neutron][:nsx_config_file] = "/etc/neutron/neutron.conf.d/110-nsx.conf" +default[:neutron][:ml2_cisco_config_file] = "/etc/neutron/neutron.conf.d/115-ml2_cisco.conf" +default[:neutron][:ml2_cisco_apic_config_file] = "/etc/neutron/neutron.conf.d/115-ml2_cisco_apic.conf" +default[:neutron][:opflex_config_file] = "/etc/opflex-agent-ovs/conf.d/10-opflex-agent-ovs.conf" default[:neutron][:rpc_workers] = 1 default[:neutron][:db][:database] = "neutron" @@ -69,16 +72,26 @@ default[:neutron][:apic][:hosts] = "" default[:neutron][:apic][:username] = "admin" default[:neutron][:apic][:password] = "" +default[:neutron][:apic][:optimized_metadata] = true +default[:neutron][:apic][:optimized_dhcp] = true +default[:neutron][:apic][:opflex] = [{ + pod: "", + nodes: [], + peer_ip: "", + peer_port: "", + encap: "vxlan", + vxlan: { + uplink_iface: "vlan.4093", + uplink_vlan: 4093, + encap_iface: "br-int_vxlan0", + remote_ip: "", + remote_port: 8472 + }, + vlan: { + encap_iface: "" + } +}] -default[:neutron][:apic][:opflex][:peer_ip] = "" -default[:neutron][:apic][:opflex][:peer_port] = 8009 -default[:neutron][:apic][:opflex][:encap] = "vxlan" -default[:neutron][:apic][:opflex][:vxlan][:uplink_iface] = "vlan.4093" -default[:neutron][:apic][:opflex][:vxlan][:uplink_vlan] = 4093 -default[:neutron][:apic][:opflex][:vxlan][:encap_iface] = "br-int_vxlan0" -default[:neutron][:apic][:opflex][:vxlan][:remote_ip] = "" -default[:neutron][:apic][:opflex][:vxlan][:remote_port] = 8472 -default[:neutron][:apic][:opflex][:vlan][:encap_iface] = "" case node[:platform_family] when "suse" @@ -108,6 +121,7 @@ l3_agent_name: "openstack-neutron-l3-agent", l3_agent_pkg: "openstack-neutron-l3-agent", ha_tool_pkg: "openstack-neutron-ha-tool", + l3_ha_pkg: "keepalived", 
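+                        # keepalived provides the VRRP implementation behind neutron's new l3_ha mode;
+                        # network_agents.rb below installs this package only when use_l3_ha is enabled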
hyperv_pkg: "python-networking-hyperv", nsx_pkgs: ["openvswitch-pki", "ruby2.1-rubygem-faraday"], @@ -115,8 +129,8 @@ cisco_apic_pkgs: ["python-apicapi", "python-neutron-ml2-driver-apic"], cisco_apic_gbp_pkgs: ["openstack-neutron-gbp", - "python-gbpclient"], - cisco_opflex_pkgs: ["agent-ovs", + "python-group-based-policy-client"], + cisco_opflex_pkgs: ["opflex-agent", "lldpd", "openstack-neutron-opflex-agent"], infoblox_pkgs: ["python-infoblox-client", @@ -154,14 +168,15 @@ l3_agent_name: "neutron-l3-agent", l3_agent_pkg: "openstack-neutron", ha_tool_pkg: "", + l3_ha_pkg: "", hyperv_pkg: "", nsx_pkgs: [""], cisco_pkgs: ["python-networking-cisco"], cisco_apic_pkgs: ["python-apicapi", "python-neutron-ml2-driver-apic"], cisco_apic_gbp_pkgs: ["openstack-neutron-gbp", - "python-gbpclient"], - cisco_opflex_pkgs: ["agent-ovs", + "python-group-based-policy-client"], + cisco_opflex_pkgs: ["opflex-agent", "lldpd", "neutron-opflex-agent"], infoblox_pkgs: [], @@ -200,6 +215,7 @@ l3_agent_name: "neutron-l3-agent", l3_agent_pkg: "neutron-l3-agent", ha_tool_pkg: "", + l3_ha_pkg: "", hyperv_pkg: "python-networking-hyperv", nsx_pkgs: [""], cisco_pkgs: [""], @@ -258,6 +274,6 @@ default[:neutron][:ha][:neutron_l3_ha_service][:timeouts][:router_migration][:kill] = 120 default[:neutron][:ha][:neutron_l3_ha_service][:hatool][:program] = "/usr/bin/neutron-ha-tool" default[:neutron][:ha][:neutron_l3_ha_service][:hatool][:env] = {} -default[:neutron][:ha][:neutron_l3_ha_service][:seconds_to_sleep_between_checks] = 10 +default[:neutron][:ha][:neutron_l3_ha_service][:seconds_to_sleep_between_checks] = 30 default[:neutron][:ha][:neutron_l3_ha_service][:max_errors_tolerated] = 10 default[:neutron][:ha][:neutron_l3_ha_service][:log_file] = "/var/log/neutron/neutron-l3-ha-service.log" diff --git a/chef/cookbooks/neutron/definitions/neutron_metadata.rb b/chef/cookbooks/neutron/definitions/neutron_metadata.rb index b955fed479..f6d56b9fe4 100644 --- a/chef/cookbooks/neutron/definitions/neutron_metadata.rb +++ b/chef/cookbooks/neutron/definitions/neutron_metadata.rb @@ -99,13 +99,19 @@ use_crowbar_pacemaker_service = \ (neutron_network_ha && node[:pacemaker][:clone_stateless_services]) || nova_compute_ha_enabled + enable_metadata = node.roles.include?("neutron-network") || !neutron[:neutron][:metadata][:force] + # In case of Cisco ACI driver, supervisord takes care of starting up # the metadata agent. service node[:neutron][:platform][:metadata_agent_name] do - action [:enable, :start] - subscribes :restart, resources(template: node[:neutron][:config_file]) - subscribes :restart, resources(template: node[:neutron][:metadata_agent_config_file]) - subscribes :restart, resources(file: "/etc/neutron/metadata_agent.ini") + if enable_metadata + action [:enable, :start] + subscribes :restart, resources(template: node[:neutron][:config_file]) + subscribes :restart, resources(template: node[:neutron][:metadata_agent_config_file]) + subscribes :restart, resources(file: "/etc/neutron/metadata_agent.ini") + else + action [:disable, :stop] + end provider Chef::Provider::CrowbarPacemakerService if use_crowbar_pacemaker_service if nova_compute_ha_enabled supports no_crm_maintenance_mode: true @@ -114,7 +120,11 @@ end end utils_systemd_service_restart node[:neutron][:platform][:metadata_agent_name] do - action use_crowbar_pacemaker_service ? :disable : :enable + if enable_metadata + action use_crowbar_pacemaker_service ? 
:disable : :enable + else + action :disable + end end end end diff --git a/chef/cookbooks/neutron/files/default/neutron-l3-ha-service.rb b/chef/cookbooks/neutron/files/default/neutron-l3-ha-service.rb index 3d3007828e..d8c19d8dcc 100644 --- a/chef/cookbooks/neutron/files/default/neutron-l3-ha-service.rb +++ b/chef/cookbooks/neutron/files/default/neutron-l3-ha-service.rb @@ -334,7 +334,7 @@ def insecure_flag end def status_command - [@options.program, "--l3-agent-check", "--quiet"] + [@options.program, "--l3-agent-check", "--quiet"] + insecure_flag end def migration_command diff --git a/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb b/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb index 261032f78d..7a74d4a9e4 100644 --- a/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb +++ b/chef/cookbooks/neutron/recipes/cisco_apic_agents.rb @@ -57,80 +57,85 @@ end # apply configurations to compute node -if node.roles.include?("nova-compute-kvm") - node[:neutron][:platform][:cisco_opflex_pkgs].each { |p| package p } +node[:neutron][:platform][:cisco_opflex_pkgs].each { |p| package p } - service "lldpd" do - action [:enable, :start] - end - utils_systemd_service_restart "lldpd" - - # include neutron::common_config only now, after we've installed packages - include_recipe "neutron::common_config" - - # Agent configurations for Cisco APIC driver - # The ACI setup for OpenStack releases before Pike use "of_interface" options - # set to "ovs-ofctl". This option has been deprecated in Pike and removed - # from this config file for Pike. It is still included in Newton (Cloud7) - agent_config_path = "/etc/neutron/plugins/ml2/openvswitch_agent.ini" - template agent_config_path do - cookbook "neutron" - source "openvswitch_agent.ini.erb" - owner "root" - group node[:neutron][:platform][:group] - mode "0640" - variables( - ml2_type_drivers: ml2_type_drivers, - ml2_mech_drivers: ml2_mech_drivers, - tunnel_types: "", - enable_tunneling: false, - use_l2pop: false, - dvr_enabled: false, - of_interface: "ovs-ofctl", - ovsdb_interface: neutron[:neutron][:ovs][:ovsdb_interface], - bridge_mappings: "" - ) - end +service "lldpd" do + action [:enable, :start] +end +utils_systemd_service_restart "lldpd" + +# include neutron::common_config only now, after we've installed packages +include_recipe "neutron::common_config" + +# Agent configurations for Cisco APIC driver +# The ACI setup for OpenStack releases before Pike use "of_interface" options +# set to "ovs-ofctl". This option has been deprecated in Pike and removed +# from this config file for Pike. 
It is still included in Newton (Cloud7) +agent_config_path = "/etc/neutron/plugins/ml2/openvswitch_agent.ini" +template agent_config_path do + cookbook "neutron" + source "openvswitch_agent.ini.erb" + owner "root" + group node[:neutron][:platform][:group] + mode "0640" + variables( + ml2_type_drivers: ml2_type_drivers, + ml2_mech_drivers: ml2_mech_drivers, + tunnel_types: "", + enable_tunneling: false, + use_l2pop: false, + dvr_enabled: false, + of_interface: "ovs-ofctl", + ovsdb_interface: neutron[:neutron][:ovs][:ovsdb_interface], + bridge_mappings: "" + ) +end - # Update config file from template - opflex_agent_conf = "/etc/opflex-agent-ovs/conf.d/10-opflex-agent-ovs.conf" - template opflex_agent_conf do - cookbook "neutron" - source "10-opflex-agent-ovs.conf.erb" - mode "0755" - owner "root" - group neutron[:neutron][:platform][:group] - variables( - opflex_apic_domain_name: neutron[:neutron][:apic][:system_id], - hostname: node[:hostname], - socketgroup: neutron[:neutron][:platform][:group], - opflex_peer_ip: neutron[:neutron][:apic][:opflex][:peer_ip], - opflex_peer_port: neutron[:neutron][:apic][:opflex][:peer_port], - opflex_vxlan_encap_iface: neutron[:neutron][:apic][:opflex][:vxlan][:encap_iface], - opflex_vxlan_uplink_iface: neutron[:neutron][:apic][:opflex][:vxlan][:uplink_iface], - opflex_vxlan_uplink_vlan: neutron[:neutron][:apic][:opflex][:vxlan][:uplink_vlan], - opflex_vxlan_remote_ip: neutron[:neutron][:apic][:opflex][:vxlan][:remote_ip], - opflex_vxlan_remote_port: neutron[:neutron][:apic][:opflex][:vxlan][:remote_port], - # TODO(mmnelemane) : update VLAN encapsulation config when it works. - # Currently set to VXLAN by default but can be modified from proposal. - ml2_type_drivers: ml2_type_drivers - ) - end +# Update config file from template +apic = neutron[:neutron][:apic] +opflex_list = apic[:opflex].select { |i| i[:nodes].include? node[:hostname] } +opflex_list.any? || raise("Opflex instance not found for node '#{node[:hostname]}'") +opflex_list.one? || raise("Multiple opflex instances found for node '#{node[:hostname]}'") +opflex = opflex_list.first +template node[:neutron][:opflex_config_file] do + cookbook "neutron" + source "opflex-agent-ovs.conf.erb" + mode "0755" + owner "root" + group neutron[:neutron][:platform][:group] + variables( + opflex_apic_domain_name: neutron[:neutron][:apic][:system_id], + hostname: node[:hostname], + socketgroup: neutron[:neutron][:platform][:group], + opflex_peer_ip: opflex[:peer_ip], + opflex_peer_port: opflex[:peer_port], + opflex_ssl_mode: opflex[:ssl_mode], + opflex_int_bridge: opflex[:integration_bridge], + opflex_access_bridge: opflex[:access_bridge], + opflex_vxlan_encap_iface: opflex[:vxlan][:encap_iface], + opflex_vxlan_uplink_iface: opflex[:vxlan][:uplink_iface], + opflex_vxlan_uplink_vlan: opflex[:vxlan][:uplink_vlan], + opflex_vxlan_remote_ip: opflex[:vxlan][:remote_ip], + opflex_vxlan_remote_port: opflex[:vxlan][:remote_port], + # TODO(mmnelemane) : update VLAN encapsulation config when it works. + # Currently set to VXLAN by default but can be modified from proposal. 
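+      # note: ssl_mode, integration_bridge and access_bridge are not part of the
+      # attribute defaults added above; they are presumably expected to arrive in
+      # the per-node opflex entry selected from the proposal data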
+ ml2_type_drivers: ml2_type_drivers + ) +end - neutron_metadata do - use_cisco_apic_ml2_driver true - neutron_node_object neutron - end +neutron_metadata do + use_cisco_apic_ml2_driver true + neutron_node_object neutron +end - service "neutron-opflex-agent" do - action [:enable, :start] - subscribes :restart, resources("template[#{agent_config_path}]") - end - utils_systemd_service_restart "neutron-opflex-agent" +service "neutron-opflex-agent" do + action [:enable, :start] + subscribes :restart, resources("template[#{agent_config_path}]") +end +utils_systemd_service_restart "neutron-opflex-agent" - service "agent-ovs" do - action [:enable, :start] - subscribes :restart, resources("template[#{opflex_agent_conf}]") - end - utils_systemd_service_restart "agent-ovs" +service "opflex-agent" do + action [:enable, :start] + subscribes :restart, resources("template[#{node[:neutron][:opflex_config_file]}]") end +utils_systemd_service_restart "opflex-agent" diff --git a/chef/cookbooks/neutron/recipes/cisco_apic_support.rb b/chef/cookbooks/neutron/recipes/cisco_apic_support.rb index 3ae366a60f..72ea20abaf 100644 --- a/chef/cookbooks/neutron/recipes/cisco_apic_support.rb +++ b/chef/cookbooks/neutron/recipes/cisco_apic_support.rb @@ -21,17 +21,23 @@ end aciswitches = node[:neutron][:apic][:apic_switches].to_hash -template "/etc/neutron/neutron-server.conf.d/100-ml2_conf_cisco_apic.ini.conf" do +acivmms = node[:neutron][:apic][:apic_vmms] + +template node[:neutron][:ml2_cisco_apic_config_file] do cookbook "neutron" source "ml2_conf_cisco_apic.ini.erb" mode "0640" owner "root" group node[:neutron][:platform][:group] variables( + vpc_pairs: node[:neutron][:apic][:vpc_pairs], apic_switches: aciswitches, + optimized_dhcp: node[:neutron][:apic][:optimized_dhcp], + optimized_metadata: node[:neutron][:apic][:optimized_metadata], + apic_vmms: acivmms, ml2_mechanism_drivers: node[:neutron][:ml2_mechanism_drivers], policy_drivers: "implicit_policy,apic", - default_ip_pool: "192.168.0.0/16", + default_ip_pool: "192.168.0.0/16" ) notifies :restart, "service[#{node[:neutron][:platform][:service_name]}]" end diff --git a/chef/cookbooks/neutron/recipes/common_agent.rb b/chef/cookbooks/neutron/recipes/common_agent.rb index d0d4183636..9ae45615f7 100644 --- a/chef/cookbooks/neutron/recipes/common_agent.rb +++ b/chef/cookbooks/neutron/recipes/common_agent.rb @@ -81,12 +81,10 @@ end end -if neutron[:neutron][:networking_plugin] == "ml2" && +# Skip working with regular agents if Cisco ACI is used +return if neutron[:neutron][:networking_plugin] == "ml2" && (neutron[:neutron][:ml2_mechanism_drivers].include?("cisco_apic_ml2") || neutron[:neutron][:ml2_mechanism_drivers].include?("apic_gbp")) - include_recipe "neutron::cisco_apic_agents" - return # skip anything else in this recipe -end multiple_external_networks = !neutron[:neutron][:additional_external_networks].empty? @@ -184,6 +182,7 @@ external_networks.concat(neutron[:neutron][:additional_external_networks]) ext_physnet_map = NeutronHelper.get_neutron_physnets(node, external_networks) external_networks.each do |net| + next if node[:crowbar_wall][:network][:nets][net].nil? 
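+  # guard against external networks that are not configured on this node;
+  # without it, the .last call below would raise NoMethodError on nil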
ext_iface = node[:crowbar_wall][:network][:nets][net].last # we can't do "floating:br-public, physnet1:br-public"; this also means # that all relevant nodes here must have a similar bridge_mappings @@ -277,7 +276,8 @@ tunnel_csum: neutron[:neutron][:ovs][:tunnel_csum], of_interface: neutron[:neutron][:ovs][:of_interface], ovsdb_interface: neutron[:neutron][:ovs][:ovsdb_interface], - bridge_mappings: bridge_mappings + bridge_mappings: bridge_mappings, + of_inactivity_probe: neutron[:neutron][:ovs][:of_inactivity_probe] ) end when ml2_mech_drivers.include?("linuxbridge") @@ -337,9 +337,12 @@ handle_internal_only_routers: "True", metadata_port: 9697, send_arp_for_ha: 3, + force_metadata: neutron[:neutron][:metadata][:force], periodic_interval: 40, periodic_fuzzy_delay: 5, dvr_enabled: neutron[:neutron][:use_dvr], + l3_ha_enabled: node.roles.include?("neutron-network") && neutron[:neutron][:l3_ha][:use_l3_ha], + l3_ha_vrrp_password: neutron[:neutron][:l3_ha][:vrrp_password], dvr_mode: node.roles.include?("neutron-network") ? "dvr_snat" : "dvr" ) end diff --git a/chef/cookbooks/neutron/recipes/common_config.rb b/chef/cookbooks/neutron/recipes/common_config.rb index 41afad1f04..4ad5d4a3bf 100644 --- a/chef/cookbooks/neutron/recipes/common_config.rb +++ b/chef/cookbooks/neutron/recipes/common_config.rb @@ -24,6 +24,9 @@ neutron = node end +use_apic_gbp = neutron[:neutron][:networking_plugin] == "ml2" && + neutron[:neutron][:ml2_mechanism_drivers].include?("apic_gbp") + # RDO package magic (non-standard packages) if node[:platform_family] == "rhel" net_core_pkgs=%w(kernel-*openstack* iproute-*el6ost.netns* iputils) @@ -78,6 +81,12 @@ if neutron[:neutron][:networking_plugin] == "ml2" service_plugins.unshift("neutron.services.l3_router.l3_router_plugin.L3RouterPlugin") + + if neutron[:neutron][:ml2_mechanism_drivers].include?("linuxbridge") || + neutron[:neutron][:ml2_mechanism_drivers].include?("openvswitch") + service_plugins.push("neutron.services.trunk.plugin.TrunkPlugin") + end + if neutron[:neutron][:ml2_mechanism_drivers].include?("cisco_apic_ml2") service_plugins = ["cisco_apic_l3"] elsif neutron[:neutron][:ml2_mechanism_drivers].include?("apic_gbp") @@ -111,6 +120,7 @@ variables( sql_connection: is_neutron_server ? neutron[:neutron][:db][:sql_connection] : nil, sql_min_pool_size: neutron[:neutron][:sql][:min_pool_size], + sql_max_pool_size: neutron[:neutron][:sql][:max_pool_size], sql_max_pool_overflow: neutron[:neutron][:sql][:max_pool_overflow], sql_pool_timeout: neutron[:neutron][:sql][:pool_timeout], debug: neutron[:neutron][:debug], @@ -132,12 +142,15 @@ service_plugins: service_plugins, allow_overlapping_ips: neutron[:neutron][:allow_overlapping_ips], dvr_enabled: neutron[:neutron][:use_dvr], + l3_ha_enabled: neutron[:neutron][:l3_ha][:use_l3_ha], network_nodes_count: network_nodes_count, dns_domain: neutron[:neutron][:dhcp_domain], mtu_value: mtu_value, infoblox: infoblox_settings, ipam_driver: ipam_driver, - rpc_workers: neutron[:neutron][:rpc_workers] + rpc_workers: neutron[:neutron][:rpc_workers], + use_apic_gbp: use_apic_gbp, + default_log_levels: neutron[:neutron][:default_log_levels] ) end diff --git a/chef/cookbooks/neutron/recipes/network_agents.rb b/chef/cookbooks/neutron/recipes/network_agents.rb index 8c143d32c0..a679435263 100644 --- a/chef/cookbooks/neutron/recipes/network_agents.rb +++ b/chef/cookbooks/neutron/recipes/network_agents.rb @@ -15,9 +15,10 @@ # include_recipe "neutron::common_agent" +ceilometer_agent_enabled = node.roles.include? 
"ceilometer-agent" package node[:neutron][:platform][:dhcp_agent_pkg] -package node[:neutron][:platform][:metering_agent_pkg] +package node[:neutron][:platform][:metering_agent_pkg] if ceilometer_agent_enabled if node[:neutron][:use_lbaas] if node[:neutron][:lbaasv2_driver] == "f5" && @@ -28,6 +29,8 @@ end end +package node[:neutron][:platform][:l3_ha_pkg] if node[:neutron][:l3_ha][:use_l3_ha] + # Enable ip forwarding on network node for SLE11 ruby_block "edit /etc/sysconfig/sysctl for IP_FORWARD" do block do @@ -39,9 +42,9 @@ end # Enable ip forwarding on network node for new SUSE platforms -ruby_block "edit /etc/sysctl.d/99-sysctl.conf for net.ipv4.ip_forward" do +ruby_block "edit /etc/sysctl.conf for net.ipv4.ip_forward" do block do - rc = Chef::Util::FileEdit.new("/etc/sysctl.d/99-sysctl.conf") + rc = Chef::Util::FileEdit.new("/etc/sysctl.conf") rc.search_file_replace_line(/^net.ipv4.ip_forward =/, "net.ipv4.ip_forward = 1") rc.write_file end @@ -51,7 +54,7 @@ # The rest of this logic will be compatible for all the platforms. # There is an overlap here, but will not cause inferference (the # variable `net.ipv4.ip_forward` is set to 1 in two files, -# 99-sysctl.conf and 50-neutron-enable-ip_forward.conf) +# sysctl.conf and 50-neutron-enable-ip_forward.conf) directory "create /etc/sysctl.d for enable-ip_forward" do path "/etc/sysctl.d" @@ -119,6 +122,7 @@ debug: node[:neutron][:debug], interface_driver: interface_driver, ) + only_if { ceilometer_agent_enabled } end # Delete pre-existing configuration file. @@ -153,7 +157,8 @@ dhcp_domain: node[:neutron][:dhcp_domain], enable_isolated_metadata: "True", enable_metadata_network: "False", - nameservers: dns_list + nameservers: dns_list, + force_metadata: node[:neutron][:metadata][:force] ) end @@ -180,6 +185,7 @@ debug: node[:neutron][:debug], interface_driver: interface_driver, user_group: node[:neutron][:platform][:lbaas_haproxy_group], + allow_automatic_lbaas_agent_failover: node[:neutron][:allow_automatic_lbaas_agent_failover], device_driver: "neutron_lbaas.drivers.haproxy.namespace_driver.HaproxyNSDriver" ) end @@ -221,9 +227,11 @@ subscribes :restart, resources(template: node[:neutron][:config_file]) subscribes :restart, resources("template[/etc/neutron/metering_agent.ini]") provider Chef::Provider::CrowbarPacemakerService if use_crowbar_pacemaker_service + only_if { ceilometer_agent_enabled } end utils_systemd_service_restart node[:neutron][:platform][:metering_agent_name] do action use_crowbar_pacemaker_service ? 
:disable : :enable + only_if { ceilometer_agent_enabled } end if node[:neutron][:use_lbaas] && diff --git a/chef/cookbooks/neutron/recipes/network_agents_ha.rb b/chef/cookbooks/neutron/recipes/network_agents_ha.rb index b6f2fdd673..14880459e0 100644 --- a/chef/cookbooks/neutron/recipes/network_agents_ha.rb +++ b/chef/cookbooks/neutron/recipes/network_agents_ha.rb @@ -41,19 +41,6 @@ action :create end - # We need .openrc present at network node so the node can use neutron-ha-tool even - # when located in separate cluster - template "/root/.openrc" do - source "openrc.erb" - cookbook "keystone" - owner "root" - group "root" - mode 0o600 - variables( - keystone_settings: keystone_settings - ) - end - # skip neutron-ha-tool resource creation during upgrade unless CrowbarPacemakerHelper.being_upgraded?(node) @@ -106,6 +93,14 @@ ) end + service "neutron-l3-ha-service" do + supports status: true, restart: true, restart_crm_resource: true + subscribes :restart, resources(file: "/etc/neutron/neutron-l3-ha-service.yaml"), :immediately + subscribes :restart, resources(file: "/etc/neutron/os_password"), :immediately + + provider Chef::Provider::CrowbarPacemakerService + end + # Reload systemd when unit file changed bash "reload systemd after neutron-l3-ha-service update" do code "systemctl daemon-reload" @@ -122,7 +117,9 @@ crowbar_pacemaker_sync_mark "sync-neutron-agents_before_ha" # Avoid races when creating pacemaker resources -crowbar_pacemaker_sync_mark "wait-neutron-agents_ha_resources" +crowbar_pacemaker_sync_mark "wait-neutron-agents_ha_resources" do + timeout 150 +end if node[:pacemaker][:clone_stateless_services] transaction_objects = [] @@ -180,21 +177,31 @@ l3_agent_clone = "cl-#{l3_agent_primitive}" end - if use_metadata_agent - metadata_agent_primitive = "neutron-metadata-agent" + enable_metadata = node.roles.include?("neutron-network") || !node[:neutron][:metadata][:force] + + metadata_agent_primitive = "neutron-metadata-agent" + if use_metadata_agent && enable_metadata objects = openstack_pacemaker_controller_clone_for_transaction metadata_agent_primitive do agent node[:neutron][:ha][:network][:metadata_ra] op node[:neutron][:ha][:network][:op] end transaction_objects.push(objects) + else + pacemaker_primitive metadata_agent_primitive do + agent node[:neutron][:ha][:network][:metadata_ra] + action [:stop, :delete] + only_if "crm configure show #{metadata_agent_primitive}" + end end - metering_agent_primitive = "neutron-metering-agent" - objects = openstack_pacemaker_controller_clone_for_transaction metering_agent_primitive do - agent node[:neutron][:ha][:network][:metering_ra] - op node[:neutron][:ha][:network][:op] + if node.roles.include? 
"ceilometer-agent" + metering_agent_primitive = "neutron-metering-agent" + objects = openstack_pacemaker_controller_clone_for_transaction metering_agent_primitive do + agent node[:neutron][:ha][:network][:metering_ra] + op node[:neutron][:ha][:network][:op] + end + transaction_objects.push(objects) end - transaction_objects.push(objects) if use_lbaas_agent && [nil, "", "haproxy"].include?(node[:neutron][:lbaasv2_driver]) @@ -234,68 +241,82 @@ if use_l3_agent # Remove old resource ha_tool_primitive_name = "neutron-ha-tool" - pacemaker_primitive ha_tool_primitive_name do - agent node[:neutron][:ha][:network][:ha_tool_ra] - action [:stop, :delete] - only_if "crm configure show #{ha_tool_primitive_name}" - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end + ha_service_primitive_name = "neutron-l3-ha-service" - # Remove old location - ha_tool_location_name = "l-#{ha_tool_primitive_name}-controller" - pacemaker_location ha_tool_location_name do - action :delete - only_if "crm configure show #{ha_tool_location_name}" - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end + if node[:neutron][:l3_ha][:use_l3_ha] + ## Do we really need to delete it? what about routers not + # marked with --ha Enabled ?! + pacemaker_primitive ha_service_primitive_name do + agent "systemd:neutron-l3-ha-service" + op node[:neutron][:ha][:neutron_l3_ha_resource][:op] + action [:stop, :delete] + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + only_if "crm configure show #{ha_service_primitive_name}" + end + else + pacemaker_primitive ha_tool_primitive_name do + agent node[:neutron][:ha][:network][:ha_tool_ra] + action [:stop, :delete] + only_if "crm configure show #{ha_tool_primitive_name}" + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end - # Remove old ordering - ha_tool_ordering_name = "o-#{ha_tool_primitive_name}" - pacemaker_order ha_tool_ordering_name do - action :delete - only_if "crm configure show #{ha_tool_ordering_name}" - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end + # Remove old location + ha_tool_location_name = "l-#{ha_tool_primitive_name}-controller" + pacemaker_location ha_tool_location_name do + action :delete + only_if "crm configure show #{ha_tool_location_name}" + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end - # Add pacemaker resource for neutron-l3-ha-service - ha_service_transaction_objects = [] - ha_service_primitive_name = "neutron-l3-ha-service" + # Remove old ordering + ha_tool_ordering_name = "o-#{ha_tool_primitive_name}" + pacemaker_order ha_tool_ordering_name do + action :delete + only_if "crm configure show #{ha_tool_ordering_name}" + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end - pacemaker_primitive ha_service_primitive_name do - agent "systemd:neutron-l3-ha-service" - op node[:neutron][:ha][:neutron_l3_ha_resource][:op] - action :update - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end - ha_service_transaction_objects << "pacemaker_primitive[#{ha_service_primitive_name}]" + # Add pacemaker resource for neutron-l3-ha-service + # only if l3_ha is not enabled + ha_service_transaction_objects = [] - ha_service_location_name = openstack_pacemaker_controller_only_location_for( - ha_service_primitive_name - ) + pacemaker_primitive ha_service_primitive_name do + agent "systemd:neutron-l3-ha-service" + op node[:neutron][:ha][:neutron_l3_ha_resource][:op] + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + 
ha_service_transaction_objects << "pacemaker_primitive[#{ha_service_primitive_name}]" - ha_service_transaction_objects << "pacemaker_location[#{ha_service_location_name}]" + ha_service_location_name = openstack_pacemaker_controller_only_location_for( + ha_service_primitive_name + ) - pacemaker_transaction "neutron ha service" do - cib_objects ha_service_transaction_objects - # note that this will also automatically start the resources - action :commit_new - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } - end + ha_service_transaction_objects << "pacemaker_location[#{ha_service_location_name}]" - rabbit_settings = fetch_rabbitmq_settings - - crowbar_pacemaker_order_only_existing "o-#{ha_service_primitive_name}" do - # While neutron-ha-tool technically doesn't directly depend on postgresql or - # rabbitmq, if these bits are not running, then neutron-server can run but - # can't do what it's being asked. Note that neutron-server does have a - # constraint on these services, but it's optional, not mandatory (because it - # doesn't need to be restarted when postgresql or rabbitmq are restarted). - # So explicitly depend on postgresql and rabbitmq (if they are in the cluster). - ordering "( postgresql #{rabbit_settings[:pacemaker_resource]} g-haproxy cl-neutron-server " \ - "#{l3_agent_clone} ) #{ha_service_primitive_name}" - score "Mandatory" - action :create - only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + pacemaker_transaction "neutron ha service" do + cib_objects ha_service_transaction_objects + # note that this will also automatically start the resources + action :commit_new + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + + rabbit_settings = fetch_rabbitmq_settings + + crowbar_pacemaker_order_only_existing "o-#{ha_service_primitive_name}" do + # While neutron-ha-tool technically doesn't directly depend on postgresql or + # rabbitmq, if these bits are not running, then neutron-server can run but + # can't do what it's being asked. Note that neutron-server does have a + # constraint on these services, but it's optional, not mandatory (because it + # doesn't need to be restarted when postgresql or rabbitmq are restarted). + # So explicitly depend on postgresql and rabbitmq (if they are in the cluster). + ordering "( postgresql #{rabbit_settings[:pacemaker_resource]} g-haproxy cl-neutron-server " \ + "#{l3_agent_clone} ) #{ha_service_primitive_name}" + score "Mandatory" + action :create + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end end end diff --git a/chef/cookbooks/neutron/recipes/role_neutron_sdn_cisco_aci_agents.rb b/chef/cookbooks/neutron/recipes/role_neutron_sdn_cisco_aci_agents.rb new file mode 100644 index 0000000000..eddfd28ad9 --- /dev/null +++ b/chef/cookbooks/neutron/recipes/role_neutron_sdn_cisco_aci_agents.rb @@ -0,0 +1,19 @@ +# +# Copyright 2018, SUSE LINUX GmbH +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +if CrowbarRoleRecipe.node_state_valid_for_role?(node, "neutron", "neutron-sdn-cisco-aci-agents") + include_recipe "neutron::cisco_apic_agents" +end diff --git a/chef/cookbooks/neutron/recipes/server.rb b/chef/cookbooks/neutron/recipes/server.rb index 3fcca1c6b7..3f156bae75 100644 --- a/chef/cookbooks/neutron/recipes/server.rb +++ b/chef/cookbooks/neutron/recipes/server.rb @@ -85,7 +85,7 @@ else cisco_nexus_link_action = "delete" end -link "/etc/neutron/neutron-server.conf.d/100-ml2_conf_cisco.ini.conf" do +link node[:neutron][:ml2_cisco_config_file] do to "/etc/neutron/plugins/ml2/ml2_conf_cisco.ini" action cisco_nexus_link_action notifies :restart, "service[#{node[:neutron][:platform][:service_name]}]" @@ -99,7 +99,7 @@ else cisco_apic_link_action = "delete" end -link "/etc/neutron/neutron-server.conf.d/100-ml2_conf_cisco_apic.ini.conf" do +link node[:neutron][:ml2_cisco_apic_config_file] do to "/etc/neutron/plugins/ml2/ml2_conf_cisco_apic.ini" action cisco_apic_link_action notifies :restart, "service[#{node[:neutron][:platform][:service_name]}]" @@ -419,6 +419,13 @@ utils_systemd_service_restart node[:neutron][:platform][:service_name] do action use_crowbar_pacemaker_service ? :disable : :enable end +# neutron-server must be restarted immediately if keystone settings have changed, +# otherwise neutron requests in recipes will fail +if node[:keystone][:endpoint_changed] + service node[:neutron][:platform][:service_name] do + subscribes :restart, resources(template: node[:neutron][:config_file]), :immediately + end +end if node[:neutron][:use_infoblox] service node[:neutron][:platform][:infoblox_agent_name] do diff --git a/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb b/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb index 49976d7272..06afca44ad 100644 --- a/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb +++ b/chef/cookbooks/neutron/templates/default/dhcp_agent.ini.erb @@ -2,8 +2,14 @@ interface_driver = <%= @interface_driver %> resync_interval = <%= @resync_interval %> dhcp_driver = <%= @dhcp_driver %> +<% if @force_metadata -%> +enable_isolated_metadata = False +enable_metadata_network = False +<% else -%> enable_isolated_metadata = <%= @enable_isolated_metadata %> enable_metadata_network = <%= @enable_metadata_network %> +<% end -%> +force_metadata = <%= @force_metadata %> dhcp_domain = <%= @dhcp_domain %> <% if @nameservers -%> dnsmasq_dns_servers = <%= @nameservers %> diff --git a/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb b/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb index 88f905c33a..6c2c6d8079 100644 --- a/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb +++ b/chef/cookbooks/neutron/templates/default/l3_agent.ini.erb @@ -1,8 +1,12 @@ [DEFAULT] interface_driver = <%= @interface_driver %> +ha_vrrp_auth_password = <%= @l3_ha_vrrp_password %> <% if @dvr_enabled -%> agent_mode = <%= @dvr_mode %> <% end -%> +<% if @force_metadata -%> +enable_metadata_proxy = False +<% end -%> metadata_port = <%= @metadata_port %> send_arp_for_ha = <%= @send_arp_for_ha %> handle_internal_only_routers = <%= @handle_internal_only_routers %> diff --git a/chef/cookbooks/neutron/templates/default/lbaas_agent.ini.erb b/chef/cookbooks/neutron/templates/default/lbaas_agent.ini.erb index e0958c381d..eb3830e702 100644 --- a/chef/cookbooks/neutron/templates/default/lbaas_agent.ini.erb +++ b/chef/cookbooks/neutron/templates/default/lbaas_agent.ini.erb @@ -2,5 +2,6 @@ device_driver = <%= @device_driver %> 
interface_driver = <%= @interface_driver %> debug = <%= @debug %> +allow_automatic_lbaas_agent_failover = <%= @allow_automatic_lbaas_agent_failover %> [haproxy] user_group = <%= @user_group %> diff --git a/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb b/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb index 90935dda63..421c8bebbc 100644 --- a/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb +++ b/chef/cookbooks/neutron/templates/default/ml2_conf_cisco_apic.ini.erb @@ -2,7 +2,7 @@ apic_system_id=<%= node[:neutron][:apic][:system_id] %> [opflex] networks = * -[ml2_cisco_apic] +[apic] apic_hosts=<%= node[:neutron][:apic][:hosts] %> apic_username=<%= node[:neutron][:apic][:username] %> apic_password=<%= node[:neutron][:apic][:password] %> @@ -11,16 +11,21 @@ apic_name_mapping = use_name apic_clear_node_profiles = True enable_aci_routing = True apic_arp_flooding = True -enable_optimized_metadata = True +enable_optimized_metadata = <%= @optimized_metadata %> +enable_optimized_dhcp = <%= @optimized_dhcp %> apic_provision_infra = True apic_provision_hostlinks = True +<% unless @vpc_pairs.nil? -%> +apic_vpc_pairs = <%= @vpc_pairs %> +<% end -%> + <% @apic_switches.keys.each do |ip| -%> [apic_switch:<%=ip%>] -<% if @apic_switches[ip].key?(:switch_ports) -%> -<% @apic_switches[ip][:switch_ports].each do |name, values| -%> -<%= name %> = <%= values[:switch_port] %> -<% end -%> -<% end -%> + <% if @apic_switches[ip].key?(:switch_ports) -%> + <% @apic_switches[ip][:switch_ports].each do |name, values| -%> +<%= name %> = <%= values[:switch_port] %> + <% end -%> + <% end -%> <% end -%> <% if @ml2_mechanism_drivers.include?("apic_gbp") -%> [group_policy] @@ -28,3 +33,20 @@ policy_drivers = <%= @policy_drivers %> [group_policy_implicit_policy] default_ip_pool = <%= @default_ip_pool %> <% end -%> + +[apic_external_network:<%=node[:neutron][:apic][:ext_net][:name]%>] +preexisting = <%= node[:neutron][:apic][:ext_net][:preexisting] %> +<% unless node[:neutron][:apic][:ext_net][:nat_enabled].nil? -%> +enable_nat = <%= node[:neutron][:apic][:ext_net][:nat_enabled] %> +<% end -%> +external_epg = <%= node[:neutron][:apic][:ext_net][:ext_epg] %> +host_pool_cidr = <%= node[:neutron][:apic][:ext_net][:host_pool_cidr] %> + +<% @apic_vmms.each do |vmm_domain| -%> +[apic_vmdom:<%= vmm_domain[:vmm_name]%>] +vmm_type = <%= vmm_domain[:vmm_type]%> +<% if vmm_domain[:vlan_ranges] -%> +vlan_ranges = <%= vmm_domain[:vlan_ranges] %> +<% end -%> +<% end -%> + diff --git a/chef/cookbooks/neutron/templates/default/neutron.conf.erb b/chef/cookbooks/neutron/templates/default/neutron.conf.erb index c8abcc3f41..1de2e9042d 100644 --- a/chef/cookbooks/neutron/templates/default/neutron.conf.erb +++ b/chef/cookbooks/neutron/templates/default/neutron.conf.erb @@ -17,6 +17,10 @@ dhcp_agents_per_network = <%= @network_nodes_count %> <% if @dvr_enabled -%> router_distributed = True <% end -%> +<% if @l3_ha_enabled -%> +l3_ha = True +max_l3_agents_per_router = <%= (@network_nodes_count + 1) / 2 %> +<% end -%> debug = <%= @debug ? "True" : "False" %> verbose = <%= @verbose ? "True" : "False" %> log_dir = /var/log/neutron @@ -26,6 +30,9 @@ transport_url = <%= @rabbit_settings[:url] %> control_exchange = neutron max_header_line = <%= node[:neutron][:max_header_line] %> wsgi_keep_alive = false +<% unless @default_log_levels.length.zero? 
-%> +default_log_levels = <%= @default_log_levels.join(", ") %> +<% end -%> [agent] root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf @@ -53,6 +60,17 @@ project_name = <%= @keystone_settings['service_tenant'] %> project_domain_name = <%= @keystone_settings['admin_domain'] %> user_domain_name = <%= @keystone_settings['admin_domain'] %> +<% if @use_apic_gbp -%> +[apic_aim_auth] +auth_plugin = v3password +auth_url = <%= @keystone_settings['internal_auth_url'] %> +user_domain_name = <%= @keystone_settings['admin_domain'] %> +project_name = <%= @keystone_settings['service_tenant'] %> +project_domain_name = <%= @keystone_settings['admin_domain'] %> +username = <%= @keystone_settings['service_user'] %> +password = <%= @keystone_settings['service_password'] %> + +<% end -%> [nova] region_name = <%= @keystone_settings['endpoint_region'] %> endpoint_type = internal @@ -68,8 +86,10 @@ username = <%= @keystone_settings['service_user'] %> [oslo_concurrency] lock_path = /var/run/neutron +<% if @rabbit_settings[:enable_notifications] -%> [oslo_messaging_notifications] -driver = neutron.openstack.common.notifier.rpc_notifier +driver = messaging +<% end -%> [oslo_messaging_rabbit] rabbit_use_ssl = <%= @rabbit_settings[:use_ssl] %> diff --git a/chef/cookbooks/neutron/templates/default/openvswitch_agent.ini.erb b/chef/cookbooks/neutron/templates/default/openvswitch_agent.ini.erb index f3e5083db1..5c074fd3d4 100644 --- a/chef/cookbooks/neutron/templates/default/openvswitch_agent.ini.erb +++ b/chef/cookbooks/neutron/templates/default/openvswitch_agent.ini.erb @@ -26,5 +26,6 @@ of_interface = <%= @of_interface %> local_ip = <%= node.address("os_sdn").addr %> <% end -%> bridge_mappings = <%= @bridge_mappings %> +of_inactivity_probe = <%= @of_inactivity_probe %> [securitygroup] firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver diff --git a/chef/cookbooks/neutron/templates/default/10-opflex-agent-ovs.conf.erb b/chef/cookbooks/neutron/templates/default/opflex-agent-ovs.conf.erb similarity index 91% rename from chef/cookbooks/neutron/templates/default/10-opflex-agent-ovs.conf.erb rename to chef/cookbooks/neutron/templates/default/opflex-agent-ovs.conf.erb index 28f504218d..45eb74dcbb 100644 --- a/chef/cookbooks/neutron/templates/default/10-opflex-agent-ovs.conf.erb +++ b/chef/cookbooks/neutron/templates/default/opflex-agent-ovs.conf.erb @@ -10,7 +10,7 @@ {"hostname": "<%= @opflex_peer_ip %>", "port": "<%= @opflex_peer_port %>"} ], "ssl": { - "mode": "enabled", + "mode": "<%= @opflex_ssl_mode %>", "ca-store": "/etc/ssl/certs/" }, "inspector": { @@ -36,7 +36,8 @@ "renderers": { "stitched-mode": { - "ovs-bridge-name": "br-int", + "int-bridge-name": "<%= @opflex_int_bridge %>", + "access-bridge-name": "<%= @opflex_access_bridge %>", "encap": { "vxlan" : { "encap-iface": "<%= @opflex_vxlan_encap_iface %>", diff --git a/chef/cookbooks/nova/attributes/default.rb b/chef/cookbooks/nova/attributes/default.rb index 4341793981..87d63bab49 100644 --- a/chef/cookbooks/nova/attributes/default.rb +++ b/chef/cookbooks/nova/attributes/default.rb @@ -21,6 +21,7 @@ default[:nova][:debug] = false default[:nova][:max_header_line] = 16384 default[:nova][:config_file] = "/etc/nova/nova.conf.d/100-nova.conf" +default[:nova][:placement_config_file] = "/etc/nova/nova.conf.d/101-nova-placement.conf" # # Database Settings @@ -47,6 +48,14 @@ default[:nova][:api_db][:max_overflow] = nil default[:nova][:api_db][:pool_timeout] = nil +# +# Placement API database settings +# 
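+# (consumed in config.rb via fetch_database_connection_string, mirroring the
+# api_db settings above; assuming the usual crowbar helper format, the result
+# looks roughly like mysql://placement:<password>@<db-host>/placement)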
+default[:nova][:placement_db][:password] = nil +default[:nova][:placement_db][:user] = "placement" +default[:nova][:placement_db][:database] = "placement" + + # Feature settings default[:nova][:use_migration] = false default[:nova][:setup_shared_instance_storage] = false @@ -83,6 +92,12 @@ default[:nova][:scheduler][:disk_allocation_ratio] = 1.0 default[:nova][:scheduler][:reserved_host_memory_mb] = 512 +# +# Placement Settings +# +default[:nova][:placement_service_user] = "placement" +default[:nova][:placement_service_password] = "placement" + # # Shared Settings # @@ -117,6 +132,7 @@ default[:nova][:ports][:api_ec2] = 8788 default[:nova][:ports][:api] = 8774 +default[:nova][:ports][:placement_api] = 8780 default[:nova][:ports][:metadata] = 8775 default[:nova][:ports][:objectstore] = 3333 default[:nova][:ports][:novncproxy] = 6080 @@ -142,6 +158,7 @@ default[:nova][:ha][:ports][:objectstore] = 5553 default[:nova][:ha][:ports][:novncproxy] = 5554 default[:nova][:ha][:ports][:serialproxy] = 5556 +default[:nova][:ha][:ports][:placement_api] = 5560 default[:nova][:ha][:compute][:enabled] = false default[:nova][:ha][:compute][:compute][:op][:monitor][:interval] = "10s" diff --git a/chef/cookbooks/nova/definitions/nova_package.rb b/chef/cookbooks/nova/definitions/nova_package.rb index d133f38f1d..c6ed620e96 100644 --- a/chef/cookbooks/nova/definitions/nova_package.rb +++ b/chef/cookbooks/nova/definitions/nova_package.rb @@ -55,7 +55,8 @@ end end - subscribes :restart, resources(template: node[:nova][:config_file]) + subscribes :restart, [resources(template: node[:nova][:config_file]), + resources(template: node[:nova][:placement_config_file])] provider Chef::Provider::CrowbarPacemakerService if params[:use_pacemaker_provider] end diff --git a/chef/cookbooks/nova/recipes/api.rb b/chef/cookbooks/nova/recipes/api.rb index d408623c99..ed27fc1155 100644 --- a/chef/cookbooks/nova/recipes/api.rb +++ b/chef/cookbooks/nova/recipes/api.rb @@ -28,6 +28,14 @@ use_pacemaker_provider use_crowbar_pacemaker_service end +# nova-api must be restarted immediately if keystone settings have changed, +# otherwise nova requests in recipes will fail +if node[:keystone][:endpoint_changed] + service "nova-api" do + subscribes :restart, resources(template: node[:nova][:config_file]), :immediately + end +end + api_ha_enabled = node[:nova][:ha][:enabled] admin_api_host = CrowbarHelper.get_host_for_admin_url(node, api_ha_enabled) public_api_host = CrowbarHelper.get_host_for_public_url(node, node[:nova][:ssl][:enabled], api_ha_enabled) diff --git a/chef/cookbooks/nova/recipes/compute.rb b/chef/cookbooks/nova/recipes/compute.rb index b3483c0a16..bfec1c30a7 100644 --- a/chef/cookbooks/nova/recipes/compute.rb +++ b/chef/cookbooks/nova/recipes/compute.rb @@ -109,6 +109,20 @@ package "qemu-block-rbd" end + execute "enable kvm intel nested virt" do + command <<-SHELL + grep -q nested /etc/modprobe.d/80-kvm-intel.conf || + echo "options kvm_intel nested=1" > /etc/modprobe.d/80-kvm-intel.conf + ! 
grep -q N /sys/module/kvm_intel/parameters/nested || + /sbin/modprobe -r kvm_intel + SHELL + only_if do + node[:nova][:kvm][:nested_virt] && + `uname -r`.include?("default") && + system("grep -qw vmx /proc/cpuinfo") + end + end + # load modules only when appropriate kernel is present execute "loading kvm modules" do command <<-EOF @@ -186,7 +200,8 @@ mode 0644 variables( user: libvirt_user, - group: libvirt_group + group: libvirt_group, + max_threads_per_process: node[:nova][:kvm][:max_threads_per_process] ) notifies :create, "ruby_block[restart_libvirtd]", :immediately end @@ -341,8 +356,8 @@ source "crowbar-compute-set-sys-options.erb" variables({ ksm_enabled: node[:nova][:kvm][:ksm_enabled] ? 1 : 0, - tranparent_hugepage_enabled: node[:nova][:kvm][:ksm_enabled] ? "never" : "always", - tranparent_hugepage_defrag: node[:nova][:kvm][:ksm_enabled] ? "never" : "always" + transparent_hugepage_enabled: node[:nova][:kvm][:ksm_enabled] ? "never" : "always", + transparent_hugepage_defrag: node[:nova][:kvm][:ksm_enabled] ? "never" : "madvise" }) mode "0755" end diff --git a/chef/cookbooks/nova/recipes/config.rb b/chef/cookbooks/nova/recipes/config.rb index 6b98f8f1f8..9aa1acc44e 100644 --- a/chef/cookbooks/nova/recipes/config.rb +++ b/chef/cookbooks/nova/recipes/config.rb @@ -39,6 +39,19 @@ action :install end +# Fake service to take control of the WSGI process from the apache2 service +# that runs the Placement API. We replace the `reload` action by manually +# sending SIGUSR1 to all processes that belong to +# `wsgi:nova-placement-api` +service "nova-placement-api" do + service_name "apache2" + if node[:platform_family] == "suse" + reload_command 'sleep 1 && pkill --signal SIGUSR1 -f "^\(wsgi:nova-placement" && sleep 1' + end + supports reload: true, restart: true, status: true + ignore_failure true +end + # don't expose database connection to the compute clients if is_controller db_settings = fetch_database_settings @@ -48,9 +61,11 @@ include_recipe "#{db_settings[:backend_name]}::python-client" database_connection = fetch_database_connection_string(node[:nova][:db]) + placement_database_connection = fetch_database_connection_string(node[:nova][:placement_db]) api_database_connection = fetch_database_connection_string(node[:nova][:api_db]) else database_connection = nil + placement_database_connection = nil api_database_connection = nil end @@ -299,6 +314,7 @@ cpu_mode = "" cpu_model = "" +rng_device = nil if node.roles.include? "nova-compute-kvm" compute_flags = node[:nova][:compute]["kvm-#{node[:kernel][:machine]}"] @@ -311,6 +327,14 @@ cpu_mode = compute_flags["cpu_mode"] end +if File.exist?("/sys/devices/virtual/misc/hw_random/rng_current") && + !File.read("/sys/devices/virtual/misc/hw_random/rng_current").include?("none") + # Unfortunately that file isn't readable by non-root so we cannot set it + # rng_device = "/dev/hwrng" +else + rng_device = "/dev/random" +end + # lock path prevents race conditions for cinder-volume and nova-compute on same # node. Keep code in sync between cinder and nova recipes. 
For reference check # http://docs.openstack.org/releasenotes/nova/newton.html @@ -324,6 +348,22 @@ include_recipe "crowbar-openstack::common" end +template node[:nova][:placement_config_file] do + source "nova-placement.conf.erb" + user "root" + group node[:nova][:group] + mode 0640 + variables( + keystone_settings: keystone_settings, + placement_database_connection: placement_database_connection, + placement_service_user: node["nova"]["placement_service_user"], + placement_service_password: node["nova"]["placement_service_password"], + placement_service_insecure: node[:nova][:ssl][:insecure] + ) + notifies :reload, "service[nova-placement-api]" +end + + template node[:nova][:config_file] do source "nova.conf.erb" user "root" @@ -333,6 +373,7 @@ cpu_mode: cpu_mode, cpu_model: cpu_model, bind_host: bind_host, + rng_device: rng_device, bind_port_api: bind_port_api, bind_port_api_ec2: bind_port_api_ec2, bind_port_metadata: bind_port_metadata, @@ -388,10 +429,12 @@ oat_appraiser_host: oat_server[:hostname], oat_appraiser_port: "8443", has_itxt: has_itxt, + default_filters: node[:nova][:scheduler][:default_filters], reserved_host_memory: reserved_host_memory, use_baremetal_filters: use_baremetal_filters, track_instance_changes: track_instance_changes, - ironic_settings: ironic_settings + ironic_settings: ironic_settings, + default_log_levels: node[:nova][:default_log_levels] ) end diff --git a/chef/cookbooks/nova/recipes/controller_ha.rb b/chef/cookbooks/nova/recipes/controller_ha.rb index 9a5b135716..65c2e2e07e 100644 --- a/chef/cookbooks/nova/recipes/controller_ha.rb +++ b/chef/cookbooks/nova/recipes/controller_ha.rb @@ -33,6 +33,14 @@ action :nothing end.run_action(:create) +haproxy_loadbalancer "nova-placement-api" do + address "0.0.0.0" + port node[:nova][:ports][:placement_api] + use_ssl node[:nova][:ssl][:enabled] + servers CrowbarPacemakerHelper.haproxy_servers_for_service(node, "nova", "nova-controller", "placement_api") + action :nothing +end.run_action(:create) + haproxy_loadbalancer "nova-metadata" do address cluster_admin_ip port node[:nova][:ports][:metadata] @@ -73,7 +81,9 @@ crowbar_pacemaker_sync_mark "sync-nova_before_ha" # Avoid races when creating pacemaker resources - crowbar_pacemaker_sync_mark "wait-nova_ha_resources" + crowbar_pacemaker_sync_mark "wait-nova_ha_resources" do + timeout 160 + end rabbit_settings = fetch_rabbitmq_settings transaction_objects = [] diff --git a/chef/cookbooks/nova/recipes/database.rb b/chef/cookbooks/nova/recipes/database.rb index 89d73f8d01..7582b919c4 100644 --- a/chef/cookbooks/nova/recipes/database.rb +++ b/chef/cookbooks/nova/recipes/database.rb @@ -31,7 +31,7 @@ only_if { ha_enabled } end -[node[:nova][:db], node[:nova][:api_db]].each do |d| +[node[:nova][:db], node[:nova][:api_db], node[:nova][:placement_db]].each do |d| # Creates empty nova database database "create #{d[:database]} database" do connection db_settings[:connection] diff --git a/chef/cookbooks/nova/recipes/flavors.rb b/chef/cookbooks/nova/recipes/flavors.rb index 35b0cf1acb..c61818f5ba 100644 --- a/chef/cookbooks/nova/recipes/flavors.rb +++ b/chef/cookbooks/nova/recipes/flavors.rb @@ -96,44 +96,76 @@ trusted_flavors = flavors.select{ |key, value| value["name"].match(/\.trusted\./) } default_flavors = flavors.select{ |key, value| !value["name"].match(/\.trusted\./) } -flavorlist = `#{openstack} flavor list -f value -c Name`.split("\n") -# create the trusted flavors -if node[:nova][:trusted_flavors] - trusted_flavors.keys.each do |id| - next if 
flavorlist.include?(flavors[id]["name"]) - execute "register_#{flavors[id]["name"]}_flavor" do - retries 5 - command <<-EOF - #{novacmd} flavor-create #{flavors[id]["name"]} #{id} #{flavors[id]["mem"]} \ - #{flavors[id]["disk"]} #{flavors[id]["vcpu"]} - #{novacmd} flavor-key #{flavors[id]["name"]} set trust:trusted_host=trusted - EOF - action :nothing - subscribes :run, "execute[trigger-flavor-creation]", :delayed - end - end +execute "delay-flavor-creation" do + command "true" + action :nothing end -# create the default flavors -if node[:nova][:create_default_flavors] - default_flavors.keys.each do |id| - next if flavorlist.include?(flavors[id]["name"]) - execute "register_#{flavors[id]["name"]}_flavor" do - retries 5 - command <<-EOF - #{novacmd} flavor-create #{flavors[id]["name"]} #{id} #{flavors[id]["mem"]} \ - #{flavors[id]["disk"]} #{flavors[id]["vcpu"]} - EOF - action :nothing - subscribes :run, "execute[trigger-flavor-creation]", :delayed - end +ruby_block "Get current flavors" do + block do + cmd = Mixlib::ShellOut.new("#{openstack} flavor list -f value -c Name").run_command + raise "Flavor list not obtained, is the nova-api down?" unless cmd.exitstatus.zero? + node.run_state["flavorlist"] = cmd.stdout.split("\n") end + retries 10 end -# This is to trigger all the above "execute" resources to run :delayed, so that -# they run at the end of the chef-client run, after the nova service has been -# restarted (in case of a config change) -execute "trigger-flavor-creation" do - command "true" +ruby_block "Flavor creation" do + block do + flavorlist = node.run_state["flavorlist"] + + if node[:nova][:create_default_flavors] + default_flavors.each do |id, flavor| + next if flavorlist.include?(flavor["name"]) + command = "#{novacmd} flavor-create #{flavor["name"]} #{id} #{flavor["mem"]} " + command << "#{flavor["disk"]} #{flavor["vcpu"]}" + run_context.resource_collection << flavor_create = Chef::Resource::Execute.new( + "Create flavor #{flavor["name"]}", run_context + ) + flavor_create.command command + flavor_create.retries 5 + # don't retry after "Flavor with ID ... already exists" + flavor_create.not_if "#{openstack} flavor show #{id}" + + # delay the run of this resource until the end of the run + run_context.notifies_delayed( + Chef::Resource::Notification.new(flavor_create, :run, "delay-flavor-creation") + ) + end + end + + if node[:nova][:trusted_flavors] + trusted_flavors.each do |id, flavor| + next if flavorlist.include?(flavor["name"]) + command = "#{novacmd} flavor-create #{flavor["name"]} " + command << "#{id} #{flavor["mem"]} #{flavor["disk"]} #{flavor["vcpu"]} " + run_context.resource_collection << flavor_create = Chef::Resource::Execute.new( + "Create trusted flavor #{flavor["name"]}", run_context + ) + flavor_create.command command + flavor_create.retries 5 + # don't retry after "Flavor with ID ... 
already exists" + flavor_create.not_if "#{openstack} flavor show #{id}" + + # delay the run of this resource until the end of the run + run_context.notifies_delayed( + Chef::Resource::Notification.new(flavor_create, :run, "delay-flavor-creation") + ) + + # set flavors to trusted + command = "#{novacmd} flavor-key #{flavor["name"]} set trust:trusted_host=trusted" + run_context.resource_collection << flavor_trusted = Chef::Resource::Execute.new( + "Set flavor #{flavor["name"]} to trusted", run_context + ) + flavor_trusted.command command + flavor_trusted.retries 5 + + # delay the run of this resource until the end of the run + run_context.notifies_delayed( + Chef::Resource::Notification.new(flavor_trusted, :run, "delay-flavor-creation") + ) + end + end + end end diff --git a/chef/cookbooks/nova/recipes/placement_api.rb b/chef/cookbooks/nova/recipes/placement_api.rb new file mode 100644 index 0000000000..59c9f84de7 --- /dev/null +++ b/chef/cookbooks/nova/recipes/placement_api.rb @@ -0,0 +1,123 @@ +# +# Cookbook Name:: nova +# Recipe:: placement_api +# +# Copyright 2017, SUSE Linux GmbH +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +include_recipe "apache2" +include_recipe "apache2::mod_wsgi" +include_recipe "nova::config" + +keystone_settings = KeystoneHelper.keystone_settings(node, @cookbook_name) + +package "openstack-nova-placement-api" + +api_ha_enabled = node[:nova][:ha][:enabled] +admin_api_host = CrowbarHelper.get_host_for_admin_url(node, api_ha_enabled) +public_api_host = CrowbarHelper.get_host_for_public_url( + node, node[:nova][:ssl][:enabled], api_ha_enabled +) +api_port = node[:nova][:ports][:placement_api] + +api_protocol = node[:nova][:ssl][:enabled] ? 
"https" : "http" + +crowbar_pacemaker_sync_mark "wait-nova-placement_register" if api_ha_enabled + +register_auth_hash = { user: keystone_settings["admin_user"], + password: keystone_settings["admin_password"], + tenant: keystone_settings["admin_tenant"] } + +keystone_register "register placement user '#{node["nova"]["placement_service_user"]}'" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + user_name node["nova"]["placement_service_user"] + user_password node["nova"]["placement_service_password"] + tenant_name keystone_settings["service_tenant"] + action :add_user +end + +keystone_register "give placement user '#{node["nova"]["placement_service_user"]}' access" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + user_name node["nova"]["placement_service_user"] + tenant_name keystone_settings["service_tenant"] + role_name "admin" + action :add_access +end + +keystone_register "register placement service" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + service_name "placement" + service_type "placement" + service_description "Openstack Placement Service" + action :add_service +end + +keystone_register "register placement endpoint" do + protocol keystone_settings["protocol"] + insecure keystone_settings["insecure"] + host keystone_settings["internal_url_host"] + port keystone_settings["admin_port"] + auth register_auth_hash + endpoint_service "placement" + endpoint_region keystone_settings["endpoint_region"] + endpoint_publicURL "#{api_protocol}://#{public_api_host}:#{api_port}" + endpoint_adminURL "#{api_protocol}://#{admin_api_host}:#{api_port}" + endpoint_internalURL "#{api_protocol}://#{admin_api_host}:#{api_port}" + action :add_endpoint_template +end + +if node[:nova][:ha][:enabled] + admin_address = Chef::Recipe::Barclamp::Inventory.get_network_by_type(node, "admin").address + bind_host = admin_address + bind_port = node[:nova][:ha][:ports][:placement_api] +else + bind_host = "0.0.0.0" + bind_port = node[:nova][:ports][:placement_api] +end + +node.normal[:apache][:listen_ports_crowbar] ||= {} +node.normal[:apache][:listen_ports_crowbar][:nova] = { plain: [bind_port] } + +crowbar_openstack_wsgi "WSGI entry for nova-placement-api" do + bind_host bind_host + bind_port bind_port + daemon_process "nova-placement-api" + user node[:nova][:user] + group node[:nova][:group] + ssl_enable node[:nova][:ssl][:enabled] + ssl_certfile node[:nova][:ssl][:certfile] + ssl_keyfile node[:nova][:ssl][:keyfile] + if node[:nova][:ssl][:cert_required] + ssl_cacert node[:nova][:ssl][:ca_certs] + end +end + +apache_site "nova-placement-api.conf" do + enable true +end + +crowbar_pacemaker_sync_mark "create-nova-placement_register" if api_ha_enabled diff --git a/chef/cookbooks/nova/recipes/role_nova_controller.rb b/chef/cookbooks/nova/recipes/role_nova_controller.rb index 51e9a9b8dd..cc83b38362 100644 --- a/chef/cookbooks/nova/recipes/role_nova_controller.rb +++ b/chef/cookbooks/nova/recipes/role_nova_controller.rb @@ -18,6 +18,7 @@ include_recipe "nova::config" include_recipe "nova::database" include_recipe "nova::api" + include_recipe "nova::placement_api" include_recipe 
"nova::cert" include_recipe "nova::instances" include_recipe "nova::scheduler" diff --git a/chef/cookbooks/nova/templates/default/crowbar-compute-set-sys-options.erb b/chef/cookbooks/nova/templates/default/crowbar-compute-set-sys-options.erb index e6c4785dc1..be0dd27c7d 100644 --- a/chef/cookbooks/nova/templates/default/crowbar-compute-set-sys-options.erb +++ b/chef/cookbooks/nova/templates/default/crowbar-compute-set-sys-options.erb @@ -8,11 +8,11 @@ if test -w /sys/kernel/mm/ksm/run; then fi if test -w /sys/kernel/mm/transparent_hugepage/enabled; then - echo <%= @tranparent_hugepage_enabled %> > /sys/kernel/mm/transparent_hugepage/enabled + echo <%= @transparent_hugepage_enabled %> > /sys/kernel/mm/transparent_hugepage/enabled fi if test -w /sys/kernel/mm/transparent_hugepage/defrag; then - echo <%= @tranparent_hugepage_defrag %> > /sys/kernel/mm/transparent_hugepage/defrag + echo <%= @transparent_hugepage_defrag %> > /sys/kernel/mm/transparent_hugepage/defrag fi find /sys/block -type l -name 'sd*' -exec sh -c 'echo deadline > {}/queue/scheduler' \; diff --git a/chef/cookbooks/nova/templates/default/nova-placement.conf.erb b/chef/cookbooks/nova/templates/default/nova-placement.conf.erb new file mode 100644 index 0000000000..07487d6680 --- /dev/null +++ b/chef/cookbooks/nova/templates/default/nova-placement.conf.erb @@ -0,0 +1,16 @@ +[placement] +os_region_name = <%= @keystone_settings['endpoint_region'] %> +auth_url = <%= @keystone_settings['admin_auth_url'] %> +project_name = <%= @keystone_settings['service_tenant'] %> +project_domain_name = <%= @keystone_settings["admin_domain"] %> +user_domain_name = <%= @keystone_settings['admin_domain'] %> +auth_type = password +username = <%= @placement_service_user %> +password = <%= @placement_service_password %> +os_interface = internal +insecure = <%= @placement_service_insecure %> + +<% if @placement_database_connection -%> +[placement_database] +connection = <%= @placement_database_connection %> +<% end -%> \ No newline at end of file diff --git a/chef/cookbooks/nova/templates/default/nova.conf.erb b/chef/cookbooks/nova/templates/default/nova.conf.erb index 45269a1ebf..91f2abb400 100644 --- a/chef/cookbooks/nova/templates/default/nova.conf.erb +++ b/chef/cookbooks/nova/templates/default/nova.conf.erb @@ -8,6 +8,7 @@ instance_name_template=zvm%05x my_ip = <%= node[:nova][:my_ip] %> <% unless @ironic_settings.nil? %>scheduler_host_manager = ironic_host_manager<% end %> notify_on_state_change = vm_and_task_state +notification_format = unversioned state_path = /var/lib/nova enabled_ssl_apis = <%= @ssl_enabled ? "osapi_compute,metadata" : "" %> osapi_compute_listen = <%= @bind_host %> @@ -34,9 +35,23 @@ reserved_host_memory_mb = <%= @reserved_host_memory %> cpu_allocation_ratio = <%= node[:nova][:scheduler][:cpu_allocation_ratio] %> ram_allocation_ratio = <%= node[:nova][:scheduler][:ram_allocation_ratio] %> disk_allocation_ratio = <%= node[:nova][:scheduler][:disk_allocation_ratio] %> -<% if @use_baremetal_filters %>scheduler_use_baremetal_filters = true<% end -%> -<% if @has_itxt -%>scheduler_available_filters = nova.scheduler.filters.standard_filters<% end %> -<% if @has_itxt %>scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,TrustedFilter<% end %> +<% if @use_baremetal_filters %> +scheduler_use_baremetal_filters = true +<% end -%> +<% if @has_itxt %> +scheduler_available_filters = nova.scheduler.filters.standard_filters +<% if @default_filters.empty? 
%> +scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,TrustedFilter +<% else %> +scheduler_default_filters = <%= @default_filters %> +<% end %> +<% elsif !@default_filters.empty? %> +scheduler_available_filters = nova.scheduler.filters.all_filters +scheduler_default_filters = <%= @default_filters %> +<% end %> +# Avoid scheduler conflicts when using HA +scheduler_max_attempts = 9 +scheduler_host_subset_size = 4 <% unless @track_instance_changes %>scheduler_tracks_instance_changes = false<% end %> <% if @libvirt_type.eql?('vmware') -%> compute_driver = vmwareapi.VMwareVCDriver @@ -90,6 +105,9 @@ control_exchange = nova <%= "zvm_user_profile=#{node[:nova][:zvm][:zvm_user_profile]}" if @libvirt_type.eql?('zvm') %> <%= "zvm_user_default_password=#{node[:nova][:zvm][:zvm_user_default_password]}" if @libvirt_type.eql?('zvm') %> <%= "zvm_user_default_privilege=#{node[:nova][:zvm][:zvm_user_default_privilege]}" if @libvirt_type.eql?('zvm') %> +<% unless @default_log_levels.length.zero? -%> +default_log_levels = <%= @default_log_levels.join(", ") %> +<% end -%> [api_database] <% if @api_database_connection -%> @@ -139,6 +157,7 @@ pool_timeout = <%= node[:nova][:db][:pool_timeout] %> [glance] <%= "host = #{@glance_server_host}" unless @glance_server_host.nil? %> <%= "port = #{@glance_server_port}" unless @glance_server_host.nil? %> +endpoint_type = internal protocol = <%= @glance_server_protocol %> <%= "api_servers = #{@glance_server_protocol}://#{@glance_server_host}:#{@glance_server_port}" unless @glance_server_host.nil? %> <%= "api_insecure = #{@glance_server_insecure ? 'True' : 'False'}" unless @glance_server_host.nil? %> @@ -192,6 +211,9 @@ live_migration_flag = VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MI <% else -%> block_migration_flag = VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_NON_SHARED_INC, VIR_MIGRATE_LIVE <% end -%> +# Timeout migration if less than 1MB/s RAM can be copied +live_migration_progress_timeout=0 +live_migration_completion_timeout=1000 <% end -%> <% end -%> <%= "disk_prefix = xvd" if @libvirt_type.eql?('xen') %> @@ -200,6 +222,10 @@ block_migration_flag = VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_M <% if @libvirt_type.eql?('kvm') %>use_virtio_for_bridges = true<% end %> <%= "volume_use_multipath = true" if @use_multipath %> <%= "iser_use_multipath = true" if @use_multipath %> +<% if @rng_device %> +rng_dev_path = <%= @rng_device %> +<% end %> +disk_cachemodes = <%= node[:nova][:kvm][:disk_cachemodes] %> [neutron] service_metadata_proxy = true @@ -210,6 +236,7 @@ auth_url = <%= KeystoneHelper.versioned_service_URL(@keystone_settings["protocol @keystone_settings["internal_url_host"], @keystone_settings["service_port"], "2.0") %> +endpoint_type = internal auth_type = password insecure = <%= @neutron_insecure ? 
'True' : 'False' %> password = <%= @neutron_service_password %> @@ -225,8 +252,10 @@ lock_path = /var/run/openstack lock_path = /var/run/nova <% end -%> +<% if @rabbit_settings[:enable_notifications] -%> [oslo_messaging_notifications] driver = messagingv2 +<% end -%> [oslo_messaging_rabbit] <% if @rabbit_settings[:cluster] -%> diff --git a/chef/cookbooks/nova/templates/default/qemu.conf.erb b/chef/cookbooks/nova/templates/default/qemu.conf.erb index d5c9cadc83..12aba019d5 100644 --- a/chef/cookbooks/nova/templates/default/qemu.conf.erb +++ b/chef/cookbooks/nova/templates/default/qemu.conf.erb @@ -400,7 +400,16 @@ group = "<%= @group %>" #max_processes = 0 #max_files = 0 +# If max_threads_per_process is set to a positive integer, libvirt +# will use it to set the maximum number of threads that can be +# created by a qemu process. Some VM configurations can result in +# qemu processes with tens of thousands of threads. systemd-based +# systems typically limit the number of threads per process to +# 16k. max_threads_per_process can be used to override default +# limits in the host OS. +# +max_threads_per_process = <%= @max_threads_per_process %> # mac_filter enables MAC addressed based filtering on bridge ports. # This currently requires ebtables to be installed. diff --git a/chef/cookbooks/postgresql/attributes/default.rb b/chef/cookbooks/postgresql/attributes/default.rb index 40ecfe9218..60ec5cf9e4 100644 --- a/chef/cookbooks/postgresql/attributes/default.rb +++ b/chef/cookbooks/postgresql/attributes/default.rb @@ -132,14 +132,18 @@ default["postgresql"]["contrib"]["packages"] = ["postgresql-contrib"] when node["platform_version"].to_f < 12.0 default["postgresql"]["version"] = "9.1" - default["postgresql"]["client"]["packages"] = ["postgresql91", - "ruby#{node["languages"]["ruby"]["version"].to_f}-rubygem-pg"] + default["postgresql"]["client"]["packages"] = [ + "postgresql91", + "ruby2.1-rubygem-pg" + ] default["postgresql"]["server"]["packages"] = ["postgresql91-server"] default["postgresql"]["contrib"]["packages"] = ["postgresql91-contrib"] when node["platform_version"].to_f == 12.0 default["postgresql"]["version"] = "9.3" - default["postgresql"]["client"]["packages"] = ["postgresql93", - "ruby#{node["languages"]["ruby"]["version"].to_f}-rubygem-pg"] + default["postgresql"]["client"]["packages"] = [ + "postgresql93", + "ruby2.1-rubygem-pg" + ] default["postgresql"]["server"]["packages"] = ["postgresql93-server"] default["postgresql"]["contrib"]["packages"] = ["postgresql93-contrib"] else @@ -160,7 +164,7 @@ default["postgresql"]["version"] = "9.4" default["postgresql"]["client"]["packages"] = [ "postgresql94", - "ruby#{node["languages"]["ruby"]["version"].to_f}-rubygem-pg" + "ruby2.1-rubygem-pg" ] default["postgresql"]["server"]["packages"] = ["postgresql94-server"] default["postgresql"]["contrib"]["packages"] = ["postgresql94-contrib"] @@ -219,6 +223,7 @@ default["postgresql"]["config"]["log_truncate_on_rotation"] = true default["postgresql"]["config"]["log_rotation_age"] = "1d" default["postgresql"]["config"]["log_rotation_size"] = 0 + default["postgresql"]["config"]["log_line_prefix"] = "%t " default["postgresql"]["config"]["datestyle"] = "iso, mdy" default["postgresql"]["config"]["lc_messages"] = "en_US.UTF-8" default["postgresql"]["config"]["lc_monetary"] = "en_US.UTF-8" diff --git a/chef/cookbooks/postgresql/recipes/ha.rb b/chef/cookbooks/postgresql/recipes/ha.rb index 313759f993..2e5ffd90ef 100644 --- a/chef/cookbooks/postgresql/recipes/ha.rb +++ 
b/chef/cookbooks/postgresql/recipes/ha.rb @@ -22,14 +22,14 @@ # # This is the second step. -vip_primitive = "vip-admin-#{CrowbarDatabaseHelper.get_ha_vhostname(node)}" +vip_primitive = "vip-admin-#{CrowbarDatabaseHelper.get_ha_vhostname(node, "postgresql")}" service_name = "postgresql" fs_primitive = "fs-#{service_name}" group_name = "g-#{service_name}" agent_name = "ocf:heartbeat:pgsql" -ip_addr = CrowbarDatabaseHelper.get_listen_address(node) +ip_addr = CrowbarDatabaseHelper.get_listen_address(node, "postgresql") postgres_op = {} postgres_op["monitor"] = {} @@ -85,7 +85,7 @@ end transaction_objects << "pacemaker_primitive[#{service_name}]" -if node[:database][:ha][:storage][:mode] == "drbd" +if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" colocation_constraint = "col-#{service_name}" pacemaker_colocation colocation_constraint do @@ -108,7 +108,7 @@ vip_location_name = openstack_pacemaker_controller_only_location_for vip_primitive transaction_objects << "pacemaker_location[#{vip_location_name}]" - location_name = openstack_pacemaker_controller_only_location_for service_name + location_name = openstack_pacemaker_drbd_controller_only_location_for service_name transaction_objects << "pacemaker_location[#{location_name}]" else diff --git a/chef/cookbooks/postgresql/recipes/ha_storage.rb b/chef/cookbooks/postgresql/recipes/ha_storage.rb index 837b4f52bb..6c8be1a411 100644 --- a/chef/cookbooks/postgresql/recipes/ha_storage.rb +++ b/chef/cookbooks/postgresql/recipes/ha_storage.rb @@ -37,21 +37,24 @@ fs_params = {} fs_params["directory"] = "/var/lib/pgsql" -if node[:database][:ha][:storage][:mode] == "drbd" - include_recipe "crowbar-pacemaker::drbd" +if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" + + if CrowbarPacemakerHelper.drbd_node?(node) + include_recipe "crowbar-pacemaker::drbd" + end crowbar_pacemaker_drbd drbd_resource do - size "#{node[:database][:ha][:storage][:drbd][:size]}G" + size "#{node[:database][:postgresql][:ha][:storage][:drbd][:size]}G" action :nothing end.run_action(:create) fs_params["device"] = node["drbd"]["rsc"][drbd_resource]["device"] fs_params["fstype"] = "xfs" -elsif node[:database][:ha][:storage][:mode] == "shared" - fs_params["device"] = node[:database][:ha][:storage][:shared][:device] - fs_params["fstype"] = node[:database][:ha][:storage][:shared][:fstype] - unless node[:database][:ha][:storage][:shared][:options].empty? - fs_params["options"] = node[:database][:ha][:storage][:shared][:options] +elsif node[:database][:postgresql][:ha][:storage][:mode] == "shared" + fs_params["device"] = node[:database][:postgresql][:ha][:storage][:shared][:device] + fs_params["fstype"] = node[:database][:postgresql][:ha][:storage][:shared][:fstype] + unless node[:database][:postgresql][:ha][:storage][:shared][:options].empty? + fs_params["options"] = node[:database][:postgresql][:ha][:storage][:shared][:options] end else raise "Invalid mode for HA storage!" 
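For illustration, the fs_params hash assembled above ends up along these lines for the two supported storage modes (a minimal sketch; the device names, fstype and options below are hypothetical examples, not shipped defaults):

    # drbd mode: the device comes from the DRBD resource created above
    fs_params = {
      "directory" => "/var/lib/pgsql",
      "device" => "/dev/drbd0", # illustrative; read from node["drbd"]["rsc"][drbd_resource]["device"]
      "fstype" => "xfs"
    }

    # shared mode: device, fstype and options come from the proposal attributes
    fs_params = {
      "directory" => "/var/lib/pgsql",
      "device" => "/dev/disk/by-id/scsi-example", # illustrative
      "fstype" => "ext4",                         # illustrative
      "options" => "noatime"                      # only set when the attribute is non-empty
    }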
@@ -71,7 +74,7 @@ transaction_objects = [] -if node[:database][:ha][:storage][:mode] == "drbd" +if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" drbd_params = {} drbd_params["drbd_resource"] = drbd_resource @@ -104,7 +107,7 @@ end transaction_objects << "pacemaker_ms[#{ms_name}]" - location_name = openstack_pacemaker_controller_location_ignoring_upgrade_for ms_name + location_name = openstack_pacemaker_drbd_controller_only_location_for ms_name transaction_objects << "pacemaker_location[#{location_name}]" end @@ -117,10 +120,14 @@ end transaction_objects << "pacemaker_primitive[#{fs_primitive}]" -location_name = openstack_pacemaker_controller_only_location_for fs_primitive +location_name = if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" + openstack_pacemaker_drbd_controller_only_location_for fs_primitive +else + openstack_pacemaker_controller_only_location_for fs_primitive +end transaction_objects << "pacemaker_location[#{location_name}]" -if node[:database][:ha][:storage][:mode] == "drbd" +if node[:database][:postgresql][:ha][:storage][:mode] == "drbd" colocation_constraint = "col-#{fs_primitive}" pacemaker_colocation colocation_constraint do score "inf" diff --git a/chef/cookbooks/postgresql/recipes/server.rb b/chef/cookbooks/postgresql/recipes/server.rb index 9d7d272df7..3cd4285ad6 100644 --- a/chef/cookbooks/postgresql/recipes/server.rb +++ b/chef/cookbooks/postgresql/recipes/server.rb @@ -28,7 +28,7 @@ dirty = false # For Crowbar, we need to set the address to bind - default to admin node. -newaddr = CrowbarDatabaseHelper.get_listen_address(node) +newaddr = CrowbarDatabaseHelper.get_listen_address(node, "postgresql") if node["postgresql"]["config"]["listen_addresses"] != newaddr node.set["postgresql"]["config"]["listen_addresses"] = newaddr dirty = true @@ -121,7 +121,7 @@ notifies change_notify, "service[postgresql]", :immediately end -ha_enabled = node[:database][:ha][:enabled] +ha_enabled = node[:database][:postgresql][:ha][:enabled] if ha_enabled log "HA support for postgresql is enabled" diff --git a/chef/cookbooks/postgresql/recipes/server_debian.rb b/chef/cookbooks/postgresql/recipes/server_debian.rb index 78da2a6c27..4258923b17 100644 --- a/chef/cookbooks/postgresql/recipes/server_debian.rb +++ b/chef/cookbooks/postgresql/recipes/server_debian.rb @@ -28,7 +28,7 @@ # We need to include the HA recipe early, before the config files are # generated, but after the postgresql packages are installed since they live in # the directory that will be mounted for HA -if node[:database][:ha][:enabled] +if node[:database][:postgresql][:ha][:enabled] include_recipe "postgresql::ha_storage" end diff --git a/chef/cookbooks/postgresql/recipes/server_redhat.rb b/chef/cookbooks/postgresql/recipes/server_redhat.rb index f2139a5c10..5c3a26a671 100644 --- a/chef/cookbooks/postgresql/recipes/server_redhat.rb +++ b/chef/cookbooks/postgresql/recipes/server_redhat.rb @@ -49,7 +49,7 @@ package pg_pack end -ha_enabled = node[:database][:ha][:enabled] +ha_enabled = node[:database][:postgresql][:ha][:enabled] # We need to include the HA recipe early, before the config files are # generated, but after the postgresql packages are installed since they live in diff --git a/chef/cookbooks/rabbitmq/attributes/default.rb b/chef/cookbooks/rabbitmq/attributes/default.rb index 1ccd279d1d..3b7ea07a81 100644 --- a/chef/cookbooks/rabbitmq/attributes/default.rb +++ b/chef/cookbooks/rabbitmq/attributes/default.rb @@ -27,20 +27,14 @@ default[:rabbitmq][:rabbitmq_group] = "rabbitmq" 
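The empty users list added at the end of this attributes file is consumed by the rabbit recipe and the definitions.json template further down in this patch; a minimal sketch of the entry shape those consumers expect (field names are taken from the recipe code, all values purely illustrative):

    default[:rabbitmq][:users] = [
      {
        username: "monitoring",      # illustrative user name
        password: "changeme",        # illustrative password
        permissions: ["", "", ".*"], # [configure, write, read] regexes, per definitions.json.erb
        tags: ["monitoring"]         # joined with "," for rabbitmqctl set_user_tags
      }
    ]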
default[:rabbitmq][:nodename] = "rabbit@#{node[:hostname]}" -# This is the address for internal usage -default[:rabbitmq][:address] = nil -# These are all the addresses, possibly including public one -default[:rabbitmq][:addresses] = [] default[:rabbitmq][:port] = 5672 default[:rabbitmq][:management_port] = 15672 -default[:rabbitmq][:management_address] = nil default[:rabbitmq][:configfile] = nil default[:rabbitmq][:logdir] = nil default[:rabbitmq][:mnesiadir] = nil default[:rabbitmq][:cluster] = false default[:rabbitmq][:clustername] = "rabbit@#{node[:hostname]}" -default[:rabbitmq][:erlang_cookie_path] = "/var/lib/rabbitmq/.erlang.cookie" # ha default[:rabbitmq][:ha][:enabled] = false @@ -66,3 +60,6 @@ default[:rabbitmq][:ha][:clustered_rmq_features] = true end end + +# create empty users list as it is expected by some recipes +default[:rabbitmq][:users] = [] diff --git a/chef/cookbooks/rabbitmq/libraries/crowbar.rb b/chef/cookbooks/rabbitmq/libraries/crowbar.rb index 1e0fe643a3..faaad1e220 100644 --- a/chef/cookbooks/rabbitmq/libraries/crowbar.rb +++ b/chef/cookbooks/rabbitmq/libraries/crowbar.rb @@ -24,4 +24,8 @@ def self.get_public_listen_address(node) Chef::Recipe::Barclamp::Inventory.get_network_by_type(node, "public").address end end + + def self.get_management_address(node) + get_listen_address(node) + end end diff --git a/chef/cookbooks/rabbitmq/metadata.rb b/chef/cookbooks/rabbitmq/metadata.rb index 7695a1b4d0..4d24cea35a 100644 --- a/chef/cookbooks/rabbitmq/metadata.rb +++ b/chef/cookbooks/rabbitmq/metadata.rb @@ -25,10 +25,6 @@ description: "The Erlang node name for this server.", default: "node[:hostname]" -attribute "rabbitmq/address", - display_name: "RabbitMQ server IP address", - description: "IP address to bind." - attribute "rabbitmq/port", display_name: "RabbitMQ server port", description: "TCP port to bind." diff --git a/chef/cookbooks/rabbitmq/recipes/default.rb b/chef/cookbooks/rabbitmq/recipes/default.rb index 0a6d3242bf..8bc6be16eb 100644 --- a/chef/cookbooks/rabbitmq/recipes/default.rb +++ b/chef/cookbooks/rabbitmq/recipes/default.rb @@ -18,11 +18,15 @@ # limitations under the License. # +listen_address = CrowbarRabbitmqHelper.get_listen_address(node) +addresses = [listen_address] +if node[:rabbitmq][:listen_public] + addresses << CrowbarRabbitmqHelper.get_public_listen_address(node) +end + ha_enabled = node[:rabbitmq][:ha][:enabled] # we only do cluster if we do HA cluster_enabled = node[:rabbitmq][:cluster] && ha_enabled -# dont let the changes to the templates restart the rabbitmq in cluster mode -service_action = cluster_enabled ? 
:nothing : :restart quorum = CrowbarPacemakerHelper.num_corosync_nodes(node) / 2 + 1 cluster_partition_handling = if cluster_enabled @@ -57,7 +61,7 @@ group "root" mode 0o644 variables( - listen_address: node[:rabbitmq][:address] + listen_address: listen_address ) only_if "grep -q Requires=epmd.service /usr/lib/systemd/system/rabbitmq-server.service" end @@ -94,7 +98,7 @@ owner "root" group "root" mode 0644 - notifies service_action, "service[rabbitmq-server]" + notifies :restart, "service[rabbitmq-server]" end `systemd-detect-virt -v -q` @@ -109,9 +113,11 @@ variables( cluster_enabled: cluster_enabled, cluster_partition_handling: cluster_partition_handling, + addresses: addresses, + management_address: CrowbarRabbitmqHelper.get_management_address(node), hipe_compile: hipe_compile ) - notifies service_action, "service[rabbitmq-server]" + notifies :restart, "service[rabbitmq-server]" end # create a file with definitions to load on start, to be 100% sure we always @@ -132,7 +138,8 @@ json_trove_password: node[:rabbitmq][:trove][:password].to_json, json_trove_vhost: node[:rabbitmq][:trove][:vhost].to_json, ha_all_policy: cluster_enabled, - quorum: quorum + quorum: quorum, + extra_users: node[:rabbitmq][:users] ) # no notification to restart rabbitmq, as we still do changes with # rabbitmqctl in the rabbit.rb recipe (this is less disruptive) @@ -154,7 +161,7 @@ environment "HOME" => "/root/" code "#{rabbitmq_plugins} #{rabbitmq_plugins_param} enable rabbitmq_management > /dev/null" not_if "#{rabbitmq_plugins} list -E | grep rabbitmq_management -q", environment: {"HOME" => "/root/"} - notifies service_action, "service[rabbitmq-server]" + notifies :restart, "service[rabbitmq-server]" end service "rabbitmq-server" do diff --git a/chef/cookbooks/rabbitmq/recipes/ha.rb b/chef/cookbooks/rabbitmq/recipes/ha.rb index 3c24ee52aa..4d74d5febf 100644 --- a/chef/cookbooks/rabbitmq/recipes/ha.rb +++ b/chef/cookbooks/rabbitmq/recipes/ha.rb @@ -33,7 +33,10 @@ fs_params = {} fs_params["directory"] = "/var/lib/rabbitmq" if node[:rabbitmq][:ha][:storage][:mode] == "drbd" - include_recipe "crowbar-pacemaker::drbd" + + if CrowbarPacemakerHelper.drbd_node?(node) + include_recipe "crowbar-pacemaker::drbd" + end crowbar_pacemaker_drbd drbd_resource do size "#{node[:rabbitmq][:ha][:storage][:drbd][:size]}G" @@ -74,6 +77,7 @@ # on anyway. 
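# Illustrative note (not part of the recipe): with DRBD or shared storage,
# /var/lib/rabbitmq moves between cluster nodes, so the rabbitmq uid/gid must
# be identical everywhere for the files to keep a valid owner after a
# failover; e.g. `getent passwd rabbitmq` should report the same uid
# (91, below) on every node.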
static_uid = 91 static_gid = 91 +ssl_keyfile = node[:rabbitmq][:ssl][:keyfile] bash "assign static uid to rabbitmq" do code < /dev/null; @@ -83,6 +87,7 @@ chown rabbitmq:rabbitmq /var/run/rabbitmq /var/log/rabbitmq; chown rabbitmq:rabbitmq /var/run/rabbitmq/pid /var/log/rabbitmq/*.log* || :; chgrp rabbitmq /etc/rabbitmq/definitions.json; +test -e #{ssl_keyfile} && chgrp rabbitmq #{ssl_keyfile} || :; EOC # Make any error in the commands fatal flags "-e" @@ -133,7 +138,7 @@ end storage_transaction_objects << "pacemaker_ms[#{ms_name}]" - ms_location_name = openstack_pacemaker_controller_location_ignoring_upgrade_for ms_name + ms_location_name = openstack_pacemaker_drbd_controller_only_location_for ms_name storage_transaction_objects << "pacemaker_location[#{ms_location_name}]" end @@ -146,7 +151,12 @@ end storage_transaction_objects << "pacemaker_primitive[#{fs_primitive}]" -fs_location_name = openstack_pacemaker_controller_only_location_for fs_primitive +fs_location_name = if node[:rabbitmq][:ha][:storage][:mode] == "drbd" + openstack_pacemaker_drbd_controller_only_location_for fs_primitive +else + openstack_pacemaker_controller_only_location_for fs_primitive +end + storage_transaction_objects << "pacemaker_location[#{fs_location_name}]" if node[:rabbitmq][:ha][:storage][:mode] == "drbd" @@ -318,7 +328,7 @@ service_transaction_objects << "pacemaker_location[#{public_vip_location_name}]" end - location_name = openstack_pacemaker_controller_only_location_for service_name + location_name = openstack_pacemaker_drbd_controller_only_location_for service_name service_transaction_objects << "pacemaker_location[#{location_name}]" else diff --git a/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb b/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb index e2e7bca39e..fd86778b39 100644 --- a/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb +++ b/chef/cookbooks/rabbitmq/recipes/ha_cluster.rb @@ -17,14 +17,8 @@ agent_name = "ocf:rabbitmq:rabbitmq-server-ha" -# set the shared rabbitmq cookie -# cookie is automatically set during barclamp apply -# on the apply_role_pre_chef_call method -file node[:rabbitmq][:erlang_cookie_path] do - content node[:rabbitmq][:erlang_cookie] - owner node[:rabbitmq][:rabbitmq_user] - group node[:rabbitmq][:rabbitmq_group] -end +service_name = "rabbitmq" +ms_name = "ms-#{service_name}" # create file that will be sourced by OCF resource agent on promote template "/etc/rabbitmq/ocf-promote" do @@ -37,16 +31,74 @@ ) end +# wait for service to have a master, and to be active +ruby_block "wait for #{ms_name} to be started" do + block do + require "timeout" + begin + Timeout.timeout(360) do + # Check that the service has a master + cmd = "crm resource show #{ms_name} 2> /dev/null " + cmd << "| grep \"is running on\" | grep -q \"Master\"" + until ::Kernel.system(cmd) + Chef::Log.info("#{ms_name} still without master") + sleep(2) + end + + # Check that the service is running on this node + cmd = "crm resource show #{ms_name} 2> /dev/null " + cmd << "| grep -q \"is running on: #{node.hostname}\"" + until ::Kernel.system(cmd) + Chef::Log.info("#{ms_name} still not running locally") + sleep(2) + end + + # The sed command grabs everything between '{running_applications' + # and ']}', and what we want is that the rabbit application is + # running + # Check that the actual rabbit app is running properly at least 5 times in a row, + # so as to prevent continuing when it's not stable enough + cmd = "rabbitmqctl -q status 2> /dev/null " + cmd << "| sed -n '/{running_applications/,/\]}/p' | grep -q '{rabbit,'" + count = 0 + until count == 5 + if ::Kernel.system(cmd) + count += 1 + sleep(2) + else + count = 0 + end + end + + # Check that we don't have any pending pacemaker resource operations + cmd = "crm resource operations #{ms_name} 2> /dev/null " + cmd << "| grep -q \"pending\"" + while ::Kernel.system(cmd) + Chef::Log.info("resource #{ms_name} still has pending operations") + sleep(2) + end + end + rescue Timeout::Error + message = "The #{ms_name} pacemaker resource is not started or doesn't have a master yet." + message << " Please manually check for an error." + Chef::Log.fatal(message) + raise message + end + end + action :nothing +end + # Wait for all nodes to reach this point so we know that all nodes will have # all the required packages installed before we create the pacemaker # resources crowbar_pacemaker_sync_mark "sync-rabbitmq_before_ha" -crowbar_pacemaker_sync_mark "wait-rabbitmq_ha_resources" +crowbar_pacemaker_sync_mark "wait-rabbitmq_ha_resources" do + timeout 300 +end transaction_objects = [] -service_name = "rabbitmq" pacemaker_primitive service_name do agent agent_name # nodename is empty so that we explicitly depend on the config files @@ -71,7 +123,6 @@ # no location on the role here: the ms resource will have this constraint -ms_name = "ms-#{service_name}" pacemaker_ms ms_name do rsc service_name meta ({ @@ -95,37 +146,145 @@ # note that this will also automatically start the resources action :commit_new only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + notifies :create, resources(ruby_block: "wait for #{ms_name} to be started"), :immediately end crowbar_pacemaker_sync_mark "create-rabbitmq_ha_resources" -# wait for service to have a master, and to be active -ruby_block "wait for #{ms_name} to be started" do - block do - require "timeout" - begin - Timeout.timeout(240) do - # Check that the service is running - cmd = "crm resource show #{ms_name} 2> /dev/null " - cmd << "| grep -q \"is running on\"" - until ::Kernel.system(cmd) - Chef::Log.debug("#{ms_name} still not started") - sleep(2) - end - # The sed command grabs everything between '{running_applications' - # and ']}', and what we want is that the rabbit application is - # running - cmd = "rabbitmqctl -q status 2> /dev/null " - cmd << "| sed -n '/{running_applications/,/\]}/p' | grep -q '{rabbit,'" - until ::Kernel.system(cmd) - Chef::Log.debug("#{ms_name} still not answering") - sleep(2) - end - end - rescue Timeout::Error - message = "The #{ms_name} pacemaker resource is not started. Please manually check for an error."
- Chef::Log.fatal(message) - raise message +clustermon_op = { "monitor" => [{ "interval" => "10s" }] } +clustermon_params = { "extra_options" => "-E /usr/bin/rabbitmq-alert-handler.sh --watch-fencing" } +name = "rabbitmq-port-blocker" +clone_name = "cl-#{name}" +location_name = "l-#{name}-controller" +node_upgrading = CrowbarPacemakerHelper.being_upgraded?(node) +clone_running = "crm resource show #{clone_name}" +primitive_running = "crm resource show #{name}" +port = node[:rabbitmq][:port] +ssl_port = node[:rabbitmq][:ssl][:port] + +crowbar_pacemaker_sync_mark "wait-rabbitmq_alert_resources" + +if CrowbarPacemakerHelper.cluster_nodes(node).size > 2 && !node_upgrading + template "/usr/bin/rabbitmq-alert-handler.sh" do + source "rabbitmq-alert-handler.erb" + owner "root" + group "root" + mode "0755" + variables(node: node, nodes: CrowbarPacemakerHelper.cluster_nodes(node)) + end + + template "/usr/bin/#{name}.sh" do + source "#{name}.erb" + owner "root" + group "root" + mode "0755" + variables(total_nodes: CrowbarPacemakerHelper.cluster_nodes(node).size, + port: port, ssl_port: ssl_port) + end + + pacemaker_primitive name do + agent "ocf:pacemaker:ClusterMon" + op clustermon_op + params clustermon_params + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + + pacemaker_clone clone_name do + rsc name + meta CrowbarPacemakerHelper.clone_meta(node) + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + + pacemaker_location location_name do + definition OpenStackHAHelper.controller_only_location(location_name, clone_name) + action :update + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + + pacemaker_transaction name do + cib_objects [ + "pacemaker_primitive[#{name}]", + "pacemaker_clone[#{clone_name}]", + "pacemaker_location[#{location_name}]" + ] + # note that this will also automatically start the resources + action :commit_new + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end +else + pacemaker_location location_name do + definition OpenStackHAHelper.controller_only_location(location_name, clone_name) + action :delete + only_if { CrowbarPacemakerHelper.is_cluster_founder?(node) } + end + + pacemaker_clone "#{clone_name}_stop" do + name clone_name + rsc name + meta CrowbarPacemakerHelper.clone_meta(node) + action :stop + only_if do + running = system(clone_running, err: File::NULL) + CrowbarPacemakerHelper.is_cluster_founder?(node) && running end - end # block -end # ruby_block + end + + pacemaker_clone "#{clone_name}_delete" do + name clone_name + rsc name + meta CrowbarPacemakerHelper.clone_meta(node) + action :delete + only_if do + running = system(clone_running, err: File::NULL) + CrowbarPacemakerHelper.is_cluster_founder?(node) && running + end + end + + pacemaker_primitive "#{name}_stop" do + agent "ocf:pacemaker:ClusterMon" + name name + op clustermon_op + params clustermon_params + action :stop + only_if do + running = system(primitive_running, err: File::NULL) + CrowbarPacemakerHelper.is_cluster_founder?(node) && running + end + end + + pacemaker_primitive "#{name}_delete" do + agent "ocf:pacemaker:ClusterMon" + name name + op clustermon_op + params clustermon_params + action :delete + only_if do + running = system(primitive_running, err: File::NULL) + CrowbarPacemakerHelper.is_cluster_founder?(node) && running + end + end + + file "/usr/bin/rabbitmq-alert-handler.sh" do + action :delete + end + + file "/usr/bin/#{name}.sh" do + action :delete + end + + # in case that the script 
was already deployed and the rule is already stored, we need to clean it + up so that nothing is left behind + bash "Remove existing rabbitmq blocking rules" do + code "iptables -D INPUT -p tcp --destination-port 5672 "\ + "-m comment --comment \"rabbitmq port blocker (no quorum)\" -j DROP" + only_if do + # check for the rule + cmd = "iptables -L -n | grep -F \"tcp dpt:5672 /* rabbitmq port blocker (no quorum) */\"" + system(cmd) + end + end +end + +crowbar_pacemaker_sync_mark "create-rabbitmq_alert_resources" diff --git a/chef/cookbooks/rabbitmq/recipes/monitor.rb b/chef/cookbooks/rabbitmq/recipes/monitor.rb index 1d7e4d5d48..34cdacc800 100644 --- a/chef/cookbooks/rabbitmq/recipes/monitor.rb +++ b/chef/cookbooks/rabbitmq/recipes/monitor.rb @@ -28,5 +28,8 @@ mode "0644" group node[:nagios][:group] owner node[:nagios][:user] + variables( + listen_address: CrowbarRabbitmqHelper.get_listen_address(node) + ) notifies :restart, "service[nagios-nrpe-server]" end diff --git a/chef/cookbooks/rabbitmq/recipes/rabbit.rb b/chef/cookbooks/rabbitmq/recipes/rabbit.rb index 8b0c890574..16a11c8115 100644 --- a/chef/cookbooks/rabbitmq/recipes/rabbit.rb +++ b/chef/cookbooks/rabbitmq/recipes/rabbit.rb @@ -24,25 +24,7 @@ dirty = false -listen_address = CrowbarRabbitmqHelper.get_listen_address(node) -if node[:rabbitmq][:address] != listen_address - node.set[:rabbitmq][:address] = listen_address - dirty = true -end -if node[:rabbitmq][:management_address] != listen_address - node.set[:rabbitmq][:management_address] = listen_address - dirty = true -end - -addresses = [node[:rabbitmq][:address]] -if node[:rabbitmq][:listen_public] - addresses << CrowbarRabbitmqHelper.get_public_listen_address(node) -end -if node[:rabbitmq][:addresses] != addresses - node.set[:rabbitmq][:addresses] = addresses - dirty = true -end - +management_address = CrowbarRabbitmqHelper.get_management_address(node) nodename = "rabbit@#{CrowbarRabbitmqHelper.get_ha_vhostname(node)}" if cluster_enabled @@ -112,7 +94,7 @@ rabbitmq_user "adding user #{node[:rabbitmq][:user]}" do user node[:rabbitmq][:user] password node[:rabbitmq][:password] - address node[:rabbitmq][:management_address] + address management_address port node[:rabbitmq][:management_port] action :add only_if only_if_command if ha_enabled @@ -134,13 +116,56 @@ only_if only_if_command if ha_enabled end +node[:rabbitmq][:users].each do |user| + # create extra users + rabbitmq_user "adding user #{user[:username]}" do + user user[:username] + password user[:password] + address management_address + port node[:rabbitmq][:management_port] + action :add + only_if only_if_command if ha_enabled + end + + # add permissions for those extra users on the default vhost + rabbitmq_user "setting permissions for #{user[:username]}" do + user user[:username] + vhost node[:rabbitmq][:vhost] + # permissions is a list but the resource needs an escaped string + permissions user[:permissions].map { |x| "'#{x}'" }.join(" ") + action :set_permissions + only_if only_if_command if ha_enabled + end + + # set the configured tags on those users + execute "rabbitmqctl set_user_tags #{user[:username]} #{user[:tags].join(",")}" do + not_if "rabbitmqctl list_users | grep #{user[:username]} | grep -q #{user[:tags].join(",")}" + action :run + only_if only_if_command if ha_enabled + end +end + if cluster_enabled - quorum = CrowbarPacemakerHelper.num_corosync_nodes(node) / 2 + 1 + if node[:rabbitmq][:enable_queue_mirroring] + # mirror each queue to a majority of nodes; integer division, + # e.g. 3 corosync nodes -> 3 / 2 + 1 = 2 mirrors per queue + quorum = CrowbarPacemakerHelper.num_corosync_nodes(node) / 2 + 1 + else + quorum = 1 + end + + # don't mirror queues whose names match 'amq.*', '*_fanout_*' or 'reply_*' + queue_regex = "^(?!(amq\.)|(.*_fanout_)|(reply_)).*" + # the policy doesn't need spaces between elements, as they are removed when + # listing policies, which would make the policy harder to check for + policy = "{\"ha-mode\":\"exactly\",\"ha-params\":#{quorum},\"ha-sync-mode\":\"automatic\"}" + vhost = node[:rabbitmq][:vhost] + # we need to escape the regex properly so we can use it in the grep command + queue_regex_escaped = "" + queue_regex.split("").each { |c| queue_regex_escaped << "\\" + c } - set_policy_command = "rabbitmqctl set_policy -p #{node[:rabbitmq][:vhost]} --apply-to queues " \ - " ha-queues '^(?!amq\.).*' '{\"ha-mode\": \"exactly\", \"ha-params\": #{quorum}}'" - check_policy_command = "rabbitmqctl list_policies -p #{node[:rabbitmq][:vhost]} | " \ - " grep -q '^#{node[:rabbitmq][:vhost]}\\s*ha-queues\\s'" + set_policy_command = "rabbitmqctl set_policy -p #{vhost} --apply-to queues " \ + " ha-queues '#{queue_regex}' '#{policy}'" + check_policy_command = "rabbitmqctl list_policies -p #{vhost} | " \ + " grep -Eq '^#{vhost}\\s*ha-queues\\s*queues\\s*#{queue_regex_escaped}\\s*#{policy}\\s*0$'" execute set_policy_command do not_if check_policy_command @@ -166,7 +191,7 @@ rabbitmq_user "adding user #{node[:rabbitmq][:trove][:user]}" do user node[:rabbitmq][:trove][:user] password node[:rabbitmq][:trove][:password] - address node[:rabbitmq][:management_address] + address management_address port node[:rabbitmq][:management_port] action :add only_if only_if_command if ha_enabled @@ -184,7 +209,7 @@ else rabbitmq_user "deleting user #{node[:rabbitmq][:trove][:user]}" do user node[:rabbitmq][:trove][:user] - address node[:rabbitmq][:management_address] + address management_address port node[:rabbitmq][:management_port] action :delete only_if only_if_command if ha_enabled diff --git a/chef/cookbooks/rabbitmq/templates/default/definitions.json.erb b/chef/cookbooks/rabbitmq/templates/default/definitions.json.erb index d31e26f02a..7c77fc0532 100644 --- a/chef/cookbooks/rabbitmq/templates/default/definitions.json.erb +++ b/chef/cookbooks/rabbitmq/templates/default/definitions.json.erb @@ -34,6 +34,13 @@ "password": <%= @json_trove_password %>, "tags": "" }, <% end -%> +<% @extra_users.each do |user| -%> + { + "name": "<%= user[:username] %>", + "password": "<%= user[:password] %>", + "tags": "<%= user[:tags].join(',') %>" + }, +<% end -%> { "name": <%= @json_user %>, @@ -50,6 +57,15 @@ "read": ".*", "write": ".*" }, <% end -%> +<% @extra_users.each do |user| -%> + { + "user": "<%= user[:username] %>", + "vhost": <%= @json_vhost %>, + "configure": "<%= user[:permissions][0] %>", + "read": "<%= user[:permissions][2] %>", + "write": "<%= user[:permissions][1] %>" + }, +<% end -%> { "user": <%= @json_user %>, diff --git a/chef/cookbooks/rabbitmq/templates/default/rabbitmq-alert-handler.erb b/chef/cookbooks/rabbitmq/templates/default/rabbitmq-alert-handler.erb new file mode 100644 index 0000000000..4c104de656 --- /dev/null +++ b/chef/cookbooks/rabbitmq/templates/default/rabbitmq-alert-handler.erb @@ -0,0 +1,7 @@ +#!/bin/sh + +# exit unless this is a rabbitmq alert for a monitor operation +[ "${CRM_notify_rsc}" = "rabbitmq" -a "${CRM_notify_task}" = "monitor" ] || exit 0 + +# launch the blocker in exclusive mode +flock /var/lock/rabbit /usr/bin/rabbitmq-port-blocker.sh \ No newline at end of file diff --git a/chef/cookbooks/rabbitmq/templates/default/rabbitmq-port-blocker.erb b/chef/cookbooks/rabbitmq/templates/default/rabbitmq-port-blocker.erb new file mode 100644 index 0000000000..5d457b93c7 --- /dev/null +++ b/chef/cookbooks/rabbitmq/templates/default/rabbitmq-port-blocker.erb @@ -0,0 +1,43 @@ +#!/bin/sh + +# calculate the blocking level by applying the formula total_nodes / 2 +total_nodes=<%= @total_nodes %> +blocking_level=$(expr $total_nodes / 2) +comment_text="rabbitmq port blocker (no quorum)" +port=<%= @port %> +ssl_port=<%= @ssl_port %> + +# get the number of running rabbitmq nodes in the current cluster +running_nodes() +{ + rabbitmqctl cluster_status 2>/dev/null | tr -d "\n" | sed -e 's/running_nodes,/\nrunning_nodes/g'| grep running_nodes | cut -d "[" -f2 | cut -d "]" -f1 | tr "," "\n" | wc -l +} + +# check if the blocking rule for rabbitmq clients exists +check_rule() +{ + iptables -L -n | grep -F "tcp dpt:$1 /* $comment_text */" | grep DROP | wc -l +} + +create_rule() { + if [ $(check_rule $1) -eq 0 ]; then + iptables -A INPUT -p tcp --destination-port $1 -m comment --comment "$comment_text" -j DROP + fi +} + +delete_rule() { + if [ $(check_rule $1) -gt 0 ]; then + iptables -D INPUT -p tcp --destination-port $1 -m comment --comment "$comment_text" -j DROP + fi +} + +# if the number of running nodes is less than or equal to the blocking level... +if [ $(running_nodes) -le $blocking_level ]; then + # add the rule, if not already present, to block the client ports + create_rule $port + create_rule $ssl_port +else + # otherwise delete the blocking rule if it exists + delete_rule $port + delete_rule $ssl_port +fi diff --git a/chef/cookbooks/rabbitmq/templates/default/rabbitmq.config.erb b/chef/cookbooks/rabbitmq/templates/default/rabbitmq.config.erb index 99437a9fcc..9b3e629faa 100644 --- a/chef/cookbooks/rabbitmq/templates/default/rabbitmq.config.erb +++ b/chef/cookbooks/rabbitmq/templates/default/rabbitmq.config.erb @@ -8,11 +8,11 @@ {rabbit, [ {tcp_listeners, [ - <%= node[:rabbitmq][:addresses].map { |address| "{\"#{address}\", #{node[:rabbitmq][:port]}}" }.join(", ") %> + <%= @addresses.map { |address| "{\"#{address}\", #{node[:rabbitmq][:port]}}" }.join(", ") %> ]}, <% if node[:rabbitmq][:ssl][:enabled] -%> {ssl_listeners, [ - <%= node[:rabbitmq][:addresses].map { |address| "{\"#{address}\", #{node[:rabbitmq][:ssl][:port]}}" }.join(", ") %> + <%= @addresses.map { |address| "{\"#{address}\", #{node[:rabbitmq][:ssl][:port]}}" }.join(", ") %> ]}, {ssl_options, [ <% if node[:rabbitmq][:ssl][:cert_required] -%> @@ -37,7 +37,7 @@ ]}, {rabbitmq_management, [ - {listener, [{ip, "<%= node[:rabbitmq][:management_address] %>"}, {port, <%= node[:rabbitmq][:management_port] %>}]}, + {listener, [{ip, "<%= @management_address %>"}, {port, <%= node[:rabbitmq][:management_port] %>}]}, {load_definitions, "/etc/rabbitmq/definitions.json"} ] } diff --git a/chef/cookbooks/rabbitmq/templates/default/rabbitmq_nrpe.cfg.erb b/chef/cookbooks/rabbitmq/templates/default/rabbitmq_nrpe.cfg.erb index 87295a87c8..0079c07b86 100644 --- a/chef/cookbooks/rabbitmq/templates/default/rabbitmq_nrpe.cfg.erb +++ b/chef/cookbooks/rabbitmq/templates/default/rabbitmq_nrpe.cfg.erb @@ -1,4 +1,4 @@ <% unless node[:rabbitmq].nil?
-%> -command[check_rabbit]=/usr/lib/nagios/plugins/check_rabbitmq_aliveness -H <%= node[:rabbitmq][:address] %> -u <%= node[:rabbitmq][:user] %> -p <%= node[:rabbitmq][:password] %> --vhost <%= node[:rabbitmq][:vhost] %> +command[check_rabbit]=/usr/lib/nagios/plugins/check_rabbitmq_aliveness -H <%= @listen_address %> -u <%= node[:rabbitmq][:user] %> -p <%= node[:rabbitmq][:password] %> --vhost <%= node[:rabbitmq][:vhost] %> <% end -%> diff --git a/chef/cookbooks/swift/libraries/rack_awareness.rb b/chef/cookbooks/swift/libraries/rack_awareness.rb index 266aeee01c..2ad76ebdcb 100644 --- a/chef/cookbooks/swift/libraries/rack_awareness.rb +++ b/chef/cookbooks/swift/libraries/rack_awareness.rb @@ -38,7 +38,10 @@ def get_node_sw(n) #get storage iface iface="" n[:crowbar_wall][:network][:interfaces].keys.each do |ifn| - if n[:crowbar_wall][:network][:interfaces][ifn.to_s][:addresses].include?(storage_ip) + ifaddrs = n[:crowbar_wall][:network][:interfaces][ifn.to_s][:addresses] + # strip netmasks from CIDR addresses + ifaddrs.map! { |addr| addr[%r{^[^/]+}] } + if ifaddrs.include?(storage_ip) iface=ifn break end @@ -52,7 +55,7 @@ def get_node_sw(n) #fallback to something default iface=n[:crowbar_ohai][:switch_config].keys[0] end - sw_name=n[:crowbar_ohai][:switch_config][iface][:switch_name] + return n[:crowbar_ohai][:switch_config].fetch(iface, switch_name: -1)[:switch_name] end def switch_to_zone() diff --git a/chef/cookbooks/tempest/recipes/config.rb b/chef/cookbooks/tempest/recipes/config.rb index b11ddb8773..378b8a4b65 100644 --- a/chef/cookbooks/tempest/recipes/config.rb +++ b/chef/cookbooks/tempest/recipes/config.rb @@ -95,6 +95,16 @@ roles = [ 'anotherrole' ] +if enabled_services.include?("metering") + rabbitmq_settings = fetch_rabbitmq_settings + + unless rabbitmq_settings[:enable_notifications] + # without rabbitmq notification clients configured the ceilometer + # tempest tests will fail so skip them + enabled_services = enabled_services - ["metering"] + end +end + heat_server = search(:node, "roles:heat-server").first if enabled_services.include?("orchestration") && !heat_server.nil? heat_trusts_delegated_roles = heat_server[:heat][:trusts_delegated_roles] @@ -485,7 +495,7 @@ template "/etc/tempest/tempest.conf" do source "tempest.conf.erb" - mode 0644 + mode 0o640 variables( lazy { { diff --git a/chef/data_bags/crowbar/migrate/aodh/101_add_alarm_history_ttl.rb b/chef/data_bags/crowbar/migrate/aodh/101_add_alarm_history_ttl.rb new file mode 100644 index 0000000000..9a81fde5b2 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/aodh/101_add_alarm_history_ttl.rb @@ -0,0 +1,13 @@ +def upgrade(ta, td, a, d) + unless a.key? "alarm_history_ttl" + a["alarm_history_ttl"] = ta["alarm_history_ttl"] + end + return a, d +end + +def downgrade(ta, td, a, d) + unless ta.key? "alarm_history_ttl" + a.delete("alarm_history_ttl") + end + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb new file mode 100644 index 0000000000..c7d08ecb44 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/database/109_separate_db_roles.rb @@ -0,0 +1,63 @@ +def upgrade(ta, td, a, d) + db_engine = a["sql_engine"] + + # 'ha' hash needs to be moved under 'postgresql' to keep it consistent with mysql + if db_engine == "postgresql" + a["postgresql"]["ha"] = a["ha"] + else + a["postgresql"]["ha"] = ta["postgresql"]["ha"] + a["mysql"]["ha"]["enabled"] = true if a["ha"]["enabled"] + end + a.delete("ha") if a.key? 
"ha" + + d["element_states"] = td["element_states"] + d["element_order"] = td["element_order"] + + if db_engine == "mysql" + # For the time of upgrade, we're adding new 'mysql-server role', while old 'database-server' + # is reserved for existing postgresql setup. + # For users that already have mysql (mariadb) deployed with 'database-server' role, we need to + # adapt the role assignments so the code that is looking for 'mysql-server' instances always + # finds correct mysql nodes. + d["elements"]["mysql-server"] = d["elements"]["database-server"] + d["elements"]["database-server"] = [] + if d.fetch("elements_expanded", {}).key? "database-server" + d["elements_expanded"]["mysql-server"] = d["elements_expanded"]["database-server"] + d["elements_expanded"].delete("database-server") + end + + chef_order = BarclampCatalog.chef_order("database") + nodes = NodeObject.find("run_list_map:database-server") + nodes.each do |node| + node.add_to_run_list("mysql-server", chef_order, + td["element_states"]["mysql-server"]) + node.delete_from_run_list("database-server") + node.save + end + end + return a, d +end + +def downgrade(ta, td, a, d) + d["element_states"] = td["element_states"] + d["element_order"] = td["element_order"] + + if a["sql_engine"] == "mysql" + d["elements"]["database-server"] = d["elements"]["mysql-server"] + d["elements"].delete("mysql-server") + if d.fetch("elements_expanded", {}).key? "mysql-server" + d["elements_expanded"]["database-server"] = d["elements_expanded"]["mysql-server"] + d["elements_expanded"].delete("mysql-server") + end + + chef_order = BarclampCatalog.chef_order("database") + nodes = NodeObject.find("run_list_map:mysql-server") + nodes.each do |node| + node.add_to_run_list("database-server", chef_order, + td["element_states"]["database-server"]) + node.delete_from_run_list("mysql-server") + node.save + end + end + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/database/105_add_resource_limits.rb b/chef/data_bags/crowbar/migrate/database/110_add_resource_limits.rb similarity index 100% rename from chef/data_bags/crowbar/migrate/database/105_add_resource_limits.rb rename to chef/data_bags/crowbar/migrate/database/110_add_resource_limits.rb diff --git a/chef/data_bags/crowbar/migrate/database/111_make_wsrep_provider_options_configurable.rb b/chef/data_bags/crowbar/migrate/database/111_make_wsrep_provider_options_configurable.rb new file mode 100644 index 0000000000..d8316924dc --- /dev/null +++ b/chef/data_bags/crowbar/migrate/database/111_make_wsrep_provider_options_configurable.rb @@ -0,0 +1,13 @@ +def upgrade(template_attrs, template_deployment, attrs, deployment) + attrs["mysql"]["wsrep_provider_options_custom"] = template_attrs["mysql"]["wsrep_provider_options_custom"] unless attrs["mysql"]["wsrep_provider_options_custom"] + attrs["mysql"]["gcs_fc_limit_multiplier"] = template_attrs["mysql"]["gcs_fc_limit_multiplier"] unless attrs["mysql"]["gcs_fc_limit_multiplier"] + attrs["mysql"]["gcs_fc_factor"] = template_attrs["mysql"]["gcs_fc_factor"] unless attrs["mysql"]["gcs_fc_factor"] + return attrs, deployment +end + +def downgrade(template_attrs, template_deployment, attrs, deployment) + attrs["mysql"].delete("wsrep_provider_options_custom") unless template_attrs["mysql"].key?("wsrep_provider_options_custom") + attrs["mysql"].delete("gcs_fc_limit_multiplier") unless template_attrs["mysql"].key?("gcs_fc_limit_multiplier") + attrs["mysql"].delete("gcs_fc_factor") unless template_attrs["mysql"].key?("gcs_fc_factor") + return attrs, deployment +end diff 
--git a/chef/data_bags/crowbar/migrate/glance/105_add_rate_limit.rb b/chef/data_bags/crowbar/migrate/glance/105_add_rate_limit.rb new file mode 100644 index 0000000000..64051f37d0 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/glance/105_add_rate_limit.rb @@ -0,0 +1,9 @@ +def upgrade(ta, td, a, d) + a["ha_rate_limit"] = ta["ha_rate_limit"] unless a.key? "ha_rate_limit" + return a, d +end + +def downgrade(ta, td, a, d) + a.delete("ha_rate_limit") unless ta.key? "ha_rate_limit" + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/keystone/114_remove_updated_password.rb b/chef/data_bags/crowbar/migrate/keystone/114_remove_updated_password.rb new file mode 100644 index 0000000000..9d532eac72 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/keystone/114_remove_updated_password.rb @@ -0,0 +1,16 @@ +def upgrade(ta, td, a, d) + a["admin"].delete("updated_password") + nodes = NodeObject.find("roles:keystone-server") + nodes.each do |node| + unless node[:keystone][:admin].key?("old_password") + node[:keystone][:admin][:old_password] = node[:keystone][:admin][:password] + end + node.save + end + return a, d +end + +def downgrade(ta, td, a, d) + a["admin"]["updated_password"] = ta["admin"]["updated_password"] + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/monasca/103_fix_ping_check_type.rb b/chef/data_bags/crowbar/migrate/monasca/103_fix_ping_check_type.rb new file mode 100644 index 0000000000..c4934907e3 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/monasca/103_fix_ping_check_type.rb @@ -0,0 +1,24 @@ +def upgrade(ta, td, a, d) + key_pingcheck = a["agent"]["plugins"]["libvirt"].key?("ping_check") + ta_pingcheck = ta["agent"]["plugins"]["libvirt"]["ping_check"] + + # If there is no ping_check key at all, simply take the template value + unless key_pingcheck + a["agent"]["plugins"]["libvirt"]["ping_check"] = ta_pingcheck + return a, d + end + + a_pingcheck = a["agent"]["plugins"]["libvirt"]["ping_check"] + + # Only override the existing value if it is boolean + a["agent"]["plugins"]["libvirt"]["ping_check"] = ta_pingcheck if + a_pingcheck.is_a?(TrueClass) || a_pingcheck.is_a?(FalseClass) + + return a, d +end + +def downgrade(ta, td, a, d) + a["agent"]["plugins"]["libvirt"]["ping_check"] = false + + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/monasca/104_add_elasticsearch_tunables.rb b/chef/data_bags/crowbar/migrate/monasca/104_add_elasticsearch_tunables.rb new file mode 100644 index 0000000000..4662a7f8a3 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/monasca/104_add_elasticsearch_tunables.rb @@ -0,0 +1,14 @@ +def upgrade(ta, td, a, d) + # this migration already happened if the tunables key exists + return a, d if a["elasticsearch"].key?("tunables") + + a["elasticsearch"]["tunables"] = ta["elasticsearch"]["tunables"] + + return a, d +end + +def downgrade(ta, td, a, d) + a["elasticsearch"].delete("tunables") + + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/neutron/114_add_cisco_apic_multipod.rb b/chef/data_bags/crowbar/migrate/neutron/114_add_cisco_apic_multipod.rb new file mode 100644 index 0000000000..885e943d64 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/114_add_cisco_apic_multipod.rb @@ -0,0 +1,21 @@ +def upgrade(ta, td, a, d) + if a.key?("apic") && a["apic"]["opflex"].is_a?(Hash) + nodes = a["apic"]["apic_switches"] + .map { |_, value| value["switch_ports"].keys } + .flatten + .uniq + a["apic"]["opflex"]["nodes"] = nodes + opflex = [ta["apic"]["opflex"].first.merge(a["apic"]["opflex"])] + a["apic"]["opflex"] = opflex +
end + return a, d +end + +def downgrade(ta, td, a, d) + if a.key?("apic") && ta["apic"]["opflex"].is_a?(Array) + a["apic"]["opflex"] = a["apic"]["opflex"].first + a["apic"]["opflex"].delete("pod") + a["apic"]["opflex"].delete("nodes") + end + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/neutron/115_add_apic_optimized_dhcp_metadata.rb b/chef/data_bags/crowbar/migrate/neutron/115_add_apic_optimized_dhcp_metadata.rb new file mode 100644 index 0000000000..d83193af05 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/115_add_apic_optimized_dhcp_metadata.rb @@ -0,0 +1,12 @@ +def upgrade(ta, td, a, d) + a["apic"]["optimized_metadata"] = ta["apic"]["optimized_metadata"] \ + unless a["apic"].key? "optimized_metadata" + a["apic"]["optimized_dhcp"] = ta["apic"]["optimized_dhcp"] unless a["apic"].key? "optimized_dhcp" + return a, d +end + +def downgrade(ta, td, a, d) + a["apic"].delete("optimized_metadata") unless ta["apic"].key? "optimized_metadata" + a["apic"].delete("optimized_dhcp") unless ta["apic"].key? "optimized_dhcp" + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/neutron/116_add_allow_automatic_lbass_agent_failover.rb b/chef/data_bags/crowbar/migrate/neutron/116_add_allow_automatic_lbass_agent_failover.rb new file mode 100644 index 0000000000..33f690705d --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/116_add_allow_automatic_lbass_agent_failover.rb @@ -0,0 +1,11 @@ +def upgrade(ta, td, a, d) + attr = "allow_automatic_lbaas_agent_failover" + a[attr] = ta[attr] unless a.key? attr + return a, d +end + +def downgrade(ta, td, a, d) + attr = "allow_automatic_lbaas_agent_failover" + a.delete(attr) unless ta.key? attr + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/neutron/117_add_apic_external_network_attributes.rb b/chef/data_bags/crowbar/migrate/neutron/117_add_apic_external_network_attributes.rb new file mode 100644 index 0000000000..bcc0fb0738 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/117_add_apic_external_network_attributes.rb @@ -0,0 +1,9 @@ +def upgrade(ta, td, a, d) + a["apic"]["ext_net"] = ta["apic"]["ext_net"] unless a["apic"].key? "ext_net" + return a, d +end + +def downgrade(ta, td, a, d) + a["apic"].delete("ext_net") unless ta["apic"].key? "ext_net" + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/neutron/118_add_force_metadata_attributes.rb b/chef/data_bags/crowbar/migrate/neutron/118_add_force_metadata_attributes.rb new file mode 100644 index 0000000000..2fa9bb4d88 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/118_add_force_metadata_attributes.rb @@ -0,0 +1,9 @@ +def upgrade(ta, td, a, d) + a["metadata"] = ta["metadata"] unless a.key? "metadata" + return a, d +end + +def downgrade(ta, td, a, d) + a.delete("metadata") unless ta.key? 
"metadata" + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/neutron/119_add_cisco_aci_role.rb b/chef/data_bags/crowbar/migrate/neutron/119_add_cisco_aci_role.rb new file mode 100644 index 0000000000..4e297a78d1 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/119_add_cisco_aci_role.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +def upgrade(ta, td, a, d) + unless d["element_states"].key?("neutron-sdn-cisco-aci-agents") + d["element_states"] = td["element_states"] + d["element_order"] = td["element_order"] + d["element_run_list_order"] = td["element_run_list_order"] + + if a["networking_plugin"] == "ml2" && + (a["ml2_mechanism_drivers"].include?("cisco_apic_ml2") || + a["ml2_mechanism_drivers"].include?("apic_gbp")) + nodes = NodeObject.find("roles:nova-compute-kvm") + nodes.each do |node| + node.add_to_run_list("neutron-sdn-cisco-aci-agents", + td["element_run_list_order"]["neutron-sdn-cisco-aci-agents"], + td["element_states"]["neutron-sdn-cisco-aci-agents"]) + node.save + end + end + end + return a, d +end + +def downgrade(ta, td, a, d) + unless td["element_states"].key?("neutron-sdn-cisco-aci-agents") + d["element_states"] = td["element_states"] + d["element_order"] = td["element_order"] + d["element_run_list_order"] = td["element_run_list_order"] + d["elements"].delete("neutron-sdn-cisco-aci-agents") + + nodes = NodeObject.find("roles:neutron-sdn-cisco-aci-agents") + nodes.each do |node| + node.delete_from_run_list("neutron-sdn-cisco-aci-agents") + node.save + end + end + + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/neutron/103_remove_use_lbaasv2.rb b/chef/data_bags/crowbar/migrate/neutron/120_remove_use_lbaasv2.rb similarity index 100% rename from chef/data_bags/crowbar/migrate/neutron/103_remove_use_lbaasv2.rb rename to chef/data_bags/crowbar/migrate/neutron/120_remove_use_lbaasv2.rb diff --git a/chef/data_bags/crowbar/migrate/neutron/105_add_back_use_l2pop.rb b/chef/data_bags/crowbar/migrate/neutron/121_add_back_use_l2pop.rb similarity index 100% rename from chef/data_bags/crowbar/migrate/neutron/105_add_back_use_l2pop.rb rename to chef/data_bags/crowbar/migrate/neutron/121_add_back_use_l2pop.rb diff --git a/chef/data_bags/crowbar/migrate/neutron/122_add_default_log_levels.rb b/chef/data_bags/crowbar/migrate/neutron/122_add_default_log_levels.rb new file mode 100644 index 0000000000..4a81f81290 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/122_add_default_log_levels.rb @@ -0,0 +1,11 @@ +def upgrade(template_attributes, template_deployment, attributes, deployment) + key = "default_log_levels" + attributes[key] = template_attributes[key] unless attributes.key? key + return attributes, deployment +end + +def downgrade(template_attributes, template_deployment, attributes, deployment) + key = "default_log_levels" + attributes.delete(key) unless template_attributes.key? key + return attributes, deployment +end diff --git a/chef/data_bags/crowbar/migrate/neutron/123_add_use_l3_ha.rb b/chef/data_bags/crowbar/migrate/neutron/123_add_use_l3_ha.rb new file mode 100644 index 0000000000..ba3bc17421 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/123_add_use_l3_ha.rb @@ -0,0 +1,22 @@ +def upgrade(ta, td, a, d) + unless a.key? 
"l3_ha" + a["l3_ha"] = ta["l3_ha"] + + unless defined?(@@neutron_l3_ha_vrrp_password) + service = ServiceObject.new "fake-logger" + @@neutron_l3_ha_vrrp_password = service.random_password + end + + a["l3_ha"]["vrrp_password"] = @@neutron_l3_ha_vrrp_password + end + + return a, d +end + +def downgrade(ta, td, a, d) + unless ta.key?("l3_ha") + a.delete("l3_ha") + end + + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/neutron/124_add_ovs_of_inactivity_probe.rb b/chef/data_bags/crowbar/migrate/neutron/124_add_ovs_of_inactivity_probe.rb new file mode 100644 index 0000000000..41af631153 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/124_add_ovs_of_inactivity_probe.rb @@ -0,0 +1,15 @@ +def upgrade(tattr, tdep, attr, dep) + unless attr["ovs"].key?("of_inactivity_probe") + attr["ovs"]["of_inactivity_probe"] = tattr["ovs"]["of_inactivity_probe"] + end + + return attr, dep +end + +def downgrade(tattr, tdep, attr, dep) + unless tattr["ovs"].key?("of_inactivity_probe") + attr["ovs"].delete("of_inactivity_probe") if attr.key?("ovs") + end + + return attr, dep +end diff --git a/chef/data_bags/crowbar/migrate/neutron/125_add_apic_multi_vmm_domains.rb b/chef/data_bags/crowbar/migrate/neutron/125_add_apic_multi_vmm_domains.rb new file mode 100644 index 0000000000..ac15244d9d --- /dev/null +++ b/chef/data_bags/crowbar/migrate/neutron/125_add_apic_multi_vmm_domains.rb @@ -0,0 +1,15 @@ +def upgrade(tattr, tdep, attr, dep) + unless attr["apic"].key?("apic_vmms") + attr["apic"]["apic_vmms"] = tattr["apic"]["apic_vmms"] + end + + return attr, dep +end + +def downgrade(tattr, tdep, attr, dep) + unless tattr["apic"].key?("apic_vmms") + attr["apic"].delete("apic_vmms") if attr.key?("apic_vmms") + end + + return attr, dep +end diff --git a/chef/data_bags/crowbar/migrate/nova/122_add_scheduler_default_filters.rb b/chef/data_bags/crowbar/migrate/nova/122_add_scheduler_default_filters.rb new file mode 100644 index 0000000000..0a64fb1f7f --- /dev/null +++ b/chef/data_bags/crowbar/migrate/nova/122_add_scheduler_default_filters.rb @@ -0,0 +1,9 @@ +def upgrade(ta, td, a, d) + a["scheduler"]["default_filters"] = ta["scheduler"]["default_filters"] + return a, d +end + +def downgrade(ta, td, a, d) + a["scheduler"].delete("default_filters") + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/nova/123_placement_api.rb b/chef/data_bags/crowbar/migrate/nova/123_placement_api.rb new file mode 100644 index 0000000000..0cf3764d78 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/nova/123_placement_api.rb @@ -0,0 +1,21 @@ +def upgrade(ta, td, a, d) + a["placement_db"] = ta["placement_db"] + + if a["placement_db"]["password"].nil? || a["placement_db"]["password"].empty? + service = ServiceObject.new "fake-logger" + a["placement_db"]["password"] = service.random_password + end + + if a["placement_service_password"].nil? || a["placement_service_password"].empty? + service = ServiceObject.new "fake-logger" + a["placement_service_password"] = service.random_password + end + + return a, d +end + +def downgrade(ta, td, a, d) + a.delete("placement_db") + a.delete("placement_service_password") + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/nova/124_add_cachemodes.rb b/chef/data_bags/crowbar/migrate/nova/124_add_cachemodes.rb new file mode 100644 index 0000000000..e15adfa91b --- /dev/null +++ b/chef/data_bags/crowbar/migrate/nova/124_add_cachemodes.rb @@ -0,0 +1,13 @@ +def upgrade(ta, td, a, d) + unless a["kvm"].key? 
"disk_cachemodes" + a["kvm"]["disk_cachemodes"] = ta["kvm"]["disk_cachemodes"] + end + return a, d +end + +def downgrade(ta, td, a, d) + unless ta["kvm"].key? "disk_cachemodes" + a["kvm"].delete("disk_cachemodes") + end + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/nova/125_add_default_log_levels.rb b/chef/data_bags/crowbar/migrate/nova/125_add_default_log_levels.rb new file mode 100644 index 0000000000..4a81f81290 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/nova/125_add_default_log_levels.rb @@ -0,0 +1,11 @@ +def upgrade(template_attributes, template_deployment, attributes, deployment) + key = "default_log_levels" + attributes[key] = template_attributes[key] unless attributes.key? key + return attributes, deployment +end + +def downgrade(template_attributes, template_deployment, attributes, deployment) + key = "default_log_levels" + attributes.delete(key) unless template_attributes.key? key + return attributes, deployment +end diff --git a/chef/data_bags/crowbar/migrate/nova/126_add_max_threads.rb b/chef/data_bags/crowbar/migrate/nova/126_add_max_threads.rb new file mode 100644 index 0000000000..a1703e730b --- /dev/null +++ b/chef/data_bags/crowbar/migrate/nova/126_add_max_threads.rb @@ -0,0 +1,11 @@ +def upgrade(template_attributes, template_deployment, attributes, deployment) + key = "max_threads_per_process" + attributes["kvm"][key] = template_attributes["kvm"][key] unless attributes["kvm"].key? key + return attributes, deployment +end + +def downgrade(template_attributes, template_deployment, attributes, deployment) + key = "max_threads_per_process" + attributes["kvm"].delete(key) unless template_attributes["kvm"].key? key + return attributes, deployment +end diff --git a/chef/data_bags/crowbar/migrate/rabbitmq/001_ha.rb b/chef/data_bags/crowbar/migrate/rabbitmq/002_ha.rb similarity index 100% rename from chef/data_bags/crowbar/migrate/rabbitmq/001_ha.rb rename to chef/data_bags/crowbar/migrate/rabbitmq/002_ha.rb diff --git a/chef/data_bags/crowbar/migrate/rabbitmq/105_add_extra_users.rb b/chef/data_bags/crowbar/migrate/rabbitmq/105_add_extra_users.rb new file mode 100644 index 0000000000..22dfda4169 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/rabbitmq/105_add_extra_users.rb @@ -0,0 +1,9 @@ +def upgrade(ta, td, a, d) + a["extra_users"] = ta["extra_users"] unless a["extra_users"] + return a, d +end + +def downgrade(ta, td, a, d) + a.delete("extra_users") unless ta.key?("extra_users") + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/rabbitmq/106_add_notifications.rb b/chef/data_bags/crowbar/migrate/rabbitmq/106_add_notifications.rb new file mode 100644 index 0000000000..8e123ec321 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/rabbitmq/106_add_notifications.rb @@ -0,0 +1,12 @@ +def upgrade(ta, td, a, d) + unless a["client"].key?("enable_notifications") + # keep it always enabled on upgrade for compat + a["client"]["enable_notifications"] = true + end + return a, d +end + +def downgrade(ta, td, a, d) + a["client"].delete("enable_notifications") unless ta["client"].key?("enable_notifications") + return a, d +end diff --git a/chef/data_bags/crowbar/migrate/rabbitmq/107_add_enable_queue_mirroring.rb b/chef/data_bags/crowbar/migrate/rabbitmq/107_add_enable_queue_mirroring.rb new file mode 100644 index 0000000000..b9320f6214 --- /dev/null +++ b/chef/data_bags/crowbar/migrate/rabbitmq/107_add_enable_queue_mirroring.rb @@ -0,0 +1,11 @@ +def upgrade(template_attributes, template_deployment, attributes, deployment) + key = "enable_queue_mirroring" 
+ attributes[key] = template_attributes[key] unless attributes.key? key + return attributes, deployment +end + +def downgrade(template_attributes, template_deployment, attributes, deployment) + key = "enable_queue_mirroring" + attributes.delete(key) unless template_attributes.key? key + return attributes, deployment +end diff --git a/chef/data_bags/crowbar/template-aodh.json b/chef/data_bags/crowbar/template-aodh.json index d7b6eb1e6a..0fd5b2ed8e 100644 --- a/chef/data_bags/crowbar/template-aodh.json +++ b/chef/data_bags/crowbar/template-aodh.json @@ -6,6 +6,7 @@ "debug": false, "verbose": true, "evaluation_interval": 600, + "alarm_history_ttl": -1, "rabbitmq_instance": "none", "database_instance": "none", "keystone_instance": "none", @@ -35,7 +36,7 @@ "aodh": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 100, + "schema-revision": 101, "element_states": { "aodh-server": [ "readying", "ready", "applying" ] }, diff --git a/chef/data_bags/crowbar/template-aodh.schema b/chef/data_bags/crowbar/template-aodh.schema index 83747d4b30..d2e51765e1 100644 --- a/chef/data_bags/crowbar/template-aodh.schema +++ b/chef/data_bags/crowbar/template-aodh.schema @@ -15,6 +15,7 @@ "debug": { "type": "bool", "required": true }, "verbose": { "type": "bool", "required": true }, "evaluation_interval": { "type": "int", "required": true }, + "alarm_history_ttl": { "type": "int", "required": true }, "database_instance": { "type": "str", "required": true }, "rabbitmq_instance": { "type": "str", "required": true }, "keystone_instance": { "type": "str", "required": true }, diff --git a/chef/data_bags/crowbar/template-database.json b/chef/data_bags/crowbar/template-database.json index 599a55474d..b4d12e2900 100644 --- a/chef/data_bags/crowbar/template-database.json +++ b/chef/data_bags/crowbar/template-database.json @@ -3,7 +3,7 @@ "description": "Installation for Database", "attributes": { "database": { - "sql_engine": "postgresql", + "sql_engine": "mysql", "mysql": { "datadir": "/var/lib/mysql", "slow_query_logging": true, @@ -15,6 +15,9 @@ "expire_logs_days": 10, "bootstrap_timeout": 600, "wsrep_slave_threads" : 1, + "gcs_fc_limit_multiplier" : 5, + "gcs_fc_factor" : 0.8, + "wsrep_provider_options_custom" : [], "innodb_buffer_pool_size": 256, "innodb_tunings": [ "# log_file_size should be ~ 25% of buffer_pool_size", @@ -43,10 +46,10 @@ "timeout": "60s" }, "promote": { - "timeout": "300s" + "timeout": "600s" }, "demote": { - "timeout": "60s" + "timeout": "600s" } } } @@ -57,18 +60,18 @@ "log_filename": "postgresql.log-%Y%m%d%H%M", "log_truncate_on_rotation": false, "log_min_duration_statement": -1 - } - }, - "ha": { - "storage": { - "mode": "shared", - "drbd": { - "size": 50 - }, - "shared": { - "device": "", - "fstype": "", - "options": "" + }, + "ha": { + "storage": { + "mode": "shared", + "drbd": { + "size": 50 + }, + "shared": { + "device": "", + "fstype": "", + "options": "" + } } } }, @@ -83,15 +86,17 @@ "database": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 108, + "schema-revision": 111, "element_states": { - "database-server": [ "readying", "ready", "applying" ] + "database-server": [ "readying", "ready", "applying" ], + "mysql-server": [ "readying", "ready", "applying" ] }, "elements": { - "database-server": [] + "database-server": [], + "mysql-server": [] }, "element_order": [ - [ "database-server" ] + [ "database-server", "mysql-server" ] ], "config": { "environment": "database-base-config", diff --git a/chef/data_bags/crowbar/template-database.schema 
b/chef/data_bags/crowbar/template-database.schema index 401d087c31..3c54de2980 100644 --- a/chef/data_bags/crowbar/template-database.schema +++ b/chef/data_bags/crowbar/template-database.schema @@ -32,6 +32,9 @@ "expire_logs_days": { "type": "int", "required": true }, "bootstrap_timeout": { "type": "int", "required": true }, "wsrep_slave_threads": { "type": "int", "required": true }, + "gcs_fc_limit_multiplier": { "type": "int", "required": true }, + "gcs_fc_factor": { "type": "float", "required": true }, + "wsrep_provider_options_custom": { "type": "seq", "required": true, "sequence": [ { "type": "str" } ] }, "ssl": { "type": "map", "required": true, "mapping": { "enabled": { "type": "bool", "required": true }, @@ -85,32 +88,32 @@ "log_filename": {"type": "str" }, "log_min_duration_statement": { "type": "int" } } - } - } - }, - "ha" : { - "type": "map", - "required": true, - "mapping" : { - "storage": { + }, + "ha" : { "type": "map", "required": true, "mapping" : { - "mode": { "type": "str", "required": true }, - "drbd": { + "storage": { "type": "map", "required": true, "mapping" : { - "size": { "type": "int", "required": true } - } - }, - "shared": { - "type": "map", - "required": true, - "mapping" : { - "device": { "type": "str", "required": true }, - "fstype": { "type": "str", "required": true }, - "options": { "type": "str", "required": true } + "mode": { "type": "str", "required": true }, + "drbd": { + "type": "map", + "required": true, + "mapping" : { + "size": { "type": "int", "required": true } + } + }, + "shared": { + "type": "map", + "required": true, + "mapping" : { + "device": { "type": "str", "required": true }, + "fstype": { "type": "str", "required": true }, + "options": { "type": "str", "required": true } + } + } } } } diff --git a/chef/data_bags/crowbar/template-glance.json b/chef/data_bags/crowbar/template-glance.json index 23a5334f57..c38caa31bc 100644 --- a/chef/data_bags/crowbar/template-glance.json +++ b/chef/data_bags/crowbar/template-glance.json @@ -65,14 +65,17 @@ "keystone_instance": "none", "service_user": "glance", "database_instance": "none", - "rabbitmq_instance": "none" + "rabbitmq_instance": "none", + "ha_rate_limit": { + "glance-api": 0 + } } }, "deployment": { "glance": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 104, + "schema-revision": 105, "element_states": { "glance-server": [ "readying", "ready", "applying" ] }, diff --git a/chef/data_bags/crowbar/template-glance.schema b/chef/data_bags/crowbar/template-glance.schema index abd2c2e357..d4142d8d8b 100644 --- a/chef/data_bags/crowbar/template-glance.schema +++ b/chef/data_bags/crowbar/template-glance.schema @@ -96,7 +96,12 @@ "keystone_instance": { "type": "str", "required": true }, "service_user": { "type": "str", "required": true }, "service_password": { "type": "str" }, - "database_instance": { "type": "str", "required": true } + "database_instance": { "type": "str", "required": true }, + "ha_rate_limit": { + "type": "map", "required": true, "mapping": { + "glance-api": { "type": "int", "required": true } + } + } } } } diff --git a/chef/data_bags/crowbar/template-keystone.json b/chef/data_bags/crowbar/template-keystone.json index cb9b78011a..14ae7adf15 100644 --- a/chef/data_bags/crowbar/template-keystone.json +++ b/chef/data_bags/crowbar/template-keystone.json @@ -36,13 +36,12 @@ "version": "3", "region": "RegionOne", "processes" : 8, - "threads" : 8 + "threads" : 1 }, "admin": { "tenant": "admin", "username": "admin", - "password": "crowbar", - "updated_password": "" + 
"password": "crowbar" }, "service": { "tenant": "service", @@ -188,7 +187,7 @@ "keystone": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 113, + "schema-revision": 114, "element_states": { "keystone-server": [ "readying", "ready", "applying" ] }, diff --git a/chef/data_bags/crowbar/template-keystone.schema b/chef/data_bags/crowbar/template-keystone.schema index e16594cf64..668068ef1a 100644 --- a/chef/data_bags/crowbar/template-keystone.schema +++ b/chef/data_bags/crowbar/template-keystone.schema @@ -46,8 +46,7 @@ "admin": { "type": "map", "required": true, "mapping": { "tenant": { "type" : "str", "required" : true }, "username": { "type" : "str", "required" : true }, - "password": { "type" : "str", "required" : true }, - "updated_password": { "type" : "str", "required" : false } + "password": { "type" : "str", "required" : true } }}, "service": { "type": "map", "required": true, "mapping": { "tenant": { "type" : "str", "required" : true }, diff --git a/chef/data_bags/crowbar/template-monasca.json b/chef/data_bags/crowbar/template-monasca.json index abbb1b3f22..9f611c1e90 100644 --- a/chef/data_bags/crowbar/template-monasca.json +++ b/chef/data_bags/crowbar/template-monasca.json @@ -24,7 +24,7 @@ "tenant_name" ], "nova_refresh": 14400, - "ping_check": false, + "ping_check": "/bin/ip netns exec NAMESPACE /usr/bin/ping", "vm_cpu_check_enable": true, "vm_disks_check_enable": true, "vm_extended_disks_check_enable": false, @@ -69,7 +69,16 @@ "log_level": "INFO" }, "elasticsearch": { - "repo_dir": [] + "repo_dir": [], + "tunables": { + "heap_size": "4g", + "max_locked_memory": "infinity", + "max_open_files_hard_limit": 65536, + "max_open_files_soft_limit": 16384, + "max_procs": 65536, + "memory_lock": true, + "vm_max_map_count": 262144 + } }, "elasticsearch_curator": { "delete_exclude_index": [ ".kibana" ], @@ -132,7 +141,7 @@ "monasca": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 102, + "schema-revision": 104, "element_states": { "monasca-server": [ "readying", "ready", "applying" ], "monasca-master": [ "readying", "ready", "applying" ], diff --git a/chef/data_bags/crowbar/template-monasca.schema b/chef/data_bags/crowbar/template-monasca.schema index 402f899733..06bfa2194d 100644 --- a/chef/data_bags/crowbar/template-monasca.schema +++ b/chef/data_bags/crowbar/template-monasca.schema @@ -40,7 +40,7 @@ "max_ping_concurrency": { "type": "int", "required": true }, "metadata": { "type": "seq", "required": true, "sequence": [ { "type": "str" } ] }, "nova_refresh": { "type": "int", "required": true }, - "ping_check": { "type": "bool", "required": true }, + "ping_check": { "type": "str", "required": true }, "vm_cpu_check_enable": { "type": "bool", "required": true }, "vm_disks_check_enable": { "type": "bool", "required": true }, "vm_extended_disks_check_enable": { "type": "bool", "required": true }, @@ -107,6 +107,19 @@ "required": true, "type": "seq", "sequence": [ { "type": "str" } ] + }, + "tunables": { + "required": true, + "type": "map", + "mapping": { + "heap_size": { "type": "str", "required": true }, + "max_locked_memory": { "type": "str", "required": true }, + "max_open_files_hard_limit": { "type": "int", "required": true }, + "max_open_files_soft_limit": { "type": "int", "required": true }, + "max_procs": { "type": "int", "required": true }, + "memory_lock": { "type": "bool", "required": true }, + "vm_max_map_count": { "type": "int", "required": true } + } } } }, diff --git a/chef/data_bags/crowbar/template-neutron.json 
b/chef/data_bags/crowbar/template-neutron.json index 86c46ccc7d..7e30ee04ee 100644 --- a/chef/data_bags/crowbar/template-neutron.json +++ b/chef/data_bags/crowbar/template-neutron.json @@ -14,11 +14,16 @@ "rpc_workers": 1, "use_lbaas": true, "lbaasv2_driver": "haproxy", + "allow_automatic_lbaas_agent_failover": true, "use_l2pop": false, "l2pop": { "agent_boot_time": 180 }, "use_dvr": false, + "l3_ha": { + "use_l3_ha": false, + "vrrp_password": "" + }, "additional_external_networks": [], "networking_plugin": "ml2", "ml2_mechanism_drivers": ["openvswitch"], @@ -38,19 +43,33 @@ "ovs": { "tunnel_csum": false, "of_interface": "native", - "ovsdb_interface": "native" + "ovsdb_interface": "native", + "of_inactivity_probe": 10 }, "apic": { "hosts": "", "system_id": "soc", "username": "admin", "password": "", - "opflex": { + "optimized_metadata": true, + "optimized_dhcp": true, + "ext_net": { + "name": "l3out", + "preexisting": true, + "ext_epg": "l3out-epg", + "host_pool_cidr": "" + }, + "opflex": [{ + "pod": "", + "nodes" : [], "peer_ip": "", "peer_port": 8009, + "ssl_mode": "encrypted", "encap": "vxlan", + "integration_bridge": "br-int", + "access_bridge": "br-fabric", "vxlan": { - "encap_iface": "br-int_vxlan0", + "encap_iface": "br-fab_vxlan0", "uplink_iface": "vlan.4093", "uplink_vlan": 4093, "remote_ip": "", @@ -59,7 +78,7 @@ "vlan": { "encap_iface": "" } - }, + }], "apic_switches": { "101": { "switch_ports": { @@ -81,7 +100,17 @@ } } } - } + }, + "apic_vmms": [{ + "vmm_name": "soc_kvm_domain", + "vmm_type": "openstack", + "vlan_ranges": "" + }, + { + "vmm_name": "soc_vm_domain", + "vmm_type": "vmware", + "vlan_ranges": "" + }] }, "allow_overlapping_ips": true, "use_syslog": false, @@ -168,26 +197,33 @@ }, "ha_rate_limit": { "neutron-server": 0 - } + }, + "metadata": { + "force": false + }, + "default_log_levels": [] } }, "deployment": { "neutron": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 113, + "schema-revision": 125, "element_states": { "neutron-server": [ "readying", "ready", "applying" ], - "neutron-network": [ "readying", "ready", "applying" ] + "neutron-network": [ "readying", "ready", "applying" ], + "neutron-sdn-cisco-aci-agents": [ "readying", "ready", "applying" ] }, "elements": {}, "element_order": [ ["neutron-server" ], - ["neutron-network" ] + ["neutron-network" ], + ["neutron-sdn-cisco-aci-agents" ] ], "element_run_list_order": { "neutron-server": 94, - "neutron-network": 95 + "neutron-network": 95, + "neutron-sdn-cisco-aci-agents": 96 }, "config": { "environment": "neutron-config-base", diff --git a/chef/data_bags/crowbar/template-neutron.schema b/chef/data_bags/crowbar/template-neutron.schema index 2736c43cf4..55da58a801 100644 --- a/chef/data_bags/crowbar/template-neutron.schema +++ b/chef/data_bags/crowbar/template-neutron.schema @@ -19,11 +19,16 @@ "rpc_workers": { "type": "int", "required": true }, "use_lbaas": { "type": "bool", "required": true }, "lbaasv2_driver": { "type": "str", "required": true }, + "allow_automatic_lbaas_agent_failover": { "type": "bool", "required": true }, "use_l2pop": { "type": "bool", "required": true }, "l2pop": { "type": "map", "required": true, "mapping": { "agent_boot_time": { "type" : "int", "required" : true } }}, "use_dvr": { "type": "bool", "required": true }, + "l3_ha": { "type": "map", "required": true, "mapping": { + "use_l3_ha": { "type": "bool", "required": true }, + "vrrp_password": { "type": "str", "required": true } + }}, "additional_external_networks": { "type": "seq", "required": true, "sequence": 
[ { "type": "str" } ] }, "networking_plugin": { "type": "str", "required": true }, "ml2_mechanism_drivers": { "type": "seq", "required": true, "sequence": [ { "type": "str" } ] }, @@ -43,28 +48,46 @@ "ovs": { "type": "map", "required": true, "mapping": { "tunnel_csum": { "type": "bool", "required": true }, "ovsdb_interface": { "type": "str", "required": true }, - "of_interface": { "type": "str", "required": true } + "of_interface": { "type": "str", "required": true }, + "of_inactivity_probe": { "type": "int", "required": true } }}, "apic": { "type": "map", "required": true, "mapping": { "hosts": { "type" : "str", "required" : true }, "system_id": { "type" : "str", "required" : true }, "username": { "type" : "str", "required": true }, "password": { "type" : "str", "required": true }, - "opflex": { "type": "map", "required": true, "mapping": { - "peer_ip": { "type": "str", "required" : true }, - "peer_port": { "type": "int", "required" : true }, - "encap": { "type": "str", "required": true }, - "vxlan": { "type": "map", "required": true, "mapping" : { - "encap_iface": {"type": "str", "required": true }, - "uplink_iface": { "type": "str", "required": true }, - "uplink_vlan": { "type": "int", "required": true }, - "remote_ip": { "type": "str", "required": true }, - "remote_port": { "type": "int", "required": true } - }}, - "vlan": { "type": "map", "required": true, "mapping": { - "encap_iface": { "type": "str", "required": true } - }} + "optimized_metadata": { "type" : "bool", "required": true }, + "optimized_dhcp": { "type" : "bool", "required": true }, + "vpc_pairs": { "type": "str", "required": false }, + "ext_net": { "type" : "map", "required" : true, "mapping" : { + "name": { "type" : "str", "required" : true }, + "preexisting": { "type" : "bool", "required" : true }, + "nat_enabled": { "type" : "bool", "required" : false }, + "ext_epg": { "type" : "str", "required" : true }, + "host_pool_cidr": { "type" : "str", "required" : true } }}, + "opflex": { "type": "seq", "required": true, "sequence": [ { + "type": "map", "required": true, "mapping": { + "pod": { "type" : "str", "required" : false }, + "nodes": { "type" : "seq", "required" : true, "sequence": [ { "type": "str" } ] }, + "peer_ip": { "type": "str", "required" : true }, + "peer_port": { "type": "int", "required" : true }, + "ssl_mode": { "type": "str", "required": true }, + "encap": { "type": "str", "required": true }, + "integration_bridge": { "type": "str", "required": true }, + "access_bridge": { "type": "str", "required": true }, + "vxlan": { "type": "map", "required": true, "mapping" : { + "encap_iface": {"type": "str", "required": true }, + "uplink_iface": { "type": "str", "required": true }, + "uplink_vlan": { "type": "int", "required": true }, + "remote_ip": { "type": "str", "required": true }, + "remote_port": { "type": "int", "required": true } + }}, + "vlan": { "type": "map", "required": true, "mapping": { + "encap_iface": { "type": "str", "required": true } + }} + } + } ] }, "apic_switches": { "type" : "map", "required" : true, "mapping" : { = : { "type" : "map", "required" : true, "mapping" : { "switch_ports": { "type" : "map", "required" : true, "mapping" : { @@ -73,7 +96,14 @@ }} }} }} - } + }, + "apic_vmms": { "type" : "seq", "required" : true, "sequence" : [ { + "type" : "map", "required" : true, "mapping" : { + "vmm_name": { "type": "str", "required": true }, + "vmm_type": { "type": "str", "required": true }, + "vlan_ranges": { "type": "str", "required": true } + } + } ] } }}, "allow_overlapping_ips": { "type": 
"bool", "required": true }, "cisco_switches": { @@ -207,6 +237,16 @@ "type": "map", "required": true, "mapping": { "neutron-server": { "type": "int", "required": true } } + }, + "metadata": { + "type": "map", "required": true, "mapping": { + "force": { "type": "bool", "required": true } + } + }, + "default_log_levels": { + "type": "seq", + "required": false, + "sequence": [ { "type": "str" } ] } }} }}, diff --git a/chef/data_bags/crowbar/template-nova.json b/chef/data_bags/crowbar/template-nova.json index 99d78a4a0c..7f1fc1c466 100644 --- a/chef/data_bags/crowbar/template-nova.json +++ b/chef/data_bags/crowbar/template-nova.json @@ -36,7 +36,8 @@ "ram_allocation_ratio": 1.0, "cpu_allocation_ratio": 16.0, "disk_allocation_ratio": 1.0, - "reserved_host_memory_mb": 512 + "reserved_host_memory_mb": 512, + "default_filters": "" }, "ec2-api": { "db": { @@ -64,13 +65,20 @@ "user": "nova_api", "database": "nova_api" }, - + "placement_db": { + "password": "", + "user": "placement", + "database": "placement" + }, "rbd": { "user": "", "secret_uuid": "" }, "kvm": { - "ksm_enabled": false + "nested_virt": false, + "ksm_enabled": false, + "disk_cachemodes": "network=writeback", + "max_threads_per_process": 0 }, "vcenter": { "host": "", @@ -167,14 +175,15 @@ "openstack-nova-compute": { "LimitNOFILE": null } - } + }, + "default_log_levels": [] } }, "deployment": { "nova": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 121, + "schema-revision": 126, "element_states": { "nova-controller": [ "readying", "ready", "applying" ], "nova-compute-ironic": [ "readying", "ready", "applying" ], diff --git a/chef/data_bags/crowbar/template-nova.schema b/chef/data_bags/crowbar/template-nova.schema index cd07ef299f..c4d2224d20 100644 --- a/chef/data_bags/crowbar/template-nova.schema +++ b/chef/data_bags/crowbar/template-nova.schema @@ -18,6 +18,7 @@ "keystone_instance": { "type": "str", "required": true }, "service_user": { "type": "str", "required": true }, "service_password": { "type": "str" }, + "placement_service_password": { "type": "str" }, "glance_instance": { "type": "str", "required": true }, "cinder_instance": { "type": "str", "required": true }, "neutron_instance": { "type": "str", "required": true }, @@ -53,7 +54,8 @@ "ram_allocation_ratio": { "type": "number" }, "cpu_allocation_ratio": { "type": "number" }, "disk_allocation_ratio": { "type": "number" }, - "reserved_host_memory_mb": { "type": "number" } + "reserved_host_memory_mb": { "type": "number" }, + "default_filters": { "type": "str", "required": true } } }, "ec2-api": { @@ -110,6 +112,19 @@ "min_pool_size": { "type": "int", "required": false } } }, + "placement_db": { + "type": "map", + "required": true, + "mapping": { + "password": { "type": "str", "required": true }, + "user": { "type": "str", "required": true }, + "database": { "type": "str", "required": true }, + "max_pool_size": { "type": "int", "required": false }, + "max_overflow": { "type": "int", "required": false }, + "pool_timeout": { "type": "int", "required": false }, + "min_pool_size": { "type": "int", "required": false } + } + }, "rbd": { "type": "map", "required": false, "mapping": { "user": { "type": "str", "required": true }, @@ -118,7 +133,10 @@ }, "kvm": { "type": "map", "required": true, "mapping": { - "ksm_enabled": { "type": "bool", "required": true } + "nested_virt": { "type": "bool", "required": false }, + "ksm_enabled": { "type": "bool", "required": true }, + "disk_cachemodes": { "type": "str", "required": true }, + "max_threads_per_process": { "type": 
"int", "required": true } } }, "vcenter": { @@ -256,6 +274,11 @@ "mapping": { "LimitNOFILE": { "type": "int", "required": false }} } } + }, + "default_log_levels": { + "type": "seq", + "required": false, + "sequence": [ { "type": "str" } ] } } } diff --git a/chef/data_bags/crowbar/template-rabbitmq.json b/chef/data_bags/crowbar/template-rabbitmq.json index 60783dc047..d220c88dea 100644 --- a/chef/data_bags/crowbar/template-rabbitmq.json +++ b/chef/data_bags/crowbar/template-rabbitmq.json @@ -7,6 +7,7 @@ "port": 5672, "password": "", "user": "nova", + "extra_users": {}, "vhost": "/nova", "ssl": { "enabled": false, @@ -20,7 +21,8 @@ "client_ca_certs": "/etc/ssl/certs/rabbitca.pem" }, "client": { - "heartbeat_timeout": 10 + "heartbeat_timeout": 60, + "enable_notifications": false }, "cluster": false, "ha": { @@ -50,14 +52,15 @@ "mnesia": { "dump_log_write_threshold": 100, "dump_log_time_threshold": 180000 - } + }, + "enable_queue_mirroring": true } }, "deployment": { "rabbitmq": { "crowbar-revision": 0, "crowbar-applied": false, - "schema-revision": 104, + "schema-revision": 107, "element_states": { "rabbitmq-server": [ "readying", "ready", "applying" ] }, @@ -74,4 +77,3 @@ } } } - diff --git a/chef/data_bags/crowbar/template-rabbitmq.schema b/chef/data_bags/crowbar/template-rabbitmq.schema index b57d43d236..46372b5fe6 100644 --- a/chef/data_bags/crowbar/template-rabbitmq.schema +++ b/chef/data_bags/crowbar/template-rabbitmq.schema @@ -16,6 +16,20 @@ "port": { "type": "int", "required": true }, "password": { "type": "str", "required": true }, "user": { "type": "str", "required": true }, + "extra_users": { + "type": "map", + "required": false, + "mapping": { + = : { + "type": "map", + "required": false, + "mapping": { + "permissions": { "type": "seq", "required": true, "sequence": [ { "type": "str" } ] }, + "tags": { "type": "seq", "required": true, "sequence": [ { "type": "str" } ] } + } + } + } + }, "vhost": { "type": "str", "required": true }, "ssl": { "type": "map", "required": true, "mapping": { @@ -34,7 +48,8 @@ "type": "map", "required": true, "mapping" : { - "heartbeat_timeout": { "type": "int", "required": true } + "heartbeat_timeout": { "type": "int", "required": true }, + "enable_notifications": { "type": "bool", "required": true } } }, "cluster": { "type": "bool", "required": true }, @@ -95,7 +110,8 @@ "dump_log_write_threshold": { "type": "int", "required": true}, "dump_log_time_threshold": { "type": "int", "required": true} } - } + }, + "enable_queue_mirroring": { "type": "bool", "required": true} } } } diff --git a/chef/roles/database-server.rb b/chef/roles/database-server.rb index 0651705110..ad9b8ca79c 100644 --- a/chef/roles/database-server.rb +++ b/chef/roles/database-server.rb @@ -1,6 +1,5 @@ name "database-server" -description "Database Server Role" +description "PostgreSQL Server Role" run_list("recipe[database::role_database_server]") default_attributes() override_attributes() - diff --git a/chef/roles/mysql-server.rb b/chef/roles/mysql-server.rb new file mode 100644 index 0000000000..d6d5b9e9c3 --- /dev/null +++ b/chef/roles/mysql-server.rb @@ -0,0 +1,5 @@ +name "mysql-server" +description "MySQL/MariaDB Server Role" +run_list("recipe[database::role_mysql_server]") +default_attributes +override_attributes diff --git a/chef/roles/neutron-sdn-cisco-aci-agents.rb b/chef/roles/neutron-sdn-cisco-aci-agents.rb new file mode 100644 index 0000000000..cf66565834 --- /dev/null +++ b/chef/roles/neutron-sdn-cisco-aci-agents.rb @@ -0,0 +1,4 @@ +name "neutron-sdn-cisco-aci-agents" 
+description "Nodes attached to one of the Cisco ACI Leaf Ports" + +run_list("recipe[neutron::role_neutron_sdn_cisco_aci_agents]") diff --git a/crowbar_framework/app/assets/javascripts/barclamps/database/application.js b/crowbar_framework/app/assets/javascripts/barclamps/database/application.js index 23d49f55cb..a3b0db4916 100644 --- a/crowbar_framework/app/assets/javascripts/barclamps/database/application.js +++ b/crowbar_framework/app/assets/javascripts/barclamps/database/application.js @@ -1,6 +1,6 @@ /** * Copyright 2011-2013, Dell - * Copyright 2013-2014, SUSE LINUX Products GmbH + * Copyright 2013-2018, SUSE LINUX Products GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,23 +16,53 @@ */ $(document).ready(function($) { - $('#sql_engine').on('change', function() { - var value = $(this).val(); + function updateDBEngines() { + // defer update of selected engines to make sure roles assignment + // is updated by event handlers from NodeList. + setTimeout(function() { + var nodes = { + postgresql: $('ul#database-server li').length, + mysql: $('ul#mysql-server li').length + }; - var types = [ - 'mysql', - 'postgresql' - ]; + var selector = $.map(nodes, function(val, index) { + return '#{0}_container'.format(index); + }).join(', '); - var selector = $.map(types, function(val, index) { - return '#{0}_container'.format(val); - }).join(', '); + var currentEngines = $.grep(Object.keys(nodes), function(val) { return nodes[val] > 0; }); - var current = '#{0}_container'.format( - value - ); + var current = $.map(currentEngines, function(val, index) { + return '#{0}_container'.format(val); + }).join(', '); - $(selector).hide(100).attr('disabled', 'disabled'); - $(current).show(100).removeAttr('disabled'); - }).trigger('change'); + $(selector).hide(100).attr('disabled', 'disabled'); + $(current).show(100).removeAttr('disabled'); + + // automatically select active engine only for new proposals + // note that this check is not perfect and will trigger autoselect also for saved but not applied + // proposals (even old ones). 
+ if ($('#proposal_deployment').readJsonAttribute('crowbar-applied') === false) { + // update sql_engine if only one engine was selected and default to mysql if no roles are assigned + var activeEngine = $('#sql_engine').val(); + if (currentEngines.length === 1) { + activeEngine = currentEngines[0]; + } else if (currentEngines.length === 0) { + activeEngine = 'mysql'; + } + $('#sql_engine').val(activeEngine); + $('#proposal_attributes').writeJsonAttribute('sql_engine', activeEngine); + } + + // make sure all items have handlers attached + setupEventHandlers(); + }, 0); + } + + function setupEventHandlers() { + $('[data-droppable=true]').off('drop', updateDBEngines).on('drop', updateDBEngines); + $('.dropzone .delete').off('click', updateDBEngines).on('click', updateDBEngines); + $('.dropzone .unassign').off('click', updateDBEngines).on('click', updateDBEngines); + } + + updateDBEngines(); }); diff --git a/crowbar_framework/app/helpers/barclamp/database_helper.rb b/crowbar_framework/app/helpers/barclamp/database_helper.rb index f69020ff70..dbe1a2b295 100644 --- a/crowbar_framework/app/helpers/barclamp/database_helper.rb +++ b/crowbar_framework/app/helpers/barclamp/database_helper.rb @@ -30,8 +30,8 @@ def engines_for_database(selected) def ha_storage_mode_for_database(selected) options_for_select( [ - [t(".ha.storage.modes.drbd"), "drbd"], - [t(".ha.storage.modes.shared"), "shared"] + [t(".postgresql.ha.storage.modes.drbd"), "drbd"], + [t(".postgresql.ha.storage.modes.shared"), "shared"] ], selected.to_s ) diff --git a/crowbar_framework/app/models/ceilometer_service.rb b/crowbar_framework/app/models/ceilometer_service.rb index 4e8de79380..8719e68a62 100644 --- a/crowbar_framework/app/models/ceilometer_service.rb +++ b/crowbar_framework/app/models/ceilometer_service.rb @@ -142,6 +142,19 @@ def validate_proposal_after_save(proposal) end end end + + rabbitmq_proposal = Proposal.find_by( + barclamp: "rabbitmq", + name: proposal["attributes"][@bc_name]["rabbitmq_instance"] + ) + + unless rabbitmq_proposal && + rabbitmq_proposal["attributes"]["rabbitmq"]["client"]["enable_notifications"] + validation_error I18n.t( + "barclamp.#{@bc_name}.validation.notifications_enabled" + ) + end + super end diff --git a/crowbar_framework/app/models/cinder_service.rb b/crowbar_framework/app/models/cinder_service.rb index cdaabbbebb..f5924fd4ec 100644 --- a/crowbar_framework/app/models/cinder_service.rb +++ b/crowbar_framework/app/models/cinder_service.rb @@ -242,14 +242,13 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) # Generate secrets uuid for libvirt rbd backend dirty = false proposal = Proposal.find_by(barclamp: "cinder", name: role.inst) - role.default_attributes[:cinder][:volumes].each_with_index do |volume, volid| - next unless volume[:backend_driver] == "rbd" - if volume[:rbd][:secret_uuid].empty? - secret_uuid = `uuidgen`.strip - volume[:rbd][:secret_uuid] = secret_uuid - proposal[:attributes][:cinder][:volumes][volid][:rbd][:secret_uuid] = secret_uuid - dirty = true - end + role.default_attributes["cinder"]["volumes"].each_with_index do |volume, volid| + next unless volume["backend_driver"] == "rbd" + next unless volume["rbd"]["secret_uuid"].empty? 
+ secret_uuid = `uuidgen`.strip + volume["rbd"]["secret_uuid"] = secret_uuid + proposal["attributes"]["cinder"]["volumes"][volid]["rbd"]["secret_uuid"] = secret_uuid + dirty = true end if dirty # This makes the proposal in the UI looked as 'applied', even if you make changes to it diff --git a/crowbar_framework/app/models/database_service.rb b/crowbar_framework/app/models/database_service.rb index ae5b17d230..bf2533579d 100644 --- a/crowbar_framework/app/models/database_service.rb +++ b/crowbar_framework/app/models/database_service.rb @@ -38,6 +38,16 @@ def role_constraints "suse" => "< 12.2", "windows" => "/.*/" } + }, + "mysql-server" => { + "unique" => false, + "count" => 1, + "cluster" => true, + "admin" => false, + "exclude_platform" => { + "suse" => "< 12.2", + "windows" => "/.*/" + } } } end @@ -47,12 +57,18 @@ def create_proposal @logger.debug("Database create_proposal: entering") base = super + db_role = if base["attributes"]["sql_engine"] == "postgresql" + "database-server" + else + "mysql-server" + end + nodes = NodeObject.all nodes.delete_if { |n| n.nil? or n.admin? } if nodes.size >= 1 controller = nodes.find { |n| n.intended_role == "controller" } || nodes.first base["deployment"]["database"]["elements"] = { - "database-server" => [controller[:fqdn]] + db_role => [controller[:fqdn]] } end @@ -60,12 +76,25 @@ def create_proposal base end - def validate_ha_attributes(attributes, cluster) - storage_mode = attributes["ha"]["storage"]["mode"] + def role_for_engine(engine) + if engine == "postgresql" + "database-server" + else + "mysql-server" + end + end + + def already_applied?(proposal_name="default") + !RoleObject.find_role_by_name("#{@bc_name}-config-#{proposal_name}").nil? + end + + def validate_ha_attributes(attributes, cluster, sql_engine) role = available_clusters[cluster] - case attributes["sql_engine"] + case sql_engine when "postgresql" + ha_attr = attributes["postgresql"]["ha"] + storage_mode = ha_attr["storage"]["mode"] unless ["shared", "drbd"].include?(storage_mode) validation_error I18n.t( "barclamp.#{@bc_name}.validation.unknown_mode_ha", @@ -73,12 +102,12 @@ def validate_ha_attributes(attributes, cluster) ) end if storage_mode == "shared" - if attributes["ha"]["storage"]["shared"]["device"].blank? + if ha_attr["storage"]["shared"]["device"].blank? validation_error I18n.t( "barclamp.#{@bc_name}.validation.no_device" ) end - if attributes["ha"]["storage"]["shared"]["fstype"].blank? + if ha_attr["storage"]["shared"]["fstype"].blank? validation_error I18n.t( "barclamp.#{@bc_name}.validation.no_filesystem" ) @@ -90,7 +119,7 @@ def validate_ha_attributes(attributes, cluster) cluster_name: cluster_name(cluster) ) end - if attributes["ha"]["storage"]["drbd"]["size"] <= 0 + if ha_attr["storage"]["drbd"]["size"] <= 0 validation_error I18n.t( "barclamp.#{@bc_name}.validation.invalid_size_drbd" ) @@ -111,21 +140,66 @@ def validate_ha_attributes(attributes, cluster) end def validate_proposal_after_save(proposal) - validate_one_for_role proposal, "database-server" - attributes = proposal["attributes"][@bc_name] - db_engine = attributes["sql_engine"] + deployment = proposal["deployment"][@bc_name] + active_engine = attributes["sql_engine"] + validation_error I18n.t( "barclamp.#{@bc_name}.validation.invalid_db_engine", - db_engine: db_engine - ) unless %w(mysql postgresql).include?(db_engine) - - # HA validation - servers = proposal["deployment"][@bc_name]["elements"]["database-server"] - unless servers.nil? || servers.first.nil? 
|| !is_cluster?(servers.first) - cluster = servers.first - validate_ha_attributes(attributes, cluster) + db_engine: active_engine + ) unless ["mysql", "postgresql"].include?(active_engine) + + selected_engines = ["postgresql", "mysql"].select do |engine| + nodes = deployment["elements"][role_for_engine engine] + !nodes.nil? && !nodes.first.nil? + end + + expand_nodes_for_all(deployment["elements"]["mysql-server"] || []).flatten.each do |n| + node = Node.find_by_name(n) + validation_error I18n.t( + "barclamp.#{@bc_name}.validation.monasca_deployed", + node_name: n + ) if node.roles.include?("monasca-server") + end + + validation_error I18n.t( + "barclamp.#{@bc_name}.validation.new_proposal_multi_engine" + ) if selected_engines.length > 1 && !already_applied? + + validation_error I18n.t( + "barclamp.#{@bc_name}.validation.engine_roles_mismatch", + db_engine: active_engine + ) unless selected_engines.include?(active_engine) + + selected_engines.each do |engine| + db_role = role_for_engine engine + validate_one_for_role proposal, db_role + + # HA validation + servers = deployment["elements"][db_role] + unless servers.nil? || servers.first.nil? || !is_cluster?(servers.first) + cluster = servers.first + validate_ha_attributes(attributes, cluster, engine) + end end + super + end + + def validate_proposal_elements(proposal_elements) + old_proposal = proposals_raw.first + + return super if old_proposal.nil? + + # disallow adding psql when mysql is already deployed + old_psql_nodes = old_proposal.elements["database-server"] || [] + old_mysql_nodes = old_proposal.elements["mysql-server"] || [] + new_psql_nodes = proposal_elements["database-server"] || [] + raise I18n.t( + "barclamp.#{@bc_name}.validation.secondary_psql" + ) if already_applied? && + !old_mysql_nodes.empty? && + old_psql_nodes.empty? && + !new_psql_nodes.empty? super end @@ -134,23 +208,35 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) @logger.debug("Database apply_role_pre_chef_call: entering #{all_nodes.inspect}") return if all_nodes.empty? - database_elements, database_nodes, database_ha_enabled = role_expand_elements(role, "database-server") - Openstack::HA.set_controller_role(database_nodes) if database_ha_enabled - - vip_networks = ["admin"] - dirty = prepare_role_for_ha_with_haproxy(role, ["database", "ha", "enabled"], - database_ha_enabled, - database_elements, - vip_networks) - role.save if dirty - - reset_sync_marks_on_clusters_founders(database_elements) - sql_engine = role.default_attributes["database"]["sql_engine"] - if database_ha_enabled - net_svc = NetworkService.new @logger - case sql_engine + vip_networks = ["admin"] + dirty = false + net_svc = NetworkService.new @logger + db_enabled = { + "mysql" => { + "enabled" => false, + "ha" => false + }, + "postgresql" => { + "enabled" => false, + "ha" => false + } + } + ["postgresql", "mysql"].each do |engine| + db_role = role_for_engine engine + database_elements, database_nodes, database_ha_enabled = role_expand_elements(role, db_role) + db_enabled[engine]["enabled"] = true unless database_nodes.empty? 
+ db_enabled[engine]["ha"] = database_ha_enabled + Openstack::HA.set_controller_role(database_nodes) if database_ha_enabled + dirty = prepare_role_for_ha_with_haproxy(role, + ["database", engine, "ha", "enabled"], + database_ha_enabled, + database_elements, + vip_networks) || dirty + reset_sync_marks_on_clusters_founders(database_elements) + next unless database_ha_enabled + case engine when "postgresql" unless database_elements.length == 1 && PacemakerServiceObject.is_cluster?(database_elements[0]) raise "Internal error: HA enabled, but element is not a cluster" @@ -168,17 +254,19 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) allocate_virtual_ips_for_any_cluster_in_networks(database_elements, vip_networks) end end + role.save if dirty - role.default_attributes["database"][sql_engine] = {} if role.default_attributes["database"][sql_engine].nil? + db_enabled.each_key { |engine| role.default_attributes["database"][engine] ||= {} } role.default_attributes["database"]["db_maker_password"] = (old_role && old_role.default_attributes["database"]["db_maker_password"]) || random_password - if ( sql_engine == "mysql" ) + if db_enabled["mysql"]["enabled"] role.default_attributes["database"]["mysql"]["server_root_password"] = (old_role && old_role.default_attributes["database"]["mysql"]["server_root_password"]) || random_password - if database_ha_enabled + if db_enabled["mysql"]["ha"] role.default_attributes["database"]["mysql"]["sstuser_password"] = (old_role && old_role.default_attributes["database"]["mysql"]["sstuser_password"]) || random_password end @logger.debug("setting mysql specific attributes") - elsif ( sql_engine == "postgresql" ) + end + if db_enabled["postgresql"]["enabled"] # Attribute is not living in "database" namespace, but that's because # it's for the postgresql cookbook. We're not using default_attributes # because the upstream cookbook use node.set_unless which would override diff --git a/crowbar_framework/app/models/ironic_service.rb b/crowbar_framework/app/models/ironic_service.rb index f19b550613..20e3ecc8e0 100644 --- a/crowbar_framework/app/models/ironic_service.rb +++ b/crowbar_framework/app/models/ironic_service.rb @@ -16,7 +16,7 @@ # class IronicService < ServiceObject - def initialize(thelogger) + def initialize(thelogger = nil) super(thelogger) @bc_name = "ironic" end diff --git a/crowbar_framework/app/models/keystone_service.rb b/crowbar_framework/app/models/keystone_service.rb index 19e9c0f417..878e9d8816 100644 --- a/crowbar_framework/app/models/keystone_service.rb +++ b/crowbar_framework/app/models/keystone_service.rb @@ -133,26 +133,6 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) @logger.debug("Keystone apply_role_pre_chef_call: leaving") end - def update_proposal_status(inst, status, message, bc = @bc_name) - @logger.debug("update_proposal_status: enter #{inst} #{bc} #{status} #{message}") - - prop = Proposal.where(barclamp: bc, name: inst).first - unless prop.nil? - prop["deployment"][bc]["crowbar-status"] = status - prop["deployment"][bc]["crowbar-failed"] = message - # save the updated_password into the password field to update the raw_view - if status == "success" && !prop["attributes"][bc]["admin"]["updated_password"].blank?
- prop["attributes"][bc]["admin"]["password"] = prop["attributes"][bc]["admin"]["updated_password"] - res = prop.save - else - res = true - end - - @logger.debug("update_proposal_status: exit #{inst} #{bc} #{status} #{message}") - res - end - def apply_role_post_chef_call(old_role, role, all_nodes) @logger.debug("Keystone apply_role_post_chef_call: entering #{all_nodes.inspect}") diff --git a/crowbar_framework/app/models/manila_service.rb b/crowbar_framework/app/models/manila_service.rb index 5ce22009da..3ab354de33 100644 --- a/crowbar_framework/app/models/manila_service.rb +++ b/crowbar_framework/app/models/manila_service.rb @@ -78,10 +78,17 @@ def create_proposal storage = select_nodes_for_role( nodes, "manila-share", "storage") || [] + # Do not put manila-share roles on compute nodes + # (this does not work with the non-disruptive upgrade) + shares = storage.reject { |n| n.roles.include? "nova-compute-kvm" } + + # Keep at least one manila-share node if the previous filter emptied the list + shares << controllers.first if shares.empty? + base["deployment"][@bc_name]["elements"] = { "manila-server" => controllers.empty? ? [] : [controllers.first.name], - "manila-share" => storage.map(&:name) + "manila-share" => shares.map(&:name) } base["attributes"][@bc_name]["database_instance"] = diff --git a/crowbar_framework/app/models/monasca_service.rb b/crowbar_framework/app/models/monasca_service.rb index fcb9ac4c4c..a268da9c1d 100644 --- a/crowbar_framework/app/models/monasca_service.rb +++ b/crowbar_framework/app/models/monasca_service.rb @@ -93,7 +93,7 @@ def create_proposal nodes = NodeObject.all non_db_nodes = nodes.reject do |n| # Do not deploy monasca-server to the node running database cluster (already running mariadb) - n.roles.include?("database-server") && n[:database][:sql_engine] == "mysql" + n.roles.include?("mysql-server") end monasca_server = select_nodes_for_role(non_db_nodes, "monasca-server", "monitoring") || [] @@ -141,7 +141,7 @@ def validate_proposal_after_save(proposal) nodes = proposal["deployment"][@bc_name]["elements"] nodes["monasca-server"].each do |node| n = NodeObject.find_node_by_name(node) - if n.roles.include?("database-server") && n[:database][:sql_engine] == "mysql" + if n.roles.include?("mysql-server") validation_error( "monasca-server role cannot be deployed to the node with other MariaDB instance." ) diff --git a/crowbar_framework/app/models/neutron_service.rb b/crowbar_framework/app/models/neutron_service.rb index 0a122baf64..fc2d3b4d6d 100644 --- a/crowbar_framework/app/models/neutron_service.rb +++ b/crowbar_framework/app/models/neutron_service.rb @@ -61,6 +61,16 @@ def role_constraints "windows" => "/.*/" }, "cluster" => true + }, + "neutron-sdn-cisco-aci-agents" => { + "unique" => false, + "count" => -1, + "admin" => false, + "exclude_platform" => { + "suse" => "< 12.2", + "windows" => "/.*/" + }, + "cluster" => true } } end @@ -110,10 +120,11 @@ def create_proposal base["deployment"]["neutron"]["elements"] = { "neutron-server" => [controller_node[:fqdn]], "neutron-network" => network_nodes.map { |x| x[:fqdn] } - } unless nodes.nil? or nodes.length ==0 + } unless nodes.nil? || nodes.length.zero?
base["attributes"]["neutron"]["service_password"] = random_password base["attributes"][@bc_name][:db][:password] = random_password + base["attributes"][@bc_name][:l3_ha][:vrrp_password] = random_password base end @@ -219,39 +230,6 @@ def validate_ml2(proposal) validation_error I18n.t("barclamp.#{@bc_name}.validation.vmware_dvs_vlan") end - # Checks for Cisco ACI ml2 driver - if ml2_mechanism_drivers.include?("cisco_apic_ml2") && - ml2_mechanism_drivers.include?("apic_gbp") - validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_ml2_gbp") - end - - if ml2_mechanism_drivers.include?("cisco_apic_ml2") || - ml2_mechanism_drivers.include?("apic_gbp") - # openvswitch should not be used when cisco_apic_ml2 mechanism driver is used - if ml2_mechanism_drivers.include?("openvswitch") - validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_ml2") - end - - if ml2_mechanism_drivers.include?("linuxbridge") - validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_linuxbridge") - end - - # cisco_apic_ml2 mechanism driver needs opflex as the type_driver - unless ml2_type_drivers.include?("opflex") - validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_type") - end - - # Validate if ACI configurations are provided - if proposal["attributes"]["neutron"]["apic"].nil? || - proposal["attributes"]["neutron"]["apic"].empty? - validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_no_config") - end - - # Cisco APIC already distributes neutron services not needing DVR - if proposal["attributes"]["neutron"]["use_dvr"] - validation_error I18n.t("barcalmp.#{@bc_name}.validation.cisco_apic_dvr") - end - end # for now, openvswitch and linuxbrige can't be used in parallel if ml2_mechanism_drivers.include?("openvswitch") && @@ -332,6 +310,54 @@ def validate_dvr(proposal) end end + def validate_cisco_aci(proposal) + # Checks for Cisco ACI ml2 driver + ml2_mechanism_drivers = proposal["attributes"]["neutron"]["ml2_mechanism_drivers"] + ml2_type_drivers = proposal["attributes"]["neutron"]["ml2_type_drivers"] + + if ml2_mechanism_drivers.include?("cisco_apic_ml2") && + ml2_mechanism_drivers.include?("apic_gbp") + validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_ml2_gbp") + end + + if ml2_mechanism_drivers.include?("cisco_apic_ml2") || + ml2_mechanism_drivers.include?("apic_gbp") + + validate_at_least_n_for_role proposal, "neutron-sdn-cisco-aci-agents", 1 + + # openvswitch should not be used when cisco_apic_ml2 mechanism driver is used + if ml2_mechanism_drivers.include?("openvswitch") + validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_ml2") + end + + if ml2_mechanism_drivers.include?("linuxbridge") + validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_linuxbridge") + end + + # cisco_apic_ml2 mechanism driver needs opflex as the type_driver + unless ml2_type_drivers.include?("opflex") + validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_type") + end + + # Validate if ACI configurations are provided + if proposal["attributes"]["neutron"]["apic"].nil? || + proposal["attributes"]["neutron"]["apic"].empty? 
+ validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_no_config") + end + + # Cisco APIC already distributes neutron services not needing DVR + if proposal["attributes"]["neutron"]["use_dvr"] + validation_error I18n.t("barclamp.#{@bc_name}.validation.cisco_apic_dvr") + end + end + end + + def validate_l3ha(proposal) + if proposal["attributes"]["neutron"]["l3_ha"]["use_l3_ha"] + validate_multiple_for_role_or_cluster proposal, "neutron-network" + end + end + def validate_external_networks(external_networks) net_svc = NetworkService.new @logger network_proposal = Proposal.find_by(barclamp: net_svc.bc_name, name: "default") @@ -374,6 +400,8 @@ def validate_proposal_after_save(proposal) validate_ml2(proposal) if plugin == "ml2" validate_l2pop(proposal) validate_dvr(proposal) + validate_cisco_aci(proposal) + validate_l3ha(proposal) if proposal[:attributes][:neutron][:use_infoblox] validate_infoblox(proposal) end diff --git a/crowbar_framework/app/models/nova_service.rb b/crowbar_framework/app/models/nova_service.rb index f0728079a3..20d7603ab6 100644 --- a/crowbar_framework/app/models/nova_service.rb +++ b/crowbar_framework/app/models/nova_service.rb @@ -186,7 +186,9 @@ def create_proposal base["attributes"][@bc_name]["neutron_instance"] = find_dep_proposal("neutron") base["attributes"]["nova"]["service_password"] = random_password + base["attributes"]["nova"]["placement_service_password"] = random_password base["attributes"]["nova"]["api_db"]["password"] = random_password + base["attributes"]["nova"]["placement_db"]["password"] = random_password base["attributes"]["nova"]["db"]["password"] = random_password base["attributes"]["nova"]["neutron_metadata_proxy_shared_secret"] = random_password diff --git a/crowbar_framework/app/models/rabbitmq_service.rb b/crowbar_framework/app/models/rabbitmq_service.rb index 822f17c7f3..7525f994d0 100644 --- a/crowbar_framework/app/models/rabbitmq_service.rb +++ b/crowbar_framework/app/models/rabbitmq_service.rb @@ -71,6 +71,30 @@ def apply_role_pre_chef_call(old_role, role, all_nodes) @logger.debug("Rabbitmq apply_role_pre_chef_call: entering #{all_nodes.inspect}") return if all_nodes.empty? + # prepare extra users + save_role = false + old_attrs = old_role.nil? ? nil : old_role.default_attributes[@bc_name] + role.default_attributes[@bc_name]["users"] ||= [] + role.default_attributes[@bc_name]["extra_users"].each do |username, user| + save_role = true + updated_user = { + username: username, + tags: user["tags"], + permissions: user["permissions"] + } + existing = old_attrs.nil? ? nil : (old_attrs["users"] || []).find { |u| u["username"] == username } + if existing + # reuse the existing password + updated_user.update(password: existing["password"]) + else + # new user, so create a random password + updated_user.update(password: random_password) + end + role.default_attributes[@bc_name]["users"].push(updated_user) + end + + role.save if save_role + rabbitmq_elements, rabbitmq_nodes, rabbitmq_ha_enabled = role_expand_elements(role, "rabbitmq-server") Openstack::HA.set_controller_role(rabbitmq_nodes) if rabbitmq_ha_enabled @@ -125,6 +154,17 @@ def validate_proposal_after_save(proposal) servers = proposal["deployment"][@bc_name]["elements"]["rabbitmq-server"] ha_enabled = !(servers.nil? || servers.first.nil? || !is_cluster?(servers.first)) + # extra users validation for permissions + unless attributes["extra_users"].empty?
+ attributes["extra_users"].each do |username, user| + if user["permissions"].length != 3 + validation_error I18n.t( + "barclamp.#{bc_name}.validation.wrong_permissions", user: username + ) + end + end + end + # Shared storage validation for HA if ha_enabled && !attributes["cluster"] storage_mode = attributes["ha"]["storage"]["mode"] @@ -154,4 +194,3 @@ def validate_proposal_after_save(proposal) super end end - diff --git a/crowbar_framework/app/views/barclamp/aodh/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/aodh/_edit_attributes.html.haml index 9692919632..c8b9a0705f 100644 --- a/crowbar_framework/app/views/barclamp/aodh/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/aodh/_edit_attributes.html.haml @@ -9,7 +9,7 @@ = instance_field :ceilometer = integer_field :evaluation_interval - + = integer_field :alarm_history_ttl %fieldset %legend diff --git a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml index 3cd7af0441..ce5caece27 100644 --- a/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml +++ b/crowbar_framework/app/views/barclamp/database/_edit_attributes.html.haml @@ -3,51 +3,59 @@ = header show_raw_deployment?, true .panel-body - = select_field :sql_engine, :collection => :engines_for_database + .alert.alert-warning + = t(".engine_upgrade") - #mysql_container - %fieldset - %legend - = t('.mysql_attributes') - - = integer_field %w(mysql max_connections) - = integer_field %w(mysql expire_logs_days) - = boolean_field %w(mysql slow_query_logging) - - %fieldset - %legend - = t(".mysql.ssl_header") + = string_field :sql_engine, disabled: true - = boolean_field %w(mysql ssl enabled), - "data-sslprefix" => "ssl" - - #ssl_container - = boolean_field %w(mysql ssl generate_certs) - = string_field %w(mysql ssl certfile) - = string_field %w(mysql ssl keyfile) - = boolean_field %w(mysql ssl insecure) - = string_field %w(mysql ssl ca_certs) + %ul.list-group#mysql_container + %li.list-group-item.active + %h3.list-group-item-heading + = t('.mysql_attributes') - #postgresql_container - %fieldset - %legend + %li.list-group-item + %fieldset + = integer_field %w(mysql max_connections) + = integer_field %w(mysql expire_logs_days) + = boolean_field %w(mysql slow_query_logging) + + %fieldset{ "style" => "display:none" } + %legend + = t(".mysql.ssl_header") + + = boolean_field %w(mysql ssl enabled), + "data-sslprefix" => "ssl" + + #ssl_container + = boolean_field %w(mysql ssl generate_certs) + = string_field %w(mysql ssl certfile) + = string_field %w(mysql ssl keyfile) + = boolean_field %w(mysql ssl insecure) + = string_field %w(mysql ssl ca_certs) + + %ul.list-group#postgresql_container + %li.list-group-item.active + %h3.list-group-item-heading = t('.postgresql_attributes') - = integer_field %w(postgresql config max_connections) + %li.list-group-item + %fieldset + = integer_field %w(postgresql config max_connections) + + -# As HA is only supported for postgresql, we put this section in #postgresql_container + %fieldset#ha-setup{ "data-show-for-clusters-only" => "true", "data-elements-path" => "database-server" } + %legend + = t('.ha_header') - -# As HA is only supported for postgresql, we put this section in #postgresql_container - %fieldset#ha-setup{ "data-show-for-clusters-only" => "true", "data-elements-path" => "database-server" } - %legend - = t('.ha_header') + = select_field %w(postgresql ha storage mode), :collection => 
-      = select_field %w(ha storage mode), :collection => :ha_storage_mode_for_database, "data-showit" => ["drbd", "shared"].join(";"), "data-showit-target" => "#drbd_storage_container;#shared_storage_container", "data-showit-direct" => "true"
+          #drbd_storage_container
+            .alert.alert-info
+              = t('.postgresql.ha.storage.drbd_info')
+            = integer_field %w(postgresql ha storage drbd size)

-      #drbd_storage_container
-        .alert.alert-info
-          = t('.ha.storage.drbd_info')
-        = integer_field %w(ha storage drbd size)
+          #shared_storage_container
+            = string_field %w(postgresql ha storage shared device)
+            = string_field %w(postgresql ha storage shared fstype)
+            = string_field %w(postgresql ha storage shared options)

-      #shared_storage_container
-        = string_field %w(ha storage shared device)
-        = string_field %w(ha storage shared fstype)
-        = string_field %w(ha storage shared options)
diff --git a/crowbar_framework/app/views/barclamp/keystone/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/keystone/_edit_attributes.html.haml
index 04b642918e..006f6c849d 100644
--- a/crowbar_framework/app/views/barclamp/keystone/_edit_attributes.html.haml
+++ b/crowbar_framework/app/views/barclamp/keystone/_edit_attributes.html.haml
@@ -14,12 +14,7 @@
     = string_field %w(default tenant)
     = string_field %w(admin username)

-    - if @proposal.active?
-      = password_field %w(admin updated_password)
-      .alert.alert-info
-        = t('.admin.updated_password_hint')
-    - else
-      = password_field %w(admin password)
+    = password_field %w(admin password)

     = boolean_field %w(default create_user),
       "data-showit" => "true",
diff --git a/crowbar_framework/app/views/barclamp/manila/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/manila/_edit_attributes.html.haml
index f9abd67d65..637301d315 100644
--- a/crowbar_framework/app/views/barclamp/manila/_edit_attributes.html.haml
+++ b/crowbar_framework/app/views/barclamp/manila/_edit_attributes.html.haml
@@ -72,7 +72,7 @@
           = boolean_field %w(shares {{@index}} cephfs use_crowbar), "data-hideit" => "true", "data-hideit-target" => "#cephfs_conf_path_{{@index}}", "data-hideit-direct" => "true"
           %div{:id => "cephfs_conf_path_{{@index}}"}
             = string_field %w(shares {{@index}} cephfs cephfs_conf_path)
-          = string_field %w(shares {{@index}} cephfs cephfs_clustername)
+          = string_field %w(shares {{@index}} cephfs cephfs_cluster_name)
           = string_field %w(shares {{@index}} cephfs cephfs_auth_id)
         {{/if_eq}}

diff --git a/crowbar_framework/app/views/barclamp/monasca/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/monasca/_edit_attributes.html.haml
index f75aeeb847..65d3d68367 100644
--- a/crowbar_framework/app/views/barclamp/monasca/_edit_attributes.html.haml
+++ b/crowbar_framework/app/views/barclamp/monasca/_edit_attributes.html.haml
@@ -10,6 +10,7 @@

     = select_field %w(agent log_level), :collection => :agent_log_levels
     = integer_field %w(agent statsd_port)
+    = boolean_field %w(agent monitor_ceph)
     = boolean_field %w(agent monitor_libvirt)
     = integer_field %w(agent check_frequency)
     = integer_field %w(agent num_collector_threads)
diff --git a/crowbar_framework/app/views/barclamp/rabbitmq/_edit_attributes.html.haml b/crowbar_framework/app/views/barclamp/rabbitmq/_edit_attributes.html.haml
index 9408427e99..108699c3e9 100644
--- a/crowbar_framework/app/views/barclamp/rabbitmq/_edit_attributes.html.haml
+++ b/crowbar_framework/app/views/barclamp/rabbitmq/_edit_attributes.html.haml
@@ -6,6 +6,37 @@
     = string_field :vhost
     = integer_field :port
     = string_field :user
+    = boolean_field %w(client enable_notifications)
+
+    %fieldset
+      %legend
+        = t(".extra_users.title")
+
+      %table.table.table-middle{ "data-dynamic" => "#extrauser-entries", "data-namespace" => "extra_users", "data-optional" => "tags",
+        "data-invalid" => t(".extra_users.error_invalid"), "data-duplicate" => t(".extra_users.error_duplicate") }
+        %thead
+          %tr
+            %th.col-sm-2
+              = t(".extra_users.username")
+            %th.col-sm-3
+              = t(".extra_users.permissions")
+            %th.col-sm-3
+              = t(".extra_users.tags")
+            %th.col-sm-1
+        %tbody
+        %tfoot
+          %tr
+            %td
+              = text_field_tag "extrauser[name]", "", :placeholder => t(".extra_users.username"),
+                :class => "form-control", "data-name" => "name", "data-type" => "string"
+            %td
+              = text_field_tag "extrauser[permissions]", "", :placeholder => t(".extra_users.permissions"),
+                :class => "form-control", "data-name" => "permissions", "data-type" => "array-string"
+            %td
+              = text_field_tag "extrauser[tags]", "", :placeholder => t(".extra_users.tags"),
+                :class => "form-control", "data-name" => "tags", "data-type" => "array-string"
+            %td
+              = link_to t(".extra_users.add"), "#", :class => "btn btn-default btn-block", "data-add" => true

 %fieldset
   %legend
@@ -48,3 +79,26 @@
     = string_field %w(ha storage shared device)
     = string_field %w(ha storage shared fstype)
     = string_field %w(ha storage shared options)
+
+
+%script#extrauser-entries{ :type => "text/x-handlebars-template" }
+  {{#each entries}}
+  %tr.edit
+    %td
+      = text_field_tag "extrauser[name]", "{{name}}", :placeholder => t(".extra_users.username"),
+        :class => "form-control", :disabled => "disabled"
+    %td
+      = text_field_tag "extrauser[permissions]", "{{permissions}}", :placeholder => t(".extra_users.permissions"),
+        :class => "form-control", "data-update" => "extra_users/{{name}}/permissions", "data-name" => "permissions",
+        "data-type" => "array-string"
+    %td
+      = text_field_tag "extrauser[tags]", "{{tags}}", :placeholder => t(".extra_users.tags"), :class => "form-control",
+        "data-update" => "extra_users/{{name}}/tags", "data-name" => "tags", "data-type" => "array-string"
+    %td
+      = link_to t(".extra_users.record_remove"), "#", :class => "btn btn-default btn-block", "data-remove" => "{{name}}"
+  {{else}}
+  %tr
+    %td{ :colspan => 4 }
+      .empty.alert.alert-info.text-center
+        = t(".extra_users.no_records")
+  {{/each}}
diff --git a/crowbar_framework/config/locales/aodh/en.yml b/crowbar_framework/config/locales/aodh/en.yml
index da7e097fef..7256c91390 100644
--- a/crowbar_framework/config/locales/aodh/en.yml
+++ b/crowbar_framework/config/locales/aodh/en.yml
@@ -27,6 +27,8 @@ en:
         evaluation_interval: 'Evaluation interval for threshold alarms (in seconds).'
         logging_header: 'Logging'
         verbose: 'Verbose Logging'
+        alarm_history_ttl: 'Number of seconds that alarm histories are kept in the database (<= 0 means forever).'
+
         api:
           protocol: 'Protocol'
           ssl_header: 'SSL Support'
diff --git a/crowbar_framework/config/locales/ceilometer/en.yml b/crowbar_framework/config/locales/ceilometer/en.yml
index 00a9317ecf..be0d10a7de 100644
--- a/crowbar_framework/config/locales/ceilometer/en.yml
+++ b/crowbar_framework/config/locales/ceilometer/en.yml
@@ -48,6 +48,7 @@ en:
           cert_required: 'Require Client Certificate'
           ca_certs: 'SSL CA Certificates File'
       validation:
+        notifications_enabled: 'Sending notifications has to be enabled in the RabbitMQ proposal first.'
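+        # see the client.enable_notifications option added to the rabbitmq barclamp in this change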
        hyper_v_support: 'Hyper-V support is not available.'
         swift_proxy: 'Nodes with the ceilometer-swift-proxy-middleware role must also have the swift-proxy role.'
         nodes_count: 'The cluster assigned to the ceilometer-server role should have at least 3 nodes, but it only has %{nodes_count}.'
diff --git a/crowbar_framework/config/locales/database/en.yml b/crowbar_framework/config/locales/database/en.yml
index be5868d852..1fd5a67423 100644
--- a/crowbar_framework/config/locales/database/en.yml
+++ b/crowbar_framework/config/locales/database/en.yml
@@ -21,7 +21,8 @@ en:
   barclamp:
     database:
       edit_attributes:
-        sql_engine: 'SQL Engine'
+        engine_upgrade: 'Deploying multiple database engines at the same time is only supported before the SUSE OpenStack Cloud 8 upgrade, to allow the migration from PostgreSQL to MariaDB. Please refer to the SUSE OpenStack Cloud documentation for more information about the upgrade procedure.'
+        sql_engine: 'Active SQL Engine'
         mysql_attributes: 'MariaDB Options'
         mysql:
           datadir: 'Datadir'
@@ -40,20 +41,20 @@ en:
         postgresql:
           config:
             max_connections: 'Global Connection Limit (max_connections)'
+          ha:
+            storage:
+              mode: 'Storage Mode'
+              modes:
+                drbd: 'DRBD'
+                shared: 'Shared Storage'
+              drbd_info: 'The cluster must have been setup for DRBD.'
+              drbd:
+                size: 'Size to Allocate for DRBD Device (in Gigabytes)'
+              shared:
+                device: 'Name of Block Device or NFS Mount Specification'
+                fstype: 'Filesystem Type'
+                options: 'Mount Options'
         ha_header: 'High Availability'
-        ha:
-          storage:
-            mode: 'Storage Mode'
-            modes:
-              drbd: 'DRBD'
-              shared: 'Shared Storage'
-            drbd_info: 'The cluster must have been setup for DRBD.'
-            drbd:
-              size: 'Size to Allocate for DRBD Device (in Gigabytes)'
-            shared:
-              device: 'Name of Block Device or NFS Mount Specification'
-              fstype: 'Filesystem Type'
-              options: 'Mount Options'
       validation:
         invalid_db_engine: 'Invalid database engine: %{db_engine}.'
         unknown_mode_ha: 'Unknown mode for HA storage: %{storage_mode}.'
@@ -63,3 +64,7 @@ en:
         invalid_size_drbd: 'Invalid size for DRBD device.'
         cluster_size_one: 'The Galera cluster needs more than one cluster member.'
         cluster_size_even: 'The Galera cluster needs an odd number of cluster members and at least three of them.'
+        engine_roles_mismatch: 'Assigned roles do not match the selected database engine: %{db_engine}.'
+        secondary_psql: 'PostgreSQL can only be deployed as the first SQL engine. Migration from MariaDB to PostgreSQL is not supported.'
+        new_proposal_multi_engine: 'A second SQL engine can only be added to an existing database deployment.'
+        monasca_deployed: 'MariaDB cannot be deployed on a node with the monasca-server role: %{node_name}.'
diff --git a/crowbar_framework/config/locales/keystone/en.yml b/crowbar_framework/config/locales/keystone/en.yml
index 11fabcdafc..ac615688ce 100644
--- a/crowbar_framework/config/locales/keystone/en.yml
+++ b/crowbar_framework/config/locales/keystone/en.yml
@@ -32,8 +32,6 @@ en:
         admin:
           username: 'Administrator Username'
           password: 'Administrator Password'
-          updated_password: 'Update Administrator Password'
-          updated_password_hint: 'Changing the admin password directly in OpenStack its not supported. You can change the admin password directly using this field.'
          ssl_header: 'SSL Support'
         api:
           protocol: 'Protocol'
diff --git a/crowbar_framework/config/locales/manila/en.yml b/crowbar_framework/config/locales/manila/en.yml
index 3d7c13126b..df5714314c 100644
--- a/crowbar_framework/config/locales/manila/en.yml
+++ b/crowbar_framework/config/locales/manila/en.yml
@@ -74,7 +74,7 @@ en:
           cephfs:
             use_crowbar: 'Use Ceph deployed by Crowbar'
             cephfs_conf_path: 'Path to Ceph configuration file'
-            cephfs_clustername: 'Cluster name'
+            cephfs_cluster_name: 'Cluster name'
             cephfs_auth_id: 'Authentication ID'
         manual:
           config: 'Options'
diff --git a/crowbar_framework/config/locales/monasca/en.yml b/crowbar_framework/config/locales/monasca/en.yml
index 5defda6cd1..dfbd174768 100644
--- a/crowbar_framework/config/locales/monasca/en.yml
+++ b/crowbar_framework/config/locales/monasca/en.yml
@@ -35,7 +35,8 @@ en:
         insecure: 'Do you want insecure connection?'
         ca_file: 'Sets the path to the ca certs file if using certificates. Required only if insecure is set to False (ca_file)'
         log_level: 'Log level'
-        monitor_libvirt: 'Whether to monitor the libvirt process on compute nodes'
+        monitor_ceph: 'Monitor Ceph'
+        monitor_libvirt: 'Monitor libvirt'
         statsd_port: 'Monasca Statsd port'
         check_frequency: 'Time to wait between collection runs (check_frequency)'
         num_collector_threads: 'Number of Collector Threads to run (num_collector_threads)'
diff --git a/crowbar_framework/config/locales/rabbitmq/en.yml b/crowbar_framework/config/locales/rabbitmq/en.yml
index f8c512379b..399db3b6ea 100644
--- a/crowbar_framework/config/locales/rabbitmq/en.yml
+++ b/crowbar_framework/config/locales/rabbitmq/en.yml
@@ -21,6 +21,18 @@ en:
       edit_attributes:
         vhost: 'Virtual host'
         user: 'User'
+        client:
+          enable_notifications: 'Configure Clients to send notifications'
+        extra_users:
+          username: 'Username'
+          permissions: 'Permissions (3 comma-separated items for configure, write, read; e.g. ".*,.*,.*")'
+          tags: 'Tags (comma-separated)'
+          error_invalid: 'Username and Permissions cannot be empty.'
+          error_duplicate: 'There is already a user with this username.'
+          title: 'Extra users'
+          add: 'Add'
+          record_remove: 'Delete'
+          no_records: 'No records'
         port: 'Port'
         ssl_header: 'SSL Support'
         ssl:
@@ -54,3 +66,4 @@ en:
         no_filesystem: 'No filesystem type specified for shared storage.'
         drbd: 'DRBD is not enabled for cluster %{cluster_name}.'
         invalid_size: 'Invalid size for DRBD device.'
+        wrong_permissions: 'Wrong permissions for user %{user}. Permissions need to be 3 comma-separated items (configure, write, read).'
diff --git a/crowbar_framework/lib/openstack/ha.rb b/crowbar_framework/lib/openstack/ha.rb
index f25e5ed1ce..29fdfd099b 100644
--- a/crowbar_framework/lib/openstack/ha.rb
+++ b/crowbar_framework/lib/openstack/ha.rb
@@ -21,6 +21,8 @@ def self.set_role(nodes, role)
       save_it = false
       node = NodeObject.find_node_by_name nodename

+      node[:pacemaker] ||= {}
+      node[:pacemaker][:attributes] ||= {}
       if node[:pacemaker][:attributes]["OpenStack-role"] != role
         node[:pacemaker][:attributes]["OpenStack-role"] = role
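+        # the ||= guards above let set_role handle nodes that have no pacemaker attributes yet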