From ea612508652783868f0874671bdd1676ae42292f Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Wed, 13 Jan 2021 20:35:12 +0100 Subject: [PATCH 01/84] Add Visual Studio Code directory with settings.json and launch.json so that replication-manager-osc can be run through Deelve --- .vscode/launch.json | 17 +++++++++++++++++ .vscode/settings.json | 6 ++++++ 2 files changed, 23 insertions(+) create mode 100644 .vscode/launch.json create mode 100644 .vscode/settings.json diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 000000000..379d98d36 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,17 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. + // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "replication-manager-osc", + "type": "go", + "request": "launch", + "mode": "auto", + "program": "${workspaceFolder}/main.go", + "env": {}, + "args": ["monitor", "--config", "/etc/replication-manager/config.toml", "--log-file", "/var/lib/replication-manager/data/replication-manager.log"], + } + ] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..d780f5282 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "go.buildTags": "netgo server", + "go.buildFlags": [ + "--ldflags -extldflags 'static' -w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.WithProvisioning=OFF -X main.WithOpenSVC=OFF -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" + ] +} \ No newline at end of file From b0e3d0ad881f4d2d557cb0714b22fc80e8321490 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Thu, 21 Jan 2021 23:26:09 +0100 Subject: [PATCH 02/84] Started initial work on refactoring cluster/prx* --- cluster/cluster.go | 16 +- cluster/cluster_add.go | 18 +- cluster/cluster_get.go | 28 +-- cluster/cluster_has.go | 2 +- cluster/cluster_set.go | 8 +- cluster/cluster_tst.go | 12 +- cluster/prov.go | 46 ++-- cluster/prov_k8s_prx.go | 14 +- cluster/prov_localhost_haproxy.go | 10 +- cluster/prov_localhost_proxysql.go | 10 +- cluster/prov_localhost_prx.go | 74 +++--- cluster/prov_opensvc_haproxy.go | 4 +- cluster/prov_opensvc_maxscale.go | 4 +- cluster/prov_opensvc_proxysql.go | 6 +- cluster/prov_opensvc_prx.go | 151 ++++++------- cluster/prov_opensvc_shardproxy.go | 4 +- cluster/prov_opensvc_sphinx.go | 6 +- cluster/prov_splaos_prx.go | 8 +- cluster/prx.go | 352 ++++++++++++++++++----------- cluster/prx_get.go | 116 +++++----- cluster/prx_gobetween.go | 4 +- cluster/prx_haproxy.go | 26 ++- cluster/prx_maxscale.go | 57 +++-- cluster/prx_myproxy.go | 13 +- cluster/prx_proxysql.go | 78 +++++-- cluster/prx_shardproxy.go | 53 +++-- cluster/prx_sphinx.go | 19 +- server/api_cluster.go | 12 +- server/api_database.go | 2 +- server/http.go | 2 +- 30 files changed, 672 insertions(+), 483 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index e1d5ff49a..3e885ad33 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -860,12 +860,12 @@ func (cluster *Cluster) MonitorSchema() { } t.Table_clusters = strings.Join(tableCluster, ",") tables[t.Table_schema+"."+t.Table_name] = t - if 
haschanged { - for _, pr := range cluster.Proxies { - if cluster.Conf.MdbsProxyOn && pr.Type == config.ConstProxySpider { + if haschanged && cluster.Conf.MdbsProxyOn { + for _, pri := range cluster.Proxies { + if prx, ok := pri.(*MdbsProxy); ok { if !(t.Table_schema == "replication_manager_schema" || strings.Contains(t.Table_name, "_copy") == true || strings.Contains(t.Table_name, "_back") == true || strings.Contains(t.Table_name, "_old") == true || strings.Contains(t.Table_name, "_reshard") == true) { cluster.LogPrintf(LvlDbg, "blabla table %s %s %s", duplicates, t.Table_schema, t.Table_name) - cluster.ShardProxyCreateVTable(pr, t.Table_schema, t.Table_name, duplicates, false) + cluster.ShardProxyCreateVTable(prx, t.Table_schema, t.Table_name, duplicates, false) } } } @@ -881,8 +881,12 @@ func (cluster *Cluster) MonitorQueryRules() { if !cluster.Conf.MonitorQueryRules { return } - for _, prx := range cluster.Proxies { - if cluster.Conf.ProxysqlOn && prx.Type == config.ConstProxySqlproxy { + // exit early + if !cluster.Conf.ProxysqlOn { + return + } + for _, pri := range cluster.Proxies { + if prx, ok := pri.(*ProxySQLProxy); ok { qr := prx.QueryRules for _, rule := range qr { var myRule config.QueryRule diff --git a/cluster/cluster_add.go b/cluster/cluster_add.go index dc0ecaf2e..a090e83ef 100644 --- a/cluster/cluster_add.go +++ b/cluster/cluster_add.go @@ -119,9 +119,12 @@ func (cluster *Cluster) AddUser(user string) error { return nil } -func (cluster *Cluster) AddShardingHostGroup(proxy *Proxy) error { - for _, pr := range cluster.Proxies { - if pr.Type == config.ConstProxySqlproxy && pr.ClusterGroup.Conf.ClusterHead == "" { +func (cluster *Cluster) AddShardingHostGroup(proxy *MdbsProxy) error { + if cluster.Conf.ClusterHead != "" { + return nil + } + for _, pri := range cluster.Proxies { + if pr, ok := pri.(*ProxySQLProxy); ok { cluster.AddShardProxy(pr, proxy) } } @@ -129,8 +132,11 @@ func (cluster *Cluster) AddShardingHostGroup(proxy *Proxy) error { } func (cluster *Cluster) AddShardingQueryRules(schema string, table string) error { - for _, pr := range cluster.Proxies { - if pr.Type == config.ConstProxySqlproxy && pr.ClusterGroup.Conf.ClusterHead == "" { + if cluster.Conf.ClusterHead != "" { + return nil + } + for _, pri := range cluster.Proxies { + if pr, ok := pri.(*ProxySQLProxy); ok { var qr proxysql.QueryRule var qrs []proxysql.QueryRule qr.Id = misc.Hash("dml." + schema + "." 
+ table) @@ -139,7 +145,7 @@ func (cluster *Cluster) AddShardingQueryRules(schema string, table string) error qr.Apply = 1 qr.DestinationHostgroup.Int64 = 999 qrs = append(qrs, qr) - cluster.AddQueryRulesProxysql(pr, qrs) + pr.AddQueryRulesProxysql(qrs) } } return nil diff --git a/cluster/cluster_get.go b/cluster/cluster_get.go index e32924811..e470931b6 100644 --- a/cluster/cluster_get.go +++ b/cluster/cluster_get.go @@ -330,17 +330,14 @@ func (cluster *Cluster) GetServerFromURL(url string) *ServerMonitor { return nil } -func (cluster *Cluster) GetProxyFromURL(url string) *Proxy { - if strings.Contains(url, ":") { - for _, proxy := range cluster.Proxies { - // cluster.LogPrintf(LvlInfo, " search prx %s %s for url %s", proxy.Host, proxy.Port, url) - if proxy.Host+":"+proxy.Port == url { +func (cluster *Cluster) GetProxyFromURL(url string) DatabaseProxy { + for _, proxy := range cluster.Proxies { + if strings.Contains(url, ":") { + if proxy.GetHost()+":"+proxy.GetPort() == url { return proxy } - } - } else { - for _, proxy := range cluster.Proxies { - if proxy.Host == url { + } else { + if proxy.GetHost() == url { return proxy } } @@ -427,7 +424,7 @@ func (cluster *Cluster) GetDBServerIdList() []string { func (cluster *Cluster) GetProxyServerIdList() []string { ret := make([]string, len(cluster.Proxies)) for i, server := range cluster.Proxies { - ret[i] = server.Id + ret[i] = server.GetId() } return ret } @@ -469,17 +466,6 @@ func (cluster *Cluster) GetProxyTags() []string { return strings.Split(cluster.Conf.ProvProxTags, ",") } -func (cluster *Cluster) GetLocalProxy(this *Proxy) Proxy { - // dirty: need to point LB to all DB proxies, just pick the first one so far - var prx Proxy - for _, p := range cluster.Proxies { - if p != this && p.Type != config.ConstProxySphinx { - return *p - } - } - return prx -} - func (cluster *Cluster) GetCron() []cron.Entry { return cluster.scheduler.Entries() diff --git a/cluster/cluster_has.go b/cluster/cluster_has.go index 607764cb5..0cd5468eb 100644 --- a/cluster/cluster_has.go +++ b/cluster/cluster_has.go @@ -52,7 +52,7 @@ func (cluster *Cluster) IsProvisioned() bool { if !px.HasProvisionCookie() { if px.IsRunning() { px.SetProvisionCookie() - cluster.LogPrintf(LvlInfo, "Can Proxy Connect creating cookie state:%s", px.State) + cluster.LogPrintf(LvlInfo, "Can Proxy Connect creating cookie state:%s", px.GetState()) } else { return false } diff --git a/cluster/cluster_set.go b/cluster/cluster_set.go index c933bd733..4a0dd6124 100644 --- a/cluster/cluster_set.go +++ b/cluster/cluster_set.go @@ -534,7 +534,7 @@ func (cluster *Cluster) SetProxyServersCredential(credential string, proxytype s cluster.Conf.MxsUser, cluster.Conf.MxsPass = misc.SplitPair(credential) } for _, prx := range cluster.Proxies { - prx.User, prx.Pass = misc.SplitPair(credential) + prx.SetCredential(credential) prx.SetRestartCookie() } } @@ -737,9 +737,9 @@ func (cluster *Cluster) SetServicePlan(theplan string) error { oriClusters, err := cluster.GetClusterFromName(cluster.Conf.ClusterHead) if err == nil { for _, oriProxy := range oriClusters.Proxies { - cluster.LogPrintf(LvlInfo, "Adding new proxy %s copy %s:%s", oriProxy.Type, oriProxy.Host, oriProxy.Port) - if oriProxy.Type == config.ConstProxySpider { - cluster.AddSeededProxy(oriProxy.Type, oriProxy.Host, oriProxy.Port, oriProxy.User, oriProxy.Pass) + cluster.LogPrintf(LvlInfo, "Adding new proxy %s copy %s:%s", oriProxy.GetType(), oriProxy.GetHost(), oriProxy.GetPort()) + if oriProxy.GetType() == config.ConstProxySpider { + 
cluster.AddSeededProxy(oriProxy.GetType(), oriProxy.GetHost(), oriProxy.GetPort(), oriProxy.GetUser(), oriProxy.GetPass()) } } if cluster.Conf.ProvOrchestrator == config.ConstOrchestratorLocalhost { diff --git a/cluster/cluster_tst.go b/cluster/cluster_tst.go index f21cc3bcf..6f5d2844a 100644 --- a/cluster/cluster_tst.go +++ b/cluster/cluster_tst.go @@ -51,7 +51,7 @@ func (cluster *Cluster) PrepareBench() error { time := "--max-time=60" mode := "--oltp-test-mode=complex" var cmdprep *exec.Cmd - cmdprep = exec.Command(cluster.Conf.SysbenchBinaryPath, test, tablesize, "--db-driver=mysql", "--mysql-db=replication_manager_schema", "--mysql-user="+cluster.dbUser, "--mysql-password="+cluster.dbPass, "--mysql-host="+prx.Host, "--mysql-port="+strconv.Itoa(prx.WritePort), time, mode, requests, threads, "prepare") + cmdprep = exec.Command(cluster.Conf.SysbenchBinaryPath, test, tablesize, "--db-driver=mysql", "--mysql-db=replication_manager_schema", "--mysql-user="+cluster.dbUser, "--mysql-password="+cluster.dbPass, "--mysql-host="+prx.GetHost(), "--mysql-port="+strconv.Itoa(prx.GetWritePort()), time, mode, requests, threads, "prepare") if cluster.Conf.SysbenchV1 { test = "oltp_read_write" @@ -60,7 +60,7 @@ func (cluster *Cluster) PrepareBench() error { requests = "" // --events=N time = "--time=60" mode = "" - cmdprep = exec.Command(cluster.Conf.SysbenchBinaryPath, test, tablesize, "--db-driver=mysql", "--mysql-db=replication_manager_schema", "--mysql-user="+cluster.dbUser, "--mysql-password="+cluster.dbPass, "--mysql-host="+prx.Host, "--mysql-port="+strconv.Itoa(prx.WritePort), time, threads, "prepare") + cmdprep = exec.Command(cluster.Conf.SysbenchBinaryPath, test, tablesize, "--db-driver=mysql", "--mysql-db=replication_manager_schema", "--mysql-user="+cluster.dbUser, "--mysql-password="+cluster.dbPass, "--mysql-host="+prx.GetHost(), "--mysql-port="+strconv.Itoa(prx.GetWritePort()), time, threads, "prepare") } @@ -94,10 +94,10 @@ func (cluster *Cluster) CleanupBench() error { if cluster.Conf.SysbenchV1 { test = "oltp_read_write" } - var cleanup = cluster.Conf.SysbenchBinaryPath + " --test=oltp --db-driver=mysql --mysql-db=replication_manager_schema --mysql-user=" + cluster.rplUser + " --mysql-password=" + cluster.rplPass + " --mysql-host=" + prx.Host + " --mysql-port=" + strconv.Itoa(prx.WritePort) + " cleanup" + var cleanup = cluster.Conf.SysbenchBinaryPath + " --test=oltp --db-driver=mysql --mysql-db=replication_manager_schema --mysql-user=" + cluster.rplUser + " --mysql-password=" + cluster.rplPass + " --mysql-host=" + prx.GetHost() + " --mysql-port=" + strconv.Itoa(prx.GetWritePort()) + " cleanup" cluster.LogPrintf("BENCH", "%s", strings.Replace(cleanup, cluster.rplPass, "XXXXX", -1)) var cmdcls *exec.Cmd - cmdcls = exec.Command(cluster.Conf.SysbenchBinaryPath, test, "--db-driver=mysql", "--mysql-db=replication_manager_schema", "--mysql-user="+cluster.rplUser, "--mysql-password="+cluster.rplPass, "--mysql-host="+prx.Host, "--mysql-port="+strconv.Itoa(prx.WritePort), "cleanup") + cmdcls = exec.Command(cluster.Conf.SysbenchBinaryPath, test, "--db-driver=mysql", "--mysql-db=replication_manager_schema", "--mysql-user="+cluster.rplUser, "--mysql-password="+cluster.rplPass, "--mysql-host="+prx.GetHost(), "--mysql-port="+strconv.Itoa(prx.GetWritePort()), "cleanup") var outcls bytes.Buffer cmdcls.Stdout = &outcls @@ -149,14 +149,14 @@ func (cluster *Cluster) RunBench() error { time := "--max-time=" + strconv.Itoa(cluster.Conf.SysbenchTime) mode := "--oltp-test-mode=complex" var cmdrun *exec.Cmd - 
cmdrun = exec.Command(cluster.Conf.SysbenchBinaryPath, test, tablesize, "--db-driver=mysql", "--mysql-db=replication_manager_schema", "--mysql-user="+cluster.dbUser, "--mysql-password="+cluster.dbPass, "--mysql-host="+prx.Host, "--mysql-port="+strconv.Itoa(prx.WritePort), time, mode, requests, threads, "run") + cmdrun = exec.Command(cluster.Conf.SysbenchBinaryPath, test, tablesize, "--db-driver=mysql", "--mysql-db=replication_manager_schema", "--mysql-user="+cluster.dbUser, "--mysql-password="+cluster.dbPass, "--mysql-host="+prx.GetHost(), "--mysql-port="+strconv.Itoa(prx.GetWritePort()), time, mode, requests, threads, "run") if cluster.Conf.SysbenchV1 { test = "oltp_read_write" tablesize = "--table-size=1000000" threads = "--threads=" + strconv.Itoa(cluster.Conf.SysbenchThreads) requests = "" // --events=N time = "--time=" + strconv.Itoa(cluster.Conf.SysbenchTime) - cmdrun = exec.Command(cluster.Conf.SysbenchBinaryPath, test, tablesize, "--db-driver=mysql", "--mysql-db=replication_manager_schema", "--mysql-user="+cluster.dbUser, "--mysql-password="+cluster.dbPass, "--mysql-host="+prx.Host, "--mysql-port="+strconv.Itoa(prx.WritePort), time, threads, "run") + cmdrun = exec.Command(cluster.Conf.SysbenchBinaryPath, test, tablesize, "--db-driver=mysql", "--mysql-db=replication_manager_schema", "--mysql-user="+cluster.dbUser, "--mysql-password="+cluster.dbPass, "--mysql-host="+prx.GetHost(), "--mysql-port="+strconv.Itoa(prx.GetWritePort()), time, threads, "run") } cluster.LogPrintf(LvlInfo, "Command: %s", strings.Replace(cmdrun.String(), cluster.dbPass, "XXXX", -1)) diff --git a/cluster/prov.go b/cluster/prov.go index b803256b5..678207b90 100644 --- a/cluster/prov.go +++ b/cluster/prov.go @@ -90,7 +90,12 @@ func (cluster *Cluster) ProvisionServices() error { } } } - for _, prx := range cluster.Proxies { + + for _, pri := range cluster.Proxies { + prx, ok := pri.(*Proxy) + if !ok { + continue + } switch cluster.Conf.ProvOrchestrator { case config.ConstOrchestratorOpenSVC: go cluster.OpenSVCProvisionProxyService(prx) @@ -101,17 +106,22 @@ func (cluster *Cluster) ProvisionServices() error { case config.ConstOrchestratorLocalhost: go cluster.LocalhostProvisionProxyService(prx) default: + // TODO: wtf? 
it never hits the second loop cluster.sme.RemoveFailoverState() return nil } } - for _, prx := range cluster.Proxies { + for _, pri := range cluster.Proxies { + prx, ok := pri.(*Proxy) + if !ok { + continue + } select { case err := <-cluster.errorChan: if err != nil { - cluster.LogPrintf(LvlErr, "Provisionning proxy error %s on %s", err, cluster.Name+"/svc/"+prx.Name) + cluster.LogPrintf(LvlErr, "Provisionning proxy error %s on %s", err, cluster.Name+"/svc/"+prx.GetName()) } else { - cluster.LogPrintf(LvlInfo, "Provisionning done for proxy %s", cluster.Name+"/svc/"+prx.Name) + cluster.LogPrintf(LvlInfo, "Provisionning done for proxy %s", cluster.Name+"/svc/"+prx.GetName()) prx.SetProvisionCookie() } } @@ -151,7 +161,7 @@ func (cluster *Cluster) InitDatabaseService(server *ServerMonitor) error { return nil } -func (cluster *Cluster) InitProxyService(prx *Proxy) error { +func (cluster *Cluster) InitProxyService(prx DatabaseProxy) error { switch cluster.Conf.ProvOrchestrator { case config.ConstOrchestratorOpenSVC: go cluster.OpenSVCProvisionProxyService(prx) @@ -206,7 +216,11 @@ func (cluster *Cluster) Unprovision() error { } } } - for _, prx := range cluster.Proxies { + for _, pri := range cluster.Proxies { + prx, ok := pri.(*Proxy) + if !ok { + continue + } switch cluster.Conf.ProvOrchestrator { case config.ConstOrchestratorOpenSVC: go cluster.OpenSVCUnprovisionProxyService(prx) @@ -221,13 +235,17 @@ func (cluster *Cluster) Unprovision() error { return nil } } - for _, prx := range cluster.Proxies { + for _, pri := range cluster.Proxies { + prx, ok := pri.(*Proxy) + if !ok { + continue + } select { case err := <-cluster.errorChan: if err != nil { - cluster.LogPrintf(LvlErr, "Unprovision proxy error %s on %s", err, cluster.Name+"/svc/"+prx.Name) + cluster.LogPrintf(LvlErr, "Unprovision proxy error %s on %s", err, cluster.Name+"/svc/"+prx.GetName()) } else { - cluster.LogPrintf(LvlInfo, "Unprovision done for proxy %s", cluster.Name+"/svc/"+prx.Name) + cluster.LogPrintf(LvlInfo, "Unprovision done for proxy %s", cluster.Name+"/svc/"+prx.GetName()) prx.DelProvisionCookie() prx.DelRestartCookie() prx.DelReprovisionCookie() @@ -244,7 +262,7 @@ func (cluster *Cluster) Unprovision() error { return nil } -func (cluster *Cluster) UnprovisionProxyService(prx *Proxy) error { +func (cluster *Cluster) UnprovisionProxyService(prx DatabaseProxy) error { switch cluster.Conf.ProvOrchestrator { case config.ConstOrchestratorOpenSVC: go cluster.OpenSVCUnprovisionProxyService(prx) @@ -316,7 +334,7 @@ func (cluster *Cluster) StopDatabaseService(server *ServerMonitor) error { return nil } -func (cluster *Cluster) StopProxyService(server *Proxy) error { +func (cluster *Cluster) StopProxyService(server DatabaseProxy) error { switch cluster.Conf.ProvOrchestrator { case config.ConstOrchestratorOpenSVC: @@ -332,7 +350,7 @@ func (cluster *Cluster) StopProxyService(server *Proxy) error { return nil } -func (cluster *Cluster) StartProxyService(server *Proxy) error { +func (cluster *Cluster) StartProxyService(server DatabaseProxy) error { switch cluster.Conf.ProvOrchestrator { case config.ConstOrchestratorOpenSVC: @@ -629,7 +647,7 @@ func (cluster *Cluster) GetDatabaseAgent(server *ServerMonitor) (Agent, error) { return agent, errors.New("Indice not found in database node list") } -func (cluster *Cluster) GetProxyAgent(server *Proxy) (Agent, error) { +func (cluster *Cluster) GetProxyAgent(server DatabaseProxy) (Agent, error) { var agent Agent agents := strings.Split(cluster.Conf.ProvProxAgents, ",") if len(agents) == 0 { @@ 
-637,7 +655,7 @@ func (cluster *Cluster) GetProxyAgent(server *Proxy) (Agent, error) { } for i, srv := range cluster.Servers { - if srv.Id == server.Id { + if srv.Id == server.GetId() { agentName := agents[i%len(agents)] agent, err := cluster.GetAgentInOrchetrator(agentName) if err != nil { diff --git a/cluster/prov_k8s_prx.go b/cluster/prov_k8s_prx.go index 00fc9b8ad..43d8cc984 100644 --- a/cluster/prov_k8s_prx.go +++ b/cluster/prov_k8s_prx.go @@ -9,7 +9,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func (cluster *Cluster) K8SProvisionProxyService(prx *Proxy) { +func (cluster *Cluster) K8SProvisionProxyService(prx DatabaseProxy) { clientset, err := cluster.K8SConnectAPI() if err != nil { cluster.LogPrintf(LvlErr, "Cannot init Kubernetes client API %s ", err) @@ -18,7 +18,7 @@ func (cluster *Cluster) K8SProvisionProxyService(prx *Proxy) { } deploymentsClient := clientset.AppsV1().Deployments(cluster.Name) - port, _ := strconv.Atoi(prx.Port) + port, _ := strconv.Atoi(prx.GetPort()) deployment := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: cluster.Name + "-deployment", @@ -39,11 +39,11 @@ func (cluster *Cluster) K8SProvisionProxyService(prx *Proxy) { Spec: apiv1.PodSpec{ Containers: []apiv1.Container{ { - Name: prx.Name, + Name: prx.GetName(), Image: cluster.Conf.ProvProxProxysqlImg, Ports: []apiv1.ContainerPort{ { - Name: prx.Name, + Name: prx.GetName(), Protocol: apiv1.ProtocolTCP, ContainerPort: int32(port), }, @@ -68,13 +68,13 @@ func (cluster *Cluster) K8SProvisionProxyService(prx *Proxy) { return } -func (cluster *Cluster) K8SUnprovisionProxyService(prx *Proxy) { +func (cluster *Cluster) K8SUnprovisionProxyService(prx DatabaseProxy) { cluster.errorChan <- nil } -func (cluster *Cluster) K8SStartProxyService(server *Proxy) error { +func (cluster *Cluster) K8SStartProxyService(server DatabaseProxy) error { return errors.New("Can't start proxy") } -func (cluster *Cluster) K8SStopProxyService(server *Proxy) error { +func (cluster *Cluster) K8SStopProxyService(server DatabaseProxy) error { return errors.New("Can't stop proxy") } diff --git a/cluster/prov_localhost_haproxy.go b/cluster/prov_localhost_haproxy.go index 66102a574..4f6062bdb 100644 --- a/cluster/prov_localhost_haproxy.go +++ b/cluster/prov_localhost_haproxy.go @@ -15,14 +15,14 @@ import ( "strings" ) -func (cluster *Cluster) LocalhostUnprovisionHaProxyService(prx *Proxy) error { +func (cluster *Cluster) LocalhostUnprovisionHaProxyService(prx *HaproxyProxy) error { cluster.LocalhostStopHaProxyService(prx) os.RemoveAll(prx.Datadir + "/var") cluster.errorChan <- nil return nil } -func (cluster *Cluster) LocalhostProvisionHaProxyService(prx *Proxy) error { +func (cluster *Cluster) LocalhostProvisionHaProxyService(prx *HaproxyProxy) error { out := &bytes.Buffer{} path := prx.Datadir + "/var" @@ -51,7 +51,7 @@ func (cluster *Cluster) LocalhostProvisionHaProxyService(prx *Proxy) error { return nil } -func (cluster *Cluster) LocalhostStopHaProxyService(prx *Proxy) error { +func (cluster *Cluster) LocalhostStopHaProxyService(prx *HaproxyProxy) error { // cluster.LogPrintf("TEST", "Killing database %s %d", server.Id, server.Process.Pid) @@ -64,10 +64,10 @@ func (cluster *Cluster) LocalhostStopHaProxyService(prx *Proxy) error { return nil } -func (cluster *Cluster) LocalhostStartHaProxyService(prx *Proxy) error { +func (cluster *Cluster) LocalhostStartHaProxyService(prx *HaproxyProxy) error { prx.GetProxyConfig() //init haproxy do start or reload - cluster.initHaproxy(prx) + prx.Init() /*mariadbdCmd := 
exec.Command(cluster.Conf.HaproxyBinaryPath+"/haproxy", "--config="+prx.Datadir+"/init/conf/haproxy.cnf", "--datadir="+prx.Datadir+"/var") cluster.LogPrintf(LvlInfo, "%s %s", mariadbdCmd.Path, mariadbdCmd.Args) diff --git a/cluster/prov_localhost_proxysql.go b/cluster/prov_localhost_proxysql.go index 489000a71..0c6ecb2c3 100644 --- a/cluster/prov_localhost_proxysql.go +++ b/cluster/prov_localhost_proxysql.go @@ -14,13 +14,15 @@ import ( "time" ) -func (cluster *Cluster) LocalhostUnprovisionProxySQLService(prx *Proxy) error { +// TODO: Make all of these part of ProxySQLProxy and not Cluster + +func (cluster *Cluster) LocalhostUnprovisionProxySQLService(prx *ProxySQLProxy) error { cluster.LocalhostStopProxysqlService(prx) cluster.errorChan <- nil return nil } -func (cluster *Cluster) LocalhostProvisionProxySQLService(prx *Proxy) error { +func (cluster *Cluster) LocalhostProvisionProxySQLService(prx *ProxySQLProxy) error { out := &bytes.Buffer{} path := prx.Datadir + "/var" @@ -49,7 +51,7 @@ func (cluster *Cluster) LocalhostProvisionProxySQLService(prx *Proxy) error { return nil } -func (cluster *Cluster) LocalhostStopProxysqlService(prx *Proxy) error { +func (cluster *Cluster) LocalhostStopProxysqlService(prx *ProxySQLProxy) error { // cluster.LogPrintf("TEST", "Killing database %s %d", server.Id, server.Process.Pid) @@ -58,7 +60,7 @@ func (cluster *Cluster) LocalhostStopProxysqlService(prx *Proxy) error { return nil } -func (cluster *Cluster) LocalhostStartProxySQLService(prx *Proxy) error { +func (cluster *Cluster) LocalhostStartProxySQLService(prx *ProxySQLProxy) error { prx.GetProxyConfig() /* path := prx.Datadir + "/var" diff --git a/cluster/prov_localhost_prx.go b/cluster/prov_localhost_prx.go index c1e484024..1fc0dbe6e 100644 --- a/cluster/prov_localhost_prx.go +++ b/cluster/prov_localhost_prx.go @@ -6,19 +6,12 @@ package cluster -import ( - "errors" +func (cluster *Cluster) LocalhostProvisionProxyService(pri DatabaseProxy) error { + pri.GetProxyConfig() - "github.com/signal18/replication-manager/config" -) - -func (cluster *Cluster) LocalhostProvisionProxyService(prx *Proxy) error { - prx.GetProxyConfig() - - switch prx.Type { - case config.ConstProxySpider: + if prx, ok := pri.(*MdbsProxy); ok { cluster.LogPrintf(LvlInfo, "Bootstrap MariaDB Sharding Cluster") - srv, _ := cluster.newServerMonitor(prx.Host+":"+prx.Port, prx.User, prx.Pass, true, "") + srv, _ := cluster.newServerMonitor(prx.Host+":"+prx.GetPort(), prx.User, prx.Pass, true, "") err := srv.Refresh() if err == nil { cluster.LogPrintf(LvlWarn, "Can connect to requested signal18 sharding proxy") @@ -35,73 +28,68 @@ func (cluster *Cluster) LocalhostProvisionProxyService(prx *Proxy) error { } srv.Close() cluster.ShardProxyBootstrap(prx) + } - case config.ConstProxySqlproxy: + if prx, ok := pri.(*ProxySQLProxy); ok { err := cluster.LocalhostProvisionProxySQLService(prx) if err != nil { cluster.LogPrintf(LvlErr, "Bootstrap Proxysql Failed") cluster.errorChan <- err return err } - case config.ConstProxyHaproxy: + } + + if prx, ok := pri.(*HaproxyProxy); ok { err := cluster.LocalhostProvisionHaProxyService(prx) cluster.errorChan <- err return err } + cluster.errorChan <- nil return nil } -func (cluster *Cluster) LocalhostUnprovisionProxyService(prx *Proxy) error { - switch prx.Type { - case config.ConstProxySpider: +func (cluster *Cluster) LocalhostUnprovisionProxyService(pri DatabaseProxy) error { + if prx, ok := pri.(*MdbsProxy); ok { cluster.LocalhostUnprovisionDatabaseService(prx.ShardProxy) - case config.ConstProxySphinx: + 
} - case config.ConstProxyHaproxy: + if prx, ok := pri.(*HaproxyProxy); ok { cluster.LocalhostUnprovisionHaProxyService(prx) - case config.ConstProxySqlproxy: - cluster.LocalhostUnprovisionProxySQLService(prx) - case config.ConstProxyMaxscale: + } - default: + if prx, ok := pri.(*ProxySQLProxy); ok { + cluster.LocalhostUnprovisionProxySQLService(prx) } + cluster.errorChan <- nil return nil } -func (cluster *Cluster) LocalhostStartProxyService(prx *Proxy) error { - switch prx.Type { - case config.ConstProxySpider: +func (cluster *Cluster) LocalhostStartProxyService(pri DatabaseProxy) error { + if prx, ok := pri.(*MdbsProxy); ok { prx.ShardProxy.Shutdown() - case config.ConstProxySphinx: + } - case config.ConstProxyHaproxy: + if prx, ok := pri.(*HaproxyProxy); ok { cluster.LocalhostStartHaProxyService(prx) - case config.ConstProxySqlproxy: - cluster.LocalhostStartProxySQLService(prx) - case config.ConstProxyMaxscale: + } - default: + if prx, ok := pri.(*ProxySQLProxy); ok { + cluster.LocalhostStartProxySQLService(prx) } + cluster.errorChan <- nil return nil } -func (cluster *Cluster) LocalhostStopProxyService(prx *Proxy) error { - switch prx.Type { - case config.ConstProxySpider: - - case config.ConstProxySphinx: - - case config.ConstProxyHaproxy: +func (cluster *Cluster) LocalhostStopProxyService(pri DatabaseProxy) error { + if prx, ok := pri.(*HaproxyProxy); ok { cluster.LocalhostStartHaProxyService(prx) - case config.ConstProxySqlproxy: + } + if prx, ok := pri.(*ProxySQLProxy); ok { cluster.LocalhostStartProxySQLService(prx) - case config.ConstProxyMaxscale: - - default: - return errors.New("Can't stop proxy") } + return nil } diff --git a/cluster/prov_opensvc_haproxy.go b/cluster/prov_opensvc_haproxy.go index 8e1062346..437d3e5d3 100644 --- a/cluster/prov_opensvc_haproxy.go +++ b/cluster/prov_opensvc_haproxy.go @@ -13,7 +13,7 @@ import ( "github.com/signal18/replication-manager/opensvc" ) -func (cluster *Cluster) OpenSVCGetHaproxyContainerSection(server *Proxy) map[string]string { +func (cluster *Cluster) OpenSVCGetHaproxyContainerSection(server *HaproxyProxy) map[string]string { svccontainer := make(map[string]string) if server.ClusterGroup.Conf.ProvProxType == "docker" || server.ClusterGroup.Conf.ProvProxType == "podman" || server.ClusterGroup.Conf.ProvProxType == "oci" { svccontainer["tags"] = "" @@ -32,7 +32,7 @@ func (cluster *Cluster) OpenSVCGetHaproxyContainerSection(server *Proxy) map[str return svccontainer } -func (cluster *Cluster) GetHaproxyTemplate(collector opensvc.Collector, servers string, agent opensvc.Host, prx *Proxy) (string, error) { +func (cluster *Cluster) GetHaproxyTemplate(collector opensvc.Collector, servers string, agent opensvc.Host, prx *HaproxyProxy) (string, error) { conf := ` [DEFAULT] diff --git a/cluster/prov_opensvc_maxscale.go b/cluster/prov_opensvc_maxscale.go index 840dd33ca..6172c628e 100644 --- a/cluster/prov_opensvc_maxscale.go +++ b/cluster/prov_opensvc_maxscale.go @@ -13,7 +13,7 @@ import ( "github.com/signal18/replication-manager/opensvc" ) -func (cluster *Cluster) OpenSVCGetMaxscaleContainerSection(server *Proxy) map[string]string { +func (cluster *Cluster) OpenSVCGetMaxscaleContainerSection(server *MaxscaleProxy) map[string]string { svccontainer := make(map[string]string) if server.ClusterGroup.Conf.ProvProxType == "docker" || server.ClusterGroup.Conf.ProvProxType == "podman" || server.ClusterGroup.Conf.ProvProxType == "oci" { svccontainer["tags"] = "" @@ -31,7 +31,7 @@ func (cluster *Cluster) OpenSVCGetMaxscaleContainerSection(server *Proxy) 
map[st return svccontainer } -func (cluster *Cluster) GetMaxscaleTemplate(collector opensvc.Collector, servers string, agent opensvc.Host, prx *Proxy) (string, error) { +func (cluster *Cluster) GetMaxscaleTemplate(collector opensvc.Collector, servers string, agent opensvc.Host, prx *MaxscaleProxy) (string, error) { conf := ` [DEFAULT] diff --git a/cluster/prov_opensvc_proxysql.go b/cluster/prov_opensvc_proxysql.go index c229055a9..777118cd8 100644 --- a/cluster/prov_opensvc_proxysql.go +++ b/cluster/prov_opensvc_proxysql.go @@ -13,14 +13,14 @@ import ( "github.com/signal18/replication-manager/opensvc" ) -func (proxy *Proxy) ProxySQLReadOnMaster() string { +func (proxy *ProxySQLProxy) ProxySQLReadOnMaster() string { if proxy.IsFilterInTags("proxy.route.readonmaster") { return "1" } return "0" } -func (cluster *Cluster) OpenSVCGetProxysqlContainerSection(server *Proxy) map[string]string { +func (cluster *Cluster) OpenSVCGetProxysqlContainerSection(server *ProxySQLProxy) map[string]string { svccontainer := make(map[string]string) if server.ClusterGroup.Conf.ProvProxType == "docker" || server.ClusterGroup.Conf.ProvProxType == "podman" || server.ClusterGroup.Conf.ProvProxType == "oci" { svccontainer["tags"] = "" @@ -39,7 +39,7 @@ func (cluster *Cluster) OpenSVCGetProxysqlContainerSection(server *Proxy) map[st return svccontainer } -func (cluster *Cluster) GetProxysqlTemplate(collector opensvc.Collector, servers string, agent opensvc.Host, prx *Proxy) (string, error) { +func (cluster *Cluster) GetProxysqlTemplate(collector opensvc.Collector, servers string, agent opensvc.Host, prx *ProxySQLProxy) (string, error) { conf := ` [DEFAULT] diff --git a/cluster/prov_opensvc_prx.go b/cluster/prov_opensvc_prx.go index 2c6af8f64..6fcbdf9a0 100644 --- a/cluster/prov_opensvc_prx.go +++ b/cluster/prov_opensvc_prx.go @@ -14,16 +14,15 @@ import ( "strconv" "strings" - "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/opensvc" "github.com/signal18/replication-manager/utils/misc" "github.com/signal18/replication-manager/utils/state" ) -func (cluster *Cluster) OpenSVCStopProxyService(server *Proxy) error { +func (cluster *Cluster) OpenSVCStopProxyService(server DatabaseProxy) error { svc := cluster.OpenSVCConnect() if cluster.Conf.ProvOpensvcUseCollectorAPI { - service, err := svc.GetServiceFromName(cluster.Name + "/svc/" + server.Name) + service, err := svc.GetServiceFromName(cluster.Name + "/svc/" + server.GetName()) if err != nil { return err } @@ -38,7 +37,7 @@ func (cluster *Cluster) OpenSVCStopProxyService(server *Proxy) error { cluster.LogPrintf(LvlErr, "Can not stop proxy: %s ", err) return err } - err = svc.StopServiceV2(cluster.Name, server.ServiceName, agent.HostName) + err = svc.StopServiceV2(cluster.Name, server.GetServiceName(), agent.HostName) if err != nil { cluster.LogPrintf(LvlErr, "Can not stop proxy: %s ", err) return err @@ -47,10 +46,10 @@ func (cluster *Cluster) OpenSVCStopProxyService(server *Proxy) error { return nil } -func (cluster *Cluster) OpenSVCStartProxyService(server *Proxy) error { +func (cluster *Cluster) OpenSVCStartProxyService(server DatabaseProxy) error { svc := cluster.OpenSVCConnect() if cluster.Conf.ProvOpensvcUseCollectorAPI { - service, err := svc.GetServiceFromName(cluster.Name + "/svc/" + server.Name) + service, err := svc.GetServiceFromName(cluster.Name + "/svc/" + server.GetName()) if err != nil { return err } @@ -65,7 +64,7 @@ func (cluster *Cluster) OpenSVCStartProxyService(server *Proxy) error { cluster.LogPrintf(LvlErr, 
"Can not stop proxy: %s ", err) return err } - err = svc.StartServiceV2(cluster.Name, server.ServiceName, agent.HostName) + err = svc.StartServiceV2(cluster.Name, server.GetServiceName(), agent.HostName) if err != nil { cluster.LogPrintf(LvlErr, "Can not stop proxy: %s ", err) return err @@ -74,9 +73,9 @@ func (cluster *Cluster) OpenSVCStartProxyService(server *Proxy) error { return nil } -func (cluster *Cluster) OpenSVCProvisionProxyService(prx *Proxy) error { +func (cluster *Cluster) OpenSVCProvisionProxyService(pri DatabaseProxy) error { svc := cluster.OpenSVCConnect() - agent, err := cluster.FoundProxyAgent(prx) + agent, err := cluster.FoundProxyAgent(pri) if err != nil { cluster.errorChan <- err return err @@ -84,20 +83,20 @@ func (cluster *Cluster) OpenSVCProvisionProxyService(prx *Proxy) error { // Unprovision if already in OpenSVC if cluster.Conf.ProvOpensvcUseCollectorAPI { var idsrv string - mysrv, err := svc.GetServiceFromName(cluster.Name + "/svc/" + prx.Name) + mysrv, err := svc.GetServiceFromName(cluster.Name + "/svc/" + pri.GetName()) if err == nil { idsrv = mysrv.Svc_id - cluster.LogPrintf(LvlInfo, "Found existing service %s service %s", cluster.Name+"/"+prx.Name, idsrv) + cluster.LogPrintf(LvlInfo, "Found existing service %s service %s", cluster.Name+"/"+pri.GetName(), idsrv) } else { - idsrv, err = svc.CreateService(cluster.Name+"/svc/"+prx.Name, "MariaDB") + idsrv, err = svc.CreateService(cluster.Name+"/svc/"+pri.GetName(), "MariaDB") if err != nil { cluster.LogPrintf(LvlErr, "Can't create OpenSVC proxy service") cluster.errorChan <- err return err } } - cluster.LogPrintf(LvlInfo, "Attaching internal id %s to opensvc service id %s", cluster.Name+"/"+prx.Name, idsrv) + cluster.LogPrintf(LvlInfo, "Attaching internal id %s to opensvc service id %s", cluster.Name+"/"+pri.GetName(), idsrv) err = svc.DeteteServiceTags(idsrv) if err != nil { @@ -120,7 +119,7 @@ func (cluster *Cluster) OpenSVCProvisionProxyService(prx *Proxy) error { for i, s := range cluster.Servers { srvlist[i] = s.Host } - if prx.Type == config.ConstProxyMaxscale { + if prx, ok := pri.(*MaxscaleProxy); ok { if !cluster.Conf.ProvOpensvcUseCollectorAPI { res, err := cluster.OpenSVCGetProxyTemplateV2(strings.Join(srvlist, " "), agent, prx) if err != nil { @@ -139,13 +138,13 @@ func (cluster *Cluster) OpenSVCProvisionProxyService(prx *Proxy) error { cluster.errorChan <- err return err } - idtemplate, err := svc.CreateTemplate(cluster.Name+"/svc/"+prx.Name, res) + idtemplate, err := svc.CreateTemplate(cluster.Name+"/svc/"+prx.GetName(), res) if err != nil { cluster.errorChan <- err return err } - idaction, _ := svc.ProvisionTemplate(idtemplate, agent.Node_id, cluster.Name+"/svc/"+prx.Name) + idaction, _ := svc.ProvisionTemplate(idtemplate, agent.Node_id, cluster.Name+"/svc/"+prx.GetName()) cluster.OpenSVCWaitDequeue(svc, idaction) task := svc.GetAction(strconv.Itoa(idaction)) if task != nil { @@ -156,10 +155,9 @@ func (cluster *Cluster) OpenSVCProvisionProxyService(prx *Proxy) error { } } } - if prx.Type == config.ConstProxySpider { - + if prx, ok := pri.(*MdbsProxy); ok { if strings.Contains(svc.ProvProxAgents, agent.Node_name) { - srv, _ := cluster.newServerMonitor(prx.Host+":"+prx.Port, prx.User, prx.Pass, true, cluster.GetDomain()) + srv, _ := cluster.newServerMonitor(prx.GetHost()+":"+prx.GetPort(), prx.User, prx.Pass, true, cluster.GetDomain()) err := srv.Refresh() if err == nil { cluster.LogPrintf(LvlWarn, "Can connect to requested signal18 sharding proxy") @@ -185,13 +183,13 @@ func (cluster *Cluster) 
OpenSVCProvisionProxyService(prx *Proxy) error { cluster.errorChan <- err return err } - idtemplate, err := svc.CreateTemplate(cluster.Name+"/svc/"+prx.Name, res) + idtemplate, err := svc.CreateTemplate(cluster.Name+"/svc/"+prx.GetName(), res) if err != nil { cluster.errorChan <- err return err } - idaction, _ := svc.ProvisionTemplate(idtemplate, agent.Node_id, cluster.Name+"/svc/"+prx.Name) + idaction, _ := svc.ProvisionTemplate(idtemplate, agent.Node_id, cluster.Name+"/svc/"+prx.GetName()) cluster.OpenSVCWaitDequeue(svc, idaction) task := svc.GetAction(strconv.Itoa(idaction)) if task != nil { @@ -202,7 +200,7 @@ func (cluster *Cluster) OpenSVCProvisionProxyService(prx *Proxy) error { } } } - if prx.Type == config.ConstProxyHaproxy { + if prx, ok := pri.(*HaproxyProxy); ok { if !cluster.Conf.ProvOpensvcUseCollectorAPI { res, err := cluster.OpenSVCGetProxyTemplateV2(strings.Join(srvlist, " "), agent, prx) if err != nil { @@ -221,13 +219,13 @@ func (cluster *Cluster) OpenSVCProvisionProxyService(prx *Proxy) error { cluster.errorChan <- err return err } - idtemplate, err := svc.CreateTemplate(cluster.Name+"/svc/"+prx.Name, res) + idtemplate, err := svc.CreateTemplate(cluster.Name+"/svc/"+prx.GetName(), res) if err != nil { cluster.errorChan <- err return err } - idaction, _ := svc.ProvisionTemplate(idtemplate, agent.Node_id, cluster.Name+"/svc/"+prx.Name) + idaction, _ := svc.ProvisionTemplate(idtemplate, agent.Node_id, cluster.Name+"/svc/"+prx.GetName()) cluster.OpenSVCWaitDequeue(svc, idaction) task := svc.GetAction(strconv.Itoa(idaction)) if task != nil { @@ -238,7 +236,7 @@ func (cluster *Cluster) OpenSVCProvisionProxyService(prx *Proxy) error { } } } - if prx.Type == config.ConstProxySphinx { + if prx, ok := pri.(*SphinxProxy); ok { if !cluster.Conf.ProvOpensvcUseCollectorAPI { } else { res, err := cluster.OpenSVCGetProxyTemplateV2(strings.Join(srvlist, " "), agent, prx) @@ -257,13 +255,13 @@ func (cluster *Cluster) OpenSVCProvisionProxyService(prx *Proxy) error { cluster.errorChan <- err return err } - idtemplate, err := svc.CreateTemplate(cluster.Name+"/svc/"+prx.Name, res) + idtemplate, err := svc.CreateTemplate(cluster.Name+"/svc/"+prx.GetName(), res) if err != nil { cluster.errorChan <- err return err } - idaction, _ := svc.ProvisionTemplate(idtemplate, agent.Node_id, cluster.Name+"/svc/"+prx.Name) + idaction, _ := svc.ProvisionTemplate(idtemplate, agent.Node_id, cluster.Name+"/svc/"+prx.GetName()) cluster.OpenSVCWaitDequeue(svc, idaction) task := svc.GetAction(strconv.Itoa(idaction)) if task != nil { @@ -274,7 +272,7 @@ func (cluster *Cluster) OpenSVCProvisionProxyService(prx *Proxy) error { } } } - if prx.Type == config.ConstProxySqlproxy { + if prx, ok := pri.(*ProxySQLProxy); ok { if !cluster.Conf.ProvOpensvcUseCollectorAPI { res, err := cluster.OpenSVCGetProxyTemplateV2(strings.Join(srvlist, " "), agent, prx) if err != nil { @@ -293,13 +291,13 @@ func (cluster *Cluster) OpenSVCProvisionProxyService(prx *Proxy) error { cluster.errorChan <- err return err } - idtemplate, err := svc.CreateTemplate(cluster.Name+"/svc/"+prx.Name, res) + idtemplate, err := svc.CreateTemplate(cluster.Name+"/svc/"+prx.GetName(), res) if err != nil { cluster.errorChan <- err return err } - idaction, _ := svc.ProvisionTemplate(idtemplate, agent.Node_id, cluster.Name+"/svc/"+prx.Name) + idaction, _ := svc.ProvisionTemplate(idtemplate, agent.Node_id, cluster.Name+"/svc/"+prx.GetName()) cluster.OpenSVCWaitDequeue(svc, idaction) task := svc.GetAction(strconv.Itoa(idaction)) if task != nil { @@ -314,10 
+312,10 @@ func (cluster *Cluster) OpenSVCProvisionProxyService(prx *Proxy) error { return nil } -func (cluster *Cluster) OpenSVCGetProxyTemplateV2(servers string, agent opensvc.Host, prx *Proxy) (string, error) { +func (cluster *Cluster) OpenSVCGetProxyTemplateV2(servers string, agent opensvc.Host, pri DatabaseProxy) (string, error) { svcsection := make(map[string]map[string]string) - svcsection["DEFAULT"] = prx.OpenSVCGetProxyDefaultSection() + svcsection["DEFAULT"] = pri.OpenSVCGetProxyDefaultSection() svcsection["ip#01"] = cluster.OpenSVCGetNetSection() if cluster.Conf.ProvProxDiskType != "volume" { svcsection["disk#0000"] = cluster.OpenSVCGetDiskZpoolDockerPrivateSection() @@ -337,21 +335,29 @@ func (cluster *Cluster) OpenSVCGetProxyTemplateV2(servers string, agent opensvc. } svcsection["container#0001"] = cluster.OpenSVCGetNamespaceContainerSection() svcsection["container#0002"] = cluster.OpenSVCGetInitContainerSection() - switch prx.Type { - case config.ConstProxySpider: + + if prx, ok := pri.(*MdbsProxy); ok { svcsection["container#0003"] = cluster.OpenSVCGetShardproxyContainerSection(prx) - case config.ConstProxySphinx: + } + + if prx, ok := pri.(*SphinxProxy); ok { svcsection["container#0003"] = cluster.OpenSVCGetSphinxContainerSection(prx) svcsection["task#01"] = cluster.OpenSVCGetSphinxTaskSection(prx) - case config.ConstProxyHaproxy: + } + + if prx, ok := pri.(*HaproxyProxy); ok { svcsection["container#0003"] = cluster.OpenSVCGetHaproxyContainerSection(prx) - case config.ConstProxySqlproxy: + } + + if prx, ok := pri.(*ProxySQLProxy); ok { svcsection["container#0003"] = cluster.OpenSVCGetProxysqlContainerSection(prx) - case config.ConstProxyMaxscale: + } + + if prx, ok := pri.(*MaxscaleProxy); ok { svcsection["container#0003"] = cluster.OpenSVCGetMaxscaleContainerSection(prx) - default: } - svcsection["env"] = cluster.OpenSVCGetProxyEnvSection(servers, agent, prx) + + svcsection["env"] = cluster.OpenSVCGetProxyEnvSection(servers, agent, pri) svcsectionJson, err := json.MarshalIndent(svcsection, "", "\t") if err != nil { @@ -362,23 +368,23 @@ func (cluster *Cluster) OpenSVCGetProxyTemplateV2(servers string, agent opensvc. 
} -func (cluster *Cluster) OpenSVCUnprovisionProxyService(prx *Proxy) { +func (cluster *Cluster) OpenSVCUnprovisionProxyService(prx DatabaseProxy) { opensvc := cluster.OpenSVCConnect() //agents := opensvc.GetNodes() node, _ := cluster.FoundProxyAgent(prx) for _, svc := range node.Svc { - if cluster.Name+"/svc/"+prx.Name == svc.Svc_name { + if cluster.Name+"/svc/"+prx.GetName() == svc.Svc_name { idaction, _ := opensvc.UnprovisionService(node.Node_id, svc.Svc_id) err := cluster.OpenSVCWaitDequeue(opensvc, idaction) if err != nil { - cluster.LogPrintf(LvlErr, "Can't unprovision proxy %s, %s", prx.Id, err) + cluster.LogPrintf(LvlErr, "Can't unprovision proxy %s, %s", prx.GetId(), err) } } } cluster.errorChan <- nil } -func (cluster *Cluster) FoundProxyAgent(proxy *Proxy) (opensvc.Host, error) { +func (cluster *Cluster) FoundProxyAgent(proxy DatabaseProxy) (opensvc.Host, error) { svc := cluster.OpenSVCConnect() agents, err := svc.GetNodes() if err != nil { @@ -395,14 +401,14 @@ func (cluster *Cluster) FoundProxyAgent(proxy *Proxy) (opensvc.Host, error) { return agent, errors.New("Indice not found in proxies agent list") } for i, srv := range cluster.Proxies { - if srv.Id == proxy.Id { + if srv.GetId() == proxy.GetId() { return clusteragents[i%len(clusteragents)], nil } } return agent, errors.New("Indice not found in proxies agent list") } -func (cluster *Cluster) OpenSVCGetProxyEnvSection(servers string, agent opensvc.Host, prx *Proxy) map[string]string { +func (cluster *Cluster) OpenSVCGetProxyEnvSection(servers string, agent opensvc.Host, prx DatabaseProxy) map[string]string { ips := strings.Split(cluster.Conf.ProvProxGateway, ".") masks := strings.Split(cluster.Conf.ProvProxNetmask, ".") @@ -420,8 +426,8 @@ func (cluster *Cluster) OpenSVCGetProxyEnvSection(servers string, agent opensvc. svcenv["nodes"] = agent.Node_name svcenv["base_dir"] = "/srv/{namespace}-{svcname}" svcenv["size"] = cluster.Conf.ProvProxDisk + "b" - svcenv["ip_pod01"] = prx.Host - svcenv["port_pod01"] = prx.Port + svcenv["ip_pod01"] = prx.GetHost() + svcenv["port_pod01"] = prx.GetPort() svcenv["mysql_root_password"] = cluster.dbPass svcenv["mysql_root_user"] = cluster.dbUser svcenv["network"] = network @@ -438,28 +444,28 @@ func (cluster *Cluster) OpenSVCGetProxyEnvSection(servers string, agent opensvc. 
svcenv["vip_addr"] = cluster.Conf.ProvProxRouteAddr svcenv["vip_port"] = cluster.Conf.ProvProxRoutePort svcenv["vip_netmask"] = cluster.Conf.ProvProxRouteMask - svcenv["port_rw"] = strconv.Itoa(prx.WritePort) - svcenv["port_rw_split"] = strconv.Itoa(prx.ReadWritePort) - svcenv["port_r_lb"] = strconv.Itoa(prx.ReadPort) + svcenv["port_rw"] = strconv.Itoa(prx.GetWritePort()) + svcenv["port_rw_split"] = strconv.Itoa(prx.GetReadWritePort()) + svcenv["port_r_lb"] = strconv.Itoa(prx.GetReadPort()) svcenv["port_http"] = "80" svcenv["backend_ips"] = servers svcenv["port_binlog"] = strconv.Itoa(cluster.Conf.MxsBinlogPort) - svcenv["port_telnet"] = prx.Port - svcenv["port_admin"] = prx.Port - svcenv["user_admin"] = prx.User + svcenv["port_telnet"] = prx.GetPort() + svcenv["port_admin"] = prx.GetPort() + svcenv["user_admin"] = prx.GetUser() svcenv["mrm_api_addr"] = cluster.Conf.MonitorAddress + ":" + cluster.Conf.HttpPort svcenv["mrm_cluster_name"] = cluster.GetClusterName() return svcenv } -func (cluster *Cluster) GetProxiesEnv(collector opensvc.Collector, servers string, agent opensvc.Host, prx *Proxy) string { +func (cluster *Cluster) GetProxiesEnv(collector opensvc.Collector, servers string, agent opensvc.Host, prx DatabaseProxy) string { i := 0 ipPods := "" //if !cluster.Conf.ProvNetCNI { - ipPods = ipPods + `ip_pod` + fmt.Sprintf("%02d", i+1) + ` = ` + prx.Host + ` + ipPods = ipPods + `ip_pod` + fmt.Sprintf("%02d", i+1) + ` = ` + prx.GetHost() + ` ` - portPods := `port_pod` + fmt.Sprintf("%02d", i+1) + ` = ` + prx.Port + ` + portPods := `port_pod` + fmt.Sprintf("%02d", i+1) + ` = ` + prx.GetPort() + ` ` /*} else { ipPods = ipPods + `ip_pod` + fmt.Sprintf("%02d", i+1) + ` = 0.0.0.0` @@ -498,37 +504,26 @@ maxscale_maxinfo_port =` + strconv.Itoa(cluster.Conf.MxsMaxinfoPort) + ` vip_addr = ` + cluster.Conf.ProvProxRouteAddr + ` vip_port = ` + cluster.Conf.ProvProxRoutePort + ` vip_netmask = ` + cluster.Conf.ProvProxRouteMask + ` -port_rw = ` + strconv.Itoa(prx.WritePort) + ` -port_rw_split = ` + strconv.Itoa(prx.ReadWritePort) + ` -port_r_lb = ` + strconv.Itoa(prx.ReadPort) + ` +port_rw = ` + strconv.Itoa(prx.GetWritePort()) + ` +port_rw_split = ` + strconv.Itoa(prx.GetReadWritePort()) + ` +port_r_lb = ` + strconv.Itoa(prx.GetReadPort()) + ` port_http = 80 base_dir = /srv/{namespace}-{svcname} backend_ips = ` + servers + ` port_binlog = ` + strconv.Itoa(cluster.Conf.MxsBinlogPort) + ` -port_telnet = ` + prx.Port + ` -port_admin = ` + prx.Port + ` -user_admin = ` + prx.User + ` -password_admin = ` + prx.Pass + ` +port_telnet = ` + prx.GetPort() + ` +port_admin = ` + prx.GetPort() + ` +user_admin = ` + prx.GetUser() + ` +password_admin = ` + prx.GetPass() + ` mrm_api_addr = ` + cluster.Conf.MonitorAddress + ":" + cluster.Conf.HttpPort + ` mrm_cluster_name = ` + cluster.GetClusterName() + ` -proxysql_read_on_master = ` + prx.ProxySQLReadOnMaster() + ` ` - return conf -} -func (proxy *Proxy) GetPRXEnv() map[string]string { - return map[string]string{ - // "%%ENV:NODES_CPU_CORES%%": server.ClusterGroup.Conf.ProvCores, - // "%%ENV:SVC_CONF_ENV_MAX_CORES%%": server.ClusterGroup.Conf.ProvCores, - "%%ENV:SVC_CONF_ENV_CRC32_ID%%": string(proxy.Id[2:10]), - "%%ENV:SVC_CONF_ENV_SERVER_ID%%": string(proxy.Id[2:10]), - // "%%ENV:SVC_CONF_ENV_MYSQL_ROOT_PASSWORD%%": server.ClusterGroup.dbPass, - - "%%ENV:SERVER_IP%%": "0.0.0.0", - "%%ENV:SERVER_PORT%%": proxy.Port, - "%%ENV:SVC_CONF_ENV_PROXYSQL_READ_ON_MASTER%%": proxy.ProxySQLReadOnMaster(), + if pr, ok := prx.(*ProxySQLProxy); ok { + conf += 
`proxysql_read_on_master = ` + pr.ProxySQLReadOnMaster() } + return conf } func (server *Proxy) OpenSVCGetProxyDefaultSection() map[string]string { diff --git a/cluster/prov_opensvc_shardproxy.go b/cluster/prov_opensvc_shardproxy.go index 4b364a36f..f36fe10cc 100644 --- a/cluster/prov_opensvc_shardproxy.go +++ b/cluster/prov_opensvc_shardproxy.go @@ -15,7 +15,7 @@ import ( "github.com/signal18/replication-manager/opensvc" ) -func (cluster *Cluster) OpenSVCGetShardproxyContainerSection(server *Proxy) map[string]string { +func (cluster *Cluster) OpenSVCGetShardproxyContainerSection(server *MdbsProxy) map[string]string { svccontainer := make(map[string]string) if server.ClusterGroup.Conf.ProvProxType == "docker" || server.ClusterGroup.Conf.ProvProxType == "podman" || server.ClusterGroup.Conf.ProvProxType == "oci" { @@ -35,7 +35,7 @@ func (cluster *Cluster) OpenSVCGetShardproxyContainerSection(server *Proxy) map[ return svccontainer } -func (cluster *Cluster) GetShardproxyTemplate(collector opensvc.Collector, servers string, agent opensvc.Host, prx *Proxy) (string, error) { +func (cluster *Cluster) GetShardproxyTemplate(collector opensvc.Collector, servers string, agent opensvc.Host, prx *MdbsProxy) (string, error) { ipPods := "" diff --git a/cluster/prov_opensvc_sphinx.go b/cluster/prov_opensvc_sphinx.go index 4761cd01e..73abecba9 100644 --- a/cluster/prov_opensvc_sphinx.go +++ b/cluster/prov_opensvc_sphinx.go @@ -13,7 +13,7 @@ import ( "github.com/signal18/replication-manager/opensvc" ) -func (cluster *Cluster) OpenSVCGetSphinxContainerSection(server *Proxy) map[string]string { +func (cluster *Cluster) OpenSVCGetSphinxContainerSection(server *SphinxProxy) map[string]string { svccontainer := make(map[string]string) if server.ClusterGroup.Conf.ProvProxType == "docker" || server.ClusterGroup.Conf.ProvProxType == "podman" || server.ClusterGroup.Conf.ProvProxType == "oci" { svccontainer["tags"] = "" @@ -31,7 +31,7 @@ func (cluster *Cluster) OpenSVCGetSphinxContainerSection(server *Proxy) map[stri return svccontainer } -func (cluster *Cluster) OpenSVCGetSphinxTaskSection(server *Proxy) map[string]string { +func (cluster *Cluster) OpenSVCGetSphinxTaskSection(server *SphinxProxy) map[string]string { svccontainer := make(map[string]string) svccontainer["schedule"] = cluster.Conf.ProvSphinxCron svccontainer["command"] = "{env.base_dir}/{namespace}-{svcname}/pod01/init/reindex.sh" @@ -40,7 +40,7 @@ func (cluster *Cluster) OpenSVCGetSphinxTaskSection(server *Proxy) map[string]st return svccontainer } -func (cluster *Cluster) GetSphinxTemplate(collector opensvc.Collector, servers string, agent opensvc.Host, prx *Proxy) (string, error) { +func (cluster *Cluster) GetSphinxTemplate(collector opensvc.Collector, servers string, agent opensvc.Host, prx *SphinxProxy) (string, error) { conf := ` [DEFAULT] diff --git a/cluster/prov_splaos_prx.go b/cluster/prov_splaos_prx.go index f6f2f730f..92c009576 100644 --- a/cluster/prov_splaos_prx.go +++ b/cluster/prov_splaos_prx.go @@ -1,19 +1,19 @@ package cluster -func (cluster *Cluster) SlapOSProvisionProxyService(prx *Proxy) { +func (cluster *Cluster) SlapOSProvisionProxyService(prx DatabaseProxy) { } -func (cluster *Cluster) SlapOSUnprovisionProxyService(prx *Proxy) { +func (cluster *Cluster) SlapOSUnprovisionProxyService(prx DatabaseProxy) { } -func (cluster *Cluster) SlapOSStartProxyService(server *Proxy) error { +func (cluster *Cluster) SlapOSStartProxyService(server DatabaseProxy) error { server.SetWaitStartCookie() return nil } -func (cluster *Cluster) 
SlapOSStopProxyService(server *Proxy) error { +func (cluster *Cluster) SlapOSStopProxyService(server DatabaseProxy) error { server.SetWaitStopCookie() return nil } diff --git a/cluster/prx.go b/cluster/prx.go index c1ab95645..d602ffd43 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -17,6 +17,7 @@ import ( "sync" "time" + "github.com/jmoiron/sqlx" "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/graphite" "github.com/signal18/replication-manager/router/myproxy" @@ -29,6 +30,7 @@ import ( // Proxy defines a proxy type Proxy struct { + DatabaseProxy Id string `json:"id"` Name string `json:"name"` ServiceName string `json:"serviceName"` @@ -62,6 +64,130 @@ type Proxy struct { Variables map[string]string `json:"-"` } +func (p *Proxy) GetType() string { + return p.Type +} + +func (p *Proxy) GetHost() string { + return p.Host +} + +func (p *Proxy) GetPort() string { + return p.Port +} + +func (p *Proxy) GetWritePort() int { + return p.WritePort +} + +func (p *Proxy) GetId() string { + return p.Id +} + +func (p *Proxy) GetState() string { + return p.State +} + +func (p *Proxy) SetState(v string) { + p.State = v +} + +func (p *Proxy) GetUser() string { + return p.User +} + +func (p *Proxy) GetPass() string { + return p.Pass +} + +func (p *Proxy) GetFailCount() int { + return p.FailCount +} + +func (p *Proxy) SetFailCount(c int) { + p.FailCount = c +} + +func (p *Proxy) SetCredential(credential string) { + p.User, p.Pass = misc.SplitPair(credential) +} + +func (p *Proxy) GetPrevState() string { + return p.PrevState +} + +func (p *Proxy) SetPrevState(state string) { + p.PrevState = state +} + +type DatabaseProxy interface { + // Init oldmaster is only currently used by Maxscale + Init() + Refresh() error + Failover() + SetMaintenance(server *ServerMonitor) + GetType() string + DelRestartCookie() + DelReprovisionCookie() + + SetProvisionCookie() + HasProvisionCookie() bool + IsRunning() bool + SetRestartCookie() + HasRestartCookie() bool + SetReprovCookie() + HasReprovCookie() bool + + SetCredential(credential string) + + GetFailCount() int + SetFailCount(c int) + + GetName() string + GetHost() string + GetPort() string + GetWritePort() int + GetReadWritePort() int + GetReadPort() int + GetId() string + GetState() string + SetState(v string) + GetUser() string + GetPass() string + GetServiceName() string + + GetPrevState() string + SetPrevState(state string) + + GetCluster() (*sqlx.DB, error) + + SetMaintenanceHaproxy(server *ServerMonitor) + + IsFilterInTags(filter string) bool + HasWaitStartCookie() bool + HasWaitStopCookie() bool + IsDown() bool + + DelProvisionCookie() + DelWaitStartCookie() + DelWaitStopCookie() + GetProxyConfig() string + // GetInitContainer(collector opensvc.Collector) string + GetBindAddress() string + GetBindAddressExtraIPV6() string + GetUseSSL() string + GetUseCompression() string + GetDatadir() string + GetEnv() map[string]string + GetConfigProxyModule(variable string) string + + SendStats() error + + OpenSVCGetProxyDefaultSection() map[string]string + SetWaitStartCookie() + SetWaitStopCookie() +} + type Backend struct { Host string `json:"host"` Port string `json:"port"` @@ -76,7 +202,7 @@ type Backend struct { PrxMaintenance bool `json:"prxMaintenance"` } -type proxyList []*Proxy +type proxyList []DatabaseProxy func (cluster *Cluster) newProxyList() error { nbproxies := 0 @@ -107,7 +233,7 @@ func (cluster *Cluster) newProxyList() error { if cluster.Conf.MyproxyOn { nbproxies++ } - cluster.Proxies = make([]*Proxy, nbproxies) + 
cluster.Proxies = make([]DatabaseProxy, nbproxies) cluster.LogPrintf(LvlInfo, "Loading %d proxies", nbproxies) @@ -117,7 +243,8 @@ func (cluster *Cluster) newProxyList() error { slapospartitions := strings.Split(cluster.Conf.SlapOSHaProxyPartitions, ",") for k, proxyHost := range strings.Split(cluster.Conf.MxsHost, ",") { - prx := new(Proxy) + // prx := new(Proxy) + prx := new(MaxscaleProxy) prx.Type = config.ConstProxyMaxscale if k < len(slapospartitions) { prx.SlapOSDatadir = slapospartitions[k] @@ -142,11 +269,11 @@ func (cluster *Cluster) newProxyList() error { prx.Id = "px" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+prx.Name+":"+strconv.Itoa(prx.WritePort)), crcTable), 10) prx.ClusterGroup = cluster prx.SetDataDir() - cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.Port) - - cluster.Proxies[ctproxy], err = cluster.newProxy(prx) + cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) + prx.State = stateSuspect + cluster.Proxies[ctproxy] = prx if err != nil { - cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.Port, err) + cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) } ctproxy++ } @@ -155,7 +282,7 @@ func (cluster *Cluster) newProxyList() error { slapospartitions := strings.Split(cluster.Conf.SlapOSHaProxyPartitions, ",") for k, proxyHost := range strings.Split(cluster.Conf.HaproxyHosts, ",") { - prx := new(Proxy) + prx := new(HaproxyProxy) if k < len(slapospartitions) { prx.SlapOSDatadir = slapospartitions[k] } @@ -172,10 +299,11 @@ func (cluster *Cluster) newProxyList() error { prx.Id = "px" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+prx.Name+":"+strconv.Itoa(prx.WritePort)), crcTable), 10) prx.ClusterGroup = cluster prx.SetDataDir() - cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.Port) - cluster.Proxies[ctproxy], err = cluster.newProxy(prx) + cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) + prx.State = stateSuspect + cluster.Proxies[ctproxy] = prx if err != nil { - cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.Port, err) + cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) } ctproxy++ @@ -185,7 +313,7 @@ func (cluster *Cluster) newProxyList() error { prx := new(Proxy) prx.Type = config.ConstProxyExternal prx.Host, prx.Port = misc.SplitHostPort(cluster.Conf.ExtProxyVIP) - prx.WritePort, _ = strconv.Atoi(prx.Port) + prx.WritePort, _ = strconv.Atoi(prx.GetPort()) prx.ReadPort = prx.WritePort prx.ReadWritePort = prx.WritePort if prx.Name == "" { @@ -194,7 +322,7 @@ func (cluster *Cluster) newProxyList() error { prx.Id = "px" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+prx.Name+":"+strconv.Itoa(prx.WritePort)), crcTable), 10) prx.ClusterGroup = cluster prx.SetDataDir() - cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.Port) + cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) cluster.Proxies[ctproxy], err = cluster.newProxy(prx) ctproxy++ } @@ -203,44 +331,31 @@ func (cluster *Cluster) newProxyList() error { ipv6hosts := strings.Split(cluster.Conf.ProxysqlHostsIPV6, ",") for k, proxyHost := range strings.Split(cluster.Conf.ProxysqlHosts, ",") { - prx := new(Proxy) + // prx := new(Proxy) + // prx := new(ProxySQLProxy) + prx 
:= NewProxySQLProxy(cluster.Name, proxyHost, cluster.Conf) if k < len(slapospartitions) { prx.SlapOSDatadir = slapospartitions[k] } if k < len(ipv6hosts) { prx.HostIPV6 = ipv6hosts[k] } - prx.Type = config.ConstProxySqlproxy - prx.Port = cluster.Conf.ProxysqlAdminPort - prx.ReadWritePort, _ = strconv.Atoi(cluster.Conf.ProxysqlPort) - prx.User = cluster.Conf.ProxysqlUser - prx.Pass = cluster.Conf.ProxysqlPassword - prx.ReaderHostgroup, _ = strconv.Atoi(cluster.Conf.ProxysqlReaderHostgroup) - prx.WriterHostgroup, _ = strconv.Atoi(cluster.Conf.ProxysqlWriterHostgroup) - prx.WritePort, _ = strconv.Atoi(cluster.Conf.ProxysqlPort) - prx.ReadPort, _ = strconv.Atoi(cluster.Conf.ProxysqlPort) + if cluster.key != nil { p := crypto.Password{Key: cluster.key} p.CipherText = prx.Pass p.Decrypt() prx.Pass = p.PlainText } - prx.Name = proxyHost - prx.Host = proxyHost - if cluster.Conf.ProvNetCNI { - if cluster.Conf.ClusterHead == "" { - prx.Host = prx.Host + "." + cluster.Name + ".svc." + cluster.Conf.ProvOrchestratorCluster - } else { - prx.Host = prx.Host + "." + cluster.Conf.ClusterHead + ".svc." + cluster.Conf.ProvOrchestratorCluster - } - } - prx.Id = "px" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+prx.Name+":"+strconv.Itoa(prx.WritePort)), crcTable), 10) + prx.ClusterGroup = cluster prx.SetDataDir() - cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.Port) - cluster.Proxies[ctproxy], err = cluster.newProxy(prx) + cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) + // cluster.Proxies[ctproxy], err = cluster.newProxy(prx) + prx.State = stateSuspect + cluster.Proxies[ctproxy] = prx if err != nil { - cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.Port, err) + cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) } ctproxy++ } @@ -249,7 +364,7 @@ func (cluster *Cluster) newProxyList() error { slapospartitions := strings.Split(cluster.Conf.SlapOSShardProxyPartitions, ",") ipv6hosts := strings.Split(cluster.Conf.MdbsHostsIPV6, ",") for k, proxyHost := range strings.Split(cluster.Conf.MdbsProxyHosts, ",") { - prx := new(Proxy) + prx := new(MdbsProxy) if k < len(slapospartitions) { prx.SlapOSDatadir = slapospartitions[k] } @@ -259,8 +374,8 @@ func (cluster *Cluster) newProxyList() error { prx.Type = config.ConstProxySpider prx.Host, prx.Port = misc.SplitHostPort(proxyHost) prx.User, prx.Pass = misc.SplitPair(cluster.Conf.MdbsProxyCredential) - prx.ReadPort, _ = strconv.Atoi(prx.Port) - prx.ReadWritePort, _ = strconv.Atoi(prx.Port) + prx.ReadPort, _ = strconv.Atoi(prx.GetPort()) + prx.ReadWritePort, _ = strconv.Atoi(prx.GetPort()) prx.Name = proxyHost if cluster.Conf.ProvNetCNI { if cluster.Conf.ClusterHead == "" { @@ -270,30 +385,31 @@ func (cluster *Cluster) newProxyList() error { } prx.Port = "3306" } - prx.WritePort, _ = strconv.Atoi(prx.Port) + prx.WritePort, _ = strconv.Atoi(prx.GetPort()) prx.Id = "px" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+prx.Name+":"+strconv.Itoa(prx.WritePort)), crcTable), 10) prx.ClusterGroup = cluster prx.SetDataDir() - cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.Port) - cluster.Proxies[ctproxy], err = cluster.newProxy(prx) + cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) + prx.State = stateSuspect + cluster.Proxies[ctproxy] = prx if err != nil { - cluster.LogPrintf(LvlErr, "Could not open 
connection to proxy %s %s: %s", prx.Host, prx.Port, err) + cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) } - cluster.LogPrintf(LvlDbg, "New MdbShardProxy proxy created: %s %s", prx.Host, prx.Port) + cluster.LogPrintf(LvlDbg, "New MdbShardProxy proxy created: %s %s", prx.Host, prx.GetPort()) ctproxy++ } } if cluster.Conf.SphinxHosts != "" && cluster.Conf.SphinxOn { for _, proxyHost := range strings.Split(cluster.Conf.SphinxHosts, ",") { - prx := new(Proxy) + prx := new(SphinxProxy) prx.Type = config.ConstProxySphinx prx.Port = cluster.Conf.SphinxQLPort prx.User = "" prx.Pass = "" - prx.ReadPort, _ = strconv.Atoi(prx.Port) - prx.WritePort, _ = strconv.Atoi(prx.Port) - prx.ReadWritePort, _ = strconv.Atoi(prx.Port) + prx.ReadPort, _ = strconv.Atoi(prx.GetPort()) + prx.WritePort, _ = strconv.Atoi(prx.GetPort()) + prx.ReadWritePort, _ = strconv.Atoi(prx.GetPort()) prx.Name = proxyHost prx.Host = proxyHost if cluster.Conf.ProvNetCNI { @@ -302,12 +418,13 @@ func (cluster *Cluster) newProxyList() error { prx.Id = "px" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+prx.Name+":"+strconv.Itoa(prx.WritePort)), crcTable), 10) prx.ClusterGroup = cluster prx.SetDataDir() - cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.Port) - cluster.Proxies[ctproxy], err = cluster.newProxy(prx) + cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) + prx.State = stateSuspect + cluster.Proxies[ctproxy] = prx if err != nil { - cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.Port, err) + cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) } - cluster.LogPrintf(LvlDbg, "New SphinxSearch proxy created: %s %s", prx.Host, prx.Port) + cluster.LogPrintf(LvlDbg, "New SphinxSearch proxy created: %s %s", prx.Host, prx.GetPort()) ctproxy++ } } @@ -330,7 +447,7 @@ func (cluster *Cluster) newProxyList() error { } prx.ClusterGroup = cluster prx.SetDataDir() - cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.Port) + cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) cluster.Proxies[ctproxy], err = cluster.newProxy(prx) ctproxy++ } @@ -338,6 +455,7 @@ func (cluster *Cluster) newProxyList() error { return nil } +// TODO: clarify why this happens func (cluster *Cluster) newProxy(p *Proxy) (*Proxy, error) { proxy := new(Proxy) proxy = p @@ -350,15 +468,15 @@ func (cluster *Cluster) InjectProxiesTraffic() { // Found server from ServerId if cluster.GetMaster() != nil { for _, pr := range cluster.Proxies { - if pr.Type == config.ConstProxySphinx || pr.Type == config.ConstProxyMyProxy { + if pr.GetType() == config.ConstProxySphinx || pr.GetType() == config.ConstProxyMyProxy { // Does not yet understand CREATE OR REPLACE VIEW continue } - db, err := cluster.GetClusterThisProxyConn(pr) + db, err := pr.GetCluster() if err != nil { cluster.sme.AddState("ERR00050", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00050"], err), ErrFrom: "TOPO"}) } else { - if pr.Type == config.ConstProxyMyProxy { + if pr.GetType() == config.ConstProxyMyProxy { definer = "DEFINER = root@localhost" } else { definer = "" @@ -380,7 +498,7 @@ func (cluster *Cluster) IsProxyEqualMaster() bool { // Found server from ServerId if cluster.GetMaster() != nil { for _, pr := range cluster.Proxies { - db, err := cluster.GetClusterThisProxyConn(pr) + 
db, err := pr.GetCluster() if err != nil { if cluster.IsVerbose() { cluster.LogPrintf(LvlErr, "Can't get a proxy connection: %s", err) @@ -407,7 +525,7 @@ func (cluster *Cluster) IsProxyEqualMaster() bool { if cluster.IsVerbose() { cluster.LogPrintf(LvlInfo, "Proxy compare master: %d %d", cluster.GetMaster().ServerID, uint(sid)) } - if cluster.GetMaster().ServerID == uint64(sid) || pr.Type == config.ConstProxySpider { + if cluster.GetMaster().ServerID == uint64(sid) || pr.GetType() == config.ConstProxySpider { return true } } @@ -419,22 +537,28 @@ func (cluster *Cluster) SetProxyServerMaintenance(serverid uint64) { // Found server from ServerId for _, pr := range cluster.Proxies { server := cluster.GetServerFromId(serverid) - if cluster.Conf.HaproxyOn && pr.Type == config.ConstProxyHaproxy { - if cluster.Conf.HaproxyMode == "runtimeapi" { - cluster.setMaintenanceHaproxy(pr, server) - } - if cluster.Conf.HaproxyMode == "standby" { - cluster.initHaproxy(pr) + if cluster.Conf.HaproxyOn { + if prx, ok := pr.(*HaproxyProxy); ok { + if cluster.Conf.HaproxyMode == "runtimeapi" { + prx.SetMaintenance(server) + } + if cluster.Conf.HaproxyMode == "standby" { + prx.Init() + } } } - if cluster.Conf.MxsOn && pr.Type == config.ConstProxyMaxscale { - if cluster.GetMaster() != nil { - cluster.setMaintenanceMaxscale(pr, server) + if cluster.Conf.MxsOn { + if prx, ok := pr.(*MaxscaleProxy); ok { + if cluster.GetMaster() != nil { + prx.SetMaintenance(server) + } } } - if cluster.Conf.ProxysqlOn && pr.Type == config.ConstProxySqlproxy { - if cluster.GetMaster() != nil { - cluster.setMaintenanceProxysql(pr, server) + if cluster.Conf.ProxysqlOn { + if prx, ok := pr.(*ProxySQLProxy); ok { + if cluster.GetMaster() != nil { + prx.SetMaintenance(server) + } } } } @@ -452,45 +576,34 @@ func (cluster *Cluster) refreshProxies(wcg *sync.WaitGroup) { for _, pr := range cluster.Proxies { var err error - if cluster.Conf.MxsOn && pr.Type == config.ConstProxyMaxscale { - err = cluster.refreshMaxscale(pr) - } - if cluster.Conf.MdbsProxyOn && pr.Type == config.ConstProxySpider { - err = cluster.refreshMdbsproxy(nil, pr) - } - if cluster.Conf.ProxysqlOn && pr.Type == config.ConstProxySqlproxy { - err = cluster.refreshProxysql(pr) - } - if cluster.Conf.HaproxyOn && pr.Type == config.ConstProxyHaproxy { - err = cluster.refreshHaproxy(pr) - } - if cluster.Conf.SphinxOn && pr.Type == config.ConstProxySphinx { - err = cluster.refreshSphinx(pr) - } + err = pr.Refresh() if err == nil { - pr.FailCount = 0 - pr.State = stateProxyRunning + pr.SetFailCount(0) + pr.SetState(stateProxyRunning) if pr.HasWaitStartCookie() { pr.DelWaitStartCookie() } } else { - pr.FailCount++ - if pr.FailCount >= pr.ClusterGroup.Conf.MaxFail { - if pr.FailCount == pr.ClusterGroup.Conf.MaxFail { - pr.ClusterGroup.LogPrintf("INFO", "Declaring %s proxy as failed %s:%s %s", pr.Type, pr.Host, pr.Port, err) + fc := pr.GetFailCount() + 1 + // TODO: Can pr.ClusterGroup be different from cluster *Cluster? code doesn't imply it. 
if not change to + // cl, err := pr.GetCluster() + // cl.Conf.MaxFail + if fc >= cluster.Conf.MaxFail { + if fc == cluster.Conf.MaxFail { + cluster.LogPrintf("INFO", "Declaring %s proxy as failed %s:%s %s", pr.GetType(), pr.GetHost(), pr.GetPort(), err) } - pr.State = stateFailed + pr.SetState(stateFailed) pr.DelWaitStopCookie() pr.DelRestartCookie() } else { - pr.State = stateSuspect + pr.SetState(stateSuspect) } } - if pr.PrevState != pr.State { - pr.PrevState = pr.State + if pr.GetPrevState() != pr.GetState() { + pr.SetPrevState(pr.GetState()) } if cluster.Conf.GraphiteMetrics { - cluster.SendProxyStats(pr) + pr.SendStats() } } @@ -498,51 +611,28 @@ func (cluster *Cluster) refreshProxies(wcg *sync.WaitGroup) { func (cluster *Cluster) failoverProxies() { for _, pr := range cluster.Proxies { - cluster.LogPrintf(LvlInfo, "Failover Proxy Type: %s Host: %s Port: %s", pr.Type, pr.Host, pr.Port) - if cluster.Conf.HaproxyOn && pr.Type == config.ConstProxyHaproxy { - if cluster.Conf.HaproxyMode == "runtimeapi" { - cluster.refreshHaproxy(pr) - } - if cluster.Conf.HaproxyMode == "standby" { - cluster.initHaproxy(pr) - } - } - if cluster.Conf.MxsOn && pr.Type == config.ConstProxyMaxscale { - cluster.initMaxscale(nil, pr) - } - if cluster.Conf.MdbsProxyOn && pr.Type == config.ConstProxySpider { - cluster.failoverMdbsproxy(nil, pr) - } - if cluster.Conf.ProxysqlOn && pr.Type == config.ConstProxySqlproxy { - cluster.failoverProxysql(pr) - } + cluster.LogPrintf(LvlInfo, "Failover Proxy Type: %s Host: %s Port: %s", pr.GetType(), pr.GetHost(), pr.GetPort()) + pr.Failover() } cluster.initConsul() } +// TODO: reduce to +// for { pr.Init() } func (cluster *Cluster) initProxies() { for _, pr := range cluster.Proxies { - cluster.LogPrintf(LvlInfo, "New proxy monitored: %s %s:%s", pr.Type, pr.Host, pr.Port) - if cluster.Conf.HaproxyOn && pr.Type == config.ConstProxyHaproxy { - cluster.initHaproxy(pr) - } - if cluster.Conf.MxsOn && pr.Type == config.ConstProxyMaxscale { - cluster.initMaxscale(nil, pr) - } - if cluster.Conf.MdbsProxyOn && pr.Type == config.ConstProxySpider { - cluster.initMdbsproxy(nil, pr) - } - if cluster.Conf.ProxysqlOn && pr.Type == config.ConstProxySqlproxy { - cluster.initProxysql(pr) - } - if cluster.Conf.MyproxyOn && pr.Type == config.ConstProxyMyProxy { - cluster.initMyProxy(pr) - } + cluster.LogPrintf(LvlInfo, "New proxy monitored: %s %s:%s", pr.GetType(), pr.GetHost(), pr.GetPort()) + pr.Init() } cluster.initConsul() } -func (cluster *Cluster) SendProxyStats(proxy *Proxy) error { +func (cluster *Cluster) SendProxyStats(proxy DatabaseProxy) error { + return proxy.SendStats() +} + +func (proxy *Proxy) SendStats() error { + cluster := proxy.ClusterGroup graph, err := graphite.NewGraphite(cluster.Conf.GraphiteCarbonHost, cluster.Conf.GraphiteCarbonPort) if err != nil { return err diff --git a/cluster/prx_get.go b/cluster/prx_get.go index 9f3dfa658..1ef8b3aa0 100644 --- a/cluster/prx_get.go +++ b/cluster/prx_get.go @@ -23,9 +23,9 @@ import ( "github.com/signal18/replication-manager/utils/misc" ) -func (cluster *Cluster) GetProxyFromName(name string) *Proxy { +func (cluster *Cluster) GetProxyFromName(name string) DatabaseProxy { for _, pr := range cluster.Proxies { - if pr.Id == name { + if pr.GetId() == name { return pr } } @@ -41,8 +41,8 @@ func (cluster *Cluster) GetClusterProxyConn() (*sqlx.DB, error) { params := fmt.Sprintf("?timeout=%ds", cluster.Conf.Timeout) dsn := cluster.dbUser + ":" + cluster.dbPass + "@" - if prx.Host != "" { - dsn += "tcp(" + prx.Host + ":" + 
strconv.Itoa(prx.WritePort) + ")/" + params + if prx.GetHost() != "" { + dsn += "tcp(" + prx.GetHost() + ":" + strconv.Itoa(prx.GetWritePort()) + ")/" + params } else { return nil, errors.New("No proxies definition") @@ -55,7 +55,13 @@ func (cluster *Cluster) GetClusterProxyConn() (*sqlx.DB, error) { } +// Deprecated: replaced by proxy.GetCluster() func (cluster *Cluster) GetClusterThisProxyConn(prx *Proxy) (*sqlx.DB, error) { + return prx.GetCluster() +} + +func (prx *Proxy) GetCluster() (*sqlx.DB, error) { + cluster := prx.ClusterGroup params := fmt.Sprintf("?timeout=%ds", cluster.Conf.Timeout) dsn := cluster.dbUser + ":" + cluster.dbPass + "@" if cluster.Conf.MonitorWriteHeartbeatCredential != "" { @@ -74,16 +80,6 @@ func (cluster *Cluster) GetClusterThisProxyConn(prx *Proxy) (*sqlx.DB, error) { } func (proxy *Proxy) GetProxyConfig() string { - - if proxy.Type == config.ConstProxySpider { - if proxy.ShardProxy == nil { - proxy.ClusterGroup.LogPrintf(LvlErr, "Can't get shard proxy config start monitoring") - proxy.ClusterGroup.ShardProxyBootstrap(proxy) - return proxy.ShardProxy.GetDatabaseConfig() - } else { - return proxy.ShardProxy.GetDatabaseConfig() - } - } type File struct { Path string `json:"path"` Content string `json:"fmt"` @@ -217,48 +213,60 @@ func (proxy *Proxy) GetDatadir() string { return "/tmp" } +func (proxy *ProxySQLProxy) GetEnv() map[string]string { + env := proxy.GetBaseEnv() + env["%%ENV:SVC_CONF_ENV_PROXYSQL_READ_ON_MASTER%%"] = proxy.ProxySQLReadOnMaster() + return env +} + func (proxy *Proxy) GetEnv() map[string]string { - return map[string]string{ - "%%ENV:NODES_CPU_CORES%%": proxy.ClusterGroup.Conf.ProvCores, - "%%ENV:SVC_CONF_ENV_MAX_CORES%%": proxy.ClusterGroup.Conf.ProvCores, - "%%ENV:SVC_CONF_ENV_CRC32_ID%%": string(proxy.Id[2:10]), - "%%ENV:SVC_CONF_ENV_SERVER_ID%%": string(proxy.Id[2:10]), - "%%ENV:SVC_CONF_ENV_MYSQL_ROOT_PASSWORD%%": proxy.ClusterGroup.dbPass, - "%%ENV:SVC_CONF_ENV_MYSQL_ROOT_USER%%": proxy.ClusterGroup.dbUser, - "%%ENV:SERVER_IP%%": proxy.GetBindAddress(), - "%%ENV:EXTRA_BIND_SERVER_IPV6%%": proxy.GetBindAddressExtraIPV6(), - "%%ENV:SVC_CONF_ENV_PROXY_USE_SSL%%": proxy.GetUseSSL(), - "%%ENV:SVC_CONF_ENV_PROXY_USE_COMPRESS%%": proxy.GetUseCompression(), - "%%ENV:SERVER_PORT%%": proxy.Port, - "%%ENV:SVC_NAMESPACE%%": proxy.ClusterGroup.Name, - "%%ENV:SVC_NAME%%": proxy.Name, - "%%ENV:SERVERS_HAPROXY_WRITE%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_HAPROXY_WRITE%%"), - "%%ENV:SERVERS_HAPROXY_READ%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_HAPROXY_READ%%"), - "%%ENV:SERVERS_HAPROXY_WRITE_BACKEND%%": proxy.ClusterGroup.Conf.HaproxyAPIWriteBackend, - "%%ENV:SERVERS_HAPROXY_READ_BACKEND%%": proxy.ClusterGroup.Conf.HaproxyAPIReadBackend, - "%%ENV:SERVERS_PROXYSQL%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_PROXYSQL%%"), - "%%ENV:SERVERS%%": proxy.GetConfigProxyModule("%%ENV:SERVERS%%"), - "%%ENV:SERVERS_LIST%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_LIST%%"), - "%%ENV:SVC_CONF_ENV_PORT_HTTP%%": "80", - "%%ENV:SVC_CONF_ENV_PORT_R_LB%%": strconv.Itoa(proxy.ReadPort), - "%%ENV:SVC_CONF_ENV_PORT_RW%%": strconv.Itoa(proxy.WritePort), - "%%ENV:SVC_CONF_ENV_MAXSCALE_MAXINFO_PORT%%": strconv.Itoa(proxy.ClusterGroup.Conf.MxsMaxinfoPort), - "%%ENV:SVC_CONF_ENV_PORT_RW_SPLIT%%": strconv.Itoa(proxy.ReadWritePort), - "%%ENV:SVC_CONF_ENV_PORT_BINLOG%%": strconv.Itoa(proxy.ClusterGroup.Conf.MxsBinlogPort), - "%%ENV:SVC_CONF_ENV_PORT_TELNET%%": proxy.Port, - "%%ENV:SVC_CONF_ENV_PORT_ADMIN%%": proxy.Port, - "%%ENV:SVC_CONF_ENV_USER_ADMIN%%": 
proxy.User, - "%%ENV:SVC_CONF_ENV_PASSWORD_ADMIN%%": proxy.Pass, - "%%ENV:SVC_CONF_ENV_SPHINX_MEM%%": proxy.ClusterGroup.Conf.ProvSphinxMem, - "%%ENV:SVC_CONF_ENV_SPHINX_MAX_CHILDREN%%": proxy.ClusterGroup.Conf.ProvSphinxMaxChildren, - "%%ENV:SVC_CONF_ENV_VIP_ADDR%%": proxy.ClusterGroup.Conf.ProvProxRouteAddr, - "%%ENV:SVC_CONF_ENV_VIP_NETMASK%%": proxy.ClusterGroup.Conf.ProvProxRouteMask, - "%%ENV:SVC_CONF_ENV_VIP_PORT%%": proxy.ClusterGroup.Conf.ProvProxRoutePort, - "%%ENV:SVC_CONF_ENV_MRM_API_ADDR%%": proxy.ClusterGroup.Conf.MonitorAddress + ":" + proxy.ClusterGroup.Conf.HttpPort, - "%%ENV:SVC_CONF_ENV_MRM_CLUSTER_NAME%%": proxy.ClusterGroup.GetClusterName(), - "%%ENV:SVC_CONF_ENV_PROXYSQL_READ_ON_MASTER%%": proxy.ProxySQLReadOnMaster(), - "%%ENV:SVC_CONF_ENV_DATADIR%%": proxy.GetDatadir(), + return proxy.GetBaseEnv() +} + +func (proxy *Proxy) GetBaseEnv() map[string]string { + var env map[string]string + env = map[string]string{ + "%%ENV:NODES_CPU_CORES%%": proxy.ClusterGroup.Conf.ProvCores, + "%%ENV:SVC_CONF_ENV_MAX_CORES%%": proxy.ClusterGroup.Conf.ProvCores, + "%%ENV:SVC_CONF_ENV_CRC32_ID%%": string(proxy.Id[2:10]), + "%%ENV:SVC_CONF_ENV_SERVER_ID%%": string(proxy.Id[2:10]), + "%%ENV:SVC_CONF_ENV_MYSQL_ROOT_PASSWORD%%": proxy.ClusterGroup.dbPass, + "%%ENV:SVC_CONF_ENV_MYSQL_ROOT_USER%%": proxy.ClusterGroup.dbUser, + "%%ENV:SERVER_IP%%": proxy.GetBindAddress(), + "%%ENV:EXTRA_BIND_SERVER_IPV6%%": proxy.GetBindAddressExtraIPV6(), + "%%ENV:SVC_CONF_ENV_PROXY_USE_SSL%%": proxy.GetUseSSL(), + "%%ENV:SVC_CONF_ENV_PROXY_USE_COMPRESS%%": proxy.GetUseCompression(), + "%%ENV:SERVER_PORT%%": proxy.Port, + "%%ENV:SVC_NAMESPACE%%": proxy.ClusterGroup.Name, + "%%ENV:SVC_NAME%%": proxy.Name, + "%%ENV:SERVERS_HAPROXY_WRITE%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_HAPROXY_WRITE%%"), + "%%ENV:SERVERS_HAPROXY_READ%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_HAPROXY_READ%%"), + "%%ENV:SERVERS_HAPROXY_WRITE_BACKEND%%": proxy.ClusterGroup.Conf.HaproxyAPIWriteBackend, + "%%ENV:SERVERS_HAPROXY_READ_BACKEND%%": proxy.ClusterGroup.Conf.HaproxyAPIReadBackend, + "%%ENV:SERVERS_PROXYSQL%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_PROXYSQL%%"), + "%%ENV:SERVERS%%": proxy.GetConfigProxyModule("%%ENV:SERVERS%%"), + "%%ENV:SERVERS_LIST%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_LIST%%"), + "%%ENV:SVC_CONF_ENV_PORT_HTTP%%": "80", + "%%ENV:SVC_CONF_ENV_PORT_R_LB%%": strconv.Itoa(proxy.ReadPort), + "%%ENV:SVC_CONF_ENV_PORT_RW%%": strconv.Itoa(proxy.WritePort), + "%%ENV:SVC_CONF_ENV_MAXSCALE_MAXINFO_PORT%%": strconv.Itoa(proxy.ClusterGroup.Conf.MxsMaxinfoPort), + "%%ENV:SVC_CONF_ENV_PORT_RW_SPLIT%%": strconv.Itoa(proxy.ReadWritePort), + "%%ENV:SVC_CONF_ENV_PORT_BINLOG%%": strconv.Itoa(proxy.ClusterGroup.Conf.MxsBinlogPort), + "%%ENV:SVC_CONF_ENV_PORT_TELNET%%": proxy.Port, + "%%ENV:SVC_CONF_ENV_PORT_ADMIN%%": proxy.Port, + "%%ENV:SVC_CONF_ENV_USER_ADMIN%%": proxy.User, + "%%ENV:SVC_CONF_ENV_PASSWORD_ADMIN%%": proxy.Pass, + "%%ENV:SVC_CONF_ENV_SPHINX_MEM%%": proxy.ClusterGroup.Conf.ProvSphinxMem, + "%%ENV:SVC_CONF_ENV_SPHINX_MAX_CHILDREN%%": proxy.ClusterGroup.Conf.ProvSphinxMaxChildren, + "%%ENV:SVC_CONF_ENV_VIP_ADDR%%": proxy.ClusterGroup.Conf.ProvProxRouteAddr, + "%%ENV:SVC_CONF_ENV_VIP_NETMASK%%": proxy.ClusterGroup.Conf.ProvProxRouteMask, + "%%ENV:SVC_CONF_ENV_VIP_PORT%%": proxy.ClusterGroup.Conf.ProvProxRoutePort, + "%%ENV:SVC_CONF_ENV_MRM_API_ADDR%%": proxy.ClusterGroup.Conf.MonitorAddress + ":" + proxy.ClusterGroup.Conf.HttpPort, + "%%ENV:SVC_CONF_ENV_MRM_CLUSTER_NAME%%": 
proxy.ClusterGroup.GetClusterName(), + "%%ENV:SVC_CONF_ENV_DATADIR%%": proxy.GetDatadir(), } + + return env } func (proxy *Proxy) GetConfigProxyModule(variable string) string { diff --git a/cluster/prx_gobetween.go b/cluster/prx_gobetween.go index fcfac77d7..f5e09834c 100644 --- a/cluster/prx_gobetween.go +++ b/cluster/prx_gobetween.go @@ -6,11 +6,11 @@ package cluster -func (cluster *Cluster) initGoBetween(oldmaster *ServerMonitor, proxy *Proxy) { +func (cluster *Cluster) initGoBetween(oldmaster *ServerMonitor, proxy DatabaseProxy) { } -func (cluster *Cluster) refreshGoBetween(proxy *Proxy) error { +func (cluster *Cluster) refreshGoBetween(proxy DatabaseProxy) error { return nil } diff --git a/cluster/prx_haproxy.go b/cluster/prx_haproxy.go index 898a98308..326f53c53 100644 --- a/cluster/prx_haproxy.go +++ b/cluster/prx_haproxy.go @@ -22,7 +22,12 @@ import ( "github.com/signal18/replication-manager/utils/state" ) -func (cluster *Cluster) initHaproxy(proxy *Proxy) { +type HaproxyProxy struct { + Proxy +} + +func (proxy *HaproxyProxy) Init() { + cluster := proxy.ClusterGroup haproxydatadir := proxy.Datadir + "/var" if _, err := os.Stat(haproxydatadir); os.IsNotExist(err) { @@ -143,8 +148,8 @@ func (cluster *Cluster) initHaproxy(proxy *Proxy) { } -func (cluster *Cluster) refreshHaproxy(proxy *Proxy) error { - +func (proxy *HaproxyProxy) Refresh() error { + cluster := proxy.ClusterGroup // if proxy.ClusterGroup.Conf.HaproxyStatHttp { /* @@ -266,6 +271,11 @@ func (cluster *Cluster) refreshHaproxy(proxy *Proxy) error { } func (cluster *Cluster) setMaintenanceHaproxy(pr *Proxy, server *ServerMonitor) { + pr.SetMaintenance(server) +} + +func (pr *Proxy) SetMaintenance(server *ServerMonitor) { + cluster := pr.ClusterGroup haRuntime := haproxy.Runtime{ Binary: cluster.Conf.HaproxyBinaryPath, SockFile: filepath.Join(pr.Datadir+"/var", "/haproxy.stats.sock"), @@ -286,3 +296,13 @@ func (cluster *Cluster) setMaintenanceHaproxy(pr *Proxy, server *ServerMonitor) } } } + +func (prx *Proxy) Failover() { + cluster := prx.ClusterGroup + if cluster.Conf.HaproxyMode == "runtimeapi" { + prx.Refresh() + } + if cluster.Conf.HaproxyMode == "standby" { + prx.Init() + } +} diff --git a/cluster/prx_maxscale.go b/cluster/prx_maxscale.go index 39c860863..2d7752ace 100644 --- a/cluster/prx_maxscale.go +++ b/cluster/prx_maxscale.go @@ -16,7 +16,16 @@ import ( "github.com/signal18/replication-manager/utils/state" ) -func (cluster *Cluster) refreshMaxscale(proxy *Proxy) error { +type MaxscaleProxy struct { + Proxy +} + +func (cluster *Cluster) refreshMaxscale(proxy *MaxscaleProxy) error { + return proxy.refresh() +} + +func (proxy *MaxscaleProxy) refresh() error { + cluster := proxy.ClusterGroup if cluster.Conf.MxsOn == false { return nil } @@ -83,7 +92,12 @@ func (cluster *Cluster) refreshMaxscale(proxy *Proxy) error { return nil } -func (cluster *Cluster) initMaxscale(oldmaster *ServerMonitor, proxy *Proxy) { +func (cluster *Cluster) initMaxscale(proxy DatabaseProxy) { + proxy.Init() +} + +func (proxy *MaxscaleProxy) Init() { + cluster := proxy.ClusterGroup if cluster.Conf.MxsOn == false { return } @@ -178,37 +192,15 @@ func (cluster *Cluster) initMaxscale(oldmaster *ServerMonitor, proxy *Proxy) { } } } - if oldmaster != nil { - err = m.ClearServer(oldmaster.MxsServerName, "master") - if err != nil { - cluster.LogPrintf(LvlErr, "MaxScale client could not send command:%s", err) - } - - if oldmaster.State != stateSlave { - err = m.ClearServer(oldmaster.MxsServerName, "slave") - if err != nil { - cluster.LogPrintf(LvlErr, 
"MaxScale client could not send command:%s", err) - } - err = m.ClearServer(oldmaster.MxsServerName, "running") - if err != nil { - cluster.LogPrintf(LvlErr, "MaxScale client could not send command:%s", err) - } - } else { - err = m.SetServer(oldmaster.MxsServerName, "slave") - if err != nil { - cluster.LogPrintf(LvlErr, "MaxScale client could not send command:%s", err) - } - err = m.SetServer(oldmaster.MxsServerName, "running") - if err != nil { - cluster.LogPrintf(LvlErr, "MaxScale client could not send command:%s", err) - } - - } - } } } -func (cluster *Cluster) setMaintenanceMaxscale(pr *Proxy, server *ServerMonitor) { +func (cluster *Cluster) setMaintenanceMaxscale(pr DatabaseProxy, server *ServerMonitor) { + pr.SetMaintenance(server) +} + +func (pr *MaxscaleProxy) SetMaintenance(server *ServerMonitor) { + cluster := pr.ClusterGroup m := maxscale.MaxScale{Host: pr.Host, Port: pr.Port, User: pr.User, Pass: pr.Pass} err := m.Connect() if err != nil { @@ -225,3 +217,8 @@ func (cluster *Cluster) setMaintenanceMaxscale(pr *Proxy, server *ServerMonitor) } m.Close() } + +// Failover for MaxScale simply calls Init +func (prx *MaxscaleProxy) Failover() { + prx.Init() +} diff --git a/cluster/prx_myproxy.go b/cluster/prx_myproxy.go index fd43b8003..7b454462f 100644 --- a/cluster/prx_myproxy.go +++ b/cluster/prx_myproxy.go @@ -7,15 +7,24 @@ import ( "github.com/signal18/replication-manager/router/myproxy" ) -func (cluster *Cluster) initMyProxy(proxy *Proxy) { +type MyProxyProxy struct { + Proxy +} + +func (cluster *Cluster) initMyProxy(proxy *MyProxyProxy) { + proxy.Init() +} + +func (proxy *MyProxyProxy) Init() { if proxy.InternalProxy != nil { proxy.InternalProxy.Close() } + cluster := proxy.ClusterGroup db, err := sql.Open("mysql", cluster.master.DSN) if err != nil { cluster.LogPrintf(LvlErr, "Could not connect to Master for MyProxy %s", err) return } - proxy.InternalProxy, _ = myproxy.NewProxyServer("0.0.0.0:"+proxy.Port, proxy.User, proxy.Pass, db) + proxy.InternalProxy, _ = myproxy.NewProxyServer("0.0.0.0:"+proxy.GetPort(), proxy.GetUser(), proxy.GetPass(), db) go proxy.InternalProxy.Run() } diff --git a/cluster/prx_proxysql.go b/cluster/prx_proxysql.go index 270a23e9b..8453cdaeb 100644 --- a/cluster/prx_proxysql.go +++ b/cluster/prx_proxysql.go @@ -3,15 +3,48 @@ package cluster import ( "errors" "fmt" + "hash/crc64" "strconv" + "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/router/proxysql" "github.com/signal18/replication-manager/utils/dbhelper" "github.com/signal18/replication-manager/utils/misc" "github.com/signal18/replication-manager/utils/state" ) -func connectProxysql(proxy *Proxy) (proxysql.ProxySQL, error) { +type ProxySQLProxy struct { + Proxy +} + +func NewProxySQLProxy(clusterName string, proxyHost string, conf config.Config) *ProxySQLProxy { + prx := new(ProxySQLProxy) + prx.Name = proxyHost + prx.Host = proxyHost + prx.Type = config.ConstProxySqlproxy + prx.Port = conf.ProxysqlAdminPort + prx.ReadWritePort, _ = strconv.Atoi(conf.ProxysqlPort) + prx.User = conf.ProxysqlUser + prx.Pass = conf.ProxysqlPassword + prx.ReaderHostgroup, _ = strconv.Atoi(conf.ProxysqlReaderHostgroup) + prx.WriterHostgroup, _ = strconv.Atoi(conf.ProxysqlWriterHostgroup) + prx.WritePort, _ = strconv.Atoi(conf.ProxysqlPort) + prx.ReadPort, _ = strconv.Atoi(conf.ProxysqlPort) + + if conf.ProvNetCNI { + if conf.ClusterHead == "" { + prx.Host = prx.Host + "." + clusterName + ".svc." + conf.ProvOrchestratorCluster + } else { + prx.Host = prx.Host + "." 
+ conf.ClusterHead + ".svc." + conf.ProvOrchestratorCluster + } + } + + prx.Id = "px" + strconv.FormatUint(crc64.Checksum([]byte(clusterName+prx.Name+":"+strconv.Itoa(prx.WritePort)), crcTable), 10) + + return prx +} + +func (proxy *ProxySQLProxy) Connect() (proxysql.ProxySQL, error) { psql := proxysql.ProxySQL{ User: proxy.User, Password: proxy.Pass, @@ -29,25 +62,30 @@ func connectProxysql(proxy *Proxy) (proxysql.ProxySQL, error) { return psql, nil } -func (cluster *Cluster) AddShardProxy(proxysql *Proxy, shardproxy *Proxy) { +func (cluster *Cluster) AddShardProxy(proxysql *ProxySQLProxy, shardproxy *MdbsProxy) { + proxysql.AddShardProxy(shardproxy) +} + +func (proxy *ProxySQLProxy) AddShardProxy(shardproxy *MdbsProxy) { + cluster := proxy.ClusterGroup if cluster.Conf.ProxysqlOn == false { return } - psql, err := connectProxysql(proxysql) + psql, err := proxy.Connect() if err != nil { cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) return } defer psql.Connection.Close() psql.AddShardServer(misc.Unbracket(shardproxy.Host), shardproxy.Port) - } -func (cluster *Cluster) AddQueryRulesProxysql(proxy *Proxy, rules []proxysql.QueryRule) error { +func (proxy *ProxySQLProxy) AddQueryRulesProxysql(rules []proxysql.QueryRule) error { + cluster := proxy.ClusterGroup if cluster.Conf.ProxysqlOn == false { return errors.New("No proxysql enable in config") } - psql, err := connectProxysql(proxy) + psql, err := proxy.Connect() if err != nil { cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) return err @@ -57,12 +95,13 @@ func (cluster *Cluster) AddQueryRulesProxysql(proxy *Proxy, rules []proxysql.Que return err } -func (cluster *Cluster) initProxysql(proxy *Proxy) { +func (proxy *ProxySQLProxy) Init() { + cluster := proxy.ClusterGroup if !cluster.Conf.ProxysqlBootstrap || !cluster.Conf.ProxysqlOn { return } - psql, err := connectProxysql(proxy) + psql, err := proxy.Connect() if err != nil { cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) return @@ -114,8 +153,13 @@ func (cluster *Cluster) initProxysql(proxy *Proxy) { } } -func (cluster *Cluster) failoverProxysql(proxy *Proxy) { - psql, err := connectProxysql(proxy) +func (cluster *Cluster) failoverProxysql(proxy *ProxySQLProxy) { + proxy.Failover() +} + +func (proxy *ProxySQLProxy) Failover() { + cluster := proxy.ClusterGroup + psql, err := proxy.Connect() if err != nil { cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) return @@ -153,12 +197,13 @@ func (cluster *Cluster) failoverProxysql(proxy *Proxy) { } -func (cluster *Cluster) refreshProxysql(proxy *Proxy) error { +func (proxy *ProxySQLProxy) Refresh() error { + cluster := proxy.ClusterGroup if cluster.Conf.ProxysqlOn == false { return nil } - psql, err := connectProxysql(proxy) + psql, err := proxy.Connect() if err != nil { cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) cluster.sme.CopyOldStateFromUnknowServer(proxy.Name) @@ -326,12 +371,17 @@ func (cluster *Cluster) refreshProxysql(proxy *Proxy) error { return nil } -func (cluster *Cluster) setMaintenanceProxysql(proxy *Proxy, s *ServerMonitor) { +func (cluster *Cluster) setMaintenanceProxysql(proxy *ProxySQLProxy, s *ServerMonitor) 
{ + proxy.SetMaintenance(s) +} + +func (proxy *ProxySQLProxy) SetMaintenance(s *ServerMonitor) { + cluster := proxy.ClusterGroup if cluster.Conf.ProxysqlOn == false { return } - psql, err := connectProxysql(proxy) + psql, err := proxy.Connect() if err != nil { cluster.sme.AddState("ERR00051", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00051"], err), ErrFrom: "MON"}) return diff --git a/cluster/prx_shardproxy.go b/cluster/prx_shardproxy.go index 6cd6eee0e..091e92b0c 100644 --- a/cluster/prx_shardproxy.go +++ b/cluster/prx_shardproxy.go @@ -18,7 +18,6 @@ import ( _ "github.com/go-sql-driver/mysql" "github.com/jmoiron/sqlx" - "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/utils/dbhelper" "github.com/signal18/replication-manager/utils/misc" "github.com/signal18/replication-manager/utils/state" @@ -26,13 +25,12 @@ import ( var crcTable = crc64.MakeTable(crc64.ECMA) -func (cluster *Cluster) failoverMdbsproxy(oldmaster *ServerMonitor, proxy *Proxy) { - - cluster.failoverMdbShardBackends(proxy) - +type MdbsProxy struct { + Proxy } -func (cluster *Cluster) initMdbsproxy(oldmaster *ServerMonitor, proxy *Proxy) { +func (proxy *MdbsProxy) Init() { + cluster := proxy.ClusterGroup cluster.LogPrintf(LvlInfo, "Init MdbShardProxy %s %s", proxy.Host, proxy.Port) cluster.ShardProxyBootstrap(proxy) if cluster.Conf.MdbsProxyLoadSystem { @@ -42,7 +40,18 @@ func (cluster *Cluster) initMdbsproxy(oldmaster *ServerMonitor, proxy *Proxy) { cluster.AddShardingHostGroup(proxy) } -func (cluster *Cluster) failoverMdbShardBackends(proxy *Proxy) { +func (proxy *MdbsProxy) GetProxyConfig() string { + if proxy.ShardProxy == nil { + proxy.ClusterGroup.LogPrintf(LvlErr, "Can't get shard proxy config start monitoring") + proxy.ClusterGroup.ShardProxyBootstrap(proxy) + return proxy.ShardProxy.GetDatabaseConfig() + } else { + return proxy.ShardProxy.GetDatabaseConfig() + } +} + +func (proxy *MdbsProxy) Failover() { + cluster := proxy.ClusterGroup if cluster.master == nil { return } @@ -87,7 +96,7 @@ func (cluster *Cluster) failoverMdbShardBackends(proxy *Proxy) { } } -func (cluster *Cluster) CheckMdbShardServersSchema(proxy *Proxy) { +func (cluster *Cluster) CheckMdbShardServersSchema(proxy *MdbsProxy) { if cluster.master == nil { return } @@ -129,7 +138,7 @@ func (cluster *Cluster) CheckMdbShardServersSchema(proxy *Proxy) { } -func (cluster *Cluster) refreshMdbsproxy(oldmaster *ServerMonitor, proxy *Proxy) error { +func (cluster *Cluster) refreshMdbsproxy(oldmaster *ServerMonitor, proxy *MdbsProxy) error { if proxy.ShardProxy == nil { return errors.New("Sharding proxy no database monitor yet initialize") } @@ -194,7 +203,7 @@ func (cluster *Cluster) ShardProxyGetHeadCluster() *Cluster { return nil } -func (cluster *Cluster) ShardProxyCreateVTable(proxy *Proxy, schema string, table string, duplicates []*ServerMonitor, withreshard bool) error { +func (cluster *Cluster) ShardProxyCreateVTable(proxy *MdbsProxy, schema string, table string, duplicates []*ServerMonitor, withreshard bool) error { checksum64 := crc64.Checksum([]byte(schema+"_"+cluster.GetName()), crcTable) var err error var ddl string @@ -283,7 +292,7 @@ func (cluster *Cluster) ShardProxyCreateVTable(proxy *Proxy, schema string, tabl return nil } -func (cluster *Cluster) ShardSetUniversalTable(proxy *Proxy, schema string, table string) error { +func (cluster *Cluster) ShardSetUniversalTable(proxy *MdbsProxy, schema string, table string) error { master := cluster.GetMaster() if master == nil { return 
errors.New("Universal table no valid master on current cluster") @@ -315,8 +324,8 @@ func (cluster *Cluster) ShardSetUniversalTable(proxy *Proxy, schema string, tabl cluster.Conf.MdbsUniversalTables = cluster.Conf.MdbsUniversalTables + "," + schema + "." + table + "_copy" cluster.Conf.MdbsUniversalTables = cluster.Conf.MdbsUniversalTables + "," + schema + "." + table - for _, pr := range cluster.Proxies { - if cluster.Conf.MdbsProxyOn && pr.Type == config.ConstProxySpider { + for _, pri := range cluster.Proxies { + if pr, ok := pri.(*MdbsProxy); ok { err := cluster.ShardProxyCreateVTable(pr, schema, table+"_copy", duplicates, false) if err != nil { return err @@ -395,7 +404,7 @@ func (cluster *Cluster) ShardSetUniversalTable(proxy *Proxy, schema string, tabl return nil } -func (cluster *Cluster) ShardProxyMoveTable(proxy *Proxy, schema string, table string, destCluster *Cluster) error { +func (cluster *Cluster) ShardProxyMoveTable(proxy *MdbsProxy, schema string, table string, destCluster *Cluster) error { master := cluster.GetMaster() if master == nil { return errors.New("Move table no valid master on current cluster") @@ -422,8 +431,8 @@ func (cluster *Cluster) ShardProxyMoveTable(proxy *Proxy, schema string, table s var duplicates []*ServerMonitor duplicates = append(duplicates, destmaster) - for _, pr := range cluster.Proxies { - if cluster.Conf.MdbsProxyOn && pr.Type == config.ConstProxySpider { + for _, pri := range cluster.Proxies { + if pr, ok := pri.(*MdbsProxy); ok { err := destCluster.ShardProxyCreateVTable(pr, schema, table+"_copy", duplicates, false) if err != nil { return err @@ -503,7 +512,7 @@ func (cluster *Cluster) ShardProxyMoveTable(proxy *Proxy, schema string, table s return nil } -func (cluster *Cluster) ShardProxyReshardTable(proxy *Proxy, schema string, table string, clusters map[string]*Cluster) error { +func (cluster *Cluster) ShardProxyReshardTable(proxy *MdbsProxy, schema string, table string, clusters map[string]*Cluster) error { master := cluster.GetMaster() if master == nil { @@ -535,8 +544,8 @@ func (cluster *Cluster) ShardProxyReshardTable(proxy *Proxy, schema string, tabl } } - for _, pr := range cluster.Proxies { - if cluster.Conf.MdbsProxyOn && pr.Type == config.ConstProxySpider { + for _, pri := range cluster.Proxies { + if pr, ok := pri.(*MdbsProxy); ok { err := cluster.ShardProxyCreateVTable(pr, schema, table+"_reshard", duplicates, false) if err != nil { return err @@ -654,7 +663,7 @@ func (cluster *Cluster) RunQueryWithLog(server *ServerMonitor, query string) err return nil } -func (cluster *Cluster) ShardProxyBootstrap(proxy *Proxy) error { +func (cluster *Cluster) ShardProxyBootstrap(proxy *MdbsProxy) error { var err error if proxy.ShardProxy != nil { @@ -674,7 +683,7 @@ func (cluster *Cluster) ShardProxyBootstrap(proxy *Proxy) error { return err } -func (cluster *Cluster) ShardProxyCreateSystemTable(proxy *Proxy) error { +func (cluster *Cluster) ShardProxyCreateSystemTable(proxy *MdbsProxy) error { params := fmt.Sprintf("?timeout=60s") @@ -950,6 +959,6 @@ func (cluster *Cluster) ShardProxyCreateSystemTable(proxy *Proxy) error { return nil } -func (cluster *Cluster) MdbsproxyCopyTable(oldmaster *ServerMonitor, newmaster *ServerMonitor, proxy *Proxy) { +func (cluster *Cluster) MdbsproxyCopyTable(oldmaster *ServerMonitor, newmaster *ServerMonitor, proxy *MdbsProxy) { } diff --git a/cluster/prx_sphinx.go b/cluster/prx_sphinx.go index c9f832445..fbdd83e22 100644 --- a/cluster/prx_sphinx.go +++ b/cluster/prx_sphinx.go @@ -15,7 +15,11 @@ import ( 
"github.com/signal18/replication-manager/utils/state" ) -func connectSphinx(proxy *Proxy) (sphinx.SphinxSQL, error) { +type SphinxProxy struct { + Proxy +} + +func (proxy *SphinxProxy) Connect() (sphinx.SphinxSQL, error) { sphinx := sphinx.SphinxSQL{ User: proxy.User, Password: proxy.Pass, @@ -31,12 +35,14 @@ func connectSphinx(proxy *Proxy) (sphinx.SphinxSQL, error) { return sphinx, nil } -func (cluster *Cluster) initSphinx(proxy *Proxy) { +func (proxy *SphinxProxy) Init() { + cluster := proxy.ClusterGroup + if cluster.Conf.SphinxOn == false { return } - sphinx, err := connectSphinx(proxy) + sphinx, err := proxy.Connect() if err != nil { cluster.sme.AddState("ERR00058", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00058"], err), ErrFrom: "MON"}) return @@ -45,12 +51,13 @@ func (cluster *Cluster) initSphinx(proxy *Proxy) { } -func (cluster *Cluster) refreshSphinx(proxy *Proxy) error { +func (proxy *SphinxProxy) Refresh() error { + cluster := proxy.ClusterGroup if cluster.Conf.SphinxOn == false { return nil } - sphinx, err := connectSphinx(proxy) + sphinx, err := proxy.Connect() if err != nil { cluster.sme.AddState("ERR00058", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00058"], err), ErrFrom: "MON"}) return err @@ -79,7 +86,7 @@ func (cluster *Cluster) refreshSphinx(proxy *Proxy) error { return nil } -func (cluster *Cluster) setMaintenanceSphinx(proxy *Proxy, host string, port string) { +func (cluster *Cluster) setMaintenanceSphinx(proxy *SphinxProxy, host string, port string) { if cluster.Conf.SphinxOn == false { return } diff --git a/server/api_cluster.go b/server/api_cluster.go index 3c8a464e0..7c3ca0386 100644 --- a/server/api_cluster.go +++ b/server/api_cluster.go @@ -1669,8 +1669,8 @@ func (repman *ReplicationManager) handlerMuxClusterSchemaUniversalTable(w http.R http.Error(w, "No valid ACL", 403) return } - for _, pr := range mycluster.Proxies { - if mycluster.Conf.MdbsProxyOn { + for _, pri := range mycluster.Proxies { + if pr, ok := pri.(*cluster.MdbsProxy); ok { go mycluster.ShardSetUniversalTable(pr, vars["schemaName"], vars["tableName"]) } } @@ -1692,8 +1692,8 @@ func (repman *ReplicationManager) handlerMuxClusterSchemaReshardTable(w http.Res http.Error(w, "No valid ACL", 403) return } - for _, pr := range mycluster.Proxies { - if mycluster.Conf.MdbsProxyOn { + for _, pri := range mycluster.Proxies { + if pr, ok := pri.(*cluster.MdbsProxy); ok { clusters := mycluster.GetClusterListFromShardProxy(mycluster.Conf.MdbsProxyHosts) if vars["clusterList"] == "" { mycluster.ShardProxyReshardTable(pr, vars["schemaName"], vars["tableName"], clusters) @@ -1727,8 +1727,8 @@ func (repman *ReplicationManager) handlerMuxClusterSchemaMoveTable(w http.Respon http.Error(w, "No valid ACL", 403) return } - for _, pr := range mycluster.Proxies { - if mycluster.Conf.MdbsProxyOn { + for _, pri := range mycluster.Proxies { + if pr, ok := pri.(*cluster.MdbsProxy); ok { if vars["clusterShard"] != "" { destcluster := repman.getClusterByName(vars["clusterShard"]) if mycluster != nil { diff --git a/server/api_database.go b/server/api_database.go index 9158bea0f..d4b572dd6 100644 --- a/server/api_database.go +++ b/server/api_database.go @@ -1374,7 +1374,7 @@ func (repman *ReplicationManager) handlerMuxServersPortConfig(w http.ResponseWri } else if proxy != nil { proxy.GetProxyConfig() - data, err := ioutil.ReadFile(string(proxy.Datadir + "/config.tar.gz")) + data, err := ioutil.ReadFile(string(proxy.GetDatadir() + "/config.tar.gz")) if err != nil { 
r.URL.Path = r.URL.Path + ".tar.gz" w.WriteHeader(404) diff --git a/server/http.go b/server/http.go index 55f2286f1..008d7bb0b 100644 --- a/server/http.go +++ b/server/http.go @@ -44,7 +44,7 @@ func (repman *ReplicationManager) httpserver() { // before starting the http server, check that the dashboard is present if err := repman.testFile("app.html"); err != nil { - log.Println("ERROR", "Dashboard app.html file missing - will not start http server %s", err) + log.Printf("ERROR", "Dashboard app.html file missing - will not start http server %s\n", err) return } From 96286427b157c9b1dfffa686456c26ade226b904 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Thu, 21 Jan 2021 23:27:34 +0100 Subject: [PATCH 03/84] spelling correction --- cluster/{prov_slpaos.go => prov_slapos.go} | 0 cluster/{prov_splaos_prx.go => prov_slapos_prx.go} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename cluster/{prov_slpaos.go => prov_slapos.go} (100%) rename cluster/{prov_splaos_prx.go => prov_slapos_prx.go} (100%) diff --git a/cluster/prov_slpaos.go b/cluster/prov_slapos.go similarity index 100% rename from cluster/prov_slpaos.go rename to cluster/prov_slapos.go diff --git a/cluster/prov_splaos_prx.go b/cluster/prov_slapos_prx.go similarity index 100% rename from cluster/prov_splaos_prx.go rename to cluster/prov_slapos_prx.go From f1906fe46548f3676dec8b790c289d71adcdca68 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Fri, 22 Jan 2021 00:05:59 +0100 Subject: [PATCH 04/84] Tiny code revert to get a smaller diff --- cluster/prx_get.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/cluster/prx_get.go b/cluster/prx_get.go index 1ef8b3aa0..8168d53e1 100644 --- a/cluster/prx_get.go +++ b/cluster/prx_get.go @@ -224,8 +224,7 @@ func (proxy *Proxy) GetEnv() map[string]string { } func (proxy *Proxy) GetBaseEnv() map[string]string { - var env map[string]string - env = map[string]string{ + return map[string]string{ "%%ENV:NODES_CPU_CORES%%": proxy.ClusterGroup.Conf.ProvCores, "%%ENV:SVC_CONF_ENV_MAX_CORES%%": proxy.ClusterGroup.Conf.ProvCores, "%%ENV:SVC_CONF_ENV_CRC32_ID%%": string(proxy.Id[2:10]), @@ -265,8 +264,6 @@ func (proxy *Proxy) GetBaseEnv() map[string]string { "%%ENV:SVC_CONF_ENV_MRM_CLUSTER_NAME%%": proxy.ClusterGroup.GetClusterName(), "%%ENV:SVC_CONF_ENV_DATADIR%%": proxy.GetDatadir(), } - - return env } func (proxy *Proxy) GetConfigProxyModule(variable string) string { From 731ce574f1bde40d241d3abad5feee1bc137b8c5 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Tue, 9 Feb 2021 22:38:01 +0100 Subject: [PATCH 05/84] Fix a few missed conflicts, remove cluster.newProxy() and replace by proxy.SetSuspect() --- cluster/prov_opensvc_prx.go | 13 ++++--------- cluster/prx.go | 37 +++++++++++++++---------------------- cluster/prx_get.go | 10 ---------- 3 files changed, 19 insertions(+), 41 deletions(-) diff --git a/cluster/prov_opensvc_prx.go b/cluster/prov_opensvc_prx.go index 827841b26..ee039fe4c 100644 --- a/cluster/prov_opensvc_prx.go +++ b/cluster/prov_opensvc_prx.go @@ -32,7 +32,7 @@ func (cluster *Cluster) OpenSVCStopProxyService(server DatabaseProxy) error { } svc.StopService(agent.Node_id, service.Svc_id) } else { - err := svc.StopServiceV2(cluster.Name, server.GetServiceName(), server.Agent) + err := svc.StopServiceV2(cluster.Name, server.GetServiceName(), server.GetAgent()) if err != nil { cluster.LogPrintf(LvlErr, "Can not stop proxy: %s ", err) return err @@ -54,7 +54,7 @@ func (cluster *Cluster) OpenSVCStartProxyService(server DatabaseProxy) error { } 
svc.StartService(agent.Node_id, service.Svc_id) } else { - err := svc.StartServiceV2(cluster.Name, server.GetServiceName(), server.Agent) + err := svc.StartServiceV2(cluster.Name, server.GetServiceName(), server.GetAgent()) if err != nil { cluster.LogPrintf(LvlErr, "Can not stop proxy: %s ", err) return err @@ -302,12 +302,7 @@ func (cluster *Cluster) OpenSVCProvisionProxyService(pri DatabaseProxy) error { return nil } -<<<<<<< HEAD -func (cluster *Cluster) OpenSVCGetProxyTemplateV2(servers string, agent opensvc.Host, pri DatabaseProxy) (string, error) { -======= func (cluster *Cluster) OpenSVCGetProxyTemplateV2(servers string, pri DatabaseProxy) (string, error) { ->>>>>>> 80837f54dec55bac9b6fc7c27f9e18a07f795b50 - svcsection := make(map[string]map[string]string) svcsection["DEFAULT"] = pri.OpenSVCGetProxyDefaultSection() svcsection["ip#01"] = cluster.OpenSVCGetNetSection() @@ -329,7 +324,7 @@ func (cluster *Cluster) OpenSVCGetProxyTemplateV2(servers string, pri DatabasePr } svcsection["container#01"] = cluster.OpenSVCGetNamespaceContainerSection() - svcsection["container#02"] = cluster.OpenSVCGetInitContainerSection(prx.GetPort()) + svcsection["container#02"] = cluster.OpenSVCGetInitContainerSection(pri.GetPort()) if prx, ok := pri.(*MdbsProxy); ok { svcsection["container#prx"] = cluster.OpenSVCGetShardproxyContainerSection(prx) @@ -425,7 +420,7 @@ func (cluster *Cluster) OpenSVCGetProxyEnvSection(servers string, prx DatabasePr cluster.Conf.ProvProxRouteAddr, cluster.Conf.ProvProxRoutePort = misc.SplitHostPort(cluster.Conf.ExtProxyVIP) } svcenv := make(map[string]string) - svcenv["nodes"] = prx.Agent + svcenv["nodes"] = prx.GetAgent() svcenv["base_dir"] = "/srv/{namespace}-{svcname}" svcenv["size"] = cluster.Conf.ProvProxDisk + "g" svcenv["ip_pod01"] = prx.GetHost() diff --git a/cluster/prx.go b/cluster/prx.go index 89ac67f46..8067dc4e3 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -65,6 +65,10 @@ type Proxy struct { Agent string `json:"agent"` } +func (p *Proxy) GetAgent() string { + return p.Agent +} + func (p *Proxy) GetType() string { return p.Type } @@ -121,6 +125,10 @@ func (p *Proxy) SetPrevState(state string) { p.PrevState = state } +func (p *Proxy) SetSuspect() { + p.State = stateSuspect +} + type DatabaseProxy interface { // Init oldmaster is only currently used by Maxscale Init() @@ -144,6 +152,7 @@ type DatabaseProxy interface { GetFailCount() int SetFailCount(c int) + GetAgent() string GetName() string GetHost() string GetPort() string @@ -187,6 +196,8 @@ type DatabaseProxy interface { OpenSVCGetProxyDefaultSection() map[string]string SetWaitStartCookie() SetWaitStopCookie() + + SetSuspect() } type Backend struct { @@ -323,7 +334,8 @@ func (cluster *Cluster) newProxyList() error { prx.SetDataDir() prx.SetServiceName(cluster.Name, prx.Name) cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) - cluster.Proxies[ctproxy], err = cluster.newProxy(prx) + prx.State = stateSuspect + cluster.Proxies[ctproxy] = prx ctproxy++ } if cluster.Conf.ProxysqlOn { @@ -331,12 +343,6 @@ func (cluster *Cluster) newProxyList() error { for k, proxyHost := range strings.Split(cluster.Conf.ProxysqlHosts, ",") { prx := NewProxySQLProxy(cluster.Name, proxyHost, cluster.Conf) prx.SetPlacement(k, cluster.Conf.ProvProxAgents, cluster.Conf.SlapOSProxySQLPartitions, cluster.Conf.ProxysqlHostsIPV6) - if k < len(slapospartitions) { - prx.SlapOSDatadir = slapospartitions[k] - } - if k < len(ipv6hosts) { - prx.HostIPV6 = ipv6hosts[k] - } if cluster.key != nil { p 
:= crypto.Password{Key: cluster.key} @@ -361,12 +367,6 @@ func (cluster *Cluster) newProxyList() error { for k, proxyHost := range strings.Split(cluster.Conf.MdbsProxyHosts, ",") { prx := new(MdbsProxy) prx.SetPlacement(k, cluster.Conf.ProvProxAgents, cluster.Conf.SlapOSShardProxyPartitions, cluster.Conf.MdbsHostsIPV6) - if k < len(slapospartitions) { - prx.SlapOSDatadir = slapospartitions[k] - } - if k < len(ipv6hosts) { - prx.HostIPV6 = ipv6hosts[k] - } prx.Type = config.ConstProxySpider prx.Host, prx.Port = misc.SplitHostPort(proxyHost) prx.User, prx.Pass = misc.SplitPair(cluster.Conf.MdbsProxyCredential) @@ -448,21 +448,14 @@ func (cluster *Cluster) newProxyList() error { prx.SetDataDir() prx.SetServiceName(cluster.Name, prx.Name) cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) - cluster.Proxies[ctproxy], err = cluster.newProxy(prx) + prx.State = stateSuspect + cluster.Proxies[ctproxy] = prx ctproxy++ } return nil } -// TODO: clarify why this happens -func (cluster *Cluster) newProxy(p *Proxy) (*Proxy, error) { - proxy := new(Proxy) - proxy = p - proxy.State = stateSuspect - return proxy, nil -} - func (cluster *Cluster) InjectProxiesTraffic() { var definer string // Found server from ServerId diff --git a/cluster/prx_get.go b/cluster/prx_get.go index a17742eb0..af5074c68 100644 --- a/cluster/prx_get.go +++ b/cluster/prx_get.go @@ -82,16 +82,6 @@ func (prx *Proxy) GetCluster() (*sqlx.DB, error) { func (proxy *Proxy) GetProxyConfig() string { proxy.ClusterGroup.LogPrintf(LvlInfo, "Proxy Config generation "+proxy.Datadir+"/config.tar.gz") - if proxy.Type == config.ConstProxySpider { - if proxy.ShardProxy == nil { - proxy.ClusterGroup.LogPrintf(LvlErr, "Can't get shard proxy config start monitoring") - proxy.ClusterGroup.ShardProxyBootstrap(proxy) - return proxy.ShardProxy.GetDatabaseConfig() - } else { - return proxy.ShardProxy.GetDatabaseConfig() - } - } - type File struct { Path string `json:"path"` Content string `json:"fmt"` From 4cf9a6f5401cd537f67607217c68b61d54e0af84 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Sat, 13 Feb 2021 19:19:21 +0100 Subject: [PATCH 06/84] Rename MdbsProxy to MariadbShardProxy and move Flags for Proxies from the init to the respective Proxies --- cluster/cluster.go | 2 +- cluster/cluster_add.go | 2 +- cluster/prov_localhost_prx.go | 6 +- cluster/prov_opensvc_prx.go | 4 +- cluster/prov_opensvc_shardproxy.go | 4 +- cluster/prx.go | 7 +- cluster/prx_haproxy.go | 20 ++++++ cluster/prx_maxscale.go | 24 +++++++ cluster/prx_myproxy.go | 9 +++ cluster/prx_proxysql.go | 26 +++++++- cluster/prx_shardproxy.go | 45 ++++++++----- cluster/prx_sphinx.go | 17 +++++ main.go | 102 +++++++---------------------- server/api_cluster.go | 6 +- 14 files changed, 161 insertions(+), 113 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index 144a35650..a51fb197a 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -844,7 +844,7 @@ func (cluster *Cluster) MonitorSchema() { tables[t.Table_schema+"."+t.Table_name] = t if haschanged && cluster.Conf.MdbsProxyOn { for _, pri := range cluster.Proxies { - if prx, ok := pri.(*MdbsProxy); ok { + if prx, ok := pri.(*MariadbShardProxy); ok { if !(t.Table_schema == "replication_manager_schema" || strings.Contains(t.Table_name, "_copy") == true || strings.Contains(t.Table_name, "_back") == true || strings.Contains(t.Table_name, "_old") == true || strings.Contains(t.Table_name, "_reshard") == true) { cluster.LogPrintf(LvlDbg, "blabla table %s %s %s", duplicates, 
t.Table_schema, t.Table_name) cluster.ShardProxyCreateVTable(prx, t.Table_schema, t.Table_name, duplicates, false) diff --git a/cluster/cluster_add.go b/cluster/cluster_add.go index 7c5a4abdd..a000779ba 100644 --- a/cluster/cluster_add.go +++ b/cluster/cluster_add.go @@ -141,7 +141,7 @@ func (cluster *Cluster) AddUser(user string) error { return nil } -func (cluster *Cluster) AddShardingHostGroup(proxy *MdbsProxy) error { +func (cluster *Cluster) AddShardingHostGroup(proxy *MariadbShardProxy) error { if cluster.Conf.ClusterHead != "" { return nil } diff --git a/cluster/prov_localhost_prx.go b/cluster/prov_localhost_prx.go index 1fc0dbe6e..2ebdca99e 100644 --- a/cluster/prov_localhost_prx.go +++ b/cluster/prov_localhost_prx.go @@ -9,7 +9,7 @@ package cluster func (cluster *Cluster) LocalhostProvisionProxyService(pri DatabaseProxy) error { pri.GetProxyConfig() - if prx, ok := pri.(*MdbsProxy); ok { + if prx, ok := pri.(*MariadbShardProxy); ok { cluster.LogPrintf(LvlInfo, "Bootstrap MariaDB Sharding Cluster") srv, _ := cluster.newServerMonitor(prx.Host+":"+prx.GetPort(), prx.User, prx.Pass, true, "") err := srv.Refresh() @@ -50,7 +50,7 @@ func (cluster *Cluster) LocalhostProvisionProxyService(pri DatabaseProxy) error } func (cluster *Cluster) LocalhostUnprovisionProxyService(pri DatabaseProxy) error { - if prx, ok := pri.(*MdbsProxy); ok { + if prx, ok := pri.(*MariadbShardProxy); ok { cluster.LocalhostUnprovisionDatabaseService(prx.ShardProxy) } @@ -67,7 +67,7 @@ func (cluster *Cluster) LocalhostUnprovisionProxyService(pri DatabaseProxy) erro } func (cluster *Cluster) LocalhostStartProxyService(pri DatabaseProxy) error { - if prx, ok := pri.(*MdbsProxy); ok { + if prx, ok := pri.(*MariadbShardProxy); ok { prx.ShardProxy.Shutdown() } diff --git a/cluster/prov_opensvc_prx.go b/cluster/prov_opensvc_prx.go index ee039fe4c..8f3f686ca 100644 --- a/cluster/prov_opensvc_prx.go +++ b/cluster/prov_opensvc_prx.go @@ -145,7 +145,7 @@ func (cluster *Cluster) OpenSVCProvisionProxyService(pri DatabaseProxy) error { } } } - if prx, ok := pri.(*MdbsProxy); ok { + if prx, ok := pri.(*MariadbShardProxy); ok { if strings.Contains(svc.ProvProxAgents, agent.Node_name) { srv, _ := cluster.newServerMonitor(prx.GetHost()+":"+prx.GetPort(), prx.User, prx.Pass, true, cluster.GetDomain()) err := srv.Refresh() @@ -326,7 +326,7 @@ func (cluster *Cluster) OpenSVCGetProxyTemplateV2(servers string, pri DatabasePr svcsection["container#01"] = cluster.OpenSVCGetNamespaceContainerSection() svcsection["container#02"] = cluster.OpenSVCGetInitContainerSection(pri.GetPort()) - if prx, ok := pri.(*MdbsProxy); ok { + if prx, ok := pri.(*MariadbShardProxy); ok { svcsection["container#prx"] = cluster.OpenSVCGetShardproxyContainerSection(prx) } diff --git a/cluster/prov_opensvc_shardproxy.go b/cluster/prov_opensvc_shardproxy.go index 1754ff4c0..63b1f6840 100644 --- a/cluster/prov_opensvc_shardproxy.go +++ b/cluster/prov_opensvc_shardproxy.go @@ -15,7 +15,7 @@ import ( "github.com/signal18/replication-manager/opensvc" ) -func (cluster *Cluster) OpenSVCGetShardproxyContainerSection(server *MdbsProxy) map[string]string { +func (cluster *Cluster) OpenSVCGetShardproxyContainerSection(server *MariadbShardProxy) map[string]string { svccontainer := make(map[string]string) if server.ClusterGroup.Conf.ProvProxType == "docker" || server.ClusterGroup.Conf.ProvProxType == "podman" || server.ClusterGroup.Conf.ProvProxType == "oci" { @@ -35,7 +35,7 @@ func (cluster *Cluster) OpenSVCGetShardproxyContainerSection(server *MdbsProxy) return svccontainer 
} -func (cluster *Cluster) GetShardproxyTemplate(collector opensvc.Collector, servers string, agent opensvc.Host, prx *MdbsProxy) (string, error) { +func (cluster *Cluster) GetShardproxyTemplate(collector opensvc.Collector, servers string, agent opensvc.Host, prx *MariadbShardProxy) (string, error) { ipPods := "" diff --git a/cluster/prx.go b/cluster/prx.go index 8067dc4e3..e3adf2568 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -26,6 +26,7 @@ import ( "github.com/signal18/replication-manager/utils/dbhelper" "github.com/signal18/replication-manager/utils/misc" "github.com/signal18/replication-manager/utils/state" + "github.com/spf13/pflag" ) // Proxy defines a proxy @@ -130,7 +131,7 @@ func (p *Proxy) SetSuspect() { } type DatabaseProxy interface { - // Init oldmaster is only currently used by Maxscale + AddFlags(flags *pflag.FlagSet, conf config.Config) Init() Refresh() error Failover() @@ -365,7 +366,7 @@ func (cluster *Cluster) newProxyList() error { } if cluster.Conf.MdbsProxyHosts != "" && cluster.Conf.MdbsProxyOn { for k, proxyHost := range strings.Split(cluster.Conf.MdbsProxyHosts, ",") { - prx := new(MdbsProxy) + prx := new(MariadbShardProxy) prx.SetPlacement(k, cluster.Conf.ProvProxAgents, cluster.Conf.SlapOSShardProxyPartitions, cluster.Conf.MdbsHostsIPV6) prx.Type = config.ConstProxySpider prx.Host, prx.Port = misc.SplitHostPort(proxyHost) @@ -428,7 +429,7 @@ func (cluster *Cluster) newProxyList() error { } } if cluster.Conf.MyproxyOn { - prx := new(Proxy) + prx := new(MyProxyProxy) prx.Type = config.ConstProxyMyProxy prx.Port = strconv.Itoa(cluster.Conf.MyproxyPort) prx.Host = "0.0.0.0" diff --git a/cluster/prx_haproxy.go b/cluster/prx_haproxy.go index 326f53c53..a4777f69e 100644 --- a/cluster/prx_haproxy.go +++ b/cluster/prx_haproxy.go @@ -18,14 +18,34 @@ import ( "strconv" "strings" + "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/router/haproxy" "github.com/signal18/replication-manager/utils/state" + "github.com/spf13/pflag" ) type HaproxyProxy struct { Proxy } +func (proxy *HaproxyProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { + flags.BoolVar(&conf.HaproxyOn, "haproxy", false, "Wrapper to use HaProxy on same host") + flags.StringVar(&conf.HaproxyMode, "haproxy-mode", "runtimeapi", "HaProxy mode [standby|runtimeapi|dataplaneapi]") + flags.StringVar(&conf.HaproxyUser, "haproxy-user", "admin", "Haproxy API user") + flags.StringVar(&conf.HaproxyPassword, "haproxy-password", "admin", "Haproxy API password") + flags.StringVar(&conf.HaproxyHosts, "haproxy-servers", "127.0.0.1", "HaProxy hosts") + flags.IntVar(&conf.HaproxyAPIPort, "haproxy-api-port", 1999, "HaProxy runtime api port") + flags.IntVar(&conf.HaproxyWritePort, "haproxy-write-port", 3306, "HaProxy read-write port to leader") + flags.IntVar(&conf.HaproxyReadPort, "haproxy-read-port", 3307, "HaProxy load balance read port to all nodes") + flags.IntVar(&conf.HaproxyStatPort, "haproxy-stat-port", 1988, "HaProxy statistics port") + flags.StringVar(&conf.HaproxyBinaryPath, "haproxy-binary-path", "/usr/sbin/haproxy", "HaProxy binary location") + flags.StringVar(&conf.HaproxyReadBindIp, "haproxy-ip-read-bind", "0.0.0.0", "HaProxy input bind address for read") + flags.StringVar(&conf.HaproxyWriteBindIp, "haproxy-ip-write-bind", "0.0.0.0", "HaProxy input bind address for write") + flags.StringVar(&conf.HaproxyAPIReadBackend, "haproxy-api-read-backend", "service_read", "HaProxy API backend name used for read") + flags.StringVar(&conf.HaproxyAPIWriteBackend, 
"haproxy-api-write-backend", "service_write", "HaProxy API backend name used for write") + flags.StringVar(&conf.HaproxyHostsIPV6, "haproxy-servers-ipv6", "", "ipv6 bind address ") +} + func (proxy *HaproxyProxy) Init() { cluster := proxy.ClusterGroup haproxydatadir := proxy.Datadir + "/var" diff --git a/cluster/prx_maxscale.go b/cluster/prx_maxscale.go index 2d7752ace..e423377dd 100644 --- a/cluster/prx_maxscale.go +++ b/cluster/prx_maxscale.go @@ -12,8 +12,10 @@ import ( "fmt" "strconv" + "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/router/maxscale" "github.com/signal18/replication-manager/utils/state" + "github.com/spf13/pflag" ) type MaxscaleProxy struct { @@ -24,6 +26,28 @@ func (cluster *Cluster) refreshMaxscale(proxy *MaxscaleProxy) error { return proxy.refresh() } +func (proxy *MaxscaleProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { + flags.BoolVar(&conf.MxsOn, "maxscale", false, "MaxScale proxy server is query for backend status") + flags.BoolVar(&conf.CheckFalsePositiveMaxscale, "failover-falsepositive-maxscale", false, "Failover checks that maxscale detect failed master") + flags.IntVar(&conf.CheckFalsePositiveMaxscaleTimeout, "failover-falsepositive-maxscale-timeout", 14, "Failover checks that maxscale detect failed master") + flags.BoolVar(&conf.MxsBinlogOn, "maxscale-binlog", false, "Maxscale binlog server topolgy") + flags.MarkDeprecated("maxscale-monitor", "Deprecate disable maxscale monitoring for 2 nodes cluster") + flags.BoolVar(&conf.MxsDisableMonitor, "maxscale-disable-monitor", false, "Disable maxscale monitoring and fully drive server state") + flags.StringVar(&conf.MxsGetInfoMethod, "maxscale-get-info-method", "maxadmin", "How to get infos from Maxscale maxinfo|maxadmin") + flags.StringVar(&conf.MxsHost, "maxscale-servers", "", "MaxScale hosts ") + flags.StringVar(&conf.MxsPort, "maxscale-port", "6603", "MaxScale admin port") + flags.StringVar(&conf.MxsUser, "maxscale-user", "admin", "MaxScale admin user") + flags.StringVar(&conf.MxsPass, "maxscale-pass", "mariadb", "MaxScale admin password") + flags.IntVar(&conf.MxsWritePort, "maxscale-write-port", 3306, "MaxScale read-write port to leader") + flags.IntVar(&conf.MxsReadPort, "maxscale-read-port", 3307, "MaxScale load balance read port to all nodes") + flags.IntVar(&conf.MxsReadWritePort, "maxscale-read-write-port", 3308, "MaxScale load balance read port to all nodes") + flags.IntVar(&conf.MxsMaxinfoPort, "maxscale-maxinfo-port", 3309, "MaxScale maxinfo plugin http port") + flags.IntVar(&conf.MxsBinlogPort, "maxscale-binlog-port", 3309, "MaxScale maxinfo plugin http port") + flags.BoolVar(&conf.MxsServerMatchPort, "maxscale-server-match-port", false, "Match servers running on same host with different port") + flags.StringVar(&conf.MxsBinaryPath, "maxscale-binary-path", "/usr/sbin/maxscale", "Maxscale binary location") + flags.StringVar(&conf.MxsHostsIPV6, "maxscale-servers-ipv6", "", "ipv6 bind address ") +} + func (proxy *MaxscaleProxy) refresh() error { cluster := proxy.ClusterGroup if cluster.Conf.MxsOn == false { diff --git a/cluster/prx_myproxy.go b/cluster/prx_myproxy.go index 7b454462f..66c02e017 100644 --- a/cluster/prx_myproxy.go +++ b/cluster/prx_myproxy.go @@ -4,7 +4,9 @@ import ( "database/sql" _ "github.com/go-sql-driver/mysql" + "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/router/myproxy" + "github.com/spf13/pflag" ) type MyProxyProxy struct { @@ -15,6 +17,13 @@ func (cluster *Cluster) 
initMyProxy(proxy *MyProxyProxy) { proxy.Init() } +func (proxy *MyProxyProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { + flags.BoolVar(&conf.MyproxyOn, "myproxy", false, "Use Internal Proxy") + flags.IntVar(&conf.MyproxyPort, "myproxy-port", 4000, "Internal proxy read/write port") + flags.StringVar(&conf.MyproxyUser, "myproxy-user", "admin", "Myproxy user") + flags.StringVar(&conf.MyproxyPassword, "myproxy-password", "repman", "Myproxy password") +} + func (proxy *MyProxyProxy) Init() { if proxy.InternalProxy != nil { proxy.InternalProxy.Close() diff --git a/cluster/prx_proxysql.go b/cluster/prx_proxysql.go index 76583c950..b774ac258 100644 --- a/cluster/prx_proxysql.go +++ b/cluster/prx_proxysql.go @@ -11,6 +11,7 @@ import ( "github.com/signal18/replication-manager/utils/dbhelper" "github.com/signal18/replication-manager/utils/misc" "github.com/signal18/replication-manager/utils/state" + "github.com/spf13/pflag" ) type ProxySQLProxy struct { @@ -44,6 +45,27 @@ func NewProxySQLProxy(clusterName string, proxyHost string, conf config.Config) return prx } +func (proxy *ProxySQLProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { + flags.BoolVar(&conf.ProxysqlOn, "proxysql", false, "Use ProxySQL") + flags.BoolVar(&conf.ProxysqlSaveToDisk, "proxysql-save-to-disk", false, "Save proxysql changes to SQLite") + flags.StringVar(&conf.ProxysqlHosts, "proxysql-servers", "", "ProxySQL hosts") + flags.StringVar(&conf.ProxysqlHostsIPV6, "proxysql-servers-ipv6", "", "ProxySQL extra IPV6 bind for interfaces") + flags.StringVar(&conf.ProxysqlPort, "proxysql-port", "3306", "ProxySQL read/write proxy port") + flags.StringVar(&conf.ProxysqlAdminPort, "proxysql-admin-port", "6032", "ProxySQL admin interface port") + flags.StringVar(&conf.ProxysqlReaderHostgroup, "proxysql-reader-hostgroup", "1", "ProxySQL reader hostgroup") + flags.StringVar(&conf.ProxysqlWriterHostgroup, "proxysql-writer-hostgroup", "0", "ProxySQL writer hostgroup") + flags.StringVar(&conf.ProxysqlUser, "proxysql-user", "admin", "ProxySQL admin user") + flags.StringVar(&conf.ProxysqlPassword, "proxysql-password", "admin", "ProxySQL admin password") + flags.BoolVar(&conf.ProxysqlCopyGrants, "proxysql-bootstrap-users", true, "Copy users from master") + flags.BoolVar(&conf.ProxysqlMultiplexing, "proxysql-multiplexing", false, "Multiplexing") + flags.BoolVar(&conf.ProxysqlBootstrap, "proxysql-bootstrap", false, "Bootstrap ProxySQL backend servers and hostgroup") + flags.BoolVar(&conf.ProxysqlBootstrapVariables, "proxysql-bootstrap-variables", false, "Bootstrap ProxySQL variables") + flags.BoolVar(&conf.ProxysqlBootstrapHG, "proxysql-bootstrap-hostgroups", false, "Bootstrap ProxySQL hostgroups") + flags.BoolVar(&conf.ProxysqlBootstrapQueryRules, "proxysql-bootstrap-query-rules", false, "Bootstrap Query rules into ProxySQL") + flags.StringVar(&conf.ProxysqlBinaryPath, "proxysql-binary-path", "/usr/sbin/proxysql", "proxysql binary location") + flags.BoolVar(&conf.ProxysqlMasterIsReader, "proxysql-master-is-reader", false, "Add the master to the reader group") +} + func (proxy *ProxySQLProxy) Connect() (proxysql.ProxySQL, error) { psql := proxysql.ProxySQL{ User: proxy.User, @@ -62,11 +84,11 @@ func (proxy *ProxySQLProxy) Connect() (proxysql.ProxySQL, error) { return psql, nil } -func (cluster *Cluster) AddShardProxy(proxysql *ProxySQLProxy, shardproxy *MdbsProxy) { +func (cluster *Cluster) AddShardProxy(proxysql *ProxySQLProxy, shardproxy *MariadbShardProxy) { proxysql.AddShardProxy(shardproxy) } -func (proxy
*ProxySQLProxy) AddShardProxy(shardproxy *MdbsProxy) { +func (proxy *ProxySQLProxy) AddShardProxy(shardproxy *MariadbShardProxy) { cluster := proxy.ClusterGroup if cluster.Conf.ProxysqlOn == false { return diff --git a/cluster/prx_shardproxy.go b/cluster/prx_shardproxy.go index 091e92b0c..4787ee163 100644 --- a/cluster/prx_shardproxy.go +++ b/cluster/prx_shardproxy.go @@ -18,18 +18,31 @@ import ( _ "github.com/go-sql-driver/mysql" "github.com/jmoiron/sqlx" + "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/utils/dbhelper" "github.com/signal18/replication-manager/utils/misc" "github.com/signal18/replication-manager/utils/state" + "github.com/spf13/pflag" ) var crcTable = crc64.MakeTable(crc64.ECMA) -type MdbsProxy struct { +type MariadbShardProxy struct { Proxy } -func (proxy *MdbsProxy) Init() { +func (proxy *MariadbShardProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { + flags.BoolVar(&conf.MdbsProxyOn, "shardproxy", false, "MariaDB Spider proxy") + flags.StringVar(&conf.MdbsProxyHosts, "shardproxy-servers", "127.0.0.1:3307", "MariaDB spider proxy hosts IP:Port,IP:Port") + flags.StringVar(&conf.MdbsProxyCredential, "shardproxy-credential", "root:mariadb", "MariaDB spider proxy credential") + flags.BoolVar(&conf.MdbsProxyCopyGrants, "shardproxy-copy-grants", true, "Copy grants from shards master") + flags.BoolVar(&conf.MdbsProxyLoadSystem, "shardproxy-load-system", true, "Load Spider system tables") + flags.StringVar(&conf.MdbsUniversalTables, "shardproxy-universal-tables", "replication_manager_schema.bench", "MariaDB spider proxy table list that are federated to all masters") + flags.StringVar(&conf.MdbsIgnoreTables, "shardproxy-ignore-tables", "", "MariaDB spider proxy master table list that are ignored") + flags.StringVar(&conf.MdbsHostsIPV6, "shardproxy-servers-ipv6", "", "ipv6 bind address ") +} + +func (proxy *MariadbShardProxy) Init() { cluster := proxy.ClusterGroup cluster.LogPrintf(LvlInfo, "Init MdbShardProxy %s %s", proxy.Host, proxy.Port) cluster.ShardProxyBootstrap(proxy) @@ -40,7 +53,7 @@ func (proxy *MdbsProxy) Init() { cluster.AddShardingHostGroup(proxy) } -func (proxy *MdbsProxy) GetProxyConfig() string { +func (proxy *MariadbShardProxy) GetProxyConfig() string { if proxy.ShardProxy == nil { proxy.ClusterGroup.LogPrintf(LvlErr, "Can't get shard proxy config start monitoring") proxy.ClusterGroup.ShardProxyBootstrap(proxy) @@ -50,7 +63,7 @@ func (proxy *MdbsProxy) GetProxyConfig() string { } } -func (proxy *MdbsProxy) Failover() { +func (proxy *MariadbShardProxy) Failover() { cluster := proxy.ClusterGroup if cluster.master == nil { return @@ -96,7 +109,7 @@ func (proxy *MdbsProxy) Failover() { } } -func (cluster *Cluster) CheckMdbShardServersSchema(proxy *MdbsProxy) { +func (cluster *Cluster) CheckMdbShardServersSchema(proxy *MariadbShardProxy) { if cluster.master == nil { return } @@ -138,7 +151,7 @@ func (cluster *Cluster) CheckMdbShardServersSchema(proxy *MdbsProxy) { } -func (cluster *Cluster) refreshMdbsproxy(oldmaster *ServerMonitor, proxy *MdbsProxy) error { +func (cluster *Cluster) refreshMdbsproxy(oldmaster *ServerMonitor, proxy *MariadbShardProxy) error { if proxy.ShardProxy == nil { return errors.New("Sharding proxy no database monitor yet initialize") } @@ -203,7 +216,7 @@ func (cluster *Cluster) ShardProxyGetHeadCluster() *Cluster { return nil } -func (cluster *Cluster) ShardProxyCreateVTable(proxy *MdbsProxy, schema string, table string, duplicates []*ServerMonitor, withreshard bool) error { +func (cluster
*Cluster) ShardProxyCreateVTable(proxy *MariadbShardProxy, schema string, table string, duplicates []*ServerMonitor, withreshard bool) error { checksum64 := crc64.Checksum([]byte(schema+"_"+cluster.GetName()), crcTable) var err error var ddl string @@ -292,7 +305,7 @@ func (cluster *Cluster) ShardProxyCreateVTable(proxy *MdbsProxy, schema string, return nil } -func (cluster *Cluster) ShardSetUniversalTable(proxy *MdbsProxy, schema string, table string) error { +func (cluster *Cluster) ShardSetUniversalTable(proxy *MariadbShardProxy, schema string, table string) error { master := cluster.GetMaster() if master == nil { return errors.New("Universal table no valid master on current cluster") @@ -325,7 +338,7 @@ func (cluster *Cluster) ShardSetUniversalTable(proxy *MdbsProxy, schema string, cluster.Conf.MdbsUniversalTables = cluster.Conf.MdbsUniversalTables + "," + schema + "." + table for _, pri := range cluster.Proxies { - if pr, ok := pri.(*MdbsProxy); ok { + if pr, ok := pri.(*MariadbShardProxy); ok { err := cluster.ShardProxyCreateVTable(pr, schema, table+"_copy", duplicates, false) if err != nil { return err @@ -404,7 +417,7 @@ func (cluster *Cluster) ShardSetUniversalTable(proxy *MdbsProxy, schema string, return nil } -func (cluster *Cluster) ShardProxyMoveTable(proxy *MdbsProxy, schema string, table string, destCluster *Cluster) error { +func (cluster *Cluster) ShardProxyMoveTable(proxy *MariadbShardProxy, schema string, table string, destCluster *Cluster) error { master := cluster.GetMaster() if master == nil { return errors.New("Move table no valid master on current cluster") @@ -432,7 +445,7 @@ func (cluster *Cluster) ShardProxyMoveTable(proxy *MdbsProxy, schema string, tab duplicates = append(duplicates, destmaster) for _, pri := range cluster.Proxies { - if pr, ok := pri.(*MdbsProxy); ok { + if pr, ok := pri.(*MariadbShardProxy); ok { err := destCluster.ShardProxyCreateVTable(pr, schema, table+"_copy", duplicates, false) if err != nil { return err @@ -512,7 +525,7 @@ func (cluster *Cluster) ShardProxyMoveTable(proxy *MdbsProxy, schema string, tab return nil } -func (cluster *Cluster) ShardProxyReshardTable(proxy *MdbsProxy, schema string, table string, clusters map[string]*Cluster) error { +func (cluster *Cluster) ShardProxyReshardTable(proxy *MariadbShardProxy, schema string, table string, clusters map[string]*Cluster) error { master := cluster.GetMaster() if master == nil { @@ -545,7 +558,7 @@ func (cluster *Cluster) ShardProxyReshardTable(proxy *MdbsProxy, schema string, } for _, pri := range cluster.Proxies { - if pr, ok := pri.(*MdbsProxy); ok { + if pr, ok := pri.(*MariadbShardProxy); ok { err := cluster.ShardProxyCreateVTable(pr, schema, table+"_reshard", duplicates, false) if err != nil { return err @@ -663,7 +676,7 @@ func (cluster *Cluster) RunQueryWithLog(server *ServerMonitor, query string) err return nil } -func (cluster *Cluster) ShardProxyBootstrap(proxy *MdbsProxy) error { +func (cluster *Cluster) ShardProxyBootstrap(proxy *MariadbShardProxy) error { var err error if proxy.ShardProxy != nil { @@ -683,7 +696,7 @@ func (cluster *Cluster) ShardProxyBootstrap(proxy *MdbsProxy) error { return err } -func (cluster *Cluster) ShardProxyCreateSystemTable(proxy *MdbsProxy) error { +func (cluster *Cluster) ShardProxyCreateSystemTable(proxy *MariadbShardProxy) error { params := fmt.Sprintf("?timeout=60s") @@ -959,6 +972,6 @@ func (cluster *Cluster) ShardProxyCreateSystemTable(proxy *MdbsProxy) error { return nil } -func (cluster *Cluster) MdbsproxyCopyTable(oldmaster 
*ServerMonitor, newmaster *ServerMonitor, proxy *MdbsProxy) { +func (cluster *Cluster) MdbsproxyCopyTable(oldmaster *ServerMonitor, newmaster *ServerMonitor, proxy *MariadbShardProxy) { } diff --git a/cluster/prx_sphinx.go b/cluster/prx_sphinx.go index fbdd83e22..558a070e9 100644 --- a/cluster/prx_sphinx.go +++ b/cluster/prx_sphinx.go @@ -10,15 +10,32 @@ package cluster import ( "fmt" + "runtime" + "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/router/sphinx" "github.com/signal18/replication-manager/utils/state" + "github.com/spf13/pflag" ) type SphinxProxy struct { Proxy } +func (proxy *SphinxProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { + flags.BoolVar(&conf.SphinxOn, "sphinx", false, "Turn on SphinxSearch detection") + flags.StringVar(&conf.SphinxHosts, "sphinx-servers", "127.0.0.1", "SphinxSearch hosts") + flags.StringVar(&conf.SphinxPort, "sphinx-port", "9312", "SphinxSearch API port") + flags.StringVar(&conf.SphinxQLPort, "sphinx-sql-port", "9306", "SphinxSearch SQL port") + if runtime.GOOS == "linux" { + flags.StringVar(&conf.SphinxConfig, "sphinx-config", "/usr/share/replication-manager/shinx/sphinx.conf", "Path to sphinx config") + } + if runtime.GOOS == "darwin" { + flags.StringVar(&conf.SphinxConfig, "sphinx-config", "/opt/replication-manager/share/sphinx/sphinx.conf", "Path to sphinx config") + } + flags.StringVar(&conf.SphinxHostsIPV6, "sphinx-servers-ipv6", "", "ipv6 bind address ") +} + func (proxy *SphinxProxy) Connect() (sphinx.SphinxSQL, error) { sphinx := sphinx.SphinxSQL{ User: proxy.User, diff --git a/main.go b/main.go index 8a81c8e26..61d70e605 100644 --- a/main.go +++ b/main.go @@ -20,6 +20,7 @@ import ( mysqllog "log" "github.com/go-sql-driver/mysql" + "github.com/signal18/replication-manager/cluster" "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/server" @@ -330,27 +331,12 @@ func init() { monitorCmd.Flags().StringVar(&conf.ExtProxyVIP, "extproxy-address", "", "Network address when route is manage via external script, host:[port] format") if WithMaxscale == "ON" { - monitorCmd.Flags().BoolVar(&conf.MxsOn, "maxscale", false, "MaxScale proxy server is query for backend status") - monitorCmd.Flags().BoolVar(&conf.CheckFalsePositiveMaxscale, "failover-falsepositive-maxscale", false, "Failover checks that maxscale detect failed master") - monitorCmd.Flags().IntVar(&conf.CheckFalsePositiveMaxscaleTimeout, "failover-falsepositive-maxscale-timeout", 14, "Failover checks that maxscale detect failed master") - monitorCmd.Flags().BoolVar(&conf.MxsBinlogOn, "maxscale-binlog", false, "Maxscale binlog server topolgy") - monitorCmd.Flags().MarkDeprecated("maxscale-monitor", "Deprecate disable maxscale monitoring for 2 nodes cluster") - monitorCmd.Flags().BoolVar(&conf.MxsDisableMonitor, "maxscale-disable-monitor", false, "Disable maxscale monitoring and fully drive server state") - monitorCmd.Flags().StringVar(&conf.MxsGetInfoMethod, "maxscale-get-info-method", "maxadmin", "How to get infos from Maxscale maxinfo|maxadmin") - monitorCmd.Flags().StringVar(&conf.MxsHost, "maxscale-servers", "", "MaxScale hosts ") - monitorCmd.Flags().StringVar(&conf.MxsPort, "maxscale-port", "6603", "MaxScale admin port") - monitorCmd.Flags().StringVar(&conf.MxsUser, "maxscale-user", "admin", "MaxScale admin user") - monitorCmd.Flags().StringVar(&conf.MxsPass, "maxscale-pass", "mariadb", "MaxScale admin password") - monitorCmd.Flags().IntVar(&conf.MxsWritePort, "maxscale-write-port", 3306, "MaxScale 
read-write port to leader") - monitorCmd.Flags().IntVar(&conf.MxsReadPort, "maxscale-read-port", 3307, "MaxScale load balance read port to all nodes") - monitorCmd.Flags().IntVar(&conf.MxsReadWritePort, "maxscale-read-write-port", 3308, "MaxScale load balance read port to all nodes") - monitorCmd.Flags().IntVar(&conf.MxsMaxinfoPort, "maxscale-maxinfo-port", 3309, "MaxScale maxinfo plugin http port") - monitorCmd.Flags().IntVar(&conf.MxsBinlogPort, "maxscale-binlog-port", 3309, "MaxScale maxinfo plugin http port") - monitorCmd.Flags().BoolVar(&conf.MxsServerMatchPort, "maxscale-server-match-port", false, "Match servers running on same host with different port") - monitorCmd.Flags().StringVar(&conf.MxsBinaryPath, "maxscale-binary-path", "/usr/sbin/maxscale", "Maxscale binary location") - monitorCmd.Flags().StringVar(&conf.MxsHostsIPV6, "maxscale-servers-ipv6", "", "ipv6 bind address ") + maxscaleprx := new(cluster.MaxscaleProxy) + maxscaleprx.AddFlags(monitorCmd.Flags(), conf) } + // TODO: this seems dead code / unimplemented + // start if WithMySQLRouter == "ON" { monitorCmd.Flags().BoolVar(&conf.MysqlRouterOn, "mysqlrouter", false, "MySQLRouter proxy server is query for backend status") monitorCmd.Flags().StringVar(&conf.MysqlRouterHosts, "mysqlrouter-servers", "127.0.0.1", "MaxScale hosts ") @@ -361,73 +347,32 @@ func init() { monitorCmd.Flags().IntVar(&conf.MysqlRouterReadPort, "mysqlrouter-read-port", 3307, "MySQLRouter load balance read port to all nodes") monitorCmd.Flags().IntVar(&conf.MysqlRouterReadWritePort, "mysqlrouter-read-write-port", 3308, "MySQLRouter load balance read port to all nodes") } + // end of dead code if WithMariadbshardproxy == "ON" { - monitorCmd.Flags().BoolVar(&conf.MdbsProxyOn, "shardproxy", false, "MariaDB Spider proxy") - monitorCmd.Flags().StringVar(&conf.MdbsProxyHosts, "shardproxy-servers", "127.0.0.1:3307", "MariaDB spider proxy hosts IP:Port,IP:Port") - monitorCmd.Flags().StringVar(&conf.MdbsProxyCredential, "shardproxy-credential", "root:mariadb", "MariaDB spider proxy credential") - monitorCmd.Flags().BoolVar(&conf.MdbsProxyCopyGrants, "shardproxy-copy-grants", true, "Copy grants from shards master") - monitorCmd.Flags().BoolVar(&conf.MdbsProxyLoadSystem, "shardproxy-load-system", true, "Load Spider system tables") - monitorCmd.Flags().StringVar(&conf.MdbsUniversalTables, "shardproxy-universal-tables", "replication_manager_schema.bench", "MariaDB spider proxy table list that are federarated to all master") - monitorCmd.Flags().StringVar(&conf.MdbsIgnoreTables, "shardproxy-ignore-tables", "", "MariaDB spider proxy master table list that are ignored") - monitorCmd.Flags().StringVar(&conf.MdbsHostsIPV6, "shardproxy-servers-ipv6", "", "ipv6 bind address ") + mdbsprx := new(cluster.MariadbShardProxy) + mdbsprx.AddFlags(monitorCmd.Flags(), conf) } if WithHaproxy == "ON" { - monitorCmd.Flags().BoolVar(&conf.HaproxyOn, "haproxy", false, "Wrapper to use HaProxy on same host") - monitorCmd.Flags().StringVar(&conf.HaproxyMode, "haproxy-mode", "runtimeapi", "HaProxy mode [standby|runtimeapi|dataplaneapi]") - monitorCmd.Flags().StringVar(&conf.HaproxyUser, "haproxy-user", "admin", "Haproxy API user") - monitorCmd.Flags().StringVar(&conf.HaproxyPassword, "haproxy-password", "admin", "Haproxy API password") - monitorCmd.Flags().StringVar(&conf.HaproxyHosts, "haproxy-servers", "127.0.0.1", "HaProxy hosts") - monitorCmd.Flags().IntVar(&conf.HaproxyAPIPort, "haproxy-api-port", 1999, "HaProxy runtime api port") - monitorCmd.Flags().IntVar(&conf.HaproxyWritePort, 
"haproxy-write-port", 3306, "HaProxy read-write port to leader") - monitorCmd.Flags().IntVar(&conf.HaproxyReadPort, "haproxy-read-port", 3307, "HaProxy load balance read port to all nodes") - monitorCmd.Flags().IntVar(&conf.HaproxyStatPort, "haproxy-stat-port", 1988, "HaProxy statistics port") - monitorCmd.Flags().StringVar(&conf.HaproxyBinaryPath, "haproxy-binary-path", "/usr/sbin/haproxy", "HaProxy binary location") - monitorCmd.Flags().StringVar(&conf.HaproxyReadBindIp, "haproxy-ip-read-bind", "0.0.0.0", "HaProxy input bind address for read") - monitorCmd.Flags().StringVar(&conf.HaproxyWriteBindIp, "haproxy-ip-write-bind", "0.0.0.0", "HaProxy input bind address for write") - monitorCmd.Flags().StringVar(&conf.HaproxyAPIReadBackend, "haproxy-api-read-backend", "service_read", "HaProxy API backend name used for read") - monitorCmd.Flags().StringVar(&conf.HaproxyAPIWriteBackend, "haproxy-api-write-backend", "service_write", "HaProxy API backend name used for write") - monitorCmd.Flags().StringVar(&conf.HaproxyHostsIPV6, "haproxy-servers-ipv6", "", "ipv6 bind address ") + haprx := new(cluster.HaproxyProxy) + haprx.AddFlags(monitorCmd.Flags(), conf) } - monitorCmd.Flags().BoolVar(&conf.MyproxyOn, "myproxy", false, "Use Internal Proxy") - monitorCmd.Flags().IntVar(&conf.MyproxyPort, "myproxy-port", 4000, "Internal proxy read/write port") - monitorCmd.Flags().StringVar(&conf.MyproxyUser, "myproxy-user", "admin", "Myproxy user") - monitorCmd.Flags().StringVar(&conf.MyproxyPassword, "myproxy-password", "repman", "Myproxy password") - if WithProxysql == "ON" { - monitorCmd.Flags().BoolVar(&conf.ProxysqlOn, "proxysql", false, "Use ProxySQL") - monitorCmd.Flags().BoolVar(&conf.ProxysqlSaveToDisk, "proxysql-save-to-disk", false, "Save proxysql change to sqllight") - monitorCmd.Flags().StringVar(&conf.ProxysqlHosts, "proxysql-servers", "", "ProxySQL hosts") - monitorCmd.Flags().StringVar(&conf.ProxysqlHostsIPV6, "proxysql-servers-ipv6", "", "ProxySQL extra IPV6 bind for interfaces") - monitorCmd.Flags().StringVar(&conf.ProxysqlPort, "proxysql-port", "3306", "ProxySQL read/write proxy port") - monitorCmd.Flags().StringVar(&conf.ProxysqlAdminPort, "proxysql-admin-port", "6032", "ProxySQL admin interface port") - monitorCmd.Flags().StringVar(&conf.ProxysqlReaderHostgroup, "proxysql-reader-hostgroup", "1", "ProxySQL reader hostgroup") - monitorCmd.Flags().StringVar(&conf.ProxysqlWriterHostgroup, "proxysql-writer-hostgroup", "0", "ProxySQL writer hostgroup") - monitorCmd.Flags().StringVar(&conf.ProxysqlUser, "proxysql-user", "admin", "ProxySQL admin user") - monitorCmd.Flags().StringVar(&conf.ProxysqlPassword, "proxysql-password", "admin", "ProxySQL admin password") - monitorCmd.Flags().BoolVar(&conf.ProxysqlCopyGrants, "proxysql-bootstrap-users", true, "Copy users from master") - monitorCmd.Flags().BoolVar(&conf.ProxysqlMultiplexing, "proxysql-multiplexing", false, "Multiplexing") - monitorCmd.Flags().BoolVar(&conf.ProxysqlBootstrap, "proxysql-bootstrap", false, "Bootstrap ProxySQL backend servers and hostgroup") - monitorCmd.Flags().BoolVar(&conf.ProxysqlBootstrapVariables, "proxysql-bootstrap-variables", false, "Bootstrap ProxySQL backend servers and hostgroup") - - monitorCmd.Flags().BoolVar(&conf.ProxysqlBootstrapHG, "proxysql-bootstrap-hostgroups", false, "Bootstrap ProxySQL hostgroups") - monitorCmd.Flags().BoolVar(&conf.ProxysqlBootstrapQueryRules, "proxysql-bootstrap-query-rules", false, "Bootstrap Query rules into ProxySQL") - monitorCmd.Flags().StringVar(&conf.ProxysqlBinaryPath, 
"proxysql-binary-path", "/usr/sbin/proxysql", "proxysql binary location") - monitorCmd.Flags().BoolVar(&conf.ProxysqlMasterIsReader, "proxysql-master-is-reader", false, "Add the master to the reader group") + proxysqlprx := new(cluster.ProxySQLProxy) + proxysqlprx.AddFlags(monitorCmd.Flags(), conf) } if WithSphinx == "ON" { - monitorCmd.Flags().BoolVar(&conf.SphinxOn, "sphinx", false, "Turn on SphinxSearch detection") - monitorCmd.Flags().StringVar(&conf.SphinxHosts, "sphinx-servers", "127.0.0.1", "SphinxSearch hosts") - monitorCmd.Flags().StringVar(&conf.SphinxPort, "sphinx-port", "9312", "SphinxSearch API port") - monitorCmd.Flags().StringVar(&conf.SphinxQLPort, "sphinx-sql-port", "9306", "SphinxSearch SQL port") - if GoOS == "linux" { - monitorCmd.Flags().StringVar(&conf.SphinxConfig, "sphinx-config", "/usr/share/replication-manager/shinx/sphinx.conf", "Path to sphinx config") - } - if GoOS == "darwin" { - monitorCmd.Flags().StringVar(&conf.SphinxConfig, "sphinx-config", "/opt/replication-manager/share/sphinx/sphinx.conf", "Path to sphinx config") - } - monitorCmd.Flags().StringVar(&conf.SphinxHostsIPV6, "sphinx-servers-ipv6", "", "ipv6 bind address ") + sphinxprx := new(cluster.SphinxProxy) + sphinxprx.AddFlags(monitorCmd.Flags(), conf) + } + + myproxyprx := new(cluster.MyProxyProxy) + myproxyprx.AddFlags(monitorCmd.Flags(), conf) + + if WithSpider == "ON" { + monitorCmd.Flags().BoolVar(&conf.Spider, "spider", false, "Turn on spider detection") } + if WithMonitoring == "ON" { monitorCmd.Flags().IntVar(&conf.GraphiteCarbonPort, "graphite-carbon-port", 2003, "Graphite Carbon Metrics TCP & UDP port") monitorCmd.Flags().IntVar(&conf.GraphiteCarbonApiPort, "graphite-carbon-api-port", 10002, "Graphite Carbon API port") @@ -451,9 +396,6 @@ func init() { monitorCmd.Flags().IntVar(&conf.ArbitrationReadTimout, "arbitration-read-timeout", 800, "Read timeout for arbotration response in millisec don't woveload monitoring ticker in second") } - if WithSpider == "ON" { - monitorCmd.Flags().BoolVar(&conf.Spider, "spider", false, "Turn on spider detection") - } monitorCmd.Flags().StringVar(&conf.SchedulerReceiverPorts, "scheduler-db-servers-receiver-ports", "4444", "Scheduler TCP port to send data to db node, if list port affection is modulo db nodes") monitorCmd.Flags().BoolVar(&conf.SchedulerBackupLogical, "scheduler-db-servers-logical-backup", true, "Schedule logical backup") monitorCmd.Flags().BoolVar(&conf.SchedulerBackupPhysical, "scheduler-db-servers-physical-backup", false, "Schedule logical backup") diff --git a/server/api_cluster.go b/server/api_cluster.go index 588ae212c..ff6cb99b6 100644 --- a/server/api_cluster.go +++ b/server/api_cluster.go @@ -1672,7 +1672,7 @@ func (repman *ReplicationManager) handlerMuxClusterSchemaUniversalTable(w http.R return } for _, pri := range mycluster.Proxies { - if pr, ok := pri.(*cluster.MdbsProxy); ok { + if pr, ok := pri.(*cluster.MariadbShardProxy); ok { go mycluster.ShardSetUniversalTable(pr, vars["schemaName"], vars["tableName"]) } } @@ -1695,7 +1695,7 @@ func (repman *ReplicationManager) handlerMuxClusterSchemaReshardTable(w http.Res return } for _, pri := range mycluster.Proxies { - if pr, ok := pri.(*cluster.MdbsProxy); ok { + if pr, ok := pri.(*cluster.MariadbShardProxy); ok { clusters := mycluster.GetClusterListFromShardProxy(mycluster.Conf.MdbsProxyHosts) if vars["clusterList"] == "" { mycluster.ShardProxyReshardTable(pr, vars["schemaName"], vars["tableName"], clusters) @@ -1730,7 +1730,7 @@ func (repman *ReplicationManager) 
handlerMuxClusterSchemaMoveTable(w http.Respon return } for _, pri := range mycluster.Proxies { - if pr, ok := pri.(*cluster.MdbsProxy); ok { + if pr, ok := pri.(*cluster.MariadbShardProxy); ok { if vars["clusterShard"] != "" { destcluster := repman.getClusterByName(vars["clusterShard"]) if mycluster != nil { From 56f3c21cf5496c7806922f09bcafe30821cc9234 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Sat, 13 Feb 2021 19:20:04 +0100 Subject: [PATCH 07/84] Rename the file --- cluster/{prx_shardproxy.go => prx_mariadbshardproxy.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename cluster/{prx_shardproxy.go => prx_mariadbshardproxy.go} (100%) diff --git a/cluster/prx_shardproxy.go b/cluster/prx_mariadbshardproxy.go similarity index 100% rename from cluster/prx_shardproxy.go rename to cluster/prx_mariadbshardproxy.go From ed5ee94d959d609543c274a80995f3fd6b21c9b4 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Sat, 13 Feb 2021 20:13:24 +0100 Subject: [PATCH 08/84] Simplify build-flags --- .vscode/settings.json | 2 +- Makefile | 24 ++++++++++++------------ build_linux_amd64.sh | 24 ++++++++++++------------ main.go | 31 +++++++++++++++---------------- 4 files changed, 40 insertions(+), 41 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index d780f5282..eacfd4dc8 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,6 +1,6 @@ { "go.buildTags": "netgo server", "go.buildFlags": [ - "--ldflags -extldflags 'static' -w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.WithProvisioning=OFF -X main.WithOpenSVC=OFF -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" + "--ldflags -extldflags 'static' -w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.WithProvisioning=OFF" ] } \ No newline at end of file diff --git a/Makefile b/Makefile index b78f14e87..06f1dcb96 100755 --- a/Makefile +++ b/Makefile @@ -20,40 +20,40 @@ bin: osc tst pro arm osc-cgo tar: osc-basedir tst-basedir pro-basedir arm-basedir osc-cgo-basedir osc: - env GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF -X main.WithOpenSVC=OFF -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" $(LDFLAGS) -o $(BINDIR)/$(BIN-OSC) + env GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF " $(LDFLAGS) -o $(BINDIR)/$(BIN-OSC) osc-basedir: - env GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF -X main.WithOpenSVC=OFF -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X 
main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" $(LDFLAGS) -o $(BINDIR)/$(BIN-OSC)-basedir + env GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF " $(LDFLAGS) -o $(BINDIR)/$(BIN-OSC)-basedir osc-cgo: - env CGO_ENABLED=1 GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF -X main.WithOpenSVC=OFF -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" $(LDFLAGS) -o $(BINDIR)/$(BIN-OSC-CGO) + env CGO_ENABLED=1 GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF " $(LDFLAGS) -o $(BINDIR)/$(BIN-OSC-CGO) osc-cgo-basedir: - env CGO_ENABLED=1 GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF -X main.WithOpenSVC=OFF -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" $(LDFLAGS) -o $(BINDIR)/$(BIN-OSC-CGO)-basedir + env CGO_ENABLED=1 GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF " $(LDFLAGS) -o $(BINDIR)/$(BIN-OSC-CGO)-basedir tst: - env GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=ON -X main.WithOpenSVC=OFF -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=OFF" $(LDFLAGS) -o $(BINDIR)/$(BIN-TST) + env GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithDeprecate=OFF" $(LDFLAGS) -o $(BINDIR)/$(BIN-TST) tst-basedir: - env GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags 
"-w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=ON -X main.WithOpenSVC=OFF -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=OFF" $(LDFLAGS) -o $(BINDIR)/$(BIN-TST)-basedir + env GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithDeprecate=OFF" $(LDFLAGS) -o $(BINDIR)/$(BIN-TST)-basedir pro: - env GOOS=$(OS) GOARCH=amd64 go build -v --tags "netcgo server" --ldflags "-w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=ON -X main.WithOpenSVC=ON -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" $(LDFLAGS) -o $(BINDIR)/$(BIN-PRO) + env GOOS=$(OS) GOARCH=amd64 go build -v --tags "netcgo server" --ldflags "-w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON " $(LDFLAGS) -o $(BINDIR)/$(BIN-PRO) pro-basedir: - env GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=ON -X main.WithOpenSVC=ON -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" $(LDFLAGS) -o $(BINDIR)/$(BIN-PRO)-basedir + env GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON " $(LDFLAGS) -o $(BINDIR)/$(BIN-PRO)-basedir arm: - env GOOS=$(OS) GOARCH=arm64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s -X main.GoOS=$(OS) -X main.GoArch=arm64 -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=ON -X main.WithOpenSVC=ON -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" $(LDFLAGS) -o $(BINDIR)/$(BIN-ARM) + env GOOS=$(OS) GOARCH=arm64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s -X main.GoOS=$(OS) -X main.GoArch=arm64 -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON " $(LDFLAGS) -o $(BINDIR)/$(BIN-ARM) arm-basedir: - env GOOS=$(OS) GOARCH=arm64 go build 
-v --tags "server" --ldflags "-extldflags '-static' -w -s -X main.GoOS=$(OS) -X main.GoArch=arm64 -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=ON -X main.WithOpenSVC=ON -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" $(LDFLAGS) -o $(BINDIR)/$(BIN-ARM)-basedir + env GOOS=$(OS) GOARCH=arm64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s -X main.GoOS=$(OS) -X main.GoArch=arm64 -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON " $(LDFLAGS) -o $(BINDIR)/$(BIN-ARM)-basedir cli: - env GOOS=$(OS) GOARCH=amd64 go build -v --tags "clients" --ldflags "-w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=ON -X main.WithOpenSVC=ON -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=OFF -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" $(LDFLAGS) -o $(BINDIR)/$(BIN-CLI) + env GOOS=$(OS) GOARCH=amd64 go build -v --tags "clients" --ldflags "-w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON -X main.WithArbitrationClient=OFF " $(LDFLAGS) -o $(BINDIR)/$(BIN-CLI) arb: - env GOOS=$(OS) GOARCH=amd64 go build -v --tags "arbitrator" --ldflags "-w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=ON -X main.WithOpenSVC=ON -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=OFF -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" $(LDFLAGS) -o $(BINDIR)/$(BIN-ARB) + env GOOS=$(OS) GOARCH=amd64 go build -v --tags "arbitrator" --ldflags "-w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON -X main.WithArbitration=ON -X main.WithBackup=OFF" $(LDFLAGS) -o $(BINDIR)/$(BIN-ARB) package: all nobuild=0 ./package_$(OS)_amd64.sh diff --git a/build_linux_amd64.sh b/build_linux_amd64.sh index 0639c9115..8eedfbc78 100755 --- a/build_linux_amd64.sh +++ b/build_linux_amd64.sh @@ -9,34 +9,34 @@ BUILD=$(date +%FT%T%z) TAR="-X main.WithTarball=ON" BINARY=replication-manager-osc -env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-extldflags 'static' -w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF -X main.WithOpenSVC=OFF -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X 
main.WithDeprecate=ON" ${LDFLAGS} -o ${BINARY} +env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-extldflags 'static' -w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF " ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-osc-basedir -env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-extldflags 'static' -w -s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF -X main.WithOpenSVC=OFF -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" ${LDFLAGS} -o ${BINARY} +env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-extldflags 'static' -w -s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF " ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-tst -env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=ON -X main.WithOpenSVC=OFF -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} +env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-tst-basedir -env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-w -s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=ON -X main.WithOpenSVC=OFF -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} +env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-w -s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-pro -env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=ON -X main.WithOpenSVC=ON -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X 
main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" ${LDFLAGS} -o ${BINARY} +env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON " ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-pro-basedir -env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-w -s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=ON -X main.WithOpenSVC=ON -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" ${LDFLAGS} -o ${BINARY} +env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-w -s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON " ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-min -env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-extldflags 'static' -w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF -X main.WithOpenSVC=OFF -X main.WithHaproxy=OFF -X main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithArbitration=OFF -X main.WithArbitrationClient=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X main.WithMail=ON -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} +env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-extldflags 'static' -w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF -X main.WithHaproxy=OFF -X main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithArbitrationClient=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-min-basedir -env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-extldflags 'static' -w -s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF -X main.WithOpenSVC=OFF -X main.WithHaproxy=OFF -X main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithArbitration=OFF -X main.WithArbitrationClient=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X main.WithMail=ON -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} +env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-extldflags 'static' -w -s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF -X main.WithHaproxy=OFF -X 
main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithArbitrationClient=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-cli -env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo clients" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=ON -X main.WithOpenSVC=ON -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=OFF -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" ${LDFLAGS} -o ${BINARY} +env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo clients" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON -X main.WithArbitrationClient=OFF " ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-arb -env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo arbitrator" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=ON -X main.WithOpenSVC=ON -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON" ${LDFLAGS} -o ${BINARY} +env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo arbitrator" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON -X main.WithArbitration=ON" ${LDFLAGS} -o ${BINARY} #BINARY=mrm-test -#env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=ON -X main.WithOpenSVC=ON -X main.WithHaproxy=OFF -X main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithArbitration=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X main.WithMail=ON -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} +#env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON -X main.WithHaproxy=OFF -X main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} #BINARY=mrm-cli -#env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo clients" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=ON -X main.WithOpenSVC=ON -X main.WithHaproxy=OFF -X main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithArbitration=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X 
main.WithMail=ON -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} +#env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo clients" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON -X main.WithHaproxy=OFF -X main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} diff --git a/main.go b/main.go index 61d70e605..1dec35299 100644 --- a/main.go +++ b/main.go @@ -34,25 +34,24 @@ var ( // Version is the semantic version number, e.g. 1.0.1 Version string // Provisoning to add flags for compile - WithProvisioning string - WithArbitration string - WithArbitrationClient string - WithProxysql string - WithHaproxy string - WithMaxscale string - WithMariadbshardproxy string - WithMonitoring string - WithMail string - WithHttp string + WithProvisioning string = "ON" + WithArbitration string = "OFF" + WithArbitrationClient string = "ON" + WithProxysql string = "ON" + WithHaproxy string = "ON" + WithMaxscale string = "ON" + WithMariadbshardproxy string = "ON" + WithMonitoring string = "ON" + WithMail string = "ON" + WithHttp string = "ON" WithSpider string - WithEnforce string - WithDeprecate string - WithOpenSVC string - WithMultiTiers string + WithEnforce string = "ON" + WithDeprecate string = "ON" + WithOpenSVC string = "OFF" WithTarball string WithMySQLRouter string - WithSphinx string - WithBackup string + WithSphinx string = "ON" + WithBackup string = "ON" // FullVersion is the semantic version number + git commit hash FullVersion string // Build is the build date of replication-manager From a51562c56c310a72c7bb58fdcfe12c895068604d Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Sat, 13 Feb 2021 20:20:19 +0100 Subject: [PATCH 09/84] Remove no longer used build tags --- .vscode/settings.json | 1 - Makefile | 18 +++++++++--------- build_linux_amd64.sh | 24 ++++++++++++------------ password.go | 2 -- 4 files changed, 21 insertions(+), 24 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index eacfd4dc8..1ca2e3f05 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,5 +1,4 @@ { - "go.buildTags": "netgo server", "go.buildFlags": [ "--ldflags -extldflags 'static' -w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.WithProvisioning=OFF" ] diff --git a/Makefile b/Makefile index 06f1dcb96..73ad1377d 100755 --- a/Makefile +++ b/Makefile @@ -20,34 +20,34 @@ bin: osc tst pro arm osc-cgo tar: osc-basedir tst-basedir pro-basedir arm-basedir osc-cgo-basedir osc: - env GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF " $(LDFLAGS) -o $(BINDIR)/$(BIN-OSC) + env GOOS=$(OS) GOARCH=amd64 go build -v --ldflags "-extldflags '-static' -w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF " $(LDFLAGS) -o $(BINDIR)/$(BIN-OSC) osc-basedir: - env GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF " 
$(LDFLAGS) -o $(BINDIR)/$(BIN-OSC)-basedir + env GOOS=$(OS) GOARCH=amd64 go build -v --ldflags "-extldflags '-static' -w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF " $(LDFLAGS) -o $(BINDIR)/$(BIN-OSC)-basedir osc-cgo: - env CGO_ENABLED=1 GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF " $(LDFLAGS) -o $(BINDIR)/$(BIN-OSC-CGO) + env CGO_ENABLED=1 GOOS=$(OS) GOARCH=amd64 go build -v --ldflags "-extldflags '-static' -w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF " $(LDFLAGS) -o $(BINDIR)/$(BIN-OSC-CGO) osc-cgo-basedir: - env CGO_ENABLED=1 GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF " $(LDFLAGS) -o $(BINDIR)/$(BIN-OSC-CGO)-basedir + env CGO_ENABLED=1 GOOS=$(OS) GOARCH=amd64 go build -v --ldflags "-extldflags '-static' -w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithProvisioning=OFF " $(LDFLAGS) -o $(BINDIR)/$(BIN-OSC-CGO)-basedir tst: - env GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithDeprecate=OFF" $(LDFLAGS) -o $(BINDIR)/$(BIN-TST) + env GOOS=$(OS) GOARCH=amd64 go build -v --ldflags "-w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithDeprecate=OFF" $(LDFLAGS) -o $(BINDIR)/$(BIN-TST) tst-basedir: - env GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithDeprecate=OFF" $(LDFLAGS) -o $(BINDIR)/$(BIN-TST)-basedir + env GOOS=$(OS) GOARCH=amd64 go build -v --ldflags "-w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithDeprecate=OFF" $(LDFLAGS) -o $(BINDIR)/$(BIN-TST)-basedir pro: env GOOS=$(OS) GOARCH=amd64 go build -v --tags "netcgo server" --ldflags "-w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON " $(LDFLAGS) -o $(BINDIR)/$(BIN-PRO) pro-basedir: - env GOOS=$(OS) GOARCH=amd64 go build -v --tags "server" --ldflags "-w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON " $(LDFLAGS) -o $(BINDIR)/$(BIN-PRO)-basedir + env GOOS=$(OS) GOARCH=amd64 go build -v --ldflags "-w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON " $(LDFLAGS) -o $(BINDIR)/$(BIN-PRO)-basedir arm: - env GOOS=$(OS) GOARCH=arm64 go build -v --tags "server" --ldflags "-extldflags '-static' 
-w -s -X main.GoOS=$(OS) -X main.GoArch=arm64 -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON " $(LDFLAGS) -o $(BINDIR)/$(BIN-ARM) + env GOOS=$(OS) GOARCH=arm64 go build -v --ldflags "-extldflags '-static' -w -s -X main.GoOS=$(OS) -X main.GoArch=arm64 -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON " $(LDFLAGS) -o $(BINDIR)/$(BIN-ARM) arm-basedir: - env GOOS=$(OS) GOARCH=arm64 go build -v --tags "server" --ldflags "-extldflags '-static' -w -s -X main.GoOS=$(OS) -X main.GoArch=arm64 -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON " $(LDFLAGS) -o $(BINDIR)/$(BIN-ARM)-basedir + env GOOS=$(OS) GOARCH=arm64 go build -v --ldflags "-extldflags '-static' -w -s -X main.GoOS=$(OS) -X main.GoArch=arm64 -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON " $(LDFLAGS) -o $(BINDIR)/$(BIN-ARM)-basedir cli: env GOOS=$(OS) GOARCH=amd64 go build -v --tags "clients" --ldflags "-w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON -X main.WithArbitrationClient=OFF " $(LDFLAGS) -o $(BINDIR)/$(BIN-CLI) diff --git a/build_linux_amd64.sh b/build_linux_amd64.sh index 8eedfbc78..416517347 100755 --- a/build_linux_amd64.sh +++ b/build_linux_amd64.sh @@ -9,34 +9,34 @@ BUILD=$(date +%FT%T%z) TAR="-X main.WithTarball=ON" BINARY=replication-manager-osc -env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-extldflags 'static' -w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF " ${LDFLAGS} -o ${BINARY} +env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --ldflags "-extldflags 'static' -w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF " ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-osc-basedir -env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-extldflags 'static' -w -s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF " ${LDFLAGS} -o ${BINARY} +env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --ldflags "-extldflags 'static' -w -s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF " ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-tst -env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} +env GOOS=linux GOARCH=amd64 go build -a -v --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-tst-basedir -env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-w -s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} +env GOOS=linux GOARCH=amd64 go build -a -v --ldflags "-w 
-s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-pro -env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON " ${LDFLAGS} -o ${BINARY} +env GOOS=linux GOARCH=amd64 go build -a -v --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON " ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-pro-basedir -env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-w -s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON " ${LDFLAGS} -o ${BINARY} +env GOOS=linux GOARCH=amd64 go build -a -v --ldflags "-w -s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON " ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-min -env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-extldflags 'static' -w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF -X main.WithHaproxy=OFF -X main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithArbitrationClient=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} +env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --ldflags "-extldflags 'static' -w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF -X main.WithHaproxy=OFF -X main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithArbitrationClient=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-min-basedir -env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-extldflags 'static' -w -s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF -X main.WithHaproxy=OFF -X main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithArbitrationClient=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} +env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -v --ldflags "-extldflags 'static' -w -s $TAR -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithProvisioning=OFF -X main.WithHaproxy=OFF -X main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithArbitrationClient=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-cli -env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo clients" --ldflags "-w -s -X 
main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON -X main.WithArbitrationClient=OFF " ${LDFLAGS} -o ${BINARY} +env GOOS=linux GOARCH=amd64 go build -a -v --tags "clients" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON -X main.WithArbitrationClient=OFF " ${LDFLAGS} -o ${BINARY} BINARY=replication-manager-arb -env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo arbitrator" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON -X main.WithArbitration=ON" ${LDFLAGS} -o ${BINARY} +env GOOS=linux GOARCH=amd64 go build -a -v --tags "arbitrator" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON -X main.WithArbitration=ON" ${LDFLAGS} -o ${BINARY} #BINARY=mrm-test -#env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo server" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON -X main.WithHaproxy=OFF -X main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} +#env GOOS=linux GOARCH=amd64 go build -a -v --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON -X main.WithHaproxy=OFF -X main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} #BINARY=mrm-cli -#env GOOS=linux GOARCH=amd64 go build -a -v --tags "netgo clients" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON -X main.WithHaproxy=OFF -X main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} +#env GOOS=linux GOARCH=amd64 go build -a -v --tags "clients" --ldflags "-w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=${VERSION} -X main.FullVersion=${FULLVERSION} -X main.Build=${BUILD} -X main.WithOpenSVC=ON -X main.WithHaproxy=OFF -X main.WithMaxscale=OFF -X main.WithMariadbshardproxy=OFF -X main.WithProxysql=OFF -X main.WithMonitoring=OFF -X main.WithHttp=OFF -X main.WithEnforce=OFF -X main.WithDeprecate=OFF" ${LDFLAGS} -o ${BINARY} diff --git a/password.go b/password.go index 0363d64cc..fc634e366 100644 --- a/password.go +++ b/password.go @@ -1,5 +1,3 @@ -// +build server - // replication-manager - Replication Manager Monitoring and CLI for MariaDB and MySQL // Copyright 2017 Signal 18 SARL // Authors: Guillaume Lefranc From 0190a9bee33880944256410d151e455a5cedf932 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Sat, 13 Feb 2021 20:49:34 +0100 Subject: [PATCH 10/84] Cleanup more build-flags and simplify the settings.json for vscode --- .vscode/settings.json | 3 --- Makefile | 2 +- main.go | 4 ++-- 3 files changed, 3 
insertions(+), 6 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 1ca2e3f05..7a73a41bf 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,5 +1,2 @@ { - "go.buildFlags": [ - "--ldflags -extldflags 'static' -w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.WithProvisioning=OFF" - ] } \ No newline at end of file diff --git a/Makefile b/Makefile index 73ad1377d..faca3ff90 100755 --- a/Makefile +++ b/Makefile @@ -38,7 +38,7 @@ tst-basedir: env GOOS=$(OS) GOARCH=amd64 go build -v --ldflags "-w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithDeprecate=OFF" $(LDFLAGS) -o $(BINDIR)/$(BIN-TST)-basedir pro: - env GOOS=$(OS) GOARCH=amd64 go build -v --tags "netcgo server" --ldflags "-w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON " $(LDFLAGS) -o $(BINDIR)/$(BIN-PRO) + env GOOS=$(OS) GOARCH=amd64 go build -v --ldflags "-w -s -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON " $(LDFLAGS) -o $(BINDIR)/$(BIN-PRO) pro-basedir: env GOOS=$(OS) GOARCH=amd64 go build -v --ldflags "-w -s $(TAR) -X main.GoOS=$(OS) -X main.GoArch=amd64 -X main.Version=$(VERSION) -X main.FullVersion=$(FULLVERSION) -X main.Build=$(BUILD) -X main.WithOpenSVC=ON " $(LDFLAGS) -o $(BINDIR)/$(BIN-PRO)-basedir diff --git a/main.go b/main.go index 1dec35299..8580313fa 100644 --- a/main.go +++ b/main.go @@ -56,8 +56,8 @@ var ( FullVersion string // Build is the build date of replication-manager Build string - GoOS string - GoArch string + GoOS string = "linux" + GoArch string = "amd64" conf config.Config cfgGroup string ) From 29a2fa625c3a27782123007b10fde54e6b6dc8c5 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Sun, 14 Feb 2021 11:37:31 +0100 Subject: [PATCH 11/84] Stop multi source on leader during failover Copy multi source to elected leader and reset source Found password from master host in other clusters --- cluster/cluster_fail.go | 135 +++++++++++++++++++++++++++++----------- cluster/cluster_get.go | 36 +++++++++++ cluster/srv.go | 68 +++++++++++++++++--- config/config.go | 1 + main.go | 2 +- 5 files changed, 198 insertions(+), 44 deletions(-) diff --git a/cluster/cluster_fail.go b/cluster/cluster_fail.go index e2b83c2e5..bd4291c82 100644 --- a/cluster/cluster_fail.go +++ b/cluster/cluster_fail.go @@ -24,7 +24,7 @@ import ( "github.com/signal18/replication-manager/utils/state" ) -// MasterFailover triggers a master switchover and returns the new master URL +// MasterFailover triggers a leader change and returns the new master URL when single possible leader func (cluster *Cluster) MasterFailover(fail bool) bool { if cluster.GetTopology() == topoMultiMasterRing || cluster.GetTopology() == topoMultiMasterWsrep { res := cluster.VMasterFailover(fail) @@ -148,15 +148,8 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { } } } - if cluster.Conf.FailEventScheduler { - cluster.LogPrintf(LvlInfo, "Disable Event Scheduler on old master") - logs, err := cluster.oldMaster.SetEventScheduler(false) - cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Could not disable event scheduler on old master") - } + cluster.oldMaster.freeze() - cluster.LogPrintf(LvlInfo, "Rejecting updates on %s (old master)", cluster.oldMaster.URL) - logs, err := 
dbhelper.FlushTablesWithReadLock(cluster.oldMaster.Conn, cluster.oldMaster.DBVersion) - cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Could not lock tables on %s (old master) %s", cluster.oldMaster.URL, err) } // Sync candidate depending on the master status. @@ -178,12 +171,12 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { dbhelper.MasterWaitGTID(cluster.master.Conn, cluster.oldMaster.GTIDBinlogPos.Sprint(), 30) } else {*/ // Failover - cluster.LogPrintf(LvlInfo, "Waiting for candidate master to apply relay log") + cluster.LogPrintf(LvlInfo, "Waiting for candidate master %s to apply relay log", cluster.master.URL) err = cluster.master.ReadAllRelayLogs() if err != nil { - cluster.LogPrintf(LvlErr, "Error while reading relay logs on candidate: %s", err) + cluster.LogPrintf(LvlErr, "Error while reading relay logs on candidate %s: %s", cluster.master, err) } - cluster.LogPrintf(LvlDbg, "Save replication status before electing") + cluster.LogPrintf(LvlDbg, "Save replication status before opening traffic") ms, err := cluster.master.GetSlaveStatus(cluster.master.ReplicationSourceName) if err != nil { cluster.LogPrintf(LvlErr, "Failover can not fetch replication info on new master: %s", err) @@ -326,7 +319,8 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { logs, err := dbhelper.UnlockTables(cluster.oldMaster.Conn) cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Could not unlock tables on old master %s", err) - cluster.oldMaster.StopSlave() // This is helpful in some cases the old master can have an old replication running + // Moved in freeze + //cluster.oldMaster.StopSlave() // This is helpful in some cases the old master can have an old replication running one_shoot_slave_pos := false if cluster.oldMaster.DBVersion.IsMariaDB() && cluster.oldMaster.HaveMariaDBGTID == false && cluster.oldMaster.DBVersion.Major >= 10 { logs, err := dbhelper.SetGTIDSlavePos(cluster.oldMaster.Conn, cluster.master.GTIDBinlogPos.Sprint()) @@ -419,6 +413,7 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { logs, err = cluster.oldMaster.StartSlave() cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Start slave failed on old master,%s reason: %s ", cluster.oldMaster.URL, err) } else { + // Is Maxscale // Don't start slave until the relay as been point to new master cluster.LogPrintf(LvlInfo, "Pointing old master to relay server") @@ -457,7 +452,6 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { } } cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Change master failed on old master %s", err) - if cluster.Conf.ReadOnly { logs, err = dbhelper.SetReadOnly(cluster.oldMaster.Conn, true) cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Could not set old master as read-only, %s", err) @@ -479,6 +473,10 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { cluster.slaves = append(cluster.slaves, cluster.oldMaster) } } + // End Old Alive Leader as new replica + + // Multi source on old leader case + cluster.FailoverExtraMultiSource(cluster.oldMaster, cluster.master, fail) // ******** // Phase 5: Switch slaves to new master @@ -655,6 +653,90 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { return true } +// FailoverExtraMultiSource care of master extra muti source replications +func (cluster *Cluster) FailoverExtraMultiSource(oldMaster *ServerMonitor, NewMaster *ServerMonitor, fail bool) error { + + for _, rep := range 
oldMaster.Replications { + + if rep.ConnectionName.String != cluster.Conf.MasterConn { + myparentrplpassword := "" + parentCluster := cluster.GetParentClusterFromReplicationSource(rep) + cluster.LogPrintf(LvlInfo, "Failover replication source %s ", rep.ConnectionName) + if parentCluster != nil { + myparentrplpassword = parentCluster.rplPass + } else { + cluster.LogPrintf(LvlErr, "Unable to found a monitored cluster for replication source %s ", rep.ConnectionName) + cluster.LogPrintf(LvlErr, "Moving source %s with empty password to preserve replication stream on new master", rep.ConnectionName) + } + // need a way to found parent replication password + if rep.UsingGtid.String == "No" { + + logs, err := dbhelper.ChangeMaster(NewMaster.Conn, dbhelper.ChangeMasterOpt{ + Host: rep.MasterHost.String, + Port: rep.MasterPort.String, + User: rep.MasterUser.String, + Password: myparentrplpassword, + Retry: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry), + Heartbeat: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatTime), + Mode: "POSITIONAL", + Logfile: rep.MasterLogFile.String, + Logpos: rep.ExecMasterLogPos.String, + SSL: cluster.Conf.ReplicationSSL, + Channel: rep.ConnectionName.String, + IsDelayed: false, + Delay: "0", + PostgressDB: NewMaster.PostgressDB, + }, NewMaster.DBVersion) + cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Change master failed on slave %s, %s", NewMaster.URL, err) + } else { + if rep.UsingGtid.String == strings.ToUpper("SLAVE_POS") || rep.UsingGtid.String == strings.ToUpper("CURRENT_POS") { + // We herite last GTID state from the old leader failed or not + logs, err := dbhelper.ChangeMaster(cluster.oldMaster.Conn, dbhelper.ChangeMasterOpt{ + Host: rep.MasterHost.String, + Port: rep.MasterPort.String, + User: rep.MasterUser.String, + Password: myparentrplpassword, + Retry: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry), + Heartbeat: strconv.Itoa(int(rep.SlaveHeartbeatPeriod)), + Mode: strings.ToUpper(rep.UsingGtid.String), + SSL: cluster.Conf.ReplicationSSL, + Channel: rep.ConnectionName.String, + IsDelayed: false, + Delay: "0", + PostgressDB: NewMaster.PostgressDB, + }, NewMaster.DBVersion) + cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Change master failed on slave %s, %s", NewMaster.URL, err) + + } else if rep.RetrievedGtidSet.Valid && rep.ExecutedGtidSet.String != "" { + logs, err := dbhelper.ChangeMaster(NewMaster.Conn, dbhelper.ChangeMasterOpt{ + Host: rep.MasterHost.String, + Port: rep.MasterPort.String, + User: rep.MasterUser.String, + Password: myparentrplpassword, + Retry: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry), + Heartbeat: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatTime), + Mode: "MASTER_AUTO_POSITION", + SSL: cluster.Conf.ReplicationSSL, + Channel: rep.ConnectionName.String, + IsDelayed: false, + Delay: "0", + PostgressDB: NewMaster.PostgressDB, + }, NewMaster.DBVersion) + cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Change master failed on slave %s, %s", NewMaster.URL, err) + + } + } + logs, err := dbhelper.StartSlave(NewMaster.Conn, rep.ConnectionName.String, NewMaster.DBVersion) + cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Start replication source %s failed on %s, %s", rep.ConnectionName, NewMaster.URL, err) + if fail == false { + logs, err := dbhelper.ResetSlave(oldMaster.Conn, true, rep.ConnectionName.String, oldMaster.DBVersion) + cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Reset replication source %s failed on %s, %s", 
rep.ConnectionName, oldMaster.URL, err) + } + } + } + return nil +} + // Returns a candidate from a list of slaves. If there's only one slave it will be the de facto candidate. func (cluster *Cluster) electSwitchoverCandidate(l []*ServerMonitor, forcingLog bool) int { ll := len(l) @@ -772,8 +854,9 @@ func (cluster *Cluster) electSwitchoverCandidate(l []*ServerMonitor, forcingLog return -1 } +// electFailoverCandidate ound the most up to date and look after a possibility to failover on it func (cluster *Cluster) electFailoverCandidate(l []*ServerMonitor, forcingLog bool) int { - //Found the most uptodate and look after a possibility to failover on it + ll := len(l) seqList := make([]uint64, ll) posList := make([]uint64, ll) @@ -1025,6 +1108,7 @@ func (cluster *Cluster) foundPreferedMaster(l []*ServerMonitor) *ServerMonitor { return nil } +// VMasterFailover triggers a leader change and returns the new master URL when all possible leader multimaster ring or galera func (cluster *Cluster) VMasterFailover(fail bool) bool { cluster.sme.SetFailoverState() @@ -1286,27 +1370,6 @@ func (cluster *Cluster) electVirtualCandidate(oldMaster *ServerMonitor, forcingL return -1 } -func (cluster *Cluster) GetRingChildServer(oldMaster *ServerMonitor) *ServerMonitor { - for _, s := range cluster.Servers { - if s.ServerID != cluster.oldMaster.ServerID { - //cluster.LogPrintf(LvlDbg, "test %s failed %s", s.URL, cluster.oldMaster.URL) - master, err := cluster.GetMasterFromReplication(s) - if err == nil && master.ServerID == oldMaster.ServerID { - return s - } - } - } - return nil -} - -func (cluster *Cluster) GetRingParentServer(oldMaster *ServerMonitor) *ServerMonitor { - ss, err := cluster.oldMaster.GetSlaveStatusLastSeen(cluster.oldMaster.ReplicationSourceName) - if err != nil { - return nil - } - return cluster.GetServerFromURL(ss.MasterHost.String + ":" + ss.MasterPort.String) -} - func (cluster *Cluster) CloseRing(oldMaster *ServerMonitor) error { cluster.LogPrintf(LvlInfo, "Closing ring around %s", cluster.oldMaster.URL) child := cluster.GetRingChildServer(cluster.oldMaster) diff --git a/cluster/cluster_get.go b/cluster/cluster_get.go index 24d34bad1..9bcd0ba9c 100644 --- a/cluster/cluster_get.go +++ b/cluster/cluster_get.go @@ -18,6 +18,7 @@ import ( "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/utils/cron" + "github.com/signal18/replication-manager/utils/dbhelper" "github.com/signal18/replication-manager/utils/misc" "github.com/signal18/replication-manager/utils/state" ) @@ -531,6 +532,41 @@ func (cluster *Cluster) GetChildClusters() map[string]*Cluster { return clusters } +func (cluster *Cluster) GetParentClusterFromReplicationSource(rep dbhelper.SlaveStatus) *Cluster { + + for _, c := range cluster.clusterList { + if cluster.Name != c.Name { + for _, srv := range c.Servers { + if srv.Host == rep.MasterHost.String && srv.Port == rep.MasterPort.String { + return c + } + } + } + } + return nil +} + +func (cluster *Cluster) GetRingChildServer(oldMaster *ServerMonitor) *ServerMonitor { + for _, s := range cluster.Servers { + if s.ServerID != cluster.oldMaster.ServerID { + //cluster.LogPrintf(LvlDbg, "test %s failed %s", s.URL, cluster.oldMaster.URL) + master, err := cluster.GetMasterFromReplication(s) + if err == nil && master.ServerID == oldMaster.ServerID { + return s + } + } + } + return nil +} + +func (cluster *Cluster) GetRingParentServer(oldMaster *ServerMonitor) *ServerMonitor { + ss, err := 
cluster.oldMaster.GetSlaveStatusLastSeen(cluster.oldMaster.ReplicationSourceName) + if err != nil { + return nil + } + return cluster.GetServerFromURL(ss.MasterHost.String + ":" + ss.MasterPort.String) +} + func (cluster *Cluster) GetClusterFromName(name string) (*Cluster, error) { for _, c := range cluster.clusterList { diff --git a/cluster/srv.go b/cluster/srv.go index e6456b6dc..4db560633 100644 --- a/cluster/srv.go +++ b/cluster/srv.go @@ -842,34 +842,52 @@ func (server *ServerMonitor) Refresh() error { return nil } -/* Handles write freeze and existing transactions on a server */ +/* Handles write freeze and kills existing transactions on a server */ func (server *ServerMonitor) freeze() bool { - logs, err := dbhelper.SetReadOnly(server.Conn, true) + if server.ClusterGroup.Conf.FailEventScheduler { + server.ClusterGroup.LogPrintf(LvlInfo, "Freezing writes from Event Scheduler on %s", server.URL) + logs, err := server.SetEventScheduler(false) + server.ClusterGroup.LogSQL(logs, err, server.URL, "Freeze", LvlErr, "Could not disable event scheduler on %s", server.URL) + } + + server.ClusterGroup.LogPrintf(LvlInfo, "Freezing writes stopping all slaves on %s", server.URL) + logs, err := server.StopAllSlaves() + server.ClusterGroup.LogSQL(logs, err, server.URL, "Freeze", LvlErr, "Could not stop replication sources on %s", server.URL) + + server.ClusterGroup.LogPrintf(LvlInfo, "Freezing writes set read only on %s", server.URL) + logs, err = dbhelper.SetReadOnly(server.Conn, true) server.ClusterGroup.LogSQL(logs, err, server.URL, "Freeze", LvlInfo, "Could not set %s as read-only: %s", server.URL, err) if err != nil { return false } for i := server.ClusterGroup.Conf.SwitchWaitKill; i > 0; i -= 500 { threads, logs, err := dbhelper.CheckLongRunningWrites(server.Conn, 0) - server.ClusterGroup.LogSQL(logs, err, server.URL, "Freeze", LvlErr, "Could not check long running Writes %s as read-only: %s", server.URL, err) + server.ClusterGroup.LogSQL(logs, err, server.URL, "Freeze", LvlErr, "Could not check long running writes %s as read-only: %s", server.URL, err) if threads == 0 { break } - server.ClusterGroup.LogPrintf(LvlInfo, "Waiting for %d write threads to complete on %s", threads, server.URL) + server.ClusterGroup.LogPrintf(LvlInfo, "Freezing writes waiting for %d write threads to complete on %s", threads, server.URL) time.Sleep(500 * time.Millisecond) } + server.ClusterGroup.LogPrintf(LvlInfo, "Freezing writes saving max_connections on %s ", server.URL) + server.maxConn, logs, err = dbhelper.GetVariableByName(server.Conn, "MAX_CONNECTIONS", server.DBVersion) - server.ClusterGroup.LogSQL(logs, err, server.URL, "Freeze", LvlErr, "Could not get max_connections value on demoted leader") + server.ClusterGroup.LogSQL(logs, err, server.URL, "Freeze", LvlErr, "Could not save max_connections value on %s", server.URL) if err != nil { } else { if server.ClusterGroup.Conf.SwitchDecreaseMaxConn { + server.ClusterGroup.LogPrintf(LvlInfo, "Freezing writes decreasing max_connections to 1 on %s ", server.URL) logs, err := dbhelper.SetMaxConnections(server.Conn, strconv.FormatInt(server.ClusterGroup.Conf.SwitchDecreaseMaxConnValue, 10), server.DBVersion) - server.ClusterGroup.LogSQL(logs, err, server.URL, "Freeze", LvlErr, "Could not set max_connections to 1 on %s %s", server.URL, err) } } - server.ClusterGroup.LogPrintf("INFO", "Terminating all threads on %s", server.URL) +
server.ClusterGroup.LogPrintf("INFO", "Freezing writes killing all other remaining threads on %s", server.URL) dbhelper.KillThreads(server.Conn, server.DBVersion) + server.ClusterGroup.LogPrintf(LvlInfo, "Freezing writes rejecting writes via FTWRL on %s ", server.URL) + logs, err = dbhelper.FlushTablesWithReadLock(server.Conn, server.DBVersion) + server.ClusterGroup.LogSQL(logs, err, server.URL, "MasterFailover", LvlErr, "Could not lock tables on %s : %s", server.URL, err) + return true } @@ -972,6 +990,42 @@ func (server *ServerMonitor) StopSlave() (string, error) { return dbhelper.StopSlave(server.Conn, server.ClusterGroup.Conf.MasterConn, server.DBVersion) } +func (server *ServerMonitor) StopAllSlaves() (string, error) { + if server.Conn == nil { + return "", errors.New("No database connection pool") + } + sql := "" + var lasterror error + for _, rep := range server.Replications { + res, errslave := dbhelper.StopSlave(server.Conn, rep.ConnectionName.String, server.DBVersion) + sql += res + if errslave != nil { + lasterror = errslave + } + } + + return sql, lasterror +} + +func (server *ServerMonitor) StopAllExtraSourceSlaves() (string, error) { + if server.Conn == nil { + return "", errors.New("No database connection pool") + } + sql := "" + var lasterror error + for _, rep := range server.Replications { + if rep.ConnectionName.String != server.ClusterGroup.Conf.MasterConn { + res, errslave := dbhelper.StopSlave(server.Conn, rep.ConnectionName.String, server.DBVersion) + sql += res + if errslave != nil { + lasterror = errslave + } + } + } + + return sql, lasterror +} + func (server *ServerMonitor) StartSlave() (string, error) { if server.Conn == nil { return "", errors.New("No databse connection") diff --git a/config/config.go b/config/config.go index 0a5ab6acf..f90e223da 100644 --- a/config/config.go +++ b/config/config.go @@ -99,6 +99,7 @@ type Config struct { PRXServersBackendMaxReplicationLag int `mapstructure:"proxy-servers-backend-max-replication-lag" toml:"proxy-servers-backend--max-replication-lag" json:"proxyServersBackendMaxReplicationLag"` PRXServersBackendMaxConnections int `mapstructure:"proxy-servers-backend-max-connections" toml:"proxy-servers-backend--max-connections" json:"proxyServersBackendMaxConnections"` ClusterHead string `mapstructure:"cluster-head" toml:"cluster-head" json:"clusterHead"` + ReplicationMultisourceHeadClusters string `mapstructure:"replication-multisource-head-clusters" toml:"replication-multisource-head-clusters" json:"replicationMultisourceHeadClusters"` MasterConnectRetry int `mapstructure:"replication-master-connect-retry" toml:"replication-master-connect-retry" json:"replicationMasterConnectRetry"` RplUser string `mapstructure:"replication-credential" toml:"replication-credential" json:"replicationCredential"` ReplicationErrorScript string `mapstructure:"replication-error-script" toml:"replication-error-script" json:"replicationErrorScript"` diff --git a/main.go b/main.go index 8a81c8e26..caa4edf03 100644 --- a/main.go +++ b/main.go @@ -205,7 +205,7 @@ func init() { monitorCmd.Flags().Int64Var(&conf.SwitchDecreaseMaxConnValue, "switchover-decrease-max-conn-value", 10, "Switchover decrease max connection to this value different according to flavor") monitorCmd.Flags().IntVar(&conf.SwitchSlaveWaitRouteChange, "switchover-wait-route-change", 2, "Switchover wait for unmanged proxy monitor to dicoverd new state") monitorCmd.Flags().StringVar(&conf.MasterConn, "replication-source-name", "", "Replication channel name to use for multisource") - + 
monitorCmd.Flags().StringVar(&conf.ReplicationMultisourceHeadClusters, "replication-multisource-head-clusters", "", "Multi source link to parent cluster, autodiscovered but can be materialized for bootstrapping replication") monitorCmd.Flags().StringVar(&conf.HostsDelayed, "replication-delayed-hosts", "", "Database hosts list that need delayed replication separated by commas") monitorCmd.Flags().IntVar(&conf.HostsDelayedTime, "replication-delayed-time", 3600, "Delayed replication time") From 950d2c408f4b2cc9abb5b49b6fad7a40d98fccd4 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Thu, 18 Feb 2021 14:54:15 +0100 Subject: [PATCH 12/84] More fixing on head cluster switchover --- cluster/cluster_del.go | 7 + cluster/cluster_fail.go | 16 +- cluster/cluster_get.go | 22 +++ cluster/cluster_log.go | 2 +- cluster/cluster_set.go | 1 + cluster/cluster_topo.go | 30 ++-- cluster/prx_haproxy.go | 2 +- etc/local/config.toml.osx | 4 +- etc/local/multisource/config.toml | 19 +-- .../opensvc/moduleset_mariadb.svc.mrm.db.json | 152 +++++++++++++++++- 10 files changed, 213 insertions(+), 42 deletions(-) diff --git a/cluster/cluster_del.go b/cluster/cluster_del.go index 38e247040..9ea352658 100644 --- a/cluster/cluster_del.go +++ b/cluster/cluster_del.go @@ -8,6 +8,13 @@ package cluster import "strings" +func (cluster *Cluster) RemoveServerFromIndex(index int) { + newServers := make([]*ServerMonitor, 0) + newServers = append(newServers, cluster.Servers[:index]...) + newServers = append(newServers, cluster.Servers[index+1:]...) + cluster.Servers = newServers +} + func (cluster *Cluster) CancelRollingRestart() error { cluster.LogPrintf(LvlInfo, "API receive cancel rolling restart") for _, pr := range cluster.Proxies { diff --git a/cluster/cluster_fail.go b/cluster/cluster_fail.go index bd4291c82..5a97c03a5 100644 --- a/cluster/cluster_fail.go +++ b/cluster/cluster_fail.go @@ -661,15 +661,15 @@ func (cluster *Cluster) FailoverExtraMultiSource(oldMaster *ServerMonitor, NewMa if rep.ConnectionName.String != cluster.Conf.MasterConn { myparentrplpassword := "" parentCluster := cluster.GetParentClusterFromReplicationSource(rep) - cluster.LogPrintf(LvlInfo, "Failover replication source %s ", rep.ConnectionName) + cluster.LogPrintf(LvlInfo, "Failover replication source %s ", rep.ConnectionName.String) if parentCluster != nil { myparentrplpassword = parentCluster.rplPass } else { - cluster.LogPrintf(LvlErr, "Unable to found a monitored cluster for replication source %s ", rep.ConnectionName) - cluster.LogPrintf(LvlErr, "Moving source %s with empty password to preserve replication stream on new master", rep.ConnectionName) + cluster.LogPrintf(LvlErr, "Unable to found a monitored cluster for replication source %s ", rep.ConnectionName.String) + cluster.LogPrintf(LvlErr, "Moving source %s with empty password to preserve replication stream on new master", rep.ConnectionName.String) } // need a way to found parent replication password - if rep.UsingGtid.String == "No" { + if strings.ToUpper(rep.UsingGtid.String) == "NO" { logs, err := dbhelper.ChangeMaster(NewMaster.Conn, dbhelper.ChangeMasterOpt{ Host: rep.MasterHost.String, Port: rep.MasterPort.String, User: rep.MasterUser.String, @@ -689,9 +689,9 @@ func (cluster *Cluster) FailoverExtraMultiSource(oldMaster *ServerMonitor, NewMa }, NewMaster.DBVersion) cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Change master failed on slave %s, %s", NewMaster.URL, err) } else { - if rep.UsingGtid.String == strings.ToUpper("SLAVE_POS") || rep.UsingGtid.String == strings.ToUpper("CURRENT_POS") { + if strings.ToUpper(rep.UsingGtid.String) == "SLAVE_POS" || strings.ToUpper(rep.UsingGtid.String) == "CURRENT_POS" { // We herite last GTID state from the old leader failed or not - logs, err := dbhelper.ChangeMaster(cluster.oldMaster.Conn, dbhelper.ChangeMasterOpt{ + logs, err := dbhelper.ChangeMaster(NewMaster.Conn, dbhelper.ChangeMasterOpt{ Host: rep.MasterHost.String, Port: rep.MasterPort.String, User: rep.MasterUser.String, @@ -727,10 +727,10 @@ func (cluster *Cluster) FailoverExtraMultiSource(oldMaster *ServerMonitor, NewMa } } logs, err := dbhelper.StartSlave(NewMaster.Conn, rep.ConnectionName.String, NewMaster.DBVersion) - cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Start replication source %s failed on %s, %s", rep.ConnectionName, NewMaster.URL, err) + cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Start replication source %s failed on %s, %s", rep.ConnectionName.String, NewMaster.URL, err) if fail == false { logs, err := dbhelper.ResetSlave(oldMaster.Conn, true, rep.ConnectionName.String, oldMaster.DBVersion) - cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Reset replication source %s failed on %s, %s", rep.ConnectionName, oldMaster.URL, err) + cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Reset replication source %s failed on %s, %s", rep.ConnectionName.String, oldMaster.URL, err) } } diff --git a/cluster/cluster_get.go b/cluster/cluster_get.go index 9bcd0ba9c..e66b185d1 100644 --- a/cluster/cluster_get.go +++ b/cluster/cluster_get.go @@ -491,6 +491,17 @@ func (cluster *Cluster) GetCron() []cron.Entry { } +func (cluster *Cluster) GetServerIndice(srv *ServerMonitor) int { + for i, sv := range cluster.Servers { + // cluster.LogPrintf(LvlInfo, "HasServer:%s %s, %s %s", sv.Id, srv.Id, sv.URL, srv.URL) + // id can not be used for checking equality because same srv in different clusters + if sv.URL == srv.URL { + return i + } + } + return 0 +} + func (cluster *Cluster) getClusterByName(clname string) *Cluster { for _, c := range cluster.clusterList { @@ -528,6 +539,17 @@ func (cluster *Cluster) GetChildClusters() map[string]*Cluster { if cluster.Name == c.Conf.ClusterHead { clusters[c.Name] = c } + // loop over master multi source replication + condidateclustermaster := c.GetMaster() + if condidateclustermaster != nil && c.Name != cluster.Name { + for _, rep := range condidateclustermaster.Replications { + // does a source name match my cluster name or does any child cluster master point to my master + if rep.ConnectionName.String == cluster.Name || (cluster.GetMaster() != nil && cluster.master.Host == rep.MasterHost.String && cluster.master.Port == rep.MasterPort.String) { + cluster.LogPrintf(LvlDbg, "Discovered child cluster %s replication source %s", c.Name, rep.ConnectionName.String) + clusters[c.Name] = c + } + } + } } return clusters } diff --git a/cluster/cluster_log.go b/cluster/cluster_log.go index 0b306a9b5..5015a6463 100644 --- a/cluster/cluster_log.go +++ b/cluster/cluster_log.go @@ -148,7 +148,7 @@ func (cluster *Cluster) LogPrintf(level string, format string, args ...interface // f := fmt.Sprintln(stamp, format) // io.WriteString(cluster.logPtr, fmt.Sprintf(f, args...)) - log.WithField("cluster", cluster.Name).Debugf(cliformat, args...) + // log.WithField("cluster", cluster.Name).Debugf(cliformat, args...)
} if cluster.tlog != nil && cluster.tlog.Len > 0 { cluster.tlog.Add(fmt.Sprintf(format, args...)) diff --git a/cluster/cluster_set.go b/cluster/cluster_set.go index 8760bc3f6..ff26df80a 100644 --- a/cluster/cluster_set.go +++ b/cluster/cluster_set.go @@ -686,6 +686,7 @@ func (cl *Cluster) SetArbitratorReport() error { return nil } +// SetClusterHead for MariaDB spider we can arbtitraty shard tables to child clusters func (cluster *Cluster) SetClusterHead(ClusterName string) { cluster.Conf.ClusterHead = ClusterName } diff --git a/cluster/cluster_topo.go b/cluster/cluster_topo.go index 9f5b3dad5..80b170b5f 100644 --- a/cluster/cluster_topo.go +++ b/cluster/cluster_topo.go @@ -73,19 +73,27 @@ func (cluster *Cluster) AddChildServers() error { mychilds := cluster.GetChildClusters() for _, c := range mychilds { for _, sv := range c.Servers { - + cluster.LogPrintf(LvlDbg, "AddChildServers chacking %s of %s ", sv.URL, c.Name) if sv.IsSlaveOfReplicationSource(cluster.Conf.MasterConn) { - mymaster, _ := cluster.GetMasterFromReplication(sv) - if mymaster != nil { - // cluster.slaves = append(cluster.slaves, sv) - if !cluster.HasServer(sv) { - srv, err := cluster.newServerMonitor(sv.Name+":"+sv.Port, sv.ClusterGroup.dbUser, sv.ClusterGroup.dbPass, false, c.GetDomain()) - if err != nil { - return err - } - srv.Ignored = true - cluster.Servers = append(cluster.Servers, srv) + cluster.LogPrintf(LvlDbg, "AddChildServers %s IsSlaveOfReplicationSource %s ", sv.URL, cluster.Conf.MasterConn) + // mymaster, _ := cluster.GetMasterFromReplication(sv) + // if mymaster != nil { + cluster.LogPrintf(LvlDbg, "AddChildServers %s master found %s ", sv.URL, cluster.Conf.MasterConn) + + if !cluster.HasServer(sv) { + cluster.LogPrintf(LvlDbg, "AddChildServers %s Has server already found %s ", sv.URL, cluster.Conf.MasterConn) + + srv, err := cluster.newServerMonitor(sv.Name+":"+sv.Port, sv.ClusterGroup.dbUser, sv.ClusterGroup.dbPass, false, c.GetDomain()) + if err != nil { + return err } + srv.Ignored = true + cluster.Servers = append(cluster.Servers, srv) + // } + } + } else { + if cluster.HasServer(sv) { + cluster.RemoveServerFromIndex(cluster.GetServerIndice(sv)) } } } diff --git a/cluster/prx_haproxy.go b/cluster/prx_haproxy.go index 898a98308..4631e4e66 100644 --- a/cluster/prx_haproxy.go +++ b/cluster/prx_haproxy.go @@ -254,7 +254,7 @@ func (cluster *Cluster) refreshHaproxy(proxy *Proxy) error { cluster.LogPrintf(LvlInfo, "Detecting broken resplication and UP state in haproxy %s drain server %s", proxy.Host+":"+proxy.Port, srv.URL) haRuntime.SetDrain(srv.Id, cluster.Conf.HaproxyAPIReadBackend) } - if (srv.State == stateSlave || srv.State == stateRelay) && line[17] == "DRAIN" { + if (srv.State == stateSlave || srv.State == stateRelay) && line[17] == "DRAIN" && !srv.IsIgnored() { cluster.LogPrintf(LvlInfo, "Detecting valid resplication and DRAIN state in haproxy %s enable traffic on server %s", proxy.Host+":"+proxy.Port, srv.URL) haRuntime.SetReady(srv.Id, cluster.Conf.HaproxyAPIReadBackend) } diff --git a/etc/local/config.toml.osx b/etc/local/config.toml.osx index 56695e4f0..3719fb739 100644 --- a/etc/local/config.toml.osx +++ b/etc/local/config.toml.osx @@ -33,14 +33,14 @@ backup-mysqldump-path = "/usr/local/bin/mysqldump" #include="etc/local/features/scheduler" #include="etc/local/masterslave" #include="etc/local/masterslave/consul" -include="etc/local/masterslave/haproxy" +##include="etc/local/masterslave/haproxy" #include="etc/local/masterslave/maxscale" #include="etc/local/masterslave/myproxy" 
#include="etc/local/masterslave/shardproxy" #include="etc/local/masterslaveslave" #include="etc/local/multimaster" #include="etc/local/multimasterring" -#include="etc/local/multisource" +include="etc/local/multisource" #include="etc/local/multitiersalve" #include="etc/local/mutlitiercluster" #include="etc/local/postgres" diff --git a/etc/local/multisource/config.toml b/etc/local/multisource/config.toml index 6a3f2843c..d1e467fec 100644 --- a/etc/local/multisource/config.toml +++ b/etc/local/multisource/config.toml @@ -7,9 +7,8 @@ [Cluster_Haproxy_A] title = "ClusterTestMasterSlave" -db-servers-hosts = "127.0.0.1:3310,127.0.0.1:3311,127.0.0.1:3312" +db-servers-hosts = "127.0.0.1:3310,127.0.0.1:3311" db-servers-prefered-master = "127.0.0.1:3310" -db-servers-ignored-hosts = "127.0.0.1:3312" db-servers-credential = "root:test" db-servers-connect-timeout = 1 @@ -19,15 +18,15 @@ replication-source-name = "cluster_a" haproxy = true haproxy-write-port = 3303 haproxy-read-port = 3302 - +haproxy-api-port = 1999 +haproxy-stat-port = 1998 [Cluster_Haproxy_B] title = "ClusterTestMasterSlave" -db-servers-hosts = "127.0.0.1:3313,127.0.0.1:3314,127.0.0.1:3312" +db-servers-hosts = "127.0.0.1:3313,127.0.0.1:3314" db-servers-prefered-master = "127.0.0.1:3313" -db-servers-ignored-hosts = "127.0.0.1:3312" db-servers-credential = "root:test" db-servers-connect-timeout = 1 @@ -37,11 +36,5 @@ replication-source-name = "cluster_b" haproxy = true haproxy-write-port = 3304 haproxy-read-port = 3305 - -[Default] -prov-orchestrator = "local" -db-servers-binary-path= "/usr/local/mysql/bin/" -haproxy-binary-path= "/usr/local/bin/haproxy" -verbose = true -http-server = true -http-bind-address = "0.0.0.0" +haproxy-api-port = 1979 +haproxy-stat-port = 1978 diff --git a/share/opensvc/moduleset_mariadb.svc.mrm.db.json b/share/opensvc/moduleset_mariadb.svc.mrm.db.json index 94fbd9a32..acf5110d5 100644 --- a/share/opensvc/moduleset_mariadb.svc.mrm.db.json +++ b/share/opensvc/moduleset_mariadb.svc.mrm.db.json @@ -2144,6 +2144,63 @@ } ], "fset_name": "mariadb.engine.archive" + }, + { + "fset_stats": false, + "id": 402, + "filters": [ + { + "filter": { + "f_op": "=", + "f_field": "tag_name", + "f_value": "deb", + "f_table": "v_tags", + "id": 359 + }, + "f_order": 0, + "f_log_op": "AND", + "filterset": null + } + ], + "fset_name": "system.deb" + }, + { + "fset_stats": false, + "id": 403, + "filters": [ + { + "filter": { + "f_op": "=", + "f_field": "tag_name", + "f_value": "rpm", + "f_table": "v_tags", + "id": 360 + }, + "f_order": 0, + "f_log_op": "AND", + "filterset": null + } + ], + "fset_name": "system.rpm" + }, + { + "fset_stats": false, + "id": 404, + "filters": [ + { + "filter": { + "f_op": "=", + "f_field": "tag_name", + "f_value": "trace", + "f_table": "v_tags", + "id": 361 + }, + "f_order": 0, + "f_log_op": "AND", + "filterset": null + } + ], + "fset_name": "mariadb.optimizer.trace" } ], "rulesets": [ @@ -2156,8 +2213,8 @@ "rulesets": [ "mariadb.svc.env", "mariadb.svc.mrm.db.cnf.docker", - "mariadb.svc.mrm.db.cnf.package", - "mariadb.svc.mrm.db.cnf.generic" + "mariadb.svc.mrm.db.cnf.generic", + "mariadb.svc.mrm.db.cnf.package" ], "publications": [ "replication-manager" @@ -2475,7 +2532,9 @@ "ruleset_public": false, "ruleset_type": "contextual", "rulesets": [ - "mariadb.svc.mrm.db.cnf.package.ssl" + "mariadb.svc.mrm.db.cnf.package.ssl", + "mariadb.svc.mrm.db.cnf.package.deb", + "mariadb.svc.mrm.db.cnf.package.rpm" ], "publications": [ "replication-manager" @@ -2548,8 +2607,8 @@ { "var_author": "admin Manager", "var_class": 
"file", - "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/default_optimizer.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_command: SET GLOBAL max_prepared_stmt_count=200000;SET GLOBAL optimizer_search_depth = 0;SET GLOBAL join_cache_level = 6;SET GLOBAL optimizer_switch ='extended_keys=off';SET GLOBAL optimizer_switch ='index_condition_pushdown=on';SET GLOBAL optimizer_switch ='engine_condition_pushdown=on';SET GLOBAL optimizer_switch ='derived_merge=on';SET GLOBAL optimizer_switch ='derived_with_keys=on';SET GLOBAL optimizer_switch ='firstmatch=off';SET GLOBAL optimizer_switch ='loosescan=off';SET GLOBAL optimizer_switch ='materialization=on';SET GLOBAL optimizer_switch ='in_to_exists=on';SET GLOBAL optimizer_switch ='semijoin=on';SET GLOBAL optimizer_switch ='partial_match_rowid_merge=on';SET GLOBAL optimizer_switch ='partial_match_table_scan=on';SET GLOBAL optimizer_switch ='subquery_cache=off';SET GLOBAL optimizer_switch ='mrr=on';SET GLOBAL optimizer_switch ='mrr_cost_based=off';SET GLOBAL optimizer_switch ='mrr_sort_keys=on';SET GLOBAL optimizer_switch ='outer_join_with_cache=on';SET GLOBAL optimizer_switch ='semijoin_with_cache=off';SET GLOBAL optimizer_switch ='join_cache_incremental=on';SET GLOBAL optimizer_switch ='join_cache_hashed=on';SET GLOBAL optimizer_switch ='join_cache_bka=on';SET GLOBAL optimizer_switch ='optimize_join_buffer_size=on';SET GLOBAL optimizer_switch ='table_elimination=on';SET GLOBAL eq_range_index_dive_limit=10;SET GLOBAL in_predicate_conversion_threshold=1000;SET GLOBAL optimizer_switch='orderby_uses_equalities=on';SET GLOBAL max_prepared_stmt_count=200000;SET GLOBAL eq_range_index_dive_limit=10;SET GLOBAL in_predicate_conversion_threshold=1000;\\n\\n# mysql_command: SET GLOBAL max_prepared_stmt_count=200000;SET GLOBAL optimizer_search_depth = 0;SET GLOBAL join_cache_level = 6;SET GLOBAL optimizer_switch ='extended_keys=off';SET GLOBAL optimizer_switch ='index_condition_pushdown=on';SET GLOBAL optimizer_switch ='engine_condition_pushdown=on';SET GLOBAL optimizer_switch ='derived_merge=on';SET GLOBAL optimizer_switch ='derived_with_keys=on';SET GLOBAL optimizer_switch ='firstmatch=off';SET GLOBAL optimizer_switch ='loosescan=off';SET GLOBAL optimizer_switch ='materialization=on';SET GLOBAL optimizer_switch ='in_to_exists=on';SET GLOBAL optimizer_switch ='semijoin=on';SET GLOBAL optimizer_switch ='partial_match_rowid_merge=on';SET GLOBAL optimizer_switch ='partial_match_table_scan=on';SET GLOBAL optimizer_switch ='subquery_cache=off';SET GLOBAL optimizer_switch ='mrr=on';SET GLOBAL optimizer_switch ='mrr_cost_based=off';SET GLOBAL optimizer_switch ='mrr_sort_keys=on';SET GLOBAL optimizer_switch ='outer_join_with_cache=on';SET GLOBAL optimizer_switch ='semijoin_with_cache=off';SET GLOBAL optimizer_switch ='join_cache_incremental=on';SET GLOBAL optimizer_switch ='join_cache_hashed=on';SET GLOBAL optimizer_switch ='join_cache_bka=on';SET GLOBAL optimizer_switch ='optimize_join_buffer_size=on';SET GLOBAL optimizer_switch ='table_elimination=on';SET GLOBAL eq_range_index_dive_limit=10;SET GLOBAL in_predicate_conversion_threshold=1000;SET GLOBAL optimizer_switch='orderby_uses_equalities=on';SET GLOBAL optimizer_switch='use_index_extensions=off'; SET GLOBAL optimizer_switch='duplicateweedout=on'; \\n\\n[mysqld]\\nloose_optimizer_switch='use_index_extensions=off'\\nloose_optimizer_switch ='index_condition_pushdown=on'\\nloose_optimizer_switch 
='engine_condition_pushdown=on'\\nloose_optimizer_switch ='derived_merge=on'\\nloose_optimizer_switch ='mrr=on'\\nloose_optimizer_switch ='mrr_cost_based=off'\\nloose_optimizer_switch ='firstmatch=off'\\nloose_optimizer_switch ='loosescan=off'\\nloose_optimizer_switch ='semijoin=on'\\nloose_optimizer_switch ='duplicateweedout=on'\\nloose_optimizer_switch ='materialization=on'\\n\\n[mariadb]\\nloose_optimizer_search_depth = 0\\nloose_join_cache_level = 6\\nloose_optimizer_switch ='extended_keys=off'\\nloose_optimizer_switch ='derived_with_keys=on'\\nloose_optimizer_switch ='in_to_exists=on'\\nloose_optimizer_switch ='partial_match_rowid_merge=on'\\nloose_optimizer_switch ='partial_match_table_scan=on'\\nloose_optimizer_switch ='subquery_cache=off'\\nloose_optimizer_switch ='mrr_sort_keys=on'\\nloose_optimizer_switch ='outer_join_with_cache=on'\\nloose_optimizer_switch ='semijoin_with_cache=off'\\nloose_optimizer_switch ='join_cache_incremental=on'\\nloose_optimizer_switch ='join_cache_hashed=on'\\nloose_optimizer_switch ='join_cache_bka=on'\\nloose_optimizer_switch ='optimize_join_buffer_size=on'\\nloose_optimizer_switch ='table_elimination=on'\\n#10.1.16\\nloose_optimizer_switch='orderby_uses_equalities=on'\\n#10.3.18, 10.4.8\\nloose_optimizer_switch ='in_predicate_conversion=on'\\nloose_eq_range_index_dive_limit=10\\nloose_in_predicate_conversion_threshold=1000\\n#10.4\\nloose_max_prepared_stmt_count=200000\\n\"}", - "var_updated": "2021-01-15 16:10:13", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/default_optimizer.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_command: SET GLOBAL max_prepared_stmt_count=200000;SET GLOBAL optimizer_search_depth = 0;SET GLOBAL join_cache_level = 6;SET GLOBAL optimizer_switch ='extended_keys=off';SET GLOBAL optimizer_switch ='index_condition_pushdown=on';SET GLOBAL optimizer_switch ='engine_condition_pushdown=on';SET GLOBAL optimizer_switch ='derived_merge=on';SET GLOBAL optimizer_switch ='derived_with_keys=on';SET GLOBAL optimizer_switch ='firstmatch=off';SET GLOBAL optimizer_switch ='loosescan=off';SET GLOBAL optimizer_switch ='materialization=on';SET GLOBAL optimizer_switch ='in_to_exists=on';SET GLOBAL optimizer_switch ='semijoin=on';SET GLOBAL optimizer_switch ='partial_match_rowid_merge=on';SET GLOBAL optimizer_switch ='partial_match_table_scan=on';SET GLOBAL optimizer_switch ='subquery_cache=off';SET GLOBAL optimizer_switch ='mrr=on';SET GLOBAL optimizer_switch ='mrr_cost_based=off';SET GLOBAL optimizer_switch ='mrr_sort_keys=on';SET GLOBAL optimizer_switch ='outer_join_with_cache=on';SET GLOBAL optimizer_switch ='semijoin_with_cache=off';SET GLOBAL optimizer_switch ='join_cache_incremental=on';SET GLOBAL optimizer_switch ='join_cache_hashed=on';SET GLOBAL optimizer_switch ='join_cache_bka=on';SET GLOBAL optimizer_switch ='optimize_join_buffer_size=on';SET GLOBAL optimizer_switch ='table_elimination=on';SET GLOBAL eq_range_index_dive_limit=10;SET GLOBAL in_predicate_conversion_threshold=1000;SET GLOBAL optimizer_switch='orderby_uses_equalities=on';SET GLOBAL max_prepared_stmt_count=200000;SET GLOBAL eq_range_index_dive_limit=10;SET GLOBAL in_predicate_conversion_threshold=1000;\\n\\n# mysql_command: SET GLOBAL max_prepared_stmt_count=200000;SET GLOBAL optimizer_search_depth = 0;SET GLOBAL join_cache_level = 6;SET GLOBAL optimizer_switch ='extended_keys=off';SET GLOBAL optimizer_switch ='index_condition_pushdown=on';SET GLOBAL optimizer_switch 
='engine_condition_pushdown=on';SET GLOBAL optimizer_switch ='derived_merge=on';SET GLOBAL optimizer_switch ='derived_with_keys=on';SET GLOBAL optimizer_switch ='firstmatch=off';SET GLOBAL optimizer_switch ='loosescan=off';SET GLOBAL optimizer_switch ='materialization=on';SET GLOBAL optimizer_switch ='in_to_exists=on';SET GLOBAL optimizer_switch ='semijoin=on';SET GLOBAL optimizer_switch ='partial_match_rowid_merge=on';SET GLOBAL optimizer_switch ='partial_match_table_scan=on';SET GLOBAL optimizer_switch ='subquery_cache=off';SET GLOBAL optimizer_switch ='mrr=on';SET GLOBAL optimizer_switch ='mrr_cost_based=off';SET GLOBAL optimizer_switch ='mrr_sort_keys=on';SET GLOBAL optimizer_switch ='outer_join_with_cache=on';SET GLOBAL optimizer_switch ='semijoin_with_cache=off';SET GLOBAL optimizer_switch ='join_cache_incremental=on';SET GLOBAL optimizer_switch ='join_cache_hashed=on';SET GLOBAL optimizer_switch ='join_cache_bka=on';SET GLOBAL optimizer_switch ='optimize_join_buffer_size=on';SET GLOBAL optimizer_switch ='table_elimination=on';SET GLOBAL eq_range_index_dive_limit=10;SET GLOBAL in_predicate_conversion_threshold=1000;SET GLOBAL optimizer_switch='orderby_uses_equalities=on';SET GLOBAL optimizer_switch='use_index_extensions=off'; SET GLOBAL optimizer_switch='duplicateweedout=on'; \\n\\n[mysqld]\\nloose_optimizer_switch='use_index_extensions=off'\\nloose_optimizer_switch ='index_condition_pushdown=on'\\nloose_optimizer_switch ='engine_condition_pushdown=on'\\nloose_optimizer_switch ='derived_merge=on'\\nloose_optimizer_switch ='mrr=on'\\nloose_optimizer_switch ='mrr_cost_based=off'\\nloose_optimizer_switch ='firstmatch=off'\\nloose_optimizer_switch ='loosescan=off'\\nloose_optimizer_switch ='semijoin=on'\\nloose_optimizer_switch ='duplicateweedout=on'\\nloose_optimizer_switch ='materialization=on'\\n\\n[mariadb]\\nloose_optimizer_search_depth = 0\\nloose_join_cache_level = 6\\nloose_optimizer_switch ='extended_keys=off'\\nloose_optimizer_switch ='derived_with_keys=on'\\nloose_optimizer_switch ='in_to_exists=on'\\nloose_optimizer_switch ='partial_match_rowid_merge=on'\\nloose_optimizer_switch ='partial_match_table_scan=on'\\nloose_optimizer_switch ='subquery_cache=off'\\nloose_optimizer_switch ='mrr_sort_keys=on'\\nloose_optimizer_switch ='outer_join_with_cache=on'\\nloose_optimizer_switch ='semijoin_with_cache=off'\\nloose_optimizer_switch ='join_cache_incremental=on'\\nloose_optimizer_switch ='join_cache_hashed=on'\\nloose_optimizer_switch ='join_cache_bka=on'\\nloose_optimizer_switch ='optimize_join_buffer_size=on'\\nloose_optimizer_switch ='table_elimination=on'\\n#10.1.16\\nloose_optimizer_switch='orderby_uses_equalities=on'\\n# 10.3.18, 10.4.8\\nloose_optimizer_switch ='in_predicate_conversion=on'\\nloose_eq_range_index_dive_limit=10\\nloose_in_predicate_conversion_threshold=1000\\n\\n# 10.6\\nloose_optimizer_switch ='not_null_range_scan=off'\\nloose_optimizer_switch ='cost_based_order_by_limit=on'\\n\\n# 10.4\\nloose_max_prepared_stmt_count=200000\\n\"}", + "var_updated": "2021-02-09 11:19:57", "var_name": "db_cnf_default_optimizer", "id": 5913 }, @@ -3768,6 +3827,14 @@ "var_updated": "2021-02-05 21:00:18", "var_name": "db_cnf_script_bootstrap", "id": 6267 + }, + { + "var_author": "admin Manager", + "var_class": "file", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/with_opt_trace.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_documentation: 
https://mariadb.com/kb/en/optimizer-trace-overview/\\n# mariadb_version: 10.4.4\\n# mariadb_command: SET GLOBAL optimizer_trace='enabled=on';\\n\\n# mysql_documentation: https://dev.mysql.com/doc/dev/mysql-server/latest/PAGE_OPT_TRACE.html\\n# mysql_version: 7.0\\n# mysql_command: SET GLOBAL optimizer_trace='enabled=on';\\n\\n\\n[mysqld]\\nloose_optimizer_trace='enabled=on'\"}", + "var_updated": "2021-02-15 10:31:28", + "var_name": "db_cnf_opt_with_trace", + "id": 6273 } ], "ruleset_public": false, @@ -3882,7 +3949,8 @@ "mariadb.svc.mrm.db.cnf.generic.log_nowarnings", "mariadb.svc.mrm.db.cnf.generic.rep_nologslavestatement", "mariadb.svc.mrm.db.cnf.generic.rep_nosyncbinlog", - "mariadb.svc.mrm.db.cnf.generic.enginearchive" + "mariadb.svc.mrm.db.cnf.generic.enginearchive", + "mariadb.svc.mrm.db.cnf.generic.opt_withtrace" ], "publications": [ "replication-manager" @@ -5828,6 +5896,30 @@ "replication-manager" ] }, + { + "fset_name": "system.deb", + "ruleset_name": "mariadb.svc.mrm.db.cnf.package.deb", + "variables": [ + { + "var_author": "admin Manager", + "var_class": "file", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/init/post_bootstrap\",\"mode\":755,\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_UID%%\"}", + "var_updated": "2021-02-09 12:53:54", + "var_name": "post_bootstrap", + "id": 6272 + } + ], + "ruleset_public": false, + "ruleset_type": "contextual", + "rulesets": [], + "publications": [ + "replication-manager" + ], + "id": 1111, + "responsibles": [ + "replication-manager" + ] + }, { "fset_name": "mariadb.replication.row", "ruleset_name": "mariadb.svc.mrm.db.cnf.generic.rep_withrow", @@ -6621,6 +6713,54 @@ "responsibles": [ "replication-manager" ] + }, + { + "fset_name": "system.rpm", + "ruleset_name": "mariadb.svc.mrm.db.cnf.package.rpm", + "variables": [ + { + "var_author": "admin Manager", + "var_class": "file", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/init/post_bootstrap\",\"mode\":755,\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\"}", + "var_updated": "2021-02-09 12:53:48", + "var_name": "post_bootstrap", + "id": 6271 + } + ], + "ruleset_public": true, + "ruleset_type": "contextual", + "rulesets": [], + "publications": [ + "replication-manager" + ], + "id": 1146, + "responsibles": [ + "replication-manager" + ] + }, + { + "fset_name": "mariadb.optimizer.trace", + "ruleset_name": "mariadb.svc.mrm.db.cnf.generic.opt_withtrace", + "variables": [ + { + "var_author": "admin Manager", + "var_class": "symlink", + "var_value": "{\"symlink\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/conf.d/99_with_opt_trace.cnf\",\"target\":\"../with_opt_trace.cnf\"}", + "var_updated": "2021-02-15 10:34:16", + "var_name": "db_link_with_eits", + "id": 6274 + } + ], + "ruleset_public": false, + "ruleset_type": "contextual", + "rulesets": [], + "publications": [ + "replication-manager" + ], + "id": 1147, + "responsibles": [ + "replication-manager" + ] } ] } From 584352b5a092f5cc1b1c9e659ecf5b6b2536ca2b Mon Sep 17 00:00:00 2001 From: svaroqui Date: Thu, 18 Feb 2021 17:30:16 +0100 Subject: [PATCH 13/84] Avoid crahing if Default section not present --- server/server.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/server.go b/server/server.go index 228625004..adcd4808a 100644 --- a/server/server.go +++ b/server/server.go @@ -275,13 +275,13 @@ func (repman *ReplicationManager) InitConfig(conf config.Config) { cf1 := viper.Sub("Default") //cf1.Debug() - cf1.AutomaticEnv() - 
cf1.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_")) - cf1.SetEnvPrefix("DEFAULT") if cf1 == nil { //log.Fatal("config.toml has no [Default] configuration group and config group has not been specified") log.Warning("config.toml has no [Default] configuration group and config group has not been specified") } else { + cf1.AutomaticEnv() + cf1.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_")) + cf1.SetEnvPrefix("DEFAULT") cf1.Unmarshal(&conf) From 5e71d8c13dbe152c8fe3b0b164f69800bf4bfd89 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Mon, 22 Feb 2021 23:41:08 +0100 Subject: [PATCH 14/84] Move prx.Id repetition to Proxy.SetID() and move crcTable to be cluster property --- cluster/cluster.go | 3 +++ cluster/prx.go | 15 ++++++--------- cluster/prx_mariadbshardproxy.go | 12 +++++------- cluster/prx_proxysql.go | 3 +-- cluster/prx_set.go | 9 +++++++++ cluster/srv.go | 7 +++---- 6 files changed, 27 insertions(+), 22 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index a51fb197a..9abd83b78 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -11,6 +11,7 @@ import ( "encoding/json" "errors" "fmt" + "hash/crc64" "io/ioutil" "os" "os/exec" @@ -154,6 +155,7 @@ type Cluster struct { WaitingSwitchover int `json:"waitingSwitchover"` WaitingFailover int `json:"waitingFailover"` sync.Mutex + crcTable *crc64.Table } type ClusterSorter []*Cluster @@ -226,6 +228,7 @@ const ( // Init initial cluster definition func (cluster *Cluster) Init(conf config.Config, cfgGroup string, tlog *s18log.TermLog, log *s18log.HttpLog, termlength int, runUUID string, repmgrVersion string, repmgrHostname string, key []byte) error { + cluster.crcTable = crc64.MakeTable(crc64.ECMA) // http://golang.org/pkg/hash/crc64/#pkg-constants cluster.switchoverChan = make(chan bool) // should use buffered channels or it will block cluster.statecloseChan = make(chan state.State, 100) diff --git a/cluster/prx.go b/cluster/prx.go index e3adf2568..4d7b503a7 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -10,7 +10,6 @@ package cluster import ( "fmt" - "hash/crc64" "os" "strconv" "strings" @@ -220,7 +219,6 @@ type proxyList []DatabaseProxy func (cluster *Cluster) newProxyList() error { nbproxies := 0 - crcTable := crc64.MakeTable(crc64.ECMA) // http://golang.org/pkg/hash/crc64/#pkg-constants if cluster.Conf.MxsHost != "" && cluster.Conf.MxsOn { nbproxies += len(strings.Split(cluster.Conf.MxsHost, ",")) } @@ -277,9 +275,8 @@ func (cluster *Cluster) newProxyList() error { if cluster.Conf.ProvNetCNI { prx.Host = prx.Host + "." + cluster.Name + ".svc." + cluster.Conf.ProvOrchestratorCluster } - prx.Id = "px" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+prx.Name+":"+strconv.Itoa(prx.WritePort)), crcTable), 10) prx.ClusterGroup = cluster - + prx.SetID() prx.SetDataDir() prx.SetServiceName(cluster.Name, prx.Name) cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) @@ -306,8 +303,8 @@ func (cluster *Cluster) newProxyList() error { if cluster.Conf.ProvNetCNI { prx.Host = prx.Host + "." + cluster.Name + ".svc." 
+ cluster.Conf.ProvOrchestratorCluster } - prx.Id = "px" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+prx.Name+":"+strconv.Itoa(prx.WritePort)), crcTable), 10) prx.ClusterGroup = cluster + prx.SetID() prx.SetDataDir() prx.SetServiceName(cluster.Name, prx.Name) cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) @@ -330,8 +327,8 @@ func (cluster *Cluster) newProxyList() error { if prx.Name == "" { prx.Name = prx.Host } - prx.Id = "px" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+prx.Name+":"+strconv.Itoa(prx.WritePort)), crcTable), 10) prx.ClusterGroup = cluster + prx.SetID() prx.SetDataDir() prx.SetServiceName(cluster.Name, prx.Name) cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) @@ -383,8 +380,8 @@ func (cluster *Cluster) newProxyList() error { prx.Port = "3306" } prx.WritePort, _ = strconv.Atoi(prx.GetPort()) - prx.Id = "px" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+prx.Name+":"+strconv.Itoa(prx.WritePort)), crcTable), 10) prx.ClusterGroup = cluster + prx.SetID() prx.SetDataDir() prx.SetServiceName(cluster.Name, prx.Name) cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) @@ -414,8 +411,8 @@ func (cluster *Cluster) newProxyList() error { if cluster.Conf.ProvNetCNI { prx.Host = prx.Host + "." + cluster.Name + ".svc." + cluster.Conf.ProvOrchestratorCluster } - prx.Id = "px" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+prx.Name+":"+strconv.Itoa(prx.WritePort)), crcTable), 10) prx.ClusterGroup = cluster + prx.SetID() prx.SetDataDir() prx.SetServiceName(cluster.Name, prx.Name) cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) @@ -441,11 +438,11 @@ func (cluster *Cluster) newProxyList() error { if prx.Name == "" { prx.Name = prx.Host } - prx.Id = "px" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+prx.Name+":"+strconv.Itoa(prx.WritePort)), crcTable), 10) if prx.Host == "" { prx.Host = "repman." + cluster.Name + ".svc." 
+ cluster.Conf.ProvOrchestratorCluster } prx.ClusterGroup = cluster + prx.SetID() prx.SetDataDir() prx.SetServiceName(cluster.Name, prx.Name) cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) diff --git a/cluster/prx_mariadbshardproxy.go b/cluster/prx_mariadbshardproxy.go index 4787ee163..d5c518323 100644 --- a/cluster/prx_mariadbshardproxy.go +++ b/cluster/prx_mariadbshardproxy.go @@ -25,8 +25,6 @@ import ( "github.com/spf13/pflag" ) -var crcTable = crc64.MakeTable(crc64.ECMA) - type MariadbShardProxy struct { Proxy } @@ -77,7 +75,7 @@ func (proxy *MariadbShardProxy) Failover() { if s == "replication_manager_schema" { foundReplicationManagerSchema = true } - checksum64 := crc64.Checksum([]byte(s+"_"+cluster.GetName()), crcTable) + checksum64 := crc64.Checksum([]byte(s+"_"+cluster.GetName()), cluster.crcTable) query := "CREATE OR REPLACE SERVER RW" + strconv.FormatUint(checksum64, 10) + " FOREIGN DATA WRAPPER mysql OPTIONS (HOST '" + misc.Unbracket(cluster.master.Host) + "', DATABASE '" + s + "', USER '" + cluster.master.User + "', PASSWORD '" + cluster.master.Pass + "', PORT " + cluster.master.Port + ")" _, err = proxy.ShardProxy.Conn.Exec(query) @@ -123,7 +121,7 @@ func (cluster *Cluster) CheckMdbShardServersSchema(proxy *MariadbShardProxy) { if s == "replication_manager_schema" { foundReplicationManagerSchema = true } - checksum64 := crc64.Checksum([]byte(s+"_"+cluster.GetName()), crcTable) + checksum64 := crc64.Checksum([]byte(s+"_"+cluster.GetName()), cluster.crcTable) query := "CREATE SERVER IF NOT EXISTS RW" + strconv.FormatUint(checksum64, 10) + " FOREIGN DATA WRAPPER mysql OPTIONS (HOST '" + misc.Unbracket(cluster.master.Host) + "', DATABASE '" + s + "', USER '" + cluster.master.User + "', PASSWORD '" + cluster.master.Pass + "', PORT " + cluster.master.Port + ")" _, err = proxy.ShardProxy.Conn.Exec(query) @@ -217,7 +215,7 @@ func (cluster *Cluster) ShardProxyGetHeadCluster() *Cluster { } func (cluster *Cluster) ShardProxyCreateVTable(proxy *MariadbShardProxy, schema string, table string, duplicates []*ServerMonitor, withreshard bool) error { - checksum64 := crc64.Checksum([]byte(schema+"_"+cluster.GetName()), crcTable) + checksum64 := crc64.Checksum([]byte(schema+"_"+cluster.GetName()), cluster.crcTable) var err error var ddl string if len(duplicates) == 1 { @@ -239,7 +237,7 @@ func (cluster *Cluster) ShardProxyCreateVTable(proxy *MariadbShardProxy, schema link_status_def := " link_status \"" for _, cl := range cluster.ShardProxyGetShardClusters() { cl.CheckMdbShardServersSchema(proxy) - checksum64 := crc64.Checksum([]byte(schema+"_"+cl.GetName()), crcTable) + checksum64 := crc64.Checksum([]byte(schema+"_"+cl.GetName()), cluster.crcTable) srv_def = srv_def + "RW" + strconv.FormatUint(checksum64, 10) + " " link_status_def = link_status_def + "0 " } @@ -281,7 +279,7 @@ func (cluster *Cluster) ShardProxyCreateVTable(proxy *MariadbShardProxy, schema clusterList := cluster.ShardProxyGetShardClusters() for _, cl := range clusterList { cl.CheckMdbShardServersSchema(proxy) - checksum64 := crc64.Checksum([]byte(schema+"_"+cl.GetName()), crcTable) + checksum64 := crc64.Checksum([]byte(schema+"_"+cl.GetName()), cluster.crcTable) query = query + " PARTITION pt" + strconv.Itoa(i) + " COMMENT ='srv \"RW" + strconv.FormatUint(checksum64, 10) + "\", tbl \"" + table + "\", database \"" + schema + "\"'" if i != len(clusterList) { query = query + ",\n" diff --git a/cluster/prx_proxysql.go b/cluster/prx_proxysql.go index b774ac258..21b49cd9a 100644 --- 
a/cluster/prx_proxysql.go +++ b/cluster/prx_proxysql.go @@ -3,7 +3,6 @@ package cluster import ( "errors" "fmt" - "hash/crc64" "strconv" "github.com/signal18/replication-manager/config" @@ -40,7 +39,7 @@ func NewProxySQLProxy(clusterName string, proxyHost string, conf config.Config) } } - prx.Id = "px" + strconv.FormatUint(crc64.Checksum([]byte(clusterName+prx.Name+":"+strconv.Itoa(prx.WritePort)), crcTable), 10) + prx.SetID() return prx } diff --git a/cluster/prx_set.go b/cluster/prx_set.go index 95f810c27..c5280855a 100644 --- a/cluster/prx_set.go +++ b/cluster/prx_set.go @@ -10,10 +10,19 @@ package cluster import ( "fmt" + "hash/crc64" "os" + "strconv" "strings" ) +func (p *Proxy) SetID() { + cluster := p.ClusterGroup + p.Id = "px" + strconv.FormatUint( + crc64.Checksum([]byte(cluster.Name+p.Name+":"+strconv.Itoa(p.WritePort)), cluster.crcTable), + 10) +} + func (proxy *Proxy) SetServiceName(namespace string, name string) { proxy.ServiceName = namespace + "/svc/" + name } diff --git a/cluster/srv.go b/cluster/srv.go index e6456b6dc..edc1a1b6c 100644 --- a/cluster/srv.go +++ b/cluster/srv.go @@ -207,7 +207,6 @@ func (cluster *Cluster) newServerMonitor(url string, user string, pass string, c server.IsCompute = compute server.Domain = domain server.TLSConfigUsed = ConstTLSCurrentConfig - server.CrcTable = crc64.MakeTable(crc64.ECMA) server.ClusterGroup = cluster server.DBVersion = dbhelper.NewMySQLVersion("Unknowed-0.0.0", "") server.Name, server.Port, server.PostgressDB = misc.SplitHostPortDB(url) @@ -221,9 +220,9 @@ func (cluster *Cluster) newServerMonitor(url string, user string, pass string, c }*/ url = server.Name + server.Domain + ":3306" } - server.Id = "db" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+server.Name+server.Port), crcTable), 10) + server.Id = "db" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+server.Name+server.Port), cluster.crcTable), 10) var sid uint64 - sid, err = strconv.ParseUint(strconv.FormatUint(crc64.Checksum([]byte(server.Name+server.Port), server.CrcTable), 10), 10, 64) + sid, err = strconv.ParseUint(strconv.FormatUint(crc64.Checksum([]byte(server.Name+server.Port), cluster.crcTable), 10), 10, 64) server.ServerID = sid if cluster.Conf.TunnelHost != "" { go server.Tunnel() @@ -732,7 +731,7 @@ func (server *ServerMonitor) Refresh() error { if server.DBVersion.IsPPostgreSQL() { //PostgresQL as no server_id concept mimic via internal server id for topology detection var sid uint64 - sid, err = strconv.ParseUint(strconv.FormatUint(crc64.Checksum([]byte(server.SlaveStatus.MasterHost.String+server.SlaveStatus.MasterPort.String), server.CrcTable), 10), 10, 64) + sid, err = strconv.ParseUint(strconv.FormatUint(crc64.Checksum([]byte(server.SlaveStatus.MasterHost.String+server.SlaveStatus.MasterPort.String), cluster.crcTable), 10), 10, 64) if err != nil { server.ClusterGroup.LogPrintf(LvlWarn, "PG Could not assign server_id s", err) } From f0f69a1d144212e0e4b26ac610e80e91af2c16f1 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Mon, 22 Feb 2021 23:49:05 +0100 Subject: [PATCH 15/84] Correct ordering of assignment --- cluster/prx.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cluster/prx.go b/cluster/prx.go index 4d7b503a7..fc4c1e4ff 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -435,12 +435,12 @@ func (cluster *Cluster) newProxyList() error { prx.ReadWritePort = cluster.Conf.MyproxyPort prx.User = cluster.Conf.MyproxyUser prx.Pass = cluster.Conf.MyproxyPassword - if prx.Name == "" { - prx.Name = prx.Host - } 
if prx.Host == "" { prx.Host = "repman." + cluster.Name + ".svc." + cluster.Conf.ProvOrchestratorCluster } + if prx.Name == "" { + prx.Name = prx.Host + } prx.ClusterGroup = cluster prx.SetID() prx.SetDataDir() From ac171be0a1b698dd2ec973300ed205e9added6b6 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Mon, 22 Feb 2021 23:49:30 +0100 Subject: [PATCH 16/84] Move Get/Set methods to appropriate file --- cluster/prx.go | 68 ++-------------------------------------------- cluster/prx_get.go | 49 +++++++++++++++++++++++++++++---- cluster/prx_set.go | 22 +++++++++++++++ 3 files changed, 68 insertions(+), 71 deletions(-) diff --git a/cluster/prx.go b/cluster/prx.go index fc4c1e4ff..4e2651ef0 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -65,70 +65,6 @@ type Proxy struct { Agent string `json:"agent"` } -func (p *Proxy) GetAgent() string { - return p.Agent -} - -func (p *Proxy) GetType() string { - return p.Type -} - -func (p *Proxy) GetHost() string { - return p.Host -} - -func (p *Proxy) GetPort() string { - return p.Port -} - -func (p *Proxy) GetWritePort() int { - return p.WritePort -} - -func (p *Proxy) GetId() string { - return p.Id -} - -func (p *Proxy) GetState() string { - return p.State -} - -func (p *Proxy) SetState(v string) { - p.State = v -} - -func (p *Proxy) GetUser() string { - return p.User -} - -func (p *Proxy) GetPass() string { - return p.Pass -} - -func (p *Proxy) GetFailCount() int { - return p.FailCount -} - -func (p *Proxy) SetFailCount(c int) { - p.FailCount = c -} - -func (p *Proxy) SetCredential(credential string) { - p.User, p.Pass = misc.SplitPair(credential) -} - -func (p *Proxy) GetPrevState() string { - return p.PrevState -} - -func (p *Proxy) SetPrevState(state string) { - p.PrevState = state -} - -func (p *Proxy) SetSuspect() { - p.State = stateSuspect -} - type DatabaseProxy interface { AddFlags(flags *pflag.FlagSet, conf config.Config) Init() @@ -608,8 +544,6 @@ func (cluster *Cluster) failoverProxies() { cluster.initConsul() } -// TODO: reduce to -// for { pr.Init() } func (cluster *Cluster) initProxies() { for _, pr := range cluster.Proxies { cluster.LogPrintf(LvlInfo, "New proxy monitored: %s %s:%s", pr.GetType(), pr.GetHost(), pr.GetPort()) @@ -630,6 +564,8 @@ func (proxy *Proxy) SendStats() error { } for _, wbackend := range proxy.BackendsWrite { var metrics = make([]graphite.Metric, 4) + + // TODO: clarify what this replacer does and what the purpose is replacer := strings.NewReplacer("`", "", "?", "", " ", "_", ".", "-", "(", "-", ")", "-", "/", "_", "<", "-", "'", "-", "\"", "-", ":", "-") server := "rw-" + replacer.Replace(wbackend.PrxName) metrics[0] = graphite.NewMetric(fmt.Sprintf("proxy.%s%s.%s.bytes_send", proxy.Type, proxy.Id, server), wbackend.PrxByteOut, time.Now().Unix()) diff --git a/cluster/prx_get.go b/cluster/prx_get.go index af5074c68..46f525aa7 100644 --- a/cluster/prx_get.go +++ b/cluster/prx_get.go @@ -55,11 +55,6 @@ func (cluster *Cluster) GetClusterProxyConn() (*sqlx.DB, error) { } -// Deprecated: replaced by proxy.GetCluster() -func (cluster *Cluster) GetClusterThisProxyConn(prx *Proxy) (*sqlx.DB, error) { - return prx.GetCluster() -} - func (prx *Proxy) GetCluster() (*sqlx.DB, error) { cluster := prx.ClusterGroup params := fmt.Sprintf("?timeout=%ds", cluster.Conf.Timeout) @@ -327,3 +322,47 @@ protocol=MySQLBackend } return "" } + +func (p *Proxy) GetAgent() string { + return p.Agent +} + +func (p *Proxy) GetType() string { + return p.Type +} + +func (p *Proxy) GetHost() string { + return p.Host +} + +func (p *Proxy) 
GetPort() string { + return p.Port +} + +func (p *Proxy) GetWritePort() int { + return p.WritePort +} + +func (p *Proxy) GetId() string { + return p.Id +} + +func (p *Proxy) GetState() string { + return p.State +} + +func (p *Proxy) GetUser() string { + return p.User +} + +func (p *Proxy) GetPass() string { + return p.Pass +} + +func (p *Proxy) GetFailCount() int { + return p.FailCount +} + +func (p *Proxy) GetPrevState() string { + return p.PrevState +} diff --git a/cluster/prx_set.go b/cluster/prx_set.go index c5280855a..547e55966 100644 --- a/cluster/prx_set.go +++ b/cluster/prx_set.go @@ -14,6 +14,8 @@ import ( "os" "strconv" "strings" + + "github.com/signal18/replication-manager/utils/misc" ) func (p *Proxy) SetID() { @@ -93,3 +95,23 @@ func (proxy *Proxy) SetReprovCookie() { } newFile.Close() } + +func (p *Proxy) SetPrevState(state string) { + p.PrevState = state +} + +func (p *Proxy) SetSuspect() { + p.State = stateSuspect +} + +func (p *Proxy) SetFailCount(c int) { + p.FailCount = c +} + +func (p *Proxy) SetCredential(credential string) { + p.User, p.Pass = misc.SplitPair(credential) +} + +func (p *Proxy) SetState(v string) { + p.State = v +} From 8a542eeab1e8fabd59f77a2d82f6ac0196303912 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Mon, 22 Feb 2021 23:53:57 +0100 Subject: [PATCH 17/84] Correct scoping for crcTable --- cluster/srv.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/srv.go b/cluster/srv.go index edc1a1b6c..183d79e3c 100644 --- a/cluster/srv.go +++ b/cluster/srv.go @@ -731,7 +731,7 @@ func (server *ServerMonitor) Refresh() error { if server.DBVersion.IsPPostgreSQL() { //PostgresQL as no server_id concept mimic via internal server id for topology detection var sid uint64 - sid, err = strconv.ParseUint(strconv.FormatUint(crc64.Checksum([]byte(server.SlaveStatus.MasterHost.String+server.SlaveStatus.MasterPort.String), cluster.crcTable), 10), 10, 64) + sid, err = strconv.ParseUint(strconv.FormatUint(crc64.Checksum([]byte(server.SlaveStatus.MasterHost.String+server.SlaveStatus.MasterPort.String), server.ClusterGroup.crcTable), 10), 10, 64) if err != nil { server.ClusterGroup.LogPrintf(LvlWarn, "PG Could not assign server_id s", err) } From 98fcab4959a08e2edd7d5b4b81e1b4ea9b9c210a Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Mon, 22 Feb 2021 23:58:27 +0100 Subject: [PATCH 18/84] Reduce complexity --- cluster/prx.go | 57 +++++++++----------------------------------------- 1 file changed, 10 insertions(+), 47 deletions(-) diff --git a/cluster/prx.go b/cluster/prx.go index 4e2651ef0..f71d201f9 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -153,38 +153,7 @@ type Backend struct { type proxyList []DatabaseProxy func (cluster *Cluster) newProxyList() error { - nbproxies := 0 - - if cluster.Conf.MxsHost != "" && cluster.Conf.MxsOn { - nbproxies += len(strings.Split(cluster.Conf.MxsHost, ",")) - } - if cluster.Conf.HaproxyOn { - nbproxies += len(strings.Split(cluster.Conf.HaproxyHosts, ",")) - } - if cluster.Conf.MdbsProxyHosts != "" && cluster.Conf.MdbsProxyOn { - nbproxies += len(strings.Split(cluster.Conf.MdbsProxyHosts, ",")) - } - if cluster.Conf.ProxysqlOn { - nbproxies += len(strings.Split(cluster.Conf.ProxysqlHosts, ",")) - } - if cluster.Conf.MysqlRouterOn { - nbproxies += len(strings.Split(cluster.Conf.MysqlRouterHosts, ",")) - } - if cluster.Conf.SphinxOn { - nbproxies += len(strings.Split(cluster.Conf.SphinxHosts, ",")) - } - if cluster.Conf.ExtProxyOn { - nbproxies++ - } - // internal myproxy - if cluster.Conf.MyproxyOn 
{ - nbproxies++ - } - cluster.Proxies = make([]DatabaseProxy, nbproxies) - - cluster.LogPrintf(LvlInfo, "Loading %d proxies", nbproxies) - - var ctproxy = 0 + cluster.Proxies = make([]DatabaseProxy, 0) var err error if cluster.Conf.MxsHost != "" && cluster.Conf.MxsOn { @@ -217,11 +186,10 @@ func (cluster *Cluster) newProxyList() error { prx.SetServiceName(cluster.Name, prx.Name) cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) prx.State = stateSuspect - cluster.Proxies[ctproxy] = prx + cluster.Proxies = append(cluster.Proxies, prx) if err != nil { cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) } - ctproxy++ } } if cluster.Conf.HaproxyOn { @@ -245,12 +213,10 @@ func (cluster *Cluster) newProxyList() error { prx.SetServiceName(cluster.Name, prx.Name) cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) prx.State = stateSuspect - cluster.Proxies[ctproxy] = prx + cluster.Proxies = append(cluster.Proxies, prx) if err != nil { cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) } - - ctproxy++ } } if cluster.Conf.ExtProxyOn { @@ -269,8 +235,7 @@ func (cluster *Cluster) newProxyList() error { prx.SetServiceName(cluster.Name, prx.Name) cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) prx.State = stateSuspect - cluster.Proxies[ctproxy] = prx - ctproxy++ + cluster.Proxies = append(cluster.Proxies, prx) } if cluster.Conf.ProxysqlOn { @@ -290,11 +255,10 @@ func (cluster *Cluster) newProxyList() error { prx.SetServiceName(cluster.Name, prx.Name) cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) prx.State = stateSuspect - cluster.Proxies[ctproxy] = prx + cluster.Proxies = append(cluster.Proxies, prx) if err != nil { cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) } - ctproxy++ } } if cluster.Conf.MdbsProxyHosts != "" && cluster.Conf.MdbsProxyOn { @@ -322,12 +286,11 @@ func (cluster *Cluster) newProxyList() error { prx.SetServiceName(cluster.Name, prx.Name) cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) prx.State = stateSuspect - cluster.Proxies[ctproxy] = prx + cluster.Proxies = append(cluster.Proxies, prx) if err != nil { cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) } cluster.LogPrintf(LvlDbg, "New MdbShardProxy proxy created: %s %s", prx.Host, prx.GetPort()) - ctproxy++ } } if cluster.Conf.SphinxHosts != "" && cluster.Conf.SphinxOn { @@ -353,12 +316,11 @@ func (cluster *Cluster) newProxyList() error { prx.SetServiceName(cluster.Name, prx.Name) cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) prx.State = stateSuspect - cluster.Proxies[ctproxy] = prx + cluster.Proxies = append(cluster.Proxies, prx) if err != nil { cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) } cluster.LogPrintf(LvlDbg, "New SphinxSearch proxy created: %s %s", prx.Host, prx.GetPort()) - ctproxy++ } } if cluster.Conf.MyproxyOn { @@ -383,10 +345,11 @@ func (cluster *Cluster) newProxyList() error { prx.SetServiceName(cluster.Name, prx.Name) cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) prx.State = stateSuspect - cluster.Proxies[ctproxy] = prx - 
ctproxy++ + cluster.Proxies = append(cluster.Proxies, prx) } + cluster.LogPrintf(LvlInfo, "Loaded %d proxies", len(cluster.Proxies)) + return nil } From 8fd9a3bce4db9c5e70a8b2181a6f843219f0aba3 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Tue, 23 Feb 2021 00:14:46 +0100 Subject: [PATCH 19/84] Reduce complexity even more, also deduplicate --- cluster/cluster.go | 10 ++++++ cluster/prx.go | 71 +++++++++++++---------------------------- cluster/prx_proxysql.go | 2 -- cluster/prx_set.go | 4 +++ 4 files changed, 37 insertions(+), 50 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index 9abd83b78..ec011d6ec 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -1290,3 +1290,13 @@ func (cluster *Cluster) ConfigDiscovery() error { return nil } + +func (c *Cluster) AddProxy(prx DatabaseProxy) { + prx.SetCluster(c) + prx.SetID() + prx.SetDataDir() + prx.SetServiceName(c.Name, prx.GetName()) + c.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.GetType(), prx.GetHost(), prx.GetPort()) + prx.SetState(stateSuspect) + c.Proxies = append(c.Proxies, prx) +} diff --git a/cluster/prx.go b/cluster/prx.go index f71d201f9..dcfada88a 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -66,6 +66,7 @@ type Proxy struct { } type DatabaseProxy interface { + SetCluster(c *Cluster) AddFlags(flags *pflag.FlagSet, conf config.Config) Init() Refresh() error @@ -134,6 +135,10 @@ type DatabaseProxy interface { SetWaitStopCookie() SetSuspect() + + SetID() + SetDataDir() + SetServiceName(string, string) } type Backend struct { @@ -180,13 +185,9 @@ func (cluster *Cluster) newProxyList() error { if cluster.Conf.ProvNetCNI { prx.Host = prx.Host + "." + cluster.Name + ".svc." + cluster.Conf.ProvOrchestratorCluster } - prx.ClusterGroup = cluster - prx.SetID() - prx.SetDataDir() - prx.SetServiceName(cluster.Name, prx.Name) - cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) - prx.State = stateSuspect - cluster.Proxies = append(cluster.Proxies, prx) + + cluster.AddProxy(prx) + if err != nil { cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) } @@ -207,13 +208,9 @@ func (cluster *Cluster) newProxyList() error { if cluster.Conf.ProvNetCNI { prx.Host = prx.Host + "." + cluster.Name + ".svc." 
+ cluster.Conf.ProvOrchestratorCluster } - prx.ClusterGroup = cluster - prx.SetID() - prx.SetDataDir() - prx.SetServiceName(cluster.Name, prx.Name) - cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) - prx.State = stateSuspect - cluster.Proxies = append(cluster.Proxies, prx) + + cluster.AddProxy(prx) + if err != nil { cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) } @@ -229,13 +226,8 @@ func (cluster *Cluster) newProxyList() error { if prx.Name == "" { prx.Name = prx.Host } - prx.ClusterGroup = cluster - prx.SetID() - prx.SetDataDir() - prx.SetServiceName(cluster.Name, prx.Name) - cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) - prx.State = stateSuspect - cluster.Proxies = append(cluster.Proxies, prx) + + cluster.AddProxy(prx) } if cluster.Conf.ProxysqlOn { @@ -250,12 +242,8 @@ func (cluster *Cluster) newProxyList() error { prx.Pass = p.PlainText } - prx.ClusterGroup = cluster - prx.SetDataDir() - prx.SetServiceName(cluster.Name, prx.Name) - cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) - prx.State = stateSuspect - cluster.Proxies = append(cluster.Proxies, prx) + cluster.AddProxy(prx) + if err != nil { cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) } @@ -280,13 +268,9 @@ func (cluster *Cluster) newProxyList() error { prx.Port = "3306" } prx.WritePort, _ = strconv.Atoi(prx.GetPort()) - prx.ClusterGroup = cluster - prx.SetID() - prx.SetDataDir() - prx.SetServiceName(cluster.Name, prx.Name) - cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) - prx.State = stateSuspect - cluster.Proxies = append(cluster.Proxies, prx) + + cluster.AddProxy(prx) + if err != nil { cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) } @@ -310,13 +294,9 @@ func (cluster *Cluster) newProxyList() error { if cluster.Conf.ProvNetCNI { prx.Host = prx.Host + "." + cluster.Name + ".svc." 
+ cluster.Conf.ProvOrchestratorCluster } - prx.ClusterGroup = cluster - prx.SetID() - prx.SetDataDir() - prx.SetServiceName(cluster.Name, prx.Name) - cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) - prx.State = stateSuspect - cluster.Proxies = append(cluster.Proxies, prx) + + cluster.AddProxy(prx) + if err != nil { cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) } @@ -339,13 +319,8 @@ func (cluster *Cluster) newProxyList() error { if prx.Name == "" { prx.Name = prx.Host } - prx.ClusterGroup = cluster - prx.SetID() - prx.SetDataDir() - prx.SetServiceName(cluster.Name, prx.Name) - cluster.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.Type, prx.Host, prx.GetPort()) - prx.State = stateSuspect - cluster.Proxies = append(cluster.Proxies, prx) + + cluster.AddProxy(prx) } cluster.LogPrintf(LvlInfo, "Loaded %d proxies", len(cluster.Proxies)) diff --git a/cluster/prx_proxysql.go b/cluster/prx_proxysql.go index 21b49cd9a..dced998c8 100644 --- a/cluster/prx_proxysql.go +++ b/cluster/prx_proxysql.go @@ -39,8 +39,6 @@ func NewProxySQLProxy(clusterName string, proxyHost string, conf config.Config) } } - prx.SetID() - return prx } diff --git a/cluster/prx_set.go b/cluster/prx_set.go index 547e55966..29943000c 100644 --- a/cluster/prx_set.go +++ b/cluster/prx_set.go @@ -115,3 +115,7 @@ func (p *Proxy) SetCredential(credential string) { func (p *Proxy) SetState(v string) { p.State = v } + +func (p *Proxy) SetCluster(c *Cluster) { + p.ClusterGroup = c +} From d017dce152405cde02e97d7068f612d6e2050954 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Tue, 23 Feb 2021 00:16:03 +0100 Subject: [PATCH 20/84] Remove futile error check --- cluster/prx.go | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/cluster/prx.go b/cluster/prx.go index dcfada88a..b45b9202d 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -159,7 +159,6 @@ type proxyList []DatabaseProxy func (cluster *Cluster) newProxyList() error { cluster.Proxies = make([]DatabaseProxy, 0) - var err error if cluster.Conf.MxsHost != "" && cluster.Conf.MxsOn { @@ -187,10 +186,6 @@ func (cluster *Cluster) newProxyList() error { } cluster.AddProxy(prx) - - if err != nil { - cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) - } } } if cluster.Conf.HaproxyOn { @@ -210,10 +205,6 @@ func (cluster *Cluster) newProxyList() error { } cluster.AddProxy(prx) - - if err != nil { - cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) - } } } if cluster.Conf.ExtProxyOn { @@ -243,10 +234,6 @@ func (cluster *Cluster) newProxyList() error { } cluster.AddProxy(prx) - - if err != nil { - cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) - } } } if cluster.Conf.MdbsProxyHosts != "" && cluster.Conf.MdbsProxyOn { @@ -270,10 +257,6 @@ func (cluster *Cluster) newProxyList() error { prx.WritePort, _ = strconv.Atoi(prx.GetPort()) cluster.AddProxy(prx) - - if err != nil { - cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) - } cluster.LogPrintf(LvlDbg, "New MdbShardProxy proxy created: %s %s", prx.Host, prx.GetPort()) } } @@ -296,10 +279,6 @@ func (cluster *Cluster) newProxyList() error { } cluster.AddProxy(prx) - - if err != nil { - cluster.LogPrintf(LvlErr, "Could not open connection to proxy %s %s: %s", prx.Host, prx.GetPort(), err) - } 
cluster.LogPrintf(LvlDbg, "New SphinxSearch proxy created: %s %s", prx.Host, prx.GetPort()) } } From e2041e9a290e85a167d699d886fd6139182a5b77 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Tue, 23 Feb 2021 00:48:54 +0100 Subject: [PATCH 21/84] Move the initialisation of the Proxy to a New*Proxy method in the appropriate file --- cluster/prx.go | 106 ++----------------------------- cluster/prx_haproxy.go | 16 +++++ cluster/prx_mariadbshardproxy.go | 23 +++++++ cluster/prx_maxscale.go | 27 ++++++++ cluster/prx_myproxy.go | 22 +++++++ cluster/prx_proxysql.go | 15 ++++- cluster/prx_sphinx.go | 22 +++++++ 7 files changed, 130 insertions(+), 101 deletions(-) diff --git a/cluster/prx.go b/cluster/prx.go index b45b9202d..6b5b094ed 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -21,7 +21,6 @@ import ( "github.com/signal18/replication-manager/graphite" "github.com/signal18/replication-manager/router/myproxy" "github.com/signal18/replication-manager/router/proxysql" - "github.com/signal18/replication-manager/utils/crypto" "github.com/signal18/replication-manager/utils/dbhelper" "github.com/signal18/replication-manager/utils/misc" "github.com/signal18/replication-manager/utils/state" @@ -161,49 +160,14 @@ func (cluster *Cluster) newProxyList() error { cluster.Proxies = make([]DatabaseProxy, 0) if cluster.Conf.MxsHost != "" && cluster.Conf.MxsOn { - for k, proxyHost := range strings.Split(cluster.Conf.MxsHost, ",") { - // prx := new(Proxy) - prx := new(MaxscaleProxy) - prx.Type = config.ConstProxyMaxscale - prx.SetPlacement(k, cluster.Conf.ProvProxAgents, cluster.Conf.SlapOSMaxscalePartitions, cluster.Conf.MxsHostsIPV6) - prx.Port = cluster.Conf.MxsPort - prx.User = cluster.Conf.MxsUser - prx.Pass = cluster.Conf.MxsPass - if cluster.key != nil { - p := crypto.Password{Key: cluster.key} - p.CipherText = prx.Pass - p.Decrypt() - prx.Pass = p.PlainText - } - prx.ReadPort = cluster.Conf.MxsReadPort - prx.WritePort = cluster.Conf.MxsWritePort - prx.ReadWritePort = cluster.Conf.MxsReadWritePort - prx.Name = proxyHost - prx.Host = proxyHost - if cluster.Conf.ProvNetCNI { - prx.Host = prx.Host + "." + cluster.Name + ".svc." + cluster.Conf.ProvOrchestratorCluster - } - + prx := NewMaxscaleProxy(k, cluster, proxyHost) cluster.AddProxy(prx) } } if cluster.Conf.HaproxyOn { - for k, proxyHost := range strings.Split(cluster.Conf.HaproxyHosts, ",") { - prx := new(HaproxyProxy) - prx.SetPlacement(k, cluster.Conf.ProvProxAgents, cluster.Conf.SlapOSHaProxyPartitions, cluster.Conf.HaproxyHostsIPV6) - prx.Type = config.ConstProxyHaproxy - prx.Port = strconv.Itoa(cluster.Conf.HaproxyAPIPort) - prx.ReadPort = cluster.Conf.HaproxyReadPort - prx.WritePort = cluster.Conf.HaproxyWritePort - prx.ReadWritePort = cluster.Conf.HaproxyWritePort - prx.Name = proxyHost - prx.Host = proxyHost - if cluster.Conf.ProvNetCNI { - prx.Host = prx.Host + "." + cluster.Name + ".svc." 
+ cluster.Conf.ProvOrchestratorCluster - } - + prx := NewHaproxyProxy(k, cluster, proxyHost) cluster.AddProxy(prx) } } @@ -221,84 +185,28 @@ func (cluster *Cluster) newProxyList() error { cluster.AddProxy(prx) } if cluster.Conf.ProxysqlOn { - for k, proxyHost := range strings.Split(cluster.Conf.ProxysqlHosts, ",") { - prx := NewProxySQLProxy(cluster.Name, proxyHost, cluster.Conf) - prx.SetPlacement(k, cluster.Conf.ProvProxAgents, cluster.Conf.SlapOSProxySQLPartitions, cluster.Conf.ProxysqlHostsIPV6) - - if cluster.key != nil { - p := crypto.Password{Key: cluster.key} - p.CipherText = prx.Pass - p.Decrypt() - prx.Pass = p.PlainText - } - + prx := NewProxySQLProxy(k, cluster, proxyHost) cluster.AddProxy(prx) } } if cluster.Conf.MdbsProxyHosts != "" && cluster.Conf.MdbsProxyOn { for k, proxyHost := range strings.Split(cluster.Conf.MdbsProxyHosts, ",") { - prx := new(MariadbShardProxy) - prx.SetPlacement(k, cluster.Conf.ProvProxAgents, cluster.Conf.SlapOSShardProxyPartitions, cluster.Conf.MdbsHostsIPV6) - prx.Type = config.ConstProxySpider - prx.Host, prx.Port = misc.SplitHostPort(proxyHost) - prx.User, prx.Pass = misc.SplitPair(cluster.Conf.MdbsProxyCredential) - prx.ReadPort, _ = strconv.Atoi(prx.GetPort()) - prx.ReadWritePort, _ = strconv.Atoi(prx.GetPort()) - prx.Name = proxyHost - if cluster.Conf.ProvNetCNI { - if cluster.Conf.ClusterHead == "" { - prx.Host = prx.Host + "." + cluster.Name + ".svc." + cluster.Conf.ProvOrchestratorCluster - } else { - prx.Host = prx.Host + "." + cluster.Conf.ClusterHead + ".svc." + cluster.Conf.ProvOrchestratorCluster - } - prx.Port = "3306" - } - prx.WritePort, _ = strconv.Atoi(prx.GetPort()) - + prx := NewMariadbShardProxy(k, cluster, proxyHost) cluster.AddProxy(prx) cluster.LogPrintf(LvlDbg, "New MdbShardProxy proxy created: %s %s", prx.Host, prx.GetPort()) } } if cluster.Conf.SphinxHosts != "" && cluster.Conf.SphinxOn { for k, proxyHost := range strings.Split(cluster.Conf.SphinxHosts, ",") { - prx := new(SphinxProxy) - prx.SetPlacement(k, cluster.Conf.ProvProxAgents, cluster.Conf.SlapOSSphinxPartitions, cluster.Conf.SphinxHostsIPV6) - prx.Type = config.ConstProxySphinx - - prx.Port = cluster.Conf.SphinxQLPort - prx.User = "" - prx.Pass = "" - prx.ReadPort, _ = strconv.Atoi(prx.GetPort()) - prx.WritePort, _ = strconv.Atoi(prx.GetPort()) - prx.ReadWritePort, _ = strconv.Atoi(prx.GetPort()) - prx.Name = proxyHost - prx.Host = proxyHost - if cluster.Conf.ProvNetCNI { - prx.Host = prx.Host + "." + cluster.Name + ".svc." + cluster.Conf.ProvOrchestratorCluster - } + prx := NewSphinxProxy(k, cluster, proxyHost) cluster.AddProxy(prx) - cluster.LogPrintf(LvlDbg, "New SphinxSearch proxy created: %s %s", prx.Host, prx.GetPort()) + cluster.LogPrintf(LvlDbg, "New SphinxSearch proxy created: %s %s", prx.GetHost(), prx.GetPort()) } } if cluster.Conf.MyproxyOn { - prx := new(MyProxyProxy) - prx.Type = config.ConstProxyMyProxy - prx.Port = strconv.Itoa(cluster.Conf.MyproxyPort) - prx.Host = "0.0.0.0" - prx.ReadPort = cluster.Conf.MyproxyPort - prx.WritePort = cluster.Conf.MyproxyPort - prx.ReadWritePort = cluster.Conf.MyproxyPort - prx.User = cluster.Conf.MyproxyUser - prx.Pass = cluster.Conf.MyproxyPassword - if prx.Host == "" { - prx.Host = "repman." + cluster.Name + ".svc." 
+ cluster.Conf.ProvOrchestratorCluster - } - if prx.Name == "" { - prx.Name = prx.Host - } - + prx := NewMyProxyProxy(0, cluster, "") cluster.AddProxy(prx) } diff --git a/cluster/prx_haproxy.go b/cluster/prx_haproxy.go index a4777f69e..d716f3b89 100644 --- a/cluster/prx_haproxy.go +++ b/cluster/prx_haproxy.go @@ -28,6 +28,22 @@ type HaproxyProxy struct { Proxy } +func NewHaproxyProxy(placement int, cluster *Cluster, proxyHost string) *HaproxyProxy { + conf := cluster.Conf + prx := new(HaproxyProxy) + prx.SetPlacement(placement, conf.ProvProxAgents, conf.SlapOSHaProxyPartitions, conf.HaproxyHostsIPV6) + prx.Type = config.ConstProxyHaproxy + prx.Port = strconv.Itoa(conf.HaproxyAPIPort) + prx.ReadPort = conf.HaproxyReadPort + prx.WritePort = conf.HaproxyWritePort + prx.ReadWritePort = conf.HaproxyWritePort + prx.Name = proxyHost + prx.Host = proxyHost + if conf.ProvNetCNI { + prx.Host = prx.Host + "." + cluster.Name + ".svc." + conf.ProvOrchestratorCluster + } +} + func (proxy *HaproxyProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { flags.BoolVar(&conf.HaproxyOn, "haproxy", false, "Wrapper to use HaProxy on same host") flags.StringVar(&conf.HaproxyMode, "haproxy-mode", "runtimeapi", "HaProxy mode [standby|runtimeapi|dataplaneapi]") diff --git a/cluster/prx_mariadbshardproxy.go b/cluster/prx_mariadbshardproxy.go index d5c518323..681a7fd4e 100644 --- a/cluster/prx_mariadbshardproxy.go +++ b/cluster/prx_mariadbshardproxy.go @@ -29,6 +29,29 @@ type MariadbShardProxy struct { Proxy } +func NewMariadbShardProxy(placement int, cluster *Cluster, proxyHost string) *MariadbShardProxy { + conf := cluster.Conf + prx := new(MariadbShardProxy) + prx.SetPlacement(placement, conf.ProvProxAgents, conf.SlapOSShardProxyPartitions, conf.MdbsHostsIPV6) + prx.Type = config.ConstProxySpider + prx.Host, prx.Port = misc.SplitHostPort(proxyHost) + prx.User, prx.Pass = misc.SplitPair(conf.MdbsProxyCredential) + prx.ReadPort, _ = strconv.Atoi(prx.GetPort()) + prx.ReadWritePort, _ = strconv.Atoi(prx.GetPort()) + prx.Name = proxyHost + if conf.ProvNetCNI { + if conf.ClusterHead == "" { + prx.Host = prx.Host + "." + cluster.Name + ".svc." + conf.ProvOrchestratorCluster + } else { + prx.Host = prx.Host + "." + conf.ClusterHead + ".svc." 
+ conf.ProvOrchestratorCluster + } + prx.Port = "3306" + } + prx.WritePort, _ = strconv.Atoi(prx.GetPort()) + + return prx +} + func (proxy *MariadbShardProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { flags.BoolVar(&conf.MdbsProxyOn, "shardproxy", false, "MariaDB Spider proxy") flags.StringVar(&conf.MdbsProxyHosts, "shardproxy-servers", "127.0.0.1:3307", "MariaDB spider proxy hosts IP:Port,IP:Port") diff --git a/cluster/prx_maxscale.go b/cluster/prx_maxscale.go index e423377dd..88432ace5 100644 --- a/cluster/prx_maxscale.go +++ b/cluster/prx_maxscale.go @@ -14,6 +14,7 @@ import ( "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/router/maxscale" + "github.com/signal18/replication-manager/utils/crypto" "github.com/signal18/replication-manager/utils/state" "github.com/spf13/pflag" ) @@ -26,6 +27,32 @@ func (cluster *Cluster) refreshMaxscale(proxy *MaxscaleProxy) error { return proxy.refresh() } +func NewMaxscaleProxy(placement int, cluster *Cluster, proxyHost string) *MaxscaleProxy { + conf := cluster.Conf + prx := new(MaxscaleProxy) + prx.Type = config.ConstProxyMaxscale + prx.SetPlacement(placement, conf.ProvProxAgents, conf.SlapOSMaxscalePartitions, conf.MxsHostsIPV6) + prx.Port = conf.MxsPort + prx.User = conf.MxsUser + prx.Pass = conf.MxsPass + if cluster.key != nil { + p := crypto.Password{Key: cluster.key} + p.CipherText = prx.Pass + p.Decrypt() + prx.Pass = p.PlainText + } + prx.ReadPort = conf.MxsReadPort + prx.WritePort = conf.MxsWritePort + prx.ReadWritePort = conf.MxsReadWritePort + prx.Name = proxyHost + prx.Host = proxyHost + if cluster.Conf.ProvNetCNI { + prx.Host = prx.Host + "." + cluster.Name + ".svc." + conf.ProvOrchestratorCluster + } + + return prx +} + func (proxy *MaxscaleProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { flags.BoolVar(&conf.MxsOn, "maxscale", false, "MaxScale proxy server is query for backend status") flags.BoolVar(&conf.CheckFalsePositiveMaxscale, "failover-falsepositive-maxscale", false, "Failover checks that maxscale detect failed master") diff --git a/cluster/prx_myproxy.go b/cluster/prx_myproxy.go index 66c02e017..cd5543f57 100644 --- a/cluster/prx_myproxy.go +++ b/cluster/prx_myproxy.go @@ -2,6 +2,7 @@ package cluster import ( "database/sql" + "strconv" _ "github.com/go-sql-driver/mysql" "github.com/signal18/replication-manager/config" @@ -13,6 +14,27 @@ type MyProxyProxy struct { Proxy } +// NewMyProxyProxy follows the same signature as the other Proxies for future compatibility, simply pass 0 and "" for the values not needed +func NewMyProxyProxy(placement int, cluster *Cluster, proxyHost string) *MyProxyProxy { + prx := new(MyProxyProxy) + prx.Type = config.ConstProxyMyProxy + prx.Port = strconv.Itoa(cluster.Conf.MyproxyPort) + prx.Host = "0.0.0.0" + prx.ReadPort = cluster.Conf.MyproxyPort + prx.WritePort = cluster.Conf.MyproxyPort + prx.ReadWritePort = cluster.Conf.MyproxyPort + prx.User = cluster.Conf.MyproxyUser + prx.Pass = cluster.Conf.MyproxyPassword + if prx.Host == "" { + prx.Host = "repman." + cluster.Name + ".svc." 
+ cluster.Conf.ProvOrchestratorCluster + } + if prx.Name == "" { + prx.Name = prx.Host + } + + return prx +} + func (cluster *Cluster) initMyProxy(proxy *MyProxyProxy) { proxy.Init() } diff --git a/cluster/prx_proxysql.go b/cluster/prx_proxysql.go index dced998c8..ef2c97cbe 100644 --- a/cluster/prx_proxysql.go +++ b/cluster/prx_proxysql.go @@ -7,6 +7,7 @@ import ( "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/router/proxysql" + "github.com/signal18/replication-manager/utils/crypto" "github.com/signal18/replication-manager/utils/dbhelper" "github.com/signal18/replication-manager/utils/misc" "github.com/signal18/replication-manager/utils/state" @@ -17,7 +18,8 @@ type ProxySQLProxy struct { Proxy } -func NewProxySQLProxy(clusterName string, proxyHost string, conf config.Config) *ProxySQLProxy { +func NewProxySQLProxy(placement int, cluster *Cluster, proxyHost string) *ProxySQLProxy { + conf := cluster.Conf prx := new(ProxySQLProxy) prx.Name = proxyHost prx.Host = proxyHost @@ -31,14 +33,23 @@ func NewProxySQLProxy(clusterName string, proxyHost string, conf config.Config) prx.WritePort, _ = strconv.Atoi(conf.ProxysqlPort) prx.ReadPort, _ = strconv.Atoi(conf.ProxysqlPort) + prx.SetPlacement(placement, conf.ProvProxAgents, conf.SlapOSProxySQLPartitions, conf.ProxysqlHostsIPV6) + if conf.ProvNetCNI { if conf.ClusterHead == "" { - prx.Host = prx.Host + "." + clusterName + ".svc." + conf.ProvOrchestratorCluster + prx.Host = prx.Host + "." + cluster.Name + ".svc." + conf.ProvOrchestratorCluster } else { prx.Host = prx.Host + "." + conf.ClusterHead + ".svc." + conf.ProvOrchestratorCluster } } + if cluster.key != nil { + p := crypto.Password{Key: cluster.key} + p.CipherText = prx.Pass + p.Decrypt() + prx.Pass = p.PlainText + } + return prx } diff --git a/cluster/prx_sphinx.go b/cluster/prx_sphinx.go index 558a070e9..e13602662 100644 --- a/cluster/prx_sphinx.go +++ b/cluster/prx_sphinx.go @@ -11,6 +11,7 @@ package cluster import ( "fmt" "runtime" + "strconv" "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/router/sphinx" @@ -22,6 +23,27 @@ type SphinxProxy struct { Proxy } +func NewSphinxProxy(placement int, cluster *Cluster, proxyHost string) *SphinxProxy { + conf := cluster.Conf + prx := new(SphinxProxy) + prx.SetPlacement(placement, conf.ProvProxAgents, conf.SlapOSSphinxPartitions, conf.SphinxHostsIPV6) + prx.Type = config.ConstProxySphinx + + prx.Port = conf.SphinxQLPort + prx.User = "" + prx.Pass = "" + prx.ReadPort, _ = strconv.Atoi(prx.GetPort()) + prx.WritePort, _ = strconv.Atoi(prx.GetPort()) + prx.ReadWritePort, _ = strconv.Atoi(prx.GetPort()) + prx.Name = proxyHost + prx.Host = proxyHost + if conf.ProvNetCNI { + prx.Host = prx.Host + "." + cluster.Name + ".svc." 
+ conf.ProvOrchestratorCluster + } + + return prx +} + func (proxy *SphinxProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { flags.BoolVar(&conf.SphinxOn, "sphinx", false, "Turn on SphinxSearch detection") flags.StringVar(&conf.SphinxHosts, "sphinx-servers", "127.0.0.1", "SphinxSearch hosts") From d90b9cb5cdb14ad15a3f2fa315fed9808bfe6afe Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Tue, 23 Feb 2021 01:14:43 +0100 Subject: [PATCH 22/84] Small reductions, also checking for an error and doing continue is easier to read --- cluster/cluster.go | 2 +- cluster/prx.go | 4 ++-- cluster/prx_haproxy.go | 2 ++ cluster/prx_set.go | 5 +++-- cluster/spider.go | 36 +++++++++++++++++++----------------- 5 files changed, 27 insertions(+), 22 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index ec011d6ec..0096a4236 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -1295,7 +1295,7 @@ func (c *Cluster) AddProxy(prx DatabaseProxy) { prx.SetCluster(c) prx.SetID() prx.SetDataDir() - prx.SetServiceName(c.Name, prx.GetName()) + prx.SetServiceName(c.Name) c.LogPrintf(LvlInfo, "New proxy monitored %s: %s:%s", prx.GetType(), prx.GetHost(), prx.GetPort()) prx.SetState(stateSuspect) c.Proxies = append(c.Proxies, prx) diff --git a/cluster/prx.go b/cluster/prx.go index 6b5b094ed..0fab7e3b1 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -137,7 +137,7 @@ type DatabaseProxy interface { SetID() SetDataDir() - SetServiceName(string, string) + SetServiceName(namespace string) } type Backend struct { @@ -194,7 +194,7 @@ func (cluster *Cluster) newProxyList() error { for k, proxyHost := range strings.Split(cluster.Conf.MdbsProxyHosts, ",") { prx := NewMariadbShardProxy(k, cluster, proxyHost) cluster.AddProxy(prx) - cluster.LogPrintf(LvlDbg, "New MdbShardProxy proxy created: %s %s", prx.Host, prx.GetPort()) + cluster.LogPrintf(LvlDbg, "New MdbShardProxy proxy created: %s %s", prx.GetHost(), prx.GetPort()) } } if cluster.Conf.SphinxHosts != "" && cluster.Conf.SphinxOn { diff --git a/cluster/prx_haproxy.go b/cluster/prx_haproxy.go index d716f3b89..e30b56f3d 100644 --- a/cluster/prx_haproxy.go +++ b/cluster/prx_haproxy.go @@ -42,6 +42,8 @@ func NewHaproxyProxy(placement int, cluster *Cluster, proxyHost string) *Haproxy if conf.ProvNetCNI { prx.Host = prx.Host + "." + cluster.Name + ".svc." 
+ conf.ProvOrchestratorCluster } + + return prx } func (proxy *HaproxyProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { diff --git a/cluster/prx_set.go b/cluster/prx_set.go index 29943000c..8279802e8 100644 --- a/cluster/prx_set.go +++ b/cluster/prx_set.go @@ -25,8 +25,9 @@ func (p *Proxy) SetID() { 10) } -func (proxy *Proxy) SetServiceName(namespace string, name string) { - proxy.ServiceName = namespace + "/svc/" + name +// TODO: clarify where this is used, can maybe be replaced with a Getter +func (proxy *Proxy) SetServiceName(namespace string) { + proxy.ServiceName = namespace + "/svc/" + proxy.Name } func (proxy *Proxy) SetPlacement(k int, ProvAgents string, SlapOSDBPartitions string, ProxysqlHostsIPV6 string) { diff --git a/cluster/spider.go b/cluster/spider.go index 3126aaec7..0c40cdb4c 100644 --- a/cluster/spider.go +++ b/cluster/spider.go @@ -22,29 +22,31 @@ func (cluster *Cluster) SpiderShardsDiscovery() { for _, s := range cluster.Servers { cluster.tlog.Add(fmt.Sprintf("INFO: Is Spider Monitor server %s ", s.URL)) mon, err := dbhelper.GetSpiderMonitor(s.Conn) - if err == nil { - if mon != "" { - cluster.tlog.Add(fmt.Sprintf("INFO: Retriving Spider Shards Server %s ", s.URL)) - extraUrl, err := dbhelper.GetSpiderShardUrl(s.Conn) - if err == nil { - if extraUrl != "" { - for j, url := range strings.Split(extraUrl, ",") { - var err error - srv, err := cluster.newServerMonitor(url, cluster.dbUser, cluster.dbPass, true, cluster.GetDomain()) - srv.State = stateShard - cluster.Servers = append(cluster.Servers, srv) - if err != nil { - log.Fatalf("ERROR: Could not open connection to Spider Shard server %s : %s", cluster.Servers[j].URL, err) - } - if cluster.Conf.Verbose { - cluster.tlog.Add(fmt.Sprintf("[%s] DEBUG: New server created: %v", cluster.Name, cluster.Servers[j].URL)) - } + if err != nil { + continue + } + if mon != "" { + cluster.tlog.Add(fmt.Sprintf("INFO: Retriving Spider Shards Server %s ", s.URL)) + extraUrl, err := dbhelper.GetSpiderShardUrl(s.Conn) + if err == nil { + if extraUrl != "" { + for j, url := range strings.Split(extraUrl, ",") { + var err error + srv, err := cluster.newServerMonitor(url, cluster.dbUser, cluster.dbPass, true, cluster.GetDomain()) + srv.State = stateShard + cluster.Servers = append(cluster.Servers, srv) + if err != nil { + log.Fatalf("ERROR: Could not open connection to Spider Shard server %s : %s", cluster.Servers[j].URL, err) + } + if cluster.Conf.Verbose { + cluster.tlog.Add(fmt.Sprintf("[%s] DEBUG: New server created: %v", cluster.Name, cluster.Servers[j].URL)) } } } } } } + } func (cluster *Cluster) SpiderSetShardsRepl() { From 00db46c762cc43545a20931d86502f9c3f1db2f4 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Tue, 23 Feb 2021 01:51:01 +0100 Subject: [PATCH 23/84] Simplify set/has/del of cookies for prx and prov --- cluster/prx_del.go | 41 +++++++++++++++++------------------------ cluster/prx_has.go | 28 ++++++++++------------------ cluster/prx_set.go | 46 +++++++++++++++++----------------------------- cluster/srv_del.go | 40 +++++++++++++++++----------------------- cluster/srv_has.go | 43 ++++++++++--------------------------------- cluster/srv_set.go | 45 +++++++++++++++++---------------------------- 6 files changed, 88 insertions(+), 155 deletions(-) diff --git a/cluster/prx_del.go b/cluster/prx_del.go index 4e40a7a59..9f2b7507d 100644 --- a/cluster/prx_del.go +++ b/cluster/prx_del.go @@ -9,41 +9,34 @@ package cluster import ( - "fmt" "os" ) -func (proxy *Proxy) DelProvisionCookie() { - err := os.Remove(proxy.Datadir 
+ "/@cookie_prov") +func (proxy *Proxy) delCookie(key string) error { + err := os.Remove(proxy.Datadir + "/@/" + key) if err != nil { - fmt.Println("Error:", err) + proxy.ClusterGroup.LogPrintf(LvlDbg, "Remove cookie (%s) %s", key, err) } + + return err } -func (proxy *Proxy) DelReprovisionCookie() { - err := os.Remove(proxy.Datadir + "/@cookie_reprov") - if err != nil { - proxy.ClusterGroup.LogPrintf(LvlDbg, "Remove cookie %s", err) - } +func (proxy *Proxy) DelProvisionCookie() error { + return proxy.delCookie("cookie_prov") } -func (proxy *Proxy) DelRestartCookie() { - err := os.Remove(proxy.Datadir + "/@cookie_restart") - if err != nil { - proxy.ClusterGroup.LogPrintf(LvlDbg, "Remove cookie %s", err) - } +func (proxy *Proxy) DelReprovisionCookie() error { + return proxy.delCookie("cookie_reprov") } -func (proxy *Proxy) DelWaitStartCookie() { - err := os.Remove(proxy.Datadir + "/@cookie_waitstart") - if err != nil { - proxy.ClusterGroup.LogPrintf(LvlDbg, "Remove cookie %s", err) - } +func (proxy *Proxy) DelRestartCookie() error { + return proxy.delCookie("cookie_restart") } -func (proxy *Proxy) DelWaitStopCookie() { - err := os.Remove(proxy.Datadir + "/@cookie_waitstop") - if err != nil { - proxy.ClusterGroup.LogPrintf(LvlDbg, "Remove cookie %s", err) - } +func (proxy *Proxy) DelWaitStartCookie() error { + return proxy.delCookie("cookie_waitstart") +} + +func (proxy *Proxy) DelWaitStopCookie() error { + return proxy.delCookie("cookie_waitstop") } diff --git a/cluster/prx_has.go b/cluster/prx_has.go index edafdd35f..7bb372e57 100644 --- a/cluster/prx_has.go +++ b/cluster/prx_has.go @@ -24,39 +24,31 @@ func (proxy *Proxy) IsFilterInTags(filter string) bool { return false } -func (proxy *Proxy) HasProvisionCookie() bool { - if _, err := os.Stat(proxy.Datadir + "/@cookie_prov"); os.IsNotExist(err) { +func (proxy *Proxy) hasCookie(key string) bool { + if _, err := os.Stat(proxy.Datadir + "/@" + key); os.IsNotExist(err) { return false } return true } +func (proxy *Proxy) HasProvisionCookie() bool { + return proxy.hasCookie("cookie_prov") +} + func (proxy *Proxy) HasWaitStartCookie() bool { - if _, err := os.Stat(proxy.Datadir + "/@cookie_waitstart"); os.IsNotExist(err) { - return false - } - return true + return proxy.hasCookie("cookie_waitstart") } func (proxy *Proxy) HasWaitStopCookie() bool { - if _, err := os.Stat(proxy.Datadir + "/@cookie_waitstop"); os.IsNotExist(err) { - return false - } - return true + return proxy.hasCookie("cookie_waitstop") } func (proxy *Proxy) HasRestartCookie() bool { - if _, err := os.Stat(proxy.Datadir + "/@cookie_restart"); os.IsNotExist(err) { - return false - } - return true + return proxy.hasCookie("cookie_restart") } func (proxy *Proxy) HasReprovCookie() bool { - if _, err := os.Stat(proxy.Datadir + "/@cookie_reprov"); os.IsNotExist(err) { - return false - } - return true + return proxy.hasCookie("cookie_reprov") } func (proxy *Proxy) IsRunning() bool { diff --git a/cluster/prx_set.go b/cluster/prx_set.go index 8279802e8..33bd16d10 100644 --- a/cluster/prx_set.go +++ b/cluster/prx_set.go @@ -9,7 +9,6 @@ package cluster import ( - "fmt" "hash/crc64" "os" "strconv" @@ -57,44 +56,33 @@ func (proxy *Proxy) SetDataDir() { } } -func (proxy *Proxy) SetProvisionCookie() { - newFile, err := os.Create(proxy.Datadir + "/@cookie_prov") +func (proxy *Proxy) createCookie(key string) error { + newFile, err := os.Create(proxy.Datadir + "/@" + key) + defer newFile.Close() if err != nil { - fmt.Println("Error:", err) + proxy.ClusterGroup.LogPrintf(LvlDbg, "Create cookie 
(%s) %s", key, err) } - newFile.Close() + return err } -func (proxy *Proxy) SetWaitStartCookie() { - newFile, err := os.Create(proxy.Datadir + "/@cookie_waitstart") - if err != nil { - fmt.Println("Error:", err) - } - newFile.Close() +func (proxy *Proxy) SetProvisionCookie() error { + return proxy.createCookie("cookie_prov") } -func (proxy *Proxy) SetWaitStopCookie() { - newFile, err := os.Create(proxy.Datadir + "/@cookie_waitstop") - if err != nil { - fmt.Println("Error:", err) - } - newFile.Close() +func (proxy *Proxy) SetWaitStartCookie() error { + return proxy.createCookie("cookie_waitstart") } -func (proxy *Proxy) SetRestartCookie() { - newFile, err := os.Create(proxy.Datadir + "/@cookie_restart") - if err != nil { - fmt.Println("Error:", err) - } - newFile.Close() +func (proxy *Proxy) SetWaitStopCookie() error { + return proxy.createCookie("cookie_waitstop") } -func (proxy *Proxy) SetReprovCookie() { - newFile, err := os.Create(proxy.Datadir + "/@cookie_reprov") - if err != nil { - fmt.Println("Error:", err) - } - newFile.Close() +func (proxy *Proxy) SetRestartCookie() error { + return proxy.createCookie("cookie_restart") +} + +func (proxy *Proxy) SetReprovCookie() error { + return proxy.createCookie("cookie_reprov") } func (p *Proxy) SetPrevState(state string) { diff --git a/cluster/srv_del.go b/cluster/srv_del.go index 5c962649a..d798755a4 100644 --- a/cluster/srv_del.go +++ b/cluster/srv_del.go @@ -13,37 +13,31 @@ import ( "os" ) -func (server *ServerMonitor) DelProvisionCookie() { - err := os.Remove(server.Datadir + "/@cookie_prov") +func (server *ServerMonitor) delCookie(key string) error { + err := os.Remove(server.Datadir + "/@/" + key) if err != nil { - server.ClusterGroup.LogPrintf(LvlDbg, "Remove cookie %s", err) + server.ClusterGroup.LogPrintf(LvlDbg, "Remove cookie (%s) %s", key, err) } + + return err } -func (server *ServerMonitor) DelWaitStartCookie() { - err := os.Remove(server.Datadir + "/@cookie_waitstart") - if err != nil { - server.ClusterGroup.LogPrintf(LvlDbg, "Remove cookie %s", err) - } +func (server *ServerMonitor) DelProvisionCookie() error { + return server.delCookie("cookie_prov") } -func (server *ServerMonitor) DelWaitStopCookie() { - err := os.Remove(server.Datadir + "/@cookie_waitstop") - if err != nil { - server.ClusterGroup.LogPrintf(LvlDbg, "Remove cookie %s", err) - } +func (server *ServerMonitor) DelWaitStartCookie() error { + return server.delCookie("cookie_waitstart") } -func (server *ServerMonitor) DelReprovisionCookie() { - err := os.Remove(server.Datadir + "/@cookie_reprov") - if err != nil { - server.ClusterGroup.LogPrintf(LvlDbg, "Remove cookie %s", err) - } +func (server *ServerMonitor) DelWaitStopCookie() error { + return server.delCookie("cookie_waitstop") } -func (server *ServerMonitor) DelRestartCookie() { - err := os.Remove(server.Datadir + "/@cookie_restart") - if err != nil { - server.ClusterGroup.LogPrintf(LvlDbg, "Remove cookie %s", err) - } +func (server *ServerMonitor) DelReprovisionCookie() error { + return server.delCookie("cookie_reprov") +} + +func (server *ServerMonitor) DelRestartCookie() error { + return server.delCookie("cookie_restart") } diff --git a/cluster/srv_has.go b/cluster/srv_has.go index 4f6c56708..1eccc572b 100644 --- a/cluster/srv_has.go +++ b/cluster/srv_has.go @@ -66,54 +66,31 @@ func (server *ServerMonitor) IsSlaveOfReplicationSource(name string) bool { return false } -func (server *ServerMonitor) HasProvisionCookie() bool { - if server == nil { - return false - } - if _, err := os.Stat(server.Datadir + 
"/@cookie_prov"); os.IsNotExist(err) { +func (server *ServerMonitor) hasCookie(key string) bool { + if _, err := os.Stat(server.Datadir + "/@" + key); os.IsNotExist(err) { return false } return true } +func (server *ServerMonitor) HasProvisionCookie() bool { + return server.hasCookie("cookie_prov") +} + func (server *ServerMonitor) HasWaitStartCookie() bool { - if server == nil { - return false - } - if _, err := os.Stat(server.Datadir + "/@cookie_waitstart"); os.IsNotExist(err) { - return false - } - return true + return server.hasCookie("cookie_waitstart") } func (server *ServerMonitor) HasWaitStopCookie() bool { - if server == nil { - return false - } - if _, err := os.Stat(server.Datadir + "/@cookie_waitstop"); os.IsNotExist(err) { - return false - } - return true + return server.hasCookie("cookie_waitstop") } func (server *ServerMonitor) HasRestartCookie() bool { - if server == nil { - return false - } - if _, err := os.Stat(server.Datadir + "/@cookie_restart"); os.IsNotExist(err) { - return false - } - return true + return server.hasCookie("cookie_restart") } func (server *ServerMonitor) HasReprovCookie() bool { - if server == nil { - return false - } - if _, err := os.Stat(server.Datadir + "/@cookie_reprov"); os.IsNotExist(err) { - return false - } - return true + return server.hasCookie("cookie_reprov") } func (server *ServerMonitor) HasReadOnly() bool { diff --git a/cluster/srv_set.go b/cluster/srv_set.go index 728a84300..cd628e61f 100644 --- a/cluster/srv_set.go +++ b/cluster/srv_set.go @@ -289,42 +289,31 @@ func (server *ServerMonitor) SetInnoDBMonitor() { dbhelper.SetInnoDBLockMonitor(server.Conn) } -func (server *ServerMonitor) SetProvisionCookie() { - newFile, err := os.Create(server.Datadir + "/@cookie_prov") +func (server *ServerMonitor) createCookie(key string) error { + newFile, err := os.Create(server.Datadir + "/@" + key) + defer newFile.Close() if err != nil { - server.ClusterGroup.LogPrintf(LvlErr, "Can't save provision cookie %s", err) + server.ClusterGroup.LogPrintf(LvlDbg, "Create cookie (%s) %s", key, err) } - newFile.Close() + return err } -func (server *ServerMonitor) SetRestartCookie() { - newFile, err := os.Create(server.Datadir + "/@cookie_restart") - if err != nil { - server.ClusterGroup.LogPrintf(LvlErr, "Can't save restart cookie %s", err) - } - newFile.Close() +func (server *ServerMonitor) SetProvisionCookie() error { + return server.createCookie("cookie_prov") } -func (server *ServerMonitor) SetWaitStartCookie() { - newFile, err := os.Create(server.Datadir + "/@cookie_waitstart") - if err != nil { - server.ClusterGroup.LogPrintf(LvlErr, "Can't save wait start cookie %s", err) - } - newFile.Close() +func (server *ServerMonitor) SetRestartCookie() error { + return server.createCookie("cookie_restart") } -func (server *ServerMonitor) SetWaitStopCookie() { - newFile, err := os.Create(server.Datadir + "/@cookie_waitstop") - if err != nil { - server.ClusterGroup.LogPrintf(LvlErr, "Can't save wait start cookie %s", err) - } - newFile.Close() +func (server *ServerMonitor) SetWaitStartCookie() error { + return server.createCookie("cookie_waitstart") } -func (server *ServerMonitor) SetReprovCookie() { - newFile, err := os.Create(server.Datadir + "/@cookie_reprov") - if err != nil { - server.ClusterGroup.LogPrintf(LvlErr, "Can't save restart cookie %s", err) - } - newFile.Close() +func (server *ServerMonitor) SetWaitStopCookie() error { + return server.createCookie("cookie_waitstop") +} + +func (server *ServerMonitor) SetReprovCookie() error { + return 
server.createCookie("cookie_reprov") } From b6745c182a6f54f10126eaaec6ea2a1b09f80697 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Tue, 23 Feb 2021 02:14:44 +0100 Subject: [PATCH 24/84] More deduplication --- cluster/prx.go | 34 ++++++++++---------- cluster/srv_set.go | 80 +++++++++++++++++++--------------------------- server/http.go | 2 +- 3 files changed, 52 insertions(+), 64 deletions(-) diff --git a/cluster/prx.go b/cluster/prx.go index 0fab7e3b1..679cd02f2 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -72,17 +72,8 @@ type DatabaseProxy interface { Failover() SetMaintenance(server *ServerMonitor) GetType() string - DelRestartCookie() - DelReprovisionCookie() - SetProvisionCookie() - HasProvisionCookie() bool IsRunning() bool - SetRestartCookie() - HasRestartCookie() bool - SetReprovCookie() - HasReprovCookie() bool - SetCredential(credential string) GetFailCount() int @@ -110,13 +101,8 @@ type DatabaseProxy interface { SetMaintenanceHaproxy(server *ServerMonitor) IsFilterInTags(filter string) bool - HasWaitStartCookie() bool - HasWaitStopCookie() bool IsDown() bool - DelProvisionCookie() - DelWaitStartCookie() - DelWaitStopCookie() GetProxyConfig() string // GetInitContainer(collector opensvc.Collector) string GetBindAddress() string @@ -130,14 +116,30 @@ type DatabaseProxy interface { SendStats() error OpenSVCGetProxyDefaultSection() map[string]string - SetWaitStartCookie() - SetWaitStopCookie() SetSuspect() SetID() SetDataDir() SetServiceName(namespace string) + + SetProvisionCookie() error + SetReprovCookie() error + SetRestartCookie() error + SetWaitStartCookie() error + SetWaitStopCookie() error + + HasProvisionCookie() bool + HasReprovCookie() bool + HasRestartCookie() bool + HasWaitStartCookie() bool + HasWaitStopCookie() bool + + DelProvisionCookie() error + DelReprovisionCookie() error + DelRestartCookie() error + DelWaitStartCookie() error + DelWaitStopCookie() error } type Backend struct { diff --git a/cluster/srv_set.go b/cluster/srv_set.go index cd628e61f..cf4156609 100644 --- a/cluster/srv_set.go +++ b/cluster/srv_set.go @@ -189,73 +189,59 @@ func (server *ServerMonitor) SetCredential(url string, user string, pass string) func (server *ServerMonitor) SetReplicationGTIDSlavePosFromServer(master *ServerMonitor) (string, error) { server.StopSlave() - if server.IsMariaDB() { - return dbhelper.ChangeMaster(server.Conn, dbhelper.ChangeMasterOpt{ - Host: master.Host, - Port: master.Port, - User: master.ClusterGroup.rplUser, - Password: master.ClusterGroup.rplPass, - Retry: strconv.Itoa(master.ClusterGroup.Conf.ForceSlaveHeartbeatRetry), - Heartbeat: strconv.Itoa(master.ClusterGroup.Conf.ForceSlaveHeartbeatTime), - Mode: "SLAVE_POS", - SSL: server.ClusterGroup.Conf.ReplicationSSL, - Channel: server.ClusterGroup.Conf.MasterConn, - IsDelayed: server.IsDelayed, - Delay: strconv.Itoa(server.ClusterGroup.Conf.HostsDelayedTime), - PostgressDB: server.PostgressDB, - }, server.DBVersion) - } - return dbhelper.ChangeMaster(server.Conn, dbhelper.ChangeMasterOpt{ + + changeOpt := dbhelper.ChangeMasterOpt{ Host: master.Host, Port: master.Port, User: master.ClusterGroup.rplUser, Password: master.ClusterGroup.rplPass, Retry: strconv.Itoa(master.ClusterGroup.Conf.ForceSlaveHeartbeatRetry), Heartbeat: strconv.Itoa(master.ClusterGroup.Conf.ForceSlaveHeartbeatTime), - Mode: "MASTER_AUTO_POSITION", SSL: server.ClusterGroup.Conf.ReplicationSSL, Channel: server.ClusterGroup.Conf.MasterConn, IsDelayed: server.IsDelayed, Delay: strconv.Itoa(server.ClusterGroup.Conf.HostsDelayedTime), 
PostgressDB: server.PostgressDB, - }, server.DBVersion) + } + + if server.IsMariaDB() { + changeOpt.Mode = "SLAVE_POS" + return dbhelper.ChangeMaster(server.Conn, changeOpt, server.DBVersion) + } + changeOpt.Mode = "MASTER_AUTO_POSITION" + return dbhelper.ChangeMaster(server.Conn, changeOpt, server.DBVersion) } func (server *ServerMonitor) SetReplicationGTIDCurrentPosFromServer(master *ServerMonitor) (string, error) { var err error logs := "" + changeOpt := dbhelper.ChangeMasterOpt{ + SSL: server.ClusterGroup.Conf.ReplicationSSL, + Channel: server.ClusterGroup.Conf.MasterConn, + IsDelayed: server.IsDelayed, + Delay: strconv.Itoa(server.ClusterGroup.Conf.HostsDelayedTime), + PostgressDB: server.PostgressDB, + } if server.DBVersion.IsMySQLOrPerconaGreater57() { // We can do MySQL 5.7 style failover server.ClusterGroup.LogPrintf(LvlInfo, "Doing MySQL GTID switch of the old master") - logs, err = dbhelper.ChangeMaster(server.Conn, dbhelper.ChangeMasterOpt{ - Host: server.ClusterGroup.master.Host, - Port: server.ClusterGroup.master.Port, - User: server.ClusterGroup.rplUser, - Password: server.ClusterGroup.rplPass, - Retry: strconv.Itoa(server.ClusterGroup.Conf.ForceSlaveHeartbeatRetry), - Heartbeat: strconv.Itoa(server.ClusterGroup.Conf.ForceSlaveHeartbeatTime), - Mode: "MASTER_AUTO_POSITION", - SSL: server.ClusterGroup.Conf.ReplicationSSL, - Channel: server.ClusterGroup.Conf.MasterConn, - IsDelayed: server.IsDelayed, - Delay: strconv.Itoa(server.ClusterGroup.Conf.HostsDelayedTime), - PostgressDB: server.PostgressDB, - }, server.DBVersion) + changeOpt.Host = server.ClusterGroup.master.Host + changeOpt.Port = server.ClusterGroup.master.Port + changeOpt.User = server.ClusterGroup.rplUser + changeOpt.Password = server.ClusterGroup.rplPass + changeOpt.Retry = strconv.Itoa(server.ClusterGroup.Conf.ForceSlaveHeartbeatRetry) + changeOpt.Heartbeat = strconv.Itoa(server.ClusterGroup.Conf.ForceSlaveHeartbeatTime) + changeOpt.Mode = "MASTER_AUTO_POSITION" + logs, err = dbhelper.ChangeMaster(server.Conn, changeOpt, server.DBVersion) } else { - logs, err = dbhelper.ChangeMaster(server.Conn, dbhelper.ChangeMasterOpt{ - Host: master.Host, - Port: master.Port, - User: master.ClusterGroup.rplUser, - Password: master.ClusterGroup.rplPass, - Retry: strconv.Itoa(master.ClusterGroup.Conf.ForceSlaveHeartbeatRetry), - Heartbeat: strconv.Itoa(master.ClusterGroup.Conf.ForceSlaveHeartbeatTime), - Mode: "CURRENT_POS", - SSL: server.ClusterGroup.Conf.ReplicationSSL, - Channel: server.ClusterGroup.Conf.MasterConn, - IsDelayed: server.IsDelayed, - Delay: strconv.Itoa(server.ClusterGroup.Conf.HostsDelayedTime), - PostgressDB: server.PostgressDB, - }, server.DBVersion) + changeOpt.Host = master.Host + changeOpt.Port = master.Port + changeOpt.User = master.ClusterGroup.rplUser + changeOpt.Password = master.ClusterGroup.rplPass + changeOpt.Retry = strconv.Itoa(master.ClusterGroup.Conf.ForceSlaveHeartbeatRetry) + changeOpt.Heartbeat = strconv.Itoa(master.ClusterGroup.Conf.ForceSlaveHeartbeatTime) + changeOpt.Mode = "CURRENT_POS" + logs, err = dbhelper.ChangeMaster(server.Conn, changeOpt, server.DBVersion) } return logs, err } diff --git a/server/http.go b/server/http.go index 008d7bb0b..3ff4f9ee4 100644 --- a/server/http.go +++ b/server/http.go @@ -44,7 +44,7 @@ func (repman *ReplicationManager) httpserver() { // before starting the http server, check that the dashboard is present if err := repman.testFile("app.html"); err != nil { - log.Printf("ERROR", "Dashboard app.html file missing - will not start http server %s\n", err) + 
log.Printf("ERROR: Dashboard app.html file missing - will not start http server %s\n", err) return } From c95bcd1da43f9957e58e22547c809e1c2dc65ed5 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Tue, 23 Feb 2021 04:06:25 +0100 Subject: [PATCH 25/84] Single commit to reduce the complexity of the cluster.checkFailed as it is critical for a good review --- cluster/cluster_chk.go | 55 +++++++++++++++++------------------------- 1 file changed, 22 insertions(+), 33 deletions(-) diff --git a/cluster/cluster_chk.go b/cluster/cluster_chk.go index 8781c368b..33ce78e00 100644 --- a/cluster/cluster_chk.go +++ b/cluster/cluster_chk.go @@ -28,42 +28,31 @@ func (cluster *Cluster) CheckFailed() { cluster.sme.AddState("ERR00001", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["ERR00001"]), ErrFrom: "CHECK"}) return } - if cluster.master != nil { - if cluster.isFoundCandidateMaster() { - if cluster.isBetweenFailoverTimeValid() { - if cluster.IsNotHavingMySQLErrantTransaction() { - if cluster.IsSameWsrepUUID() { - if cluster.isMaxMasterFailedCountReached() { - if cluster.isActiveArbitration() { - if cluster.isMaxClusterFailoverCountNotReached() { - if cluster.isAutomaticFailover() { - if cluster.isMasterFailed() { - if cluster.isNotFirstSlave() { - if cluster.isArbitratorAlive() { - - // False Positive - if cluster.isExternalOk() == false { - if cluster.isOneSlaveHeartbeatIncreasing() == false { - if cluster.isMaxscaleSupectRunning() == false { - cluster.MasterFailover(true) - cluster.failoverCond.Send <- true - } - } - } - } - - } - } - } - } - } - } - } + if cluster.master == nil { + cluster.LogPrintf(LvlDbg, "Master not discovered, skipping failover check") + } + + if cluster.isFoundCandidateMaster() && + cluster.isBetweenFailoverTimeValid() && + cluster.IsNotHavingMySQLErrantTransaction() && + cluster.IsSameWsrepUUID() && + cluster.isMaxMasterFailedCountReached() && + cluster.isActiveArbitration() && + cluster.isMaxClusterFailoverCountNotReached() && + cluster.isAutomaticFailover() && + cluster.isMasterFailed() && + cluster.isNotFirstSlave() && + cluster.isArbitratorAlive() { + + // False Positive + if cluster.isExternalOk() == false { + if cluster.isOneSlaveHeartbeatIncreasing() == false { + if cluster.isMaxscaleSupectRunning() == false { + cluster.MasterFailover(true) + cluster.failoverCond.Send <- true } } } - } else { - cluster.LogPrintf(LvlDbg, "Master not discovered, skipping failover check") } } From cf59f33c5b152b06f7f779c26e9b954e8a27236b Mon Sep 17 00:00:00 2001 From: svaroqui Date: Fri, 26 Feb 2021 17:07:25 +0100 Subject: [PATCH 26/84] Inter clusters replication source failover --- cluster/cluster.go | 2 + cluster/cluster_chk.go | 4 +- cluster/cluster_fail.go | 270 ++++++++++++------------------------- cluster/cluster_get.go | 17 ++- cluster/cluster_topo.go | 40 ++++-- cluster/prov_opensvc_db.go | 2 +- cluster/prx_get.go | 4 + cluster/prx_haproxy.go | 30 +++-- cluster/srv.go | 16 ++- cluster/srv_get.go | 4 + cluster/srv_has.go | 3 + cluster/srv_set.go | 4 + config/config.go | 2 +- main.go | 2 +- utils/gtid/gtid.go | 11 ++ 15 files changed, 193 insertions(+), 218 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index bcca7accd..bdaf31d9c 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -448,6 +448,8 @@ func (cluster *Cluster) Run() { } wg.Wait() + // AddChildServers can't be done before TopologyDiscover but need a refresh aquiring more fresh gtid vs current cluster so elelection win but server is ignored see electFailoverCandidate + 
cluster.AddChildServers() cluster.IsFailable = cluster.GetStatus() // CheckFailed trigger failover code if passing all false positiv and constraints diff --git a/cluster/cluster_chk.go b/cluster/cluster_chk.go index 8781c368b..c97e958e7 100644 --- a/cluster/cluster_chk.go +++ b/cluster/cluster_chk.go @@ -80,13 +80,13 @@ func (cluster *Cluster) isSlaveElectableForSwitchover(sl *ServerMonitor, forcing } return false } - if hasBinLogs == false && cluster.Conf.CheckBinFilter == true { + if hasBinLogs == false && cluster.Conf.CheckBinFilter == true && (sl.GetSourceClusterName() == cluster.Name || sl.GetSourceClusterName() == "") { if cluster.Conf.LogLevel > 1 || forcingLog { cluster.LogPrintf(LvlWarn, "Binlog filters differ on master and slave %s. Skipping", sl.URL) } return false } - if cluster.IsEqualReplicationFilters(cluster.master, sl) == false && cluster.Conf.CheckReplFilter == true { + if cluster.IsEqualReplicationFilters(cluster.master, sl) == false && (sl.GetSourceClusterName() == cluster.Name || sl.GetSourceClusterName() == "") && cluster.Conf.CheckReplFilter == true { if cluster.Conf.LogLevel > 1 || forcingLog { cluster.LogPrintf(LvlWarn, "Replication filters differ on master and slave %s. Skipping", sl.URL) } diff --git a/cluster/cluster_fail.go b/cluster/cluster_fail.go index 5a97c03a5..8290413a4 100644 --- a/cluster/cluster_fail.go +++ b/cluster/cluster_fail.go @@ -174,7 +174,7 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { cluster.LogPrintf(LvlInfo, "Waiting for candidate master %s to apply relay log", cluster.master.URL) err = cluster.master.ReadAllRelayLogs() if err != nil { - cluster.LogPrintf(LvlErr, "Error while reading relay logs on candidate %s: %s", cluster.master, err) + cluster.LogPrintf(LvlErr, "Error while reading relay logs on candidate %s: %s", cluster.master.URL, err) } cluster.LogPrintf(LvlDbg, "Save replication status before opening traffic") ms, err := cluster.master.GetSlaveStatus(cluster.master.ReplicationSourceName) @@ -315,14 +315,14 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { // ******** cluster.LogPrintf(LvlInfo, "Killing new connections on old master showing before update route") dbhelper.KillThreads(cluster.oldMaster.Conn, cluster.oldMaster.DBVersion) - cluster.LogPrintf(LvlInfo, "Switching old master as a slave") + cluster.LogPrintf(LvlInfo, "Switching old leader to slave") logs, err := dbhelper.UnlockTables(cluster.oldMaster.Conn) cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Could not unlock tables on old master %s", err) // Moved in freeze //cluster.oldMaster.StopSlave() // This is helpful in some cases the old master can have an old replication running one_shoot_slave_pos := false - if cluster.oldMaster.DBVersion.IsMariaDB() && cluster.oldMaster.HaveMariaDBGTID == false && cluster.oldMaster.DBVersion.Major >= 10 { + if cluster.oldMaster.DBVersion.IsMariaDB() && cluster.oldMaster.HaveMariaDBGTID == false && cluster.oldMaster.DBVersion.Major >= 10 && cluster.Conf.SwitchoverCopyOldLeaderGtid { logs, err := dbhelper.SetGTIDSlavePos(cluster.oldMaster.Conn, cluster.master.GTIDBinlogPos.Sprint()) cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Could not set old master gtid_slave_pos , reason: %s", err) one_shoot_slave_pos = true @@ -330,128 +330,61 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Could not check old master GTID status: %s", err) var changeMasterErr error - // Do 
positional switch if we are not MariaDB and no using GTID + var changemasteropt dbhelper.ChangeMasterOpt + changemasteropt.Host = cluster.master.Host + changemasteropt.Port = cluster.master.Port + changemasteropt.User = cluster.rplUser + changemasteropt.Password = cluster.rplPass + changemasteropt.Logfile = cluster.master.BinaryLogFile + changemasteropt.Logpos = cluster.master.BinaryLogPos + changemasteropt.Retry = strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry) + changemasteropt.Heartbeat = strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatTime) + changemasteropt.SSL = cluster.Conf.ReplicationSSL + changemasteropt.Channel = cluster.Conf.MasterConn + changemasteropt.IsDelayed = cluster.oldMaster.IsDelayed + changemasteropt.Delay = strconv.Itoa(cluster.oldMaster.ClusterGroup.Conf.HostsDelayedTime) + changemasteropt.PostgressDB = cluster.master.PostgressDB + oldmasterneedslavestart := true if cluster.oldMaster.HasMariaDBGTID() == false && cluster.oldMaster.HasMySQLGTID() == false { + changemasteropt.Mode = "POSITIONAL" cluster.LogPrintf(LvlInfo, "Doing positional switch of old Master") - logs, changeMasterErr = dbhelper.ChangeMaster(cluster.oldMaster.Conn, dbhelper.ChangeMasterOpt{ - Host: cluster.master.Host, - Port: cluster.master.Port, - User: cluster.rplUser, - Password: cluster.rplPass, - Logfile: cluster.master.BinaryLogFile, - Logpos: cluster.master.BinaryLogPos, - Retry: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry), - Heartbeat: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatTime), - Mode: "POSITIONAL", - SSL: cluster.Conf.ReplicationSSL, - Channel: cluster.Conf.MasterConn, - IsDelayed: cluster.oldMaster.IsDelayed, - Delay: strconv.Itoa(cluster.oldMaster.ClusterGroup.Conf.HostsDelayedTime), - PostgressDB: cluster.master.PostgressDB, - }, cluster.oldMaster.DBVersion) - cluster.LogSQL(logs, changeMasterErr, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Change master failed on old master, reason:%s ", changeMasterErr) - - logs, err = cluster.oldMaster.StartSlave() - cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Start slave failed on old master,%s reason: %s ", cluster.oldMaster.URL, err) - } else if cluster.oldMaster.HasMySQLGTID() == true { // We can do MySQL 5.7 style failover cluster.LogPrintf(LvlInfo, "Doing MySQL GTID switch of the old master") - logs, changeMasterErr = dbhelper.ChangeMaster(cluster.oldMaster.Conn, dbhelper.ChangeMasterOpt{ - Host: cluster.master.Host, - Port: cluster.master.Port, - User: cluster.rplUser, - Password: cluster.rplPass, - Retry: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry), - Heartbeat: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatTime), - Mode: "MASTER_AUTO_POSITION", - SSL: cluster.Conf.ReplicationSSL, - Channel: cluster.Conf.MasterConn, - IsDelayed: cluster.oldMaster.IsDelayed, - Delay: strconv.Itoa(cluster.oldMaster.ClusterGroup.Conf.HostsDelayedTime), - PostgressDB: cluster.master.PostgressDB, - }, cluster.oldMaster.DBVersion) - cluster.LogSQL(logs, changeMasterErr, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Change master failed on old master %s", logs) - logs, err = cluster.oldMaster.StartSlave() - cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Start slave failed on old master,%s reason: %s ", cluster.oldMaster.URL, err) + changemasteropt.Mode = "MASTER_AUTO_POSITION" } else if cluster.Conf.MxsBinlogOn == false { - cluster.LogPrintf(LvlInfo, "Doing MariaDB GTID switch of the old master") // current pos is needed on old master as writes diverges from slave pos // if 
gtid_slave_pos was forced use slave_pos : positional to GTID promotion + cluster.LogPrintf(LvlInfo, "Doing MariaDB GTID switch of the old master") if one_shoot_slave_pos { - logs, changeMasterErr = dbhelper.ChangeMaster(cluster.oldMaster.Conn, dbhelper.ChangeMasterOpt{ - Host: cluster.master.Host, - Port: cluster.master.Port, - User: cluster.rplUser, - Password: cluster.rplPass, - Retry: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry), - Heartbeat: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatTime), - Mode: "SLAVE_POS", - SSL: cluster.Conf.ReplicationSSL, - Channel: cluster.Conf.MasterConn, - IsDelayed: cluster.oldMaster.IsDelayed, - Delay: strconv.Itoa(cluster.oldMaster.ClusterGroup.Conf.HostsDelayedTime), - PostgressDB: cluster.master.PostgressDB, - }, cluster.oldMaster.DBVersion) + changemasteropt.Mode = "SLAVE_POS" } else { - logs, changeMasterErr = dbhelper.ChangeMaster(cluster.oldMaster.Conn, dbhelper.ChangeMasterOpt{ - Host: cluster.master.Host, - Port: cluster.master.Port, - User: cluster.rplUser, - Password: cluster.rplPass, - Retry: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry), - Heartbeat: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatTime), - Mode: "CURRENT_POS", - SSL: cluster.Conf.ReplicationSSL, - Channel: cluster.Conf.MasterConn, - IsDelayed: cluster.oldMaster.IsDelayed, - Delay: strconv.Itoa(cluster.oldMaster.ClusterGroup.Conf.HostsDelayedTime), - PostgressDB: cluster.master.PostgressDB, - }, cluster.oldMaster.DBVersion) + changemasteropt.Mode = "CURRENT_POS" } - cluster.LogSQL(logs, changeMasterErr, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Change master failed on old master %s", changeMasterErr) - logs, err = cluster.oldMaster.StartSlave() - cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Start slave failed on old master,%s reason: %s ", cluster.oldMaster.URL, err) } else { - // Is Maxscale // Don't start slave until the relay as been point to new master + oldmasterneedslavestart = false cluster.LogPrintf(LvlInfo, "Pointing old master to relay server") if relaymaster.MxsHaveGtid { - logs, err = dbhelper.ChangeMaster(cluster.oldMaster.Conn, dbhelper.ChangeMasterOpt{ - Host: relaymaster.Host, - Port: relaymaster.Port, - User: cluster.rplUser, - Password: cluster.rplPass, - Retry: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry), - Heartbeat: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatTime), - Mode: "SLAVE_POS", - SSL: cluster.Conf.ReplicationSSL, - Channel: cluster.Conf.MasterConn, - IsDelayed: cluster.oldMaster.IsDelayed, - Delay: strconv.Itoa(cluster.oldMaster.ClusterGroup.Conf.HostsDelayedTime), - PostgressDB: relaymaster.PostgressDB, - }, cluster.oldMaster.DBVersion) + changemasteropt.Mode = "SLAVE_POS" + changemasteropt.Host = relaymaster.Host + changemasteropt.Port = relaymaster.Port } else { - logs, err = dbhelper.ChangeMaster(cluster.oldMaster.Conn, dbhelper.ChangeMasterOpt{ - Host: relaymaster.Host, - Port: relaymaster.Port, - User: cluster.rplUser, - Password: cluster.rplPass, - Retry: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry), - Heartbeat: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatTime), - Mode: "POSITIONAL", - Logfile: crash.FailoverMasterLogFile, - Logpos: crash.FailoverMasterLogPos, - SSL: cluster.Conf.ReplicationSSL, - Channel: cluster.Conf.MasterConn, - IsDelayed: cluster.oldMaster.IsDelayed, - Delay: strconv.Itoa(cluster.oldMaster.ClusterGroup.Conf.HostsDelayedTime), - PostgressDB: relaymaster.PostgressDB, - }, cluster.oldMaster.DBVersion) + changemasteropt.Mode = "POSITIONAL" + 
changemasteropt.Host = relaymaster.Host + changemasteropt.Port = relaymaster.Port + changemasteropt.Logfile = crash.FailoverMasterLogFile + changemasteropt.Logpos = crash.FailoverMasterLogPos } } - cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Change master failed on old master %s", err) + logs, changeMasterErr = dbhelper.ChangeMaster(cluster.oldMaster.Conn, changemasteropt, cluster.oldMaster.DBVersion) + cluster.LogSQL(logs, changeMasterErr, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Change master failed on old master, reason:%s ", changeMasterErr) + if oldmasterneedslavestart { + logs, err = cluster.oldMaster.StartSlave() + cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Start slave failed on old master,%s reason: %s ", cluster.oldMaster.URL, err) + } + if cluster.Conf.ReadOnly { logs, err = dbhelper.SetReadOnly(cluster.oldMaster.Conn, true) cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Could not set old master as read-only, %s", err) @@ -497,7 +430,7 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { logs, err = sl.StopSlave() cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Could not stop slave on server %s, %s", sl.URL, err) if fail == false && cluster.Conf.MxsBinlogOn == false && cluster.Conf.SwitchSlaveWaitCatch { - if cluster.Conf.FailForceGtid && sl.DBVersion.IsMariaDB() { + if cluster.Conf.SwitchoverCopyOldLeaderGtid && sl.DBVersion.IsMariaDB() { logs, err := dbhelper.SetGTIDSlavePos(sl.Conn, cluster.oldMaster.GTIDBinlogPos.Sprint()) cluster.LogSQL(logs, err, sl.URL, "MasterFailover", LvlErr, "Could not set gtid_slave_pos on slave %s, %s", sl.URL, err) } @@ -505,6 +438,19 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { var changeMasterErr error + var changemasteropt dbhelper.ChangeMasterOpt + changemasteropt.Host = cluster.master.Host + changemasteropt.Port = cluster.master.Port + changemasteropt.User = cluster.rplUser + changemasteropt.Password = cluster.rplPass + changemasteropt.Retry = strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry) + changemasteropt.Heartbeat = strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatTime) + changemasteropt.SSL = cluster.Conf.ReplicationSSL + changemasteropt.Channel = cluster.Conf.MasterConn + changemasteropt.IsDelayed = sl.IsDelayed + changemasteropt.Delay = strconv.Itoa(sl.ClusterGroup.Conf.HostsDelayedTime) + changemasteropt.PostgressDB = cluster.master.PostgressDB + // Not MariaDB and not using MySQL GTID, 2.0 stop doing any thing until pseudo GTID if sl.HasMariaDBGTID() == false && cluster.master.HasMySQLGTID() == false { @@ -528,22 +474,10 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { cluster.LogPrintf(LvlInfo, "Found skip coordinate on master %s, %s", mFile, mPos) cluster.LogPrintf(LvlInfo, "Doing Positional switch of slave %s", sl.URL) - logs, changeMasterErr = dbhelper.ChangeMaster(sl.Conn, dbhelper.ChangeMasterOpt{ - Host: cluster.master.Host, - Port: cluster.master.Port, - User: cluster.rplUser, - Password: cluster.rplPass, - Logfile: mFile, - Logpos: mPos, - Retry: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry), - Heartbeat: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatTime), - Mode: "POSITIONAL", - SSL: cluster.Conf.ReplicationSSL, - Channel: cluster.Conf.MasterConn, - IsDelayed: sl.IsDelayed, - Delay: strconv.Itoa(sl.ClusterGroup.Conf.HostsDelayedTime), - PostgressDB: cluster.master.PostgressDB, - }, sl.DBVersion) + changemasteropt.Logfile = mFile + changemasteropt.Logpos = 
mPos + changemasteropt.Mode = "POSITIONAL" + logs, changeMasterErr = dbhelper.ChangeMaster(sl.Conn, changemasteropt, sl.DBVersion) } else { sl.SetMaintenance() } @@ -662,76 +596,47 @@ func (cluster *Cluster) FailoverExtraMultiSource(oldMaster *ServerMonitor, NewMa myparentrplpassword := "" parentCluster := cluster.GetParentClusterFromReplicationSource(rep) cluster.LogPrintf(LvlInfo, "Failover replication source %s ", rep.ConnectionName.String) + // need a way to found parent replication password if parentCluster != nil { myparentrplpassword = parentCluster.rplPass } else { cluster.LogPrintf(LvlErr, "Unable to found a monitored cluster for replication source %s ", rep.ConnectionName.String) cluster.LogPrintf(LvlErr, "Moving source %s with empty password to preserve replication stream on new master", rep.ConnectionName.String) } - // need a way to found parent replication password - if strings.ToUpper(rep.UsingGtid.String) == "NO" { - logs, err := dbhelper.ChangeMaster(NewMaster.Conn, dbhelper.ChangeMasterOpt{ - Host: rep.MasterHost.String, - Port: rep.MasterPort.String, - User: rep.MasterUser.String, - Password: myparentrplpassword, - Retry: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry), - Heartbeat: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatTime), - Mode: "POSITIONAL", - Logfile: rep.MasterLogFile.String, - Logpos: rep.ExecMasterLogPos.String, - SSL: cluster.Conf.ReplicationSSL, - Channel: rep.ConnectionName.String, - IsDelayed: false, - Delay: "0", - PostgressDB: NewMaster.PostgressDB, - }, NewMaster.DBVersion) - cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Change master failed on slave %s, %s", NewMaster.URL, err) + var changemasteropt dbhelper.ChangeMasterOpt + changemasteropt.Host = rep.MasterHost.String + changemasteropt.Port = rep.MasterPort.String + changemasteropt.User = rep.MasterUser.String + changemasteropt.Password = myparentrplpassword + changemasteropt.Retry = strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry) + changemasteropt.Heartbeat = strconv.Itoa(int(rep.SlaveHeartbeatPeriod)) + changemasteropt.Logfile = rep.MasterLogFile.String + changemasteropt.Logpos = rep.ExecMasterLogPos.String + changemasteropt.SSL = cluster.Conf.ReplicationSSL + changemasteropt.Channel = rep.ConnectionName.String + changemasteropt.IsDelayed = false + changemasteropt.Delay = "0" + changemasteropt.PostgressDB = NewMaster.PostgressDB + if strings.ToUpper(rep.UsingGtid.String) == "NO" { + changemasteropt.Mode = "POSITIONAL" } else { if strings.ToUpper(rep.UsingGtid.String) == "SLAVE_POS" || strings.ToUpper(rep.UsingGtid.String) == "CURRENT_POS" { - // We herite last GTID state from the old leader failed or not - logs, err := dbhelper.ChangeMaster(NewMaster.Conn, dbhelper.ChangeMasterOpt{ - Host: rep.MasterHost.String, - Port: rep.MasterPort.String, - User: rep.MasterUser.String, - Password: myparentrplpassword, - Retry: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry), - Heartbeat: strconv.Itoa(int(rep.SlaveHeartbeatPeriod)), - Mode: strings.ToUpper(rep.UsingGtid.String), - SSL: cluster.Conf.ReplicationSSL, - Channel: rep.ConnectionName.String, - IsDelayed: false, - Delay: "0", - PostgressDB: NewMaster.PostgressDB, - }, NewMaster.DBVersion) - cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Change master failed on slave %s, %s", NewMaster.URL, err) + changemasteropt.Mode = strings.ToUpper(rep.UsingGtid.String) } else if rep.RetrievedGtidSet.Valid && rep.ExecutedGtidSet.String != "" { - logs, err := dbhelper.ChangeMaster(NewMaster.Conn, 
dbhelper.ChangeMasterOpt{ - Host: rep.MasterHost.String, - Port: rep.MasterPort.String, - User: rep.MasterUser.String, - Password: myparentrplpassword, - Retry: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatRetry), - Heartbeat: strconv.Itoa(cluster.Conf.ForceSlaveHeartbeatTime), - Mode: "MASTER_AUTO_POSITION", - SSL: cluster.Conf.ReplicationSSL, - Channel: rep.ConnectionName.String, - IsDelayed: false, - Delay: "0", - PostgressDB: NewMaster.PostgressDB, - }, NewMaster.DBVersion) - cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Change master failed on slave %s, %s", NewMaster.URL, err) - + changemasteropt.Mode = "MASTER_AUTO_POSITION" } } - logs, err := dbhelper.StartSlave(NewMaster.Conn, rep.ConnectionName.String, NewMaster.DBVersion) - cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Start replication source %s failed on %s, %s", rep.ConnectionName.String, NewMaster.URL, err) - if fail == false { + logs, err := dbhelper.ChangeMaster(NewMaster.Conn, changemasteropt, NewMaster.DBVersion) + cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Change master failed on slave %s, %s", NewMaster.URL, err) + if fail == false && err == nil { logs, err := dbhelper.ResetSlave(oldMaster.Conn, true, rep.ConnectionName.String, oldMaster.DBVersion) - cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Reset replication source %s failed on %s, %s", rep.ConnectionName.String, oldMaster.URL, err) + cluster.LogSQL(logs, err, oldMaster.URL, "MasterFailover", LvlErr, "Reset replication source %s failed on %s, %s", rep.ConnectionName.String, oldMaster.URL, err) } + logs, err = dbhelper.StartSlave(NewMaster.Conn, rep.ConnectionName.String, NewMaster.DBVersion) + cluster.LogSQL(logs, err, NewMaster.URL, "MasterFailover", LvlErr, "Start replication source %s failed on %s, %s", rep.ConnectionName.String, NewMaster.URL, err) + } } return nil @@ -824,9 +729,10 @@ func (cluster *Cluster) electSwitchoverCandidate(l []*ServerMonitor, forcingLog if errss == nil { if cluster.master.State != stateFailed { - seqnos = sl.SlaveGtid.GetSeqNos() + // seqnos = sl.SlaveGtid.GetSeqNos() + seqnos = sl.SlaveGtid.GetSeqDomainIdNos(cluster.master.DomainID) } else { - seqnos = gtid.NewList(ss.GtidIOPos.String).GetSeqNos() + seqnos = gtid.NewList(ss.GtidIOPos.String).GetSeqDomainIdNos(cluster.master.DomainID) } } @@ -947,9 +853,9 @@ func (cluster *Cluster) electFailoverCandidate(l []*ServerMonitor, forcingLog bo if errss == nil { if cluster.master.State != stateFailed { // Need MySQL GTID support - seqnos = sl.SlaveGtid.GetSeqNos() + seqnos = sl.SlaveGtid.GetSeqDomainIdNos(cluster.master.DomainID) } else { - seqnos = gtid.NewList(ss.GtidIOPos.String).GetSeqNos() + seqnos = gtid.NewList(ss.GtidIOPos.String).GetSeqDomainIdNos(cluster.master.DomainID) } } diff --git a/cluster/cluster_get.go b/cluster/cluster_get.go index e66b185d1..60deae8f5 100644 --- a/cluster/cluster_get.go +++ b/cluster/cluster_get.go @@ -9,6 +9,7 @@ package cluster import ( "encoding/json" "errors" + "hash/crc32" "io/ioutil" "net/http" "sort" @@ -257,7 +258,7 @@ func (cluster *Cluster) getPreferedMaster() *ServerMonitor { } for _, server := range cluster.Servers { if cluster.Conf.LogLevel > 2 { - cluster.LogPrintf(LvlDbg, "Lookup server %s if preferred master: %s", server.URL, cluster.Conf.PrefMaster) + cluster.LogPrintf(LvlDbg, "Lookup if server: %s is preferred master: %s", server.URL, cluster.Conf.PrefMaster) } if server.URL == cluster.Conf.PrefMaster { return server @@ -272,7 +273,7 @@ func 
(cluster *Cluster) GetRelayServer() *ServerMonitor { } for _, server := range cluster.Servers { if cluster.Conf.LogLevel > 2 { - cluster.LogPrintf(LvlDbg, "Lookup server %s if maxscale binlog server: %s", server.URL, cluster.Conf.PrefMaster) + cluster.LogPrintf(LvlDbg, "Check for relay server %s: relay: %t", server.URL, server.IsRelay) } if server.IsRelay { return server @@ -537,6 +538,7 @@ func (cluster *Cluster) GetChildClusters() map[string]*Cluster { for _, c := range cluster.clusterList { // cluster.LogPrintf(LvlErr, "GetChildClusters %s %s ", cluster.Name, c.Conf.ClusterHead) if cluster.Name == c.Conf.ClusterHead { + cluster.LogPrintf(LvlDbg, "Discovering of a child cluster via ClusterHead %s replication source %s", c.Name, c.Conf.ClusterHead) clusters[c.Name] = c } // lopp over master multi source replication @@ -545,7 +547,7 @@ func (cluster *Cluster) GetChildClusters() map[string]*Cluster { for _, rep := range condidateclustermaster.Replications { // is a source name has my cluster name or is any child cluster master point to my master if rep.ConnectionName.String == cluster.Name || (cluster.GetMaster() != nil && cluster.master.Host == rep.MasterHost.String && cluster.master.Port == rep.MasterPort.String) { - cluster.LogPrintf(LvlDbg, "Discovering of a child cluster %s replication source %s", c.Name, rep.ConnectionName.String) + cluster.LogPrintf(LvlDbg, "Discovering of a child cluster via multi source %s replication source %s", c.Name, rep.ConnectionName.String) clusters[c.Name] = c } } @@ -692,6 +694,15 @@ func (cluster *Cluster) GetConfigRelaySpaceLimit() string { return strconv.Itoa(10 * 1024 * 1024) } +func (cluster *Cluster) GetConfigReplicationDomain() string { + // Multi source need differnt domain id + if cluster.Conf.MasterConn != "" && cluster.Conf.ProvDomain == "0" { + crcTable := crc32.MakeTable(0xD5828281) + return strconv.FormatUint(uint64(crc32.Checksum([]byte(cluster.Name), crcTable)), 10) + } + return cluster.Conf.ProvDomain +} + // GetConfigInnoDBBPSize configure 80% of the ConfigMemory in Megabyte func (cluster *Cluster) GetConfigInnoDBBPSize() string { containermem, err := strconv.ParseInt(cluster.Conf.ProvMem, 10, 64) diff --git a/cluster/cluster_topo.go b/cluster/cluster_topo.go index 80b170b5f..208cc80e2 100644 --- a/cluster/cluster_topo.go +++ b/cluster/cluster_topo.go @@ -70,34 +70,47 @@ func (cluster *Cluster) newServerList() error { // AddChildServers Add child clusters nodes if they get same source name func (cluster *Cluster) AddChildServers() error { + mychilds := cluster.GetChildClusters() + for _, c := range mychilds { for _, sv := range c.Servers { - cluster.LogPrintf(LvlDbg, "AddChildServers chacking %s of %s ", sv.URL, c.Name) + cluster.LogPrintf(LvlDbg, "AddChildServers checking %s of %s ", sv.URL, c.Name) if sv.IsSlaveOfReplicationSource(cluster.Conf.MasterConn) { - cluster.LogPrintf(LvlDbg, "AddChildServers %s IsSlaveOfReplicationSource %s ", sv.URL, cluster.Conf.MasterConn) - // mymaster, _ := cluster.GetMasterFromReplication(sv) - // if mymaster != nil { - cluster.LogPrintf(LvlDbg, "AddChildServers %s master found %s ", sv.URL, cluster.Conf.MasterConn) - + cluster.LogPrintf(LvlDbg, "Inter cluster multi-source check %s IsSlaveOfReplicationSource %s ", sv.URL, cluster.Conf.MasterConn) if !cluster.HasServer(sv) { - cluster.LogPrintf(LvlDbg, "AddChildServers %s Has server already found %s ", sv.URL, cluster.Conf.MasterConn) + cluster.LogPrintf(LvlInfo, "Inter cluster multi-source %s add server not yet discovered %s ", sv.URL, 
cluster.Conf.MasterConn) srv, err := cluster.newServerMonitor(sv.Name+":"+sv.Port, sv.ClusterGroup.dbUser, sv.ClusterGroup.dbPass, false, c.GetDomain()) if err != nil { + cluster.LogPrintf(LvlErr, "Inter cluster multi-source %s add server not yet discovered %s error %s", sv.URL, cluster.Conf.MasterConn, err) + return err } - srv.Ignored = true + + srv.SetSourceClusterName(c.Name) + srv.SetIgnored(true) cluster.Servers = append(cluster.Servers, srv) - // } - } - } else { - if cluster.HasServer(sv) { - cluster.RemoveServerFromIndex(cluster.GetServerIndice(sv)) + wg := new(sync.WaitGroup) + wg.Add(1) + cluster.TopologyDiscover(wg) + wg.Wait() + return nil + // leave for next monitor loop to remove the sever if no more link } } } } + for _, sv := range cluster.Servers { + cluster.LogPrintf(LvlDbg, "Inter cluster multi-source check drop unlinked server %s source cluster %s vs this cluster %s ", sv.URL, sv.GetSourceClusterName(), cluster.Name) + if sv.GetSourceClusterName() != cluster.Name && sv.GetSourceClusterName() != "" { + + if !sv.IsSlaveOfReplicationSource(cluster.Conf.MasterConn) { + cluster.LogPrintf(LvlInfo, "Inter cluster multi-source %s drop unlinked server %s ", sv.URL, cluster.Conf.MasterConn) + cluster.RemoveServerFromIndex(cluster.GetServerIndice(sv)) + } + } + } return nil // End child clusters same multi source server discorvery } @@ -106,7 +119,6 @@ func (cluster *Cluster) AddChildServers() error { // Create a connection to each host and build list of slaves. func (cluster *Cluster) TopologyDiscover(wcg *sync.WaitGroup) error { defer wcg.Done() - cluster.AddChildServers() //monitor ignored server fist so that their replication position get oldest wg := new(sync.WaitGroup) if cluster.Conf.Hosts == "" { diff --git a/cluster/prov_opensvc_db.go b/cluster/prov_opensvc_db.go index d80a13890..62b4587c9 100644 --- a/cluster/prov_opensvc_db.go +++ b/cluster/prov_opensvc_db.go @@ -856,7 +856,7 @@ func (server *ServerMonitor) GetEnv() map[string]string { "%%ENV:SVC_NAMESPACE%%": server.ClusterGroup.Name, "%%ENV:SVC_NAME%%": server.Name, "%%ENV:SVC_CONF_ENV_SST_METHOD%%": server.ClusterGroup.Conf.MultiMasterWsrepSSTMethod, - "%%ENV:SVC_CONF_ENV_DOMAIN_ID%%": server.ClusterGroup.Conf.ProvDomain, + "%%ENV:SVC_CONF_ENV_DOMAIN_ID%%": server.ClusterGroup.GetConfigReplicationDomain(), "%%ENV:SVC_CONF_ENV_SST_RECEIVER_PORT%%": server.SSTPort, "%%ENV:SVC_CONF_ENV_REPLICATION_MANAGER_ADDR%%": server.ClusterGroup.Conf.MonitorAddress + ":" + server.ClusterGroup.Conf.HttpPort, "%%ENV:SVC_CONF_ENV_REPLICATION_MANAGER_URL%%": server.ClusterGroup.Conf.MonitorAddress + ":" + server.ClusterGroup.Conf.APIPort, diff --git a/cluster/prx_get.go b/cluster/prx_get.go index 7cf2876d6..bb6724562 100644 --- a/cluster/prx_get.go +++ b/cluster/prx_get.go @@ -304,6 +304,10 @@ protocol=MySQLBackend confmaxscaleserverlist += "server" + strconv.Itoa(i) + } + if confhaproxywrite == "" && proxy.ClusterGroup.Conf.HaproxyMode == "runtimeapi" { + confhaproxywrite += ` +server leader unknown:3306 weight 100 maxconn 2000 check inter 1000` } switch variable { case "%%ENV:SERVERS_HAPROXY_WRITE%%": diff --git a/cluster/prx_haproxy.go b/cluster/prx_haproxy.go index 4631e4e66..b512a40fa 100644 --- a/cluster/prx_haproxy.go +++ b/cluster/prx_haproxy.go @@ -93,8 +93,12 @@ func (cluster *Cluster) initHaproxy(proxy *Proxy) { if err = haConfig.AddServer(cluster.Conf.HaproxyAPIWriteBackend, &s); err != nil { // log.Printf("Failed to add server to service_write ") } + } else { + s := haproxy.ServerDetail{Name: "leader", Host: "unknown", 
Port: 3306, Weight: 100, MaxConn: 2000, Check: true, CheckInterval: 1000} + if err = haConfig.AddServer(cluster.Conf.HaproxyAPIWriteBackend, &s); err != nil { + // log.Printf("Failed to add server to service_write ") + } } - fer := haproxy.Frontend{Name: "my_read_frontend", Mode: "tcp", DefaultBackend: cluster.Conf.HaproxyAPIReadBackend, BindPort: cluster.Conf.HaproxyReadPort, BindIp: cluster.Conf.HaproxyReadBindIp} if err := haConfig.AddFrontend(&fer); err != nil { cluster.LogPrintf(LvlErr, "Haproxy failed to add frontend read") @@ -128,17 +132,17 @@ func (cluster *Cluster) initHaproxy(proxy *Proxy) { err = haConfig.Render() if err != nil { - cluster.LogPrintf(LvlErr, "Could not render initial haproxy config, exiting...") + cluster.LogPrintf(LvlErr, "Could not create haproxy config %s", err) } if err := haRuntime.SetPid(haConfig.PidFile); err != nil { - cluster.LogPrintf(LvlInfo, "Haproxy reload config err %s", err.Error()) + cluster.LogPrintf(LvlInfo, "Haproxy set pid %s", err) } else { cluster.LogPrintf(LvlInfo, "Haproxy reload config on pid %s", haConfig.PidFile) } err = haRuntime.Reload(&haConfig) if err != nil { - cluster.LogPrintf(LvlErr, "Can't Reloadhaproxy config %s"+err.Error()) + cluster.LogPrintf(LvlErr, "Can't reload haproxy config %s", err) } } @@ -194,7 +198,7 @@ func (cluster *Cluster) refreshHaproxy(proxy *Proxy) error { proxy.BackendsWrite = nil proxy.BackendsRead = nil - + foundMasterInStat := false for { line, error := reader.Read() if error == io.EOF { @@ -211,7 +215,7 @@ func (cluster *Cluster) refreshHaproxy(proxy *Proxy) error { srv := cluster.GetServerFromURL(line[73]) if srv != nil { - + foundMasterInStat = true proxy.BackendsWrite = append(proxy.BackendsWrite, Backend{ Host: srv.Host, Port: srv.Port, @@ -223,7 +227,6 @@ func (cluster *Cluster) refreshHaproxy(proxy *Proxy) error { PrxByteOut: line[9], PrxLatency: line[61], //ttime: average session time in ms over the 1024 last requests }) - if !srv.IsMaster() { master := cluster.GetMaster() if master != nil { @@ -231,9 +234,7 @@ func (cluster *Cluster) refreshHaproxy(proxy *Proxy) error { haRuntime.SetMaster(master.Host, master.Port) } } - } - } if strings.Contains(strings.ToLower(line[0]), "read") { srv := cluster.GetServerFromURL(line[73]) @@ -261,7 +262,16 @@ func (cluster *Cluster) refreshHaproxy(proxy *Proxy) error { } } } - + if !foundMasterInStat { + master := cluster.GetMaster() + if master != nil { + res, err := haRuntime.SetMaster(master.Host, master.Port) + cluster.LogPrintf(LvlInfo, "Have leader in cluster but not in haproxy %s fixing it to master %s return %s", proxy.Host+":"+proxy.Port, master.URL, res) + if err != nil { + cluster.LogPrintf(LvlErr, "Can add leader %s in cluster but not in haproxy %s : %s", master.URL, proxy.Host+":"+proxy.Port, err) + } + } + } return nil } diff --git a/cluster/srv.go b/cluster/srv.go index 4db560633..6465a3d28 100644 --- a/cluster/srv.go +++ b/cluster/srv.go @@ -41,8 +41,9 @@ import ( type ServerMonitor struct { Id string `json:"id"` //Unique name given by cluster & crc64(URL) used by test to provision Name string `json:"name"` - Domain string `json:"domain"` + Domain string `json:"domain"` // Use to store orchestrator CNI domain ..svc. 
ServiceName string `json:"serviceName"` + SourceClusterName string `json:"sourceClusterName"` //Used to idenfied server added from other clusters linked with multi source Conn *sqlx.DB `json:"-"` User string `json:"user"` Pass string `json:"-"` @@ -54,6 +55,7 @@ type ServerMonitor struct { IP string `json:"ip"` Strict string `json:"strict"` ServerID uint64 `json:"serverId"` + DomainID uint64 `json:"domainId"` GTIDBinlogPos *gtid.List `json:"gtidBinlogPos"` CurrentGtid *gtid.List `json:"currentGtid"` SlaveGtid *gtid.List `json:"slaveGtid"` @@ -223,6 +225,7 @@ func (cluster *Cluster) newServerMonitor(url string, user string, pass string, c } server.Id = "db" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+server.Name+server.Port), crcTable), 10) var sid uint64 + //will be overide in Refresh with show variables server_id, used for provisionning configurator for server_id sid, err = strconv.ParseUint(strconv.FormatUint(crc64.Checksum([]byte(server.Name+server.Port), server.CrcTable), 10), 10, 64) server.ServerID = sid if cluster.Conf.TunnelHost != "" { @@ -600,6 +603,13 @@ func (server *ServerMonitor) Refresh() error { server.CurrentGtid = gtid.NewList(server.Variables["GTID_CURRENT_POS"]) server.SlaveGtid = gtid.NewList(server.Variables["GTID_SLAVE_POS"]) + sid, err := strconv.ParseUint(server.Variables["GTID_DOMAIN_ID"], 10, 64) + if err != nil { + server.ClusterGroup.LogPrintf(LvlErr, "Could not parse domain_id, reason: %s", err) + } else { + server.DomainID = uint64(sid) + } + } else { server.GTIDBinlogPos = gtid.NewMySQLList(server.Variables["GTID_EXECUTED"]) server.GTIDExecuted = server.Variables["GTID_EXECUTED"] @@ -849,11 +859,9 @@ func (server *ServerMonitor) freeze() bool { logs, err := server.SetEventScheduler(false) server.ClusterGroup.LogSQL(logs, err, server.URL, "Freeze", LvlErr, "Could not disable event scheduler on %s", server.URL) } - server.ClusterGroup.LogPrintf(LvlInfo, "Freezing writes stopping all slaves on %s", server.URL) logs, err := server.StopAllSlaves() - server.ClusterGroup.LogSQL(logs, err, server.URL, "Freeze", LvlErr, "Could not stop replicas source on ", server.URL) - + server.ClusterGroup.LogSQL(logs, err, server.URL, "Freeze", LvlErr, "Could not stop replicas source on %s ", server.URL) server.ClusterGroup.LogPrintf(LvlInfo, "Freezing writes set read only on %s", server.URL) logs, err = dbhelper.SetReadOnly(server.Conn, true) server.ClusterGroup.LogSQL(logs, err, server.URL, "Freeze", LvlInfo, "Could not set %s as read-only: %s", server.URL, err) diff --git a/cluster/srv_get.go b/cluster/srv_get.go index 47339bbe2..2a7b3aade 100644 --- a/cluster/srv_get.go +++ b/cluster/srv_get.go @@ -34,6 +34,10 @@ func (server *ServerMonitor) GetProcessList() []dbhelper.Processlist { return server.FullProcessList } +func (server *ServerMonitor) GetSourceClusterName() string { + return server.SourceClusterName +} + func (server *ServerMonitor) GetProcessListReplicationLongQuery() string { if !server.ClusterGroup.Conf.MonitorProcessList { return "" diff --git a/cluster/srv_has.go b/cluster/srv_has.go index 4f6c56708..2f64271c3 100644 --- a/cluster/srv_has.go +++ b/cluster/srv_has.go @@ -56,8 +56,11 @@ func (server *ServerMonitor) IsMysqlDumpUValidOption(option string) bool { } func (server *ServerMonitor) IsSlaveOfReplicationSource(name string) bool { + if server.Replications != nil { + for _, ss := range server.Replications { + server.ClusterGroup.LogPrintf(LvlDbg, "IsSlaveOfReplicationSource check %s drop unlinked server %s ", ss.ConnectionName.String, name) if 
ss.ConnectionName.String == name { return true } diff --git a/cluster/srv_set.go b/cluster/srv_set.go index 728a84300..abf2b3950 100644 --- a/cluster/srv_set.go +++ b/cluster/srv_set.go @@ -36,6 +36,10 @@ func (server *ServerMonitor) SetPlacement(k int, ProvAgents string, SlapOSDBPart server.SSTPort = sstports[k%len(sstports)] } +func (server *ServerMonitor) SetSourceClusterName(name string) { + server.SourceClusterName = name +} + func (server *ServerMonitor) SetIgnored(ignored bool) { server.Ignored = ignored } diff --git a/config/config.go b/config/config.go index f90e223da..4efa37b12 100644 --- a/config/config.go +++ b/config/config.go @@ -304,7 +304,7 @@ type Config struct { ArbitratorAddress string `mapstructure:"arbitrator-bind-address" toml:"arbitrator-bind-address" json:"arbitratorBindAddress"` ArbitratorDriver string `mapstructure:"arbitrator-driver" toml:"arbitrator-driver" json:"arbitratorDriver"` ArbitrationReadTimout int `mapstructure:"arbitration-read-timeout" toml:"arbitration-read-timeout" json:"arbitrationReadTimout"` - FailForceGtid bool `toml:"-" json:"-"` //suspicious code + SwitchoverCopyOldLeaderGtid bool `toml:"-" json:"-"` //suspicious code Test bool `mapstructure:"test" toml:"test" json:"test"` TestInjectTraffic bool `mapstructure:"test-inject-traffic" toml:"test-inject-traffic" json:"testInjectTraffic"` Enterprise bool `toml:"enterprise" json:"enterprise"` //used to talk to opensvc collector diff --git a/main.go b/main.go index caa4edf03..48454f901 100644 --- a/main.go +++ b/main.go @@ -116,7 +116,6 @@ var versionCmd = &cobra.Command{ func init() { - //conf.FailForceGtid = true conf.GoArch = GoArch conf.GoOS = GoOS conf.Version = Version @@ -202,6 +201,7 @@ func init() { monitorCmd.Flags().BoolVar(&conf.SwitchGtidCheck, "switchover-at-equal-gtid", false, "Switchover only when slaves are fully in sync") monitorCmd.Flags().BoolVar(&conf.SwitchSlaveWaitCatch, "switchover-slave-wait-catch", true, "Switchover wait for slave to catch with replication, not needed in GTID mode but enable to detect possible issues like witing on old master") monitorCmd.Flags().BoolVar(&conf.SwitchDecreaseMaxConn, "switchover-decrease-max-conn", true, "Switchover decrease max connection on old master") + monitorCmd.Flags().BoolVar(&conf.SwitchoverCopyOldLeaderGtid, "switchover-copy-old-leader-gtid", false, "Switchover copy old leader GTID") monitorCmd.Flags().Int64Var(&conf.SwitchDecreaseMaxConnValue, "switchover-decrease-max-conn-value", 10, "Switchover decrease max connection to this value different according to flavor") monitorCmd.Flags().IntVar(&conf.SwitchSlaveWaitRouteChange, "switchover-wait-route-change", 2, "Switchover wait for unmanged proxy monitor to dicoverd new state") monitorCmd.Flags().StringVar(&conf.MasterConn, "replication-source-name", "", "Replication channel name to use for multisource") diff --git a/utils/gtid/gtid.go b/utils/gtid/gtid.go index 583b2f4df..e679d0db6 100644 --- a/utils/gtid/gtid.go +++ b/utils/gtid/gtid.go @@ -167,6 +167,17 @@ func (gl List) GetSeqNos() []uint64 { return d } +// GetSeqNos returns a slice of sequence integers per domaine +func (gl List) GetSeqDomainIdNos(domainId uint64) []uint64 { + var d []uint64 + for _, g := range gl { + if g.DomainID == domainId { + d = append(d, g.SeqNo) + } + } + return d +} + // return the sequence of a sprecific domain func (gl List) GetSeqServerIdNos(serverId uint64) uint64 { for _, g := range gl { From 1abc4b87d68db7ae4f24bd9e99d0c8e354891610 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Tue, 2 Mar 2021 
14:17:00 +0100 Subject: [PATCH 27/84] Fixing server id --- cluster/srv.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cluster/srv.go b/cluster/srv.go index d114563ff..0db4f0996 100644 --- a/cluster/srv.go +++ b/cluster/srv.go @@ -214,6 +214,7 @@ func (cluster *Cluster) newServerMonitor(url string, user string, pass string, c server.Name, server.Port, server.PostgressDB = misc.SplitHostPortDB(url) server.ClusterGroup = cluster server.ServiceName = cluster.Name + "/svc/" + server.Name + if cluster.Conf.ProvNetCNI { /* if server.IsCompute && cluster.Conf.ClusterHead != "" { url = server.Name + "." + cluster.Conf.ClusterHead + ".svc." + server.ClusterGroup.Conf.ProvOrchestratorCluster + ":3306" @@ -222,12 +223,13 @@ func (cluster *Cluster) newServerMonitor(url string, user string, pass string, c }*/ url = server.Name + server.Domain + ":3306" } - server.Id = "db" + strconv.FormatUint(crc64.Checksum([]byte(cluster.Name+server.Name+server.Port), cluster.crcTable), 10) + server.CrcTable = crc64.MakeTable(crc64.ECMA) var sid uint64 - //will be overide in Refresh with show variables server_id, used for provisionning configurator for server_id sid, err = strconv.ParseUint(strconv.FormatUint(crc64.Checksum([]byte(server.Name+server.Port), server.CrcTable), 10), 10, 64) server.ServerID = sid + server.Id = fmt.Sprintf("%s%d", "db", sid) + if cluster.Conf.TunnelHost != "" { go server.Tunnel() } From 279f6b10228bec402e8869beec5aedf007ba7fea Mon Sep 17 00:00:00 2001 From: svaroqui Date: Tue, 2 Mar 2021 17:59:58 +0100 Subject: [PATCH 28/84] Add consul proxy and use interface for each proxy --- cluster/prx.go | 42 +++++++--------------- cluster/prx_consul.go | 62 ++++++++++++++++++++++++++++++-- cluster/prx_haproxy.go | 30 +++++++++++----- cluster/prx_mariadbshardproxy.go | 8 +++++ cluster/prx_maxscale.go | 10 ++++++ cluster/prx_myproxy.go | 12 +++++++ cluster/prx_proxysql.go | 8 +++++ cluster/prx_sphinx.go | 11 +++--- config/config.go | 3 +- 9 files changed, 140 insertions(+), 46 deletions(-) diff --git a/cluster/prx.go b/cluster/prx.go index 679cd02f2..c3f16ba63 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -71,6 +71,7 @@ type DatabaseProxy interface { Refresh() error Failover() SetMaintenance(server *ServerMonitor) + BackendsStateChange() GetType() string IsRunning() bool @@ -212,6 +213,11 @@ func (cluster *Cluster) newProxyList() error { cluster.AddProxy(prx) } + if cluster.Conf.RegistryConsul { + prx := NewConsulProxy(0, cluster, "") + cluster.AddProxy(prx) + } + cluster.LogPrintf(LvlInfo, "Loaded %d proxies", len(cluster.Proxies)) return nil @@ -289,39 +295,18 @@ func (cluster *Cluster) IsProxyEqualMaster() bool { func (cluster *Cluster) SetProxyServerMaintenance(serverid uint64) { // Found server from ServerId + server := cluster.GetServerFromId(serverid) for _, pr := range cluster.Proxies { - server := cluster.GetServerFromId(serverid) - if cluster.Conf.HaproxyOn { - if prx, ok := pr.(*HaproxyProxy); ok { - if cluster.Conf.HaproxyMode == "runtimeapi" { - prx.SetMaintenance(server) - } - if cluster.Conf.HaproxyMode == "standby" { - prx.Init() - } - } - } - if cluster.Conf.MxsOn { - if prx, ok := pr.(*MaxscaleProxy); ok { - if cluster.GetMaster() != nil { - prx.SetMaintenance(server) - } - } - } - if cluster.Conf.ProxysqlOn { - if prx, ok := pr.(*ProxySQLProxy); ok { - if cluster.GetMaster() != nil { - prx.SetMaintenance(server) - } - } - } + cluster.LogPrintf(LvlInfo, "Notify server %s in maintenance in Proxy Type: %s Host: %s Port: %s", server.URL, pr.GetType(), 
pr.GetHost(), pr.GetPort()) + pr.SetMaintenance(server) } - cluster.initConsul() } // called by server monitor if state change func (cluster *Cluster) backendStateChangeProxies() { - cluster.initConsul() + for _, pr := range cluster.Proxies { + pr.BackendsStateChange() + } } // Used to monitor proxies call by main monitor loop @@ -368,7 +353,7 @@ func (cluster *Cluster) failoverProxies() { cluster.LogPrintf(LvlInfo, "Failover Proxy Type: %s Host: %s Port: %s", pr.GetType(), pr.GetHost(), pr.GetPort()) pr.Failover() } - cluster.initConsul() + } func (cluster *Cluster) initProxies() { @@ -376,7 +361,6 @@ func (cluster *Cluster) initProxies() { cluster.LogPrintf(LvlInfo, "New proxy monitored: %s %s:%s", pr.GetType(), pr.GetHost(), pr.GetPort()) pr.Init() } - cluster.initConsul() } func (cluster *Cluster) SendProxyStats(proxy DatabaseProxy) error { diff --git a/cluster/prx_consul.go b/cluster/prx_consul.go index b83709acc..64de8b589 100644 --- a/cluster/prx_consul.go +++ b/cluster/prx_consul.go @@ -14,18 +14,61 @@ import ( "strings" "github.com/micro/go-micro/registry" + "github.com/signal18/replication-manager/config" + "github.com/signal18/replication-manager/utils/crypto" ) -func (cluster *Cluster) initConsul() error { +type ConsulProxy struct { + Proxy +} + +func NewConsulProxy(placement int, cluster *Cluster, proxyHost string) *ConsulProxy { + conf := cluster.Conf + prx := new(ConsulProxy) + prx.Name = proxyHost + prx.Host = proxyHost + prx.Type = config.ConstProxyConsul + prx.Port = conf.ProxysqlAdminPort + prx.ReadWritePort, _ = strconv.Atoi(conf.ProxysqlPort) + prx.User = conf.ProxysqlUser + prx.Pass = conf.ProxysqlPassword + prx.ReaderHostgroup, _ = strconv.Atoi(conf.ProxysqlReaderHostgroup) + prx.WriterHostgroup, _ = strconv.Atoi(conf.ProxysqlWriterHostgroup) + prx.WritePort, _ = strconv.Atoi(conf.ProxysqlPort) + prx.ReadPort, _ = strconv.Atoi(conf.ProxysqlPort) + + prx.SetPlacement(placement, conf.ProvProxAgents, conf.SlapOSProxySQLPartitions, conf.ProxysqlHostsIPV6) + + if conf.ProvNetCNI { + if conf.ClusterHead == "" { + prx.Host = prx.Host + "." + cluster.Name + ".svc." + conf.ProvOrchestratorCluster + } else { + prx.Host = prx.Host + "." + conf.ClusterHead + ".svc." 
+ conf.ProvOrchestratorCluster + } + } + + if cluster.key != nil { + p := crypto.Password{Key: cluster.key} + p.CipherText = prx.Pass + p.Decrypt() + prx.Pass = p.PlainText + } + + return prx +} + +func (proxy *ConsulProxy) Init() { + cluster := proxy.ClusterGroup var opt registry.Options //opt := consul.DefaultConfig() if cluster.Conf.RegistryConsul == false || cluster.IsActive() == false { - return nil + return } opt.Addrs = strings.Split(cluster.Conf.RegistryHosts, ",") //DefaultRegistry() //opt := registry.DefaultRegistry reg := registry.NewRegistry() + if cluster.GetMaster() != nil { port, _ := strconv.Atoi(cluster.GetMaster().Port) @@ -94,5 +137,20 @@ func (cluster *Cluster) initConsul() error { } } +} + +func (proxy *ConsulProxy) Refresh() error { return nil } + +func (proxy *ConsulProxy) Failover() { + proxy.Init() +} + +func (proxy *ConsulProxy) BackendsStateChange() { + proxy.Init() +} + +func (proxy *ConsulProxy) SetMaintenance(s *ServerMonitor) { + proxy.Init() +} diff --git a/cluster/prx_haproxy.go b/cluster/prx_haproxy.go index bf3758423..15dd96f9a 100644 --- a/cluster/prx_haproxy.go +++ b/cluster/prx_haproxy.go @@ -322,13 +322,21 @@ func (cluster *Cluster) setMaintenanceHaproxy(pr *Proxy, server *ServerMonitor) pr.SetMaintenance(server) } -func (pr *Proxy) SetMaintenance(server *ServerMonitor) { - cluster := pr.ClusterGroup +func (proxy *Proxy) SetMaintenance(server *ServerMonitor) { + cluster := proxy.ClusterGroup + if cluster.Conf.HaproxyOn { + return + } + if cluster.Conf.HaproxyMode == "standby" { + proxy.Init() + return + } + haRuntime := haproxy.Runtime{ Binary: cluster.Conf.HaproxyBinaryPath, - SockFile: filepath.Join(pr.Datadir+"/var", "/haproxy.stats.sock"), - Port: pr.Port, - Host: pr.Host, + SockFile: filepath.Join(proxy.Datadir+"/var", "/haproxy.stats.sock"), + Port: proxy.Port, + Host: proxy.Host, } if server.IsMaintenance { @@ -345,12 +353,16 @@ func (pr *Proxy) SetMaintenance(server *ServerMonitor) { } } -func (prx *Proxy) Failover() { - cluster := prx.ClusterGroup +func (proxy *Proxy) Failover() { + cluster := proxy.ClusterGroup if cluster.Conf.HaproxyMode == "runtimeapi" { - prx.Refresh() + proxy.Refresh() } if cluster.Conf.HaproxyMode == "standby" { - prx.Init() + proxy.Init() } } + +func (proxy *HaproxyProxy) BackendsStateChange() { + proxy.Refresh() +} diff --git a/cluster/prx_mariadbshardproxy.go b/cluster/prx_mariadbshardproxy.go index 681a7fd4e..d2e051954 100644 --- a/cluster/prx_mariadbshardproxy.go +++ b/cluster/prx_mariadbshardproxy.go @@ -130,6 +130,14 @@ func (proxy *MariadbShardProxy) Failover() { } } +func (proxy *MariadbShardProxy) BackendsStateChange() { + return +} + +func (proxy *MariadbShardProxy) SetMaintenance(s *ServerMonitor) { + return +} + func (cluster *Cluster) CheckMdbShardServersSchema(proxy *MariadbShardProxy) { if cluster.master == nil { return diff --git a/cluster/prx_maxscale.go b/cluster/prx_maxscale.go index 88432ace5..4c257ad2c 100644 --- a/cluster/prx_maxscale.go +++ b/cluster/prx_maxscale.go @@ -250,8 +250,18 @@ func (cluster *Cluster) setMaintenanceMaxscale(pr DatabaseProxy, server *ServerM pr.SetMaintenance(server) } +func (proxy *MaxscaleProxy) BackendsStateChange() { + return +} + func (pr *MaxscaleProxy) SetMaintenance(server *ServerMonitor) { cluster := pr.ClusterGroup + if cluster.GetMaster() != nil { + return + } + if cluster.Conf.MxsOn { + return + } m := maxscale.MaxScale{Host: pr.Host, Port: pr.Port, User: pr.User, Pass: pr.Pass} err := m.Connect() if err != nil { diff --git a/cluster/prx_myproxy.go 
b/cluster/prx_myproxy.go index cd5543f57..42490e52b 100644 --- a/cluster/prx_myproxy.go +++ b/cluster/prx_myproxy.go @@ -39,6 +39,18 @@ func (cluster *Cluster) initMyProxy(proxy *MyProxyProxy) { proxy.Init() } +func (proxy *MyProxyProxy) BackendsStateChange() { + return +} + +func (proxy *MyProxyProxy) SetMaintenance(s *ServerMonitor) { + return +} + +func (proxy *MyProxyProxy) Refresh() error { + return nil +} + func (proxy *MyProxyProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { flags.BoolVar(&conf.MyproxyOn, "myproxy", false, "Use Internal Proxy") flags.IntVar(&conf.MyproxyPort, "myproxy-port", 4000, "Internal proxy read/write port") diff --git a/cluster/prx_proxysql.go b/cluster/prx_proxysql.go index ef2c97cbe..7c49f76cd 100644 --- a/cluster/prx_proxysql.go +++ b/cluster/prx_proxysql.go @@ -404,8 +404,16 @@ func (cluster *Cluster) setMaintenanceProxysql(proxy *ProxySQLProxy, s *ServerMo proxy.SetMaintenance(s) } +func (proxy *ProxySQLProxy) BackendsStateChange() { + proxy.Refresh() +} + func (proxy *ProxySQLProxy) SetMaintenance(s *ServerMonitor) { cluster := proxy.ClusterGroup + // TODO ? check if needed + if cluster.GetMaster() != nil { + return + } if cluster.Conf.ProxysqlOn == false { return } diff --git a/cluster/prx_sphinx.go b/cluster/prx_sphinx.go index e13602662..1b426facc 100644 --- a/cluster/prx_sphinx.go +++ b/cluster/prx_sphinx.go @@ -90,6 +90,10 @@ func (proxy *SphinxProxy) Init() { } +func (proxy *SphinxProxy) BackendsStateChange() { + return +} + func (proxy *SphinxProxy) Refresh() error { cluster := proxy.ClusterGroup if cluster.Conf.SphinxOn == false { @@ -125,9 +129,6 @@ func (proxy *SphinxProxy) Refresh() error { return nil } -func (cluster *Cluster) setMaintenanceSphinx(proxy *SphinxProxy, host string, port string) { - if cluster.Conf.SphinxOn == false { - return - } - +func (proxy *SphinxProxy) SetMaintenance(s *ServerMonitor) { + return } diff --git a/config/config.go b/config/config.go index 4efa37b12..ff851a12b 100644 --- a/config/config.go +++ b/config/config.go @@ -304,7 +304,7 @@ type Config struct { ArbitratorAddress string `mapstructure:"arbitrator-bind-address" toml:"arbitrator-bind-address" json:"arbitratorBindAddress"` ArbitratorDriver string `mapstructure:"arbitrator-driver" toml:"arbitrator-driver" json:"arbitratorDriver"` ArbitrationReadTimout int `mapstructure:"arbitration-read-timeout" toml:"arbitration-read-timeout" json:"arbitrationReadTimout"` - SwitchoverCopyOldLeaderGtid bool `toml:"-" json:"-"` //suspicious code + SwitchoverCopyOldLeaderGtid bool `toml:"-" json:"-"` //suspicious code Test bool `mapstructure:"test" toml:"test" json:"test"` TestInjectTraffic bool `mapstructure:"test-inject-traffic" toml:"test-inject-traffic" json:"testInjectTraffic"` Enterprise bool `toml:"enterprise" json:"enterprise"` //used to talk to opensvc collector @@ -543,6 +543,7 @@ const ( ConstProxyMysqlrouter string = "mysqlrouter" ConstProxySphinx string = "sphinx" ConstProxyMyProxy string = "myproxy" + ConstProxyConsul string = "consul" ) type ServicePlan struct { From d1269657745687a82299e98f15100ca01bf246fc Mon Sep 17 00:00:00 2001 From: svaroqui Date: Wed, 3 Mar 2021 09:54:50 +0100 Subject: [PATCH 29/84] Fixing proxy flags inside virtual interface --- cluster/prx.go | 2 +- cluster/prx_haproxy.go | 2 +- cluster/prx_mariadbshardproxy.go | 2 +- cluster/prx_maxscale.go | 2 +- cluster/prx_myproxy.go | 2 +- cluster/prx_proxysql.go | 2 +- cluster/prx_sphinx.go | 2 +- main.go | 12 ++++++------ 8 files changed, 13 insertions(+), 13 deletions(-) diff --git 
a/cluster/prx.go b/cluster/prx.go index c3f16ba63..de5c91998 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -66,7 +66,7 @@ type Proxy struct { type DatabaseProxy interface { SetCluster(c *Cluster) - AddFlags(flags *pflag.FlagSet, conf config.Config) + AddFlags(flags *pflag.FlagSet, conf *config.Config) Init() Refresh() error Failover() diff --git a/cluster/prx_haproxy.go b/cluster/prx_haproxy.go index 15dd96f9a..99f378519 100644 --- a/cluster/prx_haproxy.go +++ b/cluster/prx_haproxy.go @@ -46,7 +46,7 @@ func NewHaproxyProxy(placement int, cluster *Cluster, proxyHost string) *Haproxy return prx } -func (proxy *HaproxyProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { +func (proxy *HaproxyProxy) AddFlags(flags *pflag.FlagSet, conf *config.Config) { flags.BoolVar(&conf.HaproxyOn, "haproxy", false, "Wrapper to use HaProxy on same host") flags.StringVar(&conf.HaproxyMode, "haproxy-mode", "runtimeapi", "HaProxy mode [standby|runtimeapi|dataplaneapi]") flags.StringVar(&conf.HaproxyUser, "haproxy-user", "admin", "Haproxy API user") diff --git a/cluster/prx_mariadbshardproxy.go b/cluster/prx_mariadbshardproxy.go index d2e051954..711cf2ba4 100644 --- a/cluster/prx_mariadbshardproxy.go +++ b/cluster/prx_mariadbshardproxy.go @@ -52,7 +52,7 @@ func NewMariadbShardProxy(placement int, cluster *Cluster, proxyHost string) *Ma return prx } -func (proxy *MariadbShardProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { +func (proxy *MariadbShardProxy) AddFlags(flags *pflag.FlagSet, conf *config.Config) { flags.BoolVar(&conf.MdbsProxyOn, "shardproxy", false, "MariaDB Spider proxy") flags.StringVar(&conf.MdbsProxyHosts, "shardproxy-servers", "127.0.0.1:3307", "MariaDB spider proxy hosts IP:Port,IP:Port") flags.StringVar(&conf.MdbsProxyCredential, "shardproxy-credential", "root:mariadb", "MariaDB spider proxy credential") diff --git a/cluster/prx_maxscale.go b/cluster/prx_maxscale.go index 4c257ad2c..ddf245e2c 100644 --- a/cluster/prx_maxscale.go +++ b/cluster/prx_maxscale.go @@ -53,7 +53,7 @@ func NewMaxscaleProxy(placement int, cluster *Cluster, proxyHost string) *Maxsca return prx } -func (proxy *MaxscaleProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { +func (proxy *MaxscaleProxy) AddFlags(flags *pflag.FlagSet, conf *config.Config) { flags.BoolVar(&conf.MxsOn, "maxscale", false, "MaxScale proxy server is query for backend status") flags.BoolVar(&conf.CheckFalsePositiveMaxscale, "failover-falsepositive-maxscale", false, "Failover checks that maxscale detect failed master") flags.IntVar(&conf.CheckFalsePositiveMaxscaleTimeout, "failover-falsepositive-maxscale-timeout", 14, "Failover checks that maxscale detect failed master") diff --git a/cluster/prx_myproxy.go b/cluster/prx_myproxy.go index 42490e52b..84361f7d7 100644 --- a/cluster/prx_myproxy.go +++ b/cluster/prx_myproxy.go @@ -51,7 +51,7 @@ func (proxy *MyProxyProxy) Refresh() error { return nil } -func (proxy *MyProxyProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { +func (proxy *MyProxyProxy) AddFlags(flags *pflag.FlagSet, conf *config.Config) { flags.BoolVar(&conf.MyproxyOn, "myproxy", false, "Use Internal Proxy") flags.IntVar(&conf.MyproxyPort, "myproxy-port", 4000, "Internal proxy read/write port") flags.StringVar(&conf.MyproxyUser, "myproxy-user", "admin", "Myproxy user") diff --git a/cluster/prx_proxysql.go b/cluster/prx_proxysql.go index 7c49f76cd..680ec6a41 100644 --- a/cluster/prx_proxysql.go +++ b/cluster/prx_proxysql.go @@ -53,7 +53,7 @@ func NewProxySQLProxy(placement int, cluster *Cluster, proxyHost 
string) *ProxyS return prx } -func (proxy *ProxySQLProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { +func (proxy *ProxySQLProxy) AddFlags(flags *pflag.FlagSet, conf *config.Config) { flags.BoolVar(&conf.ProxysqlOn, "proxysql", false, "Use ProxySQL") flags.BoolVar(&conf.ProxysqlSaveToDisk, "proxysql-save-to-disk", false, "Save proxysql change to sqllight") flags.StringVar(&conf.ProxysqlHosts, "proxysql-servers", "", "ProxySQL hosts") diff --git a/cluster/prx_sphinx.go b/cluster/prx_sphinx.go index 1b426facc..91f7c40b5 100644 --- a/cluster/prx_sphinx.go +++ b/cluster/prx_sphinx.go @@ -44,7 +44,7 @@ func NewSphinxProxy(placement int, cluster *Cluster, proxyHost string) *SphinxPr return prx } -func (proxy *SphinxProxy) AddFlags(flags *pflag.FlagSet, conf config.Config) { +func (proxy *SphinxProxy) AddFlags(flags *pflag.FlagSet, conf *config.Config) { flags.BoolVar(&conf.SphinxOn, "sphinx", false, "Turn on SphinxSearch detection") flags.StringVar(&conf.SphinxHosts, "sphinx-servers", "127.0.0.1", "SphinxSearch hosts") flags.StringVar(&conf.SphinxPort, "sphinx-port", "9312", "SphinxSearch API port") diff --git a/main.go b/main.go index a6ae232e7..8e9c55e27 100644 --- a/main.go +++ b/main.go @@ -331,7 +331,7 @@ func init() { if WithMaxscale == "ON" { maxscaleprx := new(cluster.MaxscaleProxy) - maxscaleprx.AddFlags(monitorCmd.Flags(), conf) + maxscaleprx.AddFlags(monitorCmd.Flags(), &conf) } // TODO: this seems dead code / unimplemented @@ -350,23 +350,23 @@ func init() { if WithMariadbshardproxy == "ON" { mdbsprx := new(cluster.MariadbShardProxy) - mdbsprx.AddFlags(monitorCmd.Flags(), conf) + mdbsprx.AddFlags(monitorCmd.Flags(), &conf) } if WithHaproxy == "ON" { haprx := new(cluster.HaproxyProxy) - haprx.AddFlags(monitorCmd.Flags(), conf) + haprx.AddFlags(monitorCmd.Flags(), &conf) } if WithProxysql == "ON" { proxysqlprx := new(cluster.ProxySQLProxy) - proxysqlprx.AddFlags(monitorCmd.Flags(), conf) + proxysqlprx.AddFlags(monitorCmd.Flags(), &conf) } if WithSphinx == "ON" { sphinxprx := new(cluster.SphinxProxy) - sphinxprx.AddFlags(monitorCmd.Flags(), conf) + sphinxprx.AddFlags(monitorCmd.Flags(), &conf) } myproxyprx := new(cluster.MyProxyProxy) - myproxyprx.AddFlags(monitorCmd.Flags(), conf) + myproxyprx.AddFlags(monitorCmd.Flags(), &conf) if WithSpider == "ON" { monitorCmd.Flags().BoolVar(&conf.Spider, "spider", false, "Turn on spider detection") From 8f0c6cb1ba1ea5ae0cfad87834a363973e58c0dd Mon Sep 17 00:00:00 2001 From: svaroqui Date: Wed, 17 Mar 2021 16:04:09 +0100 Subject: [PATCH 30/84] Don't load PFS query sample on monitoring push it to explain, this create temp table on disk for nothing at every monitor loop --- utils/dbhelper/dbhelper.go | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/utils/dbhelper/dbhelper.go b/utils/dbhelper/dbhelper.go index d496a7de0..52c294034 100644 --- a/utils/dbhelper/dbhelper.go +++ b/utils/dbhelper/dbhelper.go @@ -1456,15 +1456,35 @@ func DisablePFSQueries(db *sqlx.DB) (string, error) { return query, err } +func GetSampleQueryFromPFS(db *sqlx.DB, Query PFSQuery) (string, error) { + query := "SELECT COALESCE( B.SQL_TEXT,'') as query FROM performance_schema.events_statements_history_long B WHERE B.DIGEST =''" + Query.Digest + "'" + rows, err := db.Queryx(query) + if err != nil { + return "", err + } + defer rows.Close() + for rows.Next() { + var res string + err := rows.Scan(&res) + if err != nil { + return "", err + } + return res, nil + } + return "", err +} + func GetQueries(db *sqlx.DB) 
(map[string]PFSQuery, string, error) { vars := make(map[string]PFSQuery) query := "set session group_concat_max_len=2048" db.Exec(query) + /* COALESCE((SELECT B.SQL_TEXT FROM performance_schema.events_statements_history_long B WHERE + A.DIGEST = B.DIGEST LIMIT 1 ),'') as query, */ + // to expensive FULL SCAN to extact during explain query = `SELECT A.digest as digest, - COALESCE((SELECT B.SQL_TEXT FROM performance_schema.events_statements_history_long B WHERE - A.DIGEST = B.DIGEST LIMIT 1 ),'') as query, + '' as query, A.digest_text as digest_text, A.LAST_SEEN as last_seen, COALESCE(A.SCHEMA_NAME,'') as schema_name, From 38e62d6b501e296e429eadf70e27363cb51413eb Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Wed, 17 Mar 2021 19:25:38 +0100 Subject: [PATCH 31/84] Fix broken receiver --- .vscode/settings.json | 4 ++++ cluster/prx_haproxy.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 7a73a41bf..5e593f3c7 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,2 +1,6 @@ { +<<<<<<< Updated upstream +======= + "go.testFlags": ["-v"] +>>>>>>> Stashed changes } \ No newline at end of file diff --git a/cluster/prx_haproxy.go b/cluster/prx_haproxy.go index 99f378519..4211fc18b 100644 --- a/cluster/prx_haproxy.go +++ b/cluster/prx_haproxy.go @@ -353,7 +353,7 @@ func (proxy *Proxy) SetMaintenance(server *ServerMonitor) { } } -func (proxy *Proxy) Failover() { +func (proxy *HaproxyProxy) Failover() { cluster := proxy.ClusterGroup if cluster.Conf.HaproxyMode == "runtimeapi" { proxy.Refresh() From cdf4c02748adeea3ba4f074f4e74454eef92db4d Mon Sep 17 00:00:00 2001 From: svaroqui Date: Fri, 19 Mar 2021 16:09:21 +0100 Subject: [PATCH 32/84] Add missing servie plan in share in case not fetchable from internet Start interface for Orchestrtors --- cluster/orchestrator.go | 39 +++++++++++++++++++++++++++++++++++++++ share/serviceplan.json | 1 + 2 files changed, 40 insertions(+) create mode 100644 cluster/orchestrator.go create mode 100644 share/serviceplan.json diff --git a/cluster/orchestrator.go b/cluster/orchestrator.go new file mode 100644 index 000000000..1affc9bc6 --- /dev/null +++ b/cluster/orchestrator.go @@ -0,0 +1,39 @@ +package cluster + +import ( + "github.com/signal18/replication-manager/config" + "github.com/spf13/pflag" +) + +type Orchetrator struct { + DatabaseOrchetrator + Id string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Cluster *Cluster +} + +type DatabaseOrchetrator interface { + SetCluster(c *Cluster) + AddFlags(flags *pflag.FlagSet, conf config.Config) + Init() + GetNodes() ([]Agent, error) + ProvisionDatabaseService(server *ServerMonitor) + ProvisionProxyService(server DatabaseProxy) error + UnprovisionDatabaseService(server *ServerMonitor) + UnprovisionProxyService(server DatabaseProxy) error + StartDatabaseService(server *ServerMonitor) + StartProxyService(server DatabaseProxy) error + StopDatabaseService(server *ServerMonitor) + StopProxyService(server DatabaseProxy) error +} + +type orchestratorList []DatabaseOrchetrator + +func (o *Orchetrator) SetCluster(c *Cluster) { + o.Cluster = c +} + +func (o *Orchetrator) GetType() string { + return o.Type +} diff --git a/share/serviceplan.json b/share/serviceplan.json new file mode 100644 index 000000000..419c32a1b --- /dev/null +++ b/share/serviceplan.json @@ -0,0 +1 @@ 
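A note on the performance_schema change in PATCH 30 above: GetQueries no longer joins performance_schema.events_statements_history_long for every digest on each monitor loop, because that correlated subquery forces an on-disk temporary table; a single statement sample can instead be fetched on demand, at explain time, for one digest. The following is a minimal, hypothetical sketch of such an on-demand lookup, not the exact helper added by the patch: it assumes a sqlx connection and uses a bind parameter instead of string concatenation, and the DSN is a placeholder.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
	"github.com/jmoiron/sqlx"
)

// sampleQueryForDigest fetches one captured statement text for a digest,
// returning "" when the history table holds no sample for it.
func sampleQueryForDigest(db *sqlx.DB, digest string) (string, error) {
	var sample string
	err := db.Get(&sample,
		"SELECT COALESCE(SQL_TEXT,'') FROM performance_schema.events_statements_history_long WHERE DIGEST = ? LIMIT 1",
		digest)
	if err == sql.ErrNoRows {
		return "", nil
	}
	return sample, err
}

func main() {
	// Placeholder DSN for illustration only.
	db, err := sqlx.Connect("mysql", "monitor:pass@tcp(127.0.0.1:3306)/")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	q, err := sampleQueryForDigest(db, "0123456789abcdef")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(q)
}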
+{"rows":[{"plan":"x1.small","memory":16384,"cores":2,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.small.compute","memory":16384,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.small.perf","memory":32768,"cores":2,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.middle","memory":32768,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.middle.compute","memory":32768,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.middle.perf","memory":65536,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.large","memory":65536,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.large.compute","memory":65536,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.large.perf","memory":131072,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.huge","memory":131072,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.huge.compute","memory":131072,"cores":32,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.huge.perf","memory":262144,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.small","memory":16384,"cores":2,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.small.compute","memory":16384,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.small.perf","memory":32768,"cores":2,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.middle","memory":32768,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.middle.compute","memory":32768,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.middle.perf","memory":65536,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.large","memory":65536,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.large.compute","memory":65536,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.large.perf","memory":131072,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.huge","memory":131072,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.huge.compute","memory":131072,"cores":32,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.huge.perf","memory":262144,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.small","memory":16384,"cores":2,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.small.compute","memory":16384,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.small.perf","memory":32768,"cores":2,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.middle","memory":32768,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.middle.compute","memory":32768,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.middle.perf","memory":65536,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.large","memory":65536,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.large.compute","memory":65536,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.large.perf","memory":131072,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.huge","memory":131072,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.huge.compute","memory":131072,"cores":32,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.huge.perf","memory":262144,"cores":16,"dbdatasize":700,"dbsystemsize":300}]} \ No newline at end of file From 0576a871dac3202f5daf023ad7176d6839daa011 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Mon, 26 Apr 2021 08:09:21 +0200 Subject: [PATCH 33/84] Disable default backup-binlogs --- main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.go b/main.go index 8e9c55e27..656eda31b 100644 --- a/main.go +++ b/main.go @@ -449,7 +449,7 @@ func init() { 
monitorCmd.Flags().StringVar(&conf.BackupMysqldumpOptions, "backup-mysqldump-options", "--hex-blob --single-transaction --verbose --all-databases --add-drop-database --system=all", "Extra options") monitorCmd.Flags().StringVar(&conf.BackupMysqlbinlogPath, "backup-mysqlbinlog-path", "", "Path to mysqlbinlog binary") monitorCmd.Flags().StringVar(&conf.BackupMysqlclientPath, "backup-mysqlclient-path", "", "Path to mysql client binary") - monitorCmd.Flags().BoolVar(&conf.BackupBinlogs, "backup-binlogs", true, "Archive binlogs") + monitorCmd.Flags().BoolVar(&conf.BackupBinlogs, "backup-binlogs", false, "Archive binlogs") monitorCmd.Flags().IntVar(&conf.BackupBinlogsKeep, "backup-binlogs-keep", 10, "Number of master binlog to keep") monitorCmd.Flags().BoolVar(&conf.ProvBinaryInTarball, "prov-db-binary-in-tarball", false, "Add prov-db-binary-tarball-name binaries to init tarball") monitorCmd.Flags().StringVar(&conf.ProvBinaryTarballName, "prov-db-binary-tarball-name", "mysql-8.0.17-macos10.14-x86_64.tar.gz", "Name of binary tarball to put in tarball") From e4813819211bbf14b8f3ca2de4e52148c715c9b0 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Tue, 4 May 2021 08:02:15 +0200 Subject: [PATCH 34/84] MariaDB Backup Stage support for mysqldump OnPremise bootstrap via SSH for poxysql and haproxy --- cluster/prov.go | 108 ++++++++++++--------- cluster/prov_k8s_db.go | 6 +- cluster/prov_onpremise_db.go | 24 +++-- cluster/prov_onpremise_haproxy.go | 65 +++++++++++++ cluster/prov_onpremise_proxysql.go | 60 ++++++++++++ cluster/prov_onpremise_prx.go | 147 +++++++++++++++++++++++++++++ cluster/prov_scripts.go | 7 ++ cluster/prov_slapos_db.go | 6 +- cluster/srv_job.go | 23 ++++- 9 files changed, 389 insertions(+), 57 deletions(-) create mode 100644 cluster/prov_onpremise_haproxy.go create mode 100644 cluster/prov_onpremise_proxysql.go create mode 100644 cluster/prov_onpremise_prx.go create mode 100644 cluster/prov_scripts.go diff --git a/cluster/prov.go b/cluster/prov.go index 678207b90..00f0877b5 100644 --- a/cluster/prov.go +++ b/cluster/prov.go @@ -72,6 +72,9 @@ func (cluster *Cluster) ProvisionServices() error { go cluster.SlapOSProvisionDatabaseService(server) case config.ConstOrchestratorLocalhost: go cluster.LocalhostProvisionDatabaseService(server) + case config.ConstOrchestratorOnPremise: + go cluster.OnPremiseProvisionDatabaseService(server) + default: cluster.sme.RemoveFailoverState() return nil @@ -105,6 +108,8 @@ func (cluster *Cluster) ProvisionServices() error { go cluster.SlapOSProvisionProxyService(prx) case config.ConstOrchestratorLocalhost: go cluster.LocalhostProvisionProxyService(prx) + case config.ConstOrchestratorOnPremise: + go cluster.OnPremiseProvisionProxyService(prx) default: // TODO: wtf? 
it never hits the second loop cluster.sme.RemoveFailoverState() @@ -144,6 +149,8 @@ func (cluster *Cluster) InitDatabaseService(server *ServerMonitor) error { go cluster.SlapOSProvisionDatabaseService(server) case config.ConstOrchestratorLocalhost: go cluster.LocalhostProvisionDatabaseService(server) + case config.ConstOrchestratorOnPremise: + go cluster.OnPremiseProvisionDatabaseService(server) default: cluster.sme.RemoveFailoverState() return nil @@ -171,6 +178,8 @@ func (cluster *Cluster) InitProxyService(prx DatabaseProxy) error { go cluster.SlapOSProvisionProxyService(prx) case config.ConstOrchestratorLocalhost: go cluster.LocalhostProvisionProxyService(prx) + case config.ConstOrchestratorOnPremise: + go cluster.OnPremiseProvisionProxyService(prx) default: return nil } @@ -198,6 +207,8 @@ func (cluster *Cluster) Unprovision() error { go cluster.SlapOSUnprovisionDatabaseService(server) case config.ConstOrchestratorLocalhost: go cluster.LocalhostUnprovisionDatabaseService(server) + case config.ConstOrchestratorOnPremise: + go cluster.OnPremiseUnprovisionDatabaseService(server) default: cluster.sme.RemoveFailoverState() return nil @@ -230,6 +241,8 @@ func (cluster *Cluster) Unprovision() error { go cluster.SlapOSUnprovisionProxyService(prx) case config.ConstOrchestratorLocalhost: go cluster.LocalhostUnprovisionProxyService(prx) + case config.ConstOrchestratorOnPremise: + go cluster.OnPremiseUnprovisionProxyService(prx) default: cluster.sme.RemoveFailoverState() return nil @@ -272,6 +285,8 @@ func (cluster *Cluster) UnprovisionProxyService(prx DatabaseProxy) error { go cluster.SlapOSUnprovisionProxyService(prx) case config.ConstOrchestratorLocalhost: go cluster.LocalhostUnprovisionProxyService(prx) + case config.ConstOrchestratorOnPremise: + go cluster.OnPremiseUnprovisionProxyService(prx) default: } select { @@ -295,6 +310,8 @@ func (cluster *Cluster) UnprovisionDatabaseService(server *ServerMonitor) error go cluster.K8SUnprovisionDatabaseService(server) case config.ConstOrchestratorSlapOS: go cluster.SlapOSUnprovisionDatabaseService(server) + case config.ConstOrchestratorOnPremise: + go cluster.OnPremiseUnprovisionDatabaseService(server) default: go cluster.LocalhostUnprovisionDatabaseService(server) } @@ -315,55 +332,74 @@ func (cluster *Cluster) RollingUpgrade() { } func (cluster *Cluster) StopDatabaseService(server *ServerMonitor) error { + cluster.LogPrintf(LvlInfo, "Stopping database service %s", cluster.Name+"/svc/"+server.Name) + var err error switch cluster.Conf.ProvOrchestrator { case config.ConstOrchestratorOpenSVC: - return cluster.OpenSVCStopDatabaseService(server) + err = cluster.OpenSVCStopDatabaseService(server) case config.ConstOrchestratorKubernetes: - cluster.K8SStopDatabaseService(server) + err = cluster.K8SStopDatabaseService(server) case config.ConstOrchestratorSlapOS: - cluster.SlapOSStopDatabaseService(server) + err = cluster.SlapOSStopDatabaseService(server) case config.ConstOrchestratorOnPremise: - cluster.OnPremiseStopDatabaseService(server) + err = cluster.OnPremiseStopDatabaseService(server) case config.ConstOrchestratorLocalhost: - return cluster.LocalhostStopDatabaseService(server) + err = cluster.OnPremiseStopDatabaseService(server) default: return errors.New("No valid orchestrator") } - server.DelRestartCookie() - return nil + if err == nil { + server.DelRestartCookie() + } + return err } func (cluster *Cluster) StopProxyService(server DatabaseProxy) error { + cluster.LogPrintf(LvlInfo, "Stopping Proxy service %s", cluster.Name+"/svc/"+server.GetName()) + 
var err error switch cluster.Conf.ProvOrchestrator { case config.ConstOrchestratorOpenSVC: - return cluster.OpenSVCStopProxyService(server) + err = cluster.OpenSVCStopProxyService(server) case config.ConstOrchestratorKubernetes: - cluster.K8SStopProxyService(server) + err = cluster.K8SStopProxyService(server) case config.ConstOrchestratorSlapOS: - cluster.SlapOSStopProxyService(server) + err = cluster.SlapOSStopProxyService(server) + case config.ConstOrchestratorOnPremise: + err = cluster.OnPremiseStopProxyService(server) + case config.ConstOrchestratorLocalhost: + err = cluster.LocalhostStopProxyService(server) default: - return cluster.LocalhostStopProxyService(server) + return errors.New("No valid orchestrator") } - server.DelRestartCookie() - return nil + if err == nil { + server.DelRestartCookie() + } + return err } func (cluster *Cluster) StartProxyService(server DatabaseProxy) error { - + cluster.LogPrintf(LvlInfo, "Starting Proxy service %s", cluster.Name+"/svc/"+server.GetName()) + var err error switch cluster.Conf.ProvOrchestrator { case config.ConstOrchestratorOpenSVC: - return cluster.OpenSVCStartProxyService(server) + err = cluster.OpenSVCStartProxyService(server) case config.ConstOrchestratorKubernetes: - cluster.K8SStartProxyService(server) + err = cluster.K8SStartProxyService(server) case config.ConstOrchestratorSlapOS: - cluster.SlapOSStartProxyService(server) + err = cluster.SlapOSStartProxyService(server) + case config.ConstOrchestratorOnPremise: + err = cluster.OnPremiseStartProxyService(server) + case config.ConstOrchestratorLocalhost: + err = cluster.LocalhostStartProxyService(server) default: - return cluster.LocalhostStartProxyService(server) + return errors.New("No valid orchestrator") } - server.DelRestartCookie() - return nil + if err == nil { + server.DelRestartCookie() + } + return err } func (cluster *Cluster) ShutdownDatabase(server *ServerMonitor) error { @@ -374,39 +410,25 @@ func (cluster *Cluster) ShutdownDatabase(server *ServerMonitor) error { func (cluster *Cluster) StartDatabaseService(server *ServerMonitor) error { cluster.LogPrintf(LvlInfo, "Starting Database service %s", cluster.Name+"/svc/"+server.Name) + var err error switch cluster.Conf.ProvOrchestrator { case config.ConstOrchestratorOpenSVC: - return cluster.OpenSVCStartDatabaseService(server) + err = cluster.OpenSVCStartDatabaseService(server) case config.ConstOrchestratorKubernetes: - cluster.K8SStartDatabaseService(server) + err = cluster.K8SStartDatabaseService(server) case config.ConstOrchestratorSlapOS: - cluster.SlapOSStartDatabaseService(server) + err = cluster.SlapOSStartDatabaseService(server) case config.ConstOrchestratorOnPremise: - cluster.OnPremiseStartDatabaseService(server) + err = cluster.OnPremiseStartDatabaseService(server) case config.ConstOrchestratorLocalhost: - return cluster.LocalhostStartDatabaseService(server) + err = cluster.LocalhostStartDatabaseService(server) default: return errors.New("No valid orchestrator") } - server.DelRestartCookie() - return nil -} - -func (cluster *Cluster) GetOchestaratorPlacement(server *ServerMonitor) error { - cluster.LogPrintf(LvlInfo, "Starting Database service %s", cluster.Name+"/svc/"+server.Name) - switch cluster.Conf.ProvOrchestrator { - case config.ConstOrchestratorOpenSVC: - return cluster.OpenSVCStartDatabaseService(server) - case config.ConstOrchestratorKubernetes: - cluster.K8SStartDatabaseService(server) - case config.ConstOrchestratorSlapOS: - cluster.SlapOSStartDatabaseService(server) - case 
config.ConstOrchestratorLocalhost: - return cluster.LocalhostStartDatabaseService(server) - default: - return errors.New("No valid orchestrator") + if err == nil { + server.DelRestartCookie() } - return nil + return err } func (cluster *Cluster) StartAllNodes() error { diff --git a/cluster/prov_k8s_db.go b/cluster/prov_k8s_db.go index 5707b9a05..1cfe5c1f7 100644 --- a/cluster/prov_k8s_db.go +++ b/cluster/prov_k8s_db.go @@ -219,10 +219,12 @@ func (cluster *Cluster) K8SProvisionDatabaseService(s *ServerMonitor) { cluster.errorChan <- nil } -func (cluster *Cluster) K8SStopDatabaseService(s *ServerMonitor) { +func (cluster *Cluster) K8SStopDatabaseService(s *ServerMonitor) error { + return nil } -func (cluster *Cluster) K8SStartDatabaseService(s *ServerMonitor) { +func (cluster *Cluster) K8SStartDatabaseService(s *ServerMonitor) error { + return nil } func (cluster *Cluster) K8SUnprovisionDatabaseService(s *ServerMonitor) { diff --git a/cluster/prov_onpremise_db.go b/cluster/prov_onpremise_db.go index 2c59e7155..dbedb0209 100644 --- a/cluster/prov_onpremise_db.go +++ b/cluster/prov_onpremise_db.go @@ -3,6 +3,7 @@ package cluster import ( "errors" "os" + "strconv" "github.com/helloyi/go-sshclient" sshcli "github.com/helloyi/go-sshclient" @@ -10,11 +11,16 @@ import ( ) func (cluster *Cluster) OnPremiseConnect(server *ServerMonitor) (*sshclient.Client, error) { - if server.ClusterGroup.IsInFailover() { + if cluster.IsInFailover() { return nil, errors.New("OnPremise Provisioning cancel during connect") } + if cluster.Conf.OnPremiseSSH { + return nil, errors.New("onpremise-ssh disable ") + } + user, _ := misc.SplitPair(cluster.Conf.OnPremiseSSHCredential) + key := os.Getenv("HOME") + "/.ssh/id_rsa" - client, err := sshcli.DialWithKey(misc.Unbracket(server.Host)+":22", "root", key) + client, err := sshcli.DialWithKey(misc.Unbracket(server.Host)+":"+strconv.Itoa(cluster.Conf.OnPremiseSSHPort), user, key) if err != nil { return nil, errors.New("OnPremise Provisioning via SSH %s" + err.Error()) } @@ -58,28 +64,30 @@ func (cluster *Cluster) OnPremiseProvisionDatabaseService(server *ServerMonitor) cluster.errorChan <- nil } -func (cluster *Cluster) OnPremiseSUnprovisionDatabaseService(server *ServerMonitor) { +func (cluster *Cluster) OnPremiseUnprovisionDatabaseService(server *ServerMonitor) { cluster.errorChan <- nil } -func (cluster *Cluster) OnPremiseStopDatabaseService(server *ServerMonitor) { +func (cluster *Cluster) OnPremiseStopDatabaseService(server *ServerMonitor) error { //s.JobServerStop() need an agent or ssh to trigger this server.Shutdown() + return nil } -func (cluster *Cluster) OnPremiseStartDatabaseService(server *ServerMonitor) { +func (cluster *Cluster) OnPremiseStartDatabaseService(server *ServerMonitor) error { server.SetWaitStartCookie() client, err := cluster.OnPremiseConnect(server) if err != nil { - cluster.errorChan <- err + return err } defer client.Close() - out, err := client.Cmd("systemctl stop mysql").SmartOutput() + out, err := client.Cmd("systemctl start mysql").SmartOutput() if err != nil { - cluster.errorChan <- err + return err } server.ClusterGroup.LogPrintf(LvlInfo, "OnPremise Provisioning : %s", string(out)) + return nil } diff --git a/cluster/prov_onpremise_haproxy.go b/cluster/prov_onpremise_haproxy.go new file mode 100644 index 000000000..7d73eaf99 --- /dev/null +++ b/cluster/prov_onpremise_haproxy.go @@ -0,0 +1,65 @@ +package cluster + +func (cluster *Cluster) OnPremiseProvisionHaProxyService(prx *HaproxyProxy) error { + client, err := 
cluster.OnPremiseConnectProxy(prx) + if err != nil { + cluster.errorChan <- err + return err + } + defer client.Close() + err = cluster.OnPremiseProvisionBootsrapProxy(prx, client) + if err != nil { + cluster.errorChan <- err + return err + } + out, err := client.Cmd("rm -f /etc/haproxy/haproxy.cfg").Cmd("cp -rp /bootstrap/etc/haproxy.cfg /etc/haproxy/").Cmd("systemctl start haproxy ").SmartOutput() + if err != nil { + cluster.errorChan <- err + return err + } + cluster.LogPrintf(LvlInfo, "OnPremise Provisioning : %s", string(out)) + cluster.errorChan <- nil + return nil +} + +func (cluster *Cluster) OnPremiseUnprovisionHaProxyService(prx *HaproxyProxy) { + + cluster.errorChan <- nil + +} + +func (cluster *Cluster) OnPremiseStopHaproxyService(server DatabaseProxy) error { + server.SetWaitStartCookie() + client, err := cluster.OnPremiseConnectProxy(server) + if err != nil { + return err + } + defer client.Close() + out, err := client.Cmd("systemctl stop haproxy").SmartOutput() + if err != nil { + return err + } + cluster.LogPrintf(LvlInfo, "OnPremise Stop Haproxy : %s", string(out)) + return nil +} + +func (cluster *Cluster) OnPremiseStartHaProxyService(server DatabaseProxy) error { + + server.SetWaitStartCookie() + client, err := cluster.OnPremiseConnectProxy(server) + if err != nil { + return err + } + defer client.Close() + err = cluster.OnPremiseProvisionBootsrapProxy(server, client) + if err != nil { + cluster.errorChan <- err + return err + } + out, err := client.Cmd("systemctl start haproxy").SmartOutput() + if err != nil { + return err + } + cluster.LogPrintf(LvlInfo, "OnPremise start HaProxy : %s", string(out)) + return nil +} diff --git a/cluster/prov_onpremise_proxysql.go b/cluster/prov_onpremise_proxysql.go new file mode 100644 index 000000000..19dc10411 --- /dev/null +++ b/cluster/prov_onpremise_proxysql.go @@ -0,0 +1,60 @@ +package cluster + +func (cluster *Cluster) OnPremiseProvisionProxySQLService(prx *ProxySQLProxy) error { + client, err := cluster.OnPremiseConnectProxy(prx) + if err != nil { + cluster.errorChan <- err + return err + } + defer client.Close() + err = cluster.OnPremiseProvisionBootsrapProxy(prx, client) + if err != nil { + cluster.errorChan <- err + return err + } + out, err := client.Cmd("rm -f /etc/proxysql.cnf").Cmd("cp -rp /bootstrap/etc/proxysql.cnf /etc").Cmd("proxysql –initial ").SmartOutput() + if err != nil { + cluster.errorChan <- err + return err + } + cluster.LogPrintf(LvlInfo, "OnPremise Provisioning : %s", string(out)) + cluster.errorChan <- nil + return nil +} + +func (cluster *Cluster) OnPremiseUnprovisionProxySQLService(prx *ProxySQLProxy) { + + cluster.errorChan <- nil + +} + +func (cluster *Cluster) OnPremiseStopProxySQLService(server DatabaseProxy) error { + server.SetWaitStartCookie() + client, err := cluster.OnPremiseConnectProxy(server) + if err != nil { + return err + } + defer client.Close() + out, err := client.Cmd("systemctl stop proxysql").SmartOutput() + if err != nil { + return err + } + cluster.LogPrintf(LvlInfo, "OnPremise stop ProxySQL : %s", string(out)) + return nil +} + +func (cluster *Cluster) OnPremiseStartProxySQLService(server DatabaseProxy) error { + + server.SetWaitStartCookie() + client, err := cluster.OnPremiseConnectProxy(server) + if err != nil { + return err + } + defer client.Close() + out, err := client.Cmd("systemctl start proxysql").SmartOutput() + if err != nil { + return err + } + cluster.LogPrintf(LvlInfo, "OnPremise start ProxySQL : %s", string(out)) + return nil +} diff --git 
a/cluster/prov_onpremise_prx.go b/cluster/prov_onpremise_prx.go new file mode 100644 index 000000000..6199c4767 --- /dev/null +++ b/cluster/prov_onpremise_prx.go @@ -0,0 +1,147 @@ +// replication-manager - Replication Manager Monitoring and CLI for MariaDB and MySQL +// Copyright 2017 Signal 18 SARL +// Authors: Guillaume Lefranc +// Stephane Varoqui +// This source code is licensed under the GNU General Public License, version 3. + +package cluster + +import ( + "errors" + "os" + "strconv" + + "github.com/helloyi/go-sshclient" + sshcli "github.com/helloyi/go-sshclient" + "github.com/signal18/replication-manager/utils/misc" +) + +func (cluster *Cluster) OnPremiseProvisionBootsrapProxy(server DatabaseProxy, client *sshclient.Client) error { + adminuser := "admin" + adminpassword := "repman" + if user, ok := cluster.APIUsers[adminuser]; ok { + adminpassword = user.Password + } + out, err := client.Cmd("export MYSQL_ROOT_PASSWORD=" + server.GetPass()).Cmd("export REPLICATION_MANAGER_URL=" + cluster.Conf.MonitorAddress + ":" + cluster.Conf.APIPort).Cmd("export REPLICATION_MANAGER_USER=" + adminuser).Cmd("export REPLICATION_MANAGER_PASSWORD=" + adminpassword).Cmd("export REPLICATION_MANAGER_HOST_NAME=" + server.GetHost()).Cmd("export REPLICATION_MANAGER_HOST_PORT=" + server.GetPort()).Cmd("export REPLICATION_MANAGER_CLUSTER_NAME=" + cluster.Name).SmartOutput() + if err != nil { + return errors.New("OnPremise Bootsrap via SSH %s" + err.Error()) + } + cluster.LogPrintf(LvlInfo, "OnPremise Provisioning : %s", string(out)) + out, err = client.Cmd("wget --no-check-certificate -q -O- $REPLICATION_MANAGER_URL/static/configurator/opensvc/bootstrap | sh").SmartOutput() + if err != nil { + return errors.New("OnPremise Bootsrap via SSH %s" + err.Error()) + } + cluster.LogPrintf(LvlInfo, "OnPremise Bootsrap : %s", string(out)) + return nil +} + +func (cluster *Cluster) OnPremiseConnectProxy(server DatabaseProxy) (*sshclient.Client, error) { + + if cluster.IsInFailover() { + return nil, errors.New("OnPremise Provisioning cancel during connect") + } + if cluster.Conf.OnPremiseSSH { + return nil, errors.New("onpremise-ssh disable ") + } + + user, _ := misc.SplitPair(cluster.Conf.OnPremiseSSHCredential) + key := os.Getenv("HOME") + "/.ssh/id_rsa" + client, err := sshcli.DialWithKey(misc.Unbracket(server.GetHost())+":"+strconv.Itoa(cluster.Conf.OnPremiseSSHPort), user, key) + if err != nil { + return nil, errors.New("OnPremise Provisioning via SSH %s" + err.Error()) + } + return client, nil +} + +func (cluster *Cluster) OnPremiseProvisionProxyService(pri DatabaseProxy) error { + pri.GetProxyConfig() + + if prx, ok := pri.(*MariadbShardProxy); ok { + cluster.LogPrintf(LvlInfo, "Bootstrap MariaDB Sharding Cluster") + srv, _ := cluster.newServerMonitor(prx.Host+":"+prx.GetPort(), prx.User, prx.Pass, true, "") + err := srv.Refresh() + if err == nil { + cluster.LogPrintf(LvlWarn, "Can connect to requested signal18 sharding proxy") + //that's ok a sharding proxy can be decalre in multiple cluster , should not block provisionning + cluster.errorChan <- err + return nil + } + srv.ClusterGroup = cluster + cluster.OnPremiseProvisionDatabaseService(srv) + if err != nil { + cluster.LogPrintf(LvlErr, "Bootstrap MariaDB Sharding Cluster Failed") + cluster.errorChan <- err + return err + } + srv.Close() + cluster.ShardProxyBootstrap(prx) + } + + if prx, ok := pri.(*ProxySQLProxy); ok { + err := cluster.OnPremiseProvisionProxySQLService(prx) + if err != nil { + cluster.LogPrintf(LvlErr, "Bootstrap Proxysql Failed") + 
cluster.errorChan <- err + return err + } + } + + if prx, ok := pri.(*HaproxyProxy); ok { + err := cluster.OnPremiseProvisionHaProxyService(prx) + cluster.errorChan <- err + return err + } + + cluster.errorChan <- nil + return nil +} + +func (cluster *Cluster) OnPremiseUnprovisionProxyService(pri DatabaseProxy) error { + if prx, ok := pri.(*MariadbShardProxy); ok { + cluster.OnPremiseUnprovisionDatabaseService(prx.ShardProxy) + } + + if prx, ok := pri.(*HaproxyProxy); ok { + cluster.OnPremiseUnprovisionHaProxyService(prx) + } + + if prx, ok := pri.(*ProxySQLProxy); ok { + cluster.OnPremiseUnprovisionProxySQLService(prx) + } + + cluster.errorChan <- nil + return nil +} + +func (cluster *Cluster) OnPremiseStartProxyService(pri DatabaseProxy) error { + if prx, ok := pri.(*MariadbShardProxy); ok { + cluster.OnPremiseStartDatabaseService(prx.ShardProxy) + } + + if prx, ok := pri.(*HaproxyProxy); ok { + cluster.OnPremiseStartHaProxyService(prx) + } + + if prx, ok := pri.(*ProxySQLProxy); ok { + cluster.OnPremiseStartProxySQLService(prx) + } + + cluster.errorChan <- nil + return nil +} + +func (cluster *Cluster) OnPremiseStopProxyService(pri DatabaseProxy) error { + + if prx, ok := pri.(*MariadbShardProxy); ok { + prx.ShardProxy.Shutdown() + } + + if prx, ok := pri.(*HaproxyProxy); ok { + cluster.OnPremiseStartHaProxyService(prx) + } + if prx, ok := pri.(*ProxySQLProxy); ok { + cluster.OnPremiseStartProxySQLService(prx) + } + + return nil +} diff --git a/cluster/prov_scripts.go b/cluster/prov_scripts.go new file mode 100644 index 000000000..064f84db8 --- /dev/null +++ b/cluster/prov_scripts.go @@ -0,0 +1,7 @@ +// replication-manager - Replication Manager Monitoring and CLI for MariaDB and MySQL +// Copyright 2017 Signal 18 SARL +// Authors: Guillaume Lefranc +// Stephane Varoqui +// This source code is licensed under the GNU General Public License, version 3. 
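The on-premise provisioning files above drive remote hosts over SSH with github.com/helloyi/go-sshclient: DialWithKey opens the session, chained Cmd() calls queue a small script, and SmartOutput() runs it, returning stdout on success or the error output on failure. A standalone sketch of that pattern follows; the address, user, key path and systemctl unit are placeholders, not values taken from replication-manager's configuration.

package main

import (
	"fmt"
	"log"
	"os"

	sshcli "github.com/helloyi/go-sshclient"
)

func main() {
	// Placeholder target; replication-manager derives this from the monitored
	// host plus the onpremise-ssh-port and onpremise-ssh-credential settings.
	addr := "db1.example.com:22"
	user := "root"
	key := os.Getenv("HOME") + "/.ssh/id_rsa"

	client, err := sshcli.DialWithKey(addr, user, key)
	if err != nil {
		log.Fatalf("ssh connect: %s", err)
	}
	defer client.Close()

	// Chained Cmd() calls are executed together as one remote script.
	out, err := client.Cmd("hostname").Cmd("systemctl is-active mariadb").SmartOutput()
	if err != nil {
		log.Fatalf("remote command: %s", err)
	}
	fmt.Println(string(out))
}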
+ +package cluster diff --git a/cluster/prov_slapos_db.go b/cluster/prov_slapos_db.go index f955a4963..e0d985149 100644 --- a/cluster/prov_slapos_db.go +++ b/cluster/prov_slapos_db.go @@ -11,11 +11,13 @@ func (cluster *Cluster) SlapOSUnprovisionDatabaseService(s *ServerMonitor) { } -func (cluster *Cluster) SlapOSStopDatabaseService(s *ServerMonitor) { +func (cluster *Cluster) SlapOSStopDatabaseService(s *ServerMonitor) error { s.Shutdown() + return nil } -func (cluster *Cluster) SlapOSStartDatabaseService(s *ServerMonitor) { +func (cluster *Cluster) SlapOSStartDatabaseService(s *ServerMonitor) error { // s.JobServerRestart() s.SetWaitStartCookie() + return nil } diff --git a/cluster/srv_job.go b/cluster/srv_job.go index eacc66aa6..26ec8577e 100644 --- a/cluster/srv_job.go +++ b/cluster/srv_job.go @@ -581,6 +581,22 @@ func (server *ServerMonitor) JobBackupLogical() error { return nil } + if server.IsMariaDB() && server.DBVersion.Major == 10 && + server.DBVersion.Minor >= 4 && server.DBVersion.Minor < 6 && + server.ClusterGroup.Conf.BackupLockDDL && + (server.ClusterGroup.Conf.BackupLogicalType == config.ConstBackupLogicalTypeMysqldump || server.ClusterGroup.Conf.BackupLogicalType == config.ConstBackupLogicalTypeMydumper) { + bckConn, err := server.GetNewDBConn() + if err != nil { + server.ClusterGroup.LogPrintf(LvlErr, "Error backup request: %s", err) + } + defer bckConn.Close() + _, err = bckConn.Exec("BACKUP STAGE START") + server.ClusterGroup.LogSQL("BACKUP STAGE START", err, server.URL, "JobBackupLogical", LvlErr, "Failed SQL for server %s: %s ", server.URL, err) + _, err = bckConn.Exec("BACKUP STAGE BLOCK_DDL") + server.ClusterGroup.LogSQL("BACKUP BLOCK_DDL", err, server.URL, "JobBackupLogical", LvlErr, "Failed SQL for server %s: %s ", server.URL, err) + server.ClusterGroup.LogPrintf(LvlInfo, "Blocking DDL via BACKUP STAGE") + } + if server.ClusterGroup.Conf.BackupLogicalType == config.ConstBackupLogicalTypeRiver { cfg := new(river.Config) cfg.MyHost = server.URL @@ -608,7 +624,10 @@ func (server *ServerMonitor) JobBackupLogical() error { river.NewRiver(cfg) } + + // Blocking DDL if server.ClusterGroup.Conf.BackupLogicalType == config.ConstBackupLogicalTypeMysqldump { + usegtid := "--gtid" events := "" dumpslave := "" @@ -629,7 +648,7 @@ func (server *ServerMonitor) JobBackupLogical() error { server.ClusterGroup.LogPrintf(LvlInfo, "Command: %s ", strings.Replace(dumpCmd.String(), server.ClusterGroup.dbPass, "XXXX", -1)) f, err := os.Create(server.GetMyBackupDirectory() + "mysqldump.sql.gz") if err != nil { - server.ClusterGroup.LogPrintf(LvlErr, "Error backup request: %s", err) + server.ClusterGroup.LogPrintf(LvlErr, "Error mysqldump backup request: %s", err) return err } wf := bufio.NewWriter(f) @@ -653,7 +672,7 @@ func (server *ServerMonitor) JobBackupLogical() error { err := dumpCmd.Wait() if err != nil { - log.Println(err) + server.ClusterGroup.LogPrintf(LvlErr, "mysqldump: %s", err) } gw.Flush() gw.Close() From d93b98d5d2b3b888bb956e008735e6390d28c718 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Fri, 7 May 2021 09:55:25 +0200 Subject: [PATCH 35/84] Implement provision call external script --- cluster/prov.go | 20 ++- cluster/prov_scripts.go | 280 ++++++++++++++++++++++++++++++++++++++++ cluster/srv_job.go | 8 -- config/config.go | 12 ++ main.go | 13 ++ 5 files changed, 322 insertions(+), 11 deletions(-) diff --git a/cluster/prov.go b/cluster/prov.go index 00f0877b5..d58de3c7b 100644 --- a/cluster/prov.go +++ b/cluster/prov.go @@ -79,6 +79,7 @@ func (cluster *Cluster) 
ProvisionServices() error { cluster.sme.RemoveFailoverState() return nil } + cluster.ProvisionDatabaseScript(server) } for _, server := range cluster.Servers { select { @@ -115,6 +116,7 @@ func (cluster *Cluster) ProvisionServices() error { cluster.sme.RemoveFailoverState() return nil } + cluster.ProvisionProxyScript(prx) } for _, pri := range cluster.Proxies { prx, ok := pri.(*Proxy) @@ -155,14 +157,15 @@ func (cluster *Cluster) InitDatabaseService(server *ServerMonitor) error { cluster.sme.RemoveFailoverState() return nil } + cluster.ProvisionDatabaseScript(server) select { case err := <-cluster.errorChan: cluster.sme.RemoveFailoverState() if err == nil { server.SetProvisionCookie() } else { + return err } - return err } return nil @@ -183,13 +186,15 @@ func (cluster *Cluster) InitProxyService(prx DatabaseProxy) error { default: return nil } + cluster.ProvisionProxyScript(prx) select { case err := <-cluster.errorChan: cluster.sme.RemoveFailoverState() if err == nil { prx.SetProvisionCookie() + } else { + return err } - return err } return nil } @@ -213,6 +218,7 @@ func (cluster *Cluster) Unprovision() error { cluster.sme.RemoveFailoverState() return nil } + cluster.UnprovisionDatabaseScript(server) } for _, server := range cluster.Servers { select { @@ -247,6 +253,7 @@ func (cluster *Cluster) Unprovision() error { cluster.sme.RemoveFailoverState() return nil } + cluster.UnprovisionProxyScript(prx) } for _, pri := range cluster.Proxies { prx, ok := pri.(*Proxy) @@ -289,6 +296,7 @@ func (cluster *Cluster) UnprovisionProxyService(prx DatabaseProxy) error { go cluster.OnPremiseUnprovisionProxyService(prx) default: } + cluster.UnprovisionProxyScript(prx) select { case err := <-cluster.errorChan: if err == nil { @@ -315,6 +323,7 @@ func (cluster *Cluster) UnprovisionDatabaseService(server *ServerMonitor) error default: go cluster.LocalhostUnprovisionDatabaseService(server) } + cluster.UnprovisionDatabaseScript(server) select { case err := <-cluster.errorChan: @@ -322,8 +331,9 @@ func (cluster *Cluster) UnprovisionDatabaseService(server *ServerMonitor) error server.DelProvisionCookie() server.DelReprovisionCookie() server.DelRestartCookie() + } else { + return err } - return err } return nil } @@ -349,6 +359,7 @@ func (cluster *Cluster) StopDatabaseService(server *ServerMonitor) error { default: return errors.New("No valid orchestrator") } + cluster.StopDatabaseService(server) if err == nil { server.DelRestartCookie() } @@ -373,6 +384,7 @@ func (cluster *Cluster) StopProxyService(server DatabaseProxy) error { default: return errors.New("No valid orchestrator") } + cluster.StopProxyService(server) if err == nil { server.DelRestartCookie() } @@ -396,6 +408,7 @@ func (cluster *Cluster) StartProxyService(server DatabaseProxy) error { default: return errors.New("No valid orchestrator") } + cluster.StartProxyService(server) if err == nil { server.DelRestartCookie() } @@ -425,6 +438,7 @@ func (cluster *Cluster) StartDatabaseService(server *ServerMonitor) error { default: return errors.New("No valid orchestrator") } + cluster.StartDatabaseService(server) if err == nil { server.DelRestartCookie() } diff --git a/cluster/prov_scripts.go b/cluster/prov_scripts.go index 064f84db8..f3d22154c 100644 --- a/cluster/prov_scripts.go +++ b/cluster/prov_scripts.go @@ -5,3 +5,283 @@ // This source code is licensed under the GNU General Public License, version 3. 
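A note on the BACKUP STAGE hunk added to srv_job.go in PATCH 34 further above: on MariaDB 10.4 and 10.5 the logical backup job opens a dedicated connection and issues BACKUP STAGE START followed by BACKUP STAGE BLOCK_DDL, so DDL is held off while mysqldump or mydumper runs, and the lock is released when that connection closes. The sketch below mirrors that sequence on a pinned connection and adds an explicit BACKUP STAGE END; the DSN is a placeholder.

package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	ctx := context.Background()

	// Placeholder DSN for illustration only.
	db, err := sql.Open("mysql", "root:pass@tcp(127.0.0.1:3306)/")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// BACKUP STAGE locks are per session, so pin one connection for the whole
	// sequence; closing it also releases the stage implicitly.
	conn, err := db.Conn(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if _, err := conn.ExecContext(ctx, "BACKUP STAGE START"); err != nil {
		log.Fatal(err)
	}
	if _, err := conn.ExecContext(ctx, "BACKUP STAGE BLOCK_DDL"); err != nil {
		log.Fatal(err)
	}

	// ... run mysqldump / mydumper against the server here ...

	if _, err := conn.ExecContext(ctx, "BACKUP STAGE END"); err != nil {
		log.Fatal(err)
	}
}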
package cluster + +import ( + "bufio" + "io" + "os/exec" + "strings" + "sync" + + "github.com/signal18/replication-manager/utils/misc" +) + +func (cluster *Cluster) UnprovisionDatabaseScript(server *ServerMonitor) error { + if cluster.Conf.ProvDbCleanupScript == "" { + return nil + } + scriptCmd := exec.Command(cluster.Conf.ProvDbCleanupScript, misc.Unbracket(server.Host), server.Port, cluster.dbUser, cluster.dbPass) + cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), cluster.dbPass, "XXXX", 1)) + + stdoutIn, _ := scriptCmd.StdoutPipe() + stderrIn, _ := scriptCmd.StderrPipe() + scriptCmd.Start() + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + server.copyLogs(stdoutIn) + }() + go func() { + defer wg.Done() + server.copyLogs(stderrIn) + }() + wg.Wait() + if err := scriptCmd.Wait(); err != nil { + cluster.LogPrintf(LvlErr, " %s", err) + return err + } + return nil +} + +func (cluster *Cluster) ProvisionDatabaseScript(server *ServerMonitor) error { + if cluster.Conf.ProvDbBootstrapScript == "" { + return nil + } + scriptCmd := exec.Command(cluster.Conf.ProvDbBootstrapScript, misc.Unbracket(server.Host), server.Port, cluster.dbUser, cluster.dbPass) + cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), cluster.dbPass, "XXXX", 1)) + + stdoutIn, _ := scriptCmd.StdoutPipe() + stderrIn, _ := scriptCmd.StderrPipe() + scriptCmd.Start() + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + server.copyLogs(stdoutIn) + }() + go func() { + defer wg.Done() + server.copyLogs(stderrIn) + }() + wg.Wait() + if err := scriptCmd.Wait(); err != nil { + cluster.LogPrintf(LvlErr, " %s", err) + return err + } + return nil +} + +func (cluster *Cluster) StopDatabaseScript(server *ServerMonitor) error { + if cluster.Conf.ProvDbStopScript == "" { + return nil + } + scriptCmd := exec.Command(cluster.Conf.ProvDbStopScript, misc.Unbracket(server.Host), server.Port, cluster.dbUser, cluster.dbPass) + cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), cluster.dbPass, "XXXX", 1)) + + stdoutIn, _ := scriptCmd.StdoutPipe() + stderrIn, _ := scriptCmd.StderrPipe() + scriptCmd.Start() + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + server.copyLogs(stdoutIn) + }() + go func() { + defer wg.Done() + server.copyLogs(stderrIn) + }() + wg.Wait() + if err := scriptCmd.Wait(); err != nil { + cluster.LogPrintf(LvlErr, " %s", err) + return err + } + return nil +} + +func (cluster *Cluster) StartDatabaseScript(server *ServerMonitor) error { + if cluster.Conf.ProvDbStartScript == "" { + return nil + } + scriptCmd := exec.Command(cluster.Conf.ProvDbStartScript, misc.Unbracket(server.Host), server.Port, cluster.dbUser, cluster.dbPass) + cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), cluster.dbPass, "XXXX", 1)) + + stdoutIn, _ := scriptCmd.StdoutPipe() + stderrIn, _ := scriptCmd.StderrPipe() + scriptCmd.Start() + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + server.copyLogs(stdoutIn) + }() + go func() { + defer wg.Done() + server.copyLogs(stderrIn) + }() + wg.Wait() + if err := scriptCmd.Wait(); err != nil { + cluster.LogPrintf(LvlErr, " %s", err) + return err + } + return nil +} + +func (cluster *Cluster) UnprovisionProxyScript(server DatabaseProxy) error { + if cluster.Conf.ProvProxyCleanupScript == "" { + return nil + } + scriptCmd := exec.Command(cluster.Conf.ProvProxyCleanupScript, misc.Unbracket(server.GetHost()), server.GetPort(), server.GetUser(), server.GetPass()) + 
cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), server.GetPass(), "XXXX", 1)) + + stdoutIn, _ := scriptCmd.StdoutPipe() + stderrIn, _ := scriptCmd.StderrPipe() + scriptCmd.Start() + + copyLogs := func(r io.Reader) { + // buf := make([]byte, 1024) + s := bufio.NewScanner(r) + for { + if !s.Scan() { + break + } else { + cluster.LogPrintf(LvlInfo, "%s", s.Text()) + } + } + } + + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + copyLogs(stdoutIn) + }() + go func() { + defer wg.Done() + copyLogs(stderrIn) + }() + wg.Wait() + if err := scriptCmd.Wait(); err != nil { + cluster.LogPrintf(LvlErr, " %s", err) + return err + } + return nil +} + +func (cluster *Cluster) ProvisionProxyScript(server DatabaseProxy) error { + if cluster.Conf.ProvProxyBootstrapScript == "" { + return nil + } + scriptCmd := exec.Command(cluster.Conf.ProvProxyBootstrapScript, misc.Unbracket(server.GetHost()), server.GetPort(), server.GetUser(), server.GetPass()) + cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), server.GetPass(), "XXXX", 1)) + + stdoutIn, _ := scriptCmd.StdoutPipe() + stderrIn, _ := scriptCmd.StderrPipe() + scriptCmd.Start() + var wg sync.WaitGroup + copyLogs := func(r io.Reader) { + // buf := make([]byte, 1024) + s := bufio.NewScanner(r) + for { + if !s.Scan() { + break + } else { + cluster.LogPrintf(LvlInfo, "%s", s.Text()) + } + } + } + wg.Add(2) + go func() { + defer wg.Done() + copyLogs(stdoutIn) + }() + go func() { + defer wg.Done() + copyLogs(stderrIn) + }() + wg.Wait() + if err := scriptCmd.Wait(); err != nil { + cluster.LogPrintf(LvlErr, " %s", err) + return err + } + return nil +} + +func (cluster *Cluster) StartProxyScript(server DatabaseProxy) error { + if cluster.Conf.ProvProxyStartScript == "" { + return nil + } + scriptCmd := exec.Command(cluster.Conf.ProvProxyStartScript, misc.Unbracket(server.GetHost()), server.GetPort(), server.GetUser(), server.GetPass()) + cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), server.GetPass(), "XXXX", 1)) + + stdoutIn, _ := scriptCmd.StdoutPipe() + stderrIn, _ := scriptCmd.StderrPipe() + scriptCmd.Start() + var wg sync.WaitGroup + copyLogs := func(r io.Reader) { + // buf := make([]byte, 1024) + s := bufio.NewScanner(r) + for { + if !s.Scan() { + break + } else { + cluster.LogPrintf(LvlInfo, "%s", s.Text()) + } + } + } + wg.Add(2) + go func() { + defer wg.Done() + copyLogs(stdoutIn) + }() + go func() { + defer wg.Done() + copyLogs(stderrIn) + }() + wg.Wait() + if err := scriptCmd.Wait(); err != nil { + cluster.LogPrintf(LvlErr, " %s", err) + return err + } + return nil +} + +func (cluster *Cluster) StopProxyScript(server DatabaseProxy) error { + if cluster.Conf.ProvProxyStopScript == "" { + return nil + } + scriptCmd := exec.Command(cluster.Conf.ProvProxyStopScript, misc.Unbracket(server.GetHost()), server.GetPort(), server.GetUser(), server.GetPass()) + cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), server.GetPass(), "XXXX", 1)) + + stdoutIn, _ := scriptCmd.StdoutPipe() + stderrIn, _ := scriptCmd.StderrPipe() + scriptCmd.Start() + var wg sync.WaitGroup + copyLogs := func(r io.Reader) { + // buf := make([]byte, 1024) + s := bufio.NewScanner(r) + for { + if !s.Scan() { + break + } else { + cluster.LogPrintf(LvlInfo, "%s", s.Text()) + } + } + } + wg.Add(2) + go func() { + defer wg.Done() + copyLogs(stdoutIn) + }() + go func() { + defer wg.Done() + copyLogs(stderrIn) + }() + wg.Wait() + if err := scriptCmd.Wait(); err != nil { + cluster.LogPrintf(LvlErr, " %s", 
err) + return err + } + return nil +} diff --git a/cluster/srv_job.go b/cluster/srv_job.go index 26ec8577e..dc717348e 100644 --- a/cluster/srv_job.go +++ b/cluster/srv_job.go @@ -766,14 +766,6 @@ func (server *ServerMonitor) copyLogs(r io.Reader) { } else { server.ClusterGroup.LogPrintf(LvlInfo, "%s", s.Text()) } - - /*n, err := r.Read(buf) - if n > 0 { - server.ClusterGroup.LogPrintf(LvlInfo, "%s", buf) - } - if err != nil { - break - }*/ } } diff --git a/config/config.go b/config/config.go index ff851a12b..a031d920c 100644 --- a/config/config.go +++ b/config/config.go @@ -317,6 +317,9 @@ type Config struct { SlapOSShardProxyPartitions string `mapstructure:"slapos-shardproxy-partitions" toml:"slapos-shardproxy-partitions" json:"slaposShardproxyPartitions"` SlapOSSphinxPartitions string `mapstructure:"slapos-sphinx-partitions" toml:"slapos-sphinx-partitions" json:"slaposSphinxPartitions"` ProvHost string `mapstructure:"opensvc-host" toml:"opensvc-host" json:"opensvcHost"` + OnPremiseSSH bool `mapstructure:"onpremise-ssh" toml:"onpremise-ssh" json:"onpremiseSsh"` + OnPremiseSSHPort int `mapstructure:"onpremise-ssh-port" toml:"onpremise-ssh-port" json:"onpremiseSshPort"` + OnPremiseSSHCredential string `mapstructure:"onpremise-ssh-credential" toml:"onpremise-ssh-credential" json:"onpremiseSshCredential"` ProvOpensvcP12Certificate string `mapstructure:"opensvc-p12-certificate" toml:"opensvc-p12-certificat" json:"opensvcP12Certificate"` ProvOpensvcP12Secret string `mapstructure:"opensvc-p12-secret" toml:"opensvc-p12-secret" json:"opensvcP12Secret"` ProvOpensvcUseCollectorAPI bool `mapstructure:"opensvc-use-collector-api" toml:"opensvc-use-collector-api" json:"opensvcUseCollectorApi"` @@ -412,6 +415,14 @@ type Config struct { ProvDockerDaemonPrivate bool `mapstructure:"prov-docker-daemon-private" toml:"prov-docker-daemon-private" json:"provDockerDaemonPrivate"` ProvServicePlan string `mapstructure:"prov-service-plan" toml:"prov-service-plan" json:"provServicePlan"` ProvServicePlanRegistry string `mapstructure:"prov-service-plan-registry" toml:"prov-service-plan-registry" json:"provServicePlanRegistry"` + ProvDbBootstrapScript string `mapstructure:"prov-db-bootstrap-script" toml:"prov-db-bootstrap-script" json:"provDbBootstrapScript"` + ProvProxyBootstrapScript string `mapstructure:"prov-proxy-bootstrap-script" toml:"prov-proxy-bootstrap-script" json:"provProxyBootstrapScript"` + ProvDbCleanupScript string `mapstructure:"prov-db-cleanup-script" toml:"prov-db-cleanup-script" json:"provDbCleanupScript"` + ProvProxyCleanupScript string `mapstructure:"prov-proxy-cleanup-script" toml:"prov-proxy-cleanup-script" json:"provProxyCleanupScript"` + ProvDbStartScript string `mapstructure:"prov-db-start-script" toml:"prov-db-start-script" json:"provDbStartScript"` + ProvProxyStartScript string `mapstructure:"prov-proxy-start-script" toml:"prov-proxy-start-script" json:"provProxyStartScript"` + ProvDbStopScript string `mapstructure:"prov-db-stop-script" toml:"prov-db-stop-script" json:"provDbStopScript"` + ProvProxyStopScript string `mapstructure:"prov-proxy-stop-script" toml:"prov-proxy-stop-script" json:"provProxyStopScript"` APIUsers string `mapstructure:"api-credentials" toml:"api-credentials" json:"apiCredentials"` APIUsersExternal string `mapstructure:"api-credentials-external" toml:"api-credentials-external" json:"apiCredentialsExternal"` APIUsersACLAllow string `mapstructure:"api-credentials-acl-allow" toml:"api-credentials-acl-allow" json:"apiCredentialsACLAllow"` @@ -475,6 +486,7 @@ type Config 
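
The eight script hooks above all follow the same pattern: build an exec.Command from the configured script path plus host, port, user and password, log the command line with the password masked, stream stdout and stderr line by line into the cluster log, and return the exit status. A minimal sketch of a shared runner, assuming a hypothetical runScript helper that is not part of this patch, shows how the per-hook functions could collapse into thin wrappers; it uses only the bufio, io, os/exec and sync packages already imported above.

// Hypothetical helper, shown only to illustrate the shared pattern of the
// hook functions above; logLine would be backed by cluster.LogPrintf.
func runScript(path string, logLine func(string), args ...string) error {
	if path == "" {
		return nil // hook not configured, nothing to do
	}
	cmd := exec.Command(path, args...)
	stdout, _ := cmd.StdoutPipe()
	stderr, _ := cmd.StderrPipe()
	if err := cmd.Start(); err != nil {
		return err
	}
	var wg sync.WaitGroup
	stream := func(r io.Reader) {
		defer wg.Done()
		s := bufio.NewScanner(r)
		for s.Scan() {
			logLine(s.Text())
		}
	}
	wg.Add(2)
	go stream(stdout)
	go stream(stderr)
	wg.Wait()
	return cmd.Wait()
}

UnprovisionProxyScript, for example, would then reduce to a single runScript(cluster.Conf.ProvProxyCleanupScript, ...) call with the same four arguments it already builds today.
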
struct { BackupMysqlclientPath string `mapstructure:"backup-mysqlclient-path" toml:"backup-mysqlclient-path" json:"backupMysqlclientgPath"` BackupBinlogs bool `mapstructure:"backup-binlogs" toml:"backup-binlogs" json:"backupBinlogs"` BackupBinlogsKeep int `mapstructure:"backup-binlogs-keep" toml:"backup-binlogs-keep" json:"backupBinlogsKeep"` + BackupLockDDL bool `mapstructure:"backup-lockddl" toml:"backup-lockddl" json:"backupLockDDL"` ClusterConfigPath string `mapstructure:"cluster-config-file" toml:"-" json:"-"` // BackupResticStoragePolicy string `mapstructure:"backup-restic-storage-policy" toml:"backup-restic-storage-policy" json:"backupResticStoragePolicy"` diff --git a/main.go b/main.go index 656eda31b..0ddb235bd 100644 --- a/main.go +++ b/main.go @@ -416,6 +416,7 @@ func init() { monitorCmd.Flags().StringVar(&conf.SchedulerJobsSSHCron, "scheduler-jobs-ssh-cron", "0 * * * * *", "Remote execution of dbjobs via ssh ") monitorCmd.Flags().BoolVar(&conf.Backup, "backup", false, "Turn on Backup") + monitorCmd.Flags().BoolVar(&conf.BackupLockDDL, "backup-lockddl", true, "Use mariadb backup stage") monitorCmd.Flags().IntVar(&conf.BackupLogicalLoadThreads, "backup-logical-load-threads", 2, "Number of threads to load database") monitorCmd.Flags().IntVar(&conf.BackupLogicalDumpThreads, "backup-logical-dump-threads", 2, "Number of threads to dump database") monitorCmd.Flags().BoolVar(&conf.BackupLogicalDumpSystemTables, "backup-logical-dump-system-tables", false, "Backup restore the mysql database") @@ -496,6 +497,18 @@ func init() { monitorCmd.Flags().StringVar(&conf.SlapOSMaxscalePartitions, "slapos-maxscale-partitions", "", "List maxscale slapos partitions path") monitorCmd.Flags().StringVar(&conf.SlapOSShardProxyPartitions, "slapos-shardproxy-partitions", "", "List spider slapos partitions path") monitorCmd.Flags().StringVar(&conf.SlapOSSphinxPartitions, "slapos-sphinx-partitions", "", "List sphinx slapos partitions path") + monitorCmd.Flags().StringVar(&conf.ProvDbBootstrapScript, "prov-db-bootstrap-script", "", "Database bootstrap script") + monitorCmd.Flags().StringVar(&conf.ProvProxyBootstrapScript, "prov-proxy-bootstrap-script", "", "Proxy bootstrap script") + monitorCmd.Flags().StringVar(&conf.ProvDbCleanupScript, "prov-db-cleanup-script", "", "Database cleanup script") + monitorCmd.Flags().StringVar(&conf.ProvProxyCleanupScript, "prov-proxy-cleanup-script", "", "Proxy cleanup script") + monitorCmd.Flags().StringVar(&conf.ProvDbStartScript, "prov-db-start-script", "", "Database start script") + monitorCmd.Flags().StringVar(&conf.ProvProxyStartScript, "prov-proxy-start-script", "", "Proxy start script") + monitorCmd.Flags().StringVar(&conf.ProvDbStopScript, "prov-db-stop-script", "", "Database stop script") + monitorCmd.Flags().StringVar(&conf.ProvProxyStopScript, "prov-proxy-stop-script", "", "Proxy stop script") + + monitorCmd.Flags().BoolVar(&conf.OnPremiseSSH, "onpremise-ssh", false, "Connect to host via SSH using user private key") + monitorCmd.Flags().IntVar(&conf.OnPremiseSSHPort, "onpremise-ssh-port", 22, "Connect to host via SSH using ssh port") + monitorCmd.Flags().StringVar(&conf.OnPremiseSSHCredential, "onpremise-ssh-credential", "root:", "User:password for ssh if no password using current user private key") if WithProvisioning == "ON" { monitorCmd.Flags().StringVar(&conf.ProvDatadirVersion, "prov-db-datadir-version", "10.2", "Empty datadir to deploy for localtest") monitorCmd.Flags().StringVar(&conf.ProvDiskSystemSize, "prov-db-disk-system-size", "2", "Disk in g for micro 
service VM") From 3b482f793dfbb33dea7a4c9bc5dbb41fcff5ec21 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Fri, 7 May 2021 10:09:40 +0200 Subject: [PATCH 36/84] Fixing service plan download --- share/serviceplan.csv | 38 ++++++++++++++++++++++++++++++++++++++ share/serviceplan.json | 1 - 2 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 share/serviceplan.csv delete mode 100644 share/serviceplan.json diff --git a/share/serviceplan.csv b/share/serviceplan.csv new file mode 100644 index 000000000..d476fa6ab --- /dev/null +++ b/share/serviceplan.csv @@ -0,0 +1,38 @@ +id,plan,dbmemory,dbcores,dbdatasize,dbsystemsize,dbiops,dbcpufreq,prxcores,prxdatasize +1,x1.small,16384,2,700,300,300,3000,1,80 +2,x1.small.compute,16384,4,700,300,300,3000,1,80 +3,x1.small.perf,32768,2,700,300,600,3000,1,80 +4,x1.middle,32768,4,700,300,600,3000,2,80 +5,x1.middle.compute,32768,8,700,300,600,3000,2,80 +6,x1.middle.perf,65536,4,700,300,800,3000,2,80 +7,x1.large,65536,8,700,300,800,3000,2,80 +8,x1.large.compute,65536,16,700,300,800,3000,2,80 +9,x1.large.perf,131072,8,700,300,1000,3000,2,80 +10,x1.huge,131072,16,700,300,1000,3000,4,80 +11,x1.huge.compute,131072,32,700,300,1000,3000,4,80 +12,x1.huge.perf,262144,16,700,300,2000,3000,4,80 +13,x2.tiny,1024,2,700,300,300,3000,4,80 +14,x2.small,16384,2,700,300,300,3000,1,80 +15,x2.small.compute,16384,4,700,300,300,3000,1,80 +16,x2.small.perf,32768,2,700,300,600,3000,1,80 +17,x2.middle,32768,4,700,300,600,3000,2,80 +18,x2.middle.compute,32768,8,700,300,600,3000,2,80 +19,x2.middle.perf,65536,4,700,300,800,3000,2,80 +20,x2.large,65536,8,700,300,800,3000,2,80 +21,x2.large.compute,65536,16,700,300,800,3000,2,80 +22,x2.large.perf,131072,8,700,300,1000,3000,2,80 +23,x2.huge,131072,16,700,300,1000,3000,4,80 +24,x2.huge.compute,131072,32,700,300,1000,3000,4,80 +25,x2.huge.perf,262144,16,700,300,2000,3000,4,80 +26,x3.small,16384,2,700,300,300,3000,1,80 +27,x3.small.compute,16384,4,700,300,300,3000,1,80 +28,x3.small.perf,32768,2,700,300,600,3000,1,80 +29,x3.middle,32768,4,700,300,600,3000,2,80 +30,x3.middle.compute,32768,8,700,300,600,3000,2,80 +31,x3.middle.perf,65536,4,700,300,800,3000,2,80 +32,x3.large,65536,8,700,300,800,3000,2,80 +33,x3.large.compute,65536,16,700,300,800,3000,2,80 +34,x3.large.perf,131072,8,700,300,1000,3000,2,80 +35,x3.huge,131072,16,700,300,1000,3000,4,80 +36,x3.huge.compute,131072,32,700,300,1000,3000,4,80 +37,x3.huge.perf,262144,16,700,300,2000,3000,4,80 \ No newline at end of file diff --git a/share/serviceplan.json b/share/serviceplan.json deleted file mode 100644 index 419c32a1b..000000000 --- a/share/serviceplan.json +++ /dev/null @@ -1 +0,0 @@ 
-{"rows":[{"plan":"x1.small","memory":16384,"cores":2,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.small.compute","memory":16384,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.small.perf","memory":32768,"cores":2,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.middle","memory":32768,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.middle.compute","memory":32768,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.middle.perf","memory":65536,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.large","memory":65536,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.large.compute","memory":65536,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.large.perf","memory":131072,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.huge","memory":131072,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.huge.compute","memory":131072,"cores":32,"dbdatasize":700,"dbsystemsize":300},{"plan":"x1.huge.perf","memory":262144,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.small","memory":16384,"cores":2,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.small.compute","memory":16384,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.small.perf","memory":32768,"cores":2,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.middle","memory":32768,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.middle.compute","memory":32768,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.middle.perf","memory":65536,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.large","memory":65536,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.large.compute","memory":65536,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.large.perf","memory":131072,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.huge","memory":131072,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.huge.compute","memory":131072,"cores":32,"dbdatasize":700,"dbsystemsize":300},{"plan":"x2.huge.perf","memory":262144,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.small","memory":16384,"cores":2,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.small.compute","memory":16384,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.small.perf","memory":32768,"cores":2,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.middle","memory":32768,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.middle.compute","memory":32768,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.middle.perf","memory":65536,"cores":4,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.large","memory":65536,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.large.compute","memory":65536,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.large.perf","memory":131072,"cores":8,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.huge","memory":131072,"cores":16,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.huge.compute","memory":131072,"cores":32,"dbdatasize":700,"dbsystemsize":300},{"plan":"x3.huge.perf","memory":262144,"cores":16,"dbdatasize":700,"dbsystemsize":300}]} \ No newline at end of file From 8293c03ac87980d752a37f327e18034da05ea7ee Mon Sep 17 00:00:00 2001 From: svaroqui Date: Fri, 7 May 2021 10:19:00 +0200 Subject: [PATCH 37/84] ServicePlan copy csv from share if can not download --- server/server.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/server/server.go b/server/server.go index adcd4808a..e47eda2af 100644 --- a/server/server.go +++ 
b/server/server.go @@ -827,18 +827,21 @@ func (repman *ReplicationManager) DownloadFile(url string, file string) error { func (repman *ReplicationManager) InitServicePlans() error { if repman.Conf.ProvServicePlanRegistry == "" { - return nil - } - - err := repman.DownloadFile(repman.Conf.ProvServicePlanRegistry, repman.Conf.WorkingDir+"/serviceplan.csv") - if err != nil { - log.Errorf("GetServicePlans download csv %s", err) + err := repman.DownloadFile(repman.Conf.ProvServicePlanRegistry, repman.Conf.WorkingDir+"/serviceplan.csv") + if err != nil { + log.Errorf("GetServicePlans download csv %s", err) + // copy from share if not downloadable + if _, err := os.Stat(repman.Conf.WorkingDir + "/serviceplan.csv"); os.IsNotExist(err) { + misc.CopyFile(repman.Conf.ShareDir+"/serviceplan.csv", repman.Conf.WorkingDir+"/serviceplan.csv") + } + } + } else { // copy from share if not downloadable if _, err := os.Stat(repman.Conf.WorkingDir + "/serviceplan.csv"); os.IsNotExist(err) { misc.CopyFile(repman.Conf.ShareDir+"/serviceplan.csv", repman.Conf.WorkingDir+"/serviceplan.csv") } } - err = misc.ConvertCSVtoJSON(repman.Conf.WorkingDir+"/serviceplan.csv", repman.Conf.WorkingDir+"/serviceplan.json", ",") + err := misc.ConvertCSVtoJSON(repman.Conf.WorkingDir+"/serviceplan.csv", repman.Conf.WorkingDir+"/serviceplan.json", ",") if err != nil { log.Errorf("GetServicePlans ConvertCSVtoJSON %s", err) return err From 939d845aa98a7ec454357bdc666072c7a248744d Mon Sep 17 00:00:00 2001 From: svaroqui Date: Wed, 12 May 2021 09:58:55 +0200 Subject: [PATCH 38/84] Fixing multiple crash in service plan add cluster --- cluster/cluster_get.go | 35 ++-- cluster/cluster_set.go | 14 +- .../opensvc/moduleset_mariadb.svc.mrm.db.json | 159 +++++++++++++++++- 3 files changed, 174 insertions(+), 34 deletions(-) diff --git a/cluster/cluster_get.go b/cluster/cluster_get.go index c3d0ee05b..138d0db31 100644 --- a/cluster/cluster_get.go +++ b/cluster/cluster_get.go @@ -11,12 +11,11 @@ import ( "errors" "hash/crc32" "io/ioutil" - "net/http" "sort" "strconv" "strings" - "time" + "github.com/siddontang/go/log" "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/utils/cron" "github.com/signal18/replication-manager/utils/dbhelper" @@ -396,7 +395,15 @@ func (cluster *Cluster) GetBackupServer() *ServerMonitor { if !cluster.IsDiscovered() || len(cluster.Servers) < 1 { return nil } + //1 cluster.LogPrintf(LvlInfo, "%d ", len(cluster.Servers)) + for _, server := range cluster.Servers { + if server == nil { + return nil + } + // cluster.LogPrintf(LvlInfo, "%s ", server.State) + // cluster.LogPrintf(LvlInfo, "%t ", server.PreferedBackup) + if server.State != stateFailed && server.PreferedBackup { return server } @@ -899,36 +906,24 @@ func (cluster *Cluster) GetQueryRules() []config.QueryRule { } func (cluster *Cluster) GetServicePlans() []config.ServicePlan { + type Message struct { Rows []config.ServicePlan `json:"rows"` } var m Message - client := http.Client{ - Timeout: 300 * time.Millisecond, - } - response, err := client.Get(cluster.Conf.ProvServicePlanRegistry) + file, err := ioutil.ReadFile(cluster.Conf.WorkingDir + "/serviceplan.json") if err != nil { - cluster.LogPrintf(LvlErr, "GetServicePlans: %s %s", cluster.Conf.ProvServicePlanRegistry, err) + log.Errorf("failed opening file because: %s", err.Error()) return nil } - defer response.Body.Close() - contents, err := ioutil.ReadAll(response.Body) - if err != nil { - cluster.LogPrintf(LvlErr, "GetServicePlans: %s", err) - return nil - } - err = 
json.Unmarshal(contents, &m) + + err = json.Unmarshal([]byte(file), &m.Rows) if err != nil { cluster.LogPrintf(LvlErr, "GetServicePlans %s", err) return nil } - /* - r := make([]config.ServicePlan, 0, len(m.Rows)) - for _, value := range m.Rows { - r = append(r, value) - } - /*sort.Sort(QueryRuleSorter(r))*/ + return m.Rows } diff --git a/cluster/cluster_set.go b/cluster/cluster_set.go index 8e77f06c2..749cac249 100644 --- a/cluster/cluster_set.go +++ b/cluster/cluster_set.go @@ -697,12 +697,7 @@ func (cluster *Cluster) SetServicePlan(theplan string) error { if plan.Plan == theplan { cluster.LogPrintf(LvlInfo, "Attaching service plan %s", theplan) cluster.Conf.ProvServicePlan = theplan - cluster.SetDBCores(strconv.Itoa(plan.DbCores)) - cluster.SetDBMemorySize(strconv.Itoa(plan.DbMemory)) - cluster.SetDBDiskSize(strconv.Itoa(plan.DbDataSize)) - cluster.SetDBDiskIOPS(strconv.Itoa(plan.DbIops)) - cluster.SetProxyCores(strconv.Itoa(plan.PrxCores)) - cluster.SetProxyDiskSize(strconv.Itoa(plan.PrxDataSize)) + if cluster.Conf.User == "" { cluster.LogPrintf(LvlInfo, "Settting database root credential to admin:repman ") cluster.Conf.User = "admin:repman" @@ -797,7 +792,12 @@ func (cluster *Cluster) SetServicePlan(theplan string) error { } } } - + cluster.SetDBCores(strconv.Itoa(plan.DbCores)) + cluster.SetDBMemorySize(strconv.Itoa(plan.DbMemory)) + cluster.SetDBDiskSize(strconv.Itoa(plan.DbDataSize)) + cluster.SetDBDiskIOPS(strconv.Itoa(plan.DbIops)) + cluster.SetProxyCores(strconv.Itoa(plan.PrxCores)) + cluster.SetProxyDiskSize(strconv.Itoa(plan.PrxDataSize)) return nil } } diff --git a/share/opensvc/moduleset_mariadb.svc.mrm.db.json b/share/opensvc/moduleset_mariadb.svc.mrm.db.json index acf5110d5..fe35f8050 100644 --- a/share/opensvc/moduleset_mariadb.svc.mrm.db.json +++ b/share/opensvc/moduleset_mariadb.svc.mrm.db.json @@ -2201,6 +2201,44 @@ } ], "fset_name": "mariadb.optimizer.trace" + }, + { + "fset_stats": false, + "id": 406, + "filters": [ + { + "filter": { + "f_op": "=", + "f_field": "tag_name", + "f_value": "noauth", + "f_table": "v_tags", + "id": 363 + }, + "f_order": 0, + "f_log_op": "AND", + "filterset": null + } + ], + "fset_name": "mariadb.security.noauth" + }, + { + "fset_stats": false, + "id": 407, + "filters": [ + { + "filter": { + "f_op": "=", + "f_field": "tag_name", + "f_value": "interleave", + "f_table": "v_tags", + "id": 364 + }, + "f_order": 0, + "f_log_op": "AND", + "filterset": null + } + ], + "fset_name": "mariadb.memory.interleave" } ], "rulesets": [ @@ -2783,8 +2821,8 @@ { "var_author": "admin Manager", "var_class": "file", - "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/engine_myrock.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_command: INSTALL SONAME 'ha_myrocks';\\n\\n[mariadb]\\nplugin-load-add=ha_rocksdb.so\\nloose_rocksdb_max_open_files=-1\\nloose_rocksdb_base_background_compactions=1\\nloose_rocksdb_max_background_compactions=8\\nloose_rocksdb_max_total_wal_size=4G\\nloose_rocksdb_max_background_flushes=4\\nloose_rocksdb_block_size=16384\\nloose_rocksdb_block_cache_size=%%ENV:SVC_CONF_ENV_ROCKSDB_CACHE_SIZE%%M\\nloose_rocksdb_table_cache_numshardbits=6\\n\\n# rate limiter\\nloose_rocksdb_bytes_per_sync=4194304\\nloose_rocksdb_wal_bytes_per_sync=4194304\\nloose_rocksdb_rate_limiter_bytes_per_sec=104857600 #100MB/s\\n\\n# triggering compaction if there are many sequential 
deletes\\nloose_rocksdb_compaction_sequential_deletes_count_sd=1\\nloose_rocksdb_compaction_sequential_deletes=199999\\nloose_rocksdb_compaction_sequential_deletes_window=200000\\n\\n# read free replication\\n#rocksdb_rpl_lookup_rows=0\\n\\nloose_rocksdb_default_cf_options=write_buffer_size=128m;target_file_size_base=32m;max_bytes_for_level_base=512m;level0_file_num_compaction_trigger=4;level0_slowdown_writes_trigger=10;level0_stop_writes_trigger=15;max_write_buffer_number=4;compression_per_level=kLZ4Compression;bottommost_compression=kZSTD;compression_opts=-14:1:0;block_based_table_factory={cache_index_and_filter_blocks=1;filter_policy=bloomfilter:10:false;whole_key_filtering=0};level_compaction_dynamic_level_bytes=true;optimize_filters_for_hits=true;memtable_prefix_bloom_size_ratio=0.05;prefix_extractor=capped:12;compaction_pri=kMinOverlappingRatio\\n\\nloose_rocksdb_override_cf_options=cf_link_pk={prefix_extractor=capped:20};rev:cf_link_id1_type={prefix_extractor=capped:20}\\n \\nloose_rocksdb_allow_concurrent_memtable_write=1\\nloose_rocksdb_enable_write_thread_adaptive_yield=1\"}", - "var_updated": "2020-12-23 17:59:05", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/engine_myrock.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_command: INSTALL SONAME 'ha_myrocks';\\n\\n[mariadb]\\nplugin-load-add=ha_rocksdb.so\\nloose_rocksdb_max_open_files=-1\\nloose_rocksdb_base_background_compactions=1\\nloose_rocksdb_max_background_compactions=8\\nloose_rocksdb_max_total_wal_size=4G\\nloose_rocksdb_max_background_flushes=4\\nloose_rocksdb_block_size=16384\\nloose_rocksdb_block_cache_size=%%ENV:SVC_CONF_ENV_ROCKSDB_CACHE_SIZE%%M\\nloose_rocksdb_table_cache_numshardbits=6\\n\\n# rate limiter\\nloose_rocksdb_bytes_per_sync=4194304\\nloose_rocksdb_wal_bytes_per_sync=4194304\\nloose_rocksdb_rate_limiter_bytes_per_sec=104857600 #100MB/s\\n\\n# triggering compaction if there are many sequential deletes\\nloose_rocksdb_compaction_sequential_deletes_count_sd=1\\nloose_rocksdb_compaction_sequential_deletes=199999\\nloose_rocksdb_compaction_sequential_deletes_window=200000\\n\\n# read free replication\\n#rocksdb_rpl_lookup_rows=0\\n\\n# From benchmark 
\\n#rocksdb_default_cf_options=\\\"write_buffer_size=256m;target_file_size_base=32m;max_bytes_for_level_base=512m;max_write_buffer_number=4;level0_file_num_compaction_trigger=4;level0_slowdown_writes_trigger=20;level0_stop_writes_trigger=30;max_write_buffer_number=4;block_based_table_factory={cache_index_and_filter_blocks=1;filter_policy=bloomfilter:10:false;whole_key_filtering=0};level_compaction_dynamic_level_bytes=true;optimize_filters_for_hits=true;memtable_prefix_bloom_size_ratio=0.05;prefix_extractor=capped:12;compaction_pri=kMinOverlappingRatio;compression=kZSTD;bottommost_compression=kZSTD;compression_opts=-14:1:0\\\"\\n\\n\\nloose_rocksdb_default_cf_options=write_buffer_size=128m;target_file_size_base=32m;max_bytes_for_level_base=512m;level0_file_num_compaction_trigger=4;level0_slowdown_writes_trigger=10;level0_stop_writes_trigger=15;max_write_buffer_number=4;compression_per_level=kLZ4Compression;bottommost_compression=kZSTD;compression_opts=-14:1:0;block_based_table_factory={cache_index_and_filter_blocks=1;filter_policy=bloomfilter:10:false;whole_key_filtering=0};level_compaction_dynamic_level_bytes=true;optimize_filters_for_hits=true;memtable_prefix_bloom_size_ratio=0.05;prefix_extractor=capped:12;compaction_pri=kMinOverlappingRatio\\n\\nloose_rocksdb_override_cf_options=cf_link_pk={prefix_extractor=capped:20};rev:cf_link_id1_type={prefix_extractor=capped:20}\\n \\nloose_rocksdb_allow_concurrent_memtable_write=1\\nloose_rocksdb_enable_write_thread_adaptive_yield=1\"}", + "var_updated": "2021-03-17 11:38:53", "var_name": "db_cnf_engine_myrocks", "id": 5937 }, @@ -3537,7 +3575,7 @@ "var_class": "file", "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/with_rep_idempotent.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"#mariadb_command: SET GLOBAL slave_exec_mode='IDEMPOTENT';\\n#mariadb_default: SET GLOBAL slave_exec_mode='STRICT';\\n\\n#mysql_command: SET GLOBAL slave_exec_mode='IDEMPOTENT';\\n#mysql_default: SET GLOBAL slave_exec_mode='STRICT';\\n\\n[mysqld]\\nloose_slave_exec_mode=IDEMPOTENT\"}", "var_updated": "2021-01-03 22:47:04", - "var_name": "db_cnf_rep_idempotent", + "var_name": "db_cnf_rep_with_idempotent", "id": 6183 }, { @@ -3545,7 +3583,7 @@ "var_class": "file", "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/with_rep_loosy.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_command: SET GLOBAL loose_slave_type_conversions='ALL_NON_LOSSY,ALL_LOSSY';\\n# mariadb_default: SET GLOBAL loose_slave_type_conversions='ALL_NON_LOSSY';\\n\\n[mysqld]\\nloose_slave_type_conversions='ALL_NON_LOSSY,ALL_LOSSY'\"}", "var_updated": "2021-01-13 18:26:15", - "var_name": "db_cnf_rep_lossy", + "var_name": "db_cnf_rep_with_ossy", "id": 6184 }, { @@ -3785,7 +3823,7 @@ "var_class": "file", "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/no_rep_slavestatement.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_command: SET GLOBAL replicate_annotate_row_events = 0;SET GLOBAL binlog_annotate_row_events = 0;\\n# mariadb_default: SET GLOBAL replicate_annotate_row_events = 1;SET GLOBAL binlog_annotate_row_events = 1;\\n\\n# mysql_command: SET GLOBAL binlog_row_query_log_events = 0;\\n# mysql_default: SET GLOBAL binlog_row_query_log_events = 1;\\n\\n[mariadb]\\nloose_replicate_annotate_row_events = 
0\\nloose_binlog_annotate_row_events = 0\\n\\n[mysqld]\\nbinlog_row_query_log_events = 0\"}", "var_updated": "2021-01-03 22:43:45", - "var_name": "db_cnf_repl_no_logslavestatement", + "var_name": "db_cnf_rep_no_logslavestatement", "id": 6257 }, { @@ -3793,7 +3831,7 @@ "var_class": "file", "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/no_rep_syncbinlog.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_command: SET GLOBAL sync_binlog = 0;\\n# mariadb_default: SET GLOBAL sync_binlog = 1;\\n\\n# mysql_command: SET GLOBAL sync_binlog = 0;\\n# mysql_default: SET GLOBAL sync_binlog = 1;\\n\\n[mysqld]\\nsync_binlog = 0\"}", "var_updated": "2021-01-03 22:44:24", - "var_name": "db_cnf_repl_no_syncbinlog", + "var_name": "db_cnf_rep_no_syncbinlog", "id": 6258 }, { @@ -3835,6 +3873,38 @@ "var_updated": "2021-02-15 10:31:28", "var_name": "db_cnf_opt_with_trace", "id": 6273 + }, + { + "var_author": "admin Manager", + "var_class": "file", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/with_rep_networkcompress.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_documantation: https://mariadb.com/kb/en/replication-and-binary-log-system-variables/#slave_compressed_protocol\\n# mariadb_version: 5.5 \\n# mariadb_command: SET GLOBAL slave_compressed_protocol=1; \\n# mariadb_default: SET GLOBAL slave_compressed_protocol=0; \\n\\n# mysql_version: 5.5\\n\\n[mysqld]\\nloose_slave_compressed_protocol=1\"}", + "var_updated": "2021-03-01 11:56:21", + "var_name": "db_cnf_rep_with_compression", + "id": 6275 + }, + { + "var_author": "admin Manager", + "var_class": "file", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/no_sec_auth.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"[mysqld]\\nskip-grant-tables\"}", + "var_updated": "2021-05-12 09:05:30", + "var_name": "db_cnf_sec_no_auth", + "id": 6276 + }, + { + "var_author": "admin Manager", + "var_class": "file", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/with_rep_rowminimal.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_command: set global binlog_format='ROW';set global binlog_row_image=MINIMAL;\\n# mariadb_default: set global binlog_format='MIXED';set global binlog_row_image=MINIMAL;\\n\\n# mysql_command: set global binlog_format='ROW';set global binlog_row_image=MINIMAL;\\n# mysql_default: set global binlog_format='MIXED';set global binlog_row_image=MINIMAL;\\n\\n[mysqld]\\nloose_binlog_format=ROW\\nloose_binlog_row_image=MINIMAL\"}", + "var_updated": "2021-05-12 09:23:07", + "var_name": "db_cnf_rep_with_formatrowmin", + "id": 6277 + }, + { + "var_author": "admin Manager", + "var_class": "file", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/with_mem_interleave.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"[mysqld]\\nloose_innodb_numa_interleave=1\"}", + "var_updated": "2021-05-12 09:20:43", + "var_name": "db_cnf_mem_with_interleave", + "id": 6278 } ], "ruleset_public": false, @@ -3950,7 +4020,10 @@ "mariadb.svc.mrm.db.cnf.generic.rep_nologslavestatement", "mariadb.svc.mrm.db.cnf.generic.rep_nosyncbinlog", "mariadb.svc.mrm.db.cnf.generic.enginearchive", - 
"mariadb.svc.mrm.db.cnf.generic.opt_withtrace" + "mariadb.svc.mrm.db.cnf.generic.opt_withtrace", + "mariadb.svc.mrm.db.cnf.generic.sec_noauth", + "mariadb.svc.mrm.db.cnf.generic.rep_withrowminimal", + "mariadb.svc.mrm.db.cnf.generic.mem_withinterleave" ], "publications": [ "replication-manager" @@ -6761,6 +6834,78 @@ "responsibles": [ "replication-manager" ] + }, + { + "fset_name": "mariadb.security.noauth", + "ruleset_name": "mariadb.svc.mrm.db.cnf.generic.sec_noauth", + "variables": [ + { + "var_author": "admin Manager", + "var_class": "symlink", + "var_value": "{\"symlink\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/conf.d/20_no_sec_auth.cnf\",\"target\":\"../no_sec_auth.cnf\"}", + "var_updated": "2021-05-12 09:35:40", + "var_name": "symlink_withsecnoauth", + "id": 6279 + } + ], + "ruleset_public": false, + "ruleset_type": "contextual", + "rulesets": [], + "publications": [ + "replication-manager" + ], + "id": 1148, + "responsibles": [ + "replication-manager" + ] + }, + { + "fset_name": "mariadb.memory.interleave", + "ruleset_name": "mariadb.svc.mrm.db.cnf.generic.mem_withinterleave", + "variables": [ + { + "var_author": "admin Manager", + "var_class": "symlink", + "var_value": "{\"symlink\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/conf.d/30_with_mem_interleave.cnf\",\"target\":\"../with_mem_interleave.cnf\"}", + "var_updated": "2021-05-12 09:33:17", + "var_name": "symlink_withinterleave", + "id": 6280 + } + ], + "ruleset_public": false, + "ruleset_type": "contextual", + "rulesets": [], + "publications": [ + "replication-manager" + ], + "id": 1149, + "responsibles": [ + "replication-manager" + ] + }, + { + "fset_name": "mariadb.replication.minimal", + "ruleset_name": "mariadb.svc.mrm.db.cnf.generic.rep_withrowminimal", + "variables": [ + { + "var_author": "admin Manager", + "var_class": "symlink", + "var_value": "{\"symlink\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/conf.d/30_with_rep_rowminimal.cnf\",\"target\":\"../with_rep_rowminimal.cnf\"}", + "var_updated": "2021-05-12 09:45:28", + "var_name": "symlink_withrowminimal", + "id": 6281 + } + ], + "ruleset_public": false, + "ruleset_type": "contextual", + "rulesets": [], + "publications": [ + "replication-manager" + ], + "id": 1150, + "responsibles": [ + "replication-manager" + ] } ] } From ad2cdc08230cf173f1eaa881f40b1f6f6e79db37 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Wed, 12 May 2021 18:24:34 +0200 Subject: [PATCH 39/84] More fixing to cluster add via service plan --- cluster/cluster_has.go | 44 +++++++++++++++++-------- cluster/cluster_topo.go | 12 ++++--- cluster/prx.go | 56 ++++++++++++++++---------------- cluster/prx_mariadbshardproxy.go | 45 +++++++++++++++++++++++++ 4 files changed, 110 insertions(+), 47 deletions(-) diff --git a/cluster/cluster_has.go b/cluster/cluster_has.go index 0cd5468eb..b88bb1cf6 100644 --- a/cluster/cluster_has.go +++ b/cluster/cluster_has.go @@ -118,10 +118,13 @@ func (cluster *Cluster) HasAllDbUp() bool { return false } for _, s := range cluster.Servers { - if s.State == stateFailed || s.State == stateSuspect /*&& misc.Contains(cluster.ignoreList, s.URL) == false*/ { - return false + if s != nil { + if s.State == stateFailed || s.State == stateSuspect /*&& misc.Contains(cluster.ignoreList, s.URL) == false*/ { + return false + } } } + return true } @@ -130,8 +133,10 @@ func (cluster *Cluster) HasRequestDBRestart() bool { return false } for _, s := range cluster.Servers { - if s.HasRestartCookie() { - return true + if s != nil { + if s.HasRestartCookie() { + 
return true + } } } return false @@ -143,8 +148,10 @@ func (cluster *Cluster) HasRequestDBRollingRestart() bool { return false } for _, s := range cluster.Servers { - if !s.HasRestartCookie() { - return false + if s != nil { + if !s.HasRestartCookie() { + return false + } } } return ret @@ -156,17 +163,22 @@ func (cluster *Cluster) HasRequestDBRollingReprov() bool { return false } for _, s := range cluster.Servers { - if !s.HasReprovCookie() { - return false + if s != nil { + if !s.HasReprovCookie() { + return false + } } } + return ret } func (cluster *Cluster) HasRequestDBReprov() bool { for _, s := range cluster.Servers { - if s.HasReprovCookie() { - return true + if s != nil { + if s.HasReprovCookie() { + return true + } } } return false @@ -174,8 +186,10 @@ func (cluster *Cluster) HasRequestDBReprov() bool { func (cluster *Cluster) HasRequestProxiesRestart() bool { for _, p := range cluster.Proxies { - if p.HasRestartCookie() { - return true + if p != nil { + if p.HasRestartCookie() { + return true + } } } return false @@ -183,8 +197,10 @@ func (cluster *Cluster) HasRequestProxiesRestart() bool { func (cluster *Cluster) HasRequestProxiesReprov() bool { for _, p := range cluster.Proxies { - if p.HasReprovCookie() { - return true + if p != nil { + if p.HasReprovCookie() { + return true + } } } return false diff --git a/cluster/cluster_topo.go b/cluster/cluster_topo.go index 208cc80e2..4c9471884 100644 --- a/cluster/cluster_topo.go +++ b/cluster/cluster_topo.go @@ -102,12 +102,14 @@ func (cluster *Cluster) AddChildServers() error { } } for _, sv := range cluster.Servers { - cluster.LogPrintf(LvlDbg, "Inter cluster multi-source check drop unlinked server %s source cluster %s vs this cluster %s ", sv.URL, sv.GetSourceClusterName(), cluster.Name) - if sv.GetSourceClusterName() != cluster.Name && sv.GetSourceClusterName() != "" { + if sv != nil { + cluster.LogPrintf(LvlDbg, "Inter cluster multi-source check drop unlinked server %s source cluster %s vs this cluster %s ", sv.URL, sv.GetSourceClusterName(), cluster.Name) + if sv.GetSourceClusterName() != cluster.Name && sv.GetSourceClusterName() != "" { - if !sv.IsSlaveOfReplicationSource(cluster.Conf.MasterConn) { - cluster.LogPrintf(LvlInfo, "Inter cluster multi-source %s drop unlinked server %s ", sv.URL, cluster.Conf.MasterConn) - cluster.RemoveServerFromIndex(cluster.GetServerIndice(sv)) + if !sv.IsSlaveOfReplicationSource(cluster.Conf.MasterConn) { + cluster.LogPrintf(LvlInfo, "Inter cluster multi-source %s drop unlinked server %s ", sv.URL, cluster.Conf.MasterConn) + cluster.RemoveServerFromIndex(cluster.GetServerIndice(sv)) + } } } } diff --git a/cluster/prx.go b/cluster/prx.go index de5c91998..8cd930bbc 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -312,40 +312,40 @@ func (cluster *Cluster) backendStateChangeProxies() { // Used to monitor proxies call by main monitor loop func (cluster *Cluster) refreshProxies(wcg *sync.WaitGroup) { defer wcg.Done() - for _, pr := range cluster.Proxies { - var err error - err = pr.Refresh() - if err == nil { - pr.SetFailCount(0) - pr.SetState(stateProxyRunning) - if pr.HasWaitStartCookie() { - pr.DelWaitStartCookie() - } - } else { - fc := pr.GetFailCount() + 1 - // TODO: Can pr.ClusterGroup be different from cluster *Cluster? code doesn't imply it. 
if not change to - // cl, err := pr.GetCluster() - // cl.Conf.MaxFail - if fc >= cluster.Conf.MaxFail { - if fc == cluster.Conf.MaxFail { - cluster.LogPrintf("INFO", "Declaring %s proxy as failed %s:%s %s", pr.GetType(), pr.GetHost(), pr.GetPort(), err) + if pr != nil { + var err error + err = pr.Refresh() + if err == nil { + pr.SetFailCount(0) + pr.SetState(stateProxyRunning) + if pr.HasWaitStartCookie() { + pr.DelWaitStartCookie() } - pr.SetState(stateFailed) - pr.DelWaitStopCookie() - pr.DelRestartCookie() } else { - pr.SetState(stateSuspect) + fc := pr.GetFailCount() + 1 + // TODO: Can pr.ClusterGroup be different from cluster *Cluster? code doesn't imply it. if not change to + // cl, err := pr.GetCluster() + // cl.Conf.MaxFail + if fc >= cluster.Conf.MaxFail { + if fc == cluster.Conf.MaxFail { + cluster.LogPrintf("INFO", "Declaring %s proxy as failed %s:%s %s", pr.GetType(), pr.GetHost(), pr.GetPort(), err) + } + pr.SetState(stateFailed) + pr.DelWaitStopCookie() + pr.DelRestartCookie() + } else { + pr.SetState(stateSuspect) + } + } + if pr.GetPrevState() != pr.GetState() { + pr.SetPrevState(pr.GetState()) + } + if cluster.Conf.GraphiteMetrics { + pr.SendStats() } - } - if pr.GetPrevState() != pr.GetState() { - pr.SetPrevState(pr.GetState()) - } - if cluster.Conf.GraphiteMetrics { - pr.SendStats() } } - } func (cluster *Cluster) failoverProxies() { diff --git a/cluster/prx_mariadbshardproxy.go b/cluster/prx_mariadbshardproxy.go index 711cf2ba4..ffca7bfbe 100644 --- a/cluster/prx_mariadbshardproxy.go +++ b/cluster/prx_mariadbshardproxy.go @@ -180,6 +180,51 @@ func (cluster *Cluster) CheckMdbShardServersSchema(proxy *MariadbShardProxy) { } +func (proxy *MariadbShardProxy) Refresh() error { + if proxy.ShardProxy == nil { + return errors.New("Sharding proxy no database monitor yet initialize") + } + err := proxy.ShardProxy.Refresh() + if err != nil { + + return err + } + proxy.Version = proxy.ShardProxy.Variables["VERSION"] + + proxy.BackendsWrite = nil + proxy.BackendsRead = nil + + servers, _, _ := dbhelper.GetServers(proxy.ShardProxy.Conn) + for _, s := range servers { + myport := strconv.FormatUint(uint64(s.Port), 10) + var bke = Backend{ + Host: s.Host, + Port: myport, + PrxName: s.Host + ":" + myport, + PrxStatus: "ONLINE", + PrxHostgroup: "WRITE", + } + + //PrxConnections: s.Variables, + //PrxByteIn: strconv.Itoa(proxysqlByteOut), + //PrxByteOut: strconv.Itoa(proxysqlByteIn), + //PrxLatency: strconv.Itoa(proxysqlLatency), + + proxy.BackendsWrite = append(proxy.BackendsWrite, bke) + + var bkeread = Backend{ + Host: s.Host, + Port: myport, + PrxName: s.Host + ":" + myport, + PrxStatus: "ONLINE", + PrxHostgroup: "READ", + } + proxy.BackendsRead = append(proxy.BackendsRead, bkeread) + } + proxy.ClusterGroup.CheckMdbShardServersSchema(proxy) + return nil +} + func (cluster *Cluster) refreshMdbsproxy(oldmaster *ServerMonitor, proxy *MariadbShardProxy) error { if proxy.ShardProxy == nil { return errors.New("Sharding proxy no database monitor yet initialize") From 823a36d5d02cda1f2e7ef67de0766fa5c27dc31f Mon Sep 17 00:00:00 2001 From: svaroqui Date: Fri, 14 May 2021 11:45:58 +0200 Subject: [PATCH 40/84] Preserve state for low frenquency of server CheckPrivileges --- cluster/srv.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cluster/srv.go b/cluster/srv.go index 0db4f0996..8253484f8 100644 --- a/cluster/srv.go +++ b/cluster/srv.go @@ -633,6 +633,13 @@ func (server *ServerMonitor) Refresh() error { } if server.ClusterGroup.sme.GetHeartbeats()%30 == 0 { server.CheckPrivileges() + 
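
The nil guards in refreshProxies and the new MariadbShardProxy.Refresh both go through the DatabaseProxy abstraction rather than a concrete proxy struct. Read from the call sites in this patch alone, the interface has to expose at least the methods below; this is an inferred sketch, not the declaration from the source tree, and the exact names and return types outside these call sites are assumptions.

// Inferred from call sites in this patch set (refreshProxies, the cookie
// checks and the provisioning script hooks); treat signatures not visible
// in those call sites as assumptions.
type DatabaseProxy interface {
	Refresh() error
	GetType() string
	GetHost() string
	GetPort() string
	GetUser() string
	GetPass() string
	GetFailCount() int
	SetFailCount(int)
	GetState() string
	SetState(string)
	GetPrevState() string
	SetPrevState(string)
	HasWaitStartCookie() bool
	DelWaitStartCookie()
	DelWaitStopCookie()
	DelRestartCookie()
	HasRestartCookie() bool
	HasReprovCookie() bool
	SendStats() error
}

Keeping the monitoring loop generic this way is what lets MariadbShardProxy and the other proxy implementations plug in their own Refresh logic.
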
} else { + server.ClusterGroup.sme.PreserveState("ERR00007") + server.ClusterGroup.sme.PreserveState("ERR00006") + server.ClusterGroup.sme.PreserveState("ERR00008") + server.ClusterGroup.sme.PreserveState("ERR00015") + server.ClusterGroup.sme.PreserveState("ERR00078") + server.ClusterGroup.sme.PreserveState("ERR00009") } if server.ClusterGroup.Conf.FailEventScheduler && server.IsMaster() && !server.HasEventScheduler() { server.ClusterGroup.LogPrintf(LvlInfo, "Enable Event Scheduler on master") From 697e1ebd81068e5be7c6314f516215931baadfdc Mon Sep 17 00:00:00 2001 From: svaroqui Date: Mon, 17 May 2021 15:21:38 +0200 Subject: [PATCH 41/84] Disable cross origin protocol for graph --- dashboard/static/vendor/graph.js | 2 +- etc/local/config.toml.osx | 4 ++-- etc/local/features/graphite/config.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/dashboard/static/vendor/graph.js b/dashboard/static/vendor/graph.js index 34071b70a..fd8eb7631 100755 --- a/dashboard/static/vendor/graph.js +++ b/dashboard/static/vendor/graph.js @@ -1,4 +1,4 @@ -var graphite_url = "http://"+window.location.hostname+":10002"; // enter your graphite url, e.g. http://your.graphite.com +var graphite_url = window.location.protocol+"//"+window.location.hostname+":10002"; // enter your graphite url, e.g. http://your.graphite.com var dashboards = [ diff --git a/etc/local/config.toml.osx b/etc/local/config.toml.osx index 3719fb739..eeba62021 100644 --- a/etc/local/config.toml.osx +++ b/etc/local/config.toml.osx @@ -28,7 +28,7 @@ backup-mysqldump-path = "/usr/local/bin/mysqldump" #include="etc/local/features/backup-s3" #include="etc/local/features/compat1etc/local0" #include="etc/local/features/enforce" -#include="etc/local/features/graphite" +include="etc/local/features/graphite" #include="etc/local/features/ipv6" #include="etc/local/features/scheduler" #include="etc/local/masterslave" @@ -40,7 +40,7 @@ backup-mysqldump-path = "/usr/local/bin/mysqldump" #include="etc/local/masterslaveslave" #include="etc/local/multimaster" #include="etc/local/multimasterring" -include="etc/local/multisource" +#include="etc/local/multisource" #include="etc/local/multitiersalve" #include="etc/local/mutlitiercluster" #include="etc/local/postgres" diff --git a/etc/local/features/graphite/config.toml b/etc/local/features/graphite/config.toml index 4b30586c3..2abe3a984 100644 --- a/etc/local/features/graphite/config.toml +++ b/etc/local/features/graphite/config.toml @@ -17,9 +17,9 @@ haproxy-read-port = 3302 [Default] graphite-metrics = true +graphite-embedded = true graphite-carbon-host = "127.0.0.1" graphite-carbon-port = 2003 -graphite-embedded = true graphite-carbon-api-port = 10002 graphite-carbon-server-port = 10003 graphite-carbon-link-port = 7002 From e4d287c7e19158bef49df6bbdebe07e711c859d1 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Thu, 20 May 2021 10:39:49 +0200 Subject: [PATCH 42/84] Add splitredolog tag in configurator Fixing typo for innodbpurgethread in config increase innodb log size to half the bufferpool --- cluster/cluster_get.go | 4 +- .../opensvc/moduleset_mariadb.svc.mrm.db.json | 124 +++++++++++++----- 2 files changed, 90 insertions(+), 38 deletions(-) diff --git a/cluster/cluster_get.go b/cluster/cluster_get.go index 138d0db31..7ebe06b7f 100644 --- a/cluster/cluster_get.go +++ b/cluster/cluster_get.go @@ -838,7 +838,7 @@ func (cluster *Cluster) GetConfigInnoDBLogFileSize() string { if err != nil { return "1024" } - value = value / 10 + value = value / 2 if value < valuemin { value = valuemin } @@ 
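
The sizing change in GetConfigInnoDBLogFileSize matches the commit message: the suggested redo log is now half of the base value instead of one tenth, still clamped to a minimum. A rough worked example, assuming the base value is the buffer-pool size in MB and a 1024 MB floor (the floor value is an assumption taken from the function's error fallback):

// Illustration only; bufferPoolMB and valuemin stand in for values computed
// earlier in GetConfigInnoDBLogFileSize that are outside this hunk.
func exampleRedoLogSizing() int {
	valuemin := 1024              // assumed floor
	bufferPoolMB := 16384         // e.g. a 16 GB InnoDB buffer pool
	logFileMB := bufferPoolMB / 2 // this patch: half the pool instead of one tenth
	if logFileMB < valuemin {
		logFileMB = valuemin
	}
	return logFileMB // 8192 MB, where the previous /10 rule gave 1638 MB
}

The hunk that follows similarly pins GetConfigInnoDBPurgeThreads to a fixed value instead of tracking the write I/O thread count.
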
-893,7 +893,7 @@ func (cluster *Cluster) GetConfigInnoDBReadIoThreads() string { } func (cluster *Cluster) GetConfigInnoDBPurgeThreads() string { - return cluster.GetConfigInnoDBWriteIoThreads() + return "4" } func (cluster *Cluster) GetQueryRules() []config.QueryRule { diff --git a/share/opensvc/moduleset_mariadb.svc.mrm.db.json b/share/opensvc/moduleset_mariadb.svc.mrm.db.json index fe35f8050..94bd78ae1 100644 --- a/share/opensvc/moduleset_mariadb.svc.mrm.db.json +++ b/share/opensvc/moduleset_mariadb.svc.mrm.db.json @@ -2239,6 +2239,25 @@ } ], "fset_name": "mariadb.memory.interleave" + }, + { + "fset_stats": false, + "id": 408, + "filters": [ + { + "filter": { + "f_op": "=", + "f_field": "tag_name", + "f_value": "splitundolog", + "f_table": "v_tags", + "id": 365 + }, + "f_order": 0, + "f_log_op": "AND", + "filterset": null + } + ], + "fset_name": "mariadb.disk.splitundolog" } ], "rulesets": [ @@ -2251,8 +2270,8 @@ "rulesets": [ "mariadb.svc.env", "mariadb.svc.mrm.db.cnf.docker", - "mariadb.svc.mrm.db.cnf.generic", - "mariadb.svc.mrm.db.cnf.package" + "mariadb.svc.mrm.db.cnf.package", + "mariadb.svc.mrm.db.cnf.generic" ], "publications": [ "replication-manager" @@ -2677,8 +2696,8 @@ { "var_author": "admin Manager", "var_class": "file", - "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/engine_innodb.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# INNODB\\n# ------\\n# mariadb_command: SET GLOBAL innodb_adaptive_hash_index=0;SET GLOBAL innodb_buffer_pool_size = %%ENV:SVC_CONF_ENV_MAX_MEM%%*1024*1024;SET GLOBAL innodb_io_capacity = %%ENV:CHECKPOINTIOPS%%;SET GLOBAL innodb_io_capacity_max = %%ENV:SVC_CONF_ENV_MAX_IOPS%%;SET GLOBAL innodb_max_dirty_pages_pct = %%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT%%;SET GLOBAL innodb_max_dirty_pages_pct_lwm = %%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT_LWM%%;SET GLOBAL innodb_flush_neighbors = 1;EXECUTE IMMEDIATE IF(@@innodb_buffer_pool_instances=%%ENV:IBPINSTANCES%%,\\\"SELECT 'Unchanged innodb_buffer_pool_instances'\\\", \\\"SET GLOBAL innodb_buffer_pool_instances =%%ENV:IBPINSTANCES%%\\\"); EXECUTE IMMEDIATE IF(@@innodb_read_io_threads=%%ENV:SVC_CONF_ENV_INNODB_READ_IO_THREADS%%,\\\"SELECT 'Unchanged innodb_read_io_threads'\\\", \\\"SET GLOBAL innodb_read_io_threads = %%ENV:SVC_CONF_ENV_INNODB_READ_IO_THREADS%%\\\");EXECUTE IMMEDIATE IF(@@innodb_write_io_threads=%%ENV:SVC_CONF_ENV_INNODB_WRITE_IO_THREADS%%,\\\"SELECT 'Unchanged innodb_write_io_threads'\\\", \\\"SET GLOBAL innodb_write_io_threads = %%ENV:ENV:SVC_CONF_ENV_INNODB_WRITE_IO_THREADS%%\\\");EXECUTE IMMEDIATE IF(@@innodb_log_buffer_size=%%ENV:SVC_CONF_ENV_INNODB_LOG_BUFFER_SIZE%%*1024*1024,\\\"SELECT 'Unchanged innodb_log_buffer_size'\\\", \\\"SET GLOBAL innodb_log_buffer_size = %%ENV:SVC_CONF_ENV_INNODB_LOG_BUFFER_SIZE%%*1024*1024\\\");SET GLOBAL innodb_thread_concurrency = 0;EXECUTE IMMEDIATE IF(@@innodb_autoinc_lock_mode=2,\\\"SELECT 'Unchanged innodb_autoinc_lock_mode'\\\", \\\"SET GLOBAL innodb_autoinc_lock_mode=2\\\");SET GLOBAL innodb_stats_on_metadata = 0;SET GLOBAL innodb_buffer_pool_dump_at_shutdown = 1;EXECUTE IMMEDIATE IF(@@innodb_buffer_pool_load_at_startup=1,\\\"SELECT 'Unchanged innodb_buffer_pool_load_at_startup'\\\", \\\"SET GLOBAL innodb_buffer_pool_load_at_startup=1\\\");EXECUTE IMMEDIATE IF(@@innodb_open_files=16384,\\\"SELECT 'Unchanged innodb_open_files'\\\", \\\"SET GLOBAL innodb_open_files=16384\\\");EXECUTE IMMEDIATE 
IF(@@innodb_purge_threads=%%ENV:SVC_CONF_ENV_INNODB_PURGE_THREADS%%,\\\"SELECT 'Unchanged innodb_purge_threads'\\\", \\\"SET GLOBAL innodb_purge_threads = %%ENV:SVC_CONF_ENV_INNODB_PURGE_THREADS%%\\\");SET GLOBAL innodb_concurrency_tickets = 5000;SET GLOBAL innodb_print_all_deadlocks = 1; EXECUTE IMMEDIATE IF(@@innodb_large_prefix=1 OR @@innodb_large_prefix IS NULL,\\\"SELECT 'Unchanged innodb_large_prefix'\\\", \\\"SET GLOBAL innodb_large_prefix = 1\\\"); EXECUTE IMMEDIATE IF(@@innodb_file_format='barracuda' OR @@innodb_file_format IS NULL,\\\"SELECT 'Unchanged barracuda'\\\", \\\"SET GLOBAL innodb_file_format='barracuda'\\\");SET GLOBAL innodb_default_row_format=dynamic;\\n \\n[mysqld-5.5]\\ninnodb_file_format = barracuda\\n[mysqld-5.6]\\ninnodb_file_format = barracuda\\n[mysqld-5.7]\\ninnodb_file_format = barracuda \\n#deprecated in 5.8\\n\\n[mariadb]\\n#innodb_buffer_pool_populate=0 \\n[mariadb 5.5]\\ninnodb_file_format = barracuda\\n[mariadb 10.0]\\ninnodb_file_format = barracuda\\n \\n[mariadb 10.1]\\ninnodb_force_primary_key = 1\\ninnodb_default_row_format=dynamic\\n#innodb_defragment = 1\\ninnodb_file_format = barracuda\\n\\n[mariadb 10.2]\\ninnodb_lock_schedule_algorithm=fcfs\\ninnodb_file_format = barracuda\\n\\n[mariadb 10.3]\\ninnodb-page-cleaners= %%ENV:NODES_CPU_CORES%%\\n\\n[mysqld]\\ntransaction-isolation = READ-COMMITTED\\ndefault_storage_engine = InnoDB\\ninnodb_buffer_pool_size = %%ENV:SVC_CONF_ENV_INNODB_BUFFER_POOL_SIZE%%M\\ninnodb_buffer_pool_instances = %%ENV:SVC_CONF_ENV_INNODB_BUFFER_POOL_INSTANCES%%\\ninnodb_read_io_threads = %%ENV:SVC_CONF_ENV_INNODB_READ_IO_THREADS%%\\ninnodb_write_io_threads = %%ENV:SVC_CONF_ENV_INNODB_WRITE_IO_THREADS%%\\ninnodb_io_capacity = %%ENV:CHECKPOINTIOPS%%\\ninnodb_io_capacity_max = %%ENV:SVC_CONF_ENV_MAX_IOPS%%\\ninnodb_max_dirty_pages_pct = %%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT%%\\ninnodb_max_dirty_pages_pct_lwm = %%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT_LWM%%\\ninnodb_log_file_size = %%ENV:SVC_CONF_ENV_INNODB_LOG_FILE_SIZE%%M\\n# innodb_log_buffer_size change Crash 10.2.14\\ninnodb_log_buffer_size = %%ENV:SVC_CONF_ENV_INNODB_LOG_BUFFER_SIZE%%M\\nloose_innodb_purge_threads = ENV:%%SVC_CONF_ENV_INNODB_PURGE_THREADS%%\\ninnodb_adaptive_hash_index = 0\\ninnodb_file_per_table= 1\\ninnodb_log_files_in_group = 1\\ninnodb_flush_neighbors = 1\\ninnodb_thread_concurrency = 0\\ninnodb_flush_method = O_DIRECT\\n#innodb_undo_tablespaces = 4\\ninnodb_autoinc_lock_mode = 2\\ninnodb_stats_on_metadata = 0\\ninnodb_buffer_pool_dump_at_shutdown = 1\\ninnodb_buffer_pool_load_at_startup = 1\\ninnodb_open_files = 16384\\ninnodb_concurrency_tickets = 5000\\ninnodb_print_all_deadlocks = 1 \\n# innodb_monitor_enable = all // cost perf \\n\\n[mariadb 10.4]\\ninnodb_large_prefix = 1\\ninnodb_file_format = barracuda\\ninnodb_default_row_format=dynamic\\n\"}", - "var_updated": "2021-01-15 15:44:23", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/engine_innodb.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# INNODB\\n# ------\\n# mariadb_command: SET GLOBAL innodb_adaptive_hash_index=0;SET GLOBAL innodb_buffer_pool_size = %%ENV:SVC_CONF_ENV_MAX_MEM%%*1024*1024;SET GLOBAL innodb_io_capacity = %%ENV:CHECKPOINTIOPS%%;SET GLOBAL innodb_io_capacity_max = %%ENV:SVC_CONF_ENV_MAX_IOPS%%;SET GLOBAL innodb_max_dirty_pages_pct = %%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT%%;SET GLOBAL innodb_max_dirty_pages_pct_lwm = 
%%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT_LWM%%;SET GLOBAL innodb_flush_neighbors = 1;EXECUTE IMMEDIATE IF(@@innodb_buffer_pool_instances=%%ENV:IBPINSTANCES%%,\\\"SELECT 'Unchanged innodb_buffer_pool_instances'\\\", \\\"SET GLOBAL innodb_buffer_pool_instances =%%ENV:IBPINSTANCES%%\\\"); EXECUTE IMMEDIATE IF(@@innodb_read_io_threads=%%ENV:SVC_CONF_ENV_INNODB_READ_IO_THREADS%%,\\\"SELECT 'Unchanged innodb_read_io_threads'\\\", \\\"SET GLOBAL innodb_read_io_threads = %%ENV:SVC_CONF_ENV_INNODB_READ_IO_THREADS%%\\\");EXECUTE IMMEDIATE IF(@@innodb_write_io_threads=%%ENV:SVC_CONF_ENV_INNODB_WRITE_IO_THREADS%%,\\\"SELECT 'Unchanged innodb_write_io_threads'\\\", \\\"SET GLOBAL innodb_write_io_threads = %%ENV:ENV:SVC_CONF_ENV_INNODB_WRITE_IO_THREADS%%\\\");EXECUTE IMMEDIATE IF(@@innodb_log_buffer_size=%%ENV:SVC_CONF_ENV_INNODB_LOG_BUFFER_SIZE%%*1024*1024,\\\"SELECT 'Unchanged innodb_log_buffer_size'\\\", \\\"SET GLOBAL innodb_log_buffer_size = %%ENV:SVC_CONF_ENV_INNODB_LOG_BUFFER_SIZE%%*1024*1024\\\");SET GLOBAL innodb_thread_concurrency = 0;EXECUTE IMMEDIATE IF(@@innodb_autoinc_lock_mode=2,\\\"SELECT 'Unchanged innodb_autoinc_lock_mode'\\\", \\\"SET GLOBAL innodb_autoinc_lock_mode=2\\\");SET GLOBAL innodb_stats_on_metadata = 0;SET GLOBAL innodb_buffer_pool_dump_at_shutdown = 1;EXECUTE IMMEDIATE IF(@@innodb_buffer_pool_load_at_startup=1,\\\"SELECT 'Unchanged innodb_buffer_pool_load_at_startup'\\\", \\\"SET GLOBAL innodb_buffer_pool_load_at_startup=1\\\");EXECUTE IMMEDIATE IF(@@innodb_open_files=16384,\\\"SELECT 'Unchanged innodb_open_files'\\\", \\\"SET GLOBAL innodb_open_files=16384\\\");EXECUTE IMMEDIATE IF(@@innodb_purge_threads=%%ENV:SVC_CONF_ENV_INNODB_PURGE_THREADS%%,\\\"SELECT 'Unchanged innodb_purge_threads'\\\", \\\"SET GLOBAL innodb_purge_threads = %%ENV:SVC_CONF_ENV_INNODB_PURGE_THREADS%%\\\");SET GLOBAL innodb_concurrency_tickets = 5000;SET GLOBAL innodb_print_all_deadlocks = 1; EXECUTE IMMEDIATE IF(@@innodb_large_prefix=1 OR @@innodb_large_prefix IS NULL,\\\"SELECT 'Unchanged innodb_large_prefix'\\\", \\\"SET GLOBAL innodb_large_prefix = 1\\\"); EXECUTE IMMEDIATE IF(@@innodb_file_format='barracuda' OR @@innodb_file_format IS NULL,\\\"SELECT 'Unchanged barracuda'\\\", \\\"SET GLOBAL innodb_file_format='barracuda'\\\");SET GLOBAL innodb_default_row_format=dynamic;\\n \\n[mysqld-5.5]\\ninnodb_file_format = barracuda\\n[mysqld-5.6]\\ninnodb_file_format = barracuda\\n[mysqld-5.7]\\ninnodb_file_format = barracuda \\n#deprecated in 5.8\\n\\n[mariadb]\\n#innodb_buffer_pool_populate=0 \\n[mariadb 5.5]\\ninnodb_file_format = barracuda\\n[mariadb 10.0]\\ninnodb_file_format = barracuda\\n \\n[mariadb 10.1]\\ninnodb_force_primary_key = 1\\ninnodb_default_row_format=dynamic\\n#innodb_defragment = 1\\ninnodb_file_format = barracuda\\n\\n[mariadb 10.2]\\ninnodb_lock_schedule_algorithm=fcfs\\ninnodb_file_format = barracuda\\n\\n[mariadb 10.3]\\ninnodb-page-cleaners= %%ENV:NODES_CPU_CORES%%\\n\\n[mysqld]\\ntransaction-isolation = READ-COMMITTED\\ndefault_storage_engine = InnoDB\\ninnodb_buffer_pool_size = %%ENV:SVC_CONF_ENV_INNODB_BUFFER_POOL_SIZE%%M\\ninnodb_buffer_pool_instances = %%ENV:SVC_CONF_ENV_INNODB_BUFFER_POOL_INSTANCES%%\\ninnodb_read_io_threads = %%ENV:SVC_CONF_ENV_INNODB_READ_IO_THREADS%%\\ninnodb_write_io_threads = %%ENV:SVC_CONF_ENV_INNODB_WRITE_IO_THREADS%%\\ninnodb_io_capacity = %%ENV:CHECKPOINTIOPS%%\\ninnodb_io_capacity_max = %%ENV:SVC_CONF_ENV_MAX_IOPS%%\\ninnodb_max_dirty_pages_pct = %%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT%%\\ninnodb_max_dirty_pages_pct_lwm = 
%%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT_LWM%%\\ninnodb_log_file_size = %%ENV:SVC_CONF_ENV_INNODB_LOG_FILE_SIZE%%M\\n# innodb_log_buffer_size change Crash 10.2.14\\ninnodb_log_buffer_size = %%ENV:SVC_CONF_ENV_INNODB_LOG_BUFFER_SIZE%%M\\nloose_innodb_purge_threads = %%ENV:SVC_CONF_ENV_INNODB_PURGE_THREADS%%\\ninnodb_adaptive_hash_index = 0\\ninnodb_file_per_table= 1\\ninnodb_log_files_in_group = 1\\ninnodb_flush_neighbors = 1\\ninnodb_thread_concurrency = 0\\ninnodb_flush_method = O_DIRECT\\n#innodb_undo_tablespaces = 4\\ninnodb_autoinc_lock_mode = 2\\ninnodb_stats_on_metadata = 0\\ninnodb_buffer_pool_dump_at_shutdown = 1\\ninnodb_buffer_pool_load_at_startup = 1\\ninnodb_open_files = 16384\\ninnodb_concurrency_tickets = 5000\\ninnodb_print_all_deadlocks = 1 \\n# innodb_monitor_enable = all // cost perf \\n\\n[mariadb 10.4]\\ninnodb_large_prefix = 1\\ninnodb_file_format = barracuda\\ninnodb_default_row_format=dynamic\\n\"}", + "var_updated": "2021-05-20 10:26:25", "var_name": "db_cnf_engine_innodb", "id": 5917 }, @@ -2861,8 +2880,8 @@ { "var_author": "admin Manager", "var_class": "file", - "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/no_disk_doublewrite.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_version: 5.0\\n# mariadb_documentation: https://mariadb.com/kb/en/innodb-doublewrite-buffer/\\n# mariadb_command: set global innodb_doublewrite=0;\\n# mariadb_default: set global innodb_doublewrite=1;\\n\\n# mariadb_command_since: \\n# mysql_version: 5.0\\n# mysql_command: set global innodb_doublewrite=0;\\n# mysql_default: set global innodb_doublewrite=1;\\n# mysql_command_since: \\n# mariadb_command: SET GLOBAL innodb_doublewrite = 0;\\n# mysql_command: SET GLOBAL innodb_doublewrite = 0;\\n\\n[mysqld]\\ninnodb_doublewrite=0\\ninnodb_checksum_algorithm = none\"}", - "var_updated": "2021-01-02 09:58:26", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/no_disk_doublewrite.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_version: 5.0\\n# mariadb_documentation: https://mariadb.com/kb/en/innodb-doublewrite-buffer/\\n# mariadb_command: set global innodb_doublewrite=0;\\n# mariadb_default: set global innodb_doublewrite=1;\\n\\n# mariadb_command_since: \\n# mysql_version: 5.0\\n# mysql_command: set global innodb_doublewrite=0;\\n# mysql_default: set global innodb_doublewrite=1;\\n# mysql_command_since: \\n# mariadb_command: SET GLOBAL innodb_doublewrite = 0;\\n# mysql_command: SET GLOBAL innodb_doublewrite = 0;\\n\\n[mysqld]\\ninnodb_doublewrite=0\\nloose_innodb_checksum_algorithm = none\"}", + "var_updated": "2021-05-18 19:05:37", "var_name": "db_cnf_disk_no_doublewrite", "id": 5942 }, @@ -3037,8 +3056,8 @@ { "var_author": "admin Manager", "var_class": "file", - "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/with_sec_keyfileencrypt.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"[mariadb]\\n# File Key Management\\nplugin-load-add = file_key_management\\nloose_file_key_management_filename = %%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/ssl/keys\\nloose_file-key-management-encryption-algorithm = aes_ctr\\n# InnoDB encryption 
setup\\nloose_innodb-encrypt-tables=ON\\nloose_innodb-encrypt-log=ON\\nloose_innodb-encryption-rotate-key-age=1024\\nloose_innodb-encryption-threads=4\\nloose_innodb-tablespaces-encryption\\n\\nloose_encrypt_tmp_files = ON\\nloose_encrypt_binlog = ON\\nloose_encrypt_tmp_disk_tables = 1\\n\\n\"}", - "var_updated": "2021-02-06 08:54:16", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/with_sec_keyfileencrypt.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_alert:10.5.9 MDEV-25121\\n\\n[mariadb]\\n# File Key Management\\nplugin-load-add = file_key_management\\nloose_file_key_management_filename = %%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/ssl/keys\\nloose_file-key-management-encryption-algorithm = aes_ctr\\n# InnoDB encryption setup\\nloose_innodb-encrypt-tables=ON\\nloose_innodb-encrypt-log=ON\\nloose_innodb-encryption-rotate-key-age=1024\\nloose_innodb-encryption-threads=4\\nloose_innodb-tablespaces-encryption\\n\\nloose_encrypt_tmp_files = ON\\nloose_encrypt_binlog = ON\\nloose_encrypt_tmp_disk_tables = 1\\n\\n\"}", + "var_updated": "2021-05-19 19:18:31", "var_name": "db_cnf_sec_with_keyfileencrypt", "id": 6013 }, @@ -3905,26 +3924,34 @@ "var_updated": "2021-05-12 09:20:43", "var_name": "db_cnf_mem_with_interleave", "id": 6278 + }, + { + "var_author": "admin Manager", + "var_class": "file", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/with_disk_splitundolog.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_documentation: https://mariadb.com/kb/en/innodb-system-variables/#innodb_undo_log_truncate\\n# mariadb_documentation: https://mariadb.com/kb/en/innodb-system-variables/#innodb_rollback_segments\\n# mariadb_documentation: https://mysqlonarm.github.io/2x-mysql-performance/\\n\\n[mysqld]\\nloose_innodb_undo_tablespaces=64\\nloose_innodb_undo_log_truncate=1\"}", + "var_updated": "2021-05-20 10:19:15", + "var_name": "db_cnf_disk_splitundo", + "id": 6282 } ], "ruleset_public": false, "ruleset_type": "explicit", "rulesets": [ - "mariadb.svc.mrm.db.cnf.generic.disk_withinnodbpagecompression", - "mariadb.svc.mrm.db.cnf.generic.withlogaudit", + "mariadb.svc.mrm.db.cnf.generic.rep_withsemisync", "mariadb.svc.mrm.db.cnf.generic.engineinnodb", + "mariadb.svc.mrm.db.cnf.generic.ent_withthreadpool", "mariadb.svc.mrm.db.cnf.generic.net_noquerycache", - "mariadb.svc.mrm.db.cnf.generic.rep_withsemisync", + "mariadb.svc.mrm.db.cnf.generic.disk_withinnodbpagecompression", + "mariadb.svc.mrm.db.cnf.generic.enginespider", + "mariadb.svc.mrm.db.cnf.generic.enginetokudb", + "mariadb.svc.mrm.db.cnf.generic.enginerocksdb", "mariadb.svc.mrm.db.cnf.generic.withloggeneral", "mariadb.svc.mrm.db.cnf.generic.withlogmetadatalock", + "mariadb.svc.mrm.db.cnf.generic.withlogaudit", "mariadb.svc.mrm.db.cnf.generic.withlogsqlerror", - "mariadb.svc.mrm.db.cnf.generic.enginerocksdb", - "mariadb.svc.mrm.db.cnf.generic.enginespider", - "mariadb.svc.mrm.db.cnf.generic.ent_withthreadpool", - "mariadb.svc.mrm.db.cnf.generic.enginetokudb", "mariadb.svc.mrm.db.cnf.generic.rep_withmultidomains", - "mariadb.svc.mrm.db.cnf.generic.rep_nologslaveupdates", "mariadb.svc.mrm.db.cnf.generic.rep_withmysqlgtid", + "mariadb.svc.mrm.db.cnf.generic.rep_nologslaveupdates", "mariadb.svc.mrm.db.cnf.generic.disk_withsmallredolog", "mariadb.svc.mrm.db.cnf.generic.rep_withwsrep", "mariadb.svc.mrm.db.cnf.generic.opt_noautocommit", @@ -3941,19 
+3968,20 @@ "mariadb.svc.mrm.db.cnf.generic.opt_witheits", "mariadb.svc.mrm.db.cnf.generic.disk_noodirect", "mariadb.svc.mrm.db.cnf.generic.disk_withdefrag", - "mariadb.svc.mrm.db.cnf.generic.sec_withcheckpwdsimple", - "mariadb.svc.mrm.db.cnf.generic.sec_withcheckpwdcracklib", + "mariadb.svc.mrm.db.cnf.generic.engineblackhole", "mariadb.svc.mrm.db.cnf.generic.engineoqgraph", "mariadb.svc.mrm.db.cnf.generic.engineconnect", - "mariadb.svc.mrm.db.cnf.generic.engineblackhole", + "mariadb.svc.mrm.db.cnf.generic.sec_withcheckpwdsimple", + "mariadb.svc.mrm.db.cnf.generic.sec_withcheckpwdcracklib", "mariadb.svc.mrm.db.cnf.generic.disk_withssd", "mariadb.svc.mrm.db.cnf.generic.disk_withzfs", "mariadb.svc.mrm.db.cnf.generic.sec_withkeyfileencrypt", - "mariadb.svc.mrm.db.cnf.generic.net_withproxyprotocol", "mariadb.svc.mrm.db.cnf.generic.net_withresolvdns", + "mariadb.svc.mrm.db.cnf.generic.net_withproxyprotocol", "mariadb.svc.mrm.db.cnf.generic.withlogtable", "mariadb.svc.mrm.db.cnf.generic.disk_withdiskmonitor", "mariadb.svc.mrm.db.cnf.generic.sec_withssl", + "mariadb.svc.mrm.db.cnf.generic.charset_nohandshake", "mariadb.svc.mrm.db.cnf.generic.rep_withgtidstrict", "mariadb.svc.mrm.db.cnf.generic.sec_withlocalinfile", "mariadb.svc.mrm.db.cnf.generic.opt_noicp", @@ -3965,41 +3993,40 @@ "mariadb.svc.mrm.db.cnf.generic.withcharsetmb4cs", "mariadb.svc.mrm.db.cnf.generic.withcharsetutf8cs", "mariadb.svc.mrm.db.cnf.generic.withcharsetutf8ci", - "mariadb.svc.mrm.db.cnf.generic.opt_withnestedjoin", "mariadb.svc.mrm.db.cnf.generic.opt_withhashjoin", - "mariadb.svc.mrm.db.cnf.generic.opt_withextendedkeys", - "mariadb.svc.mrm.db.cnf.generic.opt_withsubquerycache", + "mariadb.svc.mrm.db.cnf.generic.opt_withnestedjoin", "mariadb.svc.mrm.db.cnf.generic.opt_withfirstmatch", - "mariadb.svc.mrm.db.cnf.generic.opt_withloosescan", "mariadb.svc.mrm.db.cnf.generic.opt_withsemijoincache", + "mariadb.svc.mrm.db.cnf.generic.opt_withloosescan", + "mariadb.svc.mrm.db.cnf.generic.opt_withextendedkeys", "mariadb.svc.mrm.db.cnf.generic.opt_noderivedmerge", + "mariadb.svc.mrm.db.cnf.generic.opt_notableelimination", + "mariadb.svc.mrm.db.cnf.generic.opt_nosemijoin", "mariadb.svc.mrm.db.cnf.generic.opt_noderivedwithkeys", "mariadb.svc.mrm.db.cnf.generic.opt_nointoexists", "mariadb.svc.mrm.db.cnf.generic.opt_nomaterialization", "mariadb.svc.mrm.db.cnf.generic.opt_nomrr", "mariadb.svc.mrm.db.cnf.generic.opt_noouterjoincache", - "mariadb.svc.mrm.db.cnf.generic.opt_nosemijoin", - "mariadb.svc.mrm.db.cnf.generic.opt_notableelimination", + "mariadb.svc.mrm.db.cnf.generic.opt_withsubquerycache", "mariadb.svc.mrm.db.cnf.generic.engines3", - "mariadb.svc.mrm.db.cnf.generic.enginemroonga", "mariadb.svc.mrm.db.cnf.generic.opt_withutctime", - "mariadb.svc.mrm.db.cnf.generic.charset_nohandshake", + "mariadb.svc.mrm.db.cnf.generic.enginemroonga", "mariadb.svc.mrm.db.cnf.generic.opt_withreadcommitted", - "mariadb.svc.mrm.db.cnf.generic.rep_withstatement", + "mariadb.svc.mrm.db.cnf.generic.rep_withlossyconv", "mariadb.svc.mrm.db.cnf.generic.rep_withidempotent", "mariadb.svc.mrm.db.cnf.generic.rep_withrow", - "mariadb.svc.mrm.db.cnf.generic.rep_withlossyconv", + "mariadb.svc.mrm.db.cnf.generic.rep_withstatement", "mariadb.svc.mrm.db.cnf.generic.enginemcs", "mariadb.svc.mrm.db.cnf.generic.sec_withed25519", "mariadb.svc.mrm.db.cnf.generic.opt_withsequence", - "mariadb.svc.mrm.db.cnf.generic.opt_nodcp", "mariadb.svc.mrm.db.cnf.generic.opt_noecp", + "mariadb.svc.mrm.db.cnf.generic.opt_noscp", "mariadb.svc.mrm.db.cnf.generic.opt_nohcp", + 
"mariadb.svc.mrm.db.cnf.generic.opt_nodcp", "mariadb.svc.mrm.db.cnf.generic.opt_nointosubquery", "mariadb.svc.mrm.db.cnf.generic.opt_nolateralderived", - "mariadb.svc.mrm.db.cnf.generic.opt_noscp", - "mariadb.svc.mrm.db.cnf.generic.opt_nobloomfilter", "mariadb.svc.mrm.db.cnf.generic.opt_withimsi", + "mariadb.svc.mrm.db.cnf.generic.opt_nobloomfilter", "mariadb.svc.mrm.db.cnf.generic.rep_withtrustfunctioncreators", "mariadb.svc.mrm.db.cnf.generic.net_nobind", "mariadb.svc.mrm.db.cnf.generic.disk_withrelayspacelimit", @@ -4008,13 +4035,13 @@ "mariadb.svc.mrm.db.cnf.generic.rep_withminimal", "mariadb.svc.mrm.db.cnf.generic.disk_noadaptativeflush", "mariadb.svc.mrm.db.cnf.generic.opt_withhistograms", + "mariadb.svc.mrm.db.cnf.generic.opt_withreadsnapshot", "mariadb.svc.mrm.db.cnf.generic.opt_withreaddirty", "mariadb.svc.mrm.db.cnf.generic.opt_withreadserialized", - "mariadb.svc.mrm.db.cnf.generic.opt_withreadsnapshot", "mariadb.svc.mrm.db.cnf.generic.rep_withaggressive", "mariadb.svc.mrm.db.cnf.generic.log_nodeadlocks", - "mariadb.svc.mrm.db.cnf.generic.log_noslowddlqueries", "mariadb.svc.mrm.db.cnf.generic.log_noslowqueries", + "mariadb.svc.mrm.db.cnf.generic.log_noslowddlqueries", "mariadb.svc.mrm.db.cnf.generic.log_noslowslavequeries", "mariadb.svc.mrm.db.cnf.generic.log_nowarnings", "mariadb.svc.mrm.db.cnf.generic.rep_nologslavestatement", @@ -4022,8 +4049,9 @@ "mariadb.svc.mrm.db.cnf.generic.enginearchive", "mariadb.svc.mrm.db.cnf.generic.opt_withtrace", "mariadb.svc.mrm.db.cnf.generic.sec_noauth", + "mariadb.svc.mrm.db.cnf.generic.mem_withinterleave", "mariadb.svc.mrm.db.cnf.generic.rep_withrowminimal", - "mariadb.svc.mrm.db.cnf.generic.mem_withinterleave" + "mariadb.svc.mrm.db.cnf.generic.disk_withsplitundolog" ], "publications": [ "replication-manager" @@ -6906,6 +6934,30 @@ "responsibles": [ "replication-manager" ] + }, + { + "fset_name": "mariadb.disk.splitundolog", + "ruleset_name": "mariadb.svc.mrm.db.cnf.generic.disk_withsplitundolog", + "variables": [ + { + "var_author": "admin Manager", + "var_class": "symlink", + "var_value": "{\"symlink\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/conf.d/50_with_disk_splitundolog.cnf\",\"target\":\"../with_disk_splitundolog.cnf\"}", + "var_updated": "2021-05-20 10:23:08", + "var_name": "db_link_spllitundolog", + "id": 6283 + } + ], + "ruleset_public": false, + "ruleset_type": "contextual", + "rulesets": [], + "publications": [ + "replication-manager" + ], + "id": 1151, + "responsibles": [ + "replication-manager" + ] } ] } From 64934da68c8cf89a2130f0e4690f390503e72785 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Wed, 2 Jun 2021 14:45:03 +0200 Subject: [PATCH 43/84] Rig the election for switchover in case of multiple prefered master --- cluster/cluster_fail.go | 2 +- cluster/cluster_set.go | 1 + etc/local/config.toml.osx | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cluster/cluster_fail.go b/cluster/cluster_fail.go index 8290413a4..27b144ac8 100644 --- a/cluster/cluster_fail.go +++ b/cluster/cluster_fail.go @@ -689,7 +689,7 @@ func (cluster *Cluster) electSwitchoverCandidate(l []*ServerMonitor, forcingLog } /* Rig the election if the examined slave is preferred candidate master in switchover */ - if sl.URL == cluster.Conf.PrefMaster { + if cluster.IsInPreferedHosts(sl) { if (cluster.Conf.LogLevel > 1 || forcingLog) && cluster.IsInFailover() { cluster.LogPrintf(LvlDbg, "Election rig: %s elected as preferred master", sl.URL) } diff --git a/cluster/cluster_set.go b/cluster/cluster_set.go index 
749cac249..7423d74c2 100644 --- a/cluster/cluster_set.go +++ b/cluster/cluster_set.go @@ -318,6 +318,7 @@ func (cluster *Cluster) SetBenchMethod(m string) { cluster.benchmarkType = m } +// SetPrefMaster is used by regtest test_switchover_semisync_switchback_prefmaster_norplcheck func (cluster *Cluster) SetPrefMaster(PrefMaster string) { for _, srv := range cluster.Servers { if srv.URL == PrefMaster || srv.Name == PrefMaster { diff --git a/etc/local/config.toml.osx b/etc/local/config.toml.osx index eeba62021..56695e4f0 100644 --- a/etc/local/config.toml.osx +++ b/etc/local/config.toml.osx @@ -28,12 +28,12 @@ backup-mysqldump-path = "/usr/local/bin/mysqldump" #include="etc/local/features/backup-s3" #include="etc/local/features/compat1etc/local0" #include="etc/local/features/enforce" -include="etc/local/features/graphite" +#include="etc/local/features/graphite" #include="etc/local/features/ipv6" #include="etc/local/features/scheduler" #include="etc/local/masterslave" #include="etc/local/masterslave/consul" -##include="etc/local/masterslave/haproxy" +include="etc/local/masterslave/haproxy" #include="etc/local/masterslave/maxscale" #include="etc/local/masterslave/myproxy" #include="etc/local/masterslave/shardproxy" From a35dce0839d6ba5d1c26fc9398fd452d0a9932e0 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Thu, 3 Jun 2021 12:24:11 +0200 Subject: [PATCH 44/84] Fixing switchover with multiple prefered master #345 --- cluster/cluster_fail.go | 18 ++++++++------- cluster/cluster_get.go | 4 ++-- cluster/cluster_set.go | 6 ++--- cluster/cluster_topo.go | 2 +- config/config.go | 1 + etc/local/config.toml.osx | 3 ++- etc/local/features/prefered/default.toml | 28 ++++++++++++++++++++++++ main.go | 1 + server/api_cluster.go | 2 +- 9 files changed, 49 insertions(+), 16 deletions(-) create mode 100644 etc/local/features/prefered/default.toml diff --git a/cluster/cluster_fail.go b/cluster/cluster_fail.go index 27b144ac8..63137a121 100644 --- a/cluster/cluster_fail.go +++ b/cluster/cluster_fail.go @@ -569,14 +569,6 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { // if consul or internal proxy need to adapt read only route to new slaves cluster.backendStateChangeProxies() - if fail == true && cluster.Conf.PrefMaster != cluster.oldMaster.URL && cluster.master.URL != cluster.Conf.PrefMaster && cluster.Conf.PrefMaster != "" { - prm := cluster.foundPreferedMaster(cluster.slaves) - if prm != nil { - cluster.LogPrintf(LvlInfo, "Not on Preferred Master after failover") - cluster.MasterFailover(false) - } - } - cluster.LogPrintf(LvlInfo, "Master switch on %s complete", cluster.master.URL) cluster.master.FailCount = 0 if fail == true { @@ -584,6 +576,16 @@ func (cluster *Cluster) MasterFailover(fail bool) bool { cluster.FailoverTs = time.Now().Unix() } cluster.sme.RemoveFailoverState() + + // Not a prefered master this code is not default + if cluster.Conf.FailoverSwitchToPrefered && fail == true && cluster.Conf.PrefMaster != "" && !cluster.master.IsPrefered() { + prm := cluster.foundPreferedMaster(cluster.slaves) + if prm != nil { + cluster.LogPrintf(LvlInfo, "Switchover after failover not on a prefered leader after failover") + cluster.MasterFailover(false) + } + } + return true } diff --git a/cluster/cluster_get.go b/cluster/cluster_get.go index 7ebe06b7f..96f73977b 100644 --- a/cluster/cluster_get.go +++ b/cluster/cluster_get.go @@ -251,7 +251,7 @@ func (cluster *Cluster) GetGComm() string { return strings.Join(gcomms, ",") } -func (cluster *Cluster) getPreferedMaster() *ServerMonitor { +func (cluster 
*Cluster) getOnePreferedMaster() *ServerMonitor { if cluster.Conf.PrefMaster == "" { return nil } @@ -259,7 +259,7 @@ func (cluster *Cluster) getPreferedMaster() *ServerMonitor { if cluster.Conf.LogLevel > 2 { cluster.LogPrintf(LvlDbg, "Lookup if server: %s is preferred master: %s", server.URL, cluster.Conf.PrefMaster) } - if server.URL == cluster.Conf.PrefMaster { + if strings.Contains(cluster.Conf.PrefMaster, server.URL) { return server } } diff --git a/cluster/cluster_set.go b/cluster/cluster_set.go index 7423d74c2..ed6727c2e 100644 --- a/cluster/cluster_set.go +++ b/cluster/cluster_set.go @@ -318,16 +318,16 @@ func (cluster *Cluster) SetBenchMethod(m string) { cluster.benchmarkType = m } -// SetPrefMaster is used by regtest test_switchover_semisync_switchback_prefmaster_norplcheck +// SetPrefMaster is used by regtest test_switchover_semisync_switchback_prefmaster_norplcheck and API to force a server func (cluster *Cluster) SetPrefMaster(PrefMaster string) { + cluster.Conf.PrefMaster = PrefMaster for _, srv := range cluster.Servers { - if srv.URL == PrefMaster || srv.Name == PrefMaster { + if strings.Contains(PrefMaster, srv.URL) { srv.SetPrefered(true) } else { srv.SetPrefered(false) } } - cluster.Conf.PrefMaster = PrefMaster } func (cluster *Cluster) SetFailoverCtr(failoverCtr int) { diff --git a/cluster/cluster_topo.go b/cluster/cluster_topo.go index 4c9471884..dd60e7cea 100644 --- a/cluster/cluster_topo.go +++ b/cluster/cluster_topo.go @@ -259,7 +259,7 @@ func (cluster *Cluster) TopologyDiscover(wcg *sync.WaitGroup) error { } if srw > 1 { cluster.SetState("WARN0004", state.State{ErrType: "WARNING", ErrDesc: "RO server count > 1 in multi-master mode. switching to preferred master.", ErrFrom: "TOPO"}) - server := cluster.getPreferedMaster() + server := cluster.getOnePreferedMaster() if server != nil { server.SetReadWrite() } else { diff --git a/config/config.go b/config/config.go index a031d920c..60392c86e 100644 --- a/config/config.go +++ b/config/config.go @@ -145,6 +145,7 @@ type Config struct { CheckFalsePositiveExternal bool `mapstructure:"failover-falsepositive-external" toml:"failover-falsepositive-external" json:"failoverFalsePositiveExternal"` CheckFalsePositiveExternalPort int `mapstructure:"failover-falsepositive-external-port" toml:"failover-falsepositive-external-port" json:"failoverFalsePositiveExternalPort"` FailoverLogFileKeep int `mapstructure:"failover-log-file-keep" toml:"failover-log-file-keep" json:"failoverLogFileKeep"` + FailoverSwitchToPrefered bool `mapstructure:"failover-switch-to-prefered" toml:"failover-switch-to-prefered" json:"failoverSwithToPrefered"` Autorejoin bool `mapstructure:"autorejoin" toml:"autorejoin" json:"autorejoin"` Autoseed bool `mapstructure:"autoseed" toml:"autoseed" json:"autoseed"` AutorejoinFlashback bool `mapstructure:"autorejoin-flashback" toml:"autorejoin-flashback" json:"autorejoinFlashback"` diff --git a/etc/local/config.toml.osx b/etc/local/config.toml.osx index 56695e4f0..cd9c5549f 100644 --- a/etc/local/config.toml.osx +++ b/etc/local/config.toml.osx @@ -31,9 +31,10 @@ backup-mysqldump-path = "/usr/local/bin/mysqldump" #include="etc/local/features/graphite" #include="etc/local/features/ipv6" #include="etc/local/features/scheduler" +include="etc/local/features/prefered" #include="etc/local/masterslave" #include="etc/local/masterslave/consul" -include="etc/local/masterslave/haproxy" +#include="etc/local/masterslave/haproxy" #include="etc/local/masterslave/maxscale" #include="etc/local/masterslave/myproxy" 
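
A note on the preferred-master matching changed in this patch: getOnePreferedMaster and SetPrefMaster now treat db-servers-prefered-master as a comma-separated list and test membership with strings.Contains(cluster.Conf.PrefMaster, server.URL). Below is a minimal sketch, not code from this repository, of a stricter exact-match variant; the helper name isPreferredURL is illustrative and assumes the "strings" package is imported. Exact matching avoids the corner case where a server URL that happens to be a textual prefix of a listed entry (say 127.0.0.1:331 versus a listed 127.0.0.1:3317) would also satisfy the substring check.

    // Sketch only: exact membership test against a comma-separated
    // preferred-master list, instead of a plain substring check.
    func isPreferredURL(prefList, url string) bool {
        for _, entry := range strings.Split(prefList, ",") {
            if strings.TrimSpace(entry) == url {
                return true
            }
        }
        return false
    }

With the sample list from prefered/default.toml, isPreferredURL("127.0.0.1:3317,127.0.0.1:3318", server.URL) flags exactly the first two servers as preferred candidates.
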
#include="etc/local/masterslave/shardproxy" diff --git a/etc/local/features/prefered/default.toml b/etc/local/features/prefered/default.toml new file mode 100644 index 000000000..bd917d7ec --- /dev/null +++ b/etc/local/features/prefered/default.toml @@ -0,0 +1,28 @@ +# config.toml +# Example replication-manager configuration file + +# change the service file in /etc/systemd/system/replication-manager.service to looks like : +# replication-manager --config=./etc/config.toml.sample --cluster=Cluster01,Cluster_Test_2_Nodes monitor + + +[Cluster_Test_MasterSlaveSlave] +title = "ClusterTestMasterSlaveSlave" +db-servers-hosts = "127.0.0.1:3317,127.0.0.1:3318,127.0.0.1:3319" +db-servers-prefered-master = "127.0.0.1:3317,127.0.0.1:3318" +db-servers-credential = "root:test" +db-servers-connect-timeout = 1 + +replication-credential = "root:test" + +haproxy=true +haproxy-write-port = 3307 +haproxy-read-port = 3306 +failover-switch-to-prefered = true + +[Default] +prov-orchestrator = "local" +db-servers-binary-path= "/usr/local/mysql/bin/" +haproxy-binary-path= "/usr/local/bin/haproxy" +verbose = true +http-server = true +http-bind-address = "0.0.0.0" diff --git a/main.go b/main.go index 0ddb235bd..a404fa5d1 100644 --- a/main.go +++ b/main.go @@ -233,6 +233,7 @@ func init() { monitorCmd.Flags().Int64Var(&conf.FailTime, "failover-time-limit", 0, "Failover is canceled if timer in sec is not passed with previous failover (0: do not wait)") monitorCmd.Flags().BoolVar(&conf.FailSync, "failover-at-sync", false, "Failover only when state semisync is sync for last status") monitorCmd.Flags().BoolVar(&conf.FailEventScheduler, "failover-event-scheduler", false, "Failover event scheduler") + monitorCmd.Flags().BoolVar(&conf.FailoverSwitchToPrefered, "failover-switch-to-prefered", false, "Failover always pick most up to date slave following it with switchover to prefered leader") monitorCmd.Flags().BoolVar(&conf.FailEventStatus, "failover-event-status", false, "Failover event status ENABLE OR DISABLE ON SLAVE") monitorCmd.Flags().BoolVar(&conf.CheckFalsePositiveHeartbeat, "failover-falsepositive-heartbeat", true, "Failover checks that slaves do not receive heartbeat") monitorCmd.Flags().IntVar(&conf.CheckFalsePositiveHeartbeatTimeout, "failover-falsepositive-heartbeat-timeout", 3, "Failover checks that slaves do not receive heartbeat detection timeout ") diff --git a/server/api_cluster.go b/server/api_cluster.go index ff6cb99b6..522eef278 100644 --- a/server/api_cluster.go +++ b/server/api_cluster.go @@ -769,7 +769,7 @@ func (repman *ReplicationManager) handlerMuxSwitchover(w http.ResponseWriter, r } r.ParseForm() // Parses the request body newPrefMaster := r.Form.Get("prefmaster") - mycluster.LogPrintf(cluster.LvlInfo, "Was ask for prefered master: %s", newPrefMaster) + mycluster.LogPrintf(cluster.LvlInfo, "API force for prefered master: %s", newPrefMaster) if mycluster.IsInHostList(newPrefMaster) { mycluster.SetPrefMaster(newPrefMaster) } else { From 5273b44316ae45ad93e6bf1656730b01f897b4f7 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Fri, 4 Jun 2021 13:00:52 +0200 Subject: [PATCH 45/84] Stop database service under infinit loop --- cluster/prov.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cluster/prov.go b/cluster/prov.go index d58de3c7b..3706876ef 100644 --- a/cluster/prov.go +++ b/cluster/prov.go @@ -359,7 +359,6 @@ func (cluster *Cluster) StopDatabaseService(server *ServerMonitor) error { default: return errors.New("No valid orchestrator") } - cluster.StopDatabaseService(server) if err == nil { 
server.DelRestartCookie() } From 5cbe43a8b35355d51b4113bc54fa1554263b4f92 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Fri, 4 Jun 2021 13:10:01 +0200 Subject: [PATCH 46/84] Add cluster name parameter to all prov scripts --- cluster/prov.go | 7 ++++--- cluster/prov_scripts.go | 16 ++++++++-------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/cluster/prov.go b/cluster/prov.go index 3706876ef..dc747cc62 100644 --- a/cluster/prov.go +++ b/cluster/prov.go @@ -359,6 +359,7 @@ func (cluster *Cluster) StopDatabaseService(server *ServerMonitor) error { default: return errors.New("No valid orchestrator") } + cluster.StopDatabaseScript(server) if err == nil { server.DelRestartCookie() } @@ -383,7 +384,7 @@ func (cluster *Cluster) StopProxyService(server DatabaseProxy) error { default: return errors.New("No valid orchestrator") } - cluster.StopProxyService(server) + cluster.StopProxyScript(server) if err == nil { server.DelRestartCookie() } @@ -407,7 +408,7 @@ func (cluster *Cluster) StartProxyService(server DatabaseProxy) error { default: return errors.New("No valid orchestrator") } - cluster.StartProxyService(server) + cluster.StartProxyScript(server) if err == nil { server.DelRestartCookie() } @@ -437,7 +438,7 @@ func (cluster *Cluster) StartDatabaseService(server *ServerMonitor) error { default: return errors.New("No valid orchestrator") } - cluster.StartDatabaseService(server) + cluster.StartDatabaseScript(server) if err == nil { server.DelRestartCookie() } diff --git a/cluster/prov_scripts.go b/cluster/prov_scripts.go index f3d22154c..87d577bad 100644 --- a/cluster/prov_scripts.go +++ b/cluster/prov_scripts.go @@ -20,7 +20,7 @@ func (cluster *Cluster) UnprovisionDatabaseScript(server *ServerMonitor) error { if cluster.Conf.ProvDbCleanupScript == "" { return nil } - scriptCmd := exec.Command(cluster.Conf.ProvDbCleanupScript, misc.Unbracket(server.Host), server.Port, cluster.dbUser, cluster.dbPass) + scriptCmd := exec.Command(cluster.Conf.ProvDbCleanupScript, misc.Unbracket(server.Host), server.Port, cluster.dbUser, cluster.dbPass, cluster.Name) cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), cluster.dbPass, "XXXX", 1)) stdoutIn, _ := scriptCmd.StdoutPipe() @@ -48,7 +48,7 @@ func (cluster *Cluster) ProvisionDatabaseScript(server *ServerMonitor) error { if cluster.Conf.ProvDbBootstrapScript == "" { return nil } - scriptCmd := exec.Command(cluster.Conf.ProvDbBootstrapScript, misc.Unbracket(server.Host), server.Port, cluster.dbUser, cluster.dbPass) + scriptCmd := exec.Command(cluster.Conf.ProvDbBootstrapScript, misc.Unbracket(server.Host), server.Port, cluster.dbUser, cluster.dbPass, cluster.Name) cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), cluster.dbPass, "XXXX", 1)) stdoutIn, _ := scriptCmd.StdoutPipe() @@ -76,7 +76,7 @@ func (cluster *Cluster) StopDatabaseScript(server *ServerMonitor) error { if cluster.Conf.ProvDbStopScript == "" { return nil } - scriptCmd := exec.Command(cluster.Conf.ProvDbStopScript, misc.Unbracket(server.Host), server.Port, cluster.dbUser, cluster.dbPass) + scriptCmd := exec.Command(cluster.Conf.ProvDbStopScript, misc.Unbracket(server.Host), server.Port, cluster.dbUser, cluster.dbPass, cluster.Name) cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), cluster.dbPass, "XXXX", 1)) stdoutIn, _ := scriptCmd.StdoutPipe() @@ -104,7 +104,7 @@ func (cluster *Cluster) StartDatabaseScript(server *ServerMonitor) error { if cluster.Conf.ProvDbStartScript == "" { return nil } - scriptCmd := 
exec.Command(cluster.Conf.ProvDbStartScript, misc.Unbracket(server.Host), server.Port, cluster.dbUser, cluster.dbPass) + scriptCmd := exec.Command(cluster.Conf.ProvDbStartScript, misc.Unbracket(server.Host), server.Port, cluster.dbUser, cluster.dbPass, cluster.Name) cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), cluster.dbPass, "XXXX", 1)) stdoutIn, _ := scriptCmd.StdoutPipe() @@ -132,7 +132,7 @@ func (cluster *Cluster) UnprovisionProxyScript(server DatabaseProxy) error { if cluster.Conf.ProvProxyCleanupScript == "" { return nil } - scriptCmd := exec.Command(cluster.Conf.ProvProxyCleanupScript, misc.Unbracket(server.GetHost()), server.GetPort(), server.GetUser(), server.GetPass()) + scriptCmd := exec.Command(cluster.Conf.ProvProxyCleanupScript, misc.Unbracket(server.GetHost()), server.GetPort(), server.GetUser(), server.GetPass(), cluster.Name) cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), server.GetPass(), "XXXX", 1)) stdoutIn, _ := scriptCmd.StdoutPipe() @@ -173,7 +173,7 @@ func (cluster *Cluster) ProvisionProxyScript(server DatabaseProxy) error { if cluster.Conf.ProvProxyBootstrapScript == "" { return nil } - scriptCmd := exec.Command(cluster.Conf.ProvProxyBootstrapScript, misc.Unbracket(server.GetHost()), server.GetPort(), server.GetUser(), server.GetPass()) + scriptCmd := exec.Command(cluster.Conf.ProvProxyBootstrapScript, misc.Unbracket(server.GetHost()), server.GetPort(), server.GetUser(), server.GetPass(), cluster.Name) cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), server.GetPass(), "XXXX", 1)) stdoutIn, _ := scriptCmd.StdoutPipe() @@ -212,7 +212,7 @@ func (cluster *Cluster) StartProxyScript(server DatabaseProxy) error { if cluster.Conf.ProvProxyStartScript == "" { return nil } - scriptCmd := exec.Command(cluster.Conf.ProvProxyStartScript, misc.Unbracket(server.GetHost()), server.GetPort(), server.GetUser(), server.GetPass()) + scriptCmd := exec.Command(cluster.Conf.ProvProxyStartScript, misc.Unbracket(server.GetHost()), server.GetPort(), server.GetUser(), server.GetPass(), cluster.Name) cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), server.GetPass(), "XXXX", 1)) stdoutIn, _ := scriptCmd.StdoutPipe() @@ -251,7 +251,7 @@ func (cluster *Cluster) StopProxyScript(server DatabaseProxy) error { if cluster.Conf.ProvProxyStopScript == "" { return nil } - scriptCmd := exec.Command(cluster.Conf.ProvProxyStopScript, misc.Unbracket(server.GetHost()), server.GetPort(), server.GetUser(), server.GetPass()) + scriptCmd := exec.Command(cluster.Conf.ProvProxyStopScript, misc.Unbracket(server.GetHost()), server.GetPort(), server.GetUser(), server.GetPass(), cluster.Name) cluster.LogPrintf(LvlInfo, "%s", strings.Replace(scriptCmd.String(), server.GetPass(), "XXXX", 1)) stdoutIn, _ := scriptCmd.StdoutPipe() From 265d08b5f8b92eeedb6f244b3191f2eaaa09bfd7 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Sat, 5 Jun 2021 19:00:36 +0200 Subject: [PATCH 47/84] Implement alerting for specific monitor triggers --- cluster/cluster.go | 1 + cluster/cluster_chk.go | 45 ++++++++++++++++++++++++++++++++++++++++++ cluster/srv_snd.go | 33 +++++-------------------------- config/config.go | 1 + main.go | 1 + 5 files changed, 53 insertions(+), 28 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index b943b67fa..29b359ee9 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -528,6 +528,7 @@ func (cluster *Cluster) StateProcessing() { ostates := cluster.sme.GetOpenStates() for _, s := range ostates { 
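
For context on the alerting added by this patch: the cluster_chk.go hunk further down introduces CheckAlert and SendAlert, so an open state whose error key appears in the comma-separated monitoring-alert-trigger option is turned into an alert that is emailed (when mail-to is set) and/or handed to alert-script with three arguments: origin, previous state, state. The standalone sketch below mirrors that flow with stdlib calls only; the script path and cluster name are illustrative, the trigger list is the default proposed later in this series, and the empty second argument reflects that CheckAlert does not fill PrevState.

    // Sketch only: mirrors the CheckAlert/SendAlert flow added in this patch.
    package main

    import (
        "fmt"
        "os/exec"
        "strings"
    )

    func main() {
        trigger := "ERR00027,ERR00042" // monitoring-alert-trigger
        errKey := "ERR00042"
        if strings.Contains(trigger, errKey) {
            // alert-script receives: origin, previous state, state
            out, err := exec.Command("/usr/local/bin/alert.sh",
                "Cluster_Test_MasterSlaveSlave", "", errKey).CombinedOutput()
            fmt.Println(string(out), err)
        }
    }
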
cluster.CheckCapture(s) + cluster.CheckAlert(s) } cluster.sme.ClearState() if cluster.sme.GetHeartbeats()%60 == 0 { diff --git a/cluster/cluster_chk.go b/cluster/cluster_chk.go index 2bbe40617..49515f33b 100644 --- a/cluster/cluster_chk.go +++ b/cluster/cluster_chk.go @@ -13,11 +13,13 @@ import ( "fmt" "io/ioutil" "net/http" + "os/exec" "strconv" "strings" "time" "github.com/signal18/replication-manager/router/maxscale" + "github.com/signal18/replication-manager/utils/alert" "github.com/signal18/replication-manager/utils/dbhelper" "github.com/signal18/replication-manager/utils/state" ) @@ -452,6 +454,49 @@ func (cluster *Cluster) CheckCapture(state state.State) { } } +func (cluster *Cluster) CheckAlert(state state.State) { + if cluster.Conf.MonitoringAlertTrigger == "" { + return + } + + if strings.Contains(cluster.Conf.MonitoringAlertTrigger, state.ErrKey) { + a := alert.Alert{ + State: state.ErrKey, + Origin: cluster.Name, + } + + err := cluster.SendAlert(a) + cluster.LogPrintf("ERROR", "Could not send alert: %s ", err) + } +} + +func (cluster *Cluster) SendAlert(alert alert.Alert) error { + if cluster.Conf.MailTo != "" { + alert.From = cluster.Conf.MailFrom + alert.To = cluster.Conf.MailTo + alert.Destination = cluster.Conf.MailSMTPAddr + alert.User = cluster.Conf.MailSMTPUser + alert.Password = cluster.Conf.MailSMTPPassword + alert.TlsVerify = cluster.Conf.MailSMTPTLSSkipVerify + err := alert.Email() + if err != nil { + cluster.LogPrintf("ERROR", "Could not send mail alert: %s ", err) + } + } + if cluster.Conf.AlertScript != "" { + cluster.LogPrintf("INFO", "Calling alert script") + var out []byte + out, err := exec.Command(cluster.Conf.AlertScript, alert.Origin, alert.PrevState, alert.State).CombinedOutput() + if err != nil { + cluster.LogPrintf("ERROR", "%s", err) + } + + cluster.LogPrintf("INFO", "Alert script complete:", string(out)) + } + + return nil +} + func (cluster *Cluster) CheckAllTableChecksum() { for _, t := range cluster.master.Tables { cluster.CheckTableChecksum(t.Table_schema, t.Table_name) diff --git a/cluster/srv_snd.go b/cluster/srv_snd.go index b51dbcddc..e4c6e7679 100644 --- a/cluster/srv_snd.go +++ b/cluster/srv_snd.go @@ -11,7 +11,6 @@ package cluster import ( "fmt" - "os/exec" "strconv" "strings" "time" @@ -102,33 +101,11 @@ func (server *ServerMonitor) SendAlert() error { return nil } - if server.ClusterGroup.Conf.MailTo != "" { - a := alert.Alert{ - From: server.ClusterGroup.Conf.MailFrom, - To: server.ClusterGroup.Conf.MailTo, - State: server.State, - PrevState: server.PrevState, - Origin: server.URL, - Destination: server.ClusterGroup.Conf.MailSMTPAddr, - User: server.ClusterGroup.Conf.MailSMTPUser, - Password: server.ClusterGroup.Conf.MailSMTPPassword, - TlsVerify: server.ClusterGroup.Conf.MailSMTPTLSSkipVerify, - } - err := a.Email() - if err != nil { - server.ClusterGroup.LogPrintf("ERROR", "Could not send mail alert: %s ", err) - } - } - if server.ClusterGroup.Conf.AlertScript != "" { - server.ClusterGroup.LogPrintf("INFO", "Calling alert script") - var out []byte - out, err := exec.Command(server.ClusterGroup.Conf.AlertScript, server.URL, server.PrevState, server.State).CombinedOutput() - if err != nil { - server.ClusterGroup.LogPrintf("ERROR", "%s", err) - } - - server.ClusterGroup.LogPrintf("INFO", "Alert script complete:", string(out)) + a := alert.Alert{ + State: server.State, + PrevState: server.PrevState, + Origin: server.URL, } - return nil + return server.ClusterGroup.SendAlert(a) } diff --git a/config/config.go b/config/config.go index 
60392c86e..98307ec3c 100644 --- a/config/config.go +++ b/config/config.go @@ -67,6 +67,7 @@ type Config struct { MonitorCaptureTrigger string `mapstructure:"monitoring-capture-trigger" toml:"monitoring-capture-trigger" json:"monitoringCaptureTrigger"` MonitorIgnoreError string `mapstructure:"monitoring-ignore-errors" toml:"monitoring-ignore-errors" json:"monitoringIgnoreErrors"` MonitorTenant string `mapstructure:"monitoring-tenant" toml:"monitoring-tenant" json:"monitoringTenant"` + MonitoringAlertTrigger string `mapstructure:"monitoring-alert-trigger" toml:"monitoring-alert-trigger" json:"MonitoringAlertTrigger"` Interactive bool `mapstructure:"interactive" toml:"-" json:"interactive"` Verbose bool `mapstructure:"verbose" toml:"verbose" json:"verbose"` LogFile string `mapstructure:"log-file" toml:"log-file" json:"logFile"` diff --git a/main.go b/main.go index a404fa5d1..fb8a288b8 100644 --- a/main.go +++ b/main.go @@ -182,6 +182,7 @@ func init() { monitorCmd.Flags().BoolVar(&conf.MonitorCapture, "monitoring-capture", true, "Enable capture on error for 5 monitor loops") monitorCmd.Flags().StringVar(&conf.MonitorCaptureTrigger, "monitoring-capture-trigger", "ERR00076,ERR00041", "List of errno triggering capture mode") monitorCmd.Flags().IntVar(&conf.MonitorCaptureFileKeep, "monitoring-capture-file-keep", 5, "Purge capture file keep that number of them") + monitorCmd.Flags().StringVar(&conf.MonitoringAlertTrigger, "monitoring-alert-trigger", "", "List of errno triggering an alert to be send") monitorCmd.Flags().StringVar(&conf.User, "db-servers-credential", "root:mariadb", "Database login, specified in the [user]:[password] format") monitorCmd.Flags().StringVar(&conf.Hosts, "db-servers-hosts", "", "Database hosts list to monitor, IP and port (optional), specified in the host:[port] format and separated by commas") monitorCmd.Flags().BoolVar(&conf.DBServersTLSUseGeneratedCertificate, "db-servers-tls-use-generated-cert", false, "Use the auto generated certificates to connect to database backend") From 163ec0cc09bc2ee8e784d3f2e6b9e490f3ec61e8 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Thu, 10 Jun 2021 13:22:43 +0200 Subject: [PATCH 48/84] Resolve PR requests --- config/config.go | 2 +- main.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/config/config.go b/config/config.go index 98307ec3c..19b27e587 100644 --- a/config/config.go +++ b/config/config.go @@ -67,7 +67,7 @@ type Config struct { MonitorCaptureTrigger string `mapstructure:"monitoring-capture-trigger" toml:"monitoring-capture-trigger" json:"monitoringCaptureTrigger"` MonitorIgnoreError string `mapstructure:"monitoring-ignore-errors" toml:"monitoring-ignore-errors" json:"monitoringIgnoreErrors"` MonitorTenant string `mapstructure:"monitoring-tenant" toml:"monitoring-tenant" json:"monitoringTenant"` - MonitoringAlertTrigger string `mapstructure:"monitoring-alert-trigger" toml:"monitoring-alert-trigger" json:"MonitoringAlertTrigger"` + MonitoringAlertTrigger string `mapstructure:"monitoring-alert-trigger" toml:"monitoring-alert-trigger" json:"monitoringAlertTrigger"` Interactive bool `mapstructure:"interactive" toml:"-" json:"interactive"` Verbose bool `mapstructure:"verbose" toml:"verbose" json:"verbose"` LogFile string `mapstructure:"log-file" toml:"log-file" json:"logFile"` diff --git a/main.go b/main.go index fb8a288b8..242946786 100644 --- a/main.go +++ b/main.go @@ -182,7 +182,7 @@ func init() { monitorCmd.Flags().BoolVar(&conf.MonitorCapture, "monitoring-capture", true, "Enable capture on error 
for 5 monitor loops") monitorCmd.Flags().StringVar(&conf.MonitorCaptureTrigger, "monitoring-capture-trigger", "ERR00076,ERR00041", "List of errno triggering capture mode") monitorCmd.Flags().IntVar(&conf.MonitorCaptureFileKeep, "monitoring-capture-file-keep", 5, "Purge capture file keep that number of them") - monitorCmd.Flags().StringVar(&conf.MonitoringAlertTrigger, "monitoring-alert-trigger", "", "List of errno triggering an alert to be send") + monitorCmd.Flags().StringVar(&conf.MonitoringAlertTrigger, "monitoring-alert-trigger", "ERR00027,ERR00042", "List of errno triggering an alert to be send") monitorCmd.Flags().StringVar(&conf.User, "db-servers-credential", "root:mariadb", "Database login, specified in the [user]:[password] format") monitorCmd.Flags().StringVar(&conf.Hosts, "db-servers-hosts", "", "Database hosts list to monitor, IP and port (optional), specified in the host:[port] format and separated by commas") monitorCmd.Flags().BoolVar(&conf.DBServersTLSUseGeneratedCertificate, "db-servers-tls-use-generated-cert", false, "Use the auto generated certificates to connect to database backend") From a419004c9cdf0f8471ef87cb411c8e43817ce127 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Fri, 11 Jun 2021 09:21:04 +0200 Subject: [PATCH 49/84] Don't purge local binlog when not backup-binlogs enabled --- cluster/srv_job.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cluster/srv_job.go b/cluster/srv_job.go index dc717348e..9f3217c2c 100644 --- a/cluster/srv_job.go +++ b/cluster/srv_job.go @@ -911,6 +911,9 @@ func (server *ServerMonitor) JobBackupBinlogPurge(binlogfile string) error { if !server.IsMaster() { return errors.New("Purge only master binlog") } + if !server.ClusterGroup.Conf.BackupBinlogs { + return errors.New("Copy binlog not enable") + } binlogfilestart, _ := strconv.Atoi(strings.Split(binlogfile, ".")[1]) prefix := strings.Split(binlogfile, ".")[0] binlogfilestop := binlogfilestart - server.ClusterGroup.Conf.BackupBinlogsKeep From 8105433850b7120ca29869a588e1c40b0e3b0349 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Fri, 11 Jun 2021 09:27:54 +0200 Subject: [PATCH 50/84] Add some extra config sample --- etc/local/features/positional/default.toml | 22 ++++++++++++++++++++++ etc/local/features/pseudogtid/default.toml | 22 ++++++++++++++++++++++ 2 files changed, 44 insertions(+) create mode 100644 etc/local/features/positional/default.toml create mode 100644 etc/local/features/pseudogtid/default.toml diff --git a/etc/local/features/positional/default.toml b/etc/local/features/positional/default.toml new file mode 100644 index 000000000..4155e123d --- /dev/null +++ b/etc/local/features/positional/default.toml @@ -0,0 +1,22 @@ +Cluster_Test_MasterSlaveSlave] +title = "ClusterTestMasterSlaveSlave" +db-servers-hosts = "127.0.0.1:3317,127.0.0.1:3318,127.0.0.1:3319" +db-servers-prefered-master = "127.0.0.1:3317,127.0.0.1:3318" +db-servers-credential = "root:test" +db-servers-connect-timeout = 1 + +replication-credential = "root:test" + +haproxy=true +haproxy-write-port = 3307 +haproxy-read-port = 3306 + +force-slave-no-gtid-mode = true + +[Default] +prov-orchestrator = "local" +db-servers-binary-path= "/usr/local/mysql/bin/" +haproxy-binary-path= "/usr/local/bin/haproxy" +verbose = true +http-server = true +http-bind-address = "0.0.0.0" diff --git a/etc/local/features/pseudogtid/default.toml b/etc/local/features/pseudogtid/default.toml new file mode 100644 index 000000000..4276ee9aa --- /dev/null +++ b/etc/local/features/pseudogtid/default.toml @@ -0,0 +1,22 @@ 
+[Cluster_Test_MasterSlaveSlave] +title = "ClusterTestMasterSlaveSlave" +db-servers-hosts = "127.0.0.1:3317,127.0.0.1:3318,127.0.0.1:3319" +db-servers-prefered-master = "127.0.0.1:3317,127.0.0.1:3318" +db-servers-credential = "root:test" +db-servers-connect-timeout = 1 + +replication-credential = "root:test" + +haproxy=true +haproxy-write-port = 3307 +haproxy-read-port = 3306 + +autorejoin-slave-positional-heartbeat = true + +[Default] +prov-orchestrator = "local" +db-servers-binary-path= "/usr/local/mysql/bin/" +haproxy-binary-path= "/usr/local/bin/haproxy" +verbose = true +http-server = true +http-bind-address = "0.0.0.0" From 370e89fc08eda31fa2d332e04ad2bc4f6230296b Mon Sep 17 00:00:00 2001 From: svaroqui Date: Fri, 11 Jun 2021 12:58:19 +0200 Subject: [PATCH 51/84] Rig tag bing vs nobind --- share/opensvc/moduleset_mariadb.svc.mrm.db.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/share/opensvc/moduleset_mariadb.svc.mrm.db.json b/share/opensvc/moduleset_mariadb.svc.mrm.db.json index 94bd78ae1..b9c1d092f 100644 --- a/share/opensvc/moduleset_mariadb.svc.mrm.db.json +++ b/share/opensvc/moduleset_mariadb.svc.mrm.db.json @@ -3928,8 +3928,8 @@ { "var_author": "admin Manager", "var_class": "file", - "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/with_disk_splitundolog.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_documentation: https://mariadb.com/kb/en/innodb-system-variables/#innodb_undo_log_truncate\\n# mariadb_documentation: https://mariadb.com/kb/en/innodb-system-variables/#innodb_rollback_segments\\n# mariadb_documentation: https://mysqlonarm.github.io/2x-mysql-performance/\\n\\n[mysqld]\\nloose_innodb_undo_tablespaces=64\\nloose_innodb_undo_log_truncate=1\"}", - "var_updated": "2021-05-20 10:19:15", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/with_disk_splitundolog.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# mariadb_alert: from:10.5.0;to:;label:MDEV-25801;trigger: SELECT IF(variable_value=\\\"ON\\\", 1,0) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME= \\\"innodb_undo_log_truncate\\\";\\n# mariadb_alert: from:10.6.0 to: label:MDEV-25801\\n\\n# mariadb_documentation: https://mariadb.com/kb/en/innodb-system-variables/#innodb_undo_log_truncate\\n# mariadb_documentation: https://mariadb.com/kb/en/innodb-system-variables/#innodb_rollback_segments\\n# mariadb_documentation: https://mysqlonarm.github.io/2x-mysql-performance/\\n\\n[mysqld]\\nloose_innodb_undo_tablespaces=64\\nloose_innodb_undo_log_truncate=1\"}", + "var_updated": "2021-06-10 13:51:46", "var_name": "db_cnf_disk_splitundo", "id": 6282 } @@ -5380,8 +5380,8 @@ { "var_author": "admin Manager", "var_class": "symlink", - "var_value": "{\"symlink\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/conf.d/01_with_net_bind.cnf\",\"target\":\"../with_net_bind.cnf\"}", - "var_updated": "2021-01-04 16:04:13", + "var_value": "{\"symlink\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/conf.d/99_with_net_bind.cnf\",\"target\":\"../with_net_bind.cnf\"}", + "var_updated": "2021-06-11 12:51:23", "var_name": "db_link_withnetbind", "id": 6122 } @@ -6366,8 +6366,8 @@ { "var_author": "admin Manager", "var_class": "symlink", - "var_value": "{\"symlink\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/conf.d/99_no_net_bind.cnf\",\"target\":\"../no_net_bind.cnf\"}", - "var_updated": 
"2021-01-04 16:04:32", + "var_value": "{\"symlink\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/conf.d/10_no_net_bind.cnf\",\"target\":\"../no_net_bind.cnf\"}", + "var_updated": "2021-06-11 12:50:01", "var_name": "db_link_nobind", "id": 6217 } From 700f060f9d7dd6f1301cac5fa9ceb59e87ebfad3 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Mon, 14 Jun 2021 08:18:23 +0200 Subject: [PATCH 52/84] Add logicalbackup physicalbackup cookie when backup is finished --- cluster/srv_del.go | 12 ++++++++++++ cluster/srv_has.go | 12 ++++++++++++ cluster/srv_job.go | 8 ++++++-- cluster/srv_rejoin.go | 30 ++++++++++++++++-------------- cluster/srv_set.go | 11 +++++++++++ 5 files changed, 57 insertions(+), 16 deletions(-) diff --git a/cluster/srv_del.go b/cluster/srv_del.go index d798755a4..08e979d49 100644 --- a/cluster/srv_del.go +++ b/cluster/srv_del.go @@ -41,3 +41,15 @@ func (server *ServerMonitor) DelReprovisionCookie() error { func (server *ServerMonitor) DelRestartCookie() error { return server.delCookie("cookie_restart") } + +func (server *ServerMonitor) DelWaitBackupCookie() error { + return server.delCookie("cookie_waitbackup") +} + +func (server *ServerMonitor) DelBackupLogicalCookie() error { + return server.delCookie("cookie_logicalbackup") +} + +func (server *ServerMonitor) DelBackupPhysicalCookie() error { + return server.delCookie("cookie_physicalbackup") +} diff --git a/cluster/srv_has.go b/cluster/srv_has.go index cb3ae1b8a..df7a1b005 100644 --- a/cluster/srv_has.go +++ b/cluster/srv_has.go @@ -84,6 +84,10 @@ func (server *ServerMonitor) HasWaitStartCookie() bool { return server.hasCookie("cookie_waitstart") } +func (server *ServerMonitor) HasWaitBackupCookie() bool { + return server.hasCookie("cookie_waitbackup") +} + func (server *ServerMonitor) HasWaitStopCookie() bool { return server.hasCookie("cookie_waitstop") } @@ -96,6 +100,14 @@ func (server *ServerMonitor) HasReprovCookie() bool { return server.hasCookie("cookie_reprov") } +func (server *ServerMonitor) HasBackupLogicalCookie() bool { + return server.hasCookie("cookie_logicalbackup") +} + +func (server *ServerMonitor) HasBackupPhysicalCookie() bool { + return server.hasCookie("cookie_physicalbackup") +} + func (server *ServerMonitor) HasReadOnly() bool { return server.Variables["READ_ONLY"] == "ON" } diff --git a/cluster/srv_job.go b/cluster/srv_job.go index 9f3217c2c..d3f14577e 100644 --- a/cluster/srv_job.go +++ b/cluster/srv_job.go @@ -580,9 +580,9 @@ func (server *ServerMonitor) JobBackupLogical() error { if server.IsDown() { return nil } - + server.DelBackupLogicalCookie() if server.IsMariaDB() && server.DBVersion.Major == 10 && - server.DBVersion.Minor >= 4 && server.DBVersion.Minor < 6 && + server.DBVersion.Minor >= 4 && server.ClusterGroup.Conf.BackupLockDDL && (server.ClusterGroup.Conf.BackupLogicalType == config.ConstBackupLogicalTypeMysqldump || server.ClusterGroup.Conf.BackupLogicalType == config.ConstBackupLogicalTypeMydumper) { bckConn, err := server.GetNewDBConn() @@ -673,6 +673,8 @@ func (server *ServerMonitor) JobBackupLogical() error { if err != nil { server.ClusterGroup.LogPrintf(LvlErr, "mysqldump: %s", err) + } else { + server.SetBackupLogicalCookie() } gw.Flush() gw.Close() @@ -749,6 +751,8 @@ func (server *ServerMonitor) JobBackupLogical() error { wg.Wait() if err := dumpCmd.Wait(); err != nil { server.ClusterGroup.LogPrintf(LvlErr, "MyDumper: %s", err) + } else { + server.SetBackupLogicalCookie() } } diff --git a/cluster/srv_rejoin.go b/cluster/srv_rejoin.go index 8e0574b63..2ad48e9ea 100644 --- 
a/cluster/srv_rejoin.go +++ b/cluster/srv_rejoin.go @@ -157,21 +157,23 @@ func (server *ServerMonitor) ReseedMasterSST() error { server.ClusterGroup.LogPrintf("ERROR", "mysqldump restore failed %s", err) return errors.New("Dump from master failed") } - } else if server.ClusterGroup.Conf.AutorejoinLogicalBackup { - server.JobReseedLogicalBackup() - } else if server.ClusterGroup.Conf.AutorejoinPhysicalBackup { - server.JobReseedPhysicalBackup() - } else if server.ClusterGroup.Conf.RejoinScript != "" { - server.ClusterGroup.LogPrintf("INFO", "Calling rejoin script") - var out []byte - out, err := exec.Command(server.ClusterGroup.Conf.RejoinScript, misc.Unbracket(server.Host), misc.Unbracket(server.ClusterGroup.master.Host)).CombinedOutput() - if err != nil { - server.ClusterGroup.LogPrintf("ERROR", "%s", err) - } - server.ClusterGroup.LogPrintf("INFO", "Rejoin script complete %s", string(out)) } else { - server.ClusterGroup.LogPrintf("INFO", "No SST reseed method found") - return errors.New("No SST reseed method found") + if server.ClusterGroup.Conf.AutorejoinLogicalBackup { + server.JobReseedLogicalBackup() + } else if server.ClusterGroup.Conf.AutorejoinPhysicalBackup { + server.JobReseedPhysicalBackup() + } else if server.ClusterGroup.Conf.RejoinScript != "" { + server.ClusterGroup.LogPrintf("INFO", "Calling rejoin script") + var out []byte + out, err := exec.Command(server.ClusterGroup.Conf.RejoinScript, misc.Unbracket(server.Host), misc.Unbracket(server.ClusterGroup.master.Host)).CombinedOutput() + if err != nil { + server.ClusterGroup.LogPrintf("ERROR", "%s", err) + } + server.ClusterGroup.LogPrintf("INFO", "Rejoin script complete %s", string(out)) + } else { + server.ClusterGroup.LogPrintf("INFO", "No SST reseed method found") + return errors.New("No SST reseed method found") + } } return nil diff --git a/cluster/srv_set.go b/cluster/srv_set.go index 5769d90e3..67afda6ea 100644 --- a/cluster/srv_set.go +++ b/cluster/srv_set.go @@ -307,3 +307,14 @@ func (server *ServerMonitor) SetWaitStopCookie() error { func (server *ServerMonitor) SetReprovCookie() error { return server.createCookie("cookie_reprov") } + +func (server *ServerMonitor) SetWaitBackupCookie() error { + return server.createCookie("cookie_waitbackup") +} + +func (server *ServerMonitor) SetBackupPhysicalCookie() error { + return server.createCookie("cookie_physicalbackup") +} +func (server *ServerMonitor) SetBackupLogicalCookie() error { + return server.createCookie("cookie_logicalbackup") +} From 327ca0425eed27c0c0066be768f04f81487c2dee Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Tue, 15 Jun 2021 10:46:22 +0200 Subject: [PATCH 53/84] Fix logging an error when none occured --- cluster/cluster_chk.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cluster/cluster_chk.go b/cluster/cluster_chk.go index 49515f33b..b6475905b 100644 --- a/cluster/cluster_chk.go +++ b/cluster/cluster_chk.go @@ -466,7 +466,9 @@ func (cluster *Cluster) CheckAlert(state state.State) { } err := cluster.SendAlert(a) - cluster.LogPrintf("ERROR", "Could not send alert: %s ", err) + if err != nil { + cluster.LogPrintf("ERROR", "Could not send alert: %s ", err) + } } } From d61eddd8c773266d62fae4544ced3770061b3121 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Tue, 15 Jun 2021 18:38:21 +0200 Subject: [PATCH 54/84] Only email on Opened states --- cluster/cluster.go | 4 ++++ utils/state/state.go | 36 ++++++++++++++++++++++++++++-------- 2 files changed, 32 insertions(+), 8 deletions(-) diff --git a/cluster/cluster.go 
b/cluster/cluster.go index 29b359ee9..86bf1e786 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -528,8 +528,12 @@ func (cluster *Cluster) StateProcessing() { ostates := cluster.sme.GetOpenStates() for _, s := range ostates { cluster.CheckCapture(s) + } + + for _, s := range cluster.sme.GetLastOpenedStates() { cluster.CheckAlert(s) } + cluster.sme.ClearState() if cluster.sme.GetHeartbeats()%60 == 0 { cluster.Save() diff --git a/utils/state/state.go b/utils/state/state.go index cb90d6150..2cb63fe34 100644 --- a/utils/state/state.go +++ b/utils/state/state.go @@ -296,23 +296,43 @@ func (SM *StateMachine) IsDiscovered() bool { func (SM *StateMachine) GetStates() []string { var log []string + + //every thing in OldState that can't be found in curstate + for key2, value2 := range SM.GetLastResolvedStates() { + log = append(log, fmt.Sprintf("RESOLV %s : %s", key2, value2.ErrDesc)) + } + + for key, value := range SM.GetLastOpenedStates() { + log = append(log, fmt.Sprintf("OPENED %s : %s", key, value.ErrDesc)) + } + + return log +} + +func (SM *StateMachine) GetLastResolvedStates() map[string]State { + resolved := make(map[string]State) SM.Lock() //every thing in OldState that can't be found in curstate - for key2, value2 := range *SM.OldState { - if SM.CurState.Search(key2) == false { - //log = append(log, fmt.Sprintf("%-5s %s HAS BEEN FIXED, %s", value2.ErrType, key2, value2.ErrDesc)) - log = append(log, fmt.Sprintf("RESOLV %s : %s", key2, value2.ErrDesc)) + for key, state := range *SM.OldState { + if SM.CurState.Search(key) == false { + resolved[key] = state } } + SM.Unlock() + return resolved +} - for key, value := range *SM.CurState { +func (SM *StateMachine) GetLastOpenedStates() map[string]State { + opened := make(map[string]State) + SM.Lock() + //every thing in OldState that can't be found in curstate + for key, state := range *SM.CurState { if SM.OldState.Search(key) == false { - //log = append(log, fmt.Sprintf("%-5s %s %s", value.ErrType, key, value.ErrDesc)) - log = append(log, fmt.Sprintf("OPENED %s : %s", key, value.ErrDesc)) + opened[key] = state } } SM.Unlock() - return log + return opened } func (SM *StateMachine) GetResolvedStates() []State { From 92ef95e9863871d5584ce0e28e4c468768ececc0 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Tue, 15 Jun 2021 18:52:00 +0200 Subject: [PATCH 55/84] Exit checking the alert earlier if no email or script is set --- cluster/cluster_chk.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cluster/cluster_chk.go b/cluster/cluster_chk.go index b6475905b..8a28514b8 100644 --- a/cluster/cluster_chk.go +++ b/cluster/cluster_chk.go @@ -459,6 +459,11 @@ func (cluster *Cluster) CheckAlert(state state.State) { return } + // exit even earlier + if cluster.Conf.MailTo == "" && cluster.Conf.AlertScript == "" { + return + } + if strings.Contains(cluster.Conf.MonitoringAlertTrigger, state.ErrKey) { a := alert.Alert{ State: state.ErrKey, From 9d594c9dbbaed475081c208b58135a3042c3df48 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Wed, 23 Jun 2021 15:29:25 +0200 Subject: [PATCH 56/84] Changing default mac config --- etc/local/config.toml.osx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/local/config.toml.osx b/etc/local/config.toml.osx index cd9c5549f..fbf652c00 100644 --- a/etc/local/config.toml.osx +++ b/etc/local/config.toml.osx @@ -6,7 +6,7 @@ prov-db-binary-basedir= "/usr/local/bin" prov-db-client-basedir= "/usr/local/bin" backup-mysqlclient-path ="/usr/local/bin/mysql" backup-mysqlbinlog-path = 
"/usr/local/bin/mysqlbinlog" - +backup-mysqldump-options = "--hex-blob --single-transaction --verbose --all-databases --add-drop-database" #mysql #prov-db-binary-basedir= "/usr/local/mysql/bin" #prov-db-client-basedir= "/usr/local/mysql/bin" From 153772d1ba5149ce02de638730714af5ae675bd1 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Wed, 23 Jun 2021 18:24:35 +0200 Subject: [PATCH 57/84] Wait stop database server should wait for state failed and ignore auth error --- cluster/cluster_wait.go | 2 +- cluster/srv_has.go | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/cluster/cluster_wait.go b/cluster/cluster_wait.go index 0dcd4988a..0d8075f9f 100644 --- a/cluster/cluster_wait.go +++ b/cluster/cluster_wait.go @@ -250,7 +250,7 @@ func (cluster *Cluster) WaitDatabaseFailed(server *ServerMonitor) error { exitloop++ - if server.IsFailed() { + if server.IsInStateFailed() { exitloop = 9999999 } else { cluster.LogPrintf(LvlInfo, "Waiting state failed on %s ", server.URL) diff --git a/cluster/srv_has.go b/cluster/srv_has.go index df7a1b005..4f5a0451b 100644 --- a/cluster/srv_has.go +++ b/cluster/srv_has.go @@ -356,7 +356,7 @@ func (server *ServerMonitor) IsRunning() bool { return !server.IsDown() } -// IFailed() returns true is the server is Failed or auth error +// IsFailed() returns true is the server is Failed or auth error func (server *ServerMonitor) IsFailed() bool { if server.State == stateFailed || server.State == stateErrorAuth { return true @@ -364,6 +364,14 @@ func (server *ServerMonitor) IsFailed() bool { return false } +// IsInStateFailed() returns true is the server state is failed +func (server *ServerMonitor) IsInStateFailed() bool { + if server.State == stateFailed { + return true + } + return false +} + func (server *ServerMonitor) IsReplicationBroken() bool { if server.IsSQLThreadRunning() == false || server.IsIOThreadRunning() == false { return true From 447de0d0a6081eeac75714a22efb6de91d5b8f3f Mon Sep 17 00:00:00 2001 From: svaroqui Date: Wed, 23 Jun 2021 18:27:34 +0200 Subject: [PATCH 58/84] API disable/enable scheduler crash and is inverted --- cluster/cluster_tgl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/cluster_tgl.go b/cluster/cluster_tgl.go index 934bb671e..2a7b97dcf 100644 --- a/cluster/cluster_tgl.go +++ b/cluster/cluster_tgl.go @@ -249,7 +249,7 @@ func (cluster *Cluster) SwitchMonitoringProcesslist() { func (cluster *Cluster) SwitchMonitoringScheduler() { cluster.Conf.MonitorScheduler = !cluster.Conf.MonitorScheduler - if cluster.Conf.MonitorScheduler { + if !cluster.Conf.MonitorScheduler { cluster.LogPrintf(LvlInfo, "Stopping scheduler") cluster.scheduler.Stop() } else { From 13b9f25c1d10ba5a0ceb61274c69c24c3c91b930 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Wed, 23 Jun 2021 19:03:07 +0200 Subject: [PATCH 59/84] API Scheduler can't start when never initialized from config --- cluster/cluster_tgl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/cluster_tgl.go b/cluster/cluster_tgl.go index 2a7b97dcf..54fab3f60 100644 --- a/cluster/cluster_tgl.go +++ b/cluster/cluster_tgl.go @@ -254,7 +254,7 @@ func (cluster *Cluster) SwitchMonitoringScheduler() { cluster.scheduler.Stop() } else { cluster.LogPrintf(LvlInfo, "Starting scheduler") - cluster.scheduler.Start() + cluster.initScheduler() } } From b62f49737cf88ccdec87da2ac2f4b8ff41ccb959 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Tue, 15 Jun 2021 10:46:22 +0200 Subject: [PATCH 60/84] Fix logging an error when none occured --- 
cluster/cluster_chk.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cluster/cluster_chk.go b/cluster/cluster_chk.go index 49515f33b..b6475905b 100644 --- a/cluster/cluster_chk.go +++ b/cluster/cluster_chk.go @@ -466,7 +466,9 @@ func (cluster *Cluster) CheckAlert(state state.State) { } err := cluster.SendAlert(a) - cluster.LogPrintf("ERROR", "Could not send alert: %s ", err) + if err != nil { + cluster.LogPrintf("ERROR", "Could not send alert: %s ", err) + } } } From 66c6321d3f1dc1a0f720fa2cf3a5c54559a980b9 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Tue, 15 Jun 2021 18:38:21 +0200 Subject: [PATCH 61/84] Only email on Opened states --- cluster/cluster.go | 4 ++++ utils/state/state.go | 36 ++++++++++++++++++++++++++++-------- 2 files changed, 32 insertions(+), 8 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index 29b359ee9..86bf1e786 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -528,8 +528,12 @@ func (cluster *Cluster) StateProcessing() { ostates := cluster.sme.GetOpenStates() for _, s := range ostates { cluster.CheckCapture(s) + } + + for _, s := range cluster.sme.GetLastOpenedStates() { cluster.CheckAlert(s) } + cluster.sme.ClearState() if cluster.sme.GetHeartbeats()%60 == 0 { cluster.Save() diff --git a/utils/state/state.go b/utils/state/state.go index cb90d6150..2cb63fe34 100644 --- a/utils/state/state.go +++ b/utils/state/state.go @@ -296,23 +296,43 @@ func (SM *StateMachine) IsDiscovered() bool { func (SM *StateMachine) GetStates() []string { var log []string + + //every thing in OldState that can't be found in curstate + for key2, value2 := range SM.GetLastResolvedStates() { + log = append(log, fmt.Sprintf("RESOLV %s : %s", key2, value2.ErrDesc)) + } + + for key, value := range SM.GetLastOpenedStates() { + log = append(log, fmt.Sprintf("OPENED %s : %s", key, value.ErrDesc)) + } + + return log +} + +func (SM *StateMachine) GetLastResolvedStates() map[string]State { + resolved := make(map[string]State) SM.Lock() //every thing in OldState that can't be found in curstate - for key2, value2 := range *SM.OldState { - if SM.CurState.Search(key2) == false { - //log = append(log, fmt.Sprintf("%-5s %s HAS BEEN FIXED, %s", value2.ErrType, key2, value2.ErrDesc)) - log = append(log, fmt.Sprintf("RESOLV %s : %s", key2, value2.ErrDesc)) + for key, state := range *SM.OldState { + if SM.CurState.Search(key) == false { + resolved[key] = state } } + SM.Unlock() + return resolved +} - for key, value := range *SM.CurState { +func (SM *StateMachine) GetLastOpenedStates() map[string]State { + opened := make(map[string]State) + SM.Lock() + //every thing in OldState that can't be found in curstate + for key, state := range *SM.CurState { if SM.OldState.Search(key) == false { - //log = append(log, fmt.Sprintf("%-5s %s %s", value.ErrType, key, value.ErrDesc)) - log = append(log, fmt.Sprintf("OPENED %s : %s", key, value.ErrDesc)) + opened[key] = state } } SM.Unlock() - return log + return opened } func (SM *StateMachine) GetResolvedStates() []State { From e69b60082f5587d8be16e7895d2d5e61afb793d9 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Tue, 15 Jun 2021 18:52:00 +0200 Subject: [PATCH 62/84] Exit checking the alert earlier if no email or script is set --- cluster/cluster_chk.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cluster/cluster_chk.go b/cluster/cluster_chk.go index b6475905b..8a28514b8 100644 --- a/cluster/cluster_chk.go +++ b/cluster/cluster_chk.go @@ -459,6 +459,11 @@ func (cluster *Cluster) CheckAlert(state 
state.State) { return } + // exit even earlier + if cluster.Conf.MailTo == "" && cluster.Conf.AlertScript == "" { + return + } + if strings.Contains(cluster.Conf.MonitoringAlertTrigger, state.ErrKey) { a := alert.Alert{ State: state.ErrKey, From 91f55227e734fb10690c7efe7dd2fd79946ce183 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Wed, 23 Jun 2021 18:27:34 +0200 Subject: [PATCH 63/84] API disable/enable scheduler crash and is inverted --- cluster/cluster_tgl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/cluster_tgl.go b/cluster/cluster_tgl.go index 934bb671e..2a7b97dcf 100644 --- a/cluster/cluster_tgl.go +++ b/cluster/cluster_tgl.go @@ -249,7 +249,7 @@ func (cluster *Cluster) SwitchMonitoringProcesslist() { func (cluster *Cluster) SwitchMonitoringScheduler() { cluster.Conf.MonitorScheduler = !cluster.Conf.MonitorScheduler - if cluster.Conf.MonitorScheduler { + if !cluster.Conf.MonitorScheduler { cluster.LogPrintf(LvlInfo, "Stopping scheduler") cluster.scheduler.Stop() } else { From 110bc4c6c410e6e75cc9e557a2c077b28cfea18d Mon Sep 17 00:00:00 2001 From: svaroqui Date: Wed, 23 Jun 2021 19:03:07 +0200 Subject: [PATCH 64/84] API Scheduler can't start when never initialized from config --- cluster/cluster_tgl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/cluster_tgl.go b/cluster/cluster_tgl.go index 2a7b97dcf..54fab3f60 100644 --- a/cluster/cluster_tgl.go +++ b/cluster/cluster_tgl.go @@ -254,7 +254,7 @@ func (cluster *Cluster) SwitchMonitoringScheduler() { cluster.scheduler.Stop() } else { cluster.LogPrintf(LvlInfo, "Starting scheduler") - cluster.scheduler.Start() + cluster.initScheduler() } } From ec8e05438d33528b28a656e5e5e273a7d830d9e0 Mon Sep 17 00:00:00 2001 From: Yorick Terweijden Date: Tue, 15 Jun 2021 18:52:00 +0200 Subject: [PATCH 65/84] Exit checking the alert earlier if no email or script is set --- cluster/cluster_chk.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cluster/cluster_chk.go b/cluster/cluster_chk.go index 8a28514b8..6f8243731 100644 --- a/cluster/cluster_chk.go +++ b/cluster/cluster_chk.go @@ -464,6 +464,11 @@ func (cluster *Cluster) CheckAlert(state state.State) { return } + // exit even earlier + if cluster.Conf.MailTo == "" && cluster.Conf.AlertScript == "" { + return + } + if strings.Contains(cluster.Conf.MonitoringAlertTrigger, state.ErrKey) { a := alert.Alert{ State: state.ErrKey, From b0bcddaf7d322202863032a72641ad332e05872d Mon Sep 17 00:00:00 2001 From: svaroqui Date: Mon, 28 Jun 2021 12:54:09 +0200 Subject: [PATCH 66/84] Failover election can failed when no master discovery --- cluster/cluster_fail.go | 3 +++ cluster/srv_job.go | 4 ++-- go.mod | 6 +++--- go.sum | 8 ++++++++ share/opensvc/moduleset_mariadb.svc.mrm.db.json | 4 ++-- 5 files changed, 18 insertions(+), 7 deletions(-) diff --git a/cluster/cluster_fail.go b/cluster/cluster_fail.go index 63137a121..74dcd424b 100644 --- a/cluster/cluster_fail.go +++ b/cluster/cluster_fail.go @@ -821,6 +821,9 @@ func (cluster *Cluster) electFailoverCandidate(l []*ServerMonitor, forcingLog bo continue } } + if cluster.master == nil { + continue + } ss, errss := sl.GetSlaveStatus(sl.ReplicationSourceName) // not a slave diff --git a/cluster/srv_job.go b/cluster/srv_job.go index d3f14577e..ff06ad7eb 100644 --- a/cluster/srv_job.go +++ b/cluster/srv_job.go @@ -457,7 +457,7 @@ func (server *ServerMonitor) JobMyLoaderParseMeta(dir string) (config.MyDumperMe } func (server *ServerMonitor) JobsCheckRunning() error { - if server.IsDown() || 
server.ClusterGroup.Conf.MonitorScheduler == false { + if server.IsDown() { return nil } //server.JobInsertTaks("", "", "") @@ -856,7 +856,7 @@ func (server *ServerMonitor) JobRunViaSSH() error { return err } defer client.Close() - out, err2 := client.ScriptFile(server.Datadir + "/init/init/dbjobs_new").SmartOutput() + out, err2 := client.Script(server.Datadir + "/init/init/dbjobs_new").Output() if err2 != nil { server.ClusterGroup.LogPrintf(LvlErr, "JobRunViaSSH %s", err2) return err diff --git a/go.mod b/go.mod index 2b1b029a5..4596a4009 100644 --- a/go.mod +++ b/go.mod @@ -67,7 +67,7 @@ require ( github.com/hashicorp/golang-lru v0.5.1 github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/serf v0.0.0-20180213013805-d4f33d5b6a0b - github.com/helloyi/go-sshclient v0.0.0-20191203124208-f1e205501005 + github.com/helloyi/go-sshclient v1.0.0 github.com/howeyc/fsnotify v0.0.0-20151003194602-f0c08ee9c607 github.com/hpcloud/tail v1.0.0 github.com/hydrogen18/stalecucumber v0.0.0-20161215203336-0a94983f3e27 @@ -123,9 +123,9 @@ require ( github.com/walle/lll v1.0.1 // indirect github.com/wangjohn/quickselect v0.0.0-20161129230411-ed8402a42d5f github.com/xwb1989/sqlparser v0.0.0-20171128062118-da747e0c62c4 - golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529 + golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad golang.org/x/net v0.0.0-20200202094626-16171245cfb2 - golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f + golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 golang.org/x/text v0.3.2 google.golang.org/appengine v1.5.0 gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect diff --git a/go.sum b/go.sum index fd701e0e2..e70ec741c 100644 --- a/go.sum +++ b/go.sum @@ -238,6 +238,8 @@ github.com/hashicorp/serf v0.0.0-20180213013805-d4f33d5b6a0b h1:zDlT8SQxogA9IfDm github.com/hashicorp/serf v0.0.0-20180213013805-d4f33d5b6a0b/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= github.com/helloyi/go-sshclient v0.0.0-20191203124208-f1e205501005 h1:Oryd+XkS5Tk+8RB1aqF+r5WPtNi8xVPQ9KYh4lTRhkk= github.com/helloyi/go-sshclient v0.0.0-20191203124208-f1e205501005/go.mod h1:SONC5bXENAC+Y960BqCKzML+BP2/kF1RWeXNdJfysYg= +github.com/helloyi/go-sshclient v1.0.0 h1:pwDDc54wwyMlkwYbhszsX2UB1ajJM296WqLDtNDvcn8= +github.com/helloyi/go-sshclient v1.0.0/go.mod h1:NrhRWsYJDjoQXTDWZ4YtVk84wZ4LK3NSM9jD2TZDAm8= github.com/howeyc/fsnotify v0.0.0-20151003194602-f0c08ee9c607 h1:+7wvV++11s0Okyl1dekihkIiCIYDz+Qk2LvxAShINU4= github.com/howeyc/fsnotify v0.0.0-20151003194602-f0c08ee9c607/go.mod h1:41HzSPxBGeFRQKEEwgh49TRw/nKBsYZ2cF1OzPjSJsA= github.com/hpcloud/tail v0.0.0-20180514194441-a1dbeea552b7 h1:lus8hJKTrh146vNoUWNHv2F1jdtsud5ajNL0/YndJUw= @@ -497,6 +499,8 @@ golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529 h1:iMGN4xG0cnqj3t+zOM8wUB0BiPKHEwSxEZCvzcbZuvk= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint 
v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -544,6 +548,10 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20180208041248-4e4a3210bb54 h1:a5WocgxWTnjG0C4hZblDx+yonFbQMMbv8yJGhHMz/nY= golang.org/x/text v0.0.0-20180208041248-4e4a3210bb54/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/share/opensvc/moduleset_mariadb.svc.mrm.db.json b/share/opensvc/moduleset_mariadb.svc.mrm.db.json index b9c1d092f..4e91b5b11 100644 --- a/share/opensvc/moduleset_mariadb.svc.mrm.db.json +++ b/share/opensvc/moduleset_mariadb.svc.mrm.db.json @@ -2580,8 +2580,8 @@ { "var_author": "admin Manager", "var_class": "file", - "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/init/dbjobs_new\",\"mode\":755,\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"#!/bin/bash\\nUSER=%%ENV:SVC_CONF_ENV_MYSQL_ROOT_USER%%\\nPASSWORD=%%ENV:SVC_CONF_ENV_MYSQL_ROOT_PASSWORD%%\\nMYSQL_PORT=%%ENV:SERVER_PORT%%\\nMYSQL_SERVER=%%ENV:SERVER_HOST%%\\nCLUSTER_NAME=%%ENV:SVC_NAMESPACE%%\\nREPLICATION_MANAGER_ADDR=%%ENV:SVC_CONF_ENV_REPLICATION_MANAGER_ADDR%%\\nMYSQL_CONF=%%ENV:SVC_CONF_ENV_MYSQL_CONFDIR%%\\nDATADIR=%%ENV:SVC_CONF_ENV_MYSQL_DATADIR%%\\nMYSQL_CLIENT=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysql\\nMYSQL_CHECK=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysqlcheck\\nMYSQL_DUMP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysqldump\\nSST_RECEIVER_PORT=%%ENV:SVC_CONF_ENV_SST_RECEIVER_PORT%%\\nSOCAT_BIND=%%ENV:SERVER_IP%%\\nMARIADB_BACKUP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mariabackup\\nXTRABACKUP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/xtrabackup\\nINNODBACKUPEX=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/innobackupex\\n\\nERROLOG=$DATADIR/.system/logs/error.log\\nSLOWLOG=$DATADIR/.system/logs/slow-query.log\\nBACKUPDIR=$DATADIR/.system/backup\\n\\nJOBS=( \\\"xtrabackup\\\" \\\"mariabackup\\\" \\\"error\\\" \\\"slowquery\\\" \\\"zfssnapback\\\" \\\"optimize\\\" \\\"reseedxtrabackup\\\" \\\"reseedmariabackup\\\" \\\"reseedmysqldump\\\" \\\"flashbackxtrabackup\\\" \\\"flashbackmariadbackup\\\" \\\"flashbackmysqldump\\\" \\\"stop\\\" \\\"restart\\\" \\\"start\\\")\\n\\nsocatCleaner()\\n{\\n kill -9 $(lsof -t -i:$SST_RECEIVER_PORT -sTCP:LISTEN)\\n}\\n\\ndoneJob()\\n{\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;UPDATE replication_manager_schema.jobs set end=NOW(), result=LOAD_FILE('/tmp/dbjob.out') WHERE id='$ID';\\\" &\\n}\\n\\npauseJob()\\n{\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"select sleep(6);set sql_log_bin=0;UPDATE replication_manager_schema.jobs set result=LOAD_FILE('/tmp/dbjob.out') WHERE 
id='$ID';\\\" &\\n}\\n\\npartialRestore()\\n{\\n chown -R mysql:mysql $BACKUPDIR\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;install plugin BLACKHOLE soname 'ha_blackhole.so'\\\"\\n for dir in $(ls -d $BACKUPDIR/*/ | xargs -n 1 basename | grep -vE 'mysql|performance_schema|replication_manager_schema') ; do\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;drop database IF EXISTS $dir; CREATE DATABASE $dir;\\\"\\n\\n\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.exp\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n cat $BACKUPDIR/$dir/$file.frm | sed -e 's/\\\\x06\\\\x00\\\\x49\\\\x6E\\\\x6E\\\\x6F\\\\x44\\\\x42\\\\x00\\\\x00\\\\x00/\\\\x09\\\\x00\\\\x42\\\\x4C\\\\x41\\\\x43\\\\x4B\\\\x48\\\\x4F\\\\x4C\\\\x45/g' > $DATADIR/$dir/mrm_pivo.frm\\n chown mysql:mysql $DATADIR/$dir/mrm_pivo.frm\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;ALTER TABLE $dir.mrm_pivo engine=innodb;RENAME TABLE $dir.mrm_pivo TO $dir.$file; ALTER TABLE $dir.$file DISCARD TABLESPACE;\\\"\\n mv $BACKUPDIR/$dir/$file.ibd $DATADIR/$dir/$file.ibd\\n mv $BACKUPDIR/$dir/$file.exp $DATADIR/$dir/$file.exp\\n mv $BACKUPDIR/$dir/$file.cfg $DATADIR/$dir/$file.cfg\\n mv $BACKUPDIR/$dir/$file.TRG $DATADIR/$dir/$file.TRG\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;ALTER TABLE $dir.$file IMPORT TABLESPACE\\\"\\n done\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.MYD\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n mv $BACKUPDIR/$dir/$file.* $DATADIR/$dir/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE $dir.$file\\\"\\n done\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.CSV\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n mv $BACKUPDIR/$dir/$file.* $DATADIR/$dir/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE $dir.$file\\\"\\n done\\n done\\n for file in $(find $BACKUPDIR/mysql/ -name \\\"*.MYD\\\" | xargs -n 1 basename | cut -d'.' 
--complement -f2-) ; do\\n mv $BACKUPDIR/mysql/$file.* $DATADIR/mysql/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE mysql.$file\\\"\\n done\\n cat $BACKUPDIR/xtrabackup_info | grep binlog_pos | awk -F, '{ print $3 }' | sed -e 's/GTID of the last change/set sql_log_bin=0;set global gtid_slave_pos=/g' | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e\\\"flush privileges;start slave;\\\"\\n}\\n\\nfor job in \\\"${JOBS[@]}\\\"\\ndo\\n\\n TASK=($(echo \\\"select concat(id,'@',server,':',port) from replication_manager_schema.jobs WHERE task='$job' and done=0 order by task desc limit 1\\\" | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -N))\\n\\n ADDRESS=($(echo $TASK | awk -F@ '{ print $2 }'))\\n ID=($(echo $TASK | awk -F@ '{ print $1 }'))\\n #purge de past\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;UPDATE replication_manager_schema.jobs set done=1 WHERE done=0 AND task='$job';\\\"\\n\\n if [ \\\"$ADDRESS\\\" == \\\"\\\" ]; then\\n echo \\\"No $job needed\\\"\\n case \\\"$job\\\" in \\n start)\\n if [ \\\"curl -so /dev/null -w '%{response_code}' http://$REPLICATION_MANAGER_ADDR/api/clusters/$CLUSTER_NAME/servers/$MYSQL_SERVER/$MYSQL_PORT/need-start\\\" == \\\"200\\\" ]; then\\n curl http://$REPLICATION_MANAGER_ADDR/api/clusters/$CLUSTER_NAME/servers/$MYSQL_SERVER/$MYSQL_PORT/config|tar xzvf etc/* - -C $CONFDIR/../..\\n systemctl start mysql \\n fi\\n ;;\\n esac\\n else\\n echo \\\"Processing $job\\\"\\n case \\\"$job\\\" in\\n reseedmysqldump)\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | gunzip | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --init-command=\\\"reset master;set sql_log_bin=0;\\\" > /tmp/dbjob.out 2>&1\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e 'start slave;'\\n ;;\\n flashbackmysqldump)\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | gunzip | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --init-command=\\\"set sql_log_bin=0\\\" > /tmp/dbjob.out 2>&1\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e 'start slave;'\\n ;;\\n reseedxtrabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $XTRABACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n reseedmariabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | mbstream -x -C $BACKUPDIR\\n # mbstream -p, --parallel\\n $MARIADB_BACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n flashbackxtrabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner \\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $XTRABACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n flashbackmariadbackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n 
pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $MARIADB_BACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n xtrabackup)\\n cd /docker-entrypoint-initdb.d\\n $INNODBACKUPEX --defaults-file=$MYSQL_CONF/my.cnf --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --no-version-check --stream=xbstream /tmp/ | socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n ;;\\n mariabackup)\\n cd /docker-entrypoint-initdb.d\\n $MARIADB_BACKUP --innobackupex --defaults-file=$MYSQL_CONF/my.cnf --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --no-version-check --stream=xbstream /tmp/ | socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n ;;\\n error)\\n cat $ERROLOG| socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n > $ERROLOG\\n ;;\\n slowquery)\\n cat $SLOWLOG| socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n > $SLOWLOG\\n ;;\\n zfssnapback)\\n LASTSNAP=`zfs list -r -t all |grep zp%%ENV:SERVICES_SVCNAME%%_pod01 | grep daily | sort -r | head -n 1 | cut -d\\\" \\\" -f1`\\n %%ENV:SERVICES_SVCNAME%% stop\\n zfs rollback $LASTSNAP\\n %%ENV:SERVICES_SVCNAME%% start\\n ;;\\n optimize)\\n $MYSQL_CHECK -o --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --all-databases --skip-write-binlog &>/tmp/dbjob.out\\n ;;\\n restart)\\n systemctl restart mysql \\n journalctl -u mysql > /tmp/dbjob.out \\n ;;\\n stop)\\n systemctl stop mysql \\n journalctl -u mysql > /tmp/dbjob.out \\n ;;\\n esac\\n doneJob\\n fi\\n\\ndone\\n\"}", - "var_updated": "2021-02-06 09:19:15", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/init/dbjobs_new\",\"mode\":755,\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"#!/bin/bash\\nset -x\\nUSER=%%ENV:SVC_CONF_ENV_MYSQL_ROOT_USER%%\\nPASSWORD=%%ENV:SVC_CONF_ENV_MYSQL_ROOT_PASSWORD%%\\nMYSQL_PORT=%%ENV:SERVER_PORT%%\\nMYSQL_SERVER=%%ENV:SERVER_HOST%%\\nCLUSTER_NAME=%%ENV:SVC_NAMESPACE%%\\nREPLICATION_MANAGER_ADDR=%%ENV:SVC_CONF_ENV_REPLICATION_MANAGER_ADDR%%\\nMYSQL_CONF=%%ENV:SVC_CONF_ENV_MYSQL_CONFDIR%%\\nDATADIR=%%ENV:SVC_CONF_ENV_MYSQL_DATADIR%%\\nMYSQL_CLIENT=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysql\\nMYSQL_CHECK=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysqlcheck\\nMYSQL_DUMP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysqldump\\nSST_RECEIVER_PORT=%%ENV:SVC_CONF_ENV_SST_RECEIVER_PORT%%\\nSOCAT_BIND=%%ENV:SERVER_IP%%\\nMARIADB_BACKUP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mariabackup\\nXTRABACKUP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/xtrabackup\\nINNODBACKUPEX=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/innobackupex\\n\\nERROLOG=$DATADIR/.system/logs/error.log\\nSLOWLOG=$DATADIR/.system/logs/slow-query.log\\nBACKUPDIR=$DATADIR/.system/backup\\n\\nJOBS=( \\\"xtrabackup\\\" \\\"mariabackup\\\" \\\"error\\\" \\\"slowquery\\\" \\\"zfssnapback\\\" \\\"optimize\\\" \\\"reseedxtrabackup\\\" \\\"reseedmariabackup\\\" \\\"reseedmysqldump\\\" \\\"flashbackxtrabackup\\\" \\\"flashbackmariadbackup\\\" \\\"flashbackmysqldump\\\" \\\"stop\\\" \\\"restart\\\" \\\"start\\\")\\n\\nsocatCleaner()\\n{\\n kill -9 $(lsof -t -i:$SST_RECEIVER_PORT -sTCP:LISTEN)\\n}\\n\\ndoneJob()\\n{\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;UPDATE replication_manager_schema.jobs set end=NOW(), result=LOAD_FILE('/tmp/dbjob.out') WHERE id='$ID';\\\" &\\n}\\n\\npauseJob()\\n{\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"select sleep(20);set sql_log_bin=0;UPDATE replication_manager_schema.jobs set set done=1,result=LOAD_FILE('/tmp/dbjob.out') WHERE id='$ID';\\\" 
&\\n}\\n\\npartialRestore()\\n{\\n chown -R mysql:mysql $BACKUPDIR\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;install plugin BLACKHOLE soname 'ha_blackhole.so'\\\"\\n for dir in $(ls -d $BACKUPDIR/*/ | xargs -n 1 basename | grep -vE 'mysql|performance_schema|replication_manager_schema') ; do\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;drop database IF EXISTS $dir; CREATE DATABASE $dir;\\\"\\n\\n\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.exp\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n cat $BACKUPDIR/$dir/$file.frm | sed -e 's/\\\\x06\\\\x00\\\\x49\\\\x6E\\\\x6E\\\\x6F\\\\x44\\\\x42\\\\x00\\\\x00\\\\x00/\\\\x09\\\\x00\\\\x42\\\\x4C\\\\x41\\\\x43\\\\x4B\\\\x48\\\\x4F\\\\x4C\\\\x45/g' > $DATADIR/$dir/mrm_pivo.frm\\n chown mysql:mysql $DATADIR/$dir/mrm_pivo.frm\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;ALTER TABLE $dir.mrm_pivo engine=innodb;RENAME TABLE $dir.mrm_pivo TO $dir.$file; ALTER TABLE $dir.$file DISCARD TABLESPACE;\\\"\\n mv $BACKUPDIR/$dir/$file.ibd $DATADIR/$dir/$file.ibd\\n mv $BACKUPDIR/$dir/$file.exp $DATADIR/$dir/$file.exp\\n mv $BACKUPDIR/$dir/$file.cfg $DATADIR/$dir/$file.cfg\\n mv $BACKUPDIR/$dir/$file.TRG $DATADIR/$dir/$file.TRG\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;ALTER TABLE $dir.$file IMPORT TABLESPACE\\\"\\n done\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.MYD\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n mv $BACKUPDIR/$dir/$file.* $DATADIR/$dir/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE $dir.$file\\\"\\n done\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.CSV\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n mv $BACKUPDIR/$dir/$file.* $DATADIR/$dir/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE $dir.$file\\\"\\n done\\n done\\n for file in $(find $BACKUPDIR/mysql/ -name \\\"*.MYD\\\" | xargs -n 1 basename | cut -d'.' 
--complement -f2-) ; do\\n mv $BACKUPDIR/mysql/$file.* $DATADIR/mysql/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE mysql.$file\\\"\\n done\\n cat $BACKUPDIR/xtrabackup_info | grep binlog_pos | awk -F, '{ print $3 }' | sed -e 's/GTID of the last change/set sql_log_bin=0;set global gtid_slave_pos=/g' | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e\\\"flush privileges;start slave;\\\"\\n}\\n\\nfor job in \\\"${JOBS[@]}\\\"\\ndo\\n\\n TASK=($(echo \\\"select concat(id,'@',server,':',port) from replication_manager_schema.jobs WHERE task='$job' and done=0 order by task desc limit 1\\\" | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -N))\\n\\n ADDRESS=($(echo $TASK | awk -F@ '{ print $2 }'))\\n ID=($(echo $TASK | awk -F@ '{ print $1 }'))\\n #purge de past\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;UPDATE replication_manager_schema.jobs set done=1 WHERE done=0 AND task='$job' AND ID<>$ID;\\\"\\n\\n if [ \\\"$ADDRESS\\\" == \\\"\\\" ]; then\\n echo \\\"No $job needed\\\"\\n case \\\"$job\\\" in \\n start)\\n if [ \\\"curl -so /dev/null -w '%{response_code}' http://$REPLICATION_MANAGER_ADDR/api/clusters/$CLUSTER_NAME/servers/$MYSQL_SERVER/$MYSQL_PORT/need-start\\\" == \\\"200\\\" ]; then\\n curl http://$REPLICATION_MANAGER_ADDR/api/clusters/$CLUSTER_NAME/servers/$MYSQL_SERVER/$MYSQL_PORT/config|tar xzvf etc/* - -C $CONFDIR/../..\\n systemctl start mysql \\n fi\\n ;;\\n esac\\n else\\n echo \\\"Processing $job\\\"\\n case \\\"$job\\\" in\\n reseedmysqldump)\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n time socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | gunzip | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --init-command=\\\"reset master;set sql_log_bin=0;\\\" > /tmp/dbjob.out 2>&1\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e 'start slave;'\\n ;;\\n flashbackmysqldump)\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | gunzip | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --init-command=\\\"set sql_log_bin=0\\\" > /tmp/dbjob.out 2>&1\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e 'start slave;'\\n ;;\\n reseedxtrabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $XTRABACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n reseedmariabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | mbstream -x -C $BACKUPDIR\\n # mbstream -p, --parallel\\n $MARIADB_BACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n flashbackxtrabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner \\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $XTRABACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n flashbackmariadbackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > 
/tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $MARIADB_BACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n xtrabackup)\\n cd /docker-entrypoint-initdb.d\\n $INNODBACKUPEX --defaults-file=$MYSQL_CONF/my.cnf --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --no-version-check --stream=xbstream /tmp/ | socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n ;;\\n mariabackup)\\n cd /docker-entrypoint-initdb.d\\n $MARIADB_BACKUP --innobackupex --defaults-file=$MYSQL_CONF/my.cnf --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --no-version-check --stream=xbstream /tmp/ | socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n ;;\\n error)\\n cat $ERROLOG| socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n > $ERROLOG\\n ;;\\n slowquery)\\n cat $SLOWLOG| socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n > $SLOWLOG\\n ;;\\n zfssnapback)\\n LASTSNAP=`zfs list -r -t all |grep zp%%ENV:SERVICES_SVCNAME%%_pod01 | grep daily | sort -r | head -n 1 | cut -d\\\" \\\" -f1`\\n %%ENV:SERVICES_SVCNAME%% stop\\n zfs rollback $LASTSNAP\\n %%ENV:SERVICES_SVCNAME%% start\\n ;;\\n optimize)\\n $MYSQL_CHECK -o --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --all-databases --skip-write-binlog &>/tmp/dbjob.out\\n ;;\\n restart)\\n systemctl restart mysql \\n journalctl -u mysql > /tmp/dbjob.out \\n ;;\\n stop)\\n systemctl stop mysql \\n journalctl -u mysql > /tmp/dbjob.out \\n ;;\\n esac\\n doneJob\\n fi\\n\\ndone\\n\"}", + "var_updated": "2021-06-28 10:01:07", "var_name": "db_cnf_script_dbjobs_new", "id": 6270 } From b4d185fd28078a7daa0f07ce4c916bed2264b07f Mon Sep 17 00:00:00 2001 From: svaroqui Date: Mon, 28 Jun 2021 13:40:44 +0200 Subject: [PATCH 67/84] Move DelWaitStartCookie, before refresh in case of refresh error --- cluster/srv.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/cluster/srv.go b/cluster/srv.go index 8253484f8..60e5eef96 100644 --- a/cluster/srv.go +++ b/cluster/srv.go @@ -400,6 +400,12 @@ func (server *ServerMonitor) Ping(wg *sync.WaitGroup) { server.ClusterGroup.LogPrintf(LvlDbg, "Inside failover, skiping refresh") return } + // For orchestrator to trigger a start via tracking state URL + if server.PrevState == stateFailed { + server.DelWaitStartCookie() + server.DelRestartCookie() + } + // reaffect a global DB pool object if we never get it , ex dynamic seeding if server.Conn == nil { server.Conn = conn @@ -415,11 +421,6 @@ func (server *ServerMonitor) Ping(wg *sync.WaitGroup) { } defer conn.Close() - // For orchestrator to trigger a start via tracking state URL - if server.PrevState == stateFailed { - server.DelWaitStartCookie() - server.DelRestartCookie() - } // Reset FailCount if (server.State != stateFailed && server.State != stateErrorAuth && server.State != stateSuspect) && (server.FailCount > 0) /*&& (((server.ClusterGroup.sme.GetHeartbeats() - server.FailSuspectHeartbeat) * server.ClusterGroup.Conf.MonitoringTicker) > server.ClusterGroup.Conf.FailResetTime)*/ { server.FailCount = 0 From f06449626849af79428c494bf2c171dacb98d034 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Mon, 28 Jun 2021 14:39:24 +0200 Subject: [PATCH 68/84] Fix broken Cookies delete --- cluster/srv_del.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/srv_del.go b/cluster/srv_del.go index 08e979d49..602a999a4 100644 --- a/cluster/srv_del.go +++ b/cluster/srv_del.go @@ -14,7 +14,7 @@ import ( ) func (server *ServerMonitor) delCookie(key string) error { - err := 
os.Remove(server.Datadir + "/@/" + key) + err := os.Remove(server.Datadir + "/@" + key) if err != nil { server.ClusterGroup.LogPrintf(LvlDbg, "Remove cookie (%s) %s", key, err) } From 393a2e753d440d6ca935f450c22b30590d6c323d Mon Sep 17 00:00:00 2001 From: svaroqui Date: Wed, 30 Jun 2021 19:18:07 +0200 Subject: [PATCH 69/84] Add cookie_waitbackup on reseed or flashback if cookie_physicalbackup or cookie_logicalbackup --- cluster/srv_job.go | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/cluster/srv_job.go b/cluster/srv_job.go index ff06ad7eb..e88f1927e 100644 --- a/cluster/srv_job.go +++ b/cluster/srv_job.go @@ -122,7 +122,10 @@ func (server *ServerMonitor) JobBackupPhysical() (int64, error) { } func (server *ServerMonitor) JobReseedPhysicalBackup() (int64, error) { - + if server.ClusterGroup.master != nil && !server.ClusterGroup.GetBackupServer().HasBackupPhysicalCookie() { + server.createCookie("cookie_waitbackup") + return 0, errors.New("No Physical Backup") + } jobid, err := server.JobInsertTaks("reseed"+server.ClusterGroup.Conf.BackupPhysicalType, server.SSTPort, server.ClusterGroup.Conf.MonitorAddress) if err != nil { @@ -153,6 +156,10 @@ func (server *ServerMonitor) JobReseedPhysicalBackup() (int64, error) { } func (server *ServerMonitor) JobFlashbackPhysicalBackup() (int64, error) { + if server.ClusterGroup.master != nil && !server.ClusterGroup.GetBackupServer().HasBackupPhysicalCookie() { + server.createCookie("cookie_waitbackup") + return 0, errors.New("No Physical Backup") + } jobid, err := server.JobInsertTaks("flashback"+server.ClusterGroup.Conf.BackupPhysicalType, server.SSTPort, server.ClusterGroup.Conf.MonitorAddress) @@ -186,6 +193,12 @@ func (server *ServerMonitor) JobFlashbackPhysicalBackup() (int64, error) { } func (server *ServerMonitor) JobReseedLogicalBackup() (int64, error) { + + if server.ClusterGroup.master != nil && !server.ClusterGroup.GetBackupServer().HasBackupLogicalCookie() { + server.createCookie("cookie_waitbackup") + return 0, errors.New("No Logical Backup") + } + jobid, err := server.JobInsertTaks("reseed"+server.ClusterGroup.Conf.BackupLogicalType, server.SSTPort, server.ClusterGroup.Conf.MonitorAddress) if err != nil { @@ -237,6 +250,10 @@ func (server *ServerMonitor) JobServerRestart() (int64, error) { } func (server *ServerMonitor) JobFlashbackLogicalBackup() (int64, error) { + if server.ClusterGroup.master != nil && !server.ClusterGroup.GetBackupServer().HasBackupLogicalCookie() { + server.createCookie("cookie_waitbackup") + return 0, errors.New("No Logical Backup") + } jobid, err := server.JobInsertTaks("flashback"+server.ClusterGroup.Conf.BackupLogicalType, server.SSTPort, server.ClusterGroup.Conf.MonitorAddress) if err != nil { server.ClusterGroup.LogPrintf(LvlErr, "Receive reseed logical backup %s request for server: %s %s", server.ClusterGroup.Conf.BackupPhysicalType, server.URL, err) From f7e9dbaf02d6e14b20ae099b5c7a84374353ea7e Mon Sep 17 00:00:00 2001 From: svaroqui Date: Sat, 3 Jul 2021 11:13:47 +0200 Subject: [PATCH 70/84] API refine error messages for get config API failed to download proxy config split GetDatadir and GetConfigDatadir --- cluster/prx.go | 2 +- cluster/prx_get.go | 12 ++++++++++-- server/api_database.go | 7 ++++--- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/cluster/prx.go b/cluster/prx.go index 8cd930bbc..52a7d886f 100644 --- a/cluster/prx.go +++ b/cluster/prx.go @@ -111,9 +111,9 @@ type DatabaseProxy interface { GetUseSSL() string GetUseCompression() string 
GetDatadir() string + GetConfigDatadir() string GetEnv() map[string]string GetConfigProxyModule(variable string) string - SendStats() error OpenSVCGetProxyDefaultSection() map[string]string diff --git a/cluster/prx_get.go b/cluster/prx_get.go index f5105ba98..b6e49c848 100644 --- a/cluster/prx_get.go +++ b/cluster/prx_get.go @@ -205,13 +205,21 @@ func (proxy *Proxy) GetUseCompression() string { } -func (proxy *Proxy) GetDatadir() string { +func (proxy *Proxy) GetConfigDatadir() string { if proxy.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorSlapOS { return proxy.SlapOSDatadir } return "/tmp" } +func (proxy *Proxy) GetDatadir() string { + return proxy.Datadir +} + +func (proxy *Proxy) GetName() string { + return proxy.Name +} + func (proxy *ProxySQLProxy) GetEnv() map[string]string { env := proxy.GetBaseEnv() env["%%ENV:SVC_CONF_ENV_PROXYSQL_READ_ON_MASTER%%"] = proxy.ProxySQLReadOnMaster() @@ -261,7 +269,7 @@ func (proxy *Proxy) GetBaseEnv() map[string]string { "%%ENV:SVC_CONF_ENV_VIP_PORT%%": proxy.ClusterGroup.Conf.ProvProxRoutePort, "%%ENV:SVC_CONF_ENV_MRM_API_ADDR%%": proxy.ClusterGroup.Conf.MonitorAddress + ":" + proxy.ClusterGroup.Conf.HttpPort, "%%ENV:SVC_CONF_ENV_MRM_CLUSTER_NAME%%": proxy.ClusterGroup.GetClusterName(), - "%%ENV:SVC_CONF_ENV_DATADIR%%": proxy.GetDatadir(), + "%%ENV:SVC_CONF_ENV_DATADIR%%": proxy.GetConfigDatadir(), } } diff --git a/server/api_database.go b/server/api_database.go index d4b572dd6..0017ed9eb 100644 --- a/server/api_database.go +++ b/server/api_database.go @@ -1353,7 +1353,7 @@ func (repman *ReplicationManager) handlerMuxServersPortConfig(w http.ResponseWri vars := mux.Vars(r) mycluster := repman.getClusterByName(vars["clusterName"]) if mycluster != nil { - if repman.Conf.APISecureConfig { + if mycluster.Conf.APISecureConfig { if !repman.IsValidClusterACL(r, mycluster) { http.Error(w, "No valid ACL", 403) return @@ -1367,7 +1367,7 @@ func (repman *ReplicationManager) handlerMuxServersPortConfig(w http.ResponseWri if err != nil { r.URL.Path = r.URL.Path + ".tar.gz" w.WriteHeader(404) - w.Write([]byte("404 Something went wrong - " + http.StatusText(404))) + w.Write([]byte("404 Something went wrong reading : " + string(node.Datadir+"/config.tar.gz") + " " + err.Error() + " - " + http.StatusText(404))) return } w.Write(data) @@ -1378,7 +1378,8 @@ func (repman *ReplicationManager) handlerMuxServersPortConfig(w http.ResponseWri if err != nil { r.URL.Path = r.URL.Path + ".tar.gz" w.WriteHeader(404) - w.Write([]byte("404 Something went wrong - " + http.StatusText(404))) + w.Write([]byte("404 Something went wrong reading : " + string(proxy.GetDatadir()+"/config.tar.gz") + " " + err.Error() + " - " + http.StatusText(404))) + return } w.Write(data) From 163a4c9be8f8ed02cad7830b6d76c8e45fffd3d4 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Wed, 7 Jul 2021 12:53:09 +0200 Subject: [PATCH 71/84] Config fixing proxysql can have empty monitor_writer_is_also_reader --- cluster/cluster.go | 3 ++ cluster/cluster_has.go | 16 ++++++ cluster/error.go | 1 + cluster/prov_opensvc_proxysql.go | 7 --- cluster/prov_opensvc_prx.go | 4 -- cluster/prx_get.go | 85 +++++++++++++++++--------------- 6 files changed, 66 insertions(+), 50 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index 86bf1e786..1f1e88148 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -62,6 +62,7 @@ type Cluster struct { IsNeedDatabasesRollingRestart bool `json:"isNeedDatabasesRollingRestart"` IsNeedDatabasesRollingReprov bool `json:"isNeedDatabasesRollingReprov"` 
IsNeedDatabasesReprov bool `json:"isNeedDatabasesReprov"` + IsValidBackup bool `json:"isValidBackup"` IsNotMonitoring bool `json:"isNotMonitoring"` IsCapturing bool `json:"isCapturing"` Conf config.Config `json:"config"` @@ -436,12 +437,14 @@ func (cluster *Cluster) Run() { cluster.MonitorQueryRules() cluster.MonitorVariablesDiff() cluster.ResticFetchRepo() + cluster.IsValidBackup = cluster.HasValidBackup() } else { cluster.sme.PreserveState("WARN0093") cluster.sme.PreserveState("WARN0084") cluster.sme.PreserveState("WARN0095") cluster.sme.PreserveState("ERR00082") + cluster.sme.PreserveState("WARN0101") } if cluster.sme.GetHeartbeats()%36000 == 0 { cluster.ResticPurgeRepo() diff --git a/cluster/cluster_has.go b/cluster/cluster_has.go index b88bb1cf6..94c658b03 100644 --- a/cluster/cluster_has.go +++ b/cluster/cluster_has.go @@ -7,9 +7,11 @@ package cluster import ( + "fmt" "strings" "github.com/signal18/replication-manager/config" + "github.com/signal18/replication-manager/utils/state" ) func (cluster *Cluster) HasServer(srv *ServerMonitor) bool { @@ -23,6 +25,20 @@ func (cluster *Cluster) HasServer(srv *ServerMonitor) bool { return false } +func (cluster *Cluster) HasValidBackup() bool { + if cluster.Conf.MonitorScheduler && (cluster.Conf.SchedulerBackupLogical || cluster.Conf.SchedulerBackupPhysical) { + sv := cluster.GetBackupServer() + if sv != nil { + if sv.HasBackupLogicalCookie() || sv.HasBackupPhysicalCookie() { + return true + } + } + } + cluster.SetState("WARN0101", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0101"]), ErrFrom: "TOPO"}) + return false + +} + func (cluster *Cluster) HasSchedulerEntry(myname string) bool { if _, ok := cluster.Schedule[myname]; ok { return true diff --git a/cluster/error.go b/cluster/error.go index a321c785a..b8d20e321 100644 --- a/cluster/error.go +++ b/cluster/error.go @@ -149,4 +149,5 @@ var clusterError = map[string]string{ "WARN0098": "ProxySQL could not load global variables from runtime (%s)", "WARN0099": "MariaDB version as replication issue https://jira.mariadb.org/browse/MDEV-20821", "WARN0100": "No space left on device pn %s", + "WARN0101": "Cluster does not have backup", } diff --git a/cluster/prov_opensvc_proxysql.go b/cluster/prov_opensvc_proxysql.go index decae7e67..120818531 100644 --- a/cluster/prov_opensvc_proxysql.go +++ b/cluster/prov_opensvc_proxysql.go @@ -13,13 +13,6 @@ import ( "github.com/signal18/replication-manager/opensvc" ) -func (proxy *ProxySQLProxy) ProxySQLReadOnMaster() string { - if proxy.IsFilterInTags("proxy.route.readonmaster") { - return "1" - } - return "0" -} - func (cluster *Cluster) OpenSVCGetProxysqlContainerSection(server *ProxySQLProxy) map[string]string { svccontainer := make(map[string]string) if server.ClusterGroup.Conf.ProvProxType == "docker" || server.ClusterGroup.Conf.ProvProxType == "podman" || server.ClusterGroup.Conf.ProvProxType == "oci" { diff --git a/cluster/prov_opensvc_prx.go b/cluster/prov_opensvc_prx.go index 8f3f686ca..5d1edc28a 100644 --- a/cluster/prov_opensvc_prx.go +++ b/cluster/prov_opensvc_prx.go @@ -508,10 +508,6 @@ mrm_api_addr = ` + cluster.Conf.MonitorAddress + ":" + cluster.Conf.HttpPort + ` mrm_cluster_name = ` + cluster.GetClusterName() + ` ` - if pr, ok := prx.(*ProxySQLProxy); ok { - conf += `proxysql_read_on_master = ` + pr.ProxySQLReadOnMaster() - } - return conf } diff --git a/cluster/prx_get.go b/cluster/prx_get.go index b6e49c848..c6954f3e1 100644 --- a/cluster/prx_get.go +++ b/cluster/prx_get.go @@ -222,7 +222,6 @@ func (proxy *Proxy) 
GetName() string { func (proxy *ProxySQLProxy) GetEnv() map[string]string { env := proxy.GetBaseEnv() - env["%%ENV:SVC_CONF_ENV_PROXYSQL_READ_ON_MASTER%%"] = proxy.ProxySQLReadOnMaster() return env } @@ -232,47 +231,55 @@ func (proxy *Proxy) GetEnv() map[string]string { func (proxy *Proxy) GetBaseEnv() map[string]string { return map[string]string{ - "%%ENV:NODES_CPU_CORES%%": proxy.ClusterGroup.Conf.ProvCores, - "%%ENV:SVC_CONF_ENV_MAX_CORES%%": proxy.ClusterGroup.Conf.ProvCores, - "%%ENV:SVC_CONF_ENV_CRC32_ID%%": string(proxy.Id[2:10]), - "%%ENV:SVC_CONF_ENV_SERVER_ID%%": string(proxy.Id[2:10]), - "%%ENV:SVC_CONF_ENV_MYSQL_ROOT_PASSWORD%%": proxy.ClusterGroup.dbPass, - "%%ENV:SVC_CONF_ENV_MYSQL_ROOT_USER%%": proxy.ClusterGroup.dbUser, - "%%ENV:SERVER_IP%%": proxy.GetBindAddress(), - "%%ENV:EXTRA_BIND_SERVER_IPV6%%": proxy.GetBindAddressExtraIPV6(), - "%%ENV:SVC_CONF_ENV_PROXY_USE_SSL%%": proxy.GetUseSSL(), - "%%ENV:SVC_CONF_ENV_PROXY_USE_COMPRESS%%": proxy.GetUseCompression(), - "%%ENV:SERVER_PORT%%": proxy.Port, - "%%ENV:SVC_NAMESPACE%%": proxy.ClusterGroup.Name, - "%%ENV:SVC_NAME%%": proxy.Name, - "%%ENV:SERVERS_HAPROXY_WRITE%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_HAPROXY_WRITE%%"), - "%%ENV:SERVERS_HAPROXY_READ%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_HAPROXY_READ%%"), - "%%ENV:SERVERS_HAPROXY_WRITE_BACKEND%%": proxy.ClusterGroup.Conf.HaproxyAPIWriteBackend, - "%%ENV:SERVERS_HAPROXY_READ_BACKEND%%": proxy.ClusterGroup.Conf.HaproxyAPIReadBackend, - "%%ENV:SERVERS_PROXYSQL%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_PROXYSQL%%"), - "%%ENV:SERVERS%%": proxy.GetConfigProxyModule("%%ENV:SERVERS%%"), - "%%ENV:SERVERS_LIST%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_LIST%%"), - "%%ENV:SVC_CONF_ENV_PORT_HTTP%%": "80", - "%%ENV:SVC_CONF_ENV_PORT_R_LB%%": strconv.Itoa(proxy.ReadPort), - "%%ENV:SVC_CONF_ENV_PORT_RW%%": strconv.Itoa(proxy.WritePort), - "%%ENV:SVC_CONF_ENV_MAXSCALE_MAXINFO_PORT%%": strconv.Itoa(proxy.ClusterGroup.Conf.MxsMaxinfoPort), - "%%ENV:SVC_CONF_ENV_PORT_RW_SPLIT%%": strconv.Itoa(proxy.ReadWritePort), - "%%ENV:SVC_CONF_ENV_PORT_BINLOG%%": strconv.Itoa(proxy.ClusterGroup.Conf.MxsBinlogPort), - "%%ENV:SVC_CONF_ENV_PORT_TELNET%%": proxy.Port, - "%%ENV:SVC_CONF_ENV_PORT_ADMIN%%": proxy.Port, - "%%ENV:SVC_CONF_ENV_USER_ADMIN%%": proxy.User, - "%%ENV:SVC_CONF_ENV_PASSWORD_ADMIN%%": proxy.Pass, - "%%ENV:SVC_CONF_ENV_SPHINX_MEM%%": proxy.ClusterGroup.Conf.ProvSphinxMem, - "%%ENV:SVC_CONF_ENV_SPHINX_MAX_CHILDREN%%": proxy.ClusterGroup.Conf.ProvSphinxMaxChildren, - "%%ENV:SVC_CONF_ENV_VIP_ADDR%%": proxy.ClusterGroup.Conf.ProvProxRouteAddr, - "%%ENV:SVC_CONF_ENV_VIP_NETMASK%%": proxy.ClusterGroup.Conf.ProvProxRouteMask, - "%%ENV:SVC_CONF_ENV_VIP_PORT%%": proxy.ClusterGroup.Conf.ProvProxRoutePort, - "%%ENV:SVC_CONF_ENV_MRM_API_ADDR%%": proxy.ClusterGroup.Conf.MonitorAddress + ":" + proxy.ClusterGroup.Conf.HttpPort, - "%%ENV:SVC_CONF_ENV_MRM_CLUSTER_NAME%%": proxy.ClusterGroup.GetClusterName(), - "%%ENV:SVC_CONF_ENV_DATADIR%%": proxy.GetConfigDatadir(), + "%%ENV:NODES_CPU_CORES%%": proxy.ClusterGroup.Conf.ProvCores, + "%%ENV:SVC_CONF_ENV_MAX_CORES%%": proxy.ClusterGroup.Conf.ProvCores, + "%%ENV:SVC_CONF_ENV_CRC32_ID%%": string(proxy.Id[2:10]), + "%%ENV:SVC_CONF_ENV_SERVER_ID%%": string(proxy.Id[2:10]), + "%%ENV:SVC_CONF_ENV_MYSQL_ROOT_PASSWORD%%": proxy.ClusterGroup.dbPass, + "%%ENV:SVC_CONF_ENV_MYSQL_ROOT_USER%%": proxy.ClusterGroup.dbUser, + "%%ENV:SERVER_IP%%": proxy.GetBindAddress(), + "%%ENV:EXTRA_BIND_SERVER_IPV6%%": proxy.GetBindAddressExtraIPV6(), + 
"%%ENV:SVC_CONF_ENV_PROXY_USE_SSL%%": proxy.GetUseSSL(), + "%%ENV:SVC_CONF_ENV_PROXY_USE_COMPRESS%%": proxy.GetUseCompression(), + "%%ENV:SERVER_PORT%%": proxy.Port, + "%%ENV:SVC_NAMESPACE%%": proxy.ClusterGroup.Name, + "%%ENV:SVC_NAME%%": proxy.Name, + "%%ENV:SERVERS_HAPROXY_WRITE%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_HAPROXY_WRITE%%"), + "%%ENV:SERVERS_HAPROXY_READ%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_HAPROXY_READ%%"), + "%%ENV:SERVERS_HAPROXY_WRITE_BACKEND%%": proxy.ClusterGroup.Conf.HaproxyAPIWriteBackend, + "%%ENV:SERVERS_HAPROXY_READ_BACKEND%%": proxy.ClusterGroup.Conf.HaproxyAPIReadBackend, + "%%ENV:SERVERS_PROXYSQL%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_PROXYSQL%%"), + "%%ENV:SERVERS%%": proxy.GetConfigProxyModule("%%ENV:SERVERS%%"), + "%%ENV:SERVERS_LIST%%": proxy.GetConfigProxyModule("%%ENV:SERVERS_LIST%%"), + "%%ENV:SVC_CONF_ENV_PORT_HTTP%%": "80", + "%%ENV:SVC_CONF_ENV_PORT_R_LB%%": strconv.Itoa(proxy.ReadPort), + "%%ENV:SVC_CONF_ENV_PORT_RW%%": strconv.Itoa(proxy.WritePort), + "%%ENV:SVC_CONF_ENV_MAXSCALE_MAXINFO_PORT%%": strconv.Itoa(proxy.ClusterGroup.Conf.MxsMaxinfoPort), + "%%ENV:SVC_CONF_ENV_PORT_RW_SPLIT%%": strconv.Itoa(proxy.ReadWritePort), + "%%ENV:SVC_CONF_ENV_PORT_BINLOG%%": strconv.Itoa(proxy.ClusterGroup.Conf.MxsBinlogPort), + "%%ENV:SVC_CONF_ENV_PORT_TELNET%%": proxy.Port, + "%%ENV:SVC_CONF_ENV_PORT_ADMIN%%": proxy.Port, + "%%ENV:SVC_CONF_ENV_USER_ADMIN%%": proxy.User, + "%%ENV:SVC_CONF_ENV_PASSWORD_ADMIN%%": proxy.Pass, + "%%ENV:SVC_CONF_ENV_SPHINX_MEM%%": proxy.ClusterGroup.Conf.ProvSphinxMem, + "%%ENV:SVC_CONF_ENV_SPHINX_MAX_CHILDREN%%": proxy.ClusterGroup.Conf.ProvSphinxMaxChildren, + "%%ENV:SVC_CONF_ENV_VIP_ADDR%%": proxy.ClusterGroup.Conf.ProvProxRouteAddr, + "%%ENV:SVC_CONF_ENV_VIP_NETMASK%%": proxy.ClusterGroup.Conf.ProvProxRouteMask, + "%%ENV:SVC_CONF_ENV_VIP_PORT%%": proxy.ClusterGroup.Conf.ProvProxRoutePort, + "%%ENV:SVC_CONF_ENV_MRM_API_ADDR%%": proxy.ClusterGroup.Conf.MonitorAddress + ":" + proxy.ClusterGroup.Conf.HttpPort, + "%%ENV:SVC_CONF_ENV_MRM_CLUSTER_NAME%%": proxy.ClusterGroup.GetClusterName(), + "%%ENV:SVC_CONF_ENV_DATADIR%%": proxy.GetConfigDatadir(), + "%%ENV:SVC_CONF_ENV_PROXYSQL_READ_ON_MASTER%%": proxy.GetConfigProxySQLReadOnMaster(), } } +func (proxy *Proxy) GetConfigProxySQLReadOnMaster() string { + if proxy.IsFilterInTags("proxy.route.readonmaster") { + return "1" + } + return "0" +} + func (proxy *Proxy) GetConfigProxyModule(variable string) string { confmaxscale := "" confmaxscaleserverlist := "" From 3904f87cc63c529301275015b48e9bbb9c5436a8 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Thu, 8 Jul 2021 09:51:56 +0200 Subject: [PATCH 72/84] Reseed on closing WARN0101 --- cluster/cluster.go | 11 +++++++++++ cluster/cluster_has.go | 12 ++++++------ cluster/srv_rejoin.go | 1 + 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index 1f1e88148..cc3d811ce 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -521,8 +521,19 @@ func (cluster *Cluster) StateProcessing() { go cluster.SSTRunSender(servertoreseed.GetMyBackupDirectory()+"mysqldump.sql.gz", servertoreseed) } } + if s.ErrKey == "WARN0101" { + cluster.LogPrintf(LvlInfo, "Cluster have backup") + for _, srv := range cluster.Servers { + if srv.HasWaitBackupCookie() { + cluster.LogPrintf(LvlInfo, "Server %s was waiting for backup", srv.URL) + go srv.ReseedMasterSST() + } + } + + } // cluster.statecloseChan <- s } + states := cluster.sme.GetStates() for i := range states { cluster.LogPrintf("STATE", 
states[i]) diff --git a/cluster/cluster_has.go b/cluster/cluster_has.go index 94c658b03..8b8eedef7 100644 --- a/cluster/cluster_has.go +++ b/cluster/cluster_has.go @@ -26,14 +26,14 @@ func (cluster *Cluster) HasServer(srv *ServerMonitor) bool { } func (cluster *Cluster) HasValidBackup() bool { - if cluster.Conf.MonitorScheduler && (cluster.Conf.SchedulerBackupLogical || cluster.Conf.SchedulerBackupPhysical) { - sv := cluster.GetBackupServer() - if sv != nil { - if sv.HasBackupLogicalCookie() || sv.HasBackupPhysicalCookie() { - return true - } + // if cluster.Conf.MonitorScheduler && (cluster.Conf.SchedulerBackupLogical || cluster.Conf.SchedulerBackupPhysical) { + sv := cluster.GetBackupServer() + if sv != nil { + if sv.HasBackupLogicalCookie() || sv.HasBackupPhysicalCookie() { + return true } } + // } cluster.SetState("WARN0101", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0101"]), ErrFrom: "TOPO"}) return false diff --git a/cluster/srv_rejoin.go b/cluster/srv_rejoin.go index 2ad48e9ea..72eb98c7b 100644 --- a/cluster/srv_rejoin.go +++ b/cluster/srv_rejoin.go @@ -150,6 +150,7 @@ func (server *ServerMonitor) RejoinMasterSST() error { } func (server *ServerMonitor) ReseedMasterSST() error { + server.DelWaitBackupCookie() if server.ClusterGroup.Conf.AutorejoinMysqldump == true { server.ClusterGroup.LogPrintf("INFO", "Rejoin dump restore %s", server.URL) err := server.RejoinDirectDump() From c6e9f9c715375397efef3babee1c3b0eb4e515f2 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Thu, 8 Jul 2021 10:13:59 +0200 Subject: [PATCH 73/84] Schema Monitor can crash when master discoverd from slave and master has wrong monitor user #359 --- cluster/cluster.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index 86bf1e786..767be2ee9 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -796,7 +796,10 @@ func (cluster *Cluster) MonitorSchema() { if cluster.master == nil { return } - if cluster.master.State == stateFailed || cluster.master.State == stateMaintenance { + if cluster.master.State == stateFailed || cluster.master.State == stateMaintenance || cluster.master.State == stateUnconn { + return + } + if cluster.master.Conn == nil { return } cluster.sme.SetMonitorSchemaState() From 003c3b9d4538b267959662366610e7406df97040 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Thu, 8 Jul 2021 09:55:35 +0200 Subject: [PATCH 74/84] Wrong warning missing GtidStrictMode in case not MariaDB #360 --- cluster/srv_chk.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/srv_chk.go b/cluster/srv_chk.go index dc566997a..c1710cc3e 100644 --- a/cluster/srv_chk.go +++ b/cluster/srv_chk.go @@ -274,7 +274,7 @@ func (server *ServerMonitor) CheckMasterSettings() { if server.HaveBinlogSlaveUpdates == false { server.ClusterGroup.sme.AddState("WARN0069", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0069"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL}) } - if server.HaveGtidStrictMode == false { + if server.HaveGtidStrictMode == false && server.DBVersion.Flavor == "MariaDB" { server.ClusterGroup.sme.AddState("WARN0070", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0070"], server.URL), ErrFrom: "TOPO", ServerUrl: server.URL}) } if server.IsAcid() == false && server.ClusterGroup.IsDiscovered() { From 142fa61386de5876b5020bd7d4cfbdd3b5c4de98 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Thu, 8 Jul 2021 16:51:04 +0200 Subject: [PATCH 75/84] Print server infos in 
bad user grants --- cluster/error.go | 8 ++++---- cluster/srv_chk.go | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cluster/error.go b/cluster/error.go index a321c785a..66e0ef6bf 100644 --- a/cluster/error.go +++ b/cluster/error.go @@ -13,10 +13,10 @@ var clusterError = map[string]string{ "ERR00002": "Waiting for a user manual failover", "ERR00004": "Database %s access denied: %s", "ERR00005": "Could not get privileges for user %s@%s: %s", - "ERR00006": "User must have REPLICATION CLIENT privilege", - "ERR00007": "User must have REPLICATION SLAVE privilege", - "ERR00008": "User must have SUPER privilege", - "ERR00009": "User must have RELOAD privilege", + "ERR00006": "User must have REPLICATION CLIENT privilege on %s", + "ERR00007": "User must have REPLICATION SLAVE privilege on %s", + "ERR00008": "User must have SUPER privilege %s", + "ERR00009": "User must have RELOAD privilege %s", "ERR00010": "Could not find a slave in topology", "ERR00011": "Found multiple masters in topology but not explicitely setup", "ERR00012": "Could not find a master in topology", diff --git a/cluster/srv_chk.go b/cluster/srv_chk.go index c1710cc3e..788c3dce5 100644 --- a/cluster/srv_chk.go +++ b/cluster/srv_chk.go @@ -317,13 +317,13 @@ func (server *ServerMonitor) CheckPrivileges() { server.ClusterGroup.SetState("ERR00005", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00005"], server.ClusterGroup.dbUser, server.ClusterGroup.repmgrHostname, err), ErrFrom: "CONF", ServerUrl: server.URL}) } if priv.Repl_client_priv == "N" { - server.ClusterGroup.SetState("ERR00006", state.State{ErrType: "ERROR", ErrDesc: clusterError["ERR00006"], ErrFrom: "CONF", ServerUrl: server.URL}) + server.ClusterGroup.SetState("ERR00006", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00006"], server.URL), ErrFrom: "CONF", ServerUrl: server.URL}) } if priv.Super_priv == "N" { - server.ClusterGroup.SetState("ERR00008", state.State{ErrType: "ERROR", ErrDesc: clusterError["ERR00008"], ErrFrom: "CONF", ServerUrl: server.URL}) + server.ClusterGroup.SetState("ERR00008", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00008"], server.URL), ErrFrom: "CONF", ServerUrl: server.URL}) } if priv.Reload_priv == "N" { - server.ClusterGroup.SetState("ERR00009", state.State{ErrType: "ERROR", ErrDesc: clusterError["ERR00009"], ErrFrom: "CONF", ServerUrl: server.URL}) + server.ClusterGroup.SetState("ERR00009", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00009"], server.URL), ErrFrom: "CONF", ServerUrl: server.URL}) } } // Check replication user has correct privs. 
@@ -336,7 +336,7 @@ func (server *ServerMonitor) CheckPrivileges() { server.ClusterGroup.SetState("ERR00015", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00015"], server.ClusterGroup.rplUser, sv2.URL, err), ErrFrom: "CONF", ServerUrl: sv2.URL}) } if rpriv.Repl_slave_priv == "N" { - server.ClusterGroup.SetState("ERR00007", state.State{ErrType: "ERROR", ErrDesc: clusterError["ERR00007"], ErrFrom: "CONF", ServerUrl: sv2.URL}) + server.ClusterGroup.SetState("ERR00007", state.State{ErrType: "ERROR", ErrDesc: fmt.Sprintf(clusterError["ERR00007"], sv2.URL), ErrFrom: "CONF", ServerUrl: sv2.URL}) } } } From dd80a83cd17171821b105e8d24b5540de4645163 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Fri, 9 Jul 2021 09:43:16 +0200 Subject: [PATCH 76/84] Don't print RELAY_LOG insode variable diff --- cluster/cluster.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cluster/cluster.go b/cluster/cluster.go index 767be2ee9..b5da93700 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -771,6 +771,7 @@ func (cluster *Cluster) MonitorVariablesDiff() { "SOCKET": true, "DATADIR": true, "THREAD_POOL_SIZE": true, + "RELAY_LOG": true, } variablesdiff := "" for k, v := range masterVariables { From 1f164dff868f20cbe1994f405ad3e709b72d434f Mon Sep 17 00:00:00 2001 From: svaroqui Date: Fri, 9 Jul 2021 15:07:10 +0200 Subject: [PATCH 77/84] Track variable diff in cluster structure --- cluster/cluster.go | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index b5da93700..563218881 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -154,6 +154,7 @@ type Cluster struct { WaitingRejoin int `json:"waitingRejoin"` WaitingSwitchover int `json:"waitingSwitchover"` WaitingFailover int `json:"waitingFailover"` + DiffVariables []VariableDiff `json:"diffVariables"` sync.Mutex crcTable *crc64.Table } @@ -210,6 +211,16 @@ type JobResult struct { Restart bool `json:"restart"` } +type Diff struct { + Server string `json:"serverName"` + VariableValue string `json:"variableValue"` +} + +type VariableDiff struct { + VariableName string `json:"variableName"` + DiffValues []Diff `json:"diffValues"` +} + const ( stateClusterStart string = "Running starting" stateClusterDown string = "Running cluster down" @@ -774,18 +785,33 @@ func (cluster *Cluster) MonitorVariablesDiff() { "RELAY_LOG": true, } variablesdiff := "" + var alldiff []VariableDiff for k, v := range masterVariables { - + var myvardiff VariableDiff + var myvalues []Diff + var mastervalue Diff + mastervalue.Server = cluster.GetMaster().URL + mastervalue.VariableValue = v + myvalues = append(myvalues, mastervalue) for _, s := range cluster.slaves { slaveVariables := s.Variables if slaveVariables[k] != v && exceptVariables[k] != true { + var slavevalue Diff + slavevalue.Server = s.URL + slavevalue.VariableValue = slaveVariables[k] + myvalues = append(myvalues, mastervalue) variablesdiff += "+ Master Variable: " + k + " -> " + v + "\n" variablesdiff += "- Slave: " + s.URL + " -> " + slaveVariables[k] + "\n" } - + if len(myvalues) > 1 { + myvardiff.VariableName = k + myvardiff.DiffValues = myvalues + alldiff = append(alldiff, myvardiff) + } } } if variablesdiff != "" { + cluster.DiffVariables = alldiff cluster.SetState("WARN0084", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0084"], variablesdiff), ErrFrom: "MON", ServerUrl: cluster.GetMaster().URL}) } } From c7c20084657232f3044bf52820064fe18f7013f6 Mon Sep 17 00:00:00 2001 From: 
svaroqui Date: Fri, 9 Jul 2021 18:27:18 +0200 Subject: [PATCH 78/84] Fixing variable diff --- cluster/cluster.go | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/cluster/cluster.go b/cluster/cluster.go index 563218881..c2113c61c 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -799,20 +799,25 @@ func (cluster *Cluster) MonitorVariablesDiff() { var slavevalue Diff slavevalue.Server = s.URL slavevalue.VariableValue = slaveVariables[k] - myvalues = append(myvalues, mastervalue) + myvalues = append(myvalues, slavevalue) variablesdiff += "+ Master Variable: " + k + " -> " + v + "\n" variablesdiff += "- Slave: " + s.URL + " -> " + slaveVariables[k] + "\n" } - if len(myvalues) > 1 { - myvardiff.VariableName = k - myvardiff.DiffValues = myvalues - alldiff = append(alldiff, myvardiff) - } + } + if len(myvalues) > 1 { + myvardiff.VariableName = k + myvardiff.DiffValues = myvalues + alldiff = append(alldiff, myvardiff) } } if variablesdiff != "" { cluster.DiffVariables = alldiff - cluster.SetState("WARN0084", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0084"], variablesdiff), ErrFrom: "MON", ServerUrl: cluster.GetMaster().URL}) + jtext, err := json.MarshalIndent(alldiff, " ", "\t") + if err != nil { + cluster.LogPrintf(LvlErr, "Encoding variables diff %s", err) + return + } + cluster.SetState("WARN0084", state.State{ErrType: "WARNING", ErrDesc: fmt.Sprintf(clusterError["WARN0084"], string(jtext)), ErrFrom: "MON", ServerUrl: cluster.GetMaster().URL}) } } From 1ea7927c6752e5f92e08087a1f55cc31c0a653f1 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Fri, 9 Jul 2021 18:36:01 +0200 Subject: [PATCH 79/84] Fixing proxy delcookie --- cluster/prx_del.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/prx_del.go b/cluster/prx_del.go index 9f2b7507d..1e2f3c989 100644 --- a/cluster/prx_del.go +++ b/cluster/prx_del.go @@ -13,7 +13,7 @@ import ( ) func (proxy *Proxy) delCookie(key string) error { - err := os.Remove(proxy.Datadir + "/@/" + key) + err := os.Remove(proxy.Datadir + "/@" + key) if err != nil { proxy.ClusterGroup.LogPrintf(LvlDbg, "Remove cookie (%s) %s", key, err) } From 3dc5eb0d87cd9c317924cdace4a0a7e01150fa0d Mon Sep 17 00:00:00 2001 From: svaroqui Date: Mon, 12 Jul 2021 18:31:59 +0200 Subject: [PATCH 80/84] GetBackupServer to send master if no server defined in config Run dbjobs_new via ssh mixxing path for socat fo OSX Job via SSH report all stdout and stderr in log-level >2 Add child cluster wrong ACL --- cluster/cluster_get.go | 3 ++ cluster/srv_job.go | 33 ++++++++++++++----- server/api_cluster.go | 3 +- .../opensvc/moduleset_mariadb.svc.mrm.db.json | 4 +-- 4 files changed, 31 insertions(+), 12 deletions(-) diff --git a/cluster/cluster_get.go b/cluster/cluster_get.go index 96f73977b..8e480fd89 100644 --- a/cluster/cluster_get.go +++ b/cluster/cluster_get.go @@ -408,6 +408,9 @@ func (cluster *Cluster) GetBackupServer() *ServerMonitor { return server } } + if cluster.master != nil { + return cluster.master + } return nil } diff --git a/cluster/srv_job.go b/cluster/srv_job.go index e88f1927e..3b18a0342 100644 --- a/cluster/srv_job.go +++ b/cluster/srv_job.go @@ -20,6 +20,7 @@ import ( "log" "os" "os/exec" + "os/user" "reflect" "regexp" "strconv" @@ -866,18 +867,33 @@ func (server *ServerMonitor) JobRunViaSSH() error { if server.ClusterGroup.IsInFailover() { return errors.New("Cancel dbjob via ssh during failover") } + user, err := user.Current() key := os.Getenv("HOME") + "/.ssh/id_rsa" - client, err 
:= sshcli.DialWithKey(misc.Unbracket(server.Host)+":22", "apple", key) + client, err := sshcli.DialWithKey(misc.Unbracket(server.Host)+":22", user.Username, key) if err != nil { server.ClusterGroup.LogPrintf(LvlErr, "JobRunViaSSH %s", err) return err } defer client.Close() - out, err2 := client.Script(server.Datadir + "/init/init/dbjobs_new").Output() - if err2 != nil { - server.ClusterGroup.LogPrintf(LvlErr, "JobRunViaSSH %s", err2) - return err + + var ( + stdout bytes.Buffer + stderr bytes.Buffer + ) + filerc, err := os.Open(server.Datadir + "/init/init/dbjobs_new") + if err != nil { + server.ClusterGroup.LogPrintf(LvlErr, "JobRunViaSSH %s", err) + return errors.New("Cancel dbjob can't open script") + } + defer filerc.Close() + buf := new(bytes.Buffer) + buf.ReadFrom(filerc) + + if client.Shell().SetStdio(buf, &stdout, &stderr).Start(); err != nil { + server.ClusterGroup.LogPrintf(LvlWarn, "JobRunViaSSH %s", stderr.String()) + } + out := stdout.String() res := new(JobResult) val := reflect.ValueOf(res).Elem() @@ -886,13 +902,14 @@ func (server *ServerMonitor) JobRunViaSSH() error { val.Field(i).SetBool(false) } else { val.Field(i).SetBool(true) - server.ClusterGroup.LogPrintf(LvlInfo, "Exec via ssh : %s", out) + server.ClusterGroup.LogPrintf(LvlInfo, "Exec via ssh : %s", val.Type().Field(i).Name) } } - //server.ClusterGroup.LogPrintf(LvlInfo, "Exec via ssh : %s", res) - //server.ClusterGroup.LogPrintf(LvlInfo, "Exec via ssh : %s", val) server.ClusterGroup.JobResults[server.URL] = res + if server.ClusterGroup.Conf.LogLevel > 2 { + server.ClusterGroup.LogPrintf(LvlInfo, "Exec via ssh : %s", res) + } return nil } diff --git a/server/api_cluster.go b/server/api_cluster.go index 522eef278..8df08ee2d 100644 --- a/server/api_cluster.go +++ b/server/api_cluster.go @@ -461,11 +461,10 @@ func (repman *ReplicationManager) handlerMuxClusterShardingAdd(w http.ResponseWr mycluster := repman.getClusterByName(vars["clusterName"]) if mycluster != nil { if !repman.IsValidClusterACL(r, mycluster) { - repman.AddCluster(vars["clusterShardingName"], vars["clusterName"]) http.Error(w, "No valid ACL", 403) return } - mycluster.RollingRestart() + repman.AddCluster(vars["clusterShardingName"], vars["clusterName"]) } else { http.Error(w, "No cluster", 500) return diff --git a/share/opensvc/moduleset_mariadb.svc.mrm.db.json b/share/opensvc/moduleset_mariadb.svc.mrm.db.json index 4e91b5b11..bf32c59c0 100644 --- a/share/opensvc/moduleset_mariadb.svc.mrm.db.json +++ b/share/opensvc/moduleset_mariadb.svc.mrm.db.json @@ -2580,8 +2580,8 @@ { "var_author": "admin Manager", "var_class": "file", - "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/init/dbjobs_new\",\"mode\":755,\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"#!/bin/bash\\nset 
-x\\nUSER=%%ENV:SVC_CONF_ENV_MYSQL_ROOT_USER%%\\nPASSWORD=%%ENV:SVC_CONF_ENV_MYSQL_ROOT_PASSWORD%%\\nMYSQL_PORT=%%ENV:SERVER_PORT%%\\nMYSQL_SERVER=%%ENV:SERVER_HOST%%\\nCLUSTER_NAME=%%ENV:SVC_NAMESPACE%%\\nREPLICATION_MANAGER_ADDR=%%ENV:SVC_CONF_ENV_REPLICATION_MANAGER_ADDR%%\\nMYSQL_CONF=%%ENV:SVC_CONF_ENV_MYSQL_CONFDIR%%\\nDATADIR=%%ENV:SVC_CONF_ENV_MYSQL_DATADIR%%\\nMYSQL_CLIENT=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysql\\nMYSQL_CHECK=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysqlcheck\\nMYSQL_DUMP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysqldump\\nSST_RECEIVER_PORT=%%ENV:SVC_CONF_ENV_SST_RECEIVER_PORT%%\\nSOCAT_BIND=%%ENV:SERVER_IP%%\\nMARIADB_BACKUP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mariabackup\\nXTRABACKUP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/xtrabackup\\nINNODBACKUPEX=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/innobackupex\\n\\nERROLOG=$DATADIR/.system/logs/error.log\\nSLOWLOG=$DATADIR/.system/logs/slow-query.log\\nBACKUPDIR=$DATADIR/.system/backup\\n\\nJOBS=( \\\"xtrabackup\\\" \\\"mariabackup\\\" \\\"error\\\" \\\"slowquery\\\" \\\"zfssnapback\\\" \\\"optimize\\\" \\\"reseedxtrabackup\\\" \\\"reseedmariabackup\\\" \\\"reseedmysqldump\\\" \\\"flashbackxtrabackup\\\" \\\"flashbackmariadbackup\\\" \\\"flashbackmysqldump\\\" \\\"stop\\\" \\\"restart\\\" \\\"start\\\")\\n\\nsocatCleaner()\\n{\\n kill -9 $(lsof -t -i:$SST_RECEIVER_PORT -sTCP:LISTEN)\\n}\\n\\ndoneJob()\\n{\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;UPDATE replication_manager_schema.jobs set end=NOW(), result=LOAD_FILE('/tmp/dbjob.out') WHERE id='$ID';\\\" &\\n}\\n\\npauseJob()\\n{\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"select sleep(20);set sql_log_bin=0;UPDATE replication_manager_schema.jobs set set done=1,result=LOAD_FILE('/tmp/dbjob.out') WHERE id='$ID';\\\" &\\n}\\n\\npartialRestore()\\n{\\n chown -R mysql:mysql $BACKUPDIR\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;install plugin BLACKHOLE soname 'ha_blackhole.so'\\\"\\n for dir in $(ls -d $BACKUPDIR/*/ | xargs -n 1 basename | grep -vE 'mysql|performance_schema|replication_manager_schema') ; do\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;drop database IF EXISTS $dir; CREATE DATABASE $dir;\\\"\\n\\n\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.exp\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n cat $BACKUPDIR/$dir/$file.frm | sed -e 's/\\\\x06\\\\x00\\\\x49\\\\x6E\\\\x6E\\\\x6F\\\\x44\\\\x42\\\\x00\\\\x00\\\\x00/\\\\x09\\\\x00\\\\x42\\\\x4C\\\\x41\\\\x43\\\\x4B\\\\x48\\\\x4F\\\\x4C\\\\x45/g' > $DATADIR/$dir/mrm_pivo.frm\\n chown mysql:mysql $DATADIR/$dir/mrm_pivo.frm\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;ALTER TABLE $dir.mrm_pivo engine=innodb;RENAME TABLE $dir.mrm_pivo TO $dir.$file; ALTER TABLE $dir.$file DISCARD TABLESPACE;\\\"\\n mv $BACKUPDIR/$dir/$file.ibd $DATADIR/$dir/$file.ibd\\n mv $BACKUPDIR/$dir/$file.exp $DATADIR/$dir/$file.exp\\n mv $BACKUPDIR/$dir/$file.cfg $DATADIR/$dir/$file.cfg\\n mv $BACKUPDIR/$dir/$file.TRG $DATADIR/$dir/$file.TRG\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;ALTER TABLE $dir.$file IMPORT TABLESPACE\\\"\\n done\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.MYD\\\" | xargs -n 1 basename | cut -d'.' 
--complement -f2-) ; do\\n mv $BACKUPDIR/$dir/$file.* $DATADIR/$dir/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE $dir.$file\\\"\\n done\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.CSV\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n mv $BACKUPDIR/$dir/$file.* $DATADIR/$dir/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE $dir.$file\\\"\\n done\\n done\\n for file in $(find $BACKUPDIR/mysql/ -name \\\"*.MYD\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n mv $BACKUPDIR/mysql/$file.* $DATADIR/mysql/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE mysql.$file\\\"\\n done\\n cat $BACKUPDIR/xtrabackup_info | grep binlog_pos | awk -F, '{ print $3 }' | sed -e 's/GTID of the last change/set sql_log_bin=0;set global gtid_slave_pos=/g' | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e\\\"flush privileges;start slave;\\\"\\n}\\n\\nfor job in \\\"${JOBS[@]}\\\"\\ndo\\n\\n TASK=($(echo \\\"select concat(id,'@',server,':',port) from replication_manager_schema.jobs WHERE task='$job' and done=0 order by task desc limit 1\\\" | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -N))\\n\\n ADDRESS=($(echo $TASK | awk -F@ '{ print $2 }'))\\n ID=($(echo $TASK | awk -F@ '{ print $1 }'))\\n #purge de past\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;UPDATE replication_manager_schema.jobs set done=1 WHERE done=0 AND task='$job' AND ID<>$ID;\\\"\\n\\n if [ \\\"$ADDRESS\\\" == \\\"\\\" ]; then\\n echo \\\"No $job needed\\\"\\n case \\\"$job\\\" in \\n start)\\n if [ \\\"curl -so /dev/null -w '%{response_code}' http://$REPLICATION_MANAGER_ADDR/api/clusters/$CLUSTER_NAME/servers/$MYSQL_SERVER/$MYSQL_PORT/need-start\\\" == \\\"200\\\" ]; then\\n curl http://$REPLICATION_MANAGER_ADDR/api/clusters/$CLUSTER_NAME/servers/$MYSQL_SERVER/$MYSQL_PORT/config|tar xzvf etc/* - -C $CONFDIR/../..\\n systemctl start mysql \\n fi\\n ;;\\n esac\\n else\\n echo \\\"Processing $job\\\"\\n case \\\"$job\\\" in\\n reseedmysqldump)\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n time socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | gunzip | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --init-command=\\\"reset master;set sql_log_bin=0;\\\" > /tmp/dbjob.out 2>&1\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e 'start slave;'\\n ;;\\n flashbackmysqldump)\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | gunzip | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --init-command=\\\"set sql_log_bin=0\\\" > /tmp/dbjob.out 2>&1\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e 'start slave;'\\n ;;\\n reseedxtrabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $XTRABACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n reseedmariabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT 
| mbstream -x -C $BACKUPDIR\\n # mbstream -p, --parallel\\n $MARIADB_BACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n flashbackxtrabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner \\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $XTRABACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n flashbackmariadbackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $MARIADB_BACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n xtrabackup)\\n cd /docker-entrypoint-initdb.d\\n $INNODBACKUPEX --defaults-file=$MYSQL_CONF/my.cnf --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --no-version-check --stream=xbstream /tmp/ | socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n ;;\\n mariabackup)\\n cd /docker-entrypoint-initdb.d\\n $MARIADB_BACKUP --innobackupex --defaults-file=$MYSQL_CONF/my.cnf --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --no-version-check --stream=xbstream /tmp/ | socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n ;;\\n error)\\n cat $ERROLOG| socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n > $ERROLOG\\n ;;\\n slowquery)\\n cat $SLOWLOG| socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n > $SLOWLOG\\n ;;\\n zfssnapback)\\n LASTSNAP=`zfs list -r -t all |grep zp%%ENV:SERVICES_SVCNAME%%_pod01 | grep daily | sort -r | head -n 1 | cut -d\\\" \\\" -f1`\\n %%ENV:SERVICES_SVCNAME%% stop\\n zfs rollback $LASTSNAP\\n %%ENV:SERVICES_SVCNAME%% start\\n ;;\\n optimize)\\n $MYSQL_CHECK -o --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --all-databases --skip-write-binlog &>/tmp/dbjob.out\\n ;;\\n restart)\\n systemctl restart mysql \\n journalctl -u mysql > /tmp/dbjob.out \\n ;;\\n stop)\\n systemctl stop mysql \\n journalctl -u mysql > /tmp/dbjob.out \\n ;;\\n esac\\n doneJob\\n fi\\n\\ndone\\n\"}", - "var_updated": "2021-06-28 10:01:07", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/init/dbjobs_new\",\"mode\":755,\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"#!/bin/bash\\nset -x\\nUSER=%%ENV:SVC_CONF_ENV_MYSQL_ROOT_USER%%\\nPASSWORD=%%ENV:SVC_CONF_ENV_MYSQL_ROOT_PASSWORD%%\\nMYSQL_PORT=%%ENV:SERVER_PORT%%\\nMYSQL_SERVER=%%ENV:SERVER_HOST%%\\nCLUSTER_NAME=%%ENV:SVC_NAMESPACE%%\\nREPLICATION_MANAGER_ADDR=%%ENV:SVC_CONF_ENV_REPLICATION_MANAGER_ADDR%%\\nMYSQL_CONF=%%ENV:SVC_CONF_ENV_MYSQL_CONFDIR%%\\nDATADIR=%%ENV:SVC_CONF_ENV_MYSQL_DATADIR%%\\nMYSQL_CLIENT=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysql\\nMYSQL_CHECK=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysqlcheck\\nMYSQL_DUMP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysqldump\\nSST_RECEIVER_PORT=%%ENV:SVC_CONF_ENV_SST_RECEIVER_PORT%%\\nSOCAT_BIND=%%ENV:SERVER_IP%%\\nMARIADB_BACKUP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mariabackup\\nXTRABACKUP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/xtrabackup\\nINNODBACKUPEX=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/innobackupex\\n\\nERROLOG=$DATADIR/.system/logs/error.log\\nSLOWLOG=$DATADIR/.system/logs/slow-query.log\\nBACKUPDIR=$DATADIR/.system/backup\\n\\nJOBS=( \\\"xtrabackup\\\" \\\"mariabackup\\\" \\\"error\\\" \\\"slowquery\\\" \\\"zfssnapback\\\" \\\"optimize\\\" \\\"reseedxtrabackup\\\" \\\"reseedmariabackup\\\" \\\"reseedmysqldump\\\" \\\"flashbackxtrabackup\\\" \\\"flashbackmariadbackup\\\" \\\"flashbackmysqldump\\\" 
\\\"stop\\\" \\\"restart\\\" \\\"start\\\")\\n\\n# OSX need socat extra path\\nexport PATH=$PATH:/usr/local/bin\\n \\nsocatCleaner()\\n{\\n lsof -t -i:$SST_RECEIVER_PORT -sTCP:LISTEN | kill -9\\n}\\n\\ndoneJob()\\n{\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;UPDATE replication_manager_schema.jobs set end=NOW(), result=LOAD_FILE('/tmp/dbjob.out') WHERE id='$ID';\\\" &\\n}\\n\\npauseJob()\\n{\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"select sleep(20);set sql_log_bin=0;UPDATE replication_manager_schema.jobs set set done=1,result=LOAD_FILE('/tmp/dbjob.out') WHERE id='$ID';\\\" &\\n}\\n\\npartialRestore()\\n{\\n chown -R mysql:mysql $BACKUPDIR\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;install plugin BLACKHOLE soname 'ha_blackhole.so'\\\"\\n for dir in $(ls -d $BACKUPDIR/*/ | xargs -n 1 basename | grep -vE 'mysql|performance_schema|replication_manager_schema') ; do\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;drop database IF EXISTS $dir; CREATE DATABASE $dir;\\\"\\n\\n\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.exp\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n cat $BACKUPDIR/$dir/$file.frm | sed -e 's/\\\\x06\\\\x00\\\\x49\\\\x6E\\\\x6E\\\\x6F\\\\x44\\\\x42\\\\x00\\\\x00\\\\x00/\\\\x09\\\\x00\\\\x42\\\\x4C\\\\x41\\\\x43\\\\x4B\\\\x48\\\\x4F\\\\x4C\\\\x45/g' > $DATADIR/$dir/mrm_pivo.frm\\n chown mysql:mysql $DATADIR/$dir/mrm_pivo.frm\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;ALTER TABLE $dir.mrm_pivo engine=innodb;RENAME TABLE $dir.mrm_pivo TO $dir.$file; ALTER TABLE $dir.$file DISCARD TABLESPACE;\\\"\\n mv $BACKUPDIR/$dir/$file.ibd $DATADIR/$dir/$file.ibd\\n mv $BACKUPDIR/$dir/$file.exp $DATADIR/$dir/$file.exp\\n mv $BACKUPDIR/$dir/$file.cfg $DATADIR/$dir/$file.cfg\\n mv $BACKUPDIR/$dir/$file.TRG $DATADIR/$dir/$file.TRG\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;ALTER TABLE $dir.$file IMPORT TABLESPACE\\\"\\n done\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.MYD\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n mv $BACKUPDIR/$dir/$file.* $DATADIR/$dir/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE $dir.$file\\\"\\n done\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.CSV\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n mv $BACKUPDIR/$dir/$file.* $DATADIR/$dir/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE $dir.$file\\\"\\n done\\n done\\n for file in $(find $BACKUPDIR/mysql/ -name \\\"*.MYD\\\" | xargs -n 1 basename | cut -d'.' 
--complement -f2-) ; do\\n mv $BACKUPDIR/mysql/$file.* $DATADIR/mysql/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE mysql.$file\\\"\\n done\\n cat $BACKUPDIR/xtrabackup_info | grep binlog_pos | awk -F, '{ print $3 }' | sed -e 's/GTID of the last change/set sql_log_bin=0;set global gtid_slave_pos=/g' | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e\\\"flush privileges;start slave;\\\"\\n}\\n\\nfor job in \\\"${JOBS[@]}\\\"\\ndo\\n\\n TASK=($(echo \\\"select concat(id,'@',server,':',port) from replication_manager_schema.jobs WHERE task='$job' and done=0 order by task desc limit 1\\\" | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -N))\\n\\n ADDRESS=($(echo $TASK | awk -F@ '{ print $2 }'))\\n ID=($(echo $TASK | awk -F@ '{ print $1 }'))\\n #purge de past\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;UPDATE replication_manager_schema.jobs set done=1 WHERE done=0 AND task='$job' AND ID<>$ID;\\\"\\n\\n if [ \\\"$ADDRESS\\\" == \\\"\\\" ]; then\\n echo \\\"No $job needed\\\"\\n case \\\"$job\\\" in \\n start)\\n if [ \\\"curl -so /dev/null -w '%{response_code}' http://$REPLICATION_MANAGER_ADDR/api/clusters/$CLUSTER_NAME/servers/$MYSQL_SERVER/$MYSQL_PORT/need-start\\\" == \\\"200\\\" ]; then\\n curl http://$REPLICATION_MANAGER_ADDR/api/clusters/$CLUSTER_NAME/servers/$MYSQL_SERVER/$MYSQL_PORT/config|tar xzvf etc/* - -C $CONFDIR/../..\\n systemctl start mysql \\n fi\\n ;;\\n esac\\n else\\n echo \\\"Processing $job\\\"\\n case \\\"$job\\\" in\\n reseedmysqldump)\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n time socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | gunzip | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --init-command=\\\"reset master;set sql_log_bin=0;\\\" > /tmp/dbjob.out 2>&1\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e 'start slave;'\\n ;;\\n flashbackmysqldump)\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | gunzip | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --init-command=\\\"set sql_log_bin=0\\\" > /tmp/dbjob.out 2>&1\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e 'start slave;'\\n ;;\\n reseedxtrabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $XTRABACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n reseedmariabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | mbstream -x -C $BACKUPDIR\\n # mbstream -p, --parallel\\n $MARIADB_BACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n flashbackxtrabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner \\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $XTRABACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n flashbackmariadbackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > 
/tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $MARIADB_BACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n xtrabackup)\\n cd /docker-entrypoint-initdb.d\\n $INNODBACKUPEX --defaults-file=$MYSQL_CONF/my.cnf --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --no-version-check --stream=xbstream /tmp/ | socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n ;;\\n mariabackup)\\n cd /docker-entrypoint-initdb.d\\n $MARIADB_BACKUP --innobackupex --defaults-file=$MYSQL_CONF/my.cnf --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --no-version-check --stream=xbstream /tmp/ | socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n ;;\\n error)\\n cat $ERROLOG| socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n > $ERROLOG\\n ;;\\n slowquery)\\n cat $SLOWLOG| socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n > $SLOWLOG\\n ;;\\n zfssnapback)\\n LASTSNAP=`zfs list -r -t all |grep zp%%ENV:SERVICES_SVCNAME%%_pod01 | grep daily | sort -r | head -n 1 | cut -d\\\" \\\" -f1`\\n %%ENV:SERVICES_SVCNAME%% stop\\n zfs rollback $LASTSNAP\\n %%ENV:SERVICES_SVCNAME%% start\\n ;;\\n optimize)\\n $MYSQL_CHECK -o --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --all-databases --skip-write-binlog &>/tmp/dbjob.out\\n ;;\\n restart)\\n systemctl restart mysql \\n journalctl -u mysql > /tmp/dbjob.out \\n ;;\\n stop)\\n systemctl stop mysql \\n journalctl -u mysql > /tmp/dbjob.out \\n ;;\\n esac\\n doneJob\\n fi\\n\\ndone\\n\"}", + "var_updated": "2021-07-12 17:11:16", "var_name": "db_cnf_script_dbjobs_new", "id": 6270 } From 14440f942a7cfb64a0e6424997f2c1fff19796fe Mon Sep 17 00:00:00 2001 From: svaroqui Date: Thu, 15 Jul 2021 09:23:03 +0200 Subject: [PATCH 81/84] ssh job script to set and use session env variable not to expose password --- cluster/srv_job.go | 10 +++++++++- share/opensvc/moduleset_mariadb.svc.mrm.db.json | 4 ++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/cluster/srv_job.go b/cluster/srv_job.go index 3b18a0342..ac5131f9c 100644 --- a/cluster/srv_job.go +++ b/cluster/srv_job.go @@ -889,7 +889,15 @@ func (server *ServerMonitor) JobRunViaSSH() error { defer filerc.Close() buf := new(bytes.Buffer) buf.ReadFrom(filerc) - + adminuser := "admin" + adminpassword := "repman" + if user, ok := server.ClusterGroup.APIUsers[adminuser]; ok { + adminpassword = user.Password + } + _, err = client.Cmd("export MYSQL_ROOT_PASSWORD=" + server.Pass).Cmd("export REPLICATION_MANAGER_URL=" + server.ClusterGroup.Conf.MonitorAddress + ":" + server.ClusterGroup.Conf.APIPort).Cmd("export REPLICATION_MANAGER_USER=" + adminuser).Cmd("export REPLICATION_MANAGER_PASSWORD=" + adminpassword).Cmd("export REPLICATION_MANAGER_HOST_NAME=" + server.Host).Cmd("export REPLICATION_MANAGER_HOST_PORT=" + server.Port).Cmd("export REPLICATION_MANAGER_CLUSTER_NAME=" + server.ClusterGroup.Name).SmartOutput() + if err != nil { + return errors.New("JobRunViaSSH Setup env variables via SSH %s" + err.Error()) + } if client.Shell().SetStdio(buf, &stdout, &stderr).Start(); err != nil { server.ClusterGroup.LogPrintf(LvlWarn, "JobRunViaSSH %s", stderr.String()) } diff --git a/share/opensvc/moduleset_mariadb.svc.mrm.db.json b/share/opensvc/moduleset_mariadb.svc.mrm.db.json index bf32c59c0..00e6ab1f0 100644 --- a/share/opensvc/moduleset_mariadb.svc.mrm.db.json +++ b/share/opensvc/moduleset_mariadb.svc.mrm.db.json @@ -2580,8 +2580,8 @@ { "var_author": "admin Manager", "var_class": "file", - "var_value": 
"{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/init/dbjobs_new\",\"mode\":755,\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"#!/bin/bash\\nset -x\\nUSER=%%ENV:SVC_CONF_ENV_MYSQL_ROOT_USER%%\\nPASSWORD=%%ENV:SVC_CONF_ENV_MYSQL_ROOT_PASSWORD%%\\nMYSQL_PORT=%%ENV:SERVER_PORT%%\\nMYSQL_SERVER=%%ENV:SERVER_HOST%%\\nCLUSTER_NAME=%%ENV:SVC_NAMESPACE%%\\nREPLICATION_MANAGER_ADDR=%%ENV:SVC_CONF_ENV_REPLICATION_MANAGER_ADDR%%\\nMYSQL_CONF=%%ENV:SVC_CONF_ENV_MYSQL_CONFDIR%%\\nDATADIR=%%ENV:SVC_CONF_ENV_MYSQL_DATADIR%%\\nMYSQL_CLIENT=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysql\\nMYSQL_CHECK=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysqlcheck\\nMYSQL_DUMP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysqldump\\nSST_RECEIVER_PORT=%%ENV:SVC_CONF_ENV_SST_RECEIVER_PORT%%\\nSOCAT_BIND=%%ENV:SERVER_IP%%\\nMARIADB_BACKUP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mariabackup\\nXTRABACKUP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/xtrabackup\\nINNODBACKUPEX=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/innobackupex\\n\\nERROLOG=$DATADIR/.system/logs/error.log\\nSLOWLOG=$DATADIR/.system/logs/slow-query.log\\nBACKUPDIR=$DATADIR/.system/backup\\n\\nJOBS=( \\\"xtrabackup\\\" \\\"mariabackup\\\" \\\"error\\\" \\\"slowquery\\\" \\\"zfssnapback\\\" \\\"optimize\\\" \\\"reseedxtrabackup\\\" \\\"reseedmariabackup\\\" \\\"reseedmysqldump\\\" \\\"flashbackxtrabackup\\\" \\\"flashbackmariadbackup\\\" \\\"flashbackmysqldump\\\" \\\"stop\\\" \\\"restart\\\" \\\"start\\\")\\n\\n# OSX need socat extra path\\nexport PATH=$PATH:/usr/local/bin\\n \\nsocatCleaner()\\n{\\n lsof -t -i:$SST_RECEIVER_PORT -sTCP:LISTEN | kill -9\\n}\\n\\ndoneJob()\\n{\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;UPDATE replication_manager_schema.jobs set end=NOW(), result=LOAD_FILE('/tmp/dbjob.out') WHERE id='$ID';\\\" &\\n}\\n\\npauseJob()\\n{\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"select sleep(20);set sql_log_bin=0;UPDATE replication_manager_schema.jobs set set done=1,result=LOAD_FILE('/tmp/dbjob.out') WHERE id='$ID';\\\" &\\n}\\n\\npartialRestore()\\n{\\n chown -R mysql:mysql $BACKUPDIR\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;install plugin BLACKHOLE soname 'ha_blackhole.so'\\\"\\n for dir in $(ls -d $BACKUPDIR/*/ | xargs -n 1 basename | grep -vE 'mysql|performance_schema|replication_manager_schema') ; do\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;drop database IF EXISTS $dir; CREATE DATABASE $dir;\\\"\\n\\n\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.exp\\\" | xargs -n 1 basename | cut -d'.' 
--complement -f2-) ; do\\n cat $BACKUPDIR/$dir/$file.frm | sed -e 's/\\\\x06\\\\x00\\\\x49\\\\x6E\\\\x6E\\\\x6F\\\\x44\\\\x42\\\\x00\\\\x00\\\\x00/\\\\x09\\\\x00\\\\x42\\\\x4C\\\\x41\\\\x43\\\\x4B\\\\x48\\\\x4F\\\\x4C\\\\x45/g' > $DATADIR/$dir/mrm_pivo.frm\\n chown mysql:mysql $DATADIR/$dir/mrm_pivo.frm\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;ALTER TABLE $dir.mrm_pivo engine=innodb;RENAME TABLE $dir.mrm_pivo TO $dir.$file; ALTER TABLE $dir.$file DISCARD TABLESPACE;\\\"\\n mv $BACKUPDIR/$dir/$file.ibd $DATADIR/$dir/$file.ibd\\n mv $BACKUPDIR/$dir/$file.exp $DATADIR/$dir/$file.exp\\n mv $BACKUPDIR/$dir/$file.cfg $DATADIR/$dir/$file.cfg\\n mv $BACKUPDIR/$dir/$file.TRG $DATADIR/$dir/$file.TRG\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;ALTER TABLE $dir.$file IMPORT TABLESPACE\\\"\\n done\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.MYD\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n mv $BACKUPDIR/$dir/$file.* $DATADIR/$dir/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE $dir.$file\\\"\\n done\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.CSV\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n mv $BACKUPDIR/$dir/$file.* $DATADIR/$dir/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE $dir.$file\\\"\\n done\\n done\\n for file in $(find $BACKUPDIR/mysql/ -name \\\"*.MYD\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n mv $BACKUPDIR/mysql/$file.* $DATADIR/mysql/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE mysql.$file\\\"\\n done\\n cat $BACKUPDIR/xtrabackup_info | grep binlog_pos | awk -F, '{ print $3 }' | sed -e 's/GTID of the last change/set sql_log_bin=0;set global gtid_slave_pos=/g' | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e\\\"flush privileges;start slave;\\\"\\n}\\n\\nfor job in \\\"${JOBS[@]}\\\"\\ndo\\n\\n TASK=($(echo \\\"select concat(id,'@',server,':',port) from replication_manager_schema.jobs WHERE task='$job' and done=0 order by task desc limit 1\\\" | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -N))\\n\\n ADDRESS=($(echo $TASK | awk -F@ '{ print $2 }'))\\n ID=($(echo $TASK | awk -F@ '{ print $1 }'))\\n #purge de past\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;UPDATE replication_manager_schema.jobs set done=1 WHERE done=0 AND task='$job' AND ID<>$ID;\\\"\\n\\n if [ \\\"$ADDRESS\\\" == \\\"\\\" ]; then\\n echo \\\"No $job needed\\\"\\n case \\\"$job\\\" in \\n start)\\n if [ \\\"curl -so /dev/null -w '%{response_code}' http://$REPLICATION_MANAGER_ADDR/api/clusters/$CLUSTER_NAME/servers/$MYSQL_SERVER/$MYSQL_PORT/need-start\\\" == \\\"200\\\" ]; then\\n curl http://$REPLICATION_MANAGER_ADDR/api/clusters/$CLUSTER_NAME/servers/$MYSQL_SERVER/$MYSQL_PORT/config|tar xzvf etc/* - -C $CONFDIR/../..\\n systemctl start mysql \\n fi\\n ;;\\n esac\\n else\\n echo \\\"Processing $job\\\"\\n case \\\"$job\\\" in\\n reseedmysqldump)\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n time socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | gunzip | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --init-command=\\\"reset master;set sql_log_bin=0;\\\" > /tmp/dbjob.out 2>&1\\n $MYSQL_CLIENT 
--defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e 'start slave;'\\n ;;\\n flashbackmysqldump)\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | gunzip | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --init-command=\\\"set sql_log_bin=0\\\" > /tmp/dbjob.out 2>&1\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e 'start slave;'\\n ;;\\n reseedxtrabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $XTRABACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n reseedmariabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | mbstream -x -C $BACKUPDIR\\n # mbstream -p, --parallel\\n $MARIADB_BACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n flashbackxtrabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner \\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $XTRABACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n flashbackmariadbackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $MARIADB_BACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n xtrabackup)\\n cd /docker-entrypoint-initdb.d\\n $INNODBACKUPEX --defaults-file=$MYSQL_CONF/my.cnf --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --no-version-check --stream=xbstream /tmp/ | socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n ;;\\n mariabackup)\\n cd /docker-entrypoint-initdb.d\\n $MARIADB_BACKUP --innobackupex --defaults-file=$MYSQL_CONF/my.cnf --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --no-version-check --stream=xbstream /tmp/ | socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n ;;\\n error)\\n cat $ERROLOG| socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n > $ERROLOG\\n ;;\\n slowquery)\\n cat $SLOWLOG| socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n > $SLOWLOG\\n ;;\\n zfssnapback)\\n LASTSNAP=`zfs list -r -t all |grep zp%%ENV:SERVICES_SVCNAME%%_pod01 | grep daily | sort -r | head -n 1 | cut -d\\\" \\\" -f1`\\n %%ENV:SERVICES_SVCNAME%% stop\\n zfs rollback $LASTSNAP\\n %%ENV:SERVICES_SVCNAME%% start\\n ;;\\n optimize)\\n $MYSQL_CHECK -o --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --all-databases --skip-write-binlog &>/tmp/dbjob.out\\n ;;\\n restart)\\n systemctl restart mysql \\n journalctl -u mysql > /tmp/dbjob.out \\n ;;\\n stop)\\n systemctl stop mysql \\n journalctl -u mysql > /tmp/dbjob.out \\n ;;\\n esac\\n doneJob\\n fi\\n\\ndone\\n\"}", - "var_updated": "2021-07-12 17:11:16", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/init/dbjobs_new\",\"mode\":755,\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"#!/bin/bash\\nset 
-x\\nUSER=%%ENV:SVC_CONF_ENV_MYSQL_ROOT_USER%%\\nPASSWORD=$MYSQL_ROOT_PASSWORD\\nMYSQL_PORT=%%ENV:SERVER_PORT%%\\nMYSQL_SERVER=%%ENV:SERVER_HOST%%\\nCLUSTER_NAME=%%ENV:SVC_NAMESPACE%%\\nREPLICATION_MANAGER_ADDR=%%ENV:SVC_CONF_ENV_REPLICATION_MANAGER_ADDR%%\\nMYSQL_CONF=%%ENV:SVC_CONF_ENV_MYSQL_CONFDIR%%\\nDATADIR=%%ENV:SVC_CONF_ENV_MYSQL_DATADIR%%\\nMYSQL_CLIENT=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysql\\nMYSQL_CHECK=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysqlcheck\\nMYSQL_DUMP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mysqldump\\nSST_RECEIVER_PORT=%%ENV:SVC_CONF_ENV_SST_RECEIVER_PORT%%\\nSOCAT_BIND=%%ENV:SERVER_IP%%\\nMARIADB_BACKUP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/mariabackup\\nXTRABACKUP=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/xtrabackup\\nINNODBACKUPEX=%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%/innobackupex\\n\\nERROLOG=$DATADIR/.system/logs/error.log\\nSLOWLOG=$DATADIR/.system/logs/slow-query.log\\nBACKUPDIR=$DATADIR/.system/backup\\n\\nJOBS=( \\\"xtrabackup\\\" \\\"mariabackup\\\" \\\"error\\\" \\\"slowquery\\\" \\\"zfssnapback\\\" \\\"optimize\\\" \\\"reseedxtrabackup\\\" \\\"reseedmariabackup\\\" \\\"reseedmysqldump\\\" \\\"flashbackxtrabackup\\\" \\\"flashbackmariadbackup\\\" \\\"flashbackmysqldump\\\" \\\"stop\\\" \\\"restart\\\" \\\"start\\\")\\n\\n# OSX need socat extra path\\nexport PATH=$PATH:/usr/local/bin\\n \\nsocatCleaner()\\n{\\n lsof -t -i:$SST_RECEIVER_PORT -sTCP:LISTEN | kill -9\\n}\\n\\ndoneJob()\\n{\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;UPDATE replication_manager_schema.jobs set end=NOW(), result=LOAD_FILE('/tmp/dbjob.out') WHERE id='$ID';\\\" &\\n}\\n\\npauseJob()\\n{\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"select sleep(20);set sql_log_bin=0;UPDATE replication_manager_schema.jobs set set done=1,result=LOAD_FILE('/tmp/dbjob.out') WHERE id='$ID';\\\" &\\n}\\n\\npartialRestore()\\n{\\n chown -R mysql:mysql $BACKUPDIR\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;install plugin BLACKHOLE soname 'ha_blackhole.so'\\\"\\n for dir in $(ls -d $BACKUPDIR/*/ | xargs -n 1 basename | grep -vE 'mysql|performance_schema|replication_manager_schema') ; do\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;drop database IF EXISTS $dir; CREATE DATABASE $dir;\\\"\\n\\n\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.exp\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n cat $BACKUPDIR/$dir/$file.frm | sed -e 's/\\\\x06\\\\x00\\\\x49\\\\x6E\\\\x6E\\\\x6F\\\\x44\\\\x42\\\\x00\\\\x00\\\\x00/\\\\x09\\\\x00\\\\x42\\\\x4C\\\\x41\\\\x43\\\\x4B\\\\x48\\\\x4F\\\\x4C\\\\x45/g' > $DATADIR/$dir/mrm_pivo.frm\\n chown mysql:mysql $DATADIR/$dir/mrm_pivo.frm\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;ALTER TABLE $dir.mrm_pivo engine=innodb;RENAME TABLE $dir.mrm_pivo TO $dir.$file; ALTER TABLE $dir.$file DISCARD TABLESPACE;\\\"\\n mv $BACKUPDIR/$dir/$file.ibd $DATADIR/$dir/$file.ibd\\n mv $BACKUPDIR/$dir/$file.exp $DATADIR/$dir/$file.exp\\n mv $BACKUPDIR/$dir/$file.cfg $DATADIR/$dir/$file.cfg\\n mv $BACKUPDIR/$dir/$file.TRG $DATADIR/$dir/$file.TRG\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;ALTER TABLE $dir.$file IMPORT TABLESPACE\\\"\\n done\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.MYD\\\" | xargs -n 1 basename | cut -d'.' 
--complement -f2-) ; do\\n mv $BACKUPDIR/$dir/$file.* $DATADIR/$dir/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE $dir.$file\\\"\\n done\\n for file in $(find $BACKUPDIR/$dir/ -name \\\"*.CSV\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n mv $BACKUPDIR/$dir/$file.* $DATADIR/$dir/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE $dir.$file\\\"\\n done\\n done\\n for file in $(find $BACKUPDIR/mysql/ -name \\\"*.MYD\\\" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do\\n mv $BACKUPDIR/mysql/$file.* $DATADIR/mysql/\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;FLUSH TABLE mysql.$file\\\"\\n done\\n cat $BACKUPDIR/xtrabackup_info | grep binlog_pos | awk -F, '{ print $3 }' | sed -e 's/GTID of the last change/set sql_log_bin=0;set global gtid_slave_pos=/g' | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e\\\"flush privileges;start slave;\\\"\\n}\\n\\nfor job in \\\"${JOBS[@]}\\\"\\ndo\\n\\n TASK=($(echo \\\"select concat(id,'@',server,':',port) from replication_manager_schema.jobs WHERE task='$job' and done=0 order by task desc limit 1\\\" | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -N))\\n\\n ADDRESS=($(echo $TASK | awk -F@ '{ print $2 }'))\\n ID=($(echo $TASK | awk -F@ '{ print $1 }'))\\n #purge de past\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e \\\"set sql_log_bin=0;UPDATE replication_manager_schema.jobs set done=1 WHERE done=0 AND task='$job' AND ID<>$ID;\\\"\\n\\n if [ \\\"$ADDRESS\\\" == \\\"\\\" ]; then\\n echo \\\"No $job needed\\\"\\n case \\\"$job\\\" in \\n start)\\n if [ \\\"curl -so /dev/null -w '%{response_code}' http://$REPLICATION_MANAGER_ADDR/api/clusters/$CLUSTER_NAME/servers/$MYSQL_SERVER/$MYSQL_PORT/need-start\\\" == \\\"200\\\" ]; then\\n curl http://$REPLICATION_MANAGER_ADDR/api/clusters/$CLUSTER_NAME/servers/$MYSQL_SERVER/$MYSQL_PORT/config|tar xzvf etc/* - -C $CONFDIR/../..\\n systemctl start mysql \\n fi\\n ;;\\n esac\\n else\\n echo \\\"Processing $job\\\"\\n case \\\"$job\\\" in\\n reseedmysqldump)\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n time socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | gunzip | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --init-command=\\\"reset master;set sql_log_bin=0;\\\" > /tmp/dbjob.out 2>&1\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e 'start slave;'\\n ;;\\n flashbackmysqldump)\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | gunzip | $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --init-command=\\\"set sql_log_bin=0\\\" > /tmp/dbjob.out 2>&1\\n $MYSQL_CLIENT --defaults-extra-file=$MYSQL_CONF/dbjob.cnf -e 'start slave;'\\n ;;\\n reseedxtrabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $XTRABACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n reseedmariabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT 
| mbstream -x -C $BACKUPDIR\\n # mbstream -p, --parallel\\n $MARIADB_BACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n flashbackxtrabackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner \\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $XTRABACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n flashbackmariadbackup)\\n rm -rf $BACKUPDIR\\n mkdir $BACKUPDIR\\n echo \\\"Waiting backup.\\\" > /tmp/dbjob.out\\n pauseJob\\n socatCleaner\\n socat -u TCP-LISTEN:$SST_RECEIVER_PORT,reuseaddr,bind=$SOCAT_BIND STDOUT | xbstream -x -C $BACKUPDIR\\n $MARIADB_BACKUP --prepare --export --target-dir=$BACKUPDIR\\n partialRestore\\n ;;\\n xtrabackup)\\n cd /docker-entrypoint-initdb.d\\n $INNODBACKUPEX --defaults-file=$MYSQL_CONF/my.cnf --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --no-version-check --stream=xbstream /tmp/ | socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n ;;\\n mariabackup)\\n cd /docker-entrypoint-initdb.d\\n $MARIADB_BACKUP --innobackupex --defaults-file=$MYSQL_CONF/my.cnf --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --no-version-check --stream=xbstream /tmp/ | socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n ;;\\n error)\\n cat $ERROLOG| socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n > $ERROLOG\\n ;;\\n slowquery)\\n cat $SLOWLOG| socat -u stdio TCP:$ADDRESS &>/tmp/dbjob.out\\n > $SLOWLOG\\n ;;\\n zfssnapback)\\n LASTSNAP=`zfs list -r -t all |grep zp%%ENV:SERVICES_SVCNAME%%_pod01 | grep daily | sort -r | head -n 1 | cut -d\\\" \\\" -f1`\\n %%ENV:SERVICES_SVCNAME%% stop\\n zfs rollback $LASTSNAP\\n %%ENV:SERVICES_SVCNAME%% start\\n ;;\\n optimize)\\n $MYSQL_CHECK -o --defaults-extra-file=$MYSQL_CONF/dbjob.cnf --all-databases --skip-write-binlog &>/tmp/dbjob.out\\n ;;\\n restart)\\n systemctl restart mysql \\n journalctl -u mysql > /tmp/dbjob.out \\n ;;\\n stop)\\n systemctl stop mysql \\n journalctl -u mysql > /tmp/dbjob.out \\n ;;\\n esac\\n doneJob\\n fi\\n\\ndone\\n\"}", + "var_updated": "2021-07-13 17:59:40", "var_name": "db_cnf_script_dbjobs_new", "id": 6270 } From a32090b66a50698930598abf5cfe7e864d1096f6 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Sat, 17 Jul 2021 15:45:45 +0200 Subject: [PATCH 82/84] Split configurator in his on package for later usage ( still need a cluster with server ) Add compliance for loose_innodb_lru_flush_size default to 1024 thanks to https://mysqlonarm.github.io/tune-your-io-workload/ Add compliance tag nosplitpath that will get the value from monitored variables, this allow to generate config that are disk compatible with an existing on premise server --- cluster/cluster.go | 627 ++++-------------- cluster/cluster_add.go | 12 +- cluster/cluster_del.go | 28 +- cluster/cluster_get.go | 250 +------ cluster/cluster_has.go | 18 - cluster/cluster_key.go | 4 +- cluster/cluster_set.go | 72 +- cluster/prov_localhost_db.go | 2 +- cluster/prov_opensvc_db.go | 64 +- cluster/prov_opensvc_shardproxy.go | 5 +- cluster/prx_get.go | 92 +-- cluster/prx_has.go | 12 - cluster/srv.go | 45 ++ cluster/srv_cnf.go | 159 +++++ cluster/srv_get.go | 245 +------ cluster/srv_rejoin.go | 4 +- config/config.go | 6 + dashboard/app/dashboard.js | 4 +- dashboard/static/card-db-configurator.html | 2 +- dashboard/static/card-proxy-configurator.html | 2 +- server/api_cluster.go | 9 +- .../opensvc/moduleset_mariadb.svc.mrm.db.json | 50 +- 22 files changed, 487 insertions(+), 1225 deletions(-) create mode 100644 
cluster/srv_cnf.go diff --git a/cluster/cluster.go b/cluster/cluster.go index cc3d811ce..0703739d7 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -15,12 +15,12 @@ import ( "io/ioutil" "os" "os/exec" - "strconv" "strings" "sync" "time" "github.com/BurntSushi/toml" + "github.com/signal18/replication-manager/cluster/configurator" "github.com/signal18/replication-manager/cluster/nbc" "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/router/maxscale" @@ -35,126 +35,122 @@ import ( ) type Cluster struct { - Name string `json:"name"` - Tenant string `json:"tenant"` - WorkingDir string `json:"workingDir"` - Servers serverList `json:"-"` - ServerIdList []string `json:"dbServers"` - Crashes crashList `json:"dbServersCrashes"` - Proxies proxyList `json:"-"` - ProxyIdList []string `json:"proxyServers"` - FailoverCtr int `json:"failoverCounter"` - FailoverTs int64 `json:"failoverLastTime"` - Status string `json:"activePassiveStatus"` - IsSplitBrain bool `json:"isSplitBrain"` - IsSplitBrainBck bool `json:"-"` - IsFailedArbitrator bool `json:"isFailedArbitrator"` - IsLostMajority bool `json:"isLostMajority"` - IsDown bool `json:"isDown"` - IsClusterDown bool `json:"isClusterDown"` - IsAllDbUp bool `json:"isAllDbUp"` - IsFailable bool `json:"isFailable"` - IsPostgres bool `json:"isPostgres"` - IsProvision bool `json:"isProvision"` - IsNeedProxiesRestart bool `json:"isNeedProxyRestart"` - IsNeedProxiesReprov bool `json:"isNeedProxiesRestart"` - IsNeedDatabasesRestart bool `json:"isNeedDatabasesRestart"` - IsNeedDatabasesRollingRestart bool `json:"isNeedDatabasesRollingRestart"` - IsNeedDatabasesRollingReprov bool `json:"isNeedDatabasesRollingReprov"` - IsNeedDatabasesReprov bool `json:"isNeedDatabasesReprov"` - IsValidBackup bool `json:"isValidBackup"` - IsNotMonitoring bool `json:"isNotMonitoring"` - IsCapturing bool `json:"isCapturing"` - Conf config.Config `json:"config"` - CleanAll bool `json:"cleanReplication"` //used in testing - ConfigDBTags []Tag `json:"configTags"` //from module - ConfigPrxTags []Tag `json:"configPrxTags"` //from module - DBTags []string `json:"dbServersTags"` //from conf - ProxyTags []string `json:"proxyServersTags"` - Topology string `json:"topology"` - Uptime string `json:"uptime"` - UptimeFailable string `json:"uptimeFailable"` - UptimeSemiSync string `json:"uptimeSemisync"` - MonitorSpin string `json:"monitorSpin"` - DBTableSize int64 `json:"dbTableSize"` - DBIndexSize int64 `json:"dbIndexSize"` - Connections int `json:"connections"` - QPS int64 `json:"qps"` - Log s18log.HttpLog `json:"log"` - JobResults map[string]*JobResult `json:"jobResults"` - Grants map[string]string `json:"-"` - tlog *s18log.TermLog `json:"-"` - htlog *s18log.HttpLog `json:"-"` - SQLGeneralLog s18log.HttpLog `json:"sqlGeneralLog"` - SQLErrorLog s18log.HttpLog `json:"sqlErrorLog"` - MonitorType map[string]string `json:"monitorType"` - TopologyType map[string]string `json:"topologyType"` - FSType map[string]bool `json:"fsType"` - DiskType map[string]string `json:"diskType"` - VMType map[string]bool `json:"vmType"` - Agents []Agent `json:"agents"` - hostList []string `json:"-"` - proxyList []string `json:"-"` - clusterList map[string]*Cluster `json:"-"` - slaves serverList `json:"-"` - master *ServerMonitor `json:"-"` - oldMaster *ServerMonitor `json:"-"` - vmaster *ServerMonitor `json:"-"` - mxs *maxscale.MaxScale `json:"-"` - dbUser string `json:"-"` - dbPass string `json:"-"` - rplUser string `json:"-"` - rplPass string `json:"-"` - sme 
*state.StateMachine `json:"-"` - runOnceAfterTopology bool `json:"-"` - logPtr *os.File `json:"-"` - termlength int `json:"-"` - runUUID string `json:"-"` - cfgGroupDisplay string `json:"-"` - repmgrVersion string `json:"-"` - repmgrHostname string `json:"-"` - key []byte `json:"-"` - exitMsg string `json:"-"` - exit bool `json:"-"` - canFlashBack bool `json:"-"` - failoverCond *nbc.NonBlockingChan `json:"-"` - switchoverCond *nbc.NonBlockingChan `json:"-"` - rejoinCond *nbc.NonBlockingChan `json:"-"` - bootstrapCond *nbc.NonBlockingChan `json:"-"` - altertableCond *nbc.NonBlockingChan `json:"-"` - addtableCond *nbc.NonBlockingChan `json:"-"` - statecloseChan chan state.State `json:"-"` - switchoverChan chan bool `json:"-"` - errorChan chan error `json:"-"` - testStopCluster bool `json:"-"` - testStartCluster bool `json:"-"` - lastmaster *ServerMonitor `json:"-"` - benchmarkType string `json:"-"` - HaveDBTLSCert bool `json:"haveDBTLSCert"` - HaveDBTLSOldCert bool `json:"haveDBTLSOldCert"` - tlsconf *tls.Config `json:"-"` - tlsoldconf *tls.Config `json:"-"` - tunnel *ssh.Client `json:"-"` - DBModule config.Compliance `json:"-"` - ProxyModule config.Compliance `json:"-"` - QueryRules map[uint32]config.QueryRule `json:"-"` - Backups []Backup `json:"-"` - SLAHistory []state.Sla `json:"slaHistory"` - APIUsers map[string]APIUser `json:"apiUsers"` - Schedule map[string]cron.Entry `json:"-"` - scheduler *cron.Cron `json:"-"` - idSchedulerPhysicalBackup cron.EntryID `json:"-"` - idSchedulerLogicalBackup cron.EntryID `json:"-"` - idSchedulerOptimize cron.EntryID `json:"-"` - idSchedulerErrorLogs cron.EntryID `json:"-"` - idSchedulerLogRotateTable cron.EntryID `json:"-"` - idSchedulerSLARotate cron.EntryID `json:"-"` - idSchedulerRollingRestart cron.EntryID `json:"-"` - idSchedulerDbsjobsSsh cron.EntryID `json:"-"` - idSchedulerRollingReprov cron.EntryID `json:"-"` - WaitingRejoin int `json:"waitingRejoin"` - WaitingSwitchover int `json:"waitingSwitchover"` - WaitingFailover int `json:"waitingFailover"` + Name string `json:"name"` + Tenant string `json:"tenant"` + WorkingDir string `json:"workingDir"` + Servers serverList `json:"-"` + ServerIdList []string `json:"dbServers"` + Crashes crashList `json:"dbServersCrashes"` + Proxies proxyList `json:"-"` + ProxyIdList []string `json:"proxyServers"` + FailoverCtr int `json:"failoverCounter"` + FailoverTs int64 `json:"failoverLastTime"` + Status string `json:"activePassiveStatus"` + IsSplitBrain bool `json:"isSplitBrain"` + IsSplitBrainBck bool `json:"-"` + IsFailedArbitrator bool `json:"isFailedArbitrator"` + IsLostMajority bool `json:"isLostMajority"` + IsDown bool `json:"isDown"` + IsClusterDown bool `json:"isClusterDown"` + IsAllDbUp bool `json:"isAllDbUp"` + IsFailable bool `json:"isFailable"` + IsPostgres bool `json:"isPostgres"` + IsProvision bool `json:"isProvision"` + IsNeedProxiesRestart bool `json:"isNeedProxyRestart"` + IsNeedProxiesReprov bool `json:"isNeedProxiesRestart"` + IsNeedDatabasesRestart bool `json:"isNeedDatabasesRestart"` + IsNeedDatabasesRollingRestart bool `json:"isNeedDatabasesRollingRestart"` + IsNeedDatabasesRollingReprov bool `json:"isNeedDatabasesRollingReprov"` + IsNeedDatabasesReprov bool `json:"isNeedDatabasesReprov"` + IsValidBackup bool `json:"isValidBackup"` + IsNotMonitoring bool `json:"isNotMonitoring"` + IsCapturing bool `json:"isCapturing"` + Conf config.Config `json:"config"` + CleanAll bool `json:"cleanReplication"` //used in testing + + Topology string `json:"topology"` + Uptime string `json:"uptime"` + 
UptimeFailable string `json:"uptimeFailable"` + UptimeSemiSync string `json:"uptimeSemisync"` + MonitorSpin string `json:"monitorSpin"` + DBTableSize int64 `json:"dbTableSize"` + DBIndexSize int64 `json:"dbIndexSize"` + Connections int `json:"connections"` + QPS int64 `json:"qps"` + Log s18log.HttpLog `json:"log"` + JobResults map[string]*JobResult `json:"jobResults"` + Grants map[string]string `json:"-"` + tlog *s18log.TermLog `json:"-"` + htlog *s18log.HttpLog `json:"-"` + SQLGeneralLog s18log.HttpLog `json:"sqlGeneralLog"` + SQLErrorLog s18log.HttpLog `json:"sqlErrorLog"` + MonitorType map[string]string `json:"monitorType"` + TopologyType map[string]string `json:"topologyType"` + FSType map[string]bool `json:"fsType"` + DiskType map[string]string `json:"diskType"` + VMType map[string]bool `json:"vmType"` + Agents []Agent `json:"agents"` + hostList []string `json:"-"` + proxyList []string `json:"-"` + clusterList map[string]*Cluster `json:"-"` + slaves serverList `json:"-"` + master *ServerMonitor `json:"-"` + oldMaster *ServerMonitor `json:"-"` + vmaster *ServerMonitor `json:"-"` + mxs *maxscale.MaxScale `json:"-"` + dbUser string `json:"-"` + dbPass string `json:"-"` + rplUser string `json:"-"` + rplPass string `json:"-"` + sme *state.StateMachine `json:"-"` + runOnceAfterTopology bool `json:"-"` + logPtr *os.File `json:"-"` + termlength int `json:"-"` + runUUID string `json:"-"` + cfgGroupDisplay string `json:"-"` + repmgrVersion string `json:"-"` + repmgrHostname string `json:"-"` + key []byte `json:"-"` + exitMsg string `json:"-"` + exit bool `json:"-"` + canFlashBack bool `json:"-"` + failoverCond *nbc.NonBlockingChan `json:"-"` + switchoverCond *nbc.NonBlockingChan `json:"-"` + rejoinCond *nbc.NonBlockingChan `json:"-"` + bootstrapCond *nbc.NonBlockingChan `json:"-"` + altertableCond *nbc.NonBlockingChan `json:"-"` + addtableCond *nbc.NonBlockingChan `json:"-"` + statecloseChan chan state.State `json:"-"` + switchoverChan chan bool `json:"-"` + errorChan chan error `json:"-"` + testStopCluster bool `json:"-"` + testStartCluster bool `json:"-"` + lastmaster *ServerMonitor `json:"-"` + benchmarkType string `json:"-"` + HaveDBTLSCert bool `json:"haveDBTLSCert"` + HaveDBTLSOldCert bool `json:"haveDBTLSOldCert"` + tlsconf *tls.Config `json:"-"` + tlsoldconf *tls.Config `json:"-"` + tunnel *ssh.Client `json:"-"` + QueryRules map[uint32]config.QueryRule `json:"-"` + Backups []Backup `json:"-"` + SLAHistory []state.Sla `json:"slaHistory"` + APIUsers map[string]APIUser `json:"apiUsers"` + Schedule map[string]cron.Entry `json:"-"` + scheduler *cron.Cron `json:"-"` + idSchedulerPhysicalBackup cron.EntryID `json:"-"` + idSchedulerLogicalBackup cron.EntryID `json:"-"` + idSchedulerOptimize cron.EntryID `json:"-"` + idSchedulerErrorLogs cron.EntryID `json:"-"` + idSchedulerLogRotateTable cron.EntryID `json:"-"` + idSchedulerSLARotate cron.EntryID `json:"-"` + idSchedulerRollingRestart cron.EntryID `json:"-"` + idSchedulerDbsjobsSsh cron.EntryID `json:"-"` + idSchedulerRollingReprov cron.EntryID `json:"-"` + WaitingRejoin int `json:"waitingRejoin"` + WaitingSwitchover int `json:"waitingSwitchover"` + WaitingFailover int `json:"waitingFailover"` + Configurator configurator.Configurator `json:"configurator"` sync.Mutex crcTable *crc64.Table } @@ -189,12 +185,6 @@ type Alerts struct { Warnings []state.StateHttp `json:"warnings"` } -type Tag struct { - Id uint `json:"id"` - Name string `json:"name"` - Category string `json:"category"` -} - type JobResult struct { Xtrabackup bool `json:"xtrabackup"` 
Mariabackup bool `json:"mariabackup"` @@ -325,10 +315,7 @@ func (cluster *Cluster) Init(conf config.Config, cfgGroup string, tlog *s18log.T cluster.LogPrintf(LvlErr, "Could not set proxy list %s", err) } //Loading configuration compliances - cluster.LoadDBModules() - cluster.LoadPrxModules() - cluster.ConfigDBTags = cluster.GetDBModuleTags() - cluster.ConfigPrxTags = cluster.GetProxyModuleTags() + cluster.Configurator.Init(cluster.Conf) switch cluster.Conf.ProvOrchestrator { case config.ConstOrchestratorLocalhost: @@ -629,6 +616,7 @@ func (cluster *Cluster) InitAgent(conf config.Config) { func (cluster *Cluster) ReloadConfig(conf config.Config) { cluster.Conf = conf + cluster.Configurator.SetConfig(conf) cluster.sme.SetFailoverState() cluster.newServerList() cluster.newProxyList() @@ -949,369 +937,6 @@ func (cluster *Cluster) LostArbitration(realmasterurl string) { } } -func (cluster *Cluster) LoadDBModules() { - file := cluster.Conf.ShareDir + "/opensvc/moduleset_mariadb.svc.mrm.db.json" - jsonFile, err := os.Open(file) - if err != nil { - cluster.LogPrintf(LvlErr, "Failed opened module %s %s", file, err) - } - cluster.LogPrintf(LvlInfo, "Loading database configurator config %s", file) - // defer the closing of our jsonFile so that we can parse it later on - defer jsonFile.Close() - - byteValue, _ := ioutil.ReadAll(jsonFile) - - err = json.Unmarshal([]byte(byteValue), &cluster.DBModule) - if err != nil { - cluster.LogPrintf(LvlErr, "Failed unmarshal file %s %s", file, err) - } - -} - -func (cluster *Cluster) LoadPrxModules() { - - file := cluster.Conf.ShareDir + "/opensvc/moduleset_mariadb.svc.mrm.proxy.json" - jsonFile, err := os.Open(file) - if err != nil { - cluster.LogPrintf(LvlErr, "Failed opened module %s %s", file, err) - } - cluster.LogPrintf(LvlInfo, "Loading proxies configurator config %s", file) - // defer the closing of our jsonFile so that we can parse it later on - defer jsonFile.Close() - - byteValue, _ := ioutil.ReadAll(jsonFile) - - err = json.Unmarshal([]byte(byteValue), &cluster.ProxyModule) - if err != nil { - cluster.LogPrintf(LvlErr, "Failed unmarshal file %s %s", file, err) - } - -} - -func (cluster *Cluster) ConfigDiscovery() error { - - if cluster.master == nil { - return errors.New("No master in topology") - } - innodbmem, err := strconv.ParseUint(cluster.master.Variables["INNODB_BUFFER_POOL_SIZE"], 10, 64) - if err != nil { - return err - } - totalmem := innodbmem - myisammem, err := strconv.ParseUint(cluster.master.Variables["KEY_BUFFER_SIZE"], 10, 64) - if err != nil { - return err - } - totalmem += myisammem - qcmem, err := strconv.ParseUint(cluster.master.Variables["QUERY_CACHE_SIZE"], 10, 64) - if err != nil { - return err - } - if qcmem == 0 { - cluster.AddDBTag("noquerycache") - } - totalmem += qcmem - ariamem := uint64(0) - if _, ok := cluster.master.Variables["ARIA_PAGECACHE_BUFFER_SIZE"]; ok { - ariamem, err = strconv.ParseUint(cluster.master.Variables["ARIA_PAGECACHE_BUFFER_SIZE"], 10, 64) - if err != nil { - return err - } - totalmem += ariamem - } - tokumem := uint64(0) - if _, ok := cluster.master.Variables["TOKUDB_CACHE_SIZE"]; ok { - cluster.AddDBTag("tokudb") - tokumem, err = strconv.ParseUint(cluster.master.Variables["TOKUDB_CACHE_SIZE"], 10, 64) - if err != nil { - return err - } - totalmem += tokumem - } - s3mem := uint64(0) - if _, ok := cluster.master.Variables["S3_PAGECACHE_BUFFER_SIZE"]; ok { - cluster.AddDBTag("s3") - tokumem, err = strconv.ParseUint(cluster.master.Variables["S3_PAGECACHE_BUFFER_SIZE"], 10, 64) - if err != nil { - return 
err - } - totalmem += s3mem - } - - rocksmem := uint64(0) - if _, ok := cluster.master.Variables["ROCKSDB_BLOCK_CACHE_SIZE"]; ok { - cluster.AddDBTag("myrocks") - tokumem, err = strconv.ParseUint(cluster.master.Variables["ROCKSDB_BLOCK_CACHE_SIZE"], 10, 64) - if err != nil { - return err - } - totalmem += rocksmem - } - - sharedmempcts, _ := cluster.Conf.GetMemoryPctShared() - totalmem = totalmem + totalmem*uint64(sharedmempcts["threads"])/100 - cluster.SetDBMemorySize(strconv.FormatUint((totalmem / 1024 / 1024), 10)) - cluster.SetDBCores(cluster.master.Variables["THREAD_POOL_SIZE"]) - - if cluster.master.Variables["INNODB_DOUBLEWRITE"] == "OFF" { - cluster.AddDBTag("nodoublewrite") - } - if cluster.master.Variables["INNODB_FLUSH_LOG_AT_TRX_COMMIT"] != "1" && cluster.master.Variables["SYNC_BINLOG"] != "1" { - cluster.AddDBTag("nodurable") - } - if cluster.master.Variables["INNODB_FLUSH_METHOD"] != "O_DIRECT" { - cluster.AddDBTag("noodirect") - } - if cluster.master.Variables["LOG_BIN_COMPRESS"] == "ON" { - cluster.AddDBTag("compressbinlog") - } - if cluster.master.Variables["INNODB_DEFRAGMENT"] == "ON" { - cluster.AddDBTag("autodefrag") - } - if cluster.master.Variables["INNODB_COMPRESSION_DEFAULT"] == "ON" { - cluster.AddDBTag("compresstable") - } - - if cluster.master.HasInstallPlugin("BLACKHOLE") { - cluster.AddDBTag("blackhole") - } - if cluster.master.HasInstallPlugin("QUERY_RESPONSE_TIME") { - cluster.AddDBTag("userstats") - } - if cluster.master.HasInstallPlugin("SQL_ERROR_LOG") { - cluster.AddDBTag("sqlerror") - } - if cluster.master.HasInstallPlugin("METADATA_LOCK_INFO") { - cluster.AddDBTag("metadatalocks") - } - if cluster.master.HasInstallPlugin("SERVER_AUDIT") { - cluster.AddDBTag("audit") - } - if cluster.master.Variables["SLOW_QUERY_LOG"] == "ON" { - cluster.AddDBTag("slow") - } - if cluster.master.Variables["GENERAL_LOG"] == "ON" { - cluster.AddDBTag("general") - } - if cluster.master.Variables["PERFORMANCE_SCHEMA"] == "ON" { - cluster.AddDBTag("pfs") - } - if cluster.master.Variables["LOG_OUTPUT"] == "TABLE" { - cluster.AddDBTag("logtotable") - } - - if cluster.master.HasInstallPlugin("CONNECT") { - cluster.AddDBTag("connect") - } - if cluster.master.HasInstallPlugin("SPIDER") { - cluster.AddDBTag("spider") - } - if cluster.master.HasInstallPlugin("SPHINX") { - cluster.AddDBTag("sphinx") - } - if cluster.master.HasInstallPlugin("MROONGA") { - cluster.AddDBTag("mroonga") - } - if cluster.master.HasWsrep() { - cluster.AddDBTag("wsrep") - } - //missing in compliance - if cluster.master.HasInstallPlugin("ARCHIVE") { - cluster.AddDBTag("archive") - } - - if cluster.master.HasInstallPlugin("CRACKLIB_PASSWORD_CHECK") { - cluster.AddDBTag("pwdcheckcracklib") - } - if cluster.master.HasInstallPlugin("SIMPLE_PASSWORD_CHECK") { - cluster.AddDBTag("pwdchecksimple") - } - - if cluster.master.Variables["LOCAL_INFILE"] == "ON" { - cluster.AddDBTag("localinfile") - } - if cluster.master.Variables["SKIP_NAME_RESOLVE"] == "OFF" { - cluster.AddDBTag("resolvdns") - } - if cluster.master.Variables["READ_ONLY"] == "ON" { - cluster.AddDBTag("readonly") - } - if cluster.master.Variables["HAVE_SSL"] == "YES" { - cluster.AddDBTag("ssl") - } - - if cluster.master.Variables["BINLOG_FORMAT"] == "STATEMENT" { - cluster.AddDBTag("statement") - } - if cluster.master.Variables["BINLOG_FORMAT"] == "ROW" { - cluster.AddDBTag("row") - } - if cluster.master.Variables["LOG_BIN"] == "OFF" { - cluster.AddDBTag("nobinlog") - } - if cluster.master.Variables["LOG_BIN"] == "OFF" { - cluster.AddDBTag("nobinlog") - } 
- if cluster.master.Variables["LOG_SLAVE_UPDATES"] == "OFF" { - cluster.AddDBTag("nologslaveupdates") - } - if cluster.master.Variables["RPL_SEMI_SYNC_MASTER_ENABLED"] == "ON" { - cluster.AddDBTag("semisync") - } - if cluster.master.Variables["GTID_STRICT_MODE"] == "ON" { - cluster.AddDBTag("gtidstrict") - } - if strings.Contains(cluster.master.Variables["SLAVE_TYPE_COVERSIONS"], "ALL_NON_LOSSY") || strings.Contains(cluster.master.Variables["SLAVE_TYPE_COVERSIONS"], "ALL_LOSSY") { - cluster.AddDBTag("lossyconv") - } - if cluster.master.Variables["SLAVE_EXEC_MODE"] == "IDEMPOTENT" { - cluster.AddDBTag("idempotent") - } - - //missing in compliance - if strings.Contains(cluster.master.Variables["OPTIMIZER_SWITCH"], "SUBQUERY_CACHE=ON") { - cluster.AddDBTag("subquerycache") - } - if strings.Contains(cluster.master.Variables["OPTIMIZER_SWITCH"], "SEMIJOIN_WITH_CACHE=ON") { - cluster.AddDBTag("semijoincache") - } - if strings.Contains(cluster.master.Variables["OPTIMIZER_SWITCH"], "FIRSTMATCH=ON") { - cluster.AddDBTag("firstmatch") - } - if strings.Contains(cluster.master.Variables["OPTIMIZER_SWITCH"], "EXTENDED_KEYS=ON") { - cluster.AddDBTag("extendedkeys") - } - if strings.Contains(cluster.master.Variables["OPTIMIZER_SWITCH"], "LOOSESCAN=ON") { - cluster.AddDBTag("loosescan") - } - if strings.Contains(cluster.master.Variables["OPTIMIZER_SWITCH"], "INDEX_CONDITION_PUSHDOWN=OFF") { - cluster.AddDBTag("noicp") - } - if strings.Contains(cluster.master.Variables["OPTIMIZER_SWITCH"], "IN_TO_EXISTS=OFF") { - cluster.AddDBTag("nointoexists") - } - if strings.Contains(cluster.master.Variables["OPTIMIZER_SWITCH"], "DERIVED_MERGE=OFF") { - cluster.AddDBTag("noderivedmerge") - } - if strings.Contains(cluster.master.Variables["OPTIMIZER_SWITCH"], "DERIVED_WITH_KEYS=OFF") { - cluster.AddDBTag("noderivedwithkeys") - } - if strings.Contains(cluster.master.Variables["OPTIMIZER_SWITCH"], "MRR=OFF") { - cluster.AddDBTag("nomrr") - } - if strings.Contains(cluster.master.Variables["OPTIMIZER_SWITCH"], "OUTER_JOIN_WITH_CACHE=OFF") { - cluster.AddDBTag("noouterjoincache") - } - if strings.Contains(cluster.master.Variables["OPTIMIZER_SWITCH"], "SEMI_JOIN_WITH_CACHE=OFF") { - cluster.AddDBTag("nosemijoincache") - } - if strings.Contains(cluster.master.Variables["OPTIMIZER_SWITCH"], "TABLE_ELIMINATION=OFF") { - cluster.AddDBTag("notableelimination") - } - if strings.Contains(cluster.master.Variables["SQL_MODE"], "ORACLE") { - cluster.AddDBTag("sqlmodeoracle") - } - if cluster.master.Variables["SQL_MODE"] == "" { - cluster.AddDBTag("sqlmodeunstrict") - } - //index_merge=on - //index_merge_union=on, - //index_merge_sort_union=on - //index_merge_intersection=on - //index_merge_sort_intersection=off - //engine_condition_pushdown=on - //materialization=on - //semijoin=on - //partial_match_rowid_merge=on - //partial_match_table_scan=on, - //mrr_cost_based=off - //mrr_sort_keys=on, - //join_cache_incremental=on, - //join_cache_hashed=on, - //join_cache_bka=on, - //optimize_join_buffer_size=on, - //orderby_uses_equalities=on - //condition_pushdown_for_derived=on - //split_materialized=on// - //condition_pushdown_for_subquery=on, - //rowid_filter=on - //condition_pushdown_from_having=on - - if cluster.master.Variables["TX_ISOLATION"] == "READ-COMMITTED" { - cluster.AddDBTag("readcommitted") - } - //missing - if cluster.master.Variables["TX_ISOLATION"] == "READ-UNCOMMITTED" { - cluster.AddDBTag("readuncommitted") - } - if cluster.master.Variables["TX_ISOLATION"] == "REPEATABLE-READ" { - cluster.AddDBTag("reapeatableread") - } - 
if cluster.master.Variables["TX_ISOLATION"] == "SERIALIZED" { - cluster.AddDBTag("serialized") - } - - if cluster.master.Variables["JOIN_CACHE_LEVEL"] == "8" { - cluster.AddDBTag("hashjoin") - } - if cluster.master.Variables["JOIN_CACHE_LEVEL"] == "6" { - cluster.AddDBTag("mrrjoin") - } - if cluster.master.Variables["JOIN_CACHE_LEVEL"] == "2" { - cluster.AddDBTag("nestedjoin") - } - if cluster.master.Variables["LOWER_CASE_TABLE_NAMES"] == "1" { - cluster.AddDBTag("lowercasetable") - } - if cluster.master.Variables["USER_STAT_TABLES"] == "PREFERABLY_FOR_QUERIES" { - cluster.AddDBTag("eits") - } - - if cluster.master.Variables["CHARACTER_SET_SERVER"] == "UTF8MB4" { - if strings.Contains(cluster.master.Variables["COLLATION_SERVER"], "_ci") { - cluster.AddDBTag("bm4ci") - } else { - cluster.AddDBTag("bm4cs") - } - } - if cluster.master.Variables["CHARACTER_SET_SERVER"] == "UTF8" { - if strings.Contains(cluster.master.Variables["COLLATION_SERVER"], "_ci") { - cluster.AddDBTag("utf8ci") - } else { - cluster.AddDBTag("utf8cs") - } - } - - //slave_parallel_mode = optimistic - /* - - tmpmem, err := strconv.ParseUint(cluster.master.Variables["TMP_TABLE_SIZE"], 10, 64) - if err != nil { - return err - } - qttmp, err := strconv.ParseUint(cluster.master.Variables["MAX_TMP_TABLES"], 10, 64) - if err != nil { - return err - } - tmpmem = tmpmem * qttmp - totalmem += tmpmem - - cores, err := strconv.ParseUint(cluster.master.Variables["THREAD_POOL_SIZE"], 10, 64) - if err != nil { - return err - } - - joinmem, err := strconv.ParseUint(cluster.master.Variables["JOIN_BUFFER_SPACE_LIMIT"], 10, 64) - joinmem = joinmem * cores - - sortmem, err := strconv.ParseUint(cluster.master.Variables["SORT_BUFFER_SIZE"], 10, 64) - */ - // - // containermem = containermem * int64(sharedmempcts["innodb"]) / 100 - - return nil -} - func (c *Cluster) AddProxy(prx DatabaseProxy) { prx.SetCluster(c) prx.SetID() @@ -1321,3 +946,17 @@ func (c *Cluster) AddProxy(prx DatabaseProxy) { prx.SetState(stateSuspect) c.Proxies = append(c.Proxies, prx) } + +func (cluster *Cluster) ConfigDiscovery() error { + server := cluster.GetMaster() + if server == nil { + cluster.LogPrintf(LvlErr, "Cluster configuration discovery can only be done on a valid leader") + return errors.New("Cluster configuration discovery can only be done on a valid leader") + } + cluster.Configurator.ConfigDiscovery(server.Variables, server.Plugins) + cluster.SetDBCoresFromConfigurator() + cluster.SetDBMemoryFromConfigurator() + cluster.SetDBIOPSFromConfigurator() + cluster.SetTagsFromConfigurator() + return nil +} diff --git a/cluster/cluster_add.go b/cluster/cluster_add.go index a000779ba..48262c17c 100644 --- a/cluster/cluster_add.go +++ b/cluster/cluster_add.go @@ -32,16 +32,16 @@ func (cluster *Cluster) AddSeededServer(srv string) error { } func (cluster *Cluster) AddDBTagConfig(tag string) { - if !cluster.HaveDBTag(tag) { - cluster.DBTags = append(cluster.DBTags, tag) - cluster.Conf.ProvTags = strings.Join(cluster.DBTags, ",") + if !cluster.Configurator.HaveDBTag(tag) { + cluster.Configurator.AddDBTag(tag) + cluster.Conf.ProvTags = cluster.Configurator.GetConfigDBTags() + cluster.SetClusterVariablesFromConfig() } } func (cluster *Cluster) AddDBTag(tag string) { - if !cluster.HaveDBTag(tag) { + if !cluster.Configurator.HaveDBTag(tag) { cluster.LogPrintf(LvlInfo, "Adding database tag %s ", tag) cluster.AddDBTagConfig(tag) if cluster.Conf.ProvDBApplyDynamicConfig { @@ -64,8 +64,8 @@ func (cluster *Cluster) AddDBTag(tag string) { } func (cluster *Cluster) AddProxyTag(tag 
string) { - cluster.ProxyTags = append(cluster.ProxyTags, tag) - cluster.Conf.ProvProxTags = strings.Join(cluster.ProxyTags, ",") + cluster.Configurator.AddProxyTag(tag) + cluster.Conf.ProvProxTags = cluster.Configurator.GetConfigProxyTags() cluster.SetClusterVariablesFromConfig() cluster.SetProxiesRestartCookie() } diff --git a/cluster/cluster_del.go b/cluster/cluster_del.go index 9ea352658..4bcf9d34a 100644 --- a/cluster/cluster_del.go +++ b/cluster/cluster_del.go @@ -62,32 +62,16 @@ func (cluster *Cluster) DropDBTag(dtag string) { } func (cluster *Cluster) DropDBTagConfig(dtag string) bool { - var newtags []string - changed := false - for _, tag := range cluster.DBTags { - if dtag != tag { - newtags = append(newtags, tag) - } - } - if len(cluster.DBTags) != len(newtags) { - changed = true - cluster.DBTags = newtags - cluster.Conf.ProvTags = strings.Join(cluster.DBTags, ",") - cluster.SetClusterVariablesFromConfig() - } + changed := cluster.Configurator.DropDBTagConfig(dtag) + cluster.Conf.ProvTags = strings.Join(cluster.Configurator.GetDBTags(), ",") + cluster.SetClusterVariablesFromConfig() return changed } func (cluster *Cluster) DropProxyTag(dtag string) { - var newtags []string - for _, tag := range cluster.ProxyTags { - // cluster.LogPrintf(LvlInfo, "%s %s", tag, dtag) - if dtag != tag { - newtags = append(newtags, tag) - } - } - cluster.ProxyTags = newtags - cluster.Conf.ProvProxTags = strings.Join(cluster.ProxyTags, ",") + + cluster.Configurator.DropProxyTag(dtag) + cluster.Conf.ProvProxTags = strings.Join(cluster.Configurator.GetProxyTags(), ",") cluster.SetClusterVariablesFromConfig() cluster.SetProxiesRestartCookie() } diff --git a/cluster/cluster_get.go b/cluster/cluster_get.go index 8e480fd89..a56d1974a 100644 --- a/cluster/cluster_get.go +++ b/cluster/cluster_get.go @@ -9,7 +9,6 @@ package cluster import ( "encoding/json" "errors" - "hash/crc32" "io/ioutil" "sort" "strconv" @@ -474,6 +473,7 @@ func (cluster *Cluster) GetTopology() string { return cluster.Conf.Topology } +/* func (cluster *Cluster) GetDatabaseTags() []string { return strings.Split(cluster.Conf.ProvTags, ",") } @@ -481,7 +481,7 @@ func (cluster *Cluster) GetDatabaseTags() []string { func (cluster *Cluster) GetProxyTags() []string { return strings.Split(cluster.Conf.ProvProxTags, ",") } - +*/ func (cluster *Cluster) GetCron() []cron.Entry { return cluster.scheduler.Entries() @@ -649,256 +649,10 @@ func (cluster *Cluster) GetTableDLLNoFK(schema string, table string, srv *Server return ddl, err } -func (cluster *Cluster) GetDBModuleTags() []Tag { - var tags []Tag - for _, value := range cluster.DBModule.Filtersets { - var t Tag - t.Id = value.ID - s := strings.Split(value.Name, ".") - t.Name = s[len(s)-1] - t.Category = s[len(s)-2] - tags = append(tags, t) - } - return tags -} - func (cluster *Cluster) GetBackups() []Backup { return cluster.Backups } -func (cluster *Cluster) GetProxyModuleTags() []Tag { - var tags []Tag - for _, value := range cluster.ProxyModule.Filtersets { - var t Tag - t.Id = value.ID - s := strings.SplitAfter(value.Name, ".") - t.Name = s[len(s)-1] - tags = append(tags, t) - } - return tags -} - -func (cluster *Cluster) GetConfigMaxConnections() string { - return strconv.Itoa(cluster.Conf.ProvMaxConnections) -} - -func (cluster *Cluster) GetConfigExpireLogDays() string { - return strconv.Itoa(cluster.Conf.ProvExpireLogDays) -} - -func (cluster *Cluster) GetConfigRelaySpaceLimit() string { - return strconv.Itoa(10 * 1024 * 1024) -} - -func (cluster *Cluster) GetConfigReplicationDomain() 
string { - // Multi source need differnt domain id - if cluster.Conf.MasterConn != "" && cluster.Conf.ProvDomain == "0" { - crcTable := crc32.MakeTable(0xD5828281) - return strconv.FormatUint(uint64(crc32.Checksum([]byte(cluster.Name), crcTable)), 10) - } - return cluster.Conf.ProvDomain -} - -// GetConfigInnoDBBPSize configure 80% of the ConfigMemory in Megabyte -func (cluster *Cluster) GetConfigInnoDBBPSize() string { - containermem, err := strconv.ParseInt(cluster.Conf.ProvMem, 10, 64) - if err != nil { - return "128" - } - sharedmempcts, _ := cluster.Conf.GetMemoryPctShared() - - containermem = containermem * int64(sharedmempcts["innodb"]) / 100 - s10 := strconv.FormatInt(containermem, 10) - return s10 -} - -func (cluster *Cluster) GetConfigMyISAMKeyBufferSize() string { - containermem, err := strconv.ParseInt(cluster.Conf.ProvMem, 10, 64) - if err != nil { - return "128" - } - sharedmempcts, _ := cluster.Conf.GetMemoryPctShared() - - containermem = containermem * int64(sharedmempcts["myisam"]) / 100 - s10 := strconv.FormatInt(containermem, 10) - return s10 -} - -func (cluster *Cluster) GetConfigTokuDBBufferSize() string { - containermem, err := strconv.ParseInt(cluster.Conf.ProvMem, 10, 64) - if err != nil { - return "128" - } - sharedmempcts, _ := cluster.Conf.GetMemoryPctShared() - - containermem = containermem * int64(sharedmempcts["tokudb"]) / 100 - s10 := strconv.FormatInt(containermem, 10) - return s10 -} - -func (cluster *Cluster) GetConfigQueryCacheSize() string { - containermem, err := strconv.ParseInt(cluster.Conf.ProvMem, 10, 64) - if err != nil { - return "128" - } - sharedmempcts, _ := cluster.Conf.GetMemoryPctShared() - containermem = containermem * int64(sharedmempcts["querycache"]) / 100 - s10 := strconv.FormatInt(containermem, 10) - return s10 -} - -func (cluster *Cluster) GetConfigAriaCacheSize() string { - containermem, err := strconv.ParseInt(cluster.Conf.ProvMem, 10, 64) - if err != nil { - return "128" - } - sharedmempcts, _ := cluster.Conf.GetMemoryPctShared() - containermem = containermem * int64(sharedmempcts["aria"]) / 100 - s10 := strconv.FormatInt(containermem, 10) - return s10 -} - -func (cluster *Cluster) GetConfigS3CacheSize() string { - containermem, err := strconv.ParseInt(cluster.Conf.ProvMem, 10, 64) - if err != nil { - return "128" - } - sharedmempcts, _ := cluster.Conf.GetMemoryPctShared() - containermem = containermem * int64(sharedmempcts["s3"]) / 100 - s10 := strconv.FormatInt(containermem, 10) - return s10 -} - -func (cluster *Cluster) GetConfigRocksDBCacheSize() string { - containermem, err := strconv.ParseInt(cluster.Conf.ProvMem, 10, 64) - if err != nil { - return "128" - } - sharedmempcts, _ := cluster.Conf.GetMemoryPctShared() - containermem = containermem * int64(sharedmempcts["rocksdb"]) / 100 - s10 := strconv.FormatInt(containermem, 10) - return s10 -} - -func (cluster *Cluster) GetConfigMyISAMKeyBufferSegements() string { - value, err := strconv.ParseInt(cluster.GetConfigMyISAMKeyBufferSize(), 10, 64) - if err != nil { - return "1" - } - value = value/8000 + 1 - s10 := strconv.FormatInt(value, 10) - return s10 -} - -func (cluster *Cluster) GetConfigInnoDBIOCapacity() string { - value, err := strconv.ParseInt(cluster.Conf.ProvIops, 10, 64) - if err != nil { - return "100" - } - value = value / 3 - s10 := strconv.FormatInt(value, 10) - return s10 -} - -func (cluster *Cluster) GetConfigInnoDBIOCapacityMax() string { - value, err := strconv.ParseInt(cluster.Conf.ProvIops, 10, 64) - if err != nil { - return "200" - } - s10 := 
strconv.FormatInt(value, 10) - return s10 -} - -func (cluster *Cluster) GetConfigInnoDBMaxDirtyPagePct() string { - /* mem, err := strconv.ParseInt(cluster.GetConfigInnoDBBPSize(), 10, 64) - if err != nil { - return "20" - } - //Compute the ration of memory compare to a G - // value := mem/1000 - - */ - var value int64 - value = 40 - s10 := strconv.FormatInt(value, 10) - return s10 -} - -func (cluster *Cluster) GetConfigInnoDBMaxDirtyPagePctLwm() string { - var value int64 - value = 20 - s10 := strconv.FormatInt(value, 10) - return s10 -} - -func (cluster *Cluster) GetConfigInnoDBLogFileSize() string { - //result in MB - var valuemin int64 - var valuemax int64 - valuemin = 1024 - valuemax = 20 * 1024 - value, err := strconv.ParseInt(cluster.GetConfigInnoDBBPSize(), 10, 64) - if err != nil { - return "1024" - } - value = value / 2 - if value < valuemin { - value = valuemin - } - if value > valuemax { - value = valuemax - } - if cluster.HaveDBTag("smallredolog") { - return "128" - } - s10 := strconv.FormatInt(value, 10) - return s10 -} - -func (cluster *Cluster) GetConfigInnoDBLogBufferSize() string { - //result in MB - var value int64 - value = 16 - s10 := strconv.FormatInt(value, 10) - return s10 -} - -// GetConfigInnoDBBPInstances configure BP/8G of the ConfigMemory in Megabyte -func (cluster *Cluster) GetConfigInnoDBBPInstances() string { - value, err := strconv.ParseInt(cluster.GetConfigInnoDBBPSize(), 10, 64) - if err != nil { - return "1" - } - value = value/8000 + 1 - s10 := strconv.FormatInt(value, 10) - return s10 -} - -func (cluster *Cluster) GetConfigInnoDBWriteIoThreads() string { - iopsLatency, err := strconv.ParseFloat(cluster.Conf.ProvIopsLatency, 64) - if err != nil { - return "4" - } - iops, err := strconv.ParseFloat(cluster.Conf.ProvIops, 64) - if err != nil { - return "4" - } - nbthreads := int(iopsLatency * iops) - if nbthreads < 1 { - return "1" - } - strnbthreads := strconv.Itoa(nbthreads) - return strnbthreads -} -func (cluster *Cluster) GetConfigInnoDBReadIoThreads() string { - - return cluster.Conf.ProvCores -} - -func (cluster *Cluster) GetConfigInnoDBPurgeThreads() string { - return "4" -} - func (cluster *Cluster) GetQueryRules() []config.QueryRule { r := make([]config.QueryRule, 0, len(cluster.QueryRules)) for _, value := range cluster.QueryRules { diff --git a/cluster/cluster_has.go b/cluster/cluster_has.go index 8b8eedef7..79555f183 100644 --- a/cluster/cluster_has.go +++ b/cluster/cluster_has.go @@ -267,21 +267,3 @@ func (cluster *Cluster) IsInFailover() bool { func (cluster *Cluster) IsDiscovered() bool { return cluster.sme.IsDiscovered() } - -func (cluster *Cluster) HaveDBTag(tag string) bool { - for _, t := range cluster.DBTags { - if t == tag { - return true - } - } - return false -} - -func (cluster *Cluster) HaveProxyTag(tag string) bool { - for _, t := range cluster.ProxyTags { - if t == tag { - return true - } - } - return false -} diff --git a/cluster/cluster_key.go b/cluster/cluster_key.go index 8e6f559d5..b6c124f2e 100644 --- a/cluster/cluster_key.go +++ b/cluster/cluster_key.go @@ -32,7 +32,7 @@ func (cluster *Cluster) loadDBCertificates(path string) error { var cacertfile, clicertfile, clikeyfile string if cluster.Conf.HostsTLSCA == "" || cluster.Conf.HostsTLSCLI == "" || cluster.Conf.HostsTLSKEY == "" { - if cluster.Conf.DBServersTLSUseGeneratedCertificate || cluster.HaveDBTag("ssl") { + if cluster.Conf.DBServersTLSUseGeneratedCertificate || cluster.Configurator.HaveDBTag("ssl") { cacertfile = path + "/ca-cert.pem" clicertfile = path + 
"/client-cert.pem" clikeyfile = path + "/client-key.pem" @@ -72,7 +72,7 @@ func (cluster *Cluster) loadDBOldCertificates(path string) error { var cacertfile, clicertfile, clikeyfile string if cluster.Conf.HostsTLSCA == "" || cluster.Conf.HostsTLSCLI == "" || cluster.Conf.HostsTLSKEY == "" { - if cluster.Conf.DBServersTLSUseGeneratedCertificate || cluster.HaveDBTag("ssl") { + if cluster.Conf.DBServersTLSUseGeneratedCertificate || cluster.Configurator.HaveDBTag("ssl") { cacertfile = path + "/ca-cert.pem" clicertfile = path + "/client-cert.pem" clikeyfile = path + "/client-key.pem" diff --git a/cluster/cluster_set.go b/cluster/cluster_set.go index ed6727c2e..0bdcf5625 100644 --- a/cluster/cluster_set.go +++ b/cluster/cluster_set.go @@ -258,59 +258,85 @@ func (cluster *Cluster) SetInteractive(check bool) { } func (cluster *Cluster) SetDBDiskSize(value string) { - cluster.Conf.ProvDisk = value - cluster.SetDBReprovCookie() + + cluster.Configurator.SetDBDisk(value) + cluster.Conf.ProvDisk = cluster.Configurator.GetConfigDBDisk() + +cluster.SetDBReprovCookie() } func (cluster *Cluster) SetDBCores(value string) { - cluster.Conf.ProvCores = value - cluster.SetDBRestartCookie() + + cluster.Configurator.SetDBCores(value) + cluster.Conf.ProvCores = cluster.Configurator.GetConfigDBCores() + cluster.SetDBReprovCookie() } func (cluster *Cluster) SetDBMemorySize(value string) { - cluster.Conf.ProvMem = value + + cluster.Configurator.SetDBMemory(value) + cluster.Conf.ProvMem = cluster.Configurator.GetConfigDBMemory() + cluster.SetDBReprovCookie() +} + +func (cluster *Cluster) SetDBCoresFromConfigurator() { + + cluster.Conf.ProvCores = cluster.Configurator.GetConfigDBCores() + cluster.SetDBRestartCookie() +} + +func (cluster *Cluster) SetDBMemoryFromConfigurator() { + cluster.Conf.ProvMem = cluster.Configurator.GetConfigDBMemory() cluster.SetDBRestartCookie() } +func (cluster *Cluster) SetDBIOPSFromConfigurator() { + cluster.Conf.ProvIops=cluster.Configurator.GetConfigDBDiskIOPS() + cluster.SetDBRestartCookie() +} + +func (cluster *Cluster) SetTagsFromConfigurator() { + cluster.Conf.ProvTags = cluster.Configurator.GetConfigDBTags() + cluster.Conf.ProvTags = cluster.Configurator.GetConfigProxyTags() +} + func (cluster *Cluster) SetDBDiskIOPS(value string) { - cluster.Conf.ProvIops = value + cluster.Configurator.SetDBDiskIOPS(value) + cluster.Conf.ProvIops = cluster.Configurator.GetConfigDBDiskIOPS() cluster.SetDBRestartCookie() } func (cluster *Cluster) SetDBMaxConnections(value string) { - valueNum, err := strconv.Atoi(value) - if err != nil { - cluster.Conf.ProvMaxConnections = 1000 - } - cluster.Conf.ProvMaxConnections = valueNum + cluster.Configurator.SetDBMaxConnections(value) + cluster.Conf.ProvMaxConnections =cluster.Configurator.GetConfigDBMaxConnections() cluster.SetDBRestartCookie() } func (cluster *Cluster) SetDBExpireLogDays(value string) { - valueNum, err := strconv.Atoi(value) - if err != nil { - cluster.Conf.ProvExpireLogDays = 5 - } - cluster.Conf.ProvExpireLogDays = valueNum + cluster.Configurator.SetDBExpireLogDays(value) + cluster.Conf.ProvExpireLogDays = cluster.Configurator.GetConfigDBExpireLogDays() cluster.SetDBRestartCookie() } func (cluster *Cluster) SetProxyCores(value string) { - cluster.Conf.ProvProxCores = value + cluster.Configurator.SetProxyCores(value) + cluster.Conf.ProvProxCores =cluster.Configurator.GetConfigProxyCores() cluster.SetProxiesRestartCookie() } + func (cluster *Cluster) SetProxyMemorySize(value string) { - cluster.Conf.ProvProxMem = value + 
cluster.Configurator.SetProxyMemorySize(value) + cluster.Conf.ProvProxMem = cluster.Configurator.GetProxyMemorySize() cluster.SetProxiesRestartCookie() } + func (cluster *Cluster) SetProxyDiskSize(value string) { - cluster.Conf.ProvProxDisk = value + cluster.Configurator.SetProxyDiskSize(value) + cluster.Conf.ProvProxDisk = cluster.Configurator.GetProxyDiskSize() cluster.SetProxiesReprovCookie() } func (cluster *Cluster) SetTraffic(traffic bool) { - //cluster.SetBenchMethod("table") - //cluster.PrepareBench() cluster.Conf.TestInjectTraffic = traffic } @@ -452,8 +478,8 @@ func (cluster *Cluster) SetTestStopCluster(check bool) { } func (cluster *Cluster) SetClusterVariablesFromConfig() { - cluster.DBTags = cluster.GetDatabaseTags() - cluster.ProxyTags = cluster.GetProxyTags() + cluster.Configurator.SetConfig(cluster.Conf) + var err error err = cluster.loadDBCertificates(cluster.WorkingDir) if err != nil { diff --git a/cluster/prov_localhost_db.go b/cluster/prov_localhost_db.go index 40487f950..a6ee35a9a 100644 --- a/cluster/prov_localhost_db.go +++ b/cluster/prov_localhost_db.go @@ -326,7 +326,7 @@ func (cluster *Cluster) LocalhostStartDatabaseService(server *ServerMonitor) err time.Sleep(time.Millisecond * 2000) //cluster.LogPrintf(LvlInfo, "Waiting database startup ") - cluster.LogPrintf(LvlInfo, "Waiting database startup .. %s", out) + cluster.LogPrintf(LvlInfo, "Waiting database startup %d: %s", exitloop, out.String()) conn, err2 := sqlx.Open("mysql", server.DSN) if err2 == nil { defer conn.Close() diff --git a/cluster/prov_opensvc_db.go b/cluster/prov_opensvc_db.go index 62b4587c9..fc0ed18e2 100644 --- a/cluster/prov_opensvc_db.go +++ b/cluster/prov_opensvc_db.go @@ -16,7 +16,6 @@ import ( "strings" "github.com/signal18/replication-manager/opensvc" - "github.com/signal18/replication-manager/utils/misc" "github.com/signal18/replication-manager/utils/state" ) @@ -744,10 +743,7 @@ safe_ssl_ca_uuid = ` + server.ClusterGroup.Conf.ProvSSLCaUUID + ` safe_ssl_cert_uuid = ` + server.ClusterGroup.Conf.ProvSSLCertUUID + ` safe_ssl_key_uuid = ` + server.ClusterGroup.Conf.ProvSSLKeyUUID + ` server_id = ` + string(server.Id[2:10]) + ` -innodb_buffer_pool_size = ` + server.ClusterGroup.GetConfigInnoDBBPSize() + ` -innodb_log_file_size = ` + server.ClusterGroup.GetConfigInnoDBLogFileSize() + ` -innodb_buffer_pool_instances = ` + server.ClusterGroup.GetConfigInnoDBBPInstances() + ` -innodb_log_buffer_size = 8 + ` log.Println(conf) @@ -809,61 +805,3 @@ run_args = -e MYSQL_ROOT_PASSWORD={env.mysql_root_password} } return vm } - -func (server *ServerMonitor) GetEnv() map[string]string { - - return map[string]string{ - "%%ENV:NODES_CPU_CORES%%": server.ClusterGroup.Conf.ProvCores, - "%%ENV:SVC_CONF_ENV_MAX_CORES%%": server.ClusterGroup.Conf.ProvCores, - "%%ENV:SVC_CONF_ENV_MAX_CONNECTIONS%%": server.ClusterGroup.GetConfigMaxConnections(), - "%%ENV:SVC_CONF_ENV_CRC32_ID%%": string(server.Id[2:10]), - "%%ENV:SVC_CONF_ENV_SERVER_ID%%": string(server.Id[2:10]), - "%%ENV:SERVER_IP%%": misc.Unbracket(server.GetBindAddress()), - "%%ENV:SERVER_HOST%%": server.Host, - "%%ENV:SERVER_PORT%%": server.Port, - "%%ENV:SVC_CONF_ENV_MYSQL_DATADIR%%": server.GetDatabaseDatadir(), - "%%ENV:SVC_CONF_ENV_MYSQL_CONFDIR%%": server.GetDatabaseConfdir(), - "%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%": server.GetDatabaseClientBasedir(), - "%%ENV:SVC_CONF_ENV_MYSQL_SOCKET%%": server.GetDatabaseSocket(), - "%%ENV:SVC_CONF_ENV_MYSQL_ROOT_USER%%": server.ClusterGroup.dbUser, - "%%ENV:SVC_CONF_ENV_MYSQL_ROOT_PASSWORD%%": 
server.ClusterGroup.dbPass, - "%%ENV:SVC_CONF_ENV_MAX_MEM%%": server.ClusterGroup.GetConfigInnoDBBPSize(), - "%%ENV:SVC_CONF_ENV_INNODB_CACHE_SIZE%%": server.ClusterGroup.GetConfigInnoDBBPSize(), - "%%ENV:SVC_CONF_ENV_TOKUDB_CACHE_SIZE%%": server.ClusterGroup.GetConfigTokuDBBufferSize(), - "%%ENV:SVC_CONF_ENV_MYISAM_CACHE_SIZE%%": server.ClusterGroup.GetConfigMyISAMKeyBufferSize(), - "%%ENV:SVC_CONF_ENV_MYISAM_CACHE_SEGMENTS%%": server.ClusterGroup.GetConfigMyISAMKeyBufferSegements(), - "%%ENV:SVC_CONF_ENV_ARIA_CACHE_SIZE%%": server.ClusterGroup.GetConfigAriaCacheSize(), - "%%ENV:SVC_CONF_ENV_QUERY_CACHE_SIZE%%": server.ClusterGroup.GetConfigQueryCacheSize(), - "%%ENV:SVC_CONF_ENV_ROCKSDB_CACHE_SIZE%%": server.ClusterGroup.GetConfigRocksDBCacheSize(), - "%%ENV:SVC_CONF_ENV_S3_CACHE_SIZE%%": server.ClusterGroup.GetConfigS3CacheSize(), - "%%ENV:IBPINSTANCES%%": server.ClusterGroup.GetConfigInnoDBBPInstances(), - "%%ENV:SVC_CONF_ENV_GCOMM%%": server.ClusterGroup.GetGComm(), - "%%ENV:CHECKPOINTIOPS%%": server.ClusterGroup.GetConfigInnoDBIOCapacity(), - "%%ENV:SVC_CONF_ENV_MAX_IOPS%%": server.ClusterGroup.GetConfigInnoDBIOCapacityMax(), - "%%ENV:SVC_CONF_ENV_INNODB_IO_CAPACITY%%": server.ClusterGroup.GetConfigInnoDBIOCapacity(), - "%%ENV:SVC_CONF_ENV_INNODB_IO_CAPACITY_MAX%%": server.ClusterGroup.GetConfigInnoDBIOCapacityMax(), - "%%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT%%": server.ClusterGroup.GetConfigInnoDBMaxDirtyPagePct(), - "%%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT_LWM%%": server.ClusterGroup.GetConfigInnoDBMaxDirtyPagePctLwm(), - "%%ENV:SVC_CONF_ENV_INNODB_BUFFER_POOL_INSTANCES%%": server.ClusterGroup.GetConfigInnoDBBPInstances(), - "%%ENV:SVC_CONF_ENV_INNODB_BUFFER_POOL_SIZE%%": server.ClusterGroup.GetConfigInnoDBBPSize(), - "%%ENV:SVC_CONF_ENV_INNODB_LOG_BUFFER_SIZE%%": server.ClusterGroup.GetConfigInnoDBLogBufferSize(), - "%%ENV:SVC_CONF_ENV_INNODB_LOG_FILE_SIZE%%": server.ClusterGroup.GetConfigInnoDBLogFileSize(), - "%%ENV:SVC_CONF_ENV_INNODB_WRITE_IO_THREADS%%": server.ClusterGroup.GetConfigInnoDBWriteIoThreads(), - "%%ENV:SVC_CONF_ENV_INNODB_READ_IO_THREADS%%": server.ClusterGroup.GetConfigInnoDBReadIoThreads(), - "%%ENV:SVC_CONF_ENV_INNODB_PURGE_THREADS%%": server.ClusterGroup.GetConfigInnoDBPurgeThreads(), - "%%ENV:SVC_CONF_ENV_EXPIRE_LOG_DAYS%%": server.ClusterGroup.GetConfigExpireLogDays(), - "%%ENV:SVC_CONF_ENV_RELAY_SPACE_LIMIT%%": server.ClusterGroup.GetConfigRelaySpaceLimit(), - "%%ENV:SVC_NAMESPACE%%": server.ClusterGroup.Name, - "%%ENV:SVC_NAME%%": server.Name, - "%%ENV:SVC_CONF_ENV_SST_METHOD%%": server.ClusterGroup.Conf.MultiMasterWsrepSSTMethod, - "%%ENV:SVC_CONF_ENV_DOMAIN_ID%%": server.ClusterGroup.GetConfigReplicationDomain(), - "%%ENV:SVC_CONF_ENV_SST_RECEIVER_PORT%%": server.SSTPort, - "%%ENV:SVC_CONF_ENV_REPLICATION_MANAGER_ADDR%%": server.ClusterGroup.Conf.MonitorAddress + ":" + server.ClusterGroup.Conf.HttpPort, - "%%ENV:SVC_CONF_ENV_REPLICATION_MANAGER_URL%%": server.ClusterGroup.Conf.MonitorAddress + ":" + server.ClusterGroup.Conf.APIPort, - "%%ENV:ENV:SVC_CONF_ENV_REPLICATION_MANAGER_HOST_NAME%%": server.Host, - "%%ENV:ENV:SVC_CONF_ENV_REPLICATION_MANAGER_HOST_PORT%%": server.Port, - "%%ENV:ENV:SVC_CONF_ENV_REPLICATION_MANAGER_CLUSTER_NAME%%": server.ClusterGroup.Name, - } - - // size = ` + collector.ProvDisk + ` -} diff --git a/cluster/prov_opensvc_shardproxy.go b/cluster/prov_opensvc_shardproxy.go index 63b1f6840..56483b6da 100644 --- a/cluster/prov_opensvc_shardproxy.go +++ b/cluster/prov_opensvc_shardproxy.go @@ -92,10 +92,7 @@ gcomm = ` + 
cluster.GetGComm() + ` mrm_api_addr = ` + cluster.Conf.MonitorAddress + ":" + cluster.Conf.HttpPort + ` mrm_cluster_name = ` + cluster.GetClusterName() + ` server_id = ` + string(prx.Id[2:10]) + ` -innodb_buffer_pool_size = ` + cluster.GetConfigInnoDBBPSize() + ` -innodb_log_file_size = ` + cluster.GetConfigInnoDBLogFileSize() + ` -innodb_buffer_pool_instances = ` + cluster.GetConfigInnoDBBPInstances() + ` -innodb_log_buffer_size = 8 + ` log.Println(conf) return conf, nil diff --git a/cluster/prx_get.go b/cluster/prx_get.go index c6954f3e1..856201901 100644 --- a/cluster/prx_get.go +++ b/cluster/prx_get.go @@ -9,13 +9,9 @@ package cluster import ( - "encoding/json" "errors" "fmt" - "os" - "path/filepath" "strconv" - "strings" "github.com/jmoiron/sqlx" "github.com/signal18/replication-manager/config" @@ -76,86 +72,10 @@ func (prx *Proxy) GetCluster() (*sqlx.DB, error) { func (proxy *Proxy) GetProxyConfig() string { proxy.ClusterGroup.LogPrintf(LvlInfo, "Proxy Config generation "+proxy.Datadir+"/config.tar.gz") - - type File struct { - Path string `json:"path"` - Content string `json:"fmt"` - } - os.RemoveAll(proxy.Datadir + "/init") - // Extract files - for _, rule := range proxy.ClusterGroup.ProxyModule.Rulesets { - - if strings.Contains(rule.Name, "mariadb.svc.mrm.proxy.cnf") { - - for _, variable := range rule.Variables { - - if variable.Class == "file" || variable.Class == "fileprop" { - var f File - json.Unmarshal([]byte(variable.Value), &f) - fpath := strings.Replace(f.Path, "%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%", proxy.Datadir+"/init", -1) - dir := filepath.Dir(fpath) - // proxy.ClusterGroup.LogPrintf(LvlInfo, "Config create %s", fpath) - // create directory - if _, err := os.Stat(dir); os.IsNotExist(err) { - err := os.MkdirAll(dir, os.FileMode(0775)) - if err != nil { - proxy.ClusterGroup.LogPrintf(LvlErr, "Compliance create directory %q: %s", dir, err) - } - } - proxy.ClusterGroup.LogPrintf(LvlInfo, "rule %s filter %s %t", rule.Name, rule.Filter, proxy.IsFilterInTags(rule.Filter)) - if fpath[len(fpath)-1:] != "/" && (proxy.IsFilterInTags(rule.Filter) || rule.Filter == "") { - content := misc.ExtractKey(f.Content, proxy.GetEnv()) - outFile, err := os.Create(fpath) - if err != nil { - proxy.ClusterGroup.LogPrintf(LvlErr, "Compliance create file failed %q: %s", fpath, err) - } else { - _, err = outFile.WriteString(content) - - if err != nil { - proxy.ClusterGroup.LogPrintf(LvlErr, "Compliance writing file failed %q: %s", fpath, err) - } - outFile.Close() - //server.ClusterGroup.LogPrintf(LvlInfo, "Variable name %s", variable.Name) - - } - - } - } - } - } - } - // processing symlink - type Link struct { - Symlink string `json:"symlink"` - Target string `json:"target"` - } - for _, rule := range proxy.ClusterGroup.ProxyModule.Rulesets { - if strings.Contains(rule.Name, "mariadb.svc.mrm.proxy.cnf") { - for _, variable := range rule.Variables { - if variable.Class == "symlink" { - if proxy.IsFilterInTags(rule.Filter) || rule.Name == "mariadb.svc.mrm.proxy.cnf" { - var f Link - json.Unmarshal([]byte(variable.Value), &f) - fpath := strings.Replace(f.Symlink, "%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%", proxy.Datadir+"/init", -1) - if proxy.ClusterGroup.Conf.LogLevel > 2 { - proxy.ClusterGroup.LogPrintf(LvlInfo, "Config symlink %s", fpath) - } - os.Symlink(f.Target, fpath) - // keys := strings.Split(variable.Value, " ") - } - } - } - } - } - - if proxy.ClusterGroup.HaveProxyTag("docker") { - err := misc.ChownR(proxy.Datadir+"/init/data", 999, 999) - if err != nil { - 
proxy.ClusterGroup.LogPrintf(LvlErr, "Chown failed %q: %s", proxy.Datadir+"/init/data", err) - } + err := proxy.ClusterGroup.Configurator.GenerateProxyConfig(proxy.Datadir, proxy.GetEnv()) + if err != nil { + proxy.ClusterGroup.LogPrintf(LvlInfo, "Proxy Config generation "+proxy.Datadir+"/config.tar.gz") } - proxy.ClusterGroup.TarGz(proxy.Datadir+"/config.tar.gz", proxy.Datadir+"/init") - //server.TarAddDirectory(server.Datadir+"/data", tw) return "" } @@ -192,13 +112,13 @@ func (proxy *Proxy) GetBindAddressExtraIPV6() string { return "" } func (proxy *Proxy) GetUseSSL() string { - if proxy.IsFilterInTags("ssl") { + if proxy.ClusterGroup.Configurator.IsFilterInProxyTags("ssl") { return "true" } return "false" } func (proxy *Proxy) GetUseCompression() string { - if proxy.IsFilterInTags("nonetworkcompress") { + if proxy.ClusterGroup.Configurator.IsFilterInProxyTags("nonetworkcompress") { return "false" } return "true" @@ -274,7 +194,7 @@ func (proxy *Proxy) GetBaseEnv() map[string]string { } func (proxy *Proxy) GetConfigProxySQLReadOnMaster() string { - if proxy.IsFilterInTags("proxy.route.readonmaster") { + if proxy.ClusterGroup.Configurator.IsFilterInProxyTags("proxy.route.readonmaster") { return "1" } return "0" diff --git a/cluster/prx_has.go b/cluster/prx_has.go index 7bb372e57..38f100a5e 100644 --- a/cluster/prx_has.go +++ b/cluster/prx_has.go @@ -10,20 +10,8 @@ package cluster import ( "os" - "strings" ) -func (proxy *Proxy) IsFilterInTags(filter string) bool { - tags := proxy.ClusterGroup.GetProxyTags() - for _, tag := range tags { - if strings.Contains(filter, "."+tag) { - // fmt.Println(server.ClusterGroup.Conf.ProvTags + " vs tag: " + tag + " against " + filter) - return true - } - } - return false -} - func (proxy *Proxy) hasCookie(key string) bool { if _, err := os.Stat(proxy.Datadir + "/@" + key); os.IsNotExist(err) { return false diff --git a/cluster/srv.go b/cluster/srv.go index 60e5eef96..4b6bc5ef1 100644 --- a/cluster/srv.go +++ b/cluster/srv.go @@ -280,6 +280,7 @@ func (cluster *Cluster) newServerMonitor(url string, user string, pass string, c server.SetIgnored(cluster.IsInIgnoredHosts(server)) server.SetPreferedBackup(cluster.IsInPreferedBackupHosts(server)) server.SetPrefered(cluster.IsInPreferedHosts(server)) + server.ReloadSaveInfosVariables() /*if server.ClusterGroup.Conf.MasterSlavePgStream || server.ClusterGroup.Conf.MasterSlavePgLogical { server.Conn, err = sqlx.Open("postgres", server.DSN) } else { @@ -633,6 +634,7 @@ func (server *ServerMonitor) Refresh() error { server.ClusterGroup.SetState("ERR00073", state.State{ErrType: LvlErr, ErrDesc: fmt.Sprintf(clusterError["ERR00073"], server.URL), ErrFrom: "MON"}) } if server.ClusterGroup.sme.GetHeartbeats()%30 == 0 { + server.SaveInfos() server.CheckPrivileges() } else { server.ClusterGroup.sme.PreserveState("ERR00007") @@ -1214,6 +1216,49 @@ func (server *ServerMonitor) Capture() error { return nil } +func (server *ServerMonitor) SaveInfos() error { + type Save struct { + Variables map[string]string `json:"variables"` + ProcessList []dbhelper.Processlist `json:"processlist"` + Status map[string]string `json:"status"` + SlaveStatus []dbhelper.SlaveStatus `json:"slavestatus"` + } + var clsave Save + clsave.Variables = server.Variables + clsave.Status = server.Status + clsave.ProcessList = server.FullProcessList + clsave.SlaveStatus = server.LastSeenReplications + saveJSON, _ := json.MarshalIndent(clsave, "", "\t") + err := ioutil.WriteFile(server.Datadir+"/serverstate.json", saveJSON, 0644) + if err != nil { + return 
errors.New("SaveInfos" + err.Error()) + } + return nil +} + +func (server *ServerMonitor) ReloadSaveInfosVariables() error { + type Save struct { + Variables map[string]string `json:"variables"` + ProcessList []dbhelper.Processlist `json:"processlist"` + Status map[string]string `json:"status"` + SlaveStatus []dbhelper.SlaveStatus `json:"slavestatus"` + } + + var clsave Save + file, err := ioutil.ReadFile(server.Datadir + "/serverstate.json") + if err != nil { + server.ClusterGroup.LogPrintf(LvlInfo, "No file found %s: %v\n", server.Datadir+"/serverstate.json", err) + return err + } + err = json.Unmarshal(file, &clsave) + if err != nil { + server.ClusterGroup.LogPrintf(LvlErr, "File error: %v\n", err) + return err + } + server.Variables = clsave.Variables + return nil +} + func (server *ServerMonitor) CaptureLoop(start int64) { server.InCaptureMode = true diff --git a/cluster/srv_cnf.go b/cluster/srv_cnf.go new file mode 100644 index 000000000..14f88a979 --- /dev/null +++ b/cluster/srv_cnf.go @@ -0,0 +1,159 @@ +// replication-manager - Replication Manager Monitoring and CLI for MariaDB and MySQL +// Copyright 2017 Signal 18 Cloud SAS +// Authors: Guillaume Lefranc +// Stephane Varoqui +// This source code is licensed under the GNU General Public License, version 3. + +package cluster + +import ( + "github.com/signal18/replication-manager/config" + "github.com/signal18/replication-manager/utils/misc" +) + +func (server *ServerMonitor) GetEnv() map[string]string { + + return map[string]string{ + "%%ENV:NODES_CPU_CORES%%": server.ClusterGroup.Configurator.GetConfigDBCores(), + "%%ENV:SVC_CONF_ENV_MAX_CORES%%": server.ClusterGroup.Configurator.GetConfigDBCores(), + "%%ENV:SVC_CONF_ENV_MAX_CONNECTIONS%%": server.ClusterGroup.Configurator.GetConfigMaxConnections(), + "%%ENV:SVC_CONF_ENV_CRC32_ID%%": string(server.Id[2:10]), + "%%ENV:SVC_CONF_ENV_SERVER_ID%%": string(server.Id[2:10]), + "%%ENV:SERVER_IP%%": misc.Unbracket(server.GetBindAddress()), + "%%ENV:SERVER_HOST%%": server.Host, + "%%ENV:SERVER_PORT%%": server.Port, + "%%ENV:SVC_CONF_ENV_MYSQL_DATADIR%%": server.GetDatabaseDatadir(), + "%%ENV:SVC_CONF_ENV_MYSQL_TMPDIR%%": server.GetConfigVariable("TMPDIR"), + "%%ENV:SVC_CONF_ENV_MYSQL_SLAVE_LOAD_TMPDIR%%": server.GetConfigVariable("SLAVE_LOAD_TMPDIR"), + "%%ENV:SVC_CONF_ENV_MYSQL_LOG_ERROR%%": server.GetConfigVariable("LOG_ERROR"), + "%%ENV:SVC_CONF_ENV_MYSQL_SLOW_QUERY_LOG_FILE%%": server.GetConfigVariable("SLOW_QUERY_LOG_FILE"), + "%%ENV:SVC_CONF_ENV_MYSQL_GENERAL_LOG_FILE%%": server.GetConfigVariable("GENERAL_LOG_FILE"), + "%%ENV:SVC_CONF_ENV_MYSQL_INNODB_DATA_HOME_DIR%%": server.GetConfigVariable("INNODB_DATA_HOME_DIR"), + "%%ENV:SVC_CONF_ENV_MYSQL_INNODB_LOG_GROUP_HOME_DIR%%": server.GetConfigVariable("INNODB_LOG_GROUP_HOME_DIR"), + "%%ENV:SVC_CONF_ENV_MYSQL_INNODB_UNDO_DIRECTORY%%": server.GetConfigVariable("INNODB_UNDO_DIRECTORY"), + "%%ENV:SVC_CONF_ENV_MYSQL_LOG_BIN%%": server.GetConfigVariable("LOG_BIN"), + "%%ENV:SVC_CONF_ENV_MYSQL_LOG_BIN_INDEX%%": server.GetConfigVariable("LOG_BIN_INDEX"), + "%%ENV:SVC_CONF_ENV_MYSQL_RELAY_LOG%%": server.GetConfigVariable("RELAY_LOG"), + "%%ENV:SVC_CONF_ENV_MYSQL_RELAY_LOG_INDEX%%": server.GetConfigVariable("RELAY_LOG_INDEX"), + "%%ENV:SVC_CONF_ENV_MYSQL_ARIA_LOG_DIR_PATH%%": server.GetConfigVariable("ARIA_LOG_DIR_PATH"), + "%%ENV:SVC_CONF_ENV_MYSQL_CONFDIR%%": server.GetDatabaseConfdir(), + "%%ENV:SVC_CONF_ENV_CLIENT_BASEDIR%%": server.GetDatabaseClientBasedir(), + "%%ENV:SVC_CONF_ENV_MYSQL_SOCKET%%": server.GetDatabaseSocket(), + 
"%%ENV:SVC_CONF_ENV_MYSQL_ROOT_USER%%": server.ClusterGroup.dbUser, + "%%ENV:SVC_CONF_ENV_MYSQL_ROOT_PASSWORD%%": server.ClusterGroup.dbPass, + "%%ENV:SVC_CONF_ENV_MAX_MEM%%": server.ClusterGroup.Configurator.GetConfigInnoDBBPSize(), + "%%ENV:SVC_CONF_ENV_INNODB_CACHE_SIZE%%": server.ClusterGroup.Configurator.GetConfigInnoDBBPSize(), + "%%ENV:SVC_CONF_ENV_TOKUDB_CACHE_SIZE%%": server.ClusterGroup.Configurator.GetConfigTokuDBBufferSize(), + "%%ENV:SVC_CONF_ENV_MYISAM_CACHE_SIZE%%": server.ClusterGroup.Configurator.GetConfigMyISAMKeyBufferSize(), + "%%ENV:SVC_CONF_ENV_MYISAM_CACHE_SEGMENTS%%": server.ClusterGroup.Configurator.GetConfigMyISAMKeyBufferSegements(), + "%%ENV:SVC_CONF_ENV_ARIA_CACHE_SIZE%%": server.ClusterGroup.Configurator.GetConfigAriaCacheSize(), + "%%ENV:SVC_CONF_ENV_QUERY_CACHE_SIZE%%": server.ClusterGroup.Configurator.GetConfigQueryCacheSize(), + "%%ENV:SVC_CONF_ENV_ROCKSDB_CACHE_SIZE%%": server.ClusterGroup.Configurator.GetConfigRocksDBCacheSize(), + "%%ENV:SVC_CONF_ENV_S3_CACHE_SIZE%%": server.ClusterGroup.Configurator.GetConfigS3CacheSize(), + "%%ENV:IBPINSTANCES%%": server.ClusterGroup.Configurator.GetConfigInnoDBBPInstances(), + "%%ENV:SVC_CONF_ENV_GCOMM%%": server.ClusterGroup.GetGComm(), + "%%ENV:CHECKPOINTIOPS%%": server.ClusterGroup.Configurator.GetConfigInnoDBIOCapacity(), + "%%ENV:SVC_CONF_ENV_MAX_IOPS%%": server.ClusterGroup.Configurator.GetConfigInnoDBIOCapacityMax(), + "%%ENV:SVC_CONF_ENV_INNODB_IO_CAPACITY%%": server.ClusterGroup.Configurator.GetConfigInnoDBIOCapacity(), + "%%ENV:SVC_CONF_ENV_INNODB_IO_CAPACITY_MAX%%": server.ClusterGroup.Configurator.GetConfigInnoDBIOCapacityMax(), + "%%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT%%": server.ClusterGroup.Configurator.GetConfigInnoDBMaxDirtyPagePct(), + "%%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT_LWM%%": server.ClusterGroup.Configurator.GetConfigInnoDBMaxDirtyPagePctLwm(), + "%%ENV:SVC_CONF_ENV_INNODB_BUFFER_POOL_INSTANCES%%": server.ClusterGroup.Configurator.GetConfigInnoDBBPInstances(), + "%%ENV:SVC_CONF_ENV_INNODB_BUFFER_POOL_SIZE%%": server.ClusterGroup.Configurator.GetConfigInnoDBBPSize(), + "%%ENV:SVC_CONF_ENV_INNODB_LOG_BUFFER_SIZE%%": server.ClusterGroup.Configurator.GetConfigInnoDBLogBufferSize(), + "%%ENV:SVC_CONF_ENV_INNODB_LOG_FILE_SIZE%%": server.ClusterGroup.Configurator.GetConfigInnoDBLogFileSize(), + "%%ENV:SVC_CONF_ENV_INNODB_WRITE_IO_THREADS%%": server.ClusterGroup.Configurator.GetConfigInnoDBWriteIoThreads(), + "%%ENV:SVC_CONF_ENV_INNODB_READ_IO_THREADS%%": server.ClusterGroup.Configurator.GetConfigInnoDBReadIoThreads(), + "%%ENV:SVC_CONF_ENV_INNODB_PURGE_THREADS%%": server.ClusterGroup.Configurator.GetConfigInnoDBPurgeThreads(), + "%%ENV:SVC_CONF_ENV_INNODB_LRU_FLUSH_SIZE%%": server.ClusterGroup.Configurator.GetConfigInnoDBLruFlushSize(), + "%%ENV:SVC_CONF_ENV_EXPIRE_LOG_DAYS%%": server.ClusterGroup.Configurator.GetConfigExpireLogDays(), + "%%ENV:SVC_CONF_ENV_RELAY_SPACE_LIMIT%%": server.ClusterGroup.Configurator.GetConfigRelaySpaceLimit(), + "%%ENV:SVC_NAMESPACE%%": server.ClusterGroup.Name, + "%%ENV:SVC_NAME%%": server.Name, + "%%ENV:SVC_CONF_ENV_SST_METHOD%%": server.ClusterGroup.Conf.MultiMasterWsrepSSTMethod, + "%%ENV:SVC_CONF_ENV_DOMAIN_ID%%": server.ClusterGroup.Configurator.GetConfigReplicationDomain(server.ClusterGroup.Name), + "%%ENV:SVC_CONF_ENV_SST_RECEIVER_PORT%%": server.SSTPort, + "%%ENV:SVC_CONF_ENV_REPLICATION_MANAGER_ADDR%%": server.ClusterGroup.Conf.MonitorAddress + ":" + server.ClusterGroup.Conf.HttpPort, + "%%ENV:SVC_CONF_ENV_REPLICATION_MANAGER_URL%%": 
server.ClusterGroup.Conf.MonitorAddress + ":" + server.ClusterGroup.Conf.APIPort, + "%%ENV:ENV:SVC_CONF_ENV_REPLICATION_MANAGER_HOST_NAME%%": server.Host, + "%%ENV:ENV:SVC_CONF_ENV_REPLICATION_MANAGER_HOST_PORT%%": server.Port, + "%%ENV:ENV:SVC_CONF_ENV_REPLICATION_MANAGER_CLUSTER_NAME%%": server.ClusterGroup.Name, + } + + // size = ` + collector.ProvDisk + ` +} + +func (server *ServerMonitor) GetDatabaseDatadir() string { + if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorLocalhost { + return server.Datadir + "/var" + } else if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorSlapOS { + return server.SlapOSDatadir + "/var/lib/mysql" + } else if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorOnPremise { + value := server.GetConfigVariable("DATADIR") + if value != "" { + return value + } + } + return "/var/lib/mysql" +} +func (server *ServerMonitor) GetDatabaseConfdir() string { + if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorLocalhost { + return server.Datadir + "/init/etc/mysql" + } else if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorSlapOS { + return server.SlapOSDatadir + "/etc/mysql" + } + return "/etc/mysql" +} +func (server *ServerMonitor) GetDatabaseBinary() string { + if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorLocalhost { + return server.ClusterGroup.Conf.ProvDBBinaryBasedir + "/mysqld" + } else if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorSlapOS { + return server.SlapOSDatadir + "/usr/sbin/mysqld" + } + return "/usr/sbin/mysqld" +} +func (server *ServerMonitor) GetDatabaseSocket() string { + if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorLocalhost { + return server.Datadir + "/" + server.Id + ".sock" + } else if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorSlapOS { + return server.SlapOSDatadir + "/var/mysqld.sock" + } + return "/var/run/mysqld/mysqld.sock" +} + +func (server *ServerMonitor) GetDatabaseClientBasedir() string { + if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorLocalhost { + return server.ClusterGroup.Conf.ProvDBClientBasedir + } else if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorSlapOS { + return server.SlapOSDatadir + "/usr/bin/" + } + return "/usr/bin/mysql" +} + +func (server *ServerMonitor) GetConfigVariable(variable string) string { + if server.Variables == nil { + return "" + } + value := server.Variables[variable] + return value +} + +func (server *ServerMonitor) GetDatabaseConfig() string { + server.ClusterGroup.LogPrintf(LvlInfo, "Database Config generation "+server.Datadir+"/config.tar.gz") + if server.IsCompute { + server.ClusterGroup.Configurator.AddDBTag("spider") + } + err := server.ClusterGroup.Configurator.GenerateDatabaseConfig(server.Datadir, server.ClusterGroup.Conf.WorkingDir+"/"+server.ClusterGroup.Name, server.GetDatabaseBasedir(), server.GetEnv()) + if err != nil { + server.ClusterGroup.LogPrintf(LvlInfo, "Database Config generation %s: %s"+server.Datadir+"/config.tar.gz", err) + } + return "" +} + +func (server *ServerMonitor) GetDatabaseDynamicConfig(filter string, cmd string) string { + mydynamicconf, err := server.ClusterGroup.Configurator.GetDatabaseDynamicConfig(filter, cmd, server.Datadir) + if err != nil { + server.ClusterGroup.LogPrintf(LvlErr, "%s", err) + } + return mydynamicconf +} diff --git a/cluster/srv_get.go b/cluster/srv_get.go index 2a7b3aade..4916fa869 
100644 --- a/cluster/srv_get.go +++ b/cluster/srv_get.go @@ -10,13 +10,9 @@ package cluster import ( - "bufio" - "encoding/json" "errors" "fmt" "os" - "path/filepath" - "regexp" "sort" "strconv" "strings" @@ -25,7 +21,6 @@ import ( "github.com/jmoiron/sqlx" "github.com/signal18/replication-manager/config" "github.com/signal18/replication-manager/utils/dbhelper" - "github.com/signal18/replication-manager/utils/misc" "github.com/signal18/replication-manager/utils/s18log" "github.com/signal18/replication-manager/utils/state" ) @@ -577,242 +572,24 @@ func (server *ServerMonitor) GetTableDefinition(schema string, table string) (st return ddl, nil } -func (server *ServerMonitor) GetTablePK(schema string, table string) (string, error) { - query := "SELECT group_concat( distinct column_name) from information_schema.KEY_COLUMN_USAGE WHERE CONSTRAINT_NAME='PRIMARY' AND CONSTRAINT_SCHEMA='" + schema + "' AND TABLE_NAME='" + table + "'" - var pk string - err := server.Conn.QueryRowx(query).Scan(&pk) - if err != nil { - server.ClusterGroup.LogPrintf(LvlErr, "Failed query %s %s", query, err) - return "", nil - } - return pk, nil -} +func (server *ServerMonitor) GetDatabaseBasedir() string { -func (server *ServerMonitor) IsFilterInTags(filter string) bool { - tags := server.ClusterGroup.GetDatabaseTags() - for _, tag := range tags { - if strings.HasSuffix(filter, tag) { - // fmt.Println(server.ClusterGroup.Conf.ProvTags + " vs tag: " + tag + " against " + filter) - return true - } - if server.IsCompute && strings.Contains(filter, "spider") { - //IsCompute identify spider nodes need to force tag spider if no present in db tags config - return true - } - } - return false -} - -func (server *ServerMonitor) GetDatabaseDatadir() string { - if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorLocalhost { - return server.Datadir + "/var" - } else if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorSlapOS { - return server.SlapOSDatadir + "/var/lib/mysql" - } - return "/var/lib/mysql" -} -func (server *ServerMonitor) GetDatabaseConfdir() string { - if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorLocalhost { - return server.Datadir + "/init/etc/mysql" - } else if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorSlapOS { - return server.SlapOSDatadir + "/etc/mysql" - } - return "/etc/mysql" -} -func (server *ServerMonitor) GetDatabaseBinary() string { - if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorLocalhost { - return server.ClusterGroup.Conf.ProvDBBinaryBasedir + "/mysqld" - } else if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorSlapOS { - return server.SlapOSDatadir + "/usr/sbin/mysqld" - } - return "/usr/sbin/mysqld" -} -func (server *ServerMonitor) GetDatabaseSocket() string { if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorLocalhost { - return server.Datadir + "/" + server.Id + ".sock" - } else if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorSlapOS { - return server.SlapOSDatadir + "/var/mysqld.sock" - } - return "/var/run/mysqld/mysqld.sock" -} + return server.Datadir -func (server *ServerMonitor) GetDatabaseClientBasedir() string { - if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorLocalhost { - return server.ClusterGroup.Conf.ProvDBClientBasedir } else if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorSlapOS { - return server.SlapOSDatadir + "/usr/bin/" - } - return "/usr/bin/mysql" 
-} - -func (server *ServerMonitor) GetDatabaseConfig() string { - type File struct { - Path string `json:"path"` - Content string `json:"fmt"` + return server.SlapOSDatadir } - server.ClusterGroup.LogPrintf(LvlInfo, "Database Config generation "+server.Datadir+"/config.tar.gz") - // Extract files - if server.ClusterGroup.Conf.ProvBinaryInTarball { - url, err := server.ClusterGroup.Conf.GetTarballUrl(server.ClusterGroup.Conf.ProvBinaryTarballName) - if err != nil { - server.ClusterGroup.LogPrintf(LvlErr, "Compliance get binary %s directory %s", url, err) - } - err = misc.DownloadFileTimeout(url, server.Datadir+"/"+server.ClusterGroup.Conf.ProvBinaryTarballName, 1200) - if err != nil { - server.ClusterGroup.LogPrintf(LvlErr, "Compliance dowload binary %s directory %s", url, err) - } - misc.Untargz(server.Datadir+"/init", server.Datadir+"/"+server.ClusterGroup.Conf.ProvBinaryTarballName) - } - - if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorLocalhost { - os.RemoveAll(server.Datadir + "/init/etc") - } else { - os.RemoveAll(server.Datadir + "/init") - } - for _, rule := range server.ClusterGroup.DBModule.Rulesets { - if strings.Contains(rule.Name, "mariadb.svc.mrm.db.cnf") { - - for _, variable := range rule.Variables { - if variable.Class == "file" || variable.Class == "fileprop" { - var f File - json.Unmarshal([]byte(variable.Value), &f) - fpath := strings.Replace(f.Path, "%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%", server.Datadir+"/init", -1) - dir := filepath.Dir(fpath) - if server.ClusterGroup.Conf.LogLevel > 2 { - server.ClusterGroup.LogPrintf(LvlInfo, "Config create %s", fpath) - } - // create directory - if _, err := os.Stat(dir); os.IsNotExist(err) { - err := os.MkdirAll(dir, os.FileMode(0775)) - if err != nil { - server.ClusterGroup.LogPrintf(LvlErr, "Compliance create directory %q: %s", dir, err) - } - } - - if fpath[len(fpath)-1:] != "/" && (server.IsFilterInTags(rule.Filter) || rule.Name == "mariadb.svc.mrm.db.cnf.generic") { - content := misc.ExtractKey(f.Content, server.GetEnv()) - - if server.IsFilterInTags("docker") && server.ClusterGroup.Conf.ProvOrchestrator != config.ConstOrchestratorLocalhost { - if server.IsFilterInTags("wsrep") { - //if galera don't cusomized system files - if strings.Contains(content, "./.system") { - content = "" - } - } else { - content = strings.Replace(content, "./.system", "/var/lib/mysql/.system", -1) - } - } - if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorLocalhost { - content = strings.Replace(content, "includedir ..", "includedir "+server.Datadir+"/init", -1) - content = strings.Replace(content, "../etc/mysql", server.Datadir+"/init/etc/mysql", -1) - - } else if server.ClusterGroup.Conf.ProvOrchestrator == config.ConstOrchestratorSlapOS { - content = strings.Replace(content, "includedir ..", "includedir "+server.SlapOSDatadir+"/", -1) - content = strings.Replace(content, "../etc/mysql", server.SlapOSDatadir+"/etc/mysql", -1) - content = strings.Replace(content, "./.system", server.SlapOSDatadir+"/var/lib/mysql/.system", -1) - } - outFile, err := os.Create(fpath) - if err != nil { - server.ClusterGroup.LogPrintf(LvlErr, "Compliance create file failed %q: %s", fpath, err) - } else { - _, err = outFile.WriteString(content) - - if err != nil { - server.ClusterGroup.LogPrintf(LvlErr, "Compliance writing file failed %q: %s", fpath, err) - } - outFile.Close() - //server.ClusterGroup.LogPrintf(LvlInfo, "Variable name %s", variable.Name) - } - - } - } - } - } - } - // processing symlink - type Link 
struct { - Symlink string `json:"symlink"` - Target string `json:"target"` - } - for _, rule := range server.ClusterGroup.DBModule.Rulesets { - if strings.Contains(rule.Name, "mariadb.svc.mrm.db.cnf.generic") { - for _, variable := range rule.Variables { - if variable.Class == "symlink" { - if server.IsFilterInTags(rule.Filter) || rule.Name == "mariadb.svc.mrm.db.cnf.generic" { - var f Link - json.Unmarshal([]byte(variable.Value), &f) - fpath := strings.Replace(f.Symlink, "%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%", server.Datadir+"/init", -1) - if server.ClusterGroup.Conf.LogLevel > 2 { - server.ClusterGroup.LogPrintf(LvlInfo, "Config symlink %s", fpath) - } - os.Symlink(f.Target, fpath) - // keys := strings.Split(variable.Value, " ") - } - } - } - } - } - - if server.ClusterGroup.HaveDBTag("docker") { - err := misc.ChownR(server.Datadir+"/init/data", 999, 999) - if err != nil { - server.ClusterGroup.LogPrintf(LvlErr, "Chown failed %q: %s", server.Datadir+"/init/data", err) - } - err = misc.ChmodR(server.Datadir+"/init/init", 0755) - if err != nil { - server.ClusterGroup.LogPrintf(LvlErr, "Chown failed %q: %s", server.Datadir+"/init/init", err) - } - } - - misc.CopyFile(server.ClusterGroup.Conf.WorkingDir+"/"+server.ClusterGroup.Name+"/ca-cert.pem", server.Datadir+"/init/etc/mysql/ssl/ca-cert.pem") - misc.CopyFile(server.ClusterGroup.Conf.WorkingDir+"/"+server.ClusterGroup.Name+"/server-cert.pem", server.Datadir+"/init/etc/mysql/ssl/server-cert.pem") - misc.CopyFile(server.ClusterGroup.Conf.WorkingDir+"/"+server.ClusterGroup.Name+"/server-key.pem", server.Datadir+"/init/etc/mysql/ssl/server-key.pem") - misc.CopyFile(server.ClusterGroup.Conf.WorkingDir+"/"+server.ClusterGroup.Name+"/client-cert.pem", server.Datadir+"/init/etc/mysql/ssl/client-cert.pem") - misc.CopyFile(server.ClusterGroup.Conf.WorkingDir+"/"+server.ClusterGroup.Name+"/client-key.pem", server.Datadir+"/init/etc/mysql/ssl/client-key.pem") - - server.ClusterGroup.TarGz(server.Datadir+"/config.tar.gz", server.Datadir+"/init") - return "" } -func (server *ServerMonitor) GetDatabaseDynamicConfig(filter string, cmd string) string { - mydynamicconf := "" - // processing symlink - type Link struct { - Symlink string `json:"symlink"` - Target string `json:"target"` - } - for _, rule := range server.ClusterGroup.DBModule.Rulesets { - if strings.Contains(rule.Name, "mariadb.svc.mrm.db.cnf.generic") { - for _, variable := range rule.Variables { - if variable.Class == "symlink" { - if server.IsFilterInTags(rule.Filter) || rule.Name == "mariadb.svc.mrm.db.cnf.generic" { - // server.ClusterGroup.LogPrintf(LvlInfo, "content %s %s", filter, rule.Filter) - if filter == "" || strings.Contains(rule.Filter, filter) { - var f Link - json.Unmarshal([]byte(variable.Value), &f) - fpath := server.Datadir + "/init/etc/mysql/conf.d/" - // server.ClusterGroup.LogPrintf(LvlInfo, "Config symlink %s , %s", fpath, f.Target) - file, err := os.Open(fpath + f.Target) - if err == nil { - r, _ := regexp.Compile(cmd) - scanner := bufio.NewScanner(file) - for scanner.Scan() { - // server.ClusterGroup.LogPrintf(LvlInfo, "content: %s", scanner.Text()) - if r.MatchString(scanner.Text()) { - mydynamicconf = mydynamicconf + strings.Split(scanner.Text(), ":")[1] - } - } - file.Close() - - } else { - server.ClusterGroup.LogPrintf(LvlInfo, "Error in dynamic config: %s", err) - } - } - } - } - } - } +func (server *ServerMonitor) GetTablePK(schema string, table string) (string, error) { + query := "SELECT group_concat( distinct column_name) from 
information_schema.KEY_COLUMN_USAGE WHERE CONSTRAINT_NAME='PRIMARY' AND CONSTRAINT_SCHEMA='" + schema + "' AND TABLE_NAME='" + table + "'"
+	var pk string
+	err := server.Conn.QueryRowx(query).Scan(&pk)
+	if err != nil {
+		server.ClusterGroup.LogPrintf(LvlErr, "Failed query %s %s", query, err)
+		return "", nil
 	}
-	return mydynamicconf
+	return pk, nil
 }
diff --git a/cluster/srv_rejoin.go b/cluster/srv_rejoin.go
index 72eb98c7b..8be568b38 100644
--- a/cluster/srv_rejoin.go
+++ b/cluster/srv_rejoin.go
@@ -654,7 +654,7 @@ func (cluster *Cluster) RejoinFixRelay(slave *ServerMonitor, relay *ServerMonito
 	return nil
 }
 
-// UseGtid check is replication use gtid
+// UseGtid checks if replication uses gtid
 func (server *ServerMonitor) UsedGtidAtElection(crash *Crash) bool {
 	ss, errss := server.GetSlaveStatus(server.ReplicationSourceName)
 	if errss != nil {
@@ -663,7 +663,7 @@ func (server *ServerMonitor) UsedGtidAtElection(crash *Crash) bool {
 
 	server.ClusterGroup.LogPrintf(LvlDbg, "Rejoin Server use GTID %s", ss.UsingGtid.String)
 
-	// An old master master do no have replication
+	// An old master does not have replication
 	if crash.FailoverIOGtid == nil {
 		server.ClusterGroup.LogPrintf(LvlDbg, "Rejoin server cannot find a saved master election GTID")
 		return false
diff --git a/config/config.go b/config/config.go
index 19b27e587..771a8b628 100644
--- a/config/config.go
+++ b/config/config.go
@@ -587,6 +587,12 @@ type DockerRepos struct {
 	Repos []DockerRepo `json:"repos"`
 }
 
+type Tag struct {
+	Id       uint   `json:"id"`
+	Name     string `json:"name"`
+	Category string `json:"category"`
+}
+
 type Grant struct {
 	Grant  string `json:"grant"`
 	Enable bool   `json:"enable"`
diff --git a/dashboard/app/dashboard.js b/dashboard/app/dashboard.js
index c848d70af..b69454a57 100644
--- a/dashboard/app/dashboard.js
+++ b/dashboard/app/dashboard.js
@@ -342,8 +342,8 @@ function (
       return passedTest;
     }
     $scope.agents = data.agents;
-    $scope.missingDBTags=isInTags(data.configTags,data.dbServersTags,function(currentTag,dbTags){ return (dbTags.indexOf(currentTag)== -1);});
-    $scope.missingProxyTags=isInTags(data.configPrxTags,data.proxyServersTags,function(currentTag,proxyTags){ return (proxyTags.indexOf(currentTag)== -1);});
+    $scope.missingDBTags=isInTags(data.configurator.configTags,data.configurator.dbServersTags,function(currentTag,dbTags){ return (dbTags.indexOf(currentTag)== -1);});
+    $scope.missingProxyTags=isInTags(data.configurator.configPrxTags,data.configurator.proxyServersTags,function(currentTag,proxyTags){ return (proxyTags.indexOf(currentTag)== -1);});
 
     $scope.reserror = false;
 
diff --git a/dashboard/static/card-db-configurator.html b/dashboard/static/card-db-configurator.html
index 928a09edb..e46a9282f 100644
--- a/dashboard/static/card-db-configurator.html
+++ b/dashboard/static/card-db-configurator.html
@@ -73,7 +73,7 @@
Using
- {{ tag }}
diff --git a/dashboard/static/card-proxy-configurator.html b/dashboard/static/card-proxy-configurator.html index 4149ba35e..d3789a53a 100644 --- a/dashboard/static/card-proxy-configurator.html +++ b/dashboard/static/card-proxy-configurator.html @@ -15,7 +15,7 @@
Using
- {{ tag }}
Ressources
diff --git a/server/api_cluster.go b/server/api_cluster.go index 8df08ee2d..63999aaf9 100644 --- a/server/api_cluster.go +++ b/server/api_cluster.go @@ -722,9 +722,12 @@ func (repman *ReplicationManager) handlerMuxSetSettingsDiscover(w http.ResponseW http.Error(w, "No valid ACL", 403) return } - mycluster.ConfigDiscovery() + err := mycluster.ConfigDiscovery() + if err != nil { + http.Error(w, err.Error(), 500) + return + } } else { - http.Error(w, "No cluster", 500) return } @@ -842,7 +845,7 @@ func (repman *ReplicationManager) handlerMuxClusterTags(w http.ResponseWriter, r if mycluster != nil { e := json.NewEncoder(w) e.SetIndent("", "\t") - err := e.Encode(mycluster.GetDBModuleTags()) + err := e.Encode(mycluster.Configurator.GetDBModuleTags()) if err != nil { http.Error(w, "Encoding error", 500) return diff --git a/share/opensvc/moduleset_mariadb.svc.mrm.db.json b/share/opensvc/moduleset_mariadb.svc.mrm.db.json index 00e6ab1f0..dea3a19a5 100644 --- a/share/opensvc/moduleset_mariadb.svc.mrm.db.json +++ b/share/opensvc/moduleset_mariadb.svc.mrm.db.json @@ -2258,6 +2258,25 @@ } ], "fset_name": "mariadb.disk.splitundolog" + }, + { + "fset_stats": false, + "id": 409, + "filters": [ + { + "filter": { + "f_op": "=", + "f_field": "tag_name", + "f_value": "nosplitpath", + "f_table": "v_tags", + "id": 366 + }, + "f_order": 0, + "f_log_op": "AND", + "filterset": null + } + ], + "fset_name": "mariadb.disk.nosplitpath" } ], "rulesets": [ @@ -2696,8 +2715,8 @@ { "var_author": "admin Manager", "var_class": "file", - "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/engine_innodb.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# INNODB\\n# ------\\n# mariadb_command: SET GLOBAL innodb_adaptive_hash_index=0;SET GLOBAL innodb_buffer_pool_size = %%ENV:SVC_CONF_ENV_MAX_MEM%%*1024*1024;SET GLOBAL innodb_io_capacity = %%ENV:CHECKPOINTIOPS%%;SET GLOBAL innodb_io_capacity_max = %%ENV:SVC_CONF_ENV_MAX_IOPS%%;SET GLOBAL innodb_max_dirty_pages_pct = %%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT%%;SET GLOBAL innodb_max_dirty_pages_pct_lwm = %%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT_LWM%%;SET GLOBAL innodb_flush_neighbors = 1;EXECUTE IMMEDIATE IF(@@innodb_buffer_pool_instances=%%ENV:IBPINSTANCES%%,\\\"SELECT 'Unchanged innodb_buffer_pool_instances'\\\", \\\"SET GLOBAL innodb_buffer_pool_instances =%%ENV:IBPINSTANCES%%\\\"); EXECUTE IMMEDIATE IF(@@innodb_read_io_threads=%%ENV:SVC_CONF_ENV_INNODB_READ_IO_THREADS%%,\\\"SELECT 'Unchanged innodb_read_io_threads'\\\", \\\"SET GLOBAL innodb_read_io_threads = %%ENV:SVC_CONF_ENV_INNODB_READ_IO_THREADS%%\\\");EXECUTE IMMEDIATE IF(@@innodb_write_io_threads=%%ENV:SVC_CONF_ENV_INNODB_WRITE_IO_THREADS%%,\\\"SELECT 'Unchanged innodb_write_io_threads'\\\", \\\"SET GLOBAL innodb_write_io_threads = %%ENV:ENV:SVC_CONF_ENV_INNODB_WRITE_IO_THREADS%%\\\");EXECUTE IMMEDIATE IF(@@innodb_log_buffer_size=%%ENV:SVC_CONF_ENV_INNODB_LOG_BUFFER_SIZE%%*1024*1024,\\\"SELECT 'Unchanged innodb_log_buffer_size'\\\", \\\"SET GLOBAL innodb_log_buffer_size = %%ENV:SVC_CONF_ENV_INNODB_LOG_BUFFER_SIZE%%*1024*1024\\\");SET GLOBAL innodb_thread_concurrency = 0;EXECUTE IMMEDIATE IF(@@innodb_autoinc_lock_mode=2,\\\"SELECT 'Unchanged innodb_autoinc_lock_mode'\\\", \\\"SET GLOBAL innodb_autoinc_lock_mode=2\\\");SET GLOBAL innodb_stats_on_metadata = 0;SET GLOBAL innodb_buffer_pool_dump_at_shutdown = 1;EXECUTE IMMEDIATE IF(@@innodb_buffer_pool_load_at_startup=1,\\\"SELECT 'Unchanged innodb_buffer_pool_load_at_startup'\\\", 
\\\"SET GLOBAL innodb_buffer_pool_load_at_startup=1\\\");EXECUTE IMMEDIATE IF(@@innodb_open_files=16384,\\\"SELECT 'Unchanged innodb_open_files'\\\", \\\"SET GLOBAL innodb_open_files=16384\\\");EXECUTE IMMEDIATE IF(@@innodb_purge_threads=%%ENV:SVC_CONF_ENV_INNODB_PURGE_THREADS%%,\\\"SELECT 'Unchanged innodb_purge_threads'\\\", \\\"SET GLOBAL innodb_purge_threads = %%ENV:SVC_CONF_ENV_INNODB_PURGE_THREADS%%\\\");SET GLOBAL innodb_concurrency_tickets = 5000;SET GLOBAL innodb_print_all_deadlocks = 1; EXECUTE IMMEDIATE IF(@@innodb_large_prefix=1 OR @@innodb_large_prefix IS NULL,\\\"SELECT 'Unchanged innodb_large_prefix'\\\", \\\"SET GLOBAL innodb_large_prefix = 1\\\"); EXECUTE IMMEDIATE IF(@@innodb_file_format='barracuda' OR @@innodb_file_format IS NULL,\\\"SELECT 'Unchanged barracuda'\\\", \\\"SET GLOBAL innodb_file_format='barracuda'\\\");SET GLOBAL innodb_default_row_format=dynamic;\\n \\n[mysqld-5.5]\\ninnodb_file_format = barracuda\\n[mysqld-5.6]\\ninnodb_file_format = barracuda\\n[mysqld-5.7]\\ninnodb_file_format = barracuda \\n#deprecated in 5.8\\n\\n[mariadb]\\n#innodb_buffer_pool_populate=0 \\n[mariadb 5.5]\\ninnodb_file_format = barracuda\\n[mariadb 10.0]\\ninnodb_file_format = barracuda\\n \\n[mariadb 10.1]\\ninnodb_force_primary_key = 1\\ninnodb_default_row_format=dynamic\\n#innodb_defragment = 1\\ninnodb_file_format = barracuda\\n\\n[mariadb 10.2]\\ninnodb_lock_schedule_algorithm=fcfs\\ninnodb_file_format = barracuda\\n\\n[mariadb 10.3]\\ninnodb-page-cleaners= %%ENV:NODES_CPU_CORES%%\\n\\n[mysqld]\\ntransaction-isolation = READ-COMMITTED\\ndefault_storage_engine = InnoDB\\ninnodb_buffer_pool_size = %%ENV:SVC_CONF_ENV_INNODB_BUFFER_POOL_SIZE%%M\\ninnodb_buffer_pool_instances = %%ENV:SVC_CONF_ENV_INNODB_BUFFER_POOL_INSTANCES%%\\ninnodb_read_io_threads = %%ENV:SVC_CONF_ENV_INNODB_READ_IO_THREADS%%\\ninnodb_write_io_threads = %%ENV:SVC_CONF_ENV_INNODB_WRITE_IO_THREADS%%\\ninnodb_io_capacity = %%ENV:CHECKPOINTIOPS%%\\ninnodb_io_capacity_max = %%ENV:SVC_CONF_ENV_MAX_IOPS%%\\ninnodb_max_dirty_pages_pct = %%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT%%\\ninnodb_max_dirty_pages_pct_lwm = %%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT_LWM%%\\ninnodb_log_file_size = %%ENV:SVC_CONF_ENV_INNODB_LOG_FILE_SIZE%%M\\n# innodb_log_buffer_size change Crash 10.2.14\\ninnodb_log_buffer_size = %%ENV:SVC_CONF_ENV_INNODB_LOG_BUFFER_SIZE%%M\\nloose_innodb_purge_threads = %%ENV:SVC_CONF_ENV_INNODB_PURGE_THREADS%%\\ninnodb_adaptive_hash_index = 0\\ninnodb_file_per_table= 1\\ninnodb_log_files_in_group = 1\\ninnodb_flush_neighbors = 1\\ninnodb_thread_concurrency = 0\\ninnodb_flush_method = O_DIRECT\\n#innodb_undo_tablespaces = 4\\ninnodb_autoinc_lock_mode = 2\\ninnodb_stats_on_metadata = 0\\ninnodb_buffer_pool_dump_at_shutdown = 1\\ninnodb_buffer_pool_load_at_startup = 1\\ninnodb_open_files = 16384\\ninnodb_concurrency_tickets = 5000\\ninnodb_print_all_deadlocks = 1 \\n# innodb_monitor_enable = all // cost perf \\n\\n[mariadb 10.4]\\ninnodb_large_prefix = 1\\ninnodb_file_format = barracuda\\ninnodb_default_row_format=dynamic\\n\"}", - "var_updated": "2021-05-20 10:26:25", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/engine_innodb.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"# INNODB\\n# ------\\n# mariadb_command: SET GLOBAL innodb_adaptive_hash_index=0;SET GLOBAL innodb_buffer_pool_size = %%ENV:SVC_CONF_ENV_MAX_MEM%%*1024*1024;SET GLOBAL innodb_io_capacity = %%ENV:CHECKPOINTIOPS%%;SET GLOBAL innodb_io_capacity_max = 
%%ENV:SVC_CONF_ENV_MAX_IOPS%%;SET GLOBAL innodb_max_dirty_pages_pct = %%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT%%;SET GLOBAL innodb_max_dirty_pages_pct_lwm = %%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT_LWM%%;SET GLOBAL innodb_flush_neighbors = 1;EXECUTE IMMEDIATE IF(@@innodb_buffer_pool_instances=%%ENV:IBPINSTANCES%%,\\\"SELECT 'Unchanged innodb_buffer_pool_instances'\\\", \\\"SET GLOBAL innodb_buffer_pool_instances =%%ENV:IBPINSTANCES%%\\\"); EXECUTE IMMEDIATE IF(@@innodb_read_io_threads=%%ENV:SVC_CONF_ENV_INNODB_READ_IO_THREADS%%,\\\"SELECT 'Unchanged innodb_read_io_threads'\\\", \\\"SET GLOBAL innodb_read_io_threads = %%ENV:SVC_CONF_ENV_INNODB_READ_IO_THREADS%%\\\");EXECUTE IMMEDIATE IF(@@innodb_write_io_threads=%%ENV:SVC_CONF_ENV_INNODB_WRITE_IO_THREADS%%,\\\"SELECT 'Unchanged innodb_write_io_threads'\\\", \\\"SET GLOBAL innodb_write_io_threads = %%ENV:ENV:SVC_CONF_ENV_INNODB_WRITE_IO_THREADS%%\\\");EXECUTE IMMEDIATE IF(@@innodb_log_buffer_size=%%ENV:SVC_CONF_ENV_INNODB_LOG_BUFFER_SIZE%%*1024*1024,\\\"SELECT 'Unchanged innodb_log_buffer_size'\\\", \\\"SET GLOBAL innodb_log_buffer_size = %%ENV:SVC_CONF_ENV_INNODB_LOG_BUFFER_SIZE%%*1024*1024\\\");SET GLOBAL innodb_thread_concurrency = 0;EXECUTE IMMEDIATE IF(@@innodb_autoinc_lock_mode=2,\\\"SELECT 'Unchanged innodb_autoinc_lock_mode'\\\", \\\"SET GLOBAL innodb_autoinc_lock_mode=2\\\");SET GLOBAL innodb_stats_on_metadata = 0;SET GLOBAL innodb_buffer_pool_dump_at_shutdown = 1;EXECUTE IMMEDIATE IF(@@innodb_buffer_pool_load_at_startup=1,\\\"SELECT 'Unchanged innodb_buffer_pool_load_at_startup'\\\", \\\"SET GLOBAL innodb_buffer_pool_load_at_startup=1\\\");EXECUTE IMMEDIATE IF(@@innodb_open_files=16384,\\\"SELECT 'Unchanged innodb_open_files'\\\", \\\"SET GLOBAL innodb_open_files=16384\\\");EXECUTE IMMEDIATE IF(@@innodb_purge_threads=%%ENV:SVC_CONF_ENV_INNODB_PURGE_THREADS%%,\\\"SELECT 'Unchanged innodb_purge_threads'\\\", \\\"SET GLOBAL innodb_purge_threads = %%ENV:SVC_CONF_ENV_INNODB_PURGE_THREADS%%\\\");SET GLOBAL innodb_concurrency_tickets = 5000;SET GLOBAL innodb_print_all_deadlocks = 1; EXECUTE IMMEDIATE IF(@@innodb_large_prefix=1 OR @@innodb_large_prefix IS NULL,\\\"SELECT 'Unchanged innodb_large_prefix'\\\", \\\"SET GLOBAL innodb_large_prefix = 1\\\"); EXECUTE IMMEDIATE IF(@@innodb_file_format='barracuda' OR @@innodb_file_format IS NULL,\\\"SELECT 'Unchanged barracuda'\\\", \\\"SET GLOBAL innodb_file_format='barracuda'\\\");SET GLOBAL innodb_default_row_format=dynamic;\\n \\n[mysqld-5.5]\\ninnodb_file_format = barracuda\\n[mysqld-5.6]\\ninnodb_file_format = barracuda\\n[mysqld-5.7]\\ninnodb_file_format = barracuda \\n#deprecated in 5.8\\n\\n[mariadb]\\n#innodb_buffer_pool_populate=0 \\n[mariadb 5.5]\\ninnodb_file_format = barracuda\\n[mariadb 10.0]\\ninnodb_file_format = barracuda\\n \\n[mariadb 10.1]\\ninnodb_force_primary_key = 1\\ninnodb_default_row_format=dynamic\\n#innodb_defragment = 1\\ninnodb_file_format = barracuda\\n\\n[mariadb 10.2]\\ninnodb_lock_schedule_algorithm=fcfs\\ninnodb_file_format = barracuda\\n\\n[mariadb 10.3]\\ninnodb-page-cleaners= %%ENV:NODES_CPU_CORES%%\\n\\n[mysqld]\\ntransaction-isolation = READ-COMMITTED\\ndefault_storage_engine = InnoDB\\ninnodb_buffer_pool_size = %%ENV:SVC_CONF_ENV_INNODB_BUFFER_POOL_SIZE%%M\\ninnodb_buffer_pool_instances = %%ENV:SVC_CONF_ENV_INNODB_BUFFER_POOL_INSTANCES%%\\ninnodb_read_io_threads = %%ENV:SVC_CONF_ENV_INNODB_READ_IO_THREADS%%\\ninnodb_write_io_threads = %%ENV:SVC_CONF_ENV_INNODB_WRITE_IO_THREADS%%\\ninnodb_io_capacity = %%ENV:CHECKPOINTIOPS%%\\ninnodb_io_capacity_max 
= %%ENV:SVC_CONF_ENV_MAX_IOPS%%\\ninnodb_max_dirty_pages_pct = %%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT%%\\ninnodb_max_dirty_pages_pct_lwm = %%ENV:SVC_CONF_ENV_INNODB_MAX_DIRTY_PAGE_PCT_LWM%%\\ninnodb_log_file_size = %%ENV:SVC_CONF_ENV_INNODB_LOG_FILE_SIZE%%M\\n# innodb_log_buffer_size change Crash 10.2.14\\ninnodb_log_buffer_size = %%ENV:SVC_CONF_ENV_INNODB_LOG_BUFFER_SIZE%%M\\nloose_innodb_purge_threads = %%ENV:SVC_CONF_ENV_INNODB_PURGE_THREADS%%\\nloose_innodb_lru_flush_size = %%ENV:SVC_CONF_ENV_INNODB_LRU_FLUSH_SIZE%%\\ninnodb_adaptive_hash_index = 0\\ninnodb_file_per_table= 1\\ninnodb_log_files_in_group = 1\\ninnodb_flush_neighbors = 1\\ninnodb_thread_concurrency = 0\\ninnodb_flush_method = O_DIRECT\\n#innodb_undo_tablespaces = 4\\ninnodb_autoinc_lock_mode = 2\\ninnodb_stats_on_metadata = 0\\ninnodb_buffer_pool_dump_at_shutdown = 1\\ninnodb_buffer_pool_load_at_startup = 1\\ninnodb_open_files = 16384\\ninnodb_concurrency_tickets = 5000\\ninnodb_print_all_deadlocks = 1 \\n# innodb_monitor_enable = all // cost perf \\n\\n[mariadb 10.4]\\ninnodb_large_prefix = 1\\ninnodb_file_format = barracuda\\ninnodb_default_row_format=dynamic\\n\"}", + "var_updated": "2021-07-16 09:17:42", "var_name": "db_cnf_engine_innodb", "id": 5917 }, @@ -4051,7 +4070,8 @@ "mariadb.svc.mrm.db.cnf.generic.sec_noauth", "mariadb.svc.mrm.db.cnf.generic.mem_withinterleave", "mariadb.svc.mrm.db.cnf.generic.rep_withrowminimal", - "mariadb.svc.mrm.db.cnf.generic.disk_withsplitundolog" + "mariadb.svc.mrm.db.cnf.generic.disk_withsplitundolog", + "mariadb.svc.mrm.db.cnf.generic.disk_nosplitpath" ], "publications": [ "replication-manager" @@ -6958,6 +6978,30 @@ "responsibles": [ "replication-manager" ] + }, + { + "fset_name": "mariadb.disk.nosplitpath", + "ruleset_name": "mariadb.svc.mrm.db.cnf.generic.disk_nosplitpath", + "variables": [ + { + "var_author": "admin Manager", + "var_class": "file", + "var_value": "{\"path\":\"%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%/etc/mysql/default_path.cnf\",\"mode\":\"%%ENV:CNF_PERMS%%\",\"uid\":\"%%ENV:MYSQL_UID%%\",\"gid\":\"%%ENV:MYSQL_GID%%\",\"fmt\":\"[mysqld]\\ntmpdir = %%ENV:SVC_CONF_ENV_MYSQL_TMPDIR%%\\nslave_load_tmpdir = %%ENV:SVC_CONF_ENV_MYSQL_SLAVE_LOAD_TMPDIR%%\\nlog_error = %%ENV:SVC_CONF_ENV_MYSQL_LOG_ERROR%%\\nslow_query_log_file = %%ENV:SVC_CONF_ENV_MYSQL_SLOW_QUERY_LOG_FILE%%\\ngeneral_log_file= %%ENV:SVC_CONF_ENV_MYSQL_GENERAL_LOG_FILE%%\\ninnodb_data_home_dir = %%ENV:SVC_CONF_ENV_MYSQL_INNODB_DATA_HOME_DIR%%\\ninnodb_log_group_home_dir = %%ENV:SVC_CONF_ENV_MYSQL_INNODB_LOG_GROUP_HOME_DIR%%\\nloose_innodb_undo_directory = %%ENV:SVC_CONF_ENV_MYSQL_INNODB_UNDO_DIRECTORY%%\\nlog_bin= %%ENV:SVC_CONF_ENV_MYSQL_LOG_BIN%%\\nlog_bin_index= %%ENV:SVC_CONF_ENV_MYSQL_LOG_BIN_INDEX%%\\nrelay_log = %%ENV:SVC_CONF_ENV_MYSQL_RELAY_LOG%%\\nrelay_log_index = %%ENV:SVC_CONF_ENV_MYSQL_RELAY_LOG_INDEX%%\\n\\n[mariadb]\\n# Bug aria_log_dir_path is not available in variables \\n# aria_log_dir_path = %%ENV:SVC_CONF_ENV_MYSQL_ARIA_LOG_DIR_PATH%%\"}", + "var_updated": "2021-07-15 15:36:26", + "var_name": "db_cnf_default_path", + "id": 6285 + } + ], + "ruleset_public": false, + "ruleset_type": "contextual", + "rulesets": [], + "publications": [ + "replication-manager" + ], + "id": 1152, + "responsibles": [ + "replication-manager" + ] } ] } From e61989c6a2912aec350491d76b93dd8f040b0458 Mon Sep 17 00:00:00 2001 From: svaroqui Date: Sat, 17 Jul 2021 16:16:52 +0200 Subject: [PATCH 83/84] Addding configurator --- cluster/configurator/configurator.go | 661 +++++++++++++++++++++++ 
cluster/configurator/configurator_add.go | 14 + cluster/configurator/configurator_del.go | 34 ++ cluster/configurator/configurator_get.go | 317 +++++++++++ cluster/configurator/configurator_has.go | 72 +++ cluster/configurator/configurator_set.go | 73 +++ cluster/configurator/configurator_tar.go | 101 ++++ 7 files changed, 1272 insertions(+) create mode 100644 cluster/configurator/configurator.go create mode 100644 cluster/configurator/configurator_add.go create mode 100644 cluster/configurator/configurator_del.go create mode 100644 cluster/configurator/configurator_get.go create mode 100644 cluster/configurator/configurator_has.go create mode 100644 cluster/configurator/configurator_set.go create mode 100644 cluster/configurator/configurator_tar.go diff --git a/cluster/configurator/configurator.go b/cluster/configurator/configurator.go new file mode 100644 index 000000000..ae8acacc0 --- /dev/null +++ b/cluster/configurator/configurator.go @@ -0,0 +1,661 @@ +// replication-manager - Replication Manager Monitoring and CLI for MariaDB and MySQL +// Copyright 2017 Signal 18 Cloud SAS +// Authors: Guillaume Lefranc +// Stephane Varoqui +// This source code is licensed under the GNU General Public License, version 3. + +package configurator + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/signal18/replication-manager/config" + "github.com/signal18/replication-manager/utils/dbhelper" + "github.com/signal18/replication-manager/utils/misc" +) + +type Configurator struct { + ClusterConfig config.Config `json:"-"` + DBModule config.Compliance `json:"-"` + ProxyModule config.Compliance `json:"-"` + ConfigDBTags []config.Tag `json:"configTags"` //from module + ConfigPrxTags []config.Tag `json:"configPrxTags"` //from module + DBTags []string `json:"dbServersTags"` //from conf + ProxyTags []string `json:"proxyServersTags"` + WorkingDir string `json:"-"` // working dir is the place to generate the all cluster config +} + +func (configurator *Configurator) Init(conf config.Config) error { + var err error + configurator.ClusterConfig = conf + configurator.LoadDBModules() + configurator.LoadProxyModules() + configurator.ConfigDBTags = configurator.GetDBModuleTags() + configurator.ConfigPrxTags = configurator.GetProxyModuleTags() + + return err +} + +func (configurator *Configurator) LoadDBModules() error { + file := configurator.ClusterConfig.ShareDir + "/opensvc/moduleset_mariadb.svc.mrm.db.json" + jsonFile, err := os.Open(file) + if err != nil { + return errors.New(fmt.Sprintf("Failed opened module %s %s", file, err)) + } + // defer the closing of our jsonFile so that we can parse it later on + defer jsonFile.Close() + + byteValue, _ := ioutil.ReadAll(jsonFile) + + err = json.Unmarshal([]byte(byteValue), &configurator.DBModule) + if err != nil { + return errors.New(fmt.Sprintf("Failed unmarshal file %s %s", file, err)) + } + return nil +} + +func (configurator *Configurator) LoadProxyModules() error { + + file := configurator.ClusterConfig.ShareDir + "/opensvc/moduleset_mariadb.svc.mrm.proxy.json" + jsonFile, err := os.Open(file) + if err != nil { + return errors.New(fmt.Sprintf("Failed opened module %s %s", file, err)) + } + defer jsonFile.Close() + byteValue, _ := ioutil.ReadAll(jsonFile) + err = json.Unmarshal([]byte(byteValue), &configurator.ProxyModule) + if err != nil { + return errors.New(fmt.Sprintf("Failed unmarshal file %s %s", file, err)) + } + return nil +} + +func (configurator *Configurator) 
ConfigDiscovery(Variables map[string]string, Plugins map[string]dbhelper.Plugin) error { + + innodbmem, err := strconv.ParseUint(Variables["INNODB_BUFFER_POOL_SIZE"], 10, 64) + if err != nil { + return err + } + totalmem := innodbmem + myisammem, err := strconv.ParseUint(Variables["KEY_BUFFER_SIZE"], 10, 64) + if err != nil { + return err + } + totalmem += myisammem + qcmem, err := strconv.ParseUint(Variables["QUERY_CACHE_SIZE"], 10, 64) + if err != nil { + return err + } + if qcmem == 0 { + configurator.AddDBTag("noquerycache") + } + totalmem += qcmem + ariamem := uint64(0) + if _, ok := Variables["ARIA_PAGECACHE_BUFFER_SIZE"]; ok { + ariamem, err = strconv.ParseUint(Variables["ARIA_PAGECACHE_BUFFER_SIZE"], 10, 64) + if err != nil { + return err + } + totalmem += ariamem + } + tokumem := uint64(0) + if _, ok := Variables["TOKUDB_CACHE_SIZE"]; ok { + configurator.AddDBTag("tokudb") + tokumem, err = strconv.ParseUint(Variables["TOKUDB_CACHE_SIZE"], 10, 64) + if err != nil { + return err + } + totalmem += tokumem + } + s3mem := uint64(0) + if _, ok := Variables["S3_PAGECACHE_BUFFER_SIZE"]; ok { + configurator.AddDBTag("s3") + tokumem, err = strconv.ParseUint(Variables["S3_PAGECACHE_BUFFER_SIZE"], 10, 64) + if err != nil { + return err + } + totalmem += s3mem + } + + rocksmem := uint64(0) + if _, ok := Variables["ROCKSDB_BLOCK_CACHE_SIZE"]; ok { + configurator.AddDBTag("myrocks") + tokumem, err = strconv.ParseUint(Variables["ROCKSDB_BLOCK_CACHE_SIZE"], 10, 64) + if err != nil { + return err + } + totalmem += rocksmem + } + + sharedmempcts, _ := configurator.ClusterConfig.GetMemoryPctShared() + totalmem = totalmem + totalmem*uint64(sharedmempcts["threads"])/100 + configurator.SetDBMemory(strconv.FormatUint((totalmem / 1024 / 1024), 10)) + configurator.SetDBCores(Variables["THREAD_POOL_SIZE"]) + + if Variables["INNODB_DOUBLEWRITE"] == "OFF" { + configurator.AddDBTag("nodoublewrite") + } + if Variables["INNODB_FLUSH_LOG_AT_TRX_COMMIT"] != "1" && Variables["SYNC_BINLOG"] != "1" { + configurator.AddDBTag("nodurable") + } + if Variables["INNODB_FLUSH_METHOD"] != "O_DIRECT" { + configurator.AddDBTag("noodirect") + } + if Variables["LOG_BIN_COMPRESS"] == "ON" { + configurator.AddDBTag("compressbinlog") + } + if Variables["INNODB_DEFRAGMENT"] == "ON" { + configurator.AddDBTag("autodefrag") + } + if Variables["INNODB_COMPRESSION_DEFAULT"] == "ON" { + configurator.AddDBTag("compresstable") + } + + if configurator.HasInstallPlugin(Plugins, "BLACKHOLE") { + configurator.AddDBTag("blackhole") + } + if configurator.HasInstallPlugin(Plugins, "QUERY_RESPONSE_TIME") { + configurator.AddDBTag("userstats") + } + if configurator.HasInstallPlugin(Plugins, "SQL_ERROR_LOG") { + configurator.AddDBTag("sqlerror") + } + if configurator.HasInstallPlugin(Plugins, "METADATA_LOCK_INFO") { + configurator.AddDBTag("metadatalocks") + } + if configurator.HasInstallPlugin(Plugins, "SERVER_AUDIT") { + configurator.AddDBTag("audit") + } + if Variables["SLOW_QUERY_LOG"] == "ON" { + configurator.AddDBTag("slow") + } + if Variables["GENERAL_LOG"] == "ON" { + configurator.AddDBTag("general") + } + if Variables["PERFORMANCE_SCHEMA"] == "ON" { + configurator.AddDBTag("pfs") + } + if Variables["LOG_OUTPUT"] == "TABLE" { + configurator.AddDBTag("logtotable") + } + + if configurator.HasInstallPlugin(Plugins, "CONNECT") { + configurator.AddDBTag("connect") + } + if configurator.HasInstallPlugin(Plugins, "SPIDER") { + configurator.AddDBTag("spider") + } + if configurator.HasInstallPlugin(Plugins, "SPHINX") { + 
configurator.AddDBTag("sphinx") + } + if configurator.HasInstallPlugin(Plugins, "MROONGA") { + configurator.AddDBTag("mroonga") + } + if configurator.HasWsrep(Variables) { + configurator.AddDBTag("wsrep") + } + //missing in compliance + if configurator.HasInstallPlugin(Plugins, "ARCHIVE") { + configurator.AddDBTag("archive") + } + + if configurator.HasInstallPlugin(Plugins, "CRACKLIB_PASSWORD_CHECK") { + configurator.AddDBTag("pwdcheckcracklib") + } + if configurator.HasInstallPlugin(Plugins, "SIMPLE_PASSWORD_CHECK") { + configurator.AddDBTag("pwdchecksimple") + } + + if Variables["LOCAL_INFILE"] == "ON" { + configurator.AddDBTag("localinfile") + } + if Variables["SKIP_NAME_RESOLVE"] == "OFF" { + configurator.AddDBTag("resolvdns") + } + if Variables["READ_ONLY"] == "ON" { + configurator.AddDBTag("readonly") + } + if Variables["HAVE_SSL"] == "YES" { + configurator.AddDBTag("ssl") + } + + if Variables["BINLOG_FORMAT"] == "STATEMENT" { + configurator.AddDBTag("statement") + } + if Variables["BINLOG_FORMAT"] == "ROW" { + configurator.AddDBTag("row") + } + if Variables["LOG_BIN"] == "OFF" { + configurator.AddDBTag("nobinlog") + } + if Variables["LOG_BIN"] == "OFF" { + configurator.AddDBTag("nobinlog") + } + if Variables["LOG_SLAVE_UPDATES"] == "OFF" { + configurator.AddDBTag("nologslaveupdates") + } + if Variables["RPL_SEMI_SYNC_MASTER_ENABLED"] == "ON" { + configurator.AddDBTag("semisync") + } + if Variables["GTID_STRICT_MODE"] == "ON" { + configurator.AddDBTag("gtidstrict") + } + if strings.Contains(Variables["SLAVE_TYPE_COVERSIONS"], "ALL_NON_LOSSY") || strings.Contains(Variables["SLAVE_TYPE_COVERSIONS"], "ALL_LOSSY") { + configurator.AddDBTag("lossyconv") + } + if Variables["SLAVE_EXEC_MODE"] == "IDEMPOTENT" { + configurator.AddDBTag("idempotent") + } + + //missing in compliance + if strings.Contains(Variables["OPTIMIZER_SWITCH"], "SUBQUERY_CACHE=ON") { + configurator.AddDBTag("subquerycache") + } + if strings.Contains(Variables["OPTIMIZER_SWITCH"], "SEMIJOIN_WITH_CACHE=ON") { + configurator.AddDBTag("semijoincache") + } + if strings.Contains(Variables["OPTIMIZER_SWITCH"], "FIRSTMATCH=ON") { + configurator.AddDBTag("firstmatch") + } + if strings.Contains(Variables["OPTIMIZER_SWITCH"], "EXTENDED_KEYS=ON") { + configurator.AddDBTag("extendedkeys") + } + if strings.Contains(Variables["OPTIMIZER_SWITCH"], "LOOSESCAN=ON") { + configurator.AddDBTag("loosescan") + } + if strings.Contains(Variables["OPTIMIZER_SWITCH"], "INDEX_CONDITION_PUSHDOWN=OFF") { + configurator.AddDBTag("noicp") + } + if strings.Contains(Variables["OPTIMIZER_SWITCH"], "IN_TO_EXISTS=OFF") { + configurator.AddDBTag("nointoexists") + } + if strings.Contains(Variables["OPTIMIZER_SWITCH"], "DERIVED_MERGE=OFF") { + configurator.AddDBTag("noderivedmerge") + } + if strings.Contains(Variables["OPTIMIZER_SWITCH"], "DERIVED_WITH_KEYS=OFF") { + configurator.AddDBTag("noderivedwithkeys") + } + if strings.Contains(Variables["OPTIMIZER_SWITCH"], "MRR=OFF") { + configurator.AddDBTag("nomrr") + } + if strings.Contains(Variables["OPTIMIZER_SWITCH"], "OUTER_JOIN_WITH_CACHE=OFF") { + configurator.AddDBTag("noouterjoincache") + } + if strings.Contains(Variables["OPTIMIZER_SWITCH"], "SEMI_JOIN_WITH_CACHE=OFF") { + configurator.AddDBTag("nosemijoincache") + } + if strings.Contains(Variables["OPTIMIZER_SWITCH"], "TABLE_ELIMINATION=OFF") { + configurator.AddDBTag("notableelimination") + } + if strings.Contains(Variables["SQL_MODE"], "ORACLE") { + configurator.AddDBTag("sqlmodeoracle") + } + if Variables["SQL_MODE"] == "" { + 
configurator.AddDBTag("sqlmodeunstrict") + } + //index_merge=on + //index_merge_union=on, + //index_merge_sort_union=on + //index_merge_intersection=on + //index_merge_sort_intersection=off + //engine_condition_pushdown=on + //materialization=on + //semijoin=on + //partial_match_rowid_merge=on + //partial_match_table_scan=on, + //mrr_cost_based=off + //mrr_sort_keys=on, + //join_cache_incremental=on, + //join_cache_hashed=on, + //join_cache_bka=on, + //optimize_join_buffer_size=on, + //orderby_uses_equalities=on + //condition_pushdown_for_derived=on + //split_materialized=on// + //condition_pushdown_for_subquery=on, + //rowid_filter=on + //condition_pushdown_from_having=on + + if Variables["TX_ISOLATION"] == "READ-COMMITTED" { + configurator.AddDBTag("readcommitted") + } + //missing + if Variables["TX_ISOLATION"] == "READ-UNCOMMITTED" { + configurator.AddDBTag("readuncommitted") + } + if Variables["TX_ISOLATION"] == "REPEATABLE-READ" { + configurator.AddDBTag("reapeatableread") + } + if Variables["TX_ISOLATION"] == "SERIALIZED" { + configurator.AddDBTag("serialized") + } + + if Variables["JOIN_CACHE_LEVEL"] == "8" { + configurator.AddDBTag("hashjoin") + } + if Variables["JOIN_CACHE_LEVEL"] == "6" { + configurator.AddDBTag("mrrjoin") + } + if Variables["JOIN_CACHE_LEVEL"] == "2" { + configurator.AddDBTag("nestedjoin") + } + if Variables["LOWER_CASE_TABLE_NAMES"] == "1" { + configurator.AddDBTag("lowercasetable") + } + if Variables["USER_STAT_TABLES"] == "PREFERABLY_FOR_QUERIES" { + configurator.AddDBTag("eits") + } + + if Variables["CHARACTER_SET_SERVER"] == "UTF8MB4" { + if strings.Contains(Variables["COLLATION_SERVER"], "_ci") { + configurator.AddDBTag("bm4ci") + } else { + configurator.AddDBTag("bm4cs") + } + } + if Variables["CHARACTER_SET_SERVER"] == "UTF8" { + if strings.Contains(Variables["COLLATION_SERVER"], "_ci") { + configurator.AddDBTag("utf8ci") + } else { + configurator.AddDBTag("utf8cs") + } + } + + //slave_parallel_mode = optimistic + /* + + tmpmem, err := strconv.ParseUint(Variables["TMP_TABLE_SIZE"], 10, 64) + if err != nil { + return err + } + qttmp, err := strconv.ParseUint(Variables["MAX_TMP_TABLES"], 10, 64) + if err != nil { + return err + } + tmpmem = tmpmem * qttmp + totalmem += tmpmem + + cores, err := strconv.ParseUint(Variables["THREAD_POOL_SIZE"], 10, 64) + if err != nil { + return err + } + + joinmem, err := strconv.ParseUint(Variables["JOIN_BUFFER_SPACE_LIMIT"], 10, 64) + joinmem = joinmem * cores + + sortmem, err := strconv.ParseUint(Variables["SORT_BUFFER_SIZE"], 10, 64) + */ + // + // containermem = containermem * int64(sharedmempcts["innodb"]) / 100 + + return nil +} + +func (configurator *Configurator) GenerateProxyConfig(Datadir string, TemplateEnv map[string]string) error { + + type File struct { + Path string `json:"path"` + Content string `json:"fmt"` + } + os.RemoveAll(Datadir + "/init") + // Extract files + for _, rule := range configurator.ProxyModule.Rulesets { + + if strings.Contains(rule.Name, "mariadb.svc.mrm.proxy.cnf") { + + for _, variable := range rule.Variables { + + if variable.Class == "file" || variable.Class == "fileprop" { + var f File + json.Unmarshal([]byte(variable.Value), &f) + fpath := strings.Replace(f.Path, "%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%", Datadir+"/init", -1) + dir := filepath.Dir(fpath) + // proxy.ClusterGroup.LogPrintf(LvlInfo, "Config create %s", fpath) + // create directory + if _, err := os.Stat(dir); os.IsNotExist(err) { + err := os.MkdirAll(dir, os.FileMode(0775)) + if err != nil { + return 
errors.New(fmt.Sprintf("Compliance create directory %q: %s", dir, err)) + } + } + // proxy.ClusterGroup.LogPrintf(LvlInfo, "rule %s filter %s %t", rule.Name, rule.Filter, proxy.IsFilterInTags(rule.Filter)) + if fpath[len(fpath)-1:] != "/" && (configurator.IsFilterInProxyTags(rule.Filter) || rule.Filter == "") { + content := misc.ExtractKey(f.Content, TemplateEnv) + outFile, err := os.Create(fpath) + if err != nil { + return errors.New(fmt.Sprintf("Compliance create file failed %q: %s", fpath, err)) + } else { + _, err = outFile.WriteString(content) + + if err != nil { + return errors.New(fmt.Sprintf("Compliance writing file failed %q: %s", fpath, err)) + } + outFile.Close() + //server.ClusterGroup.LogPrintf(LvlInfo, "Variable name %s", variable.Name) + + } + + } + } + } + } + } + // processing symlink + type Link struct { + Symlink string `json:"symlink"` + Target string `json:"target"` + } + for _, rule := range configurator.ProxyModule.Rulesets { + if strings.Contains(rule.Name, "mariadb.svc.mrm.proxy.cnf") { + for _, variable := range rule.Variables { + if variable.Class == "symlink" { + if configurator.IsFilterInProxyTags(rule.Filter) || rule.Name == "mariadb.svc.mrm.proxy.cnf" { + var f Link + json.Unmarshal([]byte(variable.Value), &f) + fpath := strings.Replace(f.Symlink, "%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%", Datadir+"/init", -1) + /* if proxy.ClusterGroup.Conf.LogLevel > 2 { + proxy.ClusterGroup.LogPrintf(LvlInfo, "Config symlink %s", fpath) + } + */ + os.Symlink(f.Target, fpath) + + } + } + } + } + } + + if configurator.HaveProxyTag("docker") { + err := misc.ChownR(Datadir+"/init/data", 999, 999) + if err != nil { + return errors.New(fmt.Sprintf("Chown failed %q: %s", Datadir+"/init/data", err)) + } + } + configurator.TarGz(Datadir+"/config.tar.gz", Datadir+"/init") + + return nil +} + +func (configurator *Configurator) GenerateDatabaseConfig(Datadir string, ClusterDir string, RemoteBasedir string, TemplateEnv map[string]string) error { + + type File struct { + Path string `json:"path"` + Content string `json:"fmt"` + } + + // Extract files + if configurator.ClusterConfig.ProvBinaryInTarball { + url, err := configurator.ClusterConfig.GetTarballUrl(configurator.ClusterConfig.ProvBinaryTarballName) + if err != nil { + return errors.New(fmt.Sprintf("Compliance get binary %s directory %s", url, err)) + } + err = misc.DownloadFileTimeout(url, Datadir+"/"+configurator.ClusterConfig.ProvBinaryTarballName, 1200) + if err != nil { + return errors.New(fmt.Sprintf("Compliance dowload binary %s directory %s", url, err)) + } + misc.Untargz(Datadir+"/init", Datadir+"/"+configurator.ClusterConfig.ProvBinaryTarballName) + } + + if configurator.ClusterConfig.ProvOrchestrator == config.ConstOrchestratorLocalhost { + os.RemoveAll(Datadir + "/init/etc") + } else { + os.RemoveAll(Datadir + "/init") + } + for _, rule := range configurator.DBModule.Rulesets { + if strings.Contains(rule.Name, "mariadb.svc.mrm.db.cnf") { + + for _, variable := range rule.Variables { + if variable.Class == "file" || variable.Class == "fileprop" { + var f File + json.Unmarshal([]byte(variable.Value), &f) + fpath := strings.Replace(f.Path, "%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%", Datadir+"/init", -1) + dir := filepath.Dir(fpath) + /* if server.ClusterGroup.Conf.LogLevel > 2 { + server.ClusterGroup.LogPrintf(LvlInfo, "Config create %s", fpath) + } + */ + // create directory + if _, err := os.Stat(dir); os.IsNotExist(err) { + err := os.MkdirAll(dir, os.FileMode(0775)) + if err != nil { + return 
errors.New(fmt.Sprintf("Compliance create directory %q: %s", dir, err)) + } + } + + if fpath[len(fpath)-1:] != "/" && (configurator.IsFilterInDBTags(rule.Filter) || rule.Name == "mariadb.svc.mrm.db.cnf.generic") { + content := misc.ExtractKey(f.Content, TemplateEnv) + + if configurator.IsFilterInDBTags("docker") && configurator.ClusterConfig.ProvOrchestrator != config.ConstOrchestratorLocalhost { + if configurator.IsFilterInDBTags("wsrep") { + //if galera don't cusomized system files + if strings.Contains(content, "./.system") { + content = "" + } + } else { + content = strings.Replace(content, "./.system", "/var/lib/mysql/.system", -1) + } + } + + if configurator.ClusterConfig.ProvOrchestrator == config.ConstOrchestratorLocalhost { + content = strings.Replace(content, "includedir ..", "includedir "+RemoteBasedir+"/init", -1) + content = strings.Replace(content, "../etc/mysql", RemoteBasedir+"/init/etc/mysql", -1) + + } else if configurator.ClusterConfig.ProvOrchestrator == config.ConstOrchestratorSlapOS { + content = strings.Replace(content, "includedir ..", "includedir "+RemoteBasedir+"/", -1) + content = strings.Replace(content, "../etc/mysql", RemoteBasedir+"/etc/mysql", -1) + content = strings.Replace(content, "./.system", RemoteBasedir+"/var/lib/mysql/.system", -1) + } + outFile, err := os.Create(fpath) + if err != nil { + return errors.New(fmt.Sprintf("Compliance create file failed %q: %s", fpath, err)) + } else { + _, err = outFile.WriteString(content) + + if err != nil { + return errors.New(fmt.Sprintf("Compliance writing file failed %q: %s", fpath, err)) + } + outFile.Close() + //server.ClusterGroup.LogPrintf(LvlInfo, "Variable name %s", variable.Name) + } + + } + } + } + } + } + // processing symlink + type Link struct { + Symlink string `json:"symlink"` + Target string `json:"target"` + } + for _, rule := range configurator.DBModule.Rulesets { + if strings.Contains(rule.Name, "mariadb.svc.mrm.db.cnf.generic") { + for _, variable := range rule.Variables { + if variable.Class == "symlink" { + if configurator.IsFilterInDBTags(rule.Filter) || rule.Name == "mariadb.svc.mrm.db.cnf.generic" { + var f Link + json.Unmarshal([]byte(variable.Value), &f) + fpath := strings.Replace(f.Symlink, "%%ENV:SVC_CONF_ENV_BASE_DIR%%/%%ENV:POD%%", Datadir+"/init", -1) + /* if configurator.ClusterConfig.LogLevel > 2 { + server.ClusterGroup.LogPrintf(LvlInfo, "Config symlink %s", fpath) + } */ + os.Symlink(f.Target, fpath) + // keys := strings.Split(variable.Value, " ") + } + } + } + } + } + + if configurator.HaveDBTag("docker") { + err := misc.ChownR(Datadir+"/init/data", 999, 999) + if err != nil { + return errors.New(fmt.Sprintf("Chown failed %q: %s", Datadir+"/init/data", err)) + } + err = misc.ChmodR(Datadir+"/init/init", 0755) + if err != nil { + return errors.New(fmt.Sprintf("Chown failed %q: %s", Datadir+"/init/init", err)) + } + } + + misc.CopyFile(ClusterDir+"/ca-cert.pem", Datadir+"/init/etc/mysql/ssl/ca-cert.pem") + misc.CopyFile(ClusterDir+"/server-cert.pem", Datadir+"/init/etc/mysql/ssl/server-cert.pem") + misc.CopyFile(ClusterDir+"/server-key.pem", Datadir+"/init/etc/mysql/ssl/server-key.pem") + misc.CopyFile(ClusterDir+"/client-cert.pem", Datadir+"/init/etc/mysql/ssl/client-cert.pem") + misc.CopyFile(ClusterDir+"/client-key.pem", Datadir+"/init/etc/mysql/ssl/client-key.pem") + + configurator.TarGz(Datadir+"/config.tar.gz", Datadir+"/init") + + return nil +} + +func (configurator *Configurator) GetDatabaseDynamicConfig(filter string, cmd string, Datadir string) (string, error) { + 
mydynamicconf := "" + // processing symlink + type Link struct { + Symlink string `json:"symlink"` + Target string `json:"target"` + } + for _, rule := range configurator.DBModule.Rulesets { + if strings.Contains(rule.Name, "mariadb.svc.mrm.db.cnf.generic") { + for _, variable := range rule.Variables { + if variable.Class == "symlink" { + if configurator.IsFilterInDBTags(rule.Filter) || rule.Name == "mariadb.svc.mrm.db.cnf.generic" { + // server.ClusterGroup.LogPrintf(LvlInfo, "content %s %s", filter, rule.Filter) + if filter == "" || strings.Contains(rule.Filter, filter) { + var f Link + json.Unmarshal([]byte(variable.Value), &f) + fpath := Datadir + "/init/etc/mysql/conf.d/" + // server.ClusterGroup.LogPrintf(LvlInfo, "Config symlink %s , %s", fpath, f.Target) + file, err := os.Open(fpath + f.Target) + if err == nil { + r, _ := regexp.Compile(cmd) + scanner := bufio.NewScanner(file) + for scanner.Scan() { + // server.ClusterGroup.LogPrintf(LvlInfo, "content: %s", scanner.Text()) + if r.MatchString(scanner.Text()) { + mydynamicconf = mydynamicconf + strings.Split(scanner.Text(), ":")[1] + } + } + file.Close() + + } else { + return mydynamicconf, errors.New(fmt.Sprintf("Error in dynamic config: %s", err)) + } + } + } + } + } + } + } + return mydynamicconf, nil +} diff --git a/cluster/configurator/configurator_add.go b/cluster/configurator/configurator_add.go new file mode 100644 index 000000000..3b5e3a171 --- /dev/null +++ b/cluster/configurator/configurator_add.go @@ -0,0 +1,14 @@ +// replication-manager - Replication Manager Monitoring and CLI for MariaDB and MySQL +// Copyright 2017 Signal 18 Cloud SAS +// Authors: Guillaume Lefranc +// Stephane Varoqui +// This source code is licensed under the GNU General Public License, version 3. + +package configurator + +func (configurator *Configurator) AddProxyTag(tag string) { + configurator.ProxyTags = append(configurator.ProxyTags, tag) +} +func (configurator *Configurator) AddDBTag(tag string) { + configurator.DBTags = append(configurator.DBTags, tag) +} diff --git a/cluster/configurator/configurator_del.go b/cluster/configurator/configurator_del.go new file mode 100644 index 000000000..d3b199ada --- /dev/null +++ b/cluster/configurator/configurator_del.go @@ -0,0 +1,34 @@ +// replication-manager - Replication Manager Monitoring and CLI for MariaDB and MySQL +// Copyright 2017 Signal 18 Cloud SAS +// Authors: Guillaume Lefranc +// Stephane Varoqui +// This source code is licensed under the GNU General Public License, version 3. 
+ +package configurator + +func (configurator *Configurator) DropProxyTag(dtag string) { + var newtags []string + for _, tag := range configurator.ProxyTags { + // cluster.LogPrintf(LvlInfo, "%s %s", tag, dtag) + if dtag != tag { + newtags = append(newtags, tag) + } + } + configurator.ProxyTags = newtags +} + +func (configurator *Configurator) DropDBTagConfig(dtag string) bool { + + var newtags []string + changed := false + for _, tag := range configurator.DBTags { + if dtag != tag { + newtags = append(newtags, tag) + } + } + if len(configurator.DBTags) != len(newtags) { + changed = true + configurator.SetDBTags(newtags) + } + return changed +} diff --git a/cluster/configurator/configurator_get.go b/cluster/configurator/configurator_get.go new file mode 100644 index 000000000..b1ccd1f73 --- /dev/null +++ b/cluster/configurator/configurator_get.go @@ -0,0 +1,317 @@ +// replication-manager - Replication Manager Monitoring and CLI for MariaDB and MySQL +// Copyright 2017 Signal 18 Cloud SAS +// Authors: Guillaume Lefranc +// Stephane Varoqui +// This source code is licensed under the GNU General Public License, version 3. + +package configurator + +import ( + "hash/crc32" + "strconv" + "strings" + + "github.com/signal18/replication-manager/config" +) + +func (configurator *Configurator) GetDBModuleTags() []config.Tag { + var tags []config.Tag + for _, value := range configurator.DBModule.Filtersets { + var t config.Tag + t.Id = value.ID + s := strings.Split(value.Name, ".") + t.Name = s[len(s)-1] + t.Category = s[len(s)-2] + tags = append(tags, t) + } + return tags +} + +func (configurator *Configurator) GetDBTags() []string { + return configurator.DBTags +} +func (configurator *Configurator) GetProxyTags() []string { + return configurator.ProxyTags +} + +func (configurator *Configurator) GetProxyModuleTags() []config.Tag { + var tags []config.Tag + for _, value := range configurator.ProxyModule.Filtersets { + var t config.Tag + t.Id = value.ID + s := strings.SplitAfter(value.Name, ".") + t.Name = s[len(s)-1] + tags = append(tags, t) + } + return tags +} + +func (configurator *Configurator) GetConfigMaxConnections() string { + return strconv.Itoa(configurator.ClusterConfig.ProvMaxConnections) +} + +func (configurator *Configurator) GetConfigExpireLogDays() string { + return strconv.Itoa(configurator.ClusterConfig.ProvExpireLogDays) +} + +func (configurator *Configurator) GetConfigRelaySpaceLimit() string { + return strconv.Itoa(10 * 1024 * 1024) +} + +func (configurator *Configurator) GetConfigReplicationDomain(ClusterName string) string { + // Multi source need differnt domain id + if configurator.ClusterConfig.MasterConn != "" && configurator.ClusterConfig.ProvDomain == "0" { + crcTable := crc32.MakeTable(0xD5828281) + return strconv.FormatUint(uint64(crc32.Checksum([]byte(ClusterName), crcTable)), 10) + } + return configurator.ClusterConfig.ProvDomain +} + +// GetConfigInnoDBBPSize configure 80% of the ConfigMemory in Megabyte +func (configurator *Configurator) GetConfigInnoDBBPSize() string { + containermem, err := strconv.ParseInt(configurator.ClusterConfig.ProvMem, 10, 64) + if err != nil { + return "128" + } + sharedmempcts, _ := configurator.ClusterConfig.GetMemoryPctShared() + + containermem = containermem * int64(sharedmempcts["innodb"]) / 100 + s10 := strconv.FormatInt(containermem, 10) + return s10 +} + +func (configurator *Configurator) GetConfigMyISAMKeyBufferSize() string { + containermem, err := strconv.ParseInt(configurator.ClusterConfig.ProvMem, 10, 64) + if err != nil { + 
return "128" + } + sharedmempcts, _ := configurator.ClusterConfig.GetMemoryPctShared() + + containermem = containermem * int64(sharedmempcts["myisam"]) / 100 + s10 := strconv.FormatInt(containermem, 10) + return s10 +} + +func (configurator *Configurator) GetConfigTokuDBBufferSize() string { + containermem, err := strconv.ParseInt(configurator.ClusterConfig.ProvMem, 10, 64) + if err != nil { + return "128" + } + sharedmempcts, _ := configurator.ClusterConfig.GetMemoryPctShared() + + containermem = containermem * int64(sharedmempcts["tokudb"]) / 100 + s10 := strconv.FormatInt(containermem, 10) + return s10 +} + +func (configurator *Configurator) GetConfigQueryCacheSize() string { + containermem, err := strconv.ParseInt(configurator.ClusterConfig.ProvMem, 10, 64) + if err != nil { + return "128" + } + sharedmempcts, _ := configurator.ClusterConfig.GetMemoryPctShared() + containermem = containermem * int64(sharedmempcts["querycache"]) / 100 + s10 := strconv.FormatInt(containermem, 10) + return s10 +} + +func (configurator *Configurator) GetConfigAriaCacheSize() string { + containermem, err := strconv.ParseInt(configurator.ClusterConfig.ProvMem, 10, 64) + if err != nil { + return "128" + } + sharedmempcts, _ := configurator.ClusterConfig.GetMemoryPctShared() + containermem = containermem * int64(sharedmempcts["aria"]) / 100 + s10 := strconv.FormatInt(containermem, 10) + return s10 +} + +func (configurator *Configurator) GetConfigS3CacheSize() string { + containermem, err := strconv.ParseInt(configurator.ClusterConfig.ProvMem, 10, 64) + if err != nil { + return "128" + } + sharedmempcts, _ := configurator.ClusterConfig.GetMemoryPctShared() + containermem = containermem * int64(sharedmempcts["s3"]) / 100 + s10 := strconv.FormatInt(containermem, 10) + return s10 +} + +func (configurator *Configurator) GetConfigRocksDBCacheSize() string { + containermem, err := strconv.ParseInt(configurator.ClusterConfig.ProvMem, 10, 64) + if err != nil { + return "128" + } + sharedmempcts, _ := configurator.ClusterConfig.GetMemoryPctShared() + containermem = containermem * int64(sharedmempcts["rocksdb"]) / 100 + s10 := strconv.FormatInt(containermem, 10) + return s10 +} + +func (configurator *Configurator) GetConfigMyISAMKeyBufferSegements() string { + value, err := strconv.ParseInt(configurator.GetConfigMyISAMKeyBufferSize(), 10, 64) + if err != nil { + return "1" + } + value = value/8000 + 1 + s10 := strconv.FormatInt(value, 10) + return s10 +} + +func (configurator *Configurator) GetConfigInnoDBIOCapacity() string { + value, err := strconv.ParseInt(configurator.ClusterConfig.ProvIops, 10, 64) + if err != nil { + return "100" + } + value = value / 3 + s10 := strconv.FormatInt(value, 10) + return s10 +} + +func (configurator *Configurator) GetConfigInnoDBIOCapacityMax() string { + value, err := strconv.ParseInt(configurator.ClusterConfig.ProvIops, 10, 64) + if err != nil { + return "200" + } + s10 := strconv.FormatInt(value, 10) + return s10 +} + +func (configurator *Configurator) GetConfigInnoDBMaxDirtyPagePct() string { + /* mem, err := strconv.ParseInt(cluster.GetConfigInnoDBBPSize(), 10, 64) + if err != nil { + return "20" + } + //Compute the ration of memory compare to a G + // value := mem/1000 + + */ + var value int64 + value = 40 + s10 := strconv.FormatInt(value, 10) + return s10 +} + +func (configurator *Configurator) GetConfigInnoDBMaxDirtyPagePctLwm() string { + var value int64 + value = 20 + s10 := strconv.FormatInt(value, 10) + return s10 +} + +func (configurator *Configurator) 
GetConfigInnoDBLogFileSize() string { + //result in MB + var valuemin int64 + var valuemax int64 + valuemin = 1024 + valuemax = 20 * 1024 + value, err := strconv.ParseInt(configurator.GetConfigInnoDBBPSize(), 10, 64) + if err != nil { + return "1024" + } + value = value / 2 + if value < valuemin { + value = valuemin + } + if value > valuemax { + value = valuemax + } + if configurator.HaveDBTag("smallredolog") { + return "128" + } + s10 := strconv.FormatInt(value, 10) + return s10 +} + +func (configurator *Configurator) GetConfigInnoDBLogBufferSize() string { + //result in MB + var value int64 + value = 16 + s10 := strconv.FormatInt(value, 10) + return s10 +} + +// GetConfigInnoDBBPInstances configure BP/8G of the ConfigMemory in Megabyte +func (configurator *Configurator) GetConfigInnoDBBPInstances() string { + value, err := strconv.ParseInt(configurator.GetConfigInnoDBBPSize(), 10, 64) + if err != nil { + return "1" + } + value = value/8000 + 1 + s10 := strconv.FormatInt(value, 10) + return s10 +} + +func (configurator *Configurator) GetConfigInnoDBWriteIoThreads() string { + iopsLatency, err := strconv.ParseFloat(configurator.ClusterConfig.ProvIopsLatency, 64) + if err != nil { + return "4" + } + iops, err := strconv.ParseFloat(configurator.ClusterConfig.ProvIops, 64) + if err != nil { + return "4" + } + nbthreads := int(iopsLatency * iops) + if nbthreads < 1 { + return "1" + } + strnbthreads := strconv.Itoa(nbthreads) + return strnbthreads +} + +func (configurator *Configurator) GetConfigInnoDBReadIoThreads() string { + return configurator.ClusterConfig.ProvCores +} + +func (configurator *Configurator) GetConfigInnoDBPurgeThreads() string { + return "4" +} + +func (configurator *Configurator) GetConfigInnoDBLruFlushSize() string { + return "1024" +} + +func (configurator *Configurator) GetConfigDBCores() string { + return configurator.ClusterConfig.ProvCores +} + +func (configurator *Configurator) GetConfigDBMemory() string { + return configurator.ClusterConfig.ProvMem +} + +func (configurator *Configurator) GetConfigDBDisk() string { + return configurator.ClusterConfig.ProvDisk +} + +func (configurator *Configurator) GetConfigDBDiskIOPS() string { + return configurator.ClusterConfig.ProvIops +} + +func (configurator *Configurator) GetConfigDBMaxConnections() int { + return configurator.ClusterConfig.ProvMaxConnections +} + +func (configurator *Configurator) GetConfigProxyTags() string { + return strings.Join(configurator.ProxyTags, ",") +} + +func (configurator *Configurator) GetConfigDBTags() string { + return strings.Join(configurator.DBTags, ",") +} + +func (configurator *Configurator) GetConfigDBExpireLogDays() int { + + return configurator.ClusterConfig.ProvExpireLogDays +} + +func (configurator *Configurator) GetConfigProxyCores() string { + return configurator.ClusterConfig.ProvProxCores +} + +func (configurator *Configurator) GetProxyMemorySize() string { + return configurator.ClusterConfig.ProvProxMem +} + +func (configurator *Configurator) GetProxyDiskSize() string { + return configurator.ClusterConfig.ProvProxDisk +} diff --git a/cluster/configurator/configurator_has.go b/cluster/configurator/configurator_has.go new file mode 100644 index 000000000..f9a757333 --- /dev/null +++ b/cluster/configurator/configurator_has.go @@ -0,0 +1,72 @@ +// replication-manager - Replication Manager Monitoring and CLI for MariaDB and MySQL +// Copyright 2017 Signal 18 Cloud SAS +// Authors: Guillaume Lefranc +// Stephane Varoqui +// This source code is licensed under the GNU General Public 
License, version 3. + +package configurator + +import ( + "strings" + + "github.com/signal18/replication-manager/utils/dbhelper" +) + +func (configurator *Configurator) HasInstallPlugin(Plugins map[string]dbhelper.Plugin, name string) bool { + val, ok := Plugins[name] + if !ok { + return false + } + if val.Status == "ACTIVE" { + return true + } + return false +} + +func (configurator *Configurator) HasWsrep(Variables map[string]string) bool { + return Variables["WSREP_ON"] == "ON" +} + +func (configurator *Configurator) HaveDBTag(tag string) bool { + for _, t := range configurator.DBTags { + if t == tag { + return true + } + } + return false +} + +func (configurator *Configurator) HaveProxyTag(tag string) bool { + for _, t := range configurator.ProxyTags { + if t == tag { + return true + } + } + return false +} + +func (configurator *Configurator) IsFilterInProxyTags(filter string) bool { + tags := configurator.GetProxyTags() + for _, tag := range tags { + if strings.Contains(filter, "."+tag) { + // fmt.Println(server.ClusterGroup.Conf.ProvTags + " vs tag: " + tag + " against " + filter) + return true + } + } + return false +} + +func (configurator *Configurator) IsFilterInDBTags(filter string) bool { + tags := configurator.GetDBTags() + for _, tag := range tags { + if strings.HasSuffix(filter, tag) { + // fmt.Println(server.ClusterGroup.Conf.ProvTags + " vs tag: " + tag + " against " + filter) + return true + } + if strings.Contains(filter, "spider") { + //IsCompute identify spider nodes need to force tag spider if no present in db tags config + return true + } + } + return false +} diff --git a/cluster/configurator/configurator_set.go b/cluster/configurator/configurator_set.go new file mode 100644 index 000000000..09237158d --- /dev/null +++ b/cluster/configurator/configurator_set.go @@ -0,0 +1,73 @@ +// replication-manager - Replication Manager Monitoring and CLI for MariaDB and MySQL +// Copyright 2017 Signal 18 Cloud SAS +// Authors: Guillaume Lefranc +// Stephane Varoqui +// This source code is licensed under the GNU General Public License, version 3. 
+ +package configurator + +import ( + "strconv" + "strings" + + "github.com/signal18/replication-manager/config" +) + +func (configurator *Configurator) SetConfig(conf config.Config) { + configurator.ClusterConfig = conf + configurator.DBTags = strings.Split(conf.ProvTags, ",") + configurator.ProxyTags = strings.Split(conf.ProvProxTags, ",") +} + +func (configurator *Configurator) SetDBTags(newtags []string) { + configurator.DBTags = newtags +} + +func (configurator *Configurator) SetProxyTags(newtags []string) { + configurator.ProxyTags = newtags +} + +func (configurator *Configurator) SetDBCores(value string) { + configurator.ClusterConfig.ProvCores = value +} + +func (configurator *Configurator) SetDBMemory(value string) { + configurator.ClusterConfig.ProvMem = value +} + +func (configurator *Configurator) SetDBDisk(value string) { + configurator.ClusterConfig.ProvDisk = value +} + +func (configurator *Configurator) SetDBDiskIOPS(value string) { + configurator.ClusterConfig.ProvIops = value +} + +func (configurator *Configurator) SetDBMaxConnections(value string) { + valueNum, err := strconv.Atoi(value) + if err != nil { + configurator.ClusterConfig.ProvMaxConnections = 1000 + return + } + configurator.ClusterConfig.ProvMaxConnections = valueNum +} + +func (configurator *Configurator) SetDBExpireLogDays(value string) { + valueNum, err := strconv.Atoi(value) + if err != nil { + configurator.ClusterConfig.ProvExpireLogDays = 5 + } + configurator.ClusterConfig.ProvExpireLogDays = valueNum +} + +func (configurator *Configurator) SetProxyCores(value string) { + configurator.ClusterConfig.ProvProxCores = value +} + +func (configurator *Configurator) SetProxyMemorySize(value string) { + configurator.ClusterConfig.ProvProxMem = value +} + +func (configurator *Configurator) SetProxyDiskSize(value string) { + configurator.ClusterConfig.ProvProxDisk = value +} diff --git a/cluster/configurator/configurator_tar.go b/cluster/configurator/configurator_tar.go new file mode 100644 index 000000000..81902cc12 --- /dev/null +++ b/cluster/configurator/configurator_tar.go @@ -0,0 +1,101 @@ +// replication-manager - Replication Manager Monitoring and CLI for MariaDB and MySQL +// Copyright 2017 Signal 18 Cloud SAS +// Authors: Guillaume Lefranc +// Stephane Varoqui +// This source code is licensed under the GNU General Public License, version 3. 
+ +package configurator + +import ( + "archive/tar" + "compress/gzip" + "errors" + "fmt" + "io" + "os" + "strings" +) + +func (configurator *Configurator) TarGzWrite(_path string, tw *tar.Writer, fi os.FileInfo, trimprefix string) error { + fr, err := os.Open(_path) + if err != nil { + return errors.New(fmt.Sprintf("Compliance writing config.tar.gz failed : %s", err)) + } + defer fr.Close() + h := new(tar.Header) + var link string + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + if link, err = os.Readlink(_path); err != nil { + return nil + } + + } + h, _ = tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + h.Name = strings.TrimPrefix(_path, trimprefix) + // h.Size = fi.Size() + // h.Mode = int64(fi.Mode()) + // h.ModTime = fi.ModTime() + + err = tw.WriteHeader(h) + if err != nil { + return errors.New(fmt.Sprintf("Compliance writing config.tar.gz failed : %s", err)) + } + if !fi.Mode().IsRegular() { //nothing more to do for non-regular + return nil + } + _, err = io.Copy(tw, fr) + if err != nil { + return errors.New(fmt.Sprintf("Compliance writing config.tar.gz failed : %s", err)) + } + return nil +} + +func (configurator *Configurator) TarGz(outFilePath string, inPath string) error { + // file write + fw, err := os.Create(outFilePath) + if err != nil { + return errors.New(fmt.Sprintf("Compliance writing config.tar.gz failed : %s", err)) + } + defer fw.Close() + + // gzip write + gw := gzip.NewWriter(fw) + defer gw.Close() + + // tar write + tw := tar.NewWriter(gw) + defer tw.Close() + + configurator.IterDirectory(inPath, tw, inPath+"/") + + return nil +} + +func (configurator *Configurator) IterDirectory(dirPath string, tw *tar.Writer, trimprefix string) error { + dir, err := os.Open(dirPath) + if err != nil { + return errors.New(fmt.Sprintf("Compliance writing config.tar.gz failed : %s", err)) + } + defer dir.Close() + fis, err := dir.Readdir(0) + if err != nil { + return errors.New(fmt.Sprintf("Compliance writing config.tar.gz failed : %s", err)) + } + for _, fi := range fis { + curPath := dirPath + "/" + fi.Name() + if fi.IsDir() { + err := configurator.TarGzWrite(curPath, tw, fi, trimprefix) + if err != nil { + return err + } + configurator.IterDirectory(curPath, tw, trimprefix) + } else { + // fmt.Printf("adding... 
+func (configurator *Configurator) IterDirectory(dirPath string, tw *tar.Writer, trimprefix string) error {
+ dir, err := os.Open(dirPath)
+ if err != nil {
+ return fmt.Errorf("Compliance writing config.tar.gz failed : %s", err)
+ }
+ defer dir.Close()
+ fis, err := dir.Readdir(0)
+ if err != nil {
+ return fmt.Errorf("Compliance writing config.tar.gz failed : %s", err)
+ }
+ for _, fi := range fis {
+ curPath := dirPath + "/" + fi.Name()
+ if fi.IsDir() {
+ err := configurator.TarGzWrite(curPath, tw, fi, trimprefix)
+ if err != nil {
+ return err
+ }
+ if err := configurator.IterDirectory(curPath, tw, trimprefix); err != nil {
+ return err
+ }
+ } else {
+ // fmt.Printf("adding... %s\n", curPath)
+ if err := configurator.TarGzWrite(curPath, tw, fi, trimprefix); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
From 7465c74b2777993db51be47b143d1bcc41c5c672 Mon Sep 17 00:00:00 2001
From: svaroqui
Date: Mon, 19 Jul 2021 10:37:32 +0200
Subject: [PATCH 84/84] Flashback failed because rejoin always looked for a
 slave status and wrongly checked the new master id instead of the rejoining
 server; default binlog format to row in configuration to enable flashback
---
 cluster/srv_rejoin.go | 27 ++++++++++++++++-----------
 main.go | 2 +-
 2 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/cluster/srv_rejoin.go b/cluster/srv_rejoin.go
index 8e0574b63..1159e281d 100644
--- a/cluster/srv_rejoin.go
+++ b/cluster/srv_rejoin.go
@@ -352,13 +352,14 @@ func (server *ServerMonitor) rejoinMasterIncremental(crash *Crash) error {
 } else {
 // don't try flashback on old style replication that are ahead jump to SST
 if server.HasGTIDReplication() == false {
- return errors.New("Incremental failed")
+ server.ClusterGroup.LogPrintf("INFO", "Incremental rejoin canceled because of old style replication")
+ return errors.New("Incremental rejoin canceled because of old style replication")
 }
 }
 if crash.FailoverIOGtid != nil {
 // server.ClusterGroup.master.FailoverIOGtid.GetSeqServerIdNos(uint64(server.ServerID)) == 0
 // lookup in crash recorded is the current master
- if crash.FailoverIOGtid.GetSeqServerIdNos(uint64(server.ClusterGroup.master.ServerID)) == 0 {
+ if crash.FailoverIOGtid.GetSeqServerIdNos(uint64(server.ServerID)) == 0 {
 server.ClusterGroup.LogPrintf("INFO", "Cascading failover, consider we cannot flashback")
 server.ClusterGroup.canFlashBack = false
 } else {
@@ -653,21 +654,25 @@ func (cluster *Cluster) RejoinFixRelay(slave *ServerMonitor, relay *ServerMonito
 // UseGtid check is replication use gtid
 func (server *ServerMonitor) UsedGtidAtElection(crash *Crash) bool {
- ss, errss := server.GetSlaveStatus(server.ReplicationSourceName)
- if errss != nil {
- return false
- }
-
- server.ClusterGroup.LogPrintf(LvlDbg, "Rejoin Server use GTID %s", ss.UsingGtid.String)
+ /*
+ ss, errss := server.GetSlaveStatus(server.ReplicationSourceName)
+ if errss != nil {
+ server.ClusterGroup.LogPrintf(LvlInfo, "Failed to check if server was using GTID %s", errss)
+ return false
+ }
+ server.ClusterGroup.LogPrintf(LvlInfo, "Rejoin server using GTID %s", ss.UsingGtid.String)
+ */
 // An old master master do no have replication
 if crash.FailoverIOGtid == nil {
- server.ClusterGroup.LogPrintf(LvlDbg, "Rejoin server cannot find a saved master election GTID")
+ server.ClusterGroup.LogPrintf(LvlInfo, "Rejoin server cannot find a saved master election GTID")
 return false
 }
 if len(crash.FailoverIOGtid.GetSeqNos()) > 0 {
+ server.ClusterGroup.LogPrintf(LvlInfo, "Rejoin server found a crash GTID greater than 0")
 return true
- } else {
- return false
 }
+ server.ClusterGroup.LogPrintf(LvlInfo, "Rejoin server cannot find a GTID greater than 0")
+ return false
+
 }
diff --git a/main.go b/main.go
index 242946786..a1dd5009d 100644
--- a/main.go
+++ b/main.go
@@ -461,7 +461,7 @@ func init() {
 monitorCmd.Flags().StringVar(&conf.ProvIopsLatency, "prov-db-disk-iops-latency", "0.002", "IO latency in s")
 monitorCmd.Flags().StringVar(&conf.ProvCores, "prov-db-cpu-cores", "1", "Number of cpu cores for the micro service VM")
 monitorCmd.Flags().BoolVar(&conf.ProvDBApplyDynamicConfig, "prov-db-apply-dynamic-config", false, "Dynamic database config change")
- monitorCmd.Flags().StringVar(&conf.ProvTags, "prov-db-tags",
"semisync,innodb,noquerycache,threadpool,slow,pfs,docker,linux,readonly,diskmonitor,sqlerror,compressbinlog", "playbook configuration tags") + monitorCmd.Flags().StringVar(&conf.ProvTags, "prov-db-tags", "semisync,row,innodb,noquerycache,threadpool,slow,pfs,docker,linux,readonly,diskmonitor,sqlerror,compressbinlog", "playbook configuration tags") monitorCmd.Flags().StringVar(&conf.ProvDomain, "prov-db-domain", "0", "Config domain id for the cluster") monitorCmd.Flags().StringVar(&conf.ProvMem, "prov-db-memory", "256", "Memory in M for micro service VM") monitorCmd.Flags().StringVar(&conf.ProvMemSharedPct, "prov-db-memory-shared-pct", "threads:16,innodb:60,myisam:10,aria:10,rocksdb:1,tokudb:1,s3:1,archive:1,querycache:0", "% memory shared per buffer")