diff --git a/.gitattributes b/.gitattributes index be0e8a78a7a4..5239e1e7c969 100644 --- a/.gitattributes +++ b/.gitattributes @@ -38,7 +38,14 @@ /strings/uca_data.h !filter /internal/meb/meb/mysqloption_list.cpp !filter -# NDB is currently exempt and will be taken in a future merge. -/storage/ndb/** !filter /sql/abstract_query_plan.h !filter /sql/abstract_query_plan.cc !filter +# Some NDB source is currently exempt +/storage/ndb/clusterj/** !filter +/storage/ndb/include/** !filter +/storage/ndb/memcache/** !filter +/storage/ndb/ndbapi-examples/** !filter +/storage/ndb/nodejs/** !filter +/storage/ndb/src/** !filter +/storage/ndb/test/** !filter +/storage/ndb/tools/** !filter diff --git a/storage/ndb/.clang-format b/storage/ndb/.clang-format deleted file mode 100644 index a3519cbb9b55..000000000000 --- a/storage/ndb/.clang-format +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License, version 2.0, -# as published by the Free Software Foundation. -# -# This program is also distributed with certain software (including -# but not limited to OpenSSL) that is licensed under separate terms, -# as designated in a particular file or component or in included license -# documentation. The authors of MySQL hereby grant you an additional -# permission to link the program and your derivative works with the -# separately licensed software that they have included with MySQL. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License, version 2.0, for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA ---- -Language: Cpp -BasedOnStyle: Google -BinPackArguments: false -BinPackParameters: false -BreakBeforeBraces: Allman -SortIncludes: false -Standard: Cpp11 ---- -Language: Java ---- -Language: JavaScript ---- diff --git a/storage/ndb/coding_guidelines.h b/storage/ndb/coding_guidelines.h index d693b37282f6..add22272d05b 100644 --- a/storage/ndb/coding_guidelines.h +++ b/storage/ndb/coding_guidelines.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License, version 2.0, @@ -21,7 +21,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /** - @page CPP_CODING_GUIDELINES_FOR_NDB_SE C++ Coding Guidelines for the NDB Storage Engine + @page CPP_CODING_GUIDELINES_FOR_NDB_SE C++ Coding Guidelines for the NDB + Storage Engine The mysqld handler part of NDB (ha_ndbcluster.cc, ha_ndbcluster_binlog.cc, etc.) 
uses the same coding style as @@ -56,7 +57,6 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - @subpage USE_OF_NDBREQUIRE */ - /** @page BRACES Braces @@ -90,7 +90,6 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ ~~~~~~~~~~~~~~~~ */ - /** @page ASSIGNMENT Assignment ~~~~~~~~~~~~~~~~ @@ -99,7 +98,6 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ ~~~~~~~~~~~~~~~~ */ - /** @page USE_OF_NDBREQUIRE Use of ndbrequire diff --git a/storage/ndb/memcache/extra/.clang-format b/storage/ndb/memcache/extra/.clang-format deleted file mode 100644 index 71a03970db02..000000000000 --- a/storage/ndb/memcache/extra/.clang-format +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License, version 2.0, -# as published by the Free Software Foundation. -# -# This program is also distributed with certain software (including -# but not limited to OpenSSL) that is licensed under separate terms, -# as designated in a particular file or component or in included license -# documentation. The authors of MySQL hereby grant you an additional -# permission to link the program and your derivative works with the -# separately licensed software that they have included with MySQL. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License, version 2.0, for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA ---- -DisableFormat: true ---- diff --git a/storage/ndb/plugin/ha_ndb_ddl_fk.cc b/storage/ndb/plugin/ha_ndb_ddl_fk.cc index a0d244cf0c40..28343a5c18bf 100644 --- a/storage/ndb/plugin/ha_ndb_ddl_fk.cc +++ b/storage/ndb/plugin/ha_ndb_ddl_fk.cc @@ -27,7 +27,7 @@ #include "my_dbug.h" #include "mysql/service_thd_alloc.h" #include "sql/key_spec.h" -#include "sql/mysqld.h" // global_system_variables table_alias_charset ... +#include "sql/mysqld.h" // global_system_variables table_alias_charset ... #include "sql/sql_class.h" #include "sql/sql_lex.h" #include "sql/sql_table.h" @@ -38,13 +38,13 @@ #include "storage/ndb/plugin/ndb_tdc.h" #include "template_utils.h" -#define ERR_RETURN(err) \ -{ \ - const NdbError& tmp= err; \ - DBUG_RETURN(ndb_to_mysql_error(&tmp)); \ -} +#define ERR_RETURN(err) \ + { \ + const NdbError &tmp = err; \ + DBUG_RETURN(ndb_to_mysql_error(&tmp)); \ + } -// Typedefs for long names +// Typedefs for long names typedef NdbDictionary::Dictionary NDBDICT; typedef NdbDictionary::Table NDBTAB; typedef NdbDictionary::Column NDBCOL; @@ -57,17 +57,15 @@ typedef NdbDictionary::ForeignKey NDBFK; Unlike indexes, no references to global dictionary are kept. 
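   (For example, a table that is the child of one foreign key and the
   parent of another would carry two Ndb_fk_item entries in the list,
   with cnt_child == 1 and cnt_parent == 1; an illustrative reading of
   the fields below.)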
 */
-struct Ndb_fk_item
-{
+struct Ndb_fk_item {
   FOREIGN_KEY_INFO f_key_info;
-  int update_action; // NDBFK::FkAction
+  int update_action;  // NDBFK::FkAction
   int delete_action;
   bool is_child;
   bool is_parent;
 };
 
-struct Ndb_fk_data
-{
+struct Ndb_fk_data {
   List<Ndb_fk_item> list;
   uint cnt_child;
   uint cnt_parent;
@@ -79,32 +77,24 @@ struct Ndb_fk_data
   The actual foreign keys are not passed in handler interface so we get them
   from thd->lex :-(
 */
-static
-const NDBINDEX*
-find_matching_index(NDBDICT* dict,
-                    const NDBTAB * tab,
-                    const NDBCOL * columns[],
-                    /* OUT */ bool & matches_primary_key)
-{
+static const NDBINDEX *find_matching_index(
+    NDBDICT *dict, const NDBTAB *tab, const NDBCOL *columns[],
+    /* OUT */ bool &matches_primary_key) {
   /**
    * First check if it matches primary key
    */
   {
-    matches_primary_key= false;
+    matches_primary_key = false;
 
-    uint cnt_pk= 0, cnt_col= 0;
-    for (unsigned i = 0; columns[i] != 0; i++)
-    {
+    uint cnt_pk = 0, cnt_col = 0;
+    for (unsigned i = 0; columns[i] != 0; i++) {
       cnt_col++;
-      if (columns[i]->getPrimaryKey())
-        cnt_pk++;
+      if (columns[i]->getPrimaryKey()) cnt_pk++;
     }
 
     // check if all columns were part of full primary key
-    if (cnt_col == (uint)tab->getNoOfPrimaryKeys() &&
-        cnt_col == cnt_pk)
-    {
-      matches_primary_key= true;
+    if (cnt_col == (uint)tab->getNoOfPrimaryKeys() && cnt_col == cnt_pk) {
+      matches_primary_key = true;
       return 0;
     }
   }
@@ -114,31 +104,26 @@ find_matching_index(NDBDICT* dict,
    * first choice is unique index
    * second choice is ordered index...with as many columns as possible
    */
-  const int noinvalidate= 0;
-  uint best_matching_columns= 0;
-  const NDBINDEX* best_matching_index= 0;
+  const int noinvalidate = 0;
+  uint best_matching_columns = 0;
+  const NDBINDEX *best_matching_index = 0;
 
   NDBDICT::List index_list;
   dict->listIndexes(index_list, *tab);
-  for (unsigned i = 0; i < index_list.count; i++)
-  {
-    const char * index_name= index_list.elements[i].name;
-    const NDBINDEX* index= dict->getIndexGlobal(index_name, *tab);
-    if (index->getType() == NDBINDEX::UniqueHashIndex)
-    {
-      uint cnt= 0, j;
-      for (j = 0; columns[j] != 0; j++)
-      {
+  for (unsigned i = 0; i < index_list.count; i++) {
+    const char *index_name = index_list.elements[i].name;
+    const NDBINDEX *index = dict->getIndexGlobal(index_name, *tab);
+    if (index->getType() == NDBINDEX::UniqueHashIndex) {
+      uint cnt = 0, j;
+      for (j = 0; columns[j] != 0; j++) {
         /*
          * Search for matching columns in any order
          * since order does not matter for unique index
          */
-        bool found= false;
-        for (unsigned c = 0; c < index->getNoOfColumns(); c++)
-        {
-          if (!strcmp(columns[j]->getName(), index->getColumn(c)->getName()))
-          {
-            found= true;
+        bool found = false;
+        for (unsigned c = 0; c < index->getNoOfColumns(); c++) {
+          if (!strcmp(columns[j]->getName(), index->getColumn(c)->getName())) {
+            found = true;
             break;
           }
         }
@@ -147,103 +132,77 @@ find_matching_index(NDBDICT* dict,
         else
           break;
       }
-      if (cnt == index->getNoOfColumns() && columns[j] == 0)
-      {
+      if (cnt == index->getNoOfColumns() && columns[j] == 0) {
         /**
          * Full match...return this index, no need to look further
         */
-        if (best_matching_index)
-        {
+        if (best_matching_index) {
          // release ref to previous best candidate
-          dict->removeIndexGlobal(* best_matching_index, noinvalidate);
+          dict->removeIndexGlobal(*best_matching_index, noinvalidate);
        }
-        return index; // NOTE: also returns reference
+        return index;  // NOTE: also returns reference
      }
 
      /**
       * Not full match...i.e not usable
       */
-      dict->removeIndexGlobal(* index, noinvalidate);
+      dict->removeIndexGlobal(*index, noinvalidate);
      continue;
-    }
-    else if (index->getType() == NDBINDEX::OrderedIndex)
-    {
-      uint cnt= 0;
-      for (; columns[cnt] != 0; cnt++)
-      {
-        const NDBCOL * ndbcol= index->getColumn(cnt);
-        if (ndbcol == 0)
-          break;
+    } else if (index->getType() == NDBINDEX::OrderedIndex) {
+      uint cnt = 0;
+      for (; columns[cnt] != 0; cnt++) {
+        const NDBCOL *ndbcol = index->getColumn(cnt);
+        if (ndbcol == 0) break;
 
-        if (strcmp(columns[cnt]->getName(), ndbcol->getName()) != 0)
-          break;
+        if (strcmp(columns[cnt]->getName(), ndbcol->getName()) != 0) break;
      }
 
-      if (cnt > best_matching_columns)
-      {
+      if (cnt > best_matching_columns) {
        /**
         * better match...
        */
-        if (best_matching_index)
-        {
-          dict->removeIndexGlobal(* best_matching_index, noinvalidate);
+        if (best_matching_index) {
+          dict->removeIndexGlobal(*best_matching_index, noinvalidate);
        }
-        best_matching_index= index;
-        best_matching_columns= cnt;
-      }
-      else
-      {
-        dict->removeIndexGlobal(* index, noinvalidate);
+        best_matching_index = index;
+        best_matching_columns = cnt;
+      } else {
+        dict->removeIndexGlobal(*index, noinvalidate);
      }
-    }
-    else
-    {
+    } else {
      // what ?? unknown index type
      assert(false);
-      dict->removeIndexGlobal(* index, noinvalidate);
+      dict->removeIndexGlobal(*index, noinvalidate);
      continue;
    }
  }
-  return best_matching_index; // NOTE: also returns reference
+  return best_matching_index;  // NOTE: also returns reference
 }
-
-static
-void
-setDbName(Ndb* ndb, const char * name)
-{
-  if (name && strlen(name) != 0)
-  {
+
+static void setDbName(Ndb *ndb, const char *name) {
+  if (name && strlen(name) != 0) {
    ndb->setDatabaseName(name);
  }
 }
-
 template <size_t buf_size>
-const char *
-lex2str(const LEX_CSTRING& str, char (&buf)[buf_size])
-{
+const char *lex2str(const LEX_CSTRING &str, char (&buf)[buf_size]) {
   snprintf(buf, buf_size, "%.*s", (int)str.length, str.str);
   return buf;
 }
-
-static void
-ndb_fk_casedn(char *name)
-{
+static void ndb_fk_casedn(char *name) {
   DBUG_ASSERT(name != 0);
   uint length = (uint)strlen(name);
   DBUG_ASSERT(files_charset_info != 0 &&
              files_charset_info->casedn_multiply == 1);
-  files_charset_info->cset->casedn(files_charset_info,
-                                   name, length, name, length);
+  files_charset_info->cset->casedn(files_charset_info, name, length, name,
+                                   length);
 }
 
-static int
-ndb_fk_casecmp(const char* name1, const char* name2)
-{
-  if (!lower_case_table_names)
-  {
+static int ndb_fk_casecmp(const char *name1, const char *name2) {
+  if (!lower_case_table_names) {
    return strcmp(name1, name2);
  }
  char tmp1[FN_LEN + 1];
@@ -255,69 +214,60 @@ ndb_fk_casecmp(const char* name1, const char* name2)
  return strcmp(tmp1, tmp2);
 }
 
+extern bool ndb_show_foreign_key_mock_tables(THD *thd);
 
-extern bool ndb_show_foreign_key_mock_tables(THD* thd);
-
-class Fk_util
-{
-  THD* m_thd;
+class Fk_util {
+  THD *m_thd;
 
-  void
-  info(const char* fmt, ...) const
-    MY_ATTRIBUTE((format(printf, 2, 3)));
+  void info(const char *fmt, ...) const MY_ATTRIBUTE((format(printf, 2, 3)));
 
-  void
-  warn(const char* fmt, ...) const
-    MY_ATTRIBUTE((format(printf, 2, 3)));
+  void warn(const char *fmt, ...) const MY_ATTRIBUTE((format(printf, 2, 3)));
 
-  void
-  error(const NdbDictionary::Dictionary* dict, const char* fmt, ...) const
-    MY_ATTRIBUTE((format(printf, 3, 4)));
+  void error(const NdbDictionary::Dictionary *dict, const char *fmt, ...) const
+      MY_ATTRIBUTE((format(printf, 3, 4)));
 
-  void
-  remove_index_global(NdbDictionary::Dictionary* dict, const NdbDictionary::Index* index) const
-  {
-    if (!index)
-      return;
+  void remove_index_global(NdbDictionary::Dictionary *dict,
+                           const NdbDictionary::Index *index) const {
+    if (!index) return;
 
    dict->removeIndexGlobal(*index, 0);
  }
 
-  bool
-  copy_fk_to_new_parent(NdbDictionary::Dictionary* dict, NdbDictionary::ForeignKey& fk,
-                        const char* new_parent_name, const char* column_names[]) const
-  {
+  bool copy_fk_to_new_parent(NdbDictionary::Dictionary *dict,
+                             NdbDictionary::ForeignKey &fk,
+                             const char *new_parent_name,
+                             const char *column_names[]) const {
    DBUG_ENTER("copy_fk_to_new_parent");
    DBUG_PRINT("info", ("new_parent_name: %s", new_parent_name));
 
    // Load up the new parent table
    Ndb_table_guard new_parent_tab(dict, new_parent_name);
-    if (!new_parent_tab.get_table())
-    {
-      error(dict, "Failed to load potentially new parent '%s'", new_parent_name);
+    if (!new_parent_tab.get_table()) {
+      error(dict, "Failed to load potentially new parent '%s'",
+            new_parent_name);
      DBUG_RETURN(false);
    }
 
    // Build new parent column list from parent column names
-    const NdbDictionary::Column* columns[NDB_MAX_ATTRIBUTES_IN_INDEX + 1];
+    const NdbDictionary::Column *columns[NDB_MAX_ATTRIBUTES_IN_INDEX + 1];
    {
      unsigned num_columns = 0;
-      for (unsigned i = 0; column_names[i] != 0; i++)
-      {
+      for (unsigned i = 0; column_names[i] != 0; i++) {
        DBUG_PRINT("info", ("column: %s", column_names[i]));
-        const NdbDictionary::Column* col =
+        const NdbDictionary::Column *col =
            new_parent_tab.get_table()->getColumn(column_names[i]);
-        if (!col)
-        {
+        if (!col) {
          // Parent table didn't have any column with the given name, can happen
-          warn("Could not resolve '%s' as fk parent for '%s' since it didn't have "
-               "all the referenced columns", new_parent_name, fk.getChildTable());
+          warn(
+              "Could not resolve '%s' as fk parent for '%s' since it didn't "
+              "have "
+              "all the referenced columns",
+              new_parent_name, fk.getChildTable());
          DBUG_RETURN(false);
        }
-        columns[num_columns++]= col;
+        columns[num_columns++] = col;
      }
-      columns[num_columns]= 0;
+      columns[num_columns] = 0;
    }
 
    NdbDictionary::ForeignKey new_fk(fk);
@@ -325,45 +275,40 @@ class Fk_util
    // Create name for the new fk by splitting the fk's name and replacing
    // the <parent_id> part in format "<parent_id>/<child_id>/<name>"
    {
-      char name[FN_REFLEN+1];
+      char name[FN_REFLEN + 1];
      unsigned parent_id, child_id;
-      if (sscanf(fk.getName(), "%u/%u/%s",
-                 &parent_id, &child_id, name) != 3)
-      {
+      if (sscanf(fk.getName(), "%u/%u/%s", &parent_id, &child_id, name) != 3) {
        warn("Skip, failed to parse name of fk: %s", fk.getName());
        DBUG_RETURN(false);
      }
 
-      char fk_name[FN_REFLEN+1];
-      snprintf(fk_name, sizeof(fk_name), "%s",
-               name);
+      char fk_name[FN_REFLEN + 1];
+      snprintf(fk_name, sizeof(fk_name), "%s", name);
      DBUG_PRINT("info", ("Setting new fk name: %s", fk_name));
      new_fk.setName(fk_name);
    }
 
    // Find matching index
-    bool parent_primary_key= false;
-    const NdbDictionary::Index* parent_index= find_matching_index(dict,
-                                                                  new_parent_tab.get_table(),
-                                                                  columns,
-                                                                  parent_primary_key);
+    bool parent_primary_key = false;
+    const NdbDictionary::Index *parent_index = find_matching_index(
+        dict, new_parent_tab.get_table(), columns, parent_primary_key);
 
    DBUG_PRINT("info", ("parent_primary_key: %d", parent_primary_key));
 
    // Check if either pk or index matched
-    if (!parent_primary_key && parent_index == 0)
-    {
-      warn("Could not resolve '%s' as fk parent for '%s' since no matching index "
-           "could be found",
new_parent_name, fk.getChildTable()); + if (!parent_primary_key && parent_index == 0) { + warn( + "Could not resolve '%s' as fk parent for '%s' since no matching " + "index " + "could be found", + new_parent_name, fk.getChildTable()); DBUG_RETURN(false); } - if (parent_index != 0) - { - DBUG_PRINT("info", ("Setting parent with index %s", parent_index->getName())); + if (parent_index != 0) { + DBUG_PRINT("info", + ("Setting parent with index %s", parent_index->getName())); new_fk.setParent(*new_parent_tab.get_table(), parent_index, columns); - } - else - { + } else { DBUG_PRINT("info", ("Setting parent without index")); new_fk.setParent(*new_parent_tab.get_table(), 0, columns); } @@ -373,13 +318,11 @@ class Fk_util // Create new fk referencing the new table DBUG_PRINT("info", ("Create new fk: %s", new_fk.getName())); int flags = 0; - if (thd_test_options(m_thd, OPTION_NO_FOREIGN_KEY_CHECKS)) - { + if (thd_test_options(m_thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { flags |= NdbDictionary::Dictionary::CreateFK_NoVerify; } NdbDictionary::ObjectId objid; - if (dict->createForeignKey(new_fk, &objid, flags) != 0) - { + if (dict->createForeignKey(new_fk, &objid, flags) != 0) { error(dict, "Failed to create foreign key '%s'", new_fk.getName()); remove_index_global(dict, parent_index); DBUG_RETURN(false); @@ -389,19 +332,15 @@ class Fk_util DBUG_RETURN(true); } - - void - resolve_mock(NdbDictionary::Dictionary* dict, - const char* new_parent_name, const char* mock_name) const - { + void resolve_mock(NdbDictionary::Dictionary *dict, + const char *new_parent_name, const char *mock_name) const { DBUG_ENTER("resolve_mock"); DBUG_PRINT("enter", ("mock_name '%s'", mock_name)); DBUG_ASSERT(is_mock_name(mock_name)); // Load up the mock table Ndb_table_guard mock_tab(dict, mock_name); - if (!mock_tab.get_table()) - { + if (!mock_tab.get_table()) { error(dict, "Failed to load the listed mock table '%s'", mock_name); DBUG_ASSERT(false); DBUG_VOID_RETURN; @@ -409,49 +348,46 @@ class Fk_util // List dependent objects of mock table NdbDictionary::Dictionary::List list; - if (dict->listDependentObjects(list, *mock_tab.get_table()) != 0) - { - error(dict, "Failed to list dependent objects for mock table '%s'", mock_name); + if (dict->listDependentObjects(list, *mock_tab.get_table()) != 0) { + error(dict, "Failed to list dependent objects for mock table '%s'", + mock_name); DBUG_VOID_RETURN; } - for (unsigned i = 0; i < list.count; i++) - { - const NdbDictionary::Dictionary::List::Element& element = list.elements[i]; - if (element.type != NdbDictionary::Object::ForeignKey) - continue; + for (unsigned i = 0; i < list.count; i++) { + const NdbDictionary::Dictionary::List::Element &element = + list.elements[i]; + if (element.type != NdbDictionary::Object::ForeignKey) continue; DBUG_PRINT("info", ("fk: %s", element.name)); NdbDictionary::ForeignKey fk; - if (dict->getForeignKey(fk, element.name) != 0) - { + if (dict->getForeignKey(fk, element.name) != 0) { error(dict, "Could not find the listed fk '%s'", element.name); continue; } // Build column name list for parent - const char* col_names[NDB_MAX_ATTRIBUTES_IN_INDEX + 1]; + const char *col_names[NDB_MAX_ATTRIBUTES_IN_INDEX + 1]; { unsigned num_columns = 0; - for (unsigned j = 0; j < fk.getParentColumnCount(); j++) - { - const NdbDictionary::Column* col = + for (unsigned j = 0; j < fk.getParentColumnCount(); j++) { + const NdbDictionary::Column *col = mock_tab.get_table()->getColumn(fk.getParentColumnNo(j)); - if (!col) - { + if (!col) { error(NULL, "Could not find column %d 
in mock table '%s'", fk.getParentColumnNo(j), mock_name); continue; } - col_names[num_columns++]= col->getName(); + col_names[num_columns++] = col->getName(); } - col_names[num_columns]= 0; + col_names[num_columns] = 0; - if (num_columns != fk.getParentColumnCount()) - { - error(NULL, "Could not find all columns referenced by fk in mock table '%s'", - mock_name); + if (num_columns != fk.getParentColumnCount()) { + error( + NULL, + "Could not find all columns referenced by fk in mock table '%s'", + mock_name); continue; } } @@ -461,22 +397,19 @@ class Fk_util // New fk has been created between child and new parent, drop the mock // table and it's related fk - const int drop_flags= NDBDICT::DropTableCascadeConstraints; - if (dict->dropTableGlobal(*mock_tab.get_table(), drop_flags) != 0) - { + const int drop_flags = NDBDICT::DropTableCascadeConstraints; + if (dict->dropTableGlobal(*mock_tab.get_table(), drop_flags) != 0) { error(dict, "Failed to drop mock table '%s'", mock_name); continue; } - info("Dropped mock table '%s' - resolved by '%s'", mock_name, new_parent_name); + info("Dropped mock table '%s' - resolved by '%s'", mock_name, + new_parent_name); } DBUG_VOID_RETURN; } - - bool - create_mock_tables_and_drop(Ndb* ndb, NdbDictionary::Dictionary* dict, - const NdbDictionary::Table* table) - { + bool create_mock_tables_and_drop(Ndb *ndb, NdbDictionary::Dictionary *dict, + const NdbDictionary::Table *table) { DBUG_ENTER("create_mock_tables_and_drop"); DBUG_PRINT("enter", ("table: %s", table->getName())); @@ -485,25 +418,23 @@ class Fk_util and recreate those to point at a new mock */ NdbDictionary::Dictionary::List list; - if (dict->listDependentObjects(list, *table) != 0) - { - error(dict, "Failed to list dependent objects for table '%s'", table->getName()); + if (dict->listDependentObjects(list, *table) != 0) { + error(dict, "Failed to list dependent objects for table '%s'", + table->getName()); DBUG_RETURN(false); } uint fk_index = 0; - for (unsigned i = 0; i < list.count; i++) - { - const NdbDictionary::Dictionary::List::Element& element = list.elements[i]; + for (unsigned i = 0; i < list.count; i++) { + const NdbDictionary::Dictionary::List::Element &element = + list.elements[i]; - if (element.type != NdbDictionary::Object::ForeignKey) - continue; + if (element.type != NdbDictionary::Object::ForeignKey) continue; DBUG_PRINT("fk", ("name: %s, type: %d", element.name, element.type)); NdbDictionary::ForeignKey fk; - if (dict->getForeignKey(fk, element.name) != 0) - { + if (dict->getForeignKey(fk, element.name) != 0) { // Could not find the listed fk DBUG_ASSERT(false); continue; @@ -512,25 +443,25 @@ class Fk_util // Parent of the found fk should be the table to be dropped DBUG_PRINT("info", ("fk.parent: %s", fk.getParentTable())); char parent_db_and_name[FN_LEN + 1]; - const char * parent_name = fk_split_name(parent_db_and_name, fk.getParentTable()); + const char *parent_name = + fk_split_name(parent_db_and_name, fk.getParentTable()); if (strcmp(parent_db_and_name, ndb->getDatabaseName()) != 0 || - strcmp(parent_name, table->getName()) != 0) - { + strcmp(parent_name, table->getName()) != 0) { DBUG_PRINT("info", ("fk is not parent, skip")); continue; } DBUG_PRINT("info", ("fk.child: %s", fk.getChildTable())); char child_db_and_name[FN_LEN + 1]; - const char * child_name = fk_split_name(child_db_and_name, fk.getChildTable()); + const char *child_name = + fk_split_name(child_db_and_name, fk.getChildTable()); // Open child table Ndb_db_guard db_guard(ndb); setDbName(ndb, child_db_and_name); 
      Ndb_table_guard child_tab(dict, child_name);
-      if (child_tab.get_table() == 0)
-      {
+      if (child_tab.get_table() == 0) {
        error(dict, "Failed to open child table '%s'", child_name);
        DBUG_RETURN(false);
      }
@@ -538,25 +469,23 @@ class Fk_util
      /* Format mock table name */
      char mock_name[FN_REFLEN];
      if (!format_name(mock_name, sizeof(mock_name),
-                       child_tab.get_table()->getObjectId(),
-                       fk_index, parent_name))
-      {
+                       child_tab.get_table()->getObjectId(), fk_index,
+                       parent_name)) {
        error(NULL, "Failed to create mock parent table, too long mock name");
        DBUG_RETURN(false);
      }
 
-      // Build both column name and column type list from parent (which will be dropped)
-      const char* col_names[NDB_MAX_ATTRIBUTES_IN_INDEX + 1];
-      const NdbDictionary::Column* col_types[NDB_MAX_ATTRIBUTES_IN_INDEX + 1];
+      // Build both column name and column type list from parent (which will be
+      // dropped)
+      const char *col_names[NDB_MAX_ATTRIBUTES_IN_INDEX + 1];
+      const NdbDictionary::Column *col_types[NDB_MAX_ATTRIBUTES_IN_INDEX + 1];
      {
        unsigned num_columns = 0;
-        for (unsigned j = 0; j < fk.getParentColumnCount(); j++)
-        {
-          const NdbDictionary::Column* col =
+        for (unsigned j = 0; j < fk.getParentColumnCount(); j++) {
+          const NdbDictionary::Column *col =
              table->getColumn(fk.getParentColumnNo(j));
          DBUG_PRINT("col", ("[%u] %s", i, col->getName()));
-          if (!col)
-          {
+          if (!col) {
            error(NULL, "Could not find column %d in parent table '%s'",
                  fk.getParentColumnNo(j), table->getName());
            continue;
@@ -565,30 +494,28 @@ class Fk_util
          col_types[num_columns] = col;
          num_columns++;
        }
-        col_names[num_columns]= 0;
+        col_names[num_columns] = 0;
        col_types[num_columns] = 0;
 
-        if (num_columns != fk.getParentColumnCount())
-        {
-          error(NULL, "Could not find all columns referenced by fk in parent table '%s'",
+        if (num_columns != fk.getParentColumnCount()) {
+          error(NULL,
+                "Could not find all columns referenced by fk in parent table "
+                "'%s'",
                table->getName());
          continue;
        }
      }
 
-      db_guard.restore(); // restore db
+      db_guard.restore();  // restore db
 
      // Create new mock
-      if (!create(dict, mock_name, child_name,
-                  col_names, col_types))
-      {
+      if (!create(dict, mock_name, child_name, col_names, col_types)) {
        error(dict, "Failed to create mock parent table '%s'", mock_name);
        DBUG_ASSERT(false);
        DBUG_RETURN(false);
      }
 
      // Recreate fks to point at new mock
-      if (!copy_fk_to_new_parent(dict, fk, mock_name, col_names))
-      {
+      if (!copy_fk_to_new_parent(dict, fk, mock_name, col_names)) {
        DBUG_RETURN(false);
      }
 
@@ -597,9 +524,8 @@ class Fk_util
 
    // Drop the requested table and all foreign keys referring to it
    // i.e. the old fks
-    const int drop_flags= NDBDICT::DropTableCascadeConstraints;
-    if (dict->dropTableGlobal(*table, drop_flags) != 0)
-    {
+    const int drop_flags = NDBDICT::DropTableCascadeConstraints;
+    if (dict->dropTableGlobal(*table, drop_flags) != 0) {
      error(dict, "Failed to drop the requested table");
      DBUG_RETURN(false);
    }
@@ -607,65 +533,49 @@ class Fk_util
    DBUG_RETURN(true);
  }
 
-public:
-  Fk_util(THD* thd) : m_thd(thd) {}
+ public:
+  Fk_util(THD *thd) : m_thd(thd) {}
 
-  static
-  bool split_mock_name(const char* name,
-                       unsigned* child_id_ptr = NULL,
-                       unsigned* child_index_ptr = NULL,
-                       const char** parent_name = NULL)
-  {
+  static bool split_mock_name(const char *name, unsigned *child_id_ptr = NULL,
+                              unsigned *child_index_ptr = NULL,
+                              const char **parent_name = NULL) {
    const struct {
-      const char* str;
+      const char *str;
      size_t len;
-    } prefix = { STRING_WITH_LEN("NDB$FKM_") };
+    } prefix = {STRING_WITH_LEN("NDB$FKM_")};
 
-    if (strncmp(name, prefix.str, prefix.len) != 0)
-      return false;
+    if (strncmp(name, prefix.str, prefix.len) != 0) return false;
 
-    char* end;
-    const char* ptr= name + prefix.len + 1;
+    char *end;
+    const char *ptr = name + prefix.len + 1;
 
    // Parse child id
    long child_id = strtol(ptr, &end, 10);
-    if (ptr == end || child_id < 0 || *end == 0 || *end != '_')
-      return false;
-    ptr = end+1;
+    if (ptr == end || child_id < 0 || *end == 0 || *end != '_') return false;
+    ptr = end + 1;
 
    // Parse child index
    long child_index = strtol(ptr, &end, 10);
-    if (ptr == end || child_id < 0 || *end == 0 || *end != '_')
-      return false;
-    ptr = end+1;
+    if (ptr == end || child_index < 0 || *end == 0 || *end != '_') return false;
+    ptr = end + 1;
 
    // Assign and return OK
-    if (child_id_ptr)
-      *child_id_ptr = child_id;
-    if (child_index_ptr)
-      *child_index_ptr = child_index;
-    if (parent_name)
-      *parent_name = ptr;
+    if (child_id_ptr) *child_id_ptr = child_id;
+    if (child_index_ptr) *child_index_ptr = child_index;
+    if (parent_name) *parent_name = ptr;
    return true;
  }
 
-  static
-  bool is_mock_name(const char* name)
-  {
-    return split_mock_name(name);
-  }
+  static bool is_mock_name(const char *name) { return split_mock_name(name); }
 
-  static
-  const char* format_name(char buf[], size_t buf_size, int child_id,
-                          uint fk_index, const char* parent_name)
-  {
+  static const char *format_name(char buf[], size_t buf_size, int child_id,
                                 uint fk_index, const char *parent_name) {
    DBUG_ENTER("format_name");
    DBUG_PRINT("enter", ("child_id: %d, fk_index: %u, parent_name: %s",
                         child_id, fk_index, parent_name));
-    const size_t len = snprintf(buf, buf_size, "NDB$FKM_%d_%u_%s",
-                                child_id, fk_index, parent_name);
-    if (len >= buf_size - 1)
-    {
+    const size_t len = snprintf(buf, buf_size, "NDB$FKM_%d_%u_%s", child_id,
+                                fk_index, parent_name);
+    if (len >= buf_size - 1) {
      DBUG_PRINT("info", ("Size of buffer too small"));
      DBUG_RETURN(NULL);
    }
@@ -673,18 +583,15 @@ class Fk_util
    DBUG_RETURN(buf);
  }
 
-  // Adaptor function for calling create() with Mem_root_array
-  bool create(NDBDICT *dict, const char* mock_name, const char* child_name,
-              const Mem_root_array<Key_part_spec *> &key_part_list,
-              const NDBCOL * col_types[])
-  {
+  // Adaptor function for calling create() with Mem_root_array
+  bool create(NDBDICT *dict, const char *mock_name, const char *child_name,
+              const Mem_root_array<Key_part_spec *> &key_part_list,
+              const NDBCOL *col_types[]) {
    // Convert List into null terminated const char* array
-    const char* col_names[NDB_MAX_ATTRIBUTES_IN_INDEX + 1];
+    const char *col_names[NDB_MAX_ATTRIBUTES_IN_INDEX + 1];
    {
      unsigned i = 0;
-      for (const Key_part_spec *key : key_part_list)
-      {
+      for (const Key_part_spec *key : key_part_list) {
        col_names[i++] = strdup(key->get_field_name());
      }
      col_names[i] = 0;
@@ -693,47 +600,40 @@ class Fk_util
    const bool ret = create(dict, mock_name, child_name, col_names, col_types);
 
    // Free the strings in col_names array
-    for (unsigned i = 0; col_names[i] != 0; i++)
-    {
-      const char* col_name = col_names[i];
-      free(const_cast<char *>(col_name));
+    for (unsigned i = 0; col_names[i] != 0; i++) {
+      const char *col_name = col_names[i];
+      free(const_cast<char *>(col_name));
    }
 
    return ret;
  }
 
-  bool create(NDBDICT *dict, const char* mock_name, const char* child_name,
-              const char* col_names[], const NDBCOL * col_types[])
-  {
+  bool create(NDBDICT *dict, const char *mock_name, const char *child_name,
              const char *col_names[], const NDBCOL *col_types[]) {
    NDBTAB mock_tab;
 
    DBUG_ENTER("mock_table::create");
    DBUG_PRINT("enter", ("mock_name: %s", mock_name));
    DBUG_ASSERT(is_mock_name(mock_name));
 
-    if (mock_tab.setName(mock_name))
-    {
+    if (mock_tab.setName(mock_name)) {
      DBUG_RETURN(false);
    }
    mock_tab.setLogging(false);
 
    unsigned i = 0;
-    while (col_names[i])
-    {
+    while (col_names[i]) {
      NDBCOL mock_col;
 
-      const char* col_name = col_names[i];
+      const char *col_name = col_names[i];
      DBUG_PRINT("info", ("name: %s", col_name));
-      if (mock_col.setName(col_name))
-      {
+      if (mock_col.setName(col_name)) {
        DBUG_ASSERT(false);
        DBUG_RETURN(false);
      }
 
-      const NDBCOL * col= col_types[i];
-      if (!col)
-      {
+      const NDBCOL *col = col_types[i];
+      if (!col) {
        // Internal error, the two lists should be same size
        DBUG_ASSERT(col);
        DBUG_RETURN(false);
@@ -750,16 +650,14 @@ class Fk_util
      mock_col.setPrimaryKey(true);
      mock_col.setNullable(false);
 
-      if (mock_tab.addColumn(mock_col))
-      {
+      if (mock_tab.addColumn(mock_col)) {
        DBUG_RETURN(false);
      }
      i++;
    }
 
    // Create the table in NDB
-    if (dict->createTable(mock_tab) != 0)
-    {
+    if (dict->createTable(mock_tab) != 0) {
      // Error is available to caller in dict*
      DBUG_RETURN(false);
    }
@@ -767,70 +665,62 @@ class Fk_util
    DBUG_RETURN(true);
  }
 
-  bool
-  build_mock_list(NdbDictionary::Dictionary* dict,
-                  const NdbDictionary::Table* table, List<char> &mock_list)
-  {
+  bool build_mock_list(NdbDictionary::Dictionary *dict,
                       const NdbDictionary::Table *table,
                       List<char> &mock_list) {
    DBUG_ENTER("build_mock_list");
 
    NdbDictionary::Dictionary::List list;
-    if (dict->listDependentObjects(list, *table) != 0)
-    {
-      error(dict, "Failed to list dependent objects for table '%s'", table->getName());
+    if (dict->listDependentObjects(list, *table) != 0) {
+      error(dict, "Failed to list dependent objects for table '%s'",
+            table->getName());
      DBUG_RETURN(false);
    }
 
-    for (unsigned i = 0; i < list.count; i++)
-    {
-      const NdbDictionary::Dictionary::List::Element& element = list.elements[i];
-      if (element.type != NdbDictionary::Object::ForeignKey)
-        continue;
+    for (unsigned i = 0; i < list.count; i++) {
+      const NdbDictionary::Dictionary::List::Element &element =
+          list.elements[i];
+      if (element.type != NdbDictionary::Object::ForeignKey) continue;
 
      NdbDictionary::ForeignKey fk;
-      if (dict->getForeignKey(fk, element.name) != 0)
-      {
+      if (dict->getForeignKey(fk, element.name) != 0) {
        // Could not find the listed fk
        DBUG_ASSERT(false);
        continue;
      }
 
      char parent_db_and_name[FN_LEN + 1];
-      const char * name = fk_split_name(parent_db_and_name,fk.getParentTable());
+      const char *name = fk_split_name(parent_db_and_name, fk.getParentTable());
 
-      if (!Fk_util::is_mock_name(name))
-        continue;
+      if (!Fk_util::is_mock_name(name)) continue;
 
      mock_list.push_back(thd_strdup(m_thd, fk.getParentTable()));
    }
    DBUG_RETURN(true);
  }
 
-  void
-  drop_mock_list(Ndb* ndb, NdbDictionary::Dictionary* dict, List<char> &drop_list)
-  {
-    const char* full_name;
+  void drop_mock_list(Ndb *ndb, NdbDictionary::Dictionary *dict,
                      List<char> &drop_list) {
+    const char *full_name;
    List_iterator_fast<char> it(drop_list);
-    while ((full_name=it++))
-    {
+    while ((full_name = it++)) {
      DBUG_PRINT("info", ("drop table: '%s'", full_name));
      char db_name[FN_LEN + 1];
-      const char * table_name = fk_split_name(db_name, full_name);
+      const char *table_name = fk_split_name(db_name, full_name);
      Ndb_db_guard db_guard(ndb);
      setDbName(ndb, db_name);
      Ndb_table_guard mocktab_g(dict, table_name);
-      if (!mocktab_g.get_table())
-      {
-        // Could not open the mock table
-        DBUG_PRINT("error", ("Could not open the listed mock table, ignore it"));
-        DBUG_ASSERT(false);
-        continue;
+      if (!mocktab_g.get_table()) {
+        // Could not open the mock table
+        DBUG_PRINT("error",
+                   ("Could not open the listed mock table, ignore it"));
+
DBUG_ASSERT(false); + continue; } - if (dict->dropTableGlobal(*mocktab_g.get_table()) != 0) - { + if (dict->dropTableGlobal(*mocktab_g.get_table()) != 0) { DBUG_PRINT("error", ("Failed to drop the mock table '%s'", - mocktab_g.get_table()->getName())); + mocktab_g.get_table()->getName())); DBUG_ASSERT(false); continue; } @@ -838,31 +728,26 @@ class Fk_util } } - - bool - drop(Ndb* ndb, NdbDictionary::Dictionary* dict, - const NdbDictionary::Table* table) - { + bool drop(Ndb *ndb, NdbDictionary::Dictionary *dict, + const NdbDictionary::Table *table) { DBUG_ENTER("drop"); // Start schema transaction to make this operation atomic - if (dict->beginSchemaTrans() != 0) - { + if (dict->beginSchemaTrans() != 0) { error(dict, "Failed to start schema transaction"); DBUG_RETURN(false); } bool result = true; - if (!create_mock_tables_and_drop(ndb, dict, table)) - { + if (!create_mock_tables_and_drop(ndb, dict, table)) { // Operation failed, set flag to abort when ending trans result = false; } // End schema transaction - const Uint32 end_trans_flag = result ? 0 : NdbDictionary::Dictionary::SchemaTransAbort; - if (dict->endSchemaTrans(end_trans_flag) != 0) - { + const Uint32 end_trans_flag = + result ? 0 : NdbDictionary::Dictionary::SchemaTransAbort; + if (dict->endSchemaTrans(end_trans_flag) != 0) { error(dict, "Failed to end schema transaction"); result = false; } @@ -870,62 +755,52 @@ class Fk_util DBUG_RETURN(result); } - bool count_fks(NdbDictionary::Dictionary* dict, - const NdbDictionary::Table* table, uint& count) const - { + bool count_fks(NdbDictionary::Dictionary *dict, + const NdbDictionary::Table *table, uint &count) const { DBUG_ENTER("count_fks"); NdbDictionary::Dictionary::List list; - if (dict->listDependentObjects(list, *table) != 0) - { - error(dict, "Failed to list dependent objects for table '%s'", table->getName()); + if (dict->listDependentObjects(list, *table) != 0) { + error(dict, "Failed to list dependent objects for table '%s'", + table->getName()); DBUG_RETURN(false); } - for (unsigned i = 0; i < list.count; i++) - { - if (list.elements[i].type == NdbDictionary::Object::ForeignKey) - count++; + for (unsigned i = 0; i < list.count; i++) { + if (list.elements[i].type == NdbDictionary::Object::ForeignKey) count++; } DBUG_PRINT("exit", ("count: %u", count)); DBUG_RETURN(true); } - - bool drop_fk(Ndb* ndb, NdbDictionary::Dictionary* dict, const char* fk_name) - { + bool drop_fk(Ndb *ndb, NdbDictionary::Dictionary *dict, const char *fk_name) { DBUG_ENTER("drop_fk"); NdbDictionary::ForeignKey fk; - if (dict->getForeignKey(fk, fk_name) != 0) - { + if (dict->getForeignKey(fk, fk_name) != 0) { error(dict, "Could not find fk '%s'", fk_name); DBUG_ASSERT(false); DBUG_RETURN(false); } char parent_db_and_name[FN_LEN + 1]; - const char * parent_name = fk_split_name(parent_db_and_name,fk.getParentTable()); - if (Fk_util::is_mock_name(parent_name)) - { + const char *parent_name = + fk_split_name(parent_db_and_name, fk.getParentTable()); + if (Fk_util::is_mock_name(parent_name)) { // Fk is referencing a mock table, drop the table // and the constraint at the same time Ndb_db_guard db_guard(ndb); setDbName(ndb, parent_db_and_name); Ndb_table_guard mocktab_g(dict, parent_name); - if (mocktab_g.get_table()) - { - const int drop_flags= NDBDICT::DropTableCascadeConstraints; - if (dict->dropTableGlobal(*mocktab_g.get_table(), drop_flags) != 0) - { + if (mocktab_g.get_table()) { + const int drop_flags = NDBDICT::DropTableCascadeConstraints; + if (dict->dropTableGlobal(*mocktab_g.get_table(), 
drop_flags) != 0) { error(dict, "Failed to drop fk mock table '%s'", parent_name); DBUG_ASSERT(false); DBUG_RETURN(false); } // table and fk dropped DBUG_RETURN(true); - } - else - { + } else { warn("Could not open the fk mock table '%s', ignoring it...", parent_name); DBUG_ASSERT(false); @@ -933,20 +808,16 @@ class Fk_util } } - if (dict->dropForeignKey(fk) != 0) - { + if (dict->dropForeignKey(fk) != 0) { error(dict, "Failed to drop fk '%s'", fk_name); DBUG_RETURN(false); } DBUG_RETURN(true); } - - void - resolve_mock_tables(NdbDictionary::Dictionary* dict, - const char* new_parent_db, - const char* new_parent_name) const - { + void resolve_mock_tables(NdbDictionary::Dictionary *dict, + const char *new_parent_db, + const char *new_parent_name) const { DBUG_ENTER("resolve_mock_tables"); DBUG_PRINT("enter", ("new_parent_db: %s, new_parent_name: %s", new_parent_db, new_parent_name)); @@ -956,34 +827,33 @@ class Fk_util potentially be resolved to the new table */ NdbDictionary::Dictionary::List table_list; - if (dict->listObjects(table_list, NdbDictionary::Object::UserTable, true) != 0) - { + if (dict->listObjects(table_list, NdbDictionary::Object::UserTable, true) != + 0) { DBUG_ASSERT(false); DBUG_VOID_RETURN; } - for (unsigned i = 0; i < table_list.count; i++) - { - const NdbDictionary::Dictionary::List::Element& el = table_list.elements[i]; + for (unsigned i = 0; i < table_list.count; i++) { + const NdbDictionary::Dictionary::List::Element &el = + table_list.elements[i]; DBUG_ASSERT(el.type == NdbDictionary::Object::UserTable); // Check if table is in same database as the potential new parent - if (strcmp(new_parent_db, el.database) != 0) - { + if (strcmp(new_parent_db, el.database) != 0) { DBUG_PRINT("info", ("Skip, '%s.%s' is in different database", el.database, el.name)); continue; } - const char* parent_name; + const char *parent_name; if (!Fk_util::split_mock_name(el.name, NULL, NULL, &parent_name)) continue; // Check if this mock table should reference the new table - if (strcmp(parent_name, new_parent_name) != 0) - { - DBUG_PRINT("info", ("Skip, parent of this mock table is not the new table")); + if (strcmp(parent_name, new_parent_name) != 0) { + DBUG_PRINT("info", + ("Skip, parent of this mock table is not the new table")); continue; } @@ -993,30 +863,26 @@ class Fk_util DBUG_VOID_RETURN; } - - bool truncate_allowed(NdbDictionary::Dictionary* dict, const char* db, - const NdbDictionary::Table* table, bool& allow) const - { + bool truncate_allowed(NdbDictionary::Dictionary *dict, const char *db, + const NdbDictionary::Table *table, bool &allow) const { DBUG_ENTER("truncate_allowed"); NdbDictionary::Dictionary::List list; - if (dict->listDependentObjects(list, *table) != 0) - { - error(dict, "Failed to list dependent objects for table '%s'", table->getName()); + if (dict->listDependentObjects(list, *table) != 0) { + error(dict, "Failed to list dependent objects for table '%s'", + table->getName()); DBUG_RETURN(false); } allow = true; - for (unsigned i = 0; i < list.count; i++) - { - const NdbDictionary::Dictionary::List::Element& element = list.elements[i]; - if (element.type != NdbDictionary::Object::ForeignKey) - continue; + for (unsigned i = 0; i < list.count; i++) { + const NdbDictionary::Dictionary::List::Element &element = + list.elements[i]; + if (element.type != NdbDictionary::Object::ForeignKey) continue; DBUG_PRINT("info", ("fk: %s", element.name)); NdbDictionary::ForeignKey fk; - if (dict->getForeignKey(fk, element.name) != 0) - { + if (dict->getForeignKey(fk, 
element.name) != 0) { error(dict, "Could not find the listed fk '%s'", element.name); DBUG_ASSERT(false); continue; @@ -1024,11 +890,10 @@ class Fk_util // Refuse if table is parent of fk char parent_db_and_name[FN_LEN + 1]; - const char * parent_name = fk_split_name(parent_db_and_name, - fk.getParentTable()); + const char *parent_name = + fk_split_name(parent_db_and_name, fk.getParentTable()); if (strcmp(db, parent_db_and_name) != 0 || - strcmp(parent_name, table->getName()) != 0) - { + strcmp(parent_name, table->getName()) != 0) { // Not parent of the fk, skip continue; } @@ -1061,17 +926,15 @@ class Fk_util @retval true on success false on failure. */ - bool - generate_fk_constraint_string(Ndb *ndb, - const NdbDictionary::ForeignKey &fk, - const int tab_id, - const bool print_mock_table_names, - String &fk_string) - { + bool generate_fk_constraint_string(Ndb *ndb, + const NdbDictionary::ForeignKey &fk, + const int tab_id, + const bool print_mock_table_names, + String &fk_string) { DBUG_ENTER("generate_fk_constraint_string"); - const NDBTAB *parenttab= 0; - const NDBTAB *childtab= 0; + const NDBTAB *parenttab = 0; + const NDBTAB *childtab = 0; NDBDICT *dict = ndb->getDictionary(); Ndb_db_guard db_guard(ndb); @@ -1084,16 +947,14 @@ class Fk_util Ndb_table_guard parent_table_guard(dict); char parent_db_and_name[FN_LEN + 1]; { - const char *name = fk_split_name(parent_db_and_name, - fk.getParentTable()); + const char *name = fk_split_name(parent_db_and_name, fk.getParentTable()); setDbName(ndb, parent_db_and_name); parent_table_guard.init(name); - parenttab= parent_table_guard.get_table(); - if (parenttab == 0) - { - NdbError err= dict->getNdbError(); - warn("Unable to load parent table : error %d, %s", - err.code, err.message); + parenttab = parent_table_guard.get_table(); + if (parenttab == 0) { + NdbError err = dict->getNdbError(); + warn("Unable to load parent table : error %d, %s", err.code, + err.message); DBUG_RETURN(false); } } @@ -1102,22 +963,19 @@ class Fk_util Ndb_table_guard child_table_guard(dict); char child_db_and_name[FN_LEN + 1]; { - const char * name = fk_split_name(child_db_and_name, - fk.getChildTable()); + const char *name = fk_split_name(child_db_and_name, fk.getChildTable()); setDbName(ndb, child_db_and_name); child_table_guard.init(name); - childtab= child_table_guard.get_table(); - if (childtab == 0) - { - NdbError err= dict->getNdbError(); - err= dict->getNdbError(); - warn("Unable to load child table : error %d, %s", - err.code, err.message); + childtab = child_table_guard.get_table(); + if (childtab == 0) { + NdbError err = dict->getNdbError(); + err = dict->getNdbError(); + warn("Unable to load child table : error %d, %s", err.code, + err.message); DBUG_RETURN(false); } - if(!generating_for_show_create) - { + if (!generating_for_show_create) { /* Print child table name if printing error */ fk_string.append("`"); fk_string.append(child_db_and_name); @@ -1127,10 +985,8 @@ class Fk_util } } - if (generating_for_show_create) - { - if(childtab->getTableId() != tab_id) - { + if (generating_for_show_create) { + if (childtab->getTableId() != tab_id) { /** * This was on parent table (fk are shown on child table in SQL) * Skip printing this fk @@ -1145,16 +1001,15 @@ class Fk_util fk_string.append("CONSTRAINT `"); { - char db_and_name[FN_LEN+1]; - const char * name = fk_split_name(db_and_name, fk.getName()); + char db_and_name[FN_LEN + 1]; + const char *name = fk_split_name(db_and_name, fk.getName()); fk_string.append(name); } fk_string.append("` FOREIGN KEY ("); { - 
const char* separator = ""; - for (unsigned j = 0; j < fk.getChildColumnCount(); j++) - { + const char *separator = ""; + for (unsigned j = 0; j < fk.getChildColumnCount(); j++) { const int child_col_index = fk.getChildColumnNo(j); fk_string.append(separator); fk_string.append("`"); @@ -1165,31 +1020,26 @@ class Fk_util } fk_string.append(") REFERENCES `"); - if (strcmp(parent_db_and_name, child_db_and_name) != 0) - { + if (strcmp(parent_db_and_name, child_db_and_name) != 0) { /* Print db name only if the parent and child are from different dbs */ fk_string.append(parent_db_and_name); fk_string.append("`.`"); } - const char* real_parent_name; + const char *real_parent_name; if (!print_mock_table_names && - Fk_util::split_mock_name(parenttab->getName(), - NULL, NULL, &real_parent_name)) - { + Fk_util::split_mock_name(parenttab->getName(), NULL, NULL, + &real_parent_name)) { /* print the real table name */ DBUG_PRINT("info", ("real_parent_name: %s", real_parent_name)); fk_string.append(real_parent_name); - } - else - { + } else { fk_string.append(parenttab->getName()); } fk_string.append("` ("); { - const char* separator = ""; - for (unsigned j = 0; j < fk.getParentColumnCount(); j++) - { + const char *separator = ""; + for (unsigned j = 0; j < fk.getParentColumnCount(); j++) { const int parent_col_index = fk.getParentColumnNo(j); fk_string.append(separator); fk_string.append("`"); @@ -1201,57 +1051,55 @@ class Fk_util fk_string.append(")"); /* print action strings */ - switch(fk.getOnDeleteAction()){ - case NdbDictionary::ForeignKey::NoAction: - fk_string.append(" ON DELETE NO ACTION"); - break; - case NdbDictionary::ForeignKey::Restrict: - fk_string.append(" ON DELETE RESTRICT"); - break; - case NdbDictionary::ForeignKey::Cascade: - fk_string.append(" ON DELETE CASCADE"); - break; - case NdbDictionary::ForeignKey::SetNull: - fk_string.append(" ON DELETE SET NULL"); - break; - case NdbDictionary::ForeignKey::SetDefault: - fk_string.append(" ON DELETE SET DEFAULT"); - break; + switch (fk.getOnDeleteAction()) { + case NdbDictionary::ForeignKey::NoAction: + fk_string.append(" ON DELETE NO ACTION"); + break; + case NdbDictionary::ForeignKey::Restrict: + fk_string.append(" ON DELETE RESTRICT"); + break; + case NdbDictionary::ForeignKey::Cascade: + fk_string.append(" ON DELETE CASCADE"); + break; + case NdbDictionary::ForeignKey::SetNull: + fk_string.append(" ON DELETE SET NULL"); + break; + case NdbDictionary::ForeignKey::SetDefault: + fk_string.append(" ON DELETE SET DEFAULT"); + break; } - switch(fk.getOnUpdateAction()){ - case NdbDictionary::ForeignKey::NoAction: - fk_string.append(" ON UPDATE NO ACTION"); - break; - case NdbDictionary::ForeignKey::Restrict: - fk_string.append(" ON UPDATE RESTRICT"); - break; - case NdbDictionary::ForeignKey::Cascade: - fk_string.append(" ON UPDATE CASCADE"); - break; - case NdbDictionary::ForeignKey::SetNull: - fk_string.append(" ON UPDATE SET NULL"); - break; - case NdbDictionary::ForeignKey::SetDefault: - fk_string.append(" ON UPDATE SET DEFAULT"); - break; + switch (fk.getOnUpdateAction()) { + case NdbDictionary::ForeignKey::NoAction: + fk_string.append(" ON UPDATE NO ACTION"); + break; + case NdbDictionary::ForeignKey::Restrict: + fk_string.append(" ON UPDATE RESTRICT"); + break; + case NdbDictionary::ForeignKey::Cascade: + fk_string.append(" ON UPDATE CASCADE"); + break; + case NdbDictionary::ForeignKey::SetNull: + fk_string.append(" ON UPDATE SET NULL"); + break; + case NdbDictionary::ForeignKey::SetDefault: + fk_string.append(" ON UPDATE SET 
DEFAULT"); + break; } DBUG_RETURN(true); } }; -void Fk_util::info(const char* fmt, ...) const -{ +void Fk_util::info(const char *fmt, ...) const { va_list args; char msg[MYSQL_ERRMSG_SIZE]; - va_start(args,fmt); + va_start(args, fmt); vsnprintf(msg, sizeof(msg), fmt, args); va_end(args); // Push as warning if user has turned on ndb_show_foreign_key_mock_tables - if (ndb_show_foreign_key_mock_tables(m_thd)) - { + if (ndb_show_foreign_key_mock_tables(m_thd)) { push_warning(m_thd, Sql_condition::SL_WARNING, ER_YES, msg); } @@ -1259,11 +1107,10 @@ void Fk_util::info(const char* fmt, ...) const ndb_log_info("%s", msg); } -void Fk_util::warn(const char* fmt, ...) const -{ +void Fk_util::warn(const char *fmt, ...) const { va_list args; char msg[MYSQL_ERRMSG_SIZE]; - va_start(args,fmt); + va_start(args, fmt); vsnprintf(msg, sizeof(msg), fmt, args); va_end(args); push_warning(m_thd, Sql_condition::SL_WARNING, ER_CANNOT_ADD_FOREIGN, msg); @@ -1272,96 +1119,76 @@ void Fk_util::warn(const char* fmt, ...) const ndb_log_warning("%s", msg); } -void Fk_util::error(const NdbDictionary::Dictionary* dict, const char* fmt, ...) const -{ +void Fk_util::error(const NdbDictionary::Dictionary *dict, const char *fmt, + ...) const { va_list args; char msg[MYSQL_ERRMSG_SIZE]; - va_start(args,fmt); + va_start(args, fmt); vsnprintf(msg, sizeof(msg), fmt, args); va_end(args); - push_warning(m_thd, Sql_condition::SL_WARNING, - ER_CANNOT_ADD_FOREIGN, msg); + push_warning(m_thd, Sql_condition::SL_WARNING, ER_CANNOT_ADD_FOREIGN, msg); char ndb_msg[MYSQL_ERRMSG_SIZE] = {0}; - if (dict) - { + if (dict) { // Extract message from Ndb - const NdbError& error = dict->getNdbError(); - snprintf(ndb_msg, sizeof(ndb_msg), - "%d '%s'", error.code, error.message); - push_warning_printf(m_thd, Sql_condition::SL_WARNING, - ER_CANNOT_ADD_FOREIGN, "Ndb error: %s", ndb_msg); + const NdbError &error = dict->getNdbError(); + snprintf(ndb_msg, sizeof(ndb_msg), "%d '%s'", error.code, error.message); + push_warning_printf(m_thd, Sql_condition::SL_WARNING, ER_CANNOT_ADD_FOREIGN, + "Ndb error: %s", ndb_msg); } // Print error to log ndb_log_error("%s, Ndb error: %s", msg, ndb_msg); } - - -bool ndb_fk_util_build_list(THD* thd, NdbDictionary::Dictionary* dict, - const NdbDictionary::Table* table, List &mock_list) -{ +bool ndb_fk_util_build_list(THD *thd, NdbDictionary::Dictionary *dict, + const NdbDictionary::Table *table, + List &mock_list) { Fk_util fk_util(thd); return fk_util.build_mock_list(dict, table, mock_list); } - -void ndb_fk_util_drop_list(THD* thd, Ndb* ndb, NdbDictionary::Dictionary* dict, List &drop_list) -{ +void ndb_fk_util_drop_list(THD *thd, Ndb *ndb, NdbDictionary::Dictionary *dict, + List &drop_list) { Fk_util fk_util(thd); fk_util.drop_mock_list(ndb, dict, drop_list); } - -bool ndb_fk_util_drop_table(THD* thd, Ndb* ndb, NdbDictionary::Dictionary* dict, - const NdbDictionary::Table* table) -{ +bool ndb_fk_util_drop_table(THD *thd, Ndb *ndb, NdbDictionary::Dictionary *dict, + const NdbDictionary::Table *table) { Fk_util fk_util(thd); return fk_util.drop(ndb, dict, table); } - -bool ndb_fk_util_is_mock_name(const char* table_name) -{ +bool ndb_fk_util_is_mock_name(const char *table_name) { return Fk_util::is_mock_name(table_name); } - -void -ndb_fk_util_resolve_mock_tables(THD* thd, NdbDictionary::Dictionary* dict, - const char* new_parent_db, - const char* new_parent_name) -{ +void ndb_fk_util_resolve_mock_tables(THD *thd, NdbDictionary::Dictionary *dict, + const char *new_parent_db, + const char *new_parent_name) { Fk_util 
fk_util(thd); fk_util.resolve_mock_tables(dict, new_parent_db, new_parent_name); } - -bool ndb_fk_util_truncate_allowed(THD* thd, NdbDictionary::Dictionary* dict, - const char* db, - const NdbDictionary::Table* table, - bool& allowed) -{ +bool ndb_fk_util_truncate_allowed(THD *thd, NdbDictionary::Dictionary *dict, + const char *db, + const NdbDictionary::Table *table, + bool &allowed) { Fk_util fk_util(thd); - if (!fk_util.truncate_allowed(dict, db, table, allowed)) - return false; + if (!fk_util.truncate_allowed(dict, db, table, allowed)) return false; return true; } - -bool ndb_fk_util_generate_constraint_string(THD* thd, Ndb *ndb, +bool ndb_fk_util_generate_constraint_string(THD *thd, Ndb *ndb, const NdbDictionary::ForeignKey &fk, const int tab_id, const bool print_mock_table_names, - String &fk_string) -{ + String &fk_string) { Fk_util fk_util(thd); - return fk_util.generate_fk_constraint_string(ndb, fk, tab_id, - print_mock_table_names, - fk_string); + return fk_util.generate_fk_constraint_string( + ndb, fk, tab_id, print_mock_table_names, fk_string); } - /** @brief Flush the parent table after a successful addition/deletion to the Foreign Key. This is done to force reload the Parent @@ -1372,56 +1199,51 @@ bool ndb_fk_util_generate_constraint_string(THD* thd, Ndb *ndb, @param parent_name Parent table's name @return Void */ -static void -flush_parent_table_for_fk(THD* thd, - const char* parent_db, const char* parent_name) -{ +static void flush_parent_table_for_fk(THD *thd, const char *parent_db, + const char *parent_name) { DBUG_ENTER("ha_ndbcluster::flush_parent_table_for_fk"); - if(Fk_util::is_mock_name(parent_name)) - { + if (Fk_util::is_mock_name(parent_name)) { /* Parent table is mock - no need to flush */ DBUG_PRINT("debug", ("Parent table is a mock - skipped flushing")); DBUG_VOID_RETURN; } - DBUG_PRINT("debug", ("Flushing table : `%s`.`%s` ", - parent_db, parent_name)); + DBUG_PRINT("debug", ("Flushing table : `%s`.`%s` ", parent_db, parent_name)); ndb_tdc_close_cached_table(thd, parent_db, parent_name); DBUG_VOID_RETURN; } - /* @brief Guard class for references to indexes in the global NdbApi dictionary cache which need to be released(and sometimes invalidated) when guard goes out of scope */ -template class Ndb_index_release_guard { - NdbDictionary::Dictionary* const m_dict; - std::vector m_indexes; +template +class Ndb_index_release_guard { + NdbDictionary::Dictionary *const m_dict; + std::vector m_indexes; + public: - Ndb_index_release_guard(NdbDictionary::Dictionary* dict) : m_dict(dict) {} - Ndb_index_release_guard(const Ndb_index_release_guard&) = delete; + Ndb_index_release_guard(NdbDictionary::Dictionary *dict) : m_dict(dict) {} + Ndb_index_release_guard(const Ndb_index_release_guard &) = delete; ~Ndb_index_release_guard() { - for (const NdbDictionary::Index* index : m_indexes) { + for (const NdbDictionary::Index *index : m_indexes) { DBUG_PRINT("info", ("Releasing index: '%s'", index->getName())); m_dict->removeIndexGlobal(*index, invalidate_index); } } // Register index to be released - void add_index_to_release(const NdbDictionary::Index* index) { + void add_index_to_release(const NdbDictionary::Index *index) { DBUG_PRINT("info", ("Adding index '%s' to release", index->getName())); m_indexes.push_back(index); } }; -int -ha_ndbcluster::create_fks(THD *thd, Ndb *ndb) -{ +int ha_ndbcluster::create_fks(THD *thd, Ndb *ndb) { DBUG_ENTER("ha_ndbcluster::create_fks"); - NdbDictionary::Dictionary *dict= ndb->getDictionary(); + NdbDictionary::Dictionary *dict = 
ndb->getDictionary(); // Releaser for child(i.e the table being created/altered) which // need to be invalidated when released Ndb_index_release_guard child_index_releaser(dict); @@ -1430,20 +1252,17 @@ ha_ndbcluster::create_fks(THD *thd, Ndb *ndb) Ndb_index_release_guard parent_index_releaser(dict); // return real mysql error to avoid total randomness.. - const int err_default= HA_ERR_CANNOT_ADD_FOREIGN; + const int err_default = HA_ERR_CANNOT_ADD_FOREIGN; assert(thd->lex != 0); - for (const Key_spec *key : thd->lex->alter_info->key_list) - { - if (key->type != KEYTYPE_FOREIGN) - continue; + for (const Key_spec *key : thd->lex->alter_info->key_list) { + if (key->type != KEYTYPE_FOREIGN) continue; - const Foreign_key_spec * fk= down_cast(key); + const Foreign_key_spec *fk = down_cast(key); // Open the table to create foreign keys for Ndb_table_guard child_tab(dict, m_tabname); - if (child_tab.get_table() == 0) - { + if (child_tab.get_table() == 0) { ERR_RETURN(dict->getNdbError()); } @@ -1460,47 +1279,40 @@ ha_ndbcluster::create_fks(THD *thd, Ndb *ndb) /** * Get table columns columns... */ - const NDBCOL * childcols[NDB_MAX_ATTRIBUTES_IN_INDEX + 1]; - { - unsigned pos= 0; - const NDBTAB * tab= child_tab.get_table(); - for (const Key_part_spec *col : fk->columns) - { - const NDBCOL * ndbcol= tab->getColumn(col->get_field_name()); - if (ndbcol == 0) - { - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_CANNOT_ADD_FOREIGN, - "Child table %s has no column %s in NDB", - child_tab.get_table()->getName(), - col->get_field_name()); + const NDBCOL *childcols[NDB_MAX_ATTRIBUTES_IN_INDEX + 1]; + { + unsigned pos = 0; + const NDBTAB *tab = child_tab.get_table(); + for (const Key_part_spec *col : fk->columns) { + const NDBCOL *ndbcol = tab->getColumn(col->get_field_name()); + if (ndbcol == 0) { + push_warning_printf( + thd, Sql_condition::SL_WARNING, ER_CANNOT_ADD_FOREIGN, + "Child table %s has no column %s in NDB", + child_tab.get_table()->getName(), col->get_field_name()); DBUG_RETURN(err_default); } - childcols[pos++]= ndbcol; + childcols[pos++] = ndbcol; } - childcols[pos]= 0; // NULL terminate + childcols[pos] = 0; // NULL terminate } - bool child_primary_key= false; - const NDBINDEX* child_index= find_matching_index(dict, - child_tab.get_table(), - childcols, - child_primary_key); - if (child_index) - { + bool child_primary_key = false; + const NDBINDEX *child_index = find_matching_index( + dict, child_tab.get_table(), childcols, child_primary_key); + if (child_index) { child_index_releaser.add_index_to_release(child_index); } - if (!child_primary_key && child_index == 0) - { - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_CANNOT_ADD_FOREIGN, - "Child table %s foreign key columns match no index in NDB", - child_tab.get_table()->getName()); + if (!child_primary_key && child_index == 0) { + push_warning_printf( + thd, Sql_condition::SL_WARNING, ER_CANNOT_ADD_FOREIGN, + "Child table %s foreign key columns match no index in NDB", + child_tab.get_table()->getName()); DBUG_RETURN(err_default); } - Ndb_db_guard db_guard(ndb); // save db + Ndb_db_guard db_guard(ndb); // save db char parent_db[FN_REFLEN]; char parent_name[FN_REFLEN]; @@ -1508,132 +1320,108 @@ ha_ndbcluster::create_fks(THD *thd, Ndb *ndb) * Looking at Table_ident, testing for db.str first is safer * for valgrind. Do same with table.str too. 
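     * For example, a plain "REFERENCES t2 (b)" with no database qualifier
     * leaves fk->ref_db.str unset, and the code below then falls back to
     * the child table's own database held in m_dbname (an illustrative
     * reading of the branch that follows).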
*/ - if (fk->ref_db.str != 0 && fk->ref_db.length != 0) - { - snprintf(parent_db, sizeof(parent_db), "%*s", - (int)fk->ref_db.length, - fk->ref_db.str); - } - else - { + if (fk->ref_db.str != 0 && fk->ref_db.length != 0) { + snprintf(parent_db, sizeof(parent_db), "%*s", (int)fk->ref_db.length, + fk->ref_db.str); + } else { /* parent db missing - so the db is same as child's */ - snprintf(parent_db, sizeof(parent_db), "%*s", - (int)sizeof(m_dbname), m_dbname); + snprintf(parent_db, sizeof(parent_db), "%*s", (int)sizeof(m_dbname), + m_dbname); } - if (fk->ref_table.str != 0 && fk->ref_table.length != 0) - { + if (fk->ref_table.str != 0 && fk->ref_table.length != 0) { snprintf(parent_name, sizeof(parent_name), "%*s", - (int)fk->ref_table.length, - fk->ref_table.str); + (int)fk->ref_table.length, fk->ref_table.str); + } else { + parent_name[0] = 0; } - else - { - parent_name[0]= 0; - } - if (lower_case_table_names) - { + if (lower_case_table_names) { ndb_fk_casedn(parent_db); ndb_fk_casedn(parent_name); } setDbName(ndb, parent_db); Ndb_table_guard parent_tab(dict, parent_name); - if (parent_tab.get_table() == 0) - { - if (!thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) - { - const NdbError &error= dict->getNdbError(); - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_CANNOT_ADD_FOREIGN, - "Parent table %s not found in NDB: %d: %s", - parent_name, - error.code, error.message); - DBUG_RETURN(err_default); - } - - DBUG_PRINT("info", ("No parent and foreign_key_checks=0")); - - Fk_util fk_util(thd); - - /* Count the number of existing fks on table */ - uint existing = 0; - if(!fk_util.count_fks(dict, child_tab.get_table(), existing)) - { - DBUG_RETURN(err_default); - } - - /* Format mock table name */ - char mock_name[FN_REFLEN]; - if (!fk_util.format_name(mock_name, sizeof(mock_name), - child_tab.get_table()->getObjectId(), - existing, parent_name)) - { - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_CANNOT_ADD_FOREIGN, - "Failed to create mock parent table, too long mock name"); - DBUG_RETURN(err_default); - } - if (!fk_util.create(dict, mock_name, m_tabname, - fk->ref_columns, childcols)) - { - const NdbError &error= dict->getNdbError(); - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_CANNOT_ADD_FOREIGN, - "Failed to create mock parent table in NDB: %d: %s", - error.code, error.message); - DBUG_RETURN(err_default); - } - - parent_tab.init(mock_name); - parent_tab.invalidate(); // invalidate mock table when releasing - if (parent_tab.get_table() == 0) - { - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_CANNOT_ADD_FOREIGN, - "INTERNAL ERROR: Could not find created mock table '%s'", - mock_name); - // Internal error, should be able to load the just created mock table - DBUG_ASSERT(parent_tab.get_table()); - DBUG_RETURN(err_default); - } - } - - const NDBCOL * parentcols[NDB_MAX_ATTRIBUTES_IN_INDEX + 1]; + if (parent_tab.get_table() == 0) { + if (!thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { + const NdbError &error = dict->getNdbError(); + push_warning_printf(thd, Sql_condition::SL_WARNING, + ER_CANNOT_ADD_FOREIGN, + "Parent table %s not found in NDB: %d: %s", + parent_name, error.code, error.message); + DBUG_RETURN(err_default); + } + + DBUG_PRINT("info", ("No parent and foreign_key_checks=0")); + + Fk_util fk_util(thd); + + /* Count the number of existing fks on table */ + uint existing = 0; + if (!fk_util.count_fks(dict, child_tab.get_table(), existing)) { + DBUG_RETURN(err_default); + } + + /* Format mock table name */ + char 
mock_name[FN_REFLEN]; + if (!fk_util.format_name(mock_name, sizeof(mock_name), + child_tab.get_table()->getObjectId(), existing, + parent_name)) { + push_warning_printf( + thd, Sql_condition::SL_WARNING, ER_CANNOT_ADD_FOREIGN, + "Failed to create mock parent table, too long mock name"); + DBUG_RETURN(err_default); + } + if (!fk_util.create(dict, mock_name, m_tabname, fk->ref_columns, + childcols)) { + const NdbError &error = dict->getNdbError(); + push_warning_printf(thd, Sql_condition::SL_WARNING, + ER_CANNOT_ADD_FOREIGN, + "Failed to create mock parent table in NDB: %d: %s", + error.code, error.message); + DBUG_RETURN(err_default); + } + + parent_tab.init(mock_name); + parent_tab.invalidate(); // invalidate mock table when releasing + if (parent_tab.get_table() == 0) { + push_warning_printf( + thd, Sql_condition::SL_WARNING, ER_CANNOT_ADD_FOREIGN, + "INTERNAL ERROR: Could not find created mock table '%s'", + mock_name); + // Internal error, should be able to load the just created mock table + DBUG_ASSERT(parent_tab.get_table()); + DBUG_RETURN(err_default); + } + } + + const NDBCOL *parentcols[NDB_MAX_ATTRIBUTES_IN_INDEX + 1]; { - unsigned pos= 0; - const NDBTAB * tab= parent_tab.get_table(); - for (const Key_part_spec *col : fk->ref_columns) - { - const NDBCOL * ndbcol= tab->getColumn(col->get_field_name()); - if (ndbcol == 0) - { - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_CANNOT_ADD_FOREIGN, - "Parent table %s has no column %s in NDB", - parent_tab.get_table()->getName(), - col->get_field_name()); + unsigned pos = 0; + const NDBTAB *tab = parent_tab.get_table(); + for (const Key_part_spec *col : fk->ref_columns) { + const NDBCOL *ndbcol = tab->getColumn(col->get_field_name()); + if (ndbcol == 0) { + push_warning_printf( + thd, Sql_condition::SL_WARNING, ER_CANNOT_ADD_FOREIGN, + "Parent table %s has no column %s in NDB", + parent_tab.get_table()->getName(), col->get_field_name()); DBUG_RETURN(err_default); } - parentcols[pos++]= ndbcol; + parentcols[pos++] = ndbcol; } - parentcols[pos]= 0; // NULL terminate + parentcols[pos] = 0; // NULL terminate } - bool parent_primary_key= false; - const NDBINDEX* parent_index= find_matching_index(dict, - parent_tab.get_table(), - parentcols, - parent_primary_key); - if (parent_index) - { + bool parent_primary_key = false; + const NDBINDEX *parent_index = find_matching_index( + dict, parent_tab.get_table(), parentcols, parent_primary_key); + if (parent_index) { parent_index_releaser.add_index_to_release(parent_index); } - db_guard.restore(); // restore db + db_guard.restore(); // restore db - if (!parent_primary_key && parent_index == 0) - { - my_error(ER_FK_NO_INDEX_PARENT, MYF(0), - fk->name.str ? fk->name.str : "", + if (!parent_primary_key && parent_index == 0) { + my_error(ER_FK_NO_INDEX_PARENT, MYF(0), fk->name.str ? fk->name.str : "", parent_tab.get_table()->getName()); DBUG_RETURN(err_default); } @@ -1643,19 +1431,16 @@ ha_ndbcluster::create_fks(THD *thd, Ndb *ndb) * Check that columns match...this happens to be same * condition as the one for SPJ... */ - for (unsigned i = 0; parentcols[i] != 0; i++) - { - if (parentcols[i]->isBindable(* childcols[i]) == -1) - { + for (unsigned i = 0; parentcols[i] != 0; i++) { + if (parentcols[i]->isBindable(*childcols[i]) == -1) { // Should never happen thanks to SQL-layer doing compatibility check. 
DBUG_ASSERT(0); - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_CANNOT_ADD_FOREIGN, - "Parent column %s.%s is incompatible with child column %s.%s in NDB", - parent_tab.get_table()->getName(), - parentcols[i]->getName(), - child_tab.get_table()->getName(), - childcols[i]->getName()); + push_warning_printf( + thd, Sql_condition::SL_WARNING, ER_CANNOT_ADD_FOREIGN, + "Parent column %s.%s is incompatible with child column %s.%s in " + "NDB", + parent_tab.get_table()->getName(), parentcols[i]->getName(), + child_tab.get_table()->getName(), childcols[i]->getName()); DBUG_RETURN(err_default); } } @@ -1663,90 +1448,79 @@ ha_ndbcluster::create_fks(THD *thd, Ndb *ndb) NdbDictionary::ForeignKey ndbfk; char fk_name[FN_REFLEN]; - if (fk->name.str && fk->name.length) - { + if (fk->name.str && fk->name.length) { // The fk has a name, use it lex2str(fk->name, fk_name); - if (lower_case_table_names) - ndb_fk_casedn(fk_name); - } - else - { + if (lower_case_table_names) ndb_fk_casedn(fk_name); + } else { // The fk has no name, generate a name snprintf(fk_name, sizeof(fk_name), "FK_%u_%u", - parent_index ? - parent_index->getObjectId() : - parent_tab.get_table()->getObjectId(), - child_index ? - child_index->getObjectId() : - child_tab.get_table()->getObjectId()); + parent_index ? parent_index->getObjectId() + : parent_tab.get_table()->getObjectId(), + child_index ? child_index->getObjectId() + : child_tab.get_table()->getObjectId()); } ndbfk.setName(fk_name); - ndbfk.setParent(* parent_tab.get_table(), parent_index, parentcols); - ndbfk.setChild(* child_tab.get_table(), child_index, childcols); + ndbfk.setParent(*parent_tab.get_table(), parent_index, parentcols); + ndbfk.setChild(*child_tab.get_table(), child_index, childcols); - switch((fk_option)fk->delete_opt){ - case FK_OPTION_UNDEF: - case FK_OPTION_NO_ACTION: - ndbfk.setOnDeleteAction(NdbDictionary::ForeignKey::NoAction); - break; - case FK_OPTION_RESTRICT: - ndbfk.setOnDeleteAction(NdbDictionary::ForeignKey::Restrict); - break; - case FK_OPTION_CASCADE: - ndbfk.setOnDeleteAction(NdbDictionary::ForeignKey::Cascade); - break; - case FK_OPTION_SET_NULL: - ndbfk.setOnDeleteAction(NdbDictionary::ForeignKey::SetNull); - break; - case FK_OPTION_DEFAULT: - ndbfk.setOnDeleteAction(NdbDictionary::ForeignKey::SetDefault); - break; - default: - assert(false); - ndbfk.setOnDeleteAction(NdbDictionary::ForeignKey::NoAction); + switch ((fk_option)fk->delete_opt) { + case FK_OPTION_UNDEF: + case FK_OPTION_NO_ACTION: + ndbfk.setOnDeleteAction(NdbDictionary::ForeignKey::NoAction); + break; + case FK_OPTION_RESTRICT: + ndbfk.setOnDeleteAction(NdbDictionary::ForeignKey::Restrict); + break; + case FK_OPTION_CASCADE: + ndbfk.setOnDeleteAction(NdbDictionary::ForeignKey::Cascade); + break; + case FK_OPTION_SET_NULL: + ndbfk.setOnDeleteAction(NdbDictionary::ForeignKey::SetNull); + break; + case FK_OPTION_DEFAULT: + ndbfk.setOnDeleteAction(NdbDictionary::ForeignKey::SetDefault); + break; + default: + assert(false); + ndbfk.setOnDeleteAction(NdbDictionary::ForeignKey::NoAction); } - switch((fk_option)fk->update_opt){ - case FK_OPTION_UNDEF: - case FK_OPTION_NO_ACTION: - ndbfk.setOnUpdateAction(NdbDictionary::ForeignKey::NoAction); - break; - case FK_OPTION_RESTRICT: - ndbfk.setOnUpdateAction(NdbDictionary::ForeignKey::Restrict); - break; - case FK_OPTION_CASCADE: - ndbfk.setOnUpdateAction(NdbDictionary::ForeignKey::Cascade); - break; - case FK_OPTION_SET_NULL: - ndbfk.setOnUpdateAction(NdbDictionary::ForeignKey::SetNull); - break; - case FK_OPTION_DEFAULT: - 
ndbfk.setOnUpdateAction(NdbDictionary::ForeignKey::SetDefault); - break; - default: - assert(false); - ndbfk.setOnUpdateAction(NdbDictionary::ForeignKey::NoAction); + switch ((fk_option)fk->update_opt) { + case FK_OPTION_UNDEF: + case FK_OPTION_NO_ACTION: + ndbfk.setOnUpdateAction(NdbDictionary::ForeignKey::NoAction); + break; + case FK_OPTION_RESTRICT: + ndbfk.setOnUpdateAction(NdbDictionary::ForeignKey::Restrict); + break; + case FK_OPTION_CASCADE: + ndbfk.setOnUpdateAction(NdbDictionary::ForeignKey::Cascade); + break; + case FK_OPTION_SET_NULL: + ndbfk.setOnUpdateAction(NdbDictionary::ForeignKey::SetNull); + break; + case FK_OPTION_DEFAULT: + ndbfk.setOnUpdateAction(NdbDictionary::ForeignKey::SetDefault); + break; + default: + assert(false); + ndbfk.setOnUpdateAction(NdbDictionary::ForeignKey::NoAction); } int flags = 0; - if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) - { + if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { flags |= NdbDictionary::Dictionary::CreateFK_NoVerify; } NdbDictionary::ObjectId objid; const int err = dict->createForeignKey(ndbfk, &objid, flags); - if (err) - { + if (err) { const NdbError err = dict->getNdbError(); - if (err.code == 721) - { + if (err.code == 721) { /* An FK constraint with same name exists */ my_error(ER_FK_DUP_NAME, MYF(0), ndbfk.getName()); DBUG_RETURN(err_default); - } - else - { + } else { /* Return the error returned by dict */ ERR_RETURN(err); } @@ -1754,28 +1528,23 @@ ha_ndbcluster::create_fks(THD *thd, Ndb *ndb) /* Flush the parent table out if parent is different from child */ if (parent_tab.get_table()->getObjectId() != - child_tab.get_table()->getObjectId()) - { + child_tab.get_table()->getObjectId()) { /* flush parent table */ flush_parent_table_for_fk(thd, parent_db, parent_name); } } - ndb_fk_util_resolve_mock_tables(thd, ndb->getDictionary(), - m_dbname, m_tabname); + ndb_fk_util_resolve_mock_tables(thd, ndb->getDictionary(), m_dbname, + m_tabname); DBUG_RETURN(0); } - -uint -ha_ndbcluster::referenced_by_foreign_key() -{ +uint ha_ndbcluster::referenced_by_foreign_key() { DBUG_ENTER("ha_ndbcluster::referenced_by_foreign_key"); - Ndb_fk_data *data= m_fk_data; - if (data == 0) - { + Ndb_fk_data *data = m_fk_data; + if (data == 0) { DBUG_ASSERT(false); DBUG_RETURN(0); } @@ -1787,279 +1556,231 @@ ha_ndbcluster::referenced_by_foreign_key() struct Ndb_mem_root_guard { Ndb_mem_root_guard(MEM_ROOT *new_root) { - root_ptr= THR_MALLOC; + root_ptr = THR_MALLOC; DBUG_ASSERT(root_ptr != 0); - old_root= *root_ptr; - *root_ptr= new_root; - } - ~Ndb_mem_root_guard() { - *root_ptr= old_root; + old_root = *root_ptr; + *root_ptr = new_root; } -private: + ~Ndb_mem_root_guard() { *root_ptr = old_root; } + + private: MEM_ROOT **root_ptr; MEM_ROOT *old_root; }; -int -ha_ndbcluster::get_fk_data(THD *thd, Ndb *ndb) -{ +int ha_ndbcluster::get_fk_data(THD *thd, Ndb *ndb) { DBUG_ENTER("ha_ndbcluster::get_fk_data"); - MEM_ROOT *mem_root= &m_fk_mem_root; + MEM_ROOT *mem_root = &m_fk_mem_root; Ndb_mem_root_guard mem_root_guard(mem_root); free_root(mem_root, 0); - m_fk_data= 0; + m_fk_data = 0; init_alloc_root(PSI_INSTRUMENT_ME, mem_root, fk_root_block_size, 0); NdbError err_OOM; - err_OOM.code= 4000; // should we check OOM errors at all? + err_OOM.code = 4000; // should we check OOM errors at all? 
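The two parallel switch statements in create_fks() above encode a single mapping from the server's fk_option values onto NdbDictionary foreign key actions, once for ON DELETE and once for ON UPDATE. A minimal sketch of that mapping as one helper; the function name map_fk_action is hypothetical and not part of this patch, and NdbDictionary::ForeignKey::FkAction is assumed as the action enum from the NdbApi headers:

static NdbDictionary::ForeignKey::FkAction map_fk_action(fk_option opt) {
  // Same mapping for both ON DELETE and ON UPDATE; FK_OPTION_UNDEF
  // deliberately degrades to NoAction, matching the switches above.
  switch (opt) {
    case FK_OPTION_UNDEF:
    case FK_OPTION_NO_ACTION:
      return NdbDictionary::ForeignKey::NoAction;
    case FK_OPTION_RESTRICT:
      return NdbDictionary::ForeignKey::Restrict;
    case FK_OPTION_CASCADE:
      return NdbDictionary::ForeignKey::Cascade;
    case FK_OPTION_SET_NULL:
      return NdbDictionary::ForeignKey::SetNull;
    case FK_OPTION_DEFAULT:
      return NdbDictionary::ForeignKey::SetDefault;
    default:
      assert(false);
      return NdbDictionary::ForeignKey::NoAction;
  }
}

// Hypothetical usage, mirroring the code above:
//   ndbfk.setOnDeleteAction(map_fk_action((fk_option)fk->delete_opt));
//   ndbfk.setOnUpdateAction(map_fk_action((fk_option)fk->update_opt));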
NdbError err_API; - err_API.code= 4011; // API internal should not happen + err_API.code = 4011; // API internal should not happen - Ndb_fk_data *data= new (mem_root) Ndb_fk_data; - if (data == 0) - ERR_RETURN(err_OOM); - data->cnt_child= 0; - data->cnt_parent= 0; + Ndb_fk_data *data = new (mem_root) Ndb_fk_data; + if (data == 0) ERR_RETURN(err_OOM); + data->cnt_child = 0; + data->cnt_parent = 0; - DBUG_PRINT("info", ("%s.%s: list dependent objects", - m_dbname, m_tabname)); + DBUG_PRINT("info", ("%s.%s: list dependent objects", m_dbname, m_tabname)); int res; - NDBDICT *dict= ndb->getDictionary(); + NDBDICT *dict = ndb->getDictionary(); NDBDICT::List obj_list; - res= dict->listDependentObjects(obj_list, *m_table); - if (res != 0) - ERR_RETURN(dict->getNdbError()); + res = dict->listDependentObjects(obj_list, *m_table); + if (res != 0) ERR_RETURN(dict->getNdbError()); DBUG_PRINT("info", ("found %u dependent objects", obj_list.count)); - for (unsigned i = 0; i < obj_list.count; i++) - { - const NDBDICT::List::Element &e= obj_list.elements[i]; - if (obj_list.elements[i].type != NdbDictionary::Object::ForeignKey) - { + for (unsigned i = 0; i < obj_list.count; i++) { + const NDBDICT::List::Element &e = obj_list.elements[i]; + if (obj_list.elements[i].type != NdbDictionary::Object::ForeignKey) { DBUG_PRINT("info", ("skip non-FK %s type %d", e.name, e.type)); continue; } DBUG_PRINT("info", ("found FK %s", e.name)); NdbDictionary::ForeignKey fk; - res= dict->getForeignKey(fk, e.name); - if (res != 0) - ERR_RETURN(dict->getNdbError()); + res = dict->getForeignKey(fk, e.name); + if (res != 0) ERR_RETURN(dict->getNdbError()); - Ndb_fk_item *item= new (mem_root) Ndb_fk_item; - if (item == 0) - ERR_RETURN(err_OOM); - FOREIGN_KEY_INFO &f_key_info= item->f_key_info; + Ndb_fk_item *item = new (mem_root) Ndb_fk_item; + if (item == 0) ERR_RETURN(err_OOM); + FOREIGN_KEY_INFO &f_key_info = item->f_key_info; { char fk_full_name[FN_LEN + 1]; - const char * name = fk_split_name(fk_full_name, fk.getName()); - f_key_info.foreign_id = thd_make_lex_string(thd, 0, name, - (uint)strlen(name), 1); + const char *name = fk_split_name(fk_full_name, fk.getName()); + f_key_info.foreign_id = + thd_make_lex_string(thd, 0, name, (uint)strlen(name), 1); } { char child_db_and_name[FN_LEN + 1]; - const char * child_name = fk_split_name(child_db_and_name, - fk.getChildTable()); + const char *child_name = + fk_split_name(child_db_and_name, fk.getChildTable()); /* Dependent (child) database name */ - f_key_info.foreign_db = - thd_make_lex_string(thd, 0, child_db_and_name, - (uint)strlen(child_db_and_name), - 1); + f_key_info.foreign_db = thd_make_lex_string( + thd, 0, child_db_and_name, (uint)strlen(child_db_and_name), 1); /* Dependent (child) table name */ f_key_info.foreign_table = - thd_make_lex_string(thd, 0, child_name, - (uint)strlen(child_name), - 1); + thd_make_lex_string(thd, 0, child_name, (uint)strlen(child_name), 1); Ndb_db_guard db_guard(ndb); setDbName(ndb, child_db_and_name); Ndb_table_guard child_tab(dict, child_name); - if (child_tab.get_table() == 0) - { + if (child_tab.get_table() == 0) { DBUG_ASSERT(false); ERR_RETURN(dict->getNdbError()); } - for (unsigned i = 0; i < fk.getChildColumnCount(); i++) - { - const NdbDictionary::Column * col = - child_tab.get_table()->getColumn(fk.getChildColumnNo(i)); - if (col == 0) - ERR_RETURN(err_API); - LEX_STRING * name = - thd_make_lex_string(thd, 0, col->getName(), - (uint)strlen(col->getName()), 1); + for (unsigned i = 0; i < fk.getChildColumnCount(); i++) { + const 
NdbDictionary::Column *col = + child_tab.get_table()->getColumn(fk.getChildColumnNo(i)); + if (col == 0) ERR_RETURN(err_API); + LEX_STRING *name = thd_make_lex_string(thd, 0, col->getName(), + (uint)strlen(col->getName()), 1); f_key_info.foreign_fields.push_back(name); } } { char parent_db_and_name[FN_LEN + 1]; - const char * parent_name = fk_split_name(parent_db_and_name, - fk.getParentTable()); + const char *parent_name = + fk_split_name(parent_db_and_name, fk.getParentTable()); /* Referenced (parent) database name */ - f_key_info.referenced_db = - thd_make_lex_string(thd, 0, parent_db_and_name, - (uint)strlen(parent_db_and_name), - 1); + f_key_info.referenced_db = thd_make_lex_string( + thd, 0, parent_db_and_name, (uint)strlen(parent_db_and_name), 1); /* Referenced (parent) table name */ - f_key_info.referenced_table = - thd_make_lex_string(thd, 0, parent_name, - (uint)strlen(parent_name), - 1); + f_key_info.referenced_table = thd_make_lex_string( + thd, 0, parent_name, (uint)strlen(parent_name), 1); Ndb_db_guard db_guard(ndb); setDbName(ndb, parent_db_and_name); Ndb_table_guard parent_tab(dict, parent_name); - if (parent_tab.get_table() == 0) - { + if (parent_tab.get_table() == 0) { DBUG_ASSERT(false); ERR_RETURN(dict->getNdbError()); } - for (unsigned i = 0; i < fk.getParentColumnCount(); i++) - { - const NdbDictionary::Column * col = - parent_tab.get_table()->getColumn(fk.getParentColumnNo(i)); - if (col == 0) - ERR_RETURN(err_API); - LEX_STRING * name = - thd_make_lex_string(thd, 0, col->getName(), - (uint)strlen(col->getName()), 1); + for (unsigned i = 0; i < fk.getParentColumnCount(); i++) { + const NdbDictionary::Column *col = + parent_tab.get_table()->getColumn(fk.getParentColumnNo(i)); + if (col == 0) ERR_RETURN(err_API); + LEX_STRING *name = thd_make_lex_string(thd, 0, col->getName(), + (uint)strlen(col->getName()), 1); f_key_info.referenced_fields.push_back(name); } - } { const char *update_method = ""; - switch (item->update_action= fk.getOnUpdateAction()){ - case NdbDictionary::ForeignKey::NoAction: - update_method = "NO ACTION"; - break; - case NdbDictionary::ForeignKey::Restrict: - update_method = "RESTRICT"; - break; - case NdbDictionary::ForeignKey::Cascade: - update_method = "CASCADE"; - break; - case NdbDictionary::ForeignKey::SetNull: - update_method = "SET NULL"; - break; - case NdbDictionary::ForeignKey::SetDefault: - update_method = "SET DEFAULT"; - break; + switch (item->update_action = fk.getOnUpdateAction()) { + case NdbDictionary::ForeignKey::NoAction: + update_method = "NO ACTION"; + break; + case NdbDictionary::ForeignKey::Restrict: + update_method = "RESTRICT"; + break; + case NdbDictionary::ForeignKey::Cascade: + update_method = "CASCADE"; + break; + case NdbDictionary::ForeignKey::SetNull: + update_method = "SET NULL"; + break; + case NdbDictionary::ForeignKey::SetDefault: + update_method = "SET DEFAULT"; + break; } - f_key_info.update_method = - thd_make_lex_string(thd, 0, update_method, - (uint)strlen(update_method), - 1); + f_key_info.update_method = thd_make_lex_string( + thd, 0, update_method, (uint)strlen(update_method), 1); } { const char *delete_method = ""; - switch (item->delete_action= fk.getOnDeleteAction()){ - case NdbDictionary::ForeignKey::NoAction: - delete_method = "NO ACTION"; - break; - case NdbDictionary::ForeignKey::Restrict: - delete_method = "RESTRICT"; - break; - case NdbDictionary::ForeignKey::Cascade: - delete_method = "CASCADE"; - break; - case NdbDictionary::ForeignKey::SetNull: - delete_method = "SET NULL"; - break; - case 
NdbDictionary::ForeignKey::SetDefault: - delete_method = "SET DEFAULT"; - break; + switch (item->delete_action = fk.getOnDeleteAction()) { + case NdbDictionary::ForeignKey::NoAction: + delete_method = "NO ACTION"; + break; + case NdbDictionary::ForeignKey::Restrict: + delete_method = "RESTRICT"; + break; + case NdbDictionary::ForeignKey::Cascade: + delete_method = "CASCADE"; + break; + case NdbDictionary::ForeignKey::SetNull: + delete_method = "SET NULL"; + break; + case NdbDictionary::ForeignKey::SetDefault: + delete_method = "SET DEFAULT"; + break; } - f_key_info.delete_method = - thd_make_lex_string(thd, 0, delete_method, - (uint)strlen(delete_method), - 1); + f_key_info.delete_method = thd_make_lex_string( + thd, 0, delete_method, (uint)strlen(delete_method), 1); } - if (fk.getParentIndex() != 0) - { + if (fk.getParentIndex() != 0) { // sys/def/10/xb1$unique char db_and_name[FN_LEN + 1]; - const char * name=fk_split_name(db_and_name, fk.getParentIndex(), true); + const char *name = fk_split_name(db_and_name, fk.getParentIndex(), true); f_key_info.referenced_key_name = - thd_make_lex_string(thd, 0, name, - (uint)strlen(name), - 1); - } - else - { - const char* name= "PRIMARY"; + thd_make_lex_string(thd, 0, name, (uint)strlen(name), 1); + } else { + const char *name = "PRIMARY"; f_key_info.referenced_key_name = - thd_make_lex_string(thd, 0, name, - (uint)strlen(name), - 1); + thd_make_lex_string(thd, 0, name, (uint)strlen(name), 1); } - item->is_child= - strcmp(m_dbname, f_key_info.foreign_db->str) == 0 && - strcmp(m_tabname, f_key_info.foreign_table->str) == 0; + item->is_child = strcmp(m_dbname, f_key_info.foreign_db->str) == 0 && + strcmp(m_tabname, f_key_info.foreign_table->str) == 0; - item->is_parent= - strcmp(m_dbname, f_key_info.referenced_db->str) == 0 && - strcmp(m_tabname, f_key_info.referenced_table->str) == 0; + item->is_parent = strcmp(m_dbname, f_key_info.referenced_db->str) == 0 && + strcmp(m_tabname, f_key_info.referenced_table->str) == 0; - data->cnt_child+= item->is_child; - data->cnt_parent+= item->is_parent; + data->cnt_child += item->is_child; + data->cnt_parent += item->is_parent; - res= data->list.push_back(item); - if (res != 0) - ERR_RETURN(err_OOM); + res = data->list.push_back(item); + if (res != 0) ERR_RETURN(err_OOM); } DBUG_PRINT("info", ("count FKs total %u child %u parent %u", - data->list.elements, data->cnt_child, - data->cnt_parent)); + data->list.elements, data->cnt_child, data->cnt_parent)); - m_fk_data= data; + m_fk_data = data; DBUG_RETURN(0); } -void -ha_ndbcluster::release_fk_data() -{ +void ha_ndbcluster::release_fk_data() { DBUG_ENTER("ha_ndbcluster::release_fk_data"); - Ndb_fk_data *data= m_fk_data; - if (data != 0) - { - DBUG_PRINT("info", ("count FKs total %u child %u parent %u", - data->list.elements, data->cnt_child, - data->cnt_parent)); + Ndb_fk_data *data = m_fk_data; + if (data != 0) { + DBUG_PRINT("info", + ("count FKs total %u child %u parent %u", data->list.elements, + data->cnt_child, data->cnt_parent)); } - MEM_ROOT *mem_root= &m_fk_mem_root; + MEM_ROOT *mem_root = &m_fk_mem_root; free_root(mem_root, 0); - m_fk_data= 0; + m_fk_data = 0; DBUG_VOID_RETURN; } -int -ha_ndbcluster::get_child_or_parent_fk_list(List * f_key_list, - bool is_child, bool is_parent) -{ +int ha_ndbcluster::get_child_or_parent_fk_list( + List *f_key_list, bool is_child, bool is_parent) { DBUG_ENTER("ha_ndbcluster::get_child_or_parent_fk_list"); DBUG_PRINT("info", ("table %s.%s", m_dbname, m_tabname)); - Ndb_fk_data *data= m_fk_data; - if (data == 0) - { + 
Ndb_fk_data *data = m_fk_data; + if (data == 0) { DBUG_ASSERT(false); DBUG_RETURN(0); } @@ -2067,20 +1788,17 @@ ha_ndbcluster::get_child_or_parent_fk_list(List * f_key_list, DBUG_PRINT("info", ("count FKs total %u child %u parent %u", data->list.elements, data->cnt_child, data->cnt_parent)); - Ndb_fk_item *item= 0; + Ndb_fk_item *item = 0; List_iterator iter(data->list); - while ((item= iter++)) - { - FOREIGN_KEY_INFO &f_key_info= item->f_key_info; - DBUG_PRINT("info", ("FK %s ref %s -> %s is_child %d is_parent %d", - f_key_info.foreign_id->str, - f_key_info.foreign_table->str, - f_key_info.referenced_table->str, - item->is_child, item->is_parent)); - if (is_child && !item->is_child) - continue; - if (is_parent && !item->is_parent) - continue; + while ((item = iter++)) { + FOREIGN_KEY_INFO &f_key_info = item->f_key_info; + DBUG_PRINT( + "info", + ("FK %s ref %s -> %s is_child %d is_parent %d", + f_key_info.foreign_id->str, f_key_info.foreign_table->str, + f_key_info.referenced_table->str, item->is_child, item->is_parent)); + if (is_child && !item->is_child) continue; + if (is_parent && !item->is_parent) continue; DBUG_PRINT("info", ("add %s to list", f_key_info.foreign_id->str)); f_key_list->push_back(&f_key_info); @@ -2089,22 +1807,18 @@ ha_ndbcluster::get_child_or_parent_fk_list(List * f_key_list, DBUG_RETURN(0); } -int -ha_ndbcluster::get_foreign_key_list(THD*, - List * f_key_list) -{ +int ha_ndbcluster::get_foreign_key_list(THD *, + List *f_key_list) { DBUG_ENTER("ha_ndbcluster::get_foreign_key_list"); - int res= get_child_or_parent_fk_list(f_key_list, true, false); + int res = get_child_or_parent_fk_list(f_key_list, true, false); DBUG_PRINT("info", ("count FKs child %u", f_key_list->elements)); DBUG_RETURN(res); } -int -ha_ndbcluster::get_parent_foreign_key_list(THD*, - List * f_key_list) -{ +int ha_ndbcluster::get_parent_foreign_key_list( + THD *, List *f_key_list) { DBUG_ENTER("ha_ndbcluster::get_parent_foreign_key_list"); - int res= get_child_or_parent_fk_list(f_key_list, false, true); + int res = get_child_or_parent_fk_list(f_key_list, false, true); DBUG_PRINT("info", ("count FKs parent %u", f_key_list->elements)); DBUG_RETURN(res); } @@ -2112,18 +1826,14 @@ ha_ndbcluster::get_parent_foreign_key_list(THD*, namespace { struct cmp_fk_name { - bool operator() (const NDBDICT::List::Element &e0, - const NDBDICT::List::Element &e1) const - { + bool operator()(const NDBDICT::List::Element &e0, + const NDBDICT::List::Element &e1) const { int res; - if ((res= strcmp(e0.name, e1.name)) != 0) - return res < 0; + if ((res = strcmp(e0.name, e1.name)) != 0) return res < 0; - if ((res= strcmp(e0.database, e1.database)) != 0) - return res < 0; + if ((res = strcmp(e0.database, e1.database)) != 0) return res < 0; - if ((res= strcmp(e0.schema, e1.schema)) != 0) - return res < 0; + if ((res = strcmp(e0.schema, e1.schema)) != 0) return res < 0; return e0.id < e1.id; } @@ -2131,37 +1841,31 @@ struct cmp_fk_name { } // namespace -char* -ha_ndbcluster::get_foreign_key_create_info() -{ +char *ha_ndbcluster::get_foreign_key_create_info() { DBUG_ENTER("ha_ndbcluster::get_foreign_key_create_info"); /** * List foreigns for this table */ - if (m_table == 0) - { + if (m_table == 0) { DBUG_RETURN(0); } - if (table == 0) - { + if (table == 0) { DBUG_RETURN(0); } - THD* thd = table->in_use; - if (thd == 0) - { + THD *thd = table->in_use; + if (thd == 0) { DBUG_RETURN(0); } - Ndb *ndb= get_ndb(thd); - if (ndb == 0) - { + Ndb *ndb = get_ndb(thd); + if (ndb == 0) { DBUG_RETURN(0); } - NDBDICT *dict= 
ndb->getDictionary(); + NDBDICT *dict = ndb->getDictionary(); NDBDICT::List obj_list; dict->listDependentObjects(obj_list, *m_table); @@ -2174,68 +1878,56 @@ ha_ndbcluster::get_foreign_key_create_info() std::sort(obj_list.elements, obj_list.elements + obj_list.count, cmp_fk_name()); String fk_string; - for (unsigned i = 0; i < obj_list.count; i++) - { + for (unsigned i = 0; i < obj_list.count; i++) { if (obj_list.elements[i].type != NdbDictionary::Object::ForeignKey) continue; NdbDictionary::ForeignKey fk; - int res= dict->getForeignKey(fk, obj_list.elements[i].name); - if (res != 0) - { + int res = dict->getForeignKey(fk, obj_list.elements[i].name); + if (res != 0) { // Push warning?? DBUG_RETURN(0); } - if (!ndb_fk_util_generate_constraint_string(thd, ndb, fk, - m_table->getTableId(), - ndb_show_foreign_key_mock_tables(thd), - fk_string)) - { - DBUG_RETURN(0); // How to report error ?? + if (!ndb_fk_util_generate_constraint_string( + thd, ndb, fk, m_table->getTableId(), + ndb_show_foreign_key_mock_tables(thd), fk_string)) { + DBUG_RETURN(0); // How to report error ?? } } DBUG_RETURN(strdup(fk_string.c_ptr())); } -void -ha_ndbcluster::free_foreign_key_create_info(char* str) -{ - if (str != 0) - { +void ha_ndbcluster::free_foreign_key_create_info(char *str) { + if (str != 0) { free(str); } } -int -ha_ndbcluster::copy_fk_for_offline_alter(THD * thd, Ndb* ndb, - const char* tabname) -{ +int ha_ndbcluster::copy_fk_for_offline_alter(THD *thd, Ndb *ndb, + const char *tabname) { DBUG_ENTER("ha_ndbcluster::copy_fk_for_offline_alter"); DBUG_PRINT("enter", ("tabname: '%s'", tabname)); - if (thd->lex == 0) - { + if (thd->lex == 0) { assert(false); DBUG_RETURN(0); } Ndb_db_guard db_guard(ndb); - const char * src_db = thd->lex->select_lex->table_list.first->db; - const char * src_tab = thd->lex->select_lex->table_list.first->table_name; + const char *src_db = thd->lex->select_lex->table_list.first->db; + const char *src_tab = thd->lex->select_lex->table_list.first->table_name; - if (src_db == 0 || src_tab == 0) - { + if (src_db == 0 || src_tab == 0) { assert(false); DBUG_RETURN(0); } - NDBDICT* dict = ndb->getDictionary(); + NDBDICT *dict = ndb->getDictionary(); setDbName(ndb, src_db); Ndb_table_guard srctab(dict, src_tab); - if (srctab.get_table() == 0) - { + if (srctab.get_table() == 0) { /** * when doing alter table engine=ndb this can happen */ @@ -2244,40 +1936,34 @@ ha_ndbcluster::copy_fk_for_offline_alter(THD * thd, Ndb* ndb, db_guard.restore(); Ndb_table_guard dsttab(dict, tabname); - if (dsttab.get_table() == 0) - { + if (dsttab.get_table() == 0) { ERR_RETURN(dict->getNdbError()); } setDbName(ndb, src_db); NDBDICT::List obj_list; - if (dict->listDependentObjects(obj_list, *srctab.get_table()) != 0) - { + if (dict->listDependentObjects(obj_list, *srctab.get_table()) != 0) { ERR_RETURN(dict->getNdbError()); } // check if fk to drop exists { - for (const Alter_drop *drop_item : thd->lex->alter_info->drop_list) - { - if (drop_item->type != Alter_drop::FOREIGN_KEY) - continue; - bool found= false; - for (unsigned i = 0; i < obj_list.count; i++) - { + for (const Alter_drop *drop_item : thd->lex->alter_info->drop_list) { + if (drop_item->type != Alter_drop::FOREIGN_KEY) continue; + bool found = false; + for (unsigned i = 0; i < obj_list.count; i++) { // Skip if the element is not a foreign key if (obj_list.elements[i].type != NdbDictionary::Object::ForeignKey) continue; // Check if this is the fk being dropped char db_and_name[FN_LEN + 1]; - const char * name=
fk_split_name(db_and_name,obj_list.elements[i].name); - if (ndb_fk_casecmp(drop_item->name, name) != 0) - continue; + const char *name = + fk_split_name(db_and_name, obj_list.elements[i].name); + if (ndb_fk_casecmp(drop_item->name, name) != 0) continue; NdbDictionary::ForeignKey fk; - if (dict->getForeignKey(fk, obj_list.elements[i].name) != 0) - { + if (dict->getForeignKey(fk, obj_list.elements[i].name) != 0) { // should never happen DBUG_ASSERT(false); push_warning_printf(thd, Sql_condition::SL_WARNING, @@ -2289,17 +1975,15 @@ ha_ndbcluster::copy_fk_for_offline_alter(THD * thd, Ndb* ndb, // The FK we are looking for is on src_tab. char child_db_and_name[FN_LEN + 1]; - const char* child_name = fk_split_name(child_db_and_name, - fk.getChildTable()); + const char *child_name = + fk_split_name(child_db_and_name, fk.getChildTable()); if (strcmp(child_db_and_name, src_db) == 0 && - strcmp(child_name, src_tab) == 0) - { - found= true; + strcmp(child_name, src_tab) == 0) { + found = true; break; } } - if (!found) - { + if (!found) { // FK not found my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0), drop_item->name); DBUG_RETURN(ER_CANT_DROP_FIELD_OR_KEY); @@ -2307,17 +1991,13 @@ ha_ndbcluster::copy_fk_for_offline_alter(THD * thd, Ndb* ndb, } } - for (unsigned i = 0; i < obj_list.count; i++) - { - if (obj_list.elements[i].type == NdbDictionary::Object::ForeignKey) - { + for (unsigned i = 0; i < obj_list.count; i++) { + if (obj_list.elements[i].type == NdbDictionary::Object::ForeignKey) { NdbDictionary::ForeignKey fk; - if (dict->getForeignKey(fk, obj_list.elements[i].name) != 0) - { + if (dict->getForeignKey(fk, obj_list.elements[i].name) != 0) { // should never happen DBUG_ASSERT(false); - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_ALTER_INFO, + push_warning_printf(thd, Sql_condition::SL_WARNING, ER_ALTER_INFO, "INTERNAL ERROR: Could not find foreign key '%s'", obj_list.elements[i].name); ERR_RETURN(dict->getNdbError()); @@ -2328,28 +2008,24 @@ ha_ndbcluster::copy_fk_for_offline_alter(THD * thd, Ndb* ndb, * Check if it should be copied */ char db_and_name[FN_LEN + 1]; - const char * name= fk_split_name(db_and_name,obj_list.elements[i].name); + const char *name = + fk_split_name(db_and_name, obj_list.elements[i].name); - bool found= false; - for (const Alter_drop *drop_item : thd->lex->alter_info->drop_list) - { - if (drop_item->type != Alter_drop::FOREIGN_KEY) - continue; - if (ndb_fk_casecmp(drop_item->name, name) != 0) - continue; + bool found = false; + for (const Alter_drop *drop_item : thd->lex->alter_info->drop_list) { + if (drop_item->type != Alter_drop::FOREIGN_KEY) continue; + if (ndb_fk_casecmp(drop_item->name, name) != 0) continue; char child_db_and_name[FN_LEN + 1]; - const char* child_name = fk_split_name(child_db_and_name, - fk.getChildTable()); + const char *child_name = + fk_split_name(child_db_and_name, fk.getChildTable()); if (strcmp(child_db_and_name, src_db) == 0 && - strcmp(child_name, src_tab) == 0) - { - found= true; + strcmp(child_name, src_tab) == 0) { + found = true; break; } } - if (found) - { + if (found) { /** * Item is on drop list... 
* don't copy it @@ -2360,22 +2036,20 @@ ha_ndbcluster::copy_fk_for_offline_alter(THD * thd, Ndb* ndb, { char db_and_name[FN_LEN + 1]; - const char * name= fk_split_name(db_and_name, fk.getParentTable()); + const char *name = fk_split_name(db_and_name, fk.getParentTable()); setDbName(ndb, db_and_name); Ndb_table_guard org_parent(dict, name); - if (org_parent.get_table() == 0) - { + if (org_parent.get_table() == 0) { ERR_RETURN(dict->getNdbError()); } } { char db_and_name[FN_LEN + 1]; - const char * name= fk_split_name(db_and_name, fk.getChildTable()); + const char *name = fk_split_name(db_and_name, fk.getChildTable()); setDbName(ndb, db_and_name); Ndb_table_guard org_child(dict, name); - if (org_child.get_table() == 0) - { + if (org_child.get_table() == 0) { ERR_RETURN(dict->getNdbError()); } } @@ -2386,35 +2060,29 @@ ha_ndbcluster::copy_fk_for_offline_alter(THD * thd, Ndb* ndb, int flags = 0; char db_and_name[FN_LEN + 1]; - const char * name= fk_split_name(db_and_name, fk.getParentTable()); - if (strcmp(name, src_tab) == 0 && - strcmp(db_and_name, src_db) == 0) - { + const char *name = fk_split_name(db_and_name, fk.getParentTable()); + if (strcmp(name, src_tab) == 0 && strcmp(db_and_name, src_db) == 0) { /** * We used to be parent... */ - const NDBCOL * cols[NDB_MAX_ATTRIBUTES_IN_INDEX + 1]; - for (unsigned j= 0; j < fk.getParentColumnCount(); j++) - { + const NDBCOL *cols[NDB_MAX_ATTRIBUTES_IN_INDEX + 1]; + for (unsigned j = 0; j < fk.getParentColumnCount(); j++) { const int parent_col_index = fk.getParentColumnNo(j); - const NDBCOL * orgcol = srctab.get_table()->getColumn(parent_col_index); - cols[j]= dsttab.get_table()->getColumn(orgcol->getName()); + const NDBCOL *orgcol = + srctab.get_table()->getColumn(parent_col_index); + cols[j] = dsttab.get_table()->getColumn(orgcol->getName()); } - cols[fk.getParentColumnCount()]= 0; - if (fk.getParentIndex() != 0) - { + cols[fk.getParentColumnCount()] = 0; + if (fk.getParentIndex() != 0) { name = fk_split_name(db_and_name, fk.getParentIndex(), true); setDbName(ndb, db_and_name); - const NDBINDEX * idx = dict->getIndexGlobal(name,*dsttab.get_table()); - if (idx == 0) - { + const NDBINDEX *idx = dict->getIndexGlobal(name, *dsttab.get_table()); + if (idx == 0) { ERR_RETURN(dict->getNdbError()); } - fk.setParent(* dsttab.get_table(), idx, cols); - dict->removeIndexGlobal(* idx, 0); - } - else - { + fk.setParent(*dsttab.get_table(), idx, cols); + dict->removeIndexGlobal(*idx, 0); + } else { /* The parent column was previously the primary key. Make sure it still is a primary key as implicit pks @@ -2422,12 +2090,9 @@ ha_ndbcluster::copy_fk_for_offline_alter(THD * thd, Ndb* ndb, matching index. 
*/ bool parent_primary = false; - const NDBINDEX * idx = find_matching_index(dict, - dsttab.get_table(), - cols, - parent_primary); - if (!parent_primary && idx == 0) - { + const NDBINDEX *idx = find_matching_index(dict, dsttab.get_table(), + cols, parent_primary); + if (!parent_primary && idx == 0) { my_error(ER_FK_NO_INDEX_PARENT, MYF(0), fk.getName(), dsttab.get_table()->getName()); DBUG_RETURN(HA_ERR_CANNOT_ADD_FOREIGN); @@ -2435,7 +2100,6 @@ ha_ndbcluster::copy_fk_for_offline_alter(THD * thd, Ndb* ndb, fk.setParent(*dsttab.get_table(), idx, cols); } - /** * We're parent, and this is offline alter table * then we can't verify that FK because the new parent will @@ -2446,57 +2110,43 @@ ha_ndbcluster::copy_fk_for_offline_alter(THD * thd, Ndb* ndb, * allow the alter to modify the columns referenced */ flags |= NdbDictionary::Dictionary::CreateFK_NoVerify; - } - else - { + } else { name = fk_split_name(db_and_name, fk.getChildTable()); - assert(strcmp(name, src_tab) == 0 && - strcmp(db_and_name, src_db) == 0); + assert(strcmp(name, src_tab) == 0 && strcmp(db_and_name, src_db) == 0); - const NDBCOL * cols[NDB_MAX_ATTRIBUTES_IN_INDEX + 1]; - for (unsigned j= 0; j < fk.getChildColumnCount(); j++) - { + const NDBCOL *cols[NDB_MAX_ATTRIBUTES_IN_INDEX + 1]; + for (unsigned j = 0; j < fk.getChildColumnCount(); j++) { const int child_col_index = fk.getChildColumnNo(j); - const NDBCOL * orgcol = srctab.get_table()->getColumn(child_col_index); - cols[j]= dsttab.get_table()->getColumn(orgcol->getName()); + const NDBCOL *orgcol = srctab.get_table()->getColumn(child_col_index); + cols[j] = dsttab.get_table()->getColumn(orgcol->getName()); } - cols[fk.getChildColumnCount()]= 0; - if (fk.getChildIndex() != 0) - { + cols[fk.getChildColumnCount()] = 0; + if (fk.getChildIndex() != 0) { name = fk_split_name(db_and_name, fk.getChildIndex(), true); setDbName(ndb, db_and_name); bool child_primary_key = false; - const NDBINDEX * idx = find_matching_index(dict, - dsttab.get_table(), - cols, - child_primary_key); - if (!child_primary_key && idx == 0) - { + const NDBINDEX *idx = find_matching_index(dict, dsttab.get_table(), + cols, child_primary_key); + if (!child_primary_key && idx == 0) { ERR_RETURN(dict->getNdbError()); } - fk.setChild(* dsttab.get_table(), idx, cols); - if(idx) - dict->removeIndexGlobal(*idx, 0); - } - else - { - fk.setChild(* dsttab.get_table(), 0, cols); + fk.setChild(*dsttab.get_table(), idx, cols); + if (idx) dict->removeIndexGlobal(*idx, 0); + } else { + fk.setChild(*dsttab.get_table(), 0, cols); } } char new_name[FN_LEN + 1]; - name= fk_split_name(db_and_name, fk.getName()); - snprintf(new_name, sizeof(new_name), "%s", - name); + name = fk_split_name(db_and_name, fk.getName()); + snprintf(new_name, sizeof(new_name), "%s", name); fk.setName(new_name); setDbName(ndb, db_and_name); - if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) - { + if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { flags |= NdbDictionary::Dictionary::CreateFK_NoVerify; } NdbDictionary::ObjectId objid; - if (dict->createForeignKey(fk, &objid, flags) != 0) - { + if (dict->createForeignKey(fk, &objid, flags) != 0) { ERR_RETURN(dict->getNdbError()); } } @@ -2504,81 +2154,66 @@ ha_ndbcluster::copy_fk_for_offline_alter(THD * thd, Ndb* ndb, DBUG_RETURN(0); } -int -ha_ndbcluster::inplace__drop_fks(THD * thd, Ndb* ndb, NDBDICT * dict, - const NDBTAB* tab) -{ +int ha_ndbcluster::inplace__drop_fks(THD *thd, Ndb *ndb, NDBDICT *dict, + const NDBTAB *tab) { DBUG_ENTER("ha_ndbcluster::inplace__drop_fks"); - if (thd->lex
== 0) - { + if (thd->lex == 0) { assert(false); DBUG_RETURN(0); } Ndb_table_guard srctab(dict, tab->getName()); - if (srctab.get_table() == 0) - { - DBUG_ASSERT(false); // Why ?? + if (srctab.get_table() == 0) { + DBUG_ASSERT(false); // Why ?? DBUG_RETURN(0); } NDBDICT::List obj_list; - if (dict->listDependentObjects(obj_list, *srctab.get_table()) != 0) - { + if (dict->listDependentObjects(obj_list, *srctab.get_table()) != 0) { ERR_RETURN(dict->getNdbError()); } - for (const Alter_drop *drop_item : thd->lex->alter_info->drop_list) - { - if (drop_item->type != Alter_drop::FOREIGN_KEY) - continue; + for (const Alter_drop *drop_item : thd->lex->alter_info->drop_list) { + if (drop_item->type != Alter_drop::FOREIGN_KEY) continue; - bool found= false; - for (unsigned i = 0; i < obj_list.count; i++) - { - if (obj_list.elements[i].type != NdbDictionary::Object::ForeignKey) - { + bool found = false; + for (unsigned i = 0; i < obj_list.count; i++) { + if (obj_list.elements[i].type != NdbDictionary::Object::ForeignKey) { continue; } char db_and_name[FN_LEN + 1]; - const char * name= fk_split_name(db_and_name,obj_list.elements[i].name); + const char *name = fk_split_name(db_and_name, obj_list.elements[i].name); - if (ndb_fk_casecmp(drop_item->name, name) != 0) - continue; + if (ndb_fk_casecmp(drop_item->name, name) != 0) continue; NdbDictionary::ForeignKey fk; - if (dict->getForeignKey(fk, obj_list.elements[i].name) != 0) - { + if (dict->getForeignKey(fk, obj_list.elements[i].name) != 0) { ERR_RETURN(dict->getNdbError()); } char child_db_and_name[FN_LEN + 1]; - const char* child_name = fk_split_name(child_db_and_name, - fk.getChildTable()); + const char *child_name = + fk_split_name(child_db_and_name, fk.getChildTable()); if (strcmp(child_db_and_name, ndb->getDatabaseName()) == 0 && - strcmp(child_name, tab->getName()) == 0) - { - found= true; + strcmp(child_name, tab->getName()) == 0) { + found = true; Fk_util fk_util(thd); - if (!fk_util.drop_fk(ndb, dict, obj_list.elements[i].name)) - { + if (!fk_util.drop_fk(ndb, dict, obj_list.elements[i].name)) { ERR_RETURN(dict->getNdbError()); } /* Flush the parent table out if parent is different from child */ - if(ndb_fk_casecmp(fk.getParentTable(), fk.getChildTable()) != 0) - { + if (ndb_fk_casecmp(fk.getParentTable(), fk.getChildTable()) != 0) { char parent_db[FN_LEN + 1]; - const char* parent_name = fk_split_name(parent_db, - fk.getParentTable()); + const char *parent_name = + fk_split_name(parent_db, fk.getParentTable()); flush_parent_table_for_fk(thd, parent_db, parent_name); } break; } } - if (!found) - { + if (!found) { // FK not found my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0), drop_item->name); DBUG_RETURN(ER_CANT_DROP_FIELD_OR_KEY); @@ -2587,7 +2222,6 @@ ha_ndbcluster::inplace__drop_fks(THD * thd, Ndb* ndb, NDBDICT * dict, DBUG_RETURN(0); } - /** Save all fk data into a fk_list - Build list of foreign keys for which the given table is child @@ -2598,30 +2232,24 @@ ha_ndbcluster::inplace__drop_fks(THD * thd, Ndb* ndb, NDBDICT * dict, != 0 failure in saving the fk data */ -int -ha_ndbcluster::get_fk_data_for_truncate(NdbDictionary::Dictionary* dict, - const NdbDictionary::Table* ndbtab, - Ndb_fk_list& fk_list) -{ +int ha_ndbcluster::get_fk_data_for_truncate(NdbDictionary::Dictionary *dict, + const NdbDictionary::Table *ndbtab, + Ndb_fk_list &fk_list) { DBUG_ENTER("ha_ndbcluster::get_fk_data_for_truncate"); NDBDICT::List obj_list; - if (dict->listDependentObjects(obj_list, *ndbtab) != 0) - { + if (dict->listDependentObjects(obj_list, *ndbtab) != 0) { 
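The truncate path here relies on the dictionary enumeration idiom that recurs throughout this file: list a table's dependent objects, keep only elements of type NdbDictionary::Object::ForeignKey, then fetch each key by name. A minimal self-contained sketch of that idiom; the helper name list_fk_names is hypothetical, and error handling is simplified relative to the patch's ERR_RETURN/push_warning_printf conventions:

#include <string>
#include <vector>

#include <NdbApi.hpp>

// Collect the names of all foreign keys in which 'tab' participates.
// Returns 0 on success, otherwise the NDB error code.
static int list_fk_names(NdbDictionary::Dictionary *dict,
                         const NdbDictionary::Table &tab,
                         std::vector<std::string> &names) {
  NdbDictionary::Dictionary::List obj_list;
  if (dict->listDependentObjects(obj_list, tab) != 0)
    return dict->getNdbError().code;
  for (unsigned i = 0; i < obj_list.count; i++) {
    // Dependent objects include more than FKs (e.g. indexes); filter by type
    if (obj_list.elements[i].type != NdbDictionary::Object::ForeignKey)
      continue;
    NdbDictionary::ForeignKey fk;
    if (dict->getForeignKey(fk, obj_list.elements[i].name) != 0)
      return dict->getNdbError().code;
    names.emplace_back(fk.getName());
  }
  return 0;
}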
ERR_RETURN(dict->getNdbError()); } - for (unsigned i = 0; i < obj_list.count; i++) - { + for (unsigned i = 0; i < obj_list.count; i++) { DBUG_PRINT("debug", ("DependentObject %d : %s, Type : %d", i, - obj_list.elements[i].name, - obj_list.elements[i].type)); + obj_list.elements[i].name, obj_list.elements[i].type)); if (obj_list.elements[i].type != NdbDictionary::Object::ForeignKey) continue; /* obj is an fk. Fetch it */ NDBFK fk; - if (dict->getForeignKey(fk, obj_list.elements[i].name) != 0) - { + if (dict->getForeignKey(fk, obj_list.elements[i].name) != 0) { ERR_RETURN(dict->getNdbError()); } DBUG_PRINT("debug", ("Retrieving FK : %s", fk.getName())); @@ -2633,7 +2261,6 @@ ha_ndbcluster::get_fk_data_for_truncate(NdbDictionary::Dictionary* dict, DBUG_RETURN(0); } - /** Restore foreign keys into the child table from fk_list - for all foreign keys in the given fk list, re-assign child object ids @@ -2646,73 +2273,64 @@ ha_ndbcluster::get_fk_data_for_truncate(NdbDictionary::Dictionary* dict, != 0 failure in recreating the fk data */ -int -ha_ndbcluster::recreate_fk_for_truncate(THD* thd, Ndb* ndb, const char* tab_name, - Ndb_fk_list& fk_list) -{ +int ha_ndbcluster::recreate_fk_for_truncate(THD *thd, Ndb *ndb, + const char *tab_name, + Ndb_fk_list &fk_list) { DBUG_ENTER("ha_ndbcluster::create_fk_for_truncate"); int flags = 0; - const int err_default= HA_ERR_CANNOT_ADD_FOREIGN; + const int err_default = HA_ERR_CANNOT_ADD_FOREIGN; - NDBDICT* dict = ndb->getDictionary(); + NDBDICT *dict = ndb->getDictionary(); /* fetch child table */ Ndb_table_guard child_tab(dict, tab_name); - if (child_tab.get_table() == 0) - { - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_CANNOT_ADD_FOREIGN, - "INTERNAL ERROR: Could not find created child table '%s'", - tab_name); + if (child_tab.get_table() == 0) { + push_warning_printf( + thd, Sql_condition::SL_WARNING, ER_CANNOT_ADD_FOREIGN, + "INTERNAL ERROR: Could not find created child table '%s'", tab_name); // Internal error, should be able to load the just created child table DBUG_ASSERT(child_tab.get_table()); DBUG_RETURN(err_default); } - NDBFK* fk; + NDBFK *fk; List_iterator fk_iterator(fk_list); - while ((fk= fk_iterator++)) - { - DBUG_PRINT("info",("Parsing foreign key : %s", fk->getName())); + while ((fk = fk_iterator++)) { + DBUG_PRINT("info", ("Parsing foreign key : %s", fk->getName())); /* Get child table columns and index */ - const NDBCOL * child_cols[NDB_MAX_ATTRIBUTES_IN_INDEX + 1]; - { - unsigned pos= 0; - const NDBTAB* tab= child_tab.get_table(); - for(unsigned i= 0; i < fk->getChildColumnCount(); i++) - { - const NDBCOL * ndbcol= tab->getColumn(fk->getChildColumnNo(i)); - if (ndbcol == 0) - { - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_CANNOT_ADD_FOREIGN, - "Child table %s has no column referred by the FK %s", - tab->getName(), fk->getName()); + const NDBCOL *child_cols[NDB_MAX_ATTRIBUTES_IN_INDEX + 1]; + { + unsigned pos = 0; + const NDBTAB *tab = child_tab.get_table(); + for (unsigned i = 0; i < fk->getChildColumnCount(); i++) { + const NDBCOL *ndbcol = tab->getColumn(fk->getChildColumnNo(i)); + if (ndbcol == 0) { + push_warning_printf( + thd, Sql_condition::SL_WARNING, ER_CANNOT_ADD_FOREIGN, + "Child table %s has no column referred by the FK %s", + tab->getName(), fk->getName()); DBUG_ASSERT(ndbcol); DBUG_RETURN(err_default); } - child_cols[pos++]= ndbcol; + child_cols[pos++] = ndbcol; } - child_cols[pos]= 0; + child_cols[pos] = 0; } - bool child_primary_key= false; - const NDBINDEX* child_index= find_matching_index(dict, 
- child_tab.get_table(), - child_cols, - child_primary_key); + bool child_primary_key = false; + const NDBINDEX *child_index = find_matching_index( + dict, child_tab.get_table(), child_cols, child_primary_key); - if (!child_primary_key && child_index == 0) - { + if (!child_primary_key && child_index == 0) { my_error(ER_FK_NO_INDEX_CHILD, MYF(0), fk->getName(), child_tab.get_table()->getName()); DBUG_RETURN(err_default); } /* update the fk's child references */ - fk->setChild(* child_tab.get_table(), child_index, child_cols); + fk->setChild(*child_tab.get_table(), child_index, child_cols); /* the name of "fk" seems to be different when you read it up @@ -2720,97 +2338,78 @@ ha_ndbcluster::recreate_fk_for_truncate(THD* thd, Ndb* ndb, const char* tab_name So update fk's name */ { - char name[FN_REFLEN+1]; + char name[FN_REFLEN + 1]; unsigned parent_id, child_id; - if (sscanf(fk->getName(), "%u/%u/%s", - &parent_id, &child_id, name) != 3) - { - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_CANNOT_ADD_FOREIGN, - "Skip, failed to parse name of fk: %s", - fk->getName()); + if (sscanf(fk->getName(), "%u/%u/%s", &parent_id, &child_id, name) != 3) { + push_warning_printf( + thd, Sql_condition::SL_WARNING, ER_CANNOT_ADD_FOREIGN, + "Skip, failed to parse name of fk: %s", fk->getName()); DBUG_RETURN(err_default); } - char fk_name[FN_REFLEN+1]; - snprintf(fk_name, sizeof(fk_name), "%s", - name); + char fk_name[FN_REFLEN + 1]; + snprintf(fk_name, sizeof(fk_name), "%s", name); DBUG_PRINT("info", ("Setting new fk name: %s", fk_name)); fk->setName(fk_name); } - if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) - { + if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { flags |= NdbDictionary::Dictionary::CreateFK_NoVerify; } NdbDictionary::ObjectId objid; - int err= dict->createForeignKey(*fk, &objid, flags); + int err = dict->createForeignKey(*fk, &objid, flags); - if (child_index) - { - dict->removeIndexGlobal(* child_index, 0); + if (child_index) { + dict->removeIndexGlobal(*child_index, 0); } - if (err) - { + if (err) { ERR_RETURN(dict->getNdbError()); } /* Flush the parent table out if parent is different from child */ char parent_db[FN_LEN + 1]; - const char* parent_name = fk_split_name(parent_db, - fk->getParentTable()); - if(ndb_fk_casecmp(parent_name, tab_name) != 0 || - ndb_fk_casecmp(parent_db, ndb->getDatabaseName()) != 0) - { + const char *parent_name = fk_split_name(parent_db, fk->getParentTable()); + if (ndb_fk_casecmp(parent_name, tab_name) != 0 || + ndb_fk_casecmp(parent_db, ndb->getDatabaseName()) != 0) { flush_parent_table_for_fk(thd, parent_db, parent_name); } } DBUG_RETURN(0); } -bool -ha_ndbcluster::has_fk_dependency(THD* thd, - const NdbDictionary::Column* column) const -{ +bool ha_ndbcluster::has_fk_dependency( + THD *thd, const NdbDictionary::Column *column) const { DBUG_ENTER("ha_ndbcluster::has_fk_dependency"); - Ndb* ndb= get_ndb(thd); - NDBDICT* dict = ndb->getDictionary(); + Ndb *ndb = get_ndb(thd); + NDBDICT *dict = ndb->getDictionary(); NdbDictionary::Dictionary::List obj_list; DBUG_PRINT("info", ("Searching for column %s", column->getName())); - if (dict->listDependentObjects(obj_list, *m_table) == 0) - { - for (unsigned i = 0; i < obj_list.count; i++) - { - const NDBDICT::List::Element &e= obj_list.elements[i]; - if (obj_list.elements[i].type != NdbDictionary::Object::ForeignKey) - { - DBUG_PRINT("info", ("skip non-FK %s type %d", e.name, e.type)); - continue; + if (dict->listDependentObjects(obj_list, *m_table) == 0) { + for (unsigned i = 0; i < 
obj_list.count; i++) { + const NDBDICT::List::Element &e = obj_list.elements[i]; + if (obj_list.elements[i].type != NdbDictionary::Object::ForeignKey) { + DBUG_PRINT("info", ("skip non-FK %s type %d", e.name, e.type)); + continue; } DBUG_PRINT("info", ("found FK %s", e.name)); NdbDictionary::ForeignKey fk; - if (dict->getForeignKey(fk, e.name) != 0) - { + if (dict->getForeignKey(fk, e.name) != 0) { DBUG_PRINT("error", ("Could not find the listed fk '%s'", e.name)); continue; } - for (unsigned j = 0; j < fk.getParentColumnCount(); j++) - { - const NdbDictionary::Column* col = - m_table->getColumn(fk.getParentColumnNo(j)); - DBUG_PRINT("col", ("[%u] %s", i, col->getName())); - if (col == column) - DBUG_RETURN(true); - } - for (unsigned j = 0; j < fk.getChildColumnCount(); j++) - { - const NdbDictionary::Column* col = - m_table->getColumn(fk.getChildColumnNo(j)); - DBUG_PRINT("col", ("[%u] %s", i, col->getName())); - if (col == column) - DBUG_RETURN(true); + for (unsigned j = 0; j < fk.getParentColumnCount(); j++) { + const NdbDictionary::Column *col = + m_table->getColumn(fk.getParentColumnNo(j)); + DBUG_PRINT("col", ("[%u] %s", i, col->getName())); + if (col == column) DBUG_RETURN(true); + } + for (unsigned j = 0; j < fk.getChildColumnCount(); j++) { + const NdbDictionary::Column *col = + m_table->getColumn(fk.getChildColumnNo(j)); + DBUG_PRINT("col", ("[%u] %s", i, col->getName())); + if (col == column) DBUG_RETURN(true); } } } diff --git a/storage/ndb/plugin/ha_ndb_index_stat.cc b/storage/ndb/plugin/ha_ndb_index_stat.cc index bc62ff8c5e91..9ca393e85b0b 100644 --- a/storage/ndb/plugin/ha_ndb_index_stat.cc +++ b/storage/ndb/plugin/ha_ndb_index_stat.cc @@ -29,7 +29,7 @@ #include #include "my_dbug.h" -#include "sql/mysqld.h" // LOCK_global_system_variables +#include "sql/mysqld.h" // LOCK_global_system_variables #include "storage/ndb/plugin/ha_ndbcluster.h" #include "storage/ndb/plugin/ha_ndbcluster_connection.h" #include "storage/ndb/plugin/ndb_require.h" @@ -40,22 +40,16 @@ extern Ndb_index_stat_thread ndb_index_stat_thread; /* Implemented in ha_ndbcluster.cc */ extern bool ndb_index_stat_get_enable(THD *thd); -// Typedefs for long names +// Typedefs for long names typedef NdbDictionary::Table NDBTAB; typedef NdbDictionary::Index NDBINDEX; /** ndb_index_stat_thread */ Ndb_index_stat_thread::Ndb_index_stat_thread() - : Ndb_component("Index Stat"), - client_waiting(false) -{ -} + : Ndb_component("Index Stat"), client_waiting(false) {} -int -Ndb_index_stat_thread::do_init() -{ - mysql_mutex_init(PSI_INSTRUMENT_ME, &LOCK_client_waiting, - MY_MUTEX_INIT_FAST); +int Ndb_index_stat_thread::do_init() { + mysql_mutex_init(PSI_INSTRUMENT_ME, &LOCK_client_waiting, MY_MUTEX_INIT_FAST); mysql_cond_init(PSI_INSTRUMENT_ME, &COND_client_waiting); mysql_mutex_init(PSI_INSTRUMENT_ME, &stat_mutex, MY_MUTEX_INIT_FAST); @@ -63,13 +57,9 @@ Ndb_index_stat_thread::do_init() return 0; } -Ndb_index_stat_thread::~Ndb_index_stat_thread() -{ -} +Ndb_index_stat_thread::~Ndb_index_stat_thread() {} -int -Ndb_index_stat_thread::do_deinit() -{ +int Ndb_index_stat_thread::do_deinit() { mysql_mutex_destroy(&LOCK_client_waiting); mysql_cond_destroy(&COND_client_waiting); @@ -78,65 +68,63 @@ Ndb_index_stat_thread::do_deinit() return 0; } -void Ndb_index_stat_thread::do_wakeup() -{ +void Ndb_index_stat_thread::do_wakeup() { // Wakeup from potential wait log_info("Wakeup"); wakeup(); } -void Ndb_index_stat_thread::wakeup() -{ +void Ndb_index_stat_thread::wakeup() { mysql_mutex_lock(&LOCK_client_waiting); - client_waiting= true; + 
client_waiting = true; mysql_cond_signal(&COND_client_waiting); mysql_mutex_unlock(&LOCK_client_waiting); } struct Ndb_index_stat { enum { - LT_Undef= 0, - LT_New= 1, /* new entry added by a table handler */ - LT_Update = 2, /* force kernel update from analyze table */ - LT_Read= 3, /* read or reread stats into new query cache */ - LT_Idle= 4, /* stats exist */ - LT_Check= 5, /* check for new stats */ - LT_Delete= 6, /* delete the entry */ - LT_Error= 7, /* error, on hold for a while */ - LT_Count= 8 + LT_Undef = 0, + LT_New = 1, /* new entry added by a table handler */ + LT_Update = 2, /* force kernel update from analyze table */ + LT_Read = 3, /* read or reread stats into new query cache */ + LT_Idle = 4, /* stats exist */ + LT_Check = 5, /* check for new stats */ + LT_Delete = 6, /* delete the entry */ + LT_Error = 7, /* error, on hold for a while */ + LT_Count = 8 }; - NdbIndexStat* is; + NdbIndexStat *is; int index_id; int index_version; #ifndef DBUG_OFF char id[32]; #endif - time_t access_time; /* by any table handler */ - time_t update_time; /* latest successful update by us */ - time_t load_time; /* when stats were created by kernel */ - time_t read_time; /* when stats were read by us (>= load_time) */ - uint sample_version; /* goes with read_time */ - time_t check_time; /* when checked for updated stats (>= read_time) */ - uint query_bytes; /* cache query bytes in use */ - uint clean_bytes; /* cache clean bytes waiting to be deleted */ - uint drop_bytes; /* cache bytes waiting for drop */ - uint evict_bytes; /* cache bytes waiting for evict */ - bool force_update; /* one-time force update from analyze table */ - bool no_stats; /* have detected that no stats exist */ + time_t access_time; /* by any table handler */ + time_t update_time; /* latest successful update by us */ + time_t load_time; /* when stats were created by kernel */ + time_t read_time; /* when stats were read by us (>= load_time) */ + uint sample_version; /* goes with read_time */ + time_t check_time; /* when checked for updated stats (>= read_time) */ + uint query_bytes; /* cache query bytes in use */ + uint clean_bytes; /* cache clean bytes waiting to be deleted */ + uint drop_bytes; /* cache bytes waiting for drop */ + uint evict_bytes; /* cache bytes waiting for evict */ + bool force_update; /* one-time force update from analyze table */ + bool no_stats; /* have detected that no stats exist */ NdbIndexStat::Error error; NdbIndexStat::Error client_error; time_t error_time; - uint error_count; /* forever increasing */ + uint error_count; /* forever increasing */ struct Ndb_index_stat *share_next; /* per-share list */ int lt; - int lt_old; /* for info only */ + int lt_old; /* for info only */ struct Ndb_index_stat *list_next; struct Ndb_index_stat *list_prev; struct NDB_SHARE *share; - uint ref_count; /* from client requests */ - bool to_delete; /* detached from share and marked for delete */ - bool abort_request; /* abort all requests and allow no more */ + uint ref_count; /* from client requests */ + bool to_delete; /* detached from share and marked for delete */ + bool abort_request; /* abort all requests and allow no more */ Ndb_index_stat(); }; @@ -146,51 +134,39 @@ struct Ndb_index_stat_list { struct Ndb_index_stat *head; struct Ndb_index_stat *tail; uint count; - Ndb_index_stat_list(int the_lt, const char* the_name); + Ndb_index_stat_list(int the_lt, const char *the_name); }; extern Ndb_index_stat_list ndb_index_stat_list[]; -static time_t ndb_index_stat_time_now= 0; +static time_t ndb_index_stat_time_now = 
0; -static time_t -ndb_index_stat_time() -{ - time_t now= time(0); +static time_t ndb_index_stat_time() { + time_t now = time(0); - if (unlikely(ndb_index_stat_time_now == 0)) - ndb_index_stat_time_now= now; + if (unlikely(ndb_index_stat_time_now == 0)) ndb_index_stat_time_now = now; - if (unlikely(now < ndb_index_stat_time_now)) - { + if (unlikely(now < ndb_index_stat_time_now)) { DBUG_PRINT("index_stat", ("time moved backwards %d seconds", int(ndb_index_stat_time_now - now))); - now= ndb_index_stat_time_now; + now = ndb_index_stat_time_now; } - ndb_index_stat_time_now= now; + ndb_index_stat_time_now = now; return now; } /* Options */ /* Options in string format buffer size */ -static const uint ndb_index_stat_option_sz= 512; -static void ndb_index_stat_opt2str(const struct Ndb_index_stat_opt&, char*); +static const uint ndb_index_stat_option_sz = 512; +static void ndb_index_stat_opt2str(const struct Ndb_index_stat_opt &, char *); struct Ndb_index_stat_opt { - enum Unit { - Ubool = 1, - Usize = 2, - Utime = 3, - Umsec = 4 - }; - enum Flag { - Freadonly = (1 << 0), - Fcontrol = (1 << 1) - }; + enum Unit { Ubool = 1, Usize = 2, Utime = 3, Umsec = 4 }; + enum Flag { Freadonly = (1 << 0), Fcontrol = (1 << 1) }; struct Val { - const char* name; + const char *name; uint val; uint minval; uint maxval; @@ -220,7 +196,7 @@ struct Ndb_index_stat_opt { Val val[Imax]; /* Options in string format (SYSVAR ndb_index_stat_option) */ char *option; - Ndb_index_stat_opt(char* buf); + Ndb_index_stat_opt(char *buf); uint get(Idx i) const { assert(i < Imax); return val[i].val; @@ -231,15 +207,13 @@ struct Ndb_index_stat_opt { } }; -Ndb_index_stat_opt::Ndb_index_stat_opt(char* buf) : - option(buf) -{ +Ndb_index_stat_opt::Ndb_index_stat_opt(char *buf) : option(buf) { #define ival(aname, aval, aminval, amaxval, aunit, aflag) \ - val[I##aname].name = #aname; \ - val[I##aname].val = aval; \ - val[I##aname].minval = aminval; \ - val[I##aname].maxval = amaxval; \ - val[I##aname].unit = aunit; \ + val[I##aname].name = #aname; \ + val[I##aname].val = aval; \ + val[I##aname].minval = aminval; \ + val[I##aname].maxval = amaxval; \ + val[I##aname].unit = aunit; \ val[I##aname].flag = aflag ival(loop_enable, 1000, 0, ~(uint)0, Umsec, 0); ival(loop_idle, 1000, 0, ~(uint)0, Umsec, 0); @@ -255,7 +229,7 @@ Ndb_index_stat_opt::Ndb_index_stat_opt(char* buf) : ival(error_delay, 60, 0, ~(uint)0, Utime, 0); ival(evict_batch, 8, 1, ~(uint)0, Usize, 0); ival(evict_delay, 60, 0, ~(uint)0, Utime, 0); - ival(cache_limit, 32*1024*1024, 0, ~(uint)0, Usize, 0); + ival(cache_limit, 32 * 1024 * 1024, 0, ~(uint)0, Usize, 0); ival(cache_lowpct, 90, 0, 100, Usize, 0); ival(zero_total, 0, 0, 1, Ubool, Fcontrol); #undef ival @@ -270,79 +244,68 @@ char ndb_index_stat_option_buf[ndb_index_stat_option_sz]; static Ndb_index_stat_opt ndb_index_stat_opt(ndb_index_stat_option_buf); /* Copy option struct to string buffer */ -static void -ndb_index_stat_opt2str(const Ndb_index_stat_opt& opt, char* str) -{ +static void ndb_index_stat_opt2str(const Ndb_index_stat_opt &opt, char *str) { DBUG_ENTER("ndb_index_stat_opt2str"); char buf[ndb_index_stat_option_sz]; - char *const end= &buf[sizeof(buf)]; - char* ptr= buf; - *ptr= 0; + char *const end = &buf[sizeof(buf)]; + char *ptr = buf; + *ptr = 0; - const uint imax= Ndb_index_stat_opt::Imax; - for (uint i= 0; i < imax; i++) - { - const Ndb_index_stat_opt::Val& v= opt.val[i]; - ptr+= strlen(ptr); - const char* sep= (ptr == buf ? "" : ","); - const uint sz= ptr < end ? 
(uint)(end - ptr) : 0; + const uint imax = Ndb_index_stat_opt::Imax; + for (uint i = 0; i < imax; i++) { + const Ndb_index_stat_opt::Val &v = opt.val[i]; + ptr += strlen(ptr); + const char *sep = (ptr == buf ? "" : ","); + const uint sz = ptr < end ? (uint)(end - ptr) : 0; switch (v.unit) { - case Ndb_index_stat_opt::Ubool: - { + case Ndb_index_stat_opt::Ubool: { DBUG_ASSERT(v.val == 0 || v.val == 1); if (v.val == 0) snprintf(ptr, sz, "%s%s=0", sep, v.name); else snprintf(ptr, sz, "%s%s=1", sep, v.name); - } - break; + } break; - case Ndb_index_stat_opt::Usize: - { + case Ndb_index_stat_opt::Usize: { uint m; if (v.val == 0) snprintf(ptr, sz, "%s%s=0", sep, v.name); - else if (v.val % (m= 1024*1024*1024) == 0) + else if (v.val % (m = 1024 * 1024 * 1024) == 0) snprintf(ptr, sz, "%s%s=%uG", sep, v.name, v.val / m); - else if (v.val % (m= 1024*1024) == 0) + else if (v.val % (m = 1024 * 1024) == 0) snprintf(ptr, sz, "%s%s=%uM", sep, v.name, v.val / m); - else if (v.val % (m= 1024) == 0) + else if (v.val % (m = 1024) == 0) snprintf(ptr, sz, "%s%s=%uK", sep, v.name, v.val / m); else snprintf(ptr, sz, "%s%s=%u", sep, v.name, v.val); - } - break; + } break; - case Ndb_index_stat_opt::Utime: - { + case Ndb_index_stat_opt::Utime: { uint m; if (v.val == 0) snprintf(ptr, sz, "%s%s=0", sep, v.name); - else if (v.val % (m= 60*60*24) == 0) + else if (v.val % (m = 60 * 60 * 24) == 0) snprintf(ptr, sz, "%s%s=%ud", sep, v.name, v.val / m); - else if (v.val % (m= 60*60) == 0) + else if (v.val % (m = 60 * 60) == 0) snprintf(ptr, sz, "%s%s=%uh", sep, v.name, v.val / m); - else if (v.val % (m= 60) == 0) + else if (v.val % (m = 60) == 0) snprintf(ptr, sz, "%s%s=%um", sep, v.name, v.val / m); else snprintf(ptr, sz, "%s%s=%us", sep, v.name, v.val); - } - break; + } break; - case Ndb_index_stat_opt::Umsec: - { + case Ndb_index_stat_opt::Umsec: { if (v.val == 0) snprintf(ptr, sz, "%s%s=0", sep, v.name); else snprintf(ptr, sz, "%s%s=%ums", sep, v.name, v.val); - } - break; + } break; - default: - DBUG_ASSERT(false); - break; + default: + DBUG_ASSERT(false); + break; } } @@ -352,156 +315,121 @@ ndb_index_stat_opt2str(const Ndb_index_stat_opt& opt, char* str) DBUG_VOID_RETURN; } -static int -ndb_index_stat_option_parse(char* p, Ndb_index_stat_opt& opt) -{ +static int ndb_index_stat_option_parse(char *p, Ndb_index_stat_opt &opt) { DBUG_ENTER("ndb_index_stat_option_parse"); - char *r= strchr(p, '='); - if (r == 0) - DBUG_RETURN(-1); - *r++= 0; + char *r = strchr(p, '='); + if (r == 0) DBUG_RETURN(-1); + *r++ = 0; - while (isspace(*r)) - *r++= 0; - if (*r == 0) - DBUG_RETURN(-1); + while (isspace(*r)) *r++ = 0; + if (*r == 0) DBUG_RETURN(-1); - bool found= false; - const uint imax= Ndb_index_stat_opt::Imax; - for (uint i= 0; i < imax; i++) - { - Ndb_index_stat_opt::Val& v= opt.val[i]; - if (strcmp(p, v.name) != 0) - continue; - found= true; + bool found = false; + const uint imax = Ndb_index_stat_opt::Imax; + for (uint i = 0; i < imax; i++) { + Ndb_index_stat_opt::Val &v = opt.val[i]; + if (strcmp(p, v.name) != 0) continue; + found = true; char *s; - for (s= r; *s != 0; s++) - *s= tolower(*s); - ulonglong val= my_strtoull(r, &s, 10); + for (s = r; *s != 0; s++) *s = tolower(*s); + ulonglong val = my_strtoull(r, &s, 10); switch (v.unit) { - case Ndb_index_stat_opt::Ubool: - { - if ((s > r && *s == 0 && val == 0) || - strcmp(r, "off") == 0 || + case Ndb_index_stat_opt::Ubool: { + if ((s > r && *s == 0 && val == 0) || strcmp(r, "off") == 0 || strcmp(r, "false") == 0) - val= 0; - else if ((s > r && *s == 0 && val == 1) || - 
strcmp(r, "on") == 0 || - strcmp(r, "true") == 0) - val= 1; + val = 0; + else if ((s > r && *s == 0 && val == 1) || strcmp(r, "on") == 0 || + strcmp(r, "true") == 0) + val = 1; else DBUG_RETURN(-1); - v.val= (uint)val; - } - break; + v.val = (uint)val; + } break; - case Ndb_index_stat_opt::Usize: - { - if (s == r) - DBUG_RETURN(-1); + case Ndb_index_stat_opt::Usize: { + if (s == r) DBUG_RETURN(-1); if (strcmp(s, "") == 0) ; else if (strcmp(s, "k") == 0) - val*= 1024; + val *= 1024; else if (strcmp(s, "m") == 0) - val*= 1024*1024; + val *= 1024 * 1024; else if (strcmp(s, "g") == 0) - val*= 1024*1024*1024; + val *= 1024 * 1024 * 1024; else DBUG_RETURN(-1); - if (val < v.minval || val > v.maxval) - DBUG_RETURN(-1); - v.val= (uint)val; - } - break; + if (val < v.minval || val > v.maxval) DBUG_RETURN(-1); + v.val = (uint)val; + } break; - case Ndb_index_stat_opt::Utime: - { - if (s == r) - DBUG_RETURN(-1); + case Ndb_index_stat_opt::Utime: { + if (s == r) DBUG_RETURN(-1); if (strcmp(s, "") == 0) ; else if (strcmp(s, "s") == 0) ; else if (strcmp(s, "m") == 0) - val*= 60; + val *= 60; else if (strcmp(s, "h") == 0) - val*= 60*60; + val *= 60 * 60; else if (strcmp(s, "d") == 0) - val*= 24*60*60; + val *= 24 * 60 * 60; else DBUG_RETURN(-1); - if (val < v.minval || val > v.maxval) - DBUG_RETURN(-1); - v.val= (uint)val; - } - break; + if (val < v.minval || val > v.maxval) DBUG_RETURN(-1); + v.val = (uint)val; + } break; - case Ndb_index_stat_opt::Umsec: - { - if (s == r) - DBUG_RETURN(-1); + case Ndb_index_stat_opt::Umsec: { + if (s == r) DBUG_RETURN(-1); if (strcmp(s, "") == 0) ; else if (strcmp(s, "ms") == 0) ; else DBUG_RETURN(-1); - if (val < v.minval || val > v.maxval) - DBUG_RETURN(-1); - v.val= (uint)val; - } - break; + if (val < v.minval || val > v.maxval) DBUG_RETURN(-1); + v.val = (uint)val; + } break; - default: - DBUG_ASSERT(false); - break; + default: + DBUG_ASSERT(false); + break; } } - if (!found) - DBUG_RETURN(-1); + if (!found) DBUG_RETURN(-1); DBUG_RETURN(0); } /* Copy option string to option struct */ -static int -ndb_index_stat_str2opt(const char *str, Ndb_index_stat_opt& opt) -{ +static int ndb_index_stat_str2opt(const char *str, Ndb_index_stat_opt &opt) { DBUG_ENTER("ndb_index_stat_str2opt"); DBUG_PRINT("index_stat", ("str: \"%s\"", str)); char buf[ndb_index_stat_option_sz]; assert(str != 0); - if (strlen(str) >= sizeof(buf)) - DBUG_RETURN(-1); + if (strlen(str) >= sizeof(buf)) DBUG_RETURN(-1); strcpy(buf, str); - char *p= buf; - while (1) - { - while (isspace(*p)) - p++; - if (*p == 0) - break; + char *p = buf; + while (1) { + while (isspace(*p)) p++; + if (*p == 0) break; - char *q= strchr(p, ','); - if (q == p) - DBUG_RETURN(-1); - if (q != 0) - *q= 0; + char *q = strchr(p, ','); + if (q == p) DBUG_RETURN(-1); + if (q != 0) *q = 0; DBUG_PRINT("index_stat", ("parse: %s", p)); - if (ndb_index_stat_option_parse(p, opt) == -1) - DBUG_RETURN(-1); + if (ndb_index_stat_option_parse(p, opt) == -1) DBUG_RETURN(-1); - if (q == 0) - break; - p= q + 1; + if (q == 0) break; + p = q + 1; } ndb_index_stat_opt2str(opt, opt.option); @@ -513,68 +441,64 @@ ndb_index_stat_str2opt(const char *str, Ndb_index_stat_opt& opt) /* Need storage between check and update (assume locked) */ static char ndb_index_stat_option_tmp[ndb_index_stat_option_sz]; -int ndb_index_stat_option_check(THD *, SYS_VAR*, void *save, - struct st_mysql_value *value) -{ +int ndb_index_stat_option_check(THD *, SYS_VAR *, void *save, + struct st_mysql_value *value) { DBUG_ENTER("ndb_index_stat_option_check"); char 
buf[ndb_index_stat_option_sz]; - int len= sizeof(buf); - const char *str= value->val_str(value, buf, &len); - if (str != 0) - { + int len = sizeof(buf); + const char *str = value->val_str(value, buf, &len); + if (str != 0) { /* Seems to be nothing in buf */ DBUG_PRINT("index_stat", ("str: %s len: %d", str, len)); char buf2[ndb_index_stat_option_sz]; Ndb_index_stat_opt opt(buf2); - if (ndb_index_stat_str2opt(str, opt) == 0) - { + if (ndb_index_stat_str2opt(str, opt) == 0) { /* Passed to update */ strcpy(ndb_index_stat_option_tmp, str); - *(const char**)save= ndb_index_stat_option_tmp; + *(const char **)save = ndb_index_stat_option_tmp; DBUG_RETURN(0); } } DBUG_RETURN(1); } -void ndb_index_stat_option_update(THD *, SYS_VAR*, - void *var_ptr, const void *save) -{ +void ndb_index_stat_option_update(THD *, SYS_VAR *, void *var_ptr, + const void *save) { DBUG_ENTER("ndb_index_stat_option_update"); - const char *str= *static_cast<const char*const*>(save); + const char *str = *static_cast<const char *const *>(save); DBUG_PRINT("index_stat", ("str: %s", str)); - Ndb_index_stat_opt& opt= ndb_index_stat_opt; - int ret= ndb_index_stat_str2opt(str, opt); + Ndb_index_stat_opt &opt = ndb_index_stat_opt; + int ret = ndb_index_stat_str2opt(str, opt); ndbcluster::ndbrequire(ret == 0); - *(const char**)var_ptr= ndb_index_stat_opt.option; + *(const char **)var_ptr = ndb_index_stat_opt.option; DBUG_VOID_RETURN; } /* Global stuff */ struct Ndb_index_stat_glob { - bool th_allow; /* Queries allowed */ - bool th_enable; /* Stats thread idea of ndb_index_stat_enable */ - bool th_busy; /* Stats thread is busy-looping */ - uint th_loop; /* Stats thread current loop wait in ms */ + bool th_allow; /* Queries allowed */ + bool th_enable; /* Stats thread idea of ndb_index_stat_enable */ + bool th_busy; /* Stats thread is busy-looping */ + uint th_loop; /* Stats thread current loop wait in ms */ uint force_update; uint wait_update; uint no_stats; uint wait_stats; /* Accumulating counters */ - uint analyze_count; /* Client counters */ + uint analyze_count; /* Client counters */ uint analyze_error; uint query_count; uint query_no_stats; uint query_error; - uint event_act; /* Events acted on */ - uint event_skip; /* Events skipped (likely event-to-self) */ - uint event_miss; /* Events received for unknown index */ - uint refresh_count; /* Successful cache refreshes */ - uint clean_count; /* Times old caches (1 or more) cleaned */ - uint pinned_count; /* Times not cleaned due to old cache ref count */ - uint drop_count; /* From index drop */ - uint evict_count; /* From LRU cleanup */ + uint event_act; /* Events acted on */ + uint event_skip; /* Events skipped (likely event-to-self) */ + uint event_miss; /* Events received for unknown index */ + uint refresh_count; /* Successful cache refreshes */ + uint clean_count; /* Times old caches (1 or more) cleaned */ + uint pinned_count; /* Times not cleaned due to old cache ref count */ + uint drop_count; /* From index drop */ + uint evict_count; /* From LRU cleanup */ /* Cache */ uint cache_query_bytes; /* In use */ uint cache_clean_bytes; /* Obsolete versions not yet removed */ @@ -589,36 +513,35 @@ struct Ndb_index_stat_glob { void zero_total(); }; -Ndb_index_stat_glob::Ndb_index_stat_glob() -{ - th_allow= false; - th_enable= false; - th_busy= false; - th_loop= 0; - force_update= 0; - wait_update= 0; - no_stats= 0; - wait_stats= 0; - analyze_count= 0; - analyze_error= 0; - query_count= 0; - query_no_stats= 0; - query_error= 0; - event_act= 0; - event_skip= 0; - event_miss= 0; - refresh_count= 0; - clean_count= 0; - 
pinned_count= 0; - drop_count= 0; - evict_count= 0; - cache_query_bytes= 0; - cache_clean_bytes= 0; - cache_high_bytes= 0; - cache_drop_bytes= 0; - cache_evict_bytes= 0; +Ndb_index_stat_glob::Ndb_index_stat_glob() { + th_allow = false; + th_enable = false; + th_busy = false; + th_loop = 0; + force_update = 0; + wait_update = 0; + no_stats = 0; + wait_stats = 0; + analyze_count = 0; + analyze_error = 0; + query_count = 0; + query_no_stats = 0; + query_error = 0; + event_act = 0; + event_skip = 0; + event_miss = 0; + refresh_count = 0; + clean_count = 0; + pinned_count = 0; + drop_count = 0; + evict_count = 0; + cache_query_bytes = 0; + cache_clean_bytes = 0; + cache_high_bytes = 0; + cache_drop_bytes = 0; + cache_evict_bytes = 0; memset(status, 0, sizeof(status)); - status_i= 0; + status_i = 0; } static Ndb_index_stat_glob ndb_index_stat_glob; @@ -628,21 +551,14 @@ static Ndb_index_stat_glob ndb_index_stat_glob; objects. Sync the value with global status ("allow" field). */ -static bool ndb_index_stat_allow_flag= false; +static bool ndb_index_stat_allow_flag = false; -static bool -ndb_index_stat_get_allow() -{ - return ndb_index_stat_allow_flag; -} +static bool ndb_index_stat_get_allow() { return ndb_index_stat_allow_flag; } -static bool -ndb_index_stat_set_allow(bool flag) -{ - if (ndb_index_stat_allow_flag != flag) - { - ndb_index_stat_allow_flag= flag; - Ndb_index_stat_glob &glob= ndb_index_stat_glob; +static bool ndb_index_stat_set_allow(bool flag) { + if (ndb_index_stat_allow_flag != flag) { + ndb_index_stat_allow_flag = flag; + Ndb_index_stat_glob &glob = ndb_index_stat_glob; mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); glob.set_status(); mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); @@ -654,138 +570,131 @@ static const char *g_ndb_status_index_stat_status = ""; static long g_ndb_status_index_stat_cache_query = 0; static long g_ndb_status_index_stat_cache_clean = 0; - /* Update status variable (must hold stat_mutex) */ -void -Ndb_index_stat_glob::set_status() -{ - const Ndb_index_stat_opt &opt= ndb_index_stat_opt; - char* p= status[status_i]; +void Ndb_index_stat_glob::set_status() { + const Ndb_index_stat_opt &opt = ndb_index_stat_opt; + char *p = status[status_i]; // stats thread - th_allow= ndb_index_stat_get_allow(); - sprintf(p, "allow:%d,enable:%d,busy:%d,loop:%u", - th_allow, th_enable, th_busy, th_loop); - p+= strlen(p); + th_allow = ndb_index_stat_get_allow(); + sprintf(p, "allow:%d,enable:%d,busy:%d,loop:%u", th_allow, th_enable, th_busy, + th_loop); + p += strlen(p); // entry lists strcpy(p, ",list:("); - p+= strlen(p); - uint list_count= 0; - for (int lt= 1; lt < Ndb_index_stat::LT_Count; lt++) - { - const Ndb_index_stat_list &list= ndb_index_stat_list[lt]; + p += strlen(p); + uint list_count = 0; + for (int lt = 1; lt < Ndb_index_stat::LT_Count; lt++) { + const Ndb_index_stat_list &list = ndb_index_stat_list[lt]; sprintf(p, "%s:%u,", list.name, list.count); - p+= strlen(p); - list_count+= list.count; + p += strlen(p); + list_count += list.count; } sprintf(p, "%s:%u)", "total", list_count); - p+= strlen(p); + p += strlen(p); // special counters sprintf(p, ",analyze:(queue:%u,wait:%u)", force_update, wait_update); - p+= strlen(p); + p += strlen(p); sprintf(p, ",stats:(nostats:%u,wait:%u)", no_stats, wait_stats); - p+= strlen(p); + p += strlen(p); // accumulating counters sprintf(p, ",total:("); - p+= strlen(p); + p += strlen(p); sprintf(p, "analyze:(all:%u,error:%u)", analyze_count, analyze_error); - p+= strlen(p); - sprintf(p, 
",query:(all:%u,nostats:%u,error:%u)", - query_count, query_no_stats, query_error); - p+= strlen(p); - sprintf(p, ",event:(act:%u,skip:%u,miss:%u)", - event_act, event_skip, event_miss); - p+= strlen(p); + p += strlen(p); + sprintf(p, ",query:(all:%u,nostats:%u,error:%u)", query_count, query_no_stats, + query_error); + p += strlen(p); + sprintf(p, ",event:(act:%u,skip:%u,miss:%u)", event_act, event_skip, + event_miss); + p += strlen(p); sprintf(p, ",cache:(refresh:%u,clean:%u,pinned:%u,drop:%u,evict:%u)", - refresh_count, clean_count, pinned_count, drop_count, evict_count); - p+= strlen(p); + refresh_count, clean_count, pinned_count, drop_count, evict_count); + p += strlen(p); sprintf(p, ")"); - p+= strlen(p); + p += strlen(p); // cache size - const uint cache_limit= opt.get(Ndb_index_stat_opt::Icache_limit); - const uint cache_total= cache_query_bytes + cache_clean_bytes; - double cache_pct= (double)0.0; - double cache_high_pct= (double)0.0; - if (cache_limit != 0) - { - cache_pct= (double)100.0 * (double)cache_total / (double)cache_limit; - cache_high_pct= (double)100.0 * (double)cache_high_bytes / (double)cache_limit; - } - sprintf(p, ",cache:(query:%u,clean:%u" - ",drop:%u,evict:%u" - ",usedpct:%.2f,highpct:%.2f)", - cache_query_bytes, cache_clean_bytes, - cache_drop_bytes, cache_evict_bytes, - cache_pct, cache_high_pct); - p+= strlen(p); + const uint cache_limit = opt.get(Ndb_index_stat_opt::Icache_limit); + const uint cache_total = cache_query_bytes + cache_clean_bytes; + double cache_pct = (double)0.0; + double cache_high_pct = (double)0.0; + if (cache_limit != 0) { + cache_pct = (double)100.0 * (double)cache_total / (double)cache_limit; + cache_high_pct = + (double)100.0 * (double)cache_high_bytes / (double)cache_limit; + } + sprintf(p, + ",cache:(query:%u,clean:%u" + ",drop:%u,evict:%u" + ",usedpct:%.2f,highpct:%.2f)", + cache_query_bytes, cache_clean_bytes, cache_drop_bytes, + cache_evict_bytes, cache_pct, cache_high_pct); + p += strlen(p); // alternating status buffers to keep this lock short mysql_mutex_lock(&LOCK_global_system_variables); - g_ndb_status_index_stat_status= status[status_i]; - status_i= (status_i + 1) % 2; - g_ndb_status_index_stat_cache_query= cache_query_bytes; - g_ndb_status_index_stat_cache_clean= cache_clean_bytes; + g_ndb_status_index_stat_status = status[status_i]; + status_i = (status_i + 1) % 2; + g_ndb_status_index_stat_cache_query = cache_query_bytes; + g_ndb_status_index_stat_cache_clean = cache_clean_bytes; mysql_mutex_unlock(&LOCK_global_system_variables); } /* Zero accumulating counters */ -void -Ndb_index_stat_glob::zero_total() -{ - analyze_count= 0; - analyze_error= 0; - query_count= 0; - query_no_stats= 0; - query_error= 0; - event_act= 0; - event_skip= 0; - event_miss= 0; - refresh_count= 0; - clean_count= 0; - pinned_count= 0; - drop_count= 0; - evict_count= 0; +void Ndb_index_stat_glob::zero_total() { + analyze_count = 0; + analyze_error = 0; + query_count = 0; + query_no_stats = 0; + query_error = 0; + event_act = 0; + event_skip = 0; + event_miss = 0; + refresh_count = 0; + clean_count = 0; + pinned_count = 0; + drop_count = 0; + evict_count = 0; /* Reset highest use seen to current */ - cache_high_bytes= cache_query_bytes + cache_clean_bytes; + cache_high_bytes = cache_query_bytes + cache_clean_bytes; } /* Shared index entries */ -Ndb_index_stat::Ndb_index_stat() -{ - is= 0; - index_id= 0; - index_version= 0; +Ndb_index_stat::Ndb_index_stat() { + is = 0; + index_id = 0; + index_version = 0; #ifndef DBUG_OFF memset(id, 0, sizeof(id)); 
#endif - access_time= 0; - update_time= 0; - load_time= 0; - read_time= 0; - sample_version= 0; - check_time= 0; - query_bytes= 0; - clean_bytes= 0; - drop_bytes= 0; - evict_bytes= 0; - force_update= false; - no_stats= false; - error_time= 0; - error_count= 0; - share_next= 0; - lt= 0; - lt_old= 0; - list_next= 0; - list_prev= 0; - share= 0; - ref_count= 0; - to_delete= false; - abort_request= false; + access_time = 0; + update_time = 0; + load_time = 0; + read_time = 0; + sample_version = 0; + check_time = 0; + query_bytes = 0; + clean_bytes = 0; + drop_bytes = 0; + evict_bytes = 0; + force_update = false; + no_stats = false; + error_time = 0; + error_count = 0; + share_next = 0; + lt = 0; + lt_old = 0; + list_next = 0; + list_prev = 0; + share = 0; + ref_count = 0; + to_delete = false; + abort_request = false; } /* @@ -794,124 +703,102 @@ Ndb_index_stat::Ndb_index_stat() since they are probably local e.g. bad range (internal error). Argument "from" is 0=stats thread 1=client. */ -static void -ndb_index_stat_error(Ndb_index_stat *st, - int from, const char* place MY_ATTRIBUTE((unused))) -{ - time_t now= ndb_index_stat_time(); - NdbIndexStat::Error error= st->is->getNdbError(); - if (error.code == 0) - { +static void ndb_index_stat_error(Ndb_index_stat *st, int from, + const char *place MY_ATTRIBUTE((unused))) { + time_t now = ndb_index_stat_time(); + NdbIndexStat::Error error = st->is->getNdbError(); + if (error.code == 0) { /* Make sure code is not 0 */ NdbIndexStat::Error error2; - error= error2; - error.code= NdbIndexStat::InternalError; - error.status= NdbError::TemporaryError; - } - if (from == 0) - { - st->error= error; - st->error_time= now; /* Controls proc_error */ - } - else - st->client_error= error; + error = error2; + error.code = NdbIndexStat::InternalError; + error.status = NdbError::TemporaryError; + } + if (from == 0) { + st->error = error; + st->error_time = now; /* Controls proc_error */ + } else + st->client_error = error; st->error_count++; - DBUG_PRINT("index_stat", ("%s error, code: %d, line: %d, extra: %d", - place, error.code, error.line, error.extra)); + DBUG_PRINT("index_stat", ("%s error, code: %d, line: %d, extra: %d", place, + error.code, error.line, error.extra)); } -static void -ndb_index_stat_clear_error(Ndb_index_stat *st) -{ - st->error.code= 0; - st->error.status= NdbError::Success; +static void ndb_index_stat_clear_error(Ndb_index_stat *st) { + st->error.code = 0; + st->error.status = NdbError::Success; } /* Lists across shares */ -Ndb_index_stat_list::Ndb_index_stat_list(int the_lt, const char* the_name) -{ - lt= the_lt; - name= the_name; - head= 0; - tail= 0; - count= 0; +Ndb_index_stat_list::Ndb_index_stat_list(int the_lt, const char *the_name) { + lt = the_lt; + name = the_name; + head = 0; + tail = 0; + count = 0; } Ndb_index_stat_list ndb_index_stat_list[Ndb_index_stat::LT_Count] = { - Ndb_index_stat_list(0, 0), - Ndb_index_stat_list(Ndb_index_stat::LT_New, "new"), - Ndb_index_stat_list(Ndb_index_stat::LT_Update, "update"), - Ndb_index_stat_list(Ndb_index_stat::LT_Read, "read"), - Ndb_index_stat_list(Ndb_index_stat::LT_Idle, "idle"), - Ndb_index_stat_list(Ndb_index_stat::LT_Check, "check"), - Ndb_index_stat_list(Ndb_index_stat::LT_Delete, "delete"), - Ndb_index_stat_list(Ndb_index_stat::LT_Error, "error") -}; - -static void -ndb_index_stat_list_add(Ndb_index_stat* st, int lt) -{ + Ndb_index_stat_list(0, 0), + Ndb_index_stat_list(Ndb_index_stat::LT_New, "new"), + Ndb_index_stat_list(Ndb_index_stat::LT_Update, "update"), + 
Ndb_index_stat_list(Ndb_index_stat::LT_Read, "read"), + Ndb_index_stat_list(Ndb_index_stat::LT_Idle, "idle"), + Ndb_index_stat_list(Ndb_index_stat::LT_Check, "check"), + Ndb_index_stat_list(Ndb_index_stat::LT_Delete, "delete"), + Ndb_index_stat_list(Ndb_index_stat::LT_Error, "error")}; + +static void ndb_index_stat_list_add(Ndb_index_stat *st, int lt) { assert(st != 0 && st->lt == 0); assert(st->list_next == 0 && st->list_prev == 0); assert(1 <= lt && lt < Ndb_index_stat::LT_Count); - Ndb_index_stat_list &list= ndb_index_stat_list[lt]; + Ndb_index_stat_list &list = ndb_index_stat_list[lt]; DBUG_PRINT("index_stat", ("st %s -> %s", st->id, list.name)); - if (list.count == 0) - { + if (list.count == 0) { assert(list.head == 0 && list.tail == 0); - list.head= st; - list.tail= st; - } - else - { + list.head = st; + list.tail = st; + } else { assert(list.tail != 0 && list.tail->list_next == 0); - st->list_prev= list.tail; - list.tail->list_next= st; - list.tail= st; + st->list_prev = list.tail; + list.tail->list_next = st; + list.tail = st; } list.count++; - st->lt= lt; + st->lt = lt; } -static void -ndb_index_stat_list_remove(Ndb_index_stat* st) -{ +static void ndb_index_stat_list_remove(Ndb_index_stat *st) { assert(st != 0); - int lt= st->lt; + int lt = st->lt; assert(1 <= lt && lt < Ndb_index_stat::LT_Count); - Ndb_index_stat_list &list= ndb_index_stat_list[lt]; + Ndb_index_stat_list &list = ndb_index_stat_list[lt]; DBUG_PRINT("index_stat", ("st %s <- %s", st->id, list.name)); - Ndb_index_stat* next= st->list_next; - Ndb_index_stat* prev= st->list_prev; + Ndb_index_stat *next = st->list_next; + Ndb_index_stat *prev = st->list_prev; - if (list.head == st) - list.head= next; - if (list.tail == st) - list.tail= prev; + if (list.head == st) list.head = next; + if (list.tail == st) list.tail = prev; assert(list.count != 0); list.count--; - if (next != 0) - next->list_prev= prev; - if (prev != 0) - prev->list_next= next; + if (next != 0) next->list_prev = prev; + if (prev != 0) prev->list_next = next; - st->lt= 0; - st->lt_old= 0; - st->list_next= 0; - st->list_prev= 0; + st->lt = 0; + st->lt_old = 0; + st->list_next = 0; + st->list_prev = 0; } -static void -ndb_index_stat_list_move(Ndb_index_stat *st, int lt) -{ +static void ndb_index_stat_list_move(Ndb_index_stat *st, int lt) { assert(st != 0); ndb_index_stat_list_remove(st); ndb_index_stat_list_add(st, lt); @@ -919,68 +806,50 @@ ndb_index_stat_list_move(Ndb_index_stat *st, int lt) /* Stats entry changes (must hold stat_mutex) */ -static void -ndb_index_stat_force_update(Ndb_index_stat *st, bool onoff) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; - if (onoff) - { - if (!st->force_update) - { +static void ndb_index_stat_force_update(Ndb_index_stat *st, bool onoff) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; + if (onoff) { + if (!st->force_update) { glob.force_update++; - st->force_update= true; + st->force_update = true; glob.set_status(); } - } - else - { - if (st->force_update) - { + } else { + if (st->force_update) { assert(glob.force_update != 0); glob.force_update--; - st->force_update= false; + st->force_update = false; glob.set_status(); } } } -static void -ndb_index_stat_no_stats(Ndb_index_stat *st, bool flag) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; - if (st->no_stats != flag) - { - if (flag) - { +static void ndb_index_stat_no_stats(Ndb_index_stat *st, bool flag) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; + if (st->no_stats != flag) { + if (flag) { glob.no_stats++; - st->no_stats= true; - } - else - 
{ + st->no_stats = true; + } else { assert(glob.no_stats >= 1); - glob.no_stats-= 1; - st->no_stats= false; + glob.no_stats -= 1; + st->no_stats = false; } glob.set_status(); } } -static void -ndb_index_stat_ref_count(Ndb_index_stat *st, bool flag) -{ - uint old_count= st->ref_count; - (void)old_count; // USED - if (flag) - { +static void ndb_index_stat_ref_count(Ndb_index_stat *st, bool flag) { + uint old_count = st->ref_count; + (void)old_count; // USED + if (flag) { st->ref_count++; - } - else - { + } else { assert(st->ref_count != 0); st->ref_count--; } - DBUG_PRINT("index_stat", ("st %s ref_count:%u->%u", - st->id, old_count, st->ref_count)); + DBUG_PRINT("index_stat", + ("st %s ref_count:%u->%u", st->id, old_count, st->ref_count)); } /* Find or add entry under the share */ @@ -990,52 +859,44 @@ struct Ndb_index_stat_snap { time_t load_time; uint sample_version; uint error_count; - Ndb_index_stat_snap() { load_time= 0; sample_version= 0; error_count= 0; } + Ndb_index_stat_snap() { + load_time = 0; + sample_version = 0; + error_count = 0; + } }; /* Subroutine, have lock */ -static Ndb_index_stat* -ndb_index_stat_alloc(const NDBINDEX *index, - const NDBTAB *table, - int &err_out) -{ - err_out= 0; - Ndb_index_stat *st= new Ndb_index_stat; - NdbIndexStat *is= new NdbIndexStat; - if (st != 0 && is != 0) - { - st->is= is; - st->index_id= index->getObjectId(); - st->index_version= index->getObjectVersion(); +static Ndb_index_stat *ndb_index_stat_alloc(const NDBINDEX *index, + const NDBTAB *table, int &err_out) { + err_out = 0; + Ndb_index_stat *st = new Ndb_index_stat; + NdbIndexStat *is = new NdbIndexStat; + if (st != 0 && is != 0) { + st->is = is; + st->index_id = index->getObjectId(); + st->index_version = index->getObjectVersion(); #ifndef DBUG_OFF snprintf(st->id, sizeof(st->id), "%d.%d", st->index_id, st->index_version); #endif - if (is->set_index(*index, *table) == 0) - return st; + if (is->set_index(*index, *table) == 0) return st; ndb_index_stat_error(st, 1, "set_index"); - err_out= st->client_error.code; + err_out = st->client_error.code; + } else { + err_out = NdbIndexStat::NoMemError; } - else - { - err_out= NdbIndexStat::NoMemError; - } - if (is != 0) - delete is; - if (st != 0) - delete st; + if (is != 0) delete is; + if (st != 0) delete st; return 0; } /* Subroutine, have lock */ -static Ndb_index_stat* -ndb_index_stat_find_share(NDB_SHARE *share, - const NDBINDEX *index, - Ndb_index_stat *&st_last) -{ - struct Ndb_index_stat *st= share->index_stat_list; - st_last= 0; - while (st != 0) - { +static Ndb_index_stat *ndb_index_stat_find_share(NDB_SHARE *share, + const NDBINDEX *index, + Ndb_index_stat *&st_last) { + struct Ndb_index_stat *st = share->index_stat_list; + st_last = 0; + while (st != 0) { assert(st->share == share); assert(st->is != 0); NdbIndexStat::Head head; @@ -1043,89 +904,72 @@ ndb_index_stat_find_share(NDB_SHARE *share, if (head.m_indexId == (uint)index->getObjectId() && head.m_indexVersion == (uint)index->getObjectVersion()) break; - st_last= st; - st= st->share_next; + st_last = st; + st = st->share_next; } return st; } /* Subroutine, have lock */ -static void -ndb_index_stat_add_share(NDB_SHARE *share, - Ndb_index_stat *st, - Ndb_index_stat *st_last) -{ - st->share= share; +static void ndb_index_stat_add_share(NDB_SHARE *share, Ndb_index_stat *st, + Ndb_index_stat *st_last) { + st->share = share; if (st_last == 0) - share->index_stat_list= st; + share->index_stat_list = st; else - st_last->share_next= st; + st_last->share_next = st; } -static Ndb_index_stat* 
-ndb_index_stat_get_share(NDB_SHARE *share, - const NDBINDEX *index, - const NDBTAB *table, - Ndb_index_stat_snap &snap, - int &err_out, - bool allow_add, - bool force_update) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; +static Ndb_index_stat *ndb_index_stat_get_share(NDB_SHARE *share, + const NDBINDEX *index, + const NDBTAB *table, + Ndb_index_stat_snap &snap, + int &err_out, bool allow_add, + bool force_update) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; mysql_mutex_lock(&share->mutex); mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); - time_t now= ndb_index_stat_time(); - err_out= 0; - - struct Ndb_index_stat *st= 0; - struct Ndb_index_stat *st_last= 0; - do - { - if (unlikely(!ndb_index_stat_get_allow())) - { - err_out= NdbIndexStat::MyNotAllow; + time_t now = ndb_index_stat_time(); + err_out = 0; + + struct Ndb_index_stat *st = 0; + struct Ndb_index_stat *st_last = 0; + do { + if (unlikely(!ndb_index_stat_get_allow())) { + err_out = NdbIndexStat::MyNotAllow; break; } - st= ndb_index_stat_find_share(share, index, st_last); - if (st == 0) - { - if (!allow_add) - { - err_out= NdbIndexStat::MyNotFound; + st = ndb_index_stat_find_share(share, index, st_last); + if (st == 0) { + if (!allow_add) { + err_out = NdbIndexStat::MyNotFound; break; } - st= ndb_index_stat_alloc(index, table, err_out); - if (st == 0) - { + st = ndb_index_stat_alloc(index, table, err_out); + if (st == 0) { assert(err_out != 0); break; } ndb_index_stat_add_share(share, st, st_last); ndb_index_stat_list_add(st, Ndb_index_stat::LT_New); glob.set_status(); - } - else if (unlikely(st->abort_request)) - { - err_out= NdbIndexStat::MyAbortReq; + } else if (unlikely(st->abort_request)) { + err_out = NdbIndexStat::MyAbortReq; break; } - if (force_update) - ndb_index_stat_force_update(st, true); - snap.load_time= st->load_time; - snap.sample_version= st->sample_version; - snap.error_count= st->error_count; - st->access_time= now; - } - while (0); - - if (err_out == 0) - { + if (force_update) ndb_index_stat_force_update(st, true); + snap.load_time = st->load_time; + snap.sample_version = st->sample_version; + snap.error_count = st->error_count; + st->access_time = now; + } while (0); + + if (err_out == 0) { assert(st != 0); ndb_index_stat_ref_count(st, true); - } - else - st= 0; + } else + st = 0; mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); mysql_mutex_unlock(&share->mutex); @@ -1138,77 +982,66 @@ ndb_index_stat_get_share(NDB_SHARE *share, */ /* caller must hold stat_mutex */ -static void -ndb_index_stat_free(Ndb_index_stat *st) -{ +static void ndb_index_stat_free(Ndb_index_stat *st) { DBUG_ENTER("ndb_index_stat_free"); - Ndb_index_stat_glob &glob= ndb_index_stat_glob; - NDB_SHARE *share= st->share; + Ndb_index_stat_glob &glob = ndb_index_stat_glob; + NDB_SHARE *share = st->share; assert(share != 0); - Ndb_index_stat *st_head= 0; - Ndb_index_stat *st_tail= 0; - Ndb_index_stat *st_loop= share->index_stat_list; - uint found= 0; - while (st_loop != 0) - { - if (st == st_loop) - { + Ndb_index_stat *st_head = 0; + Ndb_index_stat *st_tail = 0; + Ndb_index_stat *st_loop = share->index_stat_list; + uint found = 0; + while (st_loop != 0) { + if (st == st_loop) { DBUG_PRINT("index_stat", ("st %s stat free one", st->id)); - st_loop= st_loop->share_next; - st->share_next= 0; - st->share= 0; + st_loop = st_loop->share_next; + st->share_next = 0; + st->share = 0; assert(st->lt != 0); assert(st->lt != Ndb_index_stat::LT_Delete); assert(!st->to_delete); - st->to_delete= true; - st->abort_request= true; + 
st->to_delete = true; + st->abort_request = true; found++; - } - else - { + } else { if (st_head == 0) - st_head= st_loop; + st_head = st_loop; else - st_tail->share_next= st_loop; - st_tail= st_loop; - st_loop= st_loop->share_next; - st_tail->share_next= 0; + st_tail->share_next = st_loop; + st_tail = st_loop; + st_loop = st_loop->share_next; + st_tail->share_next = 0; } } assert(found == 1); - share->index_stat_list= st_head; + share->index_stat_list = st_head; glob.set_status(); DBUG_VOID_RETURN; } /* Interface to online drop index */ -void -ndb_index_stat_free(NDB_SHARE *share, int index_id, int index_version) -{ +void ndb_index_stat_free(NDB_SHARE *share, int index_id, int index_version) { DBUG_ENTER("ndb_index_stat_free"); - DBUG_PRINT("index_stat", ("(index_id:%d index_version:%d", - index_id, index_version)); - Ndb_index_stat_glob &glob= ndb_index_stat_glob; + DBUG_PRINT("index_stat", + ("(index_id:%d index_version:%d", index_id, index_version)); + Ndb_index_stat_glob &glob = ndb_index_stat_glob; mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); - uint found= 0; - Ndb_index_stat *st= share->index_stat_list; - while (st != 0) - { - if (st->index_id == index_id && - st->index_version == index_version) - { + uint found = 0; + Ndb_index_stat *st = share->index_stat_list; + while (st != 0) { + if (st->index_id == index_id && st->index_version == index_version) { ndb_index_stat_free(st); found++; glob.drop_count++; assert(st->drop_bytes == 0); - st->drop_bytes= st->query_bytes + st->clean_bytes; - glob.cache_drop_bytes+= st->drop_bytes; + st->drop_bytes = st->query_bytes + st->clean_bytes; + glob.cache_drop_bytes += st->drop_bytes; break; } - st= st->share_next; + st = st->share_next; } glob.set_status(); @@ -1216,29 +1049,26 @@ ndb_index_stat_free(NDB_SHARE *share, int index_id, int index_version) DBUG_VOID_RETURN; } -void -ndb_index_stat_free(NDB_SHARE *share) -{ +void ndb_index_stat_free(NDB_SHARE *share) { DBUG_ENTER("ndb_index_stat_free"); - Ndb_index_stat_glob &glob= ndb_index_stat_glob; + Ndb_index_stat_glob &glob = ndb_index_stat_glob; mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); Ndb_index_stat *st; - while ((st= share->index_stat_list) != 0) - { + while ((st = share->index_stat_list) != 0) { DBUG_PRINT("index_stat", ("st %s stat free all", st->id)); - share->index_stat_list= st->share_next; - st->share_next= 0; - st->share= 0; + share->index_stat_list = st->share_next; + st->share_next = 0; + st->share = 0; assert(st->lt != 0); assert(st->lt != Ndb_index_stat::LT_Delete); assert(!st->to_delete); - st->to_delete= true; - st->abort_request= true; + st->to_delete = true; + st->abort_request = true; glob.drop_count++; assert(st->drop_bytes == 0); - st->drop_bytes+= st->query_bytes + st->clean_bytes; - glob.cache_drop_bytes+= st->drop_bytes; + st->drop_bytes += st->query_bytes + st->clean_bytes; + glob.cache_drop_bytes += st->drop_bytes; } glob.set_status(); @@ -1248,29 +1078,24 @@ ndb_index_stat_free(NDB_SHARE *share) /* Find entry across shares */ /* wl4124_todo mutex overkill, hash table, can we find table share */ -static Ndb_index_stat* -ndb_index_stat_find_entry(int index_id, int index_version) -{ +static Ndb_index_stat *ndb_index_stat_find_entry(int index_id, + int index_version) { DBUG_ENTER("ndb_index_stat_find_entry"); mysql_mutex_lock(&ndbcluster_mutex); mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); - DBUG_PRINT("index_stat", ("find, id: %d version: %d", - index_id, index_version)); + DBUG_PRINT("index_stat", + ("find, id: %d version: %d", index_id, 
index_version)); int lt; - for (lt=1; lt < Ndb_index_stat::LT_Count; lt++) - { - Ndb_index_stat *st=ndb_index_stat_list[lt].head; - while (st != 0) - { - if (st->index_id == index_id && - st->index_version == index_version) - { + for (lt = 1; lt < Ndb_index_stat::LT_Count; lt++) { + Ndb_index_stat *st = ndb_index_stat_list[lt].head; + while (st != 0) { + if (st->index_id == index_id && st->index_version == index_version) { mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); mysql_mutex_unlock(&ndbcluster_mutex); DBUG_RETURN(st); } - st= st->list_next; + st = st->list_next; } } @@ -1281,54 +1106,46 @@ ndb_index_stat_find_entry(int index_id, int index_version) /* Statistics thread sub-routines */ -static void -ndb_index_stat_cache_move(Ndb_index_stat *st) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; +static void ndb_index_stat_cache_move(Ndb_index_stat *st) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; NdbIndexStat::CacheInfo infoBuild; NdbIndexStat::CacheInfo infoQuery; st->is->get_cache_info(infoBuild, NdbIndexStat::CacheBuild); st->is->get_cache_info(infoQuery, NdbIndexStat::CacheQuery); - const uint new_query_bytes= infoBuild.m_totalBytes; - const uint old_query_bytes= infoQuery.m_totalBytes; - DBUG_PRINT("index_stat", ("st %s cache move: query:%u clean:%u", - st->id, new_query_bytes, old_query_bytes)); + const uint new_query_bytes = infoBuild.m_totalBytes; + const uint old_query_bytes = infoQuery.m_totalBytes; + DBUG_PRINT("index_stat", ("st %s cache move: query:%u clean:%u", st->id, + new_query_bytes, old_query_bytes)); st->is->move_cache(); - st->query_bytes= new_query_bytes; - st->clean_bytes+= old_query_bytes; + st->query_bytes = new_query_bytes; + st->clean_bytes += old_query_bytes; assert(glob.cache_query_bytes >= old_query_bytes); - glob.cache_query_bytes-= old_query_bytes; - glob.cache_query_bytes+= new_query_bytes; - glob.cache_clean_bytes+= old_query_bytes; - const uint cache_total= glob.cache_query_bytes + glob.cache_clean_bytes; - if (glob.cache_high_bytes < cache_total) - glob.cache_high_bytes= cache_total; -} - -static bool -ndb_index_stat_cache_clean(Ndb_index_stat *st) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; + glob.cache_query_bytes -= old_query_bytes; + glob.cache_query_bytes += new_query_bytes; + glob.cache_clean_bytes += old_query_bytes; + const uint cache_total = glob.cache_query_bytes + glob.cache_clean_bytes; + if (glob.cache_high_bytes < cache_total) glob.cache_high_bytes = cache_total; +} + +static bool ndb_index_stat_cache_clean(Ndb_index_stat *st) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; NdbIndexStat::CacheInfo infoClean; st->is->get_cache_info(infoClean, NdbIndexStat::CacheClean); - const uint old_clean_bytes= infoClean.m_totalBytes; - const uint ref_count= infoClean.m_ref_count; - DBUG_PRINT("index_stat", ("st %s cache clean: clean:%u ref_count:%u", - st->id, old_clean_bytes, ref_count)); - if (ref_count != 0) - return false; + const uint old_clean_bytes = infoClean.m_totalBytes; + const uint ref_count = infoClean.m_ref_count; + DBUG_PRINT("index_stat", ("st %s cache clean: clean:%u ref_count:%u", st->id, + old_clean_bytes, ref_count)); + if (ref_count != 0) return false; st->is->clean_cache(); - st->clean_bytes= 0; + st->clean_bytes = 0; assert(glob.cache_clean_bytes >= old_clean_bytes); - glob.cache_clean_bytes-= old_clean_bytes; + glob.cache_clean_bytes -= old_clean_bytes; return true; } -static void -ndb_index_stat_cache_evict(Ndb_index_stat *st) -{ +static void ndb_index_stat_cache_evict(Ndb_index_stat *st) { 
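// Note on the eviction sequence below: move_cache() is called twice so that
// both the freshly built cache and the current query cache end up in the
// clean cache, and clean_cache() then releases those bytes. clean_cache()
// refuses while the clean cache is still referenced, which is what the
// assert on its result checks.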
NdbIndexStat::Head head; NdbIndexStat::CacheInfo infoBuild; NdbIndexStat::CacheInfo infoQuery; @@ -1338,29 +1155,28 @@ ndb_index_stat_cache_evict(Ndb_index_stat *st) st->is->get_cache_info(infoQuery, NdbIndexStat::CacheQuery); st->is->get_cache_info(infoClean, NdbIndexStat::CacheClean); - DBUG_PRINT("index_stat", - ("evict table: %u index: %u version: %u" - " sample version: %u" - " cache bytes build:%u query:%u clean:%u", - head.m_tableId, head.m_indexId, head.m_indexVersion, - head.m_sampleVersion, - infoBuild.m_totalBytes, infoQuery.m_totalBytes, infoClean.m_totalBytes)); + DBUG_PRINT("index_stat", ("evict table: %u index: %u version: %u" + " sample version: %u" + " cache bytes build:%u query:%u clean:%u", + head.m_tableId, head.m_indexId, head.m_indexVersion, + head.m_sampleVersion, infoBuild.m_totalBytes, + infoQuery.m_totalBytes, infoClean.m_totalBytes)); /* Twice to move all caches to clean */ ndb_index_stat_cache_move(st); ndb_index_stat_cache_move(st); /* Unused variable release vs debug nonsense */ - bool ok= false; - (void)ok; // USED - ok= ndb_index_stat_cache_clean(st); + bool ok = false; + (void)ok; // USED + ok = ndb_index_stat_cache_clean(st); assert(ok); } /* Misc in/out parameters for process steps */ struct Ndb_index_stat_proc { - NdbIndexStat* is_util; // For metadata and polling + NdbIndexStat *is_util; // For metadata and polling Ndb *ndb; - time_t start; // start of current processing slice + time_t start; // start of current processing slice time_t now; int lt; bool busy; @@ -1369,44 +1185,31 @@ struct Ndb_index_stat_proc { uint cache_query_bytes; uint cache_clean_bytes; #endif - Ndb_index_stat_proc() : - is_util(0), - ndb(0), - now(0), - lt(0), - busy(false), - end(false) - {} - - ~Ndb_index_stat_proc() - { - assert(ndb == NULL); - } + Ndb_index_stat_proc() + : is_util(0), ndb(0), now(0), lt(0), busy(false), end(false) {} + + ~Ndb_index_stat_proc() { assert(ndb == NULL); } }; -static void -ndb_index_stat_proc_new(Ndb_index_stat_proc &pr, Ndb_index_stat *st) -{ +static void ndb_index_stat_proc_new(Ndb_index_stat_proc &pr, + Ndb_index_stat *st) { assert(st->error.code == 0); if (st->force_update) - pr.lt= Ndb_index_stat::LT_Update; + pr.lt = Ndb_index_stat::LT_Update; else - pr.lt= Ndb_index_stat::LT_Read; + pr.lt = Ndb_index_stat::LT_Read; } -static void -ndb_index_stat_proc_new(Ndb_index_stat_proc &pr) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; +static void ndb_index_stat_proc_new(Ndb_index_stat_proc &pr) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); - const int lt= Ndb_index_stat::LT_New; - Ndb_index_stat_list &list= ndb_index_stat_list[lt]; + const int lt = Ndb_index_stat::LT_New; + Ndb_index_stat_list &list = ndb_index_stat_list[lt]; - Ndb_index_stat *st_loop= list.head; - while (st_loop != 0) - { - Ndb_index_stat *st= st_loop; - st_loop= st_loop->list_next; + Ndb_index_stat *st_loop = list.head; + while (st_loop != 0) { + Ndb_index_stat *st = st_loop; + st_loop = st_loop->list_next; DBUG_PRINT("index_stat", ("st %s proc %s", st->id, list.name)); ndb_index_stat_proc_new(pr, st); assert(pr.lt != lt); @@ -1416,11 +1219,9 @@ ndb_index_stat_proc_new(Ndb_index_stat_proc &pr) mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); } -static void -ndb_index_stat_proc_update(Ndb_index_stat_proc &pr, Ndb_index_stat *st) -{ - if (st->is->update_stat(pr.ndb) == -1) - { +static void ndb_index_stat_proc_update(Ndb_index_stat_proc &pr, + Ndb_index_stat *st) { + if (st->is->update_stat(pr.ndb) == -1) { 
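// update_stat() failed: record the error under stat_mutex and park the entry
// on the LT_Error list. An index whose key is too long for statistics
// (InvalidKeySize) is detached with ndb_index_stat_free() instead, so it is
// not retried forever.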
mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); ndb_index_stat_error(st, 0, "update_stat"); @@ -1434,38 +1235,34 @@ ndb_index_stat_proc_update(Ndb_index_stat_proc &pr, Ndb_index_stat *st) If the index has an unsupported length, remove it from the list and stop monitoring */ - if (st->is->getNdbError().code == NdbIndexStat::InvalidKeySize) - { + if (st->is->getNdbError().code == NdbIndexStat::InvalidKeySize) { ndb_index_stat_free(st); } mysql_cond_broadcast(&ndb_index_stat_thread.stat_cond); mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); - pr.lt= Ndb_index_stat::LT_Error; + pr.lt = Ndb_index_stat::LT_Error; return; } - pr.now= ndb_index_stat_time(); - st->update_time= pr.now; - pr.lt= Ndb_index_stat::LT_Read; + pr.now = ndb_index_stat_time(); + st->update_time = pr.now; + pr.lt = Ndb_index_stat::LT_Read; } -static void -ndb_index_stat_proc_update(Ndb_index_stat_proc &pr) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; - const int lt= Ndb_index_stat::LT_Update; - Ndb_index_stat_list &list= ndb_index_stat_list[lt]; - const Ndb_index_stat_opt &opt= ndb_index_stat_opt; - const uint batch= opt.get(Ndb_index_stat_opt::Iupdate_batch); +static void ndb_index_stat_proc_update(Ndb_index_stat_proc &pr) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; + const int lt = Ndb_index_stat::LT_Update; + Ndb_index_stat_list &list = ndb_index_stat_list[lt]; + const Ndb_index_stat_opt &opt = ndb_index_stat_opt; + const uint batch = opt.get(Ndb_index_stat_opt::Iupdate_batch); - Ndb_index_stat *st_loop= list.head; - uint cnt= 0; - while (st_loop != 0 && cnt < batch) - { - Ndb_index_stat *st= st_loop; - st_loop= st_loop->list_next; + Ndb_index_stat *st_loop = list.head; + uint cnt = 0; + while (st_loop != 0 && cnt < batch) { + Ndb_index_stat *st = st_loop; + st_loop = st_loop->list_next; DBUG_PRINT("index_stat", ("st %s proc %s", st->id, list.name)); ndb_index_stat_proc_update(pr, st); assert(pr.lt != lt); @@ -1476,74 +1273,65 @@ ndb_index_stat_proc_update(Ndb_index_stat_proc &pr) mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); cnt++; } - if (cnt == batch) - pr.busy= true; + if (cnt == batch) pr.busy = true; } -static void -ndb_index_stat_proc_read(Ndb_index_stat_proc &pr, Ndb_index_stat *st) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; +static void ndb_index_stat_proc_read(Ndb_index_stat_proc &pr, + Ndb_index_stat *st) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; NdbIndexStat::Head head; - if (st->is->read_stat(pr.ndb) == -1) - { + if (st->is->read_stat(pr.ndb) == -1) { mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); ndb_index_stat_error(st, 0, "read_stat"); - const bool force_update= st->force_update; + const bool force_update = st->force_update; ndb_index_stat_force_update(st, false); /* no stats is not unexpected error, unless analyze was done */ if (st->is->getNdbError().code == NdbIndexStat::NoIndexStats && - !force_update) - { + !force_update) { ndb_index_stat_no_stats(st, true); - pr.lt= Ndb_index_stat::LT_Idle; - } - else - { - pr.lt= Ndb_index_stat::LT_Error; + pr.lt = Ndb_index_stat::LT_Idle; + } else { + pr.lt = Ndb_index_stat::LT_Error; } mysql_cond_broadcast(&ndb_index_stat_thread.stat_cond); - pr.now= ndb_index_stat_time(); - st->check_time= pr.now; + pr.now = ndb_index_stat_time(); + st->check_time = pr.now; mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); return; } mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); - pr.now= ndb_index_stat_time(); + pr.now = ndb_index_stat_time(); st->is->get_head(head); - st->load_time= 
(time_t)head.m_loadTime; - st->read_time= pr.now; - st->sample_version= head.m_sampleVersion; - st->check_time= pr.now; + st->load_time = (time_t)head.m_loadTime; + st->read_time = pr.now; + st->sample_version = head.m_sampleVersion; + st->check_time = pr.now; ndb_index_stat_force_update(st, false); ndb_index_stat_no_stats(st, false); ndb_index_stat_cache_move(st); - pr.lt= Ndb_index_stat::LT_Idle; + pr.lt = Ndb_index_stat::LT_Idle; glob.refresh_count++; mysql_cond_broadcast(&ndb_index_stat_thread.stat_cond); mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); } -static void -ndb_index_stat_proc_read(Ndb_index_stat_proc &pr) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; - const int lt= Ndb_index_stat::LT_Read; - Ndb_index_stat_list &list= ndb_index_stat_list[lt]; - const Ndb_index_stat_opt &opt= ndb_index_stat_opt; - const uint batch= opt.get(Ndb_index_stat_opt::Iread_batch); +static void ndb_index_stat_proc_read(Ndb_index_stat_proc &pr) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; + const int lt = Ndb_index_stat::LT_Read; + Ndb_index_stat_list &list = ndb_index_stat_list[lt]; + const Ndb_index_stat_opt &opt = ndb_index_stat_opt; + const uint batch = opt.get(Ndb_index_stat_opt::Iread_batch); - Ndb_index_stat *st_loop= list.head; - uint cnt= 0; - while (st_loop != 0 && cnt < batch) - { - Ndb_index_stat *st= st_loop; - st_loop= st_loop->list_next; + Ndb_index_stat *st_loop = list.head; + uint cnt = 0; + while (st_loop != 0 && cnt < batch) { + Ndb_index_stat *st = st_loop; + st_loop = st_loop->list_next; DBUG_PRINT("index_stat", ("st %s proc %s", st->id, list.name)); ndb_index_stat_proc_read(pr, st); assert(pr.lt != lt); @@ -1554,95 +1342,83 @@ ndb_index_stat_proc_read(Ndb_index_stat_proc &pr) mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); cnt++; } - if (cnt == batch) - pr.busy= true; + if (cnt == batch) pr.busy = true; } -static void -ndb_index_stat_proc_idle(Ndb_index_stat_proc &pr, Ndb_index_stat *st) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; - const Ndb_index_stat_opt &opt= ndb_index_stat_opt; - const longlong clean_delay= opt.get(Ndb_index_stat_opt::Iclean_delay); - const longlong check_delay= opt.get(Ndb_index_stat_opt::Icheck_delay); +static void ndb_index_stat_proc_idle(Ndb_index_stat_proc &pr, + Ndb_index_stat *st) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; + const Ndb_index_stat_opt &opt = ndb_index_stat_opt; + const longlong clean_delay = opt.get(Ndb_index_stat_opt::Iclean_delay); + const longlong check_delay = opt.get(Ndb_index_stat_opt::Icheck_delay); - const longlong pr_now= (longlong)pr.now; - const longlong st_read_time= (longlong)st->read_time; - const longlong st_check_time= (longlong)st->check_time; + const longlong pr_now = (longlong)pr.now; + const longlong st_read_time = (longlong)st->read_time; + const longlong st_check_time = (longlong)st->check_time; - const longlong clean_wait= st_read_time + clean_delay - pr_now; - const longlong check_wait= st_check_time + check_delay - pr_now; + const longlong clean_wait = st_read_time + clean_delay - pr_now; + const longlong check_wait = st_check_time + check_delay - pr_now; - DBUG_PRINT("index_stat", ("st %s clean_wait:%lld check_wait:%lld" - " force_update:%d to_delete:%d", - st->id, clean_wait, check_wait, - st->force_update, st->to_delete)); + DBUG_PRINT("index_stat", + ("st %s clean_wait:%lld check_wait:%lld" + " force_update:%d to_delete:%d", + st->id, clean_wait, check_wait, st->force_update, st->to_delete)); - if (st->to_delete) - { - pr.lt= Ndb_index_stat::LT_Delete; + if 
(st->to_delete) { + pr.lt = Ndb_index_stat::LT_Delete; return; } - if (st->clean_bytes != 0 && clean_wait <= 0) - { + if (st->clean_bytes != 0 && clean_wait <= 0) { if (ndb_index_stat_cache_clean(st)) glob.clean_count++; else glob.pinned_count++; } - if (st->force_update) - { - pr.lt= Ndb_index_stat::LT_Update; - pr.busy= true; + if (st->force_update) { + pr.lt = Ndb_index_stat::LT_Update; + pr.busy = true; return; } - if (check_wait <= 0) - { + if (check_wait <= 0) { // avoid creating "idle" entries on Check list - const int lt_check= Ndb_index_stat::LT_Check; - const Ndb_index_stat_list &list_check= ndb_index_stat_list[lt_check]; - const uint check_batch= opt.get(Ndb_index_stat_opt::Icheck_batch); - if (list_check.count < check_batch) - { - pr.lt= Ndb_index_stat::LT_Check; + const int lt_check = Ndb_index_stat::LT_Check; + const Ndb_index_stat_list &list_check = ndb_index_stat_list[lt_check]; + const uint check_batch = opt.get(Ndb_index_stat_opt::Icheck_batch); + if (list_check.count < check_batch) { + pr.lt = Ndb_index_stat::LT_Check; return; } } - pr.lt= Ndb_index_stat::LT_Idle; + pr.lt = Ndb_index_stat::LT_Idle; } -static void -ndb_index_stat_proc_idle(Ndb_index_stat_proc &pr) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; - const int lt= Ndb_index_stat::LT_Idle; - Ndb_index_stat_list &list= ndb_index_stat_list[lt]; - const Ndb_index_stat_opt &opt= ndb_index_stat_opt; - uint batch= opt.get(Ndb_index_stat_opt::Iidle_batch); +static void ndb_index_stat_proc_idle(Ndb_index_stat_proc &pr) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; + const int lt = Ndb_index_stat::LT_Idle; + Ndb_index_stat_list &list = ndb_index_stat_list[lt]; + const Ndb_index_stat_opt &opt = ndb_index_stat_opt; + uint batch = opt.get(Ndb_index_stat_opt::Iidle_batch); { mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); - const Ndb_index_stat_glob &glob= ndb_index_stat_glob; - const int lt_update= Ndb_index_stat::LT_Update; - const Ndb_index_stat_list &list_update= ndb_index_stat_list[lt_update]; - if (glob.force_update > list_update.count) - { + const Ndb_index_stat_glob &glob = ndb_index_stat_glob; + const int lt_update = Ndb_index_stat::LT_Update; + const Ndb_index_stat_list &list_update = ndb_index_stat_list[lt_update]; + if (glob.force_update > list_update.count) { // probably there is a force update waiting on Idle list - batch= ~(uint)0; + batch = ~(uint)0; } mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); } // entry may be moved to end of this list - if (batch > list.count) - batch= list.count; - pr.now= ndb_index_stat_time(); - - Ndb_index_stat *st_loop= list.head; - uint cnt= 0; - while (st_loop != 0 && cnt < batch) - { - Ndb_index_stat *st= st_loop; - st_loop= st_loop->list_next; + if (batch > list.count) batch = list.count; + pr.now = ndb_index_stat_time(); + + Ndb_index_stat *st_loop = list.head; + uint cnt = 0; + while (st_loop != 0 && cnt < batch) { + Ndb_index_stat *st = st_loop; + st_loop = st_loop->list_next; DBUG_PRINT("index_stat", ("st %s proc %s", st->id, list.name)); ndb_index_stat_proc_idle(pr, st); // rotates list if entry remains LT_Idle @@ -1655,58 +1431,49 @@ ndb_index_stat_proc_idle(Ndb_index_stat_proc &pr) mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); } -static void -ndb_index_stat_proc_check(Ndb_index_stat_proc &pr, Ndb_index_stat *st) -{ - pr.now= ndb_index_stat_time(); - st->check_time= pr.now; +static void ndb_index_stat_proc_check(Ndb_index_stat_proc &pr, + Ndb_index_stat *st) { + pr.now = ndb_index_stat_time(); + st->check_time = pr.now; 
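// A check only re-reads the statistics head from the kernel: if the sample
// version there differs from the cached one, the entry goes back to LT_Read
// for a full refresh, otherwise it returns to LT_Idle.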
NdbIndexStat::Head head; - if (st->is->read_head(pr.ndb) == -1) - { + if (st->is->read_head(pr.ndb) == -1) { mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); ndb_index_stat_error(st, 0, "read_head"); /* no stats is not unexpected error */ - if (st->is->getNdbError().code == NdbIndexStat::NoIndexStats) - { + if (st->is->getNdbError().code == NdbIndexStat::NoIndexStats) { ndb_index_stat_no_stats(st, true); - pr.lt= Ndb_index_stat::LT_Idle; - } - else - { - pr.lt= Ndb_index_stat::LT_Error; + pr.lt = Ndb_index_stat::LT_Idle; + } else { + pr.lt = Ndb_index_stat::LT_Error; } mysql_cond_broadcast(&ndb_index_stat_thread.stat_cond); mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); return; } st->is->get_head(head); - const uint version_old= st->sample_version; - const uint version_new= head.m_sampleVersion; - if (version_old != version_new) - { - DBUG_PRINT("index_stat", ("st %s sample version old:%u new:%u", - st->id, version_old, version_new)); - pr.lt= Ndb_index_stat::LT_Read; + const uint version_old = st->sample_version; + const uint version_new = head.m_sampleVersion; + if (version_old != version_new) { + DBUG_PRINT("index_stat", ("st %s sample version old:%u new:%u", st->id, + version_old, version_new)); + pr.lt = Ndb_index_stat::LT_Read; return; } - pr.lt= Ndb_index_stat::LT_Idle; + pr.lt = Ndb_index_stat::LT_Idle; } -static void -ndb_index_stat_proc_check(Ndb_index_stat_proc &pr) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; - const int lt= Ndb_index_stat::LT_Check; - Ndb_index_stat_list &list= ndb_index_stat_list[lt]; - const Ndb_index_stat_opt &opt= ndb_index_stat_opt; - const uint batch= opt.get(Ndb_index_stat_opt::Icheck_batch); +static void ndb_index_stat_proc_check(Ndb_index_stat_proc &pr) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; + const int lt = Ndb_index_stat::LT_Check; + Ndb_index_stat_list &list = ndb_index_stat_list[lt]; + const Ndb_index_stat_opt &opt = ndb_index_stat_opt; + const uint batch = opt.get(Ndb_index_stat_opt::Icheck_batch); - Ndb_index_stat *st_loop= list.head; - uint cnt= 0; - while (st_loop != 0 && cnt < batch) - { - Ndb_index_stat *st= st_loop; - st_loop= st_loop->list_next; + Ndb_index_stat *st_loop = list.head; + uint cnt = 0; + while (st_loop != 0 && cnt < batch) { + Ndb_index_stat *st = st_loop; + st_loop = st_loop->list_next; DBUG_PRINT("index_stat", ("st %s proc %s", st->id, list.name)); ndb_index_stat_proc_check(pr, st); assert(pr.lt != lt); @@ -1717,84 +1484,68 @@ ndb_index_stat_proc_check(Ndb_index_stat_proc &pr) mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); cnt++; } - if (cnt == batch) - pr.busy= true; + if (cnt == batch) pr.busy = true; } /* Check if need to evict more */ -static bool -ndb_index_stat_proc_evict() -{ - const Ndb_index_stat_opt &opt= ndb_index_stat_opt; - Ndb_index_stat_glob &glob= ndb_index_stat_glob; - uint curr_size= glob.cache_query_bytes + glob.cache_clean_bytes; +static bool ndb_index_stat_proc_evict() { + const Ndb_index_stat_opt &opt = ndb_index_stat_opt; + Ndb_index_stat_glob &glob = ndb_index_stat_glob; + uint curr_size = glob.cache_query_bytes + glob.cache_clean_bytes; /* Subtract bytes already scheduled for evict */ assert(curr_size >= glob.cache_evict_bytes); - curr_size-= glob.cache_evict_bytes; + curr_size -= glob.cache_evict_bytes; - const uint cache_lowpct= opt.get(Ndb_index_stat_opt::Icache_lowpct); - const uint cache_limit= opt.get(Ndb_index_stat_opt::Icache_limit); - if (100 * curr_size <= cache_lowpct * cache_limit) - return false; + const uint cache_lowpct = 
opt.get(Ndb_index_stat_opt::Icache_lowpct); + const uint cache_limit = opt.get(Ndb_index_stat_opt::Icache_limit); + if (100 * curr_size <= cache_lowpct * cache_limit) return false; return true; } /* Check if st1 is better or as good to evict than st2 */ -static bool -ndb_index_stat_evict(const Ndb_index_stat *st1, - const Ndb_index_stat *st2) -{ - if (st1->access_time < st2->access_time) - return true; +static bool ndb_index_stat_evict(const Ndb_index_stat *st1, + const Ndb_index_stat *st2) { + if (st1->access_time < st2->access_time) return true; if (st1->access_time == st2->access_time && st1->query_bytes + st1->clean_bytes >= - st2->query_bytes + st2->clean_bytes) + st2->query_bytes + st2->clean_bytes) return true; return false; } -static void -ndb_index_stat_proc_evict(Ndb_index_stat_proc &pr, int lt) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; - Ndb_index_stat_list &list= ndb_index_stat_list[lt]; - const Ndb_index_stat_opt &opt= ndb_index_stat_opt; - const uint batch= opt.get(Ndb_index_stat_opt::Ievict_batch); - const longlong evict_delay= opt.get(Ndb_index_stat_opt::Ievict_delay); - pr.now= ndb_index_stat_time(); - const longlong pr_now= (longlong)pr.now; +static void ndb_index_stat_proc_evict(Ndb_index_stat_proc &pr, int lt) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; + Ndb_index_stat_list &list = ndb_index_stat_list[lt]; + const Ndb_index_stat_opt &opt = ndb_index_stat_opt; + const uint batch = opt.get(Ndb_index_stat_opt::Ievict_batch); + const longlong evict_delay = opt.get(Ndb_index_stat_opt::Ievict_delay); + pr.now = ndb_index_stat_time(); + const longlong pr_now = (longlong)pr.now; - if (!ndb_index_stat_proc_evict()) - return; + if (!ndb_index_stat_proc_evict()) return; /* Mutex entire routine (protect access_time) */ mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); /* Create a LRU batch */ - Ndb_index_stat* st_lru_arr[ndb_index_stat_max_evict_batch + 1]; - uint st_lru_cnt= 0; - Ndb_index_stat *st_loop= list.head; - while (st_loop != 0 && st_lru_cnt < batch) - { - Ndb_index_stat *st= st_loop; - st_loop= st_loop->list_next; - const longlong st_read_time= (longlong)st->read_time; + Ndb_index_stat *st_lru_arr[ndb_index_stat_max_evict_batch + 1]; + uint st_lru_cnt = 0; + Ndb_index_stat *st_loop = list.head; + while (st_loop != 0 && st_lru_cnt < batch) { + Ndb_index_stat *st = st_loop; + st_loop = st_loop->list_next; + const longlong st_read_time = (longlong)st->read_time; if (st_read_time + evict_delay <= pr_now && - st->query_bytes + st->clean_bytes != 0 && - !st->to_delete) - { + st->query_bytes + st->clean_bytes != 0 && !st->to_delete) { /* Insertion sort into the batch from the end */ if (st_lru_cnt == 0) - st_lru_arr[st_lru_cnt++]= st; - else - { - uint i= st_lru_cnt; - while (i != 0) - { - const Ndb_index_stat *st1= st_lru_arr[i-1]; - if (ndb_index_stat_evict(st1, st)) - { + st_lru_arr[st_lru_cnt++] = st; + else { + uint i = st_lru_cnt; + while (i != 0) { + const Ndb_index_stat *st1 = st_lru_arr[i - 1]; + if (ndb_index_stat_evict(st1, st)) { /* The old entry at i-1 is preferred over st. Stop at first such entry. Therefore entries @@ -1804,8 +1555,7 @@ ndb_index_stat_proc_evict(Ndb_index_stat_proc &pr, int lt) } i--; } - if (i < st_lru_cnt) - { + if (i < st_lru_cnt) { /* Some old entry is less preferred than st. If this is true for all then i is 0 and st becomes new first entry. @@ -1813,83 +1563,71 @@ ndb_index_stat_proc_evict(Ndb_index_stat_proc &pr, int lt) >= i are shifted up. 
The extra position at the end of st_lru_arr avoids a special case when the array is full. */ - uint j= st_lru_cnt; - while (j > i) - { - st_lru_arr[j]= st_lru_arr[j-1]; + uint j = st_lru_cnt; + while (j > i) { + st_lru_arr[j] = st_lru_arr[j - 1]; j--; } - st_lru_arr[i]= st; - if (st_lru_cnt < batch) - st_lru_cnt++; + st_lru_arr[i] = st; + if (st_lru_cnt < batch) st_lru_cnt++; } } } } - + #ifndef DBUG_OFF - for (uint i=0; i < st_lru_cnt; i++) - { - Ndb_index_stat* st1= st_lru_arr[i]; + for (uint i = 0; i < st_lru_cnt; i++) { + Ndb_index_stat *st1 = st_lru_arr[i]; assert(!st1->to_delete && st1->share != 0); - if (i + 1 < st_lru_cnt) - { - Ndb_index_stat* st2= st_lru_arr[i+1]; + if (i + 1 < st_lru_cnt) { + Ndb_index_stat *st2 = st_lru_arr[i + 1]; assert(ndb_index_stat_evict(st1, st2)); } } #endif /* Process the LRU batch */ - uint cnt= 0; - while (cnt < st_lru_cnt) - { - if (!ndb_index_stat_proc_evict()) - break; + uint cnt = 0; + while (cnt < st_lru_cnt) { + if (!ndb_index_stat_proc_evict()) break; - Ndb_index_stat *st= st_lru_arr[cnt]; + Ndb_index_stat *st = st_lru_arr[cnt]; DBUG_PRINT("index_stat", ("st %s proc evict %s", st->id, list.name)); /* Entry may have requests. Cache is evicted at delete. */ ndb_index_stat_free(st); assert(st->evict_bytes == 0); - st->evict_bytes= st->query_bytes + st->clean_bytes; - glob.cache_evict_bytes+= st->evict_bytes; + st->evict_bytes = st->query_bytes + st->clean_bytes; + glob.cache_evict_bytes += st->evict_bytes; cnt++; } - if (cnt == batch) - pr.busy= true; + if (cnt == batch) pr.busy = true; - glob.evict_count+= cnt; + glob.evict_count += cnt; mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); } -static void -ndb_index_stat_proc_evict(Ndb_index_stat_proc &pr) -{ +static void ndb_index_stat_proc_evict(Ndb_index_stat_proc &pr) { ndb_index_stat_proc_evict(pr, Ndb_index_stat::LT_Error); ndb_index_stat_proc_evict(pr, Ndb_index_stat::LT_Idle); } -static void -ndb_index_stat_proc_delete(Ndb_index_stat_proc &pr) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; - const int lt= Ndb_index_stat::LT_Delete; - Ndb_index_stat_list &list= ndb_index_stat_list[lt]; - const Ndb_index_stat_opt &opt= ndb_index_stat_opt; - const uint delete_batch= opt.get(Ndb_index_stat_opt::Idelete_batch); - const uint batch= !pr.end ? delete_batch : ~(uint)0; +static void ndb_index_stat_proc_delete(Ndb_index_stat_proc &pr) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; + const int lt = Ndb_index_stat::LT_Delete; + Ndb_index_stat_list &list = ndb_index_stat_list[lt]; + const Ndb_index_stat_opt &opt = ndb_index_stat_opt; + const uint delete_batch = opt.get(Ndb_index_stat_opt::Idelete_batch); + const uint batch = !pr.end ? delete_batch : ~(uint)0; /* Mutex entire routine */ mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); - Ndb_index_stat *st_loop= list.head; - uint cnt= 0; - while (st_loop != 0 && cnt < batch) - { - Ndb_index_stat *st= st_loop; - st_loop= st_loop->list_next; + Ndb_index_stat *st_loop = list.head; + uint cnt = 0; + while (st_loop != 0 && cnt < batch) { + Ndb_index_stat *st = st_loop; + st_loop = st_loop->list_next; DBUG_PRINT("index_stat", ("st %s proc %s", st->id, list.name)); // adjust global counters at drop @@ -1901,84 +1639,75 @@ ndb_index_stat_proc_delete(Ndb_index_stat_proc &pr) risk stats thread hanging. Instead try again next time. Presumably clients will eventually notice abort_request. 
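The eviction code above builds its LRU batch by insertion sort from the tail of a fixed array that has one spare slot, which is what lets a full array avoid a special case. The same top-k idea in a self-contained sketch (hypothetical names and a simplified ordering; the real code also honours evict_delay and skips entries with no cached bytes):

```cpp
#include <cstddef>

constexpr std::size_t kBatch = 8;

struct Cand {
  long access_time;  // older access means a better eviction candidate
};

// true when a is at least as good to evict as b
bool better_evict(const Cand &a, const Cand &b) {
  return a.access_time <= b.access_time;
}

Cand batch_arr[kBatch + 1];  // spare slot absorbs the shift when full
std::size_t batch_cnt = 0;

void offer(const Cand &c) {
  std::size_t i = batch_cnt;
  while (i != 0 && !better_evict(batch_arr[i - 1], c)) i--;
  for (std::size_t j = batch_cnt; j > i; j--)  // shift the tail up by one
    batch_arr[j] = batch_arr[j - 1];
  batch_arr[i] = c;
  if (batch_cnt < kBatch) batch_cnt++;  // when full, the worst entry falls off
}
```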
*/ - if (st->ref_count != 0) - { - DBUG_PRINT("index_stat", ("st %s proc %s: ref_count:%u", - st->id, list.name, st->ref_count)); + if (st->ref_count != 0) { + DBUG_PRINT("index_stat", ("st %s proc %s: ref_count:%u", st->id, + list.name, st->ref_count)); continue; } ndb_index_stat_cache_evict(st); assert(glob.cache_drop_bytes >= st->drop_bytes); - glob.cache_drop_bytes-= st->drop_bytes; + glob.cache_drop_bytes -= st->drop_bytes; assert(glob.cache_evict_bytes >= st->evict_bytes); - glob.cache_evict_bytes-= st->evict_bytes; + glob.cache_evict_bytes -= st->evict_bytes; ndb_index_stat_list_remove(st); delete st->is; delete st; cnt++; } - if (cnt == batch) - pr.busy= true; + if (cnt == batch) pr.busy = true; glob.set_status(); mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); } -static void -ndb_index_stat_proc_error(Ndb_index_stat_proc &pr, Ndb_index_stat *st) -{ - const Ndb_index_stat_opt &opt= ndb_index_stat_opt; - const longlong error_delay= opt.get(Ndb_index_stat_opt::Ierror_delay); +static void ndb_index_stat_proc_error(Ndb_index_stat_proc &pr, + Ndb_index_stat *st) { + const Ndb_index_stat_opt &opt = ndb_index_stat_opt; + const longlong error_delay = opt.get(Ndb_index_stat_opt::Ierror_delay); - const longlong pr_now= (longlong)pr.now; - const longlong st_error_time= (longlong)st->error_time; - const longlong error_wait= st_error_time + error_delay - pr_now; + const longlong pr_now = (longlong)pr.now; + const longlong st_error_time = (longlong)st->error_time; + const longlong error_wait = st_error_time + error_delay - pr_now; DBUG_PRINT("index_stat", ("st %s error_wait:%lld error_count:%u" " force_update:%d to_delete:%d", st->id, error_wait, st->error_count, st->force_update, st->to_delete)); - if (st->to_delete) - { - pr.lt= Ndb_index_stat::LT_Delete; + if (st->to_delete) { + pr.lt = Ndb_index_stat::LT_Delete; return; } if (error_wait <= 0 || /* Analyze issued after previous error */ - st->force_update) - { + st->force_update) { ndb_index_stat_clear_error(st); if (st->force_update) - pr.lt= Ndb_index_stat::LT_Update; + pr.lt = Ndb_index_stat::LT_Update; else - pr.lt= Ndb_index_stat::LT_Read; + pr.lt = Ndb_index_stat::LT_Read; return; } - pr.lt= Ndb_index_stat::LT_Error; + pr.lt = Ndb_index_stat::LT_Error; } -static void -ndb_index_stat_proc_error(Ndb_index_stat_proc &pr) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; - const int lt= Ndb_index_stat::LT_Error; - Ndb_index_stat_list &list= ndb_index_stat_list[lt]; - const Ndb_index_stat_opt &opt= ndb_index_stat_opt; - uint batch= opt.get(Ndb_index_stat_opt::Ierror_batch); +static void ndb_index_stat_proc_error(Ndb_index_stat_proc &pr) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; + const int lt = Ndb_index_stat::LT_Error; + Ndb_index_stat_list &list = ndb_index_stat_list[lt]; + const Ndb_index_stat_opt &opt = ndb_index_stat_opt; + uint batch = opt.get(Ndb_index_stat_opt::Ierror_batch); // entry may be moved to end of this list - if (batch > list.count) - batch= list.count; - pr.now= ndb_index_stat_time(); - - Ndb_index_stat *st_loop= list.head; - uint cnt= 0; - while (st_loop != 0 && cnt < batch) - { - Ndb_index_stat *st= st_loop; - st_loop= st_loop->list_next; + if (batch > list.count) batch = list.count; + pr.now = ndb_index_stat_time(); + + Ndb_index_stat *st_loop = list.head; + uint cnt = 0; + while (st_loop != 0 && cnt < batch) { + Ndb_index_stat *st = st_loop; + st_loop = st_loop->list_next; DBUG_PRINT("index_stat", ("st %s proc %s", st->id, list.name)); ndb_index_stat_proc_error(pr, st); // rotates list if entry 
remains LT_Error @@ -1991,9 +1720,8 @@ ndb_index_stat_proc_error(Ndb_index_stat_proc &pr) mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); } -static void -ndb_index_stat_proc_event(Ndb_index_stat_proc &pr, Ndb_index_stat *st) -{ +static void ndb_index_stat_proc_event(Ndb_index_stat_proc &pr, + Ndb_index_stat *st) { /* Put on Check list if idle. We get event also for our own analyze but this should not matter. @@ -2003,82 +1731,63 @@ ndb_index_stat_proc_event(Ndb_index_stat_proc &pr, Ndb_index_stat *st) for loop_idle time since the entry moves to LT_Check temporarily. Ignore the event if an update was done near this processing slice. */ - pr.lt= st->lt; - if (st->lt == Ndb_index_stat::LT_Idle || - st->lt == Ndb_index_stat::LT_Error) - { - if (st->update_time < pr.start) - { + pr.lt = st->lt; + if (st->lt == Ndb_index_stat::LT_Idle || st->lt == Ndb_index_stat::LT_Error) { + if (st->update_time < pr.start) { DBUG_PRINT("index_stat", ("st %s accept event for check", st->id)); - pr.lt= Ndb_index_stat::LT_Check; - } - else - { + pr.lt = Ndb_index_stat::LT_Check; + } else { DBUG_PRINT("index_stat", ("st %s ignore likely event to self", st->id)); } - } - else - { + } else { DBUG_PRINT("index_stat", ("st %s ignore event on lt=%d", st->id, st->lt)); } } -static void -ndb_index_stat_proc_event(Ndb_index_stat_proc &pr) -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; - NdbIndexStat *is= pr.is_util; - Ndb *ndb= pr.ndb; +static void ndb_index_stat_proc_event(Ndb_index_stat_proc &pr) { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; + NdbIndexStat *is = pr.is_util; + Ndb *ndb = pr.ndb; int ret; - ret= is->poll_listener(ndb, 0); + ret = is->poll_listener(ndb, 0); DBUG_PRINT("index_stat", ("poll_listener ret: %d", ret)); - if (ret == -1) - { + if (ret == -1) { // wl4124_todo report error DBUG_ASSERT(false); return; } - if (ret == 0) - return; + if (ret == 0) return; - while (1) - { - ret= is->next_listener(ndb); + while (1) { + ret = is->next_listener(ndb); DBUG_PRINT("index_stat", ("next_listener ret: %d", ret)); - if (ret == -1) - { + if (ret == -1) { // wl4124_todo report error DBUG_ASSERT(false); return; } - if (ret == 0) - break; + if (ret == 0) break; NdbIndexStat::Head head; is->get_head(head); DBUG_PRINT("index_stat", ("next_listener eventType: %d indexId: %u", head.m_eventType, head.m_indexId)); - Ndb_index_stat *st= ndb_index_stat_find_entry(head.m_indexId, - head.m_indexVersion); + Ndb_index_stat *st = + ndb_index_stat_find_entry(head.m_indexId, head.m_indexVersion); /* Another process can update stats for an index which is not found in this mysqld. Ignore it. 
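ndb_index_stat_proc_event above follows a poll-then-drain shape: one zero-timeout poll asks whether anything is pending, then next_listener is called until it returns 0 (drained) or -1 (error), dispatching one event per iteration. A hedged sketch of that shape with a stand-in listener type (the real calls are NdbIndexStat::poll_listener and next_listener on an Ndb object):

```cpp
// Stand-in listener, not the NdbIndexStat API.
struct Listener {
  int pending = 0;
  int poll(int /*timeout_ms*/) { return pending > 0 ? 1 : 0; }
  int next() { return pending > 0 ? (pending--, 1) : 0; }
};

// Poll once with zero timeout, then drain every queued event.
bool drain_events(Listener &l) {
  int ret = l.poll(0);
  if (ret <= 0) return ret == 0;  // nothing pending, or report the error
  while ((ret = l.next()) > 0) {
    // ...read the event head, look up the entry, move it to Check...
  }
  return ret == 0;  // -1 from next() aborts the drain
}
```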
*/ - if (st != 0) - { + if (st != 0) { DBUG_PRINT("index_stat", ("st %s proc %s", st->id, "event")); ndb_index_stat_proc_event(pr, st); - if (pr.lt != st->lt) - { + if (pr.lt != st->lt) { ndb_index_stat_list_move(st, pr.lt); glob.event_act++; - } - else + } else glob.event_skip++; - } - else - { + } else { DBUG_PRINT("index_stat", ("entry not found in this mysqld")); glob.event_miss++; } @@ -2090,15 +1799,12 @@ ndb_index_stat_proc_event(Ndb_index_stat_proc &pr) /* Control options */ -static void -ndb_index_stat_proc_control() -{ - Ndb_index_stat_glob &glob= ndb_index_stat_glob; - Ndb_index_stat_opt &opt= ndb_index_stat_opt; +static void ndb_index_stat_proc_control() { + Ndb_index_stat_glob &glob = ndb_index_stat_glob; + Ndb_index_stat_opt &opt = ndb_index_stat_opt; /* Request to zero accumulating counters */ - if (opt.get(Ndb_index_stat_opt::Izero_total) == true) - { + if (opt.get(Ndb_index_stat_opt::Izero_total) == true) { mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); glob.zero_total(); glob.set_status(); @@ -2108,104 +1814,85 @@ ndb_index_stat_proc_control() } #ifndef DBUG_OFF -static void -ndb_index_stat_entry_verify(Ndb_index_stat_proc &pr, const Ndb_index_stat *st) -{ - const NDB_SHARE *share= st->share; - if (st->to_delete) - { +static void ndb_index_stat_entry_verify(Ndb_index_stat_proc &pr, + const Ndb_index_stat *st) { + const NDB_SHARE *share = st->share; + if (st->to_delete) { assert(st->share_next == 0); assert(share == 0); - } - else - { + } else { assert(share != 0); - const Ndb_index_stat *st2= share->index_stat_list; + const Ndb_index_stat *st2 = share->index_stat_list; assert(st2 != 0); - uint found= 0; - while (st2 != 0) - { + uint found = 0; + while (st2 != 0) { assert(st2->share == share); - const Ndb_index_stat *st3= st2->share_next; - uint guard= 0; - while (st3 != 0) - { + const Ndb_index_stat *st3 = st2->share_next; + uint guard = 0; + while (st3 != 0) { assert(st2 != st3); guard++; - assert(guard <= 1000); // MAX_INDEXES - st3= st3->share_next; + assert(guard <= 1000); // MAX_INDEXES + st3 = st3->share_next; } - if (st == st2) - found++; - st2= st2->share_next; + if (st == st2) found++; + st2 = st2->share_next; } assert(found == 1); } assert(st->read_time <= st->check_time); - pr.cache_query_bytes+= st->query_bytes; - pr.cache_clean_bytes+= st->clean_bytes; + pr.cache_query_bytes += st->query_bytes; + pr.cache_clean_bytes += st->clean_bytes; } -static void -ndb_index_stat_list_verify(Ndb_index_stat_proc &pr, int lt) -{ - const Ndb_index_stat_list &list= ndb_index_stat_list[lt]; - const Ndb_index_stat *st= list.head; - uint count= 0; - while (st != 0) - { +static void ndb_index_stat_list_verify(Ndb_index_stat_proc &pr, int lt) { + const Ndb_index_stat_list &list = ndb_index_stat_list[lt]; + const Ndb_index_stat *st = list.head; + uint count = 0; + while (st != 0) { count++; assert(count <= list.count); - if (st->list_prev != 0) - { + if (st->list_prev != 0) { assert(st->list_prev->list_next == st); } - if (st->list_next != 0) - { + if (st->list_next != 0) { assert(st->list_next->list_prev == st); } - if (count == 1) - { + if (count == 1) { assert(st == list.head); } - if (count == list.count) - { + if (count == list.count) { assert(st == list.tail); } - if (st == list.head) - { + if (st == list.head) { assert(count == 1); assert(st->list_prev == 0); } - if (st == list.tail) - { + if (st == list.tail) { assert(count == list.count); assert(st->list_next == 0); } - const Ndb_index_stat *st2= st->list_next; - uint guard= 0; - while (st2 != 0) - { + const 
Ndb_index_stat *st2 = st->list_next; + uint guard = 0; + while (st2 != 0) { assert(st != st2); guard++; assert(guard <= list.count); - st2= st2->list_next; + st2 = st2->list_next; } ndb_index_stat_entry_verify(pr, st); - st= st->list_next; + st = st->list_next; } assert(count == list.count); } -static void -ndb_index_stat_list_verify(Ndb_index_stat_proc &pr) -{ - const Ndb_index_stat_glob &glob= ndb_index_stat_glob; +static void ndb_index_stat_list_verify(Ndb_index_stat_proc &pr) { + const Ndb_index_stat_glob &glob = ndb_index_stat_glob; mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); - pr.cache_query_bytes= 0; - pr.cache_clean_bytes= 0; + pr.cache_query_bytes = 0; + pr.cache_clean_bytes = 0; - for (int lt= 1; lt < Ndb_index_stat::LT_Count; lt++) + for (int lt = 1; lt < Ndb_index_stat::LT_Count; lt++) ndb_index_stat_list_verify(pr, lt); assert(glob.cache_query_bytes == pr.cache_query_bytes); @@ -2213,34 +1900,29 @@ ndb_index_stat_list_verify(Ndb_index_stat_proc &pr) mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); } -static void -ndb_index_stat_report(const Ndb_index_stat_glob& old_glob) -{ - const Ndb_index_stat_glob &new_glob= ndb_index_stat_glob; - const char *old_status= old_glob.status[old_glob.status_i]; - const char *new_status= new_glob.status[new_glob.status_i]; +static void ndb_index_stat_report(const Ndb_index_stat_glob &old_glob) { + const Ndb_index_stat_glob &new_glob = ndb_index_stat_glob; + const char *old_status = old_glob.status[old_glob.status_i]; + const char *new_status = new_glob.status[new_glob.status_i]; - if (strcmp(old_status, new_status) != 0) - { + if (strcmp(old_status, new_status) != 0) { DBUG_PRINT("index_stat", ("old_status: %s", old_status)); DBUG_PRINT("index_stat", ("new_status: %s", new_status)); } } #endif -static void -ndb_index_stat_proc(Ndb_index_stat_proc &pr) -{ +static void ndb_index_stat_proc(Ndb_index_stat_proc &pr) { DBUG_ENTER("ndb_index_stat_proc"); ndb_index_stat_proc_control(); #ifndef DBUG_OFF ndb_index_stat_list_verify(pr); - Ndb_index_stat_glob old_glob= ndb_index_stat_glob; + Ndb_index_stat_glob old_glob = ndb_index_stat_glob; #endif - pr.start= pr.now= ndb_index_stat_time(); + pr.start = pr.now = ndb_index_stat_time(); ndb_index_stat_proc_new(pr); ndb_index_stat_proc_update(pr); @@ -2262,12 +1944,10 @@ ndb_index_stat_proc(Ndb_index_stat_proc &pr) /* Runs after stats thread exits and needs no locks. 
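The debug-only verify functions above walk every state list and assert the invariants of an intrusive doubly linked list: prev/next links agree, head and tail are consistent, and the traversal length matches the stored count (which also bounds the walk, so a cycle trips an assert instead of hanging). Reduced to a generic sketch, not the NDB structs:

```cpp
#include <cassert>

struct Node {
  Node *prev = nullptr;
  Node *next = nullptr;
};

struct List {
  Node *head = nullptr;
  Node *tail = nullptr;
  unsigned count = 0;
};

void verify(const List &l) {
  unsigned n = 0;
  for (const Node *p = l.head; p != nullptr; p = p->next) {
    n++;
    assert(n <= l.count);                     // a cycle would exceed count
    if (p->prev) assert(p->prev->next == p);  // back link agrees
    if (p->next) assert(p->next->prev == p);  // forward link agrees
    if (p == l.head) assert(p->prev == nullptr);
    if (p == l.tail) assert(p->next == nullptr);
  }
  assert(n == l.count);
}
```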
*/ -void -ndb_index_stat_end() -{ +void ndb_index_stat_end() { DBUG_ENTER("ndb_index_stat_end"); Ndb_index_stat_proc pr; - pr.end= true; + pr.end = true; /* * Shares have been freed so any index stat entries left should be @@ -2275,18 +1955,15 @@ ndb_index_stat_end() */ int lt; - for (lt= 1; lt < Ndb_index_stat::LT_Count; lt++) - { - if (lt == (int)Ndb_index_stat::LT_Delete) - continue; - Ndb_index_stat_list &list= ndb_index_stat_list[lt]; - Ndb_index_stat *st_loop= list.head; - while (st_loop != 0) - { - Ndb_index_stat *st= st_loop; - st_loop= st_loop->list_next; + for (lt = 1; lt < Ndb_index_stat::LT_Count; lt++) { + if (lt == (int)Ndb_index_stat::LT_Delete) continue; + Ndb_index_stat_list &list = ndb_index_stat_list[lt]; + Ndb_index_stat *st_loop = list.head; + while (st_loop != 0) { + Ndb_index_stat *st = st_loop; + st_loop = st_loop->list_next; DBUG_PRINT("index_stat", ("st %s end %s", st->id, list.name)); - pr.lt= Ndb_index_stat::LT_Delete; + pr.lt = Ndb_index_stat::LT_Delete; ndb_index_stat_list_move(st, pr.lt); } } @@ -2298,159 +1975,134 @@ ndb_index_stat_end() /* Index stats thread */ -int -Ndb_index_stat_thread::check_or_create_systables(Ndb_index_stat_proc &pr) -{ +int Ndb_index_stat_thread::check_or_create_systables(Ndb_index_stat_proc &pr) { DBUG_ENTER("Ndb_index_stat_thread::check_or_create_systables"); - NdbIndexStat *is= pr.is_util; - Ndb *ndb= pr.ndb; + NdbIndexStat *is = pr.is_util; + Ndb *ndb = pr.ndb; - if (is->check_systables(ndb) == 0) - { + if (is->check_systables(ndb) == 0) { DBUG_PRINT("index_stat", ("using existing index stats tables")); DBUG_RETURN(0); } - if (is->create_systables(ndb) == 0) - { + if (is->create_systables(ndb) == 0) { DBUG_PRINT("index_stat", ("created index stats tables")); DBUG_RETURN(0); } - if (is->getNdbError().code == 721 || - is->getNdbError().code == 4244 || - is->getNdbError().code == 4009) // no connection + if (is->getNdbError().code == 721 || is->getNdbError().code == 4244 || + is->getNdbError().code == 4009) // no connection { // probably race between mysqlds - DBUG_PRINT("index_stat", ("create index stats tables failed: error %d line %d", - is->getNdbError().code, is->getNdbError().line)); + DBUG_PRINT("index_stat", + ("create index stats tables failed: error %d line %d", + is->getNdbError().code, is->getNdbError().line)); DBUG_RETURN(-1); } - log_info("create tables failed, error: %d, line: %d", - is->getNdbError().code, is->getNdbError().line); + log_info("create tables failed, error: %d, line: %d", is->getNdbError().code, + is->getNdbError().line); DBUG_RETURN(-1); } -int -Ndb_index_stat_thread::check_or_create_sysevents(Ndb_index_stat_proc &pr) -{ +int Ndb_index_stat_thread::check_or_create_sysevents(Ndb_index_stat_proc &pr) { DBUG_ENTER("Ndb_index_stat_thread::check_or_create_sysevents"); - NdbIndexStat *is= pr.is_util; - Ndb *ndb= pr.ndb; + NdbIndexStat *is = pr.is_util; + Ndb *ndb = pr.ndb; - if (is->check_sysevents(ndb) == 0) - { + if (is->check_sysevents(ndb) == 0) { DBUG_PRINT("index_stat", ("using existing index stats events")); DBUG_RETURN(0); } - if (is->create_sysevents(ndb) == 0) - { + if (is->create_sysevents(ndb) == 0) { DBUG_PRINT("index_stat", ("created index stats events")); DBUG_RETURN(0); } - if (is->getNdbError().code == 746) - { + if (is->getNdbError().code == 746) { // Probably race between mysqlds - DBUG_PRINT("index_stat", ("create index stats events failed: error %d line %d", - is->getNdbError().code, is->getNdbError().line)); + DBUG_PRINT("index_stat", + ("create index stats events failed: error %d line 
%d", + is->getNdbError().code, is->getNdbError().line)); DBUG_RETURN(-1); } - log_info("create events failed, error: %d, line: %d", - is->getNdbError().code, is->getNdbError().line); + log_info("create events failed, error: %d, line: %d", is->getNdbError().code, + is->getNdbError().line); DBUG_RETURN(-1); } -int -Ndb_index_stat_thread::create_ndb(Ndb_index_stat_proc &pr, - Ndb_cluster_connection* connection) -{ +int Ndb_index_stat_thread::create_ndb(Ndb_index_stat_proc &pr, + Ndb_cluster_connection *connection) { DBUG_ENTER("Ndb_index_stat_thread::create_ndb"); assert(pr.ndb == NULL); assert(connection != NULL); - Ndb* ndb= NULL; - do - { - ndb= new (std::nothrow) Ndb(connection, ""); - if (ndb == nullptr) - { + Ndb *ndb = NULL; + do { + ndb = new (std::nothrow) Ndb(connection, ""); + if (ndb == nullptr) { log_error("failed to create Ndb object"); break; } - if (ndb->setNdbObjectName("Ndb Index Stat")) - { + if (ndb->setNdbObjectName("Ndb Index Stat")) { log_error("failed to set Ndb object name, error: %d", ndb->getNdbError().code); break; } - if (ndb->init() != 0) - { - log_error("failed to init Ndb, error: %d", - ndb->getNdbError().code); + if (ndb->init() != 0) { + log_error("failed to init Ndb, error: %d", ndb->getNdbError().code); break; } - if (ndb->setDatabaseName(NDB_INDEX_STAT_DB) != 0) - { - log_error("failed to set database '%s', error: %d", - NDB_INDEX_STAT_DB, ndb->getNdbError().code); + if (ndb->setDatabaseName(NDB_INDEX_STAT_DB) != 0) { + log_error("failed to set database '%s', error: %d", NDB_INDEX_STAT_DB, + ndb->getNdbError().code); break; } - log_info("created Ndb object '%s', ref: 0x%x", - ndb->getNdbObjectName(), ndb->getReference()); + log_info("created Ndb object '%s', ref: 0x%x", ndb->getNdbObjectName(), + ndb->getReference()); - pr.ndb= ndb; + pr.ndb = ndb; DBUG_RETURN(0); } while (0); - if (ndb != NULL) - delete ndb; + if (ndb != NULL) delete ndb; DBUG_RETURN(-1); } -void -Ndb_index_stat_thread::drop_ndb(Ndb_index_stat_proc &pr) -{ +void Ndb_index_stat_thread::drop_ndb(Ndb_index_stat_proc &pr) { DBUG_ENTER("Ndb_index_stat_thread::drop_ndb"); - if (pr.is_util->has_listener()) - { + if (pr.is_util->has_listener()) { stop_listener(pr); } - if (pr.ndb != NULL) - { + if (pr.ndb != NULL) { delete pr.ndb; - pr.ndb= NULL; + pr.ndb = NULL; } DBUG_VOID_RETURN; } -int -Ndb_index_stat_thread::start_listener(Ndb_index_stat_proc &pr) -{ +int Ndb_index_stat_thread::start_listener(Ndb_index_stat_proc &pr) { DBUG_ENTER("Ndb_index_stat_thread::start_listener"); - NdbIndexStat *is= pr.is_util; - Ndb *ndb= pr.ndb; + NdbIndexStat *is = pr.is_util; + Ndb *ndb = pr.ndb; - if (is->create_listener(ndb) == -1) - { + if (is->create_listener(ndb) == -1) { log_info("create index stats listener failed: error %d line %d", is->getNdbError().code, is->getNdbError().line); DBUG_RETURN(-1); } - if (is->execute_listener(ndb) == -1) - { + if (is->execute_listener(ndb) == -1) { log_info("execute index stats listener failed: error %d line %d", is->getNdbError().code, is->getNdbError().line); // Drop the created listener @@ -2461,13 +2113,11 @@ Ndb_index_stat_thread::start_listener(Ndb_index_stat_proc &pr) DBUG_RETURN(0); } -void -Ndb_index_stat_thread::stop_listener(Ndb_index_stat_proc &pr) -{ +void Ndb_index_stat_thread::stop_listener(Ndb_index_stat_proc &pr) { DBUG_ENTER("Ndb_index_stat_thread::stop_listener"); - NdbIndexStat *is= pr.is_util; - Ndb *ndb= pr.ndb; + NdbIndexStat *is = pr.is_util; + Ndb *ndb = pr.ndb; (void)is->drop_listener(ndb); @@ -2476,41 +2126,33 @@ 
Ndb_index_stat_thread::stop_listener(Ndb_index_stat_proc &pr) /* Restart things after system restart */ -static bool ndb_index_stat_restart_flag= false; +static bool ndb_index_stat_restart_flag = false; -void -ndb_index_stat_restart() -{ +void ndb_index_stat_restart() { DBUG_ENTER("ndb_index_stat_restart"); - ndb_index_stat_restart_flag= true; + ndb_index_stat_restart_flag = true; ndb_index_stat_set_allow(false); DBUG_VOID_RETURN; } -bool -Ndb_index_stat_thread::is_setup_complete() -{ - if (ndb_index_stat_get_enable(NULL)) - { +bool Ndb_index_stat_thread::is_setup_complete() { + if (ndb_index_stat_get_enable(NULL)) { return ndb_index_stat_get_allow(); } return true; } -extern Ndb_cluster_connection* g_ndb_cluster_connection; +extern Ndb_cluster_connection *g_ndb_cluster_connection; -void -Ndb_index_stat_thread::do_run() -{ +void Ndb_index_stat_thread::do_run() { DBUG_ENTER("Ndb_index_stat_thread::do_run"); - Ndb_index_stat_glob &glob= ndb_index_stat_glob; + Ndb_index_stat_glob &glob = ndb_index_stat_glob; Ndb_index_stat_proc pr; log_info("Starting..."); - if (!wait_for_server_started()) - { + if (!wait_for_server_started()) { mysql_mutex_lock(&LOCK_client_waiting); goto ndb_index_stat_thread_end; } @@ -2519,11 +2161,9 @@ Ndb_index_stat_thread::do_run() /* Wait for cluster to start */ - while (!ndbcluster_is_connected(1)) - { + while (!ndbcluster_is_connected(1)) { /* ndb not connected yet */ - if (is_stop_requested()) - { + if (is_stop_requested()) { /* Terminated with a stop_request */ mysql_mutex_lock(&LOCK_client_waiting); goto ndb_index_stat_thread_end; @@ -2531,8 +2171,7 @@ Ndb_index_stat_thread::do_run() } /* Get instance used for sys objects check and create */ - if (!(pr.is_util= new NdbIndexStat)) - { + if (!(pr.is_util = new NdbIndexStat)) { log_error("Could not allocate NdbIndexStat is_util object"); mysql_mutex_lock(&LOCK_client_waiting); goto ndb_index_stat_thread_end; @@ -2546,29 +2185,25 @@ Ndb_index_stat_thread::do_run() log_info("Started"); bool enable_ok; - enable_ok= false; + enable_ok = false; // do we need to check or re-check sys objects (expensive) bool check_sys; - check_sys= true; + check_sys = true; struct timespec abstime; set_timespec(&abstime, 0); - for (;;) - { + for (;;) { mysql_mutex_lock(&LOCK_client_waiting); - if (client_waiting == false) - { - const int ret= - mysql_cond_timedwait(&COND_client_waiting, - &LOCK_client_waiting, - &abstime); + if (client_waiting == false) { + const int ret = mysql_cond_timedwait(&COND_client_waiting, + &LOCK_client_waiting, &abstime); if (ret == ETIMEDOUT) DBUG_PRINT("index_stat", ("loop: timed out")); else DBUG_PRINT("index_stat", ("loop: wake up")); } - client_waiting= false; + client_waiting = false; mysql_mutex_unlock(&LOCK_client_waiting); if (is_stop_requested()) /* Shutting down server */ @@ -2586,33 +2221,29 @@ Ndb_index_stat_thread::do_run() * running. 
In such case the Ndb object must be recycled to avoid * some event-related asserts (bug#20888668), */ - do - { + do { // initial restart was done while this mysqld was left running - if (ndb_index_stat_restart_flag) - { - ndb_index_stat_restart_flag= false; + if (ndb_index_stat_restart_flag) { + ndb_index_stat_restart_flag = false; ndb_index_stat_set_allow(false); drop_ndb(pr); - check_sys= true; // sys objects are gone + check_sys = true; // sys objects are gone } // check enable flag { /* const bool enable_ok_new= THDVAR(NULL, index_stat_enable); */ - const bool enable_ok_new= ndb_index_stat_get_enable(NULL); - - if (enable_ok != enable_ok_new) - { - DBUG_PRINT("index_stat", ("global enable: %d -> %d", - enable_ok, enable_ok_new)); - enable_ok= enable_ok_new; - check_sys= enable_ok; // check sys objects if enabling + const bool enable_ok_new = ndb_index_stat_get_enable(NULL); + + if (enable_ok != enable_ok_new) { + DBUG_PRINT("index_stat", + ("global enable: %d -> %d", enable_ok, enable_ok_new)); + enable_ok = enable_ok_new; + check_sys = enable_ok; // check sys objects if enabling } } - if (!enable_ok) - { + if (!enable_ok) { DBUG_PRINT("index_stat", ("Index stats is not enabled")); ndb_index_stat_set_allow(false); drop_ndb(pr); @@ -2620,15 +2251,12 @@ Ndb_index_stat_thread::do_run() } // the Ndb object is needed first - if (pr.ndb == NULL) - { - if (create_ndb(pr, g_ndb_cluster_connection) == -1) - break; + if (pr.ndb == NULL) { + if (create_ndb(pr, g_ndb_cluster_connection) == -1) break; } // sys objects - if (check_sys) - { + if (check_sys) { // at enable check or create stats tables and events if (check_or_create_systables(pr) == -1 || check_or_create_sysevents(pr) == -1) @@ -2636,37 +2264,35 @@ Ndb_index_stat_thread::do_run() } // listener is not critical but error means something is wrong - if (!pr.is_util->has_listener()) - { - if (start_listener(pr) == -1) - break; + if (!pr.is_util->has_listener()) { + if (start_listener(pr) == -1) break; } // normal processing - check_sys= false; + check_sys = false; ndb_index_stat_set_allow(true); - pr.busy= false; + pr.busy = false; ndb_index_stat_proc(pr); } while (0); /* Calculate new time to wake up */ - const Ndb_index_stat_opt &opt= ndb_index_stat_opt; - uint msecs= 0; + const Ndb_index_stat_opt &opt = ndb_index_stat_opt; + uint msecs = 0; if (!enable_ok) - msecs= opt.get(Ndb_index_stat_opt::Iloop_enable); + msecs = opt.get(Ndb_index_stat_opt::Iloop_enable); else if (!pr.busy) - msecs= opt.get(Ndb_index_stat_opt::Iloop_idle); + msecs = opt.get(Ndb_index_stat_opt::Iloop_idle); else - msecs= opt.get(Ndb_index_stat_opt::Iloop_busy); + msecs = opt.get(Ndb_index_stat_opt::Iloop_busy); DBUG_PRINT("index_stat", ("sleep %dms", msecs)); set_timespec_nsec(&abstime, msecs * 1000000ULL); /* Update status variable */ - glob.th_enable= enable_ok; - glob.th_busy= pr.busy; - glob.th_loop= msecs; + glob.th_enable = enable_ok; + glob.th_busy = pr.busy; + glob.th_loop = msecs; mysql_mutex_lock(&stat_mutex); glob.set_status(); mysql_mutex_unlock(&stat_mutex); @@ -2678,11 +2304,10 @@ Ndb_index_stat_thread::do_run() /* Prevent clients */ ndb_index_stat_set_allow(false); - if (pr.is_util) - { + if (pr.is_util) { drop_ndb(pr); delete pr.is_util; - pr.is_util= 0; + pr.is_util = 0; } mysql_mutex_unlock(&LOCK_client_waiting); @@ -2695,15 +2320,12 @@ Ndb_index_stat_thread::do_run() /* Optimizer queries */ -static ulonglong -ndb_index_stat_round(double x) -{ +static ulonglong ndb_index_stat_round(double x) { char buf[100]; - if (x < 0.0) - x= 0.0; + if (x < 0.0) x 
= 0.0; snprintf(buf, sizeof(buf), "%.0f", x); /* mysql provides my_strtoull */ - ulonglong n= my_strtoull(buf, 0, 10); + ulonglong n = my_strtoull(buf, 0, 10); return n; } @@ -2712,36 +2334,30 @@ ndb_index_stat_round(double x) similar but separated for clarity. */ -static int -ndb_index_stat_wait_query(Ndb_index_stat *st, - const Ndb_index_stat_snap &snap) -{ +static int ndb_index_stat_wait_query(Ndb_index_stat *st, + const Ndb_index_stat_snap &snap) { DBUG_ENTER("ndb_index_stat_wait_query"); - Ndb_index_stat_glob &glob= ndb_index_stat_glob; + Ndb_index_stat_glob &glob = ndb_index_stat_glob; mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); - int err= 0; - uint count= 0; + int err = 0; + uint count = 0; struct timespec abstime; glob.wait_stats++; glob.query_count++; - while (true) - { - int ret= 0; + while (true) { + int ret = 0; /* Query waits for any samples */ - if (st->sample_version > 0) - break; - if (st->no_stats) - { + if (st->sample_version > 0) break; + if (st->no_stats) { /* Have detected no stats now or before */ - err= NdbIndexStat::NoIndexStats; + err = NdbIndexStat::NoIndexStats; glob.query_no_stats++; break; } - if (st->error.code != 0) - { + if (st->error.code != 0) { /* An error has occurred now or before */ - err= NdbIndexStat::MyHasError; + err = NdbIndexStat::MyHasError; glob.query_error++; break; } @@ -2750,39 +2366,32 @@ ndb_index_stat_wait_query(Ndb_index_stat *st, happen but make sure. */ if (st->load_time != snap.load_time || - st->sample_version != snap.sample_version) - { + st->sample_version != snap.sample_version) { DBUG_ASSERT(false); - err= NdbIndexStat::NoIndexStats; + err = NdbIndexStat::NoIndexStats; break; } - if (st->abort_request) - { - err= NdbIndexStat::MyAbortReq; + if (st->abort_request) { + err = NdbIndexStat::MyAbortReq; break; } count++; - DBUG_PRINT("index_stat", ("st %s wait_query count:%u", - st->id, count)); + DBUG_PRINT("index_stat", ("st %s wait_query count:%u", st->id, count)); ndb_index_stat_thread.wakeup(); set_timespec(&abstime, 1); - ret= mysql_cond_timedwait(&ndb_index_stat_thread.stat_cond, - &ndb_index_stat_thread.stat_mutex, - &abstime); - if (ret != 0 && ret != ETIMEDOUT) - { - err= ret; + ret = mysql_cond_timedwait(&ndb_index_stat_thread.stat_cond, + &ndb_index_stat_thread.stat_mutex, &abstime); + if (ret != 0 && ret != ETIMEDOUT) { + err = ret; break; } } assert(glob.wait_stats != 0); glob.wait_stats--; mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); - if (err != 0) - { - DBUG_PRINT("index_stat", ("st %s wait_query error: %d", - st->id, err)); + if (err != 0) { + DBUG_PRINT("index_stat", ("st %s wait_query error: %d", st->id, err)); DBUG_RETURN(err); } DBUG_PRINT("index_stat", ("st %s wait_query ok: sample_version %u -> %u", @@ -2790,30 +2399,25 @@ ndb_index_stat_wait_query(Ndb_index_stat *st, DBUG_RETURN(0); } -static int -ndb_index_stat_wait_analyze(Ndb_index_stat *st, - const Ndb_index_stat_snap &snap) -{ +static int ndb_index_stat_wait_analyze(Ndb_index_stat *st, + const Ndb_index_stat_snap &snap) { DBUG_ENTER("ndb_index_stat_wait_analyze"); - Ndb_index_stat_glob &glob= ndb_index_stat_glob; + Ndb_index_stat_glob &glob = ndb_index_stat_glob; mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); - int err= 0; - uint count= 0; + int err = 0; + uint count = 0; struct timespec abstime; glob.wait_update++; glob.analyze_count++; - while (true) - { - int ret= 0; + while (true) { + int ret = 0; /* Analyze waits for newer samples */ - if (st->sample_version > snap.sample_version) - break; - if (st->error_count !=
snap.error_count) - { + if (st->sample_version > snap.sample_version) break; + if (st->error_count != snap.error_count) { /* A new error has occurred */ DBUG_ASSERT(st->error_count > snap.error_count); - err= st->error.code; + err = st->error.code; glob.analyze_error++; break; } @@ -2822,20 +2426,16 @@ ndb_index_stat_wait_analyze(Ndb_index_stat *st, deleted stats, an analyze here could wait forever. */ if (st->load_time != snap.load_time || - st->sample_version != snap.sample_version) - { + st->sample_version != snap.sample_version) { DBUG_ASSERT(false); - err= NdbIndexStat::AlienUpdate; + err = NdbIndexStat::AlienUpdate; break; } - if (st->abort_request) - { - err= NdbIndexStat::MyAbortReq; + if (st->abort_request) { + err = NdbIndexStat::MyAbortReq; break; } - if (!st->force_update || - glob.wait_update == 0) - { + if (!st->force_update || glob.wait_update == 0) { /** * If there is somehow nothing happening and * nothing to wait for, then it is an error to wait any @@ -2844,35 +2444,28 @@ ndb_index_stat_wait_analyze(Ndb_index_stat *st, fprintf(stderr, "ndb_index_stat_wait_analyze idx %u st->force_update %u " "glob.wait_update %u status : %s\n", - st->index_id, - st->force_update, - glob.wait_update, + st->index_id, st->force_update, glob.wait_update, g_ndb_status_index_stat_status); err = NdbIndexStat::InternalError; break; } count++; - DBUG_PRINT("index_stat", ("st %s wait_analyze count:%u", - st->id, count)); + DBUG_PRINT("index_stat", ("st %s wait_analyze count:%u", st->id, count)); ndb_index_stat_thread.wakeup(); set_timespec(&abstime, 1); - ret= mysql_cond_timedwait(&ndb_index_stat_thread.stat_cond, - &ndb_index_stat_thread.stat_mutex, - &abstime); - if (ret != 0 && ret != ETIMEDOUT) - { - err= ret; + ret = mysql_cond_timedwait(&ndb_index_stat_thread.stat_cond, + &ndb_index_stat_thread.stat_mutex, &abstime); + if (ret != 0 && ret != ETIMEDOUT) { + err = ret; break; } } assert(glob.wait_update != 0); glob.wait_update--; mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); - if (err != 0) - { - DBUG_PRINT("index_stat", ("st %s wait_analyze error: %d", - st->id, err)); + if (err != 0) { + DBUG_PRINT("index_stat", ("st %s wait_analyze error: %d", st->id, err)); DBUG_RETURN(err); } DBUG_PRINT("index_stat", ("st %s wait_analyze ok: sample_version %u -> %u", @@ -2880,46 +2473,36 @@ ndb_index_stat_wait_analyze(Ndb_index_stat *st, DBUG_RETURN(0); } +void compute_index_bounds(NdbIndexScanOperation::IndexBound &bound, + const KEY *key_info, const key_range *start_key, + const key_range *end_key, int from); -void -compute_index_bounds(NdbIndexScanOperation::IndexBound & bound, - const KEY *key_info, - const key_range *start_key, const key_range *end_key, - int from); - -int -ha_ndbcluster::ndb_index_stat_query(uint inx, - const key_range *min_key, - const key_range *max_key, - NdbIndexStat::Stat& stat, - int from) -{ +int ha_ndbcluster::ndb_index_stat_query(uint inx, const key_range *min_key, + const key_range *max_key, + NdbIndexStat::Stat &stat, int from) { DBUG_ENTER("ha_ndbcluster::ndb_index_stat_query"); - const KEY *key_info= table->key_info + inx; - const NDB_INDEX_DATA &data= m_index[inx]; - const NDBINDEX *index= data.index; + const KEY *key_info = table->key_info + inx; + const NDB_INDEX_DATA &data = m_index[inx]; + const NDBINDEX *index = data.index; DBUG_PRINT("index_stat", ("index: %u name: %s", inx, index->getName())); - int err= 0; + int err = 0; /* Create an IndexBound struct for the keys */ NdbIndexScanOperation::IndexBound ib; compute_index_bounds(ib, key_info, min_key, 
max_key, from); - ib.range_no= 0; + ib.range_no = 0; Ndb_index_stat_snap snap; - Ndb_index_stat *st= - ndb_index_stat_get_share(m_share, index, m_table, snap, err, true, false); - if (st == 0) - DBUG_RETURN(err); + Ndb_index_stat *st = + ndb_index_stat_get_share(m_share, index, m_table, snap, err, true, false); + if (st == 0) DBUG_RETURN(err); /* Now holding reference to st */ - do - { - err= ndb_index_stat_wait_query(st, snap); - if (err != 0) - break; + do { + err = ndb_index_stat_wait_query(st, snap); + if (err != 0) break; assert(st->sample_version != 0); uint8 bound_lo_buffer[NdbIndexStat::BoundBufferBytes]; uint8 bound_hi_buffer[NdbIndexStat::BoundBufferBytes]; @@ -2927,26 +2510,23 @@ ha_ndbcluster::ndb_index_stat_query(uint inx, NdbIndexStat::Bound bound_hi(st->is, bound_hi_buffer); NdbIndexStat::Range range(bound_lo, bound_hi); - const NdbRecord* key_record= data.ndb_record_key; - if (st->is->convert_range(range, key_record, &ib) == -1) - { + const NdbRecord *key_record = data.ndb_record_key; + if (st->is->convert_range(range, key_record, &ib) == -1) { mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); ndb_index_stat_error(st, 1, "convert_range"); - err= st->client_error.code; + err = st->client_error.code; mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); break; } - if (st->is->query_stat(range, stat) == -1) - { + if (st->is->query_stat(range, stat) == -1) { /* Invalid cache - should remove the entry */ mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); ndb_index_stat_error(st, 1, "query_stat"); - err= st->client_error.code; + err = st->client_error.code; mysql_mutex_unlock(&ndb_index_stat_thread.stat_mutex); break; } - } - while (0); + } while (0); /* Release reference to st */ mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); @@ -2955,25 +2535,20 @@ ha_ndbcluster::ndb_index_stat_query(uint inx, DBUG_RETURN(err); } -int -ha_ndbcluster::ndb_index_stat_get_rir(uint inx, - key_range *min_key, - key_range *max_key, - ha_rows *rows_out) -{ +int ha_ndbcluster::ndb_index_stat_get_rir(uint inx, key_range *min_key, + key_range *max_key, + ha_rows *rows_out) { DBUG_ENTER("ha_ndbcluster::ndb_index_stat_get_rir"); uint8 stat_buffer[NdbIndexStat::StatBufferBytes]; NdbIndexStat::Stat stat(stat_buffer); - int err= ndb_index_stat_query(inx, min_key, max_key, stat, 1); - if (err == 0) - { - double rir= -1.0; + int err = ndb_index_stat_query(inx, min_key, max_key, stat, 1); + if (err == 0) { + double rir = -1.0; NdbIndexStat::get_rir(stat, &rir); - ha_rows rows= ndb_index_stat_round(rir); + ha_rows rows = ndb_index_stat_round(rir); /* Estimate only so cannot return exact zero */ - if (rows == 0) - rows= 1; - *rows_out= rows; + if (rows == 0) rows = 1; + *rows_out = rows; #ifndef DBUG_OFF char rule[NdbIndexStat::RuleBufferBytes]; NdbIndexStat::get_rule(stat, rule); @@ -2984,23 +2559,18 @@ ha_ndbcluster::ndb_index_stat_get_rir(uint inx, DBUG_RETURN(err); } -int -ha_ndbcluster::ndb_index_stat_set_rpk(uint inx) -{ +int ha_ndbcluster::ndb_index_stat_set_rpk(uint inx) { DBUG_ENTER("ha_ndbcluster::ndb_index_stat_set_rpk"); - uint8 stat_buffer[NdbIndexStat::StatBufferBytes]; NdbIndexStat::Stat stat(stat_buffer); - const key_range *min_key= 0; - const key_range *max_key= 0; - const int err= ndb_index_stat_query(inx, min_key, max_key, stat, 2); - if (err == 0) - { - KEY *key_info= table->key_info + inx; - for (uint k= 0; k < key_info->user_defined_key_parts; k++) - { - double rpk= -1.0; + const key_range *min_key = 0; + const key_range *max_key = 0; + const int err = ndb_index_stat_query(inx, 
min_key, max_key, stat, 2); + if (err == 0) { + KEY *key_info = table->key_info + inx; + for (uint k = 0; k < key_info->user_defined_key_parts; k++) { + double rpk = -1.0; NdbIndexStat::get_rpk(stat, k, &rpk); key_info->set_records_per_key(k, static_cast<rec_per_key_t>(rpk)); #ifndef DBUG_OFF @@ -3014,27 +2584,26 @@ ha_ndbcluster::ndb_index_stat_set_rpk(uint inx) DBUG_RETURN(err); } -int -ha_ndbcluster::ndb_index_stat_analyze(uint *inx_list, - uint inx_count) -{ +int ha_ndbcluster::ndb_index_stat_analyze(uint *inx_list, uint inx_count) { DBUG_ENTER("ha_ndbcluster::ndb_index_stat_analyze"); struct Req { Ndb_index_stat *st; Ndb_index_stat_snap snap; int err; - Req() { st= 0; err= 0; } + Req() { + st = 0; + err = 0; + } }; Req req[MAX_INDEXES]; /* Force stats update on each index */ - for (uint i= 0; i < inx_count; i++) - { - Req &r= req[i]; - uint inx= inx_list[i]; - const NDB_INDEX_DATA &data= m_index[inx]; - const NDBINDEX *index= data.index; + for (uint i = 0; i < inx_count; i++) { + Req &r = req[i]; + uint inx = inx_list[i]; + const NDB_INDEX_DATA &data = m_index[inx]; + const NDBINDEX *index = data.index; DBUG_PRINT("index_stat", ("force update: %s", index->getName())); r.st = ndb_index_stat_get_share(m_share, index, m_table, r.snap, r.err, @@ -3044,18 +2613,16 @@ ha_ndbcluster::ndb_index_stat_analyze(uint *inx_list, } /* Wait for each update */ - for (uint i = 0; i < inx_count; i++) - { - Req &r= req[i]; - uint inx= inx_list[i]; - const NDB_INDEX_DATA &data= m_index[inx]; - const NDBINDEX *index= data.index; - (void)index; // USED - - if (r.err == 0) - { + for (uint i = 0; i < inx_count; i++) { + Req &r = req[i]; + uint inx = inx_list[i]; + const NDB_INDEX_DATA &data = m_index[inx]; + const NDBINDEX *index = data.index; + (void)index; // USED + + if (r.err == 0) { DBUG_PRINT("index_stat", ("wait for update: %s", index->getName())); - r.err=ndb_index_stat_wait_analyze(r.st, r.snap); + r.err = ndb_index_stat_wait_analyze(r.st, r.snap); /* Release reference to r.st */ mysql_mutex_lock(&ndb_index_stat_thread.stat_mutex); ndb_index_stat_ref_count(r.st, false); @@ -3064,13 +2631,11 @@ ha_ndbcluster::ndb_index_stat_analyze(uint *inx_list, } /* Return first error if any */ - int err= 0; - for (uint i= 0; i < inx_count; i++) - { - Req &r= req[i]; - if (r.err != 0) - { - err= r.err; + int err = 0; + for (uint i = 0; i < inx_count; i++) { + Req &r = req[i]; + if (r.err != 0) { + err = r.err; break; } } @@ -3078,19 +2643,17 @@ ha_ndbcluster::ndb_index_stat_analyze(uint *inx_list, DBUG_RETURN(err); } +static SHOW_VAR ndb_status_vars_index_stat[] = { + {"status", (char *)&g_ndb_status_index_stat_status, SHOW_CHAR_PTR, + SHOW_SCOPE_GLOBAL}, + {"cache_query", (char *)&g_ndb_status_index_stat_cache_query, SHOW_LONG, + SHOW_SCOPE_GLOBAL}, + {"cache_clean", (char *)&g_ndb_status_index_stat_cache_clean, SHOW_LONG, + SHOW_SCOPE_GLOBAL}, + {NullS, NullS, SHOW_LONG, SHOW_SCOPE_GLOBAL}}; -static SHOW_VAR ndb_status_vars_index_stat[]= -{ - {"status", (char*) &g_ndb_status_index_stat_status, SHOW_CHAR_PTR, SHOW_SCOPE_GLOBAL}, - {"cache_query", (char*) &g_ndb_status_index_stat_cache_query, SHOW_LONG, SHOW_SCOPE_GLOBAL}, - {"cache_clean", (char*) &g_ndb_status_index_stat_cache_clean, SHOW_LONG, SHOW_SCOPE_GLOBAL}, - {NullS, NullS, SHOW_LONG, SHOW_SCOPE_GLOBAL} -}; -int -show_ndb_status_index_stat(THD*, SHOW_VAR* var, char*) -{ +int show_ndb_status_index_stat(THD *, SHOW_VAR *var, char *) { var->type = SHOW_ARRAY; - var->value = (char*) &ndb_status_vars_index_stat; + var->value = (char *)&ndb_status_vars_index_stat; return 0;
} diff --git a/storage/ndb/plugin/ha_ndb_index_stat.h b/storage/ndb/plugin/ha_ndb_index_stat.h index 3d65213d6496..732ed381cff2 100644 --- a/storage/ndb/plugin/ha_ndb_index_stat.h +++ b/storage/ndb/plugin/ha_ndb_index_stat.h @@ -34,13 +34,13 @@ class Ndb_cluster_connection; struct SHOW_VAR; struct SYS_VAR; -class Ndb_index_stat_thread : public Ndb_component -{ +class Ndb_index_stat_thread : public Ndb_component { // Someone is waiting for stats bool client_waiting; mysql_mutex_t LOCK_client_waiting; mysql_cond_t COND_client_waiting; -public: + + public: Ndb_index_stat_thread(); virtual ~Ndb_index_stat_thread(); @@ -56,28 +56,28 @@ class Ndb_index_stat_thread : public Ndb_component /* are we setup */ bool is_setup_complete(); -private: + + private: virtual int do_init(); virtual void do_run(); virtual int do_deinit(); // Wakeup for stop virtual void do_wakeup(); - int check_or_create_systables(struct Ndb_index_stat_proc& pr); - int check_or_create_sysevents(struct Ndb_index_stat_proc& pr); - void drop_ndb(struct Ndb_index_stat_proc& pr); - int start_listener(struct Ndb_index_stat_proc& pr); - int create_ndb(struct Ndb_index_stat_proc& pr, - Ndb_cluster_connection* connection); - void stop_listener(struct Ndb_index_stat_proc& pr); + int check_or_create_systables(struct Ndb_index_stat_proc &pr); + int check_or_create_sysevents(struct Ndb_index_stat_proc &pr); + void drop_ndb(struct Ndb_index_stat_proc &pr); + int start_listener(struct Ndb_index_stat_proc &pr); + int create_ndb(struct Ndb_index_stat_proc &pr, + Ndb_cluster_connection *connection); + void stop_listener(struct Ndb_index_stat_proc &pr); }; /* free entries from share or at end */ -void ndb_index_stat_free(NDB_SHARE*, int iudex_id, int index_version); -void ndb_index_stat_free(NDB_SHARE*); +void ndb_index_stat_free(NDB_SHARE *, int index_id, int index_version); +void ndb_index_stat_free(NDB_SHARE *); void ndb_index_stat_end(); - /** show_ndb_status_index_stat @@ -85,14 +85,13 @@ void ndb_index_stat_end(); queries. Returns info about ndb index stat related status variables. */ -int show_ndb_status_index_stat(THD* thd, SHOW_VAR* var, - char* buff); +int show_ndb_status_index_stat(THD *thd, SHOW_VAR *var, char *buff); // Check and update functions for --ndb-index-stat-option= -int ndb_index_stat_option_check(THD*, SYS_VAR*, void* save, - struct st_mysql_value* value); -void ndb_index_stat_option_update(THD*, SYS_VAR*, void* var_ptr, - const void* save); +int ndb_index_stat_option_check(THD *, SYS_VAR *, void *save, + struct st_mysql_value *value); +void ndb_index_stat_option_update(THD *, SYS_VAR *, void *var_ptr, + const void *save); // Storage for --ndb-index-stat-option= extern char ndb_index_stat_option_buf[]; diff --git a/storage/ndb/plugin/ha_ndbcluster.cc b/storage/ndb/plugin/ha_ndbcluster.cc index 737ef7ef765b..0d11c03a3839 100644 --- a/storage/ndb/plugin/ha_ndbcluster.cc +++ b/storage/ndb/plugin/ha_ndbcluster.cc @@ -42,9 +42,9 @@ #include "mysql/psi/mysql_thread.h" #include "sql/abstract_query_plan.h" #include "sql/current_thd.h" -#include "sql/derror.h" // ER_THD -#include "sql/mysqld.h" // global_system_variables table_alias_charset ... -#include "sql/mysqld_thd_manager.h" // Global_THD_manager +#include "sql/derror.h" // ER_THD +#include "sql/mysqld.h" // global_system_variables table_alias_charset ...
+#include "sql/mysqld_thd_manager.h" // Global_THD_manager #include "sql/partition_info.h" #include "sql/sql_alter.h" #include "sql/sql_lex.h" @@ -95,9 +95,9 @@ #include "template_utils.h" #ifndef DBUG_OFF -#include "sql/sql_test.h" // print_where +#include "sql/sql_test.h" // print_where #endif - // tablename_to_filename +// tablename_to_filename #include "sql/sql_class.h" #include "sql/sql_table.h" // build_table_filename, #include "storage/ndb/plugin/ndb_dd.h" @@ -116,22 +116,23 @@ typedef NdbDictionary::Dictionary NDBDICT; extern "C" void ndb_init_internal(Uint32); extern "C" void ndb_end_internal(Uint32); -static const int DEFAULT_PARALLELISM= 0; -static const ha_rows DEFAULT_AUTO_PREFETCH= 32; -static const ulong ONE_YEAR_IN_SECONDS= (ulong) 3600L*24L*365L; +static const int DEFAULT_PARALLELISM = 0; +static const ha_rows DEFAULT_AUTO_PREFETCH = 32; +static const ulong ONE_YEAR_IN_SECONDS = (ulong)3600L * 24L * 365L; -static constexpr unsigned MAX_BLOB_ROW_SIZE= 14000; -static constexpr unsigned DEFAULT_MAX_BLOB_PART_SIZE= MAX_BLOB_ROW_SIZE - 4*13; +static constexpr unsigned MAX_BLOB_ROW_SIZE = 14000; +static constexpr unsigned DEFAULT_MAX_BLOB_PART_SIZE = + MAX_BLOB_ROW_SIZE - 4 * 13; ulong opt_ndb_extra_logging; static ulong opt_ndb_wait_connected; static ulong opt_ndb_wait_setup; static uint opt_ndb_cluster_connection_pool; -static char* opt_connection_pool_nodeids_str; +static char *opt_connection_pool_nodeids_str; static uint opt_ndb_recv_thread_activation_threshold; -static char* opt_ndb_recv_thread_cpu_mask; -static char* opt_ndb_index_stat_option; -static char* opt_ndb_connectstring; +static char *opt_ndb_recv_thread_cpu_mask; +static char *opt_ndb_index_stat_option; +static char *opt_ndb_connectstring; static uint opt_ndb_nodeid; static bool opt_ndb_read_backup; static ulong opt_ndb_data_node_neighbour; @@ -141,297 +142,254 @@ static ulong opt_ndb_row_checksum; // The version where ndbcluster uses DYNAMIC by default when creating columns static ulong NDB_VERSION_DYNAMIC_IS_DEFAULT = 50711; enum ndb_default_colum_format_enum { - NDB_DEFAULT_COLUMN_FORMAT_FIXED= 0, - NDB_DEFAULT_COLUMN_FORMAT_DYNAMIC= 1 + NDB_DEFAULT_COLUMN_FORMAT_FIXED = 0, + NDB_DEFAULT_COLUMN_FORMAT_DYNAMIC = 1 }; -static const char* default_column_format_names[]= { "FIXED", "DYNAMIC", NullS }; +static const char *default_column_format_names[] = {"FIXED", "DYNAMIC", NullS}; static ulong opt_ndb_default_column_format; -static TYPELIB default_column_format_typelib= { - array_elements(default_column_format_names) - 1, - "", - default_column_format_names, - NULL -}; +static TYPELIB default_column_format_typelib = { + array_elements(default_column_format_names) - 1, "", + default_column_format_names, NULL}; static MYSQL_SYSVAR_ENUM( - default_column_format, /* name */ - opt_ndb_default_column_format, /* var */ - PLUGIN_VAR_RQCMDARG, - "Change COLUMN_FORMAT default value (fixed or dynamic) " - "for backward compatibility. Also affects the default value " - "of ROW_FORMAT.", - NULL, /* check func. */ - NULL, /* update func. */ - NDB_DEFAULT_COLUMN_FORMAT_FIXED, /* default */ - &default_column_format_typelib /* typelib */ + default_column_format, /* name */ + opt_ndb_default_column_format, /* var */ + PLUGIN_VAR_RQCMDARG, + "Change COLUMN_FORMAT default value (fixed or dynamic) " + "for backward compatibility. Also affects the default value " + "of ROW_FORMAT.", + NULL, /* check func. */ + NULL, /* update func. 
*/ + NDB_DEFAULT_COLUMN_FORMAT_FIXED, /* default */ + &default_column_format_typelib /* typelib */ ); static MYSQL_THDVAR_UINT( - autoincrement_prefetch_sz, /* name */ - PLUGIN_VAR_RQCMDARG, - "Specify number of autoincrement values that are prefetched.", - NULL, /* check func. */ - NULL, /* update func. */ - 1, /* default */ - 1, /* min */ - 65535, /* max */ - 0 /* block */ + autoincrement_prefetch_sz, /* name */ + PLUGIN_VAR_RQCMDARG, + "Specify number of autoincrement values that are prefetched.", + NULL, /* check func. */ + NULL, /* update func. */ + 1, /* default */ + 1, /* min */ + 65535, /* max */ + 0 /* block */ ); - static MYSQL_THDVAR_BOOL( - force_send, /* name */ - PLUGIN_VAR_OPCMDARG, - "Force send of buffers to ndb immediately without waiting for " - "other threads.", - NULL, /* check func. */ - NULL, /* update func. */ - 1 /* default */ + force_send, /* name */ + PLUGIN_VAR_OPCMDARG, + "Force send of buffers to ndb immediately without waiting for " + "other threads.", + NULL, /* check func. */ + NULL, /* update func. */ + 1 /* default */ ); - static MYSQL_THDVAR_BOOL( - use_exact_count, /* name */ - PLUGIN_VAR_OPCMDARG, - "Use exact records count during query planning and for fast " - "select count(*), disable for faster queries.", - NULL, /* check func. */ - NULL, /* update func. */ - 0 /* default */ + use_exact_count, /* name */ + PLUGIN_VAR_OPCMDARG, + "Use exact records count during query planning and for fast " + "select count(*), disable for faster queries.", + NULL, /* check func. */ + NULL, /* update func. */ + 0 /* default */ ); - static MYSQL_THDVAR_BOOL( - use_transactions, /* name */ - PLUGIN_VAR_OPCMDARG, - "Use transactions for large inserts, if enabled then large " - "inserts will be split into several smaller transactions", - NULL, /* check func. */ - NULL, /* update func. */ - 1 /* default */ + use_transactions, /* name */ + PLUGIN_VAR_OPCMDARG, + "Use transactions for large inserts, if enabled then large " + "inserts will be split into several smaller transactions", + NULL, /* check func. */ + NULL, /* update func. */ + 1 /* default */ ); - static MYSQL_THDVAR_BOOL( - use_copying_alter_table, /* name */ - PLUGIN_VAR_OPCMDARG, - "Force ndbcluster to always copy tables at alter table (should " - "only be used if online alter table fails).", - NULL, /* check func. */ - NULL, /* update func. */ - 0 /* default */ + use_copying_alter_table, /* name */ + PLUGIN_VAR_OPCMDARG, + "Force ndbcluster to always copy tables at alter table (should " + "only be used if online alter table fails).", + NULL, /* check func. */ + NULL, /* update func. */ + 0 /* default */ ); - static MYSQL_THDVAR_BOOL( - allow_copying_alter_table, /* name */ - PLUGIN_VAR_OPCMDARG, - "Specifies if implicit copying alter table is allowed. Can be overridden " - "by using ALGORITHM=COPY in the alter table command.", - NULL, /* check func. */ - NULL, /* update func. */ - 1 /* default */ + allow_copying_alter_table, /* name */ + PLUGIN_VAR_OPCMDARG, + "Specifies if implicit copying alter table is allowed. Can be overridden " + "by using ALGORITHM=COPY in the alter table command.", + NULL, /* check func. */ + NULL, /* update func. */ + 1 /* default */ ); - -static MYSQL_THDVAR_UINT( - optimized_node_selection, /* name */ - PLUGIN_VAR_OPCMDARG, - "Select nodes for transactions in a more optimal way.", - NULL, /* check func. */ - NULL, /* update func. 
*/ - 3, /* default */ - 0, /* min */ - 3, /* max */ - 0 /* block */ +static MYSQL_THDVAR_UINT(optimized_node_selection, /* name */ + PLUGIN_VAR_OPCMDARG, + "Select nodes for transactions in a more optimal way.", + NULL, /* check func. */ + NULL, /* update func. */ + 3, /* default */ + 0, /* min */ + 3, /* max */ + 0 /* block */ ); - -static MYSQL_THDVAR_ULONG( - batch_size, /* name */ - PLUGIN_VAR_RQCMDARG, - "Batch size in bytes.", - NULL, /* check func. */ - NULL, /* update func. */ - 32768, /* default */ - 0, /* min */ - ONE_YEAR_IN_SECONDS, /* max */ - 0 /* block */ +static MYSQL_THDVAR_ULONG(batch_size, /* name */ + PLUGIN_VAR_RQCMDARG, "Batch size in bytes.", + NULL, /* check func. */ + NULL, /* update func. */ + 32768, /* default */ + 0, /* min */ + ONE_YEAR_IN_SECONDS, /* max */ + 0 /* block */ ); - static MYSQL_THDVAR_ULONG( - optimization_delay, /* name */ - PLUGIN_VAR_RQCMDARG, - "For optimize table, specifies the delay in milliseconds " - "for each batch of rows sent.", - NULL, /* check func. */ - NULL, /* update func. */ - 10, /* default */ - 0, /* min */ - 100000, /* max */ - 0 /* block */ + optimization_delay, /* name */ + PLUGIN_VAR_RQCMDARG, + "For optimize table, specifies the delay in milliseconds " + "for each batch of rows sent.", + NULL, /* check func. */ + NULL, /* update func. */ + 10, /* default */ + 0, /* min */ + 100000, /* max */ + 0 /* block */ ); - -static MYSQL_THDVAR_BOOL( - index_stat_enable, /* name */ - PLUGIN_VAR_OPCMDARG, - "Use ndb index statistics in query optimization.", - NULL, /* check func. */ - NULL, /* update func. */ - true /* default */ +static MYSQL_THDVAR_BOOL(index_stat_enable, /* name */ + PLUGIN_VAR_OPCMDARG, + "Use ndb index statistics in query optimization.", + NULL, /* check func. */ + NULL, /* update func. */ + true /* default */ ); - -static MYSQL_THDVAR_BOOL( - table_no_logging, /* name */ - PLUGIN_VAR_NOCMDARG, - "", - NULL, /* check func. */ - NULL, /* update func. */ - false /* default */ +static MYSQL_THDVAR_BOOL(table_no_logging, /* name */ + PLUGIN_VAR_NOCMDARG, "", NULL, /* check func. */ + NULL, /* update func. */ + false /* default */ ); - -static MYSQL_THDVAR_BOOL( - table_temporary, /* name */ - PLUGIN_VAR_NOCMDARG, - "", - NULL, /* check func. */ - NULL, /* update func. */ - false /* default */ +static MYSQL_THDVAR_BOOL(table_temporary, /* name */ + PLUGIN_VAR_NOCMDARG, "", NULL, /* check func. */ + NULL, /* update func. */ + false /* default */ ); -static MYSQL_THDVAR_UINT( - blob_read_batch_bytes, /* name */ - PLUGIN_VAR_RQCMDARG, - "Specifies the bytesize large Blob reads " - "should be batched into. 0 == No limit.", - NULL, /* check func */ - NULL, /* update func */ - 65536, /* default */ - 0, /* min */ - UINT_MAX, /* max */ - 0 /* block */ +static MYSQL_THDVAR_UINT(blob_read_batch_bytes, /* name */ + PLUGIN_VAR_RQCMDARG, + "Specifies the bytesize large Blob reads " + "should be batched into. 0 == No limit.", + NULL, /* check func */ + NULL, /* update func */ + 65536, /* default */ + 0, /* min */ + UINT_MAX, /* max */ + 0 /* block */ ); -static MYSQL_THDVAR_UINT( - blob_write_batch_bytes, /* name */ - PLUGIN_VAR_RQCMDARG, - "Specifies the bytesize large Blob writes " - "should be batched into. 0 == No limit.", - NULL, /* check func */ - NULL, /* update func */ - 65536, /* default */ - 0, /* min */ - UINT_MAX, /* max */ - 0 /* block */ +static MYSQL_THDVAR_UINT(blob_write_batch_bytes, /* name */ + PLUGIN_VAR_RQCMDARG, + "Specifies the bytesize large Blob writes " + "should be batched into. 
0 == No limit.", + NULL, /* check func */ + NULL, /* update func */ + 65536, /* default */ + 0, /* min */ + UINT_MAX, /* max */ + 0 /* block */ ); static MYSQL_THDVAR_UINT( - deferred_constraints, /* name */ - PLUGIN_VAR_RQCMDARG, - "Specified that constraints should be checked deferred (when supported)", - NULL, /* check func */ - NULL, /* update func */ - 0, /* default */ - 0, /* min */ - 1, /* max */ - 0 /* block */ + deferred_constraints, /* name */ + PLUGIN_VAR_RQCMDARG, + "Specified that constraints should be checked deferred (when supported)", + NULL, /* check func */ + NULL, /* update func */ + 0, /* default */ + 0, /* min */ + 1, /* max */ + 0 /* block */ ); static MYSQL_THDVAR_BOOL( - show_foreign_key_mock_tables, /* name */ - PLUGIN_VAR_OPCMDARG, - "Show the mock tables which is used to support foreign_key_checks= 0. " - "Extra info warnings are shown when creating and dropping the tables. " - "The real table name is show in SHOW CREATE TABLE", - NULL, /* check func. */ - NULL, /* update func. */ - 0 /* default */ + show_foreign_key_mock_tables, /* name */ + PLUGIN_VAR_OPCMDARG, + "Show the mock tables which is used to support foreign_key_checks= 0. " + "Extra info warnings are shown when creating and dropping the tables. " + "The real table name is show in SHOW CREATE TABLE", + NULL, /* check func. */ + NULL, /* update func. */ + 0 /* default */ ); -static MYSQL_THDVAR_BOOL( - join_pushdown, /* name */ - PLUGIN_VAR_OPCMDARG, - "Enable pushing down of join to datanodes", - NULL, /* check func. */ - NULL, /* update func. */ - true /* default */ +static MYSQL_THDVAR_BOOL(join_pushdown, /* name */ + PLUGIN_VAR_OPCMDARG, + "Enable pushing down of join to datanodes", + NULL, /* check func. */ + NULL, /* update func. */ + true /* default */ ); -static MYSQL_THDVAR_BOOL( - log_exclusive_reads, /* name */ - PLUGIN_VAR_OPCMDARG, - "Log primary key reads with exclusive locks " - "to allow conflict resolution based on read conflicts", - NULL, /* check func. */ - NULL, /* update func. */ - 0 /* default */ +static MYSQL_THDVAR_BOOL(log_exclusive_reads, /* name */ + PLUGIN_VAR_OPCMDARG, + "Log primary key reads with exclusive locks " + "to allow conflict resolution based on read conflicts", + NULL, /* check func. */ + NULL, /* update func. */ + 0 /* default */ ); - /* Required in index_stat.cc but available only from here thanks to use of top level anonymous structs. 
*/ -bool ndb_index_stat_get_enable(THD *thd) -{ +bool ndb_index_stat_get_enable(THD *thd) { const bool value = THDVAR(thd, index_stat_enable); return value; } -bool ndb_show_foreign_key_mock_tables(THD* thd) -{ +bool ndb_show_foreign_key_mock_tables(THD *thd) { const bool value = THDVAR(thd, show_foreign_key_mock_tables); return value; } static int ndbcluster_end(handlerton *, ha_panic_function); -static bool ndbcluster_show_status(handlerton *, THD*, - stat_print_fn *, +static bool ndbcluster_show_status(handlerton *, THD *, stat_print_fn *, enum ha_stat_type); -static int -ndbcluster_make_pushed_join(handlerton *, THD*, const AQP::Join_plan*); +static int ndbcluster_make_pushed_join(handlerton *, THD *, + const AQP::Join_plan *); -static int ndbcluster_get_tablespace(THD* thd, - LEX_CSTRING db_name, +static int ndbcluster_get_tablespace(THD *thd, LEX_CSTRING db_name, LEX_CSTRING table_name, LEX_CSTRING *tablespace_name); -static int ndbcluster_alter_tablespace(handlerton*, THD* thd, - st_alter_tablespace* info, - const dd::Tablespace*, - dd::Tablespace*); -static bool -ndbcluster_get_tablespace_statistics(const char *tablespace_name, - const char *file_name, - const dd::Properties &ts_se_private_data, - ha_tablespace_statistics *stats); +static int ndbcluster_alter_tablespace(handlerton *, THD *thd, + st_alter_tablespace *info, + const dd::Tablespace *, + dd::Tablespace *); +static bool ndbcluster_get_tablespace_statistics( + const char *tablespace_name, const char *file_name, + const dd::Properties &ts_se_private_data, ha_tablespace_statistics *stats); static void ndbcluster_pre_dd_shutdown(handlerton *); - static handler *ndbcluster_create_handler(handlerton *hton, TABLE_SHARE *table, bool /* partitioned */, - MEM_ROOT *mem_root) -{ + MEM_ROOT *mem_root) { return new (mem_root) ha_ndbcluster(hton, table); } -static uint -ndbcluster_partition_flags() -{ - return (HA_CAN_UPDATE_PARTITION_KEY | - HA_CAN_PARTITION_UNIQUE | HA_USE_AUTO_PARTITION); +static uint ndbcluster_partition_flags() { + return (HA_CAN_UPDATE_PARTITION_KEY | HA_CAN_PARTITION_UNIQUE | + HA_USE_AUTO_PARTITION); } -uint ha_ndbcluster::alter_flags(uint flags) const -{ - const uint f= - HA_PARTITION_FUNCTION_SUPPORTED | - 0; +uint ha_ndbcluster::alter_flags(uint flags) const { + const uint f = HA_PARTITION_FUNCTION_SUPPORTED | 0; - if (flags & Alter_info::ALTER_DROP_PARTITION) - return 0; + if (flags & Alter_info::ALTER_DROP_PARTITION) return 0; return f; } @@ -442,51 +400,50 @@ static constexpr uint NDB_AUTO_INCREMENT_RETRIES = 100; #define ERR_PRINT(err) \ DBUG_PRINT("error", ("%d message: %s", err.code, err.message)) -#define ERR_RETURN(err) \ -{ \ - const NdbError& tmp= err; \ - DBUG_RETURN(ndb_to_mysql_error(&tmp)); \ -} +#define ERR_RETURN(err) \ + { \ + const NdbError &tmp = err; \ + DBUG_RETURN(ndb_to_mysql_error(&tmp)); \ + } -#define ERR_BREAK(err, code) \ -{ \ - const NdbError& tmp= err; \ - code= ndb_to_mysql_error(&tmp); \ - break; \ -} +#define ERR_BREAK(err, code) \ + { \ + const NdbError &tmp = err; \ + code = ndb_to_mysql_error(&tmp); \ + break; \ + } -#define ERR_SET(err, code) \ -{ \ - const NdbError& tmp= err; \ - code= ndb_to_mysql_error(&tmp); \ -} +#define ERR_SET(err, code) \ + { \ + const NdbError &tmp = err; \ + code = ndb_to_mysql_error(&tmp); \ + } -static int ndbcluster_inited= 0; +static int ndbcluster_inited = 0; -/* +/* Indicator used to delay client and slave connections until Ndb has Binlog setup (bug#46955) */ -int ndb_setup_complete= 0; // Use ndbcluster_mutex & ndbcluster_cond -extern 
Ndb* g_ndb; +int ndb_setup_complete = 0; // Use ndbcluster_mutex & ndbcluster_cond +extern Ndb *g_ndb; extern Ndb_cluster_connection *g_ndb_cluster_connection; /// Handler synchronization mysql_mutex_t ndbcluster_mutex; -mysql_cond_t ndbcluster_cond; +mysql_cond_t ndbcluster_cond; -static const char* ndbcluster_hton_name = "ndbcluster"; -static const int ndbcluster_hton_name_length = sizeof(ndbcluster_hton_name)-1; +static const char *ndbcluster_hton_name = "ndbcluster"; +static const int ndbcluster_hton_name_length = sizeof(ndbcluster_hton_name) - 1; static void modify_shared_stats(NDB_SHARE *share, Ndb_local_table_statistics *local_stat); -static int ndb_get_table_statistics(THD *thd, ha_ndbcluster*, Ndb*, - const NdbDictionary::Table*, - const NdbRecord *, - struct Ndb_statistics *, - uint part_id= ~(uint)0); +static int ndb_get_table_statistics(THD *thd, ha_ndbcluster *, Ndb *, + const NdbDictionary::Table *, + const NdbRecord *, struct Ndb_statistics *, + uint part_id = ~(uint)0); static ulong multi_range_fixed_size(int num_ranges); @@ -509,54 +466,46 @@ static long long g_server_api_client_stats[Ndb::NumClientStatistics]; from their own Ndb object before showing the values. @param ndb The ndb object */ -void -update_slave_api_stats(const Ndb* ndb) -{ +void update_slave_api_stats(const Ndb *ndb) { // Should only be called by the slave (applier) thread DBUG_ASSERT(current_thd->slave_thread); - for (Uint32 i=0; i < Ndb::NumClientStatistics; i++) - { + for (Uint32 i = 0; i < Ndb::NumClientStatistics; i++) { g_slave_api_client_stats[i] = ndb->getClientStat(i); } } st_ndb_slave_state g_ndb_slave_state; -static int check_slave_config() -{ +static int check_slave_config() { DBUG_ENTER("check_slave_config"); - if (ndb_get_number_of_channels() > 1) - { - ndb_log_error("NDB Slave: Configuration with number of replication " - "masters = %u is not supported when applying to NDB", - ndb_get_number_of_channels()); + if (ndb_get_number_of_channels() > 1) { + ndb_log_error( + "NDB Slave: Configuration with number of replication " + "masters = %u is not supported when applying to NDB", + ndb_get_number_of_channels()); DBUG_RETURN(HA_ERR_UNSUPPORTED); } - if (ndb_mi_get_slave_parallel_workers() > 0) - { - ndb_log_error("NDB Slave: Configuration 'slave_parallel_workers = %lu' is " - "not supported when applying to NDB", - ndb_mi_get_slave_parallel_workers()); + if (ndb_mi_get_slave_parallel_workers() > 0) { + ndb_log_error( + "NDB Slave: Configuration 'slave_parallel_workers = %lu' is " + "not supported when applying to NDB", + ndb_mi_get_slave_parallel_workers()); DBUG_RETURN(HA_ERR_UNSUPPORTED); } DBUG_RETURN(0); } -static int check_slave_state(THD* thd) -{ +static int check_slave_state(THD *thd) { DBUG_ENTER("check_slave_state"); - if (!thd->slave_thread) - DBUG_RETURN(0); + if (!thd->slave_thread) DBUG_RETURN(0); const Uint32 runId = ndb_mi_get_slave_run_id(); - DBUG_PRINT("info", ("Slave SQL thread run id is %u", - runId)); - if (unlikely(runId != g_ndb_slave_state.sql_run_id)) - { + DBUG_PRINT("info", ("Slave SQL thread run id is %u", runId)); + if (unlikely(runId != g_ndb_slave_state.sql_run_id)) { DBUG_PRINT("info", ("Slave run id changed from %u, " "treating as Slave restart", g_ndb_slave_state.sql_run_id)); @@ -565,8 +514,7 @@ static int check_slave_state(THD* thd) * Check that the slave configuration is supported */ int error = check_slave_config(); - if (unlikely(error)) - DBUG_RETURN(error); + if (unlikely(error)) DBUG_RETURN(error); g_ndb_slave_state.sql_run_id = runId; @@ -584,82 +532,72 
@@ static int check_slave_state(THD* thd) DBUG_PRINT("info", ("Loading applied epoch information")); NdbError ndb_error; Uint64 highestAppliedEpoch = 0; - do - { - Ndb* ndb= check_ndb_in_thd(thd); - NDBDICT* dict= ndb->getDictionary(); - NdbTransaction* trans= NULL; + do { + Ndb *ndb = check_ndb_in_thd(thd); + NDBDICT *dict = ndb->getDictionary(); + NdbTransaction *trans = NULL; ndb->setDatabaseName(Ndb_apply_status_table::DB_NAME.c_str()); Ndb_table_guard ndbtab_g(dict, Ndb_apply_status_table::TABLE_NAME.c_str()); - const NDBTAB* ndbtab= ndbtab_g.get_table(); - if (unlikely(ndbtab == NULL)) - { + const NDBTAB *ndbtab = ndbtab_g.get_table(); + if (unlikely(ndbtab == NULL)) { ndb_error = dict->getNdbError(); break; } - trans= ndb->startTransaction(); - if (unlikely(trans == NULL)) - { + trans = ndb->startTransaction(); + if (unlikely(trans == NULL)) { ndb_error = ndb->getNdbError(); break; } - do - { - NdbScanOperation* sop = trans->getNdbScanOperation(ndbtab); - if (unlikely(sop == NULL)) - { + do { + NdbScanOperation *sop = trans->getNdbScanOperation(ndbtab); + if (unlikely(sop == NULL)) { ndb_error = trans->getNdbError(); break; } const Uint32 server_id_col_num = 0; const Uint32 epoch_col_num = 1; - NdbRecAttr* server_id_ra = 0; - NdbRecAttr* epoch_ra = 0; + NdbRecAttr *server_id_ra = 0; + NdbRecAttr *epoch_ra = 0; - if (unlikely((sop->readTuples(NdbOperation::LM_CommittedRead) != 0) || - ((server_id_ra = sop->getValue(server_id_col_num)) == NULL) || - ((epoch_ra = sop->getValue(epoch_col_num)) == NULL))) - { + if (unlikely( + (sop->readTuples(NdbOperation::LM_CommittedRead) != 0) || + ((server_id_ra = sop->getValue(server_id_col_num)) == NULL) || + ((epoch_ra = sop->getValue(epoch_col_num)) == NULL))) { ndb_error = sop->getNdbError(); break; } - if (trans->execute(NdbTransaction::Commit)) - { + if (trans->execute(NdbTransaction::Commit)) { ndb_error = trans->getNdbError(); break; } int rc = 0; - while (0 == (rc= sop->nextResult(true))) - { + while (0 == (rc = sop->nextResult(true))) { Uint32 serverid = server_id_ra->u_32_value(); Uint64 epoch = epoch_ra->u_64_value(); if ((serverid == ::server_id) || - (ndb_mi_get_ignore_server_id(serverid))) - { + (ndb_mi_get_ignore_server_id(serverid))) { highestAppliedEpoch = MAX(epoch, highestAppliedEpoch); } } - if (rc != 1) - { + if (rc != 1) { ndb_error = sop->getNdbError(); break; } } while (0); trans->close(); - } while(0); + } while (0); - if (ndb_error.code != 0) - { + if (ndb_error.code != 0) { ndb_log_warning( "NDB Slave: Could not determine maximum replicated " "epoch from '%s.%s' at Slave start, error %u %s", @@ -674,186 +612,185 @@ static int check_slave_state(THD* thd) If none was found, this will be zero. 
*/ g_ndb_slave_state.max_rep_epoch = highestAppliedEpoch; - ndb_log_info("NDB Slave: MaxReplicatedEpoch set to %llu (%u/%u) at " - "Slave start", - g_ndb_slave_state.max_rep_epoch, - (Uint32)(g_ndb_slave_state.max_rep_epoch >> 32), - (Uint32)(g_ndb_slave_state.max_rep_epoch & 0xffffffff)); - } // Load highest replicated epoch - } // New Slave SQL thread run id + ndb_log_info( + "NDB Slave: MaxReplicatedEpoch set to %llu (%u/%u) at " + "Slave start", + g_ndb_slave_state.max_rep_epoch, + (Uint32)(g_ndb_slave_state.max_rep_epoch >> 32), + (Uint32)(g_ndb_slave_state.max_rep_epoch & 0xffffffff)); + } // Load highest replicated epoch + } // New Slave SQL thread run id DBUG_RETURN(0); } - -static int update_status_variables(Thd_ndb *thd_ndb, - st_ndb_status *ns, - Ndb_cluster_connection *c) -{ - ns->connected_port= c->get_connected_port(); - ns->connected_host= c->get_connected_host(); - if (ns->cluster_node_id != (int) c->node_id()) - { - ns->cluster_node_id= c->node_id(); +static int update_status_variables(Thd_ndb *thd_ndb, st_ndb_status *ns, + Ndb_cluster_connection *c) { + ns->connected_port = c->get_connected_port(); + ns->connected_host = c->get_connected_host(); + if (ns->cluster_node_id != (int)c->node_id()) { + ns->cluster_node_id = c->node_id(); if (&g_ndb_status == ns && g_ndb_cluster_connection == c) ndb_log_info("NodeID is %lu, management server '%s:%lu'", - ns->cluster_node_id, ns->connected_host, - ns->connected_port); + ns->cluster_node_id, ns->connected_host, ns->connected_port); } { - int n= c->get_no_ready(); - ns->number_of_ready_data_nodes= n > 0 ? n : 0; + int n = c->get_no_ready(); + ns->number_of_ready_data_nodes = n > 0 ? n : 0; } - ns->number_of_data_nodes= c->no_db_nodes(); - ns->connect_count= c->get_connect_count(); + ns->number_of_data_nodes = c->no_db_nodes(); + ns->connect_count = c->get_connect_count(); ns->system_name = c->get_system_name(); - ns->last_commit_epoch_server= ndb_get_latest_trans_gci(); - if (thd_ndb) - { - ns->execute_count= thd_ndb->m_execute_count; + ns->last_commit_epoch_server = ndb_get_latest_trans_gci(); + if (thd_ndb) { + ns->execute_count = thd_ndb->m_execute_count; ns->trans_hint_count = thd_ndb->hinted_trans_count(); - ns->scan_count= thd_ndb->m_scan_count; - ns->pruned_scan_count= thd_ndb->m_pruned_scan_count; - ns->sorted_scan_count= thd_ndb->m_sorted_scan_count; - ns->pushed_queries_defined= thd_ndb->m_pushed_queries_defined; - ns->pushed_queries_dropped= thd_ndb->m_pushed_queries_dropped; - ns->pushed_queries_executed= thd_ndb->m_pushed_queries_executed; - ns->pushed_reads= thd_ndb->m_pushed_reads; + ns->scan_count = thd_ndb->m_scan_count; + ns->pruned_scan_count = thd_ndb->m_pruned_scan_count; + ns->sorted_scan_count = thd_ndb->m_sorted_scan_count; + ns->pushed_queries_defined = thd_ndb->m_pushed_queries_defined; + ns->pushed_queries_dropped = thd_ndb->m_pushed_queries_dropped; + ns->pushed_queries_executed = thd_ndb->m_pushed_queries_executed; + ns->pushed_reads = thd_ndb->m_pushed_reads; ns->last_commit_epoch_session = thd_ndb->m_last_commit_epoch_session; - for (int i=0; i < Ndb::NumClientStatistics; i++) - { + for (int i = 0; i < Ndb::NumClientStatistics; i++) { ns->api_client_stats[i] = thd_ndb->ndb->getClientStat(i); } - ns->schema_locks_count= thd_ndb->schema_locks_count; + ns->schema_locks_count = thd_ndb->schema_locks_count; } return 0; } /* Helper macro for definitions of NdbApi status variables */ -#define NDBAPI_COUNTERS(NAME_SUFFIX, ARRAY_LOCATION) \ - {"api_wait_exec_complete_count" NAME_SUFFIX, \ - (char*) 
ARRAY_LOCATION[ Ndb::WaitExecCompleteCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_wait_scan_result_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::WaitScanResultCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_wait_meta_request_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::WaitMetaRequestCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_wait_nanos_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::WaitNanosCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_bytes_sent_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::BytesSentCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_bytes_received_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::BytesRecvdCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_trans_start_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::TransStartCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_trans_commit_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::TransCommitCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_trans_abort_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::TransAbortCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_trans_close_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::TransCloseCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_pk_op_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::PkOpCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_uk_op_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::UkOpCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_table_scan_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::TableScanCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_range_scan_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::RangeScanCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_pruned_scan_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::PrunedScanCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_scan_batch_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::ScanBatchCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_read_row_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::ReadRowCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_trans_local_read_row_count" NAME_SUFFIX, \ - (char*) ARRAY_LOCATION[ Ndb::TransLocalReadRowCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_adaptive_send_forced_count" NAME_SUFFIX, \ - (char *) ARRAY_LOCATION[ Ndb::ForcedSendsCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_adaptive_send_unforced_count" NAME_SUFFIX, \ - (char *) ARRAY_LOCATION[ Ndb::UnforcedSendsCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ - {"api_adaptive_send_deferred_count" NAME_SUFFIX, \ - (char *) ARRAY_LOCATION[ Ndb::DeferredSendsCount ], \ - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL} - - -static SHOW_VAR ndb_status_vars_dynamic[]= -{ - {"cluster_node_id", (char*) &g_ndb_status.cluster_node_id, SHOW_LONG, SHOW_SCOPE_GLOBAL}, - {"config_from_host", (char*) &g_ndb_status.connected_host, SHOW_CHAR_PTR, SHOW_SCOPE_GLOBAL}, - {"config_from_port", (char*) &g_ndb_status.connected_port, SHOW_LONG, SHOW_SCOPE_GLOBAL}, - {"number_of_data_nodes",(char*) &g_ndb_status.number_of_data_nodes, SHOW_LONG, SHOW_SCOPE_GLOBAL}, - {"number_of_ready_data_nodes", - (char*) &g_ndb_status.number_of_ready_data_nodes, SHOW_LONG, SHOW_SCOPE_GLOBAL}, - {"connect_count", (char*) &g_ndb_status.connect_count, SHOW_LONG, SHOW_SCOPE_GLOBAL}, - {"execute_count", (char*) &g_ndb_status.execute_count, SHOW_LONG, SHOW_SCOPE_GLOBAL}, - {"scan_count", (char*) 
&g_ndb_status.scan_count, SHOW_LONG, SHOW_SCOPE_GLOBAL}, - {"pruned_scan_count", (char*) &g_ndb_status.pruned_scan_count, SHOW_LONG, SHOW_SCOPE_GLOBAL}, - {"schema_locks_count", (char*) &g_ndb_status.schema_locks_count, SHOW_LONG, SHOW_SCOPE_GLOBAL}, - NDBAPI_COUNTERS("_session", &g_ndb_status.api_client_stats), - {"trans_hint_count_session", - reinterpret_cast<char*>(&g_ndb_status.trans_hint_count), SHOW_LONG, - SHOW_SCOPE_GLOBAL}, - {"sorted_scan_count", (char*) &g_ndb_status.sorted_scan_count, SHOW_LONG, SHOW_SCOPE_GLOBAL}, - {"pushed_queries_defined", (char*) &g_ndb_status.pushed_queries_defined, - SHOW_LONG, SHOW_SCOPE_GLOBAL}, - {"pushed_queries_dropped", (char*) &g_ndb_status.pushed_queries_dropped, - SHOW_LONG, SHOW_SCOPE_GLOBAL}, - {"pushed_queries_executed", (char*) &g_ndb_status.pushed_queries_executed, - SHOW_LONG, SHOW_SCOPE_GLOBAL}, - {"pushed_reads", (char*) &g_ndb_status.pushed_reads, SHOW_LONG, SHOW_SCOPE_GLOBAL}, - {"last_commit_epoch_server", - (char*) &g_ndb_status.last_commit_epoch_server, - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"last_commit_epoch_session", - (char*) &g_ndb_status.last_commit_epoch_session, - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"system_name", (char*) &g_ndb_status.system_name, SHOW_CHAR_PTR, SHOW_SCOPE_GLOBAL}, - {NullS, NullS, SHOW_LONG, SHOW_SCOPE_GLOBAL} -}; - -static SHOW_VAR ndb_status_vars_slave[]= -{ - NDBAPI_COUNTERS("_slave", &g_slave_api_client_stats), - {"slave_max_replicated_epoch", (char*) &g_ndb_slave_state.max_rep_epoch, SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {NullS, NullS, SHOW_LONG, SHOW_SCOPE_GLOBAL} -}; - -static SHOW_VAR ndb_status_vars_server_api[]= -{ - NDBAPI_COUNTERS("", &g_server_api_client_stats), - {"api_event_data_count", - (char*) &g_server_api_client_stats[ Ndb::DataEventsRecvdCount ], - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"api_event_nondata_count", - (char*) &g_server_api_client_stats[ Ndb::NonDataEventsRecvdCount ], - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"api_event_bytes_count", - (char*) &g_server_api_client_stats[ Ndb::EventBytesRecvdCount ], - SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {NullS, NullS, SHOW_LONG, SHOW_SCOPE_GLOBAL} -}; - +#define NDBAPI_COUNTERS(NAME_SUFFIX, ARRAY_LOCATION) \ + {"api_wait_exec_complete_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::WaitExecCompleteCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_wait_scan_result_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::WaitScanResultCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_wait_meta_request_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::WaitMetaRequestCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_wait_nanos_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::WaitNanosCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_bytes_sent_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::BytesSentCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_bytes_received_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::BytesRecvdCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_trans_start_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::TransStartCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_trans_commit_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::TransCommitCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_trans_abort_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::TransAbortCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_trans_close_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::TransCloseCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + 
{"api_pk_op_count" NAME_SUFFIX, (char *)ARRAY_LOCATION[Ndb::PkOpCount], \ + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ + {"api_uk_op_count" NAME_SUFFIX, (char *)ARRAY_LOCATION[Ndb::UkOpCount], \ + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, \ + {"api_table_scan_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::TableScanCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_range_scan_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::RangeScanCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_pruned_scan_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::PrunedScanCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_scan_batch_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::ScanBatchCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_read_row_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::ReadRowCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_trans_local_read_row_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::TransLocalReadRowCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_adaptive_send_forced_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::ForcedSendsCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + {"api_adaptive_send_unforced_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::UnforcedSendsCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL}, \ + { \ + "api_adaptive_send_deferred_count" NAME_SUFFIX, \ + (char *)ARRAY_LOCATION[Ndb::DeferredSendsCount], SHOW_LONGLONG, \ + SHOW_SCOPE_GLOBAL \ + } + +static SHOW_VAR ndb_status_vars_dynamic[] = { + {"cluster_node_id", (char *)&g_ndb_status.cluster_node_id, SHOW_LONG, + SHOW_SCOPE_GLOBAL}, + {"config_from_host", (char *)&g_ndb_status.connected_host, SHOW_CHAR_PTR, + SHOW_SCOPE_GLOBAL}, + {"config_from_port", (char *)&g_ndb_status.connected_port, SHOW_LONG, + SHOW_SCOPE_GLOBAL}, + {"number_of_data_nodes", (char *)&g_ndb_status.number_of_data_nodes, + SHOW_LONG, SHOW_SCOPE_GLOBAL}, + {"number_of_ready_data_nodes", + (char *)&g_ndb_status.number_of_ready_data_nodes, SHOW_LONG, + SHOW_SCOPE_GLOBAL}, + {"connect_count", (char *)&g_ndb_status.connect_count, SHOW_LONG, + SHOW_SCOPE_GLOBAL}, + {"execute_count", (char *)&g_ndb_status.execute_count, SHOW_LONG, + SHOW_SCOPE_GLOBAL}, + {"scan_count", (char *)&g_ndb_status.scan_count, SHOW_LONG, + SHOW_SCOPE_GLOBAL}, + {"pruned_scan_count", (char *)&g_ndb_status.pruned_scan_count, SHOW_LONG, + SHOW_SCOPE_GLOBAL}, + {"schema_locks_count", (char *)&g_ndb_status.schema_locks_count, SHOW_LONG, + SHOW_SCOPE_GLOBAL}, + NDBAPI_COUNTERS("_session", &g_ndb_status.api_client_stats), + {"trans_hint_count_session", + reinterpret_cast<char *>(&g_ndb_status.trans_hint_count), SHOW_LONG, + SHOW_SCOPE_GLOBAL}, + {"sorted_scan_count", (char *)&g_ndb_status.sorted_scan_count, SHOW_LONG, + SHOW_SCOPE_GLOBAL}, + {"pushed_queries_defined", (char *)&g_ndb_status.pushed_queries_defined, + SHOW_LONG, SHOW_SCOPE_GLOBAL}, + {"pushed_queries_dropped", (char *)&g_ndb_status.pushed_queries_dropped, + SHOW_LONG, SHOW_SCOPE_GLOBAL}, + {"pushed_queries_executed", (char *)&g_ndb_status.pushed_queries_executed, + SHOW_LONG, SHOW_SCOPE_GLOBAL}, + {"pushed_reads", (char *)&g_ndb_status.pushed_reads, SHOW_LONG, + SHOW_SCOPE_GLOBAL}, + {"last_commit_epoch_server", (char *)&g_ndb_status.last_commit_epoch_server, + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {"last_commit_epoch_session", + (char *)&g_ndb_status.last_commit_epoch_session, SHOW_LONGLONG, + SHOW_SCOPE_GLOBAL}, + {"system_name", (char *)&g_ndb_status.system_name, SHOW_CHAR_PTR, + SHOW_SCOPE_GLOBAL}, + {NullS, NullS, SHOW_LONG, SHOW_SCOPE_GLOBAL}}; + +static
SHOW_VAR ndb_status_vars_slave[] = { + NDBAPI_COUNTERS("_slave", &g_slave_api_client_stats), + {"slave_max_replicated_epoch", (char *)&g_ndb_slave_state.max_rep_epoch, + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {NullS, NullS, SHOW_LONG, SHOW_SCOPE_GLOBAL}}; + +static SHOW_VAR ndb_status_vars_server_api[] = { + NDBAPI_COUNTERS("", &g_server_api_client_stats), + {"api_event_data_count", + (char *)&g_server_api_client_stats[Ndb::DataEventsRecvdCount], + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {"api_event_nondata_count", + (char *)&g_server_api_client_stats[Ndb::NonDataEventsRecvdCount], + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {"api_event_bytes_count", + (char *)&g_server_api_client_stats[Ndb::EventBytesRecvdCount], + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {NullS, NullS, SHOW_LONG, SHOW_SCOPE_GLOBAL}}; /* Called when SHOW STATUS or performance_schema.[global|session]_status @@ -863,50 +800,45 @@ static SHOW_VAR ndb_status_vars_server_api[]= the updated globals */ -static -int show_ndb_status_server_api(THD*, SHOW_VAR *var, char*) -{ - ndb_get_connection_stats((Uint64*) &g_server_api_client_stats[0]); +static int show_ndb_status_server_api(THD *, SHOW_VAR *var, char *) { + ndb_get_connection_stats((Uint64 *)&g_server_api_client_stats[0]); - var->type= SHOW_ARRAY; - var->value= (char*) ndb_status_vars_server_api; - var->scope= SHOW_SCOPE_GLOBAL; + var->type = SHOW_ARRAY; + var->value = (char *)ndb_status_vars_server_api; + var->scope = SHOW_SCOPE_GLOBAL; return 0; } - /* Error handling functions */ /* Note for merge: old mapping table, moved to storage/ndb/ndberror.c */ -int ndb_to_mysql_error(const NdbError *ndberr) -{ +int ndb_to_mysql_error(const NdbError *ndberr) { /* read the mysql mapped error code */ - int error= ndberr->mysql_code; + int error = ndberr->mysql_code; - switch (error) - { - /* errors for which we do not add warnings, just return mapped error code - */ - case HA_ERR_NO_SUCH_TABLE: - case HA_ERR_KEY_NOT_FOUND: - return error; - - /* Mapping missing, go with the ndb error code */ - case -1: - case 0: - /* Never map to errors below HA_ERR_FIRST */ - if (ndberr->code < HA_ERR_FIRST) - error= HA_ERR_INTERNAL_ERROR; - else - error= ndberr->code; - break; - /* Mapping exists, go with the mapped code */ - default: - break; + switch (error) { + /* errors for which we do not add warnings, just return mapped error code + */ + case HA_ERR_NO_SUCH_TABLE: + case HA_ERR_KEY_NOT_FOUND: + return error; + + /* Mapping missing, go with the ndb error code */ + case -1: + case 0: + /* Never map to errors below HA_ERR_FIRST */ + if (ndberr->code < HA_ERR_FIRST) + error = HA_ERR_INTERNAL_ERROR; + else + error = ndberr->code; + break; + /* Mapping exists, go with the mapped code */ + default: + break; } { @@ -922,49 +854,35 @@ int ndb_to_mysql_error(const NdbError *ndberr) ER_THD(current_thd, ER_GET_TEMPORARY_ERRMSG), ndberr->code, ndberr->message, "NDB"); else - push_warning_printf(current_thd, Sql_condition::SL_WARNING, - ER_GET_ERRMSG, ER_THD(current_thd, ER_GET_ERRMSG), - ndberr->code, ndberr->message, "NDB"); + push_warning_printf(current_thd, Sql_condition::SL_WARNING, ER_GET_ERRMSG, + ER_THD(current_thd, ER_GET_ERRMSG), ndberr->code, + ndberr->message, "NDB"); } return error; } ulong opt_ndb_slave_conflict_role; -static int -handle_conflict_op_error(NdbTransaction* trans, - const NdbError& err, - const NdbOperation* op); - -static int -handle_row_conflict(NDB_CONFLICT_FN_SHARE* cfn_share, - const char* tab_name, - const char* handling_type, - const NdbRecord* key_rec, - const NdbRecord* data_rec, - 
const uchar* old_row, - const uchar* new_row, - enum_conflicting_op_type op_type, - enum_conflict_cause conflict_cause, - const NdbError& conflict_error, - NdbTransaction* conflict_trans, - const MY_BITMAP *write_set, - Uint64 transaction_id); +static int handle_conflict_op_error(NdbTransaction *trans, const NdbError &err, + const NdbOperation *op); + +static int handle_row_conflict( + NDB_CONFLICT_FN_SHARE *cfn_share, const char *tab_name, + const char *handling_type, const NdbRecord *key_rec, + const NdbRecord *data_rec, const uchar *old_row, const uchar *new_row, + enum_conflicting_op_type op_type, enum_conflict_cause conflict_cause, + const NdbError &conflict_error, NdbTransaction *conflict_trans, + const MY_BITMAP *write_set, Uint64 transaction_id); static const Uint32 error_op_after_refresh_op = 920; -static inline -int -check_completed_operations_pre_commit(Thd_ndb *thd_ndb, NdbTransaction *trans, - const NdbOperation *first, - const NdbOperation *last, - uint *ignore_count) -{ - uint ignores= 0; +static inline int check_completed_operations_pre_commit( + Thd_ndb *thd_ndb, NdbTransaction *trans, const NdbOperation *first, + const NdbOperation *last, uint *ignore_count) { + uint ignores = 0; DBUG_ENTER("check_completed_operations_pre_commit"); - if (unlikely(first == 0)) - { + if (unlikely(first == 0)) { assert(last == 0); DBUG_RETURN(0); } @@ -973,85 +891,65 @@ check_completed_operations_pre_commit(Thd_ndb *thd_ndb, NdbTransaction *trans, Check that all errors are "accepted" errors or exceptions to report */ - const NdbOperation* lastUserOp = trans->getLastDefinedOperation(); - while (true) - { - const NdbError &err= first->getNdbError(); + const NdbOperation *lastUserOp = trans->getLastDefinedOperation(); + while (true) { + const NdbError &err = first->getNdbError(); const bool op_has_conflict_detection = (first->getCustomData() != NULL); - if (!op_has_conflict_detection) - { - DBUG_ASSERT(err.code != (int) error_op_after_refresh_op); + if (!op_has_conflict_detection) { + DBUG_ASSERT(err.code != (int)error_op_after_refresh_op); /* 'Normal path' - ignore key (not) present, others are errors */ if (err.classification != NdbError::NoError && err.classification != NdbError::ConstraintViolation && - err.classification != NdbError::NoDataFound) - { + err.classification != NdbError::NoDataFound) { /* Non ignored error, report it */ DBUG_PRINT("info", ("err.code == %u", err.code)); DBUG_RETURN(err.code); } - } - else - { + } else { /* Op with conflict detection, use special error handling method */ - if (err.classification != NdbError::NoError) - { - int res = handle_conflict_op_error(trans, - err, - first); - if (res != 0) - DBUG_RETURN(res); + if (err.classification != NdbError::NoError) { + int res = handle_conflict_op_error(trans, err, first); + if (res != 0) DBUG_RETURN(res); } - } // if (!op_has_conflict_detection) - if (err.classification != NdbError::NoError) - ignores++; + } // if (!op_has_conflict_detection) + if (err.classification != NdbError::NoError) ignores++; - if (first == last) - break; + if (first == last) break; - first= trans->getNextCompletedOperation(first); + first = trans->getNextCompletedOperation(first); } - if (ignore_count) - *ignore_count= ignores; + if (ignore_count) *ignore_count = ignores; /* Conflict detection related error handling above may have defined new operations on the transaction. 
If so, execute them now */ - if (trans->getLastDefinedOperation() != lastUserOp) - { - const NdbOperation* last_conflict_op = trans->getLastDefinedOperation(); + if (trans->getLastDefinedOperation() != lastUserOp) { + const NdbOperation *last_conflict_op = trans->getLastDefinedOperation(); NdbError nonMaskedError; assert(nonMaskedError.code == 0); - if (trans->execute(NdbTransaction::NoCommit, - NdbOperation::AO_IgnoreError, - thd_ndb->m_force_send)) - { + if (trans->execute(NdbTransaction::NoCommit, NdbOperation::AO_IgnoreError, + thd_ndb->m_force_send)) { /* Transaction execute failed, even with IgnoreError... */ nonMaskedError = trans->getNdbError(); assert(nonMaskedError.code != 0); - } - else if (trans->getNdbError().code) - { + } else if (trans->getNdbError().code) { /* Check the result codes of the operations we added */ - const NdbOperation* conflict_op = NULL; - do - { + const NdbOperation *conflict_op = NULL; + do { conflict_op = trans->getNextCompletedOperation(conflict_op); assert(conflict_op != NULL); /* We will ignore 920 which represents a refreshOp or other op * arriving after a refreshOp */ - const NdbError& err = conflict_op->getNdbError(); - if ((err.code != 0) && - (err.code != (int) error_op_after_refresh_op)) - { + const NdbError &err = conflict_op->getNdbError(); + if ((err.code != 0) && (err.code != (int)error_op_after_refresh_op)) { /* Found a real error, break out and handle it */ nonMaskedError = err; break; @@ -1060,23 +958,19 @@ check_completed_operations_pre_commit(Thd_ndb *thd_ndb, NdbTransaction *trans, } /* Handle errors with extra conflict handling operations */ - if (nonMaskedError.code != 0) - { - if (nonMaskedError.status == NdbError::TemporaryError) - { + if (nonMaskedError.code != 0) { + if (nonMaskedError.status == NdbError::TemporaryError) { /* Slave will roll back and retry entire transaction. */ ERR_RETURN(nonMaskedError); - } - else - { + } else { char msg[FN_REFLEN]; - snprintf(msg, sizeof(msg), "Executing extra operations for " - "conflict handling hit Ndb error %d '%s'", - nonMaskedError.code, nonMaskedError.message); - push_warning_printf(current_thd, Sql_condition::SL_ERROR, - ER_EXCEPTIONS_WRITE_ERROR, - ER_THD(current_thd, - ER_EXCEPTIONS_WRITE_ERROR), msg); + snprintf(msg, sizeof(msg), + "Executing extra operations for " + "conflict handling hit Ndb error %d '%s'", + nonMaskedError.code, nonMaskedError.message); + push_warning_printf( + current_thd, Sql_condition::SL_ERROR, ER_EXCEPTIONS_WRITE_ERROR, + ER_THD(current_thd, ER_EXCEPTIONS_WRITE_ERROR), msg); /* Slave will stop replication. 
*/ DBUG_RETURN(ER_EXCEPTIONS_WRITE_ERROR); } @@ -1088,13 +982,11 @@ check_completed_operations_pre_commit(Thd_ndb *thd_ndb, NdbTransaction *trans, static inline int check_completed_operations(NdbTransaction *trans, const NdbOperation *first, const NdbOperation *last, - uint *ignore_count) -{ - uint ignores= 0; + uint *ignore_count) { + uint ignores = 0; DBUG_ENTER("check_completed_operations"); - if (unlikely(first == 0)) - { + if (unlikely(first == 0)) { assert(last == 0); DBUG_RETURN(0); } @@ -1102,34 +994,27 @@ static inline int check_completed_operations(NdbTransaction *trans, /* Check that all errors are "accepted" errors */ - while (true) - { - const NdbError &err= first->getNdbError(); + while (true) { + const NdbError &err = first->getNdbError(); if (err.classification != NdbError::NoError && err.classification != NdbError::ConstraintViolation && - err.classification != NdbError::NoDataFound) - { + err.classification != NdbError::NoDataFound) { /* All conflict detection etc should be done before commit */ - DBUG_ASSERT((err.code != (int) error_conflict_fn_violation) && - (err.code != (int) error_op_after_refresh_op)); + DBUG_ASSERT((err.code != (int)error_conflict_fn_violation) && + (err.code != (int)error_op_after_refresh_op)); DBUG_RETURN(err.code); } - if (err.classification != NdbError::NoError) - ignores++; + if (err.classification != NdbError::NoError) ignores++; - if (first == last) - break; + if (first == last) break; - first= trans->getNextCompletedOperation(first); + first = trans->getNextCompletedOperation(first); } - if (ignore_count) - *ignore_count= ignores; + if (ignore_count) *ignore_count = ignores; DBUG_RETURN(0); } -void -ha_ndbcluster::release_completed_operations(NdbTransaction *trans) -{ +void ha_ndbcluster::release_completed_operations(NdbTransaction *trans) { /** * mysqld reads/write blobs fully, * which means that it does not keep blobs @@ -1142,44 +1027,33 @@ ha_ndbcluster::release_completed_operations(NdbTransaction *trans) trans->releaseCompletedQueries(); } - -static inline -int -execute_no_commit(Thd_ndb *thd_ndb, NdbTransaction *trans, - bool ignore_no_key, - uint *ignore_count = 0) -{ +static inline int execute_no_commit(Thd_ndb *thd_ndb, NdbTransaction *trans, + bool ignore_no_key, + uint *ignore_count = 0) { DBUG_ENTER("execute_no_commit"); ha_ndbcluster::release_completed_operations(trans); - const NdbOperation *first= trans->getFirstDefinedOperation(); - const NdbOperation *last= trans->getLastDefinedOperation(); + const NdbOperation *first = trans->getFirstDefinedOperation(); + const NdbOperation *last = trans->getLastDefinedOperation(); thd_ndb->m_execute_count++; - thd_ndb->m_unsent_bytes= 0; + thd_ndb->m_unsent_bytes = 0; DBUG_PRINT("info", ("execute_count: %u", thd_ndb->m_execute_count)); - int rc= 0; - do - { - if (trans->execute(NdbTransaction::NoCommit, - NdbOperation::AO_IgnoreError, - thd_ndb->m_force_send)) - { - rc= -1; + int rc = 0; + do { + if (trans->execute(NdbTransaction::NoCommit, NdbOperation::AO_IgnoreError, + thd_ndb->m_force_send)) { + rc = -1; break; } - if (!ignore_no_key || trans->getNdbError().code == 0) - { - rc= trans->getNdbError().code; + if (!ignore_no_key || trans->getNdbError().code == 0) { + rc = trans->getNdbError().code; break; } - rc = check_completed_operations_pre_commit(thd_ndb, trans, - first, last, + rc = check_completed_operations_pre_commit(thd_ndb, trans, first, last, ignore_count); } while (0); - if (unlikely(thd_ndb->is_slave_thread() && - rc != 0)) - { + if (unlikely(thd_ndb->is_slave_thread() && rc 
!= 0)) { g_ndb_slave_state.atTransactionAbort(); } @@ -1187,70 +1061,56 @@ execute_no_commit(Thd_ndb *thd_ndb, NdbTransaction *trans, DBUG_RETURN(rc); } - -static inline -int -execute_commit(Thd_ndb *thd_ndb, NdbTransaction *trans, - int force_send, int ignore_error, uint *ignore_count = 0) -{ +static inline int execute_commit(Thd_ndb *thd_ndb, NdbTransaction *trans, + int force_send, int ignore_error, + uint *ignore_count = 0) { DBUG_ENTER("execute_commit"); - NdbOperation::AbortOption ao= NdbOperation::AO_IgnoreError; - if (thd_ndb->m_unsent_bytes && !ignore_error) - { + NdbOperation::AbortOption ao = NdbOperation::AO_IgnoreError; + if (thd_ndb->m_unsent_bytes && !ignore_error) { /* We have unsent bytes and cannot ignore error. Calling execute with NdbOperation::AO_IgnoreError will result in possible commit of a transaction although there is an error. */ - ao= NdbOperation::AbortOnError; + ao = NdbOperation::AbortOnError; } - const NdbOperation *first= trans->getFirstDefinedOperation(); - const NdbOperation *last= trans->getLastDefinedOperation(); + const NdbOperation *first = trans->getFirstDefinedOperation(); + const NdbOperation *last = trans->getLastDefinedOperation(); thd_ndb->m_execute_count++; - thd_ndb->m_unsent_bytes= 0; + thd_ndb->m_unsent_bytes = 0; DBUG_PRINT("info", ("execute_count: %u", thd_ndb->m_execute_count)); - int rc= 0; - do - { - if (trans->execute(NdbTransaction::Commit, ao, force_send)) - { - rc= -1; + int rc = 0; + do { + if (trans->execute(NdbTransaction::Commit, ao, force_send)) { + rc = -1; break; } - if (!ignore_error || trans->getNdbError().code == 0) - { - rc= trans->getNdbError().code; + if (!ignore_error || trans->getNdbError().code == 0) { + rc = trans->getNdbError().code; break; } - rc= check_completed_operations(trans, first, last, - ignore_count); + rc = check_completed_operations(trans, first, last, ignore_count); } while (0); - if (likely(rc == 0)) - { + if (likely(rc == 0)) { /* Committed ok, update session GCI, if it's available * (Not available for reads, empty transactions etc...) 
*/ Uint64 reportedGCI; - if (trans->getGCI(&reportedGCI) == 0 && - reportedGCI != 0) - { + if (trans->getGCI(&reportedGCI) == 0 && reportedGCI != 0) { assert(reportedGCI >= thd_ndb->m_last_commit_epoch_session); thd_ndb->m_last_commit_epoch_session = reportedGCI; } } - if (thd_ndb->is_slave_thread()) - { - if (likely(rc == 0)) - { + if (thd_ndb->is_slave_thread()) { + if (likely(rc == 0)) { /* Success */ - g_ndb_slave_state.atTransactionCommit(thd_ndb->m_last_commit_epoch_session); - } - else - { + g_ndb_slave_state.atTransactionCommit( + thd_ndb->m_last_commit_epoch_session); + } else { g_ndb_slave_state.atTransactionAbort(); } } @@ -1259,15 +1119,13 @@ execute_commit(Thd_ndb *thd_ndb, NdbTransaction *trans, DBUG_RETURN(rc); } -static inline -int execute_no_commit_ie(Thd_ndb *thd_ndb, NdbTransaction *trans) -{ +static inline int execute_no_commit_ie(Thd_ndb *thd_ndb, + NdbTransaction *trans) { DBUG_ENTER("execute_no_commit_ie"); ha_ndbcluster::release_completed_operations(trans); - int res= trans->execute(NdbTransaction::NoCommit, - NdbOperation::AO_IgnoreError, - thd_ndb->m_force_send); - thd_ndb->m_unsent_bytes= 0; + int res = trans->execute(NdbTransaction::NoCommit, + NdbOperation::AO_IgnoreError, thd_ndb->m_force_send); + thd_ndb->m_unsent_bytes = 0; thd_ndb->m_execute_count++; DBUG_PRINT("info", ("execute_count: %u", thd_ndb->m_execute_count)); DBUG_RETURN(res); @@ -1281,44 +1139,42 @@ struct THD_NDB_SHARE { struct Ndb_local_table_statistics stat; }; -Thd_ndb::Thd_ndb(THD* thd) : - m_thd(thd), - m_slave_thread(thd->slave_thread), - options(0), - trans_options(0), - m_ddl_ctx(nullptr), - global_schema_lock_trans(NULL), - global_schema_lock_count(0), - global_schema_lock_error(0), - schema_locks_count(0), - m_last_commit_epoch_session(0) -{ - connection= ndb_get_cluster_connection(); - m_connect_count= connection->get_connect_count(); - ndb= new Ndb(connection, ""); - lock_count= 0; - start_stmt_count= 0; - save_point_count= 0; - count= 0; - trans= NULL; - m_handler= NULL; - m_error= false; - m_unsent_bytes= 0; - m_execute_count= 0; - m_scan_count= 0; - m_pruned_scan_count= 0; - m_sorted_scan_count= 0; - m_pushed_queries_defined= 0; - m_pushed_queries_dropped= 0; - m_pushed_queries_executed= 0; - m_pushed_reads= 0; - - init_alloc_root(PSI_INSTRUMENT_ME, - &m_batch_mem_root, BATCH_FLUSH_SIZE/4, 0); -} - -Thd_ndb::~Thd_ndb() -{ +Thd_ndb::Thd_ndb(THD *thd) + : m_thd(thd), + m_slave_thread(thd->slave_thread), + options(0), + trans_options(0), + m_ddl_ctx(nullptr), + global_schema_lock_trans(NULL), + global_schema_lock_count(0), + global_schema_lock_error(0), + schema_locks_count(0), + m_last_commit_epoch_session(0) { + connection = ndb_get_cluster_connection(); + m_connect_count = connection->get_connect_count(); + ndb = new Ndb(connection, ""); + lock_count = 0; + start_stmt_count = 0; + save_point_count = 0; + count = 0; + trans = NULL; + m_handler = NULL; + m_error = false; + m_unsent_bytes = 0; + m_execute_count = 0; + m_scan_count = 0; + m_pruned_scan_count = 0; + m_sorted_scan_count = 0; + m_pushed_queries_defined = 0; + m_pushed_queries_dropped = 0; + m_pushed_queries_executed = 0; + m_pushed_reads = 0; + + init_alloc_root(PSI_INSTRUMENT_ME, &m_batch_mem_root, BATCH_FLUSH_SIZE / 4, + 0); +} + +Thd_ndb::~Thd_ndb() { DBUG_ASSERT(global_schema_lock_count == 0); DBUG_ASSERT(m_ddl_ctx == nullptr); @@ -1327,94 +1183,81 @@ Thd_ndb::~Thd_ndb() free_root(&m_batch_mem_root, MYF(0)); } - -Ndb *ha_ndbcluster::get_ndb(THD *thd) const -{ - return get_thd_ndb(thd)->ndb; -} +Ndb 
*ha_ndbcluster::get_ndb(THD *thd) const { return get_thd_ndb(thd)->ndb; } /* * manage uncommitted insert/deletes during transactio to get records correct */ -void ha_ndbcluster::set_rec_per_key() -{ +void ha_ndbcluster::set_rec_per_key() { DBUG_ENTER("ha_ndbcluster::set_rec_per_key"); /* Set up the 'rec_per_key[]' for keys which we have good knowledge - about the distribution. 'rec_per_key[]' is init'ed to '0' by + about the distribution. 'rec_per_key[]' is init'ed to '0' by open_binary_frm(), which is interpreted as 'unknown' by optimizer. -> Not setting 'rec_per_key[]' will force the optimizer to use its own heuristic to estimate 'records pr. key'. */ - for (uint i=0 ; i < table_share->keys ; i++) - { - bool is_unique_index= false; - KEY* key_info= table->key_info + i; - switch (get_index_type(i)) - { - case UNIQUE_INDEX: - case PRIMARY_KEY_INDEX: - { - // Index is unique when all 'key_parts' are specified, - // else distribution is unknown and not specified here. - is_unique_index= true; - break; - } - case UNIQUE_ORDERED_INDEX: - case PRIMARY_KEY_ORDERED_INDEX: - is_unique_index= true; - // intentional fall thru to logic for ordered index - case ORDERED_INDEX: - // 'Records pr. key' are unknown for non-unique indexes. - // (May change when we get better index statistics.) - { - THD *thd= current_thd; - const bool index_stat_enable= THDVAR(NULL, index_stat_enable) && - THDVAR(thd, index_stat_enable); - if (index_stat_enable) - { - int err= ndb_index_stat_set_rpk(i); - if (err != 0 && - /* no stats is not unexpected error */ - err != NdbIndexStat::NoIndexStats && - /* warning was printed at first error */ - err != NdbIndexStat::MyHasError && - /* stats thread aborted request */ - err != NdbIndexStat::MyAbortReq) + for (uint i = 0; i < table_share->keys; i++) { + bool is_unique_index = false; + KEY *key_info = table->key_info + i; + switch (get_index_type(i)) { + case UNIQUE_INDEX: + case PRIMARY_KEY_INDEX: { + // Index is unique when all 'key_parts' are specified, + // else distribution is unknown and not specified here. + is_unique_index = true; + break; + } + case UNIQUE_ORDERED_INDEX: + case PRIMARY_KEY_ORDERED_INDEX: + is_unique_index = true; + // intentional fall thru to logic for ordered index + case ORDERED_INDEX: + // 'Records pr. key' are unknown for non-unique indexes. + // (May change when we get better index statistics.) { - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_CANT_GET_STAT, /* pun? */ - "index stats (RPK) for key %s:" - " unexpected error %d", - key_info->name, err); + THD *thd = current_thd; + const bool index_stat_enable = + THDVAR(NULL, index_stat_enable) && THDVAR(thd, index_stat_enable); + if (index_stat_enable) { + int err = ndb_index_stat_set_rpk(i); + if (err != 0 && + /* no stats is not unexpected error */ + err != NdbIndexStat::NoIndexStats && + /* warning was printed at first error */ + err != NdbIndexStat::MyHasError && + /* stats thread aborted request */ + err != NdbIndexStat::MyAbortReq) { + push_warning_printf(thd, Sql_condition::SL_WARNING, + ER_CANT_GET_STAT, /* pun? */ + "index stats (RPK) for key %s:" + " unexpected error %d", + key_info->name, err); + } + } + // no fallback method... + break; } - } - // no fallback method... 
- break; - } - default: - DBUG_ASSERT(false); + default: + DBUG_ASSERT(false); } // set rows per key to 1 for complete key given for unique/primary index - if (is_unique_index) - { - key_info->set_records_per_key(key_info->user_defined_key_parts-1, 1.0f); + if (is_unique_index) { + key_info->set_records_per_key(key_info->user_defined_key_parts - 1, 1.0f); } } DBUG_VOID_RETURN; } -int ha_ndbcluster::records(ha_rows* num_rows) -{ +int ha_ndbcluster::records(ha_rows *num_rows) { DBUG_ENTER("ha_ndbcluster::records"); - DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", - m_table->getTableId(), - m_table_info->no_uncommitted_rows_count)); + DBUG_PRINT("info", + ("id=%d, no_uncommitted_rows_count=%d", m_table->getTableId(), + m_table_info->no_uncommitted_rows_count)); int error = update_stats(table->in_use, 1); - if (error != 0) - { + if (error != 0) { *num_rows = HA_POS_ERROR; DBUG_RETURN(error); } @@ -1423,92 +1266,77 @@ int ha_ndbcluster::records(ha_rows* num_rows) DBUG_RETURN(0); } -void ha_ndbcluster::no_uncommitted_rows_execute_failure() -{ +void ha_ndbcluster::no_uncommitted_rows_execute_failure() { DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_execute_failure"); - get_thd_ndb(current_thd)->m_error= true; + get_thd_ndb(current_thd)->m_error = true; DBUG_VOID_RETURN; } -void ha_ndbcluster::no_uncommitted_rows_update(int c) -{ +void ha_ndbcluster::no_uncommitted_rows_update(int c) { DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_update"); - struct Ndb_local_table_statistics *local_info= m_table_info; - local_info->no_uncommitted_rows_count+= c; - DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", - m_table->getTableId(), - local_info->no_uncommitted_rows_count)); + struct Ndb_local_table_statistics *local_info = m_table_info; + local_info->no_uncommitted_rows_count += c; + DBUG_PRINT("info", + ("id=%d, no_uncommitted_rows_count=%d", m_table->getTableId(), + local_info->no_uncommitted_rows_count)); DBUG_VOID_RETURN; } - -int ha_ndbcluster::ndb_err(NdbTransaction *trans) -{ - THD *thd= current_thd; +int ha_ndbcluster::ndb_err(NdbTransaction *trans) { + THD *thd = current_thd; int res; - NdbError err= trans->getNdbError(); + NdbError err = trans->getNdbError(); DBUG_ENTER("ndb_err"); switch (err.classification) { - case NdbError::SchemaError: - { - // TODO perhaps we need to do more here, invalidate also in the cache - m_table->setStatusInvalid(); - /* Close other open handlers not used by any thread */ - ndb_tdc_close_cached_table(thd, m_dbname, m_tabname); - break; - } - default: - break; + case NdbError::SchemaError: { + // TODO perhaps we need to do more here, invalidate also in the cache + m_table->setStatusInvalid(); + /* Close other open handlers not used by any thread */ + ndb_tdc_close_cached_table(thd, m_dbname, m_tabname); + break; + } + default: + break; } - res= ndb_to_mysql_error(&err); - DBUG_PRINT("info", ("transformed ndbcluster error %d to mysql error %d", + res = ndb_to_mysql_error(&err); + DBUG_PRINT("info", ("transformed ndbcluster error %d to mysql error %d", err.code, res)); - if (res == HA_ERR_FOUND_DUPP_KEY) - { - char *error_data= err.details; - uint dupkey= MAX_KEY; - - for (uint i= 0; i < MAX_KEY; i++) - { - if (m_index[i].type == UNIQUE_INDEX || - m_index[i].type == UNIQUE_ORDERED_INDEX) - { - const NDBINDEX *unique_index= - (const NDBINDEX *) m_index[i].unique_index; - if (unique_index && UintPtr(unique_index->getObjectId()) == UintPtr(error_data)) - { - dupkey= i; + if (res == HA_ERR_FOUND_DUPP_KEY) { + char *error_data = err.details; + uint 
dupkey = MAX_KEY; + + for (uint i = 0; i < MAX_KEY; i++) { + if (m_index[i].type == UNIQUE_INDEX || + m_index[i].type == UNIQUE_ORDERED_INDEX) { + const NDBINDEX *unique_index = + (const NDBINDEX *)m_index[i].unique_index; + if (unique_index && + UintPtr(unique_index->getObjectId()) == UintPtr(error_data)) { + dupkey = i; break; } } } - if (m_rows_to_insert == 1) - { + if (m_rows_to_insert == 1) { /* - We can only distinguish between primary and non-primary - violations here, so we need to return MAX_KEY for non-primary - to signal that key is unknown + We can only distinguish between primary and non-primary + violations here, so we need to return MAX_KEY for non-primary + to signal that key is unknown */ - m_dupkey= err.code == 630 ? table_share->primary_key : dupkey; - } - else - { + m_dupkey = err.code == 630 ? table_share->primary_key : dupkey; + } else { /* We are batching inserts, offending key is not available */ - m_dupkey= (uint) -1; + m_dupkey = (uint)-1; } } DBUG_RETURN(res); } - -extern bool -ndb_fk_util_generate_constraint_string(THD* thd, Ndb *ndb, - const NdbDictionary::ForeignKey &fk, - const int child_tab_id, - const bool print_mock_table_names, - String &fk_string); - +extern bool ndb_fk_util_generate_constraint_string( + THD *thd, Ndb *ndb, const NdbDictionary::ForeignKey &fk, + const int child_tab_id, const bool print_mock_table_names, + String &fk_string); /** Generate error messages when requested by the caller. @@ -1522,51 +1350,42 @@ ndb_fk_util_generate_constraint_string(THD* thd, Ndb *ndb, false if its temporary */ -bool ha_ndbcluster::get_error_message(int error, - String *buf) -{ +bool ha_ndbcluster::get_error_message(int error, String *buf) { DBUG_ENTER("ha_ndbcluster::get_error_message"); DBUG_PRINT("enter", ("error: %d", error)); - Ndb *ndb= check_ndb_in_thd(current_thd); - if (!ndb) - DBUG_RETURN(false); + Ndb *ndb = check_ndb_in_thd(current_thd); + if (!ndb) DBUG_RETURN(false); bool temporary = false; - if(unlikely(error == HA_ERR_NO_REFERENCED_ROW || - error == HA_ERR_ROW_IS_REFERENCED)) - { + if (unlikely(error == HA_ERR_NO_REFERENCED_ROW || + error == HA_ERR_ROW_IS_REFERENCED)) { /* Error message to be generated from NdbError in latest trans or dict */ Thd_ndb *thd_ndb = get_thd_ndb(current_thd); NdbDictionary::Dictionary *dict = ndb->getDictionary(); NdbError err; - if (thd_ndb->trans != NULL) - { + if (thd_ndb->trans != NULL) { err = thd_ndb->trans->getNdbError(); - } - else - { - //Drop table failure. get error from dictionary. + } else { + // Drop table failure. get error from dictionary. 
err = dict->getNdbError(); DBUG_ASSERT(err.code == 21080); } - temporary= (err.status==NdbError::TemporaryError); + temporary = (err.status == NdbError::TemporaryError); String fk_string; { /* copy default error message to be used on failure */ - const char* unknown_fk = "Unknown FK Constraint"; + const char *unknown_fk = "Unknown FK Constraint"; buf->copy(unknown_fk, (uint32)strlen(unknown_fk), &my_charset_bin); } /* fk name of format parent_id/child_id/fk_name */ - char fully_qualified_fk_name[MAX_ATTR_NAME_SIZE + - (2*MAX_INT_WIDTH) + 3]; + char fully_qualified_fk_name[MAX_ATTR_NAME_SIZE + (2 * MAX_INT_WIDTH) + 3]; /* get the fully qualified FK name from ndb using getNdbErrorDetail */ if (ndb->getNdbErrorDetail(err, &fully_qualified_fk_name[0], - sizeof(fully_qualified_fk_name)) == NULL) - { + sizeof(fully_qualified_fk_name)) == NULL) { DBUG_ASSERT(false); ndb_to_mysql_error(&dict->getNdbError()); DBUG_RETURN(temporary); @@ -1574,18 +1393,15 @@ bool ha_ndbcluster::get_error_message(int error, /* fetch the foreign key */ NdbDictionary::ForeignKey fk; - if (dict->getForeignKey(fk, fully_qualified_fk_name) != 0) - { + if (dict->getForeignKey(fk, fully_qualified_fk_name) != 0) { DBUG_ASSERT(false); ndb_to_mysql_error(&dict->getNdbError()); DBUG_RETURN(temporary); } /* generate constraint string from fk object */ - if(!ndb_fk_util_generate_constraint_string(current_thd, ndb, - fk, 0, false, - fk_string)) - { + if (!ndb_fk_util_generate_constraint_string(current_thd, ndb, fk, 0, false, + fk_string)) { DBUG_ASSERT(false); DBUG_RETURN(temporary); } @@ -1593,12 +1409,10 @@ bool ha_ndbcluster::get_error_message(int error, /* fk found and string has been generated. set the buf */ buf->copy(fk_string); DBUG_RETURN(temporary); - } - else - { + } else { /* NdbError code. Fetch error description from ndb */ - const NdbError err= ndb->getNdbError(error); - temporary= err.status==NdbError::TemporaryError; + const NdbError err = ndb->getNdbError(error); + temporary = err.status == NdbError::TemporaryError; buf->set(err.message, (uint32)strlen(err.message), &my_charset_bin); } @@ -1606,43 +1420,37 @@ bool ha_ndbcluster::get_error_message(int error, DBUG_RETURN(temporary); } - /* field_used_length() returns the number of bytes actually used to store the data of the field. So for a varstring it includes both length byte(s) and string data, and anything after data_length() bytes are unused. 
*/ -static -uint32 field_used_length(const Field* field, ptrdiff_t row_offset=0) -{ - if (field->type() == MYSQL_TYPE_VARCHAR) - { - const Field_varstring* f = down_cast<const Field_varstring*>(field); - return f->length_bytes + f->data_length(row_offset); - } - return field->pack_length(); +static uint32 field_used_length(const Field *field, ptrdiff_t row_offset = 0) { + if (field->type() == MYSQL_TYPE_VARCHAR) { + const Field_varstring *f = down_cast<const Field_varstring *>(field); + return f->length_bytes + f->data_length(row_offset); + } + return field->pack_length(); } - /** Check if MySQL field type forces var part in ndb storage */ -static bool field_type_forces_var_part(enum_field_types type) -{ +static bool field_type_forces_var_part(enum_field_types type) { switch (type) { - case MYSQL_TYPE_VAR_STRING: - case MYSQL_TYPE_VARCHAR: - return true; - case MYSQL_TYPE_TINY_BLOB: - case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_MEDIUM_BLOB: - case MYSQL_TYPE_LONG_BLOB: - case MYSQL_TYPE_JSON: - case MYSQL_TYPE_GEOMETRY: - return false; - default: - return false; + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_VARCHAR: + return true; + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_JSON: + case MYSQL_TYPE_GEOMETRY: + return false; + default: + return false; } } @@ -1666,19 +1474,15 @@ static bool field_type_forces_var_part(enum_field_types type) will free the memory already at add_row_check_if_batch_full_size() time, it will not remain valid until the second execute(). */ -uchar * -ha_ndbcluster::get_buffer(Thd_ndb *thd_ndb, uint size) -{ +uchar *ha_ndbcluster::get_buffer(Thd_ndb *thd_ndb, uint size) { // Allocate buffer memory from batch MEM_ROOT - return (uchar*)thd_ndb->m_batch_mem_root.Alloc(size); + return (uchar *)thd_ndb->m_batch_mem_root.Alloc(size); } -uchar * -ha_ndbcluster::copy_row_to_buffer(Thd_ndb *thd_ndb, const uchar *record) -{ - uchar *row= get_buffer(thd_ndb, table->s->stored_rec_length); - if (unlikely(!row)) - return NULL; +uchar *ha_ndbcluster::copy_row_to_buffer(Thd_ndb *thd_ndb, + const uchar *record) { + uchar *row = get_buffer(thd_ndb, table->s->stored_rec_length); + if (unlikely(!row)) return NULL; memcpy(row, record, table->s->stored_rec_length); return row; } @@ -1689,82 +1493,69 @@ ha_ndbcluster::copy_row_to_buffer(Thd_ndb *thd_ndb, const uchar *record) * NDBAPI objects from Blob up to transaction. * It will return -1 if no error is found, 0 if an error is found. */ -int findBlobError(NdbError& error, NdbBlob* pBlob) -{ - error= pBlob->getNdbError(); - if (error.code != 0) - return 0; - - const NdbOperation* pOp= pBlob->getNdbOperation(); - error= pOp->getNdbError(); - if (error.code != 0) - return 0; - - NdbTransaction* pTrans= pOp->getNdbTransaction(); - error= pTrans->getNdbError(); - if (error.code != 0) - return 0; - +int findBlobError(NdbError &error, NdbBlob *pBlob) { + error = pBlob->getNdbError(); + if (error.code != 0) return 0; + + const NdbOperation *pOp = pBlob->getNdbOperation(); + error = pOp->getNdbError(); + if (error.code != 0) return 0; + + NdbTransaction *pTrans = pOp->getNdbTransaction(); + error = pTrans->getNdbError(); + if (error.code != 0) return 0; + /* No error on any of the objects */ return -1; } - -/* + Calculate the length of the blob/text after applying mysql limits - on blob/text sizes. If the blob contains multi-byte characters, the length is - reduced till the end of the last well-formed char, so that data is not truncated - in the middle of a multi-byte char. + on blob/text sizes.
If the blob contains multi-byte characters, the length is + reduced till the end of the last well-formed char, so that data is not + truncated in the middle of a multi-byte char. */ static uint64 calc_ndb_blob_len(const CHARSET_INFO *cs, uchar *blob_ptr, - uint64 maxlen) -{ + uint64 maxlen) { int errors = 0; - - if (!cs) - cs = &my_charset_bin; - - const char *begin = (const char*) blob_ptr; - const char *end = (const char*) (blob_ptr+maxlen); - - // avoid truncation in the middle of a multi-byte character by + + if (!cs) cs = &my_charset_bin; + + const char *begin = (const char *)blob_ptr; + const char *end = (const char *)(blob_ptr + maxlen); + + // avoid truncation in the middle of a multi-byte character by // stopping at end of last well-formed character before max length uint32 numchars = cs->cset->numchars(cs, begin, end); uint64 len64 = cs->cset->well_formed_len(cs, begin, end, numchars, &errors); assert(len64 <= maxlen); - return len64; + return len64; } - -int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg) -{ - ha_ndbcluster *ha= (ha_ndbcluster *)arg; +int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg) { + ha_ndbcluster *ha = (ha_ndbcluster *)arg; DBUG_ENTER("g_get_ndb_blobs_value"); DBUG_PRINT("info", ("destination row: %p", ha->m_blob_destination_record)); - if (ha->m_blob_counter == 0) /* Reset total size at start of row */ - ha->m_blobs_row_total_size= 0; + if (ha->m_blob_counter == 0) /* Reset total size at start of row */ + ha->m_blobs_row_total_size = 0; /* Count the total length needed for blob data. */ int isNull; - if (ndb_blob->getNull(isNull) != 0) - ERR_RETURN(ndb_blob->getNdbError()); + if (ndb_blob->getNull(isNull) != 0) ERR_RETURN(ndb_blob->getNdbError()); if (isNull == 0) { - Uint64 len64= 0; - if (ndb_blob->getLength(len64) != 0) - ERR_RETURN(ndb_blob->getNdbError()); + Uint64 len64 = 0; + if (ndb_blob->getLength(len64) != 0) ERR_RETURN(ndb_blob->getNdbError()); /* Align to Uint64. */ - ha->m_blobs_row_total_size+= (len64 + 7) & ~((Uint64)7); - if (ha->m_blobs_row_total_size > 0xffffffff) - { + ha->m_blobs_row_total_size += (len64 + 7) & ~((Uint64)7); + if (ha->m_blobs_row_total_size > 0xffffffff) { DBUG_ASSERT(false); DBUG_RETURN(-1); } - DBUG_PRINT("info", ("Blob number %d needs size %llu, total buffer reqt. now %llu", - ha->m_blob_counter, - len64, - ha->m_blobs_row_total_size)); + DBUG_PRINT("info", + ("Blob number %d needs size %llu, total buffer reqt. now %llu", + ha->m_blob_counter, len64, ha->m_blobs_row_total_size)); } ha->m_blob_counter++; @@ -1772,37 +1563,32 @@ int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg) Wait until all blobs in this row are active, so we can allocate and use a common buffer containing all. */ - if (ha->m_blob_counter < ha->m_blob_expected_count_per_row) - DBUG_RETURN(0); + if (ha->m_blob_counter < ha->m_blob_expected_count_per_row) DBUG_RETURN(0); /* Reset blob counter for next row (scan scenario) */ - ha->m_blob_counter= 0; + ha->m_blob_counter = 0; /* Re-allocate bigger blob buffer for this row if necessary. 
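  Note that the buffer only ever grows: a row whose blobs fit in the current
  allocation reuses it, so a scan does not pay a free/malloc pair per row.
  The per-blob sizes summed above were each rounded up to an 8-byte boundary,
  i.e. (len64 + 7) & ~7 maps 1..8 to 8 and 9..16 to 16, which keeps every
  blob's data Uint64-aligned inside the shared buffer.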
*/ - if (ha->m_blobs_row_total_size > ha->m_blobs_buffer_size) - { + if (ha->m_blobs_row_total_size > ha->m_blobs_buffer_size) { my_free(ha->m_blobs_buffer); DBUG_PRINT("info", ("allocate blobs buffer size %u", (uint32)(ha->m_blobs_row_total_size))); /* Windows compiler complains about my_malloc on non-size_t * validate mapping from Uint64 to size_t */ - if(((size_t)ha->m_blobs_row_total_size) != ha->m_blobs_row_total_size) - { - ha->m_blobs_buffer= NULL; - ha->m_blobs_buffer_size= 0; + if (((size_t)ha->m_blobs_row_total_size) != ha->m_blobs_row_total_size) { + ha->m_blobs_buffer = NULL; + ha->m_blobs_buffer_size = 0; DBUG_RETURN(-1); } - ha->m_blobs_buffer= - (uchar*) my_malloc(PSI_INSTRUMENT_ME, - (size_t) ha->m_blobs_row_total_size, MYF(MY_WME)); - if (ha->m_blobs_buffer == NULL) - { - ha->m_blobs_buffer_size= 0; + ha->m_blobs_buffer = (uchar *)my_malloc( + PSI_INSTRUMENT_ME, (size_t)ha->m_blobs_row_total_size, MYF(MY_WME)); + if (ha->m_blobs_buffer == NULL) { + ha->m_blobs_buffer_size = 0; DBUG_RETURN(-1); } - ha->m_blobs_buffer_size= ha->m_blobs_row_total_size; + ha->m_blobs_buffer_size = ha->m_blobs_row_total_size; } /* @@ -1810,76 +1596,62 @@ int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg) If we know the destination mysqld row, we also set the blob null bit and pointer/length (if not, it will be done instead in unpack_record()). */ - uint32 offset= 0; - for (uint i= 0; i < ha->table->s->fields; i++) - { - Field *field= ha->table->field[i]; - if (! ((field->flags & BLOB_FLAG) && field->stored_in_db)) - continue; - NdbValue value= ha->m_value[i]; - if (value.blob == NULL) - { - DBUG_PRINT("info",("[%u] skipped", i)); + uint32 offset = 0; + for (uint i = 0; i < ha->table->s->fields; i++) { + Field *field = ha->table->field[i]; + if (!((field->flags & BLOB_FLAG) && field->stored_in_db)) continue; + NdbValue value = ha->m_value[i]; + if (value.blob == NULL) { + DBUG_PRINT("info", ("[%u] skipped", i)); continue; } - Field_blob *field_blob= (Field_blob *)field; - NdbBlob *ndb_blob= value.blob; + Field_blob *field_blob = (Field_blob *)field; + NdbBlob *ndb_blob = value.blob; int isNull; - if (ndb_blob->getNull(isNull) != 0) - ERR_RETURN(ndb_blob->getNdbError()); + if (ndb_blob->getNull(isNull) != 0) ERR_RETURN(ndb_blob->getNdbError()); if (isNull == 0) { - Uint64 len64= 0; - if (ndb_blob->getLength(len64) != 0) - ERR_RETURN(ndb_blob->getNdbError()); + Uint64 len64 = 0; + if (ndb_blob->getLength(len64) != 0) ERR_RETURN(ndb_blob->getNdbError()); DBUG_ASSERT(len64 < 0xffffffff); - uchar *buf= ha->m_blobs_buffer + offset; - uint32 len= (uint32)(ha->m_blobs_buffer_size - offset); - if (ndb_blob->readData(buf, len) != 0) - { + uchar *buf = ha->m_blobs_buffer + offset; + uint32 len = (uint32)(ha->m_blobs_buffer_size - offset); + if (ndb_blob->readData(buf, len) != 0) { NdbError err; - if (findBlobError(err, ndb_blob) == 0) - { + if (findBlobError(err, ndb_blob) == 0) { ERR_RETURN(err); - } - else - { + } else { /* Should always have some error code set */ assert(err.code != 0); ERR_RETURN(err); } - } - DBUG_PRINT("info", ("[%u] offset: %u buf: 0x%lx len=%u", - i, offset, (long) buf, len)); + } + DBUG_PRINT("info", ("[%u] offset: %u buf: 0x%lx len=%u", i, offset, + (long)buf, len)); DBUG_ASSERT(len == len64); - if (ha->m_blob_destination_record) - { - ptrdiff_t ptrdiff= - ha->m_blob_destination_record - ha->table->record[0]; + if (ha->m_blob_destination_record) { + ptrdiff_t ptrdiff = + ha->m_blob_destination_record - ha->table->record[0]; field_blob->move_field_offset(ptrdiff); - if(len > 
field_blob->max_data_length()) - { - len = calc_ndb_blob_len(field_blob->charset(), - buf, field_blob->max_data_length()); + if (len > field_blob->max_data_length()) { + len = calc_ndb_blob_len(field_blob->charset(), buf, + field_blob->max_data_length()); // push a warning - push_warning_printf(current_thd, Sql_condition::SL_WARNING, - WARN_DATA_TRUNCATED, - "Truncated value from TEXT field \'%s\'", field_blob->field_name); + push_warning_printf( + current_thd, Sql_condition::SL_WARNING, WARN_DATA_TRUNCATED, + "Truncated value from TEXT field \'%s\'", field_blob->field_name); } field_blob->set_ptr(len, buf); field_blob->set_notnull(); field_blob->move_field_offset(-ptrdiff); } - offset+= Uint32((len64 + 7) & ~((Uint64)7)); - } - else if (ha->m_blob_destination_record) - { + offset += Uint32((len64 + 7) & ~((Uint64)7)); + } else if (ha->m_blob_destination_record) { /* Have to set length even in this case. */ - ptrdiff_t ptrdiff= - ha->m_blob_destination_record - ha->table->record[0]; - const uchar *buf= ha->m_blobs_buffer + offset; + ptrdiff_t ptrdiff = ha->m_blob_destination_record - ha->table->record[0]; + const uchar *buf = ha->m_blobs_buffer + offset; field_blob->move_field_offset(ptrdiff); field_blob->set_ptr((uint32)0, buf); field_blob->set_null(); @@ -1894,22 +1666,17 @@ int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg) * early */ const bool autocommit = (get_thd_ndb(current_thd)->m_handler != NULL); - if (!autocommit && - !ha->m_active_cursor) - { - for (uint i= 0; i < ha->table->s->fields; i++) - { - Field *field= ha->table->field[i]; - if (! ((field->flags & BLOB_FLAG) && field->stored_in_db)) - continue; - NdbValue value= ha->m_value[i]; - if (value.blob == NULL) - { - DBUG_PRINT("info",("[%u] skipped", i)); + if (!autocommit && !ha->m_active_cursor) { + for (uint i = 0; i < ha->table->s->fields; i++) { + Field *field = ha->table->field[i]; + if (!((field->flags & BLOB_FLAG) && field->stored_in_db)) continue; + NdbValue value = ha->m_value[i]; + if (value.blob == NULL) { + DBUG_PRINT("info", ("[%u] skipped", i)); continue; } - NdbBlob *ndb_blob= value.blob; - + NdbBlob *ndb_blob = value.blob; + assert(ndb_blob->getState() == NdbBlob::Active); /* Call close() with execPendingBlobOps == true @@ -1918,8 +1685,7 @@ int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg) * code invoking this callback will execute before * returning control to the caller of execute() */ - if (ndb_blob->close(true) != 0) - { + if (ndb_blob->close(true) != 0) { ERR_RETURN(ndb_blob->getNdbError()); } } @@ -1935,115 +1701,96 @@ int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg) set in that record. Otherwise they must be set later by calling unpack_record(). */ -int -ha_ndbcluster::get_blob_values(const NdbOperation *ndb_op, uchar *dst_record, - const MY_BITMAP *bitmap) -{ +int ha_ndbcluster::get_blob_values(const NdbOperation *ndb_op, + uchar *dst_record, const MY_BITMAP *bitmap) { uint i; DBUG_ENTER("ha_ndbcluster::get_blob_values"); - m_blob_counter= 0; - m_blob_expected_count_per_row= 0; - m_blob_destination_record= dst_record; - m_blobs_row_total_size= 0; - ndb_op->getNdbTransaction()-> - setMaxPendingBlobReadBytes(THDVAR(current_thd, blob_read_batch_bytes)); + m_blob_counter = 0; + m_blob_expected_count_per_row = 0; + m_blob_destination_record = dst_record; + m_blobs_row_total_size = 0; + ndb_op->getNdbTransaction()->setMaxPendingBlobReadBytes( + THDVAR(current_thd, blob_read_batch_bytes)); - for (i= 0; i < table_share->fields; i++) - { - Field *field= table->field[i]; - if (! 
((field->flags & BLOB_FLAG) && field->stored_in_db)) - continue; + for (i = 0; i < table_share->fields; i++) { + Field *field = table->field[i]; + if (!((field->flags & BLOB_FLAG) && field->stored_in_db)) continue; DBUG_PRINT("info", ("fieldnr=%d", i)); NdbBlob *ndb_blob; - if (bitmap_is_set(bitmap, i)) - { - if ((ndb_blob= m_table_map->getBlobHandle(ndb_op, i)) == NULL || + if (bitmap_is_set(bitmap, i)) { + if ((ndb_blob = m_table_map->getBlobHandle(ndb_op, i)) == NULL || ndb_blob->setActiveHook(g_get_ndb_blobs_value, this) != 0) DBUG_RETURN(1); m_blob_expected_count_per_row++; - } - else - ndb_blob= NULL; + } else + ndb_blob = NULL; - m_value[i].blob= ndb_blob; + m_value[i].blob = ndb_blob; } DBUG_RETURN(0); } -int -ha_ndbcluster::set_blob_values(const NdbOperation *ndb_op, - ptrdiff_t row_offset, const MY_BITMAP *bitmap, - uint *set_count, bool batch) -{ +int ha_ndbcluster::set_blob_values(const NdbOperation *ndb_op, + ptrdiff_t row_offset, + const MY_BITMAP *bitmap, uint *set_count, + bool batch) { uint field_no; uint *blob_index, *blob_index_end; - int res= 0; + int res = 0; DBUG_ENTER("ha_ndbcluster::set_blob_values"); - *set_count= 0; + *set_count = 0; - if (table_share->blob_fields == 0) - DBUG_RETURN(0); + if (table_share->blob_fields == 0) DBUG_RETURN(0); - ndb_op->getNdbTransaction()-> - setMaxPendingBlobWriteBytes(THDVAR(current_thd, blob_write_batch_bytes)); - blob_index= table_share->blob_field; - blob_index_end= blob_index + table_share->blob_fields; - do - { - field_no= *blob_index; + ndb_op->getNdbTransaction()->setMaxPendingBlobWriteBytes( + THDVAR(current_thd, blob_write_batch_bytes)); + blob_index = table_share->blob_field; + blob_index_end = blob_index + table_share->blob_fields; + do { + field_no = *blob_index; /* A NULL bitmap sets all blobs. */ - if (bitmap && !bitmap_is_set(bitmap, field_no)) - continue; - Field *field= table->field[field_no]; - if(field->is_virtual_gcol()) - continue; + if (bitmap && !bitmap_is_set(bitmap, field_no)) continue; + Field *field = table->field[field_no]; + if (field->is_virtual_gcol()) continue; - NdbBlob *ndb_blob= m_table_map->getBlobHandle(ndb_op, field_no); - if (ndb_blob == NULL) - ERR_RETURN(ndb_op->getNdbError()); - if (field->is_real_null(row_offset)) - { + NdbBlob *ndb_blob = m_table_map->getBlobHandle(ndb_op, field_no); + if (ndb_blob == NULL) ERR_RETURN(ndb_op->getNdbError()); + if (field->is_real_null(row_offset)) { DBUG_PRINT("info", ("Setting Blob %d to NULL", field_no)); - if (ndb_blob->setNull() != 0) - ERR_RETURN(ndb_op->getNdbError()); - } - else - { - Field_blob *field_blob= (Field_blob *)field; + if (ndb_blob->setNull() != 0) ERR_RETURN(ndb_op->getNdbError()); + } else { + Field_blob *field_blob = (Field_blob *)field; // Get length and pointer to data - const uint32 blob_len= field_blob->get_length(row_offset); - const uchar *blob_ptr= field_blob->get_blob_data(row_offset); + const uint32 blob_len = field_blob->get_length(row_offset); + const uchar *blob_ptr = field_blob->get_blob_data(row_offset); // Looks like NULL ptr signals length 0 blob if (blob_ptr == NULL) { DBUG_ASSERT(blob_len == 0); - blob_ptr= pointer_cast(""); + blob_ptr = pointer_cast(""); } - DBUG_PRINT("value", ("set blob ptr: 0x%lx len: %u", - (long) blob_ptr, blob_len)); + DBUG_PRINT("value", + ("set blob ptr: 0x%lx len: %u", (long)blob_ptr, blob_len)); DBUG_DUMP("value", blob_ptr, MIN(blob_len, 26)); /* NdbBlob requires the data pointer to remain valid until execute() time. So when batching, we need to copy the value to a temporary buffer. 
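  A sketch of the hazard being avoided (hypothetical flow, not code from
  this patch):

    ndb_blob->setValue(blob_ptr, blob_len);    // NDB captures the pointer
    // ... mysqld refills the record buffer with the next batched row ...
    trans->execute(NdbTransaction::NoCommit);  // bytes are read only here

  Without the copy, every batched operation would send whatever bytes happen
  to sit in the record buffer at execute() time.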
*/ - if (batch && blob_len > 0) - { - uchar *tmp_buf= get_buffer(m_thd_ndb, blob_len); - if (!tmp_buf) - DBUG_RETURN(HA_ERR_OUT_OF_MEM); + if (batch && blob_len > 0) { + uchar *tmp_buf = get_buffer(m_thd_ndb, blob_len); + if (!tmp_buf) DBUG_RETURN(HA_ERR_OUT_OF_MEM); memcpy(tmp_buf, blob_ptr, blob_len); - blob_ptr= tmp_buf; + blob_ptr = tmp_buf; } - res= ndb_blob->setValue(pointer_cast(blob_ptr), blob_len); - if (res != 0) - ERR_RETURN(ndb_op->getNdbError()); + res = ndb_blob->setValue(pointer_cast(blob_ptr), blob_len); + if (res != 0) ERR_RETURN(ndb_op->getNdbError()); } ++(*set_count); @@ -2052,55 +1799,46 @@ ha_ndbcluster::set_blob_values(const NdbOperation *ndb_op, DBUG_RETURN(res); } - /** Check if any set or get of blob value in current query. */ -bool ha_ndbcluster::uses_blob_value(const MY_BITMAP *bitmap) const -{ +bool ha_ndbcluster::uses_blob_value(const MY_BITMAP *bitmap) const { uint *blob_index, *blob_index_end; - if (table_share->blob_fields == 0) - return false; + if (table_share->blob_fields == 0) return false; - blob_index= table_share->blob_field; - blob_index_end= blob_index + table_share->blob_fields; - do - { - Field *field= table->field[*blob_index]; - if (bitmap_is_set(bitmap, field->field_index) && ! field->is_virtual_gcol()) + blob_index = table_share->blob_field; + blob_index_end = blob_index + table_share->blob_fields; + do { + Field *field = table->field[*blob_index]; + if (bitmap_is_set(bitmap, field->field_index) && !field->is_virtual_gcol()) return true; } while (++blob_index != blob_index_end); return false; } -void ha_ndbcluster::release_blobs_buffer() -{ +void ha_ndbcluster::release_blobs_buffer() { DBUG_ENTER("releaseBlobsBuffer"); - if (m_blobs_buffer_size > 0) - { - DBUG_PRINT("info", ("Deleting blobs buffer, size %llu", m_blobs_buffer_size)); + if (m_blobs_buffer_size > 0) { + DBUG_PRINT("info", + ("Deleting blobs buffer, size %llu", m_blobs_buffer_size)); my_free(m_blobs_buffer); - m_blobs_buffer= 0; - m_blobs_row_total_size= 0; - m_blobs_buffer_size= 0; + m_blobs_buffer = 0; + m_blobs_row_total_size = 0; + m_blobs_buffer_size = 0; } DBUG_VOID_RETURN; } - /* Does type support a default value? */ -static bool -type_supports_default_value(enum_field_types mysql_type) -{ - bool ret = (mysql_type != MYSQL_TYPE_BLOB && - mysql_type != MYSQL_TYPE_TINY_BLOB && - mysql_type != MYSQL_TYPE_MEDIUM_BLOB && - mysql_type != MYSQL_TYPE_LONG_BLOB && - mysql_type != MYSQL_TYPE_JSON && - mysql_type != MYSQL_TYPE_GEOMETRY); +static bool type_supports_default_value(enum_field_types mysql_type) { + bool ret = + (mysql_type != MYSQL_TYPE_BLOB && mysql_type != MYSQL_TYPE_TINY_BLOB && + mysql_type != MYSQL_TYPE_MEDIUM_BLOB && + mysql_type != MYSQL_TYPE_LONG_BLOB && mysql_type != MYSQL_TYPE_JSON && + mysql_type != MYSQL_TYPE_GEOMETRY); return ret; } @@ -2109,124 +1847,107 @@ type_supports_default_value(enum_field_types mysql_type) Check that Ndb data dictionary has the same default values as MySQLD for the current table. 
Called as part of a DBUG check as part of table open - + Returns 0 - Defaults are ok -1 - Some default(s) are bad */ -int ha_ndbcluster::check_default_values(const NDBTAB* ndbtab) -{ +int ha_ndbcluster::check_default_values(const NDBTAB *ndbtab) { /* Debug only method for checking table defaults aligned between MySQLD and Ndb */ - bool defaults_aligned= true; + bool defaults_aligned = true; - if (ndbtab->hasDefaultValues()) - { + if (ndbtab->hasDefaultValues()) { /* Ndb supports native defaults for non-pk columns */ - my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set); + my_bitmap_map *old_map = tmp_use_all_columns(table, table->read_set); - for (uint f=0; f < table_share->fields; f++) - { - Field* field= table->field[f]; // Use Field struct from MySQLD table rep - if(! field->stored_in_db) - continue; + for (uint f = 0; f < table_share->fields; f++) { + Field *field = table->field[f]; // Use Field struct from MySQLD table rep + if (!field->stored_in_db) continue; - const NdbDictionary::Column* ndbCol= m_table_map->getColumn(field->field_index); + const NdbDictionary::Column *ndbCol = + m_table_map->getColumn(field->field_index); - if ((! (field->flags & (PRI_KEY_FLAG | - NO_DEFAULT_VALUE_FLAG))) && - type_supports_default_value(field->real_type())) - { + if ((!(field->flags & (PRI_KEY_FLAG | NO_DEFAULT_VALUE_FLAG))) && + type_supports_default_value(field->real_type())) { /* We expect Ndb to have a native default for this * column */ - ptrdiff_t src_offset= table_share->default_values - - field->table->record[0]; + ptrdiff_t src_offset = + table_share->default_values - field->table->record[0]; /* Move field by offset to refer to default value */ field->move_field_offset(src_offset); - - const uchar* ndb_default= (const uchar*) ndbCol->getDefaultValue(); - if (ndb_default == NULL) - /* MySQLD default must also be NULL */ - defaults_aligned= field->is_null(); - else - { - if (field->type() != MYSQL_TYPE_BIT) - { - defaults_aligned= (0 == field->cmp(ndb_default)); - } - else - { - longlong value= (static_cast(field))->val_int(); + const uchar *ndb_default = (const uchar *)ndbCol->getDefaultValue(); + + if (ndb_default == NULL) /* MySQLD default must also be NULL */ + defaults_aligned = field->is_null(); + else { + if (field->type() != MYSQL_TYPE_BIT) { + defaults_aligned = (0 == field->cmp(ndb_default)); + } else { + longlong value = (static_cast(field))->val_int(); /* Map to NdbApi format - two Uint32s */ Uint32 out[2]; out[0] = 0; out[1] = 0; - for (int b=0; b < 64; b++) - { + for (int b = 0; b < 64; b++) { out[b >> 5] |= (value & 1) << (b & 31); - - value= value >> 1; + + value = value >> 1; } Uint32 defaultLen = field_used_length(field); defaultLen = ((defaultLen + 3) & ~(Uint32)0x7); - defaults_aligned= (0 == memcmp(ndb_default, - out, - defaultLen)); + defaults_aligned = (0 == memcmp(ndb_default, out, defaultLen)); } } - + field->move_field_offset(-src_offset); - if (unlikely(!defaults_aligned)) - { - ndb_log_error("Internal error, Default values differ " - "for column %u, ndb_default: %d", - field->field_index, ndb_default != NULL); + if (unlikely(!defaults_aligned)) { + ndb_log_error( + "Internal error, Default values differ " + "for column %u, ndb_default: %d", + field->field_index, ndb_default != NULL); } - } - else - { + } else { /* We don't expect Ndb to have a native default for this column */ - if (unlikely(ndbCol->getDefaultValue() != NULL)) - { + if (unlikely(ndbCol->getDefaultValue() != NULL)) { /* Didn't expect that */ - ndb_log_error("Internal error, Column 
%u has native " - "default, but shouldn't. Flags=%u, type=%u", - field->field_index, field->flags, - field->real_type()); - defaults_aligned= false; + ndb_log_error( + "Internal error, Column %u has native " + "default, but shouldn't. Flags=%u, type=%u", + field->field_index, field->flags, field->real_type()); + defaults_aligned = false; } } - if (unlikely(!defaults_aligned)) - { + if (unlikely(!defaults_aligned)) { // Dump field - ndb_log_error("field[ name: '%s', type: %u, real_type: %u, " - "flags: 0x%x, is_null: %d]", - field->field_name, field->type(), field->real_type(), - field->flags, field->is_null()); + ndb_log_error( + "field[ name: '%s', type: %u, real_type: %u, " + "flags: 0x%x, is_null: %d]", + field->field_name, field->type(), field->real_type(), field->flags, + field->is_null()); // Dump ndbCol - ndb_log_error("ndbCol[name: '%s', type: %u, column_no: %d, " - "nullable: %d]", - ndbCol->getName(), ndbCol->getType(), - ndbCol->getColumnNo(), ndbCol->getNullable()); + ndb_log_error( + "ndbCol[name: '%s', type: %u, column_no: %d, " + "nullable: %d]", + ndbCol->getName(), ndbCol->getType(), ndbCol->getColumnNo(), + ndbCol->getNullable()); break; } } tmp_restore_column_map(table->read_set, old_map); } - return (defaults_aligned? 0: -1); + return (defaults_aligned ? 0 : -1); } - -int ha_ndbcluster::get_metadata(THD *thd, const dd::Table* table_def) -{ - Ndb *ndb= get_thd_ndb(thd)->ndb; - NDBDICT *dict= ndb->getDictionary(); +int ha_ndbcluster::get_metadata(THD *thd, const dd::Table *table_def) { + Ndb *ndb = get_thd_ndb(thd)->ndb; + NDBDICT *dict = ndb->getDictionary(); DBUG_ENTER("ha_ndbcluster::get_metadata"); DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); @@ -2234,9 +1955,8 @@ int ha_ndbcluster::get_metadata(THD *thd, const dd::Table* table_def) DBUG_ASSERT(m_table_info == NULL); int object_id, object_version; - if (!ndb_dd_table_get_object_id_and_version(table_def, - object_id, object_version)) - { + if (!ndb_dd_table_get_object_id_and_version(table_def, object_id, + object_version)) { DBUG_PRINT("error", ("Could not extract object_id and object_version " "from table definition")); DBUG_RETURN(1); @@ -2245,8 +1965,7 @@ int ha_ndbcluster::get_metadata(THD *thd, const dd::Table* table_def) ndb->setDatabaseName(m_dbname); Ndb_table_guard ndbtab_g(dict, m_tabname); const NDBTAB *tab = ndbtab_g.get_table(); - if (tab == nullptr) - { + if (tab == nullptr) { ERR_RETURN(dict->getNdbError()); } @@ -2254,20 +1973,17 @@ int ha_ndbcluster::get_metadata(THD *thd, const dd::Table* table_def) // matches the id and version of the NDB table const int ndb_object_id = tab->getObjectId(); const int ndb_object_version = tab->getObjectVersion(); - if (ndb_object_id != object_id || - ndb_object_version != object_version) - { + if (ndb_object_id != object_id || ndb_object_version != object_version) { DBUG_PRINT("error", ("Table id or version mismatch")); - DBUG_PRINT("error", ("NDB table id: %u, version: %u", - ndb_object_id, ndb_object_version)); - DBUG_PRINT("error", ("DD table id: %u, version: %u", - object_id, object_version)); + DBUG_PRINT("error", ("NDB table id: %u, version: %u", ndb_object_id, + ndb_object_version)); + DBUG_PRINT("error", + ("DD table id: %u, version: %u", object_id, object_version)); ndb_log_verbose(10, "Table id or version mismatch for table '%s.%s', " "[%d, %d] != [%d, %d]", - m_dbname, m_tabname, - object_id, object_version, + m_dbname, m_tabname, object_id, object_version, ndb_object_id, ndb_object_version); ndbtab_g.invalidate(); @@ -2283,8 +1999,7 @@ int 
ha_ndbcluster::get_metadata(THD *thd, const dd::Table* table_def) // Check that NDB and DD metadata matches DBUG_ASSERT(Ndb_metadata::compare(thd, tab, table_def)); - if (DBUG_EVALUATE_IF("ndb_get_metadata_fail", true, false)) - { + if (DBUG_EVALUATE_IF("ndb_get_metadata_fail", true, false)) { fprintf(stderr, "ndb_get_metadata_fail\n"); DBUG_RETURN(HA_ERR_TABLE_DEF_CHANGED); } @@ -2292,61 +2007,51 @@ int ha_ndbcluster::get_metadata(THD *thd, const dd::Table* table_def) // Create field to column map when table is opened m_table_map = new Ndb_table_map(table, tab); - /* Now check that any Ndb native defaults are aligned + /* Now check that any Ndb native defaults are aligned with MySQLD defaults */ DBUG_ASSERT(check_default_values(tab) == 0); DBUG_PRINT("info", ("fetched table %s", tab->getName())); - m_table= tab; + m_table = tab; ndb_bitmap_init(m_bitmap, m_bitmap_buf, table_share->fields); int error = 0; - if (table_share->primary_key == MAX_KEY) - { + if (table_share->primary_key == MAX_KEY) { /* Hidden primary key. */ - if ((error= add_hidden_pk_ndb_record(dict)) != 0) - goto err; + if ((error = add_hidden_pk_ndb_record(dict)) != 0) goto err; } - if ((error= add_table_ndb_record(dict)) != 0) - goto err; + if ((error = add_table_ndb_record(dict)) != 0) goto err; /* Approx. write size in bytes over transporter */ - m_bytes_per_write= 12 + tab->getRowSizeInBytes() + 4 * tab->getNoOfColumns(); + m_bytes_per_write = 12 + tab->getRowSizeInBytes() + 4 * tab->getNoOfColumns(); /* Open indexes */ - if ((error= open_indexes(ndb, table)) != 0) - goto err; + if ((error = open_indexes(ndb, table)) != 0) goto err; /* Read foreign keys where this table is child or parent */ - if ((error= get_fk_data(thd, ndb)) != 0) - goto err; + if ((error = get_fk_data(thd, ndb)) != 0) goto err; /* Backward compatibility for tables created without tablespace in .frm => read tablespace setting from engine */ if (table_share->mysql_version < 50120 && - !table_share->tablespace /* safety */) - { + !table_share->tablespace /* safety */) { Uint32 id; - if (tab->getTablespace(&id)) - { - NdbDictionary::Tablespace ts= dict->getTablespace(id); - if (ndb_dict_check_NDB_error(dict)) - { - const char *tablespace= ts.getName(); - const size_t tablespace_len= strlen(tablespace); - if (tablespace_len != 0) - { + if (tab->getTablespace(&id)) { + NdbDictionary::Tablespace ts = dict->getTablespace(id); + if (ndb_dict_check_NDB_error(dict)) { + const char *tablespace = ts.getName(); + const size_t tablespace_len = strlen(tablespace); + if (tablespace_len != 0) { DBUG_PRINT("info", ("Found tablespace '%s'", tablespace)); - table_share->tablespace= strmake_root(&table_share->mem_root, - tablespace, - tablespace_len); + table_share->tablespace = + strmake_root(&table_share->mem_root, tablespace, tablespace_len); } } } @@ -2362,52 +2067,44 @@ int ha_ndbcluster::get_metadata(THD *thd, const dd::Table* table_def) release_indexes(dict, 1 /* invalidate */); // Release NdbRecord's allocated for the table - if (m_ndb_record != NULL) - { + if (m_ndb_record != NULL) { dict->releaseRecord(m_ndb_record); - m_ndb_record= NULL; + m_ndb_record = NULL; } - if (m_ndb_hidden_key_record != NULL) - { + if (m_ndb_hidden_key_record != NULL) { dict->releaseRecord(m_ndb_hidden_key_record); - m_ndb_hidden_key_record= NULL; + m_ndb_hidden_key_record = NULL; } ndbtab_g.invalidate(); - m_table= NULL; + m_table = NULL; DBUG_RETURN(error); } static int fix_unique_index_attr_order(NDB_INDEX_DATA &data, - const NDBINDEX *index, - KEY *key_info) -{ + const NDBINDEX 
*index, KEY *key_info) { DBUG_ENTER("fix_unique_index_attr_order"); - unsigned sz= index->getNoOfIndexColumns(); + unsigned sz = index->getNoOfIndexColumns(); - if (data.unique_index_attrid_map) - my_free(data.unique_index_attrid_map); - data.unique_index_attrid_map= (uchar*)my_malloc(PSI_INSTRUMENT_ME, sz,MYF(MY_WME)); - if (data.unique_index_attrid_map == 0) - { + if (data.unique_index_attrid_map) my_free(data.unique_index_attrid_map); + data.unique_index_attrid_map = + (uchar *)my_malloc(PSI_INSTRUMENT_ME, sz, MYF(MY_WME)); + if (data.unique_index_attrid_map == 0) { DBUG_RETURN(HA_ERR_OUT_OF_MEM); } - KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; + KEY_PART_INFO *key_part = key_info->key_part; + KEY_PART_INFO *end = key_part + key_info->user_defined_key_parts; DBUG_ASSERT(key_info->user_defined_key_parts == sz); - for (unsigned i= 0; key_part != end; key_part++, i++) - { - const char *field_name= key_part->field->field_name; + for (unsigned i = 0; key_part != end; key_part++, i++) { + const char *field_name = key_part->field->field_name; #ifndef DBUG_OFF - data.unique_index_attrid_map[i]= 255; + data.unique_index_attrid_map[i] = 255; #endif - for (unsigned j= 0; j < sz; j++) - { - const NDBCOL *c= index->getColumn(j); - if (strcmp(field_name, c->getName()) == 0) - { - data.unique_index_attrid_map[i]= j; + for (unsigned j = 0; j < sz; j++) { + const NDBCOL *c = index->getColumn(j); + if (strcmp(field_name, c->getName()) == 0) { + data.unique_index_attrid_map[i] = j; break; } } @@ -2422,20 +2119,17 @@ static int fix_unique_index_attr_order(NDB_INDEX_DATA &data, immediately */ int ha_ndbcluster::create_indexes(THD *thd, TABLE *tab, - const NdbDictionary::Table *ndbtab) const -{ - int error= 0; - KEY* key_info= tab->key_info; - const char **key_name= tab->s->keynames.type_names; + const NdbDictionary::Table *ndbtab) const { + int error = 0; + KEY *key_info = tab->key_info; + const char **key_name = tab->s->keynames.type_names; DBUG_ENTER("ha_ndbcluster::create_indexes"); - for (uint i= 0; i < tab->s->keys; i++, key_info++, key_name++) - { - const char* index_name= *key_name; - NDB_INDEX_TYPE idx_type= get_index_type_from_table(i); - error= create_index(thd, index_name, key_info, idx_type, ndbtab); - if (error) - { + for (uint i = 0; i < tab->s->keys; i++, key_info++, key_name++) { + const char *index_name = *key_name; + NDB_INDEX_TYPE idx_type = get_index_type_from_table(i); + error = create_index(thd, index_name, key_info, idx_type, ndbtab); + if (error) { DBUG_PRINT("error", ("Failed to create index %u", i)); break; } @@ -2444,57 +2138,47 @@ int ha_ndbcluster::create_indexes(THD *thd, TABLE *tab, DBUG_RETURN(error); } -static void ndb_init_index(NDB_INDEX_DATA &data) -{ - data.type= UNDEFINED_INDEX; - data.status= NDB_INDEX_DATA::UNDEFINED; - data.unique_index= NULL; - data.index= NULL; - data.unique_index_attrid_map= NULL; - data.ndb_record_key= NULL; - data.ndb_unique_record_key= NULL; - data.ndb_unique_record_row= NULL; +static void ndb_init_index(NDB_INDEX_DATA &data) { + data.type = UNDEFINED_INDEX; + data.status = NDB_INDEX_DATA::UNDEFINED; + data.unique_index = NULL; + data.index = NULL; + data.unique_index_attrid_map = NULL; + data.ndb_record_key = NULL; + data.ndb_unique_record_key = NULL; + data.ndb_unique_record_row = NULL; } -static void ndb_clear_index(NDBDICT *dict, NDB_INDEX_DATA &data) -{ - if (data.unique_index_attrid_map) - { +static void ndb_clear_index(NDBDICT *dict, NDB_INDEX_DATA &data) { + if 
(data.unique_index_attrid_map) { my_free(data.unique_index_attrid_map); } if (data.ndb_unique_record_key) dict->releaseRecord(data.ndb_unique_record_key); if (data.ndb_unique_record_row) dict->releaseRecord(data.ndb_unique_record_row); - if (data.ndb_record_key) - dict->releaseRecord(data.ndb_record_key); + if (data.ndb_record_key) dict->releaseRecord(data.ndb_record_key); ndb_init_index(data); } -static -void ndb_protect_char(const char* from, char* to, uint to_length, char protect) -{ - uint fpos= 0, tpos= 0; +static void ndb_protect_char(const char *from, char *to, uint to_length, + char protect) { + uint fpos = 0, tpos = 0; - while(from[fpos] != '\0' && tpos < to_length - 1) - { - if (from[fpos] == protect) - { - int len= 0; - to[tpos++]= '@'; - if(tpos < to_length - 5) - { - len= sprintf(to+tpos, "00%u", (uint) protect); - tpos+= len; + while (from[fpos] != '\0' && tpos < to_length - 1) { + if (from[fpos] == protect) { + int len = 0; + to[tpos++] = '@'; + if (tpos < to_length - 5) { + len = sprintf(to + tpos, "00%u", (uint)protect); + tpos += len; } - } - else - { - to[tpos++]= from[fpos]; + } else { + to[tpos++] = from[fpos]; } fpos++; } - to[tpos]= '\0'; + to[tpos] = '\0'; } /* @@ -2502,63 +2186,49 @@ void ndb_protect_char(const char* from, char* to, uint to_length, char protect) with an index (for faster access) */ int ha_ndbcluster::add_index_handle(NDBDICT *dict, KEY *key_info, - const char *key_name, uint index_no) -{ + const char *key_name, uint index_no) { char index_name[FN_LEN + 1]; - int error= 0; + int error = 0; - const NDB_INDEX_TYPE idx_type= get_index_type_from_table(index_no); - m_index[index_no].type= idx_type; + const NDB_INDEX_TYPE idx_type = get_index_type_from_table(index_no); + m_index[index_no].type = idx_type; DBUG_ENTER("ha_ndbcluster::add_index_handle"); DBUG_PRINT("enter", ("table %s", m_tabname)); - + ndb_protect_char(key_name, index_name, sizeof(index_name) - 1, '/'); - if (idx_type != PRIMARY_KEY_INDEX && idx_type != UNIQUE_INDEX) - { + if (idx_type != PRIMARY_KEY_INDEX && idx_type != UNIQUE_INDEX) { DBUG_PRINT("info", ("Get handle to index %s", index_name)); - const NDBINDEX *index= dict->getIndexGlobal(index_name, *m_table); - if (!index) - ERR_RETURN(dict->getNdbError()); - DBUG_PRINT("info", ("index: 0x%lx id: %d version: %d.%d status: %d", - (long) index, - index->getObjectId(), - index->getObjectVersion() & 0xFFFFFF, - index->getObjectVersion() >> 24, - index->getObjectStatus())); - DBUG_ASSERT(index->getObjectStatus() == - NdbDictionary::Object::Retrieved); - m_index[index_no].index= index; - } - - if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX) - { + const NDBINDEX *index = dict->getIndexGlobal(index_name, *m_table); + if (!index) ERR_RETURN(dict->getNdbError()); + DBUG_PRINT("info", + ("index: 0x%lx id: %d version: %d.%d status: %d", (long)index, + index->getObjectId(), index->getObjectVersion() & 0xFFFFFF, + index->getObjectVersion() >> 24, index->getObjectStatus())); + DBUG_ASSERT(index->getObjectStatus() == NdbDictionary::Object::Retrieved); + m_index[index_no].index = index; + } + + if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX) { char unique_index_name[FN_LEN + 1]; - static const char* unique_suffix= "$unique"; - m_has_unique_index= true; + static const char *unique_suffix = "$unique"; + m_has_unique_index = true; strxnmov(unique_index_name, FN_LEN, index_name, unique_suffix, NullS); DBUG_PRINT("info", ("Get handle to unique_index %s", unique_index_name)); - const NDBINDEX *index = - 
dict->getIndexGlobal(unique_index_name, *m_table); - if (!index) - ERR_RETURN(dict->getNdbError()); - DBUG_PRINT("info", ("index: 0x%lx id: %d version: %d.%d status: %d", - (long) index, - index->getObjectId(), - index->getObjectVersion() & 0xFFFFFF, - index->getObjectVersion() >> 24, - index->getObjectStatus())); - DBUG_ASSERT(index->getObjectStatus() == - NdbDictionary::Object::Retrieved); - m_index[index_no].unique_index= index; - error= fix_unique_index_attr_order(m_index[index_no], index, key_info); - } - - if (!error) - error= add_index_ndb_record(dict, key_info, index_no); - - if (!error) - m_index[index_no].status= NDB_INDEX_DATA::ACTIVE; - + const NDBINDEX *index = dict->getIndexGlobal(unique_index_name, *m_table); + if (!index) ERR_RETURN(dict->getNdbError()); + DBUG_PRINT("info", + ("index: 0x%lx id: %d version: %d.%d status: %d", (long)index, + index->getObjectId(), index->getObjectVersion() & 0xFFFFFF, + index->getObjectVersion() >> 24, index->getObjectStatus())); + DBUG_ASSERT(index->getObjectStatus() == NdbDictionary::Object::Retrieved); + m_index[index_no].unique_index = index; + error = fix_unique_index_attr_order(m_index[index_no], index, key_info); + } + + if (!error) error = add_index_ndb_record(dict, key_info, index_no); + + if (!error) m_index[index_no].status = NDB_INDEX_DATA::ACTIVE; + DBUG_RETURN(error); } @@ -2566,59 +2236,54 @@ int ha_ndbcluster::add_index_handle(NDBDICT *dict, KEY *key_info, We use this function to convert null bit masks, as found in class Field, to bit numbers, as used in NdbRecord. */ -static uint -null_bit_mask_to_bit_number(uchar bit_mask) -{ - switch (bit_mask) - { - case 0x1: return 0; - case 0x2: return 1; - case 0x4: return 2; - case 0x8: return 3; - case 0x10: return 4; - case 0x20: return 5; - case 0x40: return 6; - case 0x80: return 7; +static uint null_bit_mask_to_bit_number(uchar bit_mask) { + switch (bit_mask) { + case 0x1: + return 0; + case 0x2: + return 1; + case 0x4: + return 2; + case 0x8: + return 3; + case 0x10: + return 4; + case 0x20: + return 5; + case 0x40: + return 6; + case 0x80: + return 7; default: DBUG_ASSERT(false); return 0; } } -static void -ndb_set_record_specification(uint field_no, - NdbDictionary::RecordSpecification *spec, - const TABLE *table, - const NdbDictionary::Column *ndb_column) -{ +static void ndb_set_record_specification( + uint field_no, NdbDictionary::RecordSpecification *spec, const TABLE *table, + const NdbDictionary::Column *ndb_column) { DBUG_ENTER("ndb_set_record_specification"); DBUG_ASSERT(ndb_column); - spec->column= ndb_column; - spec->offset= Uint32(table->field[field_no]->ptr - table->record[0]); - if (table->field[field_no]->real_maybe_null()) - { - spec->nullbit_byte_offset= - Uint32(table->field[field_no]->null_offset()); - spec->nullbit_bit_in_byte= - null_bit_mask_to_bit_number(table->field[field_no]->null_bit); - } - else if (table->field[field_no]->type() == MYSQL_TYPE_BIT) - { + spec->column = ndb_column; + spec->offset = Uint32(table->field[field_no]->ptr - table->record[0]); + if (table->field[field_no]->real_maybe_null()) { + spec->nullbit_byte_offset = Uint32(table->field[field_no]->null_offset()); + spec->nullbit_bit_in_byte = + null_bit_mask_to_bit_number(table->field[field_no]->null_bit); + } else if (table->field[field_no]->type() == MYSQL_TYPE_BIT) { /* We need to store the position of the overflow bits. 
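  For example, a BIT(10) column keeps eight bits in the record's data bytes
  and packs the remaining two "overflow" bits in among the null-flag bits at
  the start of the record; bit_ptr and bit_ofs locate those leftover bits.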
*/ - const Field_bit* field_bit= static_cast(table->field[field_no]); - spec->nullbit_byte_offset= - Uint32(field_bit->bit_ptr - table->record[0]); - spec->nullbit_bit_in_byte= field_bit->bit_ofs; - } - else - { - spec->nullbit_byte_offset= 0; - spec->nullbit_bit_in_byte= 0; - } - spec->column_flags= 0; + const Field_bit *field_bit = + static_cast(table->field[field_no]); + spec->nullbit_byte_offset = Uint32(field_bit->bit_ptr - table->record[0]); + spec->nullbit_bit_in_byte = field_bit->bit_ofs; + } else { + spec->nullbit_byte_offset = 0; + spec->nullbit_bit_in_byte = 0; + } + spec->column_flags = 0; if (table->field[field_no]->type() == MYSQL_TYPE_STRING && - table->field[field_no]->pack_length() == 0) - { + table->field[field_no]->pack_length() == 0) { /* This is CHAR(0), which we represent as a nullable BIT(1) column where we ignore the data bit @@ -2628,175 +2293,140 @@ ndb_set_record_specification(uint field_no, } DBUG_PRINT("info", ("%s.%s field: %d, col: %d, offset: %d, null bit: %d", - table->s->table_name.str, ndb_column->getName(), - field_no, ndb_column->getColumnNo(), - spec->offset, - (8 * spec->nullbit_byte_offset) + spec->nullbit_bit_in_byte)); + table->s->table_name.str, ndb_column->getName(), field_no, + ndb_column->getColumnNo(), spec->offset, + (8 * spec->nullbit_byte_offset) + spec->nullbit_bit_in_byte)); DBUG_VOID_RETURN; } -int -ha_ndbcluster::add_table_ndb_record(NDBDICT *dict) -{ +int ha_ndbcluster::add_table_ndb_record(NDBDICT *dict) { DBUG_ENTER("ha_ndbcluster::add_table_ndb_record()"); NdbDictionary::RecordSpecification spec[NDB_MAX_ATTRIBUTES_IN_TABLE + 2]; NdbRecord *rec; uint fieldId, colId; - for (fieldId= 0, colId= 0; fieldId < table_share->fields; fieldId++) - { - if(table->field[fieldId]->stored_in_db) - { + for (fieldId = 0, colId = 0; fieldId < table_share->fields; fieldId++) { + if (table->field[fieldId]->stored_in_db) { ndb_set_record_specification(fieldId, &spec[colId], table, m_table->getColumn(colId)); colId++; } } - rec= dict->createRecord(m_table, spec, colId, sizeof(spec[0]), - NdbDictionary::RecMysqldBitfield | - NdbDictionary::RecPerColumnFlags); - if (! rec) - ERR_RETURN(dict->getNdbError()); - m_ndb_record= rec; + rec = dict->createRecord( + m_table, spec, colId, sizeof(spec[0]), + NdbDictionary::RecMysqldBitfield | NdbDictionary::RecPerColumnFlags); + if (!rec) ERR_RETURN(dict->getNdbError()); + m_ndb_record = rec; DBUG_RETURN(0); } /* Create NdbRecord for setting hidden primary key from Uint64. */ -int -ha_ndbcluster::add_hidden_pk_ndb_record(NDBDICT *dict) -{ +int ha_ndbcluster::add_hidden_pk_ndb_record(NDBDICT *dict) { DBUG_ENTER("ha_ndbcluster::add_hidden_pk_ndb_record"); NdbDictionary::RecordSpecification spec[1]; NdbRecord *rec; - spec[0].column= m_table->getColumn(m_table_map->get_hidden_key_column()); - spec[0].offset= 0; - spec[0].nullbit_byte_offset= 0; - spec[0].nullbit_bit_in_byte= 0; + spec[0].column = m_table->getColumn(m_table_map->get_hidden_key_column()); + spec[0].offset = 0; + spec[0].nullbit_byte_offset = 0; + spec[0].nullbit_bit_in_byte = 0; - rec= dict->createRecord(m_table, spec, 1, sizeof(spec[0])); - if (! 
rec) - ERR_RETURN(dict->getNdbError()); - m_ndb_hidden_key_record= rec; + rec = dict->createRecord(m_table, spec, 1, sizeof(spec[0])); + if (!rec) ERR_RETURN(dict->getNdbError()); + m_ndb_hidden_key_record = rec; DBUG_RETURN(0); } -int -ha_ndbcluster::add_index_ndb_record(NDBDICT *dict, KEY *key_info, uint index_no) -{ +int ha_ndbcluster::add_index_ndb_record(NDBDICT *dict, KEY *key_info, + uint index_no) { DBUG_ENTER("ha_ndbcluster::add_index_ndb_record"); NdbDictionary::RecordSpecification spec[NDB_MAX_ATTRIBUTES_IN_TABLE + 2]; NdbRecord *rec; - Uint32 offset= 0; - for (uint i= 0; i < key_info->user_defined_key_parts; i++) - { - KEY_PART_INFO *kp= &key_info->key_part[i]; - spec[i].column= m_table_map->getColumn(kp->fieldnr - 1); - if (! spec[i].column) - ERR_RETURN(dict->getNdbError()); - if (kp->null_bit) - { + Uint32 offset = 0; + for (uint i = 0; i < key_info->user_defined_key_parts; i++) { + KEY_PART_INFO *kp = &key_info->key_part[i]; + spec[i].column = m_table_map->getColumn(kp->fieldnr - 1); + if (!spec[i].column) ERR_RETURN(dict->getNdbError()); + if (kp->null_bit) { /* Nullable column. */ - spec[i].offset= offset + 1; // First byte is NULL flag - spec[i].nullbit_byte_offset= offset; - spec[i].nullbit_bit_in_byte= 0; - } - else - { + spec[i].offset = offset + 1; // First byte is NULL flag + spec[i].nullbit_byte_offset = offset; + spec[i].nullbit_bit_in_byte = 0; + } else { /* Not nullable column. */ - spec[i].offset= offset; - spec[i].nullbit_byte_offset= 0; - spec[i].nullbit_bit_in_byte= 0; + spec[i].offset = offset; + spec[i].nullbit_byte_offset = 0; + spec[i].nullbit_bit_in_byte = 0; } - offset+= kp->store_length; + offset += kp->store_length; } - if (m_index[index_no].index) - { + if (m_index[index_no].index) { /* Enable MysqldShrinkVarchar flag so that the two-byte length used by mysqld for short varchar keys is correctly converted into a one-byte length used by Ndb kernel. */ - rec= dict->createRecord(m_index[index_no].index, m_table, - spec, key_info->user_defined_key_parts, sizeof(spec[0]), - ( NdbDictionary::RecMysqldShrinkVarchar | - NdbDictionary::RecMysqldBitfield )); - if (! rec) - ERR_RETURN(dict->getNdbError()); - m_index[index_no].ndb_record_key= rec; - } - else - m_index[index_no].ndb_record_key= NULL; - - if (m_index[index_no].unique_index) - { - rec= dict->createRecord(m_index[index_no].unique_index, m_table, - spec, key_info->user_defined_key_parts, sizeof(spec[0]), - ( NdbDictionary::RecMysqldShrinkVarchar | - NdbDictionary::RecMysqldBitfield )); - if (! rec) - ERR_RETURN(dict->getNdbError()); - m_index[index_no].ndb_unique_record_key= rec; - } - else if (index_no == table_share->primary_key) - { + rec = dict->createRecord(m_index[index_no].index, m_table, spec, + key_info->user_defined_key_parts, sizeof(spec[0]), + (NdbDictionary::RecMysqldShrinkVarchar | + NdbDictionary::RecMysqldBitfield)); + if (!rec) ERR_RETURN(dict->getNdbError()); + m_index[index_no].ndb_record_key = rec; + } else + m_index[index_no].ndb_record_key = NULL; + + if (m_index[index_no].unique_index) { + rec = dict->createRecord(m_index[index_no].unique_index, m_table, spec, + key_info->user_defined_key_parts, sizeof(spec[0]), + (NdbDictionary::RecMysqldShrinkVarchar | + NdbDictionary::RecMysqldBitfield)); + if (!rec) ERR_RETURN(dict->getNdbError()); + m_index[index_no].ndb_unique_record_key = rec; + } else if (index_no == table_share->primary_key) { /* The primary key is special, there is no explicit NDB index associated. 
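  The key record is therefore created directly from the base table object
  (m_table below) rather than from an NdbDictionary::Index.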
*/ - rec= dict->createRecord(m_table, - spec, key_info->user_defined_key_parts, sizeof(spec[0]), - ( NdbDictionary::RecMysqldShrinkVarchar | - NdbDictionary::RecMysqldBitfield )); - if (! rec) - ERR_RETURN(dict->getNdbError()); - m_index[index_no].ndb_unique_record_key= rec; - } - else - m_index[index_no].ndb_unique_record_key= NULL; + rec = dict->createRecord(m_table, spec, key_info->user_defined_key_parts, + sizeof(spec[0]), + (NdbDictionary::RecMysqldShrinkVarchar | + NdbDictionary::RecMysqldBitfield)); + if (!rec) ERR_RETURN(dict->getNdbError()); + m_index[index_no].ndb_unique_record_key = rec; + } else + m_index[index_no].ndb_unique_record_key = NULL; /* Now do the same, but this time with offsets from Field, for row access. */ - for (uint i= 0; i < key_info->user_defined_key_parts; i++) - { - const KEY_PART_INFO *kp= &key_info->key_part[i]; + for (uint i = 0; i < key_info->user_defined_key_parts; i++) { + const KEY_PART_INFO *kp = &key_info->key_part[i]; - spec[i].offset= kp->offset; - if (kp->null_bit) - { + spec[i].offset = kp->offset; + if (kp->null_bit) { /* Nullable column. */ - spec[i].nullbit_byte_offset= kp->null_offset; - spec[i].nullbit_bit_in_byte= null_bit_mask_to_bit_number(kp->null_bit); - } - else - { + spec[i].nullbit_byte_offset = kp->null_offset; + spec[i].nullbit_bit_in_byte = null_bit_mask_to_bit_number(kp->null_bit); + } else { /* Not nullable column. */ - spec[i].nullbit_byte_offset= 0; - spec[i].nullbit_bit_in_byte= 0; + spec[i].nullbit_byte_offset = 0; + spec[i].nullbit_bit_in_byte = 0; } } - if (m_index[index_no].unique_index) - { - rec= dict->createRecord(m_index[index_no].unique_index, m_table, - spec, key_info->user_defined_key_parts, sizeof(spec[0]), - NdbDictionary::RecMysqldBitfield); - if (! rec) - ERR_RETURN(dict->getNdbError()); - m_index[index_no].ndb_unique_record_row= rec; - } - else if (index_no == table_share->primary_key) - { - rec= dict->createRecord(m_table, - spec, key_info->user_defined_key_parts, sizeof(spec[0]), - NdbDictionary::RecMysqldBitfield); - if (! 
rec) - ERR_RETURN(dict->getNdbError()); - m_index[index_no].ndb_unique_record_row= rec; - } - else - m_index[index_no].ndb_unique_record_row= NULL; + if (m_index[index_no].unique_index) { + rec = dict->createRecord(m_index[index_no].unique_index, m_table, spec, + key_info->user_defined_key_parts, sizeof(spec[0]), + NdbDictionary::RecMysqldBitfield); + if (!rec) ERR_RETURN(dict->getNdbError()); + m_index[index_no].ndb_unique_record_row = rec; + } else if (index_no == table_share->primary_key) { + rec = dict->createRecord(m_table, spec, key_info->user_defined_key_parts, + sizeof(spec[0]), NdbDictionary::RecMysqldBitfield); + if (!rec) ERR_RETURN(dict->getNdbError()); + m_index[index_no].ndb_unique_record_row = rec; + } else + m_index[index_no].ndb_unique_record_row = NULL; DBUG_RETURN(0); } @@ -2804,49 +2434,38 @@ ha_ndbcluster::add_index_ndb_record(NDBDICT *dict, KEY *key_info, uint index_no) /* Associate index handles for each index of a table */ -int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab) -{ - - NDBDICT *dict= ndb->getDictionary(); - KEY* key_info= tab->key_info; - const char **key_name= tab->s->keynames.type_names; +int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab) { + NDBDICT *dict = ndb->getDictionary(); + KEY *key_info = tab->key_info; + const char **key_name = tab->s->keynames.type_names; DBUG_ENTER("ha_ndbcluster::open_indexes"); - m_has_unique_index= false; + m_has_unique_index = false; - for (uint i= 0; i < tab->s->keys; i++, key_info++, key_name++) - { - const int error= add_index_handle(dict, key_info, *key_name, i); - if (error) - { + for (uint i = 0; i < tab->s->keys; i++, key_info++, key_name++) { + const int error = add_index_handle(dict, key_info, *key_name, i); + if (error) { DBUG_RETURN(error); } - m_index[i].null_in_unique_index= false; + m_index[i].null_in_unique_index = false; if (check_index_fields_not_null(key_info)) - m_index[i].null_in_unique_index= true; + m_index[i].null_in_unique_index = true; } DBUG_RETURN(0); } - - -void -ha_ndbcluster::release_indexes(NdbDictionary::Dictionary *dict, - int invalidate) -{ +void ha_ndbcluster::release_indexes(NdbDictionary::Dictionary *dict, + int invalidate) { DBUG_ENTER("ha_ndbcluster::release_indexes"); - for (uint i= 0; i < MAX_KEY; i++) - { - NDB_INDEX_DATA& index = m_index[i]; - if (index.unique_index) - { + for (uint i = 0; i < MAX_KEY; i++) { + NDB_INDEX_DATA &index = m_index[i]; + if (index.unique_index) { // Release reference to index in NdbAPI dict->removeIndexGlobal(*index.unique_index, invalidate); } - if (index.index) - { + if (index.index) { // Release reference to index in NdbAPI dict->removeIndexGlobal(*index.index, invalidate); } @@ -2855,22 +2474,19 @@ ha_ndbcluster::release_indexes(NdbDictionary::Dictionary *dict, DBUG_VOID_RETURN; } - /* Renumber indexes in index list by shifting out the index that was dropped */ -void ha_ndbcluster::inplace__renumber_indexes(uint dropped_index_num) -{ +void ha_ndbcluster::inplace__renumber_indexes(uint dropped_index_num) { DBUG_ENTER("ha_ndbcluster::inplace__renumber_indexes"); // Shift the dropped index out of list - for(uint i= dropped_index_num + 1; - i != MAX_KEY && m_index[i].status != NDB_INDEX_DATA::UNDEFINED; i++) - { - NDB_INDEX_DATA tmp= m_index[i - 1]; - m_index[i - 1]= m_index[i]; - m_index[i]= tmp; + for (uint i = dropped_index_num + 1; + i != MAX_KEY && m_index[i].status != NDB_INDEX_DATA::UNDEFINED; i++) { + NDB_INDEX_DATA tmp = m_index[i - 1]; + m_index[i - 1] = m_index[i]; + m_index[i] = tmp; } DBUG_VOID_RETURN; @@ -2879,55 +2495,43 @@ void 
ha_ndbcluster::inplace__renumber_indexes(uint dropped_index_num) /* Drop all indexes that are marked for deletion */ -int ha_ndbcluster::inplace__drop_indexes(Ndb *ndb, TABLE *tab) -{ - int error= 0; - KEY* key_info= tab->key_info; - NDBDICT *dict= ndb->getDictionary(); +int ha_ndbcluster::inplace__drop_indexes(Ndb *ndb, TABLE *tab) { + int error = 0; + KEY *key_info = tab->key_info; + NDBDICT *dict = ndb->getDictionary(); DBUG_ENTER("ha_ndbcluster::inplace__drop_indexes"); - - for (uint i= 0; i < tab->s->keys; i++, key_info++) - { - NDB_INDEX_TYPE idx_type= get_index_type_from_table(i); - m_index[i].type= idx_type; - if (m_index[i].status == NDB_INDEX_DATA::TO_BE_DROPPED) - { - const NdbDictionary::Index *index= m_index[i].index; - const NdbDictionary::Index *unique_index= m_index[i].unique_index; - if (unique_index) - { + for (uint i = 0; i < tab->s->keys; i++, key_info++) { + NDB_INDEX_TYPE idx_type = get_index_type_from_table(i); + m_index[i].type = idx_type; + if (m_index[i].status == NDB_INDEX_DATA::TO_BE_DROPPED) { + const NdbDictionary::Index *index = m_index[i].index; + const NdbDictionary::Index *unique_index = m_index[i].unique_index; + + if (unique_index) { DBUG_PRINT("info", ("Dropping unique index %u: %s", i, unique_index->getName())); // Drop unique index from ndb - if (dict->dropIndexGlobal(*unique_index) == 0) - { + if (dict->dropIndexGlobal(*unique_index) == 0) { dict->removeIndexGlobal(*unique_index, 1); - m_index[i].unique_index= NULL; - } - else - { - error= ndb_to_mysql_error(&dict->getNdbError()); - m_dupkey= i; // for HA_ERR_DROP_INDEX_FK + m_index[i].unique_index = NULL; + } else { + error = ndb_to_mysql_error(&dict->getNdbError()); + m_dupkey = i; // for HA_ERR_DROP_INDEX_FK } } - if (!error && index) - { + if (!error && index) { DBUG_PRINT("info", ("Dropping index %u: %s", i, index->getName())); // Drop ordered index from ndb - if (dict->dropIndexGlobal(*index) == 0) - { + if (dict->dropIndexGlobal(*index) == 0) { dict->removeIndexGlobal(*index, 1); - m_index[i].index= NULL; - } - else - { - error=ndb_to_mysql_error(&dict->getNdbError()); - m_dupkey= i; // for HA_ERR_DROP_INDEX_FK + m_index[i].index = NULL; + } else { + error = ndb_to_mysql_error(&dict->getNdbError()); + m_dupkey = i; // for HA_ERR_DROP_INDEX_FK } } - if (error) - { + if (error) { // Change the status back to active. since it was not dropped m_index[i].status = NDB_INDEX_DATA::ACTIVE; DBUG_RETURN(error); @@ -2938,155 +2542,124 @@ int ha_ndbcluster::inplace__drop_indexes(Ndb *ndb, TABLE *tab) ndb_clear_index(dict, m_index[tab->s->keys]); } } - + DBUG_RETURN(error); } /** - Decode the type of an index from information + Decode the type of an index from information provided in table object. */ -NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint inx) const -{ +NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint inx) const { return get_index_type_from_key(inx, table_share->key_info, inx == table_share->primary_key); } -NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_key(uint inx, - KEY *key_info, - bool primary) const -{ - bool is_hash_index= (key_info[inx].algorithm == - HA_KEY_ALG_HASH); +NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_key(uint inx, KEY *key_info, + bool primary) const { + bool is_hash_index = (key_info[inx].algorithm == HA_KEY_ALG_HASH); if (primary) return is_hash_index ? PRIMARY_KEY_INDEX : PRIMARY_KEY_ORDERED_INDEX; - - return ((key_info[inx].flags & HA_NOSAME) ? - (is_hash_index ? 
UNIQUE_INDEX : UNIQUE_ORDERED_INDEX) : - ORDERED_INDEX); -} - -bool ha_ndbcluster::check_index_fields_not_null(KEY* key_info) const -{ - KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; + + return ((key_info[inx].flags & HA_NOSAME) + ? (is_hash_index ? UNIQUE_INDEX : UNIQUE_ORDERED_INDEX) + : ORDERED_INDEX); +} + +bool ha_ndbcluster::check_index_fields_not_null(KEY *key_info) const { + KEY_PART_INFO *key_part = key_info->key_part; + KEY_PART_INFO *end = key_part + key_info->user_defined_key_parts; DBUG_ENTER("ha_ndbcluster::check_index_fields_not_null"); - - for (; key_part != end; key_part++) - { - Field* field= key_part->field; - if (field->maybe_null()) - DBUG_RETURN(true); - } - + + for (; key_part != end; key_part++) { + Field *field = key_part->field; + if (field->maybe_null()) DBUG_RETURN(true); + } + DBUG_RETURN(false); } -void ha_ndbcluster::release_metadata(THD *thd, Ndb *ndb) -{ +void ha_ndbcluster::release_metadata(THD *thd, Ndb *ndb) { DBUG_ENTER("release_metadata"); DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); - if(m_table == NULL) - { + if (m_table == NULL) { DBUG_VOID_RETURN; // table already released } - NDBDICT *dict= ndb->getDictionary(); - int invalidate_indexes= 0; - if (thd && thd->lex && thd->lex->sql_command == SQLCOM_FLUSH) - { + NDBDICT *dict = ndb->getDictionary(); + int invalidate_indexes = 0; + if (thd && thd->lex && thd->lex->sql_command == SQLCOM_FLUSH) { invalidate_indexes = 1; } - if (m_ndb_record != NULL) - { + if (m_ndb_record != NULL) { dict->releaseRecord(m_ndb_record); - m_ndb_record= NULL; + m_ndb_record = NULL; } - if (m_ndb_hidden_key_record != NULL) - { + if (m_ndb_hidden_key_record != NULL) { dict->releaseRecord(m_ndb_hidden_key_record); - m_ndb_hidden_key_record= NULL; + m_ndb_hidden_key_record = NULL; } if (m_table->getObjectStatus() == NdbDictionary::Object::Invalid) - invalidate_indexes= 1; + invalidate_indexes = 1; dict->removeTableGlobal(*m_table, invalidate_indexes); - m_table_info= NULL; + m_table_info = NULL; release_indexes(dict, invalidate_indexes); // Release FK data release_fk_data(); - m_table= NULL; + m_table = NULL; DBUG_VOID_RETURN; } - /* Map from thr_lock_type to NdbOperation::LockMode */ -static inline -NdbOperation::LockMode get_ndb_lock_mode(enum thr_lock_type type) -{ - if (type >= TL_WRITE_ALLOW_WRITE) - return NdbOperation::LM_Exclusive; - if (type == TL_READ_WITH_SHARED_LOCKS) - return NdbOperation::LM_Read; +static inline NdbOperation::LockMode get_ndb_lock_mode( + enum thr_lock_type type) { + if (type >= TL_WRITE_ALLOW_WRITE) return NdbOperation::LM_Exclusive; + if (type == TL_READ_WITH_SHARED_LOCKS) return NdbOperation::LM_Read; return NdbOperation::LM_CommittedRead; } +static const ulong index_type_flags[] = { + /* UNDEFINED_INDEX */ + 0, -static const ulong index_type_flags[]= -{ - /* UNDEFINED_INDEX */ - 0, + /* PRIMARY_KEY_INDEX */ + HA_ONLY_WHOLE_INDEX, - /* PRIMARY_KEY_INDEX */ - HA_ONLY_WHOLE_INDEX, + /* PRIMARY_KEY_ORDERED_INDEX */ + /* + Enable HA_KEYREAD_ONLY when "sorted" indexes are supported, + thus ORDER BY clauses can be optimized by reading directly + through the index. + */ + // HA_KEYREAD_ONLY | + HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE | HA_READ_ORDER, - /* PRIMARY_KEY_ORDERED_INDEX */ - /* - Enable HA_KEYREAD_ONLY when "sorted" indexes are supported, - thus ORDER BY clauses can be optimized by reading directly - through the index. 
- */ - // HA_KEYREAD_ONLY | - HA_READ_NEXT | - HA_READ_PREV | - HA_READ_RANGE | - HA_READ_ORDER, - - /* UNIQUE_INDEX */ - HA_ONLY_WHOLE_INDEX, - - /* UNIQUE_ORDERED_INDEX */ - HA_READ_NEXT | - HA_READ_PREV | - HA_READ_RANGE | - HA_READ_ORDER, - - /* ORDERED_INDEX */ - HA_READ_NEXT | - HA_READ_PREV | - HA_READ_RANGE | - HA_READ_ORDER -}; + /* UNIQUE_INDEX */ + HA_ONLY_WHOLE_INDEX, -inline NDB_INDEX_TYPE ha_ndbcluster::get_index_type(uint idx_no) const -{ + /* UNIQUE_ORDERED_INDEX */ + HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE | HA_READ_ORDER, + + /* ORDERED_INDEX */ + HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE | HA_READ_ORDER}; + +inline NDB_INDEX_TYPE ha_ndbcluster::get_index_type(uint idx_no) const { DBUG_ASSERT(idx_no < MAX_KEY); return m_index[idx_no].type; } -inline bool ha_ndbcluster::has_null_in_unique_index(uint idx_no) const -{ +inline bool ha_ndbcluster::has_null_in_unique_index(uint idx_no) const { DBUG_ASSERT(idx_no < MAX_KEY); return m_index[idx_no].null_in_unique_index; } - /** Get the flags for an index. @@ -3094,8 +2667,7 @@ inline bool ha_ndbcluster::has_null_in_unique_index(uint idx_no) const flags depending on the type of the index. */ -inline ulong ha_ndbcluster::index_flags(uint idx_no, uint, bool) const -{ +inline ulong ha_ndbcluster::index_flags(uint idx_no, uint, bool) const { DBUG_ENTER("ha_ndbcluster::index_flags"); DBUG_PRINT("enter", ("idx_no: %u", idx_no)); const NDB_INDEX_TYPE index_type = get_index_type_from_table(idx_no); @@ -3103,12 +2675,8 @@ inline ulong ha_ndbcluster::index_flags(uint idx_no, uint, bool) const DBUG_RETURN(index_type_flags[index_type] | HA_KEY_SCAN_NOT_ROR); } -bool -ha_ndbcluster::primary_key_is_clustered() const -{ - - if (table->s->primary_key == MAX_KEY) - return false; +bool ha_ndbcluster::primary_key_is_clustered() const { + if (table->s->primary_key == MAX_KEY) return false; /* NOTE 1: our ordered indexes are not really clustered @@ -3120,25 +2688,21 @@ ha_ndbcluster::primary_key_is_clustered() const but that will need to be handled later... */ const NDB_INDEX_TYPE idx_type = - get_index_type_from_table(table->s->primary_key); + get_index_type_from_table(table->s->primary_key); return (idx_type == PRIMARY_KEY_ORDERED_INDEX || - idx_type == UNIQUE_ORDERED_INDEX || - idx_type == ORDERED_INDEX); + idx_type == UNIQUE_ORDERED_INDEX || idx_type == ORDERED_INDEX); } -bool ha_ndbcluster::check_index_fields_in_write_set(uint keyno) -{ - KEY* key_info= table->key_info + keyno; - KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; +bool ha_ndbcluster::check_index_fields_in_write_set(uint keyno) { + KEY *key_info = table->key_info + keyno; + KEY_PART_INFO *key_part = key_info->key_part; + KEY_PART_INFO *end = key_part + key_info->user_defined_key_parts; uint i; DBUG_ENTER("check_index_fields_in_write_set"); - for (i= 0; key_part != end; key_part++, i++) - { - Field* field= key_part->field; - if (!bitmap_is_set(table->write_set, field->field_index)) - { + for (i = 0; key_part != end; key_part++, i++) { + Field *field = key_part->field; + if (!bitmap_is_set(table->write_set, field->field_index)) { DBUG_RETURN(false); } } @@ -3146,74 +2710,61 @@ bool ha_ndbcluster::check_index_fields_in_write_set(uint keyno) DBUG_RETURN(true); } - /** Read one record from NDB using primary key. 
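index_flags() above is a plain table lookup: the NDB_INDEX_TYPE enumerator indexes index_type_flags[], where hash-only index types advertise HA_ONLY_WHOLE_INDEX and ordered types advertise the range/order scan bits. The shape of that lookup in isolation; the flag values below are illustrative placeholders, not the real HA_* constants:

#include <cassert>

enum IndexTypeSketch {
  UNDEFINED_INDEX_S, PRIMARY_KEY_INDEX_S, PRIMARY_KEY_ORDERED_INDEX_S,
  UNIQUE_INDEX_S, UNIQUE_ORDERED_INDEX_S, ORDERED_INDEX_S
};

// Placeholder capability bits (the real values are HA_* handler flags).
constexpr unsigned long kOnlyWholeIndex = 0x1;
constexpr unsigned long kReadRange = 0x2;
constexpr unsigned long kReadOrder = 0x4;

constexpr unsigned long kIndexTypeFlags[] = {
    0,                        // UNDEFINED_INDEX: no capabilities
    kOnlyWholeIndex,          // PRIMARY_KEY_INDEX: hash, whole-key lookups only
    kReadRange | kReadOrder,  // PRIMARY_KEY_ORDERED_INDEX
    kOnlyWholeIndex,          // UNIQUE_INDEX: hash, whole-key lookups only
    kReadRange | kReadOrder,  // UNIQUE_ORDERED_INDEX
    kReadRange | kReadOrder}; // ORDERED_INDEX

int main() {
  assert(kIndexTypeFlags[UNIQUE_INDEX_S] == kOnlyWholeIndex);
  assert(kIndexTypeFlags[ORDERED_INDEX_S] & kReadRange);
  return 0;
}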
*/ -int ha_ndbcluster::pk_read(const uchar *key, uchar *buf, uint32 *part_id) -{ - NdbConnection *trans= m_thd_ndb->trans; +int ha_ndbcluster::pk_read(const uchar *key, uchar *buf, uint32 *part_id) { + NdbConnection *trans = m_thd_ndb->trans; DBUG_ENTER("pk_read"); DBUG_ASSERT(trans); - NdbOperation::LockMode lm= get_ndb_lock_mode(m_lock.type); + NdbOperation::LockMode lm = get_ndb_lock_mode(m_lock.type); if (check_if_pushable(NdbQueryOperationDef::PrimaryKeyAccess, - table->s->primary_key)) - { + table->s->primary_key)) { // Is parent of pushed join DBUG_ASSERT(lm == NdbOperation::LM_CommittedRead); const int error = pk_unique_index_read_key_pushed(table->s->primary_key, key); - if (unlikely(error)) - { + if (unlikely(error)) { DBUG_RETURN(error); } - DBUG_ASSERT(m_active_query!=NULL); + DBUG_ASSERT(m_active_query != NULL); if (execute_no_commit_ie(m_thd_ndb, trans) != 0 || - m_active_query->getNdbError().code) + m_active_query->getNdbError().code) DBUG_RETURN(ndb_err(trans)); - int result= fetch_next_pushed(); - if (result == NdbQuery::NextResult_gotRow) - { - DBUG_ASSERT(pushed_cond == nullptr || const_cast<Item *>(pushed_cond)->val_int()); + int result = fetch_next_pushed(); + if (result == NdbQuery::NextResult_gotRow) { + DBUG_ASSERT(pushed_cond == nullptr || + const_cast<Item *>(pushed_cond)->val_int()); DBUG_RETURN(0); - } - else if (result == NdbQuery::NextResult_scanComplete) - { + } else if (result == NdbQuery::NextResult_scanComplete) { DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); - } - else - { + } else { DBUG_RETURN(ndb_err(trans)); } - } - else - { - if (m_pushed_join_operation == PUSHED_ROOT) - { + } else { + if (m_pushed_join_operation == PUSHED_ROOT) { m_thd_ndb->m_pushed_queries_dropped++; } const NdbOperation *op; - if (!(op= pk_unique_index_read_key(table->s->primary_key, key, buf, lm, - (m_user_defined_partitioning ? - part_id : - NULL)))) + if (!(op = pk_unique_index_read_key( + table->s->primary_key, key, buf, lm, + (m_user_defined_partitioning ? part_id : NULL)))) ERR_RETURN(trans->getNdbError()); - if (execute_no_commit_ie(m_thd_ndb, trans) != 0 || - op->getNdbError().code) + if (execute_no_commit_ie(m_thd_ndb, trans) != 0 || op->getNdbError().code) DBUG_RETURN(ndb_err(trans)); - if (unlikely(!m_cond.check_condition())) - { - DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); // False condition + if (unlikely(!m_cond.check_condition())) { + DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); // False condition } - DBUG_ASSERT(pushed_cond == nullptr || const_cast<Item *>(pushed_cond)->val_int()); + DBUG_ASSERT(pushed_cond == nullptr || + const_cast<Item *>(pushed_cond)->val_int()); DBUG_RETURN(0); } } @@ -3222,10 +2773,9 @@ int ha_ndbcluster::pk_read(const uchar *key, uchar *buf, uint32 *part_id) Update primary key or part id by doing delete insert.
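The function that follows implements the "delete insert" mapping named here. The ordering can be pictured on a toy key/value table; update_pk() below only illustrates the shape (delete the old key first, then reinsert the full row under the new key), not the handler code itself:

#include <cassert>
#include <map>
#include <string>

// Toy table keyed by an int primary key.
using TableSketch = std::map<int, std::string>;

static bool update_pk(TableSketch &t, int old_pk, int new_pk) {
  auto it = t.find(old_pk);
  if (it == t.end()) return false;
  std::string full_row = it->second;  // all columns, as with the widened write_set
  t.erase(it);                        // analogue of ndb_delete_row()
  t.emplace(new_pk, full_row);        // analogue of ndb_write_row()
  return true;
}

int main() {
  TableSketch t{{1, "row"}};
  assert(update_pk(t, 1, 2));
  assert(t.count(1) == 0 && t.at(2) == "row");
  return 0;
}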
*/ -int ha_ndbcluster::ndb_pk_update_row(THD *thd, - const uchar *old_data, uchar *new_data) -{ - NdbTransaction *trans= m_thd_ndb->trans; +int ha_ndbcluster::ndb_pk_update_row(THD *thd, const uchar *old_data, + uchar *new_data) { + NdbTransaction *trans = m_thd_ndb->trans; int error; DBUG_ENTER("ndb_pk_update_row"); DBUG_ASSERT(trans); @@ -3249,49 +2799,43 @@ int ha_ndbcluster::ndb_pk_update_row(THD *thd, #endif // Delete old row - error= ndb_delete_row(old_data, true); - if (error) - { + error = ndb_delete_row(old_data, true); + if (error) { DBUG_PRINT("info", ("delete failed")); DBUG_RETURN(error); } // Insert new row DBUG_PRINT("info", ("delete succeded")); - bool batched_update= (m_active_cursor != 0); + bool batched_update = (m_active_cursor != 0); /* If we are updating a primary key with auto_increment then we need to update the auto_increment counter */ if (table->found_next_number_field && - bitmap_is_set(table->write_set, + bitmap_is_set(table->write_set, table->found_next_number_field->field_index) && - (error= set_auto_inc(thd, table->found_next_number_field))) - { + (error = set_auto_inc(thd, table->found_next_number_field))) { DBUG_RETURN(error); } /* - We are mapping a MySQLD PK changing update to an NdbApi delete + We are mapping a MySQLD PK changing update to an NdbApi delete and insert. The original PK changing update may not have written new values to all columns, so the write set may be partial. We set the write set to be all columns so that all values are copied from the old row to the new row. */ - my_bitmap_map *old_map= - tmp_use_all_columns(table, table->write_set); - error= ndb_write_row(new_data, true, batched_update); + my_bitmap_map *old_map = tmp_use_all_columns(table, table->write_set); + error = ndb_write_row(new_data, true, batched_update); tmp_restore_column_map(table->write_set, old_map); - if (error) - { + if (error) { DBUG_PRINT("info", ("insert failed")); - if (trans->commitStatus() == NdbConnection::Started) - { - if (thd->slave_thread) - g_ndb_slave_state.atTransactionAbort(); - m_thd_ndb->m_unsent_bytes= 0; + if (trans->commitStatus() == NdbConnection::Started) { + if (thd->slave_thread) g_ndb_slave_state.atTransactionAbort(); + m_thd_ndb->m_unsent_bytes = 0; m_thd_ndb->m_execute_count++; DBUG_PRINT("info", ("execute_count: %u", m_thd_ndb->m_execute_count)); trans->execute(NdbTransaction::Rollback); @@ -3312,47 +2856,35 @@ int ha_ndbcluster::ndb_pk_update_row(THD *thd, bool ha_ndbcluster::check_all_operations_for_error(NdbTransaction *trans, const NdbOperation *first, const NdbOperation *last, - uint errcode) -{ - const NdbOperation *op= first; + uint errcode) { + const NdbOperation *op = first; DBUG_ENTER("ha_ndbcluster::check_all_operations_for_error"); - while(op) - { - NdbError err= op->getNdbError(); - if (err.status != NdbError::Success) - { - if (ndb_to_mysql_error(&err) != (int) errcode) - DBUG_RETURN(false); + while (op) { + NdbError err = op->getNdbError(); + if (err.status != NdbError::Success) { + if (ndb_to_mysql_error(&err) != (int)errcode) DBUG_RETURN(false); if (op == last) break; - op= trans->getNextCompletedOperation(op); - } - else - { + op = trans->getNextCompletedOperation(op); + } else { // We found a duplicate - if (op->getType() == NdbOperation::UniqueIndexAccess) - { - if (errcode == HA_ERR_KEY_NOT_FOUND) - { - const NdbIndexOperation *iop= down_cast(op); - const NDBINDEX *index= iop->getIndex(); + if (op->getType() == NdbOperation::UniqueIndexAccess) { + if (errcode == HA_ERR_KEY_NOT_FOUND) { + const NdbIndexOperation *iop 
= + down_cast<const NdbIndexOperation *>(op); + const NDBINDEX *index = iop->getIndex(); // Find the key_no of the index - for(uint i= 0; i < table->s->keys; i++) - { - if (m_index[i].unique_index == index) - { - m_dupkey= i; + for (uint i = 0; i < table->s->keys; i++) { + if (m_index[i].unique_index == index) { + m_dupkey = i; break; } } } - } - else - { + } else { // Must have been primary key access DBUG_ASSERT(op->getType() == NdbOperation::PrimaryKeyAccess); - if (errcode == HA_ERR_KEY_NOT_FOUND) - m_dupkey= table->s->primary_key; + if (errcode == HA_ERR_KEY_NOT_FOUND) m_dupkey = table->s->primary_key; } DBUG_RETURN(false); } @@ -3360,20 +2892,15 @@ bool ha_ndbcluster::check_all_operations_for_error(NdbTransaction *trans, DBUG_RETURN(true); } - /** * Check if record contains any null valued columns that are part of a key */ -static -int -check_null_in_record(const KEY* key_info, const uchar *record) -{ +static int check_null_in_record(const KEY *key_info, const uchar *record) { KEY_PART_INFO *curr_part, *end_part; - curr_part= key_info->key_part; - end_part= curr_part + key_info->user_defined_key_parts; + curr_part = key_info->key_part; + end_part = curr_part + key_info->user_defined_key_parts; - while (curr_part != end_part) - { + while (curr_part != end_part) { if (curr_part->null_bit && (record[curr_part->null_offset] & curr_part->null_bit)) return 1; @@ -3390,7 +2917,7 @@ check_null_in_record(const KEY* key_info, const uchar *record) /* Empty mask and dummy row, for reading no attributes using NdbRecord. */ /* Mask will be initialized to all zeros by linker. */ -static unsigned char empty_mask[(NDB_MAX_ATTRIBUTES_IN_TABLE+7)/8]; +static unsigned char empty_mask[(NDB_MAX_ATTRIBUTES_IN_TABLE + 7) / 8]; static char dummy_row[1]; /** @@ -3398,211 +2925,174 @@ static char dummy_row[1]; primary key or unique index values */ -int ha_ndbcluster::peek_indexed_rows(const uchar *record, - NDB_WRITE_OP write_op) -{ +int ha_ndbcluster::peek_indexed_rows(const uchar *record, + NDB_WRITE_OP write_op) { NdbTransaction *trans; const NdbOperation *op; const NdbOperation *first, *last; NdbOperation::OperationOptions options; - NdbOperation::OperationOptions *poptions=NULL; + NdbOperation::OperationOptions *poptions = NULL; options.optionsPresent = 0; uint i; int error; DBUG_ENTER("peek_indexed_rows"); - if (unlikely(!(trans= get_transaction(error)))) - { + if (unlikely(!(trans = get_transaction(error)))) { DBUG_RETURN(error); } const NdbOperation::LockMode lm = get_ndb_lock_mode(m_lock.type); - first= NULL; - if (write_op != NDB_UPDATE && table->s->primary_key != MAX_KEY) - { + first = NULL; + if (write_op != NDB_UPDATE && table->s->primary_key != MAX_KEY) { /* * Fetch any row with colliding primary key */ - const NdbRecord *key_rec= - m_index[table->s->primary_key].ndb_unique_record_row; + const NdbRecord *key_rec = + m_index[table->s->primary_key].ndb_unique_record_row; - if (m_user_defined_partitioning) - { + if (m_user_defined_partitioning) { uint32 part_id; int error; longlong func_value; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); - error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value); + my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->read_set); + error = m_part_info->get_partition_id(m_part_info, &part_id, &func_value); dbug_tmp_restore_column_map(table->read_set, old_map); - if (error) - { - m_part_info->err_value= func_value; + if (error) { + m_part_info->err_value = func_value; DBUG_RETURN(error); } options.optionsPresent |=
NdbOperation::OperationOptions::OO_PARTITION_ID; - options.partitionId=part_id; - poptions=&options; - } - - if (!(op= trans->readTuple(key_rec, (const char *)record, - m_ndb_record, dummy_row, lm, empty_mask, - poptions, - sizeof(NdbOperation::OperationOptions)))) + options.partitionId = part_id; + poptions = &options; + } + + if (!(op = trans->readTuple(key_rec, (const char *)record, m_ndb_record, + dummy_row, lm, empty_mask, poptions, + sizeof(NdbOperation::OperationOptions)))) ERR_RETURN(trans->getNdbError()); - - first= op; + + first = op; } /* * Fetch any rows with colliding unique indexes */ - KEY* key_info; - for (i= 0, key_info= table->key_info; i < table->s->keys; i++, key_info++) - { - if (i != table_share->primary_key && - key_info->flags & HA_NOSAME && - bitmap_is_overlapping(table->write_set, m_key_fields[i])) - { + KEY *key_info; + for (i = 0, key_info = table->key_info; i < table->s->keys; i++, key_info++) { + if (i != table_share->primary_key && key_info->flags & HA_NOSAME && + bitmap_is_overlapping(table->write_set, m_key_fields[i])) { /* A unique index is defined on table and it's being updated We cannot look up a NULL field value in a unique index. But since keys with NULLs are not indexed, such rows cannot conflict anyway, so we just skip the index in this case. */ - if (check_null_in_record(key_info, record)) - { + if (check_null_in_record(key_info, record)) { DBUG_PRINT("info", ("skipping check for key with NULL")); continue; } - if (write_op != NDB_INSERT && !check_index_fields_in_write_set(i)) - { + if (write_op != NDB_INSERT && !check_index_fields_in_write_set(i)) { DBUG_PRINT("info", ("skipping check for key %u not in write_set", i)); continue; } const NdbOperation *iop; - const NdbRecord *key_rec= m_index[i].ndb_unique_record_row; - if (!(iop= trans->readTuple(key_rec, (const char *)record, - m_ndb_record, dummy_row, - lm, empty_mask))) + const NdbRecord *key_rec = m_index[i].ndb_unique_record_row; + if (!(iop = trans->readTuple(key_rec, (const char *)record, m_ndb_record, + dummy_row, lm, empty_mask))) ERR_RETURN(trans->getNdbError()); - if (!first) - first= iop; + if (!first) first = iop; } } - last= trans->getLastDefinedOperation(); - if (first) - { + last = trans->getLastDefinedOperation(); + if (first) { (void)execute_no_commit_ie(m_thd_ndb, trans); - } - else - { + } else { // Table has no keys DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); } - const NdbError ndberr= trans->getNdbError(); - error= ndberr.mysql_code; + const NdbError ndberr = trans->getNdbError(); + error = ndberr.mysql_code; if ((error != 0 && error != HA_ERR_KEY_NOT_FOUND) || - check_all_operations_for_error(trans, first, last, - HA_ERR_KEY_NOT_FOUND)) - { + check_all_operations_for_error(trans, first, last, + HA_ERR_KEY_NOT_FOUND)) { DBUG_RETURN(ndb_err(trans)); - } - else - { + } else { DBUG_PRINT("info", ("m_dupkey %d", m_dupkey)); } DBUG_RETURN(0); } - /** Read one record from NDB using unique secondary index. 
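peek_indexed_rows() above skips any unique key whose record value contains a NULL: keys with NULLs are not stored in the unique index, so they can never collide. The same null-bit test in isolation, assuming the KEY_PART_INFO-style null_offset/null_bit layout:

#include <cassert>
#include <vector>

// Minimal mirror of the per-key-part null bookkeeping.
struct KeyPartSketch {
  unsigned null_offset;    // byte in the record holding the null bit
  unsigned char null_bit;  // 0 means the column is NOT NULL
};

static bool key_has_null(const std::vector<KeyPartSketch> &parts,
                         const unsigned char *record) {
  for (const auto &p : parts)
    if (p.null_bit && (record[p.null_offset] & p.null_bit)) return true;
  return false;
}

int main() {
  const unsigned char record[1] = {0x02};      // bit 1 set: column is NULL
  assert(key_has_null({{0, 0x02}}, record));   // -> skip this unique key
  assert(!key_has_null({{0, 0x04}}, record));  // other column has a value
  return 0;
}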
*/ -int ha_ndbcluster::unique_index_read(const uchar *key, uchar *buf) -{ - NdbTransaction *trans= m_thd_ndb->trans; - NdbOperation::LockMode lm= get_ndb_lock_mode(m_lock.type); +int ha_ndbcluster::unique_index_read(const uchar *key, uchar *buf) { + NdbTransaction *trans = m_thd_ndb->trans; + NdbOperation::LockMode lm = get_ndb_lock_mode(m_lock.type); DBUG_ENTER("ha_ndbcluster::unique_index_read"); DBUG_PRINT("enter", ("index: %u, lm: %u", active_index, (unsigned int)lm)); DBUG_ASSERT(trans); - if (check_if_pushable(NdbQueryOperationDef::UniqueIndexAccess, - active_index)) - { + active_index)) { DBUG_ASSERT(lm == NdbOperation::LM_CommittedRead); - const int error= pk_unique_index_read_key_pushed(active_index, key); - if (unlikely(error)) - { + const int error = pk_unique_index_read_key_pushed(active_index, key); + if (unlikely(error)) { DBUG_RETURN(error); } - DBUG_ASSERT(m_active_query!=NULL); + DBUG_ASSERT(m_active_query != NULL); if (execute_no_commit_ie(m_thd_ndb, trans) != 0 || - m_active_query->getNdbError().code) + m_active_query->getNdbError().code) DBUG_RETURN(ndb_err(trans)); - int result= fetch_next_pushed(); - if (result == NdbQuery::NextResult_gotRow) - { - DBUG_ASSERT(pushed_cond == nullptr || const_cast<Item *>(pushed_cond)->val_int()); + int result = fetch_next_pushed(); + if (result == NdbQuery::NextResult_gotRow) { + DBUG_ASSERT(pushed_cond == nullptr || + const_cast<Item *>(pushed_cond)->val_int()); DBUG_RETURN(0); - } - else if (result == NdbQuery::NextResult_scanComplete) - { + } else if (result == NdbQuery::NextResult_scanComplete) { DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); - } - else - { + } else { DBUG_RETURN(ndb_err(trans)); } - } - else - { - if (m_pushed_join_operation == PUSHED_ROOT) - { + } else { + if (m_pushed_join_operation == PUSHED_ROOT) { m_thd_ndb->m_pushed_queries_dropped++; } const NdbOperation *op; - if (!(op= pk_unique_index_read_key(active_index, key, buf, lm, NULL))) + if (!(op = pk_unique_index_read_key(active_index, key, buf, lm, NULL))) ERR_RETURN(trans->getNdbError()); - - if (execute_no_commit_ie(m_thd_ndb, trans) != 0 || - op->getNdbError().code) - { + + if (execute_no_commit_ie(m_thd_ndb, trans) != 0 || op->getNdbError().code) { DBUG_RETURN(ndb_err(trans)); } - if (unlikely(!m_cond.check_condition())) - { + if (unlikely(!m_cond.check_condition())) { DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); } - DBUG_ASSERT(pushed_cond == nullptr || const_cast<Item *>(pushed_cond)->val_int()); + DBUG_ASSERT(pushed_cond == nullptr || + const_cast<Item *>(pushed_cond)->val_int()); DBUG_RETURN(0); } } -int -ha_ndbcluster::scan_handle_lock_tuple(NdbScanOperation *scanOp, - NdbTransaction *trans) -{ +int ha_ndbcluster::scan_handle_lock_tuple(NdbScanOperation *scanOp, + NdbTransaction *trans) { DBUG_ENTER("ha_ndbcluster::scan_handle_lock_tuple"); - if (m_lock_tuple) - { + if (m_lock_tuple) { /* Lock level m_lock.type either TL_WRITE_ALLOW_WRITE (SELECT FOR UPDATE) or TL_READ_WITH_SHARED_LOCKS (SELECT - LOCK WITH SHARE MODE) and row was not explictly unlocked + LOCK WITH SHARE MODE) and row was not explictly unlocked with unlock_row() call */ DBUG_PRINT("info", ("Keeping lock on scanned row")); - - if (!(scanOp->lockCurrentTuple(trans, m_ndb_record, - dummy_row, empty_mask))) - { - m_lock_tuple= false; + + if (!(scanOp->lockCurrentTuple(trans, m_ndb_record, dummy_row, + empty_mask))) { + m_lock_tuple = false; ERR_RETURN(trans->getNdbError()); } @@ -3614,85 +3104,71 @@ ha_ndbcluster::scan_handle_lock_tuple, * issue updateCurrentTuple with AnyValue explicitly set */ if ((m_lock.type >=
TL_WRITE_ALLOW_WRITE) && - THDVAR(current_thd, log_exclusive_reads)) - { - if (scan_log_exclusive_read(scanOp, trans)) - { - m_lock_tuple= false; + THDVAR(current_thd, log_exclusive_reads)) { + if (scan_log_exclusive_read(scanOp, trans)) { + m_lock_tuple = false; ERR_RETURN(trans->getNdbError()); } } - m_thd_ndb->m_unsent_bytes+=12; - m_lock_tuple= false; + m_thd_ndb->m_unsent_bytes += 12; + m_lock_tuple = false; } DBUG_RETURN(0); } -inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) -{ +inline int ha_ndbcluster::fetch_next(NdbScanOperation *cursor) { DBUG_ENTER("fetch_next"); int local_check; int error; - NdbTransaction *trans= m_thd_ndb->trans; - + NdbTransaction *trans = m_thd_ndb->trans; + DBUG_ASSERT(trans); - if ((error= scan_handle_lock_tuple(cursor, trans)) != 0) - DBUG_RETURN(error); - - bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE && - m_lock.type != TL_READ_WITH_SHARED_LOCKS; + if ((error = scan_handle_lock_tuple(cursor, trans)) != 0) DBUG_RETURN(error); + + bool contact_ndb = m_lock.type < TL_WRITE_ALLOW_WRITE && + m_lock.type != TL_READ_WITH_SHARED_LOCKS; do { DBUG_PRINT("info", ("Call nextResult, contact_ndb: %d", contact_ndb)); /* We can only handle one tuple with blobs at a time. */ - if (m_thd_ndb->m_unsent_bytes && m_blobs_pending) - { + if (m_thd_ndb->m_unsent_bytes && m_blobs_pending) { if (execute_no_commit(m_thd_ndb, trans, m_ignore_no_key) != 0) DBUG_RETURN(ndb_err(trans)); } - + /* Should be no unexamined completed operations nextResult() on Blobs generates Blob part read ops, so we will free them here */ release_completed_operations(trans); - - if ((local_check= cursor->nextResult(&_m_next_row, - contact_ndb, - m_thd_ndb->m_force_send)) == 0) - { + + if ((local_check = cursor->nextResult(&_m_next_row, contact_ndb, + m_thd_ndb->m_force_send)) == 0) { /* - Explicitly lock tuple if "select for update" or - "select lock in share mode" + Explicitly lock tuple if "select for update" or + "select lock in share mode" */ - m_lock_tuple= (m_lock.type == TL_WRITE_ALLOW_WRITE - || - m_lock.type == TL_READ_WITH_SHARED_LOCKS); + m_lock_tuple = (m_lock.type == TL_WRITE_ALLOW_WRITE || + m_lock.type == TL_READ_WITH_SHARED_LOCKS); DBUG_RETURN(0); - } - else if (local_check == 1 || local_check == 2) - { + } else if (local_check == 1 || local_check == 2) { // 1: No more records // 2: No more cached records - + /* Before fetching more rows and releasing lock(s), - all pending update or delete operations should + all pending update or delete operations should be sent to NDB */ DBUG_PRINT("info", ("thd_ndb->m_unsent_bytes: %ld", - (long) m_thd_ndb->m_unsent_bytes)); - if (m_thd_ndb->m_unsent_bytes) - { - if ((error = flush_bulk_insert()) != 0) - DBUG_RETURN(error); + (long)m_thd_ndb->m_unsent_bytes)); + if (m_thd_ndb->m_unsent_bytes) { + if ((error = flush_bulk_insert()) != 0) DBUG_RETURN(error); } - contact_ndb= (local_check == 2); - } - else - { + contact_ndb = (local_check == 2); + } else { DBUG_RETURN(ndb_err(trans)); } } while (local_check == 2); @@ -3700,8 +3176,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) DBUG_RETURN(1); } -int ha_ndbcluster::fetch_next_pushed() -{ +int ha_ndbcluster::fetch_next_pushed() { DBUG_ENTER("fetch_next_pushed (from pushed operation)"); DBUG_ASSERT(m_pushed_operation); @@ -3711,114 +3186,102 @@ int ha_ndbcluster::fetch_next_pushed() * ::index_next_pushed() which unpack and set correct status for each row. 
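fetch_next() above depends on a three-way nextResult() contract inferred from its use: 0 means a row is available from the locally cached batch, 2 means the cache is exhausted and another batch may be fetched, 1 means the scan is complete. A toy cursor honoring that contract (the batching and row values are invented):

#include <cassert>
#include <queue>

struct CursorSketch {
  std::queue<int> cache;  // rows already received from the data nodes
  int batches_left = 0;   // how many more round trips would succeed

  int nextResult(int *row, bool fetch_allowed) {
    if (!cache.empty()) {  // 0: serve from the local batch cache
      *row = cache.front();
      cache.pop();
      return 0;
    }
    if (batches_left == 0) return 1;  // 1: no more records
    if (!fetch_allowed) return 2;     // 2: caller must flush pending ops first
    --batches_left;
    cache.push(42);                   // simulate fetching one more batch
    return nextResult(row, fetch_allowed);
  }
};

int main() {
  CursorSketch c;
  c.batches_left = 2;
  int row = 0, rows = 0, rc;
  while ((rc = c.nextResult(&row, true)) == 0) ++rows;
  assert(rc == 1 && rows == 2);
  return 0;
}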
*/ NdbQuery::NextResultOutcome result; - while ((result= m_pushed_operation->nextResult(true, m_thd_ndb->m_force_send)) - == NdbQuery::NextResult_gotRow) - { - DBUG_ASSERT(m_next_row!=NULL); - DBUG_PRINT("info", ("One more record found")); - const int ignore = unpack_record_and_set_generated_fields(table->record[0], - m_next_row); -// m_thd_ndb->m_pushed_reads++; - if (likely(!ignore)) - { + while ((result = m_pushed_operation->nextResult( + true, m_thd_ndb->m_force_send)) == NdbQuery::NextResult_gotRow) { + DBUG_ASSERT(m_next_row != NULL); + DBUG_PRINT("info", ("One more record found")); + const int ignore = + unpack_record_and_set_generated_fields(table->record[0], m_next_row); + // m_thd_ndb->m_pushed_reads++; + if (likely(!ignore)) { DBUG_RETURN(NdbQuery::NextResult_gotRow); } } - if (likely(result == NdbQuery::NextResult_scanComplete)) - { - DBUG_ASSERT(m_next_row==NULL); + if (likely(result == NdbQuery::NextResult_scanComplete)) { + DBUG_ASSERT(m_next_row == NULL); DBUG_PRINT("info", ("No more records")); -// m_thd_ndb->m_pushed_reads++; + // m_thd_ndb->m_pushed_reads++; DBUG_RETURN(result); } DBUG_PRINT("info", ("Error from 'nextResult()'")); DBUG_RETURN(ndb_err(m_thd_ndb->trans)); } - /** - Get the first record from an indexed table access being a child + Get the first record from an indexed table access being a child operation in a pushed join. Fetch will be from prefetched - cached records which are materialized into the bound buffer - areas as result of this call. + cached records which are materialized into the bound buffer + areas as result of this call. */ -int -ha_ndbcluster::index_read_pushed(uchar *buf, const uchar *key, - key_part_map keypart_map) -{ +int ha_ndbcluster::index_read_pushed(uchar *buf, const uchar *key, + key_part_map keypart_map) { DBUG_ENTER("index_read_pushed"); - // Handler might have decided to not execute the pushed joins which has been prepared - // In this case we do an unpushed index_read based on 'Plain old' NdbOperations - if (unlikely(!check_is_pushed())) - { - int res= index_read_map(buf, key, keypart_map, HA_READ_KEY_EXACT); + // Handler might have decided to not execute the pushed joins which has been + // prepared In this case we do an unpushed index_read based on 'Plain old' + // NdbOperations + if (unlikely(!check_is_pushed())) { + int res = index_read_map(buf, key, keypart_map, HA_READ_KEY_EXACT); DBUG_RETURN(res); } - DBUG_ASSERT(m_pushed_join_operation>PUSHED_ROOT); // Child of a pushed join - DBUG_ASSERT(m_active_query==nullptr); + DBUG_ASSERT(m_pushed_join_operation > PUSHED_ROOT); // Child of a pushed join + DBUG_ASSERT(m_active_query == nullptr); - // Might need to re-establish first result row (wrt. its parents which may have been navigated) - NdbQuery::NextResultOutcome result= m_pushed_operation->firstResult(); + // Might need to re-establish first result row (wrt. 
its parents which may + have been navigated) + NdbQuery::NextResultOutcome result = m_pushed_operation->firstResult(); // Result from pushed operation will be referred by 'm_next_row' if non-NULL - if (result == NdbQuery::NextResult_gotRow) - { - DBUG_ASSERT(m_next_row!=NULL); + if (result == NdbQuery::NextResult_gotRow) { + DBUG_ASSERT(m_next_row != NULL); const int ignore = unpack_record_and_set_generated_fields(buf, m_next_row); m_thd_ndb->m_pushed_reads++; - if (unlikely(ignore)) - { + if (unlikely(ignore)) { DBUG_RETURN(index_next_pushed(buf)); } DBUG_RETURN(0); } - DBUG_ASSERT(result!=NdbQuery::NextResult_gotRow); + DBUG_ASSERT(result != NdbQuery::NextResult_gotRow); DBUG_PRINT("info", ("No record found")); DBUG_RETURN(HA_ERR_END_OF_FILE); } - /** - Get the next record from an indexes table access being a child + Get the next record from an indexes table access being a child operation in a pushed join. Fetch will be from prefetched - cached records which are materialized into the bound buffer - areas as result of this call. + cached records which are materialized into the bound buffer + areas as result of this call. */ -int ha_ndbcluster::index_next_pushed(uchar *buf) -{ +int ha_ndbcluster::index_next_pushed(uchar *buf) { DBUG_ENTER("index_next_pushed"); - // Handler might have decided to not execute the pushed joins which has been prepared - // In this case we do an unpushed index_read based on 'Plain old' NdbOperations - if (unlikely(!check_is_pushed())) - { - int res= index_next(buf); + // Handler might have decided to not execute the pushed joins which has been + // prepared In this case we do an unpushed index_read based on 'Plain old' + // NdbOperations + if (unlikely(!check_is_pushed())) { + int res = index_next(buf); DBUG_RETURN(res); } - DBUG_ASSERT(m_pushed_join_operation>PUSHED_ROOT); // Child of a pushed join - DBUG_ASSERT(m_active_query==nullptr); + DBUG_ASSERT(m_pushed_join_operation > PUSHED_ROOT); // Child of a pushed join + DBUG_ASSERT(m_active_query == nullptr); int res = fetch_next_pushed(); - if (res == NdbQuery::NextResult_gotRow) - { - DBUG_ASSERT(pushed_cond == nullptr || const_cast<Item *>(pushed_cond)->val_int()); + if (res == NdbQuery::NextResult_gotRow) { + DBUG_ASSERT(pushed_cond == nullptr || + const_cast<Item *>(pushed_cond)->val_int()); DBUG_RETURN(0); - } - else if (res == NdbQuery::NextResult_scanComplete) - { + } else if (res == NdbQuery::NextResult_scanComplete) { DBUG_RETURN(HA_ERR_END_OF_FILE); } DBUG_RETURN(ndb_err(m_thd_ndb->trans)); } - /** Get the next record of a started scan. Try to fetch - it locally from NdbApi cached records if possible, + it locally from NdbApi cached records if possible, otherwise ask NDB for more. @note @@ -3826,43 +3289,35 @@ int ha_ndbcluster::index_next_pushed(uchar *buf) NDB before any pending ops have been sent to NDB.
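next_result() below and the pushed-join readers above fold NdbQuery's three outcomes into handler return codes the same way each time. The mapping on its own; the numeric codes are placeholders for the real HA_ERR_* and NDB error values:

#include <cassert>

enum OutcomeSketch { GotRow, ScanComplete, QueryError };

constexpr int kEndOfFile = 137;  // placeholder for HA_ERR_END_OF_FILE
constexpr int kNdbError = -1;    // placeholder for ndb_err(trans)

static int map_outcome(OutcomeSketch o) {
  if (o == GotRow) return 0;                 // row unpacked, success
  if (o == ScanComplete) return kEndOfFile;  // scan exhausted
  return kNdbError;                          // propagate NDB error
}

int main() {
  assert(map_outcome(GotRow) == 0);
  assert(map_outcome(ScanComplete) == kEndOfFile);
  return 0;
}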
*/ -inline int ha_ndbcluster::next_result(uchar *buf) -{ +inline int ha_ndbcluster::next_result(uchar *buf) { int res; DBUG_ENTER("next_result"); - - if (m_active_cursor) - { - while ((res= fetch_next(m_active_cursor)) == 0) - { - DBUG_PRINT("info", ("One more record found")); + + if (m_active_cursor) { + while ((res = fetch_next(m_active_cursor)) == 0) { + DBUG_PRINT("info", ("One more record found")); const int ignore = unpack_record(buf, m_next_row); - if (likely(!ignore)) - { - DBUG_ASSERT(pushed_cond == nullptr || const_cast<Item *>(pushed_cond)->val_int()); - DBUG_RETURN(0); //Found a row + if (likely(!ignore)) { + DBUG_ASSERT(pushed_cond == nullptr || + const_cast<Item *>(pushed_cond)->val_int()); + DBUG_RETURN(0); // Found a row } } // No rows found, or error - if (res == 1) - { + if (res == 1) { // No more records DBUG_PRINT("info", ("No more records")); DBUG_RETURN(HA_ERR_END_OF_FILE); } DBUG_RETURN(ndb_err(m_thd_ndb->trans)); - } - else if (m_active_query) - { - res= fetch_next_pushed(); - if (res == NdbQuery::NextResult_gotRow) - { - DBUG_ASSERT(pushed_cond == nullptr || const_cast<Item *>(pushed_cond)->val_int()); - DBUG_RETURN(0); //Found a row - } - else if (res == NdbQuery::NextResult_scanComplete) - { + } else if (m_active_query) { + res = fetch_next_pushed(); + if (res == NdbQuery::NextResult_gotRow) { + DBUG_ASSERT(pushed_cond == nullptr || + const_cast<Item *>(pushed_cond)->val_int()); + DBUG_RETURN(0); // Found a row + } else if (res == NdbQuery::NextResult_scanComplete) { DBUG_RETURN(HA_ERR_END_OF_FILE); } DBUG_RETURN(ndb_err(m_thd_ndb->trans)); @@ -3870,49 +3325,40 @@ inline int ha_ndbcluster::next_result(uchar *buf) DBUG_RETURN(HA_ERR_END_OF_FILE); } -int -ha_ndbcluster::log_exclusive_read(const NdbRecord *key_rec, - const uchar *key, - uchar *buf, - Uint32 *ppartition_id) -{ +int ha_ndbcluster::log_exclusive_read(const NdbRecord *key_rec, + const uchar *key, uchar *buf, + Uint32 *ppartition_id) { DBUG_ENTER("log_exclusive_read"); NdbOperation::OperationOptions opts; - opts.optionsPresent= - NdbOperation::OperationOptions::OO_ABORTOPTION | - NdbOperation::OperationOptions::OO_ANYVALUE; - + opts.optionsPresent = NdbOperation::OperationOptions::OO_ABORTOPTION | + NdbOperation::OperationOptions::OO_ANYVALUE; + /* If the key does not exist, that is ok */ - opts.abortOption= NdbOperation::AO_IgnoreError; - - /* + opts.abortOption = NdbOperation::AO_IgnoreError; + + /* Mark the AnyValue as a read operation, so that the update is processed */ - opts.anyValue= 0; + opts.anyValue = 0; ndbcluster_anyvalue_set_read_op(opts.anyValue); - if (ppartition_id != NULL) - { + if (ppartition_id != NULL) { assert(m_user_defined_partitioning); - opts.optionsPresent|= NdbOperation::OperationOptions::OO_PARTITION_ID; - opts.partitionId= *ppartition_id; - } - - const NdbOperation* markingOp= - m_thd_ndb->trans->updateTuple(key_rec, - (const char*) key, - m_ndb_record, - (char*)buf, - empty_mask, - &opts, - opts.size()); - if (!markingOp) - { + opts.optionsPresent |= NdbOperation::OperationOptions::OO_PARTITION_ID; + opts.partitionId = *ppartition_id; + } + + const NdbOperation *markingOp = m_thd_ndb->trans->updateTuple( + key_rec, (const char *)key, m_ndb_record, (char *)buf, empty_mask, &opts, + opts.size()); + if (!markingOp) { char msg[FN_REFLEN]; - snprintf(msg, sizeof(msg), "Error logging exclusive reads, failed creating markingOp, %u, %s\n", - m_thd_ndb->trans->getNdbError().code, - m_thd_ndb->trans->getNdbError().message); + snprintf( + msg, sizeof(msg), + "Error logging exclusive reads, failed creating markingOp,
%u, %s\n", + m_thd_ndb->trans->getNdbError().code, + m_thd_ndb->trans->getNdbError().message); push_warning_printf(current_thd, Sql_condition::SL_WARNING, ER_EXCEPTIONS_WRITE_ERROR, ER_THD(current_thd, ER_EXCEPTIONS_WRITE_ERROR), msg); @@ -3926,32 +3372,29 @@ ha_ndbcluster::log_exclusive_read(const NdbRecord *key_rec, DBUG_RETURN(0); } -int -ha_ndbcluster::scan_log_exclusive_read(NdbScanOperation *cursor, - NdbTransaction *trans) -{ +int ha_ndbcluster::scan_log_exclusive_read(NdbScanOperation *cursor, + NdbTransaction *trans) { DBUG_ENTER("ha_ndbcluster::scan_log_exclusive_read"); NdbOperation::OperationOptions opts; - opts.optionsPresent= NdbOperation::OperationOptions::OO_ANYVALUE; + opts.optionsPresent = NdbOperation::OperationOptions::OO_ANYVALUE; - /* + /* Mark the AnyValue as a read operation, so that the update is processed */ - opts.anyValue= 0; + opts.anyValue = 0; ndbcluster_anyvalue_set_read_op(opts.anyValue); - - const NdbOperation* markingOp= - cursor->updateCurrentTuple(trans, m_ndb_record, - dummy_row, empty_mask, - &opts, - sizeof(NdbOperation::OperationOptions)); - if (markingOp == NULL) - { + + const NdbOperation *markingOp = + cursor->updateCurrentTuple(trans, m_ndb_record, dummy_row, empty_mask, + &opts, sizeof(NdbOperation::OperationOptions)); + if (markingOp == NULL) { char msg[FN_REFLEN]; - snprintf(msg, sizeof(msg), "Error logging exclusive reads during scan, failed creating markingOp, %u, %s\n", - m_thd_ndb->trans->getNdbError().code, - m_thd_ndb->trans->getNdbError().message); + snprintf(msg, sizeof(msg), + "Error logging exclusive reads during scan, failed creating " + "markingOp, %u, %s\n", + m_thd_ndb->trans->getNdbError().code, + m_thd_ndb->trans->getNdbError().message); push_warning_printf(current_thd, Sql_condition::SL_WARNING, ER_EXCEPTIONS_WRITE_ERROR, ER_THD(current_thd, ER_EXCEPTIONS_WRITE_ERROR), msg); @@ -3965,47 +3408,42 @@ ha_ndbcluster::scan_log_exclusive_read(NdbScanOperation *cursor, Do a primary key or unique key index read operation. The key value is taken from a buffer in mysqld key format. */ -const NdbOperation * -ha_ndbcluster::pk_unique_index_read_key(uint idx, const uchar *key, uchar *buf, - NdbOperation::LockMode lm, - Uint32 *ppartition_id) -{ +const NdbOperation *ha_ndbcluster::pk_unique_index_read_key( + uint idx, const uchar *key, uchar *buf, NdbOperation::LockMode lm, + Uint32 *ppartition_id) { DBUG_ENTER("pk_unique_index_read_key"); const NdbOperation *op; const NdbRecord *key_rec; NdbOperation::OperationOptions options; NdbOperation::OperationOptions *poptions = NULL; - options.optionsPresent= 0; + options.optionsPresent = 0; NdbOperation::GetValueSpec gets[2]; - const NDB_INDEX_TYPE idx_type= - (idx != MAX_KEY)? - get_index_type(idx) - : UNDEFINED_INDEX; + const NDB_INDEX_TYPE idx_type = + (idx != MAX_KEY) ? get_index_type(idx) : UNDEFINED_INDEX; DBUG_ASSERT(m_thd_ndb->trans); - DBUG_PRINT("info", ("pk_unique_index_read_key of table %s", table->s->table_name.str)); + DBUG_PRINT("info", ("pk_unique_index_read_key of table %s", + table->s->table_name.str)); if (idx != MAX_KEY) - key_rec= m_index[idx].ndb_unique_record_key; + key_rec = m_index[idx].ndb_unique_record_key; else - key_rec= m_ndb_hidden_key_record; + key_rec = m_ndb_hidden_key_record; /* Initialize the null bitmap, setting unused null bits to 1. 
*/ memset(buf, 0xff, table->s->null_bytes); - if (table_share->primary_key == MAX_KEY) - { + if (table_share->primary_key == MAX_KEY) { get_hidden_fields_keyop(&options, gets); - poptions= &options; + poptions = &options; } - if (ppartition_id != NULL) - { + if (ppartition_id != NULL) { assert(m_user_defined_partitioning); - options.optionsPresent|= NdbOperation::OperationOptions::OO_PARTITION_ID; - options.partitionId= *ppartition_id; - poptions= &options; + options.optionsPresent |= NdbOperation::OperationOptions::OO_PARTITION_ID; + options.partitionId = *ppartition_id; + poptions = &options; } /* @@ -4016,11 +3454,10 @@ ha_ndbcluster::pk_unique_index_read_key(uint idx, const uchar *key, uchar *buf, m_cond.set_condition(pushed_cond); get_read_set(false, idx); - op= m_thd_ndb->trans->readTuple(key_rec, (const char *)key, m_ndb_record, - (char *)buf, lm, - m_table_map->get_column_mask(table->read_set), - poptions, - sizeof(NdbOperation::OperationOptions)); + op = m_thd_ndb->trans->readTuple( + key_rec, (const char *)key, m_ndb_record, (char *)buf, lm, + m_table_map->get_column_mask(table->read_set), poptions, + sizeof(NdbOperation::OperationOptions)); if (uses_blob_value(table->read_set) && get_blob_values(op, buf, table->read_set) != 0) @@ -4040,14 +3477,12 @@ ha_ndbcluster::pk_unique_index_read_key(uint idx, const uchar *key, uchar *buf, (idx_type == PRIMARY_KEY_INDEX || idx_type == PRIMARY_KEY_ORDERED_INDEX || idx_type == UNIQUE_ORDERED_INDEX || - idx_type == UNIQUE_INDEX) + idx_type == UNIQUE_INDEX) since this method is only invoked for primary or unique indexes, but we do need to check if it was a hidden primary key. */ - idx_type != UNDEFINED_INDEX && - THDVAR(current_thd, log_exclusive_reads)) - { + idx_type != UNDEFINED_INDEX && THDVAR(current_thd, log_exclusive_reads)) { if (log_exclusive_read(key_rec, key, buf, ppartition_id) != 0) DBUG_RETURN(NULL); } @@ -4055,38 +3490,30 @@ ha_ndbcluster::pk_unique_index_read_key(uint idx, const uchar *key, uchar *buf, DBUG_RETURN(op); } - -static -bool -is_shrinked_varchar(const Field *field) -{ - if (field->real_type() == MYSQL_TYPE_VARCHAR) - { - if (down_cast<const Field_varstring *>(field)->length_bytes == 1) +static bool is_shrinked_varchar(const Field *field) { + if (field->real_type() == MYSQL_TYPE_VARCHAR) { + if (down_cast<const Field_varstring *>(field)->length_bytes == 1) return true; } return false; } -int -ha_ndbcluster::pk_unique_index_read_key_pushed(uint idx, const uchar *key) -{ +int ha_ndbcluster::pk_unique_index_read_key_pushed(uint idx, const uchar *key) { DBUG_ENTER("pk_unique_index_read_key_pushed"); DBUG_ASSERT(m_thd_ndb->trans); DBUG_ASSERT(idx < MAX_KEY); - if (m_active_query) - { + if (m_active_query) { m_active_query->close(false); - m_active_query= NULL; + m_active_query = NULL; } - KEY *key_def= &table->key_info[idx]; + KEY *key_def = &table->key_info[idx]; KEY_PART_INFO *key_part; uint i; - Uint32 offset= 0; + Uint32 offset = 0; NdbQueryParamValue paramValues[ndb_pushed_join::MAX_KEY_PART]; DBUG_ASSERT(key_def->user_defined_key_parts <= ndb_pushed_join::MAX_KEY_PART); @@ -4094,54 +3521,47 @@ ha_ndbcluster::pk_unique_index_read_key_pushed(uint idx, const uchar *key) ndbcluster_build_key_map(m_table, m_index[idx], &table->key_info[idx], map); // Bind key values defining root of pushed join - for (i = 0, key_part= key_def->key_part; i < key_def->user_defined_key_parts; i++, key_part++) - { - bool shrinkVarChar= is_shrinked_varchar(key_part->field); + for (i = 0, key_part = key_def->key_part; i < key_def->user_defined_key_parts; + i++, key_part++) { + bool
shrinkVarChar = is_shrinked_varchar(key_part->field); - if (key_part->null_bit) // Column is nullable + if (key_part->null_bit) // Column is nullable { - DBUG_ASSERT(idx != table_share->primary_key); // PK can't be nullable - DBUG_ASSERT(*(key+offset)==0); // Null values not allowed in key - // Value is imm. after NULL indicator - paramValues[map[i]]= NdbQueryParamValue(key+offset+1,shrinkVarChar); - } - else // Non-nullable column + DBUG_ASSERT(idx != table_share->primary_key); // PK can't be nullable + DBUG_ASSERT(*(key + offset) == 0); // Null values not allowed in key + // Value is imm. after NULL indicator + paramValues[map[i]] = NdbQueryParamValue(key + offset + 1, shrinkVarChar); + } else // Non-nullable column { - paramValues[map[i]]= NdbQueryParamValue(key+offset,shrinkVarChar); + paramValues[map[i]] = NdbQueryParamValue(key + offset, shrinkVarChar); } - offset+= key_part->store_length; + offset += key_part->store_length; } - const int ret= create_pushed_join(paramValues, key_def->user_defined_key_parts); + const int ret = + create_pushed_join(paramValues, key_def->user_defined_key_parts); DBUG_RETURN(ret); } - /** Count number of columns in key part. */ -static uint -count_key_columns(const KEY *key_info, const key_range *key) -{ - KEY_PART_INFO *first_key_part= key_info->key_part; - KEY_PART_INFO *key_part_end= first_key_part + key_info->user_defined_key_parts; +static uint count_key_columns(const KEY *key_info, const key_range *key) { + KEY_PART_INFO *first_key_part = key_info->key_part; + KEY_PART_INFO *key_part_end = + first_key_part + key_info->user_defined_key_parts; KEY_PART_INFO *key_part; - uint length= 0; - for(key_part= first_key_part; key_part < key_part_end; key_part++) - { - if (length >= key->length) - break; - length+= key_part->store_length; + uint length = 0; + for (key_part = first_key_part; key_part < key_part_end; key_part++) { + if (length >= key->length) break; + length += key_part->store_length; } return (uint)(key_part - first_key_part); } /* Helper method to compute NDB index bounds. Note: does not set range_no. */ /* Stats queries may differ so add "from" 0:normal 1:RIR 2:RPK. */ -void -compute_index_bounds(NdbIndexScanOperation::IndexBound & bound, - const KEY *key_info, - const key_range *start_key, const key_range *end_key, - int from) -{ +void compute_index_bounds(NdbIndexScanOperation::IndexBound &bound, + const KEY *key_info, const key_range *start_key, + const key_range *end_key, int from) { DBUG_ENTER("ha_ndbcluster::compute_index_bounds"); DBUG_PRINT("info", ("from: %d", from)); @@ -4149,78 +3569,64 @@ compute_index_bounds(NdbIndexScanOperation::IndexBound & bound, DBUG_PRINT("info", ("key parts: %u length: %u", key_info->user_defined_key_parts, key_info->key_length)); { - for (uint j= 0; j <= 1; j++) - { - const key_range* kr= (j == 0 ? start_key : end_key); - if (kr) - { - DBUG_PRINT("info", ("key range %u: length: %u map: %lx flag: %d", - j, kr->length, kr->keypart_map, kr->flag)); + for (uint j = 0; j <= 1; j++) { + const key_range *kr = (j == 0 ? 
start_key : end_key); + if (kr) { + DBUG_PRINT("info", ("key range %u: length: %u map: %lx flag: %d", j, + kr->length, kr->keypart_map, kr->flag)); DBUG_DUMP("key", kr->key, kr->length); - } - else - { + } else { DBUG_PRINT("info", ("key range %u: none", j)); } } } #endif - if (start_key) - { - bound.low_key= (const char*)start_key->key; - bound.low_key_count= count_key_columns(key_info, start_key); - bound.low_inclusive= - start_key->flag != HA_READ_AFTER_KEY && - start_key->flag != HA_READ_BEFORE_KEY; - } - else - { - bound.low_key= NULL; - bound.low_key_count= 0; + if (start_key) { + bound.low_key = (const char *)start_key->key; + bound.low_key_count = count_key_columns(key_info, start_key); + bound.low_inclusive = start_key->flag != HA_READ_AFTER_KEY && + start_key->flag != HA_READ_BEFORE_KEY; + } else { + bound.low_key = NULL; + bound.low_key_count = 0; } /* RIR query for x >= 1 inexplicably passes HA_READ_KEY_EXACT. */ if (start_key && (start_key->flag == HA_READ_KEY_EXACT || start_key->flag == HA_READ_PREFIX_LAST) && - from != 1) - { - bound.high_key= bound.low_key; - bound.high_key_count= bound.low_key_count; - bound.high_inclusive= true; - } - else if (end_key) - { - bound.high_key= (const char*)end_key->key; - bound.high_key_count= count_key_columns(key_info, end_key); + from != 1) { + bound.high_key = bound.low_key; + bound.high_key_count = bound.low_key_count; + bound.high_inclusive = true; + } else if (end_key) { + bound.high_key = (const char *)end_key->key; + bound.high_key_count = count_key_columns(key_info, end_key); /* For some reason, 'where b >= 1 and b <= 3' uses HA_READ_AFTER_KEY for the end_key. So HA_READ_AFTER_KEY in end_key sets high_inclusive, even though in start_key it does not set low_inclusive. */ - bound.high_inclusive= end_key->flag != HA_READ_BEFORE_KEY; + bound.high_inclusive = end_key->flag != HA_READ_BEFORE_KEY; if (end_key->flag == HA_READ_KEY_EXACT || - end_key->flag == HA_READ_PREFIX_LAST) - { - bound.low_key= bound.high_key; - bound.low_key_count= bound.high_key_count; - bound.low_inclusive= true; - } - } - else - { - bound.high_key= NULL; - bound.high_key_count= 0; - } - DBUG_PRINT("info", ("start_flag=%d end_flag=%d" - " lo_keys=%d lo_incl=%d hi_keys=%d hi_incl=%d", - start_key?start_key->flag:0, end_key?end_key->flag:0, - bound.low_key_count, - bound.low_key_count?bound.low_inclusive:0, - bound.high_key_count, - bound.high_key_count?bound.high_inclusive:0)); + end_key->flag == HA_READ_PREFIX_LAST) { + bound.low_key = bound.high_key; + bound.low_key_count = bound.high_key_count; + bound.low_inclusive = true; + } + } else { + bound.high_key = NULL; + bound.high_key_count = 0; + } + DBUG_PRINT( + "info", + ("start_flag=%d end_flag=%d" + " lo_keys=%d lo_incl=%d hi_keys=%d hi_incl=%d", + start_key ? start_key->flag : 0, end_key ? end_key->flag : 0, + bound.low_key_count, bound.low_key_count ? bound.low_inclusive : 0, + bound.high_key_count, bound.high_key_count ? 
bound.high_inclusive : 0)); DBUG_VOID_RETURN; } @@ -4229,66 +3635,58 @@ compute_index_bounds(NdbIndexScanOperation::IndexBound & bound, */ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, - const key_range *end_key, - bool sorted, bool descending, - uchar* buf, part_id_range *part_spec) -{ + const key_range *end_key, bool sorted, + bool descending, uchar *buf, + part_id_range *part_spec) { NdbTransaction *trans; NdbIndexScanOperation *op; int error; DBUG_ENTER("ha_ndbcluster::ordered_index_scan"); - DBUG_PRINT("enter", ("index: %u, sorted: %d, descending: %d read_set=0x%x", - active_index, sorted, descending, table->read_set->bitmap[0])); + DBUG_PRINT("enter", + ("index: %u, sorted: %d, descending: %d read_set=0x%x", + active_index, sorted, descending, table->read_set->bitmap[0])); DBUG_PRINT("enter", ("Starting new ordered scan on %s", m_tabname)); - if (unlikely(!(trans= get_transaction(error)))) - { + if (unlikely(!(trans = get_transaction(error)))) { DBUG_RETURN(error); } - if ((error= close_scan())) - DBUG_RETURN(error); + if ((error = close_scan())) DBUG_RETURN(error); const NdbOperation::LockMode lm = get_ndb_lock_mode(m_lock.type); - const NdbRecord *key_rec= m_index[active_index].ndb_record_key; - const NdbRecord *row_rec= m_ndb_record; + const NdbRecord *key_rec = m_index[active_index].ndb_record_key; + const NdbRecord *row_rec = m_ndb_record; NdbIndexScanOperation::IndexBound bound; NdbIndexScanOperation::IndexBound *pbound = NULL; - if (start_key != NULL || end_key != NULL) - { - /* + if (start_key != NULL || end_key != NULL) { + /* Compute bounds info, reversing range boundaries if descending */ - compute_index_bounds(bound, - table->key_info + active_index, - (descending? - end_key : start_key), - (descending? - start_key : end_key), - 0); + compute_index_bounds(bound, table->key_info + active_index, + (descending ? end_key : start_key), + (descending ? start_key : end_key), 0); bound.range_no = 0; pbound = &bound; } - if (check_if_pushable(NdbQueryOperationDef::OrderedIndexScan, active_index)) - { - const int error= create_pushed_join(); - if (unlikely(error)) - DBUG_RETURN(error); + if (check_if_pushable(NdbQueryOperationDef::OrderedIndexScan, active_index)) { + const int error = create_pushed_join(); + if (unlikely(error)) DBUG_RETURN(error); - NdbQuery* const query= m_active_query; - if (sorted && query->getQueryOperation((uint)PUSHED_ROOT) - ->setOrdering(descending ? NdbQueryOptions::ScanOrdering_descending - : NdbQueryOptions::ScanOrdering_ascending)) - { + NdbQuery *const query = m_active_query; + if (sorted && + query->getQueryOperation((uint)PUSHED_ROOT) + ->setOrdering(descending + ? 
NdbQueryOptions::ScanOrdering_descending + : NdbQueryOptions::ScanOrdering_ascending)) { ERR_RETURN(query->getNdbError()); } - if (pbound && query->setBound(key_rec, pbound)!=0) + if (pbound && query->setBound(key_rec, pbound) != 0) ERR_RETURN(query->getNdbError()); m_thd_ndb->m_scan_count++; @@ -4296,40 +3694,33 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, bool prunable = false; if (unlikely(query->isPrunable(prunable) != 0)) ERR_RETURN(query->getNdbError()); - if (prunable) - m_thd_ndb->m_pruned_scan_count++; + if (prunable) m_thd_ndb->m_pruned_scan_count++; // Can't have BLOB in pushed joins (yet) DBUG_ASSERT(!uses_blob_value(table->read_set)); - } - else - { - if (m_pushed_join_operation == PUSHED_ROOT) - { + } else { + if (m_pushed_join_operation == PUSHED_ROOT) { m_thd_ndb->m_pushed_queries_dropped++; } NdbScanOperation::ScanOptions options; - options.optionsPresent=NdbScanOperation::ScanOptions::SO_SCANFLAGS; - options.scan_flags=0; + options.optionsPresent = NdbScanOperation::ScanOptions::SO_SCANFLAGS; + options.scan_flags = 0; NdbOperation::GetValueSpec gets[2]; if (table_share->primary_key == MAX_KEY) get_hidden_fields_scan(&options, gets); if (lm == NdbOperation::LM_Read) - options.scan_flags|= NdbScanOperation::SF_KeyInfo; - if (sorted) - options.scan_flags|= NdbScanOperation::SF_OrderByFull; - if (descending) - options.scan_flags|= NdbScanOperation::SF_Descending; + options.scan_flags |= NdbScanOperation::SF_KeyInfo; + if (sorted) options.scan_flags |= NdbScanOperation::SF_OrderByFull; + if (descending) options.scan_flags |= NdbScanOperation::SF_Descending; /* Partition pruning */ - if (m_use_partition_pruning && - m_user_defined_partitioning && part_spec != NULL && - part_spec->start_part == part_spec->end_part) - { - /* Explicitly set partition id when pruning User-defined partitioned scan */ + if (m_use_partition_pruning && m_user_defined_partitioning && + part_spec != NULL && part_spec->start_part == part_spec->end_part) { + /* Explicitly set partition id when pruning User-defined partitioned scan + */ options.partitionId = part_spec->start_part; options.optionsPresent |= NdbScanOperation::ScanOptions::SO_PARTITION_ID; } @@ -4338,53 +3729,45 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, generate_scan_filter(&code, &options); get_read_set(true, active_index); - if (!(op= trans->scanIndex(key_rec, row_rec, lm, - m_table_map->get_column_mask(table->read_set), - pbound, - &options, - sizeof(NdbScanOperation::ScanOptions)))) + if (!(op = trans->scanIndex(key_rec, row_rec, lm, + m_table_map->get_column_mask(table->read_set), + pbound, &options, + sizeof(NdbScanOperation::ScanOptions)))) ERR_RETURN(trans->getNdbError()); - DBUG_PRINT("info", ("Is scan pruned to 1 partition? : %u", op->getPruned())); + DBUG_PRINT("info", + ("Is scan pruned to 1 partition? : %u", op->getPruned())); m_thd_ndb->m_scan_count++; - m_thd_ndb->m_pruned_scan_count += (op->getPruned()? 1 : 0); + m_thd_ndb->m_pruned_scan_count += (op->getPruned() ? 
1 : 0); if (uses_blob_value(table->read_set) && get_blob_values(op, NULL, table->read_set) != 0) ERR_RETURN(op->getNdbError()); - m_active_cursor= op; + m_active_cursor = op; } - if (sorted) - { + if (sorted) { m_thd_ndb->m_sorted_scan_count++; } if (execute_no_commit(m_thd_ndb, trans, m_ignore_no_key) != 0) DBUG_RETURN(ndb_err(trans)); - + DBUG_RETURN(next_result(buf)); } -static -int -guess_scan_flags(NdbOperation::LockMode lm, Ndb_table_map * table_map, - const NDBTAB* tab, const MY_BITMAP* readset) -{ - int flags= 0; - flags|= (lm == NdbOperation::LM_Read) ? NdbScanOperation::SF_KeyInfo : 0; - if (tab->checkColumns(0, 0) & 2) - { - const Uint32 * colmap = (const Uint32 *) table_map->get_column_mask(readset); +static int guess_scan_flags(NdbOperation::LockMode lm, Ndb_table_map *table_map, + const NDBTAB *tab, const MY_BITMAP *readset) { + int flags = 0; + flags |= (lm == NdbOperation::LM_Read) ? NdbScanOperation::SF_KeyInfo : 0; + if (tab->checkColumns(0, 0) & 2) { + const Uint32 *colmap = (const Uint32 *)table_map->get_column_mask(readset); int ret = tab->checkColumns(colmap, no_bytes_in_map(readset)); - - if (ret & 2) - { // If disk columns...use disk scan + + if (ret & 2) { // If disk columns...use disk scan flags |= NdbScanOperation::SF_DiskScan; - } - else if ((ret & 4) == 0 && (lm == NdbOperation::LM_Exclusive)) - { + } else if ((ret & 4) == 0 && (lm == NdbOperation::LM_Exclusive)) { // If no mem column is set and exclusive...guess disk scan flags |= NdbScanOperation::SF_DiskScan; } @@ -4396,25 +3779,22 @@ guess_scan_flags(NdbOperation::LockMode lm, Ndb_table_map * table_map, Start full table scan in NDB or unique index scan */ -int ha_ndbcluster::full_table_scan(const KEY* key_info, +int ha_ndbcluster::full_table_scan(const KEY *key_info, const key_range *start_key, - const key_range *end_key, - uchar *buf) -{ + const key_range *end_key, uchar *buf) { int error; - NdbTransaction *trans= m_thd_ndb->trans; + NdbTransaction *trans = m_thd_ndb->trans; part_id_range part_spec; - bool use_set_part_id= false; + bool use_set_part_id = false; NdbOperation::GetValueSpec gets[2]; - DBUG_ENTER("full_table_scan"); + DBUG_ENTER("full_table_scan"); DBUG_PRINT("enter", ("Starting new scan on %s", m_tabname)); - if (m_use_partition_pruning && m_user_defined_partitioning) - { + if (m_use_partition_pruning && m_user_defined_partitioning) { DBUG_ASSERT(m_pushed_join_operation != PUSHED_ROOT); - part_spec.start_part= 0; - part_spec.end_part= m_part_info->get_tot_partitions() - 1; + part_spec.start_part = 0; + part_spec.end_part = m_part_info->get_tot_partitions() - 1; prune_partition_set(table, &part_spec); DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u", part_spec.start_part, part_spec.end_part)); @@ -4422,139 +3802,120 @@ int ha_ndbcluster::full_table_scan(const KEY* key_info, If partition pruning has found no partition in set we can return HA_ERR_END_OF_FILE */ - if (part_spec.start_part > part_spec.end_part) - { + if (part_spec.start_part > part_spec.end_part) { DBUG_RETURN(HA_ERR_END_OF_FILE); } - if (part_spec.start_part == part_spec.end_part) - { + if (part_spec.start_part == part_spec.end_part) { /* * Only one partition is required to scan, if sorted is required * don't need it anymore since output from one ordered partitioned * index is always sorted. * - * Note : This table scan pruning currently only occurs for + * Note : This table scan pruning currently only occurs for * UserDefined partitioned tables. 
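The pruning performed here reduces to three cases: an empty partition set means end-of-file without touching NDB, a single surviving partition pins the scan to that partition id, and anything else stays a multi-partition scan. A compact sketch of the decision (types invented for illustration):

#include <cassert>
#include <optional>

struct PartRangeSketch { unsigned start_part, end_part; };

// Returns the partition to pin the scan to, if any; *no_rows is set when
// the pruned set is empty and the scan can be skipped entirely.
static std::optional<unsigned> prune_sketch(const PartRangeSketch &r,
                                            bool *no_rows) {
  *no_rows = r.start_part > r.end_part;
  if (!*no_rows && r.start_part == r.end_part) return r.start_part;
  return std::nullopt;
}

int main() {
  bool no_rows = false;
  assert(prune_sketch({3, 3}, &no_rows) == 3u && !no_rows);  // pinned scan
  assert(!prune_sketch({0, 7}, &no_rows) && !no_rows);       // scan all parts
  assert(!prune_sketch({2, 1}, &no_rows) && no_rows);        // nothing to read
  return 0;
}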
* It could be extended to occur for natively partitioned tables if * the Partitioning layer can make a key (e.g. start or end key) - * available so that we can determine the correct pruning in the + * available so that we can determine the correct pruning in the * NDBAPI layer. */ - use_set_part_id= true; + use_set_part_id = true; if (!trans) - if (unlikely(!(trans= get_transaction_part_id(part_spec.start_part, - error)))) + if (unlikely(!( + trans = get_transaction_part_id(part_spec.start_part, error)))) DBUG_RETURN(error); } } if (!trans) - if (unlikely(!(trans= start_transaction(error)))) - DBUG_RETURN(error); + if (unlikely(!(trans = start_transaction(error)))) DBUG_RETURN(error); const NdbOperation::LockMode lm = get_ndb_lock_mode(m_lock.type); NdbScanOperation::ScanOptions options; options.optionsPresent = (NdbScanOperation::ScanOptions::SO_SCANFLAGS | NdbScanOperation::ScanOptions::SO_PARALLEL); - options.scan_flags = guess_scan_flags(lm, m_table_map, m_table, table->read_set); - options.parallel= DEFAULT_PARALLELISM; + options.scan_flags = + guess_scan_flags(lm, m_table_map, m_table, table->read_set); + options.parallel = DEFAULT_PARALLELISM; if (use_set_part_id) { assert(m_user_defined_partitioning); - options.optionsPresent|= NdbScanOperation::ScanOptions::SO_PARTITION_ID; + options.optionsPresent |= NdbScanOperation::ScanOptions::SO_PARTITION_ID; options.partitionId = part_spec.start_part; }; if (table_share->primary_key == MAX_KEY) get_hidden_fields_scan(&options, gets); - if (check_if_pushable(NdbQueryOperationDef::TableScan)) - { - const int error= create_pushed_join(); - if (unlikely(error)) - DBUG_RETURN(error); + if (check_if_pushable(NdbQueryOperationDef::TableScan)) { + const int error = create_pushed_join(); + if (unlikely(error)) DBUG_RETURN(error); m_thd_ndb->m_scan_count++; // Can't have BLOB in pushed joins (yet) DBUG_ASSERT(!uses_blob_value(table->read_set)); - } - else - { - if (m_pushed_join_operation == PUSHED_ROOT) - { + } else { + if (m_pushed_join_operation == PUSHED_ROOT) { m_thd_ndb->m_pushed_queries_dropped++; } NdbScanOperation *op; NdbInterpretedCode code(m_table); - if (!key_info) - { + if (!key_info) { generate_scan_filter(&code, &options); - } - else - { + } else { /* Unique index scan in NDB (full table scan with scan filter) */ DBUG_PRINT("info", ("Starting unique index scan")); - if (generate_scan_filter_with_key(&code, &options, key_info, - start_key, end_key)) + if (generate_scan_filter_with_key(&code, &options, key_info, start_key, + end_key)) ERR_RETURN(code.getNdbError()); } get_read_set(true, MAX_KEY); - if (!(op= trans->scanTable(m_ndb_record, lm, - m_table_map->get_column_mask(table->read_set), - &options, sizeof(NdbScanOperation::ScanOptions)))) + if (!(op = trans->scanTable( + m_ndb_record, lm, m_table_map->get_column_mask(table->read_set), + &options, sizeof(NdbScanOperation::ScanOptions)))) ERR_RETURN(trans->getNdbError()); m_thd_ndb->m_scan_count++; - m_thd_ndb->m_pruned_scan_count += (op->getPruned()? 1 : 0); + m_thd_ndb->m_pruned_scan_count += (op->getPruned() ? 
1 : 0); - DBUG_ASSERT(m_active_cursor==NULL); - m_active_cursor= op; + DBUG_ASSERT(m_active_cursor == NULL); + m_active_cursor = op; if (uses_blob_value(table->read_set) && get_blob_values(op, NULL, table->read_set) != 0) ERR_RETURN(op->getNdbError()); - } // if (check_if_pushable(NdbQueryOperationDef::TableScan)) - + } // if (check_if_pushable(NdbQueryOperationDef::TableScan)) + if (execute_no_commit(m_thd_ndb, trans, m_ignore_no_key) != 0) DBUG_RETURN(ndb_err(trans)); DBUG_PRINT("exit", ("Scan started successfully")); DBUG_RETURN(next_result(buf)); -} // ha_ndbcluster::full_table_scan() +} // ha_ndbcluster::full_table_scan() -int -ha_ndbcluster::set_auto_inc(THD *thd, Field *field) -{ +int ha_ndbcluster::set_auto_inc(THD *thd, Field *field) { DBUG_ENTER("ha_ndbcluster::set_auto_inc"); - bool read_bit= bitmap_is_set(table->read_set, field->field_index); + bool read_bit = bitmap_is_set(table->read_set, field->field_index); bitmap_set_bit(table->read_set, field->field_index); - Uint64 next_val= (Uint64) field->val_int() + 1; - if (!read_bit) - bitmap_clear_bit(table->read_set, field->field_index); + Uint64 next_val = (Uint64)field->val_int() + 1; + if (!read_bit) bitmap_clear_bit(table->read_set, field->field_index); DBUG_RETURN(set_auto_inc_val(thd, next_val)); } -inline -int -ha_ndbcluster::set_auto_inc_val(THD *thd, Uint64 value) -{ - Ndb *ndb= get_ndb(thd); +inline int ha_ndbcluster::set_auto_inc_val(THD *thd, Uint64 value) { + Ndb *ndb = get_ndb(thd); DBUG_ENTER("ha_ndbcluster::set_auto_inc_val"); #ifndef DBUG_OFF char buff[22]; - DBUG_PRINT("info", - ("Trying to set next auto increment value to %s", - llstr(value, buff))); + DBUG_PRINT("info", ("Trying to set next auto increment value to %s", + llstr(value, buff))); #endif { NDB_SHARE::Tuple_id_range_guard g(m_share); - if (ndb->checkUpdateAutoIncrementValue(g.range, value)) - { - if (ndb->setAutoIncrementValue(m_table, g.range, value, true) == -1) - { + if (ndb->checkUpdateAutoIncrementValue(g.range, value)) { + if (ndb->setAutoIncrementValue(m_table, g.range, value, true) == -1) { ERR_RETURN(ndb->getNdbError()); } } @@ -4562,17 +3923,13 @@ ha_ndbcluster::set_auto_inc_val(THD *thd, Uint64 value) DBUG_RETURN(0); } +void ha_ndbcluster::get_read_set(bool use_cursor, + uint idx MY_ATTRIBUTE((unused))) { + const bool is_delete = table->in_use->lex->sql_command == SQLCOM_DELETE || + table->in_use->lex->sql_command == SQLCOM_DELETE_MULTI; -void -ha_ndbcluster::get_read_set(bool use_cursor, uint idx MY_ATTRIBUTE((unused))) -{ - const bool is_delete= - table->in_use->lex->sql_command == SQLCOM_DELETE || - table->in_use->lex->sql_command == SQLCOM_DELETE_MULTI; - - const bool is_update= - table->in_use->lex->sql_command == SQLCOM_UPDATE || - table->in_use->lex->sql_command == SQLCOM_UPDATE_MULTI; + const bool is_update = table->in_use->lex->sql_command == SQLCOM_UPDATE || + table->in_use->lex->sql_command == SQLCOM_UPDATE_MULTI; /** * Any fields referred from an unpushed condition are not guaranteed to @@ -4591,44 +3948,39 @@ ha_ndbcluster::get_read_set(bool use_cursor, uint idx MY_ATTRIBUTE((unused))) m_cond.add_read_set(table, pushed_cond); #endif - if (!is_delete && !is_update) - { + if (!is_delete && !is_update) { return; } - DBUG_ASSERT(use_cursor || - idx == MAX_KEY || - idx == table_share->primary_key || + DBUG_ASSERT(use_cursor || idx == MAX_KEY || idx == table_share->primary_key || table->key_info[idx].flags & HA_NOSAME); /** * It is questionable that we in some cases seem to * do a read even if 'm_read_before_write_removal_used'.
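// [Editor's note: sketch added for illustration; not part of the original patch.]
// The read_set extension pattern get_read_set() applies below, condensed: when
// (part of) the primary key is written, the row is re-inserted, so every column
// that is not being written must be read up front. Uses the same my_bitmap
// helpers as the surrounding code; the helper name and 'scratch' (standing in
// for m_bitmap) are hypothetical.
static void read_all_columns_not_written(MY_BITMAP *read_set,
                                         const MY_BITMAP *write_set,
                                         MY_BITMAP *scratch) {
  bitmap_set_all(scratch);              // start from "every column"
  bitmap_subtract(scratch, write_set);  // drop the columns being overwritten
  bitmap_union(read_set, scratch);      // read all of the remaining ones
}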
- * The usage pattern for this seems to be update/delete + * The usage pattern for this seems to be update/delete * cursors which establish a 'current of' position before * a delete- / updateCurrentTuple(). * Anyway, as 'm_read_before_write_removal_used' we don't * have to add more columns to 'read_set'. * - * FUTURE: Investigate if we could have completely + * FUTURE: Investigate if we could have completely * cleared the 'read_set'. * */ - if (m_read_before_write_removal_used) - { + if (m_read_before_write_removal_used) { return; } /** * If (part of) a primary key is updated, it is executed * as a delete+reinsert. In order to avoid extra read-round trips - * to fetch missing columns required by reinsert: + * to fetch missing columns required by reinsert: * Ensure all columns not being modified (in write_set) * are read prior to ::ndb_pk_update_row(). * All PK columns are also required by ::ndb_delete_row() */ - if (bitmap_is_overlapping(table->write_set, m_pk_bitmap_p)) - { + if (bitmap_is_overlapping(table->write_set, m_pk_bitmap_p)) { DBUG_ASSERT(table_share->primary_key != MAX_KEY); bitmap_set_all(&m_bitmap); bitmap_subtract(&m_bitmap, table->write_set); @@ -4637,7 +3989,7 @@ ha_ndbcluster::get_read_set(bool use_cursor, uint idx MY_ATTRIBUTE((unused))) } /** - * Determine whether we have to read PK columns in + * Determine whether we have to read PK columns in * addition to those columns already present in read_set. * NOTE: As checked above, it is a precondition that * a read is required as part of delete/update @@ -4651,20 +4003,19 @@ ha_ndbcluster::get_read_set(bool use_cursor, uint idx MY_ATTRIBUTE((unused))) * In addition, when a 'current of' position is available: * 2) When deleting a row containing BLOBs PK is required * to delete BLOB stored in separate fragments. - * 3) When updating BLOB columns PK is required to delete + * 3) When updating BLOB columns PK is required to delete * old BLOB + insert new BLOB contents */ - else - if (!use_cursor || // 1) - (is_delete && table_share->blob_fields) || // 2) - uses_blob_value(table->write_set)) // 3) + else if (!use_cursor || // 1) + (is_delete && table_share->blob_fields) || // 2) + uses_blob_value(table->write_set)) // 3) { bitmap_union(table->read_set, m_pk_bitmap_p); } /** * If update/delete use partition pruning, we need - * to read the column values which being part of the + * to read the column values which are part of the * partition spec as they are used by * ::get_parts_for_update() / ::get_parts_for_delete() * Part. columns are always part of PK, so we only @@ -4672,15 +4023,14 @@ ha_ndbcluster::get_read_set(bool use_cursor, uint idx MY_ATTRIBUTE((unused))) */ else if (m_use_partition_pruning) // && m_user_defined_partitioning) { - DBUG_ASSERT(bitmap_is_subset(&m_part_info->full_part_field_set, - m_pk_bitmap_p)); + DBUG_ASSERT( + bitmap_is_subset(&m_part_info->full_part_field_set, m_pk_bitmap_p)); bitmap_union(table->read_set, &m_part_info->full_part_field_set); } - /** * Update might cause PK or Unique key violation. - * Error reporting need values from the offending + * Error reporting needs values from the offending * unique columns to have been read: * * NOTE: This is NOT required for the correctness * of the update operation itself. Rather we need it to * deferring reading of the column values * until formatting the error message.
*/ - if (is_update && m_has_unique_index) - { - for (uint i= 0; i < table_share->keys; i++) - { + if (is_update && m_has_unique_index) { + for (uint i = 0; i < table_share->keys; i++) { if ((table->key_info[i].flags & HA_NOSAME) && - bitmap_is_overlapping(table->write_set, m_key_fields[i])) - { + bitmap_is_overlapping(table->write_set, m_key_fields[i])) { bitmap_union(table->read_set, m_key_fields[i]); } } } } - -Uint32 -ha_ndbcluster::setup_get_hidden_fields(NdbOperation::GetValueSpec gets[2]) -{ - Uint32 num_gets= 0; +Uint32 ha_ndbcluster::setup_get_hidden_fields( + NdbOperation::GetValueSpec gets[2]) { + Uint32 num_gets = 0; /* We need to read the hidden primary key, and possibly the FRAGMENT pseudo-column. */ - gets[num_gets].column= get_hidden_key_column(); - gets[num_gets].appStorage= &m_ref; + gets[num_gets].column = get_hidden_key_column(); + gets[num_gets].appStorage = &m_ref; num_gets++; - if (m_user_defined_partitioning) - { + if (m_user_defined_partitioning) { /* Need to read partition id to support ORDER BY columns. */ - gets[num_gets].column= NdbDictionary::Column::FRAGMENT; - gets[num_gets].appStorage= &m_part_id; + gets[num_gets].column = NdbDictionary::Column::FRAGMENT; + gets[num_gets].appStorage = &m_part_id; num_gets++; } return num_gets; } -void -ha_ndbcluster::get_hidden_fields_keyop(NdbOperation::OperationOptions *options, - NdbOperation::GetValueSpec gets[2]) -{ - Uint32 num_gets= setup_get_hidden_fields(gets); - options->optionsPresent|= NdbOperation::OperationOptions::OO_GETVALUE; - options->extraGetValues= gets; - options->numExtraGetValues= num_gets; -} - -void -ha_ndbcluster::get_hidden_fields_scan(NdbScanOperation::ScanOptions *options, - NdbOperation::GetValueSpec gets[2]) -{ - Uint32 num_gets= setup_get_hidden_fields(gets); - options->optionsPresent|= NdbScanOperation::ScanOptions::SO_GETVALUE; - options->extraGetValues= gets; - options->numExtraGetValues= num_gets; -} - -inline void -ha_ndbcluster::eventSetAnyValue(THD *thd, - NdbOperation::OperationOptions *options) const -{ - options->anyValue= 0; - if (unlikely(m_slow_path)) - { +void ha_ndbcluster::get_hidden_fields_keyop( + NdbOperation::OperationOptions *options, + NdbOperation::GetValueSpec gets[2]) { + Uint32 num_gets = setup_get_hidden_fields(gets); + options->optionsPresent |= NdbOperation::OperationOptions::OO_GETVALUE; + options->extraGetValues = gets; + options->numExtraGetValues = num_gets; +} + +void ha_ndbcluster::get_hidden_fields_scan( + NdbScanOperation::ScanOptions *options, + NdbOperation::GetValueSpec gets[2]) { + Uint32 num_gets = setup_get_hidden_fields(gets); + options->optionsPresent |= NdbScanOperation::ScanOptions::SO_GETVALUE; + options->extraGetValues = gets; + options->numExtraGetValues = num_gets; +} + +inline void ha_ndbcluster::eventSetAnyValue( + THD *thd, NdbOperation::OperationOptions *options) const { + options->anyValue = 0; + if (unlikely(m_slow_path)) { /* Ignore TNTO_NO_LOGGING for slave thd. It is used to indicate log-slave-updates option. This is instead handled in the injector thread, by looking explicitly at the opt_log_slave_updates flag. */ - Thd_ndb *thd_ndb= get_thd_ndb(thd); - if (thd->slave_thread) - { + Thd_ndb *thd_ndb = get_thd_ndb(thd); + if (thd->slave_thread) { /* Slave-thread, we are applying a replicated event. 
We set the server_id to the value received from the log which @@ -4770,34 +4108,29 @@ ha_ndbcluster::eventSetAnyValue(THD *thd, */ options->optionsPresent |= NdbOperation::OperationOptions::OO_ANYVALUE; options->anyValue = thd_unmasked_server_id(thd); - } - else if (thd_ndb->check_trans_option(Thd_ndb::TRANS_NO_LOGGING)) - { + } else if (thd_ndb->check_trans_option(Thd_ndb::TRANS_NO_LOGGING)) { options->optionsPresent |= NdbOperation::OperationOptions::OO_ANYVALUE; ndbcluster_anyvalue_set_nologging(options->anyValue); } } #ifndef DBUG_OFF - if (DBUG_EVALUATE_IF("ndb_set_reflect_anyvalue", true, false)) - { - fprintf(stderr, "Ndb forcing reflect AnyValue\n"); - options->optionsPresent |= NdbOperation::OperationOptions::OO_ANYVALUE; - ndbcluster_anyvalue_set_reflect_op(options->anyValue); + if (DBUG_EVALUATE_IF("ndb_set_reflect_anyvalue", true, false)) { + fprintf(stderr, "Ndb forcing reflect AnyValue\n"); + options->optionsPresent |= NdbOperation::OperationOptions::OO_ANYVALUE; + ndbcluster_anyvalue_set_reflect_op(options->anyValue); } - if (DBUG_EVALUATE_IF("ndb_set_refresh_anyvalue", true, false)) - { + if (DBUG_EVALUATE_IF("ndb_set_refresh_anyvalue", true, false)) { fprintf(stderr, "Ndb forcing refresh AnyValue\n"); options->optionsPresent |= NdbOperation::OperationOptions::OO_ANYVALUE; ndbcluster_anyvalue_set_refresh_op(options->anyValue); } - + /* MySQLD will set the user-portion of AnyValue (if any) to all 1s This tests code filtering ServerIds on the value of server-id-bits. */ - const char* p = getenv("NDB_TEST_ANYVALUE_USERDATA"); - if (p != 0 && *p != 0 && *p != '0' && *p != 'n' && *p != 'N') - { + const char *p = getenv("NDB_TEST_ANYVALUE_USERDATA"); + if (p != 0 && *p != 0 && *p != '0' && *p != 'n' && *p != 'N') { options->optionsPresent |= NdbOperation::OperationOptions::OO_ANYVALUE; dbug_ndbcluster_anyvalue_set_userbits(options->anyValue); } @@ -4806,7 +4139,6 @@ ha_ndbcluster::eventSetAnyValue(THD *thd, extern NDB_SHARE *ndb_apply_status_share; - /** prepare_conflict_detection @@ -4822,21 +4154,14 @@ extern NDB_SHARE *ndb_apply_status_share; handling should occur immediately. In this case, conflict_handled is set to true. 
*/ -int -ha_ndbcluster::prepare_conflict_detection(enum_conflicting_op_type op_type, - const NdbRecord* key_rec, - const NdbRecord* data_rec, - const uchar* old_data, - const uchar* new_data, - const MY_BITMAP *write_set, - NdbTransaction* trans, - NdbInterpretedCode* code, - NdbOperation::OperationOptions* options, - bool& conflict_handled, - bool& avoid_ndbapi_write) -{ +int ha_ndbcluster::prepare_conflict_detection( + enum_conflicting_op_type op_type, const NdbRecord *key_rec, + const NdbRecord *data_rec, const uchar *old_data, const uchar *new_data, + const MY_BITMAP *write_set, NdbTransaction *trans, NdbInterpretedCode *code, + NdbOperation::OperationOptions *options, bool &conflict_handled, + bool &avoid_ndbapi_write) { DBUG_ENTER("prepare_conflict_detection"); - THD* thd = table->in_use; + THD *thd = table->in_use; int res = 0; assert(thd->slave_thread); @@ -4846,8 +4171,7 @@ ha_ndbcluster::prepare_conflict_detection(enum_conflicting_op_type op_type, Special check for apply_status table, as we really don't want to do any special handling with it */ - if (unlikely(m_share == ndb_apply_status_share)) - { + if (unlikely(m_share == ndb_apply_status_share)) { DBUG_RETURN(0); } @@ -4858,139 +4182,108 @@ ha_ndbcluster::prepare_conflict_detection(enum_conflicting_op_type op_type, Note that this applies even if the current operation's table does not have a conflict function defined - if a transaction spans a 'transactional - conflict detection' table and a non transactional table, the non-transactional - table's data will also be reverted. + conflict detection' table and a non-transactional table, the + non-transactional table's data will also be reverted. */ Uint64 transaction_id = Ndb_binlog_extra_row_info::InvalidTransactionId; - bool op_is_marked_as_read= false; - bool op_is_marked_as_reflected= false; + bool op_is_marked_as_read = false; + bool op_is_marked_as_reflected = false; // Only used for sanity check and debug printout - bool op_is_marked_as_refresh MY_ATTRIBUTE((unused))= false; + bool op_is_marked_as_refresh MY_ATTRIBUTE((unused)) = false; - if (thd->binlog_row_event_extra_data) - { + if (thd->binlog_row_event_extra_data) { Ndb_binlog_extra_row_info extra_row_info; - if (extra_row_info.loadFromBuffer(thd->binlog_row_event_extra_data) != 0) - { - ndb_log_warning("NDB Slave: Malformed event received on table %s " - "cannot parse. Stopping Slave.", - m_share->key_string()); - DBUG_RETURN( ER_SLAVE_CORRUPT_EVENT ); + if (extra_row_info.loadFromBuffer(thd->binlog_row_event_extra_data) != 0) { + ndb_log_warning( + "NDB Slave: Malformed event received on table %s " + "cannot parse.
Stopping Slave.", + m_share->key_string()); + DBUG_RETURN(ER_SLAVE_CORRUPT_EVENT); } if (extra_row_info.getFlags() & - Ndb_binlog_extra_row_info::NDB_ERIF_TRANSID) - { + Ndb_binlog_extra_row_info::NDB_ERIF_TRANSID) { transaction_id = extra_row_info.getTransactionId(); } if (extra_row_info.getFlags() & - Ndb_binlog_extra_row_info::NDB_ERIF_CFT_FLAGS) - { + Ndb_binlog_extra_row_info::NDB_ERIF_CFT_FLAGS) { const Uint16 conflict_flags = extra_row_info.getConflictFlags(); DBUG_PRINT("info", ("conflict flags : %x\n", conflict_flags)); - if (conflict_flags & NDB_ERIF_CFT_REFLECT_OP) - { - op_is_marked_as_reflected= true; + if (conflict_flags & NDB_ERIF_CFT_REFLECT_OP) { + op_is_marked_as_reflected = true; g_ndb_slave_state.current_reflect_op_prepare_count++; } - - if (conflict_flags & NDB_ERIF_CFT_REFRESH_OP) - { - op_is_marked_as_refresh= true; + + if (conflict_flags & NDB_ERIF_CFT_REFRESH_OP) { + op_is_marked_as_refresh = true; g_ndb_slave_state.current_refresh_op_count++; } - if (conflict_flags & NDB_ERIF_CFT_READ_OP) - { - op_is_marked_as_read= true; + if (conflict_flags & NDB_ERIF_CFT_READ_OP) { + op_is_marked_as_read = true; } /* Sanity - 1 flag at a time at most */ - assert(! (op_is_marked_as_reflected && - op_is_marked_as_refresh)); - assert(! (op_is_marked_as_read && - (op_is_marked_as_reflected || - op_is_marked_as_refresh))); + assert(!(op_is_marked_as_reflected && op_is_marked_as_refresh)); + assert(!(op_is_marked_as_read && + (op_is_marked_as_reflected || op_is_marked_as_refresh))); } } - const st_conflict_fn_def* conflict_fn = (m_share->m_cfn_share? - m_share->m_cfn_share->m_conflict_fn: - NULL); + const st_conflict_fn_def *conflict_fn = + (m_share->m_cfn_share ? m_share->m_cfn_share->m_conflict_fn : NULL); bool pass_mode = false; - if (conflict_fn) - { + if (conflict_fn) { /* Check Slave Conflict Role Variable setting */ - if (conflict_fn->flags & CF_USE_ROLE_VAR) - { - switch (opt_ndb_slave_conflict_role) - { - case SCR_NONE: - { - ndb_log_warning("NDB Slave: Conflict function %s defined on " - "table %s requires ndb_slave_conflict_role variable " - "to be set. Stopping slave.", - conflict_fn->name, - m_share->key_string()); - DBUG_RETURN(ER_SLAVE_CONFIGURATION); - } - case SCR_PASS: - { - pass_mode = true; - } - default: - /* PRIMARY, SECONDARY */ - break; + if (conflict_fn->flags & CF_USE_ROLE_VAR) { + switch (opt_ndb_slave_conflict_role) { + case SCR_NONE: { + ndb_log_warning( + "NDB Slave: Conflict function %s defined on " + "table %s requires ndb_slave_conflict_role variable " + "to be set. Stopping slave.", + conflict_fn->name, m_share->key_string()); + DBUG_RETURN(ER_SLAVE_CONFIGURATION); + } + case SCR_PASS: { + pass_mode = true; + } + default: + /* PRIMARY, SECONDARY */ + break; } } } { bool handle_conflict_now = false; - const uchar* row_data = (op_type == WRITE_ROW? new_data : old_data); - int res = g_ndb_slave_state.atPrepareConflictDetection(m_table, - key_rec, - row_data, - transaction_id, - handle_conflict_now); - if (res) - DBUG_RETURN(res); + const uchar *row_data = (op_type == WRITE_ROW ? 
new_data : old_data); + int res = g_ndb_slave_state.atPrepareConflictDetection( + m_table, key_rec, row_data, transaction_id, handle_conflict_now); + if (res) DBUG_RETURN(res); - if (handle_conflict_now) - { + if (handle_conflict_now) { DBUG_PRINT("info", ("Conflict handling for row occurring now")); NdbError noRealConflictError; /* * If the user operation was a read and we receive an update * log event due to an AnyValue update, then the conflicting operation - * should be reported as a read. + * should be reported as a read. */ - enum_conflicting_op_type conflicting_op= - (op_type == UPDATE_ROW && op_is_marked_as_read)? - READ_ROW - : op_type; + enum_conflicting_op_type conflicting_op = + (op_type == UPDATE_ROW && op_is_marked_as_read) ? READ_ROW : op_type; /* Directly handle the conflict here - e.g refresh/ write to exceptions table etc. */ - res = handle_row_conflict(m_share->m_cfn_share, - m_share->table_name, - "Transaction", - key_rec, - data_rec, - old_data, - new_data, - conflicting_op, - TRANS_IN_CONFLICT, - noRealConflictError, - trans, - write_set, - transaction_id); - if (unlikely(res)) - DBUG_RETURN(res); + res = handle_row_conflict( + m_share->m_cfn_share, m_share->table_name, "Transaction", key_rec, + data_rec, old_data, new_data, conflicting_op, TRANS_IN_CONFLICT, + noRealConflictError, trans, write_set, transaction_id); + if (unlikely(res)) DBUG_RETURN(res); g_ndb_slave_state.conflict_flags |= SCS_OPS_DEFINED; @@ -4998,57 +4291,54 @@ ha_ndbcluster::prepare_conflict_detection(enum_conflicting_op_type op_type, Indicate that there (may be) some more operations to execute before committing */ - m_thd_ndb->m_unsent_bytes+= 12; + m_thd_ndb->m_unsent_bytes += 12; conflict_handled = true; DBUG_RETURN(0); } } - if (conflict_fn == NULL || - pass_mode) - { + if (conflict_fn == NULL || pass_mode) { /* No conflict function definition required */ DBUG_RETURN(0); } /** - * By default conflict algorithms use the 'natural' NdbApi ops - * (insert/update/delete) which can detect presence anomalies, + * By default conflict algorithms use the 'natural' NdbApi ops + * (insert/update/delete) which can detect presence anomalies, * as opposed to NdbApi write which ignores them. - * However in some cases, we want to use NdbApi write to apply - * events received on tables with conflict detection defined + * However in some cases, we want to use NdbApi write to apply + * events received on tables with conflict detection defined * (e.g. when we want to forcibly align a row with a refresh op). */ avoid_ndbapi_write = true; if (unlikely((conflict_fn->flags & CF_TRANSACTIONAL) && - (transaction_id == Ndb_binlog_extra_row_info::InvalidTransactionId))) - { - ndb_log_warning("NDB Slave: Transactional conflict detection defined on " - "table %s, but events received without transaction ids. " - "Check --ndb-log-transaction-id setting on " - "upstream Cluster.", - m_share->key_string()); + (transaction_id == + Ndb_binlog_extra_row_info::InvalidTransactionId))) { + ndb_log_warning( + "NDB Slave: Transactional conflict detection defined on " + "table %s, but events received without transaction ids. " + "Check --ndb-log-transaction-id setting on " + "upstream Cluster.", + m_share->key_string()); /* This is a user error, but we want them to notice, so treat seriously */ - DBUG_RETURN( ER_SLAVE_CORRUPT_EVENT ); + DBUG_RETURN(ER_SLAVE_CORRUPT_EVENT); } /** * Normally, update and delete have an attached program executed against - * the existing row content. Insert (and NdbApi write) do not. 
+ * the existing row content. Insert (and NdbApi write) do not. * Insert cannot as there is no pre-existing row to examine (and therefore * no non prepare-time deterministic decisions to make). - * NdbApi Write technically could if the row already existed, but this is + * NdbApi Write technically could if the row already existed, but this is * not currently supported by NdbApi. */ bool prepare_interpreted_program = (op_type != WRITE_ROW); - if (conflict_fn->flags & CF_REFLECT_SEC_OPS) - { + if (conflict_fn->flags & CF_REFLECT_SEC_OPS) { /* This conflict function reflects secondary ops at the Primary */ - - if (opt_ndb_slave_conflict_role == SCR_PRIMARY) - { + + if (opt_ndb_slave_conflict_role == SCR_PRIMARY) { /** * Here we mark the applied operations to indicate that they * should be reflected back to the SECONDARY cluster. @@ -5065,24 +4355,20 @@ ha_ndbcluster::prepare_conflict_detection(enum_conflicting_op_type op_type, */ DBUG_PRINT("info", ("Setting AnyValue to reflect secondary op")); - options->optionsPresent |= - NdbOperation::OperationOptions::OO_ANYVALUE; + options->optionsPresent |= NdbOperation::OperationOptions::OO_ANYVALUE; ndbcluster_anyvalue_set_reflect_op(options->anyValue); - } - else if (opt_ndb_slave_conflict_role == SCR_SECONDARY) - { + } else if (opt_ndb_slave_conflict_role == SCR_SECONDARY) { /** * On the Secondary, we receive reflected operations which * we want to attempt to apply under certain conditions. * This is done to recover from situations where * both PRIMARY and SECONDARY have performed concurrent * DELETEs. - * + * * For non reflected operations we want to apply Inserts and * Updates using write_tuple() to get an idempotent effect */ - if (op_is_marked_as_reflected) - { + if (op_is_marked_as_reflected) { /** * Apply operations using their 'natural' operation types * with interpreted programs attached where appropriate. @@ -5090,9 +4376,7 @@ ha_ndbcluster::prepare_conflict_detection(enum_conflicting_op_type op_type, * of any 'presence' issues (row does/not exist). */ DBUG_PRINT("info", ("Reflected operation")); - } - else - { + } else { /** * Either a normal primary sourced change, or a refresh * operation. @@ -5120,95 +4404,80 @@ ha_ndbcluster::prepare_conflict_detection(enum_conflicting_op_type op_type, Prepare interpreted code for operation (update + delete only) according to algorithm used */ - if (prepare_interpreted_program) - { - res = conflict_fn->prep_func(m_share->m_cfn_share, - op_type, - m_ndb_record, - old_data, - new_data, - table->read_set, // Before image - table->write_set, // After image + if (prepare_interpreted_program) { + res = conflict_fn->prep_func(m_share->m_cfn_share, op_type, m_ndb_record, + old_data, new_data, + table->read_set, // Before image + table->write_set, // After image code); - if (res == 0) - { - if (code->getWordsUsed() > 0) - { + if (res == 0) { + if (code->getWordsUsed() > 0) { /* Attach conflict detecting filter program to operation */ - options->optionsPresent|= - NdbOperation::OperationOptions::OO_INTERPRETED; - options->interpretedCode= code; + options->optionsPresent |= + NdbOperation::OperationOptions::OO_INTERPRETED; + options->interpretedCode = code; } + } else { + ndb_log_warning( + "NDB Slave: Binlog event on table %s missing " + "info necessary for conflict detection. 
" + "Check binlog format options on upstream cluster.", + m_share->key_string()); + DBUG_RETURN(ER_SLAVE_CORRUPT_EVENT); } - else - { - ndb_log_warning("NDB Slave: Binlog event on table %s missing " - "info necessary for conflict detection. " - "Check binlog format options on upstream cluster.", - m_share->key_string()); - DBUG_RETURN( ER_SLAVE_CORRUPT_EVENT); - } - } // if (op_type != WRITE_ROW) + } // if (op_type != WRITE_ROW) g_ndb_slave_state.conflict_flags |= SCS_OPS_DEFINED; /* Now save data for potential insert to exceptions table... */ Ndb_exceptions_data ex_data; - ex_data.share= m_share; - ex_data.key_rec= key_rec; - ex_data.data_rec= data_rec; - ex_data.op_type= op_type; + ex_data.share = m_share; + ex_data.key_rec = key_rec; + ex_data.data_rec = data_rec; + ex_data.op_type = op_type; ex_data.reflected_operation = op_is_marked_as_reflected; - ex_data.trans_id= transaction_id; + ex_data.trans_id = transaction_id; /* We need to save the row data for possible conflict resolution after execute(). */ - if (old_data) - ex_data.old_row= copy_row_to_buffer(m_thd_ndb, old_data); - if (old_data != NULL && ex_data.old_row == NULL) - { + if (old_data) ex_data.old_row = copy_row_to_buffer(m_thd_ndb, old_data); + if (old_data != NULL && ex_data.old_row == NULL) { DBUG_RETURN(HA_ERR_OUT_OF_MEM); } - if (new_data) - ex_data.new_row= copy_row_to_buffer(m_thd_ndb, new_data); - if (new_data != NULL && ex_data.new_row == NULL) - { + if (new_data) ex_data.new_row = copy_row_to_buffer(m_thd_ndb, new_data); + if (new_data != NULL && ex_data.new_row == NULL) { DBUG_RETURN(HA_ERR_OUT_OF_MEM); } - ex_data.bitmap_buf= NULL; - ex_data.write_set= NULL; - if (table->write_set) - { + ex_data.bitmap_buf = NULL; + ex_data.write_set = NULL; + if (table->write_set) { /* Copy table write set */ - ex_data.bitmap_buf= - (my_bitmap_map *) get_buffer(m_thd_ndb, table->s->column_bitmap_size); - if (ex_data.bitmap_buf == NULL) - { + ex_data.bitmap_buf = + (my_bitmap_map *)get_buffer(m_thd_ndb, table->s->column_bitmap_size); + if (ex_data.bitmap_buf == NULL) { DBUG_RETURN(HA_ERR_OUT_OF_MEM); } - ex_data.write_set= (MY_BITMAP*) get_buffer(m_thd_ndb, sizeof(MY_BITMAP)); - if (ex_data.write_set == NULL) - { + ex_data.write_set = (MY_BITMAP *)get_buffer(m_thd_ndb, sizeof(MY_BITMAP)); + if (ex_data.write_set == NULL) { DBUG_RETURN(HA_ERR_OUT_OF_MEM); } - bitmap_init(ex_data.write_set, ex_data.bitmap_buf, - table->write_set->n_bits, false); + bitmap_init(ex_data.write_set, ex_data.bitmap_buf, table->write_set->n_bits, + false); bitmap_copy(ex_data.write_set, table->write_set); } - uchar* ex_data_buffer= get_buffer(m_thd_ndb, sizeof(ex_data)); - if (ex_data_buffer == NULL) - { + uchar *ex_data_buffer = get_buffer(m_thd_ndb, sizeof(ex_data)); + if (ex_data_buffer == NULL) { DBUG_RETURN(HA_ERR_OUT_OF_MEM); } memcpy(ex_data_buffer, &ex_data, sizeof(ex_data)); /* Store ptr to exceptions data in operation 'customdata' ptr */ - options->optionsPresent|= NdbOperation::OperationOptions::OO_CUSTOMDATA; - options->customData= (void*)ex_data_buffer; + options->optionsPresent |= NdbOperation::OperationOptions::OO_CUSTOMDATA; + options->customData = (void *)ex_data_buffer; DBUG_RETURN(0); } @@ -5226,114 +4495,98 @@ ha_ndbcluster::prepare_conflict_detection(enum_conflicting_op_type op_type, refreshing the row and inserting an entry into the exceptions table */ -static int -handle_conflict_op_error(NdbTransaction* trans, - const NdbError& err, - const NdbOperation* op) -{ +static int handle_conflict_op_error(NdbTransaction *trans, const 
NdbError &err, + const NdbOperation *op) { DBUG_ENTER("handle_conflict_op_error"); DBUG_PRINT("info", ("ndb error: %d", err.code)); - if ((err.code == (int) error_conflict_fn_violation) || - (err.code == (int) error_op_after_refresh_op) || + if ((err.code == (int)error_conflict_fn_violation) || + (err.code == (int)error_op_after_refresh_op) || (err.classification == NdbError::ConstraintViolation) || - (err.classification == NdbError::NoDataFound)) - { - DBUG_PRINT("info", - ("err.code = %s, err.classification = %s", - ((err.code == (int) error_conflict_fn_violation)? - "error_conflict_fn_violation": - ((err.code == (int) error_op_after_refresh_op)? - "error_op_after_refresh_op" : "?")), - ((err.classification == NdbError::ConstraintViolation)? - "ConstraintViolation": - ((err.classification == NdbError::NoDataFound)? - "NoDataFound" : "?")))); + (err.classification == NdbError::NoDataFound)) { + DBUG_PRINT("info", ("err.code = %s, err.classification = %s", + ((err.code == (int)error_conflict_fn_violation) + ? "error_conflict_fn_violation" + : ((err.code == (int)error_op_after_refresh_op) + ? "error_op_after_refresh_op" + : "?")), + ((err.classification == NdbError::ConstraintViolation) + ? "ConstraintViolation" + : ((err.classification == NdbError::NoDataFound) + ? "NoDataFound" + : "?")))); enum_conflict_cause conflict_cause; /* Map cause onto our conflict description type */ - if ((err.code == (int) error_conflict_fn_violation) || - (err.code == (int) error_op_after_refresh_op)) - { + if ((err.code == (int)error_conflict_fn_violation) || + (err.code == (int)error_op_after_refresh_op)) { DBUG_PRINT("info", ("ROW_IN_CONFLICT")); - conflict_cause= ROW_IN_CONFLICT; - } - else if (err.classification == NdbError::ConstraintViolation) - { + conflict_cause = ROW_IN_CONFLICT; + } else if (err.classification == NdbError::ConstraintViolation) { DBUG_PRINT("info", ("ROW_ALREADY_EXISTS")); - conflict_cause= ROW_ALREADY_EXISTS; - } - else - { + conflict_cause = ROW_ALREADY_EXISTS; + } else { assert(err.classification == NdbError::NoDataFound); DBUG_PRINT("info", ("ROW_DOES_NOT_EXIST")); - conflict_cause= ROW_DOES_NOT_EXIST; + conflict_cause = ROW_DOES_NOT_EXIST; } /* Get exceptions data from operation */ - const void* buffer=op->getCustomData(); + const void *buffer = op->getCustomData(); assert(buffer); Ndb_exceptions_data ex_data; memcpy(&ex_data, buffer, sizeof(ex_data)); - NDB_SHARE *share= ex_data.share; - NDB_CONFLICT_FN_SHARE* cfn_share= share ? share->m_cfn_share : NULL; + NDB_SHARE *share = ex_data.share; + NDB_CONFLICT_FN_SHARE *cfn_share = share ? share->m_cfn_share : NULL; - const NdbRecord* key_rec= ex_data.key_rec; - const NdbRecord* data_rec= ex_data.data_rec; - const uchar* old_row= ex_data.old_row; - const uchar* new_row= ex_data.new_row; + const NdbRecord *key_rec = ex_data.key_rec; + const NdbRecord *data_rec = ex_data.data_rec; + const uchar *old_row = ex_data.old_row; + const uchar *new_row = ex_data.new_row; #ifndef DBUG_OFF - const uchar* row= - (ex_data.op_type == DELETE_ROW)? - ex_data.old_row : ex_data.new_row; + const uchar *row = + (ex_data.op_type == DELETE_ROW) ? 
ex_data.old_row : ex_data.new_row; #endif - enum_conflicting_op_type causing_op_type= ex_data.op_type; - const MY_BITMAP *write_set= ex_data.write_set; + enum_conflicting_op_type causing_op_type = ex_data.op_type; + const MY_BITMAP *write_set = ex_data.write_set; - DBUG_PRINT("info", ("Conflict causing op type : %u", - causing_op_type)); + DBUG_PRINT("info", ("Conflict causing op type : %u", causing_op_type)); - if (causing_op_type == REFRESH_ROW) - { + if (causing_op_type == REFRESH_ROW) { /* The failing op was a refresh row, we require that it failed due to being a duplicate (e.g. a refresh occurring on a refreshed row) */ - if (err.code == (int) error_op_after_refresh_op) - { + if (err.code == (int)error_op_after_refresh_op) { DBUG_PRINT("info", ("Operation after refresh - ignoring")); DBUG_RETURN(0); - } - else - { + } else { DBUG_PRINT("info", ("Refresh op hit real error %u", err.code)); /* Unexpected error, normal handling*/ DBUG_RETURN(err.code); } } - if (ex_data.reflected_operation) - { - DBUG_PRINT("info", ("Reflected operation error : %u.", - err.code)); - + if (ex_data.reflected_operation) { + DBUG_PRINT("info", ("Reflected operation error : %u.", err.code)); + /** * Expected cases are : * Insert : Row already exists : Don't care - discard * Secondary has this row, or a future version - * + * * Update : Row does not exist : Don't care - discard * Secondary has deleted this row later. * * Conflict * (Row written here last) : Don't care - discard * Secondary has this row, or a future version - * + * * Delete : Row does not exist : Don't care - discard * Secondary has deleted this row later. - * + * * Conflict * (Row written here last) : Don't care - discard * Secondary has a future version of this row @@ -5341,13 +4594,13 @@ handle_conflict_op_error(NdbTransaction* trans, * Presence and authorship conflicts are used to determine * whether to apply a reflected operation. * The presence checks avoid divergence and the authorship - * checks avoid all actions being applied in delayed + * checks avoid all actions being applied in delayed * duplicate. */ - assert((err.code == (int) error_conflict_fn_violation) || + assert((err.code == (int)error_conflict_fn_violation) || (err.classification == NdbError::ConstraintViolation) || (err.classification == NdbError::NoDataFound)); - + g_ndb_slave_state.current_reflect_op_discard_count++; DBUG_RETURN(0); @@ -5355,19 +4608,17 @@ handle_conflict_op_error(NdbTransaction* trans, { /** - * For asymmetric algorithms that use the ROLE variable to + * For asymmetric algorithms that use the ROLE variable to * determine their role, we check whether we are on the * SECONDARY cluster. * This is as far as we want to process conflicts on the * SECONDARY.
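// [Editor's note: sketch added for illustration; not part of the original patch.]
// The SECONDARY check that follows, written as a predicate: conflict handling
// stops here only for algorithms that take their role from the
// ndb_slave_conflict_role variable (CF_USE_ROLE_VAR). Hypothetical helper name;
// the types and flags are the ones used in this file.
static bool conflict_handled_as_secondary(
    const NDB_CONFLICT_FN_SHARE *cfn_share) {
  return cfn_share && cfn_share->m_conflict_fn &&
         (cfn_share->m_conflict_fn->flags & CF_USE_ROLE_VAR) &&
         (opt_ndb_slave_conflict_role == SCR_SECONDARY);
}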
*/ - bool secondary = cfn_share && - cfn_share->m_conflict_fn && - (cfn_share->m_conflict_fn->flags & CF_USE_ROLE_VAR) && - (opt_ndb_slave_conflict_role == SCR_SECONDARY); - - if (secondary) - { + bool secondary = cfn_share && cfn_share->m_conflict_fn && + (cfn_share->m_conflict_fn->flags & CF_USE_ROLE_VAR) && + (opt_ndb_slave_conflict_role == SCR_SECONDARY); + + if (secondary) { DBUG_PRINT("info", ("Conflict detected, on secondary - ignore")); DBUG_RETURN(0); } @@ -5375,12 +4626,10 @@ handle_conflict_op_error(NdbTransaction* trans, DBUG_ASSERT(share != NULL && row != NULL); bool table_has_trans_conflict_detection = - cfn_share && - cfn_share->m_conflict_fn && - (cfn_share->m_conflict_fn->flags & CF_TRANSACTIONAL); + cfn_share && cfn_share->m_conflict_fn && + (cfn_share->m_conflict_fn->flags & CF_TRANSACTIONAL); - if (table_has_trans_conflict_detection) - { + if (table_has_trans_conflict_detection) { /* Mark this transaction as in-conflict. * For Delete-NoSuchRow (aka Delete-Delete) conflicts, we * do not always mark the transaction as in-conflict, as @@ -5393,95 +4642,71 @@ handle_conflict_op_error(NdbTransaction* trans, */ bool is_del_del_cft = ((causing_op_type == DELETE_ROW) && (conflict_cause == ROW_DOES_NOT_EXIST)); - bool fn_treats_del_del_as_cft = - (cfn_share->m_conflict_fn->flags & CF_DEL_DEL_CFT); - - if (!is_del_del_cft || - fn_treats_del_del_as_cft) - { + bool fn_treats_del_del_as_cft = + (cfn_share->m_conflict_fn->flags & CF_DEL_DEL_CFT); + + if (!is_del_del_cft || fn_treats_del_del_as_cft) { /* Perform special transactional conflict-detected handling */ int res = g_ndb_slave_state.atTransConflictDetected(ex_data.trans_id); - if (res) - DBUG_RETURN(res); + if (res) DBUG_RETURN(res); } } - if (cfn_share) - { + if (cfn_share) { /* Now handle the conflict on this row */ enum_conflict_fn_type cft = cfn_share->m_conflict_fn->type; g_ndb_slave_state.current_violation_count[cft]++; - int res = handle_row_conflict(cfn_share, - share->table_name, - "Row", - key_rec, - data_rec, - old_row, - new_row, - causing_op_type, - conflict_cause, - err, - trans, - write_set, - /* - ORIG_TRANSID not available for - non-transactional conflict detection. - */ - Ndb_binlog_extra_row_info::InvalidTransactionId); + int res = handle_row_conflict( + cfn_share, share->table_name, "Row", key_rec, data_rec, old_row, + new_row, causing_op_type, conflict_cause, err, trans, write_set, + /* + ORIG_TRANSID not available for + non-transactional conflict detection. + */ + Ndb_binlog_extra_row_info::InvalidTransactionId); DBUG_RETURN(res); - } - else - { + } else { DBUG_PRINT("info", ("missing cfn_share")); - DBUG_RETURN(0); // TODO : Correct? + DBUG_RETURN(0); // TODO : Correct? } - } - else - { + } else { /* Non conflict related error */ DBUG_PRINT("info", ("err.code == %u", err.code)); DBUG_RETURN(err.code); } - DBUG_RETURN(0); // Reachable? + DBUG_RETURN(0); // Reachable? } /* is_serverid_local */ -static bool is_serverid_local(Uint32 serverid) -{ +static bool is_serverid_local(Uint32 serverid) { /* If it's not our serverid, check the IGNORE_SERVER_IDS setting to check if it's local. 
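// [Editor's note: sketch added for illustration; not part of the original patch.]
// The transactional mark-in-conflict rule from handle_conflict_op_error() above,
// as one predicate: a delete of an already-missing row only counts as a conflict
// when the algorithm sets CF_DEL_DEL_CFT (NDB$EPOCH2_TRANS does, NDB$EPOCH_TRANS
// does not). Hypothetical helper name.
static bool should_mark_trans_in_conflict(enum_conflicting_op_type op_type,
                                          enum_conflict_cause cause,
                                          unsigned long fn_flags) {
  const bool is_del_del =
      (op_type == DELETE_ROW) && (cause == ROW_DOES_NOT_EXIST);
  return !is_del_del || (fn_flags & CF_DEL_DEL_CFT);
}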
*/ - return ((serverid == ::server_id) || - ndb_mi_get_ignore_server_id(serverid)); + return ((serverid == ::server_id) || ndb_mi_get_ignore_server_id(serverid)); } - -int ha_ndbcluster::write_row(uchar *record) -{ +int ha_ndbcluster::write_row(uchar *record) { DBUG_ENTER("ha_ndbcluster::write_row"); - if (m_share == ndb_apply_status_share && table->in_use->slave_thread) - { - uint32 row_server_id, master_server_id= ndb_mi_get_master_server_id(); + if (m_share == ndb_apply_status_share && table->in_use->slave_thread) { + uint32 row_server_id, master_server_id = ndb_mi_get_master_server_id(); uint64 row_epoch; memcpy(&row_server_id, table->field[0]->ptr + (record - table->record[0]), sizeof(row_server_id)); memcpy(&row_epoch, table->field[1]->ptr + (record - table->record[0]), sizeof(row_epoch)); - int rc = g_ndb_slave_state.atApplyStatusWrite(master_server_id, - row_server_id, - row_epoch, - is_serverid_local(row_server_id)); - if (rc != 0) - { + int rc = g_ndb_slave_state.atApplyStatusWrite( + master_server_id, row_server_id, row_epoch, + is_serverid_local(row_server_id)); + if (rc != 0) { /* Stop Slave */ DBUG_RETURN(rc); } @@ -5493,123 +4718,104 @@ int ha_ndbcluster::write_row(uchar *record) /** Insert one record into NDB */ -int ha_ndbcluster::ndb_write_row(uchar *record, - bool primary_key_update, - bool batched_update) -{ +int ha_ndbcluster::ndb_write_row(uchar *record, bool primary_key_update, + bool batched_update) { bool has_auto_increment; const NdbOperation *op; - THD *thd= table->in_use; - Thd_ndb *thd_ndb= m_thd_ndb; + THD *thd = table->in_use; + Thd_ndb *thd_ndb = m_thd_ndb; NdbTransaction *trans; uint32 part_id = 0; - int error= 0; + int error = 0; Uint64 auto_value; - longlong func_value= 0; + longlong func_value = 0; const Uint32 authorValue = 1; NdbOperation::SetValueSpec sets[3]; - Uint32 num_sets= 0; + Uint32 num_sets = 0; DBUG_ENTER("ha_ndbcluster::ndb_write_row"); error = check_slave_state(thd); - if (unlikely(error)) - DBUG_RETURN(error); + if (unlikely(error)) DBUG_RETURN(error); - has_auto_increment= (table->next_number_field && record == table->record[0]); + has_auto_increment = (table->next_number_field && record == table->record[0]); - if (has_auto_increment && table_share->primary_key != MAX_KEY) - { + if (has_auto_increment && table_share->primary_key != MAX_KEY) { /* * Increase any auto_incremented primary key */ - m_skip_auto_increment= false; - if ((error= update_auto_increment())) - DBUG_RETURN(error); - m_skip_auto_increment= (insert_id_for_cur_row == 0 || - thd->auto_inc_intervals_forced.nb_elements()); + m_skip_auto_increment = false; + if ((error = update_auto_increment())) DBUG_RETURN(error); + m_skip_auto_increment = (insert_id_for_cur_row == 0 || + thd->auto_inc_intervals_forced.nb_elements()); } /* * If IGNORE the ignore constraint violations on primary and unique keys */ - if (!m_use_write && m_ignore_dup_key) - { + if (!m_use_write && m_ignore_dup_key) { /* compare if expression with that in start_bulk_insert() start_bulk_insert will set parameters to ensure that each write_row is committed individually */ - int peek_res= peek_indexed_rows(record, NDB_INSERT); - - if (!peek_res) - { - error= HA_ERR_FOUND_DUPP_KEY; - } - else if (peek_res != HA_ERR_KEY_NOT_FOUND) - { - error= peek_res; + int peek_res = peek_indexed_rows(record, NDB_INSERT); + + if (!peek_res) { + error = HA_ERR_FOUND_DUPP_KEY; + } else if (peek_res != HA_ERR_KEY_NOT_FOUND) { + error = peek_res; } - if (error) - { - if ((has_auto_increment) && (m_skip_auto_increment)) - { + if 
(error) { + if ((has_auto_increment) && (m_skip_auto_increment)) { int ret_val; - if ((ret_val= set_auto_inc(thd, table->next_number_field))) - { + if ((ret_val = set_auto_inc(thd, table->next_number_field))) { DBUG_RETURN(ret_val); } } - m_skip_auto_increment= true; + m_skip_auto_increment = true; DBUG_RETURN(error); } } - bool uses_blobs= uses_blob_value(table->write_set); + bool uses_blobs = uses_blob_value(table->write_set); const NdbRecord *key_rec; const uchar *key_row; - if (table_share->primary_key == MAX_KEY) - { + if (table_share->primary_key == MAX_KEY) { /* Table has hidden primary key. */ - Ndb *ndb= get_ndb(thd); - uint retries= NDB_AUTO_INCREMENT_RETRIES; - for (;;) - { + Ndb *ndb = get_ndb(thd); + uint retries = NDB_AUTO_INCREMENT_RETRIES; + for (;;) { NDB_SHARE::Tuple_id_range_guard g(m_share); - if (ndb->getAutoIncrementValue(m_table, g.range, auto_value, 1000) == -1) - { + if (ndb->getAutoIncrementValue(m_table, g.range, auto_value, 1000) == + -1) { if (--retries && !thd_killed(thd) && - ndb->getNdbError().status == NdbError::TemporaryError) - { + ndb->getNdbError().status == NdbError::TemporaryError) { ndb_trans_retry_sleep(); - continue; - } - ERR_RETURN(ndb->getNdbError()); - } - break; + continue; + } + ERR_RETURN(ndb->getNdbError()); + } + break; } - sets[num_sets].column= get_hidden_key_column(); - sets[num_sets].value= &auto_value; + sets[num_sets].column = get_hidden_key_column(); + sets[num_sets].value = &auto_value; num_sets++; - key_rec= m_ndb_hidden_key_record; - key_row= (const uchar *)&auto_value; - } - else - { - key_rec= m_index[table_share->primary_key].ndb_unique_record_row; - key_row= record; + key_rec = m_ndb_hidden_key_record; + key_row = (const uchar *)&auto_value; + } else { + key_rec = m_index[table_share->primary_key].ndb_unique_record_row; + key_row = record; } - trans= thd_ndb->trans; - if (m_user_defined_partitioning) - { + trans = thd_ndb->trans; + if (m_user_defined_partitioning) { DBUG_ASSERT(m_use_partition_pruning); - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); - error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value); + my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->read_set); + error = m_part_info->get_partition_id(m_part_info, &part_id, &func_value); dbug_tmp_restore_column_map(table->read_set, old_map); - if (unlikely(error)) - { - m_part_info->err_value= func_value; + if (unlikely(error)) { + m_part_info->err_value = func_value; DBUG_RETURN(error); } { @@ -5618,19 +4824,16 @@ int ha_ndbcluster::ndb_write_row(uchar *record, NDB since the NDB kernel doesn't have easy access to the function to calculate the value. 
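// [Editor's note: sketch added for illustration; not part of the original patch.]
// How the partition function value computed above is attached to the insert: it
// is clamped to 32 bits and passed as one of the extra SetValueSpec entries (the
// 'sets' array in ndb_write_row()). Hypothetical helper name; INT_MAX32 and the
// SetValueSpec layout are those used in the hunks around this point.
static Uint32 add_part_func_value(NdbOperation::SetValueSpec *sets,
                                  Uint32 num_sets,
                                  const NdbDictionary::Column *part_func_col,
                                  longlong *func_value) {
  if (*func_value >= INT_MAX32) *func_value = INT_MAX32;  // kernel stores 32 bits
  sets[num_sets].column = part_func_col;  // hidden partition-function column
  sets[num_sets].value = func_value;      // read by NDB when the row is stored
  return num_sets + 1;
}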
*/ - if (func_value >= INT_MAX32) - func_value= INT_MAX32; - sets[num_sets].column= get_partition_id_column(); - sets[num_sets].value= &func_value; + if (func_value >= INT_MAX32) func_value = INT_MAX32; + sets[num_sets].column = get_partition_id_column(); + sets[num_sets].value = &func_value; num_sets++; } if (!trans) - if (unlikely(!(trans= start_transaction_part_id(part_id, error)))) + if (unlikely(!(trans = start_transaction_part_id(part_id, error)))) DBUG_RETURN(error); - } - else if (!trans) - { - if (unlikely(!(trans= start_transaction_row(key_rec, key_row, error)))) + } else if (!trans) { + if (unlikely(!(trans = start_transaction_row(key_rec, key_row, error)))) DBUG_RETURN(error); } DBUG_ASSERT(trans); @@ -5642,244 +4845,198 @@ int ha_ndbcluster::ndb_write_row(uchar *record, */ NdbOperation::OperationOptions options; NdbOperation::OperationOptions *poptions = NULL; - options.optionsPresent=0; - - eventSetAnyValue(thd, &options); - const bool need_flush= + options.optionsPresent = 0; + + eventSetAnyValue(thd, &options); + const bool need_flush = thd_ndb->add_row_check_if_batch_full(m_bytes_per_write); - if ((thd->slave_thread) && - (m_table->getExtraRowAuthorBits())) - { + if ((thd->slave_thread) && (m_table->getExtraRowAuthorBits())) { /* Set author to indicate slave updated last */ - sets[num_sets].column= NdbDictionary::Column::ROW_AUTHOR; - sets[num_sets].value= &authorValue; + sets[num_sets].column = NdbDictionary::Column::ROW_AUTHOR; + sets[num_sets].value = &authorValue; num_sets++; } - if (m_user_defined_partitioning) - { + if (m_user_defined_partitioning) { options.optionsPresent |= NdbOperation::OperationOptions::OO_PARTITION_ID; - options.partitionId= part_id; + options.partitionId = part_id; } - if (num_sets) - { + if (num_sets) { options.optionsPresent |= NdbOperation::OperationOptions::OO_SETVALUE; - options.extraSetValues= sets; - options.numExtraSetValues= num_sets; + options.extraSetValues = sets; + options.numExtraSetValues = num_sets; } - if (thd->slave_thread || THDVAR(thd, deferred_constraints)) - { + if (thd->slave_thread || THDVAR(thd, deferred_constraints)) { options.optionsPresent |= - NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS; + NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS; } - if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) - { + if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { DBUG_PRINT("info", ("Disabling foreign keys")); - options.optionsPresent |= - NdbOperation::OperationOptions::OO_DISABLE_FK; + options.optionsPresent |= NdbOperation::OperationOptions::OO_DISABLE_FK; } - if (options.optionsPresent != 0) - poptions=&options; + if (options.optionsPresent != 0) poptions = &options; - const Uint32 bitmapSz= (NDB_MAX_ATTRIBUTES_IN_TABLE + 31)/32; + const Uint32 bitmapSz = (NDB_MAX_ATTRIBUTES_IN_TABLE + 31) / 32; uint32 tmpBitmapSpace[bitmapSz]; MY_BITMAP tmpBitmap; MY_BITMAP *user_cols_written_bitmap; bool avoidNdbApiWriteOp = false; /* ndb_write_row defaults to write */ /* Conflict resolution in slave thread */ - if (thd->slave_thread) - { + if (thd->slave_thread) { bool conflict_handled = false; - if (unlikely((error = prepare_conflict_detection(WRITE_ROW, - key_rec, - m_ndb_record, - NULL, /* old_data */ - record, /* new_data */ - table->write_set, - trans, - NULL, /* code */ - &options, - conflict_handled, - avoidNdbApiWriteOp)))) + if (unlikely((error = prepare_conflict_detection( + WRITE_ROW, key_rec, m_ndb_record, NULL, /* old_data */ + record, /* new_data */ + table->write_set, trans, NULL, /* code */ + 
&options, conflict_handled, avoidNdbApiWriteOp)))) DBUG_RETURN(error); - if (unlikely(conflict_handled)) - { + if (unlikely(conflict_handled)) { /* No need to continue with operation definition */ /* TODO : Ensure batch execution */ DBUG_RETURN(0); } }; - if (m_use_write && - !avoidNdbApiWriteOp) - { - uchar* mask; + if (m_use_write && !avoidNdbApiWriteOp) { + uchar *mask; - if (applying_binlog(thd)) - { + if (applying_binlog(thd)) { /* Use write_set when applying binlog to avoid trampling unchanged columns */ - user_cols_written_bitmap= table->write_set; - mask= m_table_map->get_column_mask(user_cols_written_bitmap); - } - else - { + user_cols_written_bitmap = table->write_set; + mask = m_table_map->get_column_mask(user_cols_written_bitmap); + } else { /* Ignore write_set for REPLACE command */ - user_cols_written_bitmap= NULL; - mask= NULL; + user_cols_written_bitmap = NULL; + mask = NULL; } /* TODO : Add conflict detection etc when interpreted write supported */ - op= trans->writeTuple(key_rec, (const char *)key_row, m_ndb_record, - (char *)record, mask, - poptions, sizeof(NdbOperation::OperationOptions)); - } - else - { + op = trans->writeTuple(key_rec, (const char *)key_row, m_ndb_record, + (char *)record, mask, poptions, + sizeof(NdbOperation::OperationOptions)); + } else { uchar *mask; /* Check whether Ndb table definition includes any default values. */ - if (m_table->hasDefaultValues()) - { + if (m_table->hasDefaultValues()) { DBUG_PRINT("info", ("Not sending values for native defaulted columns")); /* - If Ndb is unaware of the table's defaults, we must provide all column values to the insert. - This is done using a NULL column mask. - If Ndb is aware of the table's defaults, we only need to provide - the columns explicitly mentioned in the write set, - plus any extra columns required due to bug#41616. - plus the primary key columns required due to bug#42238. + If Ndb is unaware of the table's defaults, we must provide all column + values to the insert. This is done using a NULL column mask. If Ndb is + aware of the table's defaults, we only need to provide the columns + explicitly mentioned in the write set, plus any extra columns required + due to bug#41616. plus the primary key columns required due to + bug#42238. */ /* The following code for setting user_cols_written_bitmap should be removed after BUG#41616 and Bug#42238 are fixed */ /* Copy table write set so that we can add to it */ - user_cols_written_bitmap= &tmpBitmap; + user_cols_written_bitmap = &tmpBitmap; bitmap_init(user_cols_written_bitmap, tmpBitmapSpace, table->write_set->n_bits, false); bitmap_copy(user_cols_written_bitmap, table->write_set); - for (uint i= 0; i < table->s->fields; i++) - { - Field *field= table->field[i]; + for (uint i = 0; i < table->s->fields; i++) { + Field *field = table->field[i]; DBUG_PRINT("info", ("Field#%u, (%u), Type : %u " "NO_DEFAULT_VALUE_FLAG : %u PRI_KEY_FLAG : %u", - i, - field->field_index, - field->real_type(), + i, field->field_index, field->real_type(), field->flags & NO_DEFAULT_VALUE_FLAG, field->flags & PRI_KEY_FLAG)); - if ((field->flags & (NO_DEFAULT_VALUE_FLAG | // bug 41616 - PRI_KEY_FLAG)) || // bug 42238 - ! 
type_supports_default_value(field->real_type())) - { + if ((field->flags & (NO_DEFAULT_VALUE_FLAG | // bug 41616 + PRI_KEY_FLAG)) || // bug 42238 + !type_supports_default_value(field->real_type())) { bitmap_set_bit(user_cols_written_bitmap, field->field_index); } } - /* Finally, translate the whole bitmap from MySQL field numbers + /* Finally, translate the whole bitmap from MySQL field numbers to NDB column numbers */ - mask= m_table_map->get_column_mask(user_cols_written_bitmap); - } - else - { + mask = m_table_map->get_column_mask(user_cols_written_bitmap); + } else { /* No defaults in kernel, provide all columns ourselves */ DBUG_PRINT("info", ("No native defaults, sending all values")); - user_cols_written_bitmap= NULL; + user_cols_written_bitmap = NULL; mask = NULL; } - + /* Using insert, we write all non default columns */ - op= trans->insertTuple(key_rec, (const char *)key_row, m_ndb_record, - (char *)record, mask, // Default value should be masked - poptions, sizeof(NdbOperation::OperationOptions)); + op = trans->insertTuple(key_rec, (const char *)key_row, m_ndb_record, + (char *)record, + mask, // Default value should be masked + poptions, sizeof(NdbOperation::OperationOptions)); } - if (!(op)) - ERR_RETURN(trans->getNdbError()); + if (!(op)) ERR_RETURN(trans->getNdbError()); - bool do_batch= !need_flush && - (batched_update || thd_allow_batch(thd)); - uint blob_count= 0; - if (table_share->blob_fields > 0) - { - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + bool do_batch = !need_flush && (batched_update || thd_allow_batch(thd)); + uint blob_count = 0; + if (table_share->blob_fields > 0) { + my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->read_set); /* Set Blob values for all columns updated by the operation */ - int res= set_blob_values(op, record - table->record[0], - user_cols_written_bitmap, &blob_count, do_batch); + int res = set_blob_values(op, record - table->record[0], + user_cols_written_bitmap, &blob_count, do_batch); dbug_tmp_restore_column_map(table->read_set, old_map); - if (res != 0) - DBUG_RETURN(res); + if (res != 0) DBUG_RETURN(res); } /* Execute write operation - NOTE When doing inserts with many values in + NOTE When doing inserts with many values in each INSERT statement it should not be necessary to NoCommit the transaction between each row. Find out how this is detected! */ m_rows_inserted++; no_uncommitted_rows_update(1); - if (( (m_rows_to_insert == 1 || uses_blobs) && !do_batch ) || - primary_key_update || - need_flush) - { + if (((m_rows_to_insert == 1 || uses_blobs) && !do_batch) || + primary_key_update || need_flush) { const int res = flush_bulk_insert(); - if (res != 0) - { - m_skip_auto_increment= true; + if (res != 0) { + m_skip_auto_increment = true; DBUG_RETURN(res); } } - if ((has_auto_increment) && (m_skip_auto_increment)) - { + if ((has_auto_increment) && (m_skip_auto_increment)) { int ret_val; - if ((ret_val= set_auto_inc(thd, table->next_number_field))) - { + if ((ret_val = set_auto_inc(thd, table->next_number_field))) { DBUG_RETURN(ret_val); } } - m_skip_auto_increment= true; + m_skip_auto_increment = true; - DBUG_PRINT("exit",("ok")); + DBUG_PRINT("exit", ("ok")); DBUG_RETURN(0); } - /* Compare if an update changes the primary key in a row. 
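// [Editor's note: sketch added for illustration; not part of the original patch.]
// What the comparison below is for: NDB cannot change a primary key in place, so
// a nonzero primary_key_cmp() result routes the UPDATE through delete +
// re-insert (::ndb_pk_update_row()). Fixed-width key parts compare bytewise,
// which is why the memcmp() branch suffices for them; a hypothetical standalone
// version of that branch, using the file's uchar/uint typedefs:
static bool fixed_key_part_changed(const uchar *old_row, const uchar *new_row,
                                   uint offset, uint length) {
  // BLOB/VARCHAR parts go through Field::cmp_binary() instead, because equal
  // values can have different byte images (length bytes, padding).
  return memcmp(old_row + offset, new_row + offset, length) != 0;
}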
*/ -int ha_ndbcluster::primary_key_cmp(const uchar * old_row, const uchar * new_row) -{ - uint keynr= table_share->primary_key; - KEY_PART_INFO *key_part=table->key_info[keynr].key_part; - KEY_PART_INFO *end=key_part+table->key_info[keynr].user_defined_key_parts; +int ha_ndbcluster::primary_key_cmp(const uchar *old_row, const uchar *new_row) { + uint keynr = table_share->primary_key; + KEY_PART_INFO *key_part = table->key_info[keynr].key_part; + KEY_PART_INFO *end = key_part + table->key_info[keynr].user_defined_key_parts; - for (; key_part != end ; key_part++) - { - if (!bitmap_is_set(table->write_set, key_part->fieldnr - 1)) - continue; + for (; key_part != end; key_part++) { + if (!bitmap_is_set(table->write_set, key_part->fieldnr - 1)) continue; /* The primary key does not allow NULLs. */ DBUG_ASSERT(!key_part->null_bit); - if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART)) - { - + if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART)) { if (key_part->field->cmp_binary((old_row + key_part->offset), (new_row + key_part->offset), - (ulong) key_part->length)) + (ulong)key_part->length)) return 1; - } - else - { - if (memcmp(old_row+key_part->offset, new_row+key_part->offset, + } else { + if (memcmp(old_row + key_part->offset, new_row + key_part->offset, key_part->length)) return 1; } @@ -5887,62 +5044,51 @@ int ha_ndbcluster::primary_key_cmp(const uchar * old_row, const uchar * new_row) return 0; } +static Ndb_exceptions_data StaticRefreshExceptionsData = { + NULL, NULL, NULL, NULL, NULL, NULL, NULL, REFRESH_ROW, false, 0}; -static Ndb_exceptions_data StaticRefreshExceptionsData= - { NULL, NULL, NULL, NULL, NULL, NULL, NULL, REFRESH_ROW, false, 0 }; - -static int -handle_row_conflict(NDB_CONFLICT_FN_SHARE* cfn_share, - const char* table_name, - const char* handling_type, - const NdbRecord* key_rec, - const NdbRecord* data_rec, - const uchar* old_row, - const uchar* new_row, - enum_conflicting_op_type op_type, - enum_conflict_cause conflict_cause, - const NdbError& conflict_error, - NdbTransaction* conflict_trans, - const MY_BITMAP *write_set, - Uint64 transaction_id) -{ +static int handle_row_conflict( + NDB_CONFLICT_FN_SHARE *cfn_share, const char *table_name, + const char *handling_type, const NdbRecord *key_rec, + const NdbRecord *data_rec, const uchar *old_row, const uchar *new_row, + enum_conflicting_op_type op_type, enum_conflict_cause conflict_cause, + const NdbError &conflict_error, NdbTransaction *conflict_trans, + const MY_BITMAP *write_set, Uint64 transaction_id) { DBUG_ENTER("handle_row_conflict"); - const uchar* row = (op_type == DELETE_ROW)? old_row : new_row; + const uchar *row = (op_type == DELETE_ROW) ? old_row : new_row; /* We will refresh the row if the conflict function requires it, or if we are handling a transactional conflict. */ - bool refresh_row = - (conflict_cause == TRANS_IN_CONFLICT) || - (cfn_share && - (cfn_share->m_flags & CFF_REFRESH_ROWS)); + bool refresh_row = (conflict_cause == TRANS_IN_CONFLICT) || + (cfn_share && (cfn_share->m_flags & CFF_REFRESH_ROWS)); - if (refresh_row) - { + if (refresh_row) { /* A conflict has been detected between an applied replicated operation * and the data in the DB. * The attempt to change the local DB will have been rejected. * We now take steps to generate a refresh Binlog event so that * other clusters will be re-aligned. */ - DBUG_PRINT("info", ("Conflict on table %s. Operation type : %s, " - "conflict cause :%s, conflict error : %u : %s", - table_name, - ((op_type == WRITE_ROW)? 
"WRITE_ROW": - (op_type == UPDATE_ROW)? "UPDATE_ROW": - "DELETE_ROW"), - ((conflict_cause == ROW_ALREADY_EXISTS)?"ROW_ALREADY_EXISTS": - (conflict_cause == ROW_DOES_NOT_EXIST)?"ROW_DOES_NOT_EXIST": - "ROW_IN_CONFLICT"), - conflict_error.code, - conflict_error.message)); + DBUG_PRINT( + "info", + ("Conflict on table %s. Operation type : %s, " + "conflict cause :%s, conflict error : %u : %s", + table_name, + ((op_type == WRITE_ROW) + ? "WRITE_ROW" + : (op_type == UPDATE_ROW) ? "UPDATE_ROW" : "DELETE_ROW"), + ((conflict_cause == ROW_ALREADY_EXISTS) + ? "ROW_ALREADY_EXISTS" + : (conflict_cause == ROW_DOES_NOT_EXIST) ? "ROW_DOES_NOT_EXIST" + : "ROW_IN_CONFLICT"), + conflict_error.code, conflict_error.message)); assert(key_rec != NULL); assert(row != NULL); - do - { + do { /* When the slave splits an epoch into batches, a conflict row detected * and refreshed in an early batch can be written to by operations in * a later batch. As the operations will not have applied, and the @@ -5950,8 +5096,7 @@ handle_row_conflict(NDB_CONFLICT_FN_SHARE* cfn_share, * it again */ if ((conflict_cause == ROW_IN_CONFLICT) && - (conflict_error.code == (int) error_op_after_refresh_op)) - { + (conflict_error.code == (int)error_op_after_refresh_op)) { /* Attempt to apply an operation after the row was refreshed * Ignore the error */ @@ -5985,9 +5130,9 @@ handle_row_conflict(NDB_CONFLICT_FN_SHARE* cfn_share, * DELETE vs DELETE conflicts by : * NOT refreshing a row when a DELETE vs DELETE conflict is detected * This should map all batching scenarios onto Case1. - * + * * Transactional algorithms - * + * * For transactional algorithms, there are multiple passes over the * epoch transaction. Earlier passes 'mark' in-conflict transactions * so that any row changes to in-conflict rows are automatically @@ -5996,16 +5141,14 @@ handle_row_conflict(NDB_CONFLICT_FN_SHARE* cfn_share, * NDB$EPOCH_TRANS chooses to ignore DELETE-DELETE conflicts entirely * and so skips refreshing rows with only DELETE-DELETE conflicts. * NDB$EPOCH2_TRANS does not ignore them, and so refreshes them. - * This behaviour is controlled by the algorthm's CF_DEL_DEL_CFT + * This behaviour is controlled by the algorthm's CF_DEL_DEL_CFT * flag at conflict detection time. - * + * * For the final pass of the transactional algorithms, every conflict * is a TRANS_IN_CONFLICT error here, so no need to adjust behaviour. - * + * */ - if ((op_type == DELETE_ROW) && - (conflict_cause == ROW_DOES_NOT_EXIST)) - { + if ((op_type == DELETE_ROW) && (conflict_cause == ROW_DOES_NOT_EXIST)) { g_ndb_slave_state.current_delete_delete_count++; DBUG_PRINT("info", ("Delete vs Delete detected, NOT refreshing")); break; @@ -6028,9 +5171,8 @@ handle_row_conflict(NDB_CONFLICT_FN_SHARE* cfn_share, ignoring this correction. */ NdbOperation::OperationOptions options; - options.optionsPresent = - NdbOperation::OperationOptions::OO_CUSTOMDATA | - NdbOperation::OperationOptions::OO_ANYVALUE; + options.optionsPresent = NdbOperation::OperationOptions::OO_CUSTOMDATA | + NdbOperation::OperationOptions::OO_ANYVALUE; options.customData = &StaticRefreshExceptionsData; options.anyValue = 0; @@ -6041,21 +5183,15 @@ handle_row_conflict(NDB_CONFLICT_FN_SHARE* cfn_share, // TODO Do we ever get non-PK key? // Keyless table? 
// Unique index - const NdbOperation* refresh_op= conflict_trans->refreshTuple(key_rec, - (const char*) row, - &options, - sizeof(options)); - if (!refresh_op) - { + const NdbOperation *refresh_op = conflict_trans->refreshTuple( + key_rec, (const char *)row, &options, sizeof(options)); + if (!refresh_op) { NdbError err = conflict_trans->getNdbError(); - if (err.status == NdbError::TemporaryError) - { + if (err.status == NdbError::TemporaryError) { /* Slave will roll back and retry entire transaction. */ ERR_RETURN(err); - } - else - { + } else { char msg[FN_REFLEN]; /* We cannot refresh a row which has Blobs, as we do not support @@ -6065,15 +5201,13 @@ handle_row_conflict(NDB_CONFLICT_FN_SHARE* cfn_share, * We will generate an error in this case */ const int NDBAPI_ERR_REFRESH_ON_BLOB_TABLE = 4343; - if (err.code == NDBAPI_ERR_REFRESH_ON_BLOB_TABLE) - { + if (err.code == NDBAPI_ERR_REFRESH_ON_BLOB_TABLE) { // Generate legacy error message instead of using // the error code and message returned from NdbApi snprintf(msg, sizeof(msg), "%s conflict handling on table %s failed as table " "has Blobs which cannot be refreshed.", - handling_type, - table_name); + handling_type, table_name); push_warning_printf(current_thd, Sql_condition::SL_WARNING, ER_EXCEPTIONS_WRITE_ERROR, @@ -6083,64 +5217,44 @@ handle_row_conflict(NDB_CONFLICT_FN_SHARE* cfn_share, DBUG_RETURN(ER_EXCEPTIONS_WRITE_ERROR); } - snprintf(msg, sizeof(msg), "Row conflict handling " - "on table %s hit Ndb error %d '%s'", - table_name, - err.code, - err.message); - push_warning_printf(current_thd, Sql_condition::SL_WARNING, - ER_EXCEPTIONS_WRITE_ERROR, - ER_THD(current_thd, ER_EXCEPTIONS_WRITE_ERROR), - msg); + snprintf(msg, sizeof(msg), + "Row conflict handling " + "on table %s hit Ndb error %d '%s'", + table_name, err.code, err.message); + push_warning_printf( + current_thd, Sql_condition::SL_WARNING, ER_EXCEPTIONS_WRITE_ERROR, + ER_THD(current_thd, ER_EXCEPTIONS_WRITE_ERROR), msg); /* Slave will stop replication. */ DBUG_RETURN(ER_EXCEPTIONS_WRITE_ERROR); } } - } while(0); // End of 'refresh' block + } while (0); // End of 'refresh' block } - DBUG_PRINT("info", ("Table %s does%s have an exceptions table", - table_name, - (cfn_share && cfn_share->m_ex_tab_writer.hasTable()) - ? "" : " not")); - if (cfn_share && - cfn_share->m_ex_tab_writer.hasTable()) - { + DBUG_PRINT( + "info", + ("Table %s does%s have an exceptions table", table_name, + (cfn_share && cfn_share->m_ex_tab_writer.hasTable()) ? "" : " not")); + if (cfn_share && cfn_share->m_ex_tab_writer.hasTable()) { NdbError err; - if (cfn_share->m_ex_tab_writer.writeRow(conflict_trans, - key_rec, - data_rec, - ::server_id, - ndb_mi_get_master_server_id(), - g_ndb_slave_state.current_master_server_epoch, - old_row, - new_row, - op_type, - conflict_cause, - transaction_id, - write_set, - err) != 0) - { - if (err.code != 0) - { - if (err.status == NdbError::TemporaryError) - { + if (cfn_share->m_ex_tab_writer.writeRow( + conflict_trans, key_rec, data_rec, ::server_id, + ndb_mi_get_master_server_id(), + g_ndb_slave_state.current_master_server_epoch, old_row, new_row, + op_type, conflict_cause, transaction_id, write_set, err) != 0) { + if (err.code != 0) { + if (err.status == NdbError::TemporaryError) { /* Slave will roll back and retry entire transaction. 
*/ ERR_RETURN(err); - } - else - { + } else { char msg[FN_REFLEN]; - snprintf(msg, sizeof(msg), "%s conflict handling " - "on table %s hit Ndb error %d '%s'", - handling_type, - table_name, - err.code, - err.message); - push_warning_printf(current_thd, Sql_condition::SL_WARNING, - ER_EXCEPTIONS_WRITE_ERROR, - ER_THD(current_thd, ER_EXCEPTIONS_WRITE_ERROR), - msg); + snprintf(msg, sizeof(msg), + "%s conflict handling " + "on table %s hit Ndb error %d '%s'", + handling_type, table_name, err.code, err.message); + push_warning_printf( + current_thd, Sql_condition::SL_WARNING, ER_EXCEPTIONS_WRITE_ERROR, + ER_THD(current_thd, ER_EXCEPTIONS_WRITE_ERROR), msg); /* Slave will stop replication. */ DBUG_RETURN(ER_EXCEPTIONS_WRITE_ERROR); } @@ -6151,16 +5265,13 @@ handle_row_conflict(NDB_CONFLICT_FN_SHARE* cfn_share, DBUG_RETURN(0); } - /** Update one record in NDB using primary key. */ -bool ha_ndbcluster::start_bulk_update() -{ +bool ha_ndbcluster::start_bulk_update() { DBUG_ENTER("ha_ndbcluster::start_bulk_update"); - if (!m_use_write && m_ignore_dup_key) - { + if (!m_use_write && m_ignore_dup_key) { DBUG_PRINT("info", ("Batching turned off as duplicate key is " "ignored by using peek_row")); DBUG_RETURN(true); @@ -6169,26 +5280,23 @@ bool ha_ndbcluster::start_bulk_update() } int ha_ndbcluster::bulk_update_row(const uchar *old_data, uchar *new_data, - uint *dup_key_found) -{ + uint *dup_key_found) { DBUG_ENTER("ha_ndbcluster::bulk_update_row"); - *dup_key_found= 0; + *dup_key_found = 0; DBUG_RETURN(ndb_update_row(old_data, new_data, 1)); } -int ha_ndbcluster::exec_bulk_update(uint *dup_key_found) -{ - NdbTransaction* trans= m_thd_ndb->trans; +int ha_ndbcluster::exec_bulk_update(uint *dup_key_found) { + NdbTransaction *trans = m_thd_ndb->trans; DBUG_ENTER("ha_ndbcluster::exec_bulk_update"); - *dup_key_found= 0; + *dup_key_found = 0; /* If a fatal error is encountered during an update op, the error * is saved and exec continues. So exec_bulk_update may be called * even when init functions fail. Check for error conditions like * an uninit'ed transaction. */ - if(unlikely(!m_thd_ndb->trans)) - { + if (unlikely(!m_thd_ndb->trans)) { DBUG_PRINT("exit", ("Transaction was not started")); int error = 0; ERR_SET(m_thd_ndb->ndb->getNdbError(), error); @@ -6211,9 +5319,7 @@ int ha_ndbcluster::exec_bulk_update(uint *dup_key_found) * enabled, exec_bulk_update does an execute_nocommit(). * - if rbwr not enabled, execute_commit() done in ndbcluster_commit(). 
*/ - if (m_thd_ndb->m_handler && - m_read_before_write_removal_possible) - { + if (m_thd_ndb->m_handler && m_read_before_write_removal_possible) { /* This is an autocommit involving only one table and rbwr is on @@ -6224,33 +5330,28 @@ int ha_ndbcluster::exec_bulk_update(uint *dup_key_found) to the update loop(which will ask handler in rbwr mode) */ DBUG_PRINT("info", ("committing auto-commit+rbwr early")); - uint ignore_count= 0; - const int ignore_error= 1; - if (execute_commit(m_thd_ndb, trans, - m_thd_ndb->m_force_send, ignore_error, - &ignore_count) != 0) - { + uint ignore_count = 0; + const int ignore_error = 1; + if (execute_commit(m_thd_ndb, trans, m_thd_ndb->m_force_send, ignore_error, + &ignore_count) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } - THD *thd= table->in_use; - if (!applying_binlog(thd)) - { + THD *thd = table->in_use; + if (!applying_binlog(thd)) { DBUG_PRINT("info", ("ignore_count: %u", ignore_count)); assert(m_rows_updated >= ignore_count); - m_rows_updated-= ignore_count; + m_rows_updated -= ignore_count; } DBUG_RETURN(0); } - if (m_thd_ndb->m_unsent_bytes == 0) - { + if (m_thd_ndb->m_unsent_bytes == 0) { DBUG_PRINT("exit", ("skip execute - no unsent bytes")); DBUG_RETURN(0); } - if (thd_allow_batch(table->in_use)) - { + if (thd_allow_batch(table->in_use)) { /* Turned on by @@transaction_allow_batching=ON or implicitly by slave exec thread @@ -6259,106 +5360,90 @@ int ha_ndbcluster::exec_bulk_update(uint *dup_key_found) DBUG_RETURN(0); } - if (m_thd_ndb->m_handler && - !m_blobs_pending) - { + if (m_thd_ndb->m_handler && !m_blobs_pending) { // Execute at commit time(in 'ndbcluster_commit') to save a round trip DBUG_PRINT("exit", ("skip execute - simple autocommit")); DBUG_RETURN(0); } - uint ignore_count= 0; + uint ignore_count = 0; if (execute_no_commit(m_thd_ndb, trans, m_ignore_no_key || m_read_before_write_removal_used, - &ignore_count) != 0) - { + &ignore_count) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } - THD *thd= table->in_use; - if (!applying_binlog(thd)) - { + THD *thd = table->in_use; + if (!applying_binlog(thd)) { assert(m_rows_updated >= ignore_count); - m_rows_updated-= ignore_count; + m_rows_updated -= ignore_count; } DBUG_RETURN(0); } -void ha_ndbcluster::end_bulk_update() -{ +void ha_ndbcluster::end_bulk_update() { DBUG_ENTER("ha_ndbcluster::end_bulk_update"); DBUG_VOID_RETURN; } -int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data) -{ +int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data) { return ndb_update_row(old_data, new_data, 0); } -void -ha_ndbcluster::setup_key_ref_for_ndb_record(const NdbRecord **key_rec, - const uchar **key_row, - const uchar *record, - bool use_active_index) -{ +void ha_ndbcluster::setup_key_ref_for_ndb_record(const NdbRecord **key_rec, + const uchar **key_row, + const uchar *record, + bool use_active_index) { DBUG_ENTER("setup_key_ref_for_ndb_record"); - if (use_active_index) - { + if (use_active_index) { /* Use unique key to access table */ DBUG_PRINT("info", ("Using unique index (%u)", active_index)); DBUG_ASSERT((table->key_info[active_index].flags & HA_NOSAME)); /* Can't use key if we didn't read it first */ DBUG_ASSERT(bitmap_is_subset(m_key_fields[active_index], table->read_set)); - *key_rec= m_index[active_index].ndb_unique_record_row; - *key_row= record; - } - else if (table_share->primary_key != MAX_KEY) - { + *key_rec = m_index[active_index].ndb_unique_record_row; + *key_row = record; + } else if 
(table_share->primary_key != MAX_KEY) { /* Use primary key to access table */ DBUG_PRINT("info", ("Using primary key")); /* Can't use pk if we didn't read it first */ DBUG_ASSERT(bitmap_is_subset(m_pk_bitmap_p, table->read_set)); - *key_rec= m_index[table_share->primary_key].ndb_unique_record_row; - *key_row= record; - } - else - { + *key_rec = m_index[table_share->primary_key].ndb_unique_record_row; + *key_row = record; + } else { /* Use hidden primary key previously read into m_ref. */ DBUG_PRINT("info", ("Using hidden primary key (%llu)", m_ref)); /* Can't use hidden pk if we didn't read it first */ DBUG_ASSERT(bitmap_is_subset(m_pk_bitmap_p, table->read_set)); DBUG_ASSERT(m_read_before_write_removal_used == false); - *key_rec= m_ndb_hidden_key_record; - *key_row= (const uchar *)(&m_ref); + *key_rec = m_ndb_hidden_key_record; + *key_row = (const uchar *)(&m_ref); } DBUG_VOID_RETURN; } - /* Update one record in NDB using primary key */ int ha_ndbcluster::ndb_update_row(const uchar *old_data, uchar *new_data, - int is_bulk_update) -{ - THD *thd= table->in_use; - Thd_ndb *thd_ndb= m_thd_ndb; - NdbScanOperation* cursor= m_active_cursor; + int is_bulk_update) { + THD *thd = table->in_use; + Thd_ndb *thd_ndb = m_thd_ndb; + NdbScanOperation *cursor = m_active_cursor; const NdbOperation *op; - uint32 old_part_id= ~uint32(0), new_part_id= ~uint32(0); + uint32 old_part_id = ~uint32(0), new_part_id = ~uint32(0); int error = 0; longlong func_value = 0; Uint32 func_value_uint32; - bool have_pk= (table_share->primary_key != MAX_KEY); - bool pk_update= (!m_read_before_write_removal_possible && - have_pk && - bitmap_is_overlapping(table->write_set, m_pk_bitmap_p) && - primary_key_cmp(old_data, new_data)); - bool batch_allowed= !m_update_cannot_batch && - (is_bulk_update || thd_allow_batch(thd)); + bool have_pk = (table_share->primary_key != MAX_KEY); + bool pk_update = (!m_read_before_write_removal_possible && have_pk && + bitmap_is_overlapping(table->write_set, m_pk_bitmap_p) && + primary_key_cmp(old_data, new_data)); + bool batch_allowed = + !m_update_cannot_batch && (is_bulk_update || thd_allow_batch(thd)); NdbOperation::SetValueSpec sets[2]; - Uint32 num_sets= 0; + Uint32 num_sets = 0; DBUG_ENTER("ndb_update_row"); @@ -6366,66 +5451,56 @@ int ha_ndbcluster::ndb_update_row(const uchar *old_data, uchar *new_data, * (Manual Binlog application...) */ /* TODO : Consider hinting */ - if (unlikely((!m_thd_ndb->trans) && - !get_transaction(error))) - { + if (unlikely((!m_thd_ndb->trans) && !get_transaction(error))) { DBUG_RETURN(error); } - NdbTransaction *trans= m_thd_ndb->trans; + NdbTransaction *trans = m_thd_ndb->trans; DBUG_ASSERT(trans); error = check_slave_state(thd); - if (unlikely(error)) - DBUG_RETURN(error); + if (unlikely(error)) DBUG_RETURN(error); /* * If IGNORE the ignore constraint violations on primary and unique keys, * but check that it is not part of INSERT ... ON DUPLICATE KEY UPDATE */ if (m_ignore_dup_key && (thd->lex->sql_command == SQLCOM_UPDATE || - thd->lex->sql_command == SQLCOM_UPDATE_MULTI)) - { - const NDB_WRITE_OP write_op= (pk_update) ? NDB_PK_UPDATE : NDB_UPDATE; - int peek_res= peek_indexed_rows(new_data, write_op); - - if (!peek_res) - { + thd->lex->sql_command == SQLCOM_UPDATE_MULTI)) { + const NDB_WRITE_OP write_op = (pk_update) ? 
NDB_PK_UPDATE : NDB_UPDATE; + int peek_res = peek_indexed_rows(new_data, write_op); + + if (!peek_res) { DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY); } - if (peek_res != HA_ERR_KEY_NOT_FOUND) - DBUG_RETURN(peek_res); + if (peek_res != HA_ERR_KEY_NOT_FOUND) DBUG_RETURN(peek_res); } ha_statistic_increment(&System_status_var::ha_update_count); - bool skip_partition_for_unique_index= false; - if (m_use_partition_pruning) - { - if (!cursor && m_read_before_write_removal_used) - { - const NDB_INDEX_TYPE type= get_index_type(active_index); + bool skip_partition_for_unique_index = false; + if (m_use_partition_pruning) { + if (!cursor && m_read_before_write_removal_used) { + const NDB_INDEX_TYPE type = get_index_type(active_index); /* Ndb unique indexes are global so when m_read_before_write_removal_used is active the unique index can be used directly for update without finding the partitions */ - if (type == UNIQUE_INDEX || - type == UNIQUE_ORDERED_INDEX) - { - skip_partition_for_unique_index= true; + if (type == UNIQUE_INDEX || type == UNIQUE_ORDERED_INDEX) { + skip_partition_for_unique_index = true; goto skip_partition_pruning; } } - if ((error= get_parts_for_update(old_data, new_data, table->record[0], - m_part_info, &old_part_id, &new_part_id, - &func_value))) - { - m_part_info->err_value= func_value; + if ((error = get_parts_for_update(old_data, new_data, table->record[0], + m_part_info, &old_part_id, &new_part_id, + &func_value))) { + m_part_info->err_value = func_value; DBUG_RETURN(error); } - DBUG_PRINT("info", ("old_part_id: %u new_part_id: %u", old_part_id, new_part_id)); + DBUG_PRINT("info", + ("old_part_id: %u new_part_id: %u", old_part_id, new_part_id)); skip_partition_pruning: (void)0; } @@ -6433,9 +5508,8 @@ int ha_ndbcluster::ndb_update_row(const uchar *old_data, uchar *new_data, /* * Check for update of primary key or partition change * for special handling - */ - if (pk_update || old_part_id != new_part_id) - { + */ + if (pk_update || old_part_id != new_part_id) { DBUG_RETURN(ndb_pk_update_row(thd, old_data, new_data)); } /* @@ -6443,10 +5517,9 @@ int ha_ndbcluster::ndb_update_row(const uchar *old_data, uchar *new_data, then we need to update the auto_increment counter */ if (table->found_next_number_field && - bitmap_is_set(table->write_set, - table->found_next_number_field->field_index) && - (error= set_auto_inc(thd, table->found_next_number_field))) - { + bitmap_is_set(table->write_set, + table->found_next_number_field->field_index) && + (error = set_auto_inc(thd, table->found_next_number_field))) { DBUG_RETURN(error); } /* @@ -6456,229 +5529,190 @@ int ha_ndbcluster::ndb_update_row(const uchar *old_data, uchar *new_data, */ bitmap_copy(&m_bitmap, table->write_set); bitmap_subtract(&m_bitmap, m_pk_bitmap_p); - uchar *mask= m_table_map->get_column_mask(& m_bitmap); + uchar *mask = m_table_map->get_column_mask(&m_bitmap); DBUG_ASSERT(!pk_update); NdbOperation::OperationOptions *poptions = NULL; NdbOperation::OperationOptions options; - options.optionsPresent=0; + options.optionsPresent = 0; - /* Need to set the value of any user-defined partitioning function. + /* Need to set the value of any user-defined partitioning function. 
     (except for when using unique index) */
-  if (m_user_defined_partitioning && !skip_partition_for_unique_index)
-  {
+  if (m_user_defined_partitioning && !skip_partition_for_unique_index) {
    if (func_value >= INT_MAX32)
-      func_value_uint32= INT_MAX32;
+      func_value_uint32 = INT_MAX32;
    else
-      func_value_uint32= (uint32)func_value;
-    sets[num_sets].column= get_partition_id_column();
-    sets[num_sets].value= &func_value_uint32;
+      func_value_uint32 = (uint32)func_value;
+    sets[num_sets].column = get_partition_id_column();
+    sets[num_sets].value = &func_value_uint32;
    num_sets++;

-    if (!cursor)
-    {
-      options.optionsPresent|= NdbOperation::OperationOptions::OO_PARTITION_ID;
-      options.partitionId= new_part_id;
+    if (!cursor) {
+      options.optionsPresent |= NdbOperation::OperationOptions::OO_PARTITION_ID;
+      options.partitionId = new_part_id;
    }
  }
-  
+
  eventSetAnyValue(thd, &options);
-  
-  const bool need_flush=
+
+  const bool need_flush =
      thd_ndb->add_row_check_if_batch_full(m_bytes_per_write);

-  const Uint32 authorValue = 1;
-  if ((thd->slave_thread) &&
-      (m_table->getExtraRowAuthorBits()))
-  {
-    /* Set author to indicate slave updated last */
-    sets[num_sets].column= NdbDictionary::Column::ROW_AUTHOR;
-    sets[num_sets].value= &authorValue;
-    num_sets++;
-  }
-
-  if (num_sets)
-  {
-    options.optionsPresent|= NdbOperation::OperationOptions::OO_SETVALUE;
-    options.extraSetValues= sets;
-    options.numExtraSetValues= num_sets;
-  }
-
-  if (thd->slave_thread || THDVAR(thd, deferred_constraints))
-  {
+  const Uint32 authorValue = 1;
+  if ((thd->slave_thread) && (m_table->getExtraRowAuthorBits())) {
+    /* Set author to indicate slave updated last */
+    sets[num_sets].column = NdbDictionary::Column::ROW_AUTHOR;
+    sets[num_sets].value = &authorValue;
+    num_sets++;
+  }
+
+  if (num_sets) {
+    options.optionsPresent |= NdbOperation::OperationOptions::OO_SETVALUE;
+    options.extraSetValues = sets;
+    options.numExtraSetValues = num_sets;
+  }
+
+  if (thd->slave_thread || THDVAR(thd, deferred_constraints)) {
    options.optionsPresent |=
-      NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS;
+        NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS;
  }

-  if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS))
-  {
+  if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) {
    DBUG_PRINT("info", ("Disabling foreign keys"));
-    options.optionsPresent |=
-      NdbOperation::OperationOptions::OO_DISABLE_FK;
+    options.optionsPresent |= NdbOperation::OperationOptions::OO_DISABLE_FK;
  }

-  if (cursor)
-  {
+  if (cursor) {
    /*
      We are scanning records and want to update the record
-      that was just found, call updateCurrentTuple on the cursor 
+      that was just found, call updateCurrentTuple on the cursor
      to take over the lock to a new update operation
-      And thus setting the primary key of the record from 
+      And thus setting the primary key of the record from
      the active record in cursor
    */
    DBUG_PRINT("info", ("Calling updateTuple on cursor, write_set=0x%x",
                        table->write_set->bitmap[0]));

-    if (options.optionsPresent != 0)
-      poptions = &options;
+    if (options.optionsPresent != 0) poptions = &options;

-    if (!(op= cursor->updateCurrentTuple(trans, m_ndb_record,
-                                         (const char*)new_data, mask,
-                                         poptions,
-                                         sizeof(NdbOperation::OperationOptions))))
+    if (!(op = cursor->updateCurrentTuple(
+              trans, m_ndb_record, (const char *)new_data, mask, poptions,
+              sizeof(NdbOperation::OperationOptions))))
      ERR_RETURN(trans->getNdbError());

-    m_lock_tuple= false;
-    thd_ndb->m_unsent_bytes+= 12;
-  }
-  else
-  {
+    m_lock_tuple = false;
+    thd_ndb->m_unsent_bytes += 12;
+  } else {
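/* Editorial aside, not part of the patch: a minimal standalone sketch of the
   column-mask convention assumed by the NdbRecord operations in this
   function. The "mask" built above from write_set minus the primary-key
   bitmap is, as far as these NdbApi calls are concerned, a byte array with
   one bit per NDB column in attribute-id order (bit i at byte i >> 3,
   position i & 7). The table width and column numbers below are
   hypothetical. */
#include <cstdio>
#include <vector>

// Set the bit for NDB column 'col' in a bit-per-column mask.
static void mask_set_column(unsigned char *mask, unsigned col) {
  mask[col >> 3] |= static_cast<unsigned char>(1u << (col & 7));
}

int main() {
  const unsigned num_columns = 12;  // hypothetical table width
  std::vector<unsigned char> mask((num_columns + 7) / 8, 0);

  // Pretend the UPDATE touched columns 1, 3 and 9, none of them part of
  // the primary key (mirrors m_bitmap = write_set - m_pk_bitmap_p above).
  for (unsigned col : {1u, 3u, 9u}) mask_set_column(mask.data(), col);

  for (unsigned char b : mask) std::printf("%02x ", b);  // prints "0a 02"
  std::printf("\n");
  return 0;
}
/* Columns whose bits are clear are left untouched by the update, which is
   what lets the server send only the changed columns over the wire. */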
    const NdbRecord *key_rec;
    const uchar *key_row;
    setup_key_ref_for_ndb_record(&key_rec, &key_row, new_data,
-                                m_read_before_write_removal_used);
+                                 m_read_before_write_removal_used);

    bool avoidNdbApiWriteOp = true; /* Default update op for ndb_update_row */
-    Uint32 buffer[ MAX_CONFLICT_INTERPRETED_PROG_SIZE ];
+    Uint32 buffer[MAX_CONFLICT_INTERPRETED_PROG_SIZE];
    NdbInterpretedCode code(m_table, buffer,
-                            sizeof(buffer)/sizeof(buffer[0]));
+                            sizeof(buffer) / sizeof(buffer[0]));

-    if (thd->slave_thread)
-    {
+    if (thd->slave_thread) {
      bool conflict_handled = false;
      /* Conflict resolution in slave thread. */
-      DBUG_PRINT("info", ("Slave thread, preparing conflict resolution for update with mask : %x", *((Uint32*)mask)));
-
-      if (unlikely((error = prepare_conflict_detection(UPDATE_ROW,
-                                                       key_rec,
-                                                       m_ndb_record,
-                                                       old_data,
-                                                       new_data,
-                                                       table->write_set,
-                                                       trans,
-                                                       &code,
-                                                       &options,
-                                                       conflict_handled,
-                                                       avoidNdbApiWriteOp))))
+      DBUG_PRINT("info", ("Slave thread, preparing conflict resolution for "
+                          "update with mask : %x",
+                          *((Uint32 *)mask)));
+
+      if (unlikely((error = prepare_conflict_detection(
+                        UPDATE_ROW, key_rec, m_ndb_record, old_data, new_data,
+                        table->write_set, trans, &code, &options,
+                        conflict_handled, avoidNdbApiWriteOp))))
        DBUG_RETURN(error);

-      if (unlikely(conflict_handled))
-      {
+      if (unlikely(conflict_handled)) {
        /* No need to continue with operation definition */
        /* TODO : Ensure batch execution */
        DBUG_RETURN(0);
      }
    }

-    if (options.optionsPresent !=0)
-      poptions= &options;
+    if (options.optionsPresent != 0) poptions = &options;

-    if (likely(avoidNdbApiWriteOp))
-    {
-      if (!(op= trans->updateTuple(key_rec, (const char *)key_row,
-                                   m_ndb_record, (const char*)new_data, mask,
-                                   poptions,
+    if (likely(avoidNdbApiWriteOp)) {
+      if (!(op =
+                trans->updateTuple(key_rec, (const char *)key_row, m_ndb_record,
+                                   (const char *)new_data, mask, poptions,
                                   sizeof(NdbOperation::OperationOptions))))
        ERR_RETURN(trans->getNdbError());
-    }
-    else
-    {
+    } else {
      DBUG_PRINT("info", ("Update op using writeTuple"));
-      if (!(op= trans->writeTuple(key_rec, (const char *)key_row,
-                                  m_ndb_record, (const char*)new_data, mask,
-                                  poptions,
-                                  sizeof(NdbOperation::OperationOptions))))
+      if (!(op = trans->writeTuple(key_rec, (const char *)key_row, m_ndb_record,
+                                   (const char *)new_data, mask, poptions,
+                                   sizeof(NdbOperation::OperationOptions))))
        ERR_RETURN(trans->getNdbError());
    }
  }

-  uint blob_count= 0;
-  if (uses_blob_value(table->write_set))
-  {
-    int row_offset= (int)(new_data - table->record[0]);
-    int res= set_blob_values(op, row_offset, table->write_set, &blob_count,
-                             (batch_allowed && !need_flush));
-    if (res != 0)
-      DBUG_RETURN(res);
+  uint blob_count = 0;
+  if (uses_blob_value(table->write_set)) {
+    int row_offset = (int)(new_data - table->record[0]);
+    int res = set_blob_values(op, row_offset, table->write_set, &blob_count,
+                              (batch_allowed && !need_flush));
+    if (res != 0) DBUG_RETURN(res);
  }

-  uint ignore_count= 0;
+  uint ignore_count = 0;
  /*
    Batch update operation if we are doing a scan for update, unless
    there exist UPDATE AFTER triggers
  */
-  if (m_update_cannot_batch ||
-      !(cursor || (batch_allowed && have_pk)) ||
-      need_flush)
-  {
+  if (m_update_cannot_batch || !(cursor || (batch_allowed && have_pk)) ||
+      need_flush) {
    if (execute_no_commit(m_thd_ndb, trans,
                          m_ignore_no_key || m_read_before_write_removal_used,
-                          &ignore_count) != 0)
-    {
+                          &ignore_count) != 0) {
      no_uncommitted_rows_execute_failure();
      DBUG_RETURN(ndb_err(trans));
    }
-  }
else if (blob_count > 0) + m_blobs_pending = true; m_rows_updated++; - if (!applying_binlog(thd)) - { + if (!applying_binlog(thd)) { assert(m_rows_updated >= ignore_count); - m_rows_updated-= ignore_count; + m_rows_updated -= ignore_count; } DBUG_RETURN(0); } - /* handler delete interface */ -int ha_ndbcluster::delete_row(const uchar *record) -{ +int ha_ndbcluster::delete_row(const uchar *record) { return ndb_delete_row(record, false); } -bool ha_ndbcluster::start_bulk_delete() -{ +bool ha_ndbcluster::start_bulk_delete() { DBUG_ENTER("start_bulk_delete"); m_is_bulk_delete = true; - DBUG_RETURN(0); // Bulk delete used by handler + DBUG_RETURN(0); // Bulk delete used by handler } -int ha_ndbcluster::end_bulk_delete() -{ - NdbTransaction* trans= m_thd_ndb->trans; +int ha_ndbcluster::end_bulk_delete() { + NdbTransaction *trans = m_thd_ndb->trans; DBUG_ENTER("end_bulk_delete"); - assert(m_is_bulk_delete); // Don't allow end() without start() + assert(m_is_bulk_delete); // Don't allow end() without start() m_is_bulk_delete = false; // m_handler must be NULL or point to _this_ handler instance assert(m_thd_ndb->m_handler == NULL || m_thd_ndb->m_handler == this); - if (unlikely(trans == NULL)) - { + if (unlikely(trans == NULL)) { /* Problem with late starting transaction, do nothing here */ DBUG_RETURN(0); } - if (m_thd_ndb->m_handler && - m_read_before_write_removal_possible) - { + if (m_thd_ndb->m_handler && m_read_before_write_removal_possible) { /* This is an autocommit involving only one table and rbwr is on @@ -6689,34 +5723,29 @@ int ha_ndbcluster::end_bulk_delete() to the delete loop(which will ask handler in rbwr mode) */ DBUG_PRINT("info", ("committing auto-commit+rbwr early")); - uint ignore_count= 0; - const int ignore_error= 1; - if (execute_commit(m_thd_ndb, trans, - m_thd_ndb->m_force_send, ignore_error, - &ignore_count) != 0) - { + uint ignore_count = 0; + const int ignore_error = 1; + if (execute_commit(m_thd_ndb, trans, m_thd_ndb->m_force_send, ignore_error, + &ignore_count) != 0) { no_uncommitted_rows_execute_failure(); m_rows_deleted = 0; DBUG_RETURN(ndb_err(trans)); } - THD *thd= table->in_use; - if (!applying_binlog(thd)) - { + THD *thd = table->in_use; + if (!applying_binlog(thd)) { DBUG_PRINT("info", ("ignore_count: %u", ignore_count)); assert(m_rows_deleted >= ignore_count); - m_rows_deleted-= ignore_count; + m_rows_deleted -= ignore_count; } DBUG_RETURN(0); } - if (m_thd_ndb->m_unsent_bytes == 0) - { + if (m_thd_ndb->m_unsent_bytes == 0) { DBUG_PRINT("exit", ("skip execute - no unsent bytes")); DBUG_RETURN(0); } - if (thd_allow_batch(table->in_use)) - { + if (thd_allow_batch(table->in_use)) { /* Turned on by @@transaction_allow_batching=ON or implicitly by slave exec thread @@ -6725,48 +5754,43 @@ int ha_ndbcluster::end_bulk_delete() DBUG_RETURN(0); } - if (m_thd_ndb->m_handler) - { + if (m_thd_ndb->m_handler) { // Execute at commit time(in 'ndbcluster_commit') to save a round trip DBUG_PRINT("exit", ("skip execute - simple autocommit")); DBUG_RETURN(0); } - uint ignore_count= 0; + uint ignore_count = 0; if (execute_no_commit(m_thd_ndb, trans, m_ignore_no_key || m_read_before_write_removal_used, - &ignore_count) != 0) - { + &ignore_count) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } - THD *thd= table->in_use; - if (!applying_binlog(thd)) - { + THD *thd = table->in_use; + if (!applying_binlog(thd)) { assert(m_rows_deleted >= ignore_count); - m_rows_deleted-= ignore_count; + m_rows_deleted -= ignore_count; 
no_uncommitted_rows_update(ignore_count); } DBUG_RETURN(0); } - /** Delete one record from NDB, using primary key . */ int ha_ndbcluster::ndb_delete_row(const uchar *record, - bool primary_key_update) -{ - THD *thd= table->in_use; - Thd_ndb *thd_ndb= m_thd_ndb; - NdbScanOperation* cursor= m_active_cursor; + bool primary_key_update) { + THD *thd = table->in_use; + Thd_ndb *thd_ndb = m_thd_ndb; + NdbScanOperation *cursor = m_active_cursor; const NdbOperation *op; - uint32 part_id= ~uint32(0); + uint32 part_id = ~uint32(0); int error = 0; - bool allow_batch= !m_delete_cannot_batch && - (m_is_bulk_delete || thd_allow_batch(thd)); + bool allow_batch = + !m_delete_cannot_batch && (m_is_bulk_delete || thd_allow_batch(thd)); DBUG_ENTER("ndb_delete_row"); @@ -6774,43 +5798,35 @@ int ha_ndbcluster::ndb_delete_row(const uchar *record, * (Manual Binlog application...) */ /* TODO : Consider hinting */ - if (unlikely((!m_thd_ndb->trans) && - !get_transaction(error))) - { + if (unlikely((!m_thd_ndb->trans) && !get_transaction(error))) { DBUG_RETURN(error); } - - NdbTransaction *trans= m_thd_ndb->trans; + + NdbTransaction *trans = m_thd_ndb->trans; DBUG_ASSERT(trans); error = check_slave_state(thd); - if (unlikely(error)) - DBUG_RETURN(error); + if (unlikely(error)) DBUG_RETURN(error); ha_statistic_increment(&System_status_var::ha_delete_count); - bool skip_partition_for_unique_index= false; - if (m_use_partition_pruning) - { - if (!cursor && m_read_before_write_removal_used) - { - const NDB_INDEX_TYPE type= get_index_type(active_index); + bool skip_partition_for_unique_index = false; + if (m_use_partition_pruning) { + if (!cursor && m_read_before_write_removal_used) { + const NDB_INDEX_TYPE type = get_index_type(active_index); /* Ndb unique indexes are global so when m_read_before_write_removal_used is active the unique index can be used directly for deleting without finding the partitions */ - if (type == UNIQUE_INDEX || - type == UNIQUE_ORDERED_INDEX) - { - skip_partition_for_unique_index= true; + if (type == UNIQUE_INDEX || type == UNIQUE_ORDERED_INDEX) { + skip_partition_for_unique_index = true; goto skip_partition_pruning; } } - if ((error= get_part_for_delete(record, table->record[0], m_part_info, - &part_id))) - { + if ((error = get_part_for_delete(record, table->record[0], m_part_info, + &part_id))) { DBUG_RETURN(error); } skip_partition_pruning: @@ -6819,114 +5835,95 @@ int ha_ndbcluster::ndb_delete_row(const uchar *record, NdbOperation::OperationOptions options; NdbOperation::OperationOptions *poptions = NULL; - options.optionsPresent=0; + options.optionsPresent = 0; eventSetAnyValue(thd, &options); /* Poor approx. 
let delete ~ tabsize / 4 */ - uint delete_size= 12 + (m_bytes_per_write >> 2); - const bool need_flush = - thd_ndb->add_row_check_if_batch_full(delete_size); + uint delete_size = 12 + (m_bytes_per_write >> 2); + const bool need_flush = thd_ndb->add_row_check_if_batch_full(delete_size); - if (thd->slave_thread || THDVAR(thd, deferred_constraints)) - { + if (thd->slave_thread || THDVAR(thd, deferred_constraints)) { options.optionsPresent |= - NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS; + NdbOperation::OperationOptions::OO_DEFERRED_CONSTAINTS; } - if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) - { + if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { DBUG_PRINT("info", ("Disabling foreign keys")); - options.optionsPresent |= - NdbOperation::OperationOptions::OO_DISABLE_FK; + options.optionsPresent |= NdbOperation::OperationOptions::OO_DISABLE_FK; } - if (cursor) - { - if (options.optionsPresent != 0) - poptions = &options; + if (cursor) { + if (options.optionsPresent != 0) poptions = &options; /* We are scanning records and want to delete the record - that was just found, call deleteTuple on the cursor + that was just found, call deleteTuple on the cursor to take over the lock to a new delete operation - And thus setting the primary key of the record from + And thus setting the primary key of the record from the active record in cursor */ DBUG_PRINT("info", ("Calling deleteTuple on cursor")); - if ((op = cursor->deleteCurrentTuple(trans, m_ndb_record, - NULL, // result_row - NULL, // result_mask - poptions, - sizeof(NdbOperation::OperationOptions))) == 0) - ERR_RETURN(trans->getNdbError()); - m_lock_tuple= false; - thd_ndb->m_unsent_bytes+= 12; + if ((op = cursor->deleteCurrentTuple( + trans, m_ndb_record, + NULL, // result_row + NULL, // result_mask + poptions, sizeof(NdbOperation::OperationOptions))) == 0) + ERR_RETURN(trans->getNdbError()); + m_lock_tuple = false; + thd_ndb->m_unsent_bytes += 12; no_uncommitted_rows_update(-1); m_rows_deleted++; - if (!(primary_key_update || m_delete_cannot_batch)) - { + if (!(primary_key_update || m_delete_cannot_batch)) { // If deleting from cursor, NoCommit will be handled in next_result DBUG_RETURN(0); } - } - else - { + } else { const NdbRecord *key_rec; const uchar *key_row; - if (m_user_defined_partitioning && !skip_partition_for_unique_index) - { - options.optionsPresent|= NdbOperation::OperationOptions::OO_PARTITION_ID; - options.partitionId= part_id; + if (m_user_defined_partitioning && !skip_partition_for_unique_index) { + options.optionsPresent |= NdbOperation::OperationOptions::OO_PARTITION_ID; + options.partitionId = part_id; } setup_key_ref_for_ndb_record(&key_rec, &key_row, record, - m_read_before_write_removal_used); + m_read_before_write_removal_used); - Uint32 buffer[ MAX_CONFLICT_INTERPRETED_PROG_SIZE ]; + Uint32 buffer[MAX_CONFLICT_INTERPRETED_PROG_SIZE]; NdbInterpretedCode code(m_table, buffer, - sizeof(buffer)/sizeof(buffer[0])); - if (thd->slave_thread) - { - bool conflict_handled = false; - bool dummy_delete_does_not_care = false; + sizeof(buffer) / sizeof(buffer[0])); + if (thd->slave_thread) { + bool conflict_handled = false; + bool dummy_delete_does_not_care = false; /* Conflict resolution in slave thread. 
*/ - if (unlikely((error = prepare_conflict_detection(DELETE_ROW, - key_rec, - m_ndb_record, - key_row, /* old_data */ - NULL, /* new_data */ - table->write_set, - trans, - &code, - &options, - conflict_handled, - dummy_delete_does_not_care)))) + if (unlikely( + (error = prepare_conflict_detection( + DELETE_ROW, key_rec, m_ndb_record, key_row, /* old_data */ + NULL, /* new_data */ + table->write_set, trans, &code, &options, conflict_handled, + dummy_delete_does_not_care)))) DBUG_RETURN(error); - if (unlikely(conflict_handled)) - { + if (unlikely(conflict_handled)) { /* No need to continue with operation definition */ /* TODO : Ensure batch execution */ DBUG_RETURN(0); } } - if (options.optionsPresent != 0) - poptions= &options; + if (options.optionsPresent != 0) poptions = &options; - if (!(op=trans->deleteTuple(key_rec, (const char *)key_row, - m_ndb_record, - NULL, // row - NULL, // mask - poptions, - sizeof(NdbOperation::OperationOptions)))) + if (!(op = trans->deleteTuple(key_rec, (const char *)key_row, m_ndb_record, + NULL, // row + NULL, // mask + poptions, + sizeof(NdbOperation::OperationOptions)))) ERR_RETURN(trans->getNdbError()); no_uncommitted_rows_update(-1); @@ -6953,36 +5950,30 @@ int ha_ndbcluster::ndb_delete_row(const uchar *record, been aborted. */ - if ( allow_batch && - table_share->primary_key != MAX_KEY && - !primary_key_update && - !need_flush) - { + if (allow_batch && table_share->primary_key != MAX_KEY && + !primary_key_update && !need_flush) { DBUG_RETURN(0); } } // Execute delete operation - uint ignore_count= 0; + uint ignore_count = 0; if (execute_no_commit(m_thd_ndb, trans, m_ignore_no_key || m_read_before_write_removal_used, - &ignore_count) != 0) - { + &ignore_count) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } - if (!primary_key_update) - { - if (!applying_binlog(thd)) - { + if (!primary_key_update) { + if (!applying_binlog(thd)) { assert(m_rows_deleted >= ignore_count); - m_rows_deleted-= ignore_count; + m_rows_deleted -= ignore_count; no_uncommitted_rows_update(ignore_count); } } DBUG_RETURN(0); } - + /** Unpack a record returned from a scan. We copy field-for-field to @@ -6991,62 +5982,52 @@ int ha_ndbcluster::ndb_delete_row(const uchar *record, Note that we do not unpack all returned rows; some primary/unique key operations can read directly into the destination row. */ -int ha_ndbcluster::unpack_record(uchar *dst_row, const uchar *src_row) -{ +int ha_ndbcluster::unpack_record(uchar *dst_row, const uchar *src_row) { DBUG_ASSERT(src_row != nullptr); - ptrdiff_t dst_offset= dst_row - table->record[0]; - ptrdiff_t src_offset= src_row - table->record[0]; + ptrdiff_t dst_offset = dst_row - table->record[0]; + ptrdiff_t src_offset = src_row - table->record[0]; /* Initialize the NULL bitmap. */ memset(dst_row, 0xff, table->s->null_bytes); - uchar *blob_ptr= m_blobs_buffer; + uchar *blob_ptr = m_blobs_buffer; - for (uint i= 0; i < table_share->fields; i++) - { - if (!bitmap_is_set(table->read_set, i)) - continue; + for (uint i = 0; i < table_share->fields; i++) { + if (!bitmap_is_set(table->read_set, i)) continue; - Field *field= table->field[i]; - if (!field->stored_in_db) - continue; + Field *field = table->field[i]; + if (!field->stored_in_db) continue; - if (likely(!(field->flags & BLOB_FLAG))) - { - if (field->is_real_null(src_offset)) - { + if (likely(!(field->flags & BLOB_FLAG))) { + if (field->is_real_null(src_offset)) { /* NULL bits already set -> no further action needed. 
*/ - } - else if (likely(field->type() != MYSQL_TYPE_BIT)) - { + } else if (likely(field->type() != MYSQL_TYPE_BIT)) { /* A normal, non-NULL field (not blob or bit type). Only copy actually used bytes if varstrings. */ - const uint32 actual_length= field_used_length(field,src_offset); + const uint32 actual_length = field_used_length(field, src_offset); field->set_notnull(dst_offset); - memcpy(field->ptr+dst_offset, field->ptr+src_offset, actual_length); - } - else //MYSQL_TYPE_BIT + memcpy(field->ptr + dst_offset, field->ptr + src_offset, actual_length); + } else // MYSQL_TYPE_BIT { - Field_bit *field_bit= down_cast(field); + Field_bit *field_bit = down_cast(field); field->move_field_offset(src_offset); - longlong value= field_bit->val_int(); - field->move_field_offset(dst_offset-src_offset); + longlong value = field_bit->val_int(); + field->move_field_offset(dst_offset - src_offset); field_bit->set_notnull(); /* Field_bit in DBUG requires the bit set in write_set for store(). */ - my_bitmap_map *old_map= - dbug_tmp_use_all_columns(table, table->write_set); + my_bitmap_map *old_map = + dbug_tmp_use_all_columns(table, table->write_set); ndbcluster::ndbrequire(field_bit->store(value, true) == 0); dbug_tmp_restore_column_map(table->write_set, old_map); field->move_field_offset(-dst_offset); } - } - else // BLOB_FLAG + } else // BLOB_FLAG { - Field_blob *field_blob= (Field_blob *)field; - NdbBlob *ndb_blob= m_value[i].blob; + Field_blob *field_blob = (Field_blob *)field; + NdbBlob *ndb_blob = m_value[i].blob; /* unpack_record *only* called for scan result processing * *while* the scan is open and the Blob is active. * Verify Blob state to be certain. @@ -7056,49 +6037,42 @@ int ha_ndbcluster::unpack_record(uchar *dst_row, const uchar *src_row) DBUG_ASSERT(ndb_blob->getState() == NdbBlob::Active); int isNull; ndbcluster::ndbrequire(ndb_blob->getNull(isNull) == 0); - Uint64 len64= 0; + Uint64 len64 = 0; field_blob->move_field_offset(dst_offset); - if (!isNull) - { + if (!isNull) { ndbcluster::ndbrequire(ndb_blob->getLength(len64) == 0); ndbcluster::ndbrequire(len64 <= (Uint64)0xffffffff); - if(len64 > field_blob->max_data_length()) - { + if (len64 > field_blob->max_data_length()) { len64 = calc_ndb_blob_len(ndb_blob->getColumn()->getCharset(), - blob_ptr, field_blob->max_data_length()); + blob_ptr, field_blob->max_data_length()); // push a warning - push_warning_printf(table->in_use, Sql_condition::SL_WARNING, - WARN_DATA_TRUNCATED, - "Truncated value from TEXT field \'%s\'", field_blob->field_name); - + push_warning_printf( + table->in_use, Sql_condition::SL_WARNING, WARN_DATA_TRUNCATED, + "Truncated value from TEXT field \'%s\'", field_blob->field_name); } field->set_notnull(); } /* Need not set_null(), as we initialized null bits to 1 above. */ field_blob->set_ptr((uint32)len64, blob_ptr); field_blob->move_field_offset(-dst_offset); - blob_ptr+= (len64 + 7) & ~((Uint64)7); + blob_ptr += (len64 + 7) & ~((Uint64)7); } } // for(... 
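/* Editorial aside, not part of the patch: why the copy loop above can move
   only field_used_length() bytes for a varstring instead of the whole
   fixed-size field buffer. A MySQL VARCHAR buffer starts with 1 or 2
   little-endian length bytes followed by the payload; everything after the
   payload carries no information (compare the 2-byte decode in position()
   further down: ptr[0] + ptr[1] * 256). The sizes below are hypothetical. */
#include <cstdio>
#include <cstring>

// Used length = the length prefix itself plus the payload it describes.
static size_t varstring_used_length(const unsigned char *ptr,
                                    unsigned length_bytes) {
  const size_t data_len = (length_bytes == 1)
                              ? ptr[0]
                              : ptr[0] + (static_cast<size_t>(ptr[1]) << 8);
  return length_bytes + data_len;
}

int main() {
  unsigned char buf[302] = {0};  // e.g. a VARCHAR(300) with 2 length bytes
  buf[0] = 3;                    // low byte of the stored length
  buf[1] = 0;                    // high byte
  std::memcpy(buf + 2, "abc", 3);

  // Only 5 of the 302 buffer bytes need to be copied between row buffers.
  std::printf("copy %zu of %zu bytes\n", varstring_used_length(buf, 2),
              sizeof(buf));
  return 0;
}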
- if (unlikely(!m_cond.check_condition())) - { + if (unlikely(!m_cond.check_condition())) { return HA_ERR_KEY_NOT_FOUND; // False condition } - DBUG_ASSERT(pushed_cond == nullptr || const_cast(pushed_cond)->val_int()); + DBUG_ASSERT(pushed_cond == nullptr || + const_cast(pushed_cond)->val_int()); return 0; } int ha_ndbcluster::unpack_record_and_set_generated_fields( - uchar *dst_row, - const uchar *src_row) -{ + uchar *dst_row, const uchar *src_row) { const int res = unpack_record(dst_row, src_row); - if (res == 0 && - Ndb_table_map::has_virtual_gcol(table)) - { + if (res == 0 && Ndb_table_map::has_virtual_gcol(table)) { update_generated_read_fields(dst_row, table); } return res; @@ -7107,50 +6081,40 @@ int ha_ndbcluster::unpack_record_and_set_generated_fields( /** Get the default value of the field from default_values of the table. */ -static void get_default_value(void *def_val, Field *field) -{ +static void get_default_value(void *def_val, Field *field) { DBUG_ASSERT(field != NULL); DBUG_ASSERT(field->stored_in_db); - ptrdiff_t src_offset= field->table->default_values_offset(); + ptrdiff_t src_offset = field->table->default_values_offset(); { - if (bitmap_is_set(field->table->read_set, field->field_index)) - { - if (field->type() == MYSQL_TYPE_BIT) - { - Field_bit *field_bit= static_cast(field); - if (!field->is_real_null(src_offset)) - { + if (bitmap_is_set(field->table->read_set, field->field_index)) { + if (field->type() == MYSQL_TYPE_BIT) { + Field_bit *field_bit = static_cast(field); + if (!field->is_real_null(src_offset)) { field->move_field_offset(src_offset); - longlong value= field_bit->val_int(); + longlong value = field_bit->val_int(); /* Map to NdbApi format - two Uint32s */ Uint32 out[2]; out[0] = 0; out[1] = 0; - for (int b=0; b < 64; b++) - { + for (int b = 0; b < 64; b++) { out[b >> 5] |= (value & 1) << (b & 31); - - value= value >> 1; + + value = value >> 1; } memcpy(def_val, out, sizeof(longlong)); field->move_field_offset(-src_offset); } - } - else if (field->flags & BLOB_FLAG) - { + } else if (field->flags & BLOB_FLAG) { assert(false); - } - else - { + } else { field->move_field_offset(src_offset); /* Normal field (not blob or bit type). */ - if (!field->is_null()) - { + if (!field->is_null()) { /* Only copy actually used bytes of varstrings. 
*/ - uint32 actual_length= field_used_length(field); - uchar *src_ptr= field->ptr; + uint32 actual_length = field_used_length(field); + uchar *src_ptr = field->ptr; field->set_notnull(); memcpy(def_val, src_ptr, actual_length); } @@ -7161,32 +6125,26 @@ static void get_default_value(void *def_val, Field *field) } } - -int ha_ndbcluster::index_init(uint index, bool sorted) -{ +int ha_ndbcluster::index_init(uint index, bool sorted) { DBUG_ENTER("ha_ndbcluster::index_init"); DBUG_PRINT("enter", ("index: %u sorted: %d", index, sorted)); - active_index= index; - m_sorted= sorted; + active_index = index; + m_sorted = sorted; /* Locks are are explicitly released in scan unless m_lock.type == TL_READ_HIGH_PRIORITY and no sub-sequent call to unlock_row() */ - m_lock_tuple= false; + m_lock_tuple = false; - if (table_share->primary_key == MAX_KEY && - m_use_partition_pruning) - { + if (table_share->primary_key == MAX_KEY && m_use_partition_pruning) { bitmap_union(table->read_set, &m_part_info->full_part_field_set); } DBUG_RETURN(0); } - -int ha_ndbcluster::index_end() -{ +int ha_ndbcluster::index_end() { DBUG_ENTER("ha_ndbcluster::index_end"); DBUG_RETURN(close_scan()); } @@ -7194,140 +6152,116 @@ int ha_ndbcluster::index_end() /** Check if key contains null. */ -static -int -check_null_in_key(const KEY* key_info, const uchar *key, uint key_len) -{ +static int check_null_in_key(const KEY *key_info, const uchar *key, + uint key_len) { KEY_PART_INFO *curr_part, *end_part; - const uchar* end_ptr= key + key_len; - curr_part= key_info->key_part; - end_part= curr_part + key_info->user_defined_key_parts; + const uchar *end_ptr = key + key_len; + curr_part = key_info->key_part; + end_part = curr_part + key_info->user_defined_key_parts; - for (; curr_part != end_part && key < end_ptr; curr_part++) - { - if (curr_part->null_bit && *key) - return 1; + for (; curr_part != end_part && key < end_ptr; curr_part++) { + if (curr_part->null_bit && *key) return 1; key += curr_part->store_length; } return 0; } -int ha_ndbcluster::index_read(uchar *buf, - const uchar *key, uint key_len, - enum ha_rkey_function find_flag) -{ - key_range start_key, end_key, *end_key_p=NULL; - bool descending= false; +int ha_ndbcluster::index_read(uchar *buf, const uchar *key, uint key_len, + enum ha_rkey_function find_flag) { + key_range start_key, end_key, *end_key_p = NULL; + bool descending = false; DBUG_ENTER("ha_ndbcluster::index_read"); - DBUG_PRINT("enter", ("active_index: %u, key_len: %u, find_flag: %d", + DBUG_PRINT("enter", ("active_index: %u, key_len: %u, find_flag: %d", active_index, key_len, find_flag)); - start_key.key= key; - start_key.length= key_len; - start_key.flag= find_flag; + start_key.key = key; + start_key.length = key_len; + start_key.flag = find_flag; switch (find_flag) { - case HA_READ_KEY_EXACT: - /** - * Specify as a closed EQ_RANGE. - * Setting HA_READ_AFTER_KEY seems odd, but this is according - * to MySQL convention, see opt_range.cc. - */ - end_key.key= key; - end_key.length= key_len; - end_key.flag= HA_READ_AFTER_KEY; - end_key_p= &end_key; - break; - case HA_READ_KEY_OR_PREV: - case HA_READ_BEFORE_KEY: - case HA_READ_PREFIX_LAST: - case HA_READ_PREFIX_LAST_OR_PREV: - descending= true; - break; - default: - break; + case HA_READ_KEY_EXACT: + /** + * Specify as a closed EQ_RANGE. + * Setting HA_READ_AFTER_KEY seems odd, but this is according + * to MySQL convention, see opt_range.cc. 
+ */ + end_key.key = key; + end_key.length = key_len; + end_key.flag = HA_READ_AFTER_KEY; + end_key_p = &end_key; + break; + case HA_READ_KEY_OR_PREV: + case HA_READ_BEFORE_KEY: + case HA_READ_PREFIX_LAST: + case HA_READ_PREFIX_LAST_OR_PREV: + descending = true; + break; + default: + break; } - const int error= read_range_first_to_buf(&start_key, end_key_p, - descending, - m_sorted, buf); + const int error = + read_range_first_to_buf(&start_key, end_key_p, descending, m_sorted, buf); DBUG_RETURN(error); } - -int ha_ndbcluster::index_next(uchar *buf) -{ +int ha_ndbcluster::index_next(uchar *buf) { DBUG_ENTER("ha_ndbcluster::index_next"); ha_statistic_increment(&System_status_var::ha_read_next_count); - const int error= next_result(buf); + const int error = next_result(buf); DBUG_RETURN(error); } - -int ha_ndbcluster::index_prev(uchar *buf) -{ +int ha_ndbcluster::index_prev(uchar *buf) { DBUG_ENTER("ha_ndbcluster::index_prev"); ha_statistic_increment(&System_status_var::ha_read_prev_count); - const int error= next_result(buf); + const int error = next_result(buf); DBUG_RETURN(error); } - -int ha_ndbcluster::index_first(uchar *buf) -{ +int ha_ndbcluster::index_first(uchar *buf) { DBUG_ENTER("ha_ndbcluster::index_first"); ha_statistic_increment(&System_status_var::ha_read_first_count); // Start the ordered index scan and fetch the first row // Only HA_READ_ORDER indexes get called by index_first - const int error= ordered_index_scan(0, 0, m_sorted, false, buf, NULL); + const int error = ordered_index_scan(0, 0, m_sorted, false, buf, NULL); DBUG_RETURN(error); } - -int ha_ndbcluster::index_last(uchar *buf) -{ +int ha_ndbcluster::index_last(uchar *buf) { DBUG_ENTER("ha_ndbcluster::index_last"); ha_statistic_increment(&System_status_var::ha_read_last_count); - const int error= ordered_index_scan(0, 0, m_sorted, true, buf, NULL); + const int error = ordered_index_scan(0, 0, m_sorted, true, buf, NULL); DBUG_RETURN(error); } - int ha_ndbcluster::index_next_same(uchar *buf, const uchar *key MY_ATTRIBUTE((unused)), - uint length MY_ATTRIBUTE((unused))) -{ + uint length MY_ATTRIBUTE((unused))) { DBUG_ENTER("ha_ndbcluster::index_next_same"); ha_statistic_increment(&System_status_var::ha_read_next_count); - const int error= next_result(buf); + const int error = next_result(buf); DBUG_RETURN(error); } - -int ha_ndbcluster::index_read_last(uchar * buf, const uchar * key, uint key_len) -{ +int ha_ndbcluster::index_read_last(uchar *buf, const uchar *key, uint key_len) { DBUG_ENTER("ha_ndbcluster::index_read_last"); DBUG_RETURN(index_read(buf, key, key_len, HA_READ_PREFIX_LAST)); } - int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key, - const key_range *end_key, - bool desc, bool sorted, - uchar* buf) -{ + const key_range *end_key, bool desc, + bool sorted, uchar *buf) { part_id_range part_spec; - const NDB_INDEX_TYPE type= get_index_type(active_index); - const KEY* key_info= table->key_info+active_index; - int error; + const NDB_INDEX_TYPE type = get_index_type(active_index); + const KEY *key_info = table->key_info + active_index; + int error; DBUG_ENTER("ha_ndbcluster::read_range_first_to_buf"); DBUG_PRINT("info", ("desc: %d, sorted: %d", desc, sorted)); - if (unlikely((error= close_scan()))) - DBUG_RETURN(error); + if (unlikely((error = close_scan()))) DBUG_RETURN(error); - if (m_use_partition_pruning) - { + if (m_use_partition_pruning) { DBUG_ASSERT(m_pushed_join_operation != PUSHED_ROOT); get_partition_set(table, buf, active_index, start_key, &part_spec); DBUG_PRINT("info", 
("part_spec.start_part: %u part_spec.end_part: %u", @@ -7338,209 +6272,177 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key, If partition pruning has found exactly one partition in set we can optimize scan to run towards that partition only. */ - if (part_spec.start_part > part_spec.end_part) - { + if (part_spec.start_part > part_spec.end_part) { DBUG_RETURN(HA_ERR_END_OF_FILE); } - if (part_spec.start_part == part_spec.end_part) - { + if (part_spec.start_part == part_spec.end_part) { /* Only one partition is required to scan, if sorted is required we don't need it any more since output from one ordered partitioned index is always sorted. */ - sorted= false; - if (unlikely(!get_transaction_part_id(part_spec.start_part, error))) - { + sorted = false; + if (unlikely(!get_transaction_part_id(part_spec.start_part, error))) { DBUG_RETURN(error); } } } - switch (type){ - case PRIMARY_KEY_ORDERED_INDEX: - case PRIMARY_KEY_INDEX: - if (start_key && - start_key->length == key_info->key_length && - start_key->flag == HA_READ_KEY_EXACT) - { - if (!m_thd_ndb->trans) - if (unlikely(!start_transaction_key(active_index, - start_key->key, error))) - DBUG_RETURN(error); - DBUG_DUMP("key", start_key->key, start_key->length); - error = - pk_read(start_key->key, buf, - (m_use_partition_pruning) ? &(part_spec.start_part) : NULL); - DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error); - } - break; - case UNIQUE_ORDERED_INDEX: - case UNIQUE_INDEX: - if (start_key && start_key->length == key_info->key_length && - start_key->flag == HA_READ_KEY_EXACT && - !check_null_in_key(key_info, start_key->key, start_key->length)) - { - if (!m_thd_ndb->trans) - if (unlikely(!start_transaction_key(active_index, - start_key->key, error))) - DBUG_RETURN(error); - DBUG_DUMP("key", start_key->key, start_key->length); - error= unique_index_read(start_key->key, buf); - DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error); - } - else if (type == UNIQUE_INDEX) - DBUG_RETURN(full_table_scan(key_info, - start_key, - end_key, - buf)); - break; - default: - break; + switch (type) { + case PRIMARY_KEY_ORDERED_INDEX: + case PRIMARY_KEY_INDEX: + if (start_key && start_key->length == key_info->key_length && + start_key->flag == HA_READ_KEY_EXACT) { + if (!m_thd_ndb->trans) + if (unlikely( + !start_transaction_key(active_index, start_key->key, error))) + DBUG_RETURN(error); + DBUG_DUMP("key", start_key->key, start_key->length); + error = + pk_read(start_key->key, buf, + (m_use_partition_pruning) ? &(part_spec.start_part) : NULL); + DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error); + } + break; + case UNIQUE_ORDERED_INDEX: + case UNIQUE_INDEX: + if (start_key && start_key->length == key_info->key_length && + start_key->flag == HA_READ_KEY_EXACT && + !check_null_in_key(key_info, start_key->key, start_key->length)) { + if (!m_thd_ndb->trans) + if (unlikely( + !start_transaction_key(active_index, start_key->key, error))) + DBUG_RETURN(error); + DBUG_DUMP("key", start_key->key, start_key->length); + error = unique_index_read(start_key->key, buf); + DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? 
                    HA_ERR_END_OF_FILE : error);
+      } else if (type == UNIQUE_INDEX)
+        DBUG_RETURN(full_table_scan(key_info, start_key, end_key, buf));
+      break;
+    default:
+      break;
  }

-  if (!m_use_partition_pruning && !m_thd_ndb->trans)
-  {
+  if (!m_use_partition_pruning && !m_thd_ndb->trans) {
    get_partition_set(table, buf, active_index, start_key, &part_spec);
    if (part_spec.start_part == part_spec.end_part)
      if (unlikely(!start_transaction_part_id(part_spec.start_part, error)))
        DBUG_RETURN(error);
  }

  // Start the ordered index scan and fetch the first row
-  DBUG_RETURN(ordered_index_scan(start_key, end_key, sorted, desc, buf,
-                                 (m_use_partition_pruning)? &part_spec : NULL));
+  DBUG_RETURN(
+      ordered_index_scan(start_key, end_key, sorted, desc, buf,
+                         (m_use_partition_pruning) ? &part_spec : NULL));
}

int ha_ndbcluster::read_range_first(const key_range *start_key,
                                    const key_range *end_key,
-                                    bool /* eq_range */, bool sorted)
-{
-  uchar* buf= table->record[0];
+                                    bool /* eq_range */, bool sorted) {
+  uchar *buf = table->record[0];
  DBUG_ENTER("ha_ndbcluster::read_range_first");
-  DBUG_RETURN(read_range_first_to_buf(start_key, end_key, false,
-                                      sorted, buf));
+  DBUG_RETURN(read_range_first_to_buf(start_key, end_key, false, sorted, buf));
}

-int ha_ndbcluster::read_range_next()
-{
+int ha_ndbcluster::read_range_next() {
  DBUG_ENTER("ha_ndbcluster::read_range_next");
  DBUG_RETURN(next_result(table->record[0]));
}

-
-int ha_ndbcluster::rnd_init(bool)
-{
+int ha_ndbcluster::rnd_init(bool) {
  int error;
  DBUG_ENTER("rnd_init");
-  if ((error= close_scan()))
-    DBUG_RETURN(error);
+  if ((error = close_scan())) DBUG_RETURN(error);
  index_init(table_share->primary_key, 0);
  DBUG_RETURN(0);
}

-int ha_ndbcluster::close_scan()
-{
+int ha_ndbcluster::close_scan() {
  DBUG_ENTER("close_scan");

-  if (m_active_query)
-  {
+  if (m_active_query) {
    m_active_query->close(m_thd_ndb->m_force_send);
-    m_active_query= NULL;
+    m_active_query = NULL;
  }

-  NdbScanOperation *cursor= m_active_cursor;
-  if (!cursor)
-  {
+  NdbScanOperation *cursor = m_active_cursor;
+  if (!cursor) {
    cursor = m_multi_cursor;
-    if (!cursor)
-      DBUG_RETURN(0);
+    if (!cursor) DBUG_RETURN(0);
  }

  int error;
-  NdbTransaction *trans= m_thd_ndb->trans;
-  if ((error= scan_handle_lock_tuple(cursor, trans)) != 0)
-    DBUG_RETURN(error);
+  NdbTransaction *trans = m_thd_ndb->trans;
+  if ((error = scan_handle_lock_tuple(cursor, trans)) != 0) DBUG_RETURN(error);

-  if (m_thd_ndb->m_unsent_bytes)
-  {
+  if (m_thd_ndb->m_unsent_bytes) {
    /*
-      Take over any pending transactions to the 
-      deleteing/updating transaction before closing the scan 
+      Take over any pending transactions to the
+      deleting/updating transaction before closing the scan
    */
    DBUG_PRINT("info", ("thd_ndb->m_unsent_bytes: %ld",
-                        (long) m_thd_ndb->m_unsent_bytes));
-    if (execute_no_commit(m_thd_ndb, trans, m_ignore_no_key) != 0)
-    {
+                        (long)m_thd_ndb->m_unsent_bytes));
+    if (execute_no_commit(m_thd_ndb, trans, m_ignore_no_key) != 0) {
      no_uncommitted_rows_execute_failure();
      DBUG_RETURN(ndb_err(trans));
    }
  }
-  
+
  cursor->close(m_thd_ndb->m_force_send, true);
-  m_active_cursor= NULL;
-  m_multi_cursor= NULL;
+  m_active_cursor = NULL;
+  m_multi_cursor = NULL;
  DBUG_RETURN(0);
}

-int ha_ndbcluster::rnd_end()
-{
+int ha_ndbcluster::rnd_end() {
  DBUG_ENTER("rnd_end");
  DBUG_RETURN(close_scan());
}

-
-int ha_ndbcluster::rnd_next(uchar *buf)
-{
+int ha_ndbcluster::rnd_next(uchar *buf) {
  DBUG_ENTER("rnd_next");
  ha_statistic_increment(&System_status_var::ha_read_rnd_next_count);

  int error;
  if (m_active_cursor || m_active_query)
-    error=
next_result(buf); + error = next_result(buf); else - error= full_table_scan(NULL, NULL, NULL, buf); - + error = full_table_scan(NULL, NULL, NULL, buf); + DBUG_RETURN(error); } - /** - An "interesting" record has been found and it's pk + An "interesting" record has been found and it's pk retrieved by calling position. Now it's time to read the record from db once again. */ -int ha_ndbcluster::rnd_pos(uchar *buf, uchar *pos) -{ +int ha_ndbcluster::rnd_pos(uchar *buf, uchar *pos) { DBUG_ENTER("rnd_pos"); ha_statistic_increment(&System_status_var::ha_read_rnd_count); // The primary key for the record is stored in pos // Perform a pk_read using primary key "index" { part_id_range part_spec; - uint key_length= ref_length; - if (m_user_defined_partitioning) - { - if (table_share->primary_key == MAX_KEY) - { + uint key_length = ref_length; + if (m_user_defined_partitioning) { + if (table_share->primary_key == MAX_KEY) { /* The partition id has been fetched from ndb and has been stored directly after the hidden key */ DBUG_DUMP("key+part", pos, key_length); - key_length= ref_length - sizeof(m_part_id); - part_spec.start_part= part_spec.end_part= *(uint32 *)(pos + key_length); - } - else - { + key_length = ref_length - sizeof(m_part_id); + part_spec.start_part = part_spec.end_part = + *(uint32 *)(pos + key_length); + } else { key_range key_spec; - KEY *key_info= table->key_info + table_share->primary_key; - key_spec.key= pos; - key_spec.length= key_length; - key_spec.flag= HA_READ_KEY_EXACT; - get_full_part_id_from_key(table, buf, key_info, - &key_spec, &part_spec); + KEY *key_info = table->key_info + table_share->primary_key; + key_spec.key = pos; + key_spec.length = key_length; + key_spec.flag = HA_READ_KEY_EXACT; + get_full_part_id_from_key(table, buf, key_info, &key_spec, &part_spec); DBUG_ASSERT(part_spec.start_part == part_spec.end_part); } DBUG_PRINT("info", ("partition id %u", part_spec.start_part)); @@ -7549,8 +6451,7 @@ int ha_ndbcluster::rnd_pos(uchar *buf, uchar *pos) int res = pk_read(pos, buf, (m_user_defined_partitioning) ? &(part_spec.start_part) : NULL); - if (res == HA_ERR_KEY_NOT_FOUND) - { + if (res == HA_ERR_KEY_NOT_FOUND) { /** * When using rnd_pos * server first retrives a set of records (typically scans them) @@ -7561,23 +6462,21 @@ int ha_ndbcluster::rnd_pos(uchar *buf, uchar *pos) * the rnd_pos. * Therefor we return HA_ERR_RECORD_DELETED in this case rather than * HA_ERR_KEY_NOT_FOUND (which will cause statment to be aborted) - * + * */ - res= HA_ERR_RECORD_DELETED; + res = HA_ERR_RECORD_DELETED; } DBUG_RETURN(res); } } - /** - Store the primary key of this record in ref + Store the primary key of this record in ref variable, so that the row can be retrieved again later using "reference" in rnd_pos. 
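The round trip can be pictured with a self-contained model (an illustrative sketch only: ModelTable and its std::map store are invented stand-ins, not the handler API):

~~~~~~~~~~~~~~~~
// Sketch: position() saves an opaque key ("ref") for a row; rnd_pos()
// later re-reads the row from that saved ref. Model only, not ha_ndbcluster.
#include <cassert>
#include <map>
#include <string>

struct ModelTable {
  std::map<std::string, std::string> rows;  // pk -> row data
  std::string ref;                          // like handler::ref

  void position(const std::string &pk) { ref = pk; }      // save pk into ref
  bool rnd_pos(std::string *out, const std::string &r) {  // re-read by ref
    auto it = rows.find(r);
    if (it == rows.end()) return false;  // cf. the HA_ERR_RECORD_DELETED case
    *out = it->second;
    return true;
  }
};

int main() {
  ModelTable t;
  t.rows["key1"] = "row data";
  t.position("key1");              // during the scan
  std::string row;
  assert(t.rnd_pos(&row, t.ref));  // later, re-fetch by the saved ref
  return 0;
}
~~~~~~~~~~~~~~~~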
*/ -void ha_ndbcluster::position(const uchar *record) -{ +void ha_ndbcluster::position(const uchar *record) { KEY *key_info; KEY_PART_INFO *key_part; KEY_PART_INFO *end; @@ -7586,34 +6485,29 @@ void ha_ndbcluster::position(const uchar *record) DBUG_ENTER("position"); - if (table_share->primary_key != MAX_KEY) - { - key_length= ref_length; - key_info= table->key_info + table_share->primary_key; - key_part= key_info->key_part; - end= key_part + key_info->user_defined_key_parts; - buff= ref; - - for (; key_part != end; key_part++) - { + if (table_share->primary_key != MAX_KEY) { + key_length = ref_length; + key_info = table->key_info + table_share->primary_key; + key_part = key_info->key_part; + end = key_part + key_info->user_defined_key_parts; + buff = ref; + + for (; key_part != end; key_part++) { if (key_part->null_bit) { - /* Store 0 if the key part is a NULL part */ - if (record[key_part->null_offset] - & key_part->null_bit) { - *buff++= 1; + /* Store 0 if the key part is a NULL part */ + if (record[key_part->null_offset] & key_part->null_bit) { + *buff++ = 1; continue; - } - *buff++= 0; + } + *buff++ = 0; } size_t len = key_part->length; - const uchar * ptr = record + key_part->offset; + const uchar *ptr = record + key_part->offset; Field *field = key_part->field; - if (field->type() == MYSQL_TYPE_VARCHAR) - { + if (field->type() == MYSQL_TYPE_VARCHAR) { size_t var_length; - if (((Field_varstring*)field)->length_bytes == 1) - { + if (((Field_varstring *)field)->length_bytes == 1) { /** * Keys always use 2 bytes length */ @@ -7621,114 +6515,93 @@ void ha_ndbcluster::position(const uchar *record) buff[1] = 0; var_length = ptr[0]; DBUG_ASSERT(var_length <= len); - memcpy(buff+2, ptr + 1, var_length); - } - else - { - var_length = ptr[0] + (ptr[1]*256); + memcpy(buff + 2, ptr + 1, var_length); + } else { + var_length = ptr[0] + (ptr[1] * 256); DBUG_ASSERT(var_length <= len); memcpy(buff, ptr, var_length + 2); } /** - We have to zero-pad any unused VARCHAR buffer so that MySQL is + We have to zero-pad any unused VARCHAR buffer so that MySQL is able to use simple memcmp to compare two instances of the same - unique key value to determine if they are equal. + unique key value to determine if they are equal. MySQL does this to compare contents of two 'ref' values. (Duplicate weedout algorithm is one such case.) 
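Why the padding matters can be demonstrated standalone (a minimal sketch: the 2-byte length prefix mirrors the layout built here, the 8-byte data area is an arbitrary choice for the example):

~~~~~~~~~~~~~~~~
// Two encodings of the same VARCHAR key value compare equal with a plain
// memcmp() only because the unused tail of each buffer is zeroed.
#include <cstdio>
#include <cstring>

static void pack(unsigned char *buff, const char *val, size_t max_len) {
  const size_t var_length = strlen(val);
  buff[0] = (unsigned char)(var_length & 0xff);  // 2-byte length prefix
  buff[1] = (unsigned char)((var_length >> 8) & 0xff);
  memcpy(buff + 2, val, var_length);
  memset(buff + 2 + var_length, 0, max_len - var_length);  // the zero-pad
}

int main() {
  unsigned char ref1[2 + 8], ref2[2 + 8];
  pack(ref1, "abc", 8);
  pack(ref2, "abc", 8);
  // Without the memset() the tails would hold stack garbage and this
  // comparison could fail for identical key values.
  printf("equal: %d\n", memcmp(ref1, ref2, sizeof(ref1)) == 0);
  return 0;
}
~~~~~~~~~~~~~~~~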
*/ - memset(buff+2+var_length, 0, len - var_length); + memset(buff + 2 + var_length, 0, len - var_length); len += 2; - } - else - { + } else { memcpy(buff, ptr, len); } buff += len; } - } - else - { + } else { // No primary key, get hidden key DBUG_PRINT("info", ("Getting hidden key")); // If table has user defined partition save the partition id as well - if (m_user_defined_partitioning) - { + if (m_user_defined_partitioning) { DBUG_PRINT("info", ("Saving partition id %u", m_part_id)); - key_length= ref_length - sizeof(m_part_id); - memcpy(ref+key_length, (void *)&m_part_id, sizeof(m_part_id)); - } - else - key_length= ref_length; + key_length = ref_length - sizeof(m_part_id); + memcpy(ref + key_length, (void *)&m_part_id, sizeof(m_part_id)); + } else + key_length = ref_length; #ifndef DBUG_OFF - const int hidden_no= Ndb_table_map::num_stored_fields(table); - const NDBTAB *tab= m_table; - const NDBCOL *hidden_col= tab->getColumn(hidden_no); - DBUG_ASSERT(hidden_col->getPrimaryKey() && - hidden_col->getAutoIncrement() && + const int hidden_no = Ndb_table_map::num_stored_fields(table); + const NDBTAB *tab = m_table; + const NDBCOL *hidden_col = tab->getColumn(hidden_no); + DBUG_ASSERT(hidden_col->getPrimaryKey() && hidden_col->getAutoIncrement() && key_length == NDB_HIDDEN_PRIMARY_KEY_LENGTH); #endif memcpy(ref, &m_ref, key_length); } #ifndef DBUG_OFF - if (table_share->primary_key == MAX_KEY && m_user_defined_partitioning) - DBUG_DUMP("key+part", ref, key_length+sizeof(m_part_id)); + if (table_share->primary_key == MAX_KEY && m_user_defined_partitioning) + DBUG_DUMP("key+part", ref, key_length + sizeof(m_part_id)); #endif DBUG_DUMP("ref", ref, key_length); DBUG_VOID_RETURN; } -int -ha_ndbcluster::cmp_ref(const uchar * ref1, const uchar * ref2) const -{ +int ha_ndbcluster::cmp_ref(const uchar *ref1, const uchar *ref2) const { DBUG_ENTER("cmp_ref"); - if (table_share->primary_key != MAX_KEY) - { - KEY *key_info= table->key_info + table_share->primary_key; - KEY_PART_INFO *key_part= key_info->key_part; - KEY_PART_INFO *end= key_part + key_info->user_defined_key_parts; - - for (; key_part != end; key_part++) - { + if (table_share->primary_key != MAX_KEY) { + KEY *key_info = table->key_info + table_share->primary_key; + KEY_PART_INFO *key_part = key_info->key_part; + KEY_PART_INFO *end = key_part + key_info->user_defined_key_parts; + + for (; key_part != end; key_part++) { // NOTE: No need to check for null since PK is not-null - Field *field= key_part->field; - int result= field->key_cmp(ref1, ref2); - if (result) - { + Field *field = key_part->field; + int result = field->key_cmp(ref1, ref2); + if (result) { DBUG_RETURN(result); } - if (field->type() == MYSQL_TYPE_VARCHAR) - { - ref1+= 2; - ref2+= 2; + if (field->type() == MYSQL_TYPE_VARCHAR) { + ref1 += 2; + ref2 += 2; } - - ref1+= key_part->length; - ref2+= key_part->length; + + ref1 += key_part->length; + ref2 += key_part->length; } DBUG_RETURN(0); - } - else - { + } else { DBUG_RETURN(memcmp(ref1, ref2, ref_length)); } } -int ha_ndbcluster::info(uint flag) -{ - THD *thd= table->in_use; - int result= 0; +int ha_ndbcluster::info(uint flag) { + THD *thd = table->in_use; + int result = 0; DBUG_ENTER("info"); DBUG_PRINT("enter", ("flag: %d", flag)); - - if (flag & HA_STATUS_POS) - DBUG_PRINT("info", ("HA_STATUS_POS")); - if (flag & HA_STATUS_TIME) - DBUG_PRINT("info", ("HA_STATUS_TIME")); - if (flag & HA_STATUS_CONST) - { + + if (flag & HA_STATUS_POS) DBUG_PRINT("info", ("HA_STATUS_POS")); + if (flag & HA_STATUS_TIME) DBUG_PRINT("info", 
("HA_STATUS_TIME")); + if (flag & HA_STATUS_CONST) { /* Set size required by a single record in the MRR 'HANDLER_BUFFER'. MRR buffer has both a fixed and a variable sized part. @@ -7737,19 +6610,16 @@ int ha_ndbcluster::info(uint flag) See comments for multi_range_fixed_size() and multi_range_max_entry() regarding how the MRR buffer is organized. */ - stats.mrr_length_per_rec= multi_range_fixed_size(1) + - multi_range_max_entry(PRIMARY_KEY_INDEX, table_share->reclength); + stats.mrr_length_per_rec = + multi_range_fixed_size(1) + + multi_range_max_entry(PRIMARY_KEY_INDEX, table_share->reclength); } - while (flag & HA_STATUS_VARIABLE) - { - if (!thd) - thd= current_thd; + while (flag & HA_STATUS_VARIABLE) { + if (!thd) thd = current_thd; DBUG_PRINT("info", ("HA_STATUS_VARIABLE")); - if (!m_table_info) - { - if (check_ndb_connection(thd)) - DBUG_RETURN(HA_ERR_NO_CONNECTION); + if (!m_table_info) { + if (check_ndb_connection(thd)) DBUG_RETURN(HA_ERR_NO_CONNECTION); } /* @@ -7760,86 +6630,71 @@ int ha_ndbcluster::info(uint flag) 2) HA_STATUS_NO_LOCK -> read from shared cached copy. 3) Local copy is invalid. */ - bool exact_count= THDVAR(thd, use_exact_count); - if (exact_count || // 1) + bool exact_count = THDVAR(thd, use_exact_count); + if (exact_count || // 1) !(flag & HA_STATUS_NO_LOCK) || // 2) - m_table_info == NULL || // 3) + m_table_info == NULL || // 3) m_table_info->records == ~(ha_rows)0) // 3) { - result= update_stats(thd, (exact_count || !(flag & HA_STATUS_NO_LOCK))); - if (result) - DBUG_RETURN(result); + result = update_stats(thd, (exact_count || !(flag & HA_STATUS_NO_LOCK))); + if (result) DBUG_RETURN(result); } /* Read from local statistics, fast and fuzzy, wo/ locks */ - else - { + else { DBUG_ASSERT(m_table_info->records != ~(ha_rows)0); - stats.records= m_table_info->records + - m_table_info->no_uncommitted_rows_count; + stats.records = + m_table_info->records + m_table_info->no_uncommitted_rows_count; } if (thd->lex->sql_command != SQLCOM_SHOW_TABLE_STATUS && - thd->lex->sql_command != SQLCOM_SHOW_KEYS) - { + thd->lex->sql_command != SQLCOM_SHOW_KEYS) { /* just use whatever stats we have. However, optimizer interprets the values 0 and 1 as EXACT: -> < 2 should not be returned. 
*/ - if (stats.records < 2) - stats.records= 2; + if (stats.records < 2) stats.records = 2; } break; } /* RPK moved to variable part */ - if (flag & HA_STATUS_VARIABLE) - { + if (flag & HA_STATUS_VARIABLE) { /* No meaningful way to return error */ DBUG_PRINT("info", ("rec_per_key")); set_rec_per_key(); } - if (flag & HA_STATUS_ERRKEY) - { + if (flag & HA_STATUS_ERRKEY) { DBUG_PRINT("info", ("HA_STATUS_ERRKEY dupkey=%u", m_dupkey)); - errkey= m_dupkey; + errkey = m_dupkey; } - if (flag & HA_STATUS_AUTO) - { + if (flag & HA_STATUS_AUTO) { DBUG_PRINT("info", ("HA_STATUS_AUTO")); - if (m_table && table->found_next_number_field) - { - if (!thd) - thd= current_thd; - if (check_ndb_connection(thd)) - DBUG_RETURN(HA_ERR_NO_CONNECTION); - Ndb *ndb= get_ndb(thd); + if (m_table && table->found_next_number_field) { + if (!thd) thd = current_thd; + if (check_ndb_connection(thd)) DBUG_RETURN(HA_ERR_NO_CONNECTION); + Ndb *ndb = get_ndb(thd); NDB_SHARE::Tuple_id_range_guard g(m_share); - + Uint64 auto_increment_value64; if (ndb->readAutoIncrementValue(m_table, g.range, - auto_increment_value64) == -1) - { - const NdbError err= ndb->getNdbError(); - ndb_log_error("Error %d in readAutoIncrementValue(): %s", - err.code, err.message); - stats.auto_increment_value= ~(ulonglong)0; - } - else - stats.auto_increment_value= (ulonglong)auto_increment_value64; + auto_increment_value64) == -1) { + const NdbError err = ndb->getNdbError(); + ndb_log_error("Error %d in readAutoIncrementValue(): %s", err.code, + err.message); + stats.auto_increment_value = ~(ulonglong)0; + } else + stats.auto_increment_value = (ulonglong)auto_increment_value64; } } - if(result == -1) - result= HA_ERR_NO_CONNECTION; + if (result == -1) result = HA_ERR_NO_CONNECTION; DBUG_RETURN(result); } - void ha_ndbcluster::get_dynamic_partition_info(ha_statistics *stat_info, ha_checksum *checksum, - uint part_id) -{ + uint part_id) { DBUG_PRINT("info", ("ha_ndbcluster::get_dynamic_partition_info")); int error = 0; @@ -7848,17 +6703,13 @@ void ha_ndbcluster::get_dynamic_partition_info(ha_statistics *stat_info, /* Checksum not supported, set it to NULL.*/ *checksum = 0; - if (!thd) - thd = current_thd; - if (!m_table_info) - { - if ((error = check_ndb_connection(thd))) - goto err; + if (!thd) thd = current_thd; + if (!m_table_info) { + if ((error = check_ndb_connection(thd))) goto err; } error = update_stats(thd, 1, part_id); - if (error == 0) - { + if (error == 0) { stat_info->records = stats.records; stat_info->mean_rec_length = stats.mean_rec_length; stat_info->data_file_length = stats.data_file_length; @@ -7867,186 +6718,169 @@ void ha_ndbcluster::get_dynamic_partition_info(ha_statistics *stat_info, return; } -err: +err: - DBUG_PRINT("warning", - ("ha_ndbcluster::get_dynamic_partition_info failed with error code %u", - error)); + DBUG_PRINT( + "warning", + ("ha_ndbcluster::get_dynamic_partition_info failed with error code %u", + error)); } - -int ha_ndbcluster::extra(enum ha_extra_function operation) -{ +int ha_ndbcluster::extra(enum ha_extra_function operation) { DBUG_ENTER("extra"); switch (operation) { - case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/ - DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY")); - DBUG_PRINT("info", ("Ignoring duplicate key")); - m_ignore_dup_key= true; - break; - case HA_EXTRA_NO_IGNORE_DUP_KEY: - DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY")); - m_ignore_dup_key= false; - break; - case HA_EXTRA_IGNORE_NO_KEY: - DBUG_PRINT("info", ("HA_EXTRA_IGNORE_NO_KEY")); - DBUG_PRINT("info", ("Turning on 
AO_IgnoreError at Commit/NoCommit")); - m_ignore_no_key= true; - break; - case HA_EXTRA_NO_IGNORE_NO_KEY: - DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_NO_KEY")); - DBUG_PRINT("info", ("Turning on AO_IgnoreError at Commit/NoCommit")); - m_ignore_no_key= false; - break; - case HA_EXTRA_WRITE_CAN_REPLACE: - DBUG_PRINT("info", ("HA_EXTRA_WRITE_CAN_REPLACE")); - if (!m_has_unique_index || - /* - Always set if slave, quick fix for bug 27378 - or if manual binlog application, for bug 46662 - */ - applying_binlog(current_thd)) - { - DBUG_PRINT("info", ("Turning ON use of write instead of insert")); - m_use_write= true; - } - break; - case HA_EXTRA_WRITE_CANNOT_REPLACE: - DBUG_PRINT("info", ("HA_EXTRA_WRITE_CANNOT_REPLACE")); - DBUG_PRINT("info", ("Turning OFF use of write instead of insert")); - m_use_write= false; - break; - case HA_EXTRA_DELETE_CANNOT_BATCH: - DBUG_PRINT("info", ("HA_EXTRA_DELETE_CANNOT_BATCH")); - m_delete_cannot_batch= true; - break; - case HA_EXTRA_UPDATE_CANNOT_BATCH: - DBUG_PRINT("info", ("HA_EXTRA_UPDATE_CANNOT_BATCH")); - m_update_cannot_batch= true; - break; - // We don't implement 'KEYREAD'. However, KEYREAD also implies DISABLE_JOINPUSH. - case HA_EXTRA_KEYREAD: - DBUG_PRINT("info", ("HA_EXTRA_KEYREAD")); - m_disable_pushed_join= true; - break; - case HA_EXTRA_NO_KEYREAD: - DBUG_PRINT("info", ("HA_EXTRA_NO_KEYREAD")); - m_disable_pushed_join= false; - break; - case HA_EXTRA_BEGIN_ALTER_COPY: - // Start of copy into intermediate table during copying alter, turn - // off transactions when writing into the intermediate table in order to - // avoid exhausting NDB transaction resources, this is safe as it would - // be dropped anyway if there is a failure during the alter - DBUG_PRINT("info", ("HA_EXTRA_BEGIN_ALTER_COPY")); - m_thd_ndb->set_trans_option(Thd_ndb::TRANS_TRANSACTIONS_OFF); - break; - case HA_EXTRA_END_ALTER_COPY: - // End of copy into intermediate table during copying alter. 
- // Nothing to do, the transactions will automatically be enabled - // again for subsequent statement - DBUG_PRINT("info", ("HA_EXTRA_END_ALTER_COPY")); - break; - default: - break; + case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/ + DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY")); + DBUG_PRINT("info", ("Ignoring duplicate key")); + m_ignore_dup_key = true; + break; + case HA_EXTRA_NO_IGNORE_DUP_KEY: + DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY")); + m_ignore_dup_key = false; + break; + case HA_EXTRA_IGNORE_NO_KEY: + DBUG_PRINT("info", ("HA_EXTRA_IGNORE_NO_KEY")); + DBUG_PRINT("info", ("Turning on AO_IgnoreError at Commit/NoCommit")); + m_ignore_no_key = true; + break; + case HA_EXTRA_NO_IGNORE_NO_KEY: + DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_NO_KEY")); + DBUG_PRINT("info", ("Turning on AO_IgnoreError at Commit/NoCommit")); + m_ignore_no_key = false; + break; + case HA_EXTRA_WRITE_CAN_REPLACE: + DBUG_PRINT("info", ("HA_EXTRA_WRITE_CAN_REPLACE")); + if (!m_has_unique_index || + /* + Always set if slave, quick fix for bug 27378 + or if manual binlog application, for bug 46662 + */ + applying_binlog(current_thd)) { + DBUG_PRINT("info", ("Turning ON use of write instead of insert")); + m_use_write = true; + } + break; + case HA_EXTRA_WRITE_CANNOT_REPLACE: + DBUG_PRINT("info", ("HA_EXTRA_WRITE_CANNOT_REPLACE")); + DBUG_PRINT("info", ("Turning OFF use of write instead of insert")); + m_use_write = false; + break; + case HA_EXTRA_DELETE_CANNOT_BATCH: + DBUG_PRINT("info", ("HA_EXTRA_DELETE_CANNOT_BATCH")); + m_delete_cannot_batch = true; + break; + case HA_EXTRA_UPDATE_CANNOT_BATCH: + DBUG_PRINT("info", ("HA_EXTRA_UPDATE_CANNOT_BATCH")); + m_update_cannot_batch = true; + break; + // We don't implement 'KEYREAD'. However, KEYREAD also implies + // DISABLE_JOINPUSH. + case HA_EXTRA_KEYREAD: + DBUG_PRINT("info", ("HA_EXTRA_KEYREAD")); + m_disable_pushed_join = true; + break; + case HA_EXTRA_NO_KEYREAD: + DBUG_PRINT("info", ("HA_EXTRA_NO_KEYREAD")); + m_disable_pushed_join = false; + break; + case HA_EXTRA_BEGIN_ALTER_COPY: + // Start of copy into intermediate table during copying alter, turn + // off transactions when writing into the intermediate table in order to + // avoid exhausting NDB transaction resources, this is safe as it would + // be dropped anyway if there is a failure during the alter + DBUG_PRINT("info", ("HA_EXTRA_BEGIN_ALTER_COPY")); + m_thd_ndb->set_trans_option(Thd_ndb::TRANS_TRANSACTIONS_OFF); + break; + case HA_EXTRA_END_ALTER_COPY: + // End of copy into intermediate table during copying alter. + // Nothing to do, the transactions will automatically be enabled + // again for subsequent statement + DBUG_PRINT("info", ("HA_EXTRA_END_ALTER_COPY")); + break; + default: + break; } DBUG_RETURN(0); } - -bool ha_ndbcluster::start_read_removal() -{ - THD *thd= table->in_use; +bool ha_ndbcluster::start_read_removal() { + THD *thd = table->in_use; DBUG_ENTER("start_read_removal"); - if (uses_blob_value(table->write_set)) - { + if (uses_blob_value(table->write_set)) { DBUG_PRINT("exit", ("No! Blob field in write_set")); DBUG_RETURN(false); } - if (thd->lex->sql_command == SQLCOM_DELETE && - table_share->blob_fields) - { + if (thd->lex->sql_command == SQLCOM_DELETE && table_share->blob_fields) { DBUG_PRINT("exit", ("No! DELETE from table with blob(s)")); DBUG_RETURN(false); } - if (table_share->primary_key == MAX_KEY) - { + if (table_share->primary_key == MAX_KEY) { DBUG_PRINT("exit", ("No! 
Table with hidden key")); DBUG_RETURN(false); } - if (bitmap_is_overlapping(table->write_set, m_pk_bitmap_p)) - { + if (bitmap_is_overlapping(table->write_set, m_pk_bitmap_p)) { DBUG_PRINT("exit", ("No! Updating primary key")); DBUG_RETURN(false); } - if (m_has_unique_index) - { - for (uint i= 0; i < table_share->keys; i++) - { - const KEY* key= table->key_info + i; + if (m_has_unique_index) { + for (uint i = 0; i < table_share->keys; i++) { + const KEY *key = table->key_info + i; if ((key->flags & HA_NOSAME) && - bitmap_is_overlapping(table->write_set, - m_key_fields[i])) - { + bitmap_is_overlapping(table->write_set, m_key_fields[i])) { DBUG_PRINT("exit", ("No! Unique key %d is updated", i)); DBUG_RETURN(false); } } } - m_read_before_write_removal_possible= true; + m_read_before_write_removal_possible = true; DBUG_PRINT("exit", ("Yes, rbwr is possible!")); DBUG_RETURN(true); } - -ha_rows ha_ndbcluster::end_read_removal(void) -{ +ha_rows ha_ndbcluster::end_read_removal(void) { DBUG_ENTER("end_read_removal"); DBUG_ASSERT(m_read_before_write_removal_possible); - DBUG_PRINT("info", ("updated: %llu, deleted: %llu", - m_rows_updated, m_rows_deleted)); + DBUG_PRINT("info", + ("updated: %llu, deleted: %llu", m_rows_updated, m_rows_deleted)); DBUG_RETURN(m_rows_updated + m_rows_deleted); } - -int ha_ndbcluster::reset() -{ +int ha_ndbcluster::reset() { DBUG_ENTER("ha_ndbcluster::reset"); m_cond.cond_clear(); DBUG_ASSERT(m_active_query == NULL); - if (m_pushed_join_operation==PUSHED_ROOT) // Root of pushed query + if (m_pushed_join_operation == PUSHED_ROOT) // Root of pushed query { - delete m_pushed_join_member; // Also delete QueryDef + delete m_pushed_join_member; // Also delete QueryDef } - m_pushed_join_member= NULL; - m_pushed_join_operation= -1; - m_disable_pushed_join= false; + m_pushed_join_member = NULL; + m_pushed_join_operation = -1; + m_disable_pushed_join = false; /* reset flags set by extra calls */ - m_read_before_write_removal_possible= false; - m_read_before_write_removal_used= false; - m_rows_updated= m_rows_deleted= 0; - m_ignore_dup_key= false; - m_use_write= false; - m_ignore_no_key= false; - m_rows_inserted= (ha_rows) 0; - m_rows_to_insert= (ha_rows) 1; - m_delete_cannot_batch= false; - m_update_cannot_batch= false; + m_read_before_write_removal_possible = false; + m_read_before_write_removal_used = false; + m_rows_updated = m_rows_deleted = 0; + m_ignore_dup_key = false; + m_use_write = false; + m_ignore_no_key = false; + m_rows_inserted = (ha_rows)0; + m_rows_to_insert = (ha_rows)1; + m_delete_cannot_batch = false; + m_update_cannot_batch = false; assert(m_is_bulk_delete == false); m_is_bulk_delete = false; DBUG_RETURN(0); } - /** Start of an insert, remember number of rows to be inserted, it will be used in write_row and get_autoincrement to send an optimal number @@ -8056,32 +6890,29 @@ int ha_ndbcluster::reset() rows number of rows to insert, 0 if unknown */ -int -ha_ndbcluster::flush_bulk_insert(bool allow_batch) -{ - NdbTransaction *trans= m_thd_ndb->trans; +int ha_ndbcluster::flush_bulk_insert(bool allow_batch) { + NdbTransaction *trans = m_thd_ndb->trans; DBUG_ENTER("ha_ndbcluster::flush_bulk_insert"); - DBUG_PRINT("info", ("Sending inserts to NDB, rows_inserted: %d", + DBUG_PRINT("info", ("Sending inserts to NDB, rows_inserted: %d", (int)m_rows_inserted)); DBUG_ASSERT(trans); - if (m_thd_ndb->check_trans_option(Thd_ndb::TRANS_TRANSACTIONS_OFF)) - { + if (m_thd_ndb->check_trans_option(Thd_ndb::TRANS_TRANSACTIONS_OFF)) { /* signal that transaction will be broken up and 
hence cannot be rolled back */ - THD *thd= table->in_use; - thd->get_transaction()->mark_modified_non_trans_table(Transaction_ctx::SESSION); - thd->get_transaction()->mark_modified_non_trans_table(Transaction_ctx::STMT); + THD *thd = table->in_use; + thd->get_transaction()->mark_modified_non_trans_table( + Transaction_ctx::SESSION); + thd->get_transaction()->mark_modified_non_trans_table( + Transaction_ctx::STMT); if (execute_commit(m_thd_ndb, trans, m_thd_ndb->m_force_send, - m_ignore_no_key) != 0) - { + m_ignore_no_key) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } - if (trans->restart() != 0) - { + if (trans->restart() != 0) { DBUG_ASSERT(0); DBUG_RETURN(-1); } @@ -8089,8 +6920,7 @@ ha_ndbcluster::flush_bulk_insert(bool allow_batch) } if (!allow_batch && - execute_no_commit(m_thd_ndb, trans, m_ignore_no_key) != 0) - { + execute_no_commit(m_thd_ndb, trans, m_ignore_no_key) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -8098,14 +6928,12 @@ ha_ndbcluster::flush_bulk_insert(bool allow_batch) DBUG_RETURN(0); } -void ha_ndbcluster::start_bulk_insert(ha_rows rows) -{ +void ha_ndbcluster::start_bulk_insert(ha_rows rows) { DBUG_ENTER("start_bulk_insert"); DBUG_PRINT("enter", ("rows: %d", (int)rows)); - - m_rows_inserted= (ha_rows) 0; - if (!m_use_write && m_ignore_dup_key) - { + + m_rows_inserted = (ha_rows)0; + if (!m_use_write && m_ignore_dup_key) { /* compare if expression with that in write_row we have a situation where peek_indexed_rows() will be called @@ -8113,23 +6941,19 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows) */ DBUG_PRINT("info", ("Batching turned off as duplicate key is " "ignored by using peek_row")); - m_rows_to_insert= 1; + m_rows_to_insert = 1; DBUG_VOID_RETURN; } - if (rows == (ha_rows) 0) - { + if (rows == (ha_rows)0) { /* We don't know how many will be inserted, guess */ - m_rows_to_insert= - (m_autoincrement_prefetch > DEFAULT_AUTO_PREFETCH) - ? m_autoincrement_prefetch - : DEFAULT_AUTO_PREFETCH; - m_autoincrement_prefetch= m_rows_to_insert; - } - else - { - m_rows_to_insert= rows; + m_rows_to_insert = (m_autoincrement_prefetch > DEFAULT_AUTO_PREFETCH) + ? m_autoincrement_prefetch + : DEFAULT_AUTO_PREFETCH; + m_autoincrement_prefetch = m_rows_to_insert; + } else { + m_rows_to_insert = rows; if (m_autoincrement_prefetch < m_rows_to_insert) - m_autoincrement_prefetch= m_rows_to_insert; + m_autoincrement_prefetch = m_rows_to_insert; } DBUG_VOID_RETURN; @@ -8138,22 +6962,19 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows) /** End of an insert. 
*/ -int ha_ndbcluster::end_bulk_insert() -{ - int error= 0; +int ha_ndbcluster::end_bulk_insert() { + int error = 0; DBUG_ENTER("end_bulk_insert"); // Check if last inserts need to be flushed - THD *thd= table->in_use; - Thd_ndb *thd_ndb= m_thd_ndb; - - if (!thd_allow_batch(thd) && thd_ndb->m_unsent_bytes) - { - const bool allow_batch= (thd_ndb->m_handler != 0); - error= flush_bulk_insert(allow_batch); - if (error != 0) - { + THD *thd = table->in_use; + Thd_ndb *thd_ndb = m_thd_ndb; + + if (!thd_allow_batch(thd) && thd_ndb->m_unsent_bytes) { + const bool allow_batch = (thd_ndb->m_handler != 0); + error = flush_bulk_insert(allow_batch); + if (error != 0) { // The requirement to calling set_my_errno() here is // not according to the handler interface specification // However there it is still code in Sql_cmd_load_table::execute_inner() @@ -8170,7 +6991,6 @@ int ha_ndbcluster::end_bulk_insert() DBUG_RETURN(error); } - /** How many seeks it will take to read through the table. @@ -8178,12 +6998,10 @@ int ha_ndbcluster::end_bulk_insert() that we can decide if we should scan the table or use keys. */ -double ha_ndbcluster::scan_time() -{ +double ha_ndbcluster::scan_time() { DBUG_ENTER("ha_ndbcluster::scan_time()"); - double res= rows2double(stats.records*1000); - DBUG_PRINT("exit", ("table: %s value: %f", - m_tabname, res)); + double res = rows2double(stats.records * 1000); + DBUG_PRINT("exit", ("table: %s value: %f", m_tabname, res)); DBUG_RETURN(res); } @@ -8195,35 +7013,30 @@ double ha_ndbcluster::scan_time() from several MySQL servers */ -THR_LOCK_DATA **ha_ndbcluster::store_lock(THD *thd, - THR_LOCK_DATA **to, - enum thr_lock_type lock_type) -{ +THR_LOCK_DATA **ha_ndbcluster::store_lock(THD *thd, THR_LOCK_DATA **to, + enum thr_lock_type lock_type) { DBUG_ENTER("store_lock"); - if (lock_type != TL_IGNORE && m_lock.type == TL_UNLOCK) - { - + if (lock_type != TL_IGNORE && m_lock.type == TL_UNLOCK) { /* If we are not doing a LOCK TABLE, then allow multiple writers */ - + /* Since NDB does not currently have table locks this is treated as a ordinary lock */ const bool in_lock_tables = thd_in_lock_tables(thd); const uint sql_command = thd_sql_command(thd); - if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && - lock_type <= TL_WRITE) && + if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) && !(in_lock_tables && sql_command == SQLCOM_LOCK_TABLES)) - lock_type= TL_WRITE_ALLOW_WRITE; - + lock_type = TL_WRITE_ALLOW_WRITE; + /* In queries of type INSERT INTO t1 SELECT ... FROM t2 ... MySQL would use the lock TL_READ_NO_INSERT on t2, and that would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts to t2. Convert the lock to a normal read lock to allow concurrent inserts to t2. */ - + if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables) - lock_type= TL_READ; + lock_type = TL_READ; /** * We need locks on source table when @@ -8232,15 +7045,14 @@ THR_LOCK_DATA **ha_ndbcluster::store_lock(THD *thd, * but that has been removed in 5.5 * I simply add this to get it... */ - if (sql_command == SQLCOM_ALTER_TABLE) - lock_type = TL_WRITE; + if (sql_command == SQLCOM_ALTER_TABLE) lock_type = TL_WRITE; - m_lock.type=lock_type; + m_lock.type = lock_type; } - *to++= &m_lock; + *to++ = &m_lock; DBUG_PRINT("exit", ("lock_type: %d", lock_type)); - + DBUG_RETURN(to); } @@ -8251,136 +7063,114 @@ THR_LOCK_DATA **ha_ndbcluster::store_lock(THD *thd, for the statement, this will be stored in thd_ndb.stmt. 
If not, we have to start a master transaction if there doesn't exist one from before, this will be stored in thd_ndb.all - + When a table lock is held one transaction will be started which holds - the table lock and for each statement a hupp transaction will be started + the table lock and for each statement a hupp transaction will be started If we are locking the table then: - save the NdbDictionary::Table for easy access - save reference to table statistics - refresh list of the indexes for the table if needed (if altered) */ -static int ndbcluster_update_apply_status(THD *thd, int do_update) -{ - Thd_ndb *thd_ndb= get_thd_ndb(thd); - Ndb *ndb= thd_ndb->ndb; - NDBDICT *dict= ndb->getDictionary(); +static int ndbcluster_update_apply_status(THD *thd, int do_update) { + Thd_ndb *thd_ndb = get_thd_ndb(thd); + Ndb *ndb = thd_ndb->ndb; + NDBDICT *dict = ndb->getDictionary(); const NDBTAB *ndbtab; - NdbTransaction *trans= thd_ndb->trans; + NdbTransaction *trans = thd_ndb->trans; ndb->setDatabaseName(Ndb_apply_status_table::DB_NAME.c_str()); Ndb_table_guard ndbtab_g(dict, Ndb_apply_status_table::TABLE_NAME.c_str()); - if (!(ndbtab= ndbtab_g.get_table())) - { + if (!(ndbtab = ndbtab_g.get_table())) { return -1; } - NdbOperation *op= 0; - int r= 0; - r|= (op= trans->getNdbOperation(ndbtab)) == 0; + NdbOperation *op = 0; + int r = 0; + r |= (op = trans->getNdbOperation(ndbtab)) == 0; DBUG_ASSERT(r == 0); if (do_update) - r|= op->updateTuple(); + r |= op->updateTuple(); else - r|= op->writeTuple(); + r |= op->writeTuple(); DBUG_ASSERT(r == 0); // server_id - r|= op->equal(0u, (Uint32)thd->server_id); + r |= op->equal(0u, (Uint32)thd->server_id); DBUG_ASSERT(r == 0); - if (!do_update) - { + if (!do_update) { // epoch - r|= op->setValue(1u, (Uint64)0); + r |= op->setValue(1u, (Uint64)0); DBUG_ASSERT(r == 0); } - const char* group_master_log_name = - ndb_mi_get_group_master_log_name(); - const Uint64 group_master_log_pos = - ndb_mi_get_group_master_log_pos(); + const char *group_master_log_name = ndb_mi_get_group_master_log_name(); + const Uint64 group_master_log_pos = ndb_mi_get_group_master_log_pos(); const Uint64 future_event_relay_log_pos = - ndb_mi_get_future_event_relay_log_pos(); - const Uint64 group_relay_log_pos = - ndb_mi_get_group_relay_log_pos(); + ndb_mi_get_future_event_relay_log_pos(); + const Uint64 group_relay_log_pos = ndb_mi_get_group_relay_log_pos(); // log_name char tmp_buf[FN_REFLEN]; - ndb_pack_varchar(ndbtab, 2u, tmp_buf, - group_master_log_name, strlen(group_master_log_name)); - r|= op->setValue(2u, tmp_buf); + ndb_pack_varchar(ndbtab, 2u, tmp_buf, group_master_log_name, + strlen(group_master_log_name)); + r |= op->setValue(2u, tmp_buf); DBUG_ASSERT(r == 0); // start_pos - r|= op->setValue(3u, group_master_log_pos); + r |= op->setValue(3u, group_master_log_pos); DBUG_ASSERT(r == 0); // end_pos - r|= op->setValue(4u, group_master_log_pos + - (future_event_relay_log_pos - group_relay_log_pos)); + r |= op->setValue(4u, group_master_log_pos + + (future_event_relay_log_pos - group_relay_log_pos)); DBUG_ASSERT(r == 0); return 0; } +void Thd_ndb::transaction_checks() { + THD *thd = m_thd; -void -Thd_ndb::transaction_checks() -{ - THD* thd = m_thd; - - if (thd_sql_command(thd) == SQLCOM_LOAD || - !THDVAR(thd, use_transactions)) - { + if (thd_sql_command(thd) == SQLCOM_LOAD || !THDVAR(thd, use_transactions)) { // Turn off transactional behaviour for the duration of this // statement/transaction set_trans_option(TRANS_TRANSACTIONS_OFF); } - m_force_send= THDVAR(thd, force_send); + 
m_force_send = THDVAR(thd, force_send); if (!thd->slave_thread) - m_batch_size= THDVAR(thd, batch_size); - else - { - m_batch_size= THDVAR(NULL, batch_size); /* using global value */ + m_batch_size = THDVAR(thd, batch_size); + else { + m_batch_size = THDVAR(NULL, batch_size); /* using global value */ /* Do not use hinted TC selection in slave thread */ - THDVAR(thd, optimized_node_selection)= - THDVAR(NULL, optimized_node_selection) & 1; /* using global value */ + THDVAR(thd, optimized_node_selection) = + THDVAR(NULL, optimized_node_selection) & 1; /* using global value */ } } - -int ha_ndbcluster::start_statement(THD *thd, - Thd_ndb *thd_ndb, - uint table_count) -{ - NdbTransaction *trans= thd_ndb->trans; +int ha_ndbcluster::start_statement(THD *thd, Thd_ndb *thd_ndb, + uint table_count) { + NdbTransaction *trans = thd_ndb->trans; int error; DBUG_ENTER("ha_ndbcluster::start_statement"); - m_thd_ndb= thd_ndb; + m_thd_ndb = thd_ndb; m_thd_ndb->transaction_checks(); - if (table_count == 0) - { + if (table_count == 0) { ndb_thd_register_trans(thd, trans == nullptr); - if (thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) - { - thd_ndb->m_handler= NULL; - } - else - { + if (thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { + thd_ndb->m_handler = NULL; + } else { /* this is an autocommit, we may keep a reference to the handler to be used in the commit phase for optimization reasons, defering execute */ - thd_ndb->m_handler= this; + thd_ndb->m_handler = this; } - } - else - { + } else { /* there is more than one handler involved, execute deferal not possible */ - ha_ndbcluster* handler = thd_ndb->m_handler; - thd_ndb->m_handler= NULL; - if (handler != NULL) - { + ha_ndbcluster *handler = thd_ndb->m_handler; + thd_ndb->m_handler = NULL; + if (handler != NULL) { /** * If we initially belived that this could be run * using execute deferal...but changed out mind @@ -8390,37 +7180,29 @@ int ha_ndbcluster::start_statement(THD *thd, add_handler_to_open_tables(thd, thd_ndb, handler); } } - if (!trans && table_count == 0) - { + if (!trans && table_count == 0) { thd_ndb->reset_trans_options(); - DBUG_PRINT("trans",("Possibly starting transaction")); + DBUG_PRINT("trans", ("Possibly starting transaction")); const uint opti_node_select = THDVAR(thd, optimized_node_selection); DBUG_PRINT("enter", ("optimized_node_selection: %u", opti_node_select)); - if (!(opti_node_select & 2) || - thd->lex->sql_command == SQLCOM_LOAD) - if (unlikely(!start_transaction(error))) - DBUG_RETURN(error); + if (!(opti_node_select & 2) || thd->lex->sql_command == SQLCOM_LOAD) + if (unlikely(!start_transaction(error))) DBUG_RETURN(error); thd_ndb->init_open_tables(); - thd_ndb->m_slow_path= false; + thd_ndb->m_slow_path = false; if (!(thd_test_options(thd, OPTION_BIN_LOG)) || - thd->variables.binlog_format == BINLOG_FORMAT_STMT) - { + thd->variables.binlog_format == BINLOG_FORMAT_STMT) { thd_ndb->set_trans_option(Thd_ndb::TRANS_NO_LOGGING); - thd_ndb->m_slow_path= true; - } - else if (thd->slave_thread) - thd_ndb->m_slow_path= true; + thd_ndb->m_slow_path = true; + } else if (thd->slave_thread) + thd_ndb->m_slow_path = true; } DBUG_RETURN(0); } -int -ha_ndbcluster::add_handler_to_open_tables(THD *thd, - Thd_ndb *thd_ndb, - ha_ndbcluster* handler) -{ +int ha_ndbcluster::add_handler_to_open_tables(THD *thd, Thd_ndb *thd_ndb, + ha_ndbcluster *handler) { DBUG_ENTER("ha_ndbcluster::add_handler_to_open_tables"); DBUG_PRINT("info", ("Adding %s", handler->m_share->key_string())); @@ -8428,174 +7210,150 @@ 
ha_ndbcluster::add_handler_to_open_tables(THD *thd, * thd_ndb->open_tables is only used iff thd_ndb->m_handler is not */ DBUG_ASSERT(thd_ndb->m_handler == NULL); - const void *key= handler->m_share; - THD_NDB_SHARE *thd_ndb_share= find_or_nullptr(thd_ndb->open_tables, key); - if (thd_ndb_share == 0) - { - thd_ndb_share= - (THD_NDB_SHARE *) thd->get_transaction()->allocate_memory(sizeof(THD_NDB_SHARE)); - if (!thd_ndb_share) - { + const void *key = handler->m_share; + THD_NDB_SHARE *thd_ndb_share = find_or_nullptr(thd_ndb->open_tables, key); + if (thd_ndb_share == 0) { + thd_ndb_share = (THD_NDB_SHARE *)thd->get_transaction()->allocate_memory( + sizeof(THD_NDB_SHARE)); + if (!thd_ndb_share) { mem_alloc_error(sizeof(THD_NDB_SHARE)); DBUG_RETURN(1); } - thd_ndb_share->key= key; - thd_ndb_share->stat.last_count= thd_ndb->count; - thd_ndb_share->stat.no_uncommitted_rows_count= 0; - thd_ndb_share->stat.records= ~(ha_rows)0; + thd_ndb_share->key = key; + thd_ndb_share->stat.last_count = thd_ndb->count; + thd_ndb_share->stat.no_uncommitted_rows_count = 0; + thd_ndb_share->stat.records = ~(ha_rows)0; thd_ndb->open_tables.emplace(thd_ndb_share->key, thd_ndb_share); - } - else if (thd_ndb_share->stat.last_count != thd_ndb->count) - { - thd_ndb_share->stat.last_count= thd_ndb->count; - thd_ndb_share->stat.no_uncommitted_rows_count= 0; - thd_ndb_share->stat.records= ~(ha_rows)0; + } else if (thd_ndb_share->stat.last_count != thd_ndb->count) { + thd_ndb_share->stat.last_count = thd_ndb->count; + thd_ndb_share->stat.no_uncommitted_rows_count = 0; + thd_ndb_share->stat.records = ~(ha_rows)0; } - handler->m_table_info= &thd_ndb_share->stat; + handler->m_table_info = &thd_ndb_share->stat; DBUG_RETURN(0); } -int ha_ndbcluster::init_handler_for_statement(THD *thd) -{ +int ha_ndbcluster::init_handler_for_statement(THD *thd) { /* This is the place to make sure this handler instance has a started transaction. - - The transaction is started by the first handler on which + + The transaction is started by the first handler on which MySQL Server calls external lock - - Other handlers in the same stmt or transaction should use + + Other handlers in the same stmt or transaction should use the same NDB transaction. This is done by setting up the m_thd_ndb - pointer to point to the NDB transaction object. + pointer to point to the NDB transaction object. 
*/ DBUG_ENTER("ha_ndbcluster::init_handler_for_statement"); - Thd_ndb *thd_ndb= m_thd_ndb; + Thd_ndb *thd_ndb = m_thd_ndb; DBUG_ASSERT(thd_ndb); // store thread specific data first to set the right context - m_autoincrement_prefetch= THDVAR(thd, autoincrement_prefetch_sz); - m_blobs_pending= false; + m_autoincrement_prefetch = THDVAR(thd, autoincrement_prefetch_sz); + m_blobs_pending = false; release_blobs_buffer(); - m_slow_path= m_thd_ndb->m_slow_path; + m_slow_path = m_thd_ndb->m_slow_path; - if (unlikely(m_slow_path)) - { + if (unlikely(m_slow_path)) { if (m_share == ndb_apply_status_share && thd->slave_thread) - m_thd_ndb->set_trans_option(Thd_ndb::TRANS_INJECTED_APPLY_STATUS); + m_thd_ndb->set_trans_option(Thd_ndb::TRANS_INJECTED_APPLY_STATUS); } int ret = 0; - if (thd_ndb->m_handler == 0) - { + if (thd_ndb->m_handler == 0) { DBUG_ASSERT(m_share); ret = add_handler_to_open_tables(thd, thd_ndb, this); - } - else - { - struct Ndb_local_table_statistics &stat= m_table_info_instance; - stat.last_count= thd_ndb->count; - stat.no_uncommitted_rows_count= 0; - stat.records= ~(ha_rows)0; - m_table_info= &stat; + } else { + struct Ndb_local_table_statistics &stat = m_table_info_instance; + stat.last_count = thd_ndb->count; + stat.no_uncommitted_rows_count = 0; + stat.records = ~(ha_rows)0; + m_table_info = &stat; } DBUG_RETURN(ret); } -int ha_ndbcluster::external_lock(THD *thd, int lock_type) -{ +int ha_ndbcluster::external_lock(THD *thd, int lock_type) { DBUG_ENTER("external_lock"); - if (lock_type != F_UNLCK) - { + if (lock_type != F_UNLCK) { int error; /* Check that this handler instance has a connection set up to the Ndb object of thd */ - if (check_ndb_connection(thd)) - DBUG_RETURN(1); - Thd_ndb *thd_ndb= get_thd_ndb(thd); + if (check_ndb_connection(thd)) DBUG_RETURN(1); + Thd_ndb *thd_ndb = get_thd_ndb(thd); - DBUG_PRINT("enter", ("lock_type != F_UNLCK " - "this: 0x%lx thd: 0x%lx thd_ndb: %lx " - "thd_ndb->lock_count: %d", - (long) this, (long) thd, (long) thd_ndb, - thd_ndb->lock_count)); + DBUG_PRINT("enter", + ("lock_type != F_UNLCK " + "this: 0x%lx thd: 0x%lx thd_ndb: %lx " + "thd_ndb->lock_count: %d", + (long)this, (long)thd, (long)thd_ndb, thd_ndb->lock_count)); - if ((error= start_statement(thd, thd_ndb, - thd_ndb->lock_count++))) - { + if ((error = start_statement(thd, thd_ndb, thd_ndb->lock_count++))) { thd_ndb->lock_count--; DBUG_RETURN(error); } - if ((error= init_handler_for_statement(thd))) - { + if ((error = init_handler_for_statement(thd))) { thd_ndb->lock_count--; DBUG_RETURN(error); } DBUG_RETURN(0); - } - else - { - Thd_ndb *thd_ndb= m_thd_ndb; + } else { + Thd_ndb *thd_ndb = m_thd_ndb; DBUG_ASSERT(thd_ndb); - DBUG_PRINT("enter", ("lock_type == F_UNLCK " - "this: 0x%lx thd: 0x%lx thd_ndb: %lx " - "thd_ndb->lock_count: %d", - (long) this, (long) thd, (long) thd_ndb, - thd_ndb->lock_count)); + DBUG_PRINT("enter", + ("lock_type == F_UNLCK " + "this: 0x%lx thd: 0x%lx thd_ndb: %lx " + "thd_ndb->lock_count: %d", + (long)this, (long)thd, (long)thd_ndb, thd_ndb->lock_count)); - if (!--thd_ndb->lock_count) - { + if (!--thd_ndb->lock_count) { DBUG_PRINT("trans", ("Last external_lock")); if ((!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) && - thd_ndb->trans) - { - if (thd_ndb->trans) - { + thd_ndb->trans) { + if (thd_ndb->trans) { /* Unlock is done without a transaction commit / rollback. 
This happens if the thread didn't update any rows We must in this case close the transaction to release resources */ - DBUG_PRINT("trans",("ending non-updating transaction")); + DBUG_PRINT("trans", ("ending non-updating transaction")); thd_ndb->ndb->closeTransaction(thd_ndb->trans); - thd_ndb->trans= NULL; - thd_ndb->m_handler= NULL; + thd_ndb->trans = NULL; + thd_ndb->m_handler = NULL; } } } - m_table_info= NULL; + m_table_info = NULL; /* This is the place to make sure this handler instance no longer are connected to the active transaction. - And since the handler is no longer part of the transaction + And since the handler is no longer part of the transaction it can't have open cursors, ops, queries or blobs pending. */ - m_thd_ndb= NULL; + m_thd_ndb = NULL; DBUG_ASSERT(m_active_query == NULL); - if (m_active_query) - DBUG_PRINT("warning", ("m_active_query != NULL")); - m_active_query= NULL; - - if (m_active_cursor) - DBUG_PRINT("warning", ("m_active_cursor != NULL")); - m_active_cursor= NULL; - - if (m_multi_cursor) - DBUG_PRINT("warning", ("m_multi_cursor != NULL")); - m_multi_cursor= NULL; - - if (m_blobs_pending) - DBUG_PRINT("warning", ("blobs_pending != 0")); - m_blobs_pending= 0; - + if (m_active_query) DBUG_PRINT("warning", ("m_active_query != NULL")); + m_active_query = NULL; + + if (m_active_cursor) DBUG_PRINT("warning", ("m_active_cursor != NULL")); + m_active_cursor = NULL; + + if (m_multi_cursor) DBUG_PRINT("warning", ("m_multi_cursor != NULL")); + m_multi_cursor = NULL; + + if (m_blobs_pending) DBUG_PRINT("warning", ("blobs_pending != 0")); + m_blobs_pending = 0; + DBUG_RETURN(0); } } @@ -8607,12 +7365,11 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) locks are kept if unlock_row() is not called. */ -void ha_ndbcluster::unlock_row() -{ +void ha_ndbcluster::unlock_row() { DBUG_ENTER("unlock_row"); DBUG_PRINT("info", ("Unlocking row")); - m_lock_tuple= false; + m_lock_tuple = false; DBUG_VOID_RETURN; } @@ -8640,31 +7397,25 @@ void ha_ndbcluster::unlock_row() statement and the call to commit comes at the end of the stored function. 
*/ -int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type) -{ +int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type) { DBUG_ENTER("start_stmt"); DBUG_ASSERT(thd == table->in_use); int error; - Thd_ndb* thd_ndb= get_thd_ndb(thd); - if ((error= start_statement(thd, thd_ndb, thd_ndb->start_stmt_count++))) - { + Thd_ndb *thd_ndb = get_thd_ndb(thd); + if ((error = start_statement(thd, thd_ndb, thd_ndb->start_stmt_count++))) { thd_ndb->start_stmt_count--; DBUG_RETURN(error); } - if ((error= init_handler_for_statement(thd))) - { + if ((error = init_handler_for_statement(thd))) { thd_ndb->start_stmt_count--; DBUG_RETURN(error); } DBUG_RETURN(0); } -NdbTransaction * -ha_ndbcluster::start_transaction_row(const NdbRecord *ndb_record, - const uchar *record, - int &error) -{ +NdbTransaction *ha_ndbcluster::start_transaction_row( + const NdbRecord *ndb_record, const uchar *record, int &error) { NdbTransaction *trans; DBUG_ENTER("ha_ndbcluster::start_transaction_row"); DBUG_ASSERT(m_thd_ndb); @@ -8672,30 +7423,26 @@ ha_ndbcluster::start_transaction_row(const NdbRecord *ndb_record, m_thd_ndb->transaction_checks(); - Ndb *ndb= m_thd_ndb->ndb; + Ndb *ndb = m_thd_ndb->ndb; - Uint64 tmp[(MAX_KEY_SIZE_IN_WORDS*MAX_XFRM_MULTIPLY) >> 1]; - char *buf= (char*)&tmp[0]; - trans= ndb->startTransaction(ndb_record, - (const char*)record, - buf, sizeof(tmp)); + Uint64 tmp[(MAX_KEY_SIZE_IN_WORDS * MAX_XFRM_MULTIPLY) >> 1]; + char *buf = (char *)&tmp[0]; + trans = + ndb->startTransaction(ndb_record, (const char *)record, buf, sizeof(tmp)); - if (trans) - { + if (trans) { m_thd_ndb->increment_hinted_trans_count(); DBUG_PRINT("info", ("Delayed allocation of TC")); - DBUG_RETURN(m_thd_ndb->trans= trans); + DBUG_RETURN(m_thd_ndb->trans = trans); } ERR_SET(m_thd_ndb->ndb->getNdbError(), error); DBUG_RETURN(NULL); } -NdbTransaction * -ha_ndbcluster::start_transaction_key(uint inx_no, - const uchar *key_data, - int &error) -{ +NdbTransaction *ha_ndbcluster::start_transaction_key(uint inx_no, + const uchar *key_data, + int &error) { NdbTransaction *trans; DBUG_ENTER("ha_ndbcluster::start_transaction_key"); DBUG_ASSERT(m_thd_ndb); @@ -8703,37 +7450,32 @@ ha_ndbcluster::start_transaction_key(uint inx_no, m_thd_ndb->transaction_checks(); - Ndb *ndb= m_thd_ndb->ndb; - const NdbRecord *key_rec= m_index[inx_no].ndb_unique_record_key; + Ndb *ndb = m_thd_ndb->ndb; + const NdbRecord *key_rec = m_index[inx_no].ndb_unique_record_key; - Uint64 tmp[(MAX_KEY_SIZE_IN_WORDS*MAX_XFRM_MULTIPLY) >> 1]; - char *buf= (char*)&tmp[0]; - trans= ndb->startTransaction(key_rec, - (const char*)key_data, - buf, sizeof(tmp)); + Uint64 tmp[(MAX_KEY_SIZE_IN_WORDS * MAX_XFRM_MULTIPLY) >> 1]; + char *buf = (char *)&tmp[0]; + trans = + ndb->startTransaction(key_rec, (const char *)key_data, buf, sizeof(tmp)); - if (trans) - { + if (trans) { m_thd_ndb->increment_hinted_trans_count(); DBUG_PRINT("info", ("Delayed allocation of TC")); - DBUG_RETURN(m_thd_ndb->trans= trans); + DBUG_RETURN(m_thd_ndb->trans = trans); } ERR_SET(m_thd_ndb->ndb->getNdbError(), error); DBUG_RETURN(NULL); } -NdbTransaction * -ha_ndbcluster::start_transaction(int &error) -{ +NdbTransaction *ha_ndbcluster::start_transaction(int &error) { NdbTransaction *trans; DBUG_ENTER("ha_ndbcluster::start_transaction"); DBUG_ASSERT(m_thd_ndb); DBUG_ASSERT(m_thd_ndb->trans == NULL); - if(DBUG_EVALUATE_IF("ndb_fail_start_trans", true, false)) - { + if (DBUG_EVALUATE_IF("ndb_fail_start_trans", true, false)) { fprintf(stderr, "ndb_fail_start_trans\n"); error = HA_ERR_NO_CONNECTION; DBUG_RETURN(NULL); @@ 
-8741,23 +7483,21 @@ ha_ndbcluster::start_transaction(int &error) m_thd_ndb->transaction_checks(); - const uint opti_node_select= THDVAR(table->in_use, optimized_node_selection); + const uint opti_node_select = THDVAR(table->in_use, optimized_node_selection); m_thd_ndb->connection->set_optimized_node_selection(opti_node_select & 1); - if ((trans= m_thd_ndb->ndb->startTransaction(m_table))) - { + if ((trans = m_thd_ndb->ndb->startTransaction(m_table))) { // NOTE! No hint provided when starting transaction DBUG_PRINT("info", ("Delayed allocation of TC")); - DBUG_RETURN(m_thd_ndb->trans= trans); + DBUG_RETURN(m_thd_ndb->trans = trans); } ERR_SET(m_thd_ndb->ndb->getNdbError(), error); DBUG_RETURN(NULL); } - -NdbTransaction * -ha_ndbcluster::start_transaction_part_id(Uint32 part_id, int &error) -{ + +NdbTransaction *ha_ndbcluster::start_transaction_part_id(Uint32 part_id, + int &error) { NdbTransaction *trans; DBUG_ENTER("ha_ndbcluster::start_transaction_part_id"); @@ -8766,54 +7506,48 @@ ha_ndbcluster::start_transaction_part_id(Uint32 part_id, int &error) m_thd_ndb->transaction_checks(); - if ((trans= m_thd_ndb->ndb->startTransaction(m_table, part_id))) - { + if ((trans = m_thd_ndb->ndb->startTransaction(m_table, part_id))) { m_thd_ndb->increment_hinted_trans_count(); DBUG_PRINT("info", ("Delayed allocation of TC")); - DBUG_RETURN(m_thd_ndb->trans= trans); + DBUG_RETURN(m_thd_ndb->trans = trans); } ERR_SET(m_thd_ndb->ndb->getNdbError(), error); DBUG_RETURN(NULL); } - + /** Static error print function called from static handler method ndbcluster_commit and ndbcluster_rollback. */ -static void -ndbcluster_print_error(int error, const NdbOperation *error_op) -{ +static void ndbcluster_print_error(int error, const NdbOperation *error_op) { DBUG_ENTER("ndbcluster_print_error"); TABLE_SHARE share; - const char *tab_name= (error_op) ? error_op->getTableName() : ""; - if (tab_name == NULL) - { + const char *tab_name = (error_op) ? error_op->getTableName() : ""; + if (tab_name == NULL) { DBUG_ASSERT(tab_name != NULL); - tab_name= ""; + tab_name = ""; } - share.db.str= ""; - share.db.length= 0; - share.table_name.str= tab_name; - share.table_name.length= strlen(tab_name); + share.db.str = ""; + share.db.length = 0; + share.table_name.str = tab_name; + share.table_name.length = strlen(tab_name); ha_ndbcluster error_handler(ndbcluster_hton, &share); error_handler.print_error(error, MYF(0)); DBUG_VOID_RETURN; } - /** Commit a transaction started in NDB. 
*/ -int ndbcluster_commit(handlerton*, THD *thd, bool all) -{ - int res= 0; - Thd_ndb *thd_ndb= get_thd_ndb(thd); - Ndb *ndb= thd_ndb->ndb; - NdbTransaction *trans= thd_ndb->trans; +int ndbcluster_commit(handlerton *, THD *thd, bool all) { + int res = 0; + Thd_ndb *thd_ndb = get_thd_ndb(thd); + Ndb *ndb = thd_ndb->ndb; + NdbTransaction *trans = thd_ndb->trans; bool retry_slave_trans = false; - (void) retry_slave_trans; + (void)retry_slave_trans; DBUG_ENTER("ndbcluster_commit"); DBUG_ASSERT(ndb); @@ -8826,19 +7560,17 @@ int ndbcluster_commit(handlerton*, THD *thd, bool all) ddl_ctx->commit(); } - thd_ndb->start_stmt_count= 0; - if (trans == NULL) - { + thd_ndb->start_stmt_count = 0; + if (trans == NULL) { DBUG_PRINT("info", ("trans == NULL")); DBUG_RETURN(0); } - if (!all && thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) - { + if (!all && thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { /* An odditity in the handler interface is that commit on handlerton - is called to indicate end of statement only in cases where + is called to indicate end of statement only in cases where autocommit isn't used and the all flag isn't set. - + We also leave quickly when a transaction haven't even been started, in this case we are safe that no clean up is needed. In this case the MySQL Server could handle the query without contacting the @@ -8848,27 +7580,24 @@ int ndbcluster_commit(handlerton*, THD *thd, bool all) DBUG_PRINT("info", ("Commit before start or end-of-statement only")); DBUG_RETURN(0); } - thd_ndb->save_point_count= 0; + thd_ndb->save_point_count = 0; - if (unlikely(thd_ndb->m_slow_path)) - { - if (thd->slave_thread) - { - ndbcluster_update_apply_status(thd, + if (unlikely(thd_ndb->m_slow_path)) { + if (thd->slave_thread) { + ndbcluster_update_apply_status( + thd, thd_ndb->check_trans_option(Thd_ndb::TRANS_INJECTED_APPLY_STATUS)); } } - if (thd->slave_thread) - { + if (thd->slave_thread) { /* If this slave transaction has included conflict detecting ops * and some defined operations are not yet sent, then perform * an execute(NoCommit) before committing, as conflict op handling * is done by execute(NoCommit) */ /* TODO : Add as function */ - if (g_ndb_slave_state.conflict_flags & SCS_OPS_DEFINED) - { + if (g_ndb_slave_state.conflict_flags & SCS_OPS_DEFINED) { if (thd_ndb->m_unsent_bytes) res = execute_no_commit(thd_ndb, trans, true); } @@ -8876,66 +7605,53 @@ int ndbcluster_commit(handlerton*, THD *thd, bool all) if (likely(res == 0)) res = g_ndb_slave_state.atConflictPreCommit(retry_slave_trans); - if (likely(res == 0)) - res= execute_commit(thd_ndb, trans, 1, true); + if (likely(res == 0)) res = execute_commit(thd_ndb, trans, 1, true); // Copy-out slave thread statistics update_slave_api_stats(thd_ndb->ndb); - } - else - { + } else { if (thd_ndb->m_handler && - thd_ndb->m_handler->m_read_before_write_removal_possible) - { + thd_ndb->m_handler->m_read_before_write_removal_possible) { /* This is an autocommit involving only one table and rbwr is on, thus the transaction has already been committed in exec_bulk_update() or end_bulk_delete() */ DBUG_PRINT("info", ("autocommit+rbwr, transaction already committed")); - const NdbTransaction::CommitStatusType commitStatus = trans->commitStatus(); - - if(commitStatus == NdbTransaction::Committed) - { + const NdbTransaction::CommitStatusType commitStatus = + trans->commitStatus(); + + if (commitStatus == NdbTransaction::Committed) { /* Already committed transaction to save roundtrip */ 
DBUG_ASSERT(get_thd_ndb(current_thd)->m_error == false); - } - else if(commitStatus == NdbTransaction::Aborted) - { - /* Commit failed before transaction was started */ + } else if (commitStatus == NdbTransaction::Aborted) { + /* Commit failed before transaction was started */ DBUG_ASSERT(get_thd_ndb(current_thd)->m_error == true); - } - else if(commitStatus == NdbTransaction::NeedAbort) - { + } else if (commitStatus == NdbTransaction::NeedAbort) { /* Commit attempt failed and rollback is needed */ - res = -1; - - } - else - { + res = -1; + + } else { /* Commit was never attempted - this should not be possible */ - DBUG_ASSERT(commitStatus == NdbTransaction::Started || commitStatus == NdbTransaction::NotStarted); - ndb_log_error("found uncommitted autocommit+rbwr transaction, " - "commit status: %d", commitStatus); + DBUG_ASSERT(commitStatus == NdbTransaction::Started || + commitStatus == NdbTransaction::NotStarted); + ndb_log_error( + "found uncommitted autocommit+rbwr transaction, " + "commit status: %d", + commitStatus); abort(); } - } - else - { - const bool ignore_error= applying_binlog(thd); - res= execute_commit(thd_ndb, trans, - THDVAR(thd, force_send), - ignore_error); + } else { + const bool ignore_error = applying_binlog(thd); + res = + execute_commit(thd_ndb, trans, THDVAR(thd, force_send), ignore_error); } } - if (res != 0) - { - if (retry_slave_trans) - { + if (res != 0) { + if (retry_slave_trans) { if (st_ndb_slave_state::MAX_RETRY_TRANS_COUNT > - g_ndb_slave_state.retry_trans_count++) - { + g_ndb_slave_state.retry_trans_count++) { /* Warning is necessary to cause retry from slave.cc exec_relay_log_event() @@ -8952,68 +7668,60 @@ int ndbcluster_commit(handlerton*, THD *thd, bool all) conflict handling */ ndb_mi_set_relay_log_trans_retries(0); - } - else - { + } else { /* Too many retries, print error and exit - normal too many retries mechanism will cause exit */ - ndb_log_error("Ndb slave retried transaction %u time(s) in vain. " - "Giving up.", - st_ndb_slave_state::MAX_RETRY_TRANS_COUNT); + ndb_log_error( + "Ndb slave retried transaction %u time(s) in vain. 
" + "Giving up.", + st_ndb_slave_state::MAX_RETRY_TRANS_COUNT); } - res= ER_GET_TEMPORARY_ERRMSG; - } - else - { - const NdbError err= trans->getNdbError(); - const NdbOperation *error_op= trans->getNdbErrorOperation(); - res= ndb_to_mysql_error(&err); - if (res != -1) - ndbcluster_print_error(res, error_op); + res = ER_GET_TEMPORARY_ERRMSG; + } else { + const NdbError err = trans->getNdbError(); + const NdbOperation *error_op = trans->getNdbErrorOperation(); + res = ndb_to_mysql_error(&err); + if (res != -1) ndbcluster_print_error(res, error_op); } - } - else - { + } else { /* Update shared statistics for tables inserted into / deleted from*/ - if (thd_ndb->m_handler && // Autocommit Txn - thd_ndb->m_handler->m_share && - thd_ndb->m_handler->m_table_info) - { - modify_shared_stats(thd_ndb->m_handler->m_share, thd_ndb->m_handler->m_table_info); + if (thd_ndb->m_handler && // Autocommit Txn + thd_ndb->m_handler->m_share && thd_ndb->m_handler->m_table_info) { + modify_shared_stats(thd_ndb->m_handler->m_share, + thd_ndb->m_handler->m_table_info); } /* Manual commit: Update all affected NDB_SHAREs found in 'open_tables' */ - for (const auto &key_and_value : thd_ndb->open_tables) - { - THD_NDB_SHARE *thd_share= key_and_value.second; - modify_shared_stats(const_cast(static_cast(thd_share->key)), &thd_share->stat); + for (const auto &key_and_value : thd_ndb->open_tables) { + THD_NDB_SHARE *thd_share = key_and_value.second; + modify_shared_stats(const_cast( + static_cast(thd_share->key)), + &thd_share->stat); } } ndb->closeTransaction(trans); - thd_ndb->trans= NULL; - thd_ndb->m_handler= NULL; + thd_ndb->trans = NULL; + thd_ndb->m_handler = NULL; DBUG_RETURN(res); } - /** Rollback a transaction started in NDB. */ -static int ndbcluster_rollback(handlerton*, THD *thd, bool all) -{ - int res= 0; - Thd_ndb *thd_ndb= get_thd_ndb(thd); - Ndb *ndb= thd_ndb->ndb; - NdbTransaction *trans= thd_ndb->trans; +static int ndbcluster_rollback(handlerton *, THD *thd, bool all) { + int res = 0; + Thd_ndb *thd_ndb = get_thd_ndb(thd); + Ndb *ndb = thd_ndb->ndb; + NdbTransaction *trans = thd_ndb->trans; DBUG_ENTER("ndbcluster_rollback"); - DBUG_PRINT("enter", ("all: %d thd_ndb->save_point_count: %d", - all, thd_ndb->save_point_count)); + DBUG_PRINT("enter", ("all: %d thd_ndb->save_point_count: %d", all, + thd_ndb->save_point_count)); DBUG_ASSERT(ndb); Ndb_DDL_transaction_ctx *ddl_ctx = thd_ndb->get_ddl_transaction_ctx(); @@ -9025,17 +7733,14 @@ static int ndbcluster_rollback(handlerton*, THD *thd, bool all) } } - thd_ndb->start_stmt_count= 0; - if (trans == NULL) - { + thd_ndb->start_stmt_count = 0; + if (trans == NULL) { /* Ignore end-of-statement until real rollback or commit is called */ DBUG_PRINT("info", ("trans == NULL")); DBUG_RETURN(0); } - if (!all && - thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) && - (thd_ndb->save_point_count > 0)) - { + if (!all && thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) && + (thd_ndb->save_point_count > 0)) { /* Ignore end-of-statement until real rollback or commit is called as ndb does not support rollback statement @@ -9047,26 +7752,22 @@ static int ndbcluster_rollback(handlerton*, THD *thd, bool all) my_error(ER_WARN_ENGINE_TRANSACTION_ROLLBACK, MYF(0), "NDB"); DBUG_RETURN(0); } - thd_ndb->save_point_count= 0; - if (thd->slave_thread) - g_ndb_slave_state.atTransactionAbort(); - thd_ndb->m_unsent_bytes= 0; + thd_ndb->save_point_count = 0; + if (thd->slave_thread) g_ndb_slave_state.atTransactionAbort(); + thd_ndb->m_unsent_bytes = 0; 
thd_ndb->m_execute_count++; DBUG_PRINT("info", ("execute_count: %u", thd_ndb->m_execute_count)); - if (trans->execute(NdbTransaction::Rollback) != 0) - { - const NdbError err= trans->getNdbError(); - const NdbOperation *error_op= trans->getNdbErrorOperation(); - res= ndb_to_mysql_error(&err); - if (res != -1) - ndbcluster_print_error(res, error_op); + if (trans->execute(NdbTransaction::Rollback) != 0) { + const NdbError err = trans->getNdbError(); + const NdbOperation *error_op = trans->getNdbErrorOperation(); + res = ndb_to_mysql_error(&err); + if (res != -1) ndbcluster_print_error(res, error_op); } ndb->closeTransaction(trans); - thd_ndb->trans= NULL; - thd_ndb->m_handler= NULL; + thd_ndb->trans = NULL; + thd_ndb->m_handler = NULL; - if (thd->slave_thread) - { + if (thd->slave_thread) { // Copy-out slave thread statistics update_slave_api_stats(thd_ndb->ndb); } @@ -9080,8 +7781,7 @@ static int ndbcluster_rollback(handlerton*, THD *thd, bool all) @param thd Thread object */ -static void ndbcluster_post_ddl(THD *thd) -{ +static void ndbcluster_post_ddl(THD *thd) { DBUG_TRACE; Thd_ndb *thd_ndb = get_thd_ndb(thd); Ndb_DDL_transaction_ctx *ddl_ctx = thd_ndb->get_ddl_transaction_ctx(); @@ -9095,37 +7795,26 @@ static void ndbcluster_post_ddl(THD *thd) } } -static const char* ndb_table_modifier_prefix = "NDB_TABLE="; +static const char *ndb_table_modifier_prefix = "NDB_TABLE="; /* Modifiers that we support currently */ -static const -struct NDB_Modifier ndb_table_modifiers[] = -{ - { NDB_Modifier::M_BOOL, STRING_WITH_LEN("NOLOGGING"), 0, {0} }, - { NDB_Modifier::M_BOOL, STRING_WITH_LEN("READ_BACKUP"), 0, {0} }, - { NDB_Modifier::M_BOOL, STRING_WITH_LEN("FULLY_REPLICATED"), 0, {0} }, - { NDB_Modifier::M_STRING, STRING_WITH_LEN("PARTITION_BALANCE"), 0, {0} }, - { NDB_Modifier::M_BOOL, 0, 0, 0, {0} } -}; - -static const char* ndb_column_modifier_prefix = "NDB_COLUMN="; - -static const -struct NDB_Modifier ndb_column_modifiers[] = -{ - { NDB_Modifier::M_BOOL, STRING_WITH_LEN("MAX_BLOB_PART_SIZE"), 0, {0} }, - { NDB_Modifier::M_BOOL, 0, 0, 0, {0} } -}; - - - -static bool -ndb_column_is_dynamic(THD *thd, - Field *field, - HA_CREATE_INFO *create_info, - bool use_dynamic_as_default, - NDBCOL::StorageType type) -{ +static const struct NDB_Modifier ndb_table_modifiers[] = { + {NDB_Modifier::M_BOOL, STRING_WITH_LEN("NOLOGGING"), 0, {0}}, + {NDB_Modifier::M_BOOL, STRING_WITH_LEN("READ_BACKUP"), 0, {0}}, + {NDB_Modifier::M_BOOL, STRING_WITH_LEN("FULLY_REPLICATED"), 0, {0}}, + {NDB_Modifier::M_STRING, STRING_WITH_LEN("PARTITION_BALANCE"), 0, {0}}, + {NDB_Modifier::M_BOOL, 0, 0, 0, {0}}}; + +static const char *ndb_column_modifier_prefix = "NDB_COLUMN="; + +static const struct NDB_Modifier ndb_column_modifiers[] = { + {NDB_Modifier::M_BOOL, STRING_WITH_LEN("MAX_BLOB_PART_SIZE"), 0, {0}}, + {NDB_Modifier::M_BOOL, 0, 0, 0, {0}}}; + +static bool ndb_column_is_dynamic(THD *thd, Field *field, + HA_CREATE_INFO *create_info, + bool use_dynamic_as_default, + NDBCOL::StorageType type) { DBUG_ENTER("ndb_column_is_dynamic"); /* Check if COLUMN_FORMAT is declared FIXED or DYNAMIC. @@ -9141,47 +7830,40 @@ ndb_column_is_dynamic(THD *thd, NOTE! For COLUMN_STORAGE defined as DISK, the DYNAMIC COLUMN_FORMAT is not supported and a warning will be issued if explicitly declared. 
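The COLUMN_FORMAT decision implemented by the reformatted ndb_column_is_dynamic() body below can be hard to read through the diff noise. A simplified restatement under the same inputs; all parameters are stand-ins for the fields the real function reads, and the warning emission plus the disk-storage override are omitted:

// Simplified restatement of the decision order; not the real signature.
static bool column_is_dynamic_sketch(bool explicit_fixed, bool explicit_dynamic,
                                     bool row_type_is_default,
                                     bool row_type_is_dynamic,
                                     bool default_was_fixed, bool is_primary_key,
                                     bool use_dynamic_as_default) {
  if (explicit_fixed) return false;   // COLUMN_FORMAT FIXED always wins
  if (explicit_dynamic) return true;  // COLUMN_FORMAT DYNAMIC always wins
  if (!row_type_is_default)           // otherwise the table ROW_FORMAT decides
    return row_type_is_dynamic;
  // With everything defaulted: old-version tables and primary key columns
  // fall back to the caller's preference, all other columns become dynamic.
  return (default_was_fixed || is_primary_key) ? use_dynamic_as_default : true;
}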
*/ - const bool default_was_fixed= + const bool default_was_fixed = (opt_ndb_default_column_format == NDB_DEFAULT_COLUMN_FORMAT_FIXED) || (field->table->s->mysql_version < NDB_VERSION_DYNAMIC_IS_DEFAULT); bool dynamic; switch (field->column_format()) { - case(COLUMN_FORMAT_TYPE_FIXED): - dynamic= false; - break; - case(COLUMN_FORMAT_TYPE_DYNAMIC): - dynamic= true; - break; - case(COLUMN_FORMAT_TYPE_DEFAULT): - default: - if (create_info->row_type == ROW_TYPE_DEFAULT) - { - if (default_was_fixed || // Created in old version where fixed was - // the default choice - (field->flags & PRI_KEY_FLAG)) // Primary key - { - dynamic = use_dynamic_as_default; - } - else - { - dynamic = true; - } - } - else - dynamic= (create_info->row_type == ROW_TYPE_DYNAMIC); - break; + case (COLUMN_FORMAT_TYPE_FIXED): + dynamic = false; + break; + case (COLUMN_FORMAT_TYPE_DYNAMIC): + dynamic = true; + break; + case (COLUMN_FORMAT_TYPE_DEFAULT): + default: + if (create_info->row_type == ROW_TYPE_DEFAULT) { + if (default_was_fixed || // Created in old version where fixed was + // the default choice + (field->flags & PRI_KEY_FLAG)) // Primary key + { + dynamic = use_dynamic_as_default; + } else { + dynamic = true; + } + } else + dynamic = (create_info->row_type == ROW_TYPE_DYNAMIC); + break; } - if (type == NDBCOL::StorageTypeDisk) - { - if (dynamic) - { + if (type == NDBCOL::StorageTypeDisk) { + if (dynamic) { DBUG_PRINT("info", ("Dynamic disk stored column %s changed to static", field->field_name)); - dynamic= false; + dynamic = false; } - if (thd && field->column_format() == COLUMN_FORMAT_TYPE_DYNAMIC) - { + if (thd && field->column_format() == COLUMN_FORMAT_TYPE_DYNAMIC) { push_warning_printf(thd, Sql_condition::SL_WARNING, ER_ILLEGAL_HA_CREATE_OPTION, "DYNAMIC column %s with " @@ -9192,27 +7874,25 @@ ndb_column_is_dynamic(THD *thd, } switch (create_info->row_type) { - case ROW_TYPE_FIXED: - if (thd && (dynamic || field_type_forces_var_part(field->type()))) - { - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_ILLEGAL_HA_CREATE_OPTION, - "Row format FIXED incompatible with " - "dynamic attribute %s", - field->field_name); - } - break; - default: - /* - Columns will be dynamic unless explictly specified FIXED - */ - break; + case ROW_TYPE_FIXED: + if (thd && (dynamic || field_type_forces_var_part(field->type()))) { + push_warning_printf(thd, Sql_condition::SL_WARNING, + ER_ILLEGAL_HA_CREATE_OPTION, + "Row format FIXED incompatible with " + "dynamic attribute %s", + field->field_name); + } + break; + default: + /* + Columns will be dynamic unless explictly specified FIXED + */ + break; } DBUG_RETURN(dynamic); } - /** Define NDB column based on Field. @@ -9228,38 +7908,31 @@ ndb_column_is_dynamic(THD *thd, Returns 0 or mysql error code. 
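create_ndb_column() below begins by parsing NDB_COLUMN= modifiers out of the column COMMENT before mapping the type. A usage sketch of that parsing step, with an illustrative COMMENT literal (loadComment() and get() are the calls visible in the hunk):

#include <cstring>

// Sketch of the modifier lookup that opens create_ndb_column().
static bool has_max_blob_part_size_sketch() {
  NDB_Modifiers mods(ndb_column_modifier_prefix, ndb_column_modifiers);
  const char *comment = "NDB_COLUMN=MAX_BLOB_PART_SIZE";  // illustrative
  if (mods.loadComment(comment, strlen(comment)) == -1)
    return false;  // syntax error; the real code raises HA_WRONG_CREATE_OPTION
  return mods.get("MAX_BLOB_PART_SIZE")->m_found;
}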
*/ -static int -create_ndb_column(THD *thd, - NDBCOL &col, - Field *field, - HA_CREATE_INFO *create_info, - bool use_dynamic_as_default = false) -{ +static int create_ndb_column(THD *thd, NDBCOL &col, Field *field, + HA_CREATE_INFO *create_info, + bool use_dynamic_as_default = false) { DBUG_ENTER("create_ndb_column"); char buf[MAX_ATTR_DEFAULT_VALUE_SIZE]; assert(field->stored_in_db); // Set name - if (col.setName(field->field_name)) - { + if (col.setName(field->field_name)) { // Can only fail due to memory -> return HA_ERR_OUT_OF_MEM DBUG_RETURN(HA_ERR_OUT_OF_MEM); } // Get char set - CHARSET_INFO *cs= const_cast(field->charset()); + CHARSET_INFO *cs = const_cast(field->charset()); // Set type and sizes - const enum enum_field_types mysql_type= field->real_type(); + const enum enum_field_types mysql_type = field->real_type(); NDB_Modifiers column_modifiers(ndb_column_modifier_prefix, ndb_column_modifiers); - if (column_modifiers.loadComment(field->comment.str, - field->comment.length) == -1) - { + if (column_modifiers.loadComment(field->comment.str, field->comment.length) == + -1) { push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_ILLEGAL_HA_CREATE_OPTION, - "%s", + ER_ILLEGAL_HA_CREATE_OPTION, "%s", column_modifiers.getErrMsg()); my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), ndbcluster_hton_name, "Syntax error in COMMENT modifier"); @@ -9267,41 +7940,36 @@ create_ndb_column(THD *thd, DBUG_RETURN(HA_WRONG_CREATE_OPTION); } - const NDB_Modifier * mod_maxblob = column_modifiers.get("MAX_BLOB_PART_SIZE"); + const NDB_Modifier *mod_maxblob = column_modifiers.get("MAX_BLOB_PART_SIZE"); { /* Clear default value (col obj is reused for whole table def) */ - col.setDefaultValue(NULL, 0); + col.setDefaultValue(NULL, 0); - /* If the data nodes are capable then set native + /* If the data nodes are capable then set native * default. */ bool nativeDefaults = - ! (thd && - (! ndb_native_default_support(get_thd_ndb(thd)-> - ndb->getMinDbNodeVersion()))); - - if (likely( nativeDefaults )) - { - if ((!(field->flags & PRI_KEY_FLAG) ) && - type_supports_default_value(mysql_type)) - { - if (!(field->flags & NO_DEFAULT_VALUE_FLAG)) - { - ptrdiff_t src_offset= field->table->default_values_offset(); - if ((! 
field->is_real_null(src_offset)) || - ((field->flags & NOT_NULL_FLAG))) - { + !(thd && (!ndb_native_default_support( + get_thd_ndb(thd)->ndb->getMinDbNodeVersion()))); + + if (likely(nativeDefaults)) { + if ((!(field->flags & PRI_KEY_FLAG)) && + type_supports_default_value(mysql_type)) { + if (!(field->flags & NO_DEFAULT_VALUE_FLAG)) { + ptrdiff_t src_offset = field->table->default_values_offset(); + if ((!field->is_real_null(src_offset)) || + ((field->flags & NOT_NULL_FLAG))) { /* Set a non-null native default */ memset(buf, 0, MAX_ATTR_DEFAULT_VALUE_SIZE); get_default_value(buf, field); - /* For bit columns, default length is rounded up to + /* For bit columns, default length is rounded up to nearest word, ensuring all data sent */ Uint32 defaultLen = field_used_length(field); - if(field->type() == MYSQL_TYPE_BIT) - defaultLen = ((defaultLen + 3) /4) * 4; + if (field->type() == MYSQL_TYPE_BIT) + defaultLen = ((defaultLen + 3) / 4) * 4; col.setDefaultValue(buf, defaultLen); } } @@ -9309,359 +7977,321 @@ create_ndb_column(THD *thd, } } switch (mysql_type) { - // Numeric types - case MYSQL_TYPE_TINY: - if (field->flags & UNSIGNED_FLAG) - col.setType(NDBCOL::Tinyunsigned); - else - col.setType(NDBCOL::Tinyint); - col.setLength(1); - break; - case MYSQL_TYPE_SHORT: - if (field->flags & UNSIGNED_FLAG) - col.setType(NDBCOL::Smallunsigned); - else - col.setType(NDBCOL::Smallint); - col.setLength(1); - break; - case MYSQL_TYPE_LONG: - if (field->flags & UNSIGNED_FLAG) - col.setType(NDBCOL::Unsigned); - else - col.setType(NDBCOL::Int); - col.setLength(1); - break; - case MYSQL_TYPE_INT24: - if (field->flags & UNSIGNED_FLAG) - col.setType(NDBCOL::Mediumunsigned); - else - col.setType(NDBCOL::Mediumint); - col.setLength(1); - break; - case MYSQL_TYPE_LONGLONG: - if (field->flags & UNSIGNED_FLAG) - col.setType(NDBCOL::Bigunsigned); - else - col.setType(NDBCOL::Bigint); - col.setLength(1); - break; - case MYSQL_TYPE_FLOAT: - col.setType(NDBCOL::Float); - col.setLength(1); - break; - case MYSQL_TYPE_DOUBLE: - col.setType(NDBCOL::Double); - col.setLength(1); - break; - case MYSQL_TYPE_DECIMAL: - { - Field_decimal *f= (Field_decimal*)field; - uint precision= f->pack_length(); - uint scale= f->decimals(); + // Numeric types + case MYSQL_TYPE_TINY: if (field->flags & UNSIGNED_FLAG) - { - col.setType(NDBCOL::Olddecimalunsigned); - precision-= (scale > 0); - } + col.setType(NDBCOL::Tinyunsigned); else - { + col.setType(NDBCOL::Tinyint); + col.setLength(1); + break; + case MYSQL_TYPE_SHORT: + if (field->flags & UNSIGNED_FLAG) + col.setType(NDBCOL::Smallunsigned); + else + col.setType(NDBCOL::Smallint); + col.setLength(1); + break; + case MYSQL_TYPE_LONG: + if (field->flags & UNSIGNED_FLAG) + col.setType(NDBCOL::Unsigned); + else + col.setType(NDBCOL::Int); + col.setLength(1); + break; + case MYSQL_TYPE_INT24: + if (field->flags & UNSIGNED_FLAG) + col.setType(NDBCOL::Mediumunsigned); + else + col.setType(NDBCOL::Mediumint); + col.setLength(1); + break; + case MYSQL_TYPE_LONGLONG: + if (field->flags & UNSIGNED_FLAG) + col.setType(NDBCOL::Bigunsigned); + else + col.setType(NDBCOL::Bigint); + col.setLength(1); + break; + case MYSQL_TYPE_FLOAT: + col.setType(NDBCOL::Float); + col.setLength(1); + break; + case MYSQL_TYPE_DOUBLE: + col.setType(NDBCOL::Double); + col.setLength(1); + break; + case MYSQL_TYPE_DECIMAL: { + Field_decimal *f = (Field_decimal *)field; + uint precision = f->pack_length(); + uint scale = f->decimals(); + if (field->flags & UNSIGNED_FLAG) { + col.setType(NDBCOL::Olddecimalunsigned); + 
precision -= (scale > 0); + } else { col.setType(NDBCOL::Olddecimal); - precision-= 1 + (scale > 0); + precision -= 1 + (scale > 0); } col.setPrecision(precision); col.setScale(scale); col.setLength(1); - } - break; - case MYSQL_TYPE_NEWDECIMAL: - { - Field_new_decimal *f= (Field_new_decimal*)field; - uint precision= f->precision; - uint scale= f->decimals(); - if (field->flags & UNSIGNED_FLAG) - { + } break; + case MYSQL_TYPE_NEWDECIMAL: { + Field_new_decimal *f = (Field_new_decimal *)field; + uint precision = f->precision; + uint scale = f->decimals(); + if (field->flags & UNSIGNED_FLAG) { col.setType(NDBCOL::Decimalunsigned); - } - else - { + } else { col.setType(NDBCOL::Decimal); } col.setPrecision(precision); col.setScale(scale); col.setLength(1); - } - break; - // Date types - case MYSQL_TYPE_DATETIME: - col.setType(NDBCOL::Datetime); - col.setLength(1); - break; - case MYSQL_TYPE_DATETIME2: - { - Field_datetimef *f= (Field_datetimef*)field; - uint prec= f->decimals(); + } break; + // Date types + case MYSQL_TYPE_DATETIME: + col.setType(NDBCOL::Datetime); + col.setLength(1); + break; + case MYSQL_TYPE_DATETIME2: { + Field_datetimef *f = (Field_datetimef *)field; + uint prec = f->decimals(); col.setType(NDBCOL::Datetime2); col.setLength(1); col.setPrecision(prec); - } - break; - case MYSQL_TYPE_DATE: // ? - col.setType(NDBCOL::Char); - col.setLength(field->pack_length()); - break; - case MYSQL_TYPE_NEWDATE: - col.setType(NDBCOL::Date); - col.setLength(1); - break; - case MYSQL_TYPE_TIME: - col.setType(NDBCOL::Time); - col.setLength(1); - break; - case MYSQL_TYPE_TIME2: - { - Field_timef *f= (Field_timef*)field; - uint prec= f->decimals(); + } break; + case MYSQL_TYPE_DATE: // ? + col.setType(NDBCOL::Char); + col.setLength(field->pack_length()); + break; + case MYSQL_TYPE_NEWDATE: + col.setType(NDBCOL::Date); + col.setLength(1); + break; + case MYSQL_TYPE_TIME: + col.setType(NDBCOL::Time); + col.setLength(1); + break; + case MYSQL_TYPE_TIME2: { + Field_timef *f = (Field_timef *)field; + uint prec = f->decimals(); col.setType(NDBCOL::Time2); col.setLength(1); col.setPrecision(prec); - } - break; - case MYSQL_TYPE_YEAR: - col.setType(NDBCOL::Year); - col.setLength(1); - break; - case MYSQL_TYPE_TIMESTAMP: - col.setType(NDBCOL::Timestamp); - col.setLength(1); - break; - case MYSQL_TYPE_TIMESTAMP2: - { - Field_timestampf *f= (Field_timestampf*)field; - uint prec= f->decimals(); + } break; + case MYSQL_TYPE_YEAR: + col.setType(NDBCOL::Year); + col.setLength(1); + break; + case MYSQL_TYPE_TIMESTAMP: + col.setType(NDBCOL::Timestamp); + col.setLength(1); + break; + case MYSQL_TYPE_TIMESTAMP2: { + Field_timestampf *f = (Field_timestampf *)field; + uint prec = f->decimals(); col.setType(NDBCOL::Timestamp2); col.setLength(1); col.setPrecision(prec); - } - break; - // Char types - case MYSQL_TYPE_STRING: - if (field->pack_length() == 0) - { - col.setType(NDBCOL::Bit); - col.setLength(1); - } - else if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) - { - col.setType(NDBCOL::Binary); - col.setLength(field->pack_length()); - } - else - { - col.setType(NDBCOL::Char); - col.setCharset(cs); - col.setLength(field->pack_length()); - } - break; - case MYSQL_TYPE_VAR_STRING: // ? 
- case MYSQL_TYPE_VARCHAR: - { - Field_varstring* f= (Field_varstring*)field; - if (f->length_bytes == 1) - { + } break; + // Char types + case MYSQL_TYPE_STRING: + if (field->pack_length() == 0) { + col.setType(NDBCOL::Bit); + col.setLength(1); + } else if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) { + col.setType(NDBCOL::Binary); + col.setLength(field->pack_length()); + } else { + col.setType(NDBCOL::Char); + col.setCharset(cs); + col.setLength(field->pack_length()); + } + break; + case MYSQL_TYPE_VAR_STRING: // ? + case MYSQL_TYPE_VARCHAR: { + Field_varstring *f = (Field_varstring *)field; + if (f->length_bytes == 1) { if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) col.setType(NDBCOL::Varbinary); else { col.setType(NDBCOL::Varchar); col.setCharset(cs); } - } - else if (f->length_bytes == 2) - { + } else if (f->length_bytes == 2) { if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) col.setType(NDBCOL::Longvarbinary); else { col.setType(NDBCOL::Longvarchar); col.setCharset(cs); } - } - else - { + } else { DBUG_RETURN(HA_ERR_UNSUPPORTED); } col.setLength(field->field_length); - } - break; - // Blob types (all come in as MYSQL_TYPE_BLOB) - mysql_type_tiny_blob: - case MYSQL_TYPE_TINY_BLOB: - if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) - col.setType(NDBCOL::Blob); - else { - col.setType(NDBCOL::Text); - col.setCharset(cs); - } - col.setInlineSize(256); - // No parts - col.setPartSize(0); - col.setStripeSize(0); - break; - //mysql_type_blob: - case MYSQL_TYPE_GEOMETRY: - case MYSQL_TYPE_BLOB: - if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) - col.setType(NDBCOL::Blob); - else { - col.setType(NDBCOL::Text); - col.setCharset(cs); - } - { - Field_blob *field_blob= (Field_blob *)field; - /* - * max_data_length is 2^8-1, 2^16-1, 2^24-1 for tiny, blob, medium. - * Tinyblob gets no blob parts. The other cases are just a crude - * way to control part size and striping. - * - * In mysql blob(256) is promoted to blob(65535) so it does not - * in fact fit "inline" in NDB. - */ - if (field_blob->max_data_length() < (1 << 8)) - goto mysql_type_tiny_blob; - else if (field_blob->max_data_length() < (1 << 16)) + } break; + // Blob types (all come in as MYSQL_TYPE_BLOB) + mysql_type_tiny_blob: + case MYSQL_TYPE_TINY_BLOB: + if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) + col.setType(NDBCOL::Blob); + else { + col.setType(NDBCOL::Text); + col.setCharset(cs); + } + col.setInlineSize(256); + // No parts + col.setPartSize(0); + col.setStripeSize(0); + break; + // mysql_type_blob: + case MYSQL_TYPE_GEOMETRY: + case MYSQL_TYPE_BLOB: + if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) + col.setType(NDBCOL::Blob); + else { + col.setType(NDBCOL::Text); + col.setCharset(cs); + } { - col.setInlineSize(256); - col.setPartSize(2000); - col.setStripeSize(0); - if (mod_maxblob->m_found) - { - col.setPartSize(DEFAULT_MAX_BLOB_PART_SIZE); - } + Field_blob *field_blob = (Field_blob *)field; + /* + * max_data_length is 2^8-1, 2^16-1, 2^24-1 for tiny, blob, medium. + * Tinyblob gets no blob parts. The other cases are just a crude + * way to control part size and striping. + * + * In mysql blob(256) is promoted to blob(65535) so it does not + * in fact fit "inline" in NDB. 
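The goto chain in the blob cases is easier to follow flattened into one function: Field_blob::max_data_length() selects one of four sizing profiles, and the MAX_BLOB_PART_SIZE modifier can only raise the part size up to the long-blob value. A restatement using the same constants as the hunk (charset handling omitted):

// Flattened restatement of the blob sizing chain.
static void size_blob_sketch(NdbDictionary::Column &col, ulonglong max_len,
                             bool mod_maxblob_found) {
  col.setInlineSize(256);
  col.setStripeSize(0);
  if (max_len < (1ULL << 8))        // tinyblob: no part table at all
    col.setPartSize(0);
  else if (max_len < (1ULL << 16))  // blob
    col.setPartSize(mod_maxblob_found ? DEFAULT_MAX_BLOB_PART_SIZE : 2000);
  else if (max_len < (1ULL << 24))  // mediumblob
    col.setPartSize(mod_maxblob_found ? DEFAULT_MAX_BLOB_PART_SIZE : 4000);
  else                              // longblob: already at the maximum
    col.setPartSize(DEFAULT_MAX_BLOB_PART_SIZE);
}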
+ */ + if (field_blob->max_data_length() < (1 << 8)) + goto mysql_type_tiny_blob; + else if (field_blob->max_data_length() < (1 << 16)) { + col.setInlineSize(256); + col.setPartSize(2000); + col.setStripeSize(0); + if (mod_maxblob->m_found) { + col.setPartSize(DEFAULT_MAX_BLOB_PART_SIZE); + } + } else if (field_blob->max_data_length() < (1 << 24)) + goto mysql_type_medium_blob; + else + goto mysql_type_long_blob; } - else if (field_blob->max_data_length() < (1 << 24)) - goto mysql_type_medium_blob; - else - goto mysql_type_long_blob; - } - break; - mysql_type_medium_blob: - case MYSQL_TYPE_MEDIUM_BLOB: - if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) - col.setType(NDBCOL::Blob); - else { - col.setType(NDBCOL::Text); - col.setCharset(cs); - } - col.setInlineSize(256); - col.setPartSize(4000); - col.setStripeSize(0); - if (mod_maxblob->m_found) - { + break; + mysql_type_medium_blob: + case MYSQL_TYPE_MEDIUM_BLOB: + if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) + col.setType(NDBCOL::Blob); + else { + col.setType(NDBCOL::Text); + col.setCharset(cs); + } + col.setInlineSize(256); + col.setPartSize(4000); + col.setStripeSize(0); + if (mod_maxblob->m_found) { + col.setPartSize(DEFAULT_MAX_BLOB_PART_SIZE); + } + break; + mysql_type_long_blob: + case MYSQL_TYPE_LONG_BLOB: + if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) + col.setType(NDBCOL::Blob); + else { + col.setType(NDBCOL::Text); + col.setCharset(cs); + } + col.setInlineSize(256); col.setPartSize(DEFAULT_MAX_BLOB_PART_SIZE); - } - break; - mysql_type_long_blob: - case MYSQL_TYPE_LONG_BLOB: - if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) - col.setType(NDBCOL::Blob); - else { - col.setType(NDBCOL::Text); - col.setCharset(cs); - } - col.setInlineSize(256); - col.setPartSize(DEFAULT_MAX_BLOB_PART_SIZE); - col.setStripeSize(0); - // The mod_maxblob modified has no effect here, already at max - break; + col.setStripeSize(0); + // The mod_maxblob modified has no effect here, already at max + break; - // MySQL 5.7 binary-encoded JSON type - case MYSQL_TYPE_JSON: - { - /* - JSON columns are just like LONG BLOB columns except for inline size - and part size. Inline size is chosen to accommodate a large number - of embedded json documents without spilling over to the part table. - The tradeoff is that only three JSON columns can be defined in a table - due to the large inline size. Part size is chosen to optimize use of - pages in the part table. Note that much of the JSON functionality is - available by storing JSON documents in VARCHAR columns, including - extracting keys from documents to be used as indexes. - */ - const int NDB_JSON_INLINE_SIZE = 4000; - const int NDB_JSON_PART_SIZE = 8100; + // MySQL 5.7 binary-encoded JSON type + case MYSQL_TYPE_JSON: { + /* + JSON columns are just like LONG BLOB columns except for inline size + and part size. Inline size is chosen to accommodate a large number + of embedded json documents without spilling over to the part table. + The tradeoff is that only three JSON columns can be defined in a table + due to the large inline size. Part size is chosen to optimize use of + pages in the part table. Note that much of the JSON functionality is + available by storing JSON documents in VARCHAR columns, including + extracting keys from documents to be used as indexes. 
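A back-of-envelope check for the "only three JSON columns" remark in the comment above, assuming the roughly 14000-byte NDB maximum row size contemporary with this code; the cap is an assumption here, not an API constant:

constexpr int ASSUMED_MAX_ROW_BYTES = 14000;  // assumption, not from this file
constexpr int JSON_INLINE = 4000;             // mirrors NDB_JSON_INLINE_SIZE
static_assert(3 * JSON_INLINE < ASSUMED_MAX_ROW_BYTES, "three JSON columns fit");
static_assert(4 * JSON_INLINE > ASSUMED_MAX_ROW_BYTES, "a fourth would not");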
+ */ + const int NDB_JSON_INLINE_SIZE = 4000; + const int NDB_JSON_PART_SIZE = 8100; - col.setType(NDBCOL::Blob); - col.setInlineSize(NDB_JSON_INLINE_SIZE); - col.setPartSize(NDB_JSON_PART_SIZE); - col.setStripeSize(0); - break; - } + col.setType(NDBCOL::Blob); + col.setInlineSize(NDB_JSON_INLINE_SIZE); + col.setPartSize(NDB_JSON_PART_SIZE); + col.setStripeSize(0); + break; + } - // Other types - case MYSQL_TYPE_ENUM: - col.setType(NDBCOL::Char); - col.setLength(field->pack_length()); - break; - case MYSQL_TYPE_SET: - col.setType(NDBCOL::Char); - col.setLength(field->pack_length()); - break; - case MYSQL_TYPE_BIT: - { - int no_of_bits= field->field_length; - col.setType(NDBCOL::Bit); - if (!no_of_bits) - col.setLength(1); + // Other types + case MYSQL_TYPE_ENUM: + col.setType(NDBCOL::Char); + col.setLength(field->pack_length()); + break; + case MYSQL_TYPE_SET: + col.setType(NDBCOL::Char); + col.setLength(field->pack_length()); + break; + case MYSQL_TYPE_BIT: { + int no_of_bits = field->field_length; + col.setType(NDBCOL::Bit); + if (!no_of_bits) + col.setLength(1); else col.setLength(no_of_bits); - break; - } - case MYSQL_TYPE_NULL: - goto mysql_type_unsupported; - mysql_type_unsupported: - default: - DBUG_RETURN(HA_ERR_UNSUPPORTED); + break; + } + case MYSQL_TYPE_NULL: + goto mysql_type_unsupported; + mysql_type_unsupported: + default: + DBUG_RETURN(HA_ERR_UNSUPPORTED); } // Set nullable and pk col.setNullable(field->maybe_null()); col.setPrimaryKey(field->flags & PRI_KEY_FLAG); - if ((field->flags & FIELD_IN_PART_FUNC_FLAG) != 0) - { + if ((field->flags & FIELD_IN_PART_FUNC_FLAG) != 0) { col.setPartitionKey(true); } // Set autoincrement - if (field->flags & AUTO_INCREMENT_FLAG) - { + if (field->flags & AUTO_INCREMENT_FLAG) { col.setAutoIncrement(true); - ulonglong value= create_info->auto_increment_value ? - create_info->auto_increment_value : (ulonglong) 1; + ulonglong value = create_info->auto_increment_value + ? 
create_info->auto_increment_value + : (ulonglong)1; DBUG_PRINT("info", ("Autoincrement key, initial: %llu", value)); col.setAutoIncrementInitialValue(value); - } - else + } else col.setAutoIncrement(false); // Storage type { NDBCOL::StorageType type = NDBCOL::StorageTypeMemory; - switch (field->field_storage_type()) - { - case HA_SM_DEFAULT: - DBUG_PRINT("info", ("No storage_type for field, check create_info")); - if (create_info->storage_media == HA_SM_DISK) - { - DBUG_PRINT("info", ("Table storage type is 'disk', using 'disk' " - "for field")); - type = NDBCOL::StorageTypeDisk; - } - break; + switch (field->field_storage_type()) { + case HA_SM_DEFAULT: + DBUG_PRINT("info", ("No storage_type for field, check create_info")); + if (create_info->storage_media == HA_SM_DISK) { + DBUG_PRINT("info", ("Table storage type is 'disk', using 'disk' " + "for field")); + type = NDBCOL::StorageTypeDisk; + } + break; - case HA_SM_DISK: - DBUG_PRINT("info", ("Field storage_type is 'disk'")); - type = NDBCOL::StorageTypeDisk; - break; + case HA_SM_DISK: + DBUG_PRINT("info", ("Field storage_type is 'disk'")); + type = NDBCOL::StorageTypeDisk; + break; - case HA_SM_MEMORY: - break; + case HA_SM_MEMORY: + break; } DBUG_PRINT("info", ("Using storage type: '%s'", @@ -9671,9 +8301,8 @@ create_ndb_column(THD *thd, // Dynamic { - const bool dynamic= - ndb_column_is_dynamic(thd, field, create_info, use_dynamic_as_default, - col.getStorageType()); + const bool dynamic = ndb_column_is_dynamic( + thd, field, create_info, use_dynamic_as_default, col.getStorageType()); DBUG_PRINT("info", ("Using dynamic: %d", dynamic)); col.setDynamic(dynamic); @@ -9691,185 +8320,160 @@ create_ndb_column(THD *thd, compatibility checks. It only cares about type, length, precision, scale and charset and nothing else. */ -static void -create_ndb_fk_fake_column(NDBCOL &col, - const Ha_fk_column_type &fk_col_type) { +static void create_ndb_fk_fake_column(NDBCOL &col, + const Ha_fk_column_type &fk_col_type) { // Get character set. 
- CHARSET_INFO *cs= const_cast(fk_col_type.field_charset); + CHARSET_INFO *cs = const_cast(fk_col_type.field_charset); switch (fk_col_type.type) { - // Numeric types - case dd::enum_column_types::TINY: - if (fk_col_type.is_unsigned) - col.setType(NDBCOL::Tinyunsigned); - else - col.setType(NDBCOL::Tinyint); - col.setLength(1); - break; - case dd::enum_column_types::SHORT: - if (fk_col_type.is_unsigned) - col.setType(NDBCOL::Smallunsigned); - else - col.setType(NDBCOL::Smallint); - col.setLength(1); - break; - case dd::enum_column_types::LONG: - if (fk_col_type.is_unsigned) - col.setType(NDBCOL::Unsigned); - else - col.setType(NDBCOL::Int); - col.setLength(1); - break; - case dd::enum_column_types::INT24: - if (fk_col_type.is_unsigned) - col.setType(NDBCOL::Mediumunsigned); - else - col.setType(NDBCOL::Mediumint); - col.setLength(1); - break; - case dd::enum_column_types::LONGLONG: - if (fk_col_type.is_unsigned) - col.setType(NDBCOL::Bigunsigned); - else - col.setType(NDBCOL::Bigint); - col.setLength(1); - break; - case dd::enum_column_types::FLOAT: - col.setType(NDBCOL::Float); - col.setLength(1); - break; - case dd::enum_column_types::DOUBLE: - col.setType(NDBCOL::Double); - col.setLength(1); - break; - case dd::enum_column_types::DECIMAL: - { - uint precision= fk_col_type.char_length; - uint scale= fk_col_type.numeric_scale; + // Numeric types + case dd::enum_column_types::TINY: if (fk_col_type.is_unsigned) - { - col.setType(NDBCOL::Olddecimalunsigned); - precision-= (scale > 0); - } + col.setType(NDBCOL::Tinyunsigned); else - { + col.setType(NDBCOL::Tinyint); + col.setLength(1); + break; + case dd::enum_column_types::SHORT: + if (fk_col_type.is_unsigned) + col.setType(NDBCOL::Smallunsigned); + else + col.setType(NDBCOL::Smallint); + col.setLength(1); + break; + case dd::enum_column_types::LONG: + if (fk_col_type.is_unsigned) + col.setType(NDBCOL::Unsigned); + else + col.setType(NDBCOL::Int); + col.setLength(1); + break; + case dd::enum_column_types::INT24: + if (fk_col_type.is_unsigned) + col.setType(NDBCOL::Mediumunsigned); + else + col.setType(NDBCOL::Mediumint); + col.setLength(1); + break; + case dd::enum_column_types::LONGLONG: + if (fk_col_type.is_unsigned) + col.setType(NDBCOL::Bigunsigned); + else + col.setType(NDBCOL::Bigint); + col.setLength(1); + break; + case dd::enum_column_types::FLOAT: + col.setType(NDBCOL::Float); + col.setLength(1); + break; + case dd::enum_column_types::DOUBLE: + col.setType(NDBCOL::Double); + col.setLength(1); + break; + case dd::enum_column_types::DECIMAL: { + uint precision = fk_col_type.char_length; + uint scale = fk_col_type.numeric_scale; + if (fk_col_type.is_unsigned) { + col.setType(NDBCOL::Olddecimalunsigned); + precision -= (scale > 0); + } else { col.setType(NDBCOL::Olddecimal); - precision-= 1 + (scale > 0); + precision -= 1 + (scale > 0); } col.setPrecision(precision); col.setScale(scale); col.setLength(1); - } - break; - case dd::enum_column_types::NEWDECIMAL: - { - uint precision= my_decimal_length_to_precision(fk_col_type.char_length, - fk_col_type.numeric_scale, - fk_col_type.is_unsigned); - uint scale= fk_col_type.numeric_scale; - if (fk_col_type.is_unsigned) - { + } break; + case dd::enum_column_types::NEWDECIMAL: { + uint precision = my_decimal_length_to_precision(fk_col_type.char_length, + fk_col_type.numeric_scale, + fk_col_type.is_unsigned); + uint scale = fk_col_type.numeric_scale; + if (fk_col_type.is_unsigned) { col.setType(NDBCOL::Decimalunsigned); - } - else - { + } else { col.setType(NDBCOL::Decimal); } 
col.setPrecision(precision); col.setScale(scale); col.setLength(1); - } - break; - // Date types - case dd::enum_column_types::DATETIME: - col.setType(NDBCOL::Datetime); - col.setLength(1); - break; - case dd::enum_column_types::DATETIME2: - { - uint prec= (fk_col_type.char_length > MAX_DATETIME_WIDTH) ? - fk_col_type.char_length - 1 - MAX_DATETIME_WIDTH : 0; + } break; + // Date types + case dd::enum_column_types::DATETIME: + col.setType(NDBCOL::Datetime); + col.setLength(1); + break; + case dd::enum_column_types::DATETIME2: { + uint prec = (fk_col_type.char_length > MAX_DATETIME_WIDTH) + ? fk_col_type.char_length - 1 - MAX_DATETIME_WIDTH + : 0; col.setType(NDBCOL::Datetime2); col.setLength(1); col.setPrecision(prec); - } - break; - case dd::enum_column_types::NEWDATE: - col.setType(NDBCOL::Date); - col.setLength(1); - break; - case dd::enum_column_types::TIME: - col.setType(NDBCOL::Time); - col.setLength(1); - break; - case dd::enum_column_types::TIME2: - { - uint prec= (fk_col_type.char_length > MAX_TIME_WIDTH) ? - fk_col_type.char_length - 1 - MAX_TIME_WIDTH : 0; + } break; + case dd::enum_column_types::NEWDATE: + col.setType(NDBCOL::Date); + col.setLength(1); + break; + case dd::enum_column_types::TIME: + col.setType(NDBCOL::Time); + col.setLength(1); + break; + case dd::enum_column_types::TIME2: { + uint prec = (fk_col_type.char_length > MAX_TIME_WIDTH) + ? fk_col_type.char_length - 1 - MAX_TIME_WIDTH + : 0; col.setType(NDBCOL::Time2); col.setLength(1); col.setPrecision(prec); - } - break; - case dd::enum_column_types::YEAR: - col.setType(NDBCOL::Year); - col.setLength(1); - break; - case dd::enum_column_types::TIMESTAMP: - col.setType(NDBCOL::Timestamp); - col.setLength(1); - break; - case dd::enum_column_types::TIMESTAMP2: - { - uint prec= (fk_col_type.char_length > MAX_DATETIME_WIDTH) ? - fk_col_type.char_length - 1 - MAX_DATETIME_WIDTH : 0; + } break; + case dd::enum_column_types::YEAR: + col.setType(NDBCOL::Year); + col.setLength(1); + break; + case dd::enum_column_types::TIMESTAMP: + col.setType(NDBCOL::Timestamp); + col.setLength(1); + break; + case dd::enum_column_types::TIMESTAMP2: { + uint prec = (fk_col_type.char_length > MAX_DATETIME_WIDTH) + ? 
fk_col_type.char_length - 1 - MAX_DATETIME_WIDTH + : 0; col.setType(NDBCOL::Timestamp2); col.setLength(1); col.setPrecision(prec); - } - break; - // Char types - case dd::enum_column_types::STRING: - if (fk_col_type.char_length == 0) - { - col.setType(NDBCOL::Bit); - col.setLength(1); - } - else if (cs == &my_charset_bin) - { - col.setType(NDBCOL::Binary); - col.setLength(fk_col_type.char_length); - } - else - { - col.setType(NDBCOL::Char); - col.setCharset(cs); - col.setLength(fk_col_type.char_length); - } - break; - case dd::enum_column_types::VARCHAR: - { + } break; + // Char types + case dd::enum_column_types::STRING: + if (fk_col_type.char_length == 0) { + col.setType(NDBCOL::Bit); + col.setLength(1); + } else if (cs == &my_charset_bin) { + col.setType(NDBCOL::Binary); + col.setLength(fk_col_type.char_length); + } else { + col.setType(NDBCOL::Char); + col.setCharset(cs); + col.setLength(fk_col_type.char_length); + } + break; + case dd::enum_column_types::VARCHAR: { uint length_bytes = HA_VARCHAR_PACKLENGTH(fk_col_type.char_length); - if (length_bytes == 1) - { + if (length_bytes == 1) { if (cs == &my_charset_bin) col.setType(NDBCOL::Varbinary); else { col.setType(NDBCOL::Varchar); col.setCharset(cs); } - } - else if (length_bytes == 2) - { + } else if (length_bytes == 2) { if (cs == &my_charset_bin) col.setType(NDBCOL::Longvarbinary); else { col.setType(NDBCOL::Longvarchar); col.setCharset(cs); } - } - else - { + } else { /* This branch is dead at the moment and has been left for consistency with create_ndb_column(). Instead of returning an error we cheat and @@ -9878,99 +8482,90 @@ create_ndb_fk_fake_column(NDBCOL &col, col.setType(NDBCOL::Blob); } col.setLength(fk_col_type.char_length); - } - break; - // Blob types - case dd::enum_column_types::TINY_BLOB: - case dd::enum_column_types::BLOB: - case dd::enum_column_types::MEDIUM_BLOB: - case dd::enum_column_types::LONG_BLOB: - case dd::enum_column_types::GEOMETRY: - case dd::enum_column_types::JSON: - /* - Since NDB doesn't support foreign keys over Blob and Text columns - anyway, we cheat and always use Blob type in this case without - calculating exact type and other attributes. - */ - col.setType(NDBCOL::Blob); - break; - // Other types - case dd::enum_column_types::ENUM: - col.setType(NDBCOL::Char); - col.setLength(get_enum_pack_length(fk_col_type.elements_count)); - break; - case dd::enum_column_types::SET: - col.setType(NDBCOL::Char); - col.setLength(get_set_pack_length(fk_col_type.elements_count)); - break; - case dd::enum_column_types::BIT: - { - int no_of_bits= fk_col_type.char_length; - col.setType(NDBCOL::Bit); - if (!no_of_bits) - col.setLength(1); + } break; + // Blob types + case dd::enum_column_types::TINY_BLOB: + case dd::enum_column_types::BLOB: + case dd::enum_column_types::MEDIUM_BLOB: + case dd::enum_column_types::LONG_BLOB: + case dd::enum_column_types::GEOMETRY: + case dd::enum_column_types::JSON: + /* + Since NDB doesn't support foreign keys over Blob and Text columns + anyway, we cheat and always use Blob type in this case without + calculating exact type and other attributes. 
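The DATETIME2/TIME2/TIMESTAMP2 cases above recover the fractional-seconds precision from the data-dictionary display width. A worked example, using MySQL's MAX_DATETIME_WIDTH of 19 ("YYYY-MM-DD HH:MM:SS"):

// DATETIME(6): char_length = 19 + 1 (decimal point) + 6 = 26
//              prec = 26 - 1 - 19 = 6
// DATETIME(0): char_length = 19, so prec = 0
static uint fsp_from_char_length(uint char_length) {
  return (char_length > MAX_DATETIME_WIDTH)
             ? char_length - 1 - MAX_DATETIME_WIDTH
             : 0;
}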
+ */ + col.setType(NDBCOL::Blob); + break; + // Other types + case dd::enum_column_types::ENUM: + col.setType(NDBCOL::Char); + col.setLength(get_enum_pack_length(fk_col_type.elements_count)); + break; + case dd::enum_column_types::SET: + col.setType(NDBCOL::Char); + col.setLength(get_set_pack_length(fk_col_type.elements_count)); + break; + case dd::enum_column_types::BIT: { + int no_of_bits = fk_col_type.char_length; + col.setType(NDBCOL::Bit); + if (!no_of_bits) + col.setLength(1); else col.setLength(no_of_bits); - break; - } - // Legacy types. Modern server is not supposed to use them. - case dd::enum_column_types::DATE: - case dd::enum_column_types::VAR_STRING: - // Unsupported types. - case dd::enum_column_types::TYPE_NULL: - default: - /* - Instead of returning an error we cheat and use Blob type - which is not supported in foreign keys. - */ - col.setType(NDBCOL::Blob); - break; + break; + } + // Legacy types. Modern server is not supposed to use them. + case dd::enum_column_types::DATE: + case dd::enum_column_types::VAR_STRING: + // Unsupported types. + case dd::enum_column_types::TYPE_NULL: + default: + /* + Instead of returning an error we cheat and use Blob type + which is not supported in foreign keys. + */ + col.setType(NDBCOL::Blob); + break; } } -static const NdbDictionary::Object::PartitionBalance g_default_partition_balance = - NdbDictionary::Object::PartitionBalance_ForRPByLDM; +static const NdbDictionary::Object::PartitionBalance + g_default_partition_balance = + NdbDictionary::Object::PartitionBalance_ForRPByLDM; -void ha_ndbcluster::update_create_info(HA_CREATE_INFO *create_info) -{ +void ha_ndbcluster::update_create_info(HA_CREATE_INFO *create_info) { DBUG_ENTER("ha_ndbcluster::update_create_info"); - THD *thd= current_thd; - const NDBTAB *ndbtab= m_table; - Ndb *ndb= check_ndb_in_thd(thd); + THD *thd = current_thd; + const NDBTAB *ndbtab = m_table; + Ndb *ndb = check_ndb_in_thd(thd); - if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) - { + if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) { /* Find any initial auto_increment value */ - for (uint i= 0; i < table->s->fields; i++) - { - Field *field= table->field[i]; - if (field->flags & AUTO_INCREMENT_FLAG) - { + for (uint i = 0; i < table->s->fields; i++) { + Field *field = table->field[i]; + if (field->flags & AUTO_INCREMENT_FLAG) { ulonglong auto_value; - uint retries= NDB_AUTO_INCREMENT_RETRIES; - for (;;) - { + uint retries = NDB_AUTO_INCREMENT_RETRIES; + for (;;) { NDB_SHARE::Tuple_id_range_guard g(m_share); - if (ndb->readAutoIncrementValue(ndbtab, g.range, auto_value)) - { + if (ndb->readAutoIncrementValue(ndbtab, g.range, auto_value)) { if (--retries && !thd_killed(thd) && - ndb->getNdbError().status == NdbError::TemporaryError) - { + ndb->getNdbError().status == NdbError::TemporaryError) { ndb_trans_retry_sleep(); continue; } - const NdbError err= ndb->getNdbError(); - ndb_log_error("Error %d in ::update_create_info(): %s", - err.code, err.message); + const NdbError err = ndb->getNdbError(); + ndb_log_error("Error %d in ::update_create_info(): %s", err.code, + err.message); DBUG_VOID_RETURN; } break; } - if (auto_value > 1) - { - create_info->auto_increment_value= auto_value; + if (auto_value > 1) { + create_info->auto_increment_value = auto_value; } break; } @@ -9991,38 +8586,26 @@ void ha_ndbcluster::update_create_info(HA_CREATE_INFO *create_info) * parts of the string we will add those parts by creating a new * comment string. 
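update_comment_info() below reconciles the stored COMMENT with the table's actual NDB properties and then regenerates the comment text. A condensed sketch of the happy path for two of the four modifiers; comment_str, comment_len and ndbtab stand for the values the real function reads, and error handling plus the FULLY_REPLICATED and PARTITION_BALANCE cases are omitted:

// Condensed reconcile-and-regenerate flow from update_comment_info().
NDB_Modifiers mods(ndb_table_modifier_prefix, ndb_table_modifiers);
mods.loadComment(comment_str, comment_len);
if (!ndbtab->getLogging() && !mods.get("NOLOGGING")->m_found)
  mods.set("NOLOGGING", true);  // surface the hidden property in the comment
if (ndbtab->getReadBackupFlag() && !mods.get("READ_BACKUP")->m_found)
  mods.set("READ_BACKUP", true);
const char *updated = mods.generateCommentString();  // NULL on alloc failure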
*/ - if (thd->lex->sql_command == SQLCOM_ALTER_TABLE) - { + if (thd->lex->sql_command == SQLCOM_ALTER_TABLE) { update_comment_info(thd, create_info, m_table); - } - else if (thd->lex->sql_command == SQLCOM_SHOW_CREATE) - { + } else if (thd->lex->sql_command == SQLCOM_SHOW_CREATE) { update_comment_info(thd, NULL, m_table); } DBUG_VOID_RETURN; } -void -ha_ndbcluster::update_comment_info(THD* thd, - HA_CREATE_INFO *create_info, - const NdbDictionary::Table *ndbtab) -{ +void ha_ndbcluster::update_comment_info(THD *thd, HA_CREATE_INFO *create_info, + const NdbDictionary::Table *ndbtab) { DBUG_ENTER("ha_ndbcluster::update_comment_info"); - NDB_Modifiers table_modifiers(ndb_table_modifier_prefix, - ndb_table_modifiers); - char *comment_str = create_info == NULL ? - table->s->comment.str : - create_info->comment.str; - unsigned comment_len = create_info == NULL ? - table->s->comment.length : - create_info->comment.length; - - if (table_modifiers.loadComment(comment_str, - comment_len) == -1) - { + NDB_Modifiers table_modifiers(ndb_table_modifier_prefix, ndb_table_modifiers); + char *comment_str = + create_info == NULL ? table->s->comment.str : create_info->comment.str; + unsigned comment_len = create_info == NULL ? table->s->comment.length + : create_info->comment.length; + + if (table_modifiers.loadComment(comment_str, comment_len) == -1) { push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_ILLEGAL_HA_CREATE_OPTION, - "%s", + ER_ILLEGAL_HA_CREATE_OPTION, "%s", table_modifiers.getErrMsg()); my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), ndbcluster_hton_name, "Syntax error in COMMENT modifier"); @@ -10031,17 +8614,16 @@ ha_ndbcluster::update_comment_info(THD* thd, const NDB_Modifier *mod_nologging = table_modifiers.get("NOLOGGING"); const NDB_Modifier *mod_read_backup = table_modifiers.get("READ_BACKUP"); const NDB_Modifier *mod_fully_replicated = - table_modifiers.get("FULLY_REPLICATED"); + table_modifiers.get("FULLY_REPLICATED"); const NDB_Modifier *mod_frags = table_modifiers.get("PARTITION_BALANCE"); DBUG_PRINT("info", ("Before: comment_len: %u, comment: %s", - (unsigned int)comment_len, - comment_str)); - + (unsigned int)comment_len, comment_str)); + bool old_nologging = !ndbtab->getLogging(); bool old_read_backup = ndbtab->getReadBackupFlag(); bool old_fully_replicated = ndbtab->getFullyReplicated(); NdbDictionary::Object::PartitionBalance old_part_bal = - ndbtab->getPartitionBalance(); + ndbtab->getPartitionBalance(); /** * We start by calculating how much more space we need in the comment @@ -10053,48 +8635,35 @@ ha_ndbcluster::update_comment_info(THD* thd, bool add_part_bal = false; bool is_fully_replicated = false; - if ((mod_fully_replicated->m_found && - mod_fully_replicated->m_val_bool) || - (old_fully_replicated && - !mod_fully_replicated->m_found)) - { + if ((mod_fully_replicated->m_found && mod_fully_replicated->m_val_bool) || + (old_fully_replicated && !mod_fully_replicated->m_found)) { is_fully_replicated = true; } - if (old_nologging && !mod_nologging->m_found) - { + if (old_nologging && !mod_nologging->m_found) { add_nologging = true; table_modifiers.set("NOLOGGING", true); DBUG_PRINT("info", ("added nologging")); } - if (!is_fully_replicated && - old_read_backup && - !mod_read_backup->m_found) - { + if (!is_fully_replicated && old_read_backup && !mod_read_backup->m_found) { add_read_backup = true; table_modifiers.set("READ_BACKUP", true); DBUG_PRINT("info", ("added read_backup")); } - if (old_fully_replicated && !mod_fully_replicated->m_found) - { + if 
(old_fully_replicated && !mod_fully_replicated->m_found) { add_fully_replicated = true; table_modifiers.set("FULLY_REPLICATED", true); DBUG_PRINT("info", ("added fully_replicated")); } - if (!mod_frags->m_found && - (old_part_bal != g_default_partition_balance) && - (old_part_bal != NdbDictionary::Object::PartitionBalance_Specific)) - { + if (!mod_frags->m_found && (old_part_bal != g_default_partition_balance) && + (old_part_bal != NdbDictionary::Object::PartitionBalance_Specific)) { add_part_bal = true; const char *old_part_bal_str = - NdbDictionary::Table::getPartitionBalanceString(old_part_bal); + NdbDictionary::Table::getPartitionBalanceString(old_part_bal); table_modifiers.set("PARTITION_BALANCE", old_part_bal_str); DBUG_PRINT("info", ("added part_bal_str")); } - if (!(add_nologging || - add_read_backup || - add_fully_replicated || - add_part_bal)) - { + if (!(add_nologging || add_read_backup || add_fully_replicated || + add_part_bal)) { /* No change of comment is needed. */ DBUG_VOID_RETURN; } @@ -10103,16 +8672,14 @@ ha_ndbcluster::update_comment_info(THD* thd, * All necessary modifiers are set, now regenerate the comment */ const char *updated_str = table_modifiers.generateCommentString(); - if (updated_str == NULL) - { + if (updated_str == NULL) { mem_alloc_error(0); DBUG_VOID_RETURN; } const Uint32 new_len = strlen(updated_str); // Allocate comment memory from TABLE_SHARE's MEM_ROOT - char* const new_str = (char*)table->s->mem_root.Alloc((size_t)new_len); - if (new_str == NULL) - { + char *const new_str = (char *)table->s->mem_root.Alloc((size_t)new_len); + if (new_str == NULL) { mem_alloc_error(0); DBUG_VOID_RETURN; } @@ -10120,60 +8687,48 @@ ha_ndbcluster::update_comment_info(THD* thd, DBUG_PRINT("info", ("new_str: %s", new_str)); /* Update structures */ - if (create_info != NULL) - { + if (create_info != NULL) { create_info->comment.str = new_str; create_info->comment.length = new_len; - } - else - { + } else { table->s->comment.str = new_str; table->s->comment.length = new_len; } - DBUG_PRINT("info", ("After: comment_len: %u, comment: %s", - new_len, - new_str)); + DBUG_PRINT("info", ("After: comment_len: %u, comment: %s", new_len, new_str)); DBUG_VOID_RETURN; } -static uint get_no_fragments(ulonglong max_rows) -{ - ulonglong acc_row_size= 25 + /*safety margin*/ 2; - ulonglong acc_fragment_size= 512*1024*1024; - return uint((max_rows*acc_row_size)/acc_fragment_size)+1; +static uint get_no_fragments(ulonglong max_rows) { + ulonglong acc_row_size = 25 + /*safety margin*/ 2; + ulonglong acc_fragment_size = 512 * 1024 * 1024; + return uint((max_rows * acc_row_size) / acc_fragment_size) + 1; } - /* Routine to adjust default number of partitions to always be a multiple of number of nodes and never more than 4 times the number of nodes. */ -static -bool -adjusted_frag_count(Ndb* ndb, - uint requested_frags, - uint &reported_frags) -{ - unsigned no_nodes= g_ndb_cluster_connection->no_db_nodes(); - unsigned no_replicas= no_nodes == 1 ? 1 : 2; - - unsigned no_threads= 1; - const unsigned no_nodegroups= g_ndb_cluster_connection->max_nodegroup() + 1; +static bool adjusted_frag_count(Ndb *ndb, uint requested_frags, + uint &reported_frags) { + unsigned no_nodes = g_ndb_cluster_connection->no_db_nodes(); + unsigned no_replicas = no_nodes == 1 ? 
1 : 2; + + unsigned no_threads = 1; + const unsigned no_nodegroups = g_ndb_cluster_connection->max_nodegroup() + 1; { /** * Use SYSTAB_0 to get #replicas, and to guess #threads */ - char dbname[FN_HEADLEN+1]; - dbname[FN_HEADLEN]= 0; + char dbname[FN_HEADLEN + 1]; + dbname[FN_HEADLEN] = 0; my_stpnmov(dbname, ndb->getDatabaseName(), sizeof(dbname) - 1); ndb->setDatabaseName("sys"); Ndb_table_guard ndbtab_g(ndb->getDictionary(), "SYSTAB_0"); - const NdbDictionary::Table * tab = ndbtab_g.get_table(); - if (tab) - { - no_replicas= ndbtab_g.get_table()->getReplicaCount(); + const NdbDictionary::Table *tab = ndbtab_g.get_table(); + if (tab) { + no_replicas = ndbtab_g.get_table()->getReplicaCount(); /** * Guess #threads @@ -10182,19 +8737,16 @@ adjusted_frag_count(Ndb* ndb, const Uint32 frags = tab->getFragmentCount(); Uint32 node = 0; Uint32 cnt = 0; - for (Uint32 i = 0; igetFragmentNodes(i, replicas, NDB_ARRAY_SIZE(replicas))) - { - if (node == replicas[0] || node == 0) - { + if (tab->getFragmentNodes(i, replicas, NDB_ARRAY_SIZE(replicas))) { + if (node == replicas[0] || node == 0) { node = replicas[0]; - cnt ++; + cnt++; } } } - no_threads = cnt; // No of primary replica on 1-node + no_threads = cnt; // No of primary replica on 1-node } } ndb->setDatabaseName(dbname); @@ -10203,15 +8755,14 @@ adjusted_frag_count(Ndb* ndb, const unsigned usable_nodes = no_replicas * no_nodegroups; const uint max_replicas = 8 * usable_nodes * no_threads; - reported_frags = usable_nodes * no_threads; // Start with 1 frag per threads + reported_frags = usable_nodes * no_threads; // Start with 1 frag per threads Uint32 replicas = reported_frags * no_replicas; /** * Loop until requested replicas, and not exceed max-replicas */ while (reported_frags < requested_frags && - (replicas + usable_nodes * no_threads * no_replicas) <= max_replicas) - { + (replicas + usable_nodes * no_threads * no_replicas) <= max_replicas) { reported_frags += usable_nodes * no_threads; replicas += usable_nodes * no_threads * no_replicas; } @@ -10219,95 +8770,76 @@ adjusted_frag_count(Ndb* ndb, return (reported_frags < requested_frags); } -static -bool -parsePartitionBalance(THD *thd, - const NDB_Modifier * mod, - NdbDictionary::Object::PartitionBalance * part_bal) -{ - if (mod->m_found == false) - return false; // OK +static bool parsePartitionBalance( + THD *thd, const NDB_Modifier *mod, + NdbDictionary::Object::PartitionBalance *part_bal) { + if (mod->m_found == false) return false; // OK NdbDictionary::Object::PartitionBalance ret = - NdbDictionary::Table::getPartitionBalance(mod->m_val_str.str); + NdbDictionary::Table::getPartitionBalance(mod->m_val_str.str); - if (ret == 0) - { - DBUG_PRINT("info", ("PartitionBalance: %s not supported", - mod->m_val_str.str)); + if (ret == 0) { + DBUG_PRINT("info", + ("PartitionBalance: %s not supported", mod->m_val_str.str)); /** * Comment section contains a partition balance we cannot * recognize, we will print warning about this and will * not change the comment string. 
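The parsePartitionBalance() hunk being reformatted here maps the modifier string through the NDB dictionary's name lookup, treating 0 as "unknown name". A usage sketch; the balance-name literal is illustrative:

// getPartitionBalance() returns 0 for names it does not recognize.
NdbDictionary::Object::PartitionBalance pb =
    NdbDictionary::Table::getPartitionBalance("FOR_RP_BY_LDM");  // illustrative
if (pb == 0) {
  // unknown name: leave the comment string as-is and push warning 4500
}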
*/ - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_GET_ERRMSG, - ER_THD(thd, ER_GET_ERRMSG), - 4500, + push_warning_printf(thd, Sql_condition::SL_WARNING, ER_GET_ERRMSG, + ER_THD(thd, ER_GET_ERRMSG), 4500, "Comment contains non-supported fragment" " count type", "NDB"); return false; } - if (part_bal) - { - * part_bal = ret; + if (part_bal) { + *part_bal = ret; } return true; } - -extern bool ndb_fk_util_truncate_allowed(THD* thd, - NdbDictionary::Dictionary* dict, - const char* db, - const NdbDictionary::Table* tab, - bool& allow); +extern bool ndb_fk_util_truncate_allowed(THD *thd, + NdbDictionary::Dictionary *dict, + const char *db, + const NdbDictionary::Table *tab, + bool &allow); /* Forward declaration of the utility functions used when creating partitioned tables */ -static int -create_table_set_up_partition_info(partition_info *part_info, - NdbDictionary::Table&, - Ndb_table_map &); -static int -create_table_set_range_data(const partition_info* part_info, - NdbDictionary::Table&); -static int -create_table_set_list_data(const partition_info* part_info, - NdbDictionary::Table&); - - -void ha_ndbcluster::append_create_info(String*) -{ +static int create_table_set_up_partition_info(partition_info *part_info, + NdbDictionary::Table &, + Ndb_table_map &); +static int create_table_set_range_data(const partition_info *part_info, + NdbDictionary::Table &); +static int create_table_set_list_data(const partition_info *part_info, + NdbDictionary::Table &); + +void ha_ndbcluster::append_create_info(String *) { THD *thd = current_thd; Thd_ndb *thd_ndb = get_thd_ndb(thd); Ndb *ndb = thd_ndb->ndb; NDBDICT *dict = ndb->getDictionary(); ndb->setDatabaseName(table_share->db.str); Ndb_table_guard ndbtab_g(dict, table_share->table_name.str); - const NdbDictionary::Table * tab = ndbtab_g.get_table(); + const NdbDictionary::Table *tab = ndbtab_g.get_table(); NdbDictionary::Object::PartitionBalance part_bal = tab->getPartitionBalance(); bool logged_table = tab->getLogging(); bool read_backup = tab->getReadBackupFlag(); bool fully_replicated = tab->getFullyReplicated(); - DBUG_PRINT("info", ("append_create_info: comment: %s, logged_table = %u," - " part_bal = %d, read_backup = %u, fully_replicated = %u", - table_share->comment.length == 0 ? - "NULL" : table_share->comment.str, - logged_table, - part_bal, - read_backup, - fully_replicated)); + DBUG_PRINT( + "info", + ("append_create_info: comment: %s, logged_table = %u," + " part_bal = %d, read_backup = %u, fully_replicated = %u", + table_share->comment.length == 0 ? "NULL" : table_share->comment.str, + logged_table, part_bal, read_backup, fully_replicated)); if (table_share->comment.length == 0 && part_bal == NdbDictionary::Object::PartitionBalance_Specific && - !read_backup && - logged_table && - !fully_replicated) - { + !read_backup && logged_table && !fully_replicated) { /** * No comment set by user * The partition balance is default and thus no need to set @@ -10323,7 +8855,7 @@ void ha_ndbcluster::append_create_info(String*) * property already set in the comment string. 
*/ NdbDictionary::Object::PartitionBalance comment_part_bal = - g_default_partition_balance; + g_default_partition_balance; bool comment_part_bal_set = false; bool comment_logged_table_set = false; @@ -10334,17 +8866,14 @@ void ha_ndbcluster::append_create_info(String*) bool comment_read_backup = false; bool comment_fully_replicated = false; - if (table_share->comment.length) - { + if (table_share->comment.length) { /* Parse the current comment string */ NDB_Modifiers table_modifiers(ndb_table_modifier_prefix, ndb_table_modifiers); if (table_modifiers.loadComment(table_share->comment.str, - table_share->comment.length) == -1) - { + table_share->comment.length) == -1) { push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_ILLEGAL_HA_CREATE_OPTION, - "%s", + ER_ILLEGAL_HA_CREATE_OPTION, "%s", table_modifiers.getErrMsg()); my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), ndbcluster_hton_name, "Syntax error in COMMENT modifier"); @@ -10354,10 +8883,9 @@ void ha_ndbcluster::append_create_info(String*) const NDB_Modifier *mod_read_backup = table_modifiers.get("READ_BACKUP"); const NDB_Modifier *mod_frags = table_modifiers.get("PARTITION_BALANCE"); const NDB_Modifier *mod_fully_replicated = - table_modifiers.get("FULLY_REPLICATED"); + table_modifiers.get("FULLY_REPLICATED"); - if (mod_nologging->m_found) - { + if (mod_nologging->m_found) { /** * NOLOGGING is set, ensure that it is set to the same value as * the table object value. If it is then no need to print anything. @@ -10365,28 +8893,21 @@ void ha_ndbcluster::append_create_info(String*) comment_logged_table = !mod_nologging->m_val_bool; comment_logged_table_set = true; } - if (mod_read_backup->m_found) - { + if (mod_read_backup->m_found) { comment_read_backup_set = true; comment_read_backup = mod_read_backup->m_val_bool; } - if (mod_frags->m_found) - { - if (parsePartitionBalance(thd /* for pushing warning */, - mod_frags, - &comment_part_bal)) - { - if (comment_part_bal != part_bal) - { + if (mod_frags->m_found) { + if (parsePartitionBalance(thd /* for pushing warning */, mod_frags, + &comment_part_bal)) { + if (comment_part_bal != part_bal) { /** * The table property and the comment on the table differs. * Let the comment string stay as is, but push warning * about this fact. 
*/ - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_GET_ERRMSG, - ER_THD(thd, ER_GET_ERRMSG), - 4501, + push_warning_printf(thd, Sql_condition::SL_WARNING, ER_GET_ERRMSG, + ER_THD(thd, ER_GET_ERRMSG), 4501, "Table property is not the same as in" " comment for PARTITION_BALANCE" " property", @@ -10395,109 +8916,79 @@ void ha_ndbcluster::append_create_info(String*) } comment_part_bal_set = true; } - if (mod_fully_replicated->m_found) - { + if (mod_fully_replicated->m_found) { comment_fully_replicated_set = true; comment_fully_replicated = mod_fully_replicated->m_val_bool; } } DBUG_PRINT("info", ("comment_read_backup_set: %u, comment_read_backup: %u", - comment_read_backup_set, - comment_read_backup)); + comment_read_backup_set, comment_read_backup)); DBUG_PRINT("info", ("comment_logged_table_set: %u, comment_logged_table: %u", - comment_logged_table_set, - comment_logged_table)); + comment_logged_table_set, comment_logged_table)); DBUG_PRINT("info", ("comment_part_bal_set: %u, comment_part_bal: %d", - comment_part_bal_set, - comment_part_bal)); - if (!comment_read_backup_set) - { - if (read_backup && !fully_replicated) - { + comment_part_bal_set, comment_part_bal)); + if (!comment_read_backup_set) { + if (read_backup && !fully_replicated) { /** * No property was given in table comment, but table is using read backup * Also table isn't fully replicated. */ - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_GET_ERRMSG, - ER_THD(thd, ER_GET_ERRMSG), - 4502, + push_warning_printf(thd, Sql_condition::SL_WARNING, ER_GET_ERRMSG, + ER_THD(thd, ER_GET_ERRMSG), 4502, "Table property is READ_BACKUP=1," " but not in comment", "NDB"); - } - } - else if (read_backup != comment_read_backup) - { + } + } else if (read_backup != comment_read_backup) { /** * The table property and the comment property differs, we will * print comment string as is and issue a warning to this effect. */ - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_GET_ERRMSG, - ER_THD(thd, ER_GET_ERRMSG), - 4502, + push_warning_printf(thd, Sql_condition::SL_WARNING, ER_GET_ERRMSG, + ER_THD(thd, ER_GET_ERRMSG), 4502, "Table property is not the same as in" " comment for READ_BACKUP property", "NDB"); } - if (!comment_fully_replicated_set) - { - if (fully_replicated) - { - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_GET_ERRMSG, - ER_THD(thd, ER_GET_ERRMSG), - 4502, + if (!comment_fully_replicated_set) { + if (fully_replicated) { + push_warning_printf(thd, Sql_condition::SL_WARNING, ER_GET_ERRMSG, + ER_THD(thd, ER_GET_ERRMSG), 4502, "Table property is FULLY_REPLICATED=1," " but not in comment", "NDB"); } - } - else if (fully_replicated != comment_fully_replicated) - { - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_GET_ERRMSG, - ER_THD(thd, ER_GET_ERRMSG), - 4502, + } else if (fully_replicated != comment_fully_replicated) { + push_warning_printf(thd, Sql_condition::SL_WARNING, ER_GET_ERRMSG, + ER_THD(thd, ER_GET_ERRMSG), 4502, "Table property is not the same as in" " comment for FULLY_REPLICATED property", "NDB"); } - if (!comment_logged_table_set) - { - if (!logged_table) - { + if (!comment_logged_table_set) { + if (!logged_table) { /** * No property was given in table comment, but table is not logged. 
*/ - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_GET_ERRMSG, - ER_THD(thd, ER_GET_ERRMSG), - 4502, + push_warning_printf(thd, Sql_condition::SL_WARNING, ER_GET_ERRMSG, + ER_THD(thd, ER_GET_ERRMSG), 4502, "Table property is NOLOGGING=1," " but not in comment", "NDB"); } - } - else if (logged_table != comment_logged_table) - { + } else if (logged_table != comment_logged_table) { /** * The table property and the comment property differs, we will * print comment string as is and issue a warning to this effect. */ - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_GET_ERRMSG, - ER_THD(thd, ER_GET_ERRMSG), - 4502, + push_warning_printf(thd, Sql_condition::SL_WARNING, ER_GET_ERRMSG, + ER_THD(thd, ER_GET_ERRMSG), 4502, "Table property is not the same as in" " comment for NOLOGGING property", "NDB"); } - if (!comment_part_bal_set) - { - if (part_bal != NdbDictionary::Object::PartitionBalance_Specific) - { + if (!comment_part_bal_set) { + if (part_bal != NdbDictionary::Object::PartitionBalance_Specific) { /** * There is a table property not reflected in the COMMENT string, * most likely someone has done an ALTER TABLE with a new comment @@ -10510,27 +9001,19 @@ void ha_ndbcluster::append_create_info(String*) * The default partition balance need not be visible in comment. */ const NdbDictionary::Object::PartitionBalance default_partition_balance = - g_default_partition_balance; + g_default_partition_balance; - if (part_bal != default_partition_balance) - { - const char * pbname = NdbDictionary::Table::getPartitionBalanceString(part_bal); - if (pbname != NULL) - { + if (part_bal != default_partition_balance) { + const char *pbname = + NdbDictionary::Table::getPartitionBalanceString(part_bal); + if (pbname != NULL) { char msg[200]; - snprintf(msg, - sizeof(msg), - "Table property is PARTITION_BALANCE=%s but not in comment", - pbname); - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_GET_ERRMSG, - ER_THD(thd, ER_GET_ERRMSG), - 4503, - msg, - "NDB"); - } - else - { + snprintf(msg, sizeof(msg), + "Table property is PARTITION_BALANCE=%s but not in comment", + pbname); + push_warning_printf(thd, Sql_condition::SL_WARNING, ER_GET_ERRMSG, + ER_THD(thd, ER_GET_ERRMSG), 4503, msg, "NDB"); + } else { assert(false); /** * This should never happen, the table property should not be set @@ -10539,10 +9022,8 @@ void ha_ndbcluster::append_create_info(String*) * from a cluster on a newer version where additional types have been * added. */ - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_GET_ERRMSG, - ER_THD(thd, ER_GET_ERRMSG), - 4503, + push_warning_printf(thd, Sql_condition::SL_WARNING, ER_GET_ERRMSG, + ER_THD(thd, ER_GET_ERRMSG), 4503, "Table property PARTITION_BALANCE is set to" " an unknown value, could be an upgrade issue", "NDB"); @@ -10575,16 +9056,13 @@ static bool drop_table_and_related(THD *thd, Ndb *ndb, can be used to avoid that ha_ndbcluster::print_error() reports another error. 
*/ -int ha_ndbcluster::create(const char *name, - TABLE *form, - HA_CREATE_INFO *create_info, - dd::Table* table_def) -{ - THD *thd= current_thd; +int ha_ndbcluster::create(const char *name, TABLE *form, + HA_CREATE_INFO *create_info, dd::Table *table_def) { + THD *thd = current_thd; NDBTAB tab; NDBCOL col; - uint i, pk_length= 0; - bool use_disk= false; + uint i, pk_length = 0; + bool use_disk = false; Ndb_fk_list fk_list_for_truncate; // Verify default value for "single user mode" of the table @@ -10613,25 +9091,21 @@ int ha_ndbcluster::create(const char *name, Ndb_schema_dist_client schema_dist_client(thd); - if (check_ndb_connection(thd)) - DBUG_RETURN(HA_ERR_NO_CONNECTION); + if (check_ndb_connection(thd)) DBUG_RETURN(HA_ERR_NO_CONNECTION); Ndb_create_helper create(thd, form->s->table_name.str); - Ndb *ndb= get_ndb(thd); - NDBDICT *dict= ndb->getDictionary(); + Ndb *ndb = get_ndb(thd); + NDBDICT *dict = ndb->getDictionary(); - table= form; + table = form; - if (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE) - { + if (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE) { // This is the final step of table discovery, the table already exists // in NDB and it has already been added to local DD by // calling ha_discover() and thus ndbcluster_discover() // Just finish this process by setting up the binlog for this table const int setup_result = - ndbcluster_binlog_setup_table(thd, ndb, - m_dbname, m_tabname, - table_def); + ndbcluster_binlog_setup_table(thd, ndb, m_dbname, m_tabname, table_def); if (setup_result != 0) { if (setup_result == HA_ERR_TABLE_EXIST) { push_warning_printf( @@ -10649,8 +9123,7 @@ int ha_ndbcluster::create(const char *name, tables since a table being altered might not be known to the mysqld issuing the alter statement. */ - if (thd_sql_command(thd) == SQLCOM_ALTER_TABLE) - { + if (thd_sql_command(thd) == SQLCOM_ALTER_TABLE) { DBUG_PRINT("info", ("Detected copying ALTER TABLE")); // Check that the table name is a temporary name @@ -10658,15 +9131,14 @@ int ha_ndbcluster::create(const char *name, if (!THDVAR(thd, allow_copying_alter_table) && (thd->lex->alter_info->requested_algorithm == - Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT)) - { + Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT)) { // Copying alter table is not allowed and the user // has not specified ALGORITHM=COPY DBUG_PRINT("info", ("Refusing implicit copying alter table")); my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0), - "Implicit copying alter", "ndb_allow_copying_alter_table=0", - "ALGORITHM=COPY to force the alter"); + "Implicit copying alter", "ndb_allow_copying_alter_table=0", + "ALGORITHM=COPY to force the alter"); DBUG_RETURN(HA_WRONG_CREATE_OPTION); } @@ -10678,32 +9150,27 @@ int ha_ndbcluster::create(const char *name, Refuse such ALTER TABLE .. RENAME already when trying to create the destination table.
*/ - const uint flags= thd->lex->alter_info->flags; - if (flags & Alter_info::ALTER_RENAME && - flags & ~Alter_info::ALTER_RENAME) - { + const uint flags = thd->lex->alter_info->flags; + if (flags & Alter_info::ALTER_RENAME && flags & ~Alter_info::ALTER_RENAME) { my_error(ER_NOT_SUPPORTED_YET, MYF(0), thd->query().str); DBUG_RETURN(ER_NOT_SUPPORTED_YET); } } - Thd_ndb *thd_ndb= get_thd_ndb(thd); + Thd_ndb *thd_ndb = get_thd_ndb(thd); if (!(thd_ndb->check_option(Thd_ndb::IS_SCHEMA_DIST_PARTICIPANT) || - thd_ndb->has_required_global_schema_lock("ha_ndbcluster::create"))) - { + thd_ndb->has_required_global_schema_lock("ha_ndbcluster::create"))) { DBUG_RETURN(HA_ERR_NO_CONNECTION); } - if (thd_ndb->check_option(Thd_ndb::CREATE_UTIL_TABLE)) - { + if (thd_ndb->check_option(Thd_ndb::CREATE_UTIL_TABLE)) { // Creating ndbcluster util table. This is done in order to install the // table definition in DD using SQL. Apply special settings for the table // and return DBUG_PRINT("info", ("Creating ndbcluster util table")); - if (thd_ndb->check_option(Thd_ndb::CREATE_UTIL_TABLE_HIDDEN)) - { + if (thd_ndb->check_option(Thd_ndb::CREATE_UTIL_TABLE_HIDDEN)) { // Mark the util table as hidden in DD ndb_dd_table_mark_as_hidden(table_def); } @@ -10717,7 +9184,7 @@ int ha_ndbcluster::create(const char *name, } // Update table definition with the table id and version of the NDB table - const NdbDictionary::Table * const ndbtab = ndbtab_g.get_table(); + const NdbDictionary::Table *const ndbtab = ndbtab_g.get_table(); ndb_dd_table_set_object_id_and_version(table_def, ndbtab->getObjectId(), ndbtab->getObjectVersion()); @@ -10728,31 +9195,25 @@ int ha_ndbcluster::create(const char *name, // Creating table with temporary name, table will only be access by this // MySQL Server -> skip schema distribution DBUG_PRINT("info", ("Creating table with temporary name")); - } - else - { + } else { // Prepare schema distribution - if (!schema_dist_client.prepare(m_dbname, m_tabname)) - { + if (!schema_dist_client.prepare(m_dbname, m_tabname)) { // Failed to prepare schema distributions DBUG_PRINT("info", ("Schema distribution failed to initialize")); DBUG_RETURN(HA_ERR_NO_CONNECTION); } std::string invalid_identifier; - if (!schema_dist_client.check_identifier_limits(invalid_identifier)) - { + if (!schema_dist_client.check_identifier_limits(invalid_identifier)) { // Check of db or table name limits failed my_error(ER_TOO_LONG_IDENT, MYF(0), invalid_identifier.c_str()); DBUG_RETURN(HA_WRONG_CREATE_OPTION); } } - if (thd_sql_command(thd) == SQLCOM_TRUNCATE) - { + if (thd_sql_command(thd) == SQLCOM_TRUNCATE) { Ndb_table_guard ndbtab_g(dict, m_tabname); - if (!ndbtab_g.get_table()) - ERR_RETURN(dict->getNdbError()); + if (!ndbtab_g.get_table()) ERR_RETURN(dict->getNdbError()); /* Don't allow truncate on table which is foreign key parent. @@ -10761,21 +9222,19 @@ int ha_ndbcluster::create(const char *name, fks during this "recreate". 
*/ bool allow; - if (!ndb_fk_util_truncate_allowed(thd, dict, m_dbname, - ndbtab_g.get_table(), allow)) - { + if (!ndb_fk_util_truncate_allowed(thd, dict, m_dbname, ndbtab_g.get_table(), + allow)) { DBUG_RETURN(HA_ERR_NO_CONNECTION); } - if (!allow) - { + if (!allow) { my_error(ER_TRUNCATE_ILLEGAL_FK, MYF(0), ""); DBUG_RETURN(1); } /* save the foreign key information in fk_list */ int err; - if ((err= get_fk_data_for_truncate(dict, ndbtab_g.get_table(), - fk_list_for_truncate))) + if ((err = get_fk_data_for_truncate(dict, ndbtab_g.get_table(), + fk_list_for_truncate))) DBUG_RETURN(err); DBUG_PRINT("info", ("Dropping and re-creating table for TRUNCATE")); @@ -10788,45 +9247,35 @@ int ha_ndbcluster::create(const char *name, DBUG_PRINT("info", ("Start parse of table modifiers, comment = %s", create_info->comment.str)); - NDB_Modifiers table_modifiers(ndb_table_modifier_prefix, - ndb_table_modifiers); + NDB_Modifiers table_modifiers(ndb_table_modifier_prefix, ndb_table_modifiers); if (table_modifiers.loadComment(create_info->comment.str, - create_info->comment.length) == -1) - { + create_info->comment.length) == -1) { thd_ndb->push_warning(ER_ILLEGAL_HA_CREATE_OPTION, "%s", table_modifiers.getErrMsg()); DBUG_RETURN(create.failed_illegal_create_option( "Syntax error in COMMENT modifier")); } - const NDB_Modifier * mod_nologging = table_modifiers.get("NOLOGGING"); - const NDB_Modifier * mod_frags = table_modifiers.get("PARTITION_BALANCE"); - const NDB_Modifier * mod_read_backup = table_modifiers.get("READ_BACKUP"); - const NDB_Modifier * mod_fully_replicated = - table_modifiers.get("FULLY_REPLICATED"); + const NDB_Modifier *mod_nologging = table_modifiers.get("NOLOGGING"); + const NDB_Modifier *mod_frags = table_modifiers.get("PARTITION_BALANCE"); + const NDB_Modifier *mod_read_backup = table_modifiers.get("READ_BACKUP"); + const NDB_Modifier *mod_fully_replicated = + table_modifiers.get("FULLY_REPLICATED"); NdbDictionary::Object::PartitionBalance part_bal = - g_default_partition_balance; - if (parsePartitionBalance(thd /* for pushing warning */, - mod_frags, - &part_bal) == false) - { + g_default_partition_balance; + if (parsePartitionBalance(thd /* for pushing warning */, mod_frags, + &part_bal) == false) { /** * unable to parse => modifier which is not found */ mod_frags = table_modifiers.notfound(); - } - else if (ndbd_support_partition_balance( - ndb->getMinDbNodeVersion()) == 0) - { + } else if (ndbd_support_partition_balance(ndb->getMinDbNodeVersion()) == 0) { DBUG_RETURN(create.failed_illegal_create_option( "PARTITION_BALANCE not supported by current data node versions")); } /* Verify we can support read backup table property if set */ - if ((mod_read_backup->m_found || - opt_ndb_read_backup) && - ndbd_support_read_backup( - ndb->getMinDbNodeVersion()) == 0) - { + if ((mod_read_backup->m_found || opt_ndb_read_backup) && + ndbd_support_read_backup(ndb->getMinDbNodeVersion()) == 0) { DBUG_RETURN(create.failed_illegal_create_option( "READ_BACKUP not supported by current data node versions")); } @@ -10838,55 +9287,48 @@ int ha_ndbcluster::create(const char *name, */ if (!(create_info->row_type == ROW_TYPE_DEFAULT || create_info->row_type == ROW_TYPE_FIXED || - create_info->row_type == ROW_TYPE_DYNAMIC)) - { + create_info->row_type == ROW_TYPE_DYNAMIC)) { /* Unsupported row format requested */ std::string err_message; err_message.append("ROW_FORMAT="); - switch (create_info->row_type) - { - case ROW_TYPE_COMPRESSED: - err_message.append("COMPRESSED"); - break; - case ROW_TYPE_REDUNDANT: - 
err_message.append("REDUNDANT"); - break; - case ROW_TYPE_COMPACT: - err_message.append("COMPACT"); - break; - case ROW_TYPE_PAGED: - err_message.append("PAGED"); - break; - default: - err_message.append(""); - DBUG_ASSERT(false); - break; + switch (create_info->row_type) { + case ROW_TYPE_COMPRESSED: + err_message.append("COMPRESSED"); + break; + case ROW_TYPE_REDUNDANT: + err_message.append("REDUNDANT"); + break; + case ROW_TYPE_COMPACT: + err_message.append("COMPACT"); + break; + case ROW_TYPE_PAGED: + err_message.append("PAGED"); + break; + default: + err_message.append(""); + DBUG_ASSERT(false); + break; } DBUG_RETURN(create.failed_illegal_create_option(err_message.c_str())); } /* Verify we can support fully replicated table property if set */ - if ((mod_fully_replicated->m_found || - opt_ndb_fully_replicated) && - ndbd_support_fully_replicated( - ndb->getMinDbNodeVersion()) == 0) - { + if ((mod_fully_replicated->m_found || opt_ndb_fully_replicated) && + ndbd_support_fully_replicated(ndb->getMinDbNodeVersion()) == 0) { DBUG_RETURN(create.failed_illegal_create_option( "FULLY_REPLICATED not supported by current data node versions")); } // Read mysql.ndb_replication settings for this table, if any uint32 binlog_flags; - const st_conflict_fn_def* conflict_fn= NULL; + const st_conflict_fn_def *conflict_fn = NULL; st_conflict_fn_arg args[MAX_CONFLICT_ARGS]; uint num_args = MAX_CONFLICT_ARGS; Ndb_binlog_client binlog_client(thd, m_dbname, m_tabname); - if (binlog_client.read_replication_info(ndb, m_dbname, - m_tabname, ::server_id, - &binlog_flags, &conflict_fn, - args, &num_args)) - { + if (binlog_client.read_replication_info(ndb, m_dbname, m_tabname, ::server_id, + &binlog_flags, &conflict_fn, args, + &num_args)) { DBUG_RETURN(HA_WRONG_CREATE_OPTION); } @@ -10894,42 +9336,35 @@ int ha_ndbcluster::create(const char *name, ndb->setDatabaseName(m_dbname); // Use mysql.ndb_replication settings when creating table - if (conflict_fn != NULL) - { - switch(conflict_fn->type) - { - case CFT_NDB_EPOCH: - case CFT_NDB_EPOCH_TRANS: - case CFT_NDB_EPOCH2: - case CFT_NDB_EPOCH2_TRANS: - { - /* Default 6 extra Gci bits allows 2^6 == 64 - * epochs / saveGCP, a comfortable default - */ - Uint32 numExtraGciBits = 6; - Uint32 numExtraAuthorBits = 1; + if (conflict_fn != NULL) { + switch (conflict_fn->type) { + case CFT_NDB_EPOCH: + case CFT_NDB_EPOCH_TRANS: + case CFT_NDB_EPOCH2: + case CFT_NDB_EPOCH2_TRANS: { + /* Default 6 extra Gci bits allows 2^6 == 64 + * epochs / saveGCP, a comfortable default + */ + Uint32 numExtraGciBits = 6; + Uint32 numExtraAuthorBits = 1; - if ((num_args == 1) && - (args[0].type == CFAT_EXTRA_GCI_BITS)) - { - numExtraGciBits = args[0].extraGciBits; - } - DBUG_PRINT("info", ("Setting ExtraRowGciBits to %u, " - "ExtraAuthorBits to %u", - numExtraGciBits, - numExtraAuthorBits)); + if ((num_args == 1) && (args[0].type == CFAT_EXTRA_GCI_BITS)) { + numExtraGciBits = args[0].extraGciBits; + } + DBUG_PRINT("info", ("Setting ExtraRowGciBits to %u, " + "ExtraAuthorBits to %u", + numExtraGciBits, numExtraAuthorBits)); - tab.setExtraRowGciBits(numExtraGciBits); - tab.setExtraRowAuthorBits(numExtraAuthorBits); - } - default: - break; + tab.setExtraRowGciBits(numExtraGciBits); + tab.setExtraRowAuthorBits(numExtraAuthorBits); + } + default: + break; } } Ndb_schema_trans_guard schema_trans(thd_ndb, dict); - if (!schema_trans.begin_trans()) - { + if (!schema_trans.begin_trans()) { DBUG_RETURN(create.failed_warning_already_pushed()); } @@ -10939,8 +9374,7 @@ int ha_ndbcluster::create(const char 
*name, NdbDictionary::Dictionary *const m_dict; const char *const m_name; bool m_have_invalidated{false}; - void invalidate() - { + void invalidate() { assert(!m_have_invalidated); const NdbDictionary::Table *ndbtab = m_dict->getTableGlobal(m_name); if (ndbtab) { @@ -10954,15 +9388,13 @@ int ha_ndbcluster::create(const char *name, Ndb_table_invalidator_guard(NdbDictionary::Dictionary *dict, const char *tabname) : m_dict(dict), m_name(tabname) {} - Ndb_table_invalidator_guard(const Ndb_table_invalidator_guard&) = delete; - ~Ndb_table_invalidator_guard() - { + Ndb_table_invalidator_guard(const Ndb_table_invalidator_guard &) = delete; + ~Ndb_table_invalidator_guard() { if (!m_have_invalidated) { invalidate(); } } - void invalidate_after_sucessfully_created_table() - { + void invalidate_after_sucessfully_created_table() { // NOTE! This function invalidates the table after table has // been created successfully in NDB. The reason why it needs to be // invalidated is unknown and no test currently fails if this @@ -10971,29 +9403,22 @@ int ha_ndbcluster::create(const char *name, } } table_invalidator(dict, m_tabname); - if (tab.setName(m_tabname)) - { - DBUG_RETURN( - create.failed_oom("Failed to set table name")); + if (tab.setName(m_tabname)) { + DBUG_RETURN(create.failed_oom("Failed to set table name")); } - if (THDVAR(thd, table_temporary)) - { + if (THDVAR(thd, table_temporary)) { #ifdef DOES_NOT_WORK_CURRENTLY tab.setTemporary(true); #endif DBUG_PRINT("info", ("table_temporary set")); tab.setLogging(false); - } - else if (THDVAR(thd, table_no_logging)) - { + } else if (THDVAR(thd, table_no_logging)) { DBUG_PRINT("info", ("table_no_logging set")); tab.setLogging(false); } - if (mod_nologging->m_found) - { - DBUG_PRINT("info", ("tab.setLogging(%u)", - (!mod_nologging->m_val_bool))); + if (mod_nologging->m_found) { + DBUG_PRINT("info", ("tab.setLogging(%u)", (!mod_nologging->m_val_bool))); tab.setLogging(!mod_nologging->m_val_bool); } @@ -11001,33 +9426,23 @@ int ha_ndbcluster::create(const char *name, bool use_fully_replicated; bool use_read_backup; - if (mod_fully_replicated->m_found) - { + if (mod_fully_replicated->m_found) { use_fully_replicated = mod_fully_replicated->m_val_bool; - } - else - { + } else { use_fully_replicated = opt_ndb_fully_replicated; } - if (mod_read_backup->m_found) - { + if (mod_read_backup->m_found) { use_read_backup = mod_read_backup->m_val_bool; - } - else if (use_fully_replicated) - { + } else if (use_fully_replicated) { use_read_backup = true; - } - else - { + } else { use_read_backup = opt_ndb_read_backup; } - if (use_fully_replicated) - { + if (use_fully_replicated) { /* Fully replicated table */ - if (mod_read_backup->m_found && !mod_read_backup->m_val_bool) - { + if (mod_read_backup->m_found && !mod_read_backup->m_val_bool) { /** * Cannot mix FULLY_REPLICATED=1 and READ_BACKUP=0 since * FULLY_REPLICATED=1 implies READ_BACKUP=1.
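The hunk above reduces the READ_BACKUP/FULLY_REPLICATED handling to a small precedence rule: an explicit COMMENT modifier wins, FULLY_REPLICATED implies READ_BACKUP, and otherwise the server-wide option is the default. A minimal standalone sketch of that resolution, using plain std::optional stand-ins rather than the actual NDB_Modifier structs (the defaults struct below is a hypothetical placeholder for the opt_ndb_fully_replicated and opt_ndb_read_backup options):

#include <optional>

struct Property_defaults {
  bool opt_fully_replicated;  // stand-in for opt_ndb_fully_replicated
  bool opt_read_backup;       // stand-in for opt_ndb_read_backup
};

// Returns false for the rejected READ_BACKUP=0 + FULLY_REPLICATED=1 mix.
static bool resolve_replication_properties(
    std::optional<bool> mod_fully_replicated,  // FULLY_REPLICATED=... if given
    std::optional<bool> mod_read_backup,       // READ_BACKUP=... if given
    const Property_defaults &defaults, bool &use_fully_replicated,
    bool &use_read_backup) {
  use_fully_replicated =
      mod_fully_replicated.value_or(defaults.opt_fully_replicated);
  if (mod_read_backup.has_value())
    use_read_backup = *mod_read_backup;
  else
    // FULLY_REPLICATED=1 implies READ_BACKUP=1 unless explicitly overridden
    use_read_backup = use_fully_replicated ? true : defaults.opt_read_backup;
  // An explicit READ_BACKUP=0 contradicts an effective FULLY_REPLICATED=1
  return !(use_fully_replicated && mod_read_backup.has_value() &&
           !*mod_read_backup);
}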
@@ -11037,16 +9452,13 @@ int ha_ndbcluster::create(const char *name, } tab.setReadBackupFlag(true); tab.setFullyReplicated(true); - } - else if (use_read_backup) - { + } else if (use_read_backup) { tab.setReadBackupFlag(true); } } tab.setRowChecksum(opt_ndb_row_checksum); - if (thd_sql_command(thd) != SQLCOM_ALTER_TABLE) - { + if (thd_sql_command(thd) != SQLCOM_ALTER_TABLE) { update_comment_info(thd, create_info, &tab); } @@ -11057,16 +9469,14 @@ int ha_ndbcluster::create(const char *name, */ dd::sdi_t sdi; - if (!ndb_sdi_serialize(thd, table_def, m_dbname, sdi)) - { + if (!ndb_sdi_serialize(thd, table_def, m_dbname, sdi)) { DBUG_RETURN(create.failed_internal_error( "Failed to serialize dictionary information")); } const int result = tab.setExtraMetadata(2, // version 2 for sdi sdi.c_str(), (Uint32)sdi.length()); - if (result != 0) - { + if (result != 0) { DBUG_RETURN(create.failed_internal_error("Failed to set extra metadata")); } } @@ -11080,15 +9490,12 @@ int ha_ndbcluster::create(const char *name, will save datamemory in NDB at the cost of not being able to add columns inplace. Any other value enables "varpart reference". */ - if (create_info->row_type == ROW_TYPE_FIXED) - { + if (create_info->row_type == ROW_TYPE_FIXED) { // CREATE TABLE .. ROW_FORMAT=FIXED DBUG_PRINT("info", ("Turning off 'varpart reference'")); tab.setForceVarPart(false); DBUG_ASSERT(ndb_dd_table_is_using_fixed_row_format(table_def)); - } - else - { + } else { tab.setForceVarPart(true); DBUG_ASSERT(!ndb_dd_table_is_using_fixed_row_format(table_def)); } @@ -11099,8 +9506,7 @@ int ha_ndbcluster::create(const char *name, Controls whether the NDB table has a corresponding tablespace. It's possible for a table to have a tablespace although no columns are on disk. */ - if (create_info->tablespace) - { + if (create_info->tablespace) { // Turn on use_disk if create_info says that table has got a tablespace DBUG_PRINT("info", ("Using 'disk' since create_info says table " "have tablespace")); @@ -11113,45 +9519,36 @@ int ha_ndbcluster::create(const char *name, my_bitmap_map *old_map; { restore_record(form, s->default_values); - old_map= tmp_use_all_columns(form, form->read_set); + old_map = tmp_use_all_columns(form, form->read_set); } - for (i= 0; i < form->s->fields; i++) - { - Field *field= form->field[i]; + for (i = 0; i < form->s->fields; i++) { + Field *field = form->field[i]; DBUG_PRINT("info", ("name: %s, type: %u, pack_length: %d, stored: %d", field->field_name, field->real_type(), field->pack_length(), field->stored_in_db)); - if(field->stored_in_db) - { + if (field->stored_in_db) { const int create_column_result = create_ndb_column(thd, col, field, create_info); - if (create_column_result) - { + if (create_column_result) { DBUG_RETURN(create_column_result); } // Turn on use_disk if the column is configured to be on disk - if (col.getStorageType() == NDBCOL::StorageTypeDisk) - { + if (col.getStorageType() == NDBCOL::StorageTypeDisk) { use_disk = true; } - if (tab.addColumn(col)) - { + if (tab.addColumn(col)) { DBUG_RETURN(create.failed_oom("Failed to add column")); } - if (col.getPrimaryKey()) - pk_length += (field->pack_length() + 3) / 4; + if (col.getPrimaryKey()) pk_length += (field->pack_length() + 3) / 4; } } tmp_restore_column_map(form->read_set, old_map); - if (use_disk) - { - if (mod_nologging->m_found && - mod_nologging->m_val_bool) - { + if (use_disk) { + if (mod_nologging->m_found && mod_nologging->m_val_bool) { // Setting NOLOGGING=1 on a disk table isn't permitted.
DBUG_RETURN(create.failed_illegal_create_option( "NOLOGGING=1 on table with fields using STORAGE DISK")); @@ -11159,12 +9556,9 @@ int ha_ndbcluster::create(const char *name, tab.setLogging(true); tab.setTemporary(false); - if (create_info->tablespace) - { + if (create_info->tablespace) { tab.setTablespaceName(create_info->tablespace); - } - else - { + } else { // It's not possible to create a table which uses disk without // also specifying a tablespace name DBUG_RETURN(create.failed_missing_create_option( @@ -11173,8 +9567,7 @@ int ha_ndbcluster::create(const char *name, } // Save the table level storage media setting - switch(create_info->storage_media) - { + switch (create_info->storage_media) { case HA_SM_DISK: tab.setStorageType(NdbDictionary::Column::StorageTypeDisk); break; @@ -11186,20 +9579,16 @@ int ha_ndbcluster::create(const char *name, break; } - DBUG_PRINT("info", ("Table %s is %s stored with tablespace %s", - m_tabname, + DBUG_PRINT("info", ("Table %s is %s stored with tablespace %s", m_tabname, (use_disk) ? "disk" : "memory", (use_disk) ? tab.getTablespaceName() : "N/A")); - KEY* key_info; - for (i= 0, key_info= form->key_info; i < form->s->keys; i++, key_info++) - { - KEY_PART_INFO *key_part= key_info->key_part; - KEY_PART_INFO *end= key_part + key_info->user_defined_key_parts; - for (; key_part != end; key_part++) - { - if (key_part->field->field_storage_type() == HA_SM_DISK) - { + KEY *key_info; + for (i = 0, key_info = form->key_info; i < form->s->keys; i++, key_info++) { + KEY_PART_INFO *key_part = key_info->key_part; + KEY_PART_INFO *end = key_part + key_info->user_defined_key_parts; + for (; key_part != end; key_part++) { + if (key_part->field->field_storage_type() == HA_SM_DISK) { thd_ndb->push_warning(ER_ILLEGAL_HA_CREATE_OPTION, "Cannot create index on DISK column '%s'. Alter " "it in a way to use STORAGE MEMORY.", @@ -11207,17 +9596,15 @@ int ha_ndbcluster::create(const char *name, DBUG_RETURN( create.failed_illegal_create_option("index on DISK column")); } - table_map.getColumn(tab, key_part->fieldnr-1)->setStorageType( - NdbDictionary::Column::StorageTypeMemory); + table_map.getColumn(tab, key_part->fieldnr - 1) + ->setStorageType(NdbDictionary::Column::StorageTypeMemory); } } - // No primary key, create shadow key as 64 bit, auto increment - if (form->s->primary_key == MAX_KEY) - { + // No primary key, create shadow key as 64 bit, auto increment + if (form->s->primary_key == MAX_KEY) { DBUG_PRINT("info", ("Generating shadow key")); - if (col.setName("$PK")) - { + if (col.setName("$PK")) { DBUG_RETURN(create.failed_oom("Failed to set name for shadow key")); } col.setType(NdbDictionary::Column::Bigunsigned); @@ -11226,18 +9613,15 @@ int ha_ndbcluster::create(const char *name, col.setPrimaryKey(true); col.setAutoIncrement(true); col.setDefaultValue(NULL, 0); - if (tab.addColumn(col)) - { + if (tab.addColumn(col)) { DBUG_RETURN(create.failed_oom("Failed to add column for shadow key")); } pk_length += 2; } - + // Make sure that blob tables don't have too big part size - for (i= 0; i < form->s->fields; i++) - { - if(! 
form->field[i]->stored_in_db) - continue; + for (i = 0; i < form->s->fields; i++) { + if (!form->field[i]->stored_in_db) continue; /** * The extra +7 consists @@ -11246,30 +9630,27 @@ int ha_ndbcluster::create(const char *name, */ switch (form->field[i]->real_type()) { - case MYSQL_TYPE_GEOMETRY: - case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_MEDIUM_BLOB: - case MYSQL_TYPE_LONG_BLOB: - case MYSQL_TYPE_JSON: - { - NdbDictionary::Column * column= table_map.getColumn(tab, i); - unsigned size= pk_length + (column->getPartSize()+3)/4 + 7; - unsigned ndb_max= MAX_BLOB_ROW_SIZE; - - if (size > ndb_max && - (pk_length+7) < ndb_max) - { - size= ndb_max - pk_length - 7; - column->setPartSize(4*size); + case MYSQL_TYPE_GEOMETRY: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_JSON: { + NdbDictionary::Column *column = table_map.getColumn(tab, i); + unsigned size = pk_length + (column->getPartSize() + 3) / 4 + 7; + unsigned ndb_max = MAX_BLOB_ROW_SIZE; + + if (size > ndb_max && (pk_length + 7) < ndb_max) { + size = ndb_max - pk_length - 7; + column->setPartSize(4 * size); + } + /** + * If size > NDB_MAX and pk_length+7 >= NDB_MAX + * then the table can't be created anyway, so skip + * changing part size, and have error later + */ } - /** - * If size > NDB_MAX and pk_length+7 >= NDB_MAX + * then the table can't be created anyway, so skip + * changing part size, and have error later + */ - } - default: - break; + default: + break; } } @@ -11279,10 +9660,9 @@ int ha_ndbcluster::create(const char *name, DBUG_ASSERT(create_info->min_rows == table_share->min_rows); { - ha_rows max_rows= create_info->max_rows; - ha_rows min_rows= create_info->min_rows; - if (max_rows < min_rows) - max_rows= min_rows; + ha_rows max_rows = create_info->max_rows; + ha_rows min_rows = create_info->min_rows; + if (max_rows < min_rows) max_rows = min_rows; if (max_rows != (ha_rows)0) /* default setting, don't set fragmentation */ { tab.setMaxRows(max_rows); @@ -11293,18 +9673,15 @@ int ha_ndbcluster::create(const char *name, // Check partition info { const int setup_partinfo_result = - create_table_set_up_partition_info(form->part_info, - tab, table_map); - if (setup_partinfo_result) - { + create_table_set_up_partition_info(form->part_info, tab, table_map); + if (setup_partinfo_result) { DBUG_RETURN(setup_partinfo_result); } } if (tab.getFullyReplicated() && (tab.getFragmentType() != NDBTAB::HashMapPartition || - !tab.getDefaultNoPartitionsFlag())) - { + !tab.getDefaultNoPartitionsFlag())) { /** * Fully replicated are only supported on hash map partitions * with standard partition balances, no user defined partitioning @@ -11314,21 +9691,18 @@ int ha_ndbcluster::create(const char *name, * (Wrong fragment count for fully replicated table) */ } - if (tab.getFragmentType() == NDBTAB::HashMapPartition && + if (tab.getFragmentType() == NDBTAB::HashMapPartition && tab.getDefaultNoPartitionsFlag() && - !mod_frags->m_found && // Let PARTITION_BALANCE override max_rows - !tab.getFullyReplicated() && //Ignore max_rows for fully replicated - (create_info->max_rows != 0 || create_info->min_rows != 0)) - { - ulonglong rows= create_info->max_rows >= create_info->min_rows ?
- create_info->max_rows : - create_info->min_rows; - uint no_fragments= get_no_fragments(rows); - uint reported_frags= no_fragments; - if (adjusted_frag_count(ndb, no_fragments, reported_frags)) - { - push_warning(current_thd, - Sql_condition::SL_WARNING, ER_UNKNOWN_ERROR, + !mod_frags->m_found && // Let PARTITION_BALANCE override max_rows + !tab.getFullyReplicated() && // Ignore max_rows for fully replicated + (create_info->max_rows != 0 || create_info->min_rows != 0)) { + ulonglong rows = create_info->max_rows >= create_info->min_rows + ? create_info->max_rows + : create_info->min_rows; + uint no_fragments = get_no_fragments(rows); + uint reported_frags = no_fragments; + if (adjusted_frag_count(ndb, no_fragments, reported_frags)) { + push_warning(current_thd, Sql_condition::SL_WARNING, ER_UNKNOWN_ERROR, "Ndb might have problems storing the max amount " "of rows specified"); } @@ -11339,91 +9713,74 @@ int ha_ndbcluster::create(const char *name, } // Check for HashMap - if (tab.getFragmentType() == NDBTAB::HashMapPartition && - tab.getDefaultNoPartitionsFlag()) - { + if (tab.getFragmentType() == NDBTAB::HashMapPartition && + tab.getDefaultNoPartitionsFlag()) { /** * Default partitioning */ tab.setFragmentCount(0); tab.setFragmentData(0, 0); tab.setPartitionBalance(part_bal); - } - else if (tab.getFragmentType() == NDBTAB::HashMapPartition) - { + } else if (tab.getFragmentType() == NDBTAB::HashMapPartition) { NdbDictionary::HashMap hm; - if (dict->getDefaultHashMap(hm, tab.getFragmentCount()) == -1) - { - if (dict->initDefaultHashMap(hm, tab.getFragmentCount()) == -1) - { + if (dict->getDefaultHashMap(hm, tab.getFragmentCount()) == -1) { + if (dict->initDefaultHashMap(hm, tab.getFragmentCount()) == -1) { DBUG_RETURN(create.failed_in_NDB(dict->getNdbError())); } - if (dict->createHashMap(hm) == -1) - { + if (dict->createHashMap(hm) == -1) { DBUG_RETURN(create.failed_in_NDB(dict->getNdbError())); } } } // Create the table in NDB - if (dict->createTable(tab) != 0) - { + if (dict->createTable(tab) != 0) { DBUG_RETURN(create.failed_in_NDB(dict->getNdbError())); } - DBUG_PRINT("info", ("Table '%s/%s' created in NDB, id: %d, version: %d", - m_dbname, m_tabname, - tab.getObjectId(), - tab.getObjectVersion())); + DBUG_PRINT("info", + ("Table '%s/%s' created in NDB, id: %d, version: %d", m_dbname, + m_tabname, tab.getObjectId(), tab.getObjectVersion())); // Update table definition with the table id and version of the newly // created table, the caller will then save this information in the DD - ndb_dd_table_set_object_id_and_version(table_def, - tab.getObjectId(), + ndb_dd_table_set_object_id_and_version(table_def, tab.getObjectId(), tab.getObjectVersion()); // Create secondary indexes - if (create_indexes(thd, form, &tab) != 0) - { + if (create_indexes(thd, form, &tab) != 0) { DBUG_RETURN(create.failed_warning_already_pushed()); } - if (thd_sql_command(thd) != SQLCOM_TRUNCATE) - { + if (thd_sql_command(thd) != SQLCOM_TRUNCATE) { const int create_fks_result = create_fks(thd, ndb); - if (create_fks_result != 0) - { + if (create_fks_result != 0) { DBUG_RETURN(create_fks_result); } } if (thd->lex->sql_command == SQLCOM_ALTER_TABLE || thd->lex->sql_command == SQLCOM_DROP_INDEX || - thd->lex->sql_command == SQLCOM_CREATE_INDEX) - { + thd->lex->sql_command == SQLCOM_CREATE_INDEX) { // Copy foreign keys from the old NDB table (which still exists) const int copy_fk_result = copy_fk_for_offline_alter(thd, ndb, m_tabname); - if (copy_fk_result != 0) - { + if (copy_fk_result != 0) { DBUG_RETURN(copy_fk_result); 
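Between schema_trans.begin_trans() further up and the commit_trans() just below, the create path takes many early DBUG_RETURN exits; the Ndb_schema_trans_guard presumably keeps those exits safe by aborting a still-open NDB schema transaction from its destructor. A rough sketch of such a guard, with a placeholder Dictionary type rather than the real NdbDictionary API:

struct Dictionary {  // placeholder, not NdbDictionary::Dictionary
  int beginSchemaTrans() { return 0; }          // pretend success
  int endSchemaTrans(bool /*abort*/) { return 0; }
};

class Schema_trans_guard_sketch {
  Dictionary *const m_dict;
  bool m_active{false};

 public:
  explicit Schema_trans_guard_sketch(Dictionary *dict) : m_dict(dict) {}
  Schema_trans_guard_sketch(const Schema_trans_guard_sketch &) = delete;
  bool begin_trans() { return (m_active = m_dict->beginSchemaTrans() == 0); }
  bool commit_trans() {
    if (!m_active) return false;
    m_active = false;  // a committed transaction must not be aborted below
    return m_dict->endSchemaTrans(false) == 0;
  }
  ~Schema_trans_guard_sketch() {
    // Abort-on-destruction covers every early return before commit_trans()
    if (m_active) (void)m_dict->endSchemaTrans(true);
  }
};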
} } - if (!fk_list_for_truncate.is_empty()) - { + if (!fk_list_for_truncate.is_empty()) { // create foreign keys from the list extracted from old table const int recreate_fk_result = recreate_fk_for_truncate(thd, ndb, m_tabname, fk_list_for_truncate); - if (recreate_fk_result != 0) - { + if (recreate_fk_result != 0) { DBUG_RETURN(recreate_fk_result); } } // All schema objects created, commit NDB schema transaction - if (!schema_trans.commit_trans()) - { + if (!schema_trans.commit_trans()) { DBUG_RETURN(create.failed_warning_already_pushed()); } @@ -11437,8 +9794,7 @@ int ha_ndbcluster::create(const char *name, // Invalidate the successfully created table in NdbApi global dict cache table_invalidator.invalidate_after_sucessfully_created_table(); - if (DBUG_EVALUATE_IF("ndb_create_open_fail", true, false)) - { + if (DBUG_EVALUATE_IF("ndb_create_open_fail", true, false)) { // The table has been successfully created in NDB, emulate // failure to open the table by dropping the table from NDB Ndb_table_guard ndbtab_g(dict, m_tabname); @@ -11448,9 +9804,8 @@ int ha_ndbcluster::create(const char *name, } Ndb_table_guard ndbtab_g(dict, m_tabname); - const NdbDictionary::Table* ndbtab = ndbtab_g.get_table(); - if (ndbtab == nullptr) - { + const NdbDictionary::Table *ndbtab = ndbtab_g.get_table(); + if (ndbtab == nullptr) { // Failed to open the newly created table from NDB, since the // table is apparently not in NDB it can't be dropped. // However an NDB error must have occurred since the table can't @@ -11461,13 +9816,10 @@ int ha_ndbcluster::create(const char *name, // Check if the DD table object has the correct number of partitions. // Correct the number of partitions in the DD table object in case of // a mismatch - const bool check_partition_count_result = - ndb_dd_table_check_partition_count(table_def, - ndbtab->getPartitionCount()); - if (!check_partition_count_result) - { - ndb_dd_table_fix_partition_count(table_def, - ndbtab->getPartitionCount()); + const bool check_partition_count_result = ndb_dd_table_check_partition_count( + table_def, ndbtab->getPartitionCount()); + if (!check_partition_count_result) { + ndb_dd_table_fix_partition_count(table_def, ndbtab->getPartitionCount()); } // Check that NDB and DD metadata matches @@ -11480,8 +9832,7 @@ int ha_ndbcluster::create(const char *name, mysql_mutex_unlock(&ndbcluster_mutex); - if (!share) - { + if (!share) { // Failed to create the NDB_SHARE instance for this table, most likely OOM. DBUG_RETURN(create.failed_oom("Failed to acquire NDB_SHARE")); } @@ -11490,10 +9841,9 @@ int ha_ndbcluster::create(const char *name, // This will release the share automatically when it goes out of scope.
Ndb_share_temp_ref ndb_share_guard(share, "create"); - if (ndb_name_is_temp(m_tabname)) - { + if (ndb_name_is_temp(m_tabname)) { // Temporary named table created OK - DBUG_RETURN(create.succeeded()); // All OK + DBUG_RETURN(create.succeeded()); // All OK } // Apply the mysql.ndb_replication settings @@ -11503,20 +9853,16 @@ int ha_ndbcluster::create(const char *name, DBUG_RETURN(create.failed_warning_already_pushed()); } - if (binlog_client.table_should_have_event(share, ndbtab)) - { - if (binlog_client.create_event(ndb, ndbtab, share)) - { + if (binlog_client.table_should_have_event(share, ndbtab)) { + if (binlog_client.create_event(ndb, ndbtab, share)) { // Failed to create event for this table - DBUG_RETURN(create.failed_internal_error( "Failed to create event")); + DBUG_RETURN(create.failed_internal_error("Failed to create event")); } - if (binlog_client.table_should_have_event_op(share)) - { - Ndb_event_data* event_data; + if (binlog_client.table_should_have_event_op(share)) { + Ndb_event_data *event_data; if (!binlog_client.create_event_data(share, table_def, &event_data) || - binlog_client.create_event_op(share, ndbtab, event_data)) - { + binlog_client.create_event_op(share, ndbtab, event_data)) { // Failed to create event operation for this table DBUG_RETURN( create.failed_internal_error("Failed to create event operation")); @@ -11525,14 +9871,11 @@ int ha_ndbcluster::create(const char *name, } bool schema_dist_result; - if (thd_sql_command(thd) == SQLCOM_TRUNCATE) - { + if (thd_sql_command(thd) == SQLCOM_TRUNCATE) { schema_dist_result = schema_dist_client.truncate_table( share->db, share->table_name, ndbtab->getObjectId(), ndbtab->getObjectVersion()); - } - else - { + } else { DBUG_ASSERT(thd_sql_command(thd) == SQLCOM_CREATE_TABLE); int id = ndbtab->getObjectId(); int version = ndbtab->getObjectVersion(); @@ -11544,77 +9887,71 @@ int ha_ndbcluster::create(const char *name, ddl_ctx->mark_last_stmt_as_distributed(); } } - if (!schema_dist_result) - { + if (!schema_dist_result) { // Failed to distribute the create/truncate of this table to the // other MySQL Servers, fail the CREATE/TRUNCATE DBUG_RETURN(create.failed_internal_error("Failed to distribute table")); } - DBUG_RETURN(create.succeeded()); // All OK + DBUG_RETURN(create.succeeded()); // All OK } int ha_ndbcluster::create_index(THD *thd, const char *name, KEY *key_info, NDB_INDEX_TYPE idx_type, - const NdbDictionary::Table *ndbtab) const -{ - int error= 0; + const NdbDictionary::Table *ndbtab) const { + int error = 0; char unique_name[FN_LEN + 1]; - static const char* unique_suffix= "$unique"; + static const char *unique_suffix = "$unique"; DBUG_ENTER("ha_ndbcluster::create_index"); DBUG_PRINT("enter", ("name: %s", name)); - if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX) - { + if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX) { strxnmov(unique_name, FN_LEN, name, unique_suffix, NullS); DBUG_PRINT("info", ("unique_name: '%s'", unique_name)); } - - switch (idx_type){ - case PRIMARY_KEY_INDEX: - // Do nothing, already created - break; - case PRIMARY_KEY_ORDERED_INDEX: - error= create_index_in_NDB(thd, name, key_info, ndbtab, false); - break; - case UNIQUE_ORDERED_INDEX: - if (!(error= create_index_in_NDB(thd, name, key_info, ndbtab, false))) + + switch (idx_type) { + case PRIMARY_KEY_INDEX: + // Do nothing, already created + break; + case PRIMARY_KEY_ORDERED_INDEX: + error = create_index_in_NDB(thd, name, key_info, ndbtab, false); + break; + case UNIQUE_ORDERED_INDEX: + if (!(error = 
create_index_in_NDB(thd, name, key_info, ndbtab, false))) + error = create_index_in_NDB(thd, unique_name, key_info, ndbtab, + true /* unique */); + break; + case UNIQUE_INDEX: + if (check_index_fields_not_null(key_info)) { + push_warning_printf( + thd, Sql_condition::SL_WARNING, ER_NULL_COLUMN_IN_INDEX, + "Ndb does not support unique index on NULL valued attributes, " + "index access with NULL value will become full table scan"); + } error = create_index_in_NDB(thd, unique_name, key_info, ndbtab, true /* unique */); - break; - case UNIQUE_INDEX: - if (check_index_fields_not_null(key_info)) - { - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_NULL_COLUMN_IN_INDEX, - "Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan"); - } - error = create_index_in_NDB(thd, unique_name, key_info, ndbtab, - true /* unique */); - break; - case ORDERED_INDEX: - if (key_info->algorithm == HA_KEY_ALG_HASH) - { - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_ILLEGAL_HA_CREATE_OPTION, - ER_THD(thd, ER_ILLEGAL_HA_CREATE_OPTION), - ndbcluster_hton_name, - "Ndb does not support non-unique " - "hash based indexes"); - error= HA_ERR_UNSUPPORTED; break; - } - error= create_index_in_NDB(thd, name, key_info, ndbtab, false); - break; - default: - DBUG_ASSERT(false); - break; + case ORDERED_INDEX: + if (key_info->algorithm == HA_KEY_ALG_HASH) { + push_warning_printf( + thd, Sql_condition::SL_WARNING, ER_ILLEGAL_HA_CREATE_OPTION, + ER_THD(thd, ER_ILLEGAL_HA_CREATE_OPTION), ndbcluster_hton_name, + "Ndb does not support non-unique " + "hash based indexes"); + error = HA_ERR_UNSUPPORTED; + break; + } + error = create_index_in_NDB(thd, name, key_info, ndbtab, false); + break; + default: + DBUG_ASSERT(false); + break; } - + DBUG_RETURN(error); } - /** @brief Create an index in NDB. */ @@ -11622,11 +9959,11 @@ int ha_ndbcluster::create_index_in_NDB(THD *thd, const char *name, KEY *key_info, const NdbDictionary::Table *ndbtab, bool unique) const { - Ndb *ndb= get_ndb(thd); - NdbDictionary::Dictionary *dict= ndb->getDictionary(); - KEY_PART_INFO *key_part= key_info->key_part; - KEY_PART_INFO *end= key_part + key_info->user_defined_key_parts; - + Ndb *ndb = get_ndb(thd); + NdbDictionary::Dictionary *dict = ndb->getDictionary(); + KEY_PART_INFO *key_part = key_info->key_part; + KEY_PART_INFO *end = key_part + key_info->user_defined_key_parts; + DBUG_ENTER("ha_ndbcluster::create_index_in_NDB"); DBUG_PRINT("enter", ("name: %s, unique: %d ", name, unique)); @@ -11637,57 +9974,47 @@ int ha_ndbcluster::create_index_in_NDB(THD *thd, const char *name, NdbDictionary::Index ndb_index(index_name); if (unique) ndb_index.setType(NdbDictionary::Index::UniqueHashIndex); - else - { + else { ndb_index.setType(NdbDictionary::Index::OrderedIndex); ndb_index.setLogging(false); } - if (!ndbtab->getLogging()) - { + if (!ndbtab->getLogging()) { ndb_index.setLogging(false); } - if (ndbtab->getTemporary()) - { + if (ndbtab->getTemporary()) { ndb_index.setTemporary(true); } - if (ndb_index.setTable(m_tabname)) - { + if (ndb_index.setTable(m_tabname)) { // Can only fail due to memory -> return HA_ERR_OUT_OF_MEM DBUG_RETURN(HA_ERR_OUT_OF_MEM); } - for (; key_part != end; key_part++) - { - Field *field= key_part->field; - if (field->field_storage_type() == HA_SM_DISK) - { - my_printf_error(ER_ILLEGAL_HA_CREATE_OPTION, - "Cannot create index on DISK column '%s'. 
Alter it " - "in a way to use STORAGE MEMORY.", - MYF(0), - field->field_name); + for (; key_part != end; key_part++) { + Field *field = key_part->field; + if (field->field_storage_type() == HA_SM_DISK) { + my_printf_error(ER_ILLEGAL_HA_CREATE_OPTION, + "Cannot create index on DISK column '%s'. Alter it " + "in a way to use STORAGE MEMORY.", + MYF(0), field->field_name); DBUG_RETURN(HA_ERR_UNSUPPORTED); } DBUG_PRINT("info", ("attr: %s", field->field_name)); - if (ndb_index.addColumnName(field->field_name)) - { + if (ndb_index.addColumnName(field->field_name)) { // Can only fail due to memory -> return HA_ERR_OUT_OF_MEM DBUG_RETURN(HA_ERR_OUT_OF_MEM); } } - - if (dict->createIndex(ndb_index, *ndbtab)) - ERR_RETURN(dict->getNdbError()); + + if (dict->createIndex(ndb_index, *ndbtab)) ERR_RETURN(dict->getNdbError()); // Success DBUG_PRINT("info", ("Created index %s", name)); - DBUG_RETURN(0); + DBUG_RETURN(0); } - /** Truncate a table in NDB, after this command there should be no rows left in the table and the autoincrement @@ -11704,8 +10031,7 @@ int ha_ndbcluster::create_index_in_NDB(THD *thd, const char *name, @retval 0 on success */ -int ha_ndbcluster::truncate(dd::Table *table_def) -{ +int ha_ndbcluster::truncate(dd::Table *table_def) { DBUG_ENTER("ha_ndbcluster::truncate"); /* Table should have been opened */ @@ -11721,100 +10047,86 @@ int ha_ndbcluster::truncate(dd::Table *table_def) // Call ha_ndbcluster::create which will detect that this is a // truncate and thus drop the table before creating it again. const int truncate_error = - create(table->s->normalized_path.str, table, - &create_info, - table_def); + create(table->s->normalized_path.str, table, &create_info, table_def); // Open the table again even if the truncate failed, the caller // expect the table to be open. Report any error during open. 
- const int open_error = - open(table->s->normalized_path.str, 0, 0, table_def); + const int open_error = open(table->s->normalized_path.str, 0, 0, table_def); - if (truncate_error) - DBUG_RETURN(truncate_error); + if (truncate_error) DBUG_RETURN(truncate_error); DBUG_RETURN(open_error); } - - -int ha_ndbcluster::prepare_inplace__add_index(THD *thd, - KEY *key_info, - uint num_of_keys) const -{ - int error= 0; +int ha_ndbcluster::prepare_inplace__add_index(THD *thd, KEY *key_info, + uint num_of_keys) const { + int error = 0; DBUG_ENTER("ha_ndbcluster::prepare_inplace__add_index"); - for (uint idx= 0; idx < num_of_keys; idx++) - { - KEY *key= key_info + idx; - KEY_PART_INFO *key_part= key->key_part; - KEY_PART_INFO *end= key_part + key->user_defined_key_parts; + for (uint idx = 0; idx < num_of_keys; idx++) { + KEY *key = key_info + idx; + KEY_PART_INFO *key_part = key->key_part; + KEY_PART_INFO *end = key_part + key->user_defined_key_parts; // Add fields to key_part struct for (; key_part != end; key_part++) - key_part->field= table->field[key_part->fieldnr]; + key_part->field = table->field[key_part->fieldnr]; // Check index type // Create index in ndb const NDB_INDEX_TYPE idx_type = get_index_type_from_key(idx, key_info, false); - if ((error = create_index(thd, key_info[idx].name, key, idx_type, m_table))) - { + if ((error = + create_index(thd, key_info[idx].name, key, idx_type, m_table))) { break; } } - DBUG_RETURN(error); + DBUG_RETURN(error); } - /* Mark the index at m_index[key_num] as to be dropped * key_num - position of index in m_index */ -void ha_ndbcluster::prepare_inplace__drop_index(uint key_num) -{ +void ha_ndbcluster::prepare_inplace__drop_index(uint key_num) { DBUG_ENTER("ha_ndbcluster::prepare_inplace__drop_index"); // Mark indexes for deletion DBUG_PRINT("info", ("marking index as dropped: %u", key_num)); - m_index[key_num].status= NDB_INDEX_DATA::TO_BE_DROPPED; + m_index[key_num].status = NDB_INDEX_DATA::TO_BE_DROPPED; // Prepare delete of index stat entry if (m_index[key_num].type == PRIMARY_KEY_ORDERED_INDEX || m_index[key_num].type == UNIQUE_ORDERED_INDEX || - m_index[key_num].type == ORDERED_INDEX) - { - const NdbDictionary::Index *index= m_index[key_num].index; - if (index) // safety + m_index[key_num].type == ORDERED_INDEX) { + const NdbDictionary::Index *index = m_index[key_num].index; + if (index) // safety { - int index_id= index->getObjectId(); - int index_version= index->getObjectVersion(); + int index_id = index->getObjectId(); + int index_version = index->getObjectVersion(); ndb_index_stat_free(m_share, index_id, index_version); } } DBUG_VOID_RETURN; } - + /* Really drop all indexes marked for deletion */ -int ha_ndbcluster::inplace__final_drop_index(TABLE *table_arg) -{ +int ha_ndbcluster::inplace__final_drop_index(TABLE *table_arg) { int error; DBUG_ENTER("ha_ndbcluster::inplace__final_drop_index"); // Really drop indexes - THD *thd= current_thd; - Thd_ndb *thd_ndb= get_thd_ndb(thd); - Ndb *ndb= thd_ndb->ndb; - error= inplace__drop_indexes(ndb, table_arg); + THD *thd = current_thd; + Thd_ndb *thd_ndb = get_thd_ndb(thd); + Ndb *ndb = thd_ndb->ndb; + error = inplace__drop_indexes(ndb, table_arg); DBUG_RETURN(error); } - -extern void ndb_fk_util_resolve_mock_tables(THD* thd, - NdbDictionary::Dictionary* dict, - const char* new_parent_db, - const char* new_parent_name); +extern void ndb_fk_util_resolve_mock_tables(THD *thd, + NdbDictionary::Dictionary *dict, + const char *new_parent_db, + const char *new_parent_name); int rename_table_impl(THD *thd, Ndb *ndb, 
Ndb_schema_dist_client *schema_dist_client, @@ -11831,8 +10143,7 @@ int rename_table_impl(THD *thd, Ndb *ndb, DBUG_PRINT("info", ("real_rename_name: '%s'", real_rename_name)); // Verify default values of real_rename related parameters DBUG_ASSERT(real_rename || - (real_rename_db == NULL && - real_rename_name == NULL)); + (real_rename_db == NULL && real_rename_name == NULL)); DBUG_PRINT("info", ("drop_events: %d", drop_events)); DBUG_PRINT("info", ("create_events: %d", create_events)); @@ -11860,10 +10171,9 @@ int rename_table_impl(THD *thd, Ndb *ndb, } }); - NDBDICT* dict = ndb->getDictionary(); + NDBDICT *dict = ndb->getDictionary(); NDBDICT::List index_list; - if (my_strcasecmp(system_charset_info, new_dbname, old_dbname)) - { + if (my_strcasecmp(system_charset_info, new_dbname, old_dbname)) { // When moving tables between databases the indexes need to be // recreated, save list of indexes before rename to allow // them to be recreated afterwards @@ -11871,17 +10181,15 @@ int rename_table_impl(THD *thd, Ndb *ndb, } // Change current database to that of target table - if (ndb->setDatabaseName(new_dbname)) - { + if (ndb->setDatabaseName(new_dbname)) { ERR_RETURN(ndb->getNdbError()); } - const int ndb_table_id= orig_tab->getObjectId(); - const int ndb_table_version= orig_tab->getObjectVersion(); + const int ndb_table_id = orig_tab->getObjectId(); + const int ndb_table_version = orig_tab->getObjectVersion(); Ndb_share_temp_ref share(from, "rename_table_impl"); - if (real_rename) - { + if (real_rename) { /* Prepare the rename on the participant, i.e make the participant save the final table name in the NDB_SHARE of the table to be renamed. @@ -11909,17 +10217,16 @@ int rename_table_impl(THD *thd, Ndb *ndb, real_rename_name); } } - NDB_SHARE_KEY* old_key = share->key; // Save current key - NDB_SHARE_KEY* new_key = NDB_SHARE::create_key(to); + NDB_SHARE_KEY *old_key = share->key; // Save current key + NDB_SHARE_KEY *new_key = NDB_SHARE::create_key(to); (void)NDB_SHARE::rename_share(share, new_key); - Thd_ndb *thd_ndb = get_thd_ndb(thd); Ndb_DDL_transaction_ctx *ddl_ctx = thd_ndb->get_ddl_transaction_ctx(false); const bool rollback_in_progress = (ddl_ctx != nullptr && ddl_ctx->rollback_in_progress()); std::string orig_sdi; - if (!rollback_in_progress){ + if (!rollback_in_progress) { // Backup the original sdi in case if we have to rollback Uint32 version; void *unpacked_data; @@ -11933,11 +10240,11 @@ int rename_table_impl(THD *thd, Ndb *ndb, MYF(0), get_result); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } - orig_sdi.assign(static_cast(unpacked_data), unpacked_len); + orig_sdi.assign(static_cast(unpacked_data), unpacked_len); free(unpacked_data); } - NdbDictionary::Table new_tab= *orig_tab; + NdbDictionary::Table new_tab = *orig_tab; new_tab.setName(new_tabname); { @@ -11957,10 +10264,9 @@ int rename_table_impl(THD *thd, Ndb *ndb, } const int set_result = - new_tab.setExtraMetadata(2, // version 2 for sdi + new_tab.setExtraMetadata(2, // version 2 for sdi sdi.c_str(), (Uint32)sdi.length()); - if (set_result != 0) - { + if (set_result != 0) { my_printf_error(ER_INTERNAL_ERROR, "Failed to set extra metadata during rename table, " "error: %d", @@ -11969,9 +10275,8 @@ int rename_table_impl(THD *thd, Ndb *ndb, } } - if (dict->alterTableGlobal(*orig_tab, new_tab) != 0) - { - const NdbError ndb_error= dict->getNdbError(); + if (dict->alterTableGlobal(*orig_tab, new_tab) != 0) { + const NdbError ndb_error = dict->getNdbError(); // Rename the share back to old_key (void)NDB_SHARE::rename_share(share, old_key); 
// Release the unused new_key @@ -11981,20 +10286,19 @@ int rename_table_impl(THD *thd, Ndb *ndb, // Release the unused old_key NDB_SHARE::free_key(old_key); - if (!rollback_in_progress){ + if (!rollback_in_progress) { // This is an actual rename and not a rollback of the rename // Fetch the new table version and write it to the table definition, // the caller will then save it into DD Ndb_table_guard ndbtab_g(dict, new_tabname); - const NDBTAB *ndbtab= ndbtab_g.get_table(); + const NDBTAB *ndbtab = ndbtab_g.get_table(); // The id should still be the same as before the rename DBUG_ASSERT(ndbtab->getObjectId() == ndb_table_id); // The version should have been changed by the rename DBUG_ASSERT(ndbtab->getObjectVersion() != ndb_table_version); - ndb_dd_table_set_object_id_and_version(to_table_def, - ndb_table_id, + ndb_dd_table_set_object_id_and_version(to_table_def, ndb_table_id, ndbtab->getObjectVersion()); // Log the rename in the Ndb_DDL_transaction_ctx object @@ -12004,51 +10308,45 @@ int rename_table_impl(THD *thd, Ndb *ndb, from, to, orig_sdi); } - ndb_fk_util_resolve_mock_tables(thd, ndb->getDictionary(), - new_dbname, new_tabname); + ndb_fk_util_resolve_mock_tables(thd, ndb->getDictionary(), new_dbname, + new_tabname); /* handle old table */ - if (drop_events) - { - Ndb_binlog_client::drop_events_for_table(thd, ndb, - old_dbname, old_tabname); + if (drop_events) { + Ndb_binlog_client::drop_events_for_table(thd, ndb, old_dbname, old_tabname); } Ndb_binlog_client binlog_client(thd, new_dbname, new_tabname); - if (create_events) - { + if (create_events) { Ndb_table_guard ndbtab_g2(dict, new_tabname); - const NDBTAB *ndbtab= ndbtab_g2.get_table(); + const NDBTAB *ndbtab = ndbtab_g2.get_table(); // NOTE! Should check error and fail the rename (void)binlog_client.read_and_apply_replication_info(ndb, share, ndbtab, ::server_id); - if (binlog_client.table_should_have_event(share, ndbtab)) - { - if (binlog_client.create_event(ndb, ndbtab, share)) - { + if (binlog_client.table_should_have_event(share, ndbtab)) { + if (binlog_client.create_event(ndb, ndbtab, share)) { // Failed to create event for this table, fail the rename // NOTE! Should cover whole function with schema transaction to cleanup my_printf_error(ER_INTERNAL_ERROR, - "Failed to to create event for table '%s'", - MYF(0), share->key_string()); + "Failed to create event for table '%s'", MYF(0), + share->key_string()); DBUG_RETURN(ER_INTERNAL_ERROR); } - if (binlog_client.table_should_have_event_op(share)) - { + if (binlog_client.table_should_have_event_op(share)) { // NOTE! Simple renames perform the rename without recreating the event // operation, thus the check for share->have_event_operation() below. - Ndb_event_data* event_data; + Ndb_event_data *event_data; if (share->have_event_operation() == false && (!binlog_client.create_event_data(share, to_table_def, &event_data) || - binlog_client.create_event_op(share, ndbtab, event_data))) - { + binlog_client.create_event_op(share, ndbtab, event_data))) { // Failed to create event for this table, fail the rename - // NOTE!
Should cover whole function with schema transaction to + cleanup my_printf_error(ER_INTERNAL_ERROR, "Failed to create event operation for table '%s'", MYF(0), share->key_string()); @@ -12058,8 +10356,7 @@ int rename_table_impl(THD *thd, Ndb *ndb, } } - if (real_rename) - { + if (real_rename) { /* Commit of "real" rename table on participant i.e make the participant extract the original table name which it got in prepare. @@ -12108,22 +10405,20 @@ int rename_table_impl(THD *thd, Ndb *ndb, } } - for (unsigned i = 0; i < index_list.count; i++) - { - NDBDICT::List::Element& index_el = index_list.elements[i]; + for (unsigned i = 0; i < index_list.count; i++) { + NDBDICT::List::Element &index_el = index_list.elements[i]; // Recreate any indexes not stored in the system database - if (my_strcasecmp(system_charset_info, - index_el.database, NDB_SYSTEM_DATABASE)) - { + if (my_strcasecmp(system_charset_info, index_el.database, + NDB_SYSTEM_DATABASE)) { // Get old index ndb->setDatabaseName(old_dbname); - const NDBINDEX * index= dict->getIndexGlobal(index_el.name, new_tab); - DBUG_PRINT("info", ("Creating index %s/%s", - index_el.database, index->getName())); + const NDBINDEX *index = dict->getIndexGlobal(index_el.name, new_tab); + DBUG_PRINT("info", + ("Creating index %s/%s", index_el.database, index->getName())); // Create the same "old" index on new tab dict->createIndex(*index, new_tab); - DBUG_PRINT("info", ("Dropping index %s/%s", - index_el.database, index->getName())); + DBUG_PRINT("info", + ("Dropping index %s/%s", index_el.database, index->getName())); // Drop old index ndb->setDatabaseName(old_dbname); dict->dropIndexGlobal(*index); @@ -12132,43 +10427,34 @@ int rename_table_impl(THD *thd, Ndb *ndb, DBUG_RETURN(0); } - -static -bool -check_table_id_and_version(const dd::Table* table_def, - const NdbDictionary::Table* ndbtab) -{ +static bool check_table_id_and_version(const dd::Table *table_def, + const NdbDictionary::Table *ndbtab) { DBUG_ENTER("check_table_id_and_version"); int object_id, object_version; - if (!ndb_dd_table_get_object_id_and_version(table_def, - object_id, object_version)) - { + if (!ndb_dd_table_get_object_id_and_version(table_def, object_id, + object_version)) { DBUG_RETURN(false); } // Check that the id and version from DD // matches the id and version of the NDB table if (ndbtab->getObjectId() != object_id || - ndbtab->getObjectVersion() != object_version) - { + ndbtab->getObjectVersion() != object_version) { DBUG_RETURN(false); } DBUG_RETURN(true); - } - /** Rename a table in NDB and on the participating mysqld(s) */ int ha_ndbcluster::rename_table(const char *from, const char *to, - const dd::Table* from_table_def, - dd::Table* to_table_def) -{ - THD *thd= current_thd; + const dd::Table *from_table_def, + dd::Table *to_table_def) { + THD *thd = current_thd; char old_dbname[FN_HEADLEN]; char new_dbname[FN_HEADLEN]; char new_tabname[FN_HEADLEN]; @@ -12184,8 +10470,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to, DBUG_PRINT("info", ("old_tabname: '%s'", m_tabname)); DBUG_PRINT("info", ("new_tabname: '%s'", new_tabname)); - if (check_ndb_connection(thd)) - DBUG_RETURN(HA_ERR_NO_CONNECTION); + if (check_ndb_connection(thd)) DBUG_RETURN(HA_ERR_NO_CONNECTION); Ndb_schema_dist_client schema_dist_client(thd); @@ -12207,11 +10492,13 @@ int ha_ndbcluster::rename_table(const char *from, const char *to, prepare_tabname = m_tabname; break; - default: - ndb_log_error("INTERNAL ERROR: Unexpected sql command: %u " - "using rename_table",
thd_sql_command(thd)); - abort(); - break; + default: + ndb_log_error( + "INTERNAL ERROR: Unexpected sql command: %u " + "using rename_table", + thd_sql_command(thd)); + abort(); + break; } if (!schema_dist_client.prepare_rename(prepare_dbname, prepare_tabname, @@ -12221,32 +10508,28 @@ int ha_ndbcluster::rename_table(const char *from, const char *to, } std::string invalid_identifier; - if (!schema_dist_client.check_identifier_limits(invalid_identifier)) - { + if (!schema_dist_client.check_identifier_limits(invalid_identifier)) { // Check of db or table name limits failed my_error(ER_TOO_LONG_IDENT, MYF(0), invalid_identifier.c_str()); DBUG_RETURN(HA_WRONG_CREATE_OPTION); } - Thd_ndb *thd_ndb= get_thd_ndb(thd); + Thd_ndb *thd_ndb = get_thd_ndb(thd); if (!thd_ndb->has_required_global_schema_lock("ha_ndbcluster::rename_table")) DBUG_RETURN(HA_ERR_NO_CONNECTION); // Open the table which is to be renamed(aka. the old) - Ndb *ndb= get_ndb(thd); + Ndb *ndb = get_ndb(thd); ndb->setDatabaseName(old_dbname); - NDBDICT *dict= ndb->getDictionary(); + NDBDICT *dict = ndb->getDictionary(); Ndb_table_guard ndbtab_g(dict, m_tabname); const NDBTAB *orig_tab; - if (!(orig_tab= ndbtab_g.get_table())) - ERR_RETURN(dict->getNdbError()); + if (!(orig_tab = ndbtab_g.get_table())) ERR_RETURN(dict->getNdbError()); DBUG_PRINT("info", ("NDB table name: '%s'", orig_tab->getName())); // Check that id and version of the table to be renamed // matches the id and version of the NDB table - if (!check_table_id_and_version(from_table_def, - orig_tab)) - { + if (!check_table_id_and_version(from_table_def, orig_tab)) { DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } @@ -12255,238 +10538,212 @@ int ha_ndbcluster::rename_table(const char *from, const char *to, const bool old_is_temp = ndb_name_is_temp(m_tabname); const bool new_is_temp = ndb_name_is_temp(new_tabname); - switch (thd_sql_command(thd)) - { - case SQLCOM_DROP_INDEX: - case SQLCOM_CREATE_INDEX: - DBUG_PRINT("info", ("CREATE or DROP INDEX as copying ALTER")); - // fallthrough - case SQLCOM_ALTER_TABLE: - DBUG_PRINT("info", ("SQLCOM_ALTER_TABLE")); - - if (!new_is_temp && !old_is_temp) - { - /* - This is a rename directly from real to real which occurs: - 1) when the ALTER is "simple" RENAME i.e only consists of RENAME - and/or enable/disable indexes - 2) as part of inplace ALTER .. RENAME - */ - DBUG_PRINT("info", ("simple rename detected")); - DBUG_RETURN(rename_table_impl(thd, ndb, - &schema_dist_client, - orig_tab, to_table_def, - from, to, - old_dbname, m_tabname, - new_dbname, new_tabname, - true, // real_rename - old_dbname, // real_rename_db - m_tabname, // real_rename_name - true, // drop_events - true, // create events - false)); // commit_alter - } - - // Make sure that inplace was not requested - DBUG_ASSERT(thd->lex->alter_info->requested_algorithm != - Alter_info::ALTER_TABLE_ALGORITHM_INPLACE); + switch (thd_sql_command(thd)) { + case SQLCOM_DROP_INDEX: + case SQLCOM_CREATE_INDEX: + DBUG_PRINT("info", ("CREATE or DROP INDEX as copying ALTER")); + // fallthrough + case SQLCOM_ALTER_TABLE: + DBUG_PRINT("info", ("SQLCOM_ALTER_TABLE")); - /* - This is a copying alter table which is implemented as - 1) Create destination table with temporary name - -> ha_ndbcluster::create_table('#sql_75636-87') - There are now the source table and one with temporary name: - [t1] + [#sql_75636-87] - 2) Copy data from source table to destination table. - 3) Backup the source table by renaming it to another temporary name. 
-         -> ha_ndbcluster::rename_table('t1', '#sql_86545-98')
-         There are now two temporary named tables:
-           [#sql_86545-98] + [#sql_75636-87]
-      4) Rename the destination table to it's real name.
-         -> ha_ndbcluster::rename_table('#sql_75636-87', 't1')
-      5) Drop the source table
+      if (!new_is_temp && !old_is_temp) {
+        /*
+          This is a rename directly from real to real which occurs:
+          1) when the ALTER is "simple" RENAME i.e. only consists of RENAME
+             and/or enable/disable indexes
+          2) as part of inplace ALTER .. RENAME
+        */
+        DBUG_PRINT("info", ("simple rename detected"));
+        DBUG_RETURN(rename_table_impl(thd, ndb, &schema_dist_client, orig_tab,
+                                      to_table_def, from, to, old_dbname,
+                                      m_tabname, new_dbname, new_tabname,
+                                      true,        // real_rename
+                                      old_dbname,  // real_rename_db
+                                      m_tabname,   // real_rename_name
+                                      true,        // drop_events
+                                      true,        // create events
+                                      false));     // commit_alter
+      }
+      // Make sure that inplace was not requested
+      DBUG_ASSERT(thd->lex->alter_info->requested_algorithm !=
+                  Alter_info::ALTER_TABLE_ALGORITHM_INPLACE);
-    */
+      /*
+        This is a copying alter table which is implemented as
+        1) Create destination table with temporary name
+           -> ha_ndbcluster::create_table('#sql_75636-87')
+           There are now the source table and one with temporary name:
+             [t1] + [#sql_75636-87]
+        2) Copy data from source table to destination table.
+        3) Backup the source table by renaming it to another temporary name.
+           -> ha_ndbcluster::rename_table('t1', '#sql_86545-98')
+           There are now two temporary named tables:
+             [#sql_86545-98] + [#sql_75636-87]
+        4) Rename the destination table to its real name.
+           -> ha_ndbcluster::rename_table('#sql_75636-87', 't1')
+        5) Drop the source table
-      if (new_is_temp)
-      {
-        if(Ndb_dist_priv_util::is_distributed_priv_table(old_dbname, m_tabname))
-        {
-          // Special case allowing the legacy distributed privilege tables
-          // to be migrated to local shadow tables. Do not drop the table from
-          // NdbDictionary or publish this change via schema distribution.
-          // Rename the share.
-          ndb_log_info("Migrating legacy privilege table: Rename %s to %s",
-                       m_tabname, new_tabname);
-          Ndb_share_temp_ref share(from, "rename_table__for_local_shadow");
-          // privilege tables never have an event
-          assert(!share->have_event_operation());
-          NDB_SHARE_KEY* old_key = share->key; // Save current key
-          NDB_SHARE_KEY* new_key = NDB_SHARE::create_key(to);
-          (void)NDB_SHARE::rename_share(share, new_key);
-          NDB_SHARE::free_key(old_key);
-          DBUG_RETURN(0);
-        }
-        /*
-          This is an alter table which renames real name to temp name.
-          ie. step 3) per above and is the first of
-          two rename_table() calls. Drop events from the table.
-        */
-        DBUG_PRINT("info", ("real -> temp"));
-        DBUG_RETURN(rename_table_impl(thd, ndb,
-                                      &schema_dist_client,
-                                      orig_tab, to_table_def,
-                                      from, to,
-                                      old_dbname, m_tabname,
-                                      new_dbname, new_tabname,
-                                      false, // real_rename
-                                      NULL, // real_rename_db
-                                      NULL, // real_rename_name
-                                      true, // drop_events
-                                      false, // create events
-                                      false)); // commit_alter
-      }
-
-      if (old_is_temp)
-      {
-        /*
-          This is an alter table which renames temp name to real name.
-          ie. step 5) per above and is the second call to rename_table().
-          Create new events and commit the alter so that participant are
-          made aware that the table changed and can reopen the table.
-
-          Important here is to remeber to rename the table also
-          on all partiticipants so they will find the table when
-          the alter is completed. This is slightly problematic since
-          their table is renamed directly from real to real name, while
-          the mysqld who performs the alter renames from temp to real
-          name. Fortunately it's possible to lookup the original table
-          name via THD.
-        */
-        const char* orig_name = thd->lex->select_lex->table_list.first->table_name;
-        const char* orig_db = thd->lex->select_lex->table_list.first->db;
-        if (thd->lex->alter_info->flags & Alter_info::ALTER_RENAME &&
-            (my_strcasecmp(system_charset_info, orig_db, new_dbname) ||
-             my_strcasecmp(system_charset_info, orig_name, new_tabname)))
-        {
-          DBUG_PRINT("info", ("ALTER with RENAME detected"));
+      if (new_is_temp) {
+        if (Ndb_dist_priv_util::is_distributed_priv_table(old_dbname,
+                                                          m_tabname)) {
+          // Special case allowing the legacy distributed privilege tables
+          // to be migrated to local shadow tables. Do not drop the table from
+          // NdbDictionary or publish this change via schema distribution.
+          // Rename the share.
+          ndb_log_info("Migrating legacy privilege table: Rename %s to %s",
+                       m_tabname, new_tabname);
+          Ndb_share_temp_ref share(from, "rename_table__for_local_shadow");
+          // privilege tables never have an event
+          assert(!share->have_event_operation());
+          NDB_SHARE_KEY *old_key = share->key;  // Save current key
+          NDB_SHARE_KEY *new_key = NDB_SHARE::create_key(to);
+          (void)NDB_SHARE::rename_share(share, new_key);
+          NDB_SHARE::free_key(old_key);
+          DBUG_RETURN(0);
+        }
        /*
-          Use the original table name when communicating with participant
+          This is an alter table which renames real name to temp name.
+          i.e. step 3) per above and is the first of
+          two rename_table() calls. Drop events from the table.
        */
-          const char* real_rename_db = orig_db;
-          const char* real_rename_name = orig_name;
-
-          DBUG_RETURN(rename_table_impl(thd, ndb,
-                                        &schema_dist_client,
-                                        orig_tab, to_table_def,
-                                        from, to,
-                                        old_dbname, m_tabname,
-                                        new_dbname, new_tabname,
-                                        true, // real_rename
-                                        real_rename_db,
-                                        real_rename_name,
-                                        false, // drop_events
-                                        true, // create events
-                                        true)); // commit_alter
+        DBUG_PRINT("info", ("real -> temp"));
+        DBUG_RETURN(rename_table_impl(thd, ndb, &schema_dist_client, orig_tab,
+                                      to_table_def, from, to, old_dbname,
+                                      m_tabname, new_dbname, new_tabname,
+                                      false,    // real_rename
+                                      NULL,     // real_rename_db
+                                      NULL,     // real_rename_name
+                                      true,     // drop_events
+                                      false,    // create events
+                                      false));  // commit_alter
        }
-        DBUG_RETURN(rename_table_impl(thd, ndb,
-                                      &schema_dist_client,
-                                      orig_tab, to_table_def,
-                                      from, to,
-                                      old_dbname, m_tabname,
-                                      new_dbname, new_tabname,
-                                      false, // real_rename
-                                      NULL, // real_rename_db
-                                      NULL, // real_rename_name
-                                      false, // drop_events
-                                      true, // create events
-                                      true)); // commit_alter
-      }
-      break;
+      if (old_is_temp) {
+        /*
+          This is an alter table which renames temp name to real name.
+          i.e. step 5) per above and is the second call to rename_table().
+          Create new events and commit the alter so that participants are
+          made aware that the table changed and can reopen the table.
+        */
+        DBUG_PRINT("info", ("temp -> real"));

-  case SQLCOM_RENAME_TABLE:
-    DBUG_PRINT("info", ("SQLCOM_RENAME_TABLE"));
-
-    DBUG_RETURN(rename_table_impl(thd, ndb,
-                                  &schema_dist_client,
-                                  orig_tab, to_table_def,
-                                  from, to,
-                                  old_dbname, m_tabname,
-                                  new_dbname, new_tabname,
-                                  true, // real_rename
-                                  old_dbname, // real_rename_db
-                                  m_tabname, // real_rename_name
-                                  true, // drop_events
-                                  true, // create events
-                                  false)); // commit_alter
-    break;
+        /*
+          Detect if this is the special case which occurs when
+          the table is both altered and renamed.
+
+          Important here is to remember to rename the table also
+          on all participants so they will find the table when
+          the alter is completed. This is slightly problematic since
+          their table is renamed directly from real to real name, while
+          the mysqld who performs the alter renames from temp to real
+          name. Fortunately it's possible to look up the original table
+          name via THD.
+        */
+        const char *orig_name =
+            thd->lex->select_lex->table_list.first->table_name;
+        const char *orig_db = thd->lex->select_lex->table_list.first->db;
+        if (thd->lex->alter_info->flags & Alter_info::ALTER_RENAME &&
+            (my_strcasecmp(system_charset_info, orig_db, new_dbname) ||
+             my_strcasecmp(system_charset_info, orig_name, new_tabname))) {
+          DBUG_PRINT("info", ("ALTER with RENAME detected"));
+          /*
+            Use the original table name when communicating with participants
+          */
+          const char *real_rename_db = orig_db;
+          const char *real_rename_name = orig_name;
+
+          DBUG_RETURN(rename_table_impl(thd, ndb, &schema_dist_client, orig_tab,
+                                        to_table_def, from, to, old_dbname,
+                                        m_tabname, new_dbname, new_tabname,
+                                        true,   // real_rename
+                                        real_rename_db, real_rename_name,
+                                        false,  // drop_events
+                                        true,   // create events
+                                        true)); // commit_alter
+        }
-    default:
-      ndb_log_error("Unexpected rename case detected, sql_command: %d",
-                    thd_sql_command(thd));
-      abort();
-      break;
+        DBUG_RETURN(rename_table_impl(thd, ndb, &schema_dist_client, orig_tab,
+                                      to_table_def, from, to, old_dbname,
+                                      m_tabname, new_dbname, new_tabname,
+                                      false,  // real_rename
+                                      NULL,   // real_rename_db
+                                      NULL,   // real_rename_name
+                                      false,  // drop_events
+                                      true,   // create events
+                                      true)); // commit_alter
+      }
+      break;
+
+    case SQLCOM_RENAME_TABLE:
+      DBUG_PRINT("info", ("SQLCOM_RENAME_TABLE"));
+
+      DBUG_RETURN(rename_table_impl(thd, ndb, &schema_dist_client, orig_tab,
+                                    to_table_def, from, to, old_dbname,
+                                    m_tabname, new_dbname, new_tabname,
+                                    true,        // real_rename
+                                    old_dbname,  // real_rename_db
+                                    m_tabname,   // real_rename_name
+                                    true,        // drop_events
+                                    true,        // create events
+                                    false));     // commit_alter
+      break;
+
+    default:
+      ndb_log_error("Unexpected rename case detected, sql_command: %d",
+                    thd_sql_command(thd));
+      abort();
+      break;
  }

  // Never reached
  DBUG_RETURN(HA_ERR_UNSUPPORTED);
}

-
-
-
-
 // Declare adapter functions for Dummy_table_util function
-extern bool ndb_fk_util_build_list(THD*, NdbDictionary::Dictionary*,
-                                   const NdbDictionary::Table*, List&);
-extern void ndb_fk_util_drop_list(THD*, Ndb* ndb, NdbDictionary::Dictionary*, List&);
-extern bool ndb_fk_util_drop_table(THD*, Ndb* ndb, NdbDictionary::Dictionary*,
-                                   const NdbDictionary::Table*);
-extern bool ndb_fk_util_is_mock_name(const char* table_name);
+extern bool ndb_fk_util_build_list(THD *, NdbDictionary::Dictionary *,
+                                   const NdbDictionary::Table *, List &);
+extern void ndb_fk_util_drop_list(THD *, Ndb *ndb, NdbDictionary::Dictionary *,
+                                  List &);
+extern bool ndb_fk_util_drop_table(THD *, Ndb *ndb, NdbDictionary::Dictionary *,
+                                   const NdbDictionary::Table *);
+extern bool ndb_fk_util_is_mock_name(const char *table_name); /** Delete table and its related objects from NDB. */ -static -bool -drop_table_and_related(THD* thd, Ndb* ndb, NdbDictionary::Dictionary* dict, - const NdbDictionary::Table* table, - int drop_flags, bool skip_related) -{ +static bool drop_table_and_related(THD *thd, Ndb *ndb, + NdbDictionary::Dictionary *dict, + const NdbDictionary::Table *table, + int drop_flags, bool skip_related) { DBUG_ENTER("drop_table_and_related"); - DBUG_PRINT("enter", ("cascade_constraints: %d dropdb: %d skip_related: %d", - static_cast(drop_flags & NDBDICT::DropTableCascadeConstraints), - static_cast(drop_flags & NDBDICT::DropTableCascadeConstraintsDropDB), - skip_related)); + DBUG_PRINT( + "enter", + ("cascade_constraints: %d dropdb: %d skip_related: %d", + static_cast(drop_flags & NDBDICT::DropTableCascadeConstraints), + static_cast(drop_flags & + NDBDICT::DropTableCascadeConstraintsDropDB), + skip_related)); /* Build list of objects which should be dropped after the table unless the caller ask to skip dropping related */ List drop_list; - if (!skip_related && - !ndb_fk_util_build_list(thd, dict, table, drop_list)) - { + if (!skip_related && !ndb_fk_util_build_list(thd, dict, table, drop_list)) { DBUG_RETURN(false); } // Drop the table - if (dict->dropTableGlobal(*table, drop_flags) != 0) - { - const NdbError& ndb_err = dict->getNdbError(); + if (dict->dropTableGlobal(*table, drop_flags) != 0) { + const NdbError &ndb_err = dict->getNdbError(); if (ndb_err.code == 21080 && - thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) - { + thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { /* Drop was not allowed because table is still referenced by foreign key(s). Since foreign_key_checks=0 the problem is @@ -12494,13 +10751,10 @@ drop_table_and_related(THD* thd, Ndb* ndb, NdbDictionary::Dictionary* dict, key(s) to point at the mock table and finally dropping the requested table. 
*/ - if (!ndb_fk_util_drop_table(thd, ndb, dict, table)) - { + if (!ndb_fk_util_drop_table(thd, ndb, dict, table)) { DBUG_RETURN(false); } - } - else - { + } else { DBUG_RETURN(false); } } @@ -12511,19 +10765,12 @@ drop_table_and_related(THD* thd, Ndb* ndb, NdbDictionary::Dictionary* dict, DBUG_RETURN(true); } - -int -drop_table_impl(THD *thd, Ndb *ndb, - Ndb_schema_dist_client* schema_dist_client, - const char *path, - const char *db, - const char *table_name) -{ +int drop_table_impl(THD *thd, Ndb *ndb, + Ndb_schema_dist_client *schema_dist_client, + const char *path, const char *db, const char *table_name) { DBUG_ENTER("drop_table_impl"); - NDB_SHARE *share= - NDB_SHARE::acquire_reference_by_key(path, - "delete_table"); + NDB_SHARE *share = NDB_SHARE::acquire_reference_by_key(path, "delete_table"); bool skip_related = false; int drop_flags = 0; @@ -12531,24 +10778,21 @@ drop_table_impl(THD *thd, Ndb *ndb, if ((thd_sql_command(thd) == SQLCOM_ALTER_TABLE || thd_sql_command(thd) == SQLCOM_DROP_INDEX || thd_sql_command(thd) == SQLCOM_CREATE_INDEX) && - ndb_name_is_temp(table_name)) - { + ndb_name_is_temp(table_name)) { DBUG_PRINT("info", ("Using cascade constraints for ALTER of temp table")); drop_flags |= NDBDICT::DropTableCascadeConstraints; // Cascade constraint is used and related will be dropped anyway skip_related = true; } - if (thd_sql_command(thd) == SQLCOM_DROP_DB) - { + if (thd_sql_command(thd) == SQLCOM_DROP_DB) { DBUG_PRINT("info", ("Using cascade constraints DB for drop database")); drop_flags |= NDBDICT::DropTableCascadeConstraintsDropDB; } - if (thd_sql_command(thd) == SQLCOM_TRUNCATE) - { + if (thd_sql_command(thd) == SQLCOM_TRUNCATE) { DBUG_PRINT("info", ("Deleting table for TRUNCATE, skip dropping related")); - skip_related= true; + skip_related = true; } // Drop the table from NDB @@ -12557,19 +10801,16 @@ drop_table_impl(THD *thd, Ndb *ndb, int ndb_table_version = 0; uint retries = 100; ndb->setDatabaseName(db); - while (true) - { + while (true) { Ndb_table_guard ndbtab_g(dict, table_name); const NDBTAB *ndbtab = ndbtab_g.get_table(); - if (ndbtab == nullptr) - { + if (ndbtab == nullptr) { // Table not found break; } if (drop_table_and_related(thd, ndb, dict, ndbtab, drop_flags, - skip_related)) - { + skip_related)) { // Table successfully dropped from NDB ndb_table_id = ndbtab->getObjectId(); ndb_table_version = ndbtab->getObjectVersion(); @@ -12578,14 +10819,12 @@ drop_table_impl(THD *thd, Ndb *ndb, // An error has occurred. Examine the failure and retry if possible if (--retries && dict->getNdbError().status == NdbError::TemporaryError && - !thd_killed(thd)) - { + !thd_killed(thd)) { ndb_trans_retry_sleep(); continue; } - if (dict->getNdbError().code == NDB_INVALID_SCHEMA_OBJECT) - { + if (dict->getNdbError().code == NDB_INVALID_SCHEMA_OBJECT) { // Invalidate the object and retry ndbtab_g.invalidate(); continue; @@ -12599,12 +10838,10 @@ drop_table_impl(THD *thd, Ndb *ndb, const int dict_error_code = dict->getNdbError().code; // Check if an error has occurred. 
Note that if the table didn't exist in NDB // (denoted by error codes 709 or 723), it's considered a success - if (dict_error_code && dict_error_code != 709 && dict_error_code != 723) - { + if (dict_error_code && dict_error_code != 709 && dict_error_code != 723) { // The drop table failed, just release the share reference and return error thd_ndb->push_ndb_error_warning(dict->getNdbError()); - if (share) - { + if (share) { NDB_SHARE::release_reference(share, "delete_table"); } DBUG_RETURN(dict_error_code); @@ -12613,8 +10850,7 @@ drop_table_impl(THD *thd, Ndb *ndb, // Drop the event(s) for the table Ndb_binlog_client::drop_events_for_table(thd, ndb, db, table_name); - if (share) - { + if (share) { // Wait for binlog thread to detect the dropped table // and release it's event operations ndbcluster_binlog_wait_synch_drop_table(thd, share); @@ -12627,9 +10863,7 @@ drop_table_impl(THD *thd, Ndb *ndb, (ddl_ctx == nullptr || !ddl_ctx->rollback_in_progress()); if (!ndb_name_is_temp(table_name) && thd_sql_command(thd) != SQLCOM_TRUNCATE && - thd_sql_command(thd) != SQLCOM_DROP_DB && - schema_dist_client != nullptr) - { + thd_sql_command(thd) != SQLCOM_DROP_DB && schema_dist_client != nullptr) { if (!schema_dist_client->drop_table(db, table_name, ndb_table_id, ndb_table_version, log_on_participant)) { @@ -12639,8 +10873,7 @@ drop_table_impl(THD *thd, Ndb *ndb, } } - if (share) - { + if (share) { mysql_mutex_lock(&ndbcluster_mutex); NDB_SHARE::mark_share_dropped(&share); NDB_SHARE::release_reference_have_lock(share, "delete_table"); @@ -12650,30 +10883,25 @@ drop_table_impl(THD *thd, Ndb *ndb, DBUG_RETURN(0); } -static void clear_table_from_dictionary_cache(Ndb *ndb, - const char * db, - const char * table_name) -{ - const NdbDictionary::Dictionary * dict = ndb->getDictionary(); +static void clear_table_from_dictionary_cache(Ndb *ndb, const char *db, + const char *table_name) { + const NdbDictionary::Dictionary *dict = ndb->getDictionary(); ndb->setDatabaseName(db); - const NdbDictionary::Table * tab = dict->getTableGlobal(table_name); - if(tab) - { + const NdbDictionary::Table *tab = dict->getTableGlobal(table_name); + if (tab) { NdbDictionary::Dictionary::List index_list; dict->listIndexes(index_list, *tab); - for (unsigned i = 0; i < index_list.count; i++) - { - const NdbDictionary::Index * index= - dict->getIndexGlobal(index_list.elements[i].name, *tab); + for (unsigned i = 0; i < index_list.count; i++) { + const NdbDictionary::Index *index = + dict->getIndexGlobal(index_list.elements[i].name, *tab); dict->removeIndexGlobal(*index, 1 /*invalidate=true*/); } dict->removeTableGlobal(*tab, 1 /*invalidate=true*/); } } -int ha_ndbcluster::delete_table(const char *path, const dd::Table *) -{ - THD *thd= current_thd; +int ha_ndbcluster::delete_table(const char *path, const dd::Table *) { + THD *thd = current_thd; DBUG_ENTER("ha_ndbcluster::delete_table"); DBUG_PRINT("enter", ("path: %s", path)); @@ -12684,20 +10912,19 @@ int ha_ndbcluster::delete_table(const char *path, const dd::Table *) set_dbname(path); set_tabname(path); - if (check_ndb_connection(thd)) - { + if (check_ndb_connection(thd)) { DBUG_RETURN(HA_ERR_NO_CONNECTION); } - Thd_ndb *thd_ndb= get_thd_ndb(thd); - if (!thd_ndb->has_required_global_schema_lock("ha_ndbcluster::delete_table")) - { + Thd_ndb *thd_ndb = get_thd_ndb(thd); + if (!thd_ndb->has_required_global_schema_lock( + "ha_ndbcluster::delete_table")) { DBUG_RETURN(HA_ERR_NO_CONNECTION); } if (ndb_name_is_temp(m_tabname)) { const char *orig_table_name = - 
thd->lex->select_lex->table_list.first->table_name; + thd->lex->select_lex->table_list.first->table_name; if (thd_sql_command(thd) == SQLCOM_ALTER_TABLE && Ndb_dist_priv_util::is_distributed_priv_table(m_dbname, orig_table_name)) { @@ -12739,132 +10966,121 @@ int ha_ndbcluster::delete_table(const char *path, const dd::Table *) } /* Drop table in NDB and on the other mysqld(s) */ - const int drop_result = drop_table_impl(thd, thd_ndb->ndb, - &schema_dist_client, - path, m_dbname, m_tabname); + const int drop_result = drop_table_impl( + thd, thd_ndb->ndb, &schema_dist_client, path, m_dbname, m_tabname); DBUG_RETURN(drop_result); } void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment, ulonglong, ulonglong *first_value, - ulonglong *nb_reserved_values) -{ + ulonglong *nb_reserved_values) { Uint64 auto_value; - THD *thd= current_thd; + THD *thd = current_thd; DBUG_ENTER("get_auto_increment"); DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); - Ndb *ndb= get_ndb(table->in_use); - uint retries= NDB_AUTO_INCREMENT_RETRIES; - for (;;) - { + Ndb *ndb = get_ndb(table->in_use); + uint retries = NDB_AUTO_INCREMENT_RETRIES; + for (;;) { NDB_SHARE::Tuple_id_range_guard g(m_share); if ((m_skip_auto_increment && ndb->readAutoIncrementValue(m_table, g.range, auto_value)) || - ndb->getAutoIncrementValue(m_table, g.range, auto_value, - Uint32(m_autoincrement_prefetch), - increment, offset)) - { + ndb->getAutoIncrementValue(m_table, g.range, auto_value, + Uint32(m_autoincrement_prefetch), increment, + offset)) { if (--retries && !thd_killed(thd) && - ndb->getNdbError().status == NdbError::TemporaryError) - { + ndb->getNdbError().status == NdbError::TemporaryError) { ndb_trans_retry_sleep(); continue; } - const NdbError err= ndb->getNdbError(); - ndb_log_error("Error %d in ::get_auto_increment(): %s", - err.code, err.message); - *first_value= ~(ulonglong) 0; + const NdbError err = ndb->getNdbError(); + ndb_log_error("Error %d in ::get_auto_increment(): %s", err.code, + err.message); + *first_value = ~(ulonglong)0; DBUG_VOID_RETURN; } break; } - *first_value= (longlong)auto_value; + *first_value = (longlong)auto_value; /* From the point of view of MySQL, NDB reserves one row at a time */ - *nb_reserved_values= 1; + *nb_reserved_values = 1; DBUG_VOID_RETURN; } - /** Constructor for the NDB Cluster table handler . 
*/ -ha_ndbcluster::ha_ndbcluster(handlerton *hton, TABLE_SHARE *table_arg): - handler(hton, table_arg), - m_table_map(NULL), - m_thd_ndb(NULL), - m_active_cursor(NULL), - m_table(NULL), - m_ndb_record(0), - m_ndb_hidden_key_record(0), - m_table_info(NULL), - m_share(0), - m_key_fields(NULL), - m_part_info(NULL), - m_user_defined_partitioning(false), - m_use_partition_pruning(false), - m_sorted(false), - m_use_write(false), - m_ignore_dup_key(false), - m_has_unique_index(false), - m_ignore_no_key(false), - m_read_before_write_removal_possible(false), - m_read_before_write_removal_used(false), - m_rows_updated(0), - m_rows_deleted(0), - m_rows_to_insert((ha_rows) 1), - m_rows_inserted((ha_rows) 0), - m_delete_cannot_batch(false), - m_update_cannot_batch(false), - m_skip_auto_increment(true), - m_blobs_pending(0), - m_is_bulk_delete(false), - m_blobs_row_total_size(0), - m_blobs_buffer(0), - m_blobs_buffer_size(0), - m_dupkey((uint) -1), - m_autoincrement_prefetch(DEFAULT_AUTO_PREFETCH), - m_pushed_join_member(NULL), - m_pushed_join_operation(-1), - m_disable_pushed_join(false), - m_active_query(NULL), - m_pushed_operation(NULL), - m_cond(), - m_multi_cursor(NULL) -{ +ha_ndbcluster::ha_ndbcluster(handlerton *hton, TABLE_SHARE *table_arg) + : handler(hton, table_arg), + m_table_map(NULL), + m_thd_ndb(NULL), + m_active_cursor(NULL), + m_table(NULL), + m_ndb_record(0), + m_ndb_hidden_key_record(0), + m_table_info(NULL), + m_share(0), + m_key_fields(NULL), + m_part_info(NULL), + m_user_defined_partitioning(false), + m_use_partition_pruning(false), + m_sorted(false), + m_use_write(false), + m_ignore_dup_key(false), + m_has_unique_index(false), + m_ignore_no_key(false), + m_read_before_write_removal_possible(false), + m_read_before_write_removal_used(false), + m_rows_updated(0), + m_rows_deleted(0), + m_rows_to_insert((ha_rows)1), + m_rows_inserted((ha_rows)0), + m_delete_cannot_batch(false), + m_update_cannot_batch(false), + m_skip_auto_increment(true), + m_blobs_pending(0), + m_is_bulk_delete(false), + m_blobs_row_total_size(0), + m_blobs_buffer(0), + m_blobs_buffer_size(0), + m_dupkey((uint)-1), + m_autoincrement_prefetch(DEFAULT_AUTO_PREFETCH), + m_pushed_join_member(NULL), + m_pushed_join_operation(-1), + m_disable_pushed_join(false), + m_active_query(NULL), + m_pushed_operation(NULL), + m_cond(), + m_multi_cursor(NULL) { uint i; - + DBUG_ENTER("ha_ndbcluster"); - m_tabname[0]= '\0'; - m_dbname[0]= '\0'; + m_tabname[0] = '\0'; + m_dbname[0] = '\0'; - stats.records= ~(ha_rows)0; // uninitialized - stats.block_size= 1024; + stats.records = ~(ha_rows)0; // uninitialized + stats.block_size = 1024; - for (i= 0; i < MAX_KEY; i++) - ndb_init_index(m_index[i]); + for (i = 0; i < MAX_KEY; i++) ndb_init_index(m_index[i]); // make sure is initialized init_alloc_root(PSI_INSTRUMENT_ME, &m_fk_mem_root, fk_root_block_size, 0); - m_fk_data= NULL; + m_fk_data = NULL; DBUG_VOID_RETURN; } - /** Destructor for NDB Cluster table handler. */ -ha_ndbcluster::~ha_ndbcluster() -{ - THD *thd= current_thd; - Ndb *ndb= thd ? check_ndb_in_thd(thd) : g_ndb; +ha_ndbcluster::~ha_ndbcluster() { + THD *thd = current_thd; + Ndb *ndb = thd ? check_ndb_in_thd(thd) : g_ndb; DBUG_ENTER("~ha_ndbcluster"); - if (m_share) - { + if (m_share) { // NOTE! Release the m_share acquired in create(), this // violates the normal flow which acquires in open() and // releases in close(). Code path seems unused. 
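The hunks above and below repeatedly use the same defensive idiom when talking to the NDB dictionary: retry a bounded number of times while the error is classified as temporary, sleep between attempts, and give up early if the session has been killed (see the loops in drop_table_impl() and ha_ndbcluster::get_auto_increment()). The following standalone C++ sketch, which is not part of the patch, shows only the shape of that idiom; do_ndb_op, is_temporary_error, client_killed and retry_sleep are hypothetical stand-ins for the real NDB API and THD calls.

#include <cstdio>

// Hypothetical stand-ins for the NDB calls used in the patch; the real code
// checks dict->getNdbError().status == NdbError::TemporaryError and
// thd_killed(thd), and sleeps via ndb_trans_retry_sleep().
static int attempts_left = 3;
static bool do_ndb_op() { return --attempts_left <= 0; }  // succeeds on try 3
static bool is_temporary_error() { return true; }
static bool client_killed() { return false; }
static void retry_sleep() { /* back off between attempts */ }

// Bounded retry loop mirroring drop_table_impl()/get_auto_increment().
static int run_with_retries() {
  unsigned retries = 100;
  while (true) {
    if (do_ndb_op()) return 0;  // success
    if (--retries && is_temporary_error() && !client_killed()) {
      retry_sleep();  // transient problem, try again
      continue;
    }
    return -1;  // permanent error, retries exhausted, or session killed
  }
}

int main() { std::printf("result: %d\n", run_with_retries()); }

The real drop_table_impl() additionally handles one permanent error specially: on NDB_INVALID_SCHEMA_OBJECT it invalidates the cached table object and retries, which the sketch leaves out.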
@@ -12881,11 +11097,10 @@ ha_ndbcluster::~ha_ndbcluster() DBUG_PRINT("info", ("Deleting pushed joins")); DBUG_ASSERT(m_active_query == NULL); DBUG_ASSERT(m_active_cursor == NULL); - if (m_pushed_join_operation==PUSHED_ROOT) - { - delete m_pushed_join_member; // Also delete QueryDef + if (m_pushed_join_operation == PUSHED_ROOT) { + delete m_pushed_join_member; // Also delete QueryDef } - m_pushed_join_member= NULL; + m_pushed_join_member = NULL; /* m_fk_mem_root is already freed inside release_fk_data * called from inside release_metadata. But if get_metadata() @@ -12895,7 +11110,7 @@ ha_ndbcluster::~ha_ndbcluster() * m_fk_mem_root gets released in that case. */ free_root(&m_fk_mem_root, 0); - m_fk_data= NULL; + m_fk_data = NULL; DBUG_VOID_RETURN; } @@ -12912,8 +11127,8 @@ std::string ha_ndbcluster::explain_extra() const { str += std::string(", activating pushed join of ") + std::to_string(pushed_count) + " tables"; } else { - str += std::string(", child of ") + - parent_of_pushed_join()->alias + " in pushed join"; + str += std::string(", child of ") + parent_of_pushed_join()->alias + + " in pushed join"; } } @@ -12935,9 +11150,8 @@ std::string ha_ndbcluster::explain_extra() const { */ int ha_ndbcluster::open(const char *name, int, uint, - const dd::Table* table_def) -{ - THD *thd= current_thd; + const dd::Table *table_def) { + THD *thd = current_thd; int res; KEY *key; KEY_PART_INFO *key_part_info; @@ -12945,88 +11159,73 @@ int ha_ndbcluster::open(const char *name, int, uint, DBUG_ENTER("ha_ndbcluster::open"); DBUG_PRINT("enter", ("name: %s", name)); - if (table_share->primary_key != MAX_KEY) - { + if (table_share->primary_key != MAX_KEY) { /* Setup ref_length to make room for the whole primary key to be written in the ref variable */ - key= table->key_info+table_share->primary_key; - ref_length= key->key_length; - } - else - { - if (m_user_defined_partitioning) - { + key = table->key_info + table_share->primary_key; + ref_length = key->key_length; + } else { + if (m_user_defined_partitioning) { /* Add space for partid in ref */ - ref_length+= sizeof(m_part_id); + ref_length += sizeof(m_part_id); } } DBUG_PRINT("info", ("ref_length: %d", ref_length)); { - char* bitmap_array; - uint extra_hidden_keys= table_share->primary_key != MAX_KEY ? 0 : 1; - uint n_keys= table_share->keys + extra_hidden_keys; - uint ptr_size= sizeof(MY_BITMAP*) * (n_keys + 1 /* null termination */); - uint map_size= sizeof(MY_BITMAP) * n_keys; - m_key_fields= (MY_BITMAP**)my_malloc(PSI_INSTRUMENT_ME, - ptr_size + map_size, - MYF(MY_WME + MY_ZEROFILL)); - if (!m_key_fields) - { + char *bitmap_array; + uint extra_hidden_keys = table_share->primary_key != MAX_KEY ? 
0 : 1; + uint n_keys = table_share->keys + extra_hidden_keys; + uint ptr_size = sizeof(MY_BITMAP *) * (n_keys + 1 /* null termination */); + uint map_size = sizeof(MY_BITMAP) * n_keys; + m_key_fields = (MY_BITMAP **)my_malloc( + PSI_INSTRUMENT_ME, ptr_size + map_size, MYF(MY_WME + MY_ZEROFILL)); + if (!m_key_fields) { local_close(thd, false); DBUG_RETURN(1); - } - bitmap_array= ((char*)m_key_fields) + ptr_size; - for (i= 0; i < n_keys; i++) - { - my_bitmap_map *bitbuf= NULL; - bool is_hidden_key= (i == table_share->keys); - m_key_fields[i]= (MY_BITMAP*)bitmap_array; - if (is_hidden_key || (i == table_share->primary_key)) - { - m_pk_bitmap_p= m_key_fields[i]; - bitbuf= m_pk_bitmap_buf; + } + bitmap_array = ((char *)m_key_fields) + ptr_size; + for (i = 0; i < n_keys; i++) { + my_bitmap_map *bitbuf = NULL; + bool is_hidden_key = (i == table_share->keys); + m_key_fields[i] = (MY_BITMAP *)bitmap_array; + if (is_hidden_key || (i == table_share->primary_key)) { + m_pk_bitmap_p = m_key_fields[i]; + bitbuf = m_pk_bitmap_buf; } - if (bitmap_init(m_key_fields[i], bitbuf, - table_share->fields, false)) - { - m_key_fields[i]= NULL; + if (bitmap_init(m_key_fields[i], bitbuf, table_share->fields, false)) { + m_key_fields[i] = NULL; local_close(thd, false); DBUG_RETURN(1); } - if (!is_hidden_key) - { - key= table->key_info + i; - key_part_info= key->key_part; - key_parts= key->user_defined_key_parts; - for (j= 0; j < key_parts; j++, key_part_info++) - bitmap_set_bit(m_key_fields[i], key_part_info->fieldnr-1); - } - else - { - uint field_no= table_share->fields; - ((uchar *)m_pk_bitmap_buf)[field_no>>3]|= (1 << (field_no & 7)); + if (!is_hidden_key) { + key = table->key_info + i; + key_part_info = key->key_part; + key_parts = key->user_defined_key_parts; + for (j = 0; j < key_parts; j++, key_part_info++) + bitmap_set_bit(m_key_fields[i], key_part_info->fieldnr - 1); + } else { + uint field_no = table_share->fields; + ((uchar *)m_pk_bitmap_buf)[field_no >> 3] |= (1 << (field_no & 7)); } - bitmap_array+= sizeof(MY_BITMAP); + bitmap_array += sizeof(MY_BITMAP); } - m_key_fields[i]= NULL; + m_key_fields[i] = NULL; } set_dbname(name); set_tabname(name); - if ((res= check_ndb_connection(thd)) != 0) - { + if ((res = check_ndb_connection(thd)) != 0) { local_close(thd, false); DBUG_RETURN(res); } // Acquire NDB_SHARE reference for handler m_share = NDB_SHARE::acquire_for_handler(name, this); - if (m_share == nullptr) - { + if (m_share == nullptr) { // NOTE! 
This never happens, the NDB_SHARE should already have been
    // created by schema distribution or auto discovery
    local_close(thd, false);
@@ -13034,23 +11233,19 @@ int ha_ndbcluster::open(const char *name, int, uint,
  }

  // Init table lock structure
-  thr_lock_data_init(&m_share->lock,&m_lock,(void*) 0);
+  thr_lock_data_init(&m_share->lock, &m_lock, (void *)0);

-  if ((res= get_metadata(thd, table_def)))
-  {
+  if ((res = get_metadata(thd, table_def))) {
    local_close(thd, false);
    DBUG_RETURN(res);
  }

-  if ((res= update_stats(thd, 1)) ||
-      (res= info(HA_STATUS_CONST)))
-  {
+  if ((res = update_stats(thd, 1)) || (res = info(HA_STATUS_CONST))) {
    local_close(thd, true);
    DBUG_RETURN(res);
  }

-  if (ndb_binlog_is_read_only())
-  {
-    table->db_stat|= HA_READ_ONLY;
+  if (ndb_binlog_is_read_only()) {
+    table->db_stat |= HA_READ_ONLY;
    ndb_log_info("table '%s' opened read only", name);
  }
  DBUG_RETURN(0);
@@ -13061,94 +11256,70 @@ int ha_ndbcluster::open(const char *name, int, uint,
 * reclaims unused space of deleted rows
 * and updates index statistics
 */
-int ha_ndbcluster::optimize(THD* thd, HA_CHECK_OPT*)
-{
-  ulong error, stats_error= 0;
-  const uint delay= (uint)THDVAR(thd, optimization_delay);
+int ha_ndbcluster::optimize(THD *thd, HA_CHECK_OPT *) {
+  ulong error, stats_error = 0;
+  const uint delay = (uint)THDVAR(thd, optimization_delay);

-  error= ndb_optimize_table(thd, delay);
-  stats_error= update_stats(thd, 1);
+  error = ndb_optimize_table(thd, delay);
+  stats_error = update_stats(thd, 1);
  return (error) ? error : stats_error;
}

-int ha_ndbcluster::ndb_optimize_table(THD* thd, uint delay) const
-{
-  Thd_ndb *thd_ndb= get_thd_ndb(thd);
-  Ndb *ndb= thd_ndb->ndb;
-  NDBDICT *dict= ndb->getDictionary();
-  int result=0, error= 0;
+int ha_ndbcluster::ndb_optimize_table(THD *thd, uint delay) const {
+  Thd_ndb *thd_ndb = get_thd_ndb(thd);
+  Ndb *ndb = thd_ndb->ndb;
+  NDBDICT *dict = ndb->getDictionary();
+  int result = 0, error = 0;
  uint i;
  NdbDictionary::OptimizeTableHandle th;
  NdbDictionary::OptimizeIndexHandle ih;

  DBUG_ENTER("ndb_optimize_table");
-  if ((error= dict->optimizeTable(*m_table, th)))
-  {
-    DBUG_PRINT("info",
-               ("Optimze table %s returned %d", m_tabname, error));
+  if ((error = dict->optimizeTable(*m_table, th))) {
+    DBUG_PRINT("info", ("Optimize table %s returned %d", m_tabname, error));
    ERR_RETURN(ndb->getNdbError());
  }
-  while((result= th.next()) == 1)
-  {
-    if (thd->killed)
-      DBUG_RETURN(-1);
+  while ((result = th.next()) == 1) {
+    if (thd->killed) DBUG_RETURN(-1);
    ndb_milli_sleep(delay);
  }
-  if (result == -1 || th.close() == -1)
-  {
-    DBUG_PRINT("info",
-               ("Optimize table %s did not complete", m_tabname));
+  if (result == -1 || th.close() == -1) {
+    DBUG_PRINT("info", ("Optimize table %s did not complete", m_tabname));
    ERR_RETURN(ndb->getNdbError());
  };
-  for (i= 0; i < MAX_KEY; i++)
-  {
-    if (thd->killed)
-      DBUG_RETURN(-1);
-    if (m_index[i].status == NDB_INDEX_DATA::ACTIVE)
-    {
-      const NdbDictionary::Index *index= m_index[i].index;
-      const NdbDictionary::Index *unique_index= m_index[i].unique_index;
-
-      if (index)
-      {
-        if ((error= dict->optimizeIndex(*index, ih)))
-        {
-          DBUG_PRINT("info",
-                     ("Optimze index %s returned %d",
-                      index->getName(), error));
+  for (i = 0; i < MAX_KEY; i++) {
+    if (thd->killed) DBUG_RETURN(-1);
+    if (m_index[i].status == NDB_INDEX_DATA::ACTIVE) {
+      const NdbDictionary::Index *index = m_index[i].index;
+      const NdbDictionary::Index *unique_index = m_index[i].unique_index;
+
+      if (index) {
+        if ((error = dict->optimizeIndex(*index, ih))) {
+          DBUG_PRINT("info",
+                     ("Optimize index %s returned %d", index->getName(), error));
          ERR_RETURN(ndb->getNdbError());
-        }
-        while((result= ih.next()) == 1)
-        {
-          if (thd->killed)
-            DBUG_RETURN(-1);
+        }
+        while ((result = ih.next()) == 1) {
+          if (thd->killed) DBUG_RETURN(-1);
          ndb_milli_sleep(delay);
        }
-        if (result == -1 || ih.close() == -1)
-        {
+        if (result == -1 || ih.close() == -1) {
          DBUG_PRINT("info",
                     ("Optimize index %s did not complete", index->getName()));
          ERR_RETURN(ndb->getNdbError());
-        }
+        }
      }
-      if (unique_index)
-      {
-        if ((error= dict->optimizeIndex(*unique_index, ih)))
-        {
-          DBUG_PRINT("info",
-                     ("Optimze unique index %s returned %d",
-                      unique_index->getName(), error));
+      if (unique_index) {
+        if ((error = dict->optimizeIndex(*unique_index, ih))) {
+          DBUG_PRINT("info", ("Optimize unique index %s returned %d",
+                              unique_index->getName(), error));
          ERR_RETURN(ndb->getNdbError());
-        }
-        while((result= ih.next()) == 1)
-        {
-          if (thd->killed)
-            DBUG_RETURN(-1);
+        }
+        while ((result = ih.next()) == 1) {
+          if (thd->killed) DBUG_RETURN(-1);
          ndb_milli_sleep(delay);
        }
-        if (result == -1 || ih.close() == -1)
-        {
+        if (result == -1 || ih.close() == -1) {
          DBUG_PRINT("info",
                     ("Optimize index %s did not complete", index->getName()));
          ERR_RETURN(ndb->getNdbError());
@@ -13159,23 +11330,20 @@ int ha_ndbcluster::ndb_optimize_table(THD* thd, uint delay) const
  DBUG_RETURN(0);
}

-int ha_ndbcluster::analyze(THD* thd, HA_CHECK_OPT*)
-{
+int ha_ndbcluster::analyze(THD *thd, HA_CHECK_OPT *) {
  DBUG_ENTER("ha_ndbcluster::analyze");

  // update table partition statistics
  int error = update_stats(thd, 1);

  // analyze index if index stat is enabled
-  if (error == 0 &&
-      THDVAR(NULL, index_stat_enable) && THDVAR(thd, index_stat_enable))
-  {
+  if (error == 0 && THDVAR(NULL, index_stat_enable) &&
+      THDVAR(thd, index_stat_enable)) {
    error = analyze_index();
  }

  // handle any errors
-  if (error != 0)
-  {
+  if (error != 0) {
    // Push the ndb error into stack before returning
    NdbError ndberr = (get_ndb(thd))->getNdbError(error);
    my_error(ER_GET_ERRMSG, MYF(0), error, ndberr.message, "NDB");
@@ -13184,32 +11352,24 @@ int ha_ndbcluster::analyze(THD* thd, HA_CHECK_OPT*)
  DBUG_RETURN(0);
}

-int
-ha_ndbcluster::analyze_index()
-{
+int ha_ndbcluster::analyze_index() {
  DBUG_ENTER("ha_ndbcluster::analyze_index");

  uint inx_list[MAX_INDEXES];
-  uint inx_count= 0;
+  uint inx_count = 0;

-  for (uint inx= 0; inx < table_share->keys; inx++)
-  {
-    NDB_INDEX_TYPE idx_type= get_index_type(inx);
+  for (uint inx = 0; inx < table_share->keys; inx++) {
+    NDB_INDEX_TYPE idx_type = get_index_type(inx);
    if ((idx_type == PRIMARY_KEY_ORDERED_INDEX ||
-         idx_type == UNIQUE_ORDERED_INDEX ||
-         idx_type == ORDERED_INDEX))
-    {
-      if (inx_count < MAX_INDEXES)
-        inx_list[inx_count++]= inx;
+         idx_type == UNIQUE_ORDERED_INDEX || idx_type == ORDERED_INDEX)) {
+      if (inx_count < MAX_INDEXES) inx_list[inx_count++] = inx;
    }
  }

-  if (inx_count != 0)
-  {
-    int err= ndb_index_stat_analyze(inx_list, inx_count);
-    if (err != 0)
-      DBUG_RETURN(err);
+  if (inx_count != 0) {
+    int err = ndb_index_stat_analyze(inx_list, inx_count);
+    if (err != 0) DBUG_RETURN(err);
  }
  DBUG_RETURN(0);
}
@@ -13228,29 +11388,25 @@ ha_ndbcluster::analyze_index()
  Set up partition info when handler object created
*/

-void ha_ndbcluster::set_part_info(partition_info *part_info, bool early)
-{
+void ha_ndbcluster::set_part_info(partition_info *part_info, bool early) {
  DBUG_ENTER("ha_ndbcluster::set_part_info");
-  m_part_info= part_info;
-  if (!early)
-  {
-    m_use_partition_pruning= false;
+  m_part_info = part_info;
+  if (!early) {
+    m_use_partition_pruning =
false; if (!(m_part_info->part_type == partition_type::HASH && m_part_info->list_of_part_fields && - !m_part_info->is_sub_partitioned())) - { + !m_part_info->is_sub_partitioned())) { /* PARTITION BY HASH, RANGE and LIST plus all subpartitioning variants all use MySQL defined partitioning. PARTITION BY KEY uses NDB native partitioning scheme. */ - m_use_partition_pruning= true; - m_user_defined_partitioning= true; + m_use_partition_pruning = true; + m_user_defined_partitioning = true; } if (m_part_info->part_type == partition_type::HASH && m_part_info->list_of_part_fields && - m_part_info->num_full_part_fields == 0) - { + m_part_info->num_full_part_fields == 0) { /* CREATE TABLE t (....) ENGINE NDB PARTITON BY KEY(); where no primary key is defined uses a hidden key as partition field @@ -13261,10 +11417,10 @@ void ha_ndbcluster::set_part_info(partition_info *part_info, bool early) scans given that the hidden key is unknown. In write_row, update_row, and delete_row the normal hidden key handling will fix things. */ - m_use_partition_pruning= false; + m_use_partition_pruning = false; } - DBUG_PRINT("info", ("m_use_partition_pruning = %d", - m_use_partition_pruning)); + DBUG_PRINT("info", + ("m_use_partition_pruning = %d", m_use_partition_pruning)); } DBUG_VOID_RETURN; } @@ -13274,29 +11430,23 @@ void ha_ndbcluster::set_part_info(partition_info *part_info, bool early) - release resources setup by open() */ -void ha_ndbcluster::local_close(THD *thd, bool release_metadata_flag) -{ +void ha_ndbcluster::local_close(THD *thd, bool release_metadata_flag) { Ndb *ndb; DBUG_ENTER("ha_ndbcluster::local_close"); - if (m_key_fields) - { + if (m_key_fields) { MY_BITMAP **inx_bitmap; - for (inx_bitmap= m_key_fields; - (inx_bitmap != NULL) && ((*inx_bitmap) != NULL); - inx_bitmap++) - if ((*inx_bitmap)->bitmap != m_pk_bitmap_buf) - bitmap_free(*inx_bitmap); + for (inx_bitmap = m_key_fields; + (inx_bitmap != NULL) && ((*inx_bitmap) != NULL); inx_bitmap++) + if ((*inx_bitmap)->bitmap != m_pk_bitmap_buf) bitmap_free(*inx_bitmap); my_free(m_key_fields); - m_key_fields= NULL; + m_key_fields = NULL; } - if (m_share) - { + if (m_share) { NDB_SHARE::release_for_handler(m_share, this); m_share = nullptr; } - if (release_metadata_flag) - { - ndb= thd ? check_ndb_in_thd(thd) : g_ndb; + if (release_metadata_flag) { + ndb = thd ? 
check_ndb_in_thd(thd) : g_ndb; release_metadata(thd, ndb); } @@ -13307,43 +11457,34 @@ void ha_ndbcluster::local_close(THD *thd, bool release_metadata_flag) DBUG_VOID_RETURN; } -int ha_ndbcluster::close(void) -{ +int ha_ndbcluster::close(void) { DBUG_ENTER("close"); - THD *thd= table->in_use; + THD *thd = table->in_use; local_close(thd, true); DBUG_RETURN(0); } - -int ha_ndbcluster::check_ndb_connection(THD* thd) const -{ +int ha_ndbcluster::check_ndb_connection(THD *thd) const { Ndb *ndb; DBUG_ENTER("check_ndb_connection"); - - if (!(ndb= check_ndb_in_thd(thd, true))) - DBUG_RETURN(HA_ERR_NO_CONNECTION); - if (ndb->setDatabaseName(m_dbname)) - { + + if (!(ndb = check_ndb_in_thd(thd, true))) DBUG_RETURN(HA_ERR_NO_CONNECTION); + if (ndb->setDatabaseName(m_dbname)) { ERR_RETURN(ndb->getNdbError()); } DBUG_RETURN(0); } - -static int ndbcluster_close_connection(handlerton*, THD *thd) -{ - Thd_ndb *thd_ndb= get_thd_ndb(thd); +static int ndbcluster_close_connection(handlerton *, THD *thd) { + Thd_ndb *thd_ndb = get_thd_ndb(thd); DBUG_ENTER("ndbcluster_close_connection"); - if (thd_ndb) - { + if (thd_ndb) { Thd_ndb::release(thd_ndb); thd_set_thd_ndb(thd, NULL); } DBUG_RETURN(0); } - /** Try to discover one table from NDB. Return the "serialized table definition". @@ -13366,21 +11507,18 @@ static int ndbcluster_close_connection(handlerton*, THD *thd) ER_NO_SUCH_TABLE error */ -static -int ndbcluster_discover(handlerton*, THD* thd, - const char *db, const char *name, - uchar **frmblob, - size_t *frmlen) -{ +static int ndbcluster_discover(handlerton *, THD *thd, const char *db, + const char *name, uchar **frmblob, + size_t *frmlen) { DBUG_ENTER("ndbcluster_discover"); DBUG_PRINT("enter", ("db: %s, name: %s", db, name)); - Ndb* ndb = check_ndb_in_thd(thd); - if (ndb == nullptr) - { + Ndb *ndb = check_ndb_in_thd(thd); + if (ndb == nullptr) { push_warning_printf(thd, Sql_condition::SL_WARNING, ER_GET_ERRMSG, "Failed to discover table '%s' from NDB, could not " - "connect to storage engine", name); + "connect to storage engine", + name); DBUG_RETURN(1); } Thd_ndb *thd_ndb = get_thd_ndb(thd); @@ -13393,31 +11531,28 @@ int ndbcluster_discover(handlerton*, THD* thd, // Temporary workaround for Bug 27543602 if (strcmp("mysql", db) == 0 && (strcmp("ndb_index_stat_head", name) == 0 || - strcmp("ndb_index_stat_sample", name) == 0)) - { - thd_ndb->push_warning("The table '%s' exists but cannot be installed into " - "DD. The table can still be accessed using NDB tools", - name); + strcmp("ndb_index_stat_sample", name) == 0)) { + thd_ndb->push_warning( + "The table '%s' exists but cannot be installed into " + "DD. 
The table can still be accessed using NDB tools", + name); DBUG_RETURN(1); } #endif - if (ndb->setDatabaseName(db)) - { + if (ndb->setDatabaseName(db)) { thd_ndb->push_ndb_error_warning(ndb->getNdbError()); thd_ndb->push_warning("Failed to discover table '%s' from NDB", name); DBUG_RETURN(1); } - NDBDICT* dict= ndb->getDictionary(); + NDBDICT *dict = ndb->getDictionary(); Ndb_table_guard ndbtab_g(dict, name); - const NDBTAB *ndbtab= ndbtab_g.get_table(); - if (ndbtab == nullptr) - { + const NDBTAB *ndbtab = ndbtab_g.get_table(); + if (ndbtab == nullptr) { // Could not open the table from NDB - const NdbError err= dict->getNdbError(); - if (err.code == 709 || err.code == 723) - { + const NdbError err = dict->getNdbError(); + if (err.code == 709 || err.code == 723) { // Got the normal 'No such table existed' DBUG_PRINT("info", ("No such table, error: %u", err.code)); DBUG_RETURN(1); @@ -13432,90 +11567,80 @@ int ndbcluster_discover(handlerton*, THD* thd, // Magically detect which context this function is called in by // checking which kind of metadata locks are held on the table name. - if (!thd->mdl_context.owns_equal_or_stronger_lock(MDL_key::TABLE, - db, - name, - MDL_EXCLUSIVE)) - { + if (!thd->mdl_context.owns_equal_or_stronger_lock(MDL_key::TABLE, db, name, + MDL_EXCLUSIVE)) { // No exclusive MDL lock, this is ha_check_if_table_exists, just // return a dummy frmblob to indicate that table exists DBUG_PRINT("info", ("return dummy exists for ha_check_if_table_exists()")); - *frmlen= 37; - *frmblob= (uchar*)my_malloc(PSI_NOT_INSTRUMENTED, - *frmlen, - MYF(0)); - DBUG_RETURN(0); // Table exists + *frmlen = 37; + *frmblob = (uchar *)my_malloc(PSI_NOT_INSTRUMENTED, *frmlen, MYF(0)); + DBUG_RETURN(0); // Table exists } DBUG_PRINT("info", ("table exists, check if it can also be discovered")); // 2) Assume that exclusive MDL lock is held on the table at this point - DBUG_ASSERT(thd->mdl_context.owns_equal_or_stronger_lock(MDL_key::TABLE, - db, - name, - MDL_EXCLUSIVE)); + DBUG_ASSERT(thd->mdl_context.owns_equal_or_stronger_lock( + MDL_key::TABLE, db, name, MDL_EXCLUSIVE)); // Don't allow discover unless schema distribution is ready and // "schema synchronization" have completed(which currently can be // checked using ndb_binlog_is_read_only()). 
The user who wants to use // this table simply has to wait - if (!Ndb_schema_dist::is_ready(thd) || - ndb_binlog_is_read_only()) - { + if (!Ndb_schema_dist::is_ready(thd) || ndb_binlog_is_read_only()) { // Can't discover, schema distribution is not ready - thd_ndb->push_warning("Failed to discover table '%s' from NDB, schema " - "distribution is not ready", name); + thd_ndb->push_warning( + "Failed to discover table '%s' from NDB, schema " + "distribution is not ready", + name); my_error(ER_NO_SUCH_TABLE, MYF(0), db, name); DBUG_RETURN(1); } { Uint32 version; - void* unpacked_data; + void *unpacked_data; Uint32 unpacked_len; - if (ndbtab->getExtraMetadata(version, - &unpacked_data, &unpacked_len) != 0) - { - thd_ndb->push_warning("Failed to discover table '%s' from NDB, could not " - "get extra metadata", name); + if (ndbtab->getExtraMetadata(version, &unpacked_data, &unpacked_len) != 0) { + thd_ndb->push_warning( + "Failed to discover table '%s' from NDB, could not " + "get extra metadata", + name); my_error(ER_NO_SUCH_TABLE, MYF(0), db, name); DBUG_RETURN(1); } Ndb_dd_client dd_client(thd); - if (version == 1) - { + if (version == 1) { // Upgrade the "old" metadata and install the table into DD, // don't use force_overwrite since this function would never // have been called unless the table didn't exist - if (!dd_client.migrate_table(db, name, static_cast - (unpacked_data), unpacked_len, false)) - { - thd_ndb->push_warning("Failed to discover table '%s' from NDB, could " - "not upgrade table with extra metadata version 1", - name); + if (!dd_client.migrate_table( + db, name, static_cast(unpacked_data), + unpacked_len, false)) { + thd_ndb->push_warning( + "Failed to discover table '%s' from NDB, could " + "not upgrade table with extra metadata version 1", + name); my_error(ER_NO_SUCH_TABLE, MYF(0), db, name); ndbtab_g.invalidate(); free(unpacked_data); DBUG_RETURN(1); } - } - else - { + } else { // Assign the unpacked data to sdi_t(which is string data type) dd::sdi_t sdi; - sdi.assign(static_cast(unpacked_data), unpacked_len); + sdi.assign(static_cast(unpacked_data), unpacked_len); const std::string tablespace_name = ndb_table_tablespace_name(dict, ndbtab); - if (!tablespace_name.empty()) - { + if (!tablespace_name.empty()) { // Acquire IX MDL on tablespace - if (!dd_client.mdl_lock_tablespace(tablespace_name.c_str(), true)) - { - thd_ndb->push_warning("Failed to discover table '%s' from NDB, could " - "not acquire metadata lock on tablespace '%s'", - name, tablespace_name.c_str()); + if (!dd_client.mdl_lock_tablespace(tablespace_name.c_str(), true)) { + thd_ndb->push_warning( + "Failed to discover table '%s' from NDB, could " + "not acquire metadata lock on tablespace '%s'", + name, tablespace_name.c_str()); my_error(ER_NO_SUCH_TABLE, MYF(0), db, name); ndbtab_g.invalidate(); free(unpacked_data); @@ -13525,14 +11650,14 @@ int ndbcluster_discover(handlerton*, THD* thd, // Install the table into DD, use force_overwrite since this function // may be called both for non existent table as well as for metadata // version mismatch - if (!dd_client.install_table(db, name, sdi, ndbtab->getObjectId(), - ndbtab->getObjectVersion(), - ndbtab->getPartitionCount(), tablespace_name, - true)) - { + if (!dd_client.install_table( + db, name, sdi, ndbtab->getObjectId(), ndbtab->getObjectVersion(), + ndbtab->getPartitionCount(), tablespace_name, true)) { // Table existed in NDB but it could not be inserted into DD - thd_ndb->push_warning("Failed to discover table '%s' from NDB, could " - "not install 
table in DD", name); + thd_ndb->push_warning( + "Failed to discover table '%s' from NDB, could " + "not install table in DD", + name); my_error(ER_NO_SUCH_TABLE, MYF(0), db, name); ndbtab_g.invalidate(); free(unpacked_data); @@ -13549,93 +11674,77 @@ int ndbcluster_discover(handlerton*, THD* thd, // and has been installed into DD DBUG_PRINT("info", ("no sdi returned for ha_create_table_from_engine() " "since the table definition is already installed")); - *frmlen= 0; - *frmblob= nullptr; + *frmlen = 0; + *frmblob = nullptr; DBUG_RETURN(0); } - /** Check if a table exists in NDB. */ -static -int ndbcluster_table_exists_in_engine(handlerton*, THD* thd, - const char *db, - const char *name) -{ - Ndb* ndb; +static int ndbcluster_table_exists_in_engine(handlerton *, THD *thd, + const char *db, const char *name) { + Ndb *ndb; DBUG_ENTER("ndbcluster_table_exists_in_engine"); DBUG_PRINT("enter", ("db: %s name: %s", db, name)); - if (!(ndb= check_ndb_in_thd(thd))) - DBUG_RETURN(HA_ERR_NO_CONNECTION); + if (!(ndb = check_ndb_in_thd(thd))) DBUG_RETURN(HA_ERR_NO_CONNECTION); - const Thd_ndb* thd_ndb = get_thd_ndb(thd); + const Thd_ndb *thd_ndb = get_thd_ndb(thd); if (thd_ndb->check_option(Thd_ndb::CREATE_UTIL_TABLE)) { DBUG_PRINT("exit", ("Simulate that table does not exist in NDB")); DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); } - NDBDICT* dict= ndb->getDictionary(); + NDBDICT *dict = ndb->getDictionary(); NdbDictionary::Dictionary::List list; - if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0) - { + if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0) { ndb_to_mysql_error(&dict->getNdbError()); DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); } - for (uint i= 0 ; i < list.count ; i++) - { - NdbDictionary::Dictionary::List::Element& elmt= list.elements[i]; - if (my_strcasecmp(table_alias_charset, elmt.database, db)) - continue; - if (my_strcasecmp(table_alias_charset, elmt.name, name)) - continue; + for (uint i = 0; i < list.count; i++) { + NdbDictionary::Dictionary::List::Element &elmt = list.elements[i]; + if (my_strcasecmp(table_alias_charset, elmt.database, db)) continue; + if (my_strcasecmp(table_alias_charset, elmt.name, name)) continue; DBUG_PRINT("info", ("Found table")); DBUG_RETURN(HA_ERR_TABLE_EXIST); } DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); } - /** Drop a database and all its tables from NDB */ static int ndbcluster_drop_database_impl( - THD *thd, Ndb_schema_dist_client& schema_dist_client, const char *path) -{ + THD *thd, Ndb_schema_dist_client &schema_dist_client, const char *path) { DBUG_ENTER("ndbcluster_drop_database_impl"); char dbname[FN_HEADLEN]; - Ndb* ndb; + Ndb *ndb; NdbDictionary::Dictionary::List list; uint i; char *tabname; List drop_list; - int ret= 0; + int ret = 0; ha_ndbcluster::set_dbname(path, (char *)&dbname); DBUG_PRINT("enter", ("db: %s", dbname)); - - if (!(ndb= check_ndb_in_thd(thd))) - DBUG_RETURN(-1); - + + if (!(ndb = check_ndb_in_thd(thd))) DBUG_RETURN(-1); + // List tables in NDB - NDBDICT *dict= ndb->getDictionary(); - if (dict->listObjects(list, - NdbDictionary::Object::UserTable) != 0) - { - const NdbError err= dict->getNdbError(); - if (err.code == 4008 || err.code == 4012) - { - ret= ndb_to_mysql_error(&err); + NDBDICT *dict = ndb->getDictionary(); + if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0) { + const NdbError err = dict->getNdbError(); + if (err.code == 4008 || err.code == 4012) { + ret = ndb_to_mysql_error(&err); } DBUG_RETURN(ret); } - for (i= 0 ; i < list.count ; i++) - { - NdbDictionary::Dictionary::List::Element& 
elmt= list.elements[i]; - DBUG_PRINT("info", ("Found %s/%s in NDB", elmt.database, elmt.name)); - + for (i = 0; i < list.count; i++) { + NdbDictionary::Dictionary::List::Element &elmt = list.elements[i]; + DBUG_PRINT("info", ("Found %s/%s in NDB", elmt.database, elmt.name)); + // Add only tables that belongs to db // Ignore Blob part tables - they are deleted when their table // is deleted. @@ -13643,28 +11752,25 @@ static int ndbcluster_drop_database_impl( ndb_name_is_blob_prefix(elmt.name) || ndb_fk_util_is_mock_name(elmt.name)) continue; - DBUG_PRINT("info", ("%s must be dropped", elmt.name)); + DBUG_PRINT("info", ("%s must be dropped", elmt.name)); drop_list.push_back(thd->mem_strdup(elmt.name)); } // Drop any tables belonging to database char full_path[FN_REFLEN + 1]; - char *tmp= full_path + - build_table_filename(full_path, sizeof(full_path) - 1, dbname, "", "", 0); - if (ndb->setDatabaseName(dbname)) - { + char *tmp = full_path + build_table_filename(full_path, sizeof(full_path) - 1, + dbname, "", "", 0); + if (ndb->setDatabaseName(dbname)) { ERR_RETURN(ndb->getNdbError()); } List_iterator_fast it(drop_list); - while ((tabname=it++)) - { - tablename_to_filename(tabname, tmp, (uint)(FN_REFLEN - (tmp - full_path)-1)); - if (drop_table_impl(thd, ndb, &schema_dist_client, - full_path, dbname, tabname)) - { - const NdbError err= dict->getNdbError(); - if (err.code != 709 && err.code != 723) - { - ret= ndb_to_mysql_error(&err); + while ((tabname = it++)) { + tablename_to_filename(tabname, tmp, + (uint)(FN_REFLEN - (tmp - full_path) - 1)); + if (drop_table_impl(thd, ndb, &schema_dist_client, full_path, dbname, + tabname)) { + const NdbError err = dict->getNdbError(); + if (err.code != 709 && err.code != 723) { + ret = ndb_to_mysql_error(&err); } } } @@ -13673,10 +11779,8 @@ static int ndbcluster_drop_database_impl( DBUG_RETURN(ret); } - -static void ndbcluster_drop_database(handlerton*, char *path) -{ - THD *thd= current_thd; +static void ndbcluster_drop_database(handlerton *, char *path) { + THD *thd = current_thd; DBUG_ENTER("ndbcluster_drop_database"); DBUG_PRINT("enter", ("path: '%s'", path)); @@ -13684,19 +11788,16 @@ static void ndbcluster_drop_database(handlerton*, char *path) ndb_set_dbname(path, db); Ndb_schema_dist_client schema_dist_client(thd); - if (!schema_dist_client.prepare(db, "")) - { + if (!schema_dist_client.prepare(db, "")) { /* Don't allow drop database unless schema distribution is ready */ DBUG_VOID_RETURN; } - if (ndbcluster_drop_database_impl(thd, schema_dist_client, path) != 0) - { + if (ndbcluster_drop_database_impl(thd, schema_dist_client, path) != 0) { DBUG_VOID_RETURN; } - if (!schema_dist_client.drop_db(db)) - { + if (!schema_dist_client.drop_db(db)) { // NOTE! There is currently no way to report an error from this // function, just log an error and proceed ndb_log_error("Failed to distribute 'DROP DATABASE %s'", db); @@ -13704,14 +11805,12 @@ static void ndbcluster_drop_database(handlerton*, char *path) DBUG_VOID_RETURN; } - /** Check if the given table is a system table which is supported to store in NDB */ -static bool is_supported_system_table(const char *, const char *, bool) -{ +static bool is_supported_system_table(const char *, const char *, bool) { /* It is not currently supported to store any standard system tables in NDB. 
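ndbcluster_drop_database_impl() above builds its drop list by filtering the dictionary's UserTable listing: only tables that belong to the dropped database are kept, while blob part tables (which are deleted together with their parent table) and foreign-key mock tables are skipped. The standalone C++ sketch below, which is not part of the patch, shows only that selection step; the name prefixes tested here are assumptions standing in for the real ndb_name_is_blob_prefix() and ndb_fk_util_is_mock_name() helpers, and the exact comparison in the patch goes through my_strcasecmp().

#include <cstdio>
#include <string>
#include <vector>

// Hypothetical stand-ins for the patch's name classification helpers.
static bool is_blob_prefix(const std::string &n) {
  return n.rfind("NDB$BLOB", 0) == 0;  // assumed blob part table prefix
}
static bool is_mock_name(const std::string &n) {
  return n.rfind("NDB$FKM", 0) == 0;  // assumed FK mock table prefix
}

struct Element {
  std::string database, name;  // one entry from dict->listObjects()
};

// Mirrors the selection loop in ndbcluster_drop_database_impl(): keep only
// user tables of the dropped database, skip NDB-internal helper tables.
static std::vector<std::string> build_drop_list(
    const std::vector<Element> &list, const std::string &dbname) {
  std::vector<std::string> drop_list;
  for (const Element &elmt : list) {
    if (elmt.database != dbname) continue;
    if (is_blob_prefix(elmt.name) || is_mock_name(elmt.name)) continue;
    drop_list.push_back(elmt.name);
  }
  return drop_list;
}

int main() {
  const std::vector<Element> list = {
      {"db1", "t1"}, {"db1", "NDB$BLOB_12_3"}, {"db2", "t2"}, {"db1", "t3"}};
  for (const std::string &name : build_drop_list(list, "db1"))
    std::printf("drop %s\n", name.c_str());  // prints t1 and t3
}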
@@ -13719,46 +11818,39 @@ static bool is_supported_system_table(const char *, const char *, bool) return false; } - /* Call back after cluster connect */ -static int connect_callback() -{ +static int connect_callback() { mysql_mutex_lock(&ndbcluster_mutex); - update_status_variables(NULL, &g_ndb_status, - g_ndb_cluster_connection); + update_status_variables(NULL, &g_ndb_status, g_ndb_cluster_connection); mysql_cond_broadcast(&ndbcluster_cond); mysql_mutex_unlock(&ndbcluster_mutex); return 0; } -bool ndbcluster_is_connected(uint max_wait_sec) -{ +bool ndbcluster_is_connected(uint max_wait_sec) { mysql_mutex_lock(&ndbcluster_mutex); - bool connected= - !(!g_ndb_status.cluster_node_id && ndbcluster_hton->slot != ~(uint)0); + bool connected = + !(!g_ndb_status.cluster_node_id && ndbcluster_hton->slot != ~(uint)0); - if (!connected) - { + if (!connected) { /* ndb not connected yet */ struct timespec abstime; set_timespec(&abstime, max_wait_sec); mysql_cond_timedwait(&ndbcluster_cond, &ndbcluster_mutex, &abstime); - connected= - !(!g_ndb_status.cluster_node_id && ndbcluster_hton->slot != ~(uint)0); + connected = + !(!g_ndb_status.cluster_node_id && ndbcluster_hton->slot != ~(uint)0); } mysql_mutex_unlock(&ndbcluster_mutex); return connected; } - Ndb_index_stat_thread ndb_index_stat_thread; Ndb_metadata_change_monitor ndb_metadata_change_monitor_thread; -extern THD * ndb_create_thd(char * stackptr); +extern THD *ndb_create_thd(char *stackptr); -static int ndb_wait_setup_func(ulong max_wait) -{ +static int ndb_wait_setup_func(ulong max_wait) { DBUG_ENTER("ndb_wait_setup_func"); mysql_mutex_lock(&ndbcluster_mutex); @@ -13767,23 +11859,16 @@ static int ndb_wait_setup_func(ulong max_wait) set_timespec(&abstime, 1); while (max_wait && - (!ndb_setup_complete || !ndb_index_stat_thread.is_setup_complete())) - { - const int rc= mysql_cond_timedwait(&ndbcluster_cond, - &ndbcluster_mutex, - &abstime); - if (rc) - { - if (rc == ETIMEDOUT) - { + (!ndb_setup_complete || !ndb_index_stat_thread.is_setup_complete())) { + const int rc = + mysql_cond_timedwait(&ndbcluster_cond, &ndbcluster_mutex, &abstime); + if (rc) { + if (rc == ETIMEDOUT) { DBUG_PRINT("info", ("1s elapsed waiting")); - max_wait--; - set_timespec(&abstime, 1); /* 1 second from now*/ - } - else - { - DBUG_PRINT("info", ("Bad mysql_cond_timedwait rc : %u", - rc)); + max_wait--; + set_timespec(&abstime, 1); /* 1 second from now*/ + } else { + DBUG_PRINT("info", ("Bad mysql_cond_timedwait rc : %u", rc)); assert(false); break; } @@ -13792,53 +11877,48 @@ static int ndb_wait_setup_func(ulong max_wait) mysql_mutex_unlock(&ndbcluster_mutex); - DBUG_RETURN((ndb_setup_complete == 1)? 0 : 1); + DBUG_RETURN((ndb_setup_complete == 1) ? 0 : 1); } - /* Function installed as server hook to be called just before connections are allowed. Wait for --ndb-wait-setup= seconds for ndbcluster connect to NDB and complete setup. */ -static int -ndb_wait_setup_server_startup(void*) -{ +static int ndb_wait_setup_server_startup(void *) { // Signal components that server is started ndb_index_stat_thread.set_server_started(); ndbcluster_binlog_set_server_started(); ndb_metadata_change_monitor_thread.set_server_started(); - if (ndb_wait_setup_func(opt_ndb_wait_setup) != 0) - { - ndb_log_error("Tables not available after %lu seconds. Consider " - "increasing --ndb-wait-setup value", opt_ndb_wait_setup); + if (ndb_wait_setup_func(opt_ndb_wait_setup) != 0) { + ndb_log_error( + "Tables not available after %lu seconds. 
Consider " + "increasing --ndb-wait-setup value", + opt_ndb_wait_setup); } - return 0; // NOTE! return value ignored by caller + return 0; // NOTE! return value ignored by caller } - /* Function installed as server hook to be called before the applier thread starts. Wait --ndb-wait-setup= seconds for ndbcluster connect to NDB and complete setup. */ -static int -ndb_wait_setup_replication_applier(void*) -{ - if (ndb_wait_setup_func(opt_ndb_wait_setup) != 0) - { - ndb_log_error("NDB Slave: Tables not available after %lu seconds. Consider " - "increasing --ndb-wait-setup value", opt_ndb_wait_setup); +static int ndb_wait_setup_replication_applier(void *) { + if (ndb_wait_setup_func(opt_ndb_wait_setup) != 0) { + ndb_log_error( + "NDB Slave: Tables not available after %lu seconds. Consider " + "increasing --ndb-wait-setup value", + opt_ndb_wait_setup); } - return 0; // NOTE! could return error to fail applier + return 0; // NOTE! could return error to fail applier } static Ndb_server_hooks ndb_server_hooks; - /** Callback handling the notification of ALTER TABLE start and end on the given key. The function locks or unlocks the GSL thus @@ -13872,8 +11952,7 @@ static bool ndbcluster_notify_alter_table( bool victimized; bool result; - do - { + do { result = ndb_gsl_lock(thd, notification == HA_NOTIFY_PRE_EVENT, false /* is_tablespace */, &victimized); if (result && thd_killed(thd)) { @@ -13891,8 +11970,7 @@ static bool ndbcluster_notify_alter_table( */ DBUG_RETURN(false); } - } - while (victimized); + } while (victimized); DBUG_RETURN(result); } @@ -13948,10 +12026,9 @@ static bool ndbcluster_notify_exclusive_mdl(THD *thd, const MDL_key *mdl_key, @return True if types are compatible, False if not. */ -static bool -ndbcluster_check_fk_column_compat(const Ha_fk_column_type *child_column_type, - const Ha_fk_column_type *parent_column_type, - bool /* check_charsets */) { +static bool ndbcluster_check_fk_column_compat( + const Ha_fk_column_type *child_column_type, + const Ha_fk_column_type *parent_column_type, bool /* check_charsets */) { NDBCOL child_col, parent_col; create_ndb_fk_fake_column(child_col, *child_column_type); @@ -13960,32 +12037,31 @@ ndbcluster_check_fk_column_compat(const Ha_fk_column_type *child_column_type, return child_col.isBindable(parent_col) != -1; } - /* Version in composite numerical format */ static Uint32 ndb_version = NDB_VERSION_D; -static MYSQL_SYSVAR_UINT( - version, /* name */ - ndb_version, /* var */ - PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_READONLY | PLUGIN_VAR_NOPERSIST, - "Compile version for ndbcluster", - NULL, /* check func. */ - NULL, /* update func. */ - 0, /* default */ - 0, /* min */ - 0, /* max */ - 0 /* block */ +static MYSQL_SYSVAR_UINT(version, /* name */ + ndb_version, /* var */ + PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_READONLY | + PLUGIN_VAR_NOPERSIST, + "Compile version for ndbcluster", + NULL, /* check func. */ + NULL, /* update func. */ + 0, /* default */ + 0, /* min */ + 0, /* max */ + 0 /* block */ ); /* Version in ndb-Y.Y.Y[-status] format */ -static char* ndb_version_string = const_cast(NDB_NDB_VERSION_STRING); -static MYSQL_SYSVAR_STR( - version_string, /* name */ - ndb_version_string, /* var */ - PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_READONLY | PLUGIN_VAR_NOPERSIST, - "Compile version string for ndbcluster", - NULL, /* check func. */ - NULL, /* update func. 
*/ - NULL /* default */ +static char *ndb_version_string = const_cast<char *>(NDB_NDB_VERSION_STRING); +static MYSQL_SYSVAR_STR(version_string, /* name */ + ndb_version_string, /* var */ + PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_READONLY | + PLUGIN_VAR_NOPERSIST, + "Compile version string for ndbcluster", + NULL, /* check func. */ + NULL, /* update func. */ + NULL /* default */ ); extern int ndb_dictionary_is_mysqld; @@ -13993,8 +12069,7 @@ extern int ndb_dictionary_is_mysqld; Uint32 recv_thread_num_cpus; static int ndb_recv_thread_cpu_mask_check_str(const char *str); static int ndb_recv_thread_cpu_mask_update(); -handlerton* ndbcluster_hton; - +handlerton *ndbcluster_hton; /* Handle failure from ndbcluster_init() by printing error @@ -14005,9 +12080,7 @@ handlerton* ndbcluster_hton; the plugin. */ -static -void ndbcluster_init_abort(const char* error) -{ +static void ndbcluster_init_abort(const char *error) { ndb_log_error("%s", error); ndb_log_error("Failed to initialize ndbcluster, aborting!"); ndb_log_error("Use --skip-ndbcluster to start without ndbcluster."); @@ -14020,7 +12093,6 @@ void ndbcluster_init_abort(const char *error) exit(1); } - /* Initialize the ndbcluster storage engine part of the "ndbcluster plugin" @@ -14029,84 +12101,77 @@ void ndbcluster_init_abort(const char* error) the "ndbcluster plugin" */ -static -int ndbcluster_init(void* handlerton_ptr) -{ +static int ndbcluster_init(void *handlerton_ptr) { DBUG_ENTER("ndbcluster_init"); DBUG_ASSERT(!ndbcluster_inited); - handlerton* hton = static_cast<handlerton*>(handlerton_ptr); + handlerton *hton = static_cast<handlerton *>(handlerton_ptr); - if (unlikely(opt_initialize)) - { + if (unlikely(opt_initialize)) { /* Don't schema-distribute 'mysqld --initialize' of data dictionary */ ndb_log_info("'--initialize' -> ndbcluster plugin disabled"); hton->state = SHOW_OPTION_DISABLED; DBUG_ASSERT(!ha_storage_engine_is_enabled(hton)); - DBUG_RETURN(0); // Return before init will disable ndbcluster-SE. + DBUG_RETURN(0); // Return before init will disable ndbcluster-SE. 
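/* Editor's note on the MYSQL_SYSVAR_UINT / MYSQL_SYSVAR_STR blocks reformatted
   above: the argument order is fixed by the plugin API (name, backing variable,
   option flags, help text, check and update callbacks, then default/min/max/
   block-size for numeric variables). A minimal hedged sketch of the same
   pattern, with a hypothetical variable name and storage:
~~~~
static unsigned int example_limit = 4;         // hypothetical storage
static MYSQL_SYSVAR_UINT(example_limit,        // name (hypothetical)
                         example_limit,        // var
                         PLUGIN_VAR_READONLY,  // not settable at runtime
                         "Illustrative read-only plugin variable",
                         NULL,                 // check func.
                         NULL,                 // update func.
                         4,                    // default
                         0,                    // min
                         16,                   // max
                         0                     // block
);
~~~~
   Such a variable would be registered by listing MYSQL_SYSVAR(example_limit)
   in the plugin's system-variable array. */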
} /* Check const alignment */ assert(DependencyTracker::InvalidTransactionId == Ndb_binlog_extra_row_info::InvalidTransactionId); - if (global_system_variables.binlog_format == BINLOG_FORMAT_STMT) - { + if (global_system_variables.binlog_format == BINLOG_FORMAT_STMT) { /* Set global to mixed - note that this is not the default, * but the current global value */ global_system_variables.binlog_format = BINLOG_FORMAT_MIXED; - ndb_log_info("Changed global value of binlog_format from STATEMENT to MIXED"); - + ndb_log_info( + "Changed global value of binlog_format from STATEMENT to MIXED"); } - if (opt_mts_slave_parallel_workers) - { - ndb_log_info("Changed global value of --slave-parallel-workers " - "from %lu to 0", opt_mts_slave_parallel_workers); + if (opt_mts_slave_parallel_workers) { + ndb_log_info( + "Changed global value of --slave-parallel-workers " + "from %lu to 0", + opt_mts_slave_parallel_workers); opt_mts_slave_parallel_workers = 0; } if (ndb_index_stat_thread.init() || - DBUG_EVALUATE_IF("ndbcluster_init_fail1", true, false)) - { + DBUG_EVALUATE_IF("ndbcluster_init_fail1", true, false)) { ndbcluster_init_abort("Failed to initialize NDB Index Stat"); } - if (ndb_metadata_change_monitor_thread.init()) - { + if (ndb_metadata_change_monitor_thread.init()) { ndbcluster_init_abort("Failed to initialize NDB Metadata Change Monitor"); } mysql_mutex_init(PSI_INSTRUMENT_ME, &ndbcluster_mutex, MY_MUTEX_INIT_FAST); mysql_cond_init(PSI_INSTRUMENT_ME, &ndbcluster_cond); - ndb_dictionary_is_mysqld= 1; - ndb_setup_complete= 0; - - ndbcluster_hton= hton; - hton->state= SHOW_OPTION_YES; - hton->db_type= DB_TYPE_NDBCLUSTER; - hton->close_connection= ndbcluster_close_connection; - hton->commit= ndbcluster_commit; - hton->rollback= ndbcluster_rollback; - hton->create= ndbcluster_create_handler; /* Create a new handler */ - hton->drop_database= ndbcluster_drop_database; /* Drop a database */ - hton->panic= ndbcluster_end; /* Panic call */ - hton->show_status= ndbcluster_show_status; /* Show status */ - hton->get_tablespace= ndbcluster_get_tablespace; /* Get ts for old ver */ - hton->alter_tablespace= + ndb_dictionary_is_mysqld = 1; + ndb_setup_complete = 0; + + ndbcluster_hton = hton; + hton->state = SHOW_OPTION_YES; + hton->db_type = DB_TYPE_NDBCLUSTER; + hton->close_connection = ndbcluster_close_connection; + hton->commit = ndbcluster_commit; + hton->rollback = ndbcluster_rollback; + hton->create = ndbcluster_create_handler; /* Create a new handler */ + hton->drop_database = ndbcluster_drop_database; /* Drop a database */ + hton->panic = ndbcluster_end; /* Panic call */ + hton->show_status = ndbcluster_show_status; /* Show status */ + hton->get_tablespace = ndbcluster_get_tablespace; /* Get ts for old ver */ + hton->alter_tablespace = ndbcluster_alter_tablespace; /* Tablespace and logfile group */ - hton->get_tablespace_statistics= - ndbcluster_get_tablespace_statistics; /* Provide data to I_S */ - hton->partition_flags= ndbcluster_partition_flags; /* Partition flags */ + hton->get_tablespace_statistics = + ndbcluster_get_tablespace_statistics; /* Provide data to I_S */ + hton->partition_flags = ndbcluster_partition_flags; /* Partition flags */ ndbcluster_binlog_init(hton); - hton->flags= HTON_TEMPORARY_NOT_SUPPORTED | - HTON_NO_BINLOG_ROW_OPT | - HTON_SUPPORTS_FOREIGN_KEYS | - HTON_SUPPORTS_ATOMIC_DDL; - hton->discover= ndbcluster_discover; - hton->table_exists_in_engine= ndbcluster_table_exists_in_engine; - hton->make_pushed_join= ndbcluster_make_pushed_join; + hton->flags = 
HTON_TEMPORARY_NOT_SUPPORTED | HTON_NO_BINLOG_ROW_OPT | + HTON_SUPPORTS_FOREIGN_KEYS | HTON_SUPPORTS_ATOMIC_DDL; + hton->discover = ndbcluster_discover; + hton->table_exists_in_engine = ndbcluster_table_exists_in_engine; + hton->make_pushed_join = ndbcluster_make_pushed_join; hton->is_supported_system_table = is_supported_system_table; // Install dummy callbacks to avoid writing _.SDI files @@ -14132,13 +12197,14 @@ int ndbcluster_init(void* handlerton_ptr) // Initialize NdbApi ndb_init_internal(1); - if (!ndb_server_hooks.register_server_started(ndb_wait_setup_server_startup)) - { - ndbcluster_init_abort("Failed to register ndb_wait_setup at server startup"); + if (!ndb_server_hooks.register_server_started( + ndb_wait_setup_server_startup)) { + ndbcluster_init_abort( + "Failed to register ndb_wait_setup at server startup"); } - if (!ndb_server_hooks.register_applier_start(ndb_wait_setup_replication_applier)) - { + if (!ndb_server_hooks.register_applier_start( + ndb_wait_setup_replication_applier)) { ndbcluster_init_abort("Failed to register ndb_wait_setup at applier start"); } @@ -14146,47 +12212,38 @@ int ndbcluster_init(void* handlerton_ptr) NDB_SHARE::initialize(table_alias_charset); /* allocate connection resources and connect to cluster */ - const uint global_opti_node_select= THDVAR(NULL, optimized_node_selection); - if (ndbcluster_connect(connect_callback, opt_ndb_wait_connected, - opt_ndb_cluster_connection_pool, - opt_connection_pool_nodeids_str, - (global_opti_node_select & 1), - opt_ndb_connectstring, - opt_ndb_nodeid, - opt_ndb_recv_thread_activation_threshold, - opt_ndb_data_node_neighbour)) - { + const uint global_opti_node_select = THDVAR(NULL, optimized_node_selection); + if (ndbcluster_connect( + connect_callback, opt_ndb_wait_connected, + opt_ndb_cluster_connection_pool, opt_connection_pool_nodeids_str, + (global_opti_node_select & 1), opt_ndb_connectstring, opt_ndb_nodeid, + opt_ndb_recv_thread_activation_threshold, + opt_ndb_data_node_neighbour)) { ndbcluster_init_abort("Failed to initialize connection(s)"); } /* Translate recv thread cpu mask if set */ - if (ndb_recv_thread_cpu_mask_check_str(opt_ndb_recv_thread_cpu_mask) == 0) - { - if (recv_thread_num_cpus) - { - if (ndb_recv_thread_cpu_mask_update()) - { + if (ndb_recv_thread_cpu_mask_check_str(opt_ndb_recv_thread_cpu_mask) == 0) { + if (recv_thread_num_cpus) { + if (ndb_recv_thread_cpu_mask_update()) { ndbcluster_init_abort("Failed to lock receive thread(s) to CPU(s)"); } } } /* start the ndb injector thread */ - if (ndbcluster_binlog_start()) - { + if (ndbcluster_binlog_start()) { ndbcluster_init_abort("Failed to start NDB Binlog"); } // Create index statistics thread if (ndb_index_stat_thread.start() || - DBUG_EVALUATE_IF("ndbcluster_init_fail2", true, false)) - { + DBUG_EVALUATE_IF("ndbcluster_init_fail2", true, false)) { ndbcluster_init_abort("Failed to start NDB Index Stat"); } // Create metadata change monitor thread - if (ndb_metadata_change_monitor_thread.start()) - { + if (ndb_metadata_change_monitor_thread.start()) { ndbcluster_init_abort("Failed to start NDB Metadata Change Monitor"); } @@ -14197,27 +12254,23 @@ int ndbcluster_init(void* handlerton_ptr) { my_service<SERVICE_TYPE(dynamic_privilege_register)> service( "dynamic_privilege_register.mysql_server", registry); - if ((! 
service.is_valid()) || - service->register_privilege(STRING_WITH_LEN("NDB_STORED_USER"))) - { + if ((!service.is_valid()) || + service->register_privilege(STRING_WITH_LEN("NDB_STORED_USER"))) { ndbcluster_init_abort("Failed to register dynamic privilege"); } } mysql_plugin_registry_release(registry); - ndbcluster_inited= 1; + ndbcluster_inited = 1; - DBUG_RETURN(0); // OK + DBUG_RETURN(0); // OK } - -static int ndbcluster_end(handlerton *, ha_panic_function) -{ +static int ndbcluster_end(handlerton *, ha_panic_function) { DBUG_ENTER("ndbcluster_end"); - if (!ndbcluster_inited) - DBUG_RETURN(0); - ndbcluster_inited= 0; + if (!ndbcluster_inited) DBUG_RETURN(0); + ndbcluster_inited = 0; // Stop threads started by ndbcluster_init() except the // ndb_metadata_change_monitor_thread. This is stopped and deinited in the @@ -14244,7 +12297,6 @@ static int ndbcluster_end(handlerton *, ha_panic_function) DBUG_RETURN(0); } - /* Deinitialize the ndbcluster storage engine part of the "ndbcluster plugin" @@ -14253,20 +12305,13 @@ static int ndbcluster_end(handlerton *, ha_panic_function) the "ndbcluster plugin" */ -static int -ndbcluster_deinit(void*) -{ - return 0; -} - +static int ndbcluster_deinit(void *) { return 0; } -void ha_ndbcluster::print_error(int error, myf errflag) -{ +void ha_ndbcluster::print_error(int error, myf errflag) { DBUG_ENTER("ha_ndbcluster::print_error"); DBUG_PRINT("enter", ("error: %d", error)); - if (error == HA_ERR_GENERIC) - { + if (error == HA_ERR_GENERIC) { // This error code is used to indicate that the error already has been // handled and reported in other parts of ha_ndbcluster, thus it can be // safely ignored here. NOTE! HA_ERR_GENERIC is not used elsewhere in @@ -14278,21 +12323,18 @@ void ha_ndbcluster::print_error(int error, myf errflag) DBUG_VOID_RETURN; } - if (error == HA_ERR_NO_PARTITION_FOUND) - { + if (error == HA_ERR_NO_PARTITION_FOUND) { m_part_info->print_no_partition_found(current_thd, table); DBUG_VOID_RETURN; } - if (error == HA_ERR_NO_CONNECTION) - { + if (error == HA_ERR_NO_CONNECTION) { handler::print_error(4009, errflag); DBUG_VOID_RETURN; } if (error == HA_ERR_FOUND_DUPP_KEY && - (table == NULL || table->file == NULL)) - { + (table == NULL || table->file == NULL)) { /* This is a side effect of 'ndbcluster_print_error' (called from 'ndbcluster_commit' and 'ndbcluster_rollback') which realises @@ -14308,19 +12350,15 @@ void ha_ndbcluster::print_error(int error, myf errflag) DBUG_VOID_RETURN; } - if (error == ER_CANT_DROP_FIELD_OR_KEY) - { + if (error == ER_CANT_DROP_FIELD_OR_KEY) { /* Called on drop unknown FK by server when algorithm=copy or by handler when algorithm=inplace. In both cases the error was already printed in ha_ndb_ddl_fk.cc. */ - THD* thd= NULL; - if (table != NULL && - (thd= table->in_use) != NULL && - thd->lex != NULL && - thd_sql_command(thd) == SQLCOM_ALTER_TABLE) - { + THD *thd = NULL; + if (table != NULL && (thd = table->in_use) != NULL && thd->lex != NULL && + thd_sql_command(thd) == SQLCOM_ALTER_TABLE) { DBUG_VOID_RETURN; } DBUG_ASSERT(false); @@ -14330,13 +12368,11 @@ void ha_ndbcluster::print_error(int error, myf errflag) DBUG_VOID_RETURN; } - /** Set a given location from full pathname to database name. */ -void ha_ndbcluster::set_dbname(const char *path_name, char *dbname) -{ +void ha_ndbcluster::set_dbname(const char *path_name, char *dbname) { ndb_set_dbname(path_name, dbname); } @@ -14344,8 +12380,7 @@ void ha_ndbcluster::set_dbname(const char *path_name, char *dbname) Set m_dbname from full pathname to table file. 
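Editor's illustration (not from the source): the server hands the handler
paths of the form "./<db>/<table>", so for "./test/t1" these helpers are
expected to yield dbname "test" and tabname "t1"; the real ndb_set_dbname()
and ndb_set_tabname() additionally decode filename-encoded identifiers.
A hedged sketch of that kind of split, with a hypothetical helper name:
~~~~
#include <cstring>
// Hypothetical helper, for illustration only.
static void example_split(const char *path, char *db, char *tab) {
  const char *sep = strrchr(path, '/');  // last '/' precedes the table name
  if (sep == NULL) {                     // no directory part at all
    strcpy(tab, path);
    db[0] = '\0';
    return;
  }
  strcpy(tab, sep + 1);                  // -> "t1"
  const char *db_start = sep;
  while (db_start > path && db_start[-1] != '/') db_start--;
  size_t len = (size_t)(sep - db_start);
  memcpy(db, db_start, len);             // -> "test"
  db[len] = '\0';
}
~~~~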
*/ -void ha_ndbcluster::set_dbname(const char *path_name) -{ +void ha_ndbcluster::set_dbname(const char *path_name) { ndb_set_dbname(path_name, m_dbname); } @@ -14353,9 +12388,7 @@ void ha_ndbcluster::set_dbname(const char *path_name) Set a given location from full pathname to table file. */ -void -ha_ndbcluster::set_tabname(const char *path_name, char * tabname) -{ +void ha_ndbcluster::set_tabname(const char *path_name, char *tabname) { ndb_set_tabname(path_name, tabname); } @@ -14363,26 +12396,22 @@ ha_ndbcluster::set_tabname(const char *path_name, char * tabname) Set m_tabname from full pathname to table file. */ -void ha_ndbcluster::set_tabname(const char *path_name) -{ +void ha_ndbcluster::set_tabname(const char *path_name) { ndb_set_tabname(path_name, m_tabname); } - /* If there are no stored stats, should we do a tree-dive on all db nodes. The result is fairly good but does mean a round-trip. */ -static const bool g_ndb_records_in_range_tree_dive= false; +static const bool g_ndb_records_in_range_tree_dive = false; /* Determine roughly how many records are in the range specified */ -ha_rows -ha_ndbcluster::records_in_range(uint inx, key_range *min_key, - key_range *max_key) -{ - KEY *key_info= table->key_info + inx; - uint key_length= key_info->key_length; - NDB_INDEX_TYPE idx_type= get_index_type(inx); +ha_rows ha_ndbcluster::records_in_range(uint inx, key_range *min_key, + key_range *max_key) { + KEY *key_info = table->key_info + inx; + uint key_length = key_info->key_length; + NDB_INDEX_TYPE idx_type = get_index_type(inx); DBUG_ENTER("records_in_range"); // Prevent partial read of hash indexes by returning HA_POS_ERROR @@ -14390,37 +12419,32 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key, ((min_key && min_key->length < key_length) || (max_key && max_key->length < key_length))) DBUG_RETURN(HA_POS_ERROR); - + // Read from hash index with full key - // This is a "const" table which returns only one record! + // This is a "const" table which returns only one record! 
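/* Editor's worked example for the estimates in this function (numbers are
   illustrative, not from the source). A full-key equality range on a
   hash-backed primary/unique index (min_key equal to max_key over the whole
   key length) is answered as exactly 1 row without any round-trip, which is
   what the check just below does. For the heuristic fallback further down:
   with table_rows = 1,000,000 and equality on the first of two key parts,
   eq_fraction = 0.5; a non-unique ordered index reduces it to 0.5 / 1.20,
   about 0.417, giving rows = 10^6 / pow(10^6, 0.417), roughly 10^6 / 316,
   about 3160 rows, which stays under the 2% cap of 20,000 rows. */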
if ((idx_type != ORDERED_INDEX) && ((min_key && min_key->length == key_length) && (max_key && max_key->length == key_length) && - (min_key->key==max_key->key || - memcmp(min_key->key, max_key->key, key_length)==0))) + (min_key->key == max_key->key || + memcmp(min_key->key, max_key->key, key_length) == 0))) DBUG_RETURN(1); - + // XXX why this if if ((idx_type == PRIMARY_KEY_ORDERED_INDEX || - idx_type == UNIQUE_ORDERED_INDEX || - idx_type == ORDERED_INDEX)) - { - THD *thd= current_thd; - const bool index_stat_enable= THDVAR(NULL, index_stat_enable) && - THDVAR(thd, index_stat_enable); - - if (index_stat_enable) - { - ha_rows rows= HA_POS_ERROR; - int err= ndb_index_stat_get_rir(inx, min_key, max_key, &rows); - if (err == 0) - { + idx_type == UNIQUE_ORDERED_INDEX || idx_type == ORDERED_INDEX)) { + THD *thd = current_thd; + const bool index_stat_enable = + THDVAR(NULL, index_stat_enable) && THDVAR(thd, index_stat_enable); + + if (index_stat_enable) { + ha_rows rows = HA_POS_ERROR; + int err = ndb_index_stat_get_rir(inx, min_key, max_key, &rows); + if (err == 0) { /** * optimizer thinks that all values < 2 are exact...but * we don't provide exact statistics */ - if (rows < 2) - rows = 2; + if (rows < 2) rows = 2; DBUG_RETURN(rows); } if (err != 0 && @@ -14429,8 +12453,7 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key, /* warning was printed at first error */ err != NdbIndexStat::MyHasError && /* stats thread aborted request */ - err != NdbIndexStat::MyAbortReq) - { + err != NdbIndexStat::MyAbortReq) { push_warning_printf(thd, Sql_condition::SL_WARNING, ER_CANT_GET_STAT, /* pun? */ "index stats (RIR) for key %s:" @@ -14440,54 +12463,38 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key, /*fall through*/ } - if (g_ndb_records_in_range_tree_dive) - { - NDB_INDEX_DATA& d=m_index[inx]; - const NDBINDEX* index= d.index; - Ndb *ndb= get_ndb(thd); - NdbTransaction* active_trans= m_thd_ndb ? m_thd_ndb->trans : 0; - NdbTransaction* trans=NULL; - int res=0; + if (g_ndb_records_in_range_tree_dive) { + NDB_INDEX_DATA &d = m_index[inx]; + const NDBINDEX *index = d.index; + Ndb *ndb = get_ndb(thd); + NdbTransaction *active_trans = m_thd_ndb ? m_thd_ndb->trans : 0; + NdbTransaction *trans = NULL; + int res = 0; Uint64 rows; - do - { - if ((trans=active_trans) == NULL || - trans->commitStatus() != NdbTransaction::Started) - { + do { + if ((trans = active_trans) == NULL || + trans->commitStatus() != NdbTransaction::Started) { DBUG_PRINT("info", ("no active trans")); - if (! 
(trans=ndb->startTransaction())) + if (!(trans = ndb->startTransaction())) ERR_BREAK(ndb->getNdbError(), res); } - + /* Create an IndexBound struct for the keys */ NdbIndexScanOperation::IndexBound ib; - compute_index_bounds(ib, - key_info, - min_key, - max_key, - 0); + compute_index_bounds(ib, key_info, min_key, max_key, 0); - ib.range_no= 0; + ib.range_no = 0; NdbIndexStat is; - if (is.records_in_range(index, - trans, - d.ndb_record_key, - m_ndb_record, - &ib, - 0, - &rows, - 0) == -1) + if (is.records_in_range(index, trans, d.ndb_record_key, m_ndb_record, + &ib, 0, &rows, 0) == -1) ERR_BREAK(is.getNdbError(), res); } while (0); - if (trans != active_trans && rows == 0) - rows = 1; - if (trans != active_trans && trans != NULL) - ndb->closeTransaction(trans); - if (res == 0) - DBUG_RETURN(rows); + if (trans != active_trans && rows == 0) rows = 1; + if (trans != active_trans && trans != NULL) ndb->closeTransaction(trans); + if (res == 0) DBUG_RETURN(rows); /*fall through*/ } } @@ -14495,120 +12502,97 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key, /* Use simple heuristics to estimate fraction of 'stats.record' returned from range. */ - do - { - if (stats.records == ~(ha_rows)0 || stats.records == 0) - { + do { + if (stats.records == ~(ha_rows)0 || stats.records == 0) { /* Refresh statistics, only read from datanodes if 'use_exact_count' */ - THD *thd= current_thd; - if (update_stats(thd, THDVAR(thd, use_exact_count))) - break; + THD *thd = current_thd; + if (update_stats(thd, THDVAR(thd, use_exact_count))) break; } Uint64 rows; - Uint64 table_rows= stats.records; - size_t eq_bound_len= 0; - size_t min_key_length= (min_key) ? min_key->length : 0; - size_t max_key_length= (max_key) ? max_key->length : 0; + Uint64 table_rows = stats.records; + size_t eq_bound_len = 0; + size_t min_key_length = (min_key) ? min_key->length : 0; + size_t max_key_length = (max_key) ? max_key->length : 0; // Might have a closed/open range bound: // Low range open - if (!min_key_length) - { - rows= (!max_key_length) - ? table_rows // No range was specified - : table_rows/10; // -oo .. 
-> 10% selectivity } // High range open - else if (!max_key_length) - { - rows= table_rows/10; // ..oo -> 10% selectivity - } - else - { - size_t bounds_len= MIN(min_key_length,max_key_length); - uint eq_bound_len= 0; - uint eq_bound_offs= 0; - - KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; - for (; key_part != end; key_part++) - { - uint part_length= key_part->store_length; - if (eq_bound_offs+part_length > bounds_len || - memcmp(&min_key->key[eq_bound_offs], - &max_key->key[eq_bound_offs], - part_length)) - { + else if (!max_key_length) { + rows = table_rows / 10; // ..oo -> 10% selectivity + } else { + size_t bounds_len = MIN(min_key_length, max_key_length); + uint eq_bound_len = 0; + uint eq_bound_offs = 0; + + KEY_PART_INFO *key_part = key_info->key_part; + KEY_PART_INFO *end = key_part + key_info->user_defined_key_parts; + for (; key_part != end; key_part++) { + uint part_length = key_part->store_length; + if (eq_bound_offs + part_length > bounds_len || + memcmp(&min_key->key[eq_bound_offs], &max_key->key[eq_bound_offs], + part_length)) { break; } - eq_bound_len+= key_part->length; - eq_bound_offs+= part_length; + eq_bound_len += key_part->length; + eq_bound_offs += part_length; } - if (!eq_bound_len) - { - rows= table_rows/20; // .. -> 5% - } - else - { + if (!eq_bound_len) { + rows = table_rows / 20; // .. -> 5% + } else { // Has an equality range on a leading part of 'key_length': // - Assume reduced selectivity for non-unique indexes // by decreasing 'eq_fraction' by 20% // - Assume equal selectivity for all eq_parts in key. double eq_fraction = (double)(eq_bound_len) / key_length; - if (idx_type == ORDERED_INDEX) // Non-unique index -> less selectivity - eq_fraction/= 1.20; - if (eq_fraction >= 1.0) // Exact match -> 1 row + if (idx_type == ORDERED_INDEX) // Non-unique index -> less selectivity + eq_fraction /= 1.20; + if (eq_fraction >= 1.0) // Exact match -> 1 row DBUG_RETURN(1); - rows = (Uint64)((double)table_rows / pow((double)table_rows, eq_fraction)); - if (rows > (table_rows/50)) // EQ-range: Max 2% of rows - rows= (table_rows/50); + rows = + (Uint64)((double)table_rows / pow((double)table_rows, eq_fraction)); + if (rows > (table_rows / 50)) // EQ-range: Max 2% of rows + rows = (table_rows / 50); - if (min_key_length > eq_bound_offs) - rows/= 2; - if (max_key_length > eq_bound_offs) - rows/= 2; + if (min_key_length > eq_bound_offs) rows /= 2; + if (max_key_length > eq_bound_offs) rows /= 2; } } // Make sure that EQ is preferred even if row-count is low - if (eq_bound_len && rows < 2) // At least 2 rows as not exact - rows= 2; + if (eq_bound_len && rows < 2) // At least 2 rows as not exact + rows = 2; else if (rows < 3) - rows= 3; - DBUG_RETURN(MIN(rows,table_rows)); + rows = 3; + DBUG_RETURN(MIN(rows, table_rows)); } while (0); DBUG_RETURN(10); /* Poor guess when you don't know anything */ } -ulonglong ha_ndbcluster::table_flags(void) const -{ - THD *thd= current_thd; - ulonglong f= - HA_NULL_IN_KEY | - HA_AUTO_PART_KEY | - HA_NO_PREFIX_CHAR_KEYS | - HA_CAN_GEOMETRY | - HA_CAN_BIT_FIELD | - HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | - HA_PARTIAL_COLUMN_READ | - HA_HAS_OWN_BINLOGGING | - HA_BINLOG_ROW_CAPABLE | - HA_COUNT_ROWS_INSTANT | - HA_READ_BEFORE_WRITE_REMOVAL | - HA_GENERATED_COLUMNS | - 0; +ulonglong ha_ndbcluster::table_flags(void) const { + THD *thd = current_thd; + ulonglong f = HA_NULL_IN_KEY | HA_AUTO_PART_KEY | HA_NO_PREFIX_CHAR_KEYS | + HA_CAN_GEOMETRY | HA_CAN_BIT_FIELD | + 
HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | HA_PARTIAL_COLUMN_READ | + HA_HAS_OWN_BINLOGGING | HA_BINLOG_ROW_CAPABLE | + HA_COUNT_ROWS_INSTANT | HA_READ_BEFORE_WRITE_REMOVAL | + HA_GENERATED_COLUMNS | 0; /* To allow for logging of ndb tables during stmt based logging; flag capability, but also turn off flag for OWN_BINLOGGING */ if (thd->variables.binlog_format == BINLOG_FORMAT_STMT) - f= (f | HA_BINLOG_STMT_CAPABLE) & ~HA_HAS_OWN_BINLOGGING; + f = (f | HA_BINLOG_STMT_CAPABLE) & ~HA_HAS_OWN_BINLOGGING; /* Allow MySQL Server to decide that STATEMENT logging should be used @@ -14618,41 +12602,30 @@ ulonglong ha_ndbcluster::table_flags(void) const table as a "no_replicate" table. */ if (thd_sql_command(thd) == SQLCOM_TRUNCATE) - f= (f | HA_BINLOG_STMT_CAPABLE) & ~HA_HAS_OWN_BINLOGGING; + f = (f | HA_BINLOG_STMT_CAPABLE) & ~HA_HAS_OWN_BINLOGGING; /** - * To maximize join pushability we want const-table + * To maximize join pushability we want const-table * optimization blocked if 'ndb_join_pushdown= on' */ - if (THDVAR(thd, join_pushdown)) - f= f | HA_BLOCK_CONST_TABLE; + if (THDVAR(thd, join_pushdown)) f = f | HA_BLOCK_CONST_TABLE; return f; } -const char * ha_ndbcluster::table_type() const -{ - return("NDBCLUSTER"); -} -uint ha_ndbcluster::max_supported_keys() const -{ - return MAX_KEY; -} -uint ha_ndbcluster::max_supported_key_parts() const -{ +const char *ha_ndbcluster::table_type() const { return ("NDBCLUSTER"); } +uint ha_ndbcluster::max_supported_keys() const { return MAX_KEY; } +uint ha_ndbcluster::max_supported_key_parts() const { return NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY; } -uint ha_ndbcluster::max_supported_key_length() const -{ +uint ha_ndbcluster::max_supported_key_length() const { return NDB_MAX_KEY_SIZE; } uint ha_ndbcluster::max_supported_key_part_length( - HA_CREATE_INFO *create_info MY_ATTRIBUTE((unused))) const -{ + HA_CREATE_INFO *create_info MY_ATTRIBUTE((unused))) const { return NDB_MAX_KEY_SIZE; } -bool ha_ndbcluster::low_byte_first() const -{ +bool ha_ndbcluster::low_byte_first() const { #ifdef WORDS_BIGENDIAN return false; #else @@ -14660,7 +12633,6 @@ bool ha_ndbcluster::low_byte_first() const #endif } - struct ndb_table_statistics_row { Uint64 rows; Uint64 commits; @@ -14669,79 +12641,62 @@ struct ndb_table_statistics_row { Uint64 var_mem; }; -int ha_ndbcluster::update_stats(THD *thd, - bool do_read_stat, - uint part_id) -{ +int ha_ndbcluster::update_stats(THD *thd, bool do_read_stat, uint part_id) { struct Ndb_statistics stat; - Thd_ndb *thd_ndb= get_thd_ndb(thd); + Thd_ndb *thd_ndb = get_thd_ndb(thd); DBUG_ENTER("ha_ndbcluster::update_stats"); - do - { - if (m_share && !do_read_stat) - { + do { + if (m_share && !do_read_stat) { mysql_mutex_lock(&m_share->mutex); - stat= m_share->stat; + stat = m_share->stat; mysql_mutex_unlock(&m_share->mutex); - DBUG_ASSERT(stat.row_count != ~(ha_rows)0); // should never be invalid + DBUG_ASSERT(stat.row_count != ~(ha_rows)0); // should never be invalid /* Accept shared cached statistics if row_count is valid. 
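Editor's note: the surrounding do/while implements a read-through cache:
take the shared copy under the share mutex and accept it when valid,
otherwise fetch fresh statistics from the data nodes and write them back.
A hedged sketch of the pattern, where fetch_from_datanodes() is a
hypothetical stand-in for the ndb_get_table_statistics() round-trip:
~~~~
Ndb_statistics read_stats(NDB_SHARE *share, bool force_read) {
  Ndb_statistics s;
  if (!force_read) {
    mysql_mutex_lock(&share->mutex);
    s = share->stat;                           // cheap shared copy
    mysql_mutex_unlock(&share->mutex);
    if (s.row_count != ~(ha_rows)0) return s;  // cache entry valid
  }
  s = fetch_from_datanodes();                  // hypothetical round-trip
  mysql_mutex_lock(&share->mutex);
  share->stat = s;                             // refresh the shared copy
  mysql_mutex_unlock(&share->mutex);
  return s;
}
~~~~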
*/ - if (stat.row_count != ~(ha_rows)0) - break; + if (stat.row_count != ~(ha_rows)0) break; } /* Request statistics from datanodes */ - Ndb *ndb= thd_ndb->ndb; - if (ndb->setDatabaseName(m_dbname)) - { + Ndb *ndb = thd_ndb->ndb; + if (ndb->setDatabaseName(m_dbname)) { DBUG_RETURN(HA_ERR_OUT_OF_MEM); } - if (int err= ndb_get_table_statistics(thd, - this, - ndb, - m_table, - m_ndb_record, - &stat, - part_id)) - { + if (int err = ndb_get_table_statistics(thd, this, ndb, m_table, + m_ndb_record, &stat, part_id)) { DBUG_RETURN(err); } /* Update shared statistics with fresh data */ - if (m_share) - { + if (m_share) { mysql_mutex_lock(&m_share->mutex); - m_share->stat= stat; + m_share->stat = stat; mysql_mutex_unlock(&m_share->mutex); } break; - } - while(0); + } while (0); - int no_uncommitted_rows_count= 0; - if (m_table_info && !thd_ndb->m_error) - { - m_table_info->records= stat.row_count; - m_table_info->last_count= thd_ndb->count; - no_uncommitted_rows_count= m_table_info->no_uncommitted_rows_count; - } - stats.mean_rec_length= stat.row_size; - stats.data_file_length= stat.fragment_memory; - stats.records= stat.row_count + no_uncommitted_rows_count; - stats.max_data_file_length= stat.fragment_extent_space; - stats.delete_length= stat.fragment_extent_free_space; - - DBUG_PRINT("exit", ("stats.records: %d " - "stat->row_count: %d " - "no_uncommitted_rows_count: %d" - "stat->fragment_extent_space: %u " - "stat->fragment_extent_free_space: %u", - (int)stats.records, - (int)stat.row_count, - (int)no_uncommitted_rows_count, - (uint)stat.fragment_extent_space, - (uint)stat.fragment_extent_free_space)); + int no_uncommitted_rows_count = 0; + if (m_table_info && !thd_ndb->m_error) { + m_table_info->records = stat.row_count; + m_table_info->last_count = thd_ndb->count; + no_uncommitted_rows_count = m_table_info->no_uncommitted_rows_count; + } + stats.mean_rec_length = stat.row_size; + stats.data_file_length = stat.fragment_memory; + stats.records = stat.row_count + no_uncommitted_rows_count; + stats.max_data_file_length = stat.fragment_extent_space; + stats.delete_length = stat.fragment_extent_free_space; + + DBUG_PRINT("exit", + ("stats.records: %d " + "stat->row_count: %d " + "no_uncommitted_rows_count: %d" + "stat->fragment_extent_space: %u " + "stat->fragment_extent_free_space: %u", + (int)stats.records, (int)stat.row_count, + (int)no_uncommitted_rows_count, (uint)stat.fragment_extent_space, + (uint)stat.fragment_extent_free_space)); DBUG_RETURN(0); } @@ -14751,26 +12706,25 @@ int ha_ndbcluster::update_stats(THD *thd, 'local_stat'. Should be called when transaction has successfully committed its changes. */ -static -void modify_shared_stats(NDB_SHARE *share, - Ndb_local_table_statistics *local_stat) -{ - if (local_stat->no_uncommitted_rows_count) - { +static void modify_shared_stats(NDB_SHARE *share, + Ndb_local_table_statistics *local_stat) { + if (local_stat->no_uncommitted_rows_count) { mysql_mutex_lock(&share->mutex); - DBUG_ASSERT(share->stat.row_count != ~(ha_rows)0);// should never be invalid - if (share->stat.row_count != ~(ha_rows)0) - { + DBUG_ASSERT(share->stat.row_count != + ~(ha_rows)0); // should never be invalid + if (share->stat.row_count != ~(ha_rows)0) { DBUG_PRINT("info", ("Update row_count for %s, row_count: %lu, with:%d", - share->table_name, (ulong) share->stat.row_count, + share->table_name, (ulong)share->stat.row_count, local_stat->no_uncommitted_rows_count)); - share->stat.row_count= - ((Int64)share->stat.row_count+local_stat->no_uncommitted_rows_count > 0) - ? 
share->stat.row_count+local_stat->no_uncommitted_rows_count - : 0; + share->stat.row_count = + ((Int64)share->stat.row_count + + local_stat->no_uncommitted_rows_count > + 0) + ? share->stat.row_count + local_stat->no_uncommitted_rows_count + : 0; } mysql_mutex_unlock(&share->mutex); - local_stat->no_uncommitted_rows_count= 0; + local_stat->no_uncommitted_rows_count = 0; } } @@ -14779,21 +12733,16 @@ void modify_shared_stats(NDB_SHARE *share, Otherwise, it returns the table-statistics, which is an aggregate over all partitions of that table. */ -static -int -ndb_get_table_statistics(THD *thd, - ha_ndbcluster* file, - Ndb* ndb, - const NdbDictionary::Table* tab, - const NdbRecord *record, - struct Ndb_statistics * ndbstat, - uint part_id) -{ - Thd_ndb *thd_ndb= get_thd_ndb(current_thd); - NdbTransaction* pTrans; +static int ndb_get_table_statistics(THD *thd, ha_ndbcluster *file, Ndb *ndb, + const NdbDictionary::Table *tab, + const NdbRecord *record, + struct Ndb_statistics *ndbstat, + uint part_id) { + Thd_ndb *thd_ndb = get_thd_ndb(current_thd); + NdbTransaction *pTrans; NdbError error; - int retries= 100; - int reterr= 0; + int retries = 100; + int reterr = 0; const char *dummyRowPtr; NdbOperation::GetValueSpec extraGets[7]; Uint64 rows, fixed_mem, var_mem, ext_space, free_ext_space; @@ -14802,123 +12751,109 @@ ndb_get_table_statistics(THD *thd, DBUG_ENTER("ndb_get_table_statistics"); DBUG_ASSERT(record != 0); - + /* We use the passed in NdbRecord just to get access to the table, we mask out any/all columns it may have and add our reads as extraGets. This is necessary as they are all pseudo-columns */ - extraGets[0].column= NdbDictionary::Column::ROW_COUNT; - extraGets[0].appStorage= &rows; - extraGets[1].column= NdbDictionary::Column::ROW_SIZE; - extraGets[1].appStorage= &size; - extraGets[2].column= NdbDictionary::Column::FRAGMENT_FIXED_MEMORY; - extraGets[2].appStorage= &fixed_mem; - extraGets[3].column= NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY; - extraGets[3].appStorage= &var_mem; - extraGets[4].column= NdbDictionary::Column::FRAGMENT_EXTENT_SPACE; - extraGets[4].appStorage= &ext_space; - extraGets[5].column= NdbDictionary::Column::FRAGMENT_FREE_EXTENT_SPACE; - extraGets[5].appStorage= &free_ext_space; - extraGets[6].column= NdbDictionary::Column::FRAGMENT; - extraGets[6].appStorage= &fragid; - - const Uint32 codeWords= 1; - Uint32 codeSpace[ codeWords ]; - NdbInterpretedCode code(NULL, // Table is irrelevant - &codeSpace[0], - codeWords); - if ((code.interpret_exit_last_row() != 0) || - (code.finalise() != 0)) - { - reterr= code.getNdbError().code; + extraGets[0].column = NdbDictionary::Column::ROW_COUNT; + extraGets[0].appStorage = &rows; + extraGets[1].column = NdbDictionary::Column::ROW_SIZE; + extraGets[1].appStorage = &size; + extraGets[2].column = NdbDictionary::Column::FRAGMENT_FIXED_MEMORY; + extraGets[2].appStorage = &fixed_mem; + extraGets[3].column = NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY; + extraGets[3].appStorage = &var_mem; + extraGets[4].column = NdbDictionary::Column::FRAGMENT_EXTENT_SPACE; + extraGets[4].appStorage = &ext_space; + extraGets[5].column = NdbDictionary::Column::FRAGMENT_FREE_EXTENT_SPACE; + extraGets[5].appStorage = &free_ext_space; + extraGets[6].column = NdbDictionary::Column::FRAGMENT; + extraGets[6].appStorage = &fragid; + + const Uint32 codeWords = 1; + Uint32 codeSpace[codeWords]; + NdbInterpretedCode code(NULL, // Table is irrelevant + &codeSpace[0], codeWords); + if ((code.interpret_exit_last_row() != 0) || (code.finalise() != 0)) 
{ + reterr = code.getNdbError().code; DBUG_PRINT("exit", ("failed, reterr: %u, NdbError %u(%s)", reterr, error.code, error.message)); DBUG_RETURN(reterr); } - do - { - Uint32 count= 0; - Uint64 sum_rows= 0; - Uint64 sum_row_size= 0; - Uint64 sum_mem= 0; - Uint64 sum_ext_space= 0; - Uint64 sum_free_ext_space= 0; - NdbScanOperation*pOp; + do { + Uint32 count = 0; + Uint64 sum_rows = 0; + Uint64 sum_row_size = 0; + Uint64 sum_mem = 0; + Uint64 sum_ext_space = 0; + Uint64 sum_free_ext_space = 0; + NdbScanOperation *pOp; int check; /** * TODO WL#9019, pass table to startTransaction to allow fully * replicated table to select data_node_neighbour */ - if ((pTrans= ndb->startTransaction(tab)) == NULL) - { - error= ndb->getNdbError(); + if ((pTrans = ndb->startTransaction(tab)) == NULL) { + error = ndb->getNdbError(); goto retry; } NdbScanOperation::ScanOptions options; - options.optionsPresent= NdbScanOperation::ScanOptions::SO_BATCH | - NdbScanOperation::ScanOptions::SO_GETVALUE | - NdbScanOperation::ScanOptions::SO_INTERPRETED; + options.optionsPresent = NdbScanOperation::ScanOptions::SO_BATCH | + NdbScanOperation::ScanOptions::SO_GETVALUE | + NdbScanOperation::ScanOptions::SO_INTERPRETED; /* Set batch_size=1, as we need only one row per fragment. */ - options.batch= 1; - options.extraGetValues= &extraGets[0]; - options.numExtraGetValues= sizeof(extraGets)/sizeof(extraGets[0]); - options.interpretedCode= &code; - - if ((pOp= pTrans->scanTable(record, NdbOperation::LM_CommittedRead, - empty_mask, - &options, - sizeof(NdbScanOperation::ScanOptions))) == NULL) - { - error= pTrans->getNdbError(); + options.batch = 1; + options.extraGetValues = &extraGets[0]; + options.numExtraGetValues = sizeof(extraGets) / sizeof(extraGets[0]); + options.interpretedCode = &code; + + if ((pOp = pTrans->scanTable( + record, NdbOperation::LM_CommittedRead, empty_mask, &options, + sizeof(NdbScanOperation::ScanOptions))) == NULL) { + error = pTrans->getNdbError(); goto retry; } thd_ndb->m_scan_count++; - thd_ndb->m_pruned_scan_count += (pOp->getPruned()? 1 : 0); - + thd_ndb->m_pruned_scan_count += (pOp->getPruned() ? 
1 : 0); + thd_ndb->m_execute_count++; DBUG_PRINT("info", ("execute_count: %u", thd_ndb->m_execute_count)); - if (pTrans->execute(NdbTransaction::NoCommit, - NdbOperation::AbortOnError, - true) == -1) - { - error= pTrans->getNdbError(); + if (pTrans->execute(NdbTransaction::NoCommit, NdbOperation::AbortOnError, + true) == -1) { + error = pTrans->getNdbError(); goto retry; } - - while ((check= pOp->nextResult(&dummyRowPtr, true, true)) == 0) - { - DBUG_PRINT("info", ("nextResult rows: %llu, " - "fixed_mem_size %llu var_mem_size %llu " - "fragmentid %u extent_space %llu free_extent_space %llu", - rows, fixed_mem, var_mem, fragid, - ext_space, free_ext_space)); - if ((part_id != ~(uint)0) && fragid != part_id) - { + while ((check = pOp->nextResult(&dummyRowPtr, true, true)) == 0) { + DBUG_PRINT("info", + ("nextResult rows: %llu, " + "fixed_mem_size %llu var_mem_size %llu " + "fragmentid %u extent_space %llu free_extent_space %llu", + rows, fixed_mem, var_mem, fragid, ext_space, free_ext_space)); + + if ((part_id != ~(uint)0) && fragid != part_id) { continue; } - sum_rows+= rows; - if (sum_row_size < size) - sum_row_size= size; - sum_mem+= fixed_mem + var_mem; + sum_rows += rows; + if (sum_row_size < size) sum_row_size = size; + sum_mem += fixed_mem + var_mem; count++; sum_ext_space += ext_space; sum_free_ext_space += free_ext_space; - if ((part_id != ~(uint)0) && fragid == part_id) - { + if ((part_id != ~(uint)0) && fragid == part_id) { break; } } - - if (check == -1) - { - error= pOp->getNdbError(); + + if (check == -1) { + error = pOp->getNdbError(); goto retry; } @@ -14926,11 +12861,11 @@ ndb_get_table_statistics(THD *thd, ndb->closeTransaction(pTrans); - ndbstat->row_count= sum_rows; - ndbstat->row_size= (ulong)sum_row_size; - ndbstat->fragment_memory= sum_mem; - ndbstat->fragment_extent_space= sum_ext_space; - ndbstat->fragment_extent_free_space= sum_free_ext_space; + ndbstat->row_count = sum_rows; + ndbstat->row_size = (ulong)sum_row_size; + ndbstat->fragment_memory = sum_mem; + ndbstat->fragment_extent_space = sum_ext_space; + ndbstat->fragment_extent_free_space = sum_free_ext_space; DBUG_PRINT("exit", ("records: %llu row_size: %llu " "mem: %llu allocated: %llu free: %llu count: %u", @@ -14938,57 +12873,49 @@ ndb_get_table_statistics(THD *thd, sum_free_ext_space, count)); DBUG_RETURN(0); -retry: - if (file && pTrans) - { - reterr= file->ndb_err(pTrans); - } - else - { - const NdbError& tmp= error; + retry: + if (file && pTrans) { + reterr = file->ndb_err(pTrans); + } else { + const NdbError &tmp = error; ERR_PRINT(tmp); - reterr= ndb_to_mysql_error(&tmp); + reterr = ndb_to_mysql_error(&tmp); } - if (pTrans) - { + if (pTrans) { ndb->closeTransaction(pTrans); - pTrans= NULL; + pTrans = NULL; } - if (error.status == NdbError::TemporaryError && - retries-- && !thd_killed(thd)) - { + if (error.status == NdbError::TemporaryError && retries-- && + !thd_killed(thd)) { ndb_trans_retry_sleep(); continue; } break; - } while(1); - DBUG_PRINT("exit", ("failed, reterr: %u, NdbError %u(%s)", reterr, - error.code, error.message)); + } while (1); + DBUG_PRINT("exit", ("failed, reterr: %u, NdbError %u(%s)", reterr, error.code, + error.message)); DBUG_RETURN(reterr); } - -void ha_ndbcluster::check_read_before_write_removal() -{ +void ha_ndbcluster::check_read_before_write_removal() { DBUG_ENTER("check_read_before_write_removal"); /* Must have determined that rbwr is possible */ assert(m_read_before_write_removal_possible); - m_read_before_write_removal_used= true; + m_read_before_write_removal_used = 
true; /* Can't use on table with hidden primary key */ assert(table_share->primary_key != MAX_KEY); /* Index must be unique */ DBUG_PRINT("info", ("using index %d", active_index)); - const KEY *key= table->key_info + active_index; + const KEY *key = table->key_info + active_index; ndbcluster::ndbrequire((key->flags & HA_NOSAME)); DBUG_VOID_RETURN; } - /**************************************************************************** * MRR interface implementation ***************************************************************************/ @@ -15005,12 +12932,11 @@ void ha_ndbcluster::check_read_before_write_removal() Code assumes that X < enum_ordered_range is a valid check for range converted to key operation. */ -enum multi_range_types -{ - enum_unique_range, /// Range converted to key operation - enum_empty_unique_range, /// No data found (in key operation) - enum_ordered_range, /// Normal ordered index scan range - enum_skip_range /// Empty range (eg. partition pruning) +enum multi_range_types { + enum_unique_range, /// Range converted to key operation + enum_empty_unique_range, /// No data found (in key operation) + enum_ordered_range, /// Normal ordered index scan range + enum_skip_range /// Empty range (eg. partition pruning) }; /** @@ -15032,42 +12958,31 @@ enum multi_range_types bytes of row data. */ -static inline -ulong multi_range_buffer_size(const HANDLER_BUFFER* buffer) -{ +static inline ulong multi_range_buffer_size(const HANDLER_BUFFER *buffer) { const size_t buf_size = buffer->buffer_end - buffer->buffer; DBUG_ASSERT(buf_size < ULONG_MAX); return (ulong)buf_size; } /* Return the needed size of the fixed array at start of HANDLER_BUFFER. */ -static ulong -multi_range_fixed_size(int num_ranges) -{ - if (num_ranges > MRR_MAX_RANGES) - num_ranges= MRR_MAX_RANGES; +static ulong multi_range_fixed_size(int num_ranges) { + if (num_ranges > MRR_MAX_RANGES) num_ranges = MRR_MAX_RANGES; return num_ranges * sizeof(char *); } /* Return max number of ranges so that fixed part will still fit in buffer. */ -static int -multi_range_max_ranges(int num_ranges, ulong bufsize) -{ - if (num_ranges > MRR_MAX_RANGES) - num_ranges= MRR_MAX_RANGES; +static int multi_range_max_ranges(int num_ranges, ulong bufsize) { + if (num_ranges > MRR_MAX_RANGES) num_ranges = MRR_MAX_RANGES; if (num_ranges * sizeof(char *) > bufsize) - num_ranges= bufsize / sizeof(char *); + num_ranges = bufsize / sizeof(char *); return num_ranges; } /* Return the size in HANDLER_BUFFER of a variable-sized entry. */ -static ulong -multi_range_entry_size(bool use_keyop, ulong reclength) -{ +static ulong multi_range_entry_size(bool use_keyop, ulong reclength) { /* Space for type byte. */ - ulong len= 1; - if (use_keyop) - len+= reclength; + ulong len = 1; + if (use_keyop) len += reclength; return len; } @@ -15078,50 +12993,37 @@ multi_range_entry_size(bool use_keyop, ulong reclength) converted to a hash key operation or needs to be done as an ordered index scan). */ -static ulong -multi_range_max_entry(NDB_INDEX_TYPE keytype, ulong reclength) -{ +static ulong multi_range_max_entry(NDB_INDEX_TYPE keytype, ulong reclength) { return multi_range_entry_size(keytype != ORDERED_INDEX, reclength); } -static uchar & -multi_range_entry_type(uchar *p) -{ - return *p; -} +static uchar &multi_range_entry_type(uchar *p) { return *p; } /* Find the start of the next entry in HANDLER_BUFFER. 
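Editor's illustration of the variable-sized entries these helpers step over
(sizes assume reclength = 100; the numbers are illustrative, not from the
source):
~~~~
// value 0: enum_unique_range        [type][row slot: reclength] -> 1 + 100
// value 1: enum_empty_unique_range  [type][row slot: reclength] -> 1 + 100
// value 2: enum_ordered_range       [type]                      -> 1
// value 3: enum_skip_range          [type]                      -> 1
~~~~
Only ranges converted to key operations (type below enum_ordered_range)
carry a row slot, which is why the helpers below key the entry size off
that comparison.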
*/ -static uchar * -multi_range_next_entry(uchar *p, ulong reclength) -{ - bool use_keyop= multi_range_entry_type(p) < enum_ordered_range; +static uchar *multi_range_next_entry(uchar *p, ulong reclength) { + bool use_keyop = multi_range_entry_type(p) < enum_ordered_range; return p + multi_range_entry_size(use_keyop, reclength); } /* Get pointer to row data (for range converted to key operation). */ -static uchar * -multi_range_row(uchar *p) -{ +static uchar *multi_range_row(uchar *p) { DBUG_ASSERT(multi_range_entry_type(p) == enum_unique_range); return p + 1; } /* Get and put upper layer custom char *, use memcpy() for unaligned access. */ -static char * -multi_range_get_custom(HANDLER_BUFFER *buffer, int range_no) -{ +static char *multi_range_get_custom(HANDLER_BUFFER *buffer, int range_no) { DBUG_ASSERT(range_no < MRR_MAX_RANGES); - char* res; - memcpy(&res, buffer->buffer + range_no*sizeof(char*), sizeof(char*)); + char *res; + memcpy(&res, buffer->buffer + range_no * sizeof(char *), sizeof(char *)); return res; } -static void -multi_range_put_custom(HANDLER_BUFFER *buffer, int range_no, char *custom) -{ +static void multi_range_put_custom(HANDLER_BUFFER *buffer, int range_no, + char *custom) { DBUG_ASSERT(range_no < MRR_MAX_RANGES); // memcpy() required for unaligned access. - memcpy(buffer->buffer + range_no*sizeof(char*), &custom, sizeof(char*)); + memcpy(buffer->buffer + range_no * sizeof(char *), &custom, sizeof(char *)); } /* @@ -15130,23 +13032,20 @@ multi_range_put_custom(HANDLER_BUFFER *buffer, int range_no, char *custom) If a scan is not needed, we use a faster primary/unique key operation instead. */ -static bool -read_multi_needs_scan(NDB_INDEX_TYPE cur_index_type, const KEY *key_info, - const KEY_MULTI_RANGE *r, bool is_pushed) -{ - if (cur_index_type == ORDERED_INDEX || is_pushed) - return true; - if (cur_index_type == PRIMARY_KEY_INDEX || - cur_index_type == UNIQUE_INDEX) +static bool read_multi_needs_scan(NDB_INDEX_TYPE cur_index_type, + const KEY *key_info, const KEY_MULTI_RANGE *r, + bool is_pushed) { + if (cur_index_type == ORDERED_INDEX || is_pushed) return true; + if (cur_index_type == PRIMARY_KEY_INDEX || cur_index_type == UNIQUE_INDEX) return false; DBUG_ASSERT(cur_index_type == PRIMARY_KEY_ORDERED_INDEX || cur_index_type == UNIQUE_ORDERED_INDEX); if (r->start_key.length != key_info->key_length || r->start_key.flag != HA_READ_KEY_EXACT) - return true; // Not exact match, need scan + return true; // Not exact match, need scan if (cur_index_type == UNIQUE_ORDERED_INDEX && - check_null_in_key(key_info, r->start_key.key,r->start_key.length)) - return true; // Can't use for NULL values + check_null_in_key(key_info, r->start_key.key, r->start_key.length)) + return true; // Can't use for NULL values return false; } @@ -15165,24 +13064,19 @@ read_multi_needs_scan(NDB_INDEX_TYPE cur_index_type, const KEY *key_info, See NOTES for handler::multi_range_read_info_const(). 
*/ -ha_rows -ha_ndbcluster::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq, - void *seq_init_param, - uint n_ranges, uint *bufsz, - uint *flags, Cost_estimate *cost) -{ +ha_rows ha_ndbcluster::multi_range_read_info_const( + uint keyno, RANGE_SEQ_IF *seq, void *seq_init_param, uint n_ranges, + uint *bufsz, uint *flags, Cost_estimate *cost) { ha_rows rows; - uint def_flags= *flags; - uint def_bufsz= *bufsz; + uint def_flags = *flags; + uint def_bufsz = *bufsz; DBUG_ENTER("ha_ndbcluster::multi_range_read_info_const"); /* Get cost/flags/mem_usage of default MRR implementation */ - rows= handler::multi_range_read_info_const(keyno, seq, seq_init_param, - n_ranges, &def_bufsz, - &def_flags, cost); - if (unlikely(rows == HA_POS_ERROR)) - { + rows = handler::multi_range_read_info_const( + keyno, seq, seq_init_param, n_ranges, &def_bufsz, &def_flags, cost); + if (unlikely(rows == HA_POS_ERROR)) { DBUG_RETURN(rows); } @@ -15193,15 +13087,12 @@ ha_ndbcluster::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq, capabilities, cost and mrr* flags of @@optimizer_switch. */ if ((*flags & HA_MRR_USE_DEFAULT_IMPL) || - choose_mrr_impl(keyno, n_ranges, rows, bufsz, flags, cost)) - { + choose_mrr_impl(keyno, n_ranges, rows, bufsz, flags, cost)) { DBUG_PRINT("info", ("Default MRR implementation chosen")); - *flags= def_flags; - *bufsz= def_bufsz; + *flags = def_flags; + *bufsz = def_bufsz; DBUG_ASSERT(*flags & HA_MRR_USE_DEFAULT_IMPL); - } - else - { + } else { /* *flags and *bufsz were set by choose_mrr_impl */ DBUG_PRINT("info", ("NDB-MRR implementation chosen")); DBUG_ASSERT(!(*flags & HA_MRR_USE_DEFAULT_IMPL)); @@ -15209,7 +13100,6 @@ ha_ndbcluster::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq, DBUG_RETURN(rows); } - /* Get cost and other information about MRR scan over some sequence of ranges @@ -15217,39 +13107,32 @@ ha_ndbcluster::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq, See handler::multi_range_read_info. 
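Editor's note: both info functions follow the same contract, sketched here
in hedged form. The default implementation's flags and buffer size are
computed first and snapshotted; choose_mrr_impl() then either claims the
scan (clearing HA_MRR_USE_DEFAULT_IMPL and setting *bufsz itself) or
declines, in which case the snapshot is restored:
~~~~
uint def_flags = *flags, def_bufsz = *bufsz;  // snapshot the default answer
if ((*flags & HA_MRR_USE_DEFAULT_IMPL) ||
    choose_mrr_impl(keyno, n_ranges, n_rows, bufsz, flags, cost)) {
  *flags = def_flags;                         // fall back to default MRR
  *bufsz = def_bufsz;
}
~~~~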
*/ -ha_rows -ha_ndbcluster::multi_range_read_info(uint keyno, uint n_ranges, uint n_rows, - uint *bufsz, uint *flags, - Cost_estimate *cost) -{ +ha_rows ha_ndbcluster::multi_range_read_info(uint keyno, uint n_ranges, + uint n_rows, uint *bufsz, + uint *flags, Cost_estimate *cost) { ha_rows res; - uint def_flags= *flags; - uint def_bufsz= *bufsz; + uint def_flags = *flags; + uint def_bufsz = *bufsz; DBUG_ENTER("ha_ndbcluster::multi_range_read_info"); /* Get cost/flags/mem_usage of default MRR implementation */ - res= handler::multi_range_read_info(keyno, n_ranges, n_rows, - &def_bufsz, &def_flags, - cost); - if (unlikely(res == HA_POS_ERROR)) - { + res = handler::multi_range_read_info(keyno, n_ranges, n_rows, &def_bufsz, + &def_flags, cost); + if (unlikely(res == HA_POS_ERROR)) { /* Default implementation can't perform MRR scan => we can't either */ DBUG_RETURN(res); } DBUG_ASSERT(!res); - if ((*flags & HA_MRR_USE_DEFAULT_IMPL) || - choose_mrr_impl(keyno, n_ranges, n_rows, bufsz, flags, cost)) - { + if ((*flags & HA_MRR_USE_DEFAULT_IMPL) || + choose_mrr_impl(keyno, n_ranges, n_rows, bufsz, flags, cost)) { /* Default implementation is chosen */ DBUG_PRINT("info", ("Default MRR implementation chosen")); - *flags= def_flags; - *bufsz= def_bufsz; + *flags = def_flags; + *bufsz = def_bufsz; DBUG_ASSERT(*flags & HA_MRR_USE_DEFAULT_IMPL); - } - else - { + } else { /* *flags and *bufsz were set by choose_mrr_impl */ DBUG_PRINT("info", ("NDB-MRR implementation chosen")); DBUG_ASSERT(!(*flags & HA_MRR_USE_DEFAULT_IMPL)); @@ -15258,16 +13141,18 @@ ha_ndbcluster::multi_range_read_info(uint keyno, uint n_ranges, uint n_rows, } /** - Internals: Choose between Default MRR implementation and + Internals: Choose between Default MRR implementation and native ha_ndbcluster MRR - Make the choice between using Default MRR implementation and ha_ndbcluster-MRR. - This function contains common functionality factored out of multi_range_read_info() - and multi_range_read_info_const(). The function assumes that the default MRR - implementation's applicability requirements are satisfied. + Make the choice between using Default MRR implementation and + ha_ndbcluster-MRR. This function contains common functionality factored out of + multi_range_read_info() and multi_range_read_info_const(). The function + assumes that the default MRR implementation's applicability requirements are + satisfied. @param keyno Index number - @param n_ranges Number of ranges/keys (i.e. intervals) in the range sequence. + @param n_ranges Number of ranges/keys (i.e. intervals) in the range + sequence. @param n_rows E(full rows to be retrieved) @param bufsz OUT If DS-MRR is chosen, buffer use of DS-MRR implementation else the value is not modified @@ -15283,20 +13168,17 @@ ha_ndbcluster::multi_range_read_info(uint keyno, uint n_ranges, uint n_rows, */ bool ha_ndbcluster::choose_mrr_impl(uint keyno, uint n_ranges, ha_rows n_rows, - uint *bufsz, uint *flags, Cost_estimate*) -{ - THD *thd= current_thd; - NDB_INDEX_TYPE key_type= get_index_type(keyno); + uint *bufsz, uint *flags, Cost_estimate *) { + THD *thd = current_thd; + NDB_INDEX_TYPE key_type = get_index_type(keyno); - get_read_set(true, keyno); //read_set needed for uses_blob_value() + get_read_set(true, keyno); // read_set needed for uses_blob_value() /* Disable MRR on blob read and on NULL lookup in unique index. 
*/ if (!thd->optimizer_switch_flag(OPTIMIZER_SWITCH_MRR) || uses_blob_value(table->read_set) || - ( key_type == UNIQUE_INDEX && - has_null_in_unique_index(keyno) && - !(*flags & HA_MRR_NO_NULL_ENDPOINTS))) - { + (key_type == UNIQUE_INDEX && has_null_in_unique_index(keyno) && + !(*flags & HA_MRR_NO_NULL_ENDPOINTS))) { /* Use the default implementation, don't modify args: See comments */ return true; } @@ -15306,28 +13188,24 @@ bool ha_ndbcluster::choose_mrr_impl(uint keyno, uint n_ranges, ha_rows n_rows, * sufficient buffer space for NDB-MRR */ { - uint save_bufsize= *bufsz; - ulong reclength= table_share->reclength; - uint entry_size= multi_range_max_entry(key_type, reclength); - uint min_total_size= entry_size + multi_range_fixed_size(1); + uint save_bufsize = *bufsz; + ulong reclength = table_share->reclength; + uint entry_size = multi_range_max_entry(key_type, reclength); + uint min_total_size = entry_size + multi_range_fixed_size(1); DBUG_PRINT("info", ("MRR bufsize suggested=%u want=%u limit=%d", save_bufsize, (uint)(n_rows + 1) * entry_size, (*flags & HA_MRR_LIMITS) != 0)); - if (save_bufsize < min_total_size) - { - if (*flags & HA_MRR_LIMITS) - { + if (save_bufsize < min_total_size) { + if (*flags & HA_MRR_LIMITS) { /* Too small buffer limit for native NDB-MRR. */ return true; } - *bufsz= min_total_size; - } - else - { - uint max_ranges= (n_ranges > 0) ? n_ranges : MRR_MAX_RANGES; - *bufsz= std::min(save_bufsize, - (uint)(n_rows * entry_size + - multi_range_fixed_size(max_ranges))); + *bufsz = min_total_size; + } else { + uint max_ranges = (n_ranges > 0) ? n_ranges : MRR_MAX_RANGES; + *bufsz = std::min( + save_bufsize, + (uint)(n_rows * entry_size + multi_range_fixed_size(max_ranges))); } DBUG_PRINT("info", ("MRR bufsize set to %u", *bufsz)); } @@ -15336,18 +13214,15 @@ bool ha_ndbcluster::choose_mrr_impl(uint keyno, uint n_ranges, ha_rows n_rows, * Cost based MRR optimization is known to be incorrect. * Disabled -> always use NDB-MRR whenever possible */ - *flags&= ~HA_MRR_USE_DEFAULT_IMPL; - *flags|= HA_MRR_SUPPORT_SORTED; + *flags &= ~HA_MRR_USE_DEFAULT_IMPL; + *flags |= HA_MRR_SUPPORT_SORTED; return false; } - -int ha_ndbcluster::multi_range_read_init(RANGE_SEQ_IF *seq_funcs, - void *seq_init_param, - uint n_ranges, uint mode, - HANDLER_BUFFER *buffer) -{ +int ha_ndbcluster::multi_range_read_init(RANGE_SEQ_IF *seq_funcs, + void *seq_init_param, uint n_ranges, + uint mode, HANDLER_BUFFER *buffer) { int error; DBUG_ENTER("ha_ndbcluster::multi_range_read_init"); @@ -15355,43 +13230,39 @@ int ha_ndbcluster::multi_range_read_init(RANGE_SEQ_IF *seq_funcs, If supplied buffer is smaller than needed for just one range, we cannot do multi_range_read. 
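Editor's worked example of that minimum-size test (illustrative values,
8-byte pointers assumed): with reclength = 100 on a non-ordered index,
multi_range_max_entry() is 1 + 100 = 101 bytes and multi_range_fixed_size(1)
is sizeof(char *) = 8 bytes, so any HANDLER_BUFFER smaller than 109 bytes
forces the fallback to the default MRR implementation below.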
*/ - const ulong bufsize= multi_range_buffer_size(buffer); - - if (mode & HA_MRR_USE_DEFAULT_IMPL - || bufsize < multi_range_fixed_size(1) + - multi_range_max_entry(get_index_type(active_index), - table_share->reclength) - || (m_pushed_join_operation==PUSHED_ROOT && - !m_disable_pushed_join && - !m_pushed_join_member->get_query_def().isScanQuery()) - || m_delete_cannot_batch || m_update_cannot_batch) - { - m_disable_multi_read= true; + const ulong bufsize = multi_range_buffer_size(buffer); + + if (mode & HA_MRR_USE_DEFAULT_IMPL || + bufsize < multi_range_fixed_size(1) + + multi_range_max_entry(get_index_type(active_index), + table_share->reclength) || + (m_pushed_join_operation == PUSHED_ROOT && !m_disable_pushed_join && + !m_pushed_join_member->get_query_def().isScanQuery()) || + m_delete_cannot_batch || m_update_cannot_batch) { + m_disable_multi_read = true; DBUG_RETURN(handler::multi_range_read_init(seq_funcs, seq_init_param, n_ranges, mode, buffer)); } /** - * There may still be an open m_multi_cursor from the previous mrr access on this handler. - * Close it now to free up resources for this NdbScanOperation. - */ - if (unlikely((error= close_scan()))) - DBUG_RETURN(error); + * There may still be an open m_multi_cursor from the previous mrr access on + * this handler. Close it now to free up resources for this NdbScanOperation. + */ + if (unlikely((error = close_scan()))) DBUG_RETURN(error); - m_disable_multi_read= false; + m_disable_multi_read = false; - mrr_is_output_sorted= (mode & HA_MRR_SORTED); + mrr_is_output_sorted = (mode & HA_MRR_SORTED); /* Copy arguments into member variables */ - multi_range_buffer= buffer; - mrr_funcs= *seq_funcs; - mrr_iter= mrr_funcs.init(seq_init_param, n_ranges, mode); - ranges_in_seq= n_ranges; - m_range_res= mrr_funcs.next(mrr_iter, &mrr_cur_range); + multi_range_buffer = buffer; + mrr_funcs = *seq_funcs; + mrr_iter = mrr_funcs.init(seq_init_param, n_ranges, mode); + ranges_in_seq = n_ranges; + m_range_res = mrr_funcs.next(mrr_iter, &mrr_cur_range); const bool mrr_need_range_assoc = !(mode & HA_MRR_NO_ASSOCIATION); - if (mrr_need_range_assoc) - { + if (mrr_need_range_assoc) { ha_statistic_increment(&System_status_var::ha_multi_range_read_init_count); } @@ -15409,26 +13280,23 @@ int ha_ndbcluster::multi_range_read_init(RANGE_SEQ_IF *seq_funcs, multi_range_read_init() does not correctly set the error status, so we get an assert on missing result status in net_end_statement(). 
*/ - first_running_range= 0; - first_unstarted_range= 0; + first_running_range = 0; + first_unstarted_range = 0; DBUG_RETURN(0); } - -int ha_ndbcluster::multi_range_start_retrievals(uint starting_range) -{ - KEY* key_info= table->key_info + active_index; - ulong reclength= table_share->reclength; - const NdbOperation* op; - NDB_INDEX_TYPE cur_index_type= get_index_type(active_index); +int ha_ndbcluster::multi_range_start_retrievals(uint starting_range) { + KEY *key_info = table->key_info + active_index; + ulong reclength = table_share->reclength; + const NdbOperation *op; + NDB_INDEX_TYPE cur_index_type = get_index_type(active_index); const NdbOperation *oplist[MRR_MAX_RANGES]; - uint num_keyops= 0; - NdbTransaction *trans= m_thd_ndb->trans; + uint num_keyops = 0; + NdbTransaction *trans = m_thd_ndb->trans; int error; - const bool is_pushed= - check_if_pushable(NdbQueryOperationDef::OrderedIndexScan, - active_index); + const bool is_pushed = + check_if_pushable(NdbQueryOperationDef::OrderedIndexScan, active_index); DBUG_ENTER("multi_range_start_retrievals"); @@ -15443,7 +13311,7 @@ int ha_ndbcluster::multi_range_start_retrievals(uint starting_range) * pk-op 4 pk-op 4 * range 5 * pk-op 6 pk-op 6 - */ + */ /* We loop over all ranges, converting into primary/unique key operations if @@ -15454,11 +13322,11 @@ int ha_ndbcluster::multi_range_start_retrievals(uint starting_range) */ DBUG_ASSERT(cur_index_type != UNDEFINED_INDEX); - DBUG_ASSERT(m_multi_cursor==NULL); - DBUG_ASSERT(m_active_query==NULL); + DBUG_ASSERT(m_multi_cursor == NULL); + DBUG_ASSERT(m_active_query == NULL); const NdbOperation::LockMode lm = get_ndb_lock_mode(m_lock.type); - const uchar *end_of_buffer= multi_range_buffer->buffer_end; + const uchar *end_of_buffer = multi_range_buffer->buffer_end; /* Normally we should have sufficient buffer for the whole fixed_sized part. @@ -15468,38 +13336,33 @@ int ha_ndbcluster::multi_range_start_retrievals(uint starting_range) We already checked (in multi_range_read_init()) that we got enough buffer for at least one range. 
*/ - uint min_entry_size= - multi_range_entry_size(!read_multi_needs_scan(cur_index_type, key_info, - &mrr_cur_range, is_pushed), - reclength); - const ulong bufsize= multi_range_buffer_size(multi_range_buffer); - int max_range= multi_range_max_ranges(ranges_in_seq, - bufsize - min_entry_size); + uint min_entry_size = + multi_range_entry_size(!read_multi_needs_scan(cur_index_type, key_info, + &mrr_cur_range, is_pushed), + reclength); + const ulong bufsize = multi_range_buffer_size(multi_range_buffer); + int max_range = + multi_range_max_ranges(ranges_in_seq, bufsize - min_entry_size); DBUG_ASSERT(max_range > 0); - uchar *row_buf= multi_range_buffer->buffer + multi_range_fixed_size(max_range); - m_multi_range_result_ptr= row_buf; + uchar *row_buf = + multi_range_buffer->buffer + multi_range_fixed_size(max_range); + m_multi_range_result_ptr = row_buf; - int range_no= 0; - int mrr_range_no= starting_range; - bool any_real_read= false; + int range_no = 0; + int mrr_range_no = starting_range; + bool any_real_read = false; - if (m_read_before_write_removal_possible) - check_read_before_write_removal(); + if (m_read_before_write_removal_possible) check_read_before_write_removal(); - for (; - !m_range_res; - range_no++, m_range_res= mrr_funcs.next(mrr_iter, &mrr_cur_range)) - { - if (range_no >= max_range) - break; - bool need_scan= - read_multi_needs_scan(cur_index_type, key_info, &mrr_cur_range, is_pushed); + for (; !m_range_res; + range_no++, m_range_res = mrr_funcs.next(mrr_iter, &mrr_cur_range)) { + if (range_no >= max_range) break; + bool need_scan = read_multi_needs_scan(cur_index_type, key_info, + &mrr_cur_range, is_pushed); if (row_buf + multi_range_entry_size(!need_scan, reclength) > end_of_buffer) break; - if (need_scan) - { - if (range_no > NdbIndexScanOperation::MaxRangeNo) - break; + if (need_scan) { + if (range_no > NdbIndexScanOperation::MaxRangeNo) break; /* Check how much KEYINFO data we already used for index bounds, and split the MRR here if it exceeds a certain limit. This way we avoid @@ -15507,106 +13370,89 @@ int ha_ndbcluster::multi_range_start_retrievals(uint starting_range) The limit used is based on the value MAX_KEY_SIZE_IN_WORDS. 
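The buffer carving in the hunk above follows one fixed rule: a header area with one slot per range, then the packed row entries. A rough model of that layout, with invented slot sizes standing in for multi_range_fixed_size() and multi_range_entry_size():

#include <cstdint>

static const uint32_t kBatchHeader = 8;  // assumed fixed batch overhead
static const uint32_t kRangeSlot = 4;    // assumed bytes per range slot

static uint32_t fixed_part(uint32_t n_ranges) {
  return kBatchHeader + kRangeSlot * n_ranges;
}

// Given the total buffer and a per-entry size, how many ranges can be
// started so that at least one row entry still fits; mirrors the
// max_range computation above (one range is known to fit already).
static uint32_t usable_ranges(uint32_t bufsize, uint32_t entry_size,
                              uint32_t wanted) {
  uint32_t n = wanted;
  while (n > 1 && fixed_part(n) + entry_size > bufsize) n--;
  return n;
}

// Row entries start right after the fixed headers, as row_buf does above.
static uint8_t *row_area(uint8_t *buffer, uint32_t n_ranges) {
  return buffer + fixed_part(n_ranges);
}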
*/ - if (m_multi_cursor && m_multi_cursor->getCurrentKeySize() >= 1000) - break; + if (m_multi_cursor && m_multi_cursor->getCurrentKeySize() >= 1000) break; } mrr_range_no++; multi_range_put_custom(multi_range_buffer, range_no, mrr_cur_range.ptr); part_id_range part_spec; - if (m_use_partition_pruning) - { + if (m_use_partition_pruning) { get_partition_set(table, table->record[0], active_index, - &mrr_cur_range.start_key, - &part_spec); + &mrr_cur_range.start_key, &part_spec); DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u", part_spec.start_part, part_spec.end_part)); /* If partition pruning has found no partition in set we can skip this scan */ - if (part_spec.start_part > part_spec.end_part) - { + if (part_spec.start_part > part_spec.end_part) { /* We can skip this range since the key won't fit into any partition */ - multi_range_entry_type(row_buf)= enum_skip_range; - row_buf= multi_range_next_entry(row_buf, reclength); + multi_range_entry_type(row_buf) = enum_skip_range; + row_buf = multi_range_next_entry(row_buf, reclength); continue; } - if (!trans && - (part_spec.start_part == part_spec.end_part)) - if (unlikely(!(trans= start_transaction_part_id(part_spec.start_part, - error)))) + if (!trans && (part_spec.start_part == part_spec.end_part)) + if (unlikely(!(trans = start_transaction_part_id(part_spec.start_part, + error)))) DBUG_RETURN(error); } - if (need_scan) - { - if (!trans) - { + if (need_scan) { + if (!trans) { // ToDo see if we can use start_transaction_key here instead - if (!m_use_partition_pruning) - { + if (!m_use_partition_pruning) { get_partition_set(table, table->record[0], active_index, - &mrr_cur_range.start_key, - &part_spec); - if (part_spec.start_part == part_spec.end_part) - { - if (unlikely(!(trans= start_transaction_part_id(part_spec.start_part, - error)))) + &mrr_cur_range.start_key, &part_spec); + if (part_spec.start_part == part_spec.end_part) { + if (unlikely(!(trans = start_transaction_part_id( + part_spec.start_part, error)))) DBUG_RETURN(error); - } - else if (unlikely(!(trans= start_transaction(error)))) + } else if (unlikely(!(trans = start_transaction(error)))) DBUG_RETURN(error); - } - else if (unlikely(!(trans= start_transaction(error)))) + } else if (unlikely(!(trans = start_transaction(error)))) DBUG_RETURN(error); } - any_real_read= true; + any_real_read = true; DBUG_PRINT("info", ("any_real_read= true")); /* Create the scan operation for the first scan range. */ - if (check_if_pushable(NdbQueryOperationDef::OrderedIndexScan, - active_index)) - { + if (check_if_pushable(NdbQueryOperationDef::OrderedIndexScan, + active_index)) { DBUG_ASSERT(!m_read_before_write_removal_used); - if (!m_active_query) - { - const int error= create_pushed_join(); - if (unlikely(error)) - DBUG_RETURN(error); + if (!m_active_query) { + const int error = create_pushed_join(); + if (unlikely(error)) DBUG_RETURN(error); - NdbQuery* const query= m_active_query; + NdbQuery *const query = m_active_query; if (mrr_is_output_sorted && - query->getQueryOperation((uint)PUSHED_ROOT)->setOrdering(NdbQueryOptions::ScanOrdering_ascending)) + query->getQueryOperation((uint)PUSHED_ROOT) + ->setOrdering(NdbQueryOptions::ScanOrdering_ascending)) ERR_RETURN(query->getNdbError()); } - } // check_if_pushable() - else - if (!m_multi_cursor) - { - /* Do a multi-range index scan for ranges not done by primary/unique key. */ + } // check_if_pushable() + else if (!m_multi_cursor) { + /* Do a multi-range index scan for ranges not done by primary/unique + * key. 
*/ NdbScanOperation::ScanOptions options; NdbInterpretedCode code(m_table); - options.optionsPresent= - NdbScanOperation::ScanOptions::SO_SCANFLAGS | - NdbScanOperation::ScanOptions::SO_PARALLEL; + options.optionsPresent = NdbScanOperation::ScanOptions::SO_SCANFLAGS | + NdbScanOperation::ScanOptions::SO_PARALLEL; - options.scan_flags= - NdbScanOperation::SF_ReadRangeNo | - NdbScanOperation::SF_MultiRange; + options.scan_flags = + NdbScanOperation::SF_ReadRangeNo | NdbScanOperation::SF_MultiRange; if (lm == NdbOperation::LM_Read) - options.scan_flags|= NdbScanOperation::SF_KeyInfo; + options.scan_flags |= NdbScanOperation::SF_KeyInfo; if (mrr_is_output_sorted) - options.scan_flags|= NdbScanOperation::SF_OrderByFull; + options.scan_flags |= NdbScanOperation::SF_OrderByFull; - options.parallel= DEFAULT_PARALLELISM; + options.parallel = DEFAULT_PARALLELISM; NdbOperation::GetValueSpec gets[2]; if (table_share->primary_key == MAX_KEY) @@ -15616,183 +13462,156 @@ int ha_ndbcluster::multi_range_start_retrievals(uint starting_range) get_read_set(true, active_index); /* Define scan */ - NdbIndexScanOperation *scanOp= trans->scanIndex - (m_index[active_index].ndb_record_key, - m_ndb_record, - lm, - m_table_map->get_column_mask(table->read_set), - NULL, /* All bounds specified below */ - &options, - sizeof(NdbScanOperation::ScanOptions)); - - if (!scanOp) - ERR_RETURN(trans->getNdbError()); + NdbIndexScanOperation *scanOp = + trans->scanIndex(m_index[active_index].ndb_record_key, m_ndb_record, + lm, m_table_map->get_column_mask(table->read_set), + NULL, /* All bounds specified below */ + &options, sizeof(NdbScanOperation::ScanOptions)); - m_multi_cursor= scanOp; + if (!scanOp) ERR_RETURN(trans->getNdbError()); + + m_multi_cursor = scanOp; /* Can't have blobs in multi range read */ DBUG_ASSERT(!uses_blob_value(table->read_set)); - /* We set m_next_row=0 to say that no row was fetched from the scan yet. */ - m_next_row= 0; + /* We set m_next_row=0 to say that no row was fetched from the scan yet. + */ + m_next_row = 0; } Ndb::PartitionSpec ndbPartitionSpec; - const Ndb::PartitionSpec* ndbPartSpecPtr= NULL; + const Ndb::PartitionSpec *ndbPartSpecPtr = NULL; /* If this table uses user-defined partitioning, use MySQLD provided * partition info as pruning info * Otherwise, scan range pruning is performed automatically by * NDBAPI based on distribution key values. */ - if (m_use_partition_pruning && - m_user_defined_partitioning && - (part_spec.start_part == part_spec.end_part)) - { - DBUG_PRINT("info", ("Range on user-def-partitioned table can be pruned to part %u", - part_spec.start_part)); - ndbPartitionSpec.type= Ndb::PartitionSpec::PS_USER_DEFINED; - ndbPartitionSpec.UserDefined.partitionId= part_spec.start_part; - ndbPartSpecPtr= &ndbPartitionSpec; + if (m_use_partition_pruning && m_user_defined_partitioning && + (part_spec.start_part == part_spec.end_part)) { + DBUG_PRINT( + "info", + ("Range on user-def-partitioned table can be pruned to part %u", + part_spec.start_part)); + ndbPartitionSpec.type = Ndb::PartitionSpec::PS_USER_DEFINED; + ndbPartitionSpec.UserDefined.partitionId = part_spec.start_part; + ndbPartSpecPtr = &ndbPartitionSpec; } /* Include this range in the ordered index scan.
*/ NdbIndexScanOperation::IndexBound bound; - compute_index_bounds(bound, key_info, - &mrr_cur_range.start_key, &mrr_cur_range.end_key, 0); - bound.range_no= range_no; + compute_index_bounds(bound, key_info, &mrr_cur_range.start_key, + &mrr_cur_range.end_key, 0); + bound.range_no = range_no; - const NdbRecord *key_rec= m_index[active_index].ndb_record_key; - if (m_active_query) - { + const NdbRecord *key_rec = m_index[active_index].ndb_record_key; + if (m_active_query) { DBUG_PRINT("info", ("setBound:%d, for pushed join", bound.range_no)); - if (m_active_query->setBound(key_rec, &bound)) - { + if (m_active_query->setBound(key_rec, &bound)) { ERR_RETURN(trans->getNdbError()); } - } - else - { - if (m_multi_cursor->setBound(m_index[active_index].ndb_record_key, - bound, - ndbPartSpecPtr, // Only for user-def tables - sizeof(Ndb::PartitionSpec))) - { + } else { + if (m_multi_cursor->setBound( + m_index[active_index].ndb_record_key, bound, + ndbPartSpecPtr, // Only for user-def tables + sizeof(Ndb::PartitionSpec))) { ERR_RETURN(trans->getNdbError()); } } - multi_range_entry_type(row_buf)= enum_ordered_range; - row_buf= multi_range_next_entry(row_buf, reclength); - } - else - { - multi_range_entry_type(row_buf)= enum_unique_range; + multi_range_entry_type(row_buf) = enum_ordered_range; + row_buf = multi_range_next_entry(row_buf, reclength); + } else { + multi_range_entry_type(row_buf) = enum_unique_range; - if (!trans) - { + if (!trans) { DBUG_ASSERT(active_index != MAX_KEY); - if (unlikely(!(trans= start_transaction_key(active_index, - mrr_cur_range.start_key.key, - error)))) + if (unlikely(!(trans = start_transaction_key( + active_index, mrr_cur_range.start_key.key, error)))) DBUG_RETURN(error); } - if (m_read_before_write_removal_used) - { + if (m_read_before_write_removal_used) { DBUG_PRINT("info", ("m_read_before_write_removal_used == true")); /* Key will later be returned as result record. * Save it in 'row_buf' from where it will later be retrieved. */ key_restore(multi_range_row(row_buf), - pointer_cast<const uchar*>(mrr_cur_range.start_key.key), + pointer_cast<const uchar *>(mrr_cur_range.start_key.key), key_info, key_info->key_length); - op= NULL; // read_before_write_removal - } - else - { - any_real_read= true; + op = NULL; // read_before_write_removal + } else { + any_real_read = true; DBUG_PRINT("info", ("any_real_read= true")); /* Convert to primary/unique key operation. */ Uint32 partitionId; - Uint32* ppartitionId = NULL; + Uint32 *ppartitionId = NULL; if (m_user_defined_partitioning && (cur_index_type == PRIMARY_KEY_ORDERED_INDEX || - cur_index_type == PRIMARY_KEY_INDEX)) - { - partitionId=part_spec.start_part; - ppartitionId=&partitionId; + cur_index_type == PRIMARY_KEY_INDEX)) { + partitionId = part_spec.start_part; + ppartitionId = &partitionId; } /** * 'Pushable codepath' is incomplete and expected not - * to be produced as make_join_pushed() handle + * to be produced as make_join_pushed() handles * AT_MULTI_UNIQUE_KEY as non-pushable.
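For the ordered-range case the two hunks above boil down to: define one index scan carrying the multi-range flags, then attach each range as a bound tagged with its range_no. A condensed sketch of the first step, reusing the same NDB API names; the transaction, records and column mask are assumed to already exist, and the lock mode is fixed for brevity:

#include <NdbApi.hpp>

// Condensed model of the scan definition above; bounds are added later.
NdbIndexScanOperation *define_mrr_scan(NdbTransaction *trans,
                                       const NdbRecord *key_record,
                                       const NdbRecord *row_record,
                                       const unsigned char *mask,
                                       bool sorted) {
  NdbScanOperation::ScanOptions options;
  options.optionsPresent = NdbScanOperation::ScanOptions::SO_SCANFLAGS |
                           NdbScanOperation::ScanOptions::SO_PARALLEL;
  // SF_ReadRangeNo makes every returned row carry the range it came from;
  // SF_MultiRange allows several bounds on the same scan operation.
  options.scan_flags =
      NdbScanOperation::SF_ReadRangeNo | NdbScanOperation::SF_MultiRange;
  if (sorted) options.scan_flags |= NdbScanOperation::SF_OrderByFull;
  options.parallel = 0;  // assumption: 0 selects the default parallelism

  return trans->scanIndex(key_record, row_record,
                          NdbOperation::LM_CommittedRead, mask,
                          NULL,  // bounds are attached afterwards
                          &options, sizeof(NdbScanOperation::ScanOptions));
}

Each subsequent setBound() call then carries bound.range_no, which is what lets multi_range_read_next() route returned rows back to the range that produced them.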
*/ - if (m_pushed_join_operation==PUSHED_ROOT && - !m_disable_pushed_join && - !m_pushed_join_member->get_query_def().isScanQuery()) - { - op= NULL; // Avoid compiler warning + if (m_pushed_join_operation == PUSHED_ROOT && !m_disable_pushed_join && + !m_pushed_join_member->get_query_def().isScanQuery()) { + op = NULL; // Avoid compiler warning DBUG_ASSERT(false); // FIXME: Incomplete code, should not be executed DBUG_ASSERT(lm == NdbOperation::LM_CommittedRead); - const int error = - pk_unique_index_read_key_pushed(active_index, - mrr_cur_range.start_key.key); - if (unlikely(error)) - { + const int error = pk_unique_index_read_key_pushed( + active_index, mrr_cur_range.start_key.key); + if (unlikely(error)) { DBUG_RETURN(error); } - } - else - { - if (m_pushed_join_operation == PUSHED_ROOT) - { - DBUG_PRINT("info", ("Cannot push join due to incomplete implementation.")); + } else { + if (m_pushed_join_operation == PUSHED_ROOT) { + DBUG_PRINT("info", + ("Cannot push join due to incomplete implementation.")); m_thd_ndb->m_pushed_queries_dropped++; } - if (!(op= pk_unique_index_read_key(active_index, - mrr_cur_range.start_key.key, - multi_range_row(row_buf), lm, - ppartitionId))) + if (!(op = pk_unique_index_read_key( + active_index, mrr_cur_range.start_key.key, + multi_range_row(row_buf), lm, ppartitionId))) ERR_RETURN(trans->getNdbError()); } } - oplist[num_keyops++]= op; - row_buf= multi_range_next_entry(row_buf, reclength); + oplist[num_keyops++] = op; + row_buf = multi_range_next_entry(row_buf, reclength); } } - if (m_active_query != NULL && - m_pushed_join_member->get_query_def().isScanQuery()) - { + if (m_active_query != NULL && + m_pushed_join_member->get_query_def().isScanQuery()) { m_thd_ndb->m_scan_count++; - if (mrr_is_output_sorted) - { + if (mrr_is_output_sorted) { m_thd_ndb->m_sorted_scan_count++; } - bool prunable= false; + bool prunable = false; if (unlikely(m_active_query->isPrunable(prunable) != 0)) ERR_RETURN(m_active_query->getNdbError()); - if (prunable) - m_thd_ndb->m_pruned_scan_count++; + if (prunable) m_thd_ndb->m_pruned_scan_count++; - DBUG_PRINT("info", ("Is MRR scan-query pruned to 1 partition? :%u", prunable)); + DBUG_PRINT("info", + ("Is MRR scan-query pruned to 1 partition? :%u", prunable)); DBUG_ASSERT(!m_multi_cursor); } - if (m_multi_cursor) - { + if (m_multi_cursor) { DBUG_PRINT("info", ("Is MRR scan pruned to 1 partition? :%u", m_multi_cursor->getPruned())); m_thd_ndb->m_scan_count++; - m_thd_ndb->m_pruned_scan_count += (m_multi_cursor->getPruned()? 1 : 0); - if (mrr_is_output_sorted) - { + m_thd_ndb->m_pruned_scan_count += (m_multi_cursor->getPruned() ? 1 : 0); + if (mrr_is_output_sorted) { m_thd_ndb->m_sorted_scan_count++; } } @@ -15800,8 +13619,7 @@ int ha_ndbcluster::multi_range_start_retrievals(uint starting_range) if (any_real_read && execute_no_commit_ie(m_thd_ndb, trans)) ERR_RETURN(trans->getNdbError()); - if (!m_range_res) - { + if (!m_range_res) { DBUG_PRINT("info", ("Split MRR read, %d-%d of %d bufsize=%lu used=%lu range_no=%d", starting_range, mrr_range_no - 1, ranges_in_seq, @@ -15814,14 +13632,13 @@ int ha_ndbcluster::multi_range_start_retrievals(uint starting_range) This as we don't want mysqld to reuse the buffer when we read the remaining ranges. 
*/ - multi_range_buffer->end_of_used_area= multi_range_buffer->buffer_end; - } - else - multi_range_buffer->end_of_used_area= row_buf; + multi_range_buffer->end_of_used_area = multi_range_buffer->buffer_end; + } else + multi_range_buffer->end_of_used_area = row_buf; - first_running_range= first_range_in_batch= starting_range; - first_unstarted_range= mrr_range_no; - m_current_range_no= 0; + first_running_range = first_range_in_batch = starting_range; + first_unstarted_range = mrr_range_no; + m_current_range_no = 0; /* Now we need to inspect all ranges that were converted to key operations. @@ -15831,26 +13648,21 @@ int ha_ndbcluster::multi_range_start_retrievals(uint starting_range) actually get to it in multi_range_next_entry() (we may have done further execute()'s in a different handler object during joins eg.) */ - row_buf= m_multi_range_result_ptr; - uint op_idx= 0; - for (uint r= first_range_in_batch; r < first_unstarted_range; r++) - { - uchar &type_loc= multi_range_entry_type(row_buf); - row_buf= multi_range_next_entry(row_buf, reclength); - if (type_loc >= enum_ordered_range) - continue; + row_buf = m_multi_range_result_ptr; + uint op_idx = 0; + for (uint r = first_range_in_batch; r < first_unstarted_range; r++) { + uchar &type_loc = multi_range_entry_type(row_buf); + row_buf = multi_range_next_entry(row_buf, reclength); + if (type_loc >= enum_ordered_range) continue; DBUG_ASSERT(op_idx < MRR_MAX_RANGES); - if ((op= oplist[op_idx++]) == NULL) - continue; // read_before_write_removal + if ((op = oplist[op_idx++]) == NULL) continue; // read_before_write_removal - const NdbError &error= op->getNdbError(); - if (error.code != 0) - { + const NdbError &error = op->getNdbError(); + if (error.code != 0) { if (error.classification == NdbError::NoDataFound) - type_loc= enum_empty_unique_range; - else - { + type_loc = enum_empty_unique_range; + else { /* This shouldn't really happen. @@ -15861,7 +13673,7 @@ int ha_ndbcluster::multi_range_start_retrievals(uint starting_range) (But we can still safely return an error code in non-debug builds). */ DBUG_ASSERT(false); - ERR_RETURN(error); /* purecov: deadcode */ + ERR_RETURN(error); /* purecov: deadcode */ } } } @@ -15869,25 +13681,20 @@ int ha_ndbcluster::multi_range_start_retrievals(uint starting_range) DBUG_RETURN(0); } -int ha_ndbcluster::multi_range_read_next(char **range_info) -{ +int ha_ndbcluster::multi_range_read_next(char **range_info) { DBUG_ENTER("ha_ndbcluster::multi_range_read_next"); - if (m_disable_multi_read) - { + if (m_disable_multi_read) { DBUG_RETURN(handler::multi_range_read_next(range_info)); } - for(;;) - { + for (;;) { /* for each range (we should have remembered the number) */ - while (first_running_range < first_unstarted_range) - { - uchar *row_buf= m_multi_range_result_ptr; - int expected_range_no= first_running_range - first_range_in_batch; + while (first_running_range < first_unstarted_range) { + uchar *row_buf = m_multi_range_result_ptr; + int expected_range_no = first_running_range - first_range_in_batch; - switch (multi_range_entry_type(row_buf)) - { + switch (multi_range_entry_type(row_buf)) { case enum_skip_range: case enum_empty_unique_range: /* Nothing in this range; continue with next. */ @@ -15899,60 +13706,53 @@ int ha_ndbcluster::multi_range_read_next(char **range_info) range. 
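At its core, multi_range_read_next() below is a three-way dispatch on the per-range entry type written by multi_range_start_retrievals(). The toy model here keeps only that control flow; the entry kinds mirror enum_skip_range, enum_empty_unique_range, enum_unique_range and enum_ordered_range, and everything else is simplified away:

#include <cstdint>

enum EntryType : uint8_t { kSkip, kEmptyUnique, kUnique, kOrdered };

struct Entry { EntryType type; /* row data follows in the real buffer */ };

// Returns the next entry that can yield a row, or nullptr when done.
const Entry *next_row(const Entry *cur, const Entry *end) {
  while (cur != end) {
    switch (cur->type) {
      case kSkip:
      case kEmptyUnique:
        ++cur;  // nothing in this range, move to the next one
        break;
      case kUnique:
        return cur;  // row was materialized by a pk/unique key read
      case kOrdered:
        return cur;  // rows stream in from the shared index scan
    }
  }
  return nullptr;
}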
*/ first_running_range++; - m_multi_range_result_ptr= - multi_range_next_entry(m_multi_range_result_ptr, - table_share->reclength); + m_multi_range_result_ptr = multi_range_next_entry( + m_multi_range_result_ptr, table_share->reclength); /* Clear m_active_cursor; it is used as a flag in update_row() / delete_row() to know whether the current tuple is from a scan or pk operation. */ - m_active_cursor= NULL; + m_active_cursor = NULL; /* Return the record. */ - *range_info= multi_range_get_custom(multi_range_buffer, - expected_range_no); + *range_info = + multi_range_get_custom(multi_range_buffer, expected_range_no); memcpy(table->record[0], multi_range_row(row_buf), table_share->stored_rec_length); - if (unlikely(!m_cond.check_condition())) - { - continue; // 'False', move to next range + if (unlikely(!m_cond.check_condition())) { + continue; // 'False', move to next range } - if(table->has_gcol()) - { + if (table->has_gcol()) { update_generated_read_fields(table->record[0], table); } - DBUG_ASSERT(pushed_cond == nullptr || const_cast<Item*>(pushed_cond)->val_int()); + DBUG_ASSERT(pushed_cond == nullptr || + const_cast<Item *>(pushed_cond)->val_int()); DBUG_RETURN(0); case enum_ordered_range: /* An index scan range. */ { int res; - if ((res= read_multi_range_fetch_next()) != 0) - { - *range_info= multi_range_get_custom(multi_range_buffer, - expected_range_no); + if ((res = read_multi_range_fetch_next()) != 0) { + *range_info = + multi_range_get_custom(multi_range_buffer, expected_range_no); first_running_range++; - m_multi_range_result_ptr= - multi_range_next_entry(m_multi_range_result_ptr, - table_share->reclength); + m_multi_range_result_ptr = multi_range_next_entry( + m_multi_range_result_ptr, table_share->reclength); DBUG_RETURN(res); } } - if (!m_next_row) - { + if (!m_next_row) { /* The whole scan is done, and the cursor has been closed. So nothing more for this range. Move to next. */ break; - } - else - { - int current_range_no= m_current_range_no; + } else { + int current_range_no = m_current_range_no; /* For a sorted index scan, we will receive rows in increasing range_no order, so we can return ranges in order, pausing when @@ -15963,22 +13763,20 @@ int ha_ndbcluster::multi_range_read_next(char **range_info) fragment followed by a low range_no from another fragment. So we need to process all index scan ranges together. */ - if (!mrr_is_output_sorted || expected_range_no == current_range_no) - { - *range_info= multi_range_get_custom(multi_range_buffer, - current_range_no); + if (!mrr_is_output_sorted || + expected_range_no == current_range_no) { + *range_info = + multi_range_get_custom(multi_range_buffer, current_range_no); /* Copy out data from the new row. */ - const int ignore = - unpack_record_and_set_generated_fields(table->record[0], - m_next_row); + const int ignore = unpack_record_and_set_generated_fields( + table->record[0], m_next_row); /* Mark that we have used this row, so we need to fetch a new one on the next call. */ - m_next_row= 0; + m_next_row = 0; - if (unlikely(ignore)) - { + if (unlikely(ignore)) { /* Not a valid row, continue with next row */ break; } @@ -15987,14 +13785,14 @@ int ha_ndbcluster::multi_range_read_next(char **range_info) delete_row() to know whether the current tuple is from a scan or pk operation.
*/ - m_active_cursor= m_multi_cursor; + m_active_cursor = m_multi_cursor; - DBUG_ASSERT(pushed_cond==nullptr || const_cast<Item*>(pushed_cond)->val_int()); + DBUG_ASSERT(pushed_cond == nullptr || + const_cast<Item *>(pushed_cond)->val_int()); DBUG_RETURN(0); } - if (current_range_no > expected_range_no) - { + if (current_range_no > expected_range_no) { /* Nothing more in scan for this range. Move to next. */ break; } @@ -16004,7 +13802,7 @@ int ha_ndbcluster::multi_range_read_next(char **range_info) the order we requested them. */ DBUG_ASSERT(0); - break; // Attempt to carry on + break; // Attempt to carry on } default: @@ -16012,24 +13810,23 @@ int ha_ndbcluster::multi_range_read_next(char **range_info) } /* At this point the current range is done, proceed to next. */ first_running_range++; - m_multi_range_result_ptr= - multi_range_next_entry(m_multi_range_result_ptr, table_share->reclength); + m_multi_range_result_ptr = multi_range_next_entry( + m_multi_range_result_ptr, table_share->reclength); } - if (m_range_res) // mrr_funcs.next() has consumed all ranges. + if (m_range_res) // mrr_funcs.next() has consumed all ranges. DBUG_RETURN(HA_ERR_END_OF_FILE); /* Read remaining ranges */ int res; - if ((res= multi_range_start_retrievals(first_running_range))) + if ((res = multi_range_start_retrievals(first_running_range))) DBUG_RETURN(res); - } // for(;;) + } // for(;;) } - /* Fetch next row from the ordered index cursor in multi range scan. @@ -16038,58 +13835,44 @@ int ha_ndbcluster::multi_range_read_next(char **range_info) to correctly interleave rows from primary/unique key operations with rows from the scan.
*/ DBUG_RETURN(res); } @@ -16098,7 +13881,6 @@ ha_ndbcluster::read_multi_range_fetch_next() DBUG_RETURN(0); } - /** * Try to find pushable subsets of a join plan. * @param hton unused (maybe useful for other engines). * @param thd Thread. * @param plan The join plan to examine. * @return Possible error code. */ static int ndbcluster_make_pushed_join(handlerton *, THD *thd, - const AQP::Join_plan *plan) -{ + const AQP::Join_plan *plan) { DBUG_ENTER("ndbcluster_make_pushed_join"); if (THDVAR(thd, join_pushdown) && // Check for online upgrade/downgrade. - ndbd_join_pushdown(g_ndb_cluster_connection->get_min_db_version())) - { + ndbd_join_pushdown(g_ndb_cluster_connection->get_min_db_version())) { ndb_pushed_builder_ctx pushed_builder(*plan); - for (uint i= 0; i < plan->get_access_count()-1; i++) - { - const AQP::Table_access* const join_root= plan->get_table_access(i); - const ndb_pushed_join* pushed_join= NULL; + for (uint i = 0; i < plan->get_access_count() - 1; i++) { + const AQP::Table_access *const join_root = plan->get_table_access(i); + const ndb_pushed_join *pushed_join = NULL; // Try to build a ndb_pushed_join starting from 'join_root' - int error= pushed_builder.make_pushed_join(join_root, pushed_join); - if (unlikely(error)) - { + int error = pushed_builder.make_pushed_join(join_root, pushed_join); + if (unlikely(error)) { if (error < 0) // getNdbError() gives us the error code { - ERR_SET(pushed_builder.getNdbError(),error); + ERR_SET(pushed_builder.getNdbError(), error); } join_root->get_table()->file->print_error(error, MYF(0)); DBUG_RETURN(error); } - // Assign any produced pushed_join definitions to + // Assign any produced pushed_join definitions to // the ha_ndbcluster instance representing its root. - if (pushed_join != NULL) - { - ha_ndbcluster* const handler= - static_cast<ha_ndbcluster*>(join_root->get_table()->file); + if (pushed_join != NULL) { + ha_ndbcluster *const handler = + static_cast<ha_ndbcluster *>(join_root->get_table()->file); - error= handler->assign_pushed_join(pushed_join); - if (unlikely(error)) - { + error = handler->assign_pushed_join(pushed_join); + if (unlikely(error)) { delete pushed_join; handler->print_error(error, MYF(0)); DBUG_RETURN(error); @@ -16155,35 +13931,30 @@ static int ndbcluster_make_pushed_join(handlerton *, THD *thd, DBUG_RETURN(0); } - /** * In case a pushed join having the table for this handler as its root * has been produced, ::assign_pushed_join() is responsible for setting - * up this ha_ndbcluster instance such that the prepared NdbQuery + * up this ha_ndbcluster instance such that the prepared NdbQuery * might be instantiated at execution time.
*/ -int -ha_ndbcluster::assign_pushed_join(const ndb_pushed_join* pushed_join) -{ +int ha_ndbcluster::assign_pushed_join(const ndb_pushed_join *pushed_join) { DBUG_ENTER("assign_pushed_join"); m_thd_ndb->m_pushed_queries_defined++; - for (uint i = 0; i < pushed_join->get_operation_count(); i++) - { - const TABLE* const tab= pushed_join->get_table(i); + for (uint i = 0; i < pushed_join->get_operation_count(); i++) { + const TABLE *const tab = pushed_join->get_table(i); DBUG_ASSERT(tab->file->ht == ht); - ha_ndbcluster* child= static_cast<ha_ndbcluster*>(tab->file); - child->m_pushed_join_member= pushed_join; - child->m_pushed_join_operation= i; + ha_ndbcluster *child = static_cast<ha_ndbcluster *>(tab->file); + child->m_pushed_join_member = pushed_join; + child->m_pushed_join_operation = i; } - DBUG_PRINT("info", ("Assigned pushed join with %d child operations", - pushed_join->get_operation_count()-1)); + DBUG_PRINT("info", ("Assigned pushed join with %d child operations", + pushed_join->get_operation_count() - 1)); DBUG_RETURN(0); } - /** * First level of filtering tables which *maybe* may be part of * a pushed query: Returning 'false' will eliminate this table @@ -16191,36 +13962,31 @@ ha_ndbcluster::assign_pushed_join(const ndb_pushed_join* pushed_join) * A 'reason' for rejecting this table is required if 'false' * is returned. */ -bool -ha_ndbcluster::maybe_pushable_join(const char*& reason) const -{ - reason= NULL; - if (uses_blob_value(table->read_set)) - { - reason= "select list can't contain BLOB columns"; +bool ha_ndbcluster::maybe_pushable_join(const char *&reason) const { + reason = NULL; + if (uses_blob_value(table->read_set)) { + reason = "select list can't contain BLOB columns"; return false; } - if (m_user_defined_partitioning) - { - reason= "has user defined partioning"; + if (m_user_defined_partitioning) { + reason = "has user defined partitioning"; return false; } // Pushed operations may not set locks. - const NdbOperation::LockMode lockMode= get_ndb_lock_mode(m_lock.type); - switch (lockMode) - { - case NdbOperation::LM_CommittedRead: - return true; + const NdbOperation::LockMode lockMode = get_ndb_lock_mode(m_lock.type); + switch (lockMode) { + case NdbOperation::LM_CommittedRead: + return true; - case NdbOperation::LM_Read: - case NdbOperation::LM_Exclusive: - reason= "lock modes other than 'read committed' not implemented"; - return false; - - default: // Other lock modes not used by handler. - assert(false); - return false; + case NdbOperation::LM_Read: + case NdbOperation::LM_Exclusive: + reason = "lock modes other than 'read committed' not implemented"; + return false; + + default: // Other lock modes not used by handler. + assert(false); + return false; } return true; @@ -16229,55 +13995,47 @@ ha_ndbcluster::maybe_pushable_join(const char*& reason) const /** * Check if this table access operation (and a number of succeeding operations) * can be pushed to the cluster and executed there. This requires that there - * is an NdbQueryDefiniton and that it still matches the corresponds to the - * type of operation that we intend to execute. (The MySQL server will + * is an NdbQueryDefinition and that it still corresponds to the + * type of operation that we intend to execute. (The MySQL server will * sometimes change its mind and replace a scan with a lookup or vice versa * as it works its way into the nested loop join.) * * @param type This is the operation type that the server wants to execute.
* @param idx Index used whenever relevant for operation type - * @param needSorted True if the root operation is an ordered index scan + * @param needSorted True if the root operation is an ordered index scan * with sorted results. * @return True if the operation may be pushed. */ -bool -ha_ndbcluster::check_if_pushable(int type, //NdbQueryOperationDef::Type, - uint idx) const -{ - if (m_disable_pushed_join) - { +bool ha_ndbcluster::check_if_pushable(int type, // NdbQueryOperationDef::Type, + uint idx) const { + if (m_disable_pushed_join) { DBUG_PRINT("info", ("Push disabled (HA_EXTRA_KEYREAD)")); return false; } - return m_pushed_join_operation == PUSHED_ROOT - && m_pushed_join_member != NULL - && m_pushed_join_member->match_definition( - type, - (idx<MAX_KEY) ? &m_index[idx] : NULL); + return m_pushed_join_operation == PUSHED_ROOT && m_pushed_join_member != NULL && + m_pushed_join_member->match_definition( + type, (idx < MAX_KEY) ? &m_index[idx] : NULL); } - -int -ha_ndbcluster::create_pushed_join(const NdbQueryParamValue* keyFieldParams, uint paramCnt) -{ +int ha_ndbcluster::create_pushed_join(const NdbQueryParamValue *keyFieldParams, + uint paramCnt) { DBUG_ENTER("create_pushed_join"); DBUG_ASSERT(m_pushed_join_member && m_pushed_join_operation == PUSHED_ROOT); - NdbQuery* const query= - m_pushed_join_member->make_query_instance(m_thd_ndb->trans, keyFieldParams, paramCnt); + NdbQuery *const query = m_pushed_join_member->make_query_instance( + m_thd_ndb->trans, keyFieldParams, paramCnt); - if (unlikely(query==NULL)) - ERR_RETURN(m_thd_ndb->trans->getNdbError()); + if (unlikely(query == NULL)) ERR_RETURN(m_thd_ndb->trans->getNdbError()); // Bind to instantiated NdbQueryOperations. - for (uint i= 0; i < m_pushed_join_member->get_operation_count(); i++) - { - const TABLE* const tab= m_pushed_join_member->get_table(i); - ha_ndbcluster* handler= static_cast<ha_ndbcluster*>(tab->file); + for (uint i = 0; i < m_pushed_join_member->get_operation_count(); i++) { + const TABLE *const tab = m_pushed_join_member->get_table(i); + ha_ndbcluster *handler = static_cast<ha_ndbcluster *>(tab->file); - DBUG_ASSERT(handler->m_pushed_join_operation==(int)i); - NdbQueryOperation* const op= query->getQueryOperation(i); - handler->m_pushed_operation= op; + DBUG_ASSERT(handler->m_pushed_join_operation == (int)i); + NdbQueryOperation *const op = query->getQueryOperation(i); + handler->m_pushed_operation = op; handler->get_read_set(false, handler->active_index); /** @@ -16307,7 +14065,8 @@ ha_ndbcluster::create_pushed_join(const NdbQueryParamValue* keyFieldParams, uint handler->generate_scan_filter(&code, NULL); const uint codeSize = code.getWordsUsed(); if (code.getNdbError().code == 0) { - const NdbQueryOperationDef::Type type = op->getQueryOperationDef().getType(); + const NdbQueryOperationDef::Type type = + op->getQueryOperationDef().getType(); const bool isLookup = (type == NdbQueryOperationDef::PrimaryKeyAccess || type == NdbQueryOperationDef::UniqueIndexAccess); if (isLookup && codeSize >= 64) { @@ -16321,78 +14080,64 @@ ha_ndbcluster::create_pushed_join(const NdbQueryParamValue* keyFieldParams, uint } // Bind to result buffers - int res= op->setResultRowRef( - handler->m_ndb_record, - handler->_m_next_row, - handler->m_table_map->get_column_mask(tab->read_set)); - if (unlikely(res)) - ERR_RETURN(query->getNdbError()); - + int res = op->setResultRowRef( + handler->m_ndb_record, handler->_m_next_row, + handler->m_table_map->get_column_mask(tab->read_set)); + if (unlikely(res)) ERR_RETURN(query->getNdbError()); + // We clear 'm_next_row' to say that no row was fetched from the query yet.
- handler->_m_next_row= 0; + handler->_m_next_row = 0; } - DBUG_ASSERT(m_active_query==NULL); - m_active_query= query; + DBUG_ASSERT(m_active_query == NULL); + m_active_query = query; m_thd_ndb->m_pushed_queries_executed++; DBUG_RETURN(0); } - /** * Check if this table access operation is part of a pushed join operation * which is actively executing. */ -bool -ha_ndbcluster::check_is_pushed() const -{ - if (m_pushed_join_member == NULL) - return false; +bool ha_ndbcluster::check_is_pushed() const { + if (m_pushed_join_member == NULL) return false; - handler *root= m_pushed_join_member->get_table(PUSHED_ROOT)->file; - return (static_cast<ha_ndbcluster*>(root)->m_active_query); + handler *root = m_pushed_join_member->get_table(PUSHED_ROOT)->file; + return (static_cast<ha_ndbcluster *>(root)->m_active_query); } -uint -ha_ndbcluster::number_of_pushed_joins() const -{ +uint ha_ndbcluster::number_of_pushed_joins() const { if (m_pushed_join_member == NULL) return 0; else return m_pushed_join_member->get_operation_count(); } -const TABLE* -ha_ndbcluster::member_of_pushed_join() const -{ +const TABLE *ha_ndbcluster::member_of_pushed_join() const { if (m_pushed_join_member == NULL) return NULL; else return m_pushed_join_member->get_table(PUSHED_ROOT); } -const TABLE* -ha_ndbcluster::parent_of_pushed_join() const -{ - if (m_pushed_join_operation > PUSHED_ROOT) - { - DBUG_ASSERT(m_pushed_join_member!=NULL); - uint parent_ix= m_pushed_join_member - ->get_query_def().getQueryOperation(m_pushed_join_operation) - ->getParentOperation(0) - ->getOpNo(); +const TABLE *ha_ndbcluster::parent_of_pushed_join() const { + if (m_pushed_join_operation > PUSHED_ROOT) { + DBUG_ASSERT(m_pushed_join_member != NULL); + uint parent_ix = m_pushed_join_member->get_query_def() + .getQueryOperation(m_pushed_join_operation) + ->getParentOperation(0) + ->getOpNo(); return m_pushed_join_member->get_table(parent_ix); } return NULL; } - /* Condition pushdown */ /** Push a condition to ndbcluster storage engine for evaluation during table and index scans. The conditions will be cleared by calling handler::extra(HA_EXTRA_RESET) or handler::reset(). @@ -16407,7 +14152,7 @@ ha_ndbcluster::parent_of_pushed_join() const handler::pushed_cond will be assigned the (part of) the condition which we accepted to be pushed down. - + @param cond Condition to be pushed down. @param other_tbls_ok Are other tables allowed to be referred from the condition terms pushed down. @@ -16416,81 +14161,68 @@ ha_ndbcluster::parent_of_pushed_join() const sum of boolean terms which could not be pushed. A nullptr is returned if entire condition was supported. */ -const Item* -ha_ndbcluster::cond_push(const Item *cond, bool other_tbls_ok) -{ +const Item *ha_ndbcluster::cond_push(const Item *cond, bool other_tbls_ok) { DBUG_ENTER("ha_ndbcluster::cond_push"); DBUG_ASSERT(pushed_cond == nullptr); DBUG_ASSERT(cond != nullptr); - DBUG_EXECUTE("where",print_where(ha_thd(), cond, m_tabname, QT_ORDINARY);); + DBUG_EXECUTE("where", print_where(ha_thd(), cond, m_tabname, QT_ORDINARY);); if (m_pushed_join_member != nullptr && - m_pushed_join_operation > PUSHED_ROOT) - { + m_pushed_join_operation > PUSHED_ROOT) { // This is a 'child' in a pushed join operation. Field values from // other tables are not known yet when we generate the scan filters.
other_tbls_ok = false; } Item *pushed; - const Item* rem = m_cond.cond_push(cond, table, down_cast<const NDBTAB*>(m_table), other_tbls_ok, pushed); + const Item *rem = m_cond.cond_push( + cond, table, down_cast<const NDBTAB *>(m_table), other_tbls_ok, pushed); pushed_cond = pushed; DBUG_RETURN(rem); } - /* Implements the SHOW ENGINE NDB STATUS command. */ -bool -ndbcluster_show_status(handlerton*, THD* thd, stat_print_fn *stat_print, - enum ha_stat_type stat_type) -{ +bool ndbcluster_show_status(handlerton *, THD *thd, stat_print_fn *stat_print, + enum ha_stat_type stat_type) { char buf[IO_SIZE]; uint buflen; DBUG_ENTER("ndbcluster_show_status"); - - if (stat_type != HA_ENGINE_STATUS) - { + + if (stat_type != HA_ENGINE_STATUS) { DBUG_RETURN(false); } - Ndb* ndb= check_ndb_in_thd(thd); - Thd_ndb *thd_ndb= get_thd_ndb(thd); + Ndb *ndb = check_ndb_in_thd(thd); + Thd_ndb *thd_ndb = get_thd_ndb(thd); struct st_ndb_status ns; if (ndb) update_status_variables(thd_ndb, &ns, thd_ndb->connection); else update_status_variables(NULL, &ns, g_ndb_cluster_connection); - buflen= (uint) - snprintf(buf, sizeof(buf), - "cluster_node_id=%ld, " - "connected_host=%s, " - "connected_port=%ld, " - "number_of_data_nodes=%ld, " - "number_of_ready_data_nodes=%ld, " - "connect_count=%ld", - ns.cluster_node_id, - ns.connected_host, - ns.connected_port, - ns.number_of_data_nodes, - ns.number_of_ready_data_nodes, - ns.connect_count); + buflen = (uint)snprintf(buf, sizeof(buf), + "cluster_node_id=%ld, " + "connected_host=%s, " + "connected_port=%ld, " + "number_of_data_nodes=%ld, " + "number_of_ready_data_nodes=%ld, " + "connect_count=%ld", + ns.cluster_node_id, ns.connected_host, + ns.connected_port, ns.number_of_data_nodes, + ns.number_of_ready_data_nodes, ns.connect_count); if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length, STRING_WITH_LEN("connection"), buf, buflen)) DBUG_RETURN(true); - if (ndb) - { + if (ndb) { Ndb::Free_list_usage tmp; - tmp.m_name= 0; - while (ndb->get_free_list_usage(&tmp)) - { - buflen= (uint) - snprintf(buf, sizeof(buf), - "created=%u, free=%u, sizeof=%u", - tmp.m_created, tmp.m_free, tmp.m_sizeof); + tmp.m_name = 0; + while (ndb->get_free_list_usage(&tmp)) { + buflen = + (uint)snprintf(buf, sizeof(buf), "created=%u, free=%u, sizeof=%u", + tmp.m_created, tmp.m_free, tmp.m_sizeof); if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length, tmp.m_name, (uint)strlen(tmp.m_name), buf, buflen)) DBUG_RETURN(true); @@ -16498,8 +14230,7 @@ ndbcluster_show_status(handlerton*, THD* thd, stat_print_fn *stat_print, } buflen = (uint)ndbcluster_show_status_binlog(buf, sizeof(buf)); - if (buflen) - { + if (buflen) { if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length, STRING_WITH_LEN("binlog"), buf, buflen)) DBUG_RETURN(true); @@ -16508,64 +14239,53 @@ ndbcluster_show_status(handlerton*, THD* thd, stat_print_fn *stat_print, DBUG_RETURN(false); } +int ha_ndbcluster::get_default_num_partitions(HA_CREATE_INFO *create_info) { + THD *thd = current_thd; -int ha_ndbcluster::get_default_num_partitions(HA_CREATE_INFO *create_info) -{ - THD* thd = current_thd; - - if (check_ndb_connection(thd)) - { + if (check_ndb_connection(thd)) { my_error(HA_ERR_NO_CONNECTION, MYF(0)); return -1; } - Thd_ndb * thd_ndb = get_thd_ndb(thd); + Thd_ndb *thd_ndb = get_thd_ndb(thd); ha_rows max_rows, min_rows; - if (create_info) - { - max_rows= create_info->max_rows; - min_rows= create_info->min_rows; - } - else - { - max_rows= table_share->max_rows; - min_rows= table_share->min_rows; - } - uint no_fragments=
get_no_fragments(max_rows >= min_rows ? - max_rows : min_rows); + if (create_info) { + max_rows = create_info->max_rows; + min_rows = create_info->min_rows; + } else { + max_rows = table_share->max_rows; + min_rows = table_share->min_rows; + } + uint no_fragments = + get_no_fragments(max_rows >= min_rows ? max_rows : min_rows); uint reported_frags; - adjusted_frag_count(thd_ndb->ndb, - no_fragments, - reported_frags); + adjusted_frag_count(thd_ndb->ndb, no_fragments, reported_frags); return reported_frags; } -uint32 ha_ndbcluster::calculate_key_hash_value(Field **field_array) -{ +uint32 ha_ndbcluster::calculate_key_hash_value(Field **field_array) { Uint32 hash_value; struct Ndb::Key_part_ptr key_data[MAX_REF_PARTS]; - struct Ndb::Key_part_ptr *key_data_ptr= &key_data[0]; - Uint32 i= 0; + struct Ndb::Key_part_ptr *key_data_ptr = &key_data[0]; + Uint32 i = 0; int ret_val; - Uint64 tmp[(MAX_KEY_SIZE_IN_WORDS*MAX_XFRM_MULTIPLY) >> 1]; - void *buf= (void*)&tmp[0]; + Uint64 tmp[(MAX_KEY_SIZE_IN_WORDS * MAX_XFRM_MULTIPLY) >> 1]; + void *buf = (void *)&tmp[0]; DBUG_ENTER("ha_ndbcluster::calculate_key_hash_value"); - do - { - Field *field= *field_array; - uint len= field->data_length(); + do { + Field *field = *field_array; + uint len = field->data_length(); DBUG_ASSERT(!field->is_real_null()); if (field->real_type() == MYSQL_TYPE_VARCHAR) - len+= ((Field_varstring*)field)->length_bytes; - key_data[i].ptr= field->ptr; - key_data[i++].len= len; + len += ((Field_varstring *)field)->length_bytes; + key_data[i].ptr = field->ptr; + key_data[i++].len = len; } while (*(++field_array)); - key_data[i].ptr= 0; - if ((ret_val= Ndb::computeHash(&hash_value, m_table, - key_data_ptr, buf, sizeof(tmp)))) - { + key_data[i].ptr = 0; + if ((ret_val = Ndb::computeHash(&hash_value, m_table, key_data_ptr, buf, + sizeof(tmp)))) { DBUG_PRINT("info", ("ret_val = %d", ret_val)); DBUG_ASSERT(false); abort(); @@ -16573,14 +14293,13 @@ uint32 ha_ndbcluster::calculate_key_hash_value(Field **field_array) DBUG_RETURN(m_table->getPartitionId(hash_value)); } - /* Set-up auto-partitioning for NDB Cluster SYNOPSIS set_auto_partitions() part_info Partition info struct to set-up - + RETURN VALUE NONE @@ -16592,122 +14311,100 @@ uint32 ha_ndbcluster::calculate_key_hash_value(Field **field_array) */ enum ndb_distribution_enum { - NDB_DISTRIBUTION_KEYHASH= 0, - NDB_DISTRIBUTION_LINHASH= 1 + NDB_DISTRIBUTION_KEYHASH = 0, + NDB_DISTRIBUTION_LINHASH = 1 }; -static const char* distribution_names[]= { "KEYHASH", "LINHASH", NullS }; +static const char *distribution_names[] = {"KEYHASH", "LINHASH", NullS}; static ulong opt_ndb_distribution; -static TYPELIB distribution_typelib= { - array_elements(distribution_names) - 1, - "", - distribution_names, - NULL -}; -static MYSQL_SYSVAR_ENUM( - distribution, /* name */ - opt_ndb_distribution, /* var */ - PLUGIN_VAR_RQCMDARG, - "Default distribution for new tables in ndb", - NULL, /* check func. */ - NULL, /* update func. */ - NDB_DISTRIBUTION_KEYHASH, /* default */ - &distribution_typelib /* typelib */ +static TYPELIB distribution_typelib = {array_elements(distribution_names) - 1, + "", distribution_names, NULL}; +static MYSQL_SYSVAR_ENUM(distribution, /* name */ + opt_ndb_distribution, /* var */ + PLUGIN_VAR_RQCMDARG, + "Default distribution for new tables in ndb", + NULL, /* check func. */ + NULL, /* update func. 
*/ + NDB_DISTRIBUTION_KEYHASH, /* default */ + &distribution_typelib /* typelib */ ); -enum row_type ha_ndbcluster::get_partition_row_type(const dd::Table*, uint) { +enum row_type ha_ndbcluster::get_partition_row_type(const dd::Table *, uint) { return table_share->real_row_type; } -void ha_ndbcluster::set_auto_partitions(partition_info *part_info) -{ +void ha_ndbcluster::set_auto_partitions(partition_info *part_info) { DBUG_ENTER("ha_ndbcluster::set_auto_partitions"); - part_info->list_of_part_fields= true; - part_info->part_type= partition_type::HASH; - switch (opt_ndb_distribution) - { - case NDB_DISTRIBUTION_KEYHASH: - part_info->linear_hash_ind= false; - break; - case NDB_DISTRIBUTION_LINHASH: - part_info->linear_hash_ind= true; - break; - default: - DBUG_ASSERT(false); - break; + part_info->list_of_part_fields = true; + part_info->part_type = partition_type::HASH; + switch (opt_ndb_distribution) { + case NDB_DISTRIBUTION_KEYHASH: + part_info->linear_hash_ind = false; + break; + case NDB_DISTRIBUTION_LINHASH: + part_info->linear_hash_ind = true; + break; + default: + DBUG_ASSERT(false); + break; } DBUG_VOID_RETURN; } - -static int -create_table_set_range_data(const partition_info *part_info, - NdbDictionary::Table& ndbtab) -{ +static int create_table_set_range_data(const partition_info *part_info, + NdbDictionary::Table &ndbtab) { const uint num_parts = part_info->num_parts; DBUG_ENTER("create_table_set_range_data"); - int32 *range_data= (int32*)my_malloc(PSI_INSTRUMENT_ME, num_parts*sizeof(int32), MYF(0)); - if (!range_data) - { - mem_alloc_error(num_parts*sizeof(int32)); + int32 *range_data = + (int32 *)my_malloc(PSI_INSTRUMENT_ME, num_parts * sizeof(int32), MYF(0)); + if (!range_data) { + mem_alloc_error(num_parts * sizeof(int32)); DBUG_RETURN(1); } - for (uint i= 0; i < num_parts; i++) - { - longlong range_val= part_info->range_int_array[i]; - const bool unsigned_flag= part_info->part_expr->unsigned_flag; - if (unsigned_flag) - range_val-= 0x8000000000000000ULL; - if (range_val < INT_MIN32 || range_val >= INT_MAX32) - { - if ((i != num_parts - 1) || - (range_val != LLONG_MAX)) - { + for (uint i = 0; i < num_parts; i++) { + longlong range_val = part_info->range_int_array[i]; + const bool unsigned_flag = part_info->part_expr->unsigned_flag; + if (unsigned_flag) range_val -= 0x8000000000000000ULL; + if (range_val < INT_MIN32 || range_val >= INT_MAX32) { + if ((i != num_parts - 1) || (range_val != LLONG_MAX)) { my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB"); my_free(range_data); DBUG_RETURN(1); } - range_val= INT_MAX32; + range_val = INT_MAX32; } - range_data[i]= (int32)range_val; + range_data[i] = (int32)range_val; } ndbtab.setRangeListData(range_data, num_parts); my_free(range_data); DBUG_RETURN(0); } - -static int -create_table_set_list_data(const partition_info *part_info, - NdbDictionary::Table& ndbtab) -{ +static int create_table_set_list_data(const partition_info *part_info, + NdbDictionary::Table &ndbtab) { const uint num_list_values = part_info->num_list_values; - int32 *list_data= (int32*)my_malloc(PSI_INSTRUMENT_ME, - num_list_values*2*sizeof(int32), MYF(0)); + int32 *list_data = (int32 *)my_malloc( + PSI_INSTRUMENT_ME, num_list_values * 2 * sizeof(int32), MYF(0)); DBUG_ENTER("create_table_set_list_data"); - if (!list_data) - { - mem_alloc_error(num_list_values*2*sizeof(int32)); + if (!list_data) { + mem_alloc_error(num_list_values * 2 * sizeof(int32)); DBUG_RETURN(1); } - for (uint i= 0; i < num_list_values; i++) - { - LIST_PART_ENTRY *list_entry= &part_info->list_array[i]; 
- longlong list_val= list_entry->list_value; - const bool unsigned_flag= part_info->part_expr->unsigned_flag; - if (unsigned_flag) - list_val-= 0x8000000000000000ULL; - if (list_val < INT_MIN32 || list_val > INT_MAX32) - { + for (uint i = 0; i < num_list_values; i++) { + LIST_PART_ENTRY *list_entry = &part_info->list_array[i]; + longlong list_val = list_entry->list_value; + const bool unsigned_flag = part_info->part_expr->unsigned_flag; + if (unsigned_flag) list_val -= 0x8000000000000000ULL; + if (list_val < INT_MIN32 || list_val > INT_MAX32) { my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB"); my_free(list_data); DBUG_RETURN(1); } - list_data[2*i]= (int32)list_val; - list_data[2*i+1]= list_entry->partition_id; + list_data[2 * i] = (int32)list_val; + list_data[2 * i + 1] = list_entry->partition_id; } - ndbtab.setRangeListData(list_data, 2*num_list_values); + ndbtab.setRangeListData(list_data, 2 * num_list_values); my_free(list_data); DBUG_RETURN(0); } @@ -16724,33 +14421,26 @@ create_table_set_list_data(const partition_info *part_info, implement the function to map to a partition. */ -static int -create_table_set_up_partition_info(partition_info *part_info, - NdbDictionary::Table& ndbtab, - Ndb_table_map & colIdMap) -{ +static int create_table_set_up_partition_info(partition_info *part_info, + NdbDictionary::Table &ndbtab, + Ndb_table_map &colIdMap) { DBUG_ENTER("create_table_set_up_partition_info"); if (part_info->part_type == partition_type::HASH && - part_info->list_of_part_fields == true) - { - Field **fields= part_info->part_field_array; + part_info->list_of_part_fields == true) { + Field **fields = part_info->part_field_array; DBUG_PRINT("info", ("Using HashMapPartition fragmentation type")); ndbtab.setFragmentType(NDBTAB::HashMapPartition); - for (uint i= 0; i < part_info->part_field_list.elements; i++) - { + for (uint i = 0; i < part_info->part_field_list.elements; i++) { DBUG_ASSERT(fields[i]->stored_in_db); - NDBCOL *col= colIdMap.getColumn(ndbtab, fields[i]->field_index); - DBUG_PRINT("info",("setting dist key on %s", col->getName())); + NDBCOL *col = colIdMap.getColumn(ndbtab, fields[i]->field_index); + DBUG_PRINT("info", ("setting dist key on %s", col->getName())); col->setPartitionKey(true); } - } - else - { - if (!current_thd->variables.new_mode) - { + } else { + if (!current_thd->variables.new_mode) { push_warning_printf(current_thd, Sql_condition::SL_WARNING, ER_ILLEGAL_HA_CREATE_OPTION, ER_THD(current_thd, ER_ILLEGAL_HA_CREATE_OPTION), @@ -16759,13 +14449,13 @@ create_table_set_up_partition_info(partition_info *part_info, " use --new option to enable"); DBUG_RETURN(HA_ERR_UNSUPPORTED); } - /* - Create a shadow field for those tables that have user defined - partitioning. This field stores the value of the partition - function such that NDB can handle reorganisations of the data - even when the MySQL Server isn't available to assist with - calculation of the partition function value. - */ + /* + Create a shadow field for those tables that have user defined + partitioning. This field stores the value of the partition + function such that NDB can handle reorganisations of the data + even when the MySQL Server isn't available to assist with + calculation of the partition function value. 
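Both helpers above rely on the same int32 mapping: NDB stores RANGE/LIST boundary data as int32, so an unsigned partition expression is first rebased by 2^63 to preserve ordering, and anything outside the int32 window is rejected, except that LLONG_MAX (MAXVALUE) in the last range clamps to INT_MAX32. A minimal sketch of that mapping; map_range_value() is an invented name, not a server function:

#include <climits>
#include <cstdint>

// Returns true on success and stores the mapped value in *out.
bool map_range_value(long long v, bool unsigned_expr, bool is_last_range,
                     int32_t *out) {
  // Rebase the unsigned domain: 0 maps to -2^63, 2^63 maps to 0, so the
  // full unsigned order lines up with the signed order (same subtraction
  // as in the code above; relies on two's complement wraparound).
  if (unsigned_expr) v -= 0x8000000000000000ULL;
  if (v < INT32_MIN || v >= INT32_MAX) {
    // Mirrors ER_LIMITED_PART_RANGE: only MAXVALUE in the final range
    // may exceed the window, and it clamps to the top of it.
    if (!is_last_range || v != LLONG_MAX) return false;
    v = INT32_MAX;
  }
  *out = static_cast<int32_t>(v);
  return true;
}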
+ */ NDBCOL col; DBUG_PRINT("info", ("Generating partition func value field")); col.setName("$PART_FUNC_VALUE"); @@ -16775,19 +14465,14 @@ create_table_set_up_partition_info(partition_info *part_info, col.setPrimaryKey(false); col.setAutoIncrement(false); ndbtab.addColumn(col); - if (part_info->part_type == partition_type::RANGE) - { + if (part_info->part_type == partition_type::RANGE) { const int error = create_table_set_range_data(part_info, ndbtab); - if (error) - { + if (error) { DBUG_RETURN(error); } - } - else if (part_info->part_type == partition_type::LIST) - { + } else if (part_info->part_type == partition_type::LIST) { const int error = create_table_set_list_data(part_info, ndbtab); - if (error) - { + if (error) { DBUG_RETURN(error); } } @@ -16800,9 +14485,8 @@ create_table_set_up_partition_info(partition_info *part_info, ndbtab.setDefaultNoPartitionsFlag(use_default_num_parts); ndbtab.setLinearFlag(part_info->linear_hash_ind); - if (ndbtab.getFragmentType() == NDBTAB::HashMapPartition && - use_default_num_parts) - { + if (ndbtab.getFragmentType() == NDBTAB::HashMapPartition && + use_default_num_parts) { /** * Skip below for default partitioning, this removes the need to undo * these settings later in ha_ndbcluster::create. @@ -16815,27 +14499,22 @@ create_table_set_up_partition_info(partition_info *part_info, // build array describing which nodegroup should store each // partition(each partition is mapped to one fragment in the table). uint32 frag_data[MAX_PARTITIONS]; - ulong fd_index= 0; + ulong fd_index = 0; partition_element *part_elem; List_iterator part_it(part_info->partitions); - while((part_elem = part_it++)) - { - if (!part_info->is_sub_partitioned()) - { - const Uint32 ng= part_elem->nodegroup_id; + while ((part_elem = part_it++)) { + if (!part_info->is_sub_partitioned()) { + const Uint32 ng = part_elem->nodegroup_id; assert(fd_index < NDB_ARRAY_SIZE(frag_data)); - frag_data[fd_index++]= ng; - } - else - { + frag_data[fd_index++] = ng; + } else { partition_element *subpart_elem; List_iterator sub_it(part_elem->subpartitions); - while((subpart_elem = sub_it++)) - { - const Uint32 ng= subpart_elem->nodegroup_id; + while ((subpart_elem = sub_it++)) { + const Uint32 ng = subpart_elem->nodegroup_id; assert(fd_index < NDB_ARRAY_SIZE(frag_data)); - frag_data[fd_index++]= ng; + frag_data[fd_index++] = ng; } } } @@ -16845,27 +14524,25 @@ create_table_set_up_partition_info(partition_info *part_info, ndbtab.setFragmentCount(fd_index); ndbtab.setFragmentData(frag_data, fd_index); - ndbtab.setPartitionBalance(NdbDictionary::Object::PartitionBalance_Specific); + ndbtab.setPartitionBalance( + NdbDictionary::Object::PartitionBalance_Specific); } DBUG_RETURN(0); } -class NDB_ALTER_DATA : public inplace_alter_handler_ctx -{ -public: - NDB_ALTER_DATA(THD* thd, NdbDictionary::Dictionary *dict, - const NdbDictionary::Table *table) : - dictionary(dict), - old_table(table), - new_table(new NdbDictionary::Table(*table)), - table_id(table->getObjectId()), - old_table_version(table->getObjectVersion()), - schema_dist_client(thd) - {} - ~NDB_ALTER_DATA() - { delete new_table; } +class NDB_ALTER_DATA : public inplace_alter_handler_ctx { + public: + NDB_ALTER_DATA(THD *thd, NdbDictionary::Dictionary *dict, + const NdbDictionary::Table *table) + : dictionary(dict), + old_table(table), + new_table(new NdbDictionary::Table(*table)), + table_id(table->getObjectId()), + old_table_version(table->getObjectVersion()), + schema_dist_client(thd) {} + ~NDB_ALTER_DATA() { delete new_table; } 
NdbDictionary::Dictionary *dictionary; - const NdbDictionary::Table *old_table; + const NdbDictionary::Table *old_table; NdbDictionary::Table *new_table; const Uint32 table_id; const Uint32 old_table_version; @@ -16877,21 +14554,16 @@ class NDB_ALTER_DATA : public inplace_alter_handler_ctx is not supported. */ -static inline -enum_alter_inplace_result -inplace_unsupported(Alter_inplace_info *alter_info, - const char* reason) -{ +static inline enum_alter_inplace_result inplace_unsupported( + Alter_inplace_info *alter_info, const char *reason) { DBUG_ENTER("inplace_unsupported"); DBUG_PRINT("info", ("%s", reason)); alter_info->unsupported_reason = reason; DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } -void -ha_ndbcluster::check_implicit_column_format_change(TABLE *altered_table, - Alter_inplace_info *ha_alter_info) const -{ +void ha_ndbcluster::check_implicit_column_format_change( + TABLE *altered_table, Alter_inplace_info *ha_alter_info) const { /* We need to check if the table was defined when the default COLUMN_FORMAT was FIXED and will now become DYNAMIC. @@ -16899,58 +14571,50 @@ ha_ndbcluster::check_implicit_column_format_change(TABLE *altered_table, and the column which will change isn't about to be dropped. */ DBUG_ENTER("ha_ndbcluster::check_implicit_column_format_change"); - DBUG_PRINT("info", ("Checking table with version %lu", - table->s->mysql_version)); - Alter_inplace_info::HA_ALTER_FLAGS alter_flags= - ha_alter_info->handler_flags; + DBUG_PRINT("info", + ("Checking table with version %lu", table->s->mysql_version)); + Alter_inplace_info::HA_ALTER_FLAGS alter_flags = ha_alter_info->handler_flags; /* Find the old fields */ - for (uint i= 0; i < table->s->fields; i++) - { - Field *field= table->field[i]; + for (uint i = 0; i < table->s->fields; i++) { + Field *field = table->field[i]; /* Find fields that are not part of the primary key and that have a default COLUMN_FORMAT. */ - if ((! (field->flags & PRI_KEY_FLAG)) && - field->column_format() == COLUMN_FORMAT_TYPE_DEFAULT) - { + if ((!(field->flags & PRI_KEY_FLAG)) && + field->column_format() == COLUMN_FORMAT_TYPE_DEFAULT) { DBUG_PRINT("info", ("Found old non-pk field %s", field->field_name)); - bool modified_explicitly= false; - bool dropped= false; + bool modified_explicitly = false; + bool dropped = false; /* If the field is dropped or modified with an explicit COLUMN_FORMAT (FIXED or DYNAMIC) we don't need to warn the user about that field.
*/ if (alter_flags & Alter_inplace_info::DROP_COLUMN || - alter_flags & Alter_inplace_info::ALTER_COLUMN_COLUMN_FORMAT) - { - if (alter_flags & Alter_inplace_info::DROP_COLUMN) - dropped= true; + alter_flags & Alter_inplace_info::ALTER_COLUMN_COLUMN_FORMAT) { + if (alter_flags & Alter_inplace_info::DROP_COLUMN) dropped = true; /* Find the fields in modified table */ - for (uint j= 0; j < altered_table->s->fields; j++) - { - Field *field2= altered_table->field[j]; - if (!my_strcasecmp(system_charset_info, - field->field_name, field2->field_name)) - { - dropped= false; - if (field2->column_format() != COLUMN_FORMAT_TYPE_DEFAULT) - { - modified_explicitly= true; + for (uint j = 0; j < altered_table->s->fields; j++) { + Field *field2 = altered_table->field[j]; + if (!my_strcasecmp(system_charset_info, field->field_name, + field2->field_name)) { + dropped = false; + if (field2->column_format() != COLUMN_FORMAT_TYPE_DEFAULT) { + modified_explicitly = true; } } } if (dropped) DBUG_PRINT("info", ("Field %s is to be dropped", field->field_name)); if (modified_explicitly) - DBUG_PRINT("info", ("Field %s is modified with explicit COLUMN_FORMAT", - field->field_name)); + DBUG_PRINT("info", + ("Field %s is modified with explicit COLUMN_FORMAT", + field->field_name)); } - if ((! dropped) && (! modified_explicitly)) - { + if ((!dropped) && (!modified_explicitly)) { // push a warning of COLUMN_FORMAT change push_warning_printf(current_thd, Sql_condition::SL_WARNING, ER_ALTER_INFO, @@ -16966,42 +14630,31 @@ ha_ndbcluster::check_implicit_column_format_change, DBUG_VOID_RETURN; } -bool -ha_ndbcluster::table_storage_changed(HA_CREATE_INFO *create_info) const -{ - enum ha_storage_media new_table_storage= create_info->storage_media; - if (new_table_storage == HA_SM_DEFAULT) - new_table_storage= HA_SM_MEMORY; - enum ha_storage_media old_table_storage= - table->s->default_storage_media; - if (old_table_storage == HA_SM_DEFAULT) - old_table_storage= HA_SM_MEMORY; - if (new_table_storage != old_table_storage) - { +bool ha_ndbcluster::table_storage_changed(HA_CREATE_INFO *create_info) const { + enum ha_storage_media new_table_storage = create_info->storage_media; + if (new_table_storage == HA_SM_DEFAULT) new_table_storage = HA_SM_MEMORY; + enum ha_storage_media old_table_storage = table->s->default_storage_media; + if (old_table_storage == HA_SM_DEFAULT) old_table_storage = HA_SM_MEMORY; + if (new_table_storage != old_table_storage) { return true; } return false; } -bool -ha_ndbcluster::column_has_index(TABLE *tab, uint field_idx, - uint start_field, uint end_field) const -{ +bool ha_ndbcluster::column_has_index(TABLE *tab, uint field_idx, + uint start_field, uint end_field) const { /** * Check all indexes to determine if column has index instead of checking * field->flags (PRI_KEY_FLAG | UNIQUE_KEY_FLAG | MULTIPLE_KEY_FLAG * since field->flags appears to only be set on first column in * multi-part index */ - for (uint j= start_field; j < end_field; j++) - { - KEY* key_info= tab->key_info + j; - KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; - for (; key_part != end; key_part++) - { - if (key_part->field->field_index == field_idx) - { + for (uint j = start_field; j < end_field; j++) { + KEY *key_info = tab->key_info + j; + KEY_PART_INFO *key_part = key_info->key_part; + KEY_PART_INFO *end = key_part + key_info->user_defined_key_parts; + for (; key_part != end; key_part++) { + if (key_part->field->field_index == field_idx) { return true; } } @@ -17009,19 +14662,15 @@
ha_ndbcluster::column_has_index(TABLE *tab, uint field_idx, return false; } -enum_alter_inplace_result -ha_ndbcluster::supported_inplace_ndb_column_change(uint field_idx, - TABLE *altered_table, - Alter_inplace_info *ha_alter_info, - bool table_storage_changed, - bool index_on_column) const -{ +enum_alter_inplace_result ha_ndbcluster::supported_inplace_ndb_column_change( + uint field_idx, TABLE *altered_table, Alter_inplace_info *ha_alter_info, + bool table_storage_changed, bool index_on_column) const { DBUG_ENTER("ha_ndbcluster::supported_inplace_ndb_column_change"); - HA_CREATE_INFO *create_info= ha_alter_info->create_info; - Field *old_field= table->field[field_idx]; - const NDBCOL *old_col= m_table_map->getColumn(field_idx); - Field *new_field= altered_table->field[field_idx]; + HA_CREATE_INFO *create_info = ha_alter_info->create_info; + Field *old_field = table->field[field_idx]; + const NDBCOL *old_col = m_table_map->getColumn(field_idx); + Field *new_field = altered_table->field[field_idx]; NDBCOL new_col; /* Create the new NdbDictionary::Column to be able to analyse if @@ -17032,8 +14681,7 @@ ha_ndbcluster::supported_inplace_ndb_column_change(uint field_idx, */ create_ndb_column(0, new_col, new_field, create_info, old_col->getDynamic()); - if (index_on_column) - { + if (index_on_column) { /** * Index columns are stored in memory. Impose it on the new_col * being created just now, in order to make the check 'if @@ -17043,21 +14691,17 @@ ha_ndbcluster::supported_inplace_ndb_column_change(uint field_idx, * will avoid the cost of moving it back to disk by copy alter). */ new_col.setStorageType(NdbDictionary::Column::StorageTypeMemory); - } - else - { + } else { if (old_field->field_storage_type() == HA_SM_DEFAULT && table_storage_changed && - new_col.getStorageType() != old_col->getStorageType()) - { - DBUG_RETURN(inplace_unsupported(ha_alter_info, - "Column storage media is changed due " - "to change in table storage media")); - } + new_col.getStorageType() != old_col->getStorageType()) { + DBUG_RETURN(inplace_unsupported(ha_alter_info, + "Column storage media is changed due " + "to change in table storage media")); + } if (old_field->field_storage_type() != new_field->field_storage_type() && - new_col.getStorageType() != old_col->getStorageType()) - { + new_col.getStorageType() != old_col->getStorageType()) { DBUG_RETURN(inplace_unsupported(ha_alter_info, "Column storage media is changed")); } @@ -17080,16 +14724,13 @@ ha_ndbcluster::supported_inplace_ndb_column_change(uint field_idx, // Check if we are adding an index to a disk stored column if (new_field->flags & FIELD_IN_ADD_INDEX && - new_col.getStorageType() == NdbDictionary::Column::StorageTypeDisk) - { + new_col.getStorageType() == NdbDictionary::Column::StorageTypeDisk) { DBUG_RETURN(inplace_unsupported(ha_alter_info, "Add/drop index is not supported for disk " "stored column")); } - if (index_on_column && - new_field->field_storage_type() == HA_SM_DISK) - { + if (index_on_column && new_field->field_storage_type() == HA_SM_DISK) { DBUG_RETURN(inplace_unsupported(ha_alter_info, "Changing COLUMN_STORAGE " "to disk (Explicit STORAGE DISK) " @@ -17101,32 +14742,28 @@ ha_ndbcluster::supported_inplace_ndb_column_change(uint field_idx, and the column changes storage type this is not allowed */ if (new_field->field_storage_type() != HA_SM_DEFAULT && - old_col->getStorageType() != new_col.getStorageType()) - { - DBUG_RETURN(inplace_unsupported(ha_alter_info, - "Column storage media is changed")); + old_col->getStorageType() != 
new_col.getStorageType()) { + DBUG_RETURN( + inplace_unsupported(ha_alter_info, "Column storage media is changed")); } // Check if type is changed - if (new_col.getType() != old_col->getType()) - { - DBUG_PRINT("info", ("Detected unsupported type change for field %s : " - "field types : old %u new %u " - "ndb column types : old %u new %u ", - old_field->field_name, - old_field->real_type(), - new_field->real_type(), - old_col->getType(), - new_col.getType())); + if (new_col.getType() != old_col->getType()) { + DBUG_PRINT("info", + ("Detected unsupported type change for field %s : " + "field types : old %u new %u " + "ndb column types : old %u new %u ", + old_field->field_name, old_field->real_type(), + new_field->real_type(), old_col->getType(), new_col.getType())); DBUG_RETURN(inplace_unsupported(ha_alter_info, "Altering field type is not supported")); } - Alter_inplace_info::HA_ALTER_FLAGS alter_flags= ha_alter_info->handler_flags; - bool altering_column= (alter_flags & - (Alter_inplace_info::ALTER_COLUMN_DEFAULT | - Alter_inplace_info::ALTER_COLUMN_STORAGE_TYPE | - Alter_inplace_info::ALTER_COLUMN_COLUMN_FORMAT)); + Alter_inplace_info::HA_ALTER_FLAGS alter_flags = ha_alter_info->handler_flags; + bool altering_column = + (alter_flags & (Alter_inplace_info::ALTER_COLUMN_DEFAULT | + Alter_inplace_info::ALTER_COLUMN_STORAGE_TYPE | + Alter_inplace_info::ALTER_COLUMN_COLUMN_FORMAT)); /* Check if format is changed. @@ -17138,59 +14775,47 @@ ha_ndbcluster::supported_inplace_ndb_column_change(uint field_idx, if (altering_column && new_field->column_format() != COLUMN_FORMAT_TYPE_DEFAULT && (new_field->column_format() != old_field->column_format() || - new_col.getDynamic() != old_col->getDynamic())) - { + new_col.getDynamic() != old_col->getDynamic())) { DBUG_PRINT("info", ("Detected unsupported format change for field %s : " "field format : old %u new %u " "ndb column format : old %u new %u ", - old_field->field_name, - old_field->column_format(), - new_field->column_format(), - old_col->getDynamic(), + old_field->field_name, old_field->column_format(), + new_field->column_format(), old_col->getDynamic(), new_col.getDynamic())); - DBUG_RETURN(inplace_unsupported(ha_alter_info, - "Altering column format")); + DBUG_RETURN(inplace_unsupported(ha_alter_info, "Altering column format")); } DBUG_RETURN(HA_ALTER_INPLACE_SHARED_LOCK); } -enum_alter_inplace_result -ha_ndbcluster::supported_inplace_field_change(Alter_inplace_info *ha_alter_info, - Field *old_field, - Field *new_field, - bool field_fk_reference, - bool index_on_column) const -{ +enum_alter_inplace_result ha_ndbcluster::supported_inplace_field_change( + Alter_inplace_info *ha_alter_info, Field *old_field, Field *new_field, + bool field_fk_reference, bool index_on_column) const { DBUG_ENTER("supported_inplace_field_change"); // Check for definition change - if (!
old_field->eq_def(new_field)) - { + if (!old_field->eq_def(new_field)) { DBUG_RETURN(inplace_unsupported(ha_alter_info, "Altering field definition is " "not supported")); } // Check max display length - if (new_field->max_display_length() != old_field->max_display_length()) - { + if (new_field->max_display_length() != old_field->max_display_length()) { DBUG_RETURN(inplace_unsupported(ha_alter_info, "Altering field display length is " "not supported")); } // Check if nullable change - if (new_field->maybe_null() != old_field->maybe_null()) - { + if (new_field->maybe_null() != old_field->maybe_null()) { DBUG_RETURN(inplace_unsupported(ha_alter_info, "Altering if field is nullable is " "not supported")); } // Check if auto_increment change - if (new_field->auto_flags != old_field->auto_flags) - { + if (new_field->auto_flags != old_field->auto_flags) { DBUG_RETURN(inplace_unsupported(ha_alter_info, "Altering field auto_increment " "is not supported")); @@ -17198,33 +14823,29 @@ ha_ndbcluster::supported_inplace_field_change(Alter_inplace_info *ha_alter_info, // Check that BLOB fields are not modified if ((old_field->flags & BLOB_FLAG || new_field->flags & BLOB_FLAG) && - !old_field->eq_def(new_field)) - { + !old_field->eq_def(new_field)) { DBUG_RETURN(inplace_unsupported(ha_alter_info, "Altering BLOB field is not supported")); } - const enum enum_field_types mysql_type= old_field->real_type(); + const enum enum_field_types mysql_type = old_field->real_type(); char old_buf[MAX_ATTR_DEFAULT_VALUE_SIZE]; char new_buf[MAX_ATTR_DEFAULT_VALUE_SIZE]; - if ((!(old_field->flags & PRI_KEY_FLAG) ) && - type_supports_default_value(mysql_type)) - { - if (!(old_field->flags & NO_DEFAULT_VALUE_FLAG)) - { - ptrdiff_t src_offset= old_field->table->default_values_offset(); - if ((! 
old_field->is_real_null(src_offset)) || - ((old_field->flags & NOT_NULL_FLAG))) - { + if ((!(old_field->flags & PRI_KEY_FLAG)) && + type_supports_default_value(mysql_type)) { + if (!(old_field->flags & NO_DEFAULT_VALUE_FLAG)) { + ptrdiff_t src_offset = old_field->table->default_values_offset(); + if ((!old_field->is_real_null(src_offset)) || + ((old_field->flags & NOT_NULL_FLAG))) { DBUG_PRINT("info", ("Checking default value hasn't changed " - "for field %s", old_field->field_name)); + "for field %s", + old_field->field_name)); memset(old_buf, 0, MAX_ATTR_DEFAULT_VALUE_SIZE); get_default_value(old_buf, old_field); memset(new_buf, 0, MAX_ATTR_DEFAULT_VALUE_SIZE); get_default_value(new_buf, new_field); - if (memcmp(old_buf, new_buf, MAX_ATTR_DEFAULT_VALUE_SIZE)) - { + if (memcmp(old_buf, new_buf, MAX_ATTR_DEFAULT_VALUE_SIZE)) { DBUG_RETURN(inplace_unsupported(ha_alter_info, "Altering default value is " "not supported")); @@ -17235,37 +14856,33 @@ ha_ndbcluster::supported_inplace_field_change(Alter_inplace_info *ha_alter_info, // Check if the field is renamed if ((new_field->flags & FIELD_IS_RENAMED) || - (strcmp(old_field->field_name, new_field->field_name) != 0)) - { + (strcmp(old_field->field_name, new_field->field_name) != 0)) { DBUG_PRINT("info", ("Detected field %s is renamed %s", old_field->field_name, new_field->field_name)); - if (field_fk_reference) - { - DBUG_PRINT("info", ("Detected unsupported rename field %s being reference from a foreign key", + if (field_fk_reference) { + DBUG_PRINT("info", ("Detected unsupported rename field %s being " + "referenced from a foreign key", old_field->field_name)); - my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0), - "ALTER TABLE", - "Altering name of a field being referenced from a foreign key is not supported", + my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0), "ALTER TABLE", + "Altering name of a field being referenced from a foreign key " + "is not supported", "dropping foreign key first"); DBUG_RETURN(HA_ALTER_ERROR); } - if (index_on_column) - { + if (index_on_column) { // Renaming column that is part of an index is not supported - DBUG_RETURN(inplace_unsupported(ha_alter_info, - "Renaming column that is part of an index is not supported")); + DBUG_RETURN(inplace_unsupported( + ha_alter_info, + "Renaming column that is part of an index is not supported")); } } DBUG_RETURN(HA_ALTER_INPLACE_SHARED_LOCK); } -enum_alter_inplace_result -ha_ndbcluster::supported_inplace_column_change(THD* thd, - TABLE *altered_table, - uint field_position, Field *old_field, - Alter_inplace_info *ha_alter_info) const -{ +enum_alter_inplace_result ha_ndbcluster::supported_inplace_column_change( + THD *thd, TABLE *altered_table, uint field_position, Field *old_field, + Alter_inplace_info *ha_alter_info) const { /* Alter_inplace_info flags indicate a column has been modified; we need to check if an unsupported field type change is found, @@ -17274,19 +14891,17 @@ ha_ndbcluster::supported_inplace_column_change(THD* thd, */ DBUG_ENTER("supported_inplace_column_change"); - HA_CREATE_INFO *create_info= ha_alter_info->create_info; + HA_CREATE_INFO *create_info = ha_alter_info->create_info; - bool is_table_storage_changed= table_storage_changed(create_info); + bool is_table_storage_changed = table_storage_changed(create_info); DBUG_PRINT("info", ("Checking if supported column change for field %s", old_field->field_name)); - Field *new_field= altered_table->field[field_position]; + Field *new_field = altered_table->field[field_position]; // Ignore if old and
new fields are virtual - if(old_field->is_virtual_gcol() && - new_field->is_virtual_gcol()) - { + if (old_field->is_virtual_gcol() && new_field->is_virtual_gcol()) { DBUG_RETURN(HA_ALTER_INPLACE_INSTANT); } @@ -17306,110 +14921,94 @@ ha_ndbcluster::supported_inplace_column_change(THD* thd, "Unsupported change involving generated stored/virtual column")); } - bool is_index_on_column= - column_has_index(table, field_position, 0, table->s->keys); + bool is_index_on_column = + column_has_index(table, field_position, 0, table->s->keys); // Check if storage type or format are changed from Ndb's point of view - enum_alter_inplace_result ndb_column_change_result= - supported_inplace_ndb_column_change(field_position, altered_table, - ha_alter_info, - is_table_storage_changed, - is_index_on_column); + enum_alter_inplace_result ndb_column_change_result = + supported_inplace_ndb_column_change( + field_position, altered_table, ha_alter_info, + is_table_storage_changed, is_index_on_column); if (ndb_column_change_result == HA_ALTER_INPLACE_NOT_SUPPORTED || - ndb_column_change_result == HA_ALTER_ERROR) - { + ndb_column_change_result == HA_ALTER_ERROR) { DBUG_RETURN(ndb_column_change_result); } - bool field_fk_reference= has_fk_dependency(thd, m_table->getColumn(field_position)); + bool field_fk_reference = + has_fk_dependency(thd, m_table->getColumn(field_position)); // Check if table field properties are changed - enum_alter_inplace_result field_change_result= - supported_inplace_field_change(ha_alter_info, - old_field, - new_field, - field_fk_reference, - is_index_on_column); + enum_alter_inplace_result field_change_result = + supported_inplace_field_change(ha_alter_info, old_field, new_field, + field_fk_reference, is_index_on_column); if (field_change_result == HA_ALTER_INPLACE_NOT_SUPPORTED || - field_change_result == HA_ALTER_ERROR) - { + field_change_result == HA_ALTER_ERROR) { DBUG_RETURN(field_change_result); } DBUG_RETURN(HA_ALTER_INPLACE_SHARED_LOCK); } -enum_alter_inplace_result -ha_ndbcluster::check_inplace_alter_supported(TABLE *altered_table, - Alter_inplace_info *ha_alter_info) -{ - THD *thd= current_thd; - HA_CREATE_INFO *create_info= ha_alter_info->create_info; - Alter_info *alter_info= ha_alter_info->alter_info; +enum_alter_inplace_result ha_ndbcluster::check_inplace_alter_supported( + TABLE *altered_table, Alter_inplace_info *ha_alter_info) { + THD *thd = current_thd; + HA_CREATE_INFO *create_info = ha_alter_info->create_info; + Alter_info *alter_info = ha_alter_info->alter_info; /* Save the Alter_inplace_info::HA_ALTER_FLAGS set by the server. Note that some of the flags are set conservatively and after double checking for real changes some flags can be reset.
*/ - Alter_inplace_info::HA_ALTER_FLAGS alter_flags= - ha_alter_info->handler_flags; - const Alter_inplace_info::HA_ALTER_FLAGS supported= - Alter_inplace_info::ADD_INDEX | - Alter_inplace_info::DROP_INDEX | - Alter_inplace_info::ADD_UNIQUE_INDEX | - Alter_inplace_info::DROP_UNIQUE_INDEX | - Alter_inplace_info::ADD_STORED_BASE_COLUMN | - Alter_inplace_info::ADD_VIRTUAL_COLUMN | - Alter_inplace_info::ALTER_COLUMN_DEFAULT | - Alter_inplace_info::ALTER_COLUMN_STORAGE_TYPE | - Alter_inplace_info::ALTER_COLUMN_COLUMN_FORMAT | - Alter_inplace_info::ADD_PARTITION | - Alter_inplace_info::ALTER_TABLE_REORG | - Alter_inplace_info::CHANGE_CREATE_OPTION | - Alter_inplace_info::ADD_FOREIGN_KEY | - Alter_inplace_info::DROP_FOREIGN_KEY | - Alter_inplace_info::ALTER_INDEX_COMMENT | - Alter_inplace_info::ALTER_COLUMN_NAME; - - const Alter_inplace_info::HA_ALTER_FLAGS not_supported= ~supported; - - Alter_inplace_info::HA_ALTER_FLAGS add_column= - Alter_inplace_info::ADD_VIRTUAL_COLUMN | - Alter_inplace_info::ADD_STORED_BASE_COLUMN; - - const Alter_inplace_info::HA_ALTER_FLAGS adding= - Alter_inplace_info::ADD_INDEX | - Alter_inplace_info::ADD_UNIQUE_INDEX; - - const Alter_inplace_info::HA_ALTER_FLAGS dropping= - Alter_inplace_info::DROP_INDEX | - Alter_inplace_info::DROP_UNIQUE_INDEX; - - enum_alter_inplace_result result= HA_ALTER_INPLACE_SHARED_LOCK; + Alter_inplace_info::HA_ALTER_FLAGS alter_flags = ha_alter_info->handler_flags; + const Alter_inplace_info::HA_ALTER_FLAGS supported = + Alter_inplace_info::ADD_INDEX | Alter_inplace_info::DROP_INDEX | + Alter_inplace_info::ADD_UNIQUE_INDEX | + Alter_inplace_info::DROP_UNIQUE_INDEX | + Alter_inplace_info::ADD_STORED_BASE_COLUMN | + Alter_inplace_info::ADD_VIRTUAL_COLUMN | + Alter_inplace_info::ALTER_COLUMN_DEFAULT | + Alter_inplace_info::ALTER_COLUMN_STORAGE_TYPE | + Alter_inplace_info::ALTER_COLUMN_COLUMN_FORMAT | + Alter_inplace_info::ADD_PARTITION | + Alter_inplace_info::ALTER_TABLE_REORG | + Alter_inplace_info::CHANGE_CREATE_OPTION | + Alter_inplace_info::ADD_FOREIGN_KEY | + Alter_inplace_info::DROP_FOREIGN_KEY | + Alter_inplace_info::ALTER_INDEX_COMMENT | + Alter_inplace_info::ALTER_COLUMN_NAME; + + const Alter_inplace_info::HA_ALTER_FLAGS not_supported = ~supported; + + Alter_inplace_info::HA_ALTER_FLAGS add_column = + Alter_inplace_info::ADD_VIRTUAL_COLUMN | + Alter_inplace_info::ADD_STORED_BASE_COLUMN; + + const Alter_inplace_info::HA_ALTER_FLAGS adding = + Alter_inplace_info::ADD_INDEX | Alter_inplace_info::ADD_UNIQUE_INDEX; + + const Alter_inplace_info::HA_ALTER_FLAGS dropping = + Alter_inplace_info::DROP_INDEX | Alter_inplace_info::DROP_UNIQUE_INDEX; + + enum_alter_inplace_result result = HA_ALTER_INPLACE_SHARED_LOCK; DBUG_ENTER("ha_ndbcluster::check_inplace_alter_supported"); - if (alter_flags & Alter_inplace_info::DROP_COLUMN) - { - DBUG_RETURN(inplace_unsupported(ha_alter_info, - "Dropping column")); + if (alter_flags & Alter_inplace_info::DROP_COLUMN) { + DBUG_RETURN(inplace_unsupported(ha_alter_info, "Dropping column")); } - if (alter_flags & Alter_inplace_info::ALTER_STORED_COLUMN_ORDER) - { - DBUG_RETURN(inplace_unsupported(ha_alter_info, - "Altering column order")); + if (alter_flags & Alter_inplace_info::ALTER_STORED_COLUMN_ORDER) { + DBUG_RETURN(inplace_unsupported(ha_alter_info, "Altering column order")); } - partition_info *part_info= altered_table->part_info; - const NDBTAB *old_tab= m_table; + partition_info *part_info = altered_table->part_info; + const NDBTAB *old_tab = m_table; if (THDVAR(thd, use_copying_alter_table) && 
(alter_info->requested_algorithm == - Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT)) - { + Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT)) { // Usage of copying alter has been forced and user has not specified // any ALGORITHM=, don't allow inplace DBUG_RETURN(inplace_unsupported(ha_alter_info, @@ -17420,22 +15019,19 @@ ha_ndbcluster::check_inplace_alter_supported(TABLE *altered_table, DBUG_PRINT("info", ("Supported 0x%llx", supported)); DBUG_PRINT("info", ("Not supported 0x%llx", not_supported)); DBUG_PRINT("info", ("alter_flags & not_supported 0x%llx", - alter_flags & not_supported)); + alter_flags & not_supported)); - bool max_rows_changed= false; + bool max_rows_changed = false; bool comment_changed = false; - if (alter_flags & Alter_inplace_info::CHANGE_CREATE_OPTION) - { + if (alter_flags & Alter_inplace_info::CHANGE_CREATE_OPTION) { DBUG_PRINT("info", ("Some create options changed")); if (create_info->used_fields & HA_CREATE_USED_AUTO && - create_info->auto_increment_value != stats.auto_increment_value) - { + create_info->auto_increment_value != stats.auto_increment_value) { DBUG_PRINT("info", ("The AUTO_INCREMENT value changed")); /* Check that no other create option changed */ - if (create_info->used_fields ^ ~HA_CREATE_USED_AUTO) - { + if (create_info->used_fields ^ ~HA_CREATE_USED_AUTO) { DBUG_RETURN(inplace_unsupported(ha_alter_info, "Not only AUTO_INCREMENT value " "changed")); @@ -17444,85 +15040,75 @@ ha_ndbcluster::check_inplace_alter_supported(TABLE *altered_table, /* Check that ROW_FORMAT didn't change */ if (create_info->used_fields & HA_CREATE_USED_ROW_FORMAT && - create_info->row_type != table_share->real_row_type) - { + create_info->row_type != table_share->real_row_type) { DBUG_RETURN(inplace_unsupported(ha_alter_info, "ROW_FORMAT changed")); } - if (create_info->used_fields & HA_CREATE_USED_MAX_ROWS) - { + if (create_info->used_fields & HA_CREATE_USED_MAX_ROWS) { DBUG_PRINT("info", ("The MAX_ROWS value changed")); - max_rows_changed= true; + max_rows_changed = true; const ulonglong curr_max_rows = table_share->max_rows; - if (curr_max_rows == 0) - { + if (curr_max_rows == 0) { // Don't support setting MAX_ROWS on a table without MAX_ROWS DBUG_RETURN(inplace_unsupported(ha_alter_info, "setting MAX_ROWS on table " "without MAX_ROWS")); } } - if (create_info->used_fields & HA_CREATE_USED_COMMENT) - { + if (create_info->used_fields & HA_CREATE_USED_COMMENT) { DBUG_PRINT("info", ("The COMMENT string changed")); comment_changed = true; } } - if (alter_flags & Alter_inplace_info::ALTER_TABLE_REORG) - { + if (alter_flags & Alter_inplace_info::ALTER_TABLE_REORG) { DBUG_PRINT("info", ("Reorganize partitions")); /* sql_partition.cc tries to compute what is going on and sets flags...that we clear */ - if (part_info->use_default_num_partitions) - { + if (part_info->use_default_num_partitions) { DBUG_PRINT("info", ("Using default number of partitions, " "clear some flags")); - alter_flags= alter_flags & ~Alter_inplace_info::COALESCE_PARTITION; - alter_flags= alter_flags & ~Alter_inplace_info::ADD_PARTITION; + alter_flags = alter_flags & ~Alter_inplace_info::COALESCE_PARTITION; + alter_flags = alter_flags & ~Alter_inplace_info::ADD_PARTITION; } } - Ndb *ndb= get_ndb(thd); - NDBDICT *dict= ndb->getDictionary(); + Ndb *ndb = get_ndb(thd); + NDBDICT *dict = ndb->getDictionary(); ndb->setDatabaseName(m_dbname); - NdbDictionary::Table new_tab= *old_tab; + NdbDictionary::Table new_tab = *old_tab; /** * Check whether altering column properties can be performed inplace * by comparing all old and 
new fields */ - for (uint i= 0; i < table->s->fields; i++) - { - Field *field= table->field[i]; - enum_alter_inplace_result column_change_result= - supported_inplace_column_change(thd, - altered_table, - i, field, - ha_alter_info); - - switch(column_change_result) { - case HA_ALTER_ERROR: - case HA_ALTER_INPLACE_NOT_SUPPORTED: - DBUG_RETURN(column_change_result); - break; - default: - // Save the highest lock requirement - result= MIN(result, column_change_result); - break; + for (uint i = 0; i < table->s->fields; i++) { + Field *field = table->field[i]; + enum_alter_inplace_result column_change_result = + supported_inplace_column_change(thd, altered_table, i, field, + ha_alter_info); + + switch (column_change_result) { + case HA_ALTER_ERROR: + case HA_ALTER_INPLACE_NOT_SUPPORTED: + DBUG_RETURN(column_change_result); + break; + default: + // Save the highest lock requirement + result = MIN(result, column_change_result); + break; } /* If we are changing a field name then change the corresponding column name in the temporary Ndb table. */ - if (alter_flags & Alter_inplace_info::ALTER_COLUMN_NAME) - { - Field *new_field= altered_table->field[i]; + if (alter_flags & Alter_inplace_info::ALTER_COLUMN_NAME) { + Field *new_field = altered_table->field[i]; if (strcmp(field->field_name, new_field->field_name) != 0 && !field->is_virtual_gcol()) { NDBCOL *ndbCol = new_tab.getColumn(new_field->field_index); @@ -17530,246 +15116,210 @@ ha_ndbcluster::check_inplace_alter_supported(TABLE *altered_table, } } } - if (!(alter_flags & Alter_inplace_info::ADD_STORED_BASE_COLUMN)) - { - if (alter_flags & Alter_inplace_info::ALTER_COLUMN_DEFAULT) - { + if (!(alter_flags & Alter_inplace_info::ADD_STORED_BASE_COLUMN)) { + if (alter_flags & Alter_inplace_info::ALTER_COLUMN_DEFAULT) { // We didn't find that the default value has changed, remove flag DBUG_PRINT("info", ("No change of default value found, ignoring flag")); - alter_flags&= ~Alter_inplace_info::ALTER_COLUMN_DEFAULT; + alter_flags &= ~Alter_inplace_info::ALTER_COLUMN_DEFAULT; } - if (alter_flags & Alter_inplace_info::ALTER_COLUMN_STORAGE_TYPE) - { + if (alter_flags & Alter_inplace_info::ALTER_COLUMN_STORAGE_TYPE) { // We didn't find that the storage type has changed, remove flag DBUG_PRINT("info", ("No change of storage type found, ignoring flag")); - alter_flags&= ~Alter_inplace_info::ALTER_COLUMN_STORAGE_TYPE; + alter_flags &= ~Alter_inplace_info::ALTER_COLUMN_STORAGE_TYPE; } - if (alter_flags & Alter_inplace_info::ALTER_COLUMN_COLUMN_FORMAT) - { + if (alter_flags & Alter_inplace_info::ALTER_COLUMN_COLUMN_FORMAT) { // We didn't find that the storage format has changed, remove flag DBUG_PRINT("info", ("No change of storage format found, ignoring flag")); - alter_flags&= ~Alter_inplace_info::ALTER_COLUMN_COLUMN_FORMAT; + alter_flags &= ~Alter_inplace_info::ALTER_COLUMN_COLUMN_FORMAT; } - if (alter_flags & Alter_inplace_info::ALTER_STORED_COLUMN_TYPE) - { + if (alter_flags & Alter_inplace_info::ALTER_STORED_COLUMN_TYPE) { // We didn't find that the storage of the column has changed, remove flag DBUG_PRINT("info", ("No change of storage type, ignoring flag")); - alter_flags&= ~Alter_inplace_info::ALTER_STORED_COLUMN_TYPE; + alter_flags &= ~Alter_inplace_info::ALTER_STORED_COLUMN_TYPE; } } - if (alter_flags & Alter_inplace_info::ALTER_COLUMN_NAME) - { + if (alter_flags & Alter_inplace_info::ALTER_COLUMN_NAME) { /* Check that we are only renaming column */ - if (alter_flags & ~Alter_inplace_info::ALTER_COLUMN_NAME) - { + if (alter_flags & 
~Alter_inplace_info::ALTER_COLUMN_NAME) { DBUG_RETURN(inplace_unsupported(ha_alter_info, "Only rename column exclusively can be " "performed inplace")); } - } + } - if (alter_flags & not_supported) - { + if (alter_flags & not_supported) { if (alter_info->requested_algorithm == Alter_info::ALTER_TABLE_ALGORITHM_INPLACE) - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_ALTER_INFO, + push_warning_printf(thd, Sql_condition::SL_WARNING, ER_ALTER_INFO, "Detected unsupported change: " "HA_ALTER_FLAGS = 0x%llx", alter_flags & not_supported); - DBUG_RETURN(inplace_unsupported(ha_alter_info, - "Detected unsupported change")); + DBUG_RETURN( + inplace_unsupported(ha_alter_info, "Detected unsupported change")); } if (alter_flags & Alter_inplace_info::ALTER_COLUMN_NAME || alter_flags & Alter_inplace_info::ADD_STORED_BASE_COLUMN || alter_flags & Alter_inplace_info::ADD_PARTITION || - alter_flags & Alter_inplace_info::ALTER_TABLE_REORG || - max_rows_changed || - comment_changed) - { + alter_flags & Alter_inplace_info::ALTER_TABLE_REORG || max_rows_changed || + comment_changed) { + result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK; + if (alter_flags & Alter_inplace_info::ADD_STORED_BASE_COLUMN) { + NDBCOL col; - result= HA_ALTER_INPLACE_EXCLUSIVE_LOCK; - if (alter_flags & Alter_inplace_info::ADD_STORED_BASE_COLUMN) - { - NDBCOL col; + /* + Check that we are only adding columns + */ + /* + HA_COLUMN_DEFAULT_VALUE & HA_COLUMN_STORAGE & HA_COLUMN_FORMAT + are set if they are specified in a later command, even if they imply + no change. This is probably a bug; conclusion: add them to the + add_column mask, so that we silently "accept" them. In case someone + tries to change a column, the HA_CHANGE_COLUMN flag would be set, + which we don't support, so we will still return + HA_ALTER_NOT_SUPPORTED in those cases. + */ + add_column |= Alter_inplace_info::ALTER_COLUMN_DEFAULT; + add_column |= Alter_inplace_info::ALTER_COLUMN_STORAGE_TYPE; + add_column |= Alter_inplace_info::ALTER_COLUMN_COLUMN_FORMAT; + if (alter_flags & ~add_column) { + DBUG_RETURN(inplace_unsupported(ha_alter_info, + "Only add column exclusively can be " + "performed online")); + } + /* + Check for extra fields for hidden primary key + or user defined partitioning + */ + if (table_share->primary_key == MAX_KEY || + part_info->part_type != partition_type::HASH || + !part_info->list_of_part_fields) { + DBUG_RETURN(inplace_unsupported(ha_alter_info, + "Found hidden primary key or " + "user defined partitioning")); + } - /* - Check that we are only adding columns - */ - /* - HA_COLUMN_DEFAULT_VALUE & HA_COLUMN_STORAGE & HA_COLUMN_FORMAT - are set if they are specified in an later cmd - even if they're no change.
This is probably a bug - conclusion: add them to add_column-mask, so that we silently "accept" them - In case of someone trying to change a column, the HA_CHANGE_COLUMN would be set - which we don't support, so we will still return HA_ALTER_NOT_SUPPORTED in those cases - */ - add_column|= Alter_inplace_info::ALTER_COLUMN_DEFAULT; - add_column|= Alter_inplace_info::ALTER_COLUMN_STORAGE_TYPE; - add_column|= Alter_inplace_info::ALTER_COLUMN_COLUMN_FORMAT; - if (alter_flags & ~add_column) - { - DBUG_RETURN(inplace_unsupported(ha_alter_info, - "Only add column exclusively can be " - "performed online")); - } - /* - Check for extra fields for hidden primary key - or user defined partitioning - */ - if (table_share->primary_key == MAX_KEY || - part_info->part_type != partition_type::HASH || - !part_info->list_of_part_fields) - { - DBUG_RETURN(inplace_unsupported(ha_alter_info, - "Found hidden primary key or " - "user defined partitioning")); - } - - /* Find the new fields */ - for (uint i= table->s->fields; i < altered_table->s->fields; i++) - { - Field *field= altered_table->field[i]; - if(field->is_virtual_gcol()) - { - DBUG_PRINT("info", ("Field %s is VIRTUAL; not adding.", field->field_name)); - continue; - } - DBUG_PRINT("info", ("Found new field %s", field->field_name)); - DBUG_PRINT("info", ("storage_type %i, column_format %i", - (uint) field->field_storage_type(), - (uint) field->column_format())); - if (!(field->flags & NO_DEFAULT_VALUE_FLAG)) - { - ptrdiff_t src_offset= field->table->s->default_values - - field->table->record[0]; - if (/* - Check that column doesn't have non NULL specified - as default value. - */ - (! field->is_real_null(src_offset)) || - ((field->flags & NOT_NULL_FLAG)) || - /* - Check that column doesn't have - DEFAULT/ON INSERT/UPDATE CURRENT_TIMESTAMP as default - or AUTO_INCREMENT. + /* Find the new fields */ + for (uint i = table->s->fields; i < altered_table->s->fields; i++) { + Field *field = altered_table->field[i]; + if (field->is_virtual_gcol()) { + DBUG_PRINT("info", + ("Field %s is VIRTUAL; not adding.", field->field_name)); + continue; + } + DBUG_PRINT("info", ("Found new field %s", field->field_name)); + DBUG_PRINT("info", ("storage_type %i, column_format %i", + (uint)field->field_storage_type(), + (uint)field->column_format())); + if (!(field->flags & NO_DEFAULT_VALUE_FLAG)) { + ptrdiff_t src_offset = + field->table->s->default_values - field->table->record[0]; + if (/* + Check that column doesn't have non NULL specified + as default value. 
*/ - ((field->has_insert_default_datetime_value_expression() || - field->has_update_default_datetime_value_expression()) || - ((field->auto_flags & Field::NEXT_NUMBER) != 0))) - { - DBUG_RETURN(inplace_unsupported(ha_alter_info, - "Adding column with non-null default value " - "is not supported online")); - } - } - /* Create new field to check if it can be added */ - const int create_column_result = - create_ndb_column(thd, col, field, create_info, - true /* use_dynamic_as_default */); - if (create_column_result) - { - DBUG_PRINT("info", ("Failed to create NDB column, error %d", - create_column_result)); - DBUG_RETURN(HA_ALTER_ERROR); - } - if (new_tab.addColumn(col)) - { - DBUG_PRINT("info", ("Failed to add NDB column to table")); - DBUG_RETURN(HA_ALTER_ERROR); - } - } - } - - if (alter_flags & Alter_inplace_info::ALTER_TABLE_REORG) - { - const ulonglong curr_max_rows = table_share->max_rows; - if (curr_max_rows != 0) - { - // No inplace REORGANIZE PARTITION for table with MAX_ROWS - DBUG_RETURN(inplace_unsupported(ha_alter_info, + (!field->is_real_null(src_offset)) || + ((field->flags & NOT_NULL_FLAG)) || + /* + Check that column doesn't have + DEFAULT/ON INSERT/UPDATE CURRENT_TIMESTAMP as default + or AUTO_INCREMENT. + */ + ((field->has_insert_default_datetime_value_expression() || + field->has_update_default_datetime_value_expression()) || + ((field->auto_flags & Field::NEXT_NUMBER) != 0))) { + DBUG_RETURN( + inplace_unsupported(ha_alter_info, + "Adding column with non-null default value " + "is not supported online")); + } + } + /* Create new field to check if it can be added */ + const int create_column_result = create_ndb_column( + thd, col, field, create_info, true /* use_dynamic_as_default */); + if (create_column_result) { + DBUG_PRINT("info", ("Failed to create NDB column, error %d", + create_column_result)); + DBUG_RETURN(HA_ALTER_ERROR); + } + if (new_tab.addColumn(col)) { + DBUG_PRINT("info", ("Failed to add NDB column to table")); + DBUG_RETURN(HA_ALTER_ERROR); + } + } + } + + if (alter_flags & Alter_inplace_info::ALTER_TABLE_REORG) { + const ulonglong curr_max_rows = table_share->max_rows; + if (curr_max_rows != 0) { + // No inplace REORGANIZE PARTITION for table with MAX_ROWS + DBUG_RETURN(inplace_unsupported(ha_alter_info, "REORGANIZE of table " "with MAX_ROWS")); - } - new_tab.setFragmentCount(0); - new_tab.setFragmentData(0, 0); - } - else if (alter_flags & Alter_inplace_info::ADD_PARTITION) - { - DBUG_PRINT("info", ("Adding partition (%u)", part_info->num_parts)); - new_tab.setFragmentCount(part_info->num_parts); - new_tab.setPartitionBalance(NdbDictionary::Object::PartitionBalance_Specific); - if (new_tab.getFullyReplicated()) - { - DBUG_PRINT("info", ("Add partition isn't supported on fully" - " replicated tables")); - DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); - } - } - if (comment_changed && - parse_comment_changes(&new_tab, - old_tab, - create_info, - thd, - max_rows_changed)) - { - DBUG_RETURN(inplace_unsupported(ha_alter_info, - "Unsupported table modifiers")); - } - else if (max_rows_changed) - { - ulonglong rows= create_info->max_rows; - uint no_fragments= get_no_fragments(rows); - uint reported_frags= no_fragments; - if (adjusted_frag_count(ndb, no_fragments, reported_frags)) - { - push_warning(current_thd, - Sql_condition::SL_WARNING, ER_UNKNOWN_ERROR, - "Ndb might have problems storing the max amount " - "of rows specified"); - } - if (reported_frags < old_tab->getFragmentCount()) - { - DBUG_RETURN(inplace_unsupported(ha_alter_info, - "Online reduction in 
number of " - "fragments not supported")); - } - else if (rows == 0) - { - /* Dont support setting MAX_ROWS to 0 inplace */ - DBUG_RETURN(inplace_unsupported(ha_alter_info, - "Setting MAX_ROWS to 0 is " - "not supported online")); - } - new_tab.setFragmentCount(reported_frags); - new_tab.setDefaultNoPartitionsFlag(false); - new_tab.setFragmentData(0, 0); - new_tab.setPartitionBalance(NdbDictionary::Object::PartitionBalance_Specific); - } - - if (dict->supportedAlterTable(*old_tab, new_tab)) - { - DBUG_PRINT("info", ("Adding column(s) or add/reorganize partition supported online")); - } - else - { - DBUG_RETURN(inplace_unsupported(ha_alter_info, - "Adding column(s) or add/reorganize partition not supported online")); - } + } + new_tab.setFragmentCount(0); + new_tab.setFragmentData(0, 0); + } else if (alter_flags & Alter_inplace_info::ADD_PARTITION) { + DBUG_PRINT("info", ("Adding partition (%u)", part_info->num_parts)); + new_tab.setFragmentCount(part_info->num_parts); + new_tab.setPartitionBalance( + NdbDictionary::Object::PartitionBalance_Specific); + if (new_tab.getFullyReplicated()) { + DBUG_PRINT("info", ("Add partition isn't supported on fully" + " replicated tables")); + DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); + } + } + if (comment_changed && parse_comment_changes(&new_tab, old_tab, create_info, + thd, max_rows_changed)) { + DBUG_RETURN( + inplace_unsupported(ha_alter_info, "Unsupported table modifiers")); + } else if (max_rows_changed) { + ulonglong rows = create_info->max_rows; + uint no_fragments = get_no_fragments(rows); + uint reported_frags = no_fragments; + if (adjusted_frag_count(ndb, no_fragments, reported_frags)) { + push_warning(current_thd, Sql_condition::SL_WARNING, ER_UNKNOWN_ERROR, + "Ndb might have problems storing the max amount " + "of rows specified"); + } + if (reported_frags < old_tab->getFragmentCount()) { + DBUG_RETURN(inplace_unsupported(ha_alter_info, + "Online reduction in number of " + "fragments not supported")); + } else if (rows == 0) { + /* Dont support setting MAX_ROWS to 0 inplace */ + DBUG_RETURN(inplace_unsupported(ha_alter_info, + "Setting MAX_ROWS to 0 is " + "not supported online")); + } + new_tab.setFragmentCount(reported_frags); + new_tab.setDefaultNoPartitionsFlag(false); + new_tab.setFragmentData(0, 0); + new_tab.setPartitionBalance( + NdbDictionary::Object::PartitionBalance_Specific); + } + + if (dict->supportedAlterTable(*old_tab, new_tab)) { + DBUG_PRINT( + "info", + ("Adding column(s) or add/reorganize partition supported online")); + } else { + DBUG_RETURN(inplace_unsupported( + ha_alter_info, + "Adding column(s) or add/reorganize partition not supported online")); + } } /* Check that we are not adding multiple indexes */ - if (alter_flags & adding) - { + if (alter_flags & adding) { if (((altered_table->s->keys - table->s->keys) != 1) || - (alter_flags & dropping)) - { + (alter_flags & dropping)) { DBUG_RETURN(inplace_unsupported(ha_alter_info, "Only one index can be added online")); } @@ -17778,11 +15328,9 @@ ha_ndbcluster::check_inplace_alter_supported(TABLE *altered_table, /* Check that we are not dropping multiple indexes */ - if (alter_flags & dropping) - { + if (alter_flags & dropping) { if (((table->s->keys - altered_table->s->keys) != 1) || - (alter_flags & adding)) - { + (alter_flags & adding)) { DBUG_RETURN(inplace_unsupported(ha_alter_info, "Only one index can be dropped online")); } @@ -17794,19 +15342,15 @@ ha_ndbcluster::check_inplace_alter_supported(TABLE *altered_table, DBUG_RETURN(result); } 
-enum_alter_inplace_result -ha_ndbcluster::check_if_supported_inplace_alter(TABLE *altered_table, - Alter_inplace_info *ha_alter_info) -{ +enum_alter_inplace_result ha_ndbcluster::check_if_supported_inplace_alter( + TABLE *altered_table, Alter_inplace_info *ha_alter_info) { DBUG_ENTER("ha_ndbcluster::check_if_supported_inplace_alter"); - Alter_info *alter_info= ha_alter_info->alter_info; + Alter_info *alter_info = ha_alter_info->alter_info; - enum_alter_inplace_result result= - check_inplace_alter_supported(altered_table, - ha_alter_info); + enum_alter_inplace_result result = + check_inplace_alter_supported(altered_table, ha_alter_info); - if (result == HA_ALTER_INPLACE_NOT_SUPPORTED) - { + if (result == HA_ALTER_INPLACE_NOT_SUPPORTED) { /* The ALTER TABLE is not supported inplace and will fall back to use copying ALTER TABLE. If --ndb-default-column-format is dynamic @@ -17817,115 +15361,90 @@ ha_ndbcluster::check_if_supported_inplace_alter(TABLE *altered_table, if ((opt_ndb_default_column_format == NDB_DEFAULT_COLUMN_FORMAT_DYNAMIC) && (table->s->mysql_version < NDB_VERSION_DYNAMIC_IS_DEFAULT) && (alter_info->requested_algorithm != - Alter_info::ALTER_TABLE_ALGORITHM_INPLACE)) - { + Alter_info::ALTER_TABLE_ALGORITHM_INPLACE)) { check_implicit_column_format_change(altered_table, ha_alter_info); } } DBUG_RETURN(result); } -bool -ha_ndbcluster::parse_comment_changes(NdbDictionary::Table *new_tab, - const NdbDictionary::Table *old_tab, - HA_CREATE_INFO *create_info, - THD *thd, - bool & max_rows_changed) const -{ +bool ha_ndbcluster::parse_comment_changes(NdbDictionary::Table *new_tab, + const NdbDictionary::Table *old_tab, + HA_CREATE_INFO *create_info, THD *thd, + bool &max_rows_changed) const { DBUG_ENTER("ha_ndbcluster::parse_comment_changes"); - NDB_Modifiers table_modifiers(ndb_table_modifier_prefix, - ndb_table_modifiers); + NDB_Modifiers table_modifiers(ndb_table_modifier_prefix, ndb_table_modifiers); if (table_modifiers.loadComment(create_info->comment.str, - create_info->comment.length) == -1) - { + create_info->comment.length) == -1) { push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_ILLEGAL_HA_CREATE_OPTION, - "%s", + ER_ILLEGAL_HA_CREATE_OPTION, "%s", table_modifiers.getErrMsg()); my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), ndbcluster_hton_name, "Syntax error in COMMENT modifier"); DBUG_RETURN(true); } - const NDB_Modifier* mod_nologging = table_modifiers.get("NOLOGGING"); - const NDB_Modifier* mod_frags = table_modifiers.get("PARTITION_BALANCE"); - const NDB_Modifier* mod_read_backup = table_modifiers.get("READ_BACKUP"); - const NDB_Modifier* mod_fully_replicated = - table_modifiers.get("FULLY_REPLICATED"); + const NDB_Modifier *mod_nologging = table_modifiers.get("NOLOGGING"); + const NDB_Modifier *mod_frags = table_modifiers.get("PARTITION_BALANCE"); + const NDB_Modifier *mod_read_backup = table_modifiers.get("READ_BACKUP"); + const NDB_Modifier *mod_fully_replicated = + table_modifiers.get("FULLY_REPLICATED"); NdbDictionary::Object::PartitionBalance part_bal = - g_default_partition_balance; - if (parsePartitionBalance(thd /* for pushing warning */, - mod_frags, &part_bal) == false) - { + g_default_partition_balance; + if (parsePartitionBalance(thd /* for pushing warning */, mod_frags, + &part_bal) == false) { /** * unable to parse => modifier which is not found */ mod_frags = table_modifiers.notfound(); - } - else if (ndbd_support_partition_balance( - get_thd_ndb(thd)->ndb->getMinDbNodeVersion()) == 0) - { + } else if (ndbd_support_partition_balance( + 
get_thd_ndb(thd)->ndb->getMinDbNodeVersion()) == 0) { /** * NDB_TABLE=PARTITION_BALANCE not supported by data nodes. */ - my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), - ndbcluster_hton_name, + my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), ndbcluster_hton_name, "PARTITION_BALANCE not supported by current data node versions"); DBUG_RETURN(true); } - if (mod_nologging->m_found) - { - if (new_tab->getLogging() != (!mod_nologging->m_val_bool)) - { - my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), - ndbcluster_hton_name, + if (mod_nologging->m_found) { + if (new_tab->getLogging() != (!mod_nologging->m_val_bool)) { + my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), ndbcluster_hton_name, "Cannot alter nologging inplace"); DBUG_RETURN(true); } new_tab->setLogging(!mod_nologging->m_val_bool); } - if (mod_read_backup->m_found) - { + if (mod_read_backup->m_found) { if (ndbd_support_read_backup( - get_thd_ndb(thd)->ndb->getMinDbNodeVersion()) == 0) - { + get_thd_ndb(thd)->ndb->getMinDbNodeVersion()) == 0) { /** * NDB_TABLE=READ_BACKUP not supported by data nodes. */ - my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), - ndbcluster_hton_name, + my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), ndbcluster_hton_name, "READ_BACKUP not supported by current data node versions"); DBUG_RETURN(true); } - if (old_tab->getFullyReplicated() && - (!mod_read_backup->m_val_bool)) - { + if (old_tab->getFullyReplicated() && (!mod_read_backup->m_val_bool)) { my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0), - "ALGORITHM=INPLACE", - "READ_BACKUP off with FULLY_REPLICATED on", + "ALGORITHM=INPLACE", "READ_BACKUP off with FULLY_REPLICATED on", "ALGORITHM=COPY"); DBUG_RETURN(true); } new_tab->setReadBackupFlag(mod_read_backup->m_val_bool); } - if (mod_fully_replicated->m_found) - { + if (mod_fully_replicated->m_found) { if (ndbd_support_fully_replicated( - get_thd_ndb(thd)->ndb->getMinDbNodeVersion()) == 0) - { + get_thd_ndb(thd)->ndb->getMinDbNodeVersion()) == 0) { /** * NDB_TABLE=FULLY_REPLICATED not supported by data nodes. */ - my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), - ndbcluster_hton_name, - "FULLY_REPLICATED not supported by current data node versions"); + my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), ndbcluster_hton_name, + "FULLY_REPLICATED not supported by current data node versions"); DBUG_RETURN(true); } - if (!old_tab->getFullyReplicated()) - { + if (!old_tab->getFullyReplicated()) { my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0), - "ALGORITHM=INPLACE", - "Turning FULLY_REPLICATED on after create", + "ALGORITHM=INPLACE", "Turning FULLY_REPLICATED on after create", "ALGORITHM=COPY"); DBUG_RETURN(true); } @@ -17936,26 +15455,20 @@ ha_ndbcluster::parse_comment_changes(NdbDictionary::Table *new_tab, * variable to affect new tables. For ALTER TABLE one has to set these * properties explicitly. */ - if (mod_frags->m_found) - { - if (max_rows_changed) - { + if (mod_frags->m_found) { + if (max_rows_changed) { max_rows_changed = false; } new_tab->setFragmentCount(0); - new_tab->setFragmentData(0,0); + new_tab->setFragmentData(0, 0); new_tab->setPartitionBalance(part_bal); DBUG_PRINT("info", ("parse_comment_changes: PartitionBalance: %s", new_tab->getPartitionBalanceString())); - } - else - { + } else { part_bal = old_tab->getPartitionBalance(); } - if (old_tab->getFullyReplicated()) - { - if (part_bal != old_tab->getPartitionBalance()) - { + if (old_tab->getFullyReplicated()) { + if (part_bal != old_tab->getPartitionBalance()) { /** * We cannot change partition balance inplace for fully * replicated tables. 
@@ -17971,7 +15484,6 @@ ha_ndbcluster::parse_comment_changes(NdbDictionary::Table *new_tab, DBUG_RETURN(false); } - /** Updates the internal structures and prepares them for the inplace alter. @@ -17986,129 +15498,112 @@ ha_ndbcluster::parse_comment_changes(NdbDictionary::Table *new_tab, @retval true Error @retval false Success */ -bool -ha_ndbcluster::prepare_inplace_alter_table(TABLE *altered_table, - Alter_inplace_info *ha_alter_info, - const dd::Table *, dd::Table *) -{ - int error= 0; - THD *thd= current_thd; - Thd_ndb *thd_ndb= get_thd_ndb(thd); - Ndb *ndb= get_ndb(thd); - NDBDICT *dict= ndb->getDictionary(); +bool ha_ndbcluster::prepare_inplace_alter_table( + TABLE *altered_table, Alter_inplace_info *ha_alter_info, const dd::Table *, + dd::Table *) { + int error = 0; + THD *thd = current_thd; + Thd_ndb *thd_ndb = get_thd_ndb(thd); + Ndb *ndb = get_ndb(thd); + NDBDICT *dict = ndb->getDictionary(); ndb->setDatabaseName(m_dbname); - HA_CREATE_INFO *create_info= ha_alter_info->create_info; + HA_CREATE_INFO *create_info = ha_alter_info->create_info; - const Alter_inplace_info::HA_ALTER_FLAGS alter_flags= - ha_alter_info->handler_flags; + const Alter_inplace_info::HA_ALTER_FLAGS alter_flags = + ha_alter_info->handler_flags; - const Alter_inplace_info::HA_ALTER_FLAGS adding= - Alter_inplace_info::ADD_INDEX | - Alter_inplace_info::ADD_UNIQUE_INDEX; + const Alter_inplace_info::HA_ALTER_FLAGS adding = + Alter_inplace_info::ADD_INDEX | Alter_inplace_info::ADD_UNIQUE_INDEX; - const Alter_inplace_info::HA_ALTER_FLAGS dropping= - Alter_inplace_info::DROP_INDEX | - Alter_inplace_info::DROP_UNIQUE_INDEX; + const Alter_inplace_info::HA_ALTER_FLAGS dropping = + Alter_inplace_info::DROP_INDEX | Alter_inplace_info::DROP_UNIQUE_INDEX; DBUG_ENTER("ha_ndbcluster::prepare_inplace_alter_table"); - ha_alter_info->handler_ctx= 0; - if (!thd_ndb->has_required_global_schema_lock("ha_ndbcluster::prepare_inplace_alter_table")) + ha_alter_info->handler_ctx = 0; + if (!thd_ndb->has_required_global_schema_lock( + "ha_ndbcluster::prepare_inplace_alter_table")) DBUG_RETURN(true); NDB_ALTER_DATA *alter_data; - if (!(alter_data= new (*THR_MALLOC) NDB_ALTER_DATA(thd, dict, m_table))) + if (!(alter_data = new (*THR_MALLOC) NDB_ALTER_DATA(thd, dict, m_table))) DBUG_RETURN(true); - if (!alter_data->schema_dist_client.prepare(m_dbname, m_tabname)) - { + if (!alter_data->schema_dist_client.prepare(m_dbname, m_tabname)) { DBUG_RETURN(HA_ERR_NO_CONNECTION); } - const NDBTAB* const old_tab = alter_data->old_table; - NdbDictionary::Table * const new_tab = alter_data->new_table; - ha_alter_info->handler_ctx= alter_data; + const NDBTAB *const old_tab = alter_data->old_table; + NdbDictionary::Table *const new_tab = alter_data->new_table; + ha_alter_info->handler_ctx = alter_data; DBUG_PRINT("info", ("altered_table: '%s, alter_flags: 0x%llx", - altered_table->s->table_name.str, - alter_flags)); + altered_table->s->table_name.str, alter_flags)); - bool max_rows_changed= false; + bool max_rows_changed = false; bool comment_changed = false; - if (alter_flags & Alter_inplace_info::CHANGE_CREATE_OPTION) - { + if (alter_flags & Alter_inplace_info::CHANGE_CREATE_OPTION) { if (create_info->used_fields & HA_CREATE_USED_MAX_ROWS) - max_rows_changed= true; - if (create_info->used_fields & HA_CREATE_USED_COMMENT) - { + max_rows_changed = true; + if (create_info->used_fields & HA_CREATE_USED_COMMENT) { DBUG_PRINT("info", ("The COMMENT string changed")); - comment_changed= true; + comment_changed = true; } } // Pin the NDB_SHARE of the altered 
table - NDB_SHARE::acquire_reference_on_existing(m_share, - "inplace_alter"); + NDB_SHARE::acquire_reference_on_existing(m_share, "inplace_alter"); - if (dict->beginSchemaTrans() == -1) - { + if (dict->beginSchemaTrans() == -1) { thd_ndb->set_ndb_error(dict->getNdbError(), "Failed to start schema transaction"); goto err; } - if (alter_flags & adding) - { - KEY *key_info; - KEY *key; - uint *idx_p; - uint *idx_end_p; + if (alter_flags & adding) { + KEY *key_info; + KEY *key; + uint *idx_p; + uint *idx_end_p; KEY_PART_INFO *key_part; KEY_PART_INFO *part_end; DBUG_PRINT("info", ("Adding indexes")); - key_info= (KEY*) thd->alloc(sizeof(KEY) * ha_alter_info->index_add_count); - key= key_info; - for (idx_p= ha_alter_info->index_add_buffer, - idx_end_p= idx_p + ha_alter_info->index_add_count; - idx_p < idx_end_p; - idx_p++, key++) - { + key_info = (KEY *)thd->alloc(sizeof(KEY) * ha_alter_info->index_add_count); + key = key_info; + for (idx_p = ha_alter_info->index_add_buffer, + idx_end_p = idx_p + ha_alter_info->index_add_count; + idx_p < idx_end_p; idx_p++, key++) { /* Copy the KEY struct. */ - *key= ha_alter_info->key_info_buffer[*idx_p]; + *key = ha_alter_info->key_info_buffer[*idx_p]; /* Fix the key parts. */ - part_end= key->key_part + key->user_defined_key_parts; - for (key_part= key->key_part; key_part < part_end; key_part++) - key_part->field= table->field[key_part->fieldnr]; + part_end = key->key_part + key->user_defined_key_parts; + for (key_part = key->key_part; key_part < part_end; key_part++) + key_part->field = table->field[key_part->fieldnr]; } if ((error = prepare_inplace__add_index(thd, key_info, - ha_alter_info->index_add_count))) - { + ha_alter_info->index_add_count))) { /* - Exchange the key_info for the error message. If we exchange - key number by key name in the message later, we need correct info. + Exchange the key_info for the error message. If we exchange + key number by key name in the message later, we need correct info. 
*/ - KEY *save_key_info= table->key_info; - table->key_info= key_info; + KEY *save_key_info = table->key_info; + table->key_info = key_info; table->file->print_error(error, MYF(0)); - table->key_info= save_key_info; + table->key_info = save_key_info; goto abort; } } - if (alter_flags & dropping) - { - for (uint i =0; i < ha_alter_info->index_drop_count; i++) - { - const KEY* key_ptr = ha_alter_info->index_drop_buffer[i]; - for(uint key_num=0; key_num < table->s->keys; key_num++) - { + if (alter_flags & dropping) { + for (uint i = 0; i < ha_alter_info->index_drop_count; i++) { + const KEY *key_ptr = ha_alter_info->index_drop_buffer[i]; + for (uint key_num = 0; key_num < table->s->keys; key_num++) { /* Find the key_num of the key to be dropped and mark it as dropped */ - if (key_ptr == table->key_info + key_num) - { + if (key_ptr == table->key_info + key_num) { prepare_inplace__drop_index(key_num); break; } @@ -18116,124 +15611,100 @@ ha_ndbcluster::prepare_inplace_alter_table(TABLE *altered_table, } } - if (alter_flags & Alter_inplace_info::ALTER_COLUMN_NAME) - { - DBUG_PRINT("info", ("Finding renamed field")); - /* Find the renamed field */ - for (uint i= 0; i < table->s->fields; i++) - { - Field *old_field= table->field[i]; - Field *new_field= altered_table->field[i]; - if (strcmp(old_field->field_name, new_field->field_name) != 0) - { - DBUG_PRINT("info", ("Found field %s renamed to %s", - old_field->field_name, new_field->field_name)); - NdbDictionary::Column* ndbCol= new_tab->getColumn(new_field->field_index); - ndbCol->setName(new_field->field_name); - } - } - } - - if (alter_flags & Alter_inplace_info::ADD_STORED_BASE_COLUMN) - { - NDBCOL col; - - /* Find the new fields */ - for (uint i = table->s->fields; i < altered_table->s->fields; i++) - { - Field *field= altered_table->field[i]; - if(! 
field->stored_in_db) - continue; - - DBUG_PRINT("info", ("Found new field %s", field->field_name)); - if (create_ndb_column(thd, col, field, create_info, - true /* use_dynamic_as_default */) != 0) - { - // Failed to create column in NDB - goto abort; - } - - /* - If the user has not specified the field format - make it dynamic to enable online add attribute - */ - if (field->column_format() == COLUMN_FORMAT_TYPE_DEFAULT && - create_info->row_type == ROW_TYPE_DEFAULT && - col.getDynamic()) - { - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_ILLEGAL_HA_CREATE_OPTION, - "Converted FIXED field '%s' to DYNAMIC " - "to enable online ADD COLUMN", - field->field_name); - } - new_tab->addColumn(col); - } + if (alter_flags & Alter_inplace_info::ALTER_COLUMN_NAME) { + DBUG_PRINT("info", ("Finding renamed field")); + /* Find the renamed field */ + for (uint i = 0; i < table->s->fields; i++) { + Field *old_field = table->field[i]; + Field *new_field = altered_table->field[i]; + if (strcmp(old_field->field_name, new_field->field_name) != 0) { + DBUG_PRINT("info", ("Found field %s renamed to %s", + old_field->field_name, new_field->field_name)); + NdbDictionary::Column *ndbCol = + new_tab->getColumn(new_field->field_index); + ndbCol->setName(new_field->field_name); + } + } + } + + if (alter_flags & Alter_inplace_info::ADD_STORED_BASE_COLUMN) { + NDBCOL col; + + /* Find the new fields */ + for (uint i = table->s->fields; i < altered_table->s->fields; i++) { + Field *field = altered_table->field[i]; + if (!field->stored_in_db) continue; + + DBUG_PRINT("info", ("Found new field %s", field->field_name)); + if (create_ndb_column(thd, col, field, create_info, + true /* use_dynamic_as_default */) != 0) { + // Failed to create column in NDB + goto abort; + } + + /* + If the user has not specified the field format + make it dynamic to enable online add attribute + */ + if (field->column_format() == COLUMN_FORMAT_TYPE_DEFAULT && + create_info->row_type == ROW_TYPE_DEFAULT && col.getDynamic()) { + push_warning_printf(thd, Sql_condition::SL_WARNING, + ER_ILLEGAL_HA_CREATE_OPTION, + "Converted FIXED field '%s' to DYNAMIC " + "to enable online ADD COLUMN", + field->field_name); + } + new_tab->addColumn(col); + } } if (alter_flags & Alter_inplace_info::ALTER_TABLE_REORG || - alter_flags & Alter_inplace_info::ADD_PARTITION || - max_rows_changed || - comment_changed) - { - if (alter_flags & Alter_inplace_info::ALTER_TABLE_REORG) - { + alter_flags & Alter_inplace_info::ADD_PARTITION || max_rows_changed || + comment_changed) { + if (alter_flags & Alter_inplace_info::ALTER_TABLE_REORG) { new_tab->setFragmentCount(0); new_tab->setFragmentData(0, 0); - } - else if (alter_flags & Alter_inplace_info::ADD_PARTITION) - { - partition_info *part_info= altered_table->part_info; + } else if (alter_flags & Alter_inplace_info::ADD_PARTITION) { + partition_info *part_info = altered_table->part_info; DBUG_PRINT("info", ("Adding partition (%u)", part_info->num_parts)); new_tab->setFragmentCount(part_info->num_parts); new_tab->setPartitionBalance( - NdbDictionary::Object::PartitionBalance_Specific); - } - else if (comment_changed && - parse_comment_changes(new_tab, - old_tab, - create_info, - thd, - max_rows_changed)) - { + NdbDictionary::Object::PartitionBalance_Specific); + } else if (comment_changed && + parse_comment_changes(new_tab, old_tab, create_info, thd, + max_rows_changed)) { goto abort; - } - else if (max_rows_changed) - { - ulonglong rows= create_info->max_rows; - uint no_fragments= get_no_fragments(rows); - uint 
reported_frags= no_fragments; - if (adjusted_frag_count(ndb, no_fragments, reported_frags)) - { + } else if (max_rows_changed) { + ulonglong rows = create_info->max_rows; + uint no_fragments = get_no_fragments(rows); + uint reported_frags = no_fragments; + if (adjusted_frag_count(ndb, no_fragments, reported_frags)) { DBUG_ASSERT(false); /* Checked above */ } - if (reported_frags < old_tab->getFragmentCount()) - { + if (reported_frags < old_tab->getFragmentCount()) { DBUG_ASSERT(false); DBUG_RETURN(false); } - /* Note we don't set the ndb table's max_rows param, as that + /* Note we don't set the ndb table's max_rows param, as that * is considered a 'real' change */ - //new_tab->setMaxRows(create_info->max_rows); + // new_tab->setMaxRows(create_info->max_rows); new_tab->setFragmentCount(reported_frags); new_tab->setDefaultNoPartitionsFlag(false); new_tab->setFragmentData(0, 0); - new_tab->setPartitionBalance(NdbDictionary::Object::PartitionBalance_Specific); + new_tab->setPartitionBalance( + NdbDictionary::Object::PartitionBalance_Specific); } - - if (dict->prepareHashMap(*old_tab, *new_tab) == -1) - { + + if (dict->prepareHashMap(*old_tab, *new_tab) == -1) { thd_ndb->set_ndb_error(dict->getNdbError(), "Failed to prepare hash map"); goto abort; } } - if (alter_flags & Alter_inplace_info::ADD_FOREIGN_KEY) - { + if (alter_flags & Alter_inplace_info::ADD_FOREIGN_KEY) { const int create_fks_result = create_fks(thd, ndb); - if (create_fks_result != 0) - { + if (create_fks_result != 0) { table->file->print_error(create_fks_result, MYF(0)); goto abort; } @@ -18241,9 +15712,7 @@ ha_ndbcluster::prepare_inplace_alter_table(TABLE *altered_table, DBUG_RETURN(false); abort: - if (dict->endSchemaTrans(NdbDictionary::Dictionary::SchemaTransAbort) - == -1) - { + if (dict->endSchemaTrans(NdbDictionary::Dictionary::SchemaTransAbort) == -1) { thd_ndb->push_ndb_error_warning(dict->getNdbError()); thd_ndb->push_warning("Failed to abort NDB schema transaction"); } @@ -18252,32 +15721,25 @@ ha_ndbcluster::prepare_inplace_alter_table(TABLE *altered_table, DBUG_RETURN(true); } - -static -int -inplace__set_sdi_and_alter_in_ndb(THD *thd, - const NDB_ALTER_DATA* alter_data, - dd::Table* new_table_def, - const char* schema_name) -{ +static int inplace__set_sdi_and_alter_in_ndb(THD *thd, + const NDB_ALTER_DATA *alter_data, + dd::Table *new_table_def, + const char *schema_name) { DBUG_ENTER("inplace__set_sdi_and_alter_in_ndb"); ndb_dd_fix_inplace_alter_table_def(new_table_def, alter_data->old_table->getName()); dd::sdi_t sdi; - if (!ndb_sdi_serialize(thd, new_table_def, schema_name, sdi)) - { + if (!ndb_sdi_serialize(thd, new_table_def, schema_name, sdi)) { DBUG_RETURN(1); } - - NdbDictionary::Table* new_tab= alter_data->new_table; + NdbDictionary::Table *new_tab = alter_data->new_table; const int set_result = - new_tab->setExtraMetadata(2, // version 2 for frm + new_tab->setExtraMetadata(2, // version 2 for frm sdi.c_str(), (Uint32)sdi.length()); - if (set_result != 0) - { + if (set_result != 0) { my_printf_error(ER_GET_ERRMSG, "Failed to set extra metadata during" "inplace alter table, error: %d", @@ -18285,18 +15747,16 @@ inplace__set_sdi_and_alter_in_ndb(THD *thd, DBUG_RETURN(2); } - NdbDictionary::Dictionary* dict= alter_data->dictionary; - if (dict->alterTableGlobal(*alter_data->old_table, *new_tab)) - { - DBUG_PRINT("info", ("Inplace alter of table %s failed", - new_tab->getName())); - const NdbError ndberr= dict->getNdbError(); - const int error= ndb_to_mysql_error(&ndberr); + NdbDictionary::Dictionary *dict = 
alter_data->dictionary; + if (dict->alterTableGlobal(*alter_data->old_table, *new_tab)) { + DBUG_PRINT("info", + ("Inplace alter of table %s failed", new_tab->getName())); + const NdbError ndberr = dict->getNdbError(); + const int error = ndb_to_mysql_error(&ndberr); my_error(ER_GET_ERRMSG, MYF(0), error, ndberr.message, "NDBCLUSTER"); DBUG_RETURN(error); } - DBUG_RETURN(0); } @@ -18305,46 +15765,40 @@ bool ha_ndbcluster::inplace_alter_table(TABLE *, const dd::Table *, dd::Table *new_table_def) { DBUG_ENTER("ha_ndbcluster::inplace_alter_table"); - int error= 0; - THD *thd= current_thd; - Thd_ndb *thd_ndb= get_thd_ndb(thd); - HA_CREATE_INFO *create_info= ha_alter_info->create_info; - NDB_ALTER_DATA *alter_data= (NDB_ALTER_DATA *) ha_alter_info->handler_ctx; - NDBDICT *dict= alter_data->dictionary; - const Alter_inplace_info::HA_ALTER_FLAGS alter_flags= - ha_alter_info->handler_flags; - const Alter_inplace_info::HA_ALTER_FLAGS dropping= - Alter_inplace_info::DROP_INDEX | - Alter_inplace_info::DROP_UNIQUE_INDEX; - - if (!thd_ndb->has_required_global_schema_lock("ha_ndbcluster::inplace_alter_table")) - { + int error = 0; + THD *thd = current_thd; + Thd_ndb *thd_ndb = get_thd_ndb(thd); + HA_CREATE_INFO *create_info = ha_alter_info->create_info; + NDB_ALTER_DATA *alter_data = (NDB_ALTER_DATA *)ha_alter_info->handler_ctx; + NDBDICT *dict = alter_data->dictionary; + const Alter_inplace_info::HA_ALTER_FLAGS alter_flags = + ha_alter_info->handler_flags; + const Alter_inplace_info::HA_ALTER_FLAGS dropping = + Alter_inplace_info::DROP_INDEX | Alter_inplace_info::DROP_UNIQUE_INDEX; + + if (!thd_ndb->has_required_global_schema_lock( + "ha_ndbcluster::inplace_alter_table")) { DBUG_RETURN(true); } - bool auto_increment_value_changed= false; - if (alter_flags & Alter_inplace_info::CHANGE_CREATE_OPTION) - { + bool auto_increment_value_changed = false; + if (alter_flags & Alter_inplace_info::CHANGE_CREATE_OPTION) { if (create_info->auto_increment_value != - table->file->stats.auto_increment_value) - auto_increment_value_changed= true; + table->file->stats.auto_increment_value) + auto_increment_value_changed = true; } - if (alter_flags & dropping) - { + if (alter_flags & dropping) { /* Tell the handler to finally drop the indexes. 
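For context on the setExtraMetadata(2, ...) call in the helper above: the serialized dictionary information (SDI) is attached to the NDB table as an opaque blob tagged with a format version, and the inline comment suggests version 2 identifies SDI as opposed to the older .frm payload. A self-contained sketch of such a version-tagged metadata slot (hypothetical ExtraMetadata type, not the NdbDictionary API):

    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Hypothetical version-tagged metadata slot (not the NdbDictionary API).
    struct ExtraMetadata {
      uint32_t version = 0;  // assumed: 1 = legacy frm blob, 2 = SDI
      std::string blob;
    };

    // Mirrors the 0-on-success convention of setExtraMetadata().
    static int set_extra_metadata(ExtraMetadata &slot, uint32_t version,
                                  const char *data, uint32_t length) {
      if (version != 1 && version != 2) return 1;  // unknown format tag
      slot.version = version;
      slot.blob.assign(data, length);
      return 0;
    }

    int main() {
      ExtraMetadata slot;
      const std::string sdi = "{\"dd_object\": \"...\"}";  // stand-in payload
      if (set_extra_metadata(slot, 2, sdi.c_str(),
                             (uint32_t)sdi.length()) != 0)
        return 1;
      std::printf("version=%u bytes=%zu\n", (unsigned)slot.version,
                  slot.blob.size());
      return 0;
    }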
*/ - if ((error= inplace__final_drop_index(table))) - { + if ((error = inplace__final_drop_index(table))) { print_error(error, MYF(0)); goto abort; } } - if (alter_flags & Alter_inplace_info::DROP_FOREIGN_KEY) - { - const NDBTAB* tab= alter_data->old_table; - if ((error= inplace__drop_fks(thd, thd_ndb->ndb, dict, tab)) != 0) - { + if (alter_flags & Alter_inplace_info::DROP_FOREIGN_KEY) { + const NDBTAB *tab = alter_data->old_table; + if ((error = inplace__drop_fks(thd, thd_ndb->ndb, dict, tab)) != 0) { print_error(error, MYF(0)); goto abort; } @@ -18352,35 +15806,30 @@ bool ha_ndbcluster::inplace_alter_table(TABLE *, DBUG_ASSERT(m_table != 0); - error= inplace__set_sdi_and_alter_in_ndb(thd, alter_data, - new_table_def, m_dbname); - if (!error) - { + error = inplace__set_sdi_and_alter_in_ndb(thd, alter_data, new_table_def, + m_dbname); + if (!error) { /* * Alter succesful, commit schema transaction */ - if (dict->endSchemaTrans() == -1) - { - error= ndb_to_mysql_error(&dict->getNdbError()); - DBUG_PRINT("info", ("Failed to commit schema transaction, error %u", - error)); + if (dict->endSchemaTrans() == -1) { + error = ndb_to_mysql_error(&dict->getNdbError()); + DBUG_PRINT("info", + ("Failed to commit schema transaction, error %u", error)); table->file->print_error(error, MYF(0)); goto err; } if (auto_increment_value_changed) - error= set_auto_inc_val(thd, create_info->auto_increment_value); - if (error) - { + error = set_auto_inc_val(thd, create_info->auto_increment_value); + if (error) { DBUG_PRINT("info", ("Failed to set auto_increment value")); goto err; } - } - else // if (error) + } else // if (error) { -abort: - if (dict->endSchemaTrans(NdbDictionary::Dictionary::SchemaTransAbort) - == -1) - { + abort: + if (dict->endSchemaTrans(NdbDictionary::Dictionary::SchemaTransAbort) == + -1) { DBUG_PRINT("info", ("Failed to abort schema transaction")); ERR_PRINT(dict->getNdbError()); } @@ -18390,30 +15839,25 @@ bool ha_ndbcluster::inplace_alter_table(TABLE *, DBUG_RETURN(error ? 
true : false); } -bool -ha_ndbcluster::commit_inplace_alter_table(TABLE *altered_table, - Alter_inplace_info *ha_alter_info, - bool commit, - const dd::Table*, - dd::Table* new_table_def) -{ +bool ha_ndbcluster::commit_inplace_alter_table( + TABLE *altered_table, Alter_inplace_info *ha_alter_info, bool commit, + const dd::Table *, dd::Table *new_table_def) { DBUG_ENTER("ha_ndbcluster::commit_inplace_alter_table"); if (!commit) - DBUG_RETURN(abort_inplace_alter_table(altered_table, - ha_alter_info)); - THD *thd= current_thd; - Thd_ndb *thd_ndb= get_thd_ndb(thd); - if (!thd_ndb->has_required_global_schema_lock("ha_ndbcluster::commit_inplace_alter_table")) - { - DBUG_RETURN(true); // Error + DBUG_RETURN(abort_inplace_alter_table(altered_table, ha_alter_info)); + THD *thd = current_thd; + Thd_ndb *thd_ndb = get_thd_ndb(thd); + if (!thd_ndb->has_required_global_schema_lock( + "ha_ndbcluster::commit_inplace_alter_table")) { + DBUG_RETURN(true); // Error } - const char *db= table->s->db.str; - const char *name= table->s->table_name.str; - NDB_ALTER_DATA *alter_data= (NDB_ALTER_DATA *) ha_alter_info->handler_ctx; - const Uint32 table_id= alter_data->table_id; - const Uint32 table_version= alter_data->old_table_version; + const char *db = table->s->db.str; + const char *name = table->s->table_name.str; + NDB_ALTER_DATA *alter_data = (NDB_ALTER_DATA *)ha_alter_info->handler_ctx; + const Uint32 table_id = alter_data->table_id; + const Uint32 table_version = alter_data->old_table_version; // Pass pointer to table_def for usage by schema dist participant // in the binlog thread of this mysqld. @@ -18421,8 +15865,7 @@ ha_ndbcluster::commit_inplace_alter_table(TABLE *altered_table, Ndb_schema_dist_client &schema_dist_client = alter_data->schema_dist_client; if (!schema_dist_client.alter_table_inplace_prepare(db, name, table_id, - table_version)) - { + table_version)) { // Failed to distribute the prepare of this alter table to the // other MySQL Servers, just log error and continue ndb_log_error("Failed to distribute inplace alter table prepare for '%s'", @@ -18437,23 +15880,21 @@ ha_ndbcluster::commit_inplace_alter_table(TABLE *altered_table, // The caller will then save it into DD { Ndb_table_guard ndbtab_g(alter_data->dictionary, name); - const NDBTAB *ndbtab= ndbtab_g.get_table(); + const NDBTAB *ndbtab = ndbtab_g.get_table(); // The id should still be the same as before the alter DBUG_ASSERT((Uint32)ndbtab->getObjectId() == table_id); // The version should have been changed by the alter DBUG_ASSERT((Uint32)ndbtab->getObjectVersion() != table_version); - ndb_dd_table_set_object_id_and_version(new_table_def, - table_id, + ndb_dd_table_set_object_id_and_version(new_table_def, table_id, ndbtab->getObjectVersion()); // Also check and correct the partition count if required. 
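The commit path above re-reads the table from NDB, asserts that the object id survived while the version changed, and then pushes the fresh id/version plus, when needed, a corrected partition count into the new dd::Table definition. A minimal sketch of that check-then-fix shape (DDTableStub is a stand-in; the real calls are ndb_dd_table_check_partition_count and ndb_dd_table_fix_partition_count):

    #include <cstdio>

    // Hypothetical stand-in for the DD-side copy of the partition count.
    struct DDTableStub {
      unsigned partition_count;
    };

    // Check first, repair only on mismatch, mirroring the hunk above.
    static bool check_partition_count(const DDTableStub &t, unsigned actual) {
      return t.partition_count == actual;
    }
    static void fix_partition_count(DDTableStub &t, unsigned actual) {
      t.partition_count = actual;
    }

    int main() {
      DDTableStub table{4};
      const unsigned reported_by_ndb = 8;  // e.g. after ADD PARTITION
      if (!check_partition_count(table, reported_by_ndb))
        fix_partition_count(table, reported_by_ndb);
      std::printf("partition_count=%u\n", table.partition_count);
      return 0;
    }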
const bool check_partition_count_result = ndb_dd_table_check_partition_count(new_table_def, ndbtab->getPartitionCount()); - if (!check_partition_count_result) - { + if (!check_partition_count_result) { ndb_dd_table_fix_partition_count(new_table_def, ndbtab->getPartitionCount()); } @@ -18462,24 +15903,21 @@ ha_ndbcluster::commit_inplace_alter_table(TABLE *altered_table, // Unpin the NDB_SHARE of the altered table NDB_SHARE::release_reference(m_share, "inplace_alter"); - DBUG_RETURN(false); // OK + DBUG_RETURN(false); // OK } -bool ha_ndbcluster::abort_inplace_alter_table(TABLE *, - Alter_inplace_info *ha_alter_info) -{ +bool ha_ndbcluster::abort_inplace_alter_table( + TABLE *, Alter_inplace_info *ha_alter_info) { DBUG_ENTER("ha_ndbcluster::abort_inplace_alter_table"); - NDB_ALTER_DATA *alter_data= (NDB_ALTER_DATA *) ha_alter_info->handler_ctx; - if (!alter_data) - { + NDB_ALTER_DATA *alter_data = (NDB_ALTER_DATA *)ha_alter_info->handler_ctx; + if (!alter_data) { // Could not find any alter_data, nothing to abort or already aborted - DBUG_RETURN(false); // OK + DBUG_RETURN(false); // OK } - NDBDICT *dict= alter_data->dictionary; - if (dict->endSchemaTrans(NdbDictionary::Dictionary::SchemaTransAbort) == -1) - { + NDBDICT *dict = alter_data->dictionary; + if (dict->endSchemaTrans(NdbDictionary::Dictionary::SchemaTransAbort) == -1) { DBUG_PRINT("info", ("Failed to abort schema transaction")); ERR_PRINT(dict->getNdbError()); } @@ -18488,43 +15926,40 @@ bool ha_ndbcluster::abort_inplace_alter_table(TABLE *, // schema distribution has been aborted destroy(alter_data); - ha_alter_info->handler_ctx= 0; + ha_alter_info->handler_ctx = 0; // Unpin the NDB_SHARE of the altered table NDB_SHARE::release_reference(m_share, "inplace_alter"); - DBUG_RETURN(false); // OK + DBUG_RETURN(false); // OK } -void ha_ndbcluster::notify_table_changed(Alter_inplace_info *alter_info) -{ +void ha_ndbcluster::notify_table_changed(Alter_inplace_info *alter_info) { DBUG_ENTER("ha_ndbcluster::notify_table_changed "); /* all mysqld's will read frms from disk and setup new event operation for the table (new_op) */ - THD *thd= current_thd; - const char *db= table->s->db.str; - const char *name= table->s->table_name.str; - uint32 table_id= 0, table_version= 0; + THD *thd = current_thd; + const char *db = table->s->db.str; + const char *name = table->s->table_name.str; + uint32 table_id = 0, table_version = 0; /* Get table id/version for new table */ { - Ndb* ndb= get_ndb(thd); + Ndb *ndb = get_ndb(thd); DBUG_ASSERT(ndb != 0); - if (ndb) - { + if (ndb) { ndb->setDatabaseName(db); Ndb_table_guard ndbtab(ndb->getDictionary(), name); - const NDBTAB *new_tab= ndbtab.get_table(); + const NDBTAB *new_tab = ndbtab.get_table(); DBUG_ASSERT(new_tab != 0); - if (new_tab) - { - table_id= new_tab->getObjectId(); - table_version= new_tab->getObjectVersion(); + if (new_tab) { + table_id = new_tab->getObjectId(); + table_version = new_tab->getObjectVersion(); // NOTE! There is already table id, version etc. 
in NDB_ALTER_DATA,
// why not take it from there instead of doing an additional
// NDB roundtrip to fetch the table definition
@@ -18540,8 +15975,7 @@ void ha_ndbcluster::notify_table_changed(Alter_inplace_info *alter_info)
static_cast<NDB_ALTER_DATA *>(alter_info->handler_ctx);
Ndb_schema_dist_client &schema_dist_client = alter_data->schema_dist_client;
if (!schema_dist_client.alter_table_inplace_commit(db, name, table_id,
- table_version))
- {
+ table_version)) {
// Failed to distribute the prepare of this alter table to the
// other MySQL Servers, just log error and continue
ndb_log_error("Failed to distribute inplace alter table commit of '%s'",
@@ -18549,12 +15983,11 @@ void ha_ndbcluster::notify_table_changed(Alter_inplace_info *alter_info)
}
destroy(alter_data);
- alter_info->handler_ctx= 0;
+ alter_info->handler_ctx = 0;
DBUG_VOID_RETURN;
}
-
/**
Get the tablespace name from the NDB dictionary for the given table in the
given schema.
@@ -18577,60 +16010,50 @@ void ha_ndbcluster::notify_table_changed(Alter_inplace_info *alter_info)
@retval != 0 Error (handler error code returned).
*/
-static
-int ndbcluster_get_tablespace(THD* thd,
- LEX_CSTRING db_name,
- LEX_CSTRING table_name,
- LEX_CSTRING *tablespace_name)
-{
+static int ndbcluster_get_tablespace(THD *thd, LEX_CSTRING db_name,
+ LEX_CSTRING table_name,
+ LEX_CSTRING *tablespace_name) {
DBUG_ENTER("ndbcluster_get_tablespace");
- DBUG_PRINT("enter", ("db_name: %s, table_name: %s", db_name.str,
- table_name.str));
+ DBUG_PRINT("enter",
+ ("db_name: %s, table_name: %s", db_name.str, table_name.str));
DBUG_ASSERT(tablespace_name != NULL);
- Ndb* ndb= check_ndb_in_thd(thd);
- if (ndb == NULL)
- DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ Ndb *ndb = check_ndb_in_thd(thd);
+ if (ndb == NULL) DBUG_RETURN(HA_ERR_NO_CONNECTION);
- NDBDICT *dict= ndb->getDictionary();
- const NDBTAB *tab= NULL;
+ NDBDICT *dict = ndb->getDictionary();
+ const NDBTAB *tab = NULL;
ndb->setDatabaseName(db_name.str);
Ndb_table_guard ndbtab_g(dict, table_name.str);
- if (!(tab= ndbtab_g.get_table()))
- ERR_RETURN(dict->getNdbError());
+ if (!(tab = ndbtab_g.get_table())) ERR_RETURN(dict->getNdbError());
Uint32 id;
- if (tab->getTablespace(&id))
- {
- NdbDictionary::Tablespace ts= dict->getTablespace(id);
- if (ndb_dict_check_NDB_error(dict))
- {
- const char *tablespace= ts.getName();
+ if (tab->getTablespace(&id)) {
+ NdbDictionary::Tablespace ts = dict->getTablespace(id);
+ if (ndb_dict_check_NDB_error(dict)) {
+ const char *tablespace = ts.getName();
DBUG_ASSERT(tablespace);
- const size_t tablespace_len= strlen(tablespace);
+ const size_t tablespace_len = strlen(tablespace);
DBUG_PRINT("info", ("Found tablespace '%s'", tablespace));
- lex_string_strmake(thd->mem_root, tablespace_name, tablespace, tablespace_len);
+ lex_string_strmake(thd->mem_root, tablespace_name, tablespace,
+ tablespace_len);
}
}
DBUG_RETURN(0);
}
-
-static
-bool create_tablespace_in_NDB(st_alter_tablespace *alter_info,
- NdbDictionary::Dictionary* dict,
- const Thd_ndb* thd_ndb,
- int& object_id, int& object_version)
-{
+static bool create_tablespace_in_NDB(st_alter_tablespace *alter_info,
+ NdbDictionary::Dictionary *dict,
+ const Thd_ndb *thd_ndb, int &object_id,
+ int &object_version) {
NdbDictionary::Tablespace ndb_ts;
ndb_ts.setName(alter_info->tablespace_name);
ndb_ts.setExtentSize(static_cast<Uint32>(alter_info->extent_size));
ndb_ts.setDefaultLogfileGroup(alter_info->logfile_group_name);
NdbDictionary::ObjectId objid;
- if (dict->createTablespace(ndb_ts, &objid))
- {
+ if (dict->createTablespace(ndb_ts, &objid)) 
{ thd_ndb->push_ndb_error_warning(dict->getNdbError()); thd_ndb->push_warning("Failed to create tablespace '%s' in NDB", alter_info->tablespace_name); @@ -18639,60 +16062,46 @@ bool create_tablespace_in_NDB(st_alter_tablespace *alter_info, } object_id = objid.getObjectId(); object_version = objid.getObjectVersion(); - if (dict->getWarningFlags() & NdbDictionary::Dictionary::WarnExtentRoundUp) - { + if (dict->getWarningFlags() & NdbDictionary::Dictionary::WarnExtentRoundUp) { thd_ndb->push_warning("Extent size rounded up to kernel page size"); } return true; } - -static -bool create_datafile_in_NDB(st_alter_tablespace* alter_info, - NdbDictionary::Dictionary* dict, - const Thd_ndb* thd_ndb) -{ +static bool create_datafile_in_NDB(st_alter_tablespace *alter_info, + NdbDictionary::Dictionary *dict, + const Thd_ndb *thd_ndb) { NdbDictionary::Datafile ndb_df; ndb_df.setPath(alter_info->data_file_name); ndb_df.setSize(alter_info->initial_size); ndb_df.setTablespace(alter_info->tablespace_name); - if (dict->createDatafile(ndb_df)) - { + if (dict->createDatafile(ndb_df)) { thd_ndb->push_ndb_error_warning(dict->getNdbError()); thd_ndb->push_warning("Failed to create datafile '%s' in NDB", alter_info->data_file_name); - if (alter_info->ts_cmd_type == CREATE_TABLESPACE) - { + if (alter_info->ts_cmd_type == CREATE_TABLESPACE) { my_error(ER_CREATE_FILEGROUP_FAILED, MYF(0), "DATAFILE"); - } - else - { + } else { my_error(ER_ALTER_FILEGROUP_FAILED, MYF(0), "CREATE DATAFILE FAILED"); } return false; } - if (dict->getWarningFlags() & NdbDictionary::Dictionary::WarnDatafileRoundUp) - { + if (dict->getWarningFlags() & + NdbDictionary::Dictionary::WarnDatafileRoundUp) { thd_ndb->push_warning("Datafile size rounded up to extent size"); - } - else if (dict->getWarningFlags() & - NdbDictionary::Dictionary::WarnDatafileRoundDown) - { + } else if (dict->getWarningFlags() & + NdbDictionary::Dictionary::WarnDatafileRoundDown) { thd_ndb->push_warning("Datafile size rounded down to extent size"); } return true; } - -static -bool drop_datafile_from_NDB(const char* tablespace_name, - const char* datafile_name, - NdbDictionary::Dictionary* dict, - const Thd_ndb* thd_ndb) -{ +static bool drop_datafile_from_NDB(const char *tablespace_name, + const char *datafile_name, + NdbDictionary::Dictionary *dict, + const Thd_ndb *thd_ndb) { NdbDictionary::Tablespace ts = dict->getTablespace(tablespace_name); - if(ndb_dict_check_NDB_error(dict)) - { + if (ndb_dict_check_NDB_error(dict)) { thd_ndb->push_ndb_error_warning(dict->getNdbError()); thd_ndb->push_warning("Failed to get tablespace '%s' from NDB", tablespace_name); @@ -18700,8 +16109,7 @@ bool drop_datafile_from_NDB(const char* tablespace_name, return false; } NdbDictionary::Datafile df = dict->getDatafile(0, datafile_name); - if(ndb_dict_check_NDB_error(dict)) - { + if (ndb_dict_check_NDB_error(dict)) { thd_ndb->push_ndb_error_warning(dict->getNdbError()); thd_ndb->push_warning("Failed to get datafile '%s' from NDB", datafile_name); @@ -18712,35 +16120,27 @@ bool drop_datafile_from_NDB(const char* tablespace_name, NdbDictionary::ObjectId objid; df.getTablespaceId(&objid); if (ts.getObjectId() == objid.getObjectId() && - strcmp(df.getPath(), datafile_name) == 0) - { - if (dict->dropDatafile(df)) - { + strcmp(df.getPath(), datafile_name) == 0) { + if (dict->dropDatafile(df)) { thd_ndb->push_ndb_error_warning(dict->getNdbError()); thd_ndb->push_warning("Failed to drop datafile '%s' from NDB", datafile_name); my_error(ER_ALTER_FILEGROUP_FAILED, MYF(0), "DROP DATAFILE FAILED"); 
return false;
}
- }
- else
- {
+ } else {
my_error(ER_WRONG_FILE_NAME, MYF(0), datafile_name);
return false;
}
return true;
}
-
-static
-bool drop_tablespace_from_NDB(const char* tablespace_name,
- NdbDictionary::Dictionary* dict,
- const Thd_ndb* thd_ndb,
- int& object_id, int& object_version)
-{
+static bool drop_tablespace_from_NDB(const char *tablespace_name,
+ NdbDictionary::Dictionary *dict,
+ const Thd_ndb *thd_ndb, int &object_id,
+ int &object_version) {
NdbDictionary::Tablespace ts = dict->getTablespace(tablespace_name);
- if (ndb_dict_check_NDB_error(dict))
- {
+ if (ndb_dict_check_NDB_error(dict)) {
thd_ndb->push_ndb_error_warning(dict->getNdbError());
thd_ndb->push_warning("Failed to get tablespace '%s' from NDB",
tablespace_name);
@@ -18749,8 +16149,7 @@ bool drop_tablespace_from_NDB(const char* tablespace_name,
}
object_id = ts.getObjectId();
object_version = ts.getObjectVersion();
- if (dict->dropTablespace(ts))
- {
+ if (dict->dropTablespace(ts)) {
thd_ndb->push_ndb_error_warning(dict->getNdbError());
thd_ndb->push_warning("Failed to drop tablespace '%s' from NDB",
tablespace_name);
@@ -18760,19 +16159,15 @@ bool drop_tablespace_from_NDB(const char* tablespace_name,
return true;
}
-
-static
-bool create_logfile_group_in_NDB(st_alter_tablespace *alter_info,
- NdbDictionary::Dictionary* dict,
- const Thd_ndb* thd_ndb,
- int& object_id, int& object_version)
-{
+static bool create_logfile_group_in_NDB(st_alter_tablespace *alter_info,
+ NdbDictionary::Dictionary *dict,
+ const Thd_ndb *thd_ndb, int &object_id,
+ int &object_version) {
NdbDictionary::LogfileGroup ndb_lg;
ndb_lg.setName(alter_info->logfile_group_name);
ndb_lg.setUndoBufferSize(static_cast<Uint32>(alter_info->undo_buffer_size));
NdbDictionary::ObjectId objid;
- if (dict->createLogfileGroup(ndb_lg, &objid))
- {
+ if (dict->createLogfileGroup(ndb_lg, &objid)) {
thd_ndb->push_ndb_error_warning(dict->getNdbError());
thd_ndb->push_warning("Failed to create logfile group '%s' in NDB",
alter_info->logfile_group_name);
@@ -18782,56 +16177,43 @@ bool create_logfile_group_in_NDB(st_alter_tablespace *alter_info,
object_id = objid.getObjectId();
object_version = objid.getObjectVersion();
if (dict->getWarningFlags() &
- NdbDictionary::Dictionary::WarnUndobufferRoundUp)
- {
+ NdbDictionary::Dictionary::WarnUndobufferRoundUp) {
thd_ndb->push_warning("Undo buffer size rounded up to kernel page size");
}
return true;
}
-
-static
-bool create_undofile_in_NDB(st_alter_tablespace* alter_info,
- NdbDictionary::Dictionary* dict,
- const Thd_ndb* thd_ndb)
-{
+static bool create_undofile_in_NDB(st_alter_tablespace *alter_info,
+ NdbDictionary::Dictionary *dict,
+ const Thd_ndb *thd_ndb) {
NdbDictionary::Undofile ndb_uf;
ndb_uf.setPath(alter_info->undo_file_name);
ndb_uf.setSize(alter_info->initial_size);
ndb_uf.setLogfileGroup(alter_info->logfile_group_name);
- if (dict->createUndofile(ndb_uf))
- {
+ if (dict->createUndofile(ndb_uf)) {
thd_ndb->push_ndb_error_warning(dict->getNdbError());
thd_ndb->push_warning("Failed to create undofile '%s' in NDB",
alter_info->undo_file_name);
- if (alter_info->ts_cmd_type == CREATE_LOGFILE_GROUP)
- {
+ if (alter_info->ts_cmd_type == CREATE_LOGFILE_GROUP) {
my_error(ER_CREATE_FILEGROUP_FAILED, MYF(0), "UNDOFILE");
- }
- else
- {
+ } else {
my_error(ER_ALTER_FILEGROUP_FAILED, MYF(0), "CREATE UNDOFILE FAILED");
}
return false;
}
if (dict->getWarningFlags() &
- NdbDictionary::Dictionary::WarnUndofileRoundDown)
- {
+ NdbDictionary::Dictionary::WarnUndofileRoundDown) {
thd_ndb->push_warning("Undofile size rounded 
down to kernel page size"); } return true; } - -static -bool drop_logfile_group_from_NDB(const char* logfile_group_name, - NdbDictionary::Dictionary* dict, - const Thd_ndb* thd_ndb, - int& object_id, int& object_version) -{ +static bool drop_logfile_group_from_NDB(const char *logfile_group_name, + NdbDictionary::Dictionary *dict, + const Thd_ndb *thd_ndb, int &object_id, + int &object_version) { NdbDictionary::LogfileGroup lg = dict->getLogfileGroup(logfile_group_name); - if (ndb_dict_check_NDB_error(dict)) - { + if (ndb_dict_check_NDB_error(dict)) { thd_ndb->push_ndb_error_warning(dict->getNdbError()); thd_ndb->push_warning("Failed to get logfile group '%s' from NDB", logfile_group_name); @@ -18840,8 +16222,7 @@ bool drop_logfile_group_from_NDB(const char* logfile_group_name, } object_id = lg.getObjectId(); object_version = lg.getObjectVersion(); - if (dict->dropLogfileGroup(lg)) - { + if (dict->dropLogfileGroup(lg)) { thd_ndb->push_ndb_error_warning(dict->getNdbError()); thd_ndb->push_warning("Failed to drop logfile group '%s' from NDB", logfile_group_name); @@ -18851,7 +16232,6 @@ bool drop_logfile_group_from_NDB(const char* logfile_group_name, return true; } - /** Create, drop or alter tablespace or logfile group @@ -18879,448 +16259,393 @@ bool drop_logfile_group_from_NDB(const char* logfile_group_name, been called to set the MySQL error code. */ -static -int ndbcluster_alter_tablespace(handlerton*, - THD* thd, st_alter_tablespace *alter_info, - const dd::Tablespace*, - dd::Tablespace* new_ts_def) -{ +static int ndbcluster_alter_tablespace(handlerton *, THD *thd, + st_alter_tablespace *alter_info, + const dd::Tablespace *, + dd::Tablespace *new_ts_def) { DBUG_ENTER("ndbcluster_alter_tablespace"); Ndb *ndb = check_ndb_in_thd(thd, true); - if (ndb == nullptr) - { + if (ndb == nullptr) { DBUG_RETURN(HA_ERR_NO_CONNECTION); } - NdbDictionary::Dictionary* dict = ndb->getDictionary(); + NdbDictionary::Dictionary *dict = ndb->getDictionary(); Ndb_schema_dist_client schema_dist_client(thd); - const Thd_ndb* thd_ndb = get_thd_ndb(thd); + const Thd_ndb *thd_ndb = get_thd_ndb(thd); // Function should be called with GSL held - if (!thd_ndb->has_required_global_schema_lock("ndbcluster_alter_tablespace")){ + if (!thd_ndb->has_required_global_schema_lock( + "ndbcluster_alter_tablespace")) { DBUG_RETURN(HA_ERR_NO_CONNECTION); } switch (alter_info->ts_cmd_type) { - case CREATE_TABLESPACE: - { - if (DBUG_EVALUATE_IF("ndb_skip_create_tablespace_in_NDB", true, false)) - { - // Force mismatch by skipping creation of the tablespace in NDB - ndb_dd_disk_data_set_object_id_and_version(new_ts_def, 0, 0); - ndb_dd_disk_data_set_object_type(new_ts_def, object_type::TABLESPACE); - DBUG_RETURN(0); - } - - if (alter_info->extent_size >= (Uint64(1) << 32)) - { - thd_ndb->push_warning("Value specified for EXTENT_SIZE was too large"); - my_error(ER_WRONG_SIZE_NUMBER, MYF(0)); - DBUG_RETURN(1); - } - - if (alter_info->max_size > 0) - { - thd_ndb->push_warning("MAX_SIZE cannot be set to a value greater than 0"); - my_error(ER_WRONG_SIZE_NUMBER, MYF(0)); - DBUG_RETURN(1); - } - - if (!schema_dist_client.prepare("", alter_info->tablespace_name)) - { - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } - - Ndb_schema_trans_guard schema_trans(thd_ndb, dict); - if (!schema_trans.begin_trans()) - { - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } - - int object_id, object_version; - if (!create_tablespace_in_NDB(alter_info, dict, thd_ndb, - object_id, object_version)) - { - DBUG_RETURN(1); - } - - if (!create_datafile_in_NDB(alter_info, dict, 
thd_ndb)) - { - DBUG_RETURN(1); - } - - if (!schema_trans.commit_trans()) - { - my_error(ER_CREATE_FILEGROUP_FAILED, MYF(0), "TABLESPACE"); - DBUG_RETURN(1); - } - DBUG_PRINT("info", ("Successfully created tablespace '%s' and datafile " - "'%s' in NDB", alter_info->tablespace_name, - alter_info->data_file_name)); - - // Set se_private_id and se_private_data for the tablespace - ndb_dd_disk_data_set_object_id_and_version(new_ts_def, object_id, - object_version); - ndb_dd_disk_data_set_object_type(new_ts_def, object_type::TABLESPACE); + case CREATE_TABLESPACE: { + if (DBUG_EVALUATE_IF("ndb_skip_create_tablespace_in_NDB", true, false)) { + // Force mismatch by skipping creation of the tablespace in NDB + ndb_dd_disk_data_set_object_id_and_version(new_ts_def, 0, 0); + ndb_dd_disk_data_set_object_type(new_ts_def, object_type::TABLESPACE); + DBUG_RETURN(0); + } - if (!schema_dist_client.create_tablespace(alter_info->tablespace_name, - object_id, object_version)) - { - // Schema distibution failed, just push a warning and continue - thd_ndb->push_warning("Failed to distribute CREATE TABLESPACE '%s'", - alter_info->tablespace_name); - } - break; - } - case ALTER_TABLESPACE: - { - if (!schema_dist_client.prepare("", alter_info->tablespace_name)) - { - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } + if (alter_info->extent_size >= (Uint64(1) << 32)) { + thd_ndb->push_warning("Value specified for EXTENT_SIZE was too large"); + my_error(ER_WRONG_SIZE_NUMBER, MYF(0)); + DBUG_RETURN(1); + } - switch (alter_info->ts_alter_tablespace_type) { - case ALTER_TABLESPACE_ADD_FILE: - { - if (alter_info->max_size > 0) - { - thd_ndb->push_warning("MAX_SIZE cannot be set to a value greater than " - "0"); + if (alter_info->max_size > 0) { + thd_ndb->push_warning( + "MAX_SIZE cannot be set to a value greater than 0"); my_error(ER_WRONG_SIZE_NUMBER, MYF(0)); DBUG_RETURN(1); } + if (!schema_dist_client.prepare("", alter_info->tablespace_name)) { + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } + Ndb_schema_trans_guard schema_trans(thd_ndb, dict); - if (!schema_trans.begin_trans()) - { + if (!schema_trans.begin_trans()) { DBUG_RETURN(HA_ERR_NO_CONNECTION); } - if (!create_datafile_in_NDB(alter_info, dict, thd_ndb)) - { + int object_id, object_version; + if (!create_tablespace_in_NDB(alter_info, dict, thd_ndb, object_id, + object_version)) { DBUG_RETURN(1); } - if (!schema_trans.commit_trans()) - { - my_error(ER_ALTER_FILEGROUP_FAILED, MYF(0), "CREATE DATAFILE FAILED"); + if (!create_datafile_in_NDB(alter_info, dict, thd_ndb)) { DBUG_RETURN(1); } - DBUG_PRINT("info", ("Successfully created datafile '%s' in NDB", - alter_info->data_file_name)); - break; - } - case ALTER_TABLESPACE_DROP_FILE: - { - Ndb_schema_trans_guard schema_trans(thd_ndb, dict); - if (!schema_trans.begin_trans()) - { - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } - if (!drop_datafile_from_NDB(alter_info->tablespace_name, - alter_info->data_file_name, - dict, thd_ndb)) - { + if (!schema_trans.commit_trans()) { + my_error(ER_CREATE_FILEGROUP_FAILED, MYF(0), "TABLESPACE"); DBUG_RETURN(1); } + DBUG_PRINT("info", + ("Successfully created tablespace '%s' and datafile " + "'%s' in NDB", + alter_info->tablespace_name, alter_info->data_file_name)); + + // Set se_private_id and se_private_data for the tablespace + ndb_dd_disk_data_set_object_id_and_version(new_ts_def, object_id, + object_version); + ndb_dd_disk_data_set_object_type(new_ts_def, object_type::TABLESPACE); - if (!schema_trans.commit_trans()) - { - my_error(ER_ALTER_FILEGROUP_FAILED, MYF(0), "DROP DATAFILE FAILED"); - 
DBUG_RETURN(1); + if (!schema_dist_client.create_tablespace(alter_info->tablespace_name, + object_id, object_version)) { + // Schema distibution failed, just push a warning and continue + thd_ndb->push_warning("Failed to distribute CREATE TABLESPACE '%s'", + alter_info->tablespace_name); } - DBUG_PRINT("info", ("Successfully dropped datafile '%s' from NDB", - alter_info->data_file_name)); break; } - default: - { - DBUG_PRINT("error", ("Unsupported alter tablespace type: %d", - alter_info->ts_alter_tablespace_type)); - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); - } - } + case ALTER_TABLESPACE: { + if (!schema_dist_client.prepare("", alter_info->tablespace_name)) { + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } - NdbDictionary::Tablespace ts = - dict->getTablespace(alter_info->tablespace_name); - if (ndb_dict_check_NDB_error(dict)) - { - // Failed to get tablespace from NDB, push warnings and continue - thd_ndb->push_ndb_error_warning(dict->getNdbError()); - thd_ndb->push_warning("Failed to get tablespace '%s' from NDB", - alter_info->tablespace_name); - thd_ndb->push_warning("Failed to distribute ALTER TABLESPACE '%s'", - alter_info->tablespace_name); - break; - } + switch (alter_info->ts_alter_tablespace_type) { + case ALTER_TABLESPACE_ADD_FILE: { + if (alter_info->max_size > 0) { + thd_ndb->push_warning( + "MAX_SIZE cannot be set to a value greater than " + "0"); + my_error(ER_WRONG_SIZE_NUMBER, MYF(0)); + DBUG_RETURN(1); + } - if (!schema_dist_client.alter_tablespace(alter_info->tablespace_name, - ts.getObjectId(), - ts.getObjectVersion())) - { - // Schema distibution failed, just push a warning and continue - thd_ndb->push_warning("Failed to distribute ALTER TABLESPACE '%s'", - alter_info->tablespace_name); - } - break; - } - case CREATE_LOGFILE_GROUP: - { - if (alter_info->undo_file_name == nullptr) - { - thd_ndb->push_warning("REDO files in LOGFILE GROUP are not supported"); - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); - } + Ndb_schema_trans_guard schema_trans(thd_ndb, dict); + if (!schema_trans.begin_trans()) { + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } - if (alter_info->undo_buffer_size >= (Uint64(1) << 32)) - { - thd_ndb->push_warning("Size specified for UNDO_BUFFER_SIZE was too " - "large"); - my_error(ER_WRONG_SIZE_NUMBER, MYF(0)); - DBUG_RETURN(1); - } + if (!create_datafile_in_NDB(alter_info, dict, thd_ndb)) { + DBUG_RETURN(1); + } - if (!schema_dist_client.prepare("", alter_info->logfile_group_name)) - { - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } + if (!schema_trans.commit_trans()) { + my_error(ER_ALTER_FILEGROUP_FAILED, MYF(0), + "CREATE DATAFILE FAILED"); + DBUG_RETURN(1); + } + DBUG_PRINT("info", ("Successfully created datafile '%s' in NDB", + alter_info->data_file_name)); + break; + } + case ALTER_TABLESPACE_DROP_FILE: { + Ndb_schema_trans_guard schema_trans(thd_ndb, dict); + if (!schema_trans.begin_trans()) { + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } - Ndb_schema_trans_guard schema_trans(thd_ndb, dict); - if (!schema_trans.begin_trans()) - { - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } + if (!drop_datafile_from_NDB(alter_info->tablespace_name, + alter_info->data_file_name, dict, + thd_ndb)) { + DBUG_RETURN(1); + } - int object_id, object_version; - if (!create_logfile_group_in_NDB(alter_info, dict, thd_ndb, - object_id, object_version)) - { - DBUG_RETURN(1); - } + if (!schema_trans.commit_trans()) { + my_error(ER_ALTER_FILEGROUP_FAILED, MYF(0), "DROP DATAFILE FAILED"); + DBUG_RETURN(1); + } + DBUG_PRINT("info", ("Successfully dropped datafile '%s' from NDB", + alter_info->data_file_name)); + 
break; + } + default: { + DBUG_PRINT("error", ("Unsupported alter tablespace type: %d", + alter_info->ts_alter_tablespace_type)); + DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); + } + } - if (!create_undofile_in_NDB(alter_info, dict, thd_ndb)) - { - DBUG_RETURN(1); - } + NdbDictionary::Tablespace ts = + dict->getTablespace(alter_info->tablespace_name); + if (ndb_dict_check_NDB_error(dict)) { + // Failed to get tablespace from NDB, push warnings and continue + thd_ndb->push_ndb_error_warning(dict->getNdbError()); + thd_ndb->push_warning("Failed to get tablespace '%s' from NDB", + alter_info->tablespace_name); + thd_ndb->push_warning("Failed to distribute ALTER TABLESPACE '%s'", + alter_info->tablespace_name); + break; + } - // Add Logfile Group entry to the DD as a tablespace - Ndb_dd_client dd_client(thd); - std::vector undofile_names = {alter_info->undo_file_name}; - if (!dd_client.install_logfile_group(alter_info->logfile_group_name, - undofile_names, object_id, - object_version, - false /* force_overwrite */) || - DBUG_EVALUATE_IF("ndb_dd_client_install_logfile_group_fail", - true, false)) - { - thd_ndb->push_warning("Logfile group '%s' could not be stored in DD", - alter_info->logfile_group_name); - my_error(ER_CREATE_FILEGROUP_FAILED, MYF(0), "LOGFILE GROUP"); - DBUG_RETURN(1); + if (!schema_dist_client.alter_tablespace(alter_info->tablespace_name, + ts.getObjectId(), + ts.getObjectVersion())) { + // Schema distibution failed, just push a warning and continue + thd_ndb->push_warning("Failed to distribute ALTER TABLESPACE '%s'", + alter_info->tablespace_name); + } + break; } + case CREATE_LOGFILE_GROUP: { + if (alter_info->undo_file_name == nullptr) { + thd_ndb->push_warning("REDO files in LOGFILE GROUP are not supported"); + DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); + } + + if (alter_info->undo_buffer_size >= (Uint64(1) << 32)) { + thd_ndb->push_warning( + "Size specified for UNDO_BUFFER_SIZE was too " + "large"); + my_error(ER_WRONG_SIZE_NUMBER, MYF(0)); + DBUG_RETURN(1); + } + + if (!schema_dist_client.prepare("", alter_info->logfile_group_name)) { + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } + + Ndb_schema_trans_guard schema_trans(thd_ndb, dict); + if (!schema_trans.begin_trans()) { + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } + + int object_id, object_version; + if (!create_logfile_group_in_NDB(alter_info, dict, thd_ndb, object_id, + object_version)) { + DBUG_RETURN(1); + } - // Objects created in NDB and DD. 
Time to commit NDB schema transaction
- if (!schema_trans.commit_trans())
- {
- if (DBUG_EVALUATE_IF("ndb_dd_client_lfg_force_commit", true, false))
- {
- // Force commit of logfile group creation in DD when creation in NDB
- // has failed leading to a mismatch
- dd_client.commit();
- DBUG_RETURN(0);
+ if (!create_undofile_in_NDB(alter_info, dict, thd_ndb)) {
+ DBUG_RETURN(1);
}
- my_error(ER_CREATE_FILEGROUP_FAILED, MYF(0), "LOGFILE GROUP");
- DBUG_RETURN(1);
- }
- DBUG_PRINT("info", ("Successfully created logfile group '%s' and undofile "
- "'%s' in NDB", alter_info->logfile_group_name,
- alter_info->undo_file_name));
+ // Add Logfile Group entry to the DD as a tablespace
+ Ndb_dd_client dd_client(thd);
+ std::vector<std::string> undofile_names = {alter_info->undo_file_name};
+ if (!dd_client.install_logfile_group(
+ alter_info->logfile_group_name, undofile_names, object_id,
+ object_version, false /* force_overwrite */) ||
+ DBUG_EVALUATE_IF("ndb_dd_client_install_logfile_group_fail", true,
+ false)) {
+ thd_ndb->push_warning("Logfile group '%s' could not be stored in DD",
+ alter_info->logfile_group_name);
+ my_error(ER_CREATE_FILEGROUP_FAILED, MYF(0), "LOGFILE GROUP");
+ DBUG_RETURN(1);
+ }

- // NDB schema transaction committed successfully, safe to commit in DD
- dd_client.commit();

- if (!schema_dist_client.create_logfile_group(alter_info->logfile_group_name,
- object_id, object_version))
- {
- // Schema distibution failed, just push a warning and continue
- thd_ndb->push_warning("Failed to distribute CREATE LOGFILE GROUP '%s'",
- alter_info->logfile_group_name);
- }
- break;
- }
- case ALTER_LOGFILE_GROUP:
- {
- if (alter_info->undo_file_name == nullptr)
- {
- thd_ndb->push_warning("REDO files in LOGFILE GROUP are not supported");
- DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+ // Objects created in NDB and DD. 
Time to commit NDB schema transaction + if (!schema_trans.commit_trans()) { + if (DBUG_EVALUATE_IF("ndb_dd_client_lfg_force_commit", true, false)) { + // Force commit of logfile group creation in DD when creation in NDB + // has failed leading to a mismatch + dd_client.commit(); + DBUG_RETURN(0); + } + my_error(ER_CREATE_FILEGROUP_FAILED, MYF(0), "LOGFILE GROUP"); + DBUG_RETURN(1); + } + DBUG_PRINT("info", + ("Successfully created logfile group '%s' and undofile " + "'%s' in NDB", + alter_info->logfile_group_name, alter_info->undo_file_name)); + + // NDB schema transaction committed successfully, safe to commit in DD + dd_client.commit(); + + if (!schema_dist_client.create_logfile_group( + alter_info->logfile_group_name, object_id, object_version)) { + // Schema distibution failed, just push a warning and continue + thd_ndb->push_warning("Failed to distribute CREATE LOGFILE GROUP '%s'", + alter_info->logfile_group_name); + } + break; } + case ALTER_LOGFILE_GROUP: { + if (alter_info->undo_file_name == nullptr) { + thd_ndb->push_warning("REDO files in LOGFILE GROUP are not supported"); + DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); + } - if (!schema_dist_client.prepare("", alter_info->logfile_group_name)) - { - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } + if (!schema_dist_client.prepare("", alter_info->logfile_group_name)) { + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } - Ndb_schema_trans_guard schema_trans(thd_ndb, dict); - if (!schema_trans.begin_trans()) - { - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } + Ndb_schema_trans_guard schema_trans(thd_ndb, dict); + if (!schema_trans.begin_trans()) { + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } - if (!create_undofile_in_NDB(alter_info, dict, thd_ndb)) - { - DBUG_RETURN(1); - } + if (!create_undofile_in_NDB(alter_info, dict, thd_ndb)) { + DBUG_RETURN(1); + } - // Update Logfile Group entry in the DD - Ndb_dd_client dd_client(thd); - if (!dd_client.install_undo_file(alter_info->logfile_group_name, - alter_info->undo_file_name) || - DBUG_EVALUATE_IF("ndb_dd_client_install_undo_file_fail", - true, false)) - { - thd_ndb->push_warning("Undofile '%s' could not be added to logfile " - "group '%s' in DD", alter_info->undo_file_name, - alter_info->logfile_group_name); - my_error(ER_ALTER_FILEGROUP_FAILED, MYF(0), "CREATE UNDOFILE FAILED"); - DBUG_RETURN(1); - } + // Update Logfile Group entry in the DD + Ndb_dd_client dd_client(thd); + if (!dd_client.install_undo_file(alter_info->logfile_group_name, + alter_info->undo_file_name) || + DBUG_EVALUATE_IF("ndb_dd_client_install_undo_file_fail", true, + false)) { + thd_ndb->push_warning( + "Undofile '%s' could not be added to logfile " + "group '%s' in DD", + alter_info->undo_file_name, alter_info->logfile_group_name); + my_error(ER_ALTER_FILEGROUP_FAILED, MYF(0), "CREATE UNDOFILE FAILED"); + DBUG_RETURN(1); + } - // Objects created in NDB and DD. Time to commit NDB schema transaction - if (!schema_trans.commit_trans()) - { - my_error(ER_ALTER_FILEGROUP_FAILED, MYF(0), "CREATE UNDOFILE FAILED"); - DBUG_RETURN(1); + // Objects created in NDB and DD. 
Time to commit NDB schema transaction + if (!schema_trans.commit_trans()) { + my_error(ER_ALTER_FILEGROUP_FAILED, MYF(0), "CREATE UNDOFILE FAILED"); + DBUG_RETURN(1); + } + DBUG_PRINT("info", ("Successfully created undofile '%s' in NDB", + alter_info->undo_file_name)); + + // NDB schema transaction committed successfully, safe to commit in DD + dd_client.commit(); + + NdbDictionary::LogfileGroup ndb_lg = + dict->getLogfileGroup(alter_info->logfile_group_name); + if (ndb_dict_check_NDB_error(dict)) { + // Failed to get logfile group from NDB, push warnings and continue + thd_ndb->push_ndb_error_warning(dict->getNdbError()); + thd_ndb->push_warning("Failed to get logfile group '%s' from NDB", + alter_info->logfile_group_name); + thd_ndb->push_warning("Failed to distribute ALTER LOGFILE GROUP '%s'", + alter_info->logfile_group_name); + break; + } + if (!schema_dist_client.alter_logfile_group( + alter_info->logfile_group_name, ndb_lg.getObjectId(), + ndb_lg.getObjectVersion())) { + // Schema distibution failed, just push a warning and continue + thd_ndb->push_warning("Failed to distribute ALTER LOGFILE GROUP '%s'", + alter_info->logfile_group_name); + } + break; } - DBUG_PRINT("info", ("Successfully created undofile '%s' in NDB", - alter_info->undo_file_name)); + case DROP_TABLESPACE: { + if (!schema_dist_client.prepare("", alter_info->tablespace_name)) { + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } - // NDB schema transaction committed successfully, safe to commit in DD - dd_client.commit(); + Ndb_schema_trans_guard schema_trans(thd_ndb, dict); + if (!schema_trans.begin_trans()) { + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } - NdbDictionary::LogfileGroup ndb_lg = - dict->getLogfileGroup(alter_info->logfile_group_name); - if (ndb_dict_check_NDB_error(dict)) - { - // Failed to get logfile group from NDB, push warnings and continue - thd_ndb->push_ndb_error_warning(dict->getNdbError()); - thd_ndb->push_warning("Failed to get logfile group '%s' from NDB", - alter_info->logfile_group_name); - thd_ndb->push_warning("Failed to distribute ALTER LOGFILE GROUP '%s'", - alter_info->logfile_group_name); + int object_id, object_version; + if (!drop_tablespace_from_NDB(alter_info->tablespace_name, dict, thd_ndb, + object_id, object_version)) { + DBUG_RETURN(1); + } + if (!schema_trans.commit_trans()) { + my_error(ER_DROP_FILEGROUP_FAILED, MYF(0), "TABLESPACE"); + DBUG_RETURN(1); + } + DBUG_PRINT("info", ("Successfully dropped tablespace '%s' from NDB", + alter_info->tablespace_name)); + + if (!schema_dist_client.drop_tablespace(alter_info->tablespace_name, + object_id, object_version)) { + // Schema distribution failed, just push a warning and continue + thd_ndb->push_warning("Failed to distribute DROP TABLESPACE '%s'", + alter_info->tablespace_name); + } break; } - if (!schema_dist_client.alter_logfile_group(alter_info->logfile_group_name, - ndb_lg.getObjectId(), - ndb_lg.getObjectVersion())) - { - // Schema distibution failed, just push a warning and continue - thd_ndb->push_warning("Failed to distribute ALTER LOGFILE GROUP '%s'", - alter_info->logfile_group_name); - } - break; - } - case DROP_TABLESPACE: - { - if (!schema_dist_client.prepare("", alter_info->tablespace_name)) - { - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } + case DROP_LOGFILE_GROUP: { + if (!schema_dist_client.prepare("", alter_info->logfile_group_name)) { + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } - Ndb_schema_trans_guard schema_trans(thd_ndb, dict); - if (!schema_trans.begin_trans()) - { - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } + 
Ndb_schema_trans_guard schema_trans(thd_ndb, dict); + if (!schema_trans.begin_trans()) { + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } - int object_id, object_version; - if (!drop_tablespace_from_NDB(alter_info->tablespace_name, dict, thd_ndb, - object_id, object_version)) - { - DBUG_RETURN(1); - } - if (!schema_trans.commit_trans()) - { - my_error(ER_DROP_FILEGROUP_FAILED, MYF(0), "TABLESPACE"); - DBUG_RETURN(1); - } - DBUG_PRINT("info", ("Successfully dropped tablespace '%s' from NDB", - alter_info->tablespace_name)); + int object_id, object_version; + if (!drop_logfile_group_from_NDB(alter_info->logfile_group_name, dict, + thd_ndb, object_id, object_version)) { + DBUG_RETURN(1); + } - if (!schema_dist_client.drop_tablespace(alter_info->tablespace_name, - object_id, object_version)) - { - // Schema distribution failed, just push a warning and continue - thd_ndb->push_warning("Failed to distribute DROP TABLESPACE '%s'", - alter_info->tablespace_name); - } - break; - } - case DROP_LOGFILE_GROUP: - { - if (!schema_dist_client.prepare("", alter_info->logfile_group_name)) - { - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } + // Drop Logfile Group entry from the DD + Ndb_dd_client dd_client(thd); + if (!dd_client.drop_logfile_group(alter_info->logfile_group_name) || + DBUG_EVALUATE_IF("ndb_dd_client_drop_logfile_group_fail", true, + false)) { + thd_ndb->push_warning("Logfile group '%s' could not be dropped from DD", + alter_info->logfile_group_name); + my_error(ER_DROP_FILEGROUP_FAILED, MYF(0), "LOGFILE GROUP"); + DBUG_RETURN(1); + } - Ndb_schema_trans_guard schema_trans(thd_ndb, dict); - if (!schema_trans.begin_trans()) - { - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } + // Objects dropped from NDB and DD. Time to commit NDB schema transaction + if (!schema_trans.commit_trans()) { + my_error(ER_DROP_FILEGROUP_FAILED, MYF(0), "LOGFILE GROUP"); + DBUG_RETURN(1); + } + DBUG_PRINT("info", ("Successfully dropped logfile group '%s' from NDB", + alter_info->logfile_group_name)); - int object_id, object_version; - if (!drop_logfile_group_from_NDB(alter_info->logfile_group_name, - dict, thd_ndb, object_id, object_version)) - { - DBUG_RETURN(1); - } + // NDB schema transaction committed successfully, safe to commit in DD + dd_client.commit(); - // Drop Logfile Group entry from the DD - Ndb_dd_client dd_client(thd); - if (!dd_client.drop_logfile_group(alter_info->logfile_group_name) || - DBUG_EVALUATE_IF("ndb_dd_client_drop_logfile_group_fail", - true, false)) - { - thd_ndb->push_warning("Logfile group '%s' could not be dropped from DD", - alter_info->logfile_group_name); - my_error(ER_DROP_FILEGROUP_FAILED, MYF(0), "LOGFILE GROUP"); - DBUG_RETURN(1); + if (!schema_dist_client.drop_logfile_group(alter_info->logfile_group_name, + object_id, object_version)) { + // Schema distibution failed, just push a warning and continue + thd_ndb->push_warning("Failed to distribute DROP LOGFILE GROUP '%s'", + alter_info->logfile_group_name); + } + break; } - - // Objects dropped from NDB and DD. 
Time to commit NDB schema transaction - if (!schema_trans.commit_trans()) - { - my_error(ER_DROP_FILEGROUP_FAILED, MYF(0), "LOGFILE GROUP"); - DBUG_RETURN(1); + case CHANGE_FILE_TABLESPACE: + case ALTER_ACCESS_MODE_TABLESPACE: { + DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); } - DBUG_PRINT("info", ("Successfully dropped logfile group '%s' from NDB", - alter_info->logfile_group_name)); - - // NDB schema transaction committed successfully, safe to commit in DD - dd_client.commit(); - - if (!schema_dist_client.drop_logfile_group(alter_info->logfile_group_name, - object_id, object_version)) - { - // Schema distibution failed, just push a warning and continue - thd_ndb->push_warning("Failed to distribute DROP LOGFILE GROUP '%s'", - alter_info->logfile_group_name); + default: { + // Unexpected, crash in debug + DBUG_ASSERT(false); + DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); } - break; - } - case CHANGE_FILE_TABLESPACE: - case ALTER_ACCESS_MODE_TABLESPACE: - { - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); - } - default: - { - // Unexpected, crash in debug - DBUG_ASSERT(false); - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); - } } DBUG_RETURN(0); @@ -19338,38 +16663,30 @@ int ndbcluster_alter_tablespace(handlerton*, @returns false on success, true on failure */ -static -bool ndbcluster_get_tablespace_statistics(const char *tablespace_name, - const char *file_name, - const dd::Properties - &ts_se_private_data, - ha_tablespace_statistics *stats) -{ +static bool ndbcluster_get_tablespace_statistics( + const char *tablespace_name, const char *file_name, + const dd::Properties &ts_se_private_data, ha_tablespace_statistics *stats) { DBUG_ENTER("ndbcluster_get_tablespace_statistics"); // Find out type of object. The type is stored in se_private_data enum object_type type; - if (!ndb_dd_disk_data_get_object_type(ts_se_private_data, type)) - { + if (!ndb_dd_disk_data_get_object_type(ts_se_private_data, type)) { my_printf_error(ER_INTERNAL_ERROR, "Could not get object type", MYF(0)); DBUG_RETURN(true); } - THD *thd= current_thd; - Ndb *ndb= check_ndb_in_thd(thd); - if (!ndb) - { + THD *thd = current_thd; + Ndb *ndb = check_ndb_in_thd(thd); + if (!ndb) { // No connection to NDB my_error(HA_ERR_NO_CONNECTION, MYF(0)); DBUG_RETURN(true); } - Thd_ndb* thd_ndb = get_thd_ndb(thd); - - if (type == object_type::LOGFILE_GROUP) - { + Thd_ndb *thd_ndb = get_thd_ndb(thd); - NdbDictionary::Dictionary* dict= ndb->getDictionary(); + if (type == object_type::LOGFILE_GROUP) { + NdbDictionary::Dictionary *dict = ndb->getDictionary(); /* Find a node which is alive. NDB's view of an undo file * is actually a composite of the stats found across all @@ -19378,17 +16695,15 @@ bool ndbcluster_get_tablespace_statistics(const char *tablespace_name, * Since the stats of interest don't vary across the data * nodes, using the first available data node is acceptable. 
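Stepping back to the ndbcluster_alter_tablespace hunks above, every branch follows the same shape: construct an Ndb_schema_trans_guard, begin_trans, bail out with DBUG_RETURN on any failure, and commit_trans only at the end. That is safe against the many early returns only if the guard aborts the NDB schema transaction from its destructor when commit was never reached; a self-contained RAII sketch of that assumed behaviour (ToyDict stands in for the real NdbDictionary::Dictionary):

    #include <cstdio>

    // Toy stand-in for the NdbDictionary::Dictionary transaction calls.
    struct ToyDict {
      bool beginSchemaTrans() { std::puts("begin"); return true; }
      bool endSchemaTrans() { std::puts("commit"); return true; }
      void abortSchemaTrans() { std::puts("abort"); }
    };

    // RAII guard: any early return before commit_trans() rolls back.
    class SchemaTransGuard {
     public:
      explicit SchemaTransGuard(ToyDict &dict) : m_dict(dict) {}
      ~SchemaTransGuard() {
        if (m_started && !m_committed) m_dict.abortSchemaTrans();
      }
      bool begin_trans() { return m_started = m_dict.beginSchemaTrans(); }
      bool commit_trans() { return m_committed = m_dict.endSchemaTrans(); }

     private:
      ToyDict &m_dict;
      bool m_started = false;
      bool m_committed = false;
    };

    int main() {
      ToyDict dict;
      SchemaTransGuard schema_trans(dict);
      if (!schema_trans.begin_trans()) return 1;
      // ... dictionary work; returning here would trigger the abort path ...
      if (!schema_trans.commit_trans()) return 1;
      return 0;
    }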
*/ - NdbDictionary::Undofile uf= dict->getUndofile(-1, file_name); - if (ndb_dict_check_NDB_error(dict)) - { + NdbDictionary::Undofile uf = dict->getUndofile(-1, file_name); + if (ndb_dict_check_NDB_error(dict)) { thd_ndb->set_ndb_error(dict->getNdbError(), "Could not get undo file"); DBUG_RETURN(true); } - NdbDictionary::LogfileGroup lfg= - dict->getLogfileGroup(uf.getLogfileGroup()); - if (ndb_dict_check_NDB_error(dict)) - { + NdbDictionary::LogfileGroup lfg = + dict->getLogfileGroup(uf.getLogfileGroup()); + if (ndb_dict_check_NDB_error(dict)) { thd_ndb->set_ndb_error(dict->getNdbError(), "Could not get logfile group"); DBUG_RETURN(true); @@ -19398,34 +16713,31 @@ bool ndbcluster_get_tablespace_statistics(const char *tablespace_name, * Failure means that the NDB dictionary has gone out * of sync with the DD */ - if (strcmp(lfg.getName(), tablespace_name) != 0) - { + if (strcmp(lfg.getName(), tablespace_name) != 0) { my_error(ER_TABLESPACE_MISSING, MYF(0), tablespace_name); DBUG_ASSERT(false); DBUG_RETURN(true); } // Populate statistics - stats->m_id= uf.getObjectId(); - stats->m_type= "UNDO LOG"; - stats->m_logfile_group_name= lfg.getName(); - stats->m_logfile_group_number= lfg.getObjectId(); - stats->m_total_extents= uf.getSize() / 4; - stats->m_extent_size= 4; - stats->m_initial_size= uf.getSize(); - stats->m_maximum_size= uf.getSize(); - stats->m_version= uf.getObjectVersion(); + stats->m_id = uf.getObjectId(); + stats->m_type = "UNDO LOG"; + stats->m_logfile_group_name = lfg.getName(); + stats->m_logfile_group_number = lfg.getObjectId(); + stats->m_total_extents = uf.getSize() / 4; + stats->m_extent_size = 4; + stats->m_initial_size = uf.getSize(); + stats->m_maximum_size = uf.getSize(); + stats->m_version = uf.getObjectVersion(); std::stringstream extra; extra << "UNDO_BUFFER_SIZE=" << lfg.getUndoBufferSize(); - stats->m_extra= extra.str().c_str(); + stats->m_extra = extra.str().c_str(); DBUG_RETURN(false); } - if (type == object_type::TABLESPACE) - { - - NdbDictionary::Dictionary* dict= ndb->getDictionary(); + if (type == object_type::TABLESPACE) { + NdbDictionary::Dictionary *dict = ndb->getDictionary(); /* Find a node which is alive. NDB's view of a data file * is actually a composite of the stats found across all @@ -19434,17 +16746,14 @@ bool ndbcluster_get_tablespace_statistics(const char *tablespace_name, * Since the stats of interest don't vary across the data * nodes, using the first available data node is acceptable. */ - NdbDictionary::Datafile df= dict->getDatafile(-1, file_name); - if (ndb_dict_check_NDB_error(dict)) - { + NdbDictionary::Datafile df = dict->getDatafile(-1, file_name); + if (ndb_dict_check_NDB_error(dict)) { thd_ndb->set_ndb_error(dict->getNdbError(), "Could not get data file"); DBUG_RETURN(true); } - NdbDictionary::Tablespace ts= - dict->getTablespace(df.getTablespace()); - if (ndb_dict_check_NDB_error(dict)) - { + NdbDictionary::Tablespace ts = dict->getTablespace(df.getTablespace()); + if (ndb_dict_check_NDB_error(dict)) { thd_ndb->set_ndb_error(dict->getNdbError(), "Could not get tablespace"); DBUG_RETURN(true); } @@ -19453,25 +16762,24 @@ bool ndbcluster_get_tablespace_statistics(const char *tablespace_name, * from DD. 
Failure means that the NDB dictionary has gone out * of sync with the DD */ - if (strcmp(ts.getName(), tablespace_name) != 0) - { + if (strcmp(ts.getName(), tablespace_name) != 0) { my_error(ER_TABLESPACE_MISSING, MYF(0), tablespace_name); DBUG_ASSERT(false); DBUG_RETURN(true); } // Populate statistics - stats->m_id= df.getObjectId(); - stats->m_type= "DATAFILE"; - stats->m_logfile_group_name= ts.getDefaultLogfileGroup(); - stats->m_logfile_group_number= ts.getDefaultLogfileGroupId(); - stats->m_free_extents= df.getFree() / ts.getExtentSize(); - stats->m_total_extents= df.getSize()/ ts.getExtentSize(); - stats->m_extent_size= ts.getExtentSize(); - stats->m_initial_size= df.getSize(); - stats->m_maximum_size= df.getSize(); - stats->m_version= df.getObjectVersion(); - stats->m_row_format= "FIXED"; + stats->m_id = df.getObjectId(); + stats->m_type = "DATAFILE"; + stats->m_logfile_group_name = ts.getDefaultLogfileGroup(); + stats->m_logfile_group_number = ts.getDefaultLogfileGroupId(); + stats->m_free_extents = df.getFree() / ts.getExtentSize(); + stats->m_total_extents = df.getSize() / ts.getExtentSize(); + stats->m_extent_size = ts.getExtentSize(); + stats->m_initial_size = df.getSize(); + stats->m_maximum_size = df.getSize(); + stats->m_version = df.getObjectVersion(); + stats->m_row_format = "FIXED"; DBUG_RETURN(false); } @@ -19481,8 +16789,6 @@ bool ndbcluster_get_tablespace_statistics(const char *tablespace_name, DBUG_RETURN(true); } - - /** Return number of partitions for table in SE @@ -19494,9 +16800,7 @@ bool ndbcluster_get_tablespace_statistics(const char *tablespace_name, @retval true for failure, for example table didn't exist in engine */ -bool -ha_ndbcluster::get_num_parts(const char *name, uint *num_parts) -{ +bool ha_ndbcluster::get_num_parts(const char *name, uint *num_parts) { /* NOTE! This function is called very early in the code path for opening a table and ha_ndbcluster might not have been @@ -19507,18 +16811,14 @@ ha_ndbcluster::get_num_parts(const char *name, uint *num_parts) */ struct impl { - - static - int get_num_parts(const char* name, uint* num_parts) - { + static int get_num_parts(const char *name, uint *num_parts) { DBUG_ENTER("impl::get_num_parts"); // Since this function is always called early in the code // path, it's safe to allow the Ndb object to be recycled const bool allow_recycle_ndb = true; - Ndb * const ndb = check_ndb_in_thd(current_thd, allow_recycle_ndb); - if (!ndb) - { + Ndb *const ndb = check_ndb_in_thd(current_thd, allow_recycle_ndb); + if (!ndb) { // No connection to NDB DBUG_RETURN(HA_ERR_NO_CONNECTION); } @@ -19531,31 +16831,28 @@ ha_ndbcluster::get_num_parts(const char *name, uint *num_parts) // Open the table from NDB ndb->setDatabaseName(db_name); - NdbDictionary::Dictionary* dict= ndb->getDictionary(); + NdbDictionary::Dictionary *dict = ndb->getDictionary(); Ndb_table_guard ndbtab_g(dict, table_name); - if (!ndbtab_g.get_table()) - { + if (!ndbtab_g.get_table()) { // Could not open table from NDB ERR_RETURN(dict->getNdbError()); } // Return number of partitions used in the table - *num_parts= ndbtab_g.get_table()->getPartitionCount(); + *num_parts = ndbtab_g.get_table()->getPartitionCount(); DBUG_RETURN(0); } }; const int error = impl::get_num_parts(name, num_parts); - if (error) - { + if (error) { print_error(error, MYF(0)); - return true; // Could not return number of partitions + return true; // Could not return number of partitions } return false; } - /** Set Engine specific data to dd::Table object for upgrade. 
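Before moving on to upgrade_table(), a note on the unit conversions in the
tablespace statistics code above: both branches report sizes in extents
derived from raw byte counts. The undo branch pins m_extent_size to 4 (so
m_total_extents is simply getSize() / 4), while the datafile branch divides
by the tablespace's real extent size. A worked sketch with hypothetical
figures (a 128 MB datafile, 1 MB extents, 32 MB free), only to make the
arithmetic explicit:

~~~~~~~~~~~~~~~~
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical values; in the handler they come from df.getSize(),
  // df.getFree() and ts.getExtentSize()
  const uint64_t datafile_bytes = 128ULL * 1024 * 1024;
  const uint64_t free_bytes = 32ULL * 1024 * 1024;
  const uint64_t extent_bytes = 1ULL * 1024 * 1024;
  printf("total extents: %llu\n",
         (unsigned long long)(datafile_bytes / extent_bytes));  // 128
  printf("free extents:  %llu\n",
         (unsigned long long)(free_bytes / extent_bytes));  // 32
  return 0;
}
~~~~~~~~~~~~~~~~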
@@ -19567,34 +16864,26 @@ ha_ndbcluster::get_num_parts(const char *name, uint *num_parts) @return false on success, true on failure */ -bool -ha_ndbcluster::upgrade_table(THD* thd, - const char*, - const char* table_name, - dd::Table* dd_table) -{ +bool ha_ndbcluster::upgrade_table(THD *thd, const char *, + const char *table_name, dd::Table *dd_table) { + Ndb *ndb = check_ndb_in_thd(thd); - Ndb *ndb= check_ndb_in_thd(thd); - - if (!ndb) - { + if (!ndb) { // No connection to NDB my_error(HA_ERR_NO_CONNECTION, MYF(0)); return true; } - NdbDictionary::Dictionary* dict= ndb->getDictionary(); + NdbDictionary::Dictionary *dict = ndb->getDictionary(); Ndb_table_guard ndbtab_g(dict, table_name); - const NdbDictionary::Table *ndbtab= ndbtab_g.get_table(); + const NdbDictionary::Table *ndbtab = ndbtab_g.get_table(); - if (ndbtab == nullptr) - { + if (ndbtab == nullptr) { return true; } // Set object id and version - ndb_dd_table_set_object_id_and_version(dd_table, - ndbtab->getObjectId(), + ndb_dd_table_set_object_id_and_version(dd_table, ndbtab->getObjectId(), ndbtab->getObjectVersion()); /* @@ -19611,7 +16900,6 @@ ha_ndbcluster::upgrade_table(THD* thd, return false; } - /* @brief Shut down ndbcluster background tasks that could access the DD @@ -19619,40 +16907,37 @@ ha_ndbcluster::upgrade_table(THD* thd, @return Void */ -static void ndbcluster_pre_dd_shutdown(handlerton *) -{ +static void ndbcluster_pre_dd_shutdown(handlerton *) { // Stop and deinitialize the ndb_metadata_change_monitor thread ndb_metadata_change_monitor_thread.stop(); ndb_metadata_change_monitor_thread.deinit(); } -static -int show_ndb_status(THD* thd, SHOW_VAR* var, char*) -{ - if (!check_ndb_in_thd(thd)) - return -1; +static int show_ndb_status(THD *thd, SHOW_VAR *var, char *) { + if (!check_ndb_in_thd(thd)) return -1; struct st_ndb_status *st; SHOW_VAR *st_var; { // Allocate memory in current MEM_ROOT - char *mem= (char*)(*THR_MALLOC)->Alloc(sizeof(struct st_ndb_status) + - sizeof(ndb_status_vars_dynamic)); - st= new (mem) st_ndb_status; - st_var= (SHOW_VAR*)(mem + sizeof(struct st_ndb_status)); + char *mem = (char *)(*THR_MALLOC) + ->Alloc(sizeof(struct st_ndb_status) + + sizeof(ndb_status_vars_dynamic)); + st = new (mem) st_ndb_status; + st_var = (SHOW_VAR *)(mem + sizeof(struct st_ndb_status)); memcpy(st_var, &ndb_status_vars_dynamic, sizeof(ndb_status_vars_dynamic)); - int i= 0; - SHOW_VAR *tmp= &(ndb_status_vars_dynamic[0]); + int i = 0; + SHOW_VAR *tmp = &(ndb_status_vars_dynamic[0]); for (; tmp->value; tmp++, i++) - st_var[i].value= mem + (tmp->value - (char*)&g_ndb_status); + st_var[i].value = mem + (tmp->value - (char *)&g_ndb_status); } { - Thd_ndb *thd_ndb= get_thd_ndb(thd); - Ndb_cluster_connection *c= thd_ndb->connection; + Thd_ndb *thd_ndb = get_thd_ndb(thd); + Ndb_cluster_connection *c = thd_ndb->connection; update_status_variables(thd_ndb, st, c); } - var->type= SHOW_ARRAY; - var->value= (char *) st_var; + var->type = SHOW_ARRAY; + var->value = (char *)st_var; return 0; } @@ -19665,157 +16950,132 @@ int show_ndb_status(THD* thd, SHOW_VAR* var, char*) can be read. 
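One detail of show_ndb_status() above is worth spelling out: the static
 template array ndb_status_vars_dynamic points its value fields into the
 global g_ndb_status, and the copy loop rebases each pointer onto the
 per-query allocation by reusing the byte offset. A self-contained sketch
 of that offset rebasing, with a stand-in Stats struct instead of
 st_ndb_status:
~~~~~~~~~~~~~~~~
#include <cstdio>
#include <cstring>

struct Stats {
  long a;
  long b;
};
static Stats g_template = {10, 20};  // stand-in for g_ndb_status

int main() {
  // Fresh copy of the template, like the MEM_ROOT allocation above
  alignas(Stats) char mem[sizeof(Stats)];
  memcpy(mem, &g_template, sizeof(Stats));

  // A pointer into the global template...
  char *global_ptr = (char *)&g_template.b;
  // ...rebased onto the copy: same byte offset, different base address
  long *local_b = (long *)(mem + (global_ptr - (char *)&g_template));

  printf("%ld\n", *local_b);  // prints 20
  return 0;
}
~~~~~~~~~~~~~~~~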
*/ -static SHOW_VAR ndb_status_vars[] = -{ - {"Ndb", (char*) &show_ndb_status, SHOW_FUNC, SHOW_SCOPE_GLOBAL}, - {"Ndb_conflict", (char*) &show_ndb_status_conflict, SHOW_FUNC, SHOW_SCOPE_GLOBAL}, - {"Ndb", (char*) &show_ndb_status_injector, SHOW_FUNC,SHOW_SCOPE_GLOBAL}, - {"Ndb", (char*) &ndb_status_vars_slave, SHOW_ARRAY, SHOW_SCOPE_GLOBAL}, - {"Ndb", (char*) &show_ndb_status_server_api, SHOW_FUNC, SHOW_SCOPE_GLOBAL}, - {"Ndb_index_stat", (char*) &show_ndb_status_index_stat, SHOW_FUNC, SHOW_SCOPE_GLOBAL}, - {"Ndb", (char*) &show_ndb_metadata_check, SHOW_FUNC, - SHOW_SCOPE_GLOBAL}, - {"Ndb", (char*) &show_ndb_metadata_synced, SHOW_FUNC, - SHOW_SCOPE_GLOBAL}, - {"Ndb", (char*) &show_ndb_metadata_blacklist_size, SHOW_FUNC, - SHOW_SCOPE_GLOBAL}, - {NullS, NullS, SHOW_LONG, SHOW_SCOPE_GLOBAL} -}; - - -static MYSQL_SYSVAR_ULONG( - extra_logging, /* name */ - opt_ndb_extra_logging, /* var */ - PLUGIN_VAR_OPCMDARG, - "Turn on more logging in the error log.", - NULL, /* check func. */ - NULL, /* update func. */ - 1, /* default */ - 0, /* min */ - 0, /* max */ - 0 /* block */ +static SHOW_VAR ndb_status_vars[] = { + {"Ndb", (char *)&show_ndb_status, SHOW_FUNC, SHOW_SCOPE_GLOBAL}, + {"Ndb_conflict", (char *)&show_ndb_status_conflict, SHOW_FUNC, + SHOW_SCOPE_GLOBAL}, + {"Ndb", (char *)&show_ndb_status_injector, SHOW_FUNC, SHOW_SCOPE_GLOBAL}, + {"Ndb", (char *)&ndb_status_vars_slave, SHOW_ARRAY, SHOW_SCOPE_GLOBAL}, + {"Ndb", (char *)&show_ndb_status_server_api, SHOW_FUNC, SHOW_SCOPE_GLOBAL}, + {"Ndb_index_stat", (char *)&show_ndb_status_index_stat, SHOW_FUNC, + SHOW_SCOPE_GLOBAL}, + {"Ndb", (char *)&show_ndb_metadata_check, SHOW_FUNC, SHOW_SCOPE_GLOBAL}, + {"Ndb", (char *)&show_ndb_metadata_synced, SHOW_FUNC, SHOW_SCOPE_GLOBAL}, + {"Ndb", (char *)&show_ndb_metadata_blacklist_size, SHOW_FUNC, + SHOW_SCOPE_GLOBAL}, + {NullS, NullS, SHOW_LONG, SHOW_SCOPE_GLOBAL}}; + +static MYSQL_SYSVAR_ULONG(extra_logging, /* name */ + opt_ndb_extra_logging, /* var */ + PLUGIN_VAR_OPCMDARG, + "Turn on more logging in the error log.", + NULL, /* check func. */ + NULL, /* update func. */ + 1, /* default */ + 0, /* min */ + 0, /* max */ + 0 /* block */ ); - -static MYSQL_SYSVAR_ULONG( - wait_connected, /* name */ - opt_ndb_wait_connected, /* var */ - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Time (in seconds) for mysqld to wait for connection " - "to cluster management and data nodes.", - NULL, /* check func. */ - NULL, /* update func. */ - 30, /* default */ - 0, /* min */ - ONE_YEAR_IN_SECONDS, /* max */ - 0 /* block */ +static MYSQL_SYSVAR_ULONG(wait_connected, /* name */ + opt_ndb_wait_connected, /* var */ + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Time (in seconds) for mysqld to wait for connection " + "to cluster management and data nodes.", + NULL, /* check func. */ + NULL, /* update func. */ + 30, /* default */ + 0, /* min */ + ONE_YEAR_IN_SECONDS, /* max */ + 0 /* block */ ); - -static MYSQL_SYSVAR_ULONG( - wait_setup, /* name */ - opt_ndb_wait_setup, /* var */ - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Time (in seconds) for mysqld to wait for setup to " - "complete (0 = no wait)", - NULL, /* check func. */ - NULL, /* update func. */ - 30, /* default */ - 0, /* min */ - ONE_YEAR_IN_SECONDS, /* max */ - 0 /* block */ +static MYSQL_SYSVAR_ULONG(wait_setup, /* name */ + opt_ndb_wait_setup, /* var */ + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Time (in seconds) for mysqld to wait for setup to " + "complete (0 = no wait)", + NULL, /* check func. */ + NULL, /* update func. 
*/ + 30, /* default */ + 0, /* min */ + ONE_YEAR_IN_SECONDS, /* max */ + 0 /* block */ ); static const int MAX_CLUSTER_CONNECTIONS = 63; static MYSQL_SYSVAR_UINT( - cluster_connection_pool, /* name */ - opt_ndb_cluster_connection_pool, /* var */ - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Pool of cluster connections to be used by mysql server.", - NULL, /* check func. */ - NULL, /* update func. */ - 1, /* default */ - 1, /* min */ - MAX_CLUSTER_CONNECTIONS, /* max */ - 0 /* block */ + cluster_connection_pool, /* name */ + opt_ndb_cluster_connection_pool, /* var */ + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Pool of cluster connections to be used by mysql server.", + NULL, /* check func. */ + NULL, /* update func. */ + 1, /* default */ + 1, /* min */ + MAX_CLUSTER_CONNECTIONS, /* max */ + 0 /* block */ ); static MYSQL_SYSVAR_STR( - cluster_connection_pool_nodeids, /* name */ - opt_connection_pool_nodeids_str, /* var */ - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Comma separated list of nodeids to use for the cluster connection pool. " - "Overrides node id specified in --ndb-connectstring. First nodeid " - "must be equal to --ndb-nodeid(if specified)." , - NULL, /* check func. */ - NULL, /* update func. */ - NULL /* default */ + cluster_connection_pool_nodeids, /* name */ + opt_connection_pool_nodeids_str, /* var */ + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Comma separated list of nodeids to use for the cluster connection pool. " + "Overrides node id specified in --ndb-connectstring. First nodeid " + "must be equal to --ndb-nodeid(if specified).", + NULL, /* check func. */ + NULL, /* update func. */ + NULL /* default */ ); static const int MIN_ACTIVATION_THRESHOLD = 0; static const int MAX_ACTIVATION_THRESHOLD = 16; -static -int -ndb_recv_thread_activation_threshold_check(THD*, - SYS_VAR*, - void *, - struct st_mysql_value *value) -{ +static int ndb_recv_thread_activation_threshold_check( + THD *, SYS_VAR *, void *, struct st_mysql_value *value) { long long int_buf; int val = (int)value->val_int(value, &int_buf); int new_val = (int)int_buf; - if (val != 0 || - new_val < MIN_ACTIVATION_THRESHOLD || - new_val > MAX_ACTIVATION_THRESHOLD) - { + if (val != 0 || new_val < MIN_ACTIVATION_THRESHOLD || + new_val > MAX_ACTIVATION_THRESHOLD) { return 1; } opt_ndb_recv_thread_activation_threshold = new_val; return 0; } -static -void -ndb_recv_thread_activation_threshold_update(THD*, - SYS_VAR*, - void *, - const void *) -{ +static void ndb_recv_thread_activation_threshold_update(THD *, SYS_VAR *, + void *, const void *) { ndb_set_recv_thread_activation_threshold( - opt_ndb_recv_thread_activation_threshold); + opt_ndb_recv_thread_activation_threshold); } static MYSQL_SYSVAR_UINT( - recv_thread_activation_threshold, /* name */ - opt_ndb_recv_thread_activation_threshold, /* var */ - PLUGIN_VAR_RQCMDARG, - "Activation threshold when receive thread takes over the polling " - "of the cluster connection (measured in concurrently active " - "threads)", - ndb_recv_thread_activation_threshold_check, /* check func. */ - ndb_recv_thread_activation_threshold_update, /* update func. 
*/ - 8, /* default */ - MIN_ACTIVATION_THRESHOLD, /* min */ - MAX_ACTIVATION_THRESHOLD, /* max */ - 0 /* block */ + recv_thread_activation_threshold, /* name */ + opt_ndb_recv_thread_activation_threshold, /* var */ + PLUGIN_VAR_RQCMDARG, + "Activation threshold when receive thread takes over the polling " + "of the cluster connection (measured in concurrently active " + "threads)", + ndb_recv_thread_activation_threshold_check, /* check func. */ + ndb_recv_thread_activation_threshold_update, /* update func. */ + 8, /* default */ + MIN_ACTIVATION_THRESHOLD, /* min */ + MAX_ACTIVATION_THRESHOLD, /* max */ + 0 /* block */ ); - /* Definitions needed for receive thread cpu mask config variable */ static const int ndb_recv_thread_cpu_mask_option_buf_size = 512; -char ndb_recv_thread_cpu_mask_option_buf[ndb_recv_thread_cpu_mask_option_buf_size]; +char ndb_recv_thread_cpu_mask_option_buf + [ndb_recv_thread_cpu_mask_option_buf_size]; Uint16 recv_thread_cpuid_array[1 * MAX_CLUSTER_CONNECTIONS]; -static -int -ndb_recv_thread_cpu_mask_check(THD*, - SYS_VAR*, - void *, - struct st_mysql_value *value) -{ +static int ndb_recv_thread_cpu_mask_check(THD *, SYS_VAR *, void *, + struct st_mysql_value *value) { char buf[ndb_recv_thread_cpu_mask_option_buf_size]; int len = sizeof(buf); const char *str = value->val_str(value, buf, &len); @@ -19823,37 +17083,31 @@ ndb_recv_thread_cpu_mask_check(THD*, return ndb_recv_thread_cpu_mask_check_str(str); } -static int -ndb_recv_thread_cpu_mask_check_str(const char *str) -{ +static int ndb_recv_thread_cpu_mask_check_str(const char *str) { unsigned i; SparseBitmask bitmask; recv_thread_num_cpus = 0; - if (str == 0) - { + if (str == 0) { /* Setting to empty string is interpreted as remove locking to CPU */ return 0; } - if (parse_mask(str, bitmask) < 0) - { - ndb_log_info("Trying to set ndb_recv_thread_cpu_mask to" - " illegal value = %s, ignored", - str); + if (parse_mask(str, bitmask) < 0) { + ndb_log_info( + "Trying to set ndb_recv_thread_cpu_mask to" + " illegal value = %s, ignored", + str); goto error; } - for (i = bitmask.find(0); - i != SparseBitmask::NotFound; - i = bitmask.find(i + 1)) - { - if (recv_thread_num_cpus == - 1 * MAX_CLUSTER_CONNECTIONS) - { - ndb_log_info("Trying to set too many CPU's in " - "ndb_recv_thread_cpu_mask, ignored" - " this variable, erroneus value = %s", - str); + for (i = bitmask.find(0); i != SparseBitmask::NotFound; + i = bitmask.find(i + 1)) { + if (recv_thread_num_cpus == 1 * MAX_CLUSTER_CONNECTIONS) { + ndb_log_info( + "Trying to set too many CPU's in " + "ndb_recv_thread_cpu_mask, ignored" + " this variable, erroneus value = %s", + str); goto error; } recv_thread_cpuid_array[recv_thread_num_cpus++] = i; @@ -19863,360 +17117,324 @@ ndb_recv_thread_cpu_mask_check_str(const char *str) return 1; } -static -int -ndb_recv_thread_cpu_mask_update() -{ - return ndb_set_recv_thread_cpu(recv_thread_cpuid_array, - recv_thread_num_cpus); +static int ndb_recv_thread_cpu_mask_update() { + return ndb_set_recv_thread_cpu(recv_thread_cpuid_array, recv_thread_num_cpus); } -static -void -ndb_recv_thread_cpu_mask_update_func(THD *, - SYS_VAR*, - void *, const void *) -{ +static void ndb_recv_thread_cpu_mask_update_func(THD *, SYS_VAR *, void *, + const void *) { (void)ndb_recv_thread_cpu_mask_update(); } static MYSQL_SYSVAR_STR( - recv_thread_cpu_mask, /* name */ - opt_ndb_recv_thread_cpu_mask, /* var */ - PLUGIN_VAR_RQCMDARG, - "CPU mask for locking receiver threads to specific CPU, specified " - " as hexadecimal as e.g. 
0x33, one CPU is used per receiver thread.", - ndb_recv_thread_cpu_mask_check, /* check func. */ - ndb_recv_thread_cpu_mask_update_func,/* update func. */ - ndb_recv_thread_cpu_mask_option_buf -); - - + recv_thread_cpu_mask, /* name */ + opt_ndb_recv_thread_cpu_mask, /* var */ + PLUGIN_VAR_RQCMDARG, + "CPU mask for locking receiver threads to specific CPU, specified " + " as hexadecimal as e.g. 0x33, one CPU is used per receiver thread.", + ndb_recv_thread_cpu_mask_check, /* check func. */ + ndb_recv_thread_cpu_mask_update_func, /* update func. */ + ndb_recv_thread_cpu_mask_option_buf); static MYSQL_SYSVAR_STR( - index_stat_option, /* name */ - opt_ndb_index_stat_option, /* var */ - PLUGIN_VAR_RQCMDARG, - "Comma-separated tunable options for ndb index statistics", - ndb_index_stat_option_check, /* check func. */ - ndb_index_stat_option_update, /* update func. */ - ndb_index_stat_option_buf -); - + index_stat_option, /* name */ + opt_ndb_index_stat_option, /* var */ + PLUGIN_VAR_RQCMDARG, + "Comma-separated tunable options for ndb index statistics", + ndb_index_stat_option_check, /* check func. */ + ndb_index_stat_option_update, /* update func. */ + ndb_index_stat_option_buf); ulong opt_ndb_report_thresh_binlog_epoch_slip; static MYSQL_SYSVAR_ULONG( - report_thresh_binlog_epoch_slip, /* name */ - opt_ndb_report_thresh_binlog_epoch_slip,/* var */ - PLUGIN_VAR_RQCMDARG, - "Threshold for Binlog injector thread consumption lag, " - "before reporting the Event buffer status' message with reason " - "BUFFERED_EPOCHS_OVER_THRESHOLD. " - "The lag is defined as the number of epochs completely buffered in " - "the event buffer, but not consumed by the Binlog injector thread yet.", - NULL, /* check func. */ - NULL, /* update func. */ - 10, /* default */ - 0, /* min */ - 256, /* max */ - 0 /* block */ + report_thresh_binlog_epoch_slip, /* name */ + opt_ndb_report_thresh_binlog_epoch_slip, /* var */ + PLUGIN_VAR_RQCMDARG, + "Threshold for Binlog injector thread consumption lag, " + "before reporting the Event buffer status' message with reason " + "BUFFERED_EPOCHS_OVER_THRESHOLD. " + "The lag is defined as the number of epochs completely buffered in " + "the event buffer, but not consumed by the Binlog injector thread yet.", + NULL, /* check func. */ + NULL, /* update func. */ + 10, /* default */ + 0, /* min */ + 256, /* max */ + 0 /* block */ ); - ulong opt_ndb_report_thresh_binlog_mem_usage; static MYSQL_SYSVAR_ULONG( - report_thresh_binlog_mem_usage, /* name */ - opt_ndb_report_thresh_binlog_mem_usage,/* var */ - PLUGIN_VAR_RQCMDARG, - "Threshold on percentage of free memory before reporting binlog " - "status. E.g. 10 means that if amount of available memory for " - "receiving binlog data from the storage nodes goes below 10%, " - "a status message will be sent to the cluster log.", - NULL, /* check func. */ - NULL, /* update func. */ - 10, /* default */ - 0, /* min */ - 100, /* max */ - 0 /* block */ + report_thresh_binlog_mem_usage, /* name */ + opt_ndb_report_thresh_binlog_mem_usage, /* var */ + PLUGIN_VAR_RQCMDARG, + "Threshold on percentage of free memory before reporting binlog " + "status. E.g. 10 means that if amount of available memory for " + "receiving binlog data from the storage nodes goes below 10%, " + "a status message will be sent to the cluster log.", + NULL, /* check func. */ + NULL, /* update func. 
*/ + 10, /* default */ + 0, /* min */ + 100, /* max */ + 0 /* block */ ); - ulong opt_ndb_eventbuffer_max_alloc; -static MYSQL_SYSVAR_ULONG( - eventbuffer_max_alloc, /* name */ - opt_ndb_eventbuffer_max_alloc, /* var */ - PLUGIN_VAR_RQCMDARG, - "Maximum memory that can be allocated for buffering " - "events by the ndb api.", - NULL, /* check func. */ - NULL, /* update func. */ - 0, /* default */ - 0, /* min */ - UINT_MAX32, /* max */ - 0 /* block */ +static MYSQL_SYSVAR_ULONG(eventbuffer_max_alloc, /* name */ + opt_ndb_eventbuffer_max_alloc, /* var */ + PLUGIN_VAR_RQCMDARG, + "Maximum memory that can be allocated for buffering " + "events by the ndb api.", + NULL, /* check func. */ + NULL, /* update func. */ + 0, /* default */ + 0, /* min */ + UINT_MAX32, /* max */ + 0 /* block */ ); - uint opt_ndb_eventbuffer_free_percent; -static MYSQL_SYSVAR_UINT( - eventbuffer_free_percent, /* name */ - opt_ndb_eventbuffer_free_percent,/* var */ - PLUGIN_VAR_RQCMDARG, - "Percentage of free memory that should be available " - "in event buffer before resuming buffering " - "after the max_alloc limit is hit.", - NULL, /* check func. */ - NULL, /* update func. */ - 20, /* default */ - 1, /* min */ - 99, /* max */ - 0 /* block */ +static MYSQL_SYSVAR_UINT(eventbuffer_free_percent, /* name */ + opt_ndb_eventbuffer_free_percent, /* var */ + PLUGIN_VAR_RQCMDARG, + "Percentage of free memory that should be available " + "in event buffer before resuming buffering " + "after the max_alloc limit is hit.", + NULL, /* check func. */ + NULL, /* update func. */ + 20, /* default */ + 1, /* min */ + 99, /* max */ + 0 /* block */ ); static MYSQL_SYSVAR_ULONG( - row_checksum, /* name */ - opt_ndb_row_checksum, /* var */ - PLUGIN_VAR_OPCMDARG, - "Create tables with a row checksum, this checks for HW issues at the" - "expense of performance", - NULL, /* check func. */ - NULL, /* update func. */ - 1, /* default */ - 0, /* min */ - 1, /* max */ - 0 /* block */ + row_checksum, /* name */ + opt_ndb_row_checksum, /* var */ + PLUGIN_VAR_OPCMDARG, + "Create tables with a row checksum, this checks for HW issues at the" + "expense of performance", + NULL, /* check func. */ + NULL, /* update func. */ + 1, /* default */ + 0, /* min */ + 1, /* max */ + 0 /* block */ ); static MYSQL_SYSVAR_BOOL( - fully_replicated, /* name */ - opt_ndb_fully_replicated, /* var */ - PLUGIN_VAR_OPCMDARG, - "Create tables that are fully replicated by default. This enables reading" - " from any data node when using ReadCommitted. This is great for read" - " scalability but hampers write scalability", - NULL, /* check func. */ - NULL, /* update func. */ - 0 /* default */ + fully_replicated, /* name */ + opt_ndb_fully_replicated, /* var */ + PLUGIN_VAR_OPCMDARG, + "Create tables that are fully replicated by default. This enables reading" + " from any data node when using ReadCommitted. This is great for read" + " scalability but hampers write scalability", + NULL, /* check func. */ + NULL, /* update func. */ + 0 /* default */ ); bool opt_ndb_metadata_check; static MYSQL_SYSVAR_BOOL( - metadata_check, /* name */ - opt_ndb_metadata_check, /* var */ - PLUGIN_VAR_OPCMDARG, - "Enable the automatic detection of NDB metadata changes to be synchronized " - "with the DD", - NULL, /* check func. */ - NULL, /* update func. */ - true /* default */ + metadata_check, /* name */ + opt_ndb_metadata_check, /* var */ + PLUGIN_VAR_OPCMDARG, + "Enable the automatic detection of NDB metadata changes to be synchronized " + "with the DD", + NULL, /* check func. 
*/ + NULL, /* update func. */ + true /* default */ ); ulong opt_ndb_metadata_check_interval; -static void metadata_check_interval_update(THD*, SYS_VAR*, void* var_ptr, - const void* save) -{ - const ulong updated_interval = *static_cast(save); - *static_cast(var_ptr) = updated_interval; +static void metadata_check_interval_update(THD *, SYS_VAR *, void *var_ptr, + const void *save) { + const ulong updated_interval = *static_cast(save); + *static_cast(var_ptr) = updated_interval; ndb_metadata_change_monitor_thread.set_check_interval(updated_interval); } -static MYSQL_SYSVAR_ULONG( - metadata_check_interval, /* name */ - opt_ndb_metadata_check_interval, /* var */ - PLUGIN_VAR_RQCMDARG, - "Interval of time (in seconds) at which a check is done to see if there are " - "NDB metadata changes to be synchronized", - NULL, /* check func. */ - metadata_check_interval_update, /* update func. */ - 60, /* default */ - 0, /* min */ - ONE_YEAR_IN_SECONDS, /* max */ - 0 /* block */ +static MYSQL_SYSVAR_ULONG(metadata_check_interval, /* name */ + opt_ndb_metadata_check_interval, /* var */ + PLUGIN_VAR_RQCMDARG, + "Interval of time (in seconds) at which a check is " + "done to see if there are " + "NDB metadata changes to be synchronized", + NULL, /* check func. */ + metadata_check_interval_update, /* update func. */ + 60, /* default */ + 0, /* min */ + ONE_YEAR_IN_SECONDS, /* max */ + 0 /* block */ ); static MYSQL_SYSVAR_BOOL( - read_backup, /* name */ - opt_ndb_read_backup, /* var */ - PLUGIN_VAR_OPCMDARG, - "Create tables with Read Backup flag set. Enables those tables to be" - " read from backup replicas as well as from primary replicas. Delays" - " commit acknowledge of write transactions to accomplish this.", - NULL, /* check func. */ - NULL, /* update func. */ - 0 /* default */ + read_backup, /* name */ + opt_ndb_read_backup, /* var */ + PLUGIN_VAR_OPCMDARG, + "Create tables with Read Backup flag set. Enables those tables to be" + " read from backup replicas as well as from primary replicas. Delays" + " commit acknowledge of write transactions to accomplish this.", + NULL, /* check func. */ + NULL, /* update func. */ + 0 /* default */ ); -static -void -ndb_data_node_neighbour_update_func(THD*, - SYS_VAR*, - void *var_ptr, - const void *save) -{ - const ulong data_node_neighbour = *static_cast(save); - *static_cast(var_ptr) = data_node_neighbour; +static void ndb_data_node_neighbour_update_func(THD *, SYS_VAR *, void *var_ptr, + const void *save) { + const ulong data_node_neighbour = *static_cast(save); + *static_cast(var_ptr) = data_node_neighbour; ndb_set_data_node_neighbour(data_node_neighbour); } static MYSQL_SYSVAR_ULONG( - data_node_neighbour, /* name */ - opt_ndb_data_node_neighbour, /* var */ - PLUGIN_VAR_OPCMDARG, - "My closest data node, if 0 no closest neighbour, used to select" - " an appropriate data node to contact to run a transaction at.", - NULL, /* check func. */ - ndb_data_node_neighbour_update_func, /* update func. */ - 0, /* default */ - 0, /* min */ - MAX_NDB_NODES, /* max */ - 0 /* block */ + data_node_neighbour, /* name */ + opt_ndb_data_node_neighbour, /* var */ + PLUGIN_VAR_OPCMDARG, + "My closest data node, if 0 no closest neighbour, used to select" + " an appropriate data node to contact to run a transaction at.", + NULL, /* check func. */ + ndb_data_node_neighbour_update_func, /* update func. 
*/ + 0, /* default */ + 0, /* min */ + MAX_NDB_NODES, /* max */ + 0 /* block */ ); bool opt_ndb_log_update_as_write; static MYSQL_SYSVAR_BOOL( - log_update_as_write, /* name */ - opt_ndb_log_update_as_write, /* var */ - PLUGIN_VAR_OPCMDARG, - "For efficiency log only after image as a write event. " - "Ignore before image. This may cause compatibility problems if " - "replicating to other storage engines than ndbcluster.", - NULL, /* check func. */ - NULL, /* update func. */ - 1 /* default */ + log_update_as_write, /* name */ + opt_ndb_log_update_as_write, /* var */ + PLUGIN_VAR_OPCMDARG, + "For efficiency log only after image as a write event. " + "Ignore before image. This may cause compatibility problems if " + "replicating to other storage engines than ndbcluster.", + NULL, /* check func. */ + NULL, /* update func. */ + 1 /* default */ ); bool opt_ndb_log_update_minimal; static MYSQL_SYSVAR_BOOL( - log_update_minimal, /* name */ - opt_ndb_log_update_minimal, /* var */ - PLUGIN_VAR_OPCMDARG, - "For efficiency, log updates in a minimal format" - "Log only the primary key value(s) in the before " - "image. Log only the changed columns in the after " - "image. This may cause compatibility problems if " - "replicating to other storage engines than ndbcluster.", - NULL, /* check func. */ - NULL, /* update func. */ - 0 /* default */ + log_update_minimal, /* name */ + opt_ndb_log_update_minimal, /* var */ + PLUGIN_VAR_OPCMDARG, + "For efficiency, log updates in a minimal format" + "Log only the primary key value(s) in the before " + "image. Log only the changed columns in the after " + "image. This may cause compatibility problems if " + "replicating to other storage engines than ndbcluster.", + NULL, /* check func. */ + NULL, /* update func. */ + 0 /* default */ ); bool opt_ndb_log_updated_only; static MYSQL_SYSVAR_BOOL( - log_updated_only, /* name */ - opt_ndb_log_updated_only, /* var */ - PLUGIN_VAR_OPCMDARG, - "For efficiency log only updated columns. Columns are considered " - "as \"updated\" even if they are updated with the same value. " - "This may cause compatibility problems if " - "replicating to other storage engines than ndbcluster.", - NULL, /* check func. */ - NULL, /* update func. */ - 1 /* default */ + log_updated_only, /* name */ + opt_ndb_log_updated_only, /* var */ + PLUGIN_VAR_OPCMDARG, + "For efficiency log only updated columns. Columns are considered " + "as \"updated\" even if they are updated with the same value. " + "This may cause compatibility problems if " + "replicating to other storage engines than ndbcluster.", + NULL, /* check func. */ + NULL, /* update func. */ + 1 /* default */ ); bool opt_ndb_log_empty_update; -static MYSQL_SYSVAR_BOOL( - log_empty_update, /* name */ - opt_ndb_log_empty_update, /* var */ - PLUGIN_VAR_OPCMDARG, - "Normally empty updates are filtered away " - "before they are logged. However, for read tracking " - "in conflict resolution a hidden pesudo attribute is " - "set which will result in an empty update along with " - "special flags set. For this to work empty updates " - "have to be allowed.", - NULL, /* check func. */ - NULL, /* update func. */ - 0 /* default */ +static MYSQL_SYSVAR_BOOL(log_empty_update, /* name */ + opt_ndb_log_empty_update, /* var */ + PLUGIN_VAR_OPCMDARG, + "Normally empty updates are filtered away " + "before they are logged. However, for read tracking " + "in conflict resolution a hidden pesudo attribute is " + "set which will result in an empty update along with " + "special flags set. 
For this to work empty updates " + "have to be allowed.", + NULL, /* check func. */ + NULL, /* update func. */ + 0 /* default */ ); bool opt_ndb_log_orig; static MYSQL_SYSVAR_BOOL( - log_orig, /* name */ - opt_ndb_log_orig, /* var */ - PLUGIN_VAR_OPCMDARG, - "Log originating server id and epoch in ndb_binlog_index. Each epoch " - "may in this case have multiple rows in ndb_binlog_index, one for " - "each originating epoch.", - NULL, /* check func. */ - NULL, /* update func. */ - 0 /* default */ + log_orig, /* name */ + opt_ndb_log_orig, /* var */ + PLUGIN_VAR_OPCMDARG, + "Log originating server id and epoch in ndb_binlog_index. Each epoch " + "may in this case have multiple rows in ndb_binlog_index, one for " + "each originating epoch.", + NULL, /* check func. */ + NULL, /* update func. */ + 0 /* default */ ); - bool opt_ndb_log_bin; static MYSQL_SYSVAR_BOOL( - log_bin, /* name */ - opt_ndb_log_bin, /* var */ - PLUGIN_VAR_OPCMDARG, - "Log ndb tables in the binary log. Option only has meaning if " - "the binary log has been turned on for the server.", - NULL, /* check func. */ - NULL, /* update func. */ - 0 /* default */ + log_bin, /* name */ + opt_ndb_log_bin, /* var */ + PLUGIN_VAR_OPCMDARG, + "Log ndb tables in the binary log. Option only has meaning if " + "the binary log has been turned on for the server.", + NULL, /* check func. */ + NULL, /* update func. */ + 0 /* default */ ); - bool opt_ndb_log_binlog_index; static MYSQL_SYSVAR_BOOL( - log_binlog_index, /* name */ - opt_ndb_log_binlog_index, /* var */ - PLUGIN_VAR_OPCMDARG, - "Insert mapping between epochs and binlog positions into the " - "ndb_binlog_index table.", - NULL, /* check func. */ - NULL, /* update func. */ - 1 /* default */ + log_binlog_index, /* name */ + opt_ndb_log_binlog_index, /* var */ + PLUGIN_VAR_OPCMDARG, + "Insert mapping between epochs and binlog positions into the " + "ndb_binlog_index table.", + NULL, /* check func. */ + NULL, /* update func. */ + 1 /* default */ ); - static bool opt_ndb_log_empty_epochs; -static MYSQL_SYSVAR_BOOL( - log_empty_epochs, /* name */ - opt_ndb_log_empty_epochs, /* var */ - PLUGIN_VAR_OPCMDARG, - "", - NULL, /* check func. */ - NULL, /* update func. */ - 0 /* default */ +static MYSQL_SYSVAR_BOOL(log_empty_epochs, /* name */ + opt_ndb_log_empty_epochs, /* var */ + PLUGIN_VAR_OPCMDARG, "", NULL, /* check func. */ + NULL, /* update func. */ + 0 /* default */ ); -bool ndb_log_empty_epochs(void) -{ - return opt_ndb_log_empty_epochs; -} +bool ndb_log_empty_epochs(void) { return opt_ndb_log_empty_epochs; } bool opt_ndb_log_apply_status; static MYSQL_SYSVAR_BOOL( - log_apply_status, /* name */ - opt_ndb_log_apply_status, /* var */ - PLUGIN_VAR_OPCMDARG, - "Log ndb_apply_status updates from Master in the Binlog", - NULL, /* check func. */ - NULL, /* update func. */ - 0 /* default */ + log_apply_status, /* name */ + opt_ndb_log_apply_status, /* var */ + PLUGIN_VAR_OPCMDARG, + "Log ndb_apply_status updates from Master in the Binlog", + NULL, /* check func. */ + NULL, /* update func. */ + 0 /* default */ ); - bool opt_ndb_log_transaction_id; -static MYSQL_SYSVAR_BOOL( - log_transaction_id, /* name */ - opt_ndb_log_transaction_id, /* var */ - PLUGIN_VAR_OPCMDARG, - "Log Ndb transaction identities per row in the Binlog", - NULL, /* check func. */ - NULL, /* update func. */ - 0 /* default */ +static MYSQL_SYSVAR_BOOL(log_transaction_id, /* name */ + opt_ndb_log_transaction_id, /* var */ + PLUGIN_VAR_OPCMDARG, + "Log Ndb transaction identities per row in the Binlog", + NULL, /* check func. 
*/ + NULL, /* update func. */ + 0 /* default */ ); bool opt_ndb_clear_apply_status; static MYSQL_SYSVAR_BOOL( - clear_apply_status, /* name */ - opt_ndb_clear_apply_status, /* var */ - PLUGIN_VAR_OPCMDARG, - "Whether RESET SLAVE will clear all entries in ndb_apply_status", - NULL, /* check func. */ - NULL, /* update func. */ - 1 /* default */ + clear_apply_status, /* name */ + opt_ndb_clear_apply_status, /* var */ + PLUGIN_VAR_OPCMDARG, + "Whether RESET SLAVE will clear all entries in ndb_apply_status", + NULL, /* check func. */ + NULL, /* update func. */ + 1 /* default */ ); bool opt_ndb_schema_dist_upgrade_allowed; @@ -20236,149 +17454,122 @@ static MYSQL_SYSVAR_BOOL( int opt_ndb_schema_dist_timeout; static MYSQL_SYSVAR_INT( - schema_dist_timeout, /* name */ - opt_ndb_schema_dist_timeout, /* var */ + schema_dist_timeout, /* name */ + opt_ndb_schema_dist_timeout, /* var */ PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, "Controls how many seconds it takes before timeout is detected during " "schema distribution. Timeout might indicate that activity on the other " "MySQL Server(s) are high or are somehow prevented from acquiring the " "necessary resources at this time.", - NULL, /* check func. */ - NULL, /* update func. */ - 120, /* default */ - 5, /* min */ - 1200, /* max */ - 0 /* block */ + NULL, /* check func. */ + NULL, /* update func. */ + 120, /* default */ + 5, /* min */ + 1200, /* max */ + 0 /* block */ ); ulong opt_ndb_schema_dist_lock_wait_timeout; static MYSQL_SYSVAR_ULONG( - schema_dist_lock_wait_timeout, /* name */ - opt_ndb_schema_dist_lock_wait_timeout, /* var */ - PLUGIN_VAR_RQCMDARG, - "Time (in seconds) during schema distribution to wait for a lock before " - "returning an error. This setting allows avoiding that the binlog " - "injector thread waits too long while handling schema operations.", - NULL, /* check func. */ - NULL, /* update func. */ - 30, /* default */ - 0, /* min */ - 1200, /* max */ - 0 /* block */ + schema_dist_lock_wait_timeout, /* name */ + opt_ndb_schema_dist_lock_wait_timeout, /* var */ + PLUGIN_VAR_RQCMDARG, + "Time (in seconds) during schema distribution to wait for a lock before " + "returning an error. This setting allows avoiding that the binlog " + "injector thread waits too long while handling schema operations.", + NULL, /* check func. */ + NULL, /* update func. */ + 30, /* default */ + 0, /* min */ + 1200, /* max */ + 0 /* block */ ); -static MYSQL_SYSVAR_STR( - connectstring, /* name */ - opt_ndb_connectstring, /* var */ - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Connect string for ndbcluster.", - NULL, /* check func. */ - NULL, /* update func. */ - NULL /* default */ +static MYSQL_SYSVAR_STR(connectstring, /* name */ + opt_ndb_connectstring, /* var */ + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Connect string for ndbcluster.", + NULL, /* check func. */ + NULL, /* update func. */ + NULL /* default */ ); - -static MYSQL_SYSVAR_STR( - mgmd_host, /* name */ - opt_ndb_connectstring, /* var */ - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Same as --ndb-connectstring", - NULL, /* check func. */ - NULL, /* update func. */ - NULL /* default */ +static MYSQL_SYSVAR_STR(mgmd_host, /* name */ + opt_ndb_connectstring, /* var */ + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Same as --ndb-connectstring", NULL, /* check func. */ + NULL, /* update func. */ + NULL /* default */ ); - static MYSQL_SYSVAR_UINT( - nodeid, /* name */ - opt_ndb_nodeid, /* var */ - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Set nodeid for this node. 
Overrides node id specified " - "in --ndb-connectstring.", - NULL, /* check func. */ - NULL, /* update func. */ - 0, /* default */ - 0, /* min */ - MAX_NODES_ID, /* max */ - 0 /* block */ + nodeid, /* name */ + opt_ndb_nodeid, /* var */ + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Set nodeid for this node. Overrides node id specified " + "in --ndb-connectstring.", + NULL, /* check func. */ + NULL, /* update func. */ + 0, /* default */ + 0, /* min */ + MAX_NODES_ID, /* max */ + 0 /* block */ ); -static const char* slave_conflict_role_names[] = -{ - "NONE", - "SECONDARY", - "PRIMARY", - "PASS", - NullS -}; - -static TYPELIB slave_conflict_role_typelib = -{ - array_elements(slave_conflict_role_names) - 1, - "", - slave_conflict_role_names, - NULL -}; +static const char *slave_conflict_role_names[] = {"NONE", "SECONDARY", + "PRIMARY", "PASS", NullS}; +static TYPELIB slave_conflict_role_typelib = { + array_elements(slave_conflict_role_names) - 1, "", + slave_conflict_role_names, NULL}; /** * slave_conflict_role_check_func. - * + * * Perform most validation of a role change request. * Inspired by sql_plugin.cc::check_func_enum() */ -static int slave_conflict_role_check_func(THD *thd, SYS_VAR*, - void *save, st_mysql_value *value) -{ +static int slave_conflict_role_check_func(THD *thd, SYS_VAR *, void *save, + st_mysql_value *value) { char buff[STRING_BUFFER_USUAL_SIZE]; const char *str; long long tmp; long result; int length; - do - { - if (value->value_type(value) == MYSQL_VALUE_TYPE_STRING) - { - length= sizeof(buff); - if (!(str= value->val_str(value, buff, &length))) - break; - if ((result= (long)find_type(str, &slave_conflict_role_typelib, 0) - 1) < 0) - break; - } - else - { - if (value->val_int(value, &tmp)) + do { + if (value->value_type(value) == MYSQL_VALUE_TYPE_STRING) { + length = sizeof(buff); + if (!(str = value->val_str(value, buff, &length))) break; + if ((result = (long)find_type(str, &slave_conflict_role_typelib, 0) - 1) < + 0) break; + } else { + if (value->val_int(value, &tmp)) break; if (tmp < 0 || tmp >= static_cast(slave_conflict_role_typelib.count)) break; - result= (long) tmp; + result = (long)tmp; } - - const char* failure_cause_str = NULL; + + const char *failure_cause_str = NULL; if (!st_ndb_slave_state::checkSlaveConflictRoleChange( - (enum_slave_conflict_role) opt_ndb_slave_conflict_role, - (enum_slave_conflict_role) result, - &failure_cause_str)) - { + (enum_slave_conflict_role)opt_ndb_slave_conflict_role, + (enum_slave_conflict_role)result, &failure_cause_str)) { char msgbuf[256]; - snprintf(msgbuf, - sizeof(msgbuf), - "Role change from %s to %s failed : %s", - get_type(&slave_conflict_role_typelib, opt_ndb_slave_conflict_role), - get_type(&slave_conflict_role_typelib, result), - failure_cause_str); - + snprintf( + msgbuf, sizeof(msgbuf), "Role change from %s to %s failed : %s", + get_type(&slave_conflict_role_typelib, opt_ndb_slave_conflict_role), + get_type(&slave_conflict_role_typelib, result), failure_cause_str); + thd->raise_error_printf(ER_ERROR_WHEN_EXECUTING_COMMAND, - "SET GLOBAL ndb_slave_conflict_role", - msgbuf); - + "SET GLOBAL ndb_slave_conflict_role", msgbuf); + break; } - + /* Ok */ - *(long*)save= result; + *(long *)save = result; return 0; } while (0); /* Error */ @@ -20393,134 +17584,125 @@ static int slave_conflict_role_check_func(THD *thd, SYS_VAR*, * * Inspired by sql_plugin.cc::update_func_long() */ -static void slave_conflict_role_update_func(THD*, SYS_VAR*, - void *tgt, const void *save) -{ - *(long *)tgt= *static_cast(save); +static 
void slave_conflict_role_update_func(THD *, SYS_VAR *, void *tgt, + const void *save) { + *(long *)tgt = *static_cast(save); } static MYSQL_SYSVAR_ENUM( - slave_conflict_role, /* Name */ - opt_ndb_slave_conflict_role, /* Var */ - PLUGIN_VAR_RQCMDARG, - "Role for Slave to play in asymmetric conflict algorithms.", - slave_conflict_role_check_func, /* Check func */ - slave_conflict_role_update_func, /* Update func */ - SCR_NONE, /* Default value */ - &slave_conflict_role_typelib /* typelib */ + slave_conflict_role, /* Name */ + opt_ndb_slave_conflict_role, /* Var */ + PLUGIN_VAR_RQCMDARG, + "Role for Slave to play in asymmetric conflict algorithms.", + slave_conflict_role_check_func, /* Check func */ + slave_conflict_role_update_func, /* Update func */ + SCR_NONE, /* Default value */ + &slave_conflict_role_typelib /* typelib */ ); #ifndef DBUG_OFF -static -void -dbg_check_shares_update(THD*, SYS_VAR*, void*, const void*) -{ +static void dbg_check_shares_update(THD *, SYS_VAR *, void *, const void *) { NDB_SHARE::dbg_check_shares_update(); } -static MYSQL_THDVAR_UINT( - dbg_check_shares, /* name */ - PLUGIN_VAR_RQCMDARG, - "Debug, only...check that no shares are lingering...", - NULL, /* check func */ - dbg_check_shares_update, /* update func */ - 0, /* default */ - 0, /* min */ - 1, /* max */ - 0 /* block */ +static MYSQL_THDVAR_UINT(dbg_check_shares, /* name */ + PLUGIN_VAR_RQCMDARG, + "Debug, only...check that no shares are lingering...", + NULL, /* check func */ + dbg_check_shares_update, /* update func */ + 0, /* default */ + 0, /* min */ + 1, /* max */ + 0 /* block */ ); #endif -static SYS_VAR* system_variables[]= { - MYSQL_SYSVAR(extra_logging), - MYSQL_SYSVAR(wait_connected), - MYSQL_SYSVAR(wait_setup), - MYSQL_SYSVAR(cluster_connection_pool), - MYSQL_SYSVAR(cluster_connection_pool_nodeids), - MYSQL_SYSVAR(recv_thread_activation_threshold), - MYSQL_SYSVAR(recv_thread_cpu_mask), - MYSQL_SYSVAR(report_thresh_binlog_mem_usage), - MYSQL_SYSVAR(report_thresh_binlog_epoch_slip), - MYSQL_SYSVAR(eventbuffer_max_alloc), - MYSQL_SYSVAR(eventbuffer_free_percent), - MYSQL_SYSVAR(log_update_as_write), - MYSQL_SYSVAR(log_updated_only), - MYSQL_SYSVAR(log_update_minimal), - MYSQL_SYSVAR(log_empty_update), - MYSQL_SYSVAR(log_orig), - MYSQL_SYSVAR(distribution), - MYSQL_SYSVAR(autoincrement_prefetch_sz), - MYSQL_SYSVAR(force_send), - MYSQL_SYSVAR(use_exact_count), - MYSQL_SYSVAR(use_transactions), - MYSQL_SYSVAR(use_copying_alter_table), - MYSQL_SYSVAR(allow_copying_alter_table), - MYSQL_SYSVAR(optimized_node_selection), - MYSQL_SYSVAR(batch_size), - MYSQL_SYSVAR(optimization_delay), - MYSQL_SYSVAR(index_stat_enable), - MYSQL_SYSVAR(index_stat_option), - MYSQL_SYSVAR(table_no_logging), - MYSQL_SYSVAR(table_temporary), - MYSQL_SYSVAR(log_bin), - MYSQL_SYSVAR(log_binlog_index), - MYSQL_SYSVAR(log_empty_epochs), - MYSQL_SYSVAR(log_apply_status), - MYSQL_SYSVAR(log_transaction_id), - MYSQL_SYSVAR(clear_apply_status), - MYSQL_SYSVAR(schema_dist_upgrade_allowed), - MYSQL_SYSVAR(schema_dist_timeout), - MYSQL_SYSVAR(schema_dist_lock_wait_timeout), - MYSQL_SYSVAR(connectstring), - MYSQL_SYSVAR(mgmd_host), - MYSQL_SYSVAR(nodeid), - MYSQL_SYSVAR(blob_read_batch_bytes), - MYSQL_SYSVAR(blob_write_batch_bytes), - MYSQL_SYSVAR(deferred_constraints), - MYSQL_SYSVAR(join_pushdown), - MYSQL_SYSVAR(log_exclusive_reads), - MYSQL_SYSVAR(read_backup), - MYSQL_SYSVAR(data_node_neighbour), - MYSQL_SYSVAR(fully_replicated), - MYSQL_SYSVAR(row_checksum), +static SYS_VAR *system_variables[] = { + MYSQL_SYSVAR(extra_logging), + 
MYSQL_SYSVAR(wait_connected), + MYSQL_SYSVAR(wait_setup), + MYSQL_SYSVAR(cluster_connection_pool), + MYSQL_SYSVAR(cluster_connection_pool_nodeids), + MYSQL_SYSVAR(recv_thread_activation_threshold), + MYSQL_SYSVAR(recv_thread_cpu_mask), + MYSQL_SYSVAR(report_thresh_binlog_mem_usage), + MYSQL_SYSVAR(report_thresh_binlog_epoch_slip), + MYSQL_SYSVAR(eventbuffer_max_alloc), + MYSQL_SYSVAR(eventbuffer_free_percent), + MYSQL_SYSVAR(log_update_as_write), + MYSQL_SYSVAR(log_updated_only), + MYSQL_SYSVAR(log_update_minimal), + MYSQL_SYSVAR(log_empty_update), + MYSQL_SYSVAR(log_orig), + MYSQL_SYSVAR(distribution), + MYSQL_SYSVAR(autoincrement_prefetch_sz), + MYSQL_SYSVAR(force_send), + MYSQL_SYSVAR(use_exact_count), + MYSQL_SYSVAR(use_transactions), + MYSQL_SYSVAR(use_copying_alter_table), + MYSQL_SYSVAR(allow_copying_alter_table), + MYSQL_SYSVAR(optimized_node_selection), + MYSQL_SYSVAR(batch_size), + MYSQL_SYSVAR(optimization_delay), + MYSQL_SYSVAR(index_stat_enable), + MYSQL_SYSVAR(index_stat_option), + MYSQL_SYSVAR(table_no_logging), + MYSQL_SYSVAR(table_temporary), + MYSQL_SYSVAR(log_bin), + MYSQL_SYSVAR(log_binlog_index), + MYSQL_SYSVAR(log_empty_epochs), + MYSQL_SYSVAR(log_apply_status), + MYSQL_SYSVAR(log_transaction_id), + MYSQL_SYSVAR(clear_apply_status), + MYSQL_SYSVAR(schema_dist_upgrade_allowed), + MYSQL_SYSVAR(schema_dist_timeout), + MYSQL_SYSVAR(schema_dist_lock_wait_timeout), + MYSQL_SYSVAR(connectstring), + MYSQL_SYSVAR(mgmd_host), + MYSQL_SYSVAR(nodeid), + MYSQL_SYSVAR(blob_read_batch_bytes), + MYSQL_SYSVAR(blob_write_batch_bytes), + MYSQL_SYSVAR(deferred_constraints), + MYSQL_SYSVAR(join_pushdown), + MYSQL_SYSVAR(log_exclusive_reads), + MYSQL_SYSVAR(read_backup), + MYSQL_SYSVAR(data_node_neighbour), + MYSQL_SYSVAR(fully_replicated), + MYSQL_SYSVAR(row_checksum), #ifndef DBUG_OFF - MYSQL_SYSVAR(dbg_check_shares), + MYSQL_SYSVAR(dbg_check_shares), #endif - MYSQL_SYSVAR(version), - MYSQL_SYSVAR(version_string), - MYSQL_SYSVAR(show_foreign_key_mock_tables), - MYSQL_SYSVAR(slave_conflict_role), - MYSQL_SYSVAR(default_column_format), - MYSQL_SYSVAR(metadata_check), - MYSQL_SYSVAR(metadata_check_interval), - NULL -}; - -struct st_mysql_storage_engine ndbcluster_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION }; + MYSQL_SYSVAR(version), + MYSQL_SYSVAR(version_string), + MYSQL_SYSVAR(show_foreign_key_mock_tables), + MYSQL_SYSVAR(slave_conflict_role), + MYSQL_SYSVAR(default_column_format), + MYSQL_SYSVAR(metadata_check), + MYSQL_SYSVAR(metadata_check_interval), + NULL}; + +struct st_mysql_storage_engine ndbcluster_storage_engine = { + MYSQL_HANDLERTON_INTERFACE_VERSION}; extern struct st_mysql_plugin ndbinfo_plugin; -mysql_declare_plugin(ndbcluster) -{ - MYSQL_STORAGE_ENGINE_PLUGIN, - &ndbcluster_storage_engine, - ndbcluster_hton_name, - "MySQL AB", - "Clustered, fault-tolerant tables", - PLUGIN_LICENSE_GPL, - ndbcluster_init, /* plugin init */ - NULL, /* plugin check uninstall */ - ndbcluster_deinit, /* plugin deinit */ - 0x0100, /* plugin version */ - ndb_status_vars, /* status variables */ - system_variables, /* system variables */ - NULL, /* config options */ - 0 /* flags */ +mysql_declare_plugin(ndbcluster){ + MYSQL_STORAGE_ENGINE_PLUGIN, + &ndbcluster_storage_engine, + ndbcluster_hton_name, + "MySQL AB", + "Clustered, fault-tolerant tables", + PLUGIN_LICENSE_GPL, + ndbcluster_init, /* plugin init */ + NULL, /* plugin check uninstall */ + ndbcluster_deinit, /* plugin deinit */ + 0x0100, /* plugin version */ + ndb_status_vars, /* status variables */ + system_variables, /* system 
variables */ + NULL, /* config options */ + 0 /* flags */ }, -ndbinfo_plugin, -ndb_transid_mysql_connection_map_table -mysql_declare_plugin_end; - + ndbinfo_plugin, + ndb_transid_mysql_connection_map_table mysql_declare_plugin_end; diff --git a/storage/ndb/plugin/ha_ndbcluster.h b/storage/ndb/plugin/ha_ndbcluster.h index d3a9a51e63b8..4881e025e58a 100644 --- a/storage/ndb/plugin/ha_ndbcluster.h +++ b/storage/ndb/plugin/ha_ndbcluster.h @@ -46,8 +46,8 @@ class Ndb; // Forward declaration class NdbOperation; // Forward declaration class NdbTransaction; // Forward declaration class NdbRecAttr; // Forward declaration -class NdbScanOperation; -class NdbIndexScanOperation; +class NdbScanOperation; +class NdbIndexScanOperation; class NdbBlob; class NdbIndexStat; class NdbEventOperation; @@ -69,11 +69,7 @@ enum NDB_INDEX_TYPE { struct NDB_INDEX_DATA { NDB_INDEX_TYPE type; - enum { - UNDEFINED = 0, - ACTIVE = 1, - TO_BE_DROPPED = 2 - } status; + enum { UNDEFINED = 0, ACTIVE = 1, TO_BE_DROPPED = 2 } status; const NdbDictionary::Index *index; const NdbDictionary::Index *unique_index; unsigned char *unique_index_attrid_map; @@ -90,16 +86,11 @@ struct NDB_INDEX_DATA { }; // Wrapper class for list to hold NDBFKs -class Ndb_fk_list :public List -{ -public: - ~Ndb_fk_list() - { - delete_elements(); - } +class Ndb_fk_list : public List { + public: + ~Ndb_fk_list() { delete_elements(); } }; - #include "storage/ndb/plugin/ndb_ndbapi_util.h" #include "storage/ndb/plugin/ndb_share.h" @@ -114,7 +105,7 @@ struct Ndb_local_table_statistics { struct st_ndb_status { st_ndb_status() { memset(this, 0, sizeof(struct st_ndb_status)); } long cluster_node_id; - const char * connected_host; + const char *connected_host; long connected_port; long number_of_data_nodes; long number_of_ready_data_nodes; @@ -132,13 +123,12 @@ struct st_ndb_status { long long last_commit_epoch_server; long long last_commit_epoch_session; long long api_client_stats[Ndb::NumClientStatistics]; - const char * system_name; + const char *system_name; }; -int ndbcluster_commit(handlerton*, THD *thd, bool all); +int ndbcluster_commit(handlerton *, THD *thd, bool all); -class ha_ndbcluster: public handler, public Partition_handler -{ +class ha_ndbcluster : public handler, public Partition_handler { friend class ndb_pushed_builder_ctx; public: @@ -149,43 +139,47 @@ class ha_ndbcluster: public handler, public Partition_handler int open(const char *name, int mode, uint test_if_locked, const dd::Table *table_def) override; -private: + + private: void local_close(THD *thd, bool release_metadata); -public: + + public: int close(void) override; - int optimize(THD* thd, HA_CHECK_OPT*) override; -private: + int optimize(THD *thd, HA_CHECK_OPT *) override; + + private: int analyze_index(); -public: - int analyze(THD* thd, HA_CHECK_OPT*) override; + + public: + int analyze(THD *thd, HA_CHECK_OPT *) override; int write_row(uchar *buf) override; int update_row(const uchar *old_data, uchar *new_data) override; int delete_row(const uchar *buf) override; int index_init(uint index, bool sorted) override; int index_end() override; - int index_read(uchar *buf, const uchar *key, uint key_len, + int index_read(uchar *buf, const uchar *key, uint key_len, enum ha_rkey_function find_flag) override; int index_next(uchar *buf) override; int index_prev(uchar *buf) override; int index_first(uchar *buf) override; int index_last(uchar *buf) override; int index_next_same(uchar *buf, const uchar *key, uint keylen) override; - int index_read_last(uchar * buf, const uchar * key, uint 
key_len) override; + int index_read_last(uchar *buf, const uchar *key, uint key_len) override; int rnd_init(bool scan) override; int rnd_end() override; int rnd_next(uchar *buf) override; int rnd_pos(uchar *buf, uchar *pos) override; void position(const uchar *record) override; - int cmp_ref(const uchar * ref1, const uchar * ref2) const override; -private: + int cmp_ref(const uchar *ref1, const uchar *ref2) const override; + + private: int read_range_first_to_buf(const key_range *start_key, - const key_range *end_key, - bool eq_range, bool sorted, - uchar* buf); -public: - int read_range_first(const key_range *start_key, - const key_range *end_key, + const key_range *end_key, bool eq_range, + bool sorted, uchar *buf); + + public: + int read_range_first(const key_range *start_key, const key_range *end_key, bool eq_range, bool sorted) override; int read_range_next() override; @@ -214,17 +208,17 @@ class ha_ndbcluster: public handler, public Partition_handler uint part_id) override; private: - bool choose_mrr_impl(uint keyno, uint n_ranges, ha_rows n_rows, - uint *bufsz, uint *flags, Cost_estimate *); + bool choose_mrr_impl(uint keyno, uint n_ranges, ha_rows n_rows, uint *bufsz, + uint *flags, Cost_estimate *); -private: + private: uint first_running_range; uint first_range_in_batch; uint first_unstarted_range; int multi_range_start_retrievals(uint first_range); -public: + public: bool get_error_message(int error, String *buf) override; int records(ha_rows *num_rows) override; ha_rows estimate_rows_upper_bound() override { return HA_POS_ERROR; } @@ -238,77 +232,74 @@ class ha_ndbcluster: public handler, public Partition_handler void unlock_row() override; int start_stmt(THD *thd, thr_lock_type) override; void update_create_info(HA_CREATE_INFO *create_info) override; -private: - void update_comment_info(THD* thd, HA_CREATE_INFO *create_info, + + private: + void update_comment_info(THD *thd, HA_CREATE_INFO *create_info, const NdbDictionary::Table *tab); -public: + + public: void print_error(int error, myf errflag) override; - const char * table_type() const override; + const char *table_type() const override; ulonglong table_flags(void) const override; ulong index_flags(uint idx, uint part, bool all_parts) const override; bool primary_key_is_clustered() const override; uint max_supported_keys() const override; uint max_supported_key_parts() const override; uint max_supported_key_length() const override; - uint max_supported_key_part_length(HA_CREATE_INFO - *create_info) const override; -private: - int get_child_or_parent_fk_list(List*f_key_list, + uint max_supported_key_part_length( + HA_CREATE_INFO *create_info) const override; + + private: + int get_child_or_parent_fk_list(List *f_key_list, bool is_child, bool is_parent); -public: - int get_foreign_key_list(THD *thd, - List *f_key_list) override; - int get_parent_foreign_key_list(THD *thd, - List *f_key_list) override; - uint referenced_by_foreign_key() override; - char* get_foreign_key_create_info() override; - void free_foreign_key_create_info(char* str) override; + public: + int get_foreign_key_list(THD *thd, + List *f_key_list) override; + int get_parent_foreign_key_list(THD *thd, + List *f_key_list) override; + uint referenced_by_foreign_key() override; + + char *get_foreign_key_create_info() override; + void free_foreign_key_create_info(char *str) override; int rename_table(const char *from, const char *to, const dd::Table *from_table_def, dd::Table *to_table_def) override; int delete_table(const char *name, const dd::Table *table_def) 
override; - bool upgrade_table(THD* thd, - const char*, - const char* table_name, - dd::Table* dd_table) override; + bool upgrade_table(THD *thd, const char *, const char *table_name, + dd::Table *dd_table) override; - row_type get_real_row_type(const HA_CREATE_INFO *create_info) const override - { + row_type get_real_row_type(const HA_CREATE_INFO *create_info) const override { DBUG_ENTER("ha_ndbcluster::get_real_row_type"); // ROW_RORMAT=FIXED -> using FIXED - if (create_info->row_type == ROW_TYPE_FIXED) - DBUG_RETURN(ROW_TYPE_FIXED); + if (create_info->row_type == ROW_TYPE_FIXED) DBUG_RETURN(ROW_TYPE_FIXED); // All other values uses DYNAMIC DBUG_RETURN(ROW_TYPE_DYNAMIC); } int create(const char *name, TABLE *form, HA_CREATE_INFO *info, - dd::Table* table_def) override; - int truncate(dd::Table* table_def) override; - bool is_ignorable_error(int error) override - { + dd::Table *table_def) override; + int truncate(dd::Table *table_def) override; + bool is_ignorable_error(int error) override { if (handler::is_ignorable_error(error) || error == HA_ERR_NO_PARTITION_FOUND) return true; return false; } - - THR_LOCK_DATA **store_lock(THD *thd, - THR_LOCK_DATA **to, + THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) override; bool low_byte_first() const override; - enum ha_key_alg get_default_index_algorithm() const override - { + enum ha_key_alg get_default_index_algorithm() const override { /* NDB uses hash indexes only when explicitly requested. */ return HA_KEY_ALG_BTREE; } - bool is_index_algorithm_supported(enum ha_key_alg key_alg) const override - { return key_alg == HA_KEY_ALG_BTREE || key_alg == HA_KEY_ALG_HASH; } + bool is_index_algorithm_supported(enum ha_key_alg key_alg) const override { + return key_alg == HA_KEY_ALG_BTREE || key_alg == HA_KEY_ALG_HASH; + } double scan_time() override; ha_rows records_in_range(uint inx, key_range *min_key, @@ -321,52 +312,53 @@ class ha_ndbcluster: public handler, public Partition_handler uint *dup_key_found) override; int exec_bulk_update(uint *dup_key_found) override; void end_bulk_update() override; -private: + + private: int ndb_update_row(const uchar *old_data, uchar *new_data, int is_bulk_update); -public: + + public: static void set_dbname(const char *pathname, char *dbname); static void set_tabname(const char *pathname, char *tabname); /* static member function as it needs to access private NdbTransaction methods */ - static void release_completed_operations(NdbTransaction*); + static void release_completed_operations(NdbTransaction *); /* Condition pushdown */ - /* - Push condition down to the table handler. - SYNOPSIS - cond_push() - cond Condition to be pushed. The condition tree must not be - modified by the by the caller. - other_tbls_ok Are other tables allowed to be referred - from the condition terms pushed down. - RETURN - The 'remainder' condition that caller must use to filter out records. - NULL means the handler will not return rows that do not match the - passed condition. - NOTES - The table handler filters out rows using (pushed_cond1 AND pushed_cond2 - AND ... AND pushed_condN) - or less restrictive condition, depending on handler's capabilities. - - handler->reset() call discard any pushed conditions. - Calls to rnd_init/rnd_end, index_init/index_end etc do not affect - any condition being pushed. 
- The current implementation supports arbitrary AND/OR nested conditions - with comparisons between columns and constants (including constant - expressions and function calls) and the following comparison operators: - =, !=, >, >=, <, <=, like, "not like", "is null", and "is not null". - Negated conditions are supported by NOT which generate NAND/NOR groups. - */ - const Item *cond_push(const Item *cond, - bool other_tbls_ok) override; - -public: + /* + Push condition down to the table handler. + SYNOPSIS + cond_push() + cond Condition to be pushed. The condition tree must not be + modified by the caller. + other_tbls_ok Are other tables allowed to be referred + from the condition terms pushed down. + RETURN + The 'remainder' condition that caller must use to filter out records. + NULL means the handler will not return rows that do not match the + passed condition. + NOTES + The table handler filters out rows using (pushed_cond1 AND pushed_cond2 + AND ... AND pushed_condN) + or less restrictive condition, depending on handler's capabilities. + + handler->reset() call discards any pushed conditions. + Calls to rnd_init/rnd_end, index_init/index_end etc do not affect + any condition being pushed. + The current implementation supports arbitrary AND/OR nested conditions + with comparisons between columns and constants (including constant + expressions and function calls) and the following comparison operators: + =, !=, >, >=, <, <=, like, "not like", "is null", and "is not null". + Negated conditions are supported by NOT which generates NAND/NOR groups. + */ + const Item *cond_push(const Item *cond, bool other_tbls_ok) override; + + public: /** * Generate the ScanFilters code for the condition(s) previously * accepted for cond_push'ing. @@ -382,42 +374,41 @@ class ha_ndbcluster: public handler, public Partition_handler * arguments. * @return 1 if generation of the key part failed.
*/ - int generate_scan_filter_with_key( - NdbInterpretedCode *code, - NdbScanOperation::ScanOptions *options, - const KEY *key_info, - const key_range *start_key, - const key_range *end_key); - -private: - bool maybe_pushable_join(const char*& reason) const; -public: - int assign_pushed_join(const ndb_pushed_join* pushed_join); + int generate_scan_filter_with_key(NdbInterpretedCode *code, + NdbScanOperation::ScanOptions *options, + const KEY *key_info, + const key_range *start_key, + const key_range *end_key); + + private: + bool maybe_pushable_join(const char *&reason) const; + + public: + int assign_pushed_join(const ndb_pushed_join *pushed_join); uint number_of_pushed_joins() const override; - const TABLE* member_of_pushed_join() const override; - const TABLE* parent_of_pushed_join() const override; + const TABLE *member_of_pushed_join() const override; + const TABLE *parent_of_pushed_join() const override; int index_read_pushed(uchar *buf, const uchar *key, key_part_map keypart_map) override; - int index_next_pushed(uchar * buf) override; + int index_next_pushed(uchar *buf) override; /* * Internal to ha_ndbcluster, used by C functions */ - int ndb_err(NdbTransaction*); + int ndb_err(NdbTransaction *); - enum_alter_inplace_result - check_if_supported_inplace_alter(TABLE *altered_table, - Alter_inplace_info *ha_alter_info) override; + enum_alter_inplace_result check_if_supported_inplace_alter( + TABLE *altered_table, Alter_inplace_info *ha_alter_info) override; -private: + private: bool parse_comment_changes(NdbDictionary::Table *new_tab, const NdbDictionary::Table *old_tab, - HA_CREATE_INFO *create_info, - THD *thd, - bool & max_rows_changed) const; -public: + HA_CREATE_INFO *create_info, THD *thd, + bool &max_rows_changed) const; + + public: bool prepare_inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha_alter_info, const dd::Table *old_table_def, @@ -430,8 +421,7 @@ class ha_ndbcluster: public handler, public Partition_handler bool commit_inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha_alter_info, - bool commit, - const dd::Table *old_table_def, + bool commit, const dd::Table *old_table_def, dd::Table *new_table_def) override; void notify_table_changed(Alter_inplace_info *alter_info) override; @@ -440,40 +430,30 @@ class ha_ndbcluster: public handler, public Partition_handler void prepare_inplace__drop_index(uint key_num); int inplace__final_drop_index(TABLE *table_arg); - enum_alter_inplace_result - supported_inplace_field_change(Alter_inplace_info*, - Field*, Field*, bool, bool) const; - bool table_storage_changed(HA_CREATE_INFO*) const; - bool column_has_index(TABLE*, uint, uint, uint) const; - enum_alter_inplace_result - supported_inplace_ndb_column_change(uint, TABLE*, - Alter_inplace_info*, - bool, bool) const; - enum_alter_inplace_result - supported_inplace_column_change(THD*, TABLE*, uint, Field*, Alter_inplace_info*) const; - enum_alter_inplace_result - check_inplace_alter_supported(TABLE *altered_table, - Alter_inplace_info *ha_alter_info); - void - check_implicit_column_format_change(TABLE *altered_table, - Alter_inplace_info *ha_alter_info) const; + enum_alter_inplace_result supported_inplace_field_change(Alter_inplace_info *, + Field *, Field *, + bool, bool) const; + bool table_storage_changed(HA_CREATE_INFO *) const; + bool column_has_index(TABLE *, uint, uint, uint) const; + enum_alter_inplace_result supported_inplace_ndb_column_change( + uint, TABLE *, Alter_inplace_info *, bool, bool) const; + enum_alter_inplace_result 
supported_inplace_column_change( + THD *, TABLE *, uint, Field *, Alter_inplace_info *) const; + enum_alter_inplace_result check_inplace_alter_supported( + TABLE *altered_table, Alter_inplace_info *ha_alter_info); + void check_implicit_column_format_change( + TABLE *altered_table, Alter_inplace_info *ha_alter_info) const; bool abort_inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha_alter_info); - int prepare_conflict_detection(enum_conflicting_op_type op_type, - const NdbRecord* key_rec, - const NdbRecord* data_rec, - const uchar* old_data, - const uchar* new_data, - const MY_BITMAP *write_set, - NdbTransaction* trans, - NdbInterpretedCode* code, - NdbOperation::OperationOptions* options, - bool& conflict_handled, - bool& avoid_ndbapi_write); + int prepare_conflict_detection( + enum_conflicting_op_type op_type, const NdbRecord *key_rec, + const NdbRecord *data_rec, const uchar *old_data, const uchar *new_data, + const MY_BITMAP *write_set, NdbTransaction *trans, + NdbInterpretedCode *code, NdbOperation::OperationOptions *options, + bool &conflict_handled, bool &avoid_ndbapi_write); void setup_key_ref_for_ndb_record(const NdbRecord **key_rec, - const uchar **key_row, - const uchar *record, + const uchar **key_row, const uchar *record, bool use_active_index); void check_read_before_write_removal(); @@ -490,56 +470,52 @@ class ha_ndbcluster: public handler, public Partition_handler int create_indexes(THD *thd, TABLE *tab, const NdbDictionary::Table *ndbtab) const; int open_indexes(Ndb *ndb, TABLE *tab); - void release_indexes(NdbDictionary::Dictionary* dict, int invalidate); + void release_indexes(NdbDictionary::Dictionary *dict, int invalidate); void inplace__renumber_indexes(uint dropped_index_num); int inplace__drop_indexes(Ndb *ndb, TABLE *tab); - int add_index_handle(NdbDictionary::Dictionary *dict, - KEY *key_info, const char *key_name, uint index_no); + int add_index_handle(NdbDictionary::Dictionary *dict, KEY *key_info, + const char *key_name, uint index_no); int add_table_ndb_record(NdbDictionary::Dictionary *dict); int add_hidden_pk_ndb_record(NdbDictionary::Dictionary *dict); - int add_index_ndb_record(NdbDictionary::Dictionary *dict, - KEY *key_info, uint index_no); + int add_index_ndb_record(NdbDictionary::Dictionary *dict, KEY *key_info, + uint index_no); int get_fk_data(THD *thd, Ndb *ndb); void release_fk_data(); int create_fks(THD *thd, Ndb *ndb); - int copy_fk_for_offline_alter(THD *thd, Ndb *, const char* tabname); - int inplace__drop_fks(THD*, Ndb*, NdbDictionary::Dictionary*, - const NdbDictionary::Table*); - static int get_fk_data_for_truncate(NdbDictionary::Dictionary*, - const NdbDictionary::Table*, - Ndb_fk_list&); - static int recreate_fk_for_truncate(THD*, Ndb*, const char*, - Ndb_fk_list&); - bool has_fk_dependency(THD*, const NdbDictionary::Column*) const; - int check_default_values(const NdbDictionary::Table* ndbtab); - int get_metadata(THD *thd, const dd::Table* table_def); + int copy_fk_for_offline_alter(THD *thd, Ndb *, const char *tabname); + int inplace__drop_fks(THD *, Ndb *, NdbDictionary::Dictionary *, + const NdbDictionary::Table *); + static int get_fk_data_for_truncate(NdbDictionary::Dictionary *, + const NdbDictionary::Table *, + Ndb_fk_list &); + static int recreate_fk_for_truncate(THD *, Ndb *, const char *, + Ndb_fk_list &); + bool has_fk_dependency(THD *, const NdbDictionary::Column *) const; + int check_default_values(const NdbDictionary::Table *ndbtab); + int get_metadata(THD *thd, const dd::Table *table_def); void 
release_metadata(THD *thd, Ndb *ndb); NDB_INDEX_TYPE get_index_type(uint idx_no) const; NDB_INDEX_TYPE get_index_type_from_table(uint index_no) const; - NDB_INDEX_TYPE get_index_type_from_key(uint index_no, KEY *key_info, + NDB_INDEX_TYPE get_index_type_from_key(uint index_no, KEY *key_info, bool primary) const; bool has_null_in_unique_index(uint idx_no) const; bool check_index_fields_not_null(KEY *key_info) const; - bool check_if_pushable(int type, //NdbQueryOperationDef::Type, - uint idx= MAX_KEY) const; + bool check_if_pushable(int type, // NdbQueryOperationDef::Type, + uint idx = MAX_KEY) const; bool check_is_pushed() const; - int create_pushed_join(const NdbQueryParamValue* keyFieldParams=NULL, - uint paramCnt= 0); + int create_pushed_join(const NdbQueryParamValue *keyFieldParams = NULL, + uint paramCnt = 0); - int ndb_pk_update_row(THD *thd, - const uchar *old_data, uchar *new_data); + int ndb_pk_update_row(THD *thd, const uchar *old_data, uchar *new_data); int pk_read(const uchar *key, uchar *buf, uint32 *part_id); - int ordered_index_scan(const key_range *start_key, - const key_range *end_key, - bool sorted, bool descending, uchar* buf, + int ordered_index_scan(const key_range *start_key, const key_range *end_key, + bool sorted, bool descending, uchar *buf, part_id_range *part_spec); int unique_index_read(const uchar *key, uchar *buf); - int full_table_scan(const KEY* key_info, - const key_range *start_key, - const key_range *end_key, - uchar *buf); - int flush_bulk_insert(bool allow_batch= false); + int full_table_scan(const KEY *key_info, const key_range *start_key, + const key_range *end_key, uchar *buf); + int flush_bulk_insert(bool allow_batch = false); int ndb_write_row(uchar *record, bool primary_key_update, bool batched_update); @@ -547,26 +523,21 @@ class ha_ndbcluster: public handler, public Partition_handler int end_bulk_delete() override; int ndb_delete_row(const uchar *record, bool primary_key_update); - int ndb_optimize_table(THD* thd, uint delay) const; + int ndb_optimize_table(THD *thd, uint delay) const; bool check_all_operations_for_error(NdbTransaction *trans, const NdbOperation *first, - const NdbOperation *last, - uint errcode); + const NdbOperation *last, uint errcode); - enum NDB_WRITE_OP { - NDB_INSERT = 0, - NDB_UPDATE = 1, - NDB_PK_UPDATE = 2 - }; + enum NDB_WRITE_OP { NDB_INSERT = 0, NDB_UPDATE = 1, NDB_PK_UPDATE = 2 }; int peek_indexed_rows(const uchar *record, NDB_WRITE_OP write_op); int scan_handle_lock_tuple(NdbScanOperation *scanOp, NdbTransaction *trans); - int fetch_next(NdbScanOperation* op); + int fetch_next(NdbScanOperation *op); int fetch_next_pushed(); int set_auto_inc(THD *thd, Field *field); int set_auto_inc_val(THD *thd, Uint64 value); - int next_result(uchar *buf); + int next_result(uchar *buf); int close_scan(); int unpack_record(uchar *dst_row, const uchar *src_row); int unpack_record_and_set_generated_fields(uchar *dst_row, @@ -601,21 +572,18 @@ class ha_ndbcluster: public handler, public Partition_handler NdbOperation::OperationOptions *options) const; bool check_index_fields_in_write_set(uint keyno); - int log_exclusive_read(const NdbRecord *key_rec, - const uchar *key, - uchar *buf, + int log_exclusive_read(const NdbRecord *key_rec, const uchar *key, uchar *buf, Uint32 *ppartition_id); - int scan_log_exclusive_read(NdbScanOperation*, - NdbTransaction*); - const NdbOperation *pk_unique_index_read_key(uint idx, - const uchar *key, uchar *buf, + int scan_log_exclusive_read(NdbScanOperation *, NdbTransaction *); + const NdbOperation 
*pk_unique_index_read_key(uint idx, const uchar *key, + uchar *buf, NdbOperation::LockMode lm, Uint32 *ppartition_id); int pk_unique_index_read_key_pushed(uint idx, const uchar *key); int read_multi_range_fetch_next(); - - int primary_key_cmp(const uchar * old_row, const uchar * new_row); + + int primary_key_cmp(const uchar *old_row, const uchar *new_row); void get_auto_increment(ulonglong offset, ulonglong increment, ulonglong number_of_desired_values, @@ -623,72 +591,60 @@ class ha_ndbcluster: public handler, public Partition_handler ulonglong *nb_reserved_values) override; bool uses_blob_value(const MY_BITMAP *bitmap) const; - int check_ndb_connection(THD* thd) const; + int check_ndb_connection(THD *thd) const; void set_rec_per_key(); void no_uncommitted_rows_execute_failure(); void no_uncommitted_rows_update(int); /* Ordered index statistics v4 */ - int ndb_index_stat_query(uint inx, - const key_range *min_key, - const key_range *max_key, - NdbIndexStat::Stat& stat, + int ndb_index_stat_query(uint inx, const key_range *min_key, + const key_range *max_key, NdbIndexStat::Stat &stat, int from); - int ndb_index_stat_get_rir(uint inx, - key_range *min_key, - key_range *max_key, + int ndb_index_stat_get_rir(uint inx, key_range *min_key, key_range *max_key, ha_rows *rows_out); int ndb_index_stat_set_rpk(uint inx); - int ndb_index_stat_analyze(uint *inx_list, - uint inx_count); + int ndb_index_stat_analyze(uint *inx_list, uint inx_count); NdbTransaction *start_transaction_part_id(uint32 part_id, int &error); - inline NdbTransaction *get_transaction_part_id(uint32 part_id, int &error) - { - if (m_thd_ndb->trans) - return m_thd_ndb->trans; + inline NdbTransaction *get_transaction_part_id(uint32 part_id, int &error) { + if (m_thd_ndb->trans) return m_thd_ndb->trans; return start_transaction_part_id(part_id, error); } NdbTransaction *start_transaction(int &error); - inline NdbTransaction *get_transaction(int &error) - { - if (m_thd_ndb->trans) - return m_thd_ndb->trans; + inline NdbTransaction *get_transaction(int &error) { + if (m_thd_ndb->trans) return m_thd_ndb->trans; return start_transaction(error); } NdbTransaction *start_transaction_row(const NdbRecord *ndb_record, - const uchar *record, - int &error); - NdbTransaction *start_transaction_key(uint index, - const uchar *key_data, + const uchar *record, int &error); + NdbTransaction *start_transaction_key(uint index, const uchar *key_data, int &error); - friend int check_completed_operations_pre_commit(Thd_ndb*, - NdbTransaction*, - const NdbOperation*, + friend int check_completed_operations_pre_commit(Thd_ndb *, NdbTransaction *, + const NdbOperation *, uint *ignore_count); - friend int ndbcluster_commit(handlerton*, THD *thd, bool all); + friend int ndbcluster_commit(handlerton *, THD *thd, bool all); int start_statement(THD *thd, Thd_ndb *thd_ndb, uint table_count); int init_handler_for_statement(THD *thd); /* Implementing Partition_handler API. 
*/ - Partition_handler *get_partition_handler() override - { return static_cast<Partition_handler*>(this); } + Partition_handler *get_partition_handler() override { + return static_cast<Partition_handler *>(this); + } uint alter_flags(uint flags) const override; void get_dynamic_partition_info(ha_statistics *stat_info, - ha_checksum *checksum, - uint part_id) override; + ha_checksum *checksum, uint part_id) override; int get_default_num_partitions(HA_CREATE_INFO *info) override; bool get_num_parts(const char *name, uint *num_parts) override; void set_auto_partitions(partition_info *part_info) override; void set_part_info(partition_info *part_info, bool early) override; /* End of Partition_handler API */ - Ndb_table_map* m_table_map; + Ndb_table_map *m_table_map; Thd_ndb *m_thd_ndb; NdbScanOperation *m_active_cursor; const NdbDictionary::Table *m_table; @@ -702,32 +658,33 @@ class ha_ndbcluster: public handler, public Partition_handler /* Bitmap used for NdbRecord operation column mask. */ MY_BITMAP m_bitmap; - my_bitmap_map m_bitmap_buf[(NDB_MAX_ATTRIBUTES_IN_TABLE + - 8*sizeof(my_bitmap_map) - 1) / - (8*sizeof(my_bitmap_map))]; // Buffer for m_bitmap + my_bitmap_map + m_bitmap_buf[(NDB_MAX_ATTRIBUTES_IN_TABLE + 8 * sizeof(my_bitmap_map) - + 1) / + (8 * sizeof(my_bitmap_map))]; // Buffer for m_bitmap /* Bitmap with bit set for all primary key columns. */ MY_BITMAP *m_pk_bitmap_p; - my_bitmap_map m_pk_bitmap_buf[(NDB_MAX_ATTRIBUTES_IN_TABLE + - 8*sizeof(my_bitmap_map) - 1) / - (8*sizeof(my_bitmap_map))]; // Buffer for m_pk_bitmap + my_bitmap_map + m_pk_bitmap_buf[(NDB_MAX_ATTRIBUTES_IN_TABLE + 8 * sizeof(my_bitmap_map) - + 1) / + (8 * sizeof(my_bitmap_map))]; // Buffer for m_pk_bitmap struct Ndb_local_table_statistics *m_table_info; struct Ndb_local_table_statistics m_table_info_instance; char m_dbname[FN_HEADLEN]; - //char m_schemaname[FN_HEADLEN]; + // char m_schemaname[FN_HEADLEN]; char m_tabname[FN_HEADLEN]; THR_LOCK_DATA m_lock; bool m_lock_tuple; NDB_SHARE *m_share; - NDB_INDEX_DATA m_index[MAX_KEY]; - static const size_t fk_root_block_size= 1024; + NDB_INDEX_DATA m_index[MAX_KEY]; + static const size_t fk_root_block_size = 1024; MEM_ROOT m_fk_mem_root; struct Ndb_fk_data *m_fk_data; /* Pointer to row returned from scan nextResult(). */ - union - { + union { const char *_m_next_row; const uchar *m_next_row; }; @@ -752,7 +709,8 @@ class ha_ndbcluster: public handler, public Partition_handler bool m_read_before_write_removal_used; ha_rows m_rows_updated; ha_rows m_rows_deleted; - ha_rows m_rows_to_insert; // TODO: merge it with handler::estimation_rows_to_insert? + ha_rows m_rows_to_insert; // TODO: merge it with + // handler::estimation_rows_to_insert? ha_rows m_rows_inserted; bool m_delete_cannot_batch; bool m_update_cannot_batch; @@ -767,7 +725,7 @@ class ha_ndbcluster: public handler, public Partition_handler uint m_blob_expected_count_per_row; uchar *m_blob_destination_record; Uint64 m_blobs_row_total_size; /* Bytes needed for all blobs in current row */ - + // memory for blobs in one tuple uchar *m_blobs_buffer; Uint64 m_blobs_buffer_size; @@ -777,13 +735,13 @@ class ha_ndbcluster: public handler, public Partition_handler // Joins pushed to NDB. const ndb_pushed_join - *m_pushed_join_member; // Pushed join def. I am member of - int m_pushed_join_operation; // Op. id. in above pushed join - static const int PUSHED_ROOT= 0; // Op. id. if I'm root + *m_pushed_join_member; // Pushed join def. I am member of + int m_pushed_join_operation; // Op. id. in above pushed join + static const int PUSHED_ROOT = 0; // Op. id.
if I'm root - bool m_disable_pushed_join; // Pushed execution allowed? - NdbQuery* m_active_query; // Pushed query instance executing - NdbQueryOperation* m_pushed_operation; // Pushed operation instance + bool m_disable_pushed_join; // Pushed execution allowed? + NdbQuery *m_active_query; // Pushed query instance executing + NdbQueryOperation *m_pushed_operation; // Pushed operation instance /* In case we failed to push a 'pushed_cond', the handler will evaluate it */ ha_ndbcluster_cond m_cond; @@ -792,20 +750,18 @@ class ha_ndbcluster: public handler, public Partition_handler NdbIndexScanOperation *m_multi_cursor; Ndb *get_ndb(THD *thd) const; - int update_stats(THD *thd, bool do_read_stat, - uint part_id= ~(uint)0); - int add_handler_to_open_tables(THD*, Thd_ndb*, ha_ndbcluster* handler); + int update_stats(THD *thd, bool do_read_stat, uint part_id = ~(uint)0); + int add_handler_to_open_tables(THD *, Thd_ndb *, ha_ndbcluster *handler); }; // Global handler synchronization extern mysql_mutex_t ndbcluster_mutex; -extern mysql_cond_t ndbcluster_cond; +extern mysql_cond_t ndbcluster_cond; extern int ndb_setup_complete; static const int NDB_INVALID_SCHEMA_OBJECT = 241; - int ndb_to_mysql_error(const NdbError *ndberr); #endif diff --git a/storage/ndb/plugin/ha_ndbcluster_binlog.cc b/storage/ndb/plugin/ha_ndbcluster_binlog.cc index 95e302a1a9a8..a6b6fc5ed909 100644 --- a/storage/ndb/plugin/ha_ndbcluster_binlog.cc +++ b/storage/ndb/plugin/ha_ndbcluster_binlog.cc @@ -32,11 +32,11 @@ #include "mysql/plugin.h" #include "sql/auth/acl_change_notification.h" #include "sql/binlog.h" -#include "sql/dd/types/abstract_table.h" // dd::enum_table_type -#include "sql/dd/types/tablespace.h" // dd::Tablespace -#include "sql/derror.h" // ER_THD -#include "sql/mysqld.h" // opt_bin_log -#include "sql/mysqld_thd_manager.h" // Global_THD_manager +#include "sql/dd/types/abstract_table.h" // dd::enum_table_type +#include "sql/dd/types/tablespace.h" // dd::Tablespace +#include "sql/derror.h" // ER_THD +#include "sql/mysqld.h" // opt_bin_log +#include "sql/mysqld_thd_manager.h" // Global_THD_manager #include "sql/protocol_classic.h" #include "sql/rpl_injector.h" #include "sql/rpl_slave.h" @@ -106,45 +106,42 @@ void ndb_index_stat_restart(); #include "storage/ndb/plugin/ndb_schema_dist.h" #include "storage/ndb/plugin/ndb_schema_object.h" -extern Ndb_cluster_connection* g_ndb_cluster_connection; +extern Ndb_cluster_connection *g_ndb_cluster_connection; /* Timeout for syncing schema events between mysql servers, and between mysql server and the binlog */ -static const int DEFAULT_SYNC_TIMEOUT= 120; +static const int DEFAULT_SYNC_TIMEOUT = 120; /* Column numbers in the ndb_binlog_index table */ -enum Ndb_binlog_index_cols -{ - NBICOL_START_POS = 0 - ,NBICOL_START_FILE = 1 - ,NBICOL_EPOCH = 2 - ,NBICOL_NUM_INSERTS = 3 - ,NBICOL_NUM_UPDATES = 4 - ,NBICOL_NUM_DELETES = 5 - ,NBICOL_NUM_SCHEMAOPS = 6 +enum Ndb_binlog_index_cols { + NBICOL_START_POS = 0, + NBICOL_START_FILE = 1, + NBICOL_EPOCH = 2, + NBICOL_NUM_INSERTS = 3, + NBICOL_NUM_UPDATES = 4, + NBICOL_NUM_DELETES = 5, + NBICOL_NUM_SCHEMAOPS = 6 /* Following columns in schema 'v2' */ - ,NBICOL_ORIG_SERVERID = 7 - ,NBICOL_ORIG_EPOCH = 8 - ,NBICOL_GCI = 9 + , + NBICOL_ORIG_SERVERID = 7, + NBICOL_ORIG_EPOCH = 8, + NBICOL_GCI = 9 /* Following columns in schema 'v3' */ - ,NBICOL_NEXT_POS = 10 - ,NBICOL_NEXT_FILE = 11 + , + NBICOL_NEXT_POS = 10, + NBICOL_NEXT_FILE = 11 }; -class Mutex_guard -{ -public: - Mutex_guard(mysql_mutex_t &mutex) : m_mutex(mutex) - {
+ public: + Mutex_guard(mysql_mutex_t &mutex) : m_mutex(mutex) { mysql_mutex_lock(&m_mutex); } - ~Mutex_guard() - { - mysql_mutex_unlock(&m_mutex); - } -private: + ~Mutex_guard() { mysql_mutex_unlock(&m_mutex); } + + private: mysql_mutex_t &m_mutex; }; @@ -157,14 +154,14 @@ class Mutex_guard and concurrent create and drop of events from client threads. It also protects injector_ndb and schema_ndb which are the Ndb objects used for the above create/drop/pollEvents() - Rational for splitting these into two separate mutexes, is that + Rationale for splitting these into two separate mutexes is that the injector_event_mutex is held for 10ms across pollEvents(). That could (almost) block access to the shared binlog injector data, like ndb_binlog_is_read_only(). */ static mysql_mutex_t injector_event_mutex; static mysql_mutex_t injector_data_mutex; -static mysql_cond_t injector_data_cond; +static mysql_cond_t injector_data_cond; /* NOTE: @@ -184,27 +181,23 @@ static mysql_cond_t injector_data_cond; and when such changes are received, they will be written to the binary log */ -bool ndb_binlog_running= false; - -static bool ndb_binlog_tables_inited= false; //injector_data_mutex, relaxed -static bool ndb_binlog_is_ready= false; //injector_data_mutex, relaxed - -bool -ndb_binlog_is_read_only(void) -{ +bool ndb_binlog_running = false; + +static bool ndb_binlog_tables_inited = false; // injector_data_mutex, relaxed +static bool ndb_binlog_is_ready = false; // injector_data_mutex, relaxed + +bool ndb_binlog_is_read_only(void) { /* - Could be called from any client thread. Need a mutex to + Could be called from any client thread. Need a mutex to protect ndb_binlog_tables_inited and ndb_binlog_is_ready. */ Mutex_guard injector_g(injector_data_mutex); - if (!ndb_binlog_tables_inited) - { + if (!ndb_binlog_tables_inited) { /* the ndb_* system tables not setup yet */ return true; } - if (ndb_binlog_running && !ndb_binlog_is_ready) - { + if (ndb_binlog_running && !ndb_binlog_is_ready) { /* The binlog thread is supposed to write to binlog but not ready (still initializing or has lost connection) @@ -214,7 +207,7 @@ ndb_binlog_is_read_only(void) return false; } -static THD *injector_thd= NULL; +static THD *injector_thd = NULL; /* Global reference to ndb injector thd object.
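(Illustrative sketch, not part of the patch: the Mutex_guard class reformatted above is a plain RAII scope lock, used as in ndb_binlog_is_read_only(). A minimal stand-alone usage sketch follows; the mutex and flag names are hypothetical, for illustration only.)

static mysql_mutex_t example_mutex;  // hypothetical mutex, illustration only
static bool example_flag = false;    // hypothetical shared state

static bool read_example_flag() {
  // The constructor takes the lock ...
  Mutex_guard guard(example_mutex);
  // ... and the destructor releases it on every path out of this scope,
  // so an early return can never leave the mutex locked.
  return example_flag;
}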
@@ -226,39 +219,34 @@ static THD *injector_thd= NULL; Must therefore always be used with a surrounding mysql_mutex_lock(&injector_event_mutex), when create/dropEventOperation */ -static Ndb *injector_ndb= NULL; //Need injector_event_mutex -static Ndb *schema_ndb= NULL; //Need injector_event_mutex +static Ndb *injector_ndb = NULL; // Need injector_event_mutex +static Ndb *schema_ndb = NULL; // Need injector_event_mutex -static int ndbcluster_binlog_inited= 0; +static int ndbcluster_binlog_inited = 0; /* NDB Injector thread (used for binlog creation) */ -static ulonglong ndb_latest_applied_binlog_epoch= 0; -static ulonglong ndb_latest_handled_binlog_epoch= 0; -static ulonglong ndb_latest_received_binlog_epoch= 0; +static ulonglong ndb_latest_applied_binlog_epoch = 0; +static ulonglong ndb_latest_handled_binlog_epoch = 0; +static ulonglong ndb_latest_received_binlog_epoch = 0; -NDB_SHARE *ndb_apply_status_share= NULL; +NDB_SHARE *ndb_apply_status_share = NULL; extern bool opt_log_slave_updates; static bool g_ndb_log_slave_updates; static bool g_injector_v1_warning_emitted = false; -bool -Ndb_binlog_client::create_event_data(NDB_SHARE *share, - const dd::Table *table_def, - Ndb_event_data **event_data) const -{ +bool Ndb_binlog_client::create_event_data(NDB_SHARE *share, + const dd::Table *table_def, + Ndb_event_data **event_data) const { DBUG_ENTER("Ndb_binlog_client::create_event_data"); DBUG_ASSERT(table_def); DBUG_ASSERT(event_data); - Ndb_event_data* new_event_data = - Ndb_event_data::create_event_data(m_thd, share, - share->db, share->table_name, - share->key_string(), injector_thd, - table_def); - if (!new_event_data) - DBUG_RETURN(false); + Ndb_event_data *new_event_data = Ndb_event_data::create_event_data( + m_thd, share, share->db, share->table_name, share->key_string(), + injector_thd, table_def); + if (!new_event_data) DBUG_RETURN(false); // Return the newly created event_data to caller *event_data = new_event_data; @@ -266,84 +254,69 @@ Ndb_binlog_client::create_event_data(NDB_SHARE *share, DBUG_RETURN(true); } - -static int -get_ndb_blobs_value(TABLE* table, NdbValue* value_array, - uchar*& buffer, uint& buffer_size, - ptrdiff_t ptrdiff) -{ +static int get_ndb_blobs_value(TABLE *table, NdbValue *value_array, + uchar *&buffer, uint &buffer_size, + ptrdiff_t ptrdiff) { DBUG_ENTER("get_ndb_blobs_value"); // Field has no field number so cannot use TABLE blob_field // Loop twice, first only counting total buffer size - for (int loop= 0; loop <= 1; loop++) - { - uint32 offset= 0; - for (uint i= 0; i < table->s->fields; i++) - { - Field *field= table->field[i]; - NdbValue value= value_array[i]; - if (! 
(field->flags & BLOB_FLAG && field->stored_in_db)) - continue; - if (value.blob == NULL) - { - DBUG_PRINT("info",("[%u] skipped", i)); + for (int loop = 0; loop <= 1; loop++) { + uint32 offset = 0; + for (uint i = 0; i < table->s->fields; i++) { + Field *field = table->field[i]; + NdbValue value = value_array[i]; + if (!(field->flags & BLOB_FLAG && field->stored_in_db)) continue; + if (value.blob == NULL) { + DBUG_PRINT("info", ("[%u] skipped", i)); continue; } - Field_blob *field_blob= (Field_blob *)field; - NdbBlob *ndb_blob= value.blob; + Field_blob *field_blob = (Field_blob *)field; + NdbBlob *ndb_blob = value.blob; int isNull; - if (ndb_blob->getNull(isNull) != 0) - DBUG_RETURN(-1); + if (ndb_blob->getNull(isNull) != 0) DBUG_RETURN(-1); if (isNull == 0) { - Uint64 len64= 0; - if (ndb_blob->getLength(len64) != 0) - DBUG_RETURN(-1); + Uint64 len64 = 0; + if (ndb_blob->getLength(len64) != 0) DBUG_RETURN(-1); // Align to Uint64 - uint32 size= Uint32(len64); - if (size % 8 != 0) - size+= 8 - size % 8; - if (loop == 1) - { - uchar *buf= buffer + offset; - uint32 len= buffer_size - offset; // Size of buf - if (ndb_blob->readData(buf, len) != 0) - DBUG_RETURN(-1); - DBUG_PRINT("info", ("[%u] offset: %u buf: 0x%lx len=%u [ptrdiff=%d]", - i, offset, (long) buf, len, (int)ptrdiff)); + uint32 size = Uint32(len64); + if (size % 8 != 0) size += 8 - size % 8; + if (loop == 1) { + uchar *buf = buffer + offset; + uint32 len = buffer_size - offset; // Size of buf + if (ndb_blob->readData(buf, len) != 0) DBUG_RETURN(-1); + DBUG_PRINT("info", + ("[%u] offset: %u buf: 0x%lx len=%u [ptrdiff=%d]", i, + offset, (long)buf, len, (int)ptrdiff)); DBUG_ASSERT(len == len64); // Ugly hack assumes only ptr needs to be changed field_blob->set_ptr_offset(ptrdiff, len, buf); } - offset+= size; - } - else if (loop == 1) // undefined or null + offset += size; + } else if (loop == 1) // undefined or null { // have to set length even in this case - uchar *buf= buffer + offset; // or maybe NULL - uint32 len= 0; + uchar *buf = buffer + offset; // or maybe NULL + uint32 len = 0; field_blob->set_ptr_offset(ptrdiff, len, buf); DBUG_PRINT("info", ("[%u] isNull=%d", i, isNull)); } } - if (loop == 0 && offset > buffer_size) - { + if (loop == 0 && offset > buffer_size) { my_free(buffer); - buffer_size= 0; + buffer_size = 0; DBUG_PRINT("info", ("allocate blobs buffer size %u", offset)); - buffer= (uchar*) my_malloc(PSI_INSTRUMENT_ME, offset, MYF(MY_WME)); - if (buffer == NULL) - { + buffer = (uchar *)my_malloc(PSI_INSTRUMENT_ME, offset, MYF(MY_WME)); + if (buffer == NULL) { ndb_log_error("get_ndb_blobs_value, my_malloc(%u) failed", offset); DBUG_RETURN(-1); } - buffer_size= offset; + buffer_size = offset; } } DBUG_RETURN(0); } - /* @brief Wait until the last committed epoch from the session enters the binlog. Wait a maximum of 30 seconds. This wait is necessary in @@ -351,12 +324,10 @@ get_ndb_blobs_value(TABLE* table, NdbValue* value_array, in RESET MASTER before clearing ndbcluster's binlog index. @param thd Thread handle to wait for its changes to enter the binlog. 
*/ -static void ndbcluster_binlog_wait(THD *thd) -{ +static void ndbcluster_binlog_wait(THD *thd) { DBUG_ENTER("ndbcluster_binlog_wait"); - if (!ndb_binlog_running) - { + if (!ndb_binlog_running) { DBUG_PRINT("exit", ("Not writing binlog -> nothing to wait for")); DBUG_VOID_RETURN; } @@ -366,16 +337,14 @@ static void ndbcluster_binlog_wait(THD *thd) thd_sql_command(thd) == SQLCOM_FLUSH || thd_sql_command(thd) == SQLCOM_RESET); - if (thd->system_thread == SYSTEM_THREAD_NDBCLUSTER_BINLOG) - { + if (thd->system_thread == SYSTEM_THREAD_NDBCLUSTER_BINLOG) { // Binlog Injector thread should not wait for itself DBUG_PRINT("exit", ("binlog injector should not wait for itself")); DBUG_VOID_RETURN; } Thd_ndb *thd_ndb = get_thd_ndb(thd); - if (!thd_ndb) - { + if (!thd_ndb) { // Thread has not used NDB before, no need for waiting DBUG_PRINT("exit", ("Thread has not used NDB, nothing to wait for")); DBUG_VOID_RETURN; @@ -393,13 +362,12 @@ static void ndbcluster_binlog_wait(THD *thd) // Wait until the last committed epoch from the session enters Binlog. // Break any possible deadlock after 30s. - int count = 30; // seconds + int count = 30; // seconds mysql_mutex_lock(&injector_data_mutex); const Uint64 start_handled_epoch = ndb_latest_handled_binlog_epoch; while (!thd->killed && count && ndb_binlog_running && (ndb_latest_handled_binlog_epoch == 0 || - ndb_latest_handled_binlog_epoch < session_last_committed_epoch)) - { + ndb_latest_handled_binlog_epoch < session_last_committed_epoch)) { count--; struct timespec abstime; set_timespec(&abstime, 1); @@ -407,17 +375,17 @@ static void ndbcluster_binlog_wait(THD *thd) } mysql_mutex_unlock(&injector_data_mutex); - if (count == 0) - { - ndb_log_warning("Thread id %u timed out (30s) waiting for epoch %u/%u " - "to be handled. Progress : %u/%u -> %u/%u.", - thd->thread_id(), - Uint32((session_last_committed_epoch >> 32) & 0xffffffff), - Uint32(session_last_committed_epoch & 0xffffffff), - Uint32((start_handled_epoch >> 32) & 0xffffffff), - Uint32(start_handled_epoch & 0xffffffff), - Uint32((ndb_latest_handled_binlog_epoch >> 32) & 0xffffffff), - Uint32(ndb_latest_handled_binlog_epoch & 0xffffffff)); + if (count == 0) { + ndb_log_warning( + "Thread id %u timed out (30s) waiting for epoch %u/%u " + "to be handled. 
Progress : %u/%u -> %u/%u.", + thd->thread_id(), + Uint32((session_last_committed_epoch >> 32) & 0xffffffff), + Uint32(session_last_committed_epoch & 0xffffffff), + Uint32((start_handled_epoch >> 32) & 0xffffffff), + Uint32(start_handled_epoch & 0xffffffff), + Uint32((ndb_latest_handled_binlog_epoch >> 32) & 0xffffffff), + Uint32(ndb_latest_handled_binlog_epoch & 0xffffffff)); // Fail on wait/deadlock timeout in debug compile DBUG_ASSERT(false); @@ -431,33 +399,29 @@ static void ndbcluster_binlog_wait(THD *thd) Setup THD object 'Inspired' from ha_ndbcluster.cc : ndb_util_thread_func */ -THD * -ndb_create_thd(char * stackptr) -{ +THD *ndb_create_thd(char *stackptr) { DBUG_ENTER("ndb_create_thd"); - THD * thd= new THD; /* note that constructor of THD uses DBUG_ */ - if (thd == 0) - { + THD *thd = new THD; /* note that constructor of THD uses DBUG_ */ + if (thd == 0) { DBUG_RETURN(0); } THD_CHECK_SENTRY(thd); - thd->thread_stack= stackptr; /* remember where our stack is */ + thd->thread_stack = stackptr; /* remember where our stack is */ thd->store_globals(); thd->init_query_mem_roots(); thd->set_command(COM_DAEMON); - thd->system_thread= SYSTEM_THREAD_NDBCLUSTER_BINLOG; + thd->system_thread = SYSTEM_THREAD_NDBCLUSTER_BINLOG; thd->get_protocol_classic()->set_client_capabilities(0); - thd->lex->start_transaction_opt= 0; + thd->lex->start_transaction_opt = 0; thd->security_context()->skip_grants(); - CHARSET_INFO *charset_connection= get_charset_by_csname("utf8", - MY_CS_PRIMARY, - MYF(MY_WME)); - thd->variables.character_set_client= charset_connection; - thd->variables.character_set_results= charset_connection; - thd->variables.collation_connection= charset_connection; + CHARSET_INFO *charset_connection = + get_charset_by_csname("utf8", MY_CS_PRIMARY, MYF(MY_WME)); + thd->variables.character_set_client = charset_connection; + thd->variables.character_set_results = charset_connection; + thd->variables.collation_connection = charset_connection; thd->update_charset(); DBUG_RETURN(thd); } @@ -516,7 +480,7 @@ static int ndbcluster_binlog_index_purge_file(THD *thd, const char *filename) { if (ndbcluster_binlog_index_remove_file(tmp_thd, filename)) { // Failed to delete rows from table ndb_log_warning("NDB Binlog: Failed to purge: '%s'", filename); - error = 1; // Failed + error = 1; // Failed } delete tmp_thd; @@ -537,12 +501,10 @@ static int ndbcluster_binlog_index_purge_file(THD *thd, const char *filename) { -- privilege tables have been modified */ -static void -ndbcluster_binlog_log_query(handlerton*, THD *thd, - enum_binlog_command binlog_command, - const char *query, uint query_length, - const char *db, const char*) -{ +static void ndbcluster_binlog_log_query(handlerton *, THD *thd, + enum_binlog_command binlog_command, + const char *query, uint query_length, + const char *db, const char *) { DBUG_ENTER("ndbcluster_binlog_log_query"); DBUG_PRINT("enter", ("binlog_command: %d, db: '%s', query: '%s'", binlog_command, db, query)); @@ -553,8 +515,7 @@ ndbcluster_binlog_log_query(handlerton*, THD *thd, Ndb_schema_dist_client schema_dist_client(thd); - if (!schema_dist_client.prepare(db, "")) - { + if (!schema_dist_client.prepare(db, "")) { // Could not prepare the schema distribution client // NOTE! 
As there is no way to return error, this may have to be // revisited, the prepare should be done @@ -566,8 +527,8 @@ ndbcluster_binlog_log_query(handlerton*, THD *thd, unsigned int id = schema_dist_client.unique_id(); unsigned int version = schema_dist_client.unique_version(); - const bool result = schema_dist_client.create_db(query, query_length, db, - id, version); + const bool result = + schema_dist_client.create_db(query, query_length, db, id, version); if (result) { // Update the schema with the generated id and version but skip // committing the change in DD. Commit will be done by the caller. @@ -585,8 +546,7 @@ ndbcluster_binlog_log_query(handlerton*, THD *thd, Ndb_schema_dist_client schema_dist_client(thd); - if (!schema_dist_client.prepare(db, "")) - { + if (!schema_dist_client.prepare(db, "")) { // Could not prepare the schema distribution client // NOTE! As there is no way to return error, this may have to be // revisited, the prepare should be done @@ -598,8 +558,8 @@ ndbcluster_binlog_log_query(handlerton*, THD *thd, unsigned int id = schema_dist_client.unique_id(); unsigned int version = schema_dist_client.unique_version(); - const bool result = schema_dist_client.alter_db(query, query_length, db, - id, version); + const bool result = + schema_dist_client.alter_db(query, query_length, db, id, version); if (result) { // Update the schema with the generated id and version but skip // committing the change in DD. Commit will be done by the caller. @@ -625,14 +585,12 @@ ndbcluster_binlog_log_query(handlerton*, THD *thd, DBUG_VOID_RETURN; } -static void -ndbcluster_acl_notify(THD *thd, const Acl_change_notification * notice) -{ +static void ndbcluster_acl_notify(THD *thd, + const Acl_change_notification *notice) { DBUG_TRACE; const std::string &query = notice->get_query(); - if(! check_ndb_in_thd(thd)) - { + if (!check_ndb_in_thd(thd)) { ndb_log_error("Privilege distribution failed to seize thd_ndb"); return; } @@ -640,46 +598,41 @@ ndbcluster_acl_notify(THD *thd, const Acl_change_notification * notice) /* If this is the binlog thread, the ACL change has arrived via schema distribution and requires no further action. */ - if(get_thd_ndb(thd)->check_option(Thd_ndb::NO_LOG_SCHEMA_OP)) - { + if (get_thd_ndb(thd)->check_option(Thd_ndb::NO_LOG_SCHEMA_OP)) { return; } { ndb_log_verbose(9, "ACL considering: %s", query.c_str()); std::string user_list; - bool dist_use_db = false; // Prepend "use [db];" to statement - bool dist_refresh = false; // All participants must refresh their caches + bool dist_use_db = false; // Prepend "use [db];" to statement + bool dist_refresh = false; // All participants must refresh their caches Ndb_stored_grants::Strategy strategy = - Ndb_stored_grants::handle_local_acl_change(thd, notice, &user_list, - &dist_use_db, &dist_refresh); + Ndb_stored_grants::handle_local_acl_change(thd, notice, &user_list, + &dist_use_db, &dist_refresh); Ndb_schema_dist_client schema_dist_client(thd); - if(strategy == Ndb_stored_grants::Strategy::ERROR) - { + if (strategy == Ndb_stored_grants::Strategy::ERROR) { ndb_log_error("Not distributing ACL change after error."); return; } - if(strategy == Ndb_stored_grants::Strategy::NONE) - { + if (strategy == Ndb_stored_grants::Strategy::NONE) { ndb_log_verbose(9, "ACL change distribution: NONE"); return; } - const unsigned int & node_id = g_ndb_cluster_connection->node_id(); - if (!
schema_dist_client.prepare_acl_change(node_id)) - { + const unsigned int &node_id = g_ndb_cluster_connection->node_id(); + if (!schema_dist_client.prepare_acl_change(node_id)) { ndb_log_error("Failed to distribute '%s' (Failed prepare)", query.c_str()); return; } - if(strategy == Ndb_stored_grants::Strategy::SNAPSHOT) - { + if (strategy == Ndb_stored_grants::Strategy::SNAPSHOT) { ndb_log_verbose(9, "ACL change distribution: SNAPSHOT"); - if(! schema_dist_client.acl_notify(user_list)) + if (!schema_dist_client.acl_notify(user_list)) ndb_log_error("Failed to distribute '%s' (SNAPSHOT)", query.c_str()); return; } @@ -698,13 +651,11 @@ ndbcluster_acl_notify(THD *thd, const Acl_change_notification * notice) - wait for binlog thread to shutdown */ -int ndbcluster_binlog_end() -{ +int ndbcluster_binlog_end() { DBUG_ENTER("ndbcluster_binlog_end"); - if (ndbcluster_binlog_inited) - { - ndbcluster_binlog_inited= 0; + if (ndbcluster_binlog_inited) { + ndbcluster_binlog_inited = 0; ndb_binlog_thread.stop(); ndb_binlog_thread.deinit(); @@ -720,10 +671,8 @@ int ndbcluster_binlog_end() /***************************************************************** functions called from slave sql client threads ****************************************************************/ -static void ndbcluster_reset_slave(THD *thd) -{ - if (!ndb_binlog_running) - return; +static void ndbcluster_reset_slave(THD *thd) { + if (!ndb_binlog_running) return; DBUG_ENTER("ndbcluster_reset_slave"); @@ -732,8 +681,7 @@ static void ndbcluster_reset_slave(THD *thd) - if table does not exist ignore the error as it is a consistent behavior */ - if (opt_ndb_clear_apply_status) - { + if (opt_ndb_clear_apply_status) { Ndb_local_connection mysqld(thd); const bool ignore_no_such_table = true; if (mysqld.delete_rows(Ndb_apply_status_table::DB_NAME, @@ -749,141 +697,121 @@ static void ndbcluster_reset_slave(THD *thd) DBUG_VOID_RETURN; } - -static int ndbcluster_binlog_func(handlerton*, THD *thd, - enum_binlog_func fn, - void *arg) -{ +static int ndbcluster_binlog_func(handlerton *, THD *thd, enum_binlog_func fn, + void *arg) { DBUG_ENTER("ndbcluster_binlog_func"); - int res= 0; - switch(fn) - { - case BFN_RESET_LOGS: - break; - case BFN_RESET_SLAVE: - ndbcluster_reset_slave(thd); - break; - case BFN_BINLOG_WAIT: - ndbcluster_binlog_wait(thd); - break; - case BFN_BINLOG_END: - res= ndbcluster_binlog_end(); - break; - case BFN_BINLOG_PURGE_FILE: - res= ndbcluster_binlog_index_purge_file(thd, (const char *)arg); - break; + int res = 0; + switch (fn) { + case BFN_RESET_LOGS: + break; + case BFN_RESET_SLAVE: + ndbcluster_reset_slave(thd); + break; + case BFN_BINLOG_WAIT: + ndbcluster_binlog_wait(thd); + break; + case BFN_BINLOG_END: + res = ndbcluster_binlog_end(); + break; + case BFN_BINLOG_PURGE_FILE: + res = ndbcluster_binlog_index_purge_file(thd, (const char *)arg); + break; } DBUG_RETURN(res); } -void ndbcluster_binlog_init(handlerton* h) -{ - h->binlog_func= ndbcluster_binlog_func; - h->binlog_log_query= ndbcluster_binlog_log_query; - h->acl_notify= ndbcluster_acl_notify; +void ndbcluster_binlog_init(handlerton *h) { + h->binlog_func = ndbcluster_binlog_func; + h->binlog_log_query = ndbcluster_binlog_log_query; + h->acl_notify = ndbcluster_acl_notify; } - /* ndb_notify_tables_writable - + Called to notify any waiting threads that Ndb tables are now writable -*/ -static void ndb_notify_tables_writable() -{ +*/ +static void ndb_notify_tables_writable() { mysql_mutex_lock(&ndbcluster_mutex); - ndb_setup_complete= 1; + ndb_setup_complete = 1; 
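// Note: the flag above is set while holding ndbcluster_mutex; the broadcast
// below wakes any thread waiting on ndbcluster_cond so it can re-check
// ndb_setup_complete under the mutex.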
mysql_cond_broadcast(&ndbcluster_cond); mysql_mutex_unlock(&ndbcluster_mutex); } - -static bool -migrate_table_with_old_extra_metadata(THD *thd, Ndb *ndb, - const char *schema_name, - const char *table_name, - void* unpacked_data, - Uint32 unpacked_len, - bool force_overwrite) -{ +static bool migrate_table_with_old_extra_metadata( + THD *thd, Ndb *ndb, const char *schema_name, const char *table_name, + void *unpacked_data, Uint32 unpacked_len, bool force_overwrite) { #ifndef BUG27543602 // Temporary workaround for Bug 27543602 if (strcmp("mysql", schema_name) == 0 && (strcmp("ndb_index_stat_head", table_name) == 0 || - strcmp("ndb_index_stat_sample", table_name) == 0)) - { - ndb_log_info("Skipped installation of the ndb_index_stat table '%s.%s'. " - "The table can still be accessed using NDB tools", - schema_name, table_name); + strcmp("ndb_index_stat_sample", table_name) == 0)) { + ndb_log_info( + "Skipped installation of the ndb_index_stat table '%s.%s'. " + "The table can still be accessed using NDB tools", + schema_name, table_name); return true; } #endif // Migrate tables that have old metadata to data dictionary // using on the fly translation - ndb_log_info("Table '%s.%s' has obsolete extra metadata. " - "The table is installed into the data dictionary " - "by translating the old metadata", schema_name, - table_name); + ndb_log_info( + "Table '%s.%s' has obsolete extra metadata. " + "The table is installed into the data dictionary " + "by translating the old metadata", + schema_name, table_name); - const uchar* frm_data = static_cast<const uchar*>(unpacked_data); + const uchar *frm_data = static_cast<const uchar *>(unpacked_data); // Install table in DD Ndb_dd_client dd_client(thd); // First acquire exclusive MDL lock on schema and table - if (!dd_client.mdl_locks_acquire_exclusive(schema_name, table_name)) - { - ndb_log_error("Failed to acquire MDL lock on table '%s.%s'", - schema_name, table_name); + if (!dd_client.mdl_locks_acquire_exclusive(schema_name, table_name)) { + ndb_log_error("Failed to acquire MDL lock on table '%s.%s'", schema_name, + table_name); return false; } - const bool migrate_result= - dd_client.migrate_table(schema_name, table_name, frm_data, - unpacked_len, force_overwrite); + const bool migrate_result = dd_client.migrate_table( + schema_name, table_name, frm_data, unpacked_len, force_overwrite); - if (!migrate_result) - { + if (!migrate_result) { // Failed to create DD entry for table - ndb_log_error("Failed to create entry in DD for table '%s.%s'", - schema_name, table_name); + ndb_log_error("Failed to create entry in DD for table '%s.%s'", schema_name, + table_name); return false; } // Check if table needs to be set up for binlogging or // schema distribution - const dd::Table* table_def; + const dd::Table *table_def; // Acquire MDL lock on table - if (!dd_client.mdl_lock_table(schema_name, table_name)) - { - ndb_log_error("Failed to acquire MDL lock for table '%s.%s'", - schema_name, table_name); + if (!dd_client.mdl_lock_table(schema_name, table_name)) { + ndb_log_error("Failed to acquire MDL lock for table '%s.%s'", schema_name, + table_name); return false; } - if (!dd_client.get_table(schema_name, table_name, &table_def)) - { - ndb_log_error("Failed to open table '%s.%s' from DD", - schema_name, table_name); + if (!dd_client.get_table(schema_name, table_name, &table_def)) { + ndb_log_error("Failed to open table '%s.%s' from DD", schema_name, + table_name); return false; } - if (ndbcluster_binlog_setup_table(thd, ndb, - schema_name, table_name, - table_def) != 0)
ndb_log_error("Failed to setup binlog for table '%s.%s'", - schema_name, table_name); + if (ndbcluster_binlog_setup_table(thd, ndb, schema_name, table_name, + table_def) != 0) { + ndb_log_error("Failed to setup binlog for table '%s.%s'", schema_name, + table_name); return false; } return true; } - /** Utility class encapsulating the code which setup the 'ndb binlog thread' to be "connected" to the cluster. @@ -896,14 +824,13 @@ migrate_table_with_old_extra_metadata(THD *thd, Ndb *ndb, */ class Ndb_binlog_setup { - - THD* const m_thd; + THD *const m_thd; // Enum defining database ddl types enum Ndb_schema_ddl_type : unsigned short { - SCHEMA_DDL_CREATE= 0, - SCHEMA_DDL_ALTER= 1, - SCHEMA_DDL_DROP= 2 + SCHEMA_DDL_CREATE = 0, + SCHEMA_DDL_ALTER = 1, + SCHEMA_DDL_DROP = 2 }; // A tuple to hold the values read from ndb_schema table @@ -1262,40 +1189,30 @@ class Ndb_binlog_setup { DBUG_RETURN(true); } - bool - remove_table_from_dd(const char* schema_name, - const char* table_name) - { + bool remove_table_from_dd(const char *schema_name, const char *table_name) { Ndb_dd_client dd_client(m_thd); - if (!dd_client.mdl_locks_acquire_exclusive(schema_name, table_name)) - { + if (!dd_client.mdl_locks_acquire_exclusive(schema_name, table_name)) { return false; } - if (!dd_client.remove_table(schema_name, table_name)) - { + if (!dd_client.remove_table(schema_name, table_name)) { return false; } dd_client.commit(); - return true; // OK + return true; // OK } - - bool - remove_deleted_ndb_tables_from_dd() - { - + bool remove_deleted_ndb_tables_from_dd() { ndb_log_verbose(50, "Looking for deleted tables..."); Ndb_dd_client dd_client(m_thd); // Fetch list of schemas in DD std::vector schema_names; - if (!dd_client.fetch_schema_names(&schema_names)) - { + if (!dd_client.fetch_schema_names(&schema_names)) { ndb_log_error("Failed to fetch schema names from DD"); return false; } @@ -1304,12 +1221,10 @@ class Ndb_binlog_setup { // Iterate over each schema and remove deleted NDB tables // from the DD one by one - for (const auto& name : schema_names) - { - const char* schema_name = name.c_str(); + for (const auto &name : schema_names) { + const char *schema_name = name.c_str(); // Lock the schema in DD - if (!dd_client.mdl_lock_schema(schema_name)) - { + if (!dd_client.mdl_lock_schema(schema_name)) { ndb_log_error("Failed to acquire MDL lock on schema '%s'", schema_name); return false; } @@ -1320,13 +1235,13 @@ class Ndb_binlog_setup { // table names std::unordered_set ndb_tables_in_DD; if (!dd_client.get_ndb_table_names_in_schema(schema_name, - &ndb_tables_in_DD)) - { + &ndb_tables_in_DD)) { ndb_log_error("Failed to get list of NDB tables in schema '%s' from DD", schema_name); return false; } - ndb_log_verbose(50, "Found %zu NDB tables in DD", ndb_tables_in_DD.size()); + ndb_log_verbose(50, "Found %zu NDB tables in DD", + ndb_tables_in_DD.size()); // Fetch list of NDB tables in NDB std::unordered_set ndb_tables_in_NDB; @@ -1334,8 +1249,10 @@ class Ndb_binlog_setup { if (!ndb_get_table_names_in_schema(ndb->getDictionary(), schema_name, ndb_tables_in_NDB)) { log_NDB_error(ndb->getDictionary()->getNdbError()); - ndb_log_error("Failed to get list of NDB tables in schema '%s' from " - "NDB", schema_name); + ndb_log_error( + "Failed to get list of NDB tables in schema '%s' from " + "NDB", + schema_name); return false; } @@ -1345,14 +1262,11 @@ class Ndb_binlog_setup { // Iterate over all NDB tables found in DD. 
If they // don't exist in NDB anymore, then remove the table // from DD - for (const auto& ndb_table_name : ndb_tables_in_DD) - { - if (ndb_tables_in_NDB.find(ndb_table_name) == ndb_tables_in_NDB.end()) - { - ndb_log_info("Removing table '%s.%s'", - schema_name, ndb_table_name.c_str()); - if (!remove_table_from_dd(schema_name, ndb_table_name.c_str())) - { + for (const auto &ndb_table_name : ndb_tables_in_DD) { + if (ndb_tables_in_NDB.find(ndb_table_name) == ndb_tables_in_NDB.end()) { + ndb_log_info("Removing table '%s.%s'", schema_name, + ndb_table_name.c_str()); + if (!remove_table_from_dd(schema_name, ndb_table_name.c_str())) { ndb_log_error("Failed to remove table '%s.%s' from DD", schema_name, ndb_table_name.c_str()); return false; @@ -1364,61 +1278,48 @@ class Ndb_binlog_setup { ndb_log_verbose(50, "Done looking for deleted tables!"); return true; - } - - bool - install_table_from_NDB(THD *thd, - const char *schema_name, - const char *table_name, - const NdbDictionary::Table* ndbtab, - bool force_overwrite = false) - { + bool install_table_from_NDB(THD *thd, const char *schema_name, + const char *table_name, + const NdbDictionary::Table *ndbtab, + bool force_overwrite = false) { DBUG_ENTER("install_table_from_NDB"); - DBUG_PRINT("enter", ("schema_name: %s, table_name: %s", - schema_name, table_name)); + DBUG_PRINT("enter", + ("schema_name: %s, table_name: %s", schema_name, table_name)); - Thd_ndb* thd_ndb = get_thd_ndb(thd); - Ndb* ndb = thd_ndb->ndb; + Thd_ndb *thd_ndb = get_thd_ndb(thd); + Ndb *ndb = thd_ndb->ndb; dd::sdi_t sdi; { Uint32 version; - void* unpacked_data; + void *unpacked_data; Uint32 unpacked_len; const int get_result = - ndbtab->getExtraMetadata(version, - &unpacked_data, &unpacked_len); - if (get_result != 0) - { - DBUG_PRINT("error", ("Could not get extra metadata, error: %d", - get_result)); + ndbtab->getExtraMetadata(version, &unpacked_data, &unpacked_len); + if (get_result != 0) { + DBUG_PRINT("error", + ("Could not get extra metadata, error: %d", get_result)); DBUG_RETURN(false); } - if (version != 1 && version != 2) - { + if (version != 1 && version != 2) { // Skip install of table which has unsupported extra metadata // versions - ndb_log_info("Skipping setup of table '%s.%s', it has " - "unsupported extra metadata version %d.", - schema_name, table_name, version); - return true; // Skipped + ndb_log_info( + "Skipping setup of table '%s.%s', it has " + "unsupported extra metadata version %d.", + schema_name, table_name, version); + return true; // Skipped } - if (version == 1) - { - const bool migrate_result= - migrate_table_with_old_extra_metadata(thd, ndb, - schema_name, - table_name, - unpacked_data, - unpacked_len, - force_overwrite); - - if (!migrate_result) - { + if (version == 1) { + const bool migrate_result = migrate_table_with_old_extra_metadata( + thd, ndb, schema_name, table_name, unpacked_data, unpacked_len, + force_overwrite); + + if (!migrate_result) { free(unpacked_data); DBUG_RETURN(false); } @@ -1427,7 +1328,7 @@ class Ndb_binlog_setup { DBUG_RETURN(true); } - sdi.assign(static_cast<const char*>(unpacked_data), unpacked_len); + sdi.assign(static_cast<const char *>(unpacked_data), unpacked_len); free(unpacked_data); } @@ -1436,8 +1337,7 @@ class Ndb_binlog_setup { Ndb_dd_client dd_client(thd); // First acquire exclusive MDL lock on schema and table - if (!dd_client.mdl_locks_acquire_exclusive(schema_name, table_name)) - { + if (!dd_client.mdl_locks_acquire_exclusive(schema_name, table_name)) { ndb_log_error("Couldn't acquire exclusive metadata locks on '%s.%s'",
schema_name, table_name); DBUG_RETURN(false); @@ -1445,107 +1345,86 @@ class Ndb_binlog_setup { const std::string tablespace_name = ndb_table_tablespace_name(ndb->getDictionary(), ndbtab); - if (!tablespace_name.empty()) - { + if (!tablespace_name.empty()) { // Acquire IX MDL on tablespace - if (!dd_client.mdl_lock_tablespace(tablespace_name.c_str(), true)) - { + if (!dd_client.mdl_lock_tablespace(tablespace_name.c_str(), true)) { ndb_log_error("Couldn't acquire metadata lock on tablespace '%s'", tablespace_name.c_str()); DBUG_RETURN(false); } } - if (!dd_client.install_table(schema_name, table_name, - sdi, - ndbtab->getObjectId(), - ndbtab->getObjectVersion(), - ndbtab->getPartitionCount(), - tablespace_name, - force_overwrite)) - { + if (!dd_client.install_table( + schema_name, table_name, sdi, ndbtab->getObjectId(), + ndbtab->getObjectVersion(), ndbtab->getPartitionCount(), + tablespace_name, force_overwrite)) { // Failed to install table - ndb_log_warning("Failed to install table '%s.%s'", - schema_name, table_name); + ndb_log_warning("Failed to install table '%s.%s'", schema_name, + table_name); DBUG_RETURN(false); } - const dd::Table* table_def; - if (!dd_client.get_table(schema_name, table_name, &table_def)) - { + const dd::Table *table_def; + if (!dd_client.get_table(schema_name, table_name, &table_def)) { ndb_log_error("Couldn't open table '%s.%s' from DD after install", schema_name, table_name); DBUG_RETURN(false); } // Check if binlogging should be setup for this table - if (ndbcluster_binlog_setup_table(thd, ndb, - schema_name, table_name, - table_def)) - { + if (ndbcluster_binlog_setup_table(thd, ndb, schema_name, table_name, + table_def)) { DBUG_RETURN(false); } dd_client.commit(); - DBUG_RETURN(true); // OK + DBUG_RETURN(true); // OK } - - void - log_NDB_error(const NdbError& ndb_error) const - { + void log_NDB_error(const NdbError &ndb_error) const { // Display error code and message returned by NDB - ndb_log_error("Got error '%d: %s' from NDB", - ndb_error.code, ndb_error.message); + ndb_log_error("Got error '%d: %s' from NDB", ndb_error.code, + ndb_error.message); } - - bool - synchronize_table(const char* schema_name, - const char* table_name) - { + bool synchronize_table(const char *schema_name, const char *table_name) { Ndb *ndb = get_thd_ndb(m_thd)->ndb; - ndb_log_verbose(1, - "Synchronizing table '%s.%s'", - schema_name, table_name); + ndb_log_verbose(1, "Synchronizing table '%s.%s'", schema_name, table_name); Ndb_table_guard ndbtab_g(ndb, schema_name, table_name); - const NDBTAB *ndbtab= ndbtab_g.get_table(); - if (!ndbtab) - { + const NDBTAB *ndbtab = ndbtab_g.get_table(); + if (!ndbtab) { // Failed to open the table from NDB log_NDB_error(ndb->getDictionary()->getNdbError()); - ndb_log_error("Failed to setup table '%s.%s'", - schema_name, table_name); + ndb_log_error("Failed to setup table '%s.%s'", schema_name, table_name); - // Failed, table was listed but could not be opened, retry + // Failed, table was listed but could not be opened, retry return false; } - if (ndbtab->getFrmLength() == 0) - { + if (ndbtab->getFrmLength() == 0) { ndb_log_verbose(1, "Skipping setup of table '%s.%s', no extra " - "metadata", schema_name, table_name); - return true; // Ok, table skipped + "metadata", + schema_name, table_name); + return true; // Ok, table skipped } { Uint32 version; - void* unpacked_data; + void *unpacked_data; Uint32 unpacked_length; const int get_result = - ndbtab->getExtraMetadata(version, - &unpacked_data, &unpacked_length); + 
ndbtab->getExtraMetadata(version, &unpacked_data, &unpacked_length); - if (get_result != 0) - { + if (get_result != 0) { // Header corrupt or failed to unpack - ndb_log_error("Failed to setup table '%s.%s', could not " - "unpack extra metadata, error: %d", - schema_name, table_name, get_result); + ndb_log_error( + "Failed to setup table '%s.%s', could not " + "unpack extra metadata, error: %d", + schema_name, table_name, get_result); return false; } @@ -1555,55 +1434,51 @@ class Ndb_binlog_setup { Ndb_dd_client dd_client(m_thd); // Acquire MDL lock on table - if (!dd_client.mdl_lock_table(schema_name, table_name)) - { - ndb_log_error("Failed to acquire MDL lock for table '%s.%s'", - schema_name, table_name); + if (!dd_client.mdl_lock_table(schema_name, table_name)) { + ndb_log_error("Failed to acquire MDL lock for table '%s.%s'", schema_name, + table_name); return false; } - const dd::Table* existing; - if (!dd_client.get_table(schema_name, table_name, &existing)) - { - ndb_log_error("Failed to open table '%s.%s' from DD", - schema_name, table_name); + const dd::Table *existing; + if (!dd_client.get_table(schema_name, table_name, &existing)) { + ndb_log_error("Failed to open table '%s.%s' from DD", schema_name, + table_name); return false; } - if (existing == nullptr) - { + if (existing == nullptr) { ndb_log_info("Table '%s.%s' does not exist in DD, installing...", schema_name, table_name); - if (!install_table_from_NDB(m_thd, schema_name, table_name, - ndbtab, false /* need overwrite */)) - { + if (!install_table_from_NDB(m_thd, schema_name, table_name, ndbtab, + false /* need overwrite */)) { // Failed to install into DD or setup binlogging - ndb_log_error("Failed to install table '%s.%s'", - schema_name, table_name); + ndb_log_error("Failed to install table '%s.%s'", schema_name, + table_name); return false; } - return true; // OK + return true; // OK } // Skip if table exists in DD, but is in other engine const dd::String_type engine = ndb_dd_table_get_engine(existing); - if (engine != "ndbcluster") - { - ndb_log_info("Skipping table '%s.%s' with same name which is in " - "engine '%s'", - schema_name, table_name, - engine.c_str()); - return true; // Skipped + if (engine != "ndbcluster") { + ndb_log_info( + "Skipping table '%s.%s' with same name which is in " + "engine '%s'", + schema_name, table_name, engine.c_str()); + return true; // Skipped } int table_id, table_version; - if (!ndb_dd_table_get_object_id_and_version(existing, - table_id, table_version)) - { + if (!ndb_dd_table_get_object_id_and_version(existing, table_id, + table_version)) { // - ndb_log_error("Failed to extract id and version from table definition " - "for table '%s.%s'", schema_name, table_name); + ndb_log_error( + "Failed to extract id and version from table definition " + "for table '%s.%s'", + schema_name, table_name); DBUG_ASSERT(false); return false; } @@ -1611,53 +1486,45 @@ class Ndb_binlog_setup { // Check that latest version of table definition for this NDB table // is installed in DD if (ndbtab->getObjectId() != table_id || - ndbtab->getObjectVersion() != table_version) - { - ndb_log_info("Table '%s.%s' have different version in DD, reinstalling...", - schema_name, table_name); - if (!install_table_from_NDB(m_thd, schema_name, table_name, - ndbtab, true /* need overwrite */)) - { + ndbtab->getObjectVersion() != table_version) { + ndb_log_info( + "Table '%s.%s' have different version in DD, reinstalling...", + schema_name, table_name); + if (!install_table_from_NDB(m_thd, schema_name, table_name, ndbtab, 
+ true /* need overwrite */)) { // Failed to create table from NDB - ndb_log_error("Failed to install table '%s.%s' from NDB", - schema_name, table_name); + ndb_log_error("Failed to install table '%s.%s' from NDB", schema_name, + table_name); return false; } } // Check if table need to be setup for binlogging or // schema distribution - const dd::Table* table_def; - if (!dd_client.get_table(schema_name, table_name, &table_def)) - { - ndb_log_error("Failed to open table '%s.%s' from DD", - schema_name, table_name); + const dd::Table *table_def; + if (!dd_client.get_table(schema_name, table_name, &table_def)) { + ndb_log_error("Failed to open table '%s.%s' from DD", schema_name, + table_name); return false; } - if (ndbcluster_binlog_setup_table(m_thd, ndb, - schema_name, table_name, - table_def) != 0) - { - ndb_log_error("Failed to setup binlog for table '%s.%s'", - schema_name, table_name); + if (ndbcluster_binlog_setup_table(m_thd, ndb, schema_name, table_name, + table_def) != 0) { + ndb_log_error("Failed to setup binlog for table '%s.%s'", schema_name, + table_name); return false; } return true; } - - bool - synchronize_schema(const char* schema_name) - { + bool synchronize_schema(const char *schema_name) { Ndb_dd_client dd_client(m_thd); ndb_log_info("Synchronizing schema '%s'", schema_name); // Lock the schema in DD - if (!dd_client.mdl_lock_schema(schema_name)) - { + if (!dd_client.mdl_lock_schema(schema_name)) { ndb_log_error("Failed to acquire MDL lock on schema '%s'", schema_name); return false; } @@ -1666,8 +1533,7 @@ class Ndb_binlog_setup { std::unordered_set<std::string> ndb_tables_in_NDB; Ndb *ndb = get_thd_ndb(m_thd)->ndb; NdbDictionary::Dictionary *dict = ndb->getDictionary(); - if (!ndb_get_table_names_in_schema(dict, schema_name, ndb_tables_in_NDB)) - { + if (!ndb_get_table_names_in_schema(dict, schema_name, ndb_tables_in_NDB)) { log_NDB_error(dict->getNdbError()); ndb_log_error("Failed to get list of NDB tables in schema '%s' from NDB", schema_name); @@ -1675,12 +1541,10 @@ class Ndb_binlog_setup { } // Iterate over each table in NDB and synchronize them to DD - for (const auto ndb_table_name : ndb_tables_in_NDB) - { - if (!synchronize_table(schema_name, ndb_table_name.c_str())) - { - ndb_log_info("Failed to synchronize table '%s.%s'", - schema_name, ndb_table_name.c_str()); + for (const auto ndb_table_name : ndb_tables_in_NDB) { + if (!synchronize_table(schema_name, ndb_table_name.c_str())) { + ndb_log_info("Failed to synchronize table '%s.%s'", schema_name, + ndb_table_name.c_str()); continue; } } @@ -1688,27 +1552,19 @@ class Ndb_binlog_setup { return true; } - - bool - install_logfile_group_into_DD(const char* logfile_group_name, - NdbDictionary::LogfileGroup ndb_lfg, - const std::vector<std::string> &undofile_names, - bool force_overwrite) - { + bool install_logfile_group_into_DD( + const char *logfile_group_name, NdbDictionary::LogfileGroup ndb_lfg, + const std::vector<std::string> &undofile_names, bool force_overwrite) { Ndb_dd_client dd_client(m_thd); - if (!dd_client.mdl_lock_logfile_group_exclusive(logfile_group_name)) - { + if (!dd_client.mdl_lock_logfile_group_exclusive(logfile_group_name)) { ndb_log_error("MDL lock could not be acquired for logfile group '%s'", logfile_group_name); return false; } - if (!dd_client.install_logfile_group(logfile_group_name, - undofile_names, - ndb_lfg.getObjectId(), - ndb_lfg.getObjectVersion(), - force_overwrite)) - { + if (!dd_client.install_logfile_group( + logfile_group_name, undofile_names, ndb_lfg.getObjectId(), + ndb_lfg.getObjectVersion(), force_overwrite)) {
ndb_log_error("Logfile group '%s' could not be stored in DD", logfile_group_name); return false; @@ -1718,41 +1574,31 @@ class Ndb_binlog_setup { return true; } - - bool - compare_file_list(const std::vector& file_names_in_NDB, - const std::vector& file_names_in_DD) - { - if (file_names_in_NDB.size() != file_names_in_DD.size()) - { + bool compare_file_list(const std::vector &file_names_in_NDB, + const std::vector &file_names_in_DD) { + if (file_names_in_NDB.size() != file_names_in_DD.size()) { return false; } - for (const auto file_name : file_names_in_NDB) - { - if (std::find(file_names_in_DD.begin(), - file_names_in_DD.end(), - file_name) == file_names_in_DD.end()) - { + for (const auto file_name : file_names_in_NDB) { + if (std::find(file_names_in_DD.begin(), file_names_in_DD.end(), + file_name) == file_names_in_DD.end()) { return false; } } return true; } - - bool - synchronize_logfile_group(const char* logfile_group_name, - const std::unordered_set& lfg_in_DD) - { + bool synchronize_logfile_group( + const char *logfile_group_name, + const std::unordered_set &lfg_in_DD) { ndb_log_verbose(1, "Synchronizing logfile group '%s'", logfile_group_name); - Ndb* ndb = get_thd_ndb(m_thd)->ndb; + Ndb *ndb = get_thd_ndb(m_thd)->ndb; NdbDictionary::Dictionary *dict = ndb->getDictionary(); NdbDictionary::LogfileGroup ndb_lfg = dict->getLogfileGroup(logfile_group_name); - if (ndb_dict_check_NDB_error(dict)) - { + if (ndb_dict_check_NDB_error(dict)) { // Failed to open logfile group from NDB log_NDB_error(dict->getNdbError()); ndb_log_error("Failed to get logfile group '%s' from NDB", @@ -1761,24 +1607,22 @@ class Ndb_binlog_setup { } const auto lfg_position = lfg_in_DD.find(logfile_group_name); - if (lfg_position == lfg_in_DD.end()) - { + if (lfg_position == lfg_in_DD.end()) { // Logfile group exists only in NDB. 
Install into DD ndb_log_info("Logfile group '%s' does not exist in DD, installing..", logfile_group_name); std::vector<std::string> undofile_names; - if (!ndb_get_undofile_names(dict, logfile_group_name, undofile_names)) - { + if (!ndb_get_undofile_names(dict, logfile_group_name, undofile_names)) { log_NDB_error(dict->getNdbError()); - ndb_log_error("Failed to get undofiles assigned to logfile group '%s' " - "from NDB", logfile_group_name); + ndb_log_error( + "Failed to get undofiles assigned to logfile group '%s' " + "from NDB", + logfile_group_name); return false; } - if (!install_logfile_group_into_DD(logfile_group_name, - ndb_lfg, + if (!install_logfile_group_into_DD(logfile_group_name, ndb_lfg, undofile_names, - false /*force_overwrite*/)) - { + false /*force_overwrite*/)) { return false; } return true; @@ -1787,22 +1631,19 @@ class Ndb_binlog_setup { // Logfile group exists in DD Ndb_dd_client dd_client(m_thd); if (!dd_client.mdl_lock_logfile_group(logfile_group_name, - true /* intention_exclusive */)) - { + true /* intention_exclusive */)) { ndb_log_error("MDL lock could not be acquired for logfile group '%s'", logfile_group_name); return false; } const dd::Tablespace *existing = nullptr; - if (!dd_client.get_logfile_group(logfile_group_name, &existing)) - { + if (!dd_client.get_logfile_group(logfile_group_name, &existing)) { ndb_log_error("Failed to acquire logfile group '%s' from DD", logfile_group_name); return false; } - if (existing == nullptr) - { + if (existing == nullptr) { ndb_log_error("Logfile group '%s' does not exist in DD", logfile_group_name); DBUG_ASSERT(false); @@ -1811,12 +1652,12 @@ class Ndb_binlog_setup { // Check if the DD has the latest definition of the logfile group int object_id_in_DD, object_version_in_DD; - if (!ndb_dd_disk_data_get_object_id_and_version(existing, - object_id_in_DD, - object_version_in_DD)) - { - ndb_log_error("Could not extract id and version from the definition " - "of logfile group '%s'", logfile_group_name); + if (!ndb_dd_disk_data_get_object_id_and_version(existing, object_id_in_DD, + object_version_in_DD)) { + ndb_log_error( + "Could not extract id and version from the definition " + "of logfile group '%s'", + logfile_group_name); DBUG_ASSERT(false); return false; } @@ -1825,11 +1666,12 @@ class Ndb_binlog_setup { const int object_version_in_NDB = ndb_lfg.getObjectVersion(); std::vector<std::string> undofile_names_in_NDB; if (!ndb_get_undofile_names(dict, logfile_group_name, - undofile_names_in_NDB)) - { + undofile_names_in_NDB)) { log_NDB_error(dict->getNdbError()); - ndb_log_error("Failed to get undofiles assigned to logfile group '%s' " - "from NDB", logfile_group_name); + ndb_log_error( + "Failed to get undofiles assigned to logfile group '%s' " + "from NDB", + logfile_group_name); return false; } @@ -1846,16 +1688,14 @@ class Ndb_binlog_setup { // that's possible after an initial cluster restart.
In such // cases, it's possible the ids and versions match even though // they are entirely different objects - !compare_file_list(undofile_names_in_NDB, - undofile_names_in_DD)) - { - ndb_log_info("Logfile group '%s' has outdated version in DD, " - "reinstalling..", logfile_group_name); - if (!install_logfile_group_into_DD(logfile_group_name, - ndb_lfg, + !compare_file_list(undofile_names_in_NDB, undofile_names_in_DD)) { + ndb_log_info( + "Logfile group '%s' has outdated version in DD, " + "reinstalling..", + logfile_group_name); + if (!install_logfile_group_into_DD(logfile_group_name, ndb_lfg, undofile_names_in_NDB, - true /* force_overwrite */)) - { + true /* force_overwrite */)) { return false; } } @@ -1864,18 +1704,14 @@ class Ndb_binlog_setup { return true; } - - bool - synchronize_logfile_groups() - { + bool synchronize_logfile_groups() { ndb_log_info("Synchronizing logfile groups"); // Retrieve list of logfile groups from NDB std::unordered_set<std::string> lfg_in_NDB; Ndb *ndb = get_thd_ndb(m_thd)->ndb; const NdbDictionary::Dictionary *dict = ndb->getDictionary(); - if (!ndb_get_logfile_group_names(dict, lfg_in_NDB)) - { + if (!ndb_get_logfile_group_names(dict, lfg_in_NDB)) { log_NDB_error(dict->getNdbError()); ndb_log_error("Failed to fetch logfile group names from NDB"); return false; @@ -1885,40 +1721,33 @@ class Ndb_binlog_setup { // Retrieve list of logfile groups from DD std::unordered_set<std::string> lfg_in_DD; - if (!dd_client.fetch_ndb_logfile_group_names(lfg_in_DD)) - { + if (!dd_client.fetch_ndb_logfile_group_names(lfg_in_DD)) { ndb_log_error("Failed to fetch logfile group names from DD"); return false; } - for (const auto logfile_group_name : lfg_in_NDB) - { - if (!synchronize_logfile_group(logfile_group_name.c_str(), - lfg_in_DD)) - { - ndb_log_info("Failed to synchronize logfile group '%s'", - logfile_group_name.c_str()); + for (const auto logfile_group_name : lfg_in_NDB) { + if (!synchronize_logfile_group(logfile_group_name.c_str(), lfg_in_DD)) { + ndb_log_info("Failed to synchronize logfile group '%s'", + logfile_group_name.c_str()); } lfg_in_DD.erase(logfile_group_name); } // Any entries left in lfg_in_DD exist in DD alone and not NDB // and can be removed - for (const auto logfile_group_name : lfg_in_DD) - { + for (const auto logfile_group_name : lfg_in_DD) { ndb_log_info("Logfile group '%s' does not exist in NDB, dropping", logfile_group_name.c_str()); if (!dd_client.mdl_lock_logfile_group_exclusive( - logfile_group_name.c_str())) - { + logfile_group_name.c_str())) { ndb_log_info("MDL lock could not be acquired for logfile group '%s'", logfile_group_name.c_str()); ndb_log_info("Failed to synchronize logfile group '%s'", logfile_group_name.c_str()); continue; } - if (!dd_client.drop_logfile_group(logfile_group_name.c_str())) - { + if (!dd_client.drop_logfile_group(logfile_group_name.c_str())) { ndb_log_info("Failed to synchronize logfile group '%s'", logfile_group_name.c_str()); } @@ -1927,27 +1756,19 @@ class Ndb_binlog_setup { return true; } - - bool - install_tablespace_into_DD(const char* tablespace_name, - NdbDictionary::Tablespace ndb_tablespace, - const std::vector<std::string>& data_file_names, - bool force_overwrite) - { + bool install_tablespace_into_DD( + const char *tablespace_name, NdbDictionary::Tablespace ndb_tablespace, + const std::vector<std::string> &data_file_names, bool force_overwrite) { Ndb_dd_client dd_client(m_thd); - if (!dd_client.mdl_lock_tablespace_exclusive(tablespace_name)) - { + if (!dd_client.mdl_lock_tablespace_exclusive(tablespace_name)) { ndb_log_error("MDL lock could not be
acquired for tablespace '%s'", tablespace_name); return false; } - if (!dd_client.install_tablespace(tablespace_name, - data_file_names, - ndb_tablespace.getObjectId(), - ndb_tablespace.getObjectVersion(), - force_overwrite)) - { + if (!dd_client.install_tablespace( + tablespace_name, data_file_names, ndb_tablespace.getObjectId(), + ndb_tablespace.getObjectVersion(), force_overwrite)) { ndb_log_error("Tablespace '%s' could not be stored in DD", tablespace_name); return false; @@ -1957,45 +1778,37 @@ class Ndb_binlog_setup { return true; } - - bool - synchronize_tablespace(const char* tablespace_name, - const std::unordered_set& - tablespaces_in_DD) - { + bool synchronize_tablespace( + const char *tablespace_name, + const std::unordered_set &tablespaces_in_DD) { ndb_log_verbose(1, "Synchronizing tablespace '%s'", tablespace_name); - Ndb* ndb = get_thd_ndb(m_thd)->ndb; + Ndb *ndb = get_thd_ndb(m_thd)->ndb; NdbDictionary::Dictionary *dict = ndb->getDictionary(); const auto tablespace_position = tablespaces_in_DD.find(tablespace_name); NdbDictionary::Tablespace ndb_tablespace = dict->getTablespace(tablespace_name); - if (ndb_dict_check_NDB_error(dict)) - { + if (ndb_dict_check_NDB_error(dict)) { // Failed to open tablespace from NDB log_NDB_error(dict->getNdbError()); ndb_log_error("Failed to get tablespace '%s' from NDB", tablespace_name); return false; } - if (tablespace_position == tablespaces_in_DD.end()) - { + if (tablespace_position == tablespaces_in_DD.end()) { // Tablespace exists only in NDB. Install in DD ndb_log_info("Tablespace '%s' does not exist in DD, installing..", tablespace_name); std::vector datafile_names; - if (!ndb_get_datafile_names(dict, tablespace_name, datafile_names)) - { + if (!ndb_get_datafile_names(dict, tablespace_name, datafile_names)) { log_NDB_error(dict->getNdbError()); ndb_log_error("Failed to get datafiles assigned to tablespace '%s'", tablespace_name); return false; } - if (!install_tablespace_into_DD(tablespace_name, - ndb_tablespace, + if (!install_tablespace_into_DD(tablespace_name, ndb_tablespace, datafile_names, - false /*force_overwrite*/)) - { + false /*force_overwrite*/)) { return false; } return true; @@ -2004,22 +1817,19 @@ class Ndb_binlog_setup { // Tablespace exists in DD Ndb_dd_client dd_client(m_thd); if (!dd_client.mdl_lock_tablespace(tablespace_name, - true /* intention_exclusive */)) - { + true /* intention_exclusive */)) { ndb_log_error("MDL lock could not be acquired on tablespace '%s'", tablespace_name); return false; } const dd::Tablespace *existing = nullptr; - if (!dd_client.get_tablespace(tablespace_name, &existing)) - { + if (!dd_client.get_tablespace(tablespace_name, &existing)) { ndb_log_error("Failed to acquire tablespace '%s' from DD", tablespace_name); return false; } - if (existing == nullptr) - { + if (existing == nullptr) { ndb_log_error("Tablespace '%s' does not exist in DD", tablespace_name); DBUG_ASSERT(false); return false; @@ -2027,12 +1837,12 @@ class Ndb_binlog_setup { // Check if the DD has the latest definition of the tablespace int object_id_in_DD, object_version_in_DD; - if (!ndb_dd_disk_data_get_object_id_and_version(existing, - object_id_in_DD, - object_version_in_DD)) - { - ndb_log_error("Could not extract id and version from the definition " - "of tablespace '%s'", tablespace_name); + if (!ndb_dd_disk_data_get_object_id_and_version(existing, object_id_in_DD, + object_version_in_DD)) { + ndb_log_error( + "Could not extract id and version from the definition " + "of tablespace '%s'", + tablespace_name); 
DBUG_ASSERT(false); return false; } @@ -2040,11 +1850,12 @@ class Ndb_binlog_setup { const int object_id_in_NDB = ndb_tablespace.getObjectId(); const int object_version_in_NDB = ndb_tablespace.getObjectVersion(); std::vector<std::string> datafile_names_in_NDB; - if (!ndb_get_datafile_names(dict, tablespace_name, datafile_names_in_NDB)) - { + if (!ndb_get_datafile_names(dict, tablespace_name, datafile_names_in_NDB)) { log_NDB_error(dict->getNdbError()); - ndb_log_error("Failed to get datafiles assigned to tablespace '%s' from " - "NDB", tablespace_name); + ndb_log_error( + "Failed to get datafiles assigned to tablespace '%s' from " + "NDB", + tablespace_name); return false; } @@ -2061,16 +1872,14 @@ class Ndb_binlog_setup { // that's possible after an initial cluster restart. In such // cases, it's possible the ids and versions match even though // they are entirely different objects - !compare_file_list(datafile_names_in_NDB, - datafile_names_in_DD)) - { - ndb_log_info("Tablespace '%s' has outdated version in DD, " - "reinstalling..", tablespace_name); - if (!install_tablespace_into_DD(tablespace_name, - ndb_tablespace, + !compare_file_list(datafile_names_in_NDB, datafile_names_in_DD)) { + ndb_log_info( + "Tablespace '%s' has outdated version in DD, " + "reinstalling..", + tablespace_name); + if (!install_tablespace_into_DD(tablespace_name, ndb_tablespace, datafile_names_in_NDB, - true /* force_overwrite */)) - { + true /* force_overwrite */)) { return false; } } @@ -2079,18 +1888,14 @@ class Ndb_binlog_setup { return true; } - - bool - synchronize_tablespaces() - { + bool synchronize_tablespaces() { ndb_log_info("Synchronizing tablespaces"); // Retrieve list of tablespaces from NDB std::unordered_set<std::string> tablespaces_in_NDB; Ndb *ndb = get_thd_ndb(m_thd)->ndb; const NdbDictionary::Dictionary *dict = ndb->getDictionary(); - if (!ndb_get_tablespace_names(dict, tablespaces_in_NDB)) - { + if (!ndb_get_tablespace_names(dict, tablespaces_in_NDB)) { log_NDB_error(dict->getNdbError()); ndb_log_error("Failed to fetch tablespace names from NDB"); return false; @@ -2099,17 +1904,13 @@ class Ndb_binlog_setup { Ndb_dd_client dd_client(m_thd); // Retrieve list of tablespaces from DD std::unordered_set<std::string> tablespaces_in_DD; - if (!dd_client.fetch_ndb_tablespace_names(tablespaces_in_DD)) - { + if (!dd_client.fetch_ndb_tablespace_names(tablespaces_in_DD)) { ndb_log_error("Failed to fetch tablespace names from DD"); return false; } - for (const auto tablespace_name : tablespaces_in_NDB) - { - if (!synchronize_tablespace(tablespace_name.c_str(), - tablespaces_in_DD)) - { + for (const auto tablespace_name : tablespaces_in_NDB) { + if (!synchronize_tablespace(tablespace_name.c_str(), tablespaces_in_DD)) { ndb_log_warning("Failed to synchronize tablespace '%s'", tablespace_name.c_str()); } @@ -2118,20 +1919,17 @@ class Ndb_binlog_setup { // Any entries left in tablespaces_in_DD exist in DD alone and not NDB // and can be removed - for (const auto tablespace_name : tablespaces_in_DD) - { + for (const auto tablespace_name : tablespaces_in_DD) { ndb_log_info("Tablespace '%s' does not exist in NDB, dropping", tablespace_name.c_str()); - if (!dd_client.mdl_lock_tablespace_exclusive(tablespace_name.c_str())) - { + if (!dd_client.mdl_lock_tablespace_exclusive(tablespace_name.c_str())) { ndb_log_warning("MDL lock could not be acquired on tablespace '%s'", tablespace_name.c_str()); ndb_log_warning("Failed to synchronize tablespace '%s'", tablespace_name.c_str()); continue; } - if (!dd_client.drop_tablespace(tablespace_name.c_str())) - { + 
if (!dd_client.drop_tablespace(tablespace_name.c_str())) { ndb_log_warning("Failed to synchronize tablespace '%s'", tablespace_name.c_str()); } @@ -2140,29 +1938,22 @@ class Ndb_binlog_setup { return true; } - - bool - synchronize_data_dictionary(void) - { - + bool synchronize_data_dictionary(void) { ndb_log_info("Starting metadata synchronization..."); // Synchronize logfile groups and tablespaces - if (!synchronize_logfile_groups()) - { + if (!synchronize_logfile_groups()) { ndb_log_warning("Failed to synchronize logfile groups"); return false; } - if (!synchronize_tablespaces()) - { + if (!synchronize_tablespaces()) { ndb_log_warning("Failed to synchronize tablespaces"); return false; } // Synchronize databases - if (!synchronize_databases()) - { + if (!synchronize_databases()) { ndb_log_warning("Failed to synchronize databases"); return false; } @@ -2171,8 +1962,7 @@ class Ndb_binlog_setup { // Fetch list of schemas in DD std::vector<std::string> schema_names; - if (!dd_client.fetch_schema_names(&schema_names)) - { + if (!dd_client.fetch_schema_names(&schema_names)) { ndb_log_verbose(19, "Failed to synchronize metadata, could not " "fetch schema names"); @@ -2182,10 +1972,8 @@ class Ndb_binlog_setup { // Iterate over each schema and synchronize it one by one, // the assumption is that even large deployments have // a manageable number of tables in each schema - for (const auto name : schema_names) - { - if (!synchronize_schema(name.c_str())) - { + for (const auto name : schema_names) { + if (!synchronize_schema(name.c_str())) { ndb_log_info("Failed to synchronize metadata, schema: '%s'", name.c_str()); return false; @@ -2199,138 +1987,129 @@ class Ndb_binlog_setup { Ndb_binlog_setup(const Ndb_binlog_setup &) = delete; Ndb_binlog_setup operator=(const Ndb_binlog_setup &) = delete; -public: - Ndb_binlog_setup(THD *thd) : m_thd(thd) {} + public: + Ndb_binlog_setup(THD *thd) : m_thd(thd) {} - /** - @brief Setup this node to take part in schema distribution by creating the - ndbcluster util tables, perform schema synchronization and create references - to NDB_SHARE for all tables. + /** + @brief Setup this node to take part in schema distribution by creating the + ndbcluster util tables, perform schema synchronization and create references + to NDB_SHARE for all tables. + + @note See special error handling required when function fails. + + @return true if setup is succesful + @return false if setup fails. The creation of ndb_schema table and setup + of event operation registers this node in schema distribution protocol. Thus + this node is expected to reply to schema distribution events. Replying is + however not possible until setup has succesfully completed and the binlog + thread has started to handle events. If setup fails the event operation on + ndb_schema table and all other event operations must be removed in order to + signal unsubcribe and remove this node from schema distribution. + */ + bool setup(Thd_ndb *thd_ndb) { + /* Test binlog_setup on this mysqld being slower (than other mysqld) */ + if (DBUG_EVALUATE_IF("ndb_binlog_setup_slow", true, false)) { + ndb_log_info("'ndb_binlog_setup_slow' -> sleep"); + ndb_milli_sleep(10 * 1000); + ndb_log_info(" <- sleep"); + } - @note See special error handling required when function fails. + DBUG_ASSERT(ndb_apply_status_share == nullptr); - @return true if setup is succesful - @return false if setup fails. The creation of ndb_schema table and setup - of event operation registers this node in schema distribution protocol.
Thus - this node is expected to reply to schema distribution events. Replying is - however not possible until setup has succesfully completed and the binlog - thread has started to handle events. If setup fails the event operation on - ndb_schema table and all other event operations must be removed in order to - signal unsubcribe and remove this node from schema distribution. - */ - bool setup(Thd_ndb* thd_ndb) { - /* Test binlog_setup on this mysqld being slower (than other mysqld) */ - if (DBUG_EVALUATE_IF("ndb_binlog_setup_slow", true, false)) { - ndb_log_info("'ndb_binlog_setup_slow' -> sleep"); - ndb_milli_sleep(10 * 1000); - ndb_log_info(" <- sleep"); - } - - DBUG_ASSERT(ndb_apply_status_share == nullptr); - - // Protect the schema synchronization with GSL(Global Schema Lock) - Ndb_global_schema_lock_guard global_schema_lock_guard(m_thd); - if (global_schema_lock_guard.lock()) { - return false; - } - - // Remove deleted NDB tables - if (!remove_deleted_ndb_tables_from_dd()) { - return false; - } - - /* Give additional 'binlog_setup rights' to this Thd_ndb */ - Thd_ndb::Options_guard thd_ndb_options(thd_ndb); - thd_ndb_options.set(Thd_ndb::ALLOW_BINLOG_SETUP); - - Ndb_schema_dist_table schema_dist_table(thd_ndb); - if (!schema_dist_table.create_or_upgrade(m_thd, - opt_ndb_schema_dist_upgrade_allowed)) - return false; - - if (!Ndb_schema_dist::is_ready(m_thd)) { - ndb_log_verbose(50, "Schema distribution setup failed"); - return false; - } - - if (DBUG_EVALUATE_IF("ndb_binlog_setup_incomplete", true, false)) { - // Remove the dbug keyword, only fail first time and avoid infinite setup - DBUG_SET("-d,ndb_binlog_setup_incomplete"); - // Test handling of setup failing to complete *after* created 'ndb_schema' - ndb_log_info("Simulate 'ndb_binlog_setup_incomplete' -> return error"); - return false; - } - - Ndb_schema_result_table schema_result_table(thd_ndb); - if (!schema_result_table.create_or_upgrade(m_thd, - opt_ndb_schema_dist_upgrade_allowed)) - return false; - - Ndb_apply_status_table apply_status_table(thd_ndb); - if (!apply_status_table.create_or_upgrade(m_thd, true)) - return false; - - if (!synchronize_data_dictionary()) { - ndb_log_verbose(9, "Failed to synchronize DD with NDB"); - return false; - } - - // Check that references for ndb_apply_status has been created - DBUG_ASSERT(!ndb_binlog_running || ndb_apply_status_share); - - if(! Ndb_stored_grants::initialize(m_thd, thd_ndb)) { - ndb_log_warning("stored grants: failed to initialize"); - return false; - } - - Mutex_guard injector_mutex_g(injector_data_mutex); - ndb_binlog_tables_inited = true; - - return true; // Setup completed OK - } -}; + // Protect the schema synchronization with GSL(Global Schema Lock) + Ndb_global_schema_lock_guard global_schema_lock_guard(m_thd); + if (global_schema_lock_guard.lock()) { + return false; + } + // Remove deleted NDB tables + if (!remove_deleted_ndb_tables_from_dd()) { + return false; + } -/* - Defines for the expected order of columns in ndb_schema table, should - match the accepted table definition. 
-*/ -constexpr uint SCHEMA_DB_I = 0; -constexpr uint SCHEMA_NAME_I = 1; -constexpr uint SCHEMA_SLOCK_I = 2; -constexpr uint SCHEMA_QUERY_I = 3; -constexpr uint SCHEMA_NODE_ID_I = 4; -constexpr uint SCHEMA_EPOCH_I = 5; -constexpr uint SCHEMA_ID_I = 6; -constexpr uint SCHEMA_VERSION_I = 7; -constexpr uint SCHEMA_TYPE_I = 8; -constexpr uint SCHEMA_OP_ID_I = 9; + /* Give additional 'binlog_setup rights' to this Thd_ndb */ + Thd_ndb::Options_guard thd_ndb_options(thd_ndb); + thd_ndb_options.set(Thd_ndb::ALLOW_BINLOG_SETUP); -static void ndb_report_waiting(const char *key, - int the_time, - const char *op, - const char *obj) -{ - ulonglong ndb_latest_epoch= 0; - const char *proc_info= ""; - mysql_mutex_lock(&injector_event_mutex); - if (injector_ndb) - ndb_latest_epoch= injector_ndb->getLatestGCI(); - if (injector_thd) - proc_info= injector_thd->proc_info; - mysql_mutex_unlock(&injector_event_mutex); + Ndb_schema_dist_table schema_dist_table(thd_ndb); + if (!schema_dist_table.create_or_upgrade( + m_thd, opt_ndb_schema_dist_upgrade_allowed)) + return false; + + if (!Ndb_schema_dist::is_ready(m_thd)) { + ndb_log_verbose(50, "Schema distribution setup failed"); + return false; + } + + if (DBUG_EVALUATE_IF("ndb_binlog_setup_incomplete", true, false)) { + // Remove the dbug keyword, only fail first time and avoid infinite setup + DBUG_SET("-d,ndb_binlog_setup_incomplete"); + // Test handling of setup failing to complete *after* created 'ndb_schema' + ndb_log_info("Simulate 'ndb_binlog_setup_incomplete' -> return error"); + return false; + } + + Ndb_schema_result_table schema_result_table(thd_ndb); + if (!schema_result_table.create_or_upgrade( + m_thd, opt_ndb_schema_dist_upgrade_allowed)) + return false; + + Ndb_apply_status_table apply_status_table(thd_ndb); + if (!apply_status_table.create_or_upgrade(m_thd, true)) return false; + + if (!synchronize_data_dictionary()) { + ndb_log_verbose(9, "Failed to synchronize DD with NDB"); + return false; + } + + // Check that references for ndb_apply_status has been created + DBUG_ASSERT(!ndb_binlog_running || ndb_apply_status_share); + + if (!Ndb_stored_grants::initialize(m_thd, thd_ndb)) { + ndb_log_warning("stored grants: failed to initialize"); + return false; + } + + Mutex_guard injector_mutex_g(injector_data_mutex); + ndb_binlog_tables_inited = true; + + return true; // Setup completed OK + } +}; + +/* + Defines for the expected order of columns in ndb_schema table, should + match the accepted table definition. +*/ +constexpr uint SCHEMA_DB_I = 0; +constexpr uint SCHEMA_NAME_I = 1; +constexpr uint SCHEMA_SLOCK_I = 2; +constexpr uint SCHEMA_QUERY_I = 3; +constexpr uint SCHEMA_NODE_ID_I = 4; +constexpr uint SCHEMA_EPOCH_I = 5; +constexpr uint SCHEMA_ID_I = 6; +constexpr uint SCHEMA_VERSION_I = 7; +constexpr uint SCHEMA_TYPE_I = 8; +constexpr uint SCHEMA_OP_ID_I = 9; + +static void ndb_report_waiting(const char *key, int the_time, const char *op, + const char *obj) { + ulonglong ndb_latest_epoch = 0; + const char *proc_info = ""; + mysql_mutex_lock(&injector_event_mutex); + if (injector_ndb) ndb_latest_epoch = injector_ndb->getLatestGCI(); + if (injector_thd) proc_info = injector_thd->proc_info; + mysql_mutex_unlock(&injector_event_mutex); { - ndb_log_info("%s, waiting max %u sec for %s %s." 
- " epochs: (%u/%u,%u/%u,%u/%u)" - " injector proc_info: %s", - key, the_time, op, obj, - (uint)(ndb_latest_handled_binlog_epoch >> 32), - (uint)(ndb_latest_handled_binlog_epoch), - (uint)(ndb_latest_received_binlog_epoch >> 32), - (uint)(ndb_latest_received_binlog_epoch), - (uint)(ndb_latest_epoch >> 32), - (uint)(ndb_latest_epoch), - proc_info); + ndb_log_info( + "%s, waiting max %u sec for %s %s." + " epochs: (%u/%u,%u/%u,%u/%u)" + " injector proc_info: %s", + key, the_time, op, obj, (uint)(ndb_latest_handled_binlog_epoch >> 32), + (uint)(ndb_latest_handled_binlog_epoch), + (uint)(ndb_latest_received_binlog_epoch >> 32), + (uint)(ndb_latest_received_binlog_epoch), + (uint)(ndb_latest_epoch >> 32), (uint)(ndb_latest_epoch), proc_info); } } @@ -2415,11 +2194,9 @@ bool Ndb_schema_dist_client::write_schema_op_to_NDB( */ int Ndb_schema_dist_client::log_schema_op_impl( - Ndb* ndb, - const char *query, int query_length, const char *db, const char *table_name, - uint32 ndb_table_id, uint32 ndb_table_version, SCHEMA_OP_TYPE type, - uint32 anyvalue) -{ + Ndb *ndb, const char *query, int query_length, const char *db, + const char *table_name, uint32 ndb_table_id, uint32 ndb_table_version, + SCHEMA_OP_TYPE type, uint32 anyvalue) { DBUG_ENTER("Ndb_schema_dist_client::log_schema_op_impl"); DBUG_PRINT("enter", ("query: %s db: %s table_name: %s", query, db, table_name)); @@ -2499,7 +2276,7 @@ int Ndb_schema_dist_client::log_schema_op_impl( // Inspect results in NDB_SCHEMA_OBJECT before it's released std::vector participant_results; ndb_schema_object->client_get_schema_op_results(participant_results); - for (auto& it : participant_results) { + for (auto &it : participant_results) { // Save result for later m_schema_op_results.push_back({it.nodeid, it.result, it.message}); // Push the result as warning @@ -2510,7 +2287,6 @@ int Ndb_schema_dist_client::log_schema_op_impl( DBUG_RETURN(0); } - /* ndbcluster_binlog_event_operation_teardown @@ -2528,36 +2304,31 @@ int Ndb_schema_dist_client::log_schema_op_impl( listen to events on all the other tables. */ -static -void -ndbcluster_binlog_event_operation_teardown(THD *thd, - Ndb *is_ndb, - NdbEventOperation *pOp) -{ +static void ndbcluster_binlog_event_operation_teardown(THD *thd, Ndb *is_ndb, + NdbEventOperation *pOp) { DBUG_ENTER("ndbcluster_binlog_event_operation_teardown"); DBUG_PRINT("enter", ("pOp: %p", pOp)); // Get Ndb_event_data associated with the NdbEventOperation - const Ndb_event_data* event_data= - static_cast(pOp->getCustomData()); + const Ndb_event_data *event_data = + static_cast(pOp->getCustomData()); DBUG_ASSERT(event_data); // Get NDB_SHARE associated with the Ndb_event_data, the share // is referenced by "binlog" and will not go away until released // further down in this function - NDB_SHARE *share= event_data->share; + NDB_SHARE *share = event_data->share; // Invalidate any cached NdbApi table if object version is lower // than what was used when setting up the NdbEventOperation // NOTE! 
This functionality need to be explained further { - Thd_ndb *thd_ndb= get_thd_ndb(thd); - Ndb *ndb= thd_ndb->ndb; + Thd_ndb *thd_ndb = get_thd_ndb(thd); + Ndb *ndb = thd_ndb->ndb; Ndb_table_guard ndbtab_g(ndb, share->db, share->table_name); - const NDBTAB *ev_tab= pOp->getTable(); - const NDBTAB *cache_tab= ndbtab_g.get_table(); - if (cache_tab && - cache_tab->getObjectId() == ev_tab->getObjectId() && + const NDBTAB *ev_tab = pOp->getTable(); + const NDBTAB *cache_tab = ndbtab_g.get_table(); + if (cache_tab && cache_tab->getObjectId() == ev_tab->getObjectId() && cache_tab->getObjectVersion() <= ev_tab->getObjectVersion()) ndbtab_g.invalidate(); } @@ -2565,7 +2336,7 @@ ndbcluster_binlog_event_operation_teardown(THD *thd, // Remove NdbEventOperation from the share mysql_mutex_lock(&share->mutex); DBUG_ASSERT(share->op == pOp); - share->op= NULL; + share->op = NULL; mysql_mutex_unlock(&share->mutex); /* Signal ha_ndbcluster::delete/rename_table that drop is done */ @@ -2593,7 +2364,6 @@ ndbcluster_binlog_event_operation_teardown(THD *thd, DBUG_VOID_RETURN; } - /* Data used by the Ndb_schema_event_handler which lives as long as the NDB Binlog thread is connected to the cluster. @@ -2607,7 +2377,7 @@ class Ndb_schema_dist_data { // List of active schema operations in this coordinator. Having an // active schema operation means it need to be checked // for timeout or request to be killed regularly - std::unordered_set<const NDB_SCHEMA_OBJECT*> m_active_schema_ops; + std::unordered_set<const NDB_SCHEMA_OBJECT *> m_active_schema_ops; std::chrono::steady_clock::time_point m_next_check_time; @@ -2641,7 +2411,8 @@ class Ndb_schema_dist_data { @brief Add current subscribers to list of nodes. @param subscriber_list List of subscriber */ - void get_subscriber_list(std::unordered_set<uint32>& subscriber_list) const { + void get_subscriber_list( + std::unordered_set<uint32> &subscriber_list) const { for (uint i = bitmap_get_first_set(&m_bitmap); i != MY_BIT_NONE; i = bitmap_get_next_set(&m_bitmap, i)) { subscriber_list.insert(i); @@ -2653,7 +2424,7 @@ class Ndb_schema_dist_data { Node_subscribers per data node, this avoids the need to know which data nodes are connected. */ - std::unordered_map<uint32, Node_subscribers*> m_subscriber_bitmaps; + std::unordered_map<uint32, Node_subscribers *> m_subscriber_bitmaps; /** @brief Find node subscribers for given data node @@ -2678,17 +2449,16 @@ class Ndb_schema_dist_data { } // Holds the new key for a table to be renamed - struct NDB_SHARE_KEY* m_prepared_rename_key; // Holds the Ndb_event_data which is created during inplace alter table // prepare and used during commit // NOTE!
this place holder is only used for the participant in same node - const class Ndb_event_data* m_inplace_alter_event_data{nullptr}; -public: - Ndb_schema_dist_data(const Ndb_schema_dist_data&); // Not implemented - Ndb_schema_dist_data() : - m_prepared_rename_key(NULL) - {} + const class Ndb_event_data *m_inplace_alter_event_data{nullptr}; + + public: + Ndb_schema_dist_data(const Ndb_schema_dist_data &); // Not implemented + Ndb_schema_dist_data() : m_prepared_rename_key(NULL) {} ~Ndb_schema_dist_data() { // There should be no schema operations active DBUG_ASSERT(m_active_schema_ops.size() == 0); @@ -2710,8 +2480,7 @@ class Ndb_schema_dist_data { m_max_subscribers = max_subscribers; } - void release(void) - { + void release(void) { // Release the subscriber bitmaps for (const auto it : m_subscriber_bitmaps) { Node_subscribers *subscriber_bitmap = it.second; @@ -2731,24 +2500,22 @@ class Ndb_schema_dist_data { m_inplace_alter_event_data = nullptr; // Release any remaining active schema operations - for (const NDB_SCHEMA_OBJECT *schema_op: m_active_schema_ops) { - ndb_log_info(" - releasing schema operation on '%s.%s'", - schema_op->db(), schema_op->name()); + for (const NDB_SCHEMA_OBJECT *schema_op : m_active_schema_ops) { + ndb_log_info(" - releasing schema operation on '%s.%s'", schema_op->db(), + schema_op->name()); schema_op->fail_schema_op(Ndb_schema_dist::COORD_ABORT, "Coordinator aborted"); // Release coordinator reference - NDB_SCHEMA_OBJECT::release(const_cast<NDB_SCHEMA_OBJECT*>(schema_op)); + NDB_SCHEMA_OBJECT::release(const_cast<NDB_SCHEMA_OBJECT *>(schema_op)); } m_active_schema_ops.clear(); } - void report_data_node_failure(unsigned data_node_id) - { + void report_data_node_failure(unsigned data_node_id) { ndb_log_verbose(1, "Data node %d failed", data_node_id); Node_subscribers *subscribers = find_node_subscribers(data_node_id); - if (subscribers){ - + if (subscribers) { subscribers->clear_all(); ndb_log_verbose(19, "Subscribers[%d]: %s", data_node_id, @@ -2756,15 +2523,13 @@ class Ndb_schema_dist_data { } } - void report_subscribe(unsigned data_node_id, unsigned subscriber_node_id) - { + void report_subscribe(unsigned data_node_id, unsigned subscriber_node_id) { ndb_log_verbose(1, "Data node %d reports subscribe from node %d", data_node_id, subscriber_node_id); ndbcluster::ndbrequire(subscriber_node_id != 0); - Node_subscribers* subscribers = find_node_subscribers(data_node_id); - if (subscribers){ - + Node_subscribers *subscribers = find_node_subscribers(data_node_id); + if (subscribers) { subscribers->set(subscriber_node_id); ndb_log_verbose(19, "Subscribers[%d]: %s", data_node_id, @@ -2772,15 +2537,13 @@ class Ndb_schema_dist_data { } } - void report_unsubscribe(unsigned data_node_id, unsigned subscriber_node_id) - { + void report_unsubscribe(unsigned data_node_id, unsigned subscriber_node_id) { ndb_log_verbose(1, "Data node %d reports unsubscribe from node %d", data_node_id, subscriber_node_id); ndbcluster::ndbrequire(subscriber_node_id != 0); - Node_subscribers* subscribers = find_node_subscribers(data_node_id); - if (subscribers){ - + Node_subscribers *subscribers = find_node_subscribers(data_node_id); + if (subscribers) { subscribers->clear(subscriber_node_id); ndb_log_verbose(19, "Subscribers[%d]: %s", data_node_id, @@ -2794,7 +2557,7 @@ class Ndb_schema_dist_data { subscribed.
@param subscriber_list The list where to return subscribers */ - void get_subscriber_list(std::unordered_set<uint32>& subscriber_list) const { + void get_subscriber_list(std::unordered_set<uint32> &subscriber_list) const { for (const auto it : m_subscriber_bitmaps) { Node_subscribers *subscribers = it.second; subscribers->get_subscriber_list(subscriber_list); @@ -2803,29 +2566,24 @@ subscriber_list.insert(m_own_nodeid); } - void save_prepared_rename_key(NDB_SHARE_KEY* key) - { + void save_prepared_rename_key(NDB_SHARE_KEY *key) { m_prepared_rename_key = key; } - NDB_SHARE_KEY* get_prepared_rename_key() const - { + NDB_SHARE_KEY *get_prepared_rename_key() const { return m_prepared_rename_key; } - void save_inplace_alter_event_data(const Ndb_event_data* event_data) - { + void save_inplace_alter_event_data(const Ndb_event_data *event_data) { // Should not already be set when saving a new pointer - DBUG_ASSERT(event_data == nullptr || - !m_inplace_alter_event_data); + DBUG_ASSERT(event_data == nullptr || !m_inplace_alter_event_data); m_inplace_alter_event_data = event_data; } - const Ndb_event_data* get_inplace_alter_event_data() const - { + const Ndb_event_data *get_inplace_alter_event_data() const { return m_inplace_alter_event_data; } - void add_active_schema_op(NDB_SCHEMA_OBJECT* schema_op) { + void add_active_schema_op(NDB_SCHEMA_OBJECT *schema_op) { // Current assumption is that as long as all users of schema distribution // hold the GSL, there will ever only be one active schema operation at a // time. This assumption will probably change soon, but until then it can @@ -2840,7 +2598,7 @@ class Ndb_schema_dist_data { ndbcluster::ndbrequire(m_active_schema_ops.insert(schema_op).second); } - void remove_active_schema_op(NDB_SCHEMA_OBJECT* schema_op) { + void remove_active_schema_op(NDB_SCHEMA_OBJECT *schema_op) { // Need to have active schema op for decrement ndbcluster::ndbrequire(m_active_schema_ops.size() > 0); @@ -2865,13 +2623,10 @@ class Ndb_schema_dist_data { return true; } -}; //class Ndb_schema_dist_data - +}; // class Ndb_schema_dist_data class Ndb_schema_event_handler { - - class Ndb_schema_op - { + class Ndb_schema_op { /* Unpack arbitrary length varbinary field and return pointer to zero terminated string allocated in current memory root. @param field The field to unpack @return pointer to string allocated in current MEM_ROOT */ - static char* unpack_varbinary(Field *field) { + static char *unpack_varbinary(Field *field) { /* The Schema_dist_client will check the schema of the ndb_schema table and will not send any commands unless the table fulfills requirements. @@ -2894,8 +2649,9 @@ class Ndb_schema_event_handler { ndbcluster::ndbrequire(length_bytes <= 2); // Read length of the varbinary which is stored in the field - const uint varbinary_length = - length_bytes == 1 ? static_cast<uint>(*field->ptr) : uint2korr(field->ptr); + const uint varbinary_length = length_bytes == 1 + ?
static_cast<uint>(*field->ptr) : uint2korr(field->ptr); DBUG_PRINT("info", ("varbinary length: %u", varbinary_length)); // Check that varbinary length is not greater than fields max length // (this would indicate that corrupted data has been written to table) @@ -2941,15 +2697,16 @@ class Ndb_schema_event_handler { Uint32 read_len = static_cast<Uint32>(blob_len); ndbcluster::ndbrequire(ndb_blob->readData(str, read_len) == 0); ndbcluster::ndbrequire(blob_len == read_len); // Assume all read - str[blob_len] = 0; // Zero terminate + str[blob_len] = 0; // Zero terminate DBUG_PRINT("unpack_blob", ("str: '%s'", str)); return str; } - void unpack_slock(const Field* field) { + void unpack_slock(const Field *field) { // Allocate bitmap buffer in current MEM_ROOT - slock_buf = static_cast<my_bitmap_map*>((*THR_MALLOC)->Alloc(field->field_length)); + slock_buf = static_cast<my_bitmap_map *>( + (*THR_MALLOC)->Alloc(field->field_length)); ndbcluster::ndbrequire(slock_buf); // Initialize bitmap(always suceeds when buffer is already allocated) @@ -2960,12 +2717,11 @@ class Ndb_schema_event_handler { // Unpack Ndb_schema_op from event_data pointer - void unpack_event(const Ndb_event_data *event_data) - { - TABLE *table= event_data->shadow_table; + void unpack_event(const Ndb_event_data *event_data) { + TABLE *table = event_data->shadow_table; Field **field = table->field; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->read_set); /* db, varbinary */ db = unpack_varbinary(*field); @@ -2984,23 +2740,23 @@ class Ndb_schema_event_handler { field++; /* node_id */ - node_id= (Uint32)((Field_long *)*field)->val_int(); + node_id = (Uint32)((Field_long *)*field)->val_int(); /* epoch */ field++; - epoch= ((Field_long *)*field)->val_int(); + epoch = ((Field_long *)*field)->val_int(); /* id */ field++; - id= (Uint32)((Field_long *)*field)->val_int(); + id = (Uint32)((Field_long *)*field)->val_int(); /* version */ field++; - version= (Uint32)((Field_long *)*field)->val_int(); + version = (Uint32)((Field_long *)*field)->val_int(); /* type */ field++; - type= (Uint32)((Field_long *)*field)->val_int(); + type = (Uint32)((Field_long *)*field)->val_int(); /* schema_op_id */ field++; if (*field) { - // Optional column + // Optional column schema_op_id = (Uint32)((Field_long *)*field)->val_int(); } else { schema_op_id = 0; @@ -3009,17 +2765,19 @@ dbug_tmp_restore_column_map(table->read_set, old_map); } - public: // Note! The db, name, slock_buf and query variables point to memory // allocated in the current MEM_ROOT. When the Ndb_schema_op is put in the // list to be executed after epoch, only the pointers are copied and // still point to same memory inside the MEM_ROOT.
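
unpack_varbinary() above relies on the standard MySQL VARBINARY packing: a column whose maximum length is below 256 stores a 1-byte length prefix, longer columns store a 2-byte little-endian prefix (which is what uint2korr() reads), followed by the payload bytes. A small self-contained sketch of that decoding, independent of the Field class and purely illustrative:

#include <cassert>
#include <cstdint>
#include <string>

static std::string unpack_varbinary(const unsigned char *ptr,
                                    unsigned length_bytes) {
  assert(length_bytes == 1 || length_bytes == 2);
  uint32_t len;
  if (length_bytes == 1)
    len = ptr[0];
  else  // 2-byte little-endian length, the uint2korr() layout
    len = static_cast<uint32_t>(ptr[0]) | (static_cast<uint32_t>(ptr[1]) << 8);
  return std::string(reinterpret_cast<const char *>(ptr + length_bytes), len);
}

int main() {
  // "db" packed with a 1-byte length prefix
  const unsigned char packed[] = {2, 'd', 'b'};
  assert(unpack_varbinary(packed, 1) == "db");
}
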
char *db; char *name; - private: + + private: // Buffer for the slock bitmap my_bitmap_map *slock_buf; - public: + + public: MY_BITMAP slock; char *query; size_t query_length() const { @@ -3037,20 +2795,16 @@ class Ndb_schema_event_handler { /** Create a Ndb_schema_op from event_data */ - static const Ndb_schema_op* - create(const Ndb_event_data* event_data, - Uint32 any_value) - { + static const Ndb_schema_op *create(const Ndb_event_data *event_data, + Uint32 any_value) { DBUG_ENTER("Ndb_schema_op::create"); // Allocate memory in current MEM_ROOT - Ndb_schema_op* schema_op= - (Ndb_schema_op*)(*THR_MALLOC)->Alloc(sizeof(Ndb_schema_op)); + Ndb_schema_op *schema_op = + (Ndb_schema_op *)(*THR_MALLOC)->Alloc(sizeof(Ndb_schema_op)); schema_op->unpack_event(event_data); - schema_op->any_value= any_value; - DBUG_PRINT("exit", ("'%s.%s': query: '%s' type: %d", - schema_op->db, schema_op->name, - schema_op->query, - schema_op->type)); + schema_op->any_value = any_value; + DBUG_PRINT("exit", ("'%s.%s': query: '%s' type: %d", schema_op->db, + schema_op->name, schema_op->query, schema_op->type)); DBUG_RETURN(schema_op); } }; @@ -3058,13 +2812,14 @@ class Ndb_schema_event_handler { class Ndb_schema_op_result { uint32 m_result{0}; std::string m_message; - public: - void set_result(Ndb_schema_dist::Schema_op_result_code result, - const std::string message) { - // Both result and message must be set - DBUG_ASSERT(result && message.length()); - m_result = result; - m_message = message; + + public: + void set_result(Ndb_schema_dist::Schema_op_result_code result, + const std::string message) { + // Both result and message must be set + DBUG_ASSERT(result && message.length()); + m_result = result; + m_message = message; } const char *message() const { return m_message.c_str(); } uint32 result() const { return m_result; } @@ -3126,12 +2881,8 @@ class Ndb_schema_event_handler { ndb_error.message); } - static void - write_schema_op_to_binlog(THD *thd, const Ndb_schema_op *schema) - { - - if (!ndb_binlog_running) - { + static void write_schema_op_to_binlog(THD *thd, const Ndb_schema_op *schema) { + if (!ndb_binlog_running) { // This mysqld is not writing a binlog return; } @@ -3139,13 +2890,13 @@ class Ndb_schema_event_handler { /* any_value == 0 means local cluster sourced change that * should be logged */ - if (ndbcluster_anyvalue_is_reserved(schema->any_value)) - { + if (ndbcluster_anyvalue_is_reserved(schema->any_value)) { /* Originating SQL node did not want this query logged */ - if (!ndbcluster_anyvalue_is_nologging(schema->any_value)) - { - ndb_log_warning("unknown value for binlog signalling 0x%X, " - "query not logged", schema->any_value); + if (!ndbcluster_anyvalue_is_nologging(schema->any_value)) { + ndb_log_warning( + "unknown value for binlog signalling 0x%X, " + "query not logged", + schema->any_value); } return; } @@ -3161,21 +2912,17 @@ class Ndb_schema_event_handler { */ Uint32 loggedServerId = schema->any_value; - if (queryServerId) - { + if (queryServerId) { /* AnyValue has non-zero serverId, must be a query applied by a slave mysqld. TODO : Assert that we are running in the Binlog injector thread? */ - if (! 
g_ndb_log_slave_updates) - { + if (!g_ndb_log_slave_updates) { /* This MySQLD does not log slave updates */ return; } - } - else - { + } else { /* No ServerId associated with this query, mark it as ours */ ndbcluster_anyvalue_set_serverid(loggedServerId, ::server_id); } @@ -3184,21 +2931,20 @@ class Ndb_schema_event_handler { Write the DDL query to binlog with server_id set to the server_id where the query originated. */ - const uint32 thd_server_id_save= thd->server_id; + const uint32 thd_server_id_save = thd->server_id; DBUG_ASSERT(sizeof(thd_server_id_save) == sizeof(thd->server_id)); thd->server_id = loggedServerId; - LEX_CSTRING thd_db_save= thd->db(); - LEX_CSTRING schema_db_lex_cstr= {schema->db, strlen(schema->db)}; + LEX_CSTRING thd_db_save = thd->db(); + LEX_CSTRING schema_db_lex_cstr = {schema->db, strlen(schema->db)}; thd->reset_db(schema_db_lex_cstr); int errcode = query_error_code(thd, thd->killed == THD::NOT_KILLED); - thd->binlog_query(THD::STMT_QUERY_TYPE, - schema->query, schema->query_length(), - false, // is_trans - true, // direct - schema->name[0] == 0 || thd->db().str[0] == 0, - errcode); + thd->binlog_query(THD::STMT_QUERY_TYPE, schema->query, + schema->query_length(), + false, // is_trans + true, // direct + schema->name[0] == 0 || thd->db().str[0] == 0, errcode); // Commit the binlog write (void)trans_commit_stmt(thd); @@ -3207,7 +2953,7 @@ class Ndb_schema_event_handler { Restore original server_id and db after commit since the server_id is being used also in the commit logic */ - thd->server_id= thd_server_id_save; + thd->server_id = thd_server_id_save; thd->reset_db(thd_db_save); } @@ -3224,7 +2970,7 @@ class Ndb_schema_event_handler { are currently unused */ - int ack_schema_op(const Ndb_schema_op* schema) const { + int ack_schema_op(const Ndb_schema_op *schema) const { DBUG_ENTER("ack_schema_op"); Ndb *ndb = m_thd_ndb->ndb; @@ -3240,8 +2986,8 @@ class Ndb_schema_event_handler { const NdbError *ndb_error = nullptr; char tmp_buf[FN_REFLEN]; - NdbTransaction *trans= 0; - int retries= 100; + NdbTransaction *trans = 0; + int retries = 100; std::string before_slock; // Bitmap for the slock bits @@ -3251,37 +2997,34 @@ class Ndb_schema_event_handler { ndbcluster::ndbrequire(own_nodeid() <= slock_bits); (void)bitmap_init(&slock, nullptr, slock_bits, false); - while (1) - { - if ((trans= ndb->startTransaction()) == 0) - goto err; + while (1) { + if ((trans = ndb->startTransaction()) == 0) goto err; { - NdbOperation *op= 0; - int r= 0; + NdbOperation *op = 0; + int r = 0; /* read row from ndb_schema with exlusive row lock */ - r|= (op= trans->getNdbOperation(ndbtab)) == 0; + r |= (op = trans->getNdbOperation(ndbtab)) == 0; DBUG_ASSERT(r == 0); - r|= op->readTupleExclusive(); + r |= op->readTupleExclusive(); DBUG_ASSERT(r == 0); /* db */ ndb_pack_varchar(ndbtab, SCHEMA_DB_I, tmp_buf, schema->db, strlen(schema->db)); - r|= op->equal(SCHEMA_DB_I, tmp_buf); + r |= op->equal(SCHEMA_DB_I, tmp_buf); DBUG_ASSERT(r == 0); /* name */ - ndb_pack_varchar(ndbtab, SCHEMA_NAME_I, tmp_buf, - schema->name, strlen(schema->name)); - r|= op->equal(SCHEMA_NAME_I, tmp_buf); + ndb_pack_varchar(ndbtab, SCHEMA_NAME_I, tmp_buf, schema->name, + strlen(schema->name)); + r |= op->equal(SCHEMA_NAME_I, tmp_buf); DBUG_ASSERT(r == 0); /* slock */ - r|= op->getValue(SCHEMA_SLOCK_I, (char*)slock.bitmap) == 0; + r |= op->getValue(SCHEMA_SLOCK_I, (char *)slock.bitmap) == 0; DBUG_ASSERT(r == 0); /* Execute in NDB */ - if (trans->execute(NdbTransaction::NoCommit)) - goto err; + if 
(trans->execute(NdbTransaction::NoCommit)) goto err; } if (ndb_log_get_verbose_level() > 19) { @@ -3299,71 +3042,66 @@ class Ndb_schema_event_handler { } { - NdbOperation *op= 0; - int r= 0; + NdbOperation *op = 0; + int r = 0; /* now update the tuple */ - r|= (op= trans->getNdbOperation(ndbtab)) == 0; + r |= (op = trans->getNdbOperation(ndbtab)) == 0; DBUG_ASSERT(r == 0); - r|= op->updateTuple(); + r |= op->updateTuple(); DBUG_ASSERT(r == 0); /* db */ ndb_pack_varchar(ndbtab, SCHEMA_DB_I, tmp_buf, schema->db, strlen(schema->db)); - r|= op->equal(SCHEMA_DB_I, tmp_buf); + r |= op->equal(SCHEMA_DB_I, tmp_buf); DBUG_ASSERT(r == 0); /* name */ - ndb_pack_varchar(ndbtab, SCHEMA_NAME_I, tmp_buf, - schema->name, strlen(schema->name)); - r|= op->equal(SCHEMA_NAME_I, tmp_buf); + ndb_pack_varchar(ndbtab, SCHEMA_NAME_I, tmp_buf, schema->name, + strlen(schema->name)); + r |= op->equal(SCHEMA_NAME_I, tmp_buf); DBUG_ASSERT(r == 0); /* slock */ - r|= op->setValue(SCHEMA_SLOCK_I, (char*)slock.bitmap); + r |= op->setValue(SCHEMA_SLOCK_I, (char *)slock.bitmap); DBUG_ASSERT(r == 0); /* node_id */ // NOTE! Sends own nodeid here instead of nodeid who started schema op - r|= op->setValue(SCHEMA_NODE_ID_I, own_nodeid()); + r |= op->setValue(SCHEMA_NODE_ID_I, own_nodeid()); DBUG_ASSERT(r == 0); /* type */ - r|= op->setValue(SCHEMA_TYPE_I, (uint32)SOT_CLEAR_SLOCK); + r |= op->setValue(SCHEMA_TYPE_I, (uint32)SOT_CLEAR_SLOCK); DBUG_ASSERT(r == 0); } if (trans->execute(NdbTransaction::Commit, - NdbOperation::DefaultAbortOption, 1 /*force send*/) == 0) - { - DBUG_PRINT("info", ("node %d cleared lock on '%s.%s'", - own_nodeid(), schema->db, schema->name)); + NdbOperation::DefaultAbortOption, + 1 /*force send*/) == 0) { + DBUG_PRINT("info", ("node %d cleared lock on '%s.%s'", own_nodeid(), + schema->db, schema->name)); (void)ndb->getDictionary()->forceGCPWait(1); break; } err: - const NdbError *this_error= trans ? - &trans->getNdbError() : &ndb->getNdbError(); + const NdbError *this_error = + trans ? 
&trans->getNdbError() : &ndb->getNdbError(); if (this_error->status == NdbError::TemporaryError && - !thd_killed(m_thd)) - { - if (retries--) - { - if (trans) - ndb->closeTransaction(trans); + !thd_killed(m_thd)) { + if (retries--) { + if (trans) ndb->closeTransaction(trans); ndb_trans_retry_sleep(); - continue; // retry + continue; // retry } } - ndb_error= this_error; + ndb_error = this_error; break; } - if (ndb_error) - { - ndb_log_warning("Could not release slock on '%s.%s', " - "Error code: %d Message: %s", - schema->db, schema->name, - ndb_error->code, ndb_error->message); + if (ndb_error) { + ndb_log_warning( + "Could not release slock on '%s.%s', " + "Error code: %d Message: %s", + schema->db, schema->name, ndb_error->code, ndb_error->message); } - if (trans) - ndb->closeTransaction(trans); + if (trans) ndb->closeTransaction(trans); bitmap_free(&slock); DBUG_RETURN(0); } @@ -3395,7 +3133,7 @@ class Ndb_schema_event_handler { ndbcluster::ndbrequire(ndb->getDictionary()->getNdbError().code == 4009); DBUG_RETURN(1); } - const NdbDictionary::Table* ndbtab = schema_dist_table.get_table(); + const NdbDictionary::Table *ndbtab = schema_dist_table.get_table(); // Pack db and table_name char db_buf[FN_REFLEN]; @@ -3407,7 +3145,7 @@ class Ndb_schema_event_handler { // Buffer with zeroes for slock std::vector slock_zeroes; slock_zeroes.assign(schema_dist_table.get_slock_bytes(), 0); - const char* slock_buf = slock_zeroes.data(); + const char *slock_buf = slock_zeroes.data(); // Function for updating row in ndb_schema std::function ack_schema_op_final_func = @@ -3544,7 +3282,7 @@ class Ndb_schema_event_handler { ndbcluster::ndbrequire(ndb->getDictionary()->getNdbError().code == 4009); return; } - const NdbDictionary::Table* ndbtab = schema_result_table.get_table(); + const NdbDictionary::Table *ndbtab = schema_result_table.get_table(); const uint nodeid = own_nodeid(); // Function for deleting all rows from ndb_schema_result matching @@ -3670,18 +3408,15 @@ class Ndb_schema_event_handler { } } - bool check_is_ndb_schema_event(const Ndb_event_data* event_data) const - { - if (!event_data) - { + bool check_is_ndb_schema_event(const Ndb_event_data *event_data) const { + if (!event_data) { // Received event without event data pointer assert(false); return false; } - NDB_SHARE *share= event_data->share; - if (!share) - { + NDB_SHARE *share = event_data->share; + if (!share) { // Received event where the event_data is not properly initialized assert(false); return false; @@ -3694,22 +3429,16 @@ class Ndb_schema_event_handler { return true; } - - void - handle_after_epoch(const Ndb_schema_op* schema) - { + void handle_after_epoch(const Ndb_schema_op *schema) { DBUG_ENTER("handle_after_epoch"); DBUG_PRINT("info", ("Pushing Ndb_schema_op on list to be " "handled after epoch")); - assert(!is_post_epoch()); // Only before epoch + assert(!is_post_epoch()); // Only before epoch m_post_epoch_handle_list.push_back(schema, m_mem_root); DBUG_VOID_RETURN; } - uint own_nodeid(void) const - { - return m_own_nodeid; - } + uint own_nodeid(void) const { return m_own_nodeid; } void ndbapi_invalidate_table(const char *db_name, const char *table_name) const { @@ -3719,18 +3448,14 @@ class Ndb_schema_event_handler { DBUG_VOID_RETURN; } - NDB_SHARE* acquire_reference(const char* db, const char* name, - const char* reference) const - { + NDB_SHARE *acquire_reference(const char *db, const char *name, + const char *reference) const { DBUG_ENTER("acquire_reference"); DBUG_PRINT("enter", ("db: '%s', name: '%s'", db, name)); 
char key[FN_REFLEN + 1]; - build_table_filename(key, sizeof(key) - 1, - db, name, "", 0); - NDB_SHARE *share= - NDB_SHARE::acquire_reference_by_key(key, - reference); + build_table_filename(key, sizeof(key) - 1, db, name, "", 0); + NDB_SHARE *share = NDB_SHARE::acquire_reference_by_key(key, reference); DBUG_RETURN(share); } @@ -3769,8 +3494,7 @@ class Ndb_schema_event_handler { // Check if there is existing table in DD which is not a NDB table, in such // case refuse to overwrite the "shadow table" - if (has_shadow_table(dd_client, schema_name, table_name)) - return false; + if (has_shadow_table(dd_client, schema_name, table_name)) return false; if (!tablespace_name.empty()) { // Acquire IX MDL on tablespace @@ -3784,10 +3508,9 @@ class Ndb_schema_event_handler { Ndb_referenced_tables_invalidator invalidator(m_thd, dd_client); if (!dd_client.install_table( - schema_name, table_name, sdi, table_id, - table_version, num_partitions, - tablespace_name, force_overwrite, - (invalidate_referenced_tables ? &invalidator : nullptr))) { + schema_name, table_name, sdi, table_id, table_version, + num_partitions, tablespace_name, force_overwrite, + (invalidate_referenced_tables ? &invalidator : nullptr))) { log_and_clear_THD_conditions(); ndb_log_error("Failed to install table '%s.%s' in DD", schema_name, table_name); @@ -3804,9 +3527,9 @@ class Ndb_schema_event_handler { return true; } - bool create_table_from_engine(const char *schema_name, const char *table_name, - bool force_overwrite, - bool invalidate_referenced_tables = false) const { + bool create_table_from_engine( + const char *schema_name, const char *table_name, bool force_overwrite, + bool invalidate_referenced_tables = false) const { DBUG_TRACE; DBUG_PRINT("enter", ("schema_name: %s, table_name: %s", schema_name, table_name)); @@ -3832,7 +3555,6 @@ class Ndb_schema_event_handler { return false; } - // Deserialize the metadata from NDB Ndb_dd_table dd_table(m_thd); const dd::sdi_t sdi = serialized_metadata.c_str(); @@ -3858,10 +3580,10 @@ class Ndb_schema_event_handler { // Install the table definition in DD // NOTE! This is done after create/setup the NDB_SHARE to avoid that // server tries to open the table before the NDB_SHARE has been created - if (!install_table_in_dd( - schema_name, table_name, sdi, ndbtab->getObjectId(), - ndbtab->getObjectVersion(), ndbtab->getPartitionCount(), - tablespace_name, force_overwrite, invalidate_referenced_tables)) { + if (!install_table_in_dd(schema_name, table_name, sdi, + ndbtab->getObjectId(), ndbtab->getObjectVersion(), + ndbtab->getPartitionCount(), tablespace_name, + force_overwrite, invalidate_referenced_tables)) { ndb_log_warning( "Failed to update table definition in DD, continue anyway..."); } @@ -3869,14 +3591,12 @@ class Ndb_schema_event_handler { return true; } - void handle_clear_slock(const Ndb_schema_op* schema) - { + void handle_clear_slock(const Ndb_schema_op *schema) { DBUG_ENTER("handle_clear_slock"); assert(is_post_epoch()); - if (DBUG_EVALUATE_IF("ndb_binlog_random_tableid", true, false)) - { + if (DBUG_EVALUATE_IF("ndb_binlog_random_tableid", true, false)) { // Try to create a race between SLOCK acks handled after another // schema operation on same object could have been started. 
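// Note: the 'slock' column acked in the hunks above is a bitmap with one
// bit per participating node; a participant clears its own bit (the update
// writes back the cleared copy of the bitmap) to tell the coordinator that
// it has applied the schema operation. A minimal sketch of the bit-clear
// semantics, assuming nodeids map directly to bit positions in the two
// 32-bit words seen in the slock logging (clear_own_slock_bit is a
// hypothetical helper, not part of this patch):
//
//   #include <cstdint>
//   static inline void clear_own_slock_bit(uint32_t slock[2],
//                                          unsigned own_nodeid) {
//     slock[own_nodeid / 32] &= ~(uint32_t{1} << (own_nodeid % 32));
//   }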
@@ -3950,48 +3670,42 @@ class Ndb_schema_event_handler { * * If the coordinator then starts yet another schema operation * on the same schema / table, it will need a schema_object with - * the same key as the one already completed, and which this + * the same key as the one already completed, and which this * thread still refers. Thus, it will get this schema_object, * instead of creating a new one as normally expected. */ - if (DBUG_EVALUATE_IF("ndb_binlog_schema_object_race", true, false)) - { + if (DBUG_EVALUATE_IF("ndb_binlog_schema_object_race", true, false)) { ndb_milli_sleep(10); } DBUG_VOID_RETURN; } - void - handle_offline_alter_table_commit(const Ndb_schema_op* schema) - { + void handle_offline_alter_table_commit(const Ndb_schema_op *schema) { DBUG_ENTER("handle_offline_alter_table_commit"); - assert(is_post_epoch()); // Always after epoch + assert(is_post_epoch()); // Always after epoch - if (schema->node_id == own_nodeid()) - DBUG_VOID_RETURN; + if (schema->node_id == own_nodeid()) DBUG_VOID_RETURN; write_schema_op_to_binlog(m_thd, schema); ndbapi_invalidate_table(schema->db, schema->name); ndb_tdc_close_cached_table(m_thd, schema->db, schema->name); - NDB_SHARE *share= + NDB_SHARE *share = acquire_reference(schema->db, schema->name, "offline_alter_table_commit"); // Temp ref. - if (share) - { + if (share) { mysql_mutex_lock(&share->mutex); - if (share->op) - { - const Ndb_event_data *event_data= - static_cast<const Ndb_event_data*>(share->op->getCustomData()); + if (share->op) { + const Ndb_event_data *event_data = + static_cast<const Ndb_event_data *>(share->op->getCustomData()); Ndb_event_data::destroy(event_data); share->op->setCustomData(NULL); { Mutex_guard injector_mutex_g(injector_event_mutex); injector_ndb->dropEventOperation(share->op); } - share->op= 0; + share->op = 0; NDB_SHARE::release_reference(share, "binlog"); } mysql_mutex_unlock(&share->mutex); @@ -4010,23 +3724,19 @@ class Ndb_schema_event_handler { if (!create_table_from_engine(schema->db, schema->name, true /* force_overwrite */, true /* invalidate_referenced_tables */)) { - ndb_log_error("Distribution of ALTER TABLE '%s.%s' failed", - schema->db, schema->name); + ndb_log_error("Distribution of ALTER TABLE '%s.%s' failed", schema->db, + schema->name); } DBUG_VOID_RETURN; } - - void - handle_online_alter_table_prepare(const Ndb_schema_op* schema) - { - assert(is_post_epoch()); // Always after epoch + void handle_online_alter_table_prepare(const Ndb_schema_op *schema) { + assert(is_post_epoch()); // Always after epoch ndbapi_invalidate_table(schema->db, schema->name); ndb_tdc_close_cached_table(m_thd, schema->db, schema->name); - if (schema->node_id == own_nodeid()) - { + if (schema->node_id == own_nodeid()) { // Special case for schema dist participant in own node! // The schema dist client has exclusive MDL lock and thus // the schema dist participant(this code) on the same mysqld @@ -4034,24 +3744,20 @@ class Ndb_schema_event_handler { // another MDL lock will just block. Instead(since this is in // the same mysqld) it provides the new table def via a // pointer in the NDB_SHARE. - NDB_SHARE *share= + NDB_SHARE *share = acquire_reference(schema->db, schema->name, - "online_alter_table_prepare"); // temporary ref. + "online_alter_table_prepare"); // temporary ref.
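// Note: share->inplace_alter_new_table_def, read just below, is the
// pointer stashed by the schema dist client on this same mysqld (see the
// comment above); the participant picks the new table definition up from
// there instead of opening it via the DD, which the client's exclusive MDL
// would otherwise block.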
- const dd::Table* new_table_def = - static_cast(share->inplace_alter_new_table_def); + const dd::Table *new_table_def = + static_cast(share->inplace_alter_new_table_def); DBUG_ASSERT(new_table_def); - // Create a new Ndb_event_data which will be used when creating // the new NdbEventOperation - Ndb_event_data* event_data = - Ndb_event_data::create_event_data(m_thd, share, - share->db, share->table_name, - share->key_string(), injector_thd, - new_table_def); - if (!event_data) - { + Ndb_event_data *event_data = Ndb_event_data::create_event_data( + m_thd, share, share->db, share->table_name, share->key_string(), + injector_thd, new_table_def); + if (!event_data) { ndb_log_error("NDB Binlog: Failed to create event data for table %s.%s", schema->db, schema->name); DBUG_ASSERT(false); @@ -4060,10 +3766,9 @@ class Ndb_schema_event_handler { // Release old prepared event_data, this is rare but will happen // when an inplace alter table fails between prepare and commit phase - const Ndb_event_data* old_event_data = + const Ndb_event_data *old_event_data = m_schema_dist_data.get_inplace_alter_event_data(); - if (old_event_data) - { + if (old_event_data) { Ndb_event_data::destroy(old_event_data); m_schema_dist_data.save_inplace_alter_event_data(nullptr); } @@ -4072,10 +3777,8 @@ class Ndb_schema_event_handler { m_schema_dist_data.save_inplace_alter_event_data(event_data); NDB_SHARE::release_reference(share, - "online_alter_table_prepare"); // temp ref. - } - else - { + "online_alter_table_prepare"); // temp ref. + } else { write_schema_op_to_binlog(m_thd, schema); // Install table from NDB, overwrite the altered table. @@ -4117,7 +3820,6 @@ class Ndb_schema_event_handler { return nullptr; } - // Deserialize the metadata from NDB Ndb_dd_table dd_table(m_thd); const dd::sdi_t sdi = serialized_metadata.c_str(); @@ -4129,13 +3831,10 @@ class Ndb_schema_event_handler { } // Create new event_data - Ndb_event_data* event_data = - Ndb_event_data::create_event_data(m_thd, share, - schema_name, table_name, - share->key_string(), injector_thd, - dd_table.get_table_def()); - if (!event_data) - { + Ndb_event_data *event_data = Ndb_event_data::create_event_data( + m_thd, share, schema_name, table_name, share->key_string(), + injector_thd, dd_table.get_table_def()); + if (!event_data) { ndb_log_error("NDB Binlog: Failed to create event data for table '%s.%s'", share->db, share->table_name); DBUG_RETURN(nullptr); @@ -4144,44 +3843,34 @@ class Ndb_schema_event_handler { DBUG_RETURN(event_data); } + void handle_online_alter_table_commit(const Ndb_schema_op *schema) { + assert(is_post_epoch()); // Always after epoch - void - handle_online_alter_table_commit(const Ndb_schema_op* schema) - { - assert(is_post_epoch()); // Always after epoch - - NDB_SHARE *share= + NDB_SHARE *share = acquire_reference(schema->db, schema->name, - "online_alter_table_commit"); // temporary ref. - if (share) - { + "online_alter_table_commit"); // temporary ref. 
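// Note: the commit phase below swaps the table's NdbEventOperation over to
// the new table layout. On the node that ran the ALTER the Ndb_event_data
// prepared earlier is reused, while remote participants create one from the
// serialized metadata fetched out of NDB; the current operation is kept in
// curr_op and only dropped after the replacement has been created, so no
// binlog events are lost in between. (Summary of the existing flow, added
// for readability; no new behaviour.)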
+ if (share) { ndb_log_verbose(9, "NDB Binlog: handling online alter/rename"); mysql_mutex_lock(&share->mutex); - const Ndb_event_data* event_data; - if (schema->node_id == own_nodeid()) - { + const Ndb_event_data *event_data; + if (schema->node_id == own_nodeid()) { // Get the event_data which has been created during prepare phase - event_data = - m_schema_dist_data.get_inplace_alter_event_data(); - if (!event_data) - { + event_data = m_schema_dist_data.get_inplace_alter_event_data(); + if (!event_data) { ndb_log_error("Failed to get prepared event data '%s'", share->key_string()); DBUG_ASSERT(false); } // The event_data pointer has been taken over m_schema_dist_data.save_inplace_alter_event_data(nullptr); - } - else - { + } else { // Create Ndb_event_data which will be used when creating // the new NdbEventOperation. event_data = remote_participant_inplace_alter_create_event_data( share, share->db, share->table_name); - if (!event_data) - { + if (!event_data) { ndb_log_error("Failed to create event data for table '%s'", share->key_string()); DBUG_ASSERT(false); @@ -4189,9 +3878,8 @@ class Ndb_schema_event_handler { } DBUG_ASSERT(event_data); - NdbEventOperation* new_op = nullptr; - if (share->op && event_data /* safety */) - { + NdbEventOperation *new_op = nullptr; + if (share->op && event_data /* safety */) { Ndb_binlog_client binlog_client(m_thd, schema->db, schema->name); // The table have an event operation setup and during an inplace // alter table that need to be recrated for the new table layout. @@ -4206,35 +3894,30 @@ class Ndb_schema_event_handler { // reference as it will be acquired again in create_event_op() // NOTE! This should probably be rewritten to not assign share->op and // acquire the reference in create_event_op() - NdbEventOperation * const curr_op= share->op; - share->op= nullptr; + NdbEventOperation *const curr_op = share->op; + share->op = nullptr; NDB_SHARE::release_reference(share, "binlog"); // Get table from NDB Ndb_table_guard ndbtab_g(m_thd_ndb->ndb, schema->db, schema->name); - const NDBTAB *ndbtab= ndbtab_g.get_table(); + const NDBTAB *ndbtab = ndbtab_g.get_table(); // Create new NdbEventOperation - if (binlog_client.create_event_op(share, ndbtab, event_data)) - { + if (binlog_client.create_event_op(share, ndbtab, event_data)) { ndb_log_error("Failed to create event operation for table '%s'", share->key_string()); // NOTE! 
Should fail the alter here DBUG_ASSERT(false); - } - else - { + } else { // Get the newly created NdbEventOperation, will be swapped // into place (again) later - new_op= share->op; + new_op = share->op; } // Reinstall the current NdbEventOperation - share->op= curr_op; - } - else - { + share->op = curr_op; + } else { // New event_data was created(that's the default) but the table didn't // have event operations and thus the event_data is unused, free it Ndb_event_data::destroy(event_data); @@ -4246,11 +3929,10 @@ class Ndb_schema_event_handler { DBUG_ASSERT(m_schema_dist_data.get_inplace_alter_event_data() == nullptr); // Start using the new event operation and release the old - if (share->op && new_op) - { + if (share->op && new_op) { // Delete old event_data - const Ndb_event_data *event_data= - static_cast(share->op->getCustomData()); + const Ndb_event_data *event_data = + static_cast(share->op->getCustomData()); share->op->setCustomData(NULL); Ndb_event_data::destroy(event_data); @@ -4260,12 +3942,12 @@ class Ndb_schema_event_handler { injector_ndb->dropEventOperation(share->op); } // Install new event operation - share->op= new_op; + share->op = new_op; } mysql_mutex_unlock(&share->mutex); NDB_SHARE::release_reference(share, - "online_alter_table_commit"); // temp ref. + "online_alter_table_commit"); // temp ref. } DBUG_ASSERT(m_schema_dist_data.get_inplace_alter_event_data() == nullptr); @@ -4286,8 +3968,7 @@ class Ndb_schema_event_handler { // Check if there is existing table in DD which is not a NDB table, in such // case refuse to remove the "shadow table" - if (has_shadow_table(dd_client, schema_name, table_name)) - return false; + if (has_shadow_table(dd_client, schema_name, table_name)) return false; if (!dd_client.remove_table(schema_name, table_name, &invalidator)) { log_and_clear_THD_conditions(); @@ -4296,8 +3977,7 @@ class Ndb_schema_event_handler { return false; } - if (!invalidator.invalidate()) - { + if (!invalidator.invalidate()) { log_and_clear_THD_conditions(); ndb_log_error("Failed to invalidate referenced tables for '%s.%s'", schema_name, table_name); @@ -4308,15 +3988,12 @@ class Ndb_schema_event_handler { return true; } - void - handle_drop_table(const Ndb_schema_op* schema) - { + void handle_drop_table(const Ndb_schema_op *schema) { DBUG_ENTER("handle_drop_table"); - assert(is_post_epoch()); // Always after epoch + assert(is_post_epoch()); // Always after epoch - if (schema->node_id == own_nodeid()) - DBUG_VOID_RETURN; + if (schema->node_id == own_nodeid()) DBUG_VOID_RETURN; write_schema_op_to_binlog(m_thd, schema); @@ -4333,19 +4010,18 @@ class Ndb_schema_event_handler { "Failed to remove table definition from DD, continue anyway..."); } - NDB_SHARE *share= acquire_reference(schema->db, schema->name, - "drop_table"); // temporary ref. - if (!share || !share->op) - { + NDB_SHARE *share = acquire_reference(schema->db, schema->name, + "drop_table"); // temporary ref. + if (!share || !share->op) { ndbapi_invalidate_table(schema->db, schema->name); ndb_tdc_close_cached_table(m_thd, schema->db, schema->name); } - if (share) - { + if (share) { mysql_mutex_lock(&ndbcluster_mutex); - NDB_SHARE::mark_share_dropped(&share); // server ref. - DBUG_ASSERT(share); // Should still be ref'ed - NDB_SHARE::release_reference_have_lock(share, "drop_table"); // temporary ref. + NDB_SHARE::mark_share_dropped(&share); // server ref. + DBUG_ASSERT(share); // Should still be ref'ed + NDB_SHARE::release_reference_have_lock(share, + "drop_table"); // temporary ref. 
mysql_mutex_unlock(&ndbcluster_mutex); } @@ -4355,38 +4031,32 @@ class Ndb_schema_event_handler { DBUG_VOID_RETURN; } - /* The RENAME is performed in two steps. 1) PREPARE_RENAME - sends the new table key to participants 2) RENAME - performs the actual rename */ - void - handle_rename_table_prepare(const Ndb_schema_op* schema) - { + void handle_rename_table_prepare(const Ndb_schema_op *schema) { DBUG_ENTER("handle_rename_table_prepare"); - assert(is_post_epoch()); // Always after epoch + assert(is_post_epoch()); // Always after epoch - if (schema->node_id == own_nodeid()) - DBUG_VOID_RETURN; + if (schema->node_id == own_nodeid()) DBUG_VOID_RETURN; - const char* new_key_for_table= schema->query; + const char *new_key_for_table = schema->query; DBUG_PRINT("info", ("new_key_for_table: '%s'", new_key_for_table)); // Release potentially previously prepared new_key { - NDB_SHARE_KEY* old_prepared_key = + NDB_SHARE_KEY *old_prepared_key = m_schema_dist_data.get_prepared_rename_key(); - if (old_prepared_key) - NDB_SHARE::free_key(old_prepared_key); + if (old_prepared_key) NDB_SHARE::free_key(old_prepared_key); } // Create a new key and save it, then hope for the best (i.e. // that it can be found later when the RENAME arrives) - NDB_SHARE_KEY* new_prepared_key = - NDB_SHARE::create_key(new_key_for_table); + NDB_SHARE_KEY *new_prepared_key = NDB_SHARE::create_key(new_key_for_table); m_schema_dist_data.save_prepared_rename_key(new_prepared_key); DBUG_VOID_RETURN; @@ -4495,44 +4165,37 @@ class Ndb_schema_event_handler { return true; } - void - handle_rename_table(const Ndb_schema_op* schema) - { + void handle_rename_table(const Ndb_schema_op *schema) { DBUG_ENTER("handle_rename_table"); - assert(is_post_epoch()); // Always after epoch + assert(is_post_epoch()); // Always after epoch - if (schema->node_id == own_nodeid()) - DBUG_VOID_RETURN; + if (schema->node_id == own_nodeid()) DBUG_VOID_RETURN; write_schema_op_to_binlog(m_thd, schema); // Participant never takes GSL assert(m_thd_ndb->check_option(Thd_ndb::IS_SCHEMA_DIST_PARTICIPANT)); - NDB_SHARE *share= acquire_reference(schema->db, schema->name, - "rename_table"); // temporary ref. - if (!share || !share->op) - { + NDB_SHARE *share = acquire_reference(schema->db, schema->name, + "rename_table"); // temporary ref. + if (!share || !share->op) { ndbapi_invalidate_table(schema->db, schema->name); ndb_tdc_close_cached_table(m_thd, schema->db, schema->name); } if (share) - NDB_SHARE::release_reference(share, "rename_table"); // temporary ref. + NDB_SHARE::release_reference(share, "rename_table"); // temporary ref. - share= acquire_reference(schema->db, schema->name, - "rename_table"); // temporary ref. + share = acquire_reference(schema->db, schema->name, + "rename_table"); // temporary ref.
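// Note: the share is looked up a second time here because the temporary
// reference taken above was already released after the invalidation check;
// this fresh temporary reference is the one the actual rename below uses.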
+ if (!share) { // The RENAME needs to find the share so it can be renamed DBUG_ASSERT(share); DBUG_VOID_RETURN; } - NDB_SHARE_KEY* prepared_key = - m_schema_dist_data.get_prepared_rename_key(); - if (!prepared_key) - { + NDB_SHARE_KEY *prepared_key = m_schema_dist_data.get_prepared_rename_key(); + if (!prepared_key) { // The rename needs to have new_key set // by a previous RENAME_PREPARE DBUG_ASSERT(prepared_key); @@ -4545,12 +4208,11 @@ class Ndb_schema_event_handler { DBUG_ASSERT(!ndb_name_is_temp(NDB_SHARE::key_get_table_name(prepared_key))); // Open the renamed table from NDB - const char* new_db_name = NDB_SHARE::key_get_db_name(prepared_key); - const char* new_table_name = NDB_SHARE::key_get_table_name(prepared_key); + const char *new_db_name = NDB_SHARE::key_get_db_name(prepared_key); + const char *new_table_name = NDB_SHARE::key_get_table_name(prepared_key); Ndb_table_guard ndbtab_g(m_thd_ndb->ndb, new_db_name, new_table_name); - const NdbDictionary::Table *ndbtab= ndbtab_g.get_table(); - if (!ndbtab) - { + const NdbDictionary::Table *ndbtab = ndbtab_g.get_table(); + if (!ndbtab) { // Could not open the table from NDB, very unusual log_NDB_error(m_thd_ndb->ndb->getDictionary()->getNdbError()); ndb_log_error("Failed to rename, could not open table '%s.%s' from NDB", @@ -4564,19 +4226,19 @@ class Ndb_schema_event_handler { // Rename table in DD if (!rename_table_in_dd(schema->db, schema->name, NDB_SHARE::key_get_db_name(prepared_key), - NDB_SHARE::key_get_table_name(prepared_key), - ndbtab, tablespace_name)) { + NDB_SHARE::key_get_table_name(prepared_key), ndbtab, + tablespace_name)) { ndb_log_warning( "Failed to rename table definition in DD, continue anyway..."); } // Rename share and release the old key - NDB_SHARE_KEY* old_key = share->key; + NDB_SHARE_KEY *old_key = share->key; NDB_SHARE::rename_share(share, prepared_key); m_schema_dist_data.save_prepared_rename_key(NULL); NDB_SHARE::free_key(old_key); - NDB_SHARE::release_reference(share, "rename_table"); // temporary ref. + NDB_SHARE::release_reference(share, "rename_table"); // temporary ref.
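// Note: the invalidation below operates on the old
// schema->db/schema->name pair: the share has just been renamed to the
// prepared key, but the NdbApi dictionary cache and the table definition
// cache still hold entries under the old name, and those are the entries
// that must be flushed.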
ndbapi_invalidate_table(schema->db, schema->name); ndb_tdc_close_cached_table(m_thd, schema->db, schema->name); @@ -4584,16 +4246,12 @@ class Ndb_schema_event_handler { DBUG_VOID_RETURN; } - - void - handle_drop_db(const Ndb_schema_op* schema) - { + void handle_drop_db(const Ndb_schema_op *schema) { DBUG_ENTER("handle_drop_db"); - assert(is_post_epoch()); // Always after epoch + assert(is_post_epoch()); // Always after epoch - if (schema->node_id == own_nodeid()) - DBUG_VOID_RETURN; + if (schema->node_id == own_nodeid()) DBUG_VOID_RETURN; write_schema_op_to_binlog(m_thd, schema); @@ -4603,8 +4261,7 @@ class Ndb_schema_event_handler { Ndb_dd_client dd_client(m_thd); // Lock the schema in DD - if (!dd_client.mdl_lock_schema(schema->db)) - { + if (!dd_client.mdl_lock_schema(schema->db)) { log_and_clear_THD_conditions(); ndb_log_error("Failed to acquire MDL for db '%s'", schema->db); // Failed to lock the DD, skip dropping @@ -4612,18 +4269,15 @@ class Ndb_schema_event_handler { } bool schema_exists; - if (!dd_client.schema_exists(schema->db, &schema_exists)) - { + if (!dd_client.schema_exists(schema->db, &schema_exists)) { log_and_clear_THD_conditions(); ndb_log_error("Failed to determine if database '%s' exists", schema->db); // Failed to check if database existed, skip dropping DBUG_VOID_RETURN; } - if (!schema_exists) - { - DBUG_PRINT("info", ("Schema '%s' does not exist", - schema->db)); + if (!schema_exists) { + DBUG_PRINT("info", ("Schema '%s' does not exist", schema->db)); // Nothing to do DBUG_VOID_RETURN; } @@ -4636,8 +4290,8 @@ class Ndb_schema_event_handler { // appropriate to log error messages to the server log file describing // any problems which occur in these functions. std::unordered_set ndb_tables_in_DD; - if (!dd_client.get_ndb_table_names_in_schema(schema->db, &ndb_tables_in_DD)) - { + if (!dd_client.get_ndb_table_names_in_schema(schema->db, + &ndb_tables_in_DD)) { log_and_clear_THD_conditions(); ndb_log_error("Failed to get list of NDB tables in database '%s'", schema->db); @@ -4646,11 +4300,9 @@ class Ndb_schema_event_handler { Ndb_referenced_tables_invalidator invalidator(m_thd, dd_client); - for (const auto& ndb_table_name : ndb_tables_in_DD) - { + for (const auto &ndb_table_name : ndb_tables_in_DD) { if (!dd_client.mdl_locks_acquire_exclusive(schema->db, - ndb_table_name.c_str())) - { + ndb_table_name.c_str())) { log_and_clear_THD_conditions(); ndb_log_warning("Failed to acquire exclusive MDL on '%s.%s'", schema->db, ndb_table_name.c_str()); @@ -4658,8 +4310,7 @@ class Ndb_schema_event_handler { } if (!dd_client.remove_table(schema->db, ndb_table_name.c_str(), - &invalidator)) - { + &invalidator)) { // Failed to remove the table from DD, not much else to do // than try with the next log_and_clear_THD_conditions(); @@ -4668,20 +4319,18 @@ class Ndb_schema_event_handler { continue; } - NDB_SHARE *share= - acquire_reference(schema->db, ndb_table_name.c_str(), - "drop_db"); // temporary ref. - if (!share || !share->op) - { + NDB_SHARE *share = acquire_reference(schema->db, ndb_table_name.c_str(), + "drop_db"); // temporary ref. + if (!share || !share->op) { ndbapi_invalidate_table(schema->db, ndb_table_name.c_str()); ndb_tdc_close_cached_table(m_thd, schema->db, ndb_table_name.c_str()); } - if (share) - { + if (share) { mysql_mutex_lock(&ndbcluster_mutex); - NDB_SHARE::mark_share_dropped(&share); // server ref. - DBUG_ASSERT(share); // Should still be ref'ed - NDB_SHARE::release_reference_have_lock(share, "drop_db"); // temporary ref. 
+ NDB_SHARE::mark_share_dropped(&share); // server ref. + DBUG_ASSERT(share); // Should still be ref'ed + NDB_SHARE::release_reference_have_lock(share, + "drop_db"); // temporary ref. mysql_mutex_unlock(&ndbcluster_mutex); } @@ -4689,8 +4338,7 @@ class Ndb_schema_event_handler { ndb_tdc_close_cached_table(m_thd, schema->db, ndb_table_name.c_str()); } - if (!invalidator.invalidate()) - { + if (!invalidator.invalidate()) { log_and_clear_THD_conditions(); ndb_log_error("Failed to invalidate referenced tables for database '%s'", schema->db); @@ -4700,8 +4348,8 @@ class Ndb_schema_event_handler { dd_client.commit(); bool found_local_tables; - if (!dd_client.have_local_tables_in_schema(schema->db, &found_local_tables)) - { + if (!dd_client.have_local_tables_in_schema(schema->db, + &found_local_tables)) { // Failed to access the DD to check if non NDB tables existed, assume // the worst and skip dropping this database log_and_clear_THD_conditions(); @@ -4711,16 +4359,15 @@ class Ndb_schema_event_handler { DBUG_VOID_RETURN; } - DBUG_PRINT("exit",("found_local_tables: %d", found_local_tables)); + DBUG_PRINT("exit", ("found_local_tables: %d", found_local_tables)); - if (found_local_tables) - { + if (found_local_tables) { /* Tables exists as a local table, print error and leave it */ - ndb_log_warning("NDB Binlog: Skipping drop database '%s' since " - "it contained local tables " - "binlog schema event '%s' from node %d. ", - schema->db, schema->query, - schema->node_id); + ndb_log_warning( + "NDB Binlog: Skipping drop database '%s' since " + "it contained local tables " + "binlog schema event '%s' from node %d. ", + schema->db, schema->query, schema->node_id); DBUG_VOID_RETURN; } @@ -4730,7 +4377,7 @@ class Ndb_schema_event_handler { // in the schema, but at least all the NDB tables have in such case // already been removed from the DD Ndb_local_connection mysqld(m_thd); - if (mysqld.drop_database(schema->db)){ + if (mysqld.drop_database(schema->db)) { log_and_clear_THD_conditions(); ndb_log_error("Failed to execute 'DROP DATABASE' for database '%s'", schema->db); @@ -4740,77 +4387,63 @@ class Ndb_schema_event_handler { DBUG_VOID_RETURN; } - - void - handle_truncate_table(const Ndb_schema_op* schema) - { + void handle_truncate_table(const Ndb_schema_op *schema) { DBUG_ENTER("handle_truncate_table"); - assert(!is_post_epoch()); // Always directly + assert(!is_post_epoch()); // Always directly - if (schema->node_id == own_nodeid()) - DBUG_VOID_RETURN; + if (schema->node_id == own_nodeid()) DBUG_VOID_RETURN; write_schema_op_to_binlog(m_thd, schema); - NDB_SHARE *share= acquire_reference(schema->db, schema->name, - "truncate_table"); + NDB_SHARE *share = + acquire_reference(schema->db, schema->name, "truncate_table"); // invalidation already handled by binlog thread - if (!share || !share->op) - { + if (!share || !share->op) { ndbapi_invalidate_table(schema->db, schema->name); ndb_tdc_close_cached_table(m_thd, schema->db, schema->name); } - if (share) - { + if (share) { // Reset the tables shared auto_increment counter share->reset_tuple_id_range(); - NDB_SHARE::release_reference(share, "truncate_table"); // temporary ref. + NDB_SHARE::release_reference(share, "truncate_table"); // temporary ref. 
} if (!create_table_from_engine(schema->db, schema->name, true /* force_overwrite */)) { - ndb_log_error("Distribution of TRUNCATE TABLE '%s.%s' failed", - schema->db, schema->name); + ndb_log_error("Distribution of TRUNCATE TABLE '%s.%s' failed", schema->db, + schema->name); } DBUG_VOID_RETURN; } - - void - handle_create_table(const Ndb_schema_op* schema) - { + void handle_create_table(const Ndb_schema_op *schema) { DBUG_ENTER("handle_create_table"); - assert(!is_post_epoch()); // Always directly + assert(!is_post_epoch()); // Always directly - if (schema->node_id == own_nodeid()) - DBUG_VOID_RETURN; + if (schema->node_id == own_nodeid()) DBUG_VOID_RETURN; write_schema_op_to_binlog(m_thd, schema); if (!create_table_from_engine(schema->db, schema->name, true, /* force_overwrite */ true /* invalidate_referenced_tables */)) { - ndb_log_error("Distribution of CREATE TABLE '%s.%s' failed", - schema->db, schema->name); + ndb_log_error("Distribution of CREATE TABLE '%s.%s' failed", schema->db, + schema->name); } DBUG_VOID_RETURN; } - - void - handle_create_db(const Ndb_schema_op* schema) - { + void handle_create_db(const Ndb_schema_op *schema) { DBUG_ENTER("handle_create_db"); - assert(!is_post_epoch()); // Always directly + assert(!is_post_epoch()); // Always directly - if (schema->node_id == own_nodeid()) - DBUG_VOID_RETURN; + if (schema->node_id == own_nodeid()) DBUG_VOID_RETURN; write_schema_op_to_binlog(m_thd, schema); @@ -4837,16 +4470,12 @@ class Ndb_schema_event_handler { DBUG_VOID_RETURN; } - - void - handle_alter_db(const Ndb_schema_op* schema) - { + void handle_alter_db(const Ndb_schema_op *schema) { DBUG_ENTER("handle_alter_db"); - assert(!is_post_epoch()); // Always directly + assert(!is_post_epoch()); // Always directly - if (schema->node_id == own_nodeid()) - DBUG_VOID_RETURN; + if (schema->node_id == own_nodeid()) DBUG_VOID_RETURN; write_schema_op_to_binlog(m_thd, schema); @@ -4873,34 +4502,29 @@ class Ndb_schema_event_handler { DBUG_VOID_RETURN; } - - void - handle_grant_op(const Ndb_schema_op* schema) - { + void handle_grant_op(const Ndb_schema_op *schema) { DBUG_ENTER("handle_grant_op"); Ndb_local_connection sql_runner(m_thd); - assert(!is_post_epoch()); // Always directly + assert(!is_post_epoch()); // Always directly // Participant never takes GSL - assert(get_thd_ndb(m_thd)->check_option(Thd_ndb::IS_SCHEMA_DIST_PARTICIPANT)); + assert( + get_thd_ndb(m_thd)->check_option(Thd_ndb::IS_SCHEMA_DIST_PARTICIPANT)); - if (schema->node_id == own_nodeid()) - DBUG_VOID_RETURN; + if (schema->node_id == own_nodeid()) DBUG_VOID_RETURN; /* SOT_GRANT was sent by a pre-8.0 mysqld. Just ignore it. */ - if(schema->type == SOT_GRANT) - { + if (schema->type == SOT_GRANT) { ndb_log_verbose(9, "Got SOT_GRANT event, disregarding."); DBUG_VOID_RETURN; } /* For SOT_ACL_SNAPSHOT, update the snapshots for the users listed. - */ - if(schema->type == SOT_ACL_SNAPSHOT) - { - if(! Ndb_stored_grants::update_users_from_snapshot(m_thd, schema->query)) - { + */ + if (schema->type == SOT_ACL_SNAPSHOT) { + if (!Ndb_stored_grants::update_users_from_snapshot(m_thd, + schema->query)) { ndb_log_error("Failed to apply ACL snapshot for users: %s", schema->query); } @@ -4910,20 +4534,19 @@ class Ndb_schema_event_handler { DBUG_ASSERT(schema->type == SOT_ACL_STATEMENT || schema->type == SOT_ACL_STATEMENT_REFRESH); - LEX_CSTRING thd_db_save= m_thd->db(); + LEX_CSTRING thd_db_save = m_thd->db(); std::string use_db(schema->db); std::string query(schema->query); - if(! 
query.compare(0, 4, "use ")) - { + if (!query.compare(0, 4, "use ")) { size_t delimiter = query.find_first_of(';'); - use_db = query.substr(4, delimiter-4); - query = query.substr(delimiter+1); + use_db = query.substr(4, delimiter - 4); + query = query.substr(delimiter + 1); } /* Execute ACL query */ - LEX_CSTRING set_db = {use_db.c_str() , use_db.length()}; + LEX_CSTRING set_db = {use_db.c_str(), use_db.length()}; m_thd->reset_db(set_db); ndb_log_verbose(40, "Using database: %s", use_db.c_str()); sql_runner.run_acl_statement(query); @@ -4931,8 +4554,7 @@ class Ndb_schema_event_handler { /* Reset database */ m_thd->reset_db(thd_db_save); - if(schema->type == SOT_ACL_STATEMENT_REFRESH) - { + if (schema->type == SOT_ACL_STATEMENT_REFRESH) { Ndb_stored_grants::maintain_cache(m_thd); } @@ -4949,28 +4571,22 @@ class Ndb_schema_event_handler { Ndb *ndb = m_thd_ndb->ndb; NdbDictionary::Dictionary *dict = ndb->getDictionary(); std::vector datafile_names; - if (!ndb_get_datafile_names(dict, tablespace_name, datafile_names)) - { + if (!ndb_get_datafile_names(dict, tablespace_name, datafile_names)) { log_NDB_error(dict->getNdbError()); ndb_log_error("Failed to get data files assigned to tablespace '%s'", tablespace_name); DBUG_RETURN(false); } - if (!dd_client.mdl_lock_tablespace_exclusive(tablespace_name)) - { + if (!dd_client.mdl_lock_tablespace_exclusive(tablespace_name)) { log_and_clear_THD_conditions(); ndb_log_error("MDL lock could not be acquired for tablespace '%s'", tablespace_name); DBUG_RETURN(false); } - if (!dd_client.install_tablespace(tablespace_name, - datafile_names, - id, - version, - true /* force_overwrite */)) - { + if (!dd_client.install_tablespace(tablespace_name, datafile_names, id, + version, true /* force_overwrite */)) { log_and_clear_THD_conditions(); ndb_log_error("Failed to install tablespace '%s' in DD", tablespace_name); DBUG_RETURN(false); @@ -4979,15 +4595,12 @@ class Ndb_schema_event_handler { DBUG_RETURN(true); } - void - handle_create_tablespace(const Ndb_schema_op* schema) - { + void handle_create_tablespace(const Ndb_schema_op *schema) { DBUG_ENTER("handle_create_tablespace"); - assert(!is_post_epoch()); // Always directly + assert(!is_post_epoch()); // Always directly - if (schema->node_id == own_nodeid()) - { + if (schema->node_id == own_nodeid()) { DBUG_VOID_RETURN; } @@ -5004,35 +4617,28 @@ class Ndb_schema_event_handler { DBUG_VOID_RETURN; } - - bool get_tablespace_table_refs(const char *name, - std::vector - &table_refs) const - { + bool get_tablespace_table_refs( + const char *name, + std::vector &table_refs) const { Ndb_dd_client dd_client(m_thd); - if (!dd_client.mdl_lock_tablespace(name, - true /* intention_exclusive */)) - { + if (!dd_client.mdl_lock_tablespace(name, true /* intention_exclusive */)) { log_and_clear_THD_conditions(); ndb_log_error("MDL lock could not be acquired on tablespace '%s'", name); return false; } const dd::Tablespace *existing = nullptr; - if (!dd_client.get_tablespace(name, &existing)) - { + if (!dd_client.get_tablespace(name, &existing)) { log_and_clear_THD_conditions(); return false; } - if (existing == nullptr) - { + if (existing == nullptr) { // Tablespace doesn't exist, no need to update tables after the ALTER return true; } - if (!ndb_dd_disk_data_get_table_refs(m_thd, *existing, table_refs)) - { + if (!ndb_dd_disk_data_get_table_refs(m_thd, *existing, table_refs)) { log_and_clear_THD_conditions(); ndb_log_error("Failed to get table refs in tablespace '%s'", name); return false; @@ -5040,16 +4646,11 @@ class 
Ndb_schema_event_handler { return true; } - - bool - update_tablespace_id_in_tables(Ndb_dd_client &dd_client, - const char *tablespace_name, - const std::vector<dd::Tablespace_table_ref> - &table_refs) const - { + bool update_tablespace_id_in_tables( + Ndb_dd_client &dd_client, const char *tablespace_name, + const std::vector<dd::Tablespace_table_ref> &table_refs) const { if (!dd_client.mdl_lock_tablespace(tablespace_name, - true /* intention_exclusive */)) - { + true /* intention_exclusive */)) { log_and_clear_THD_conditions(); ndb_log_error("MDL lock could not be acquired on tablespace '%s'", tablespace_name); @@ -5057,16 +4658,14 @@ class Ndb_schema_event_handler { } dd::Object_id tablespace_id; - if (!dd_client.lookup_tablespace_id(tablespace_name, &tablespace_id)) - { + if (!dd_client.lookup_tablespace_id(tablespace_name, &tablespace_id)) { log_and_clear_THD_conditions(); ndb_log_error("Failed to retrieve object id of tablespace '%s'", tablespace_name); return false; } - for (auto &table_ref : table_refs) - { + for (auto &table_ref : table_refs) { // Convert table_refs to correct case when necessary const std::string schema_name = ndb_dd_fs_name_case(table_ref.m_schema_name.c_str()); @@ -5091,16 +4690,12 @@ class Ndb_schema_event_handler { return true; } - - void - handle_alter_tablespace(const Ndb_schema_op* schema) - { + void handle_alter_tablespace(const Ndb_schema_op *schema) { DBUG_ENTER("handle_alter_tablespace"); - assert(!is_post_epoch()); // Always directly + assert(!is_post_epoch()); // Always directly - if (schema->node_id == own_nodeid()) - { + if (schema->node_id == own_nodeid()) { DBUG_VOID_RETURN; } @@ -5110,8 +4705,7 @@ class Ndb_schema_event_handler { // required for after the ALTER as the tablespace id of every table // should be updated std::vector<dd::Tablespace_table_ref> table_refs; - if (!get_tablespace_table_refs(schema->name, table_refs)) - { + if (!get_tablespace_table_refs(schema->name, table_refs)) { ndb_log_error("Distribution of ALTER TABLESPACE '%s' failed", schema->name); DBUG_VOID_RETURN; @@ -5125,13 +4719,14 @@ class Ndb_schema_event_handler { DBUG_VOID_RETURN; } - if (!table_refs.empty()) - { + if (!table_refs.empty()) { // Update tables in the tablespace with the new tablespace id - if (!update_tablespace_id_in_tables(dd_client, schema->name, table_refs)) - { - ndb_log_error("Failed to update tables in tablespace '%s' with the " - "new tablespace id", schema->name); + if (!update_tablespace_id_in_tables(dd_client, schema->name, + table_refs)) { + ndb_log_error( + "Failed to update tables in tablespace '%s' with the " + "new tablespace id", + schema->name); ndb_log_error("Distribution of ALTER TABLESPACE '%s' failed", schema->name); DBUG_VOID_RETURN; @@ -5141,24 +4736,19 @@ class Ndb_schema_event_handler { DBUG_VOID_RETURN; } - - void - handle_drop_tablespace(const Ndb_schema_op* schema) - { + void handle_drop_tablespace(const Ndb_schema_op *schema) { DBUG_ENTER("handle_drop_tablespace"); - assert(is_post_epoch()); // Always after epoch + assert(is_post_epoch()); // Always after epoch - if (schema->node_id == own_nodeid()) - { + if (schema->node_id == own_nodeid()) { DBUG_VOID_RETURN; } write_schema_op_to_binlog(m_thd, schema); Ndb_dd_client dd_client(m_thd); - if (!dd_client.mdl_lock_tablespace_exclusive(schema->name)) - { + if (!dd_client.mdl_lock_tablespace_exclusive(schema->name)) { log_and_clear_THD_conditions(); ndb_log_error("MDL lock could not be acquired for tablespace '%s'", schema->name); @@ -5168,8 +4758,7 @@ class Ndb_schema_event_handler { } if (!dd_client.drop_tablespace(schema->name, - false /*
fail_if_not_exists */)) - { + false /* fail_if_not_exists */)) { log_and_clear_THD_conditions(); ndb_log_error("Failed to drop tablespace '%s' from DD", schema->name); ndb_log_error("Distribution of DROP TABLESPACE '%s' failed", @@ -5188,10 +4777,9 @@ class Ndb_schema_event_handler { logfile_group_name, id, version)); Ndb *ndb = m_thd_ndb->ndb; - NdbDictionary::Dictionary* dict = ndb->getDictionary(); + NdbDictionary::Dictionary *dict = ndb->getDictionary(); std::vector undofile_names; - if (!ndb_get_undofile_names(dict, logfile_group_name, undofile_names)) - { + if (!ndb_get_undofile_names(dict, logfile_group_name, undofile_names)) { log_NDB_error(dict->getNdbError()); ndb_log_error("Failed to get undo files assigned to logfile group '%s'", logfile_group_name); @@ -5199,20 +4787,15 @@ class Ndb_schema_event_handler { } Ndb_dd_client dd_client(m_thd); - if (!dd_client.mdl_lock_logfile_group_exclusive(logfile_group_name)) - { + if (!dd_client.mdl_lock_logfile_group_exclusive(logfile_group_name)) { log_and_clear_THD_conditions(); ndb_log_error("MDL lock could not be acquired for logfile group '%s'", logfile_group_name); DBUG_RETURN(false); } - if (!dd_client.install_logfile_group(logfile_group_name, - undofile_names, - id, - version, - true /* force_overwrite */)) - { + if (!dd_client.install_logfile_group(logfile_group_name, undofile_names, id, + version, true /* force_overwrite */)) { log_and_clear_THD_conditions(); ndb_log_error("Failed to install logfile group '%s' in DD", logfile_group_name); @@ -5223,15 +4806,12 @@ class Ndb_schema_event_handler { DBUG_RETURN(true); } - void - handle_create_logfile_group(const Ndb_schema_op* schema) - { + void handle_create_logfile_group(const Ndb_schema_op *schema) { DBUG_ENTER("handle_create_logfile_group"); - assert(!is_post_epoch()); // Always directly + assert(!is_post_epoch()); // Always directly - if (schema->node_id == own_nodeid()) - { + if (schema->node_id == own_nodeid()) { DBUG_VOID_RETURN; } @@ -5246,16 +4826,12 @@ class Ndb_schema_event_handler { DBUG_VOID_RETURN; } - - void - handle_alter_logfile_group(const Ndb_schema_op* schema) - { + void handle_alter_logfile_group(const Ndb_schema_op *schema) { DBUG_ENTER("handle_alter_logfile_group"); - assert(!is_post_epoch()); // Always directly + assert(!is_post_epoch()); // Always directly - if (schema->node_id == own_nodeid()) - { + if (schema->node_id == own_nodeid()) { DBUG_VOID_RETURN; } @@ -5270,24 +4846,19 @@ class Ndb_schema_event_handler { DBUG_VOID_RETURN; } - - void - handle_drop_logfile_group(const Ndb_schema_op* schema) - { + void handle_drop_logfile_group(const Ndb_schema_op *schema) { DBUG_ENTER("handle_drop_logfile_group"); - assert(is_post_epoch()); // Always after epoch + assert(is_post_epoch()); // Always after epoch - if (schema->node_id == own_nodeid()) - { + if (schema->node_id == own_nodeid()) { DBUG_VOID_RETURN; } write_schema_op_to_binlog(m_thd, schema); Ndb_dd_client dd_client(m_thd); - if (!dd_client.mdl_lock_logfile_group_exclusive(schema->name)) - { + if (!dd_client.mdl_lock_logfile_group_exclusive(schema->name)) { log_and_clear_THD_conditions(); ndb_log_error("MDL lock could not be acquired for logfile group '%s'", schema->name); @@ -5297,8 +4868,7 @@ class Ndb_schema_event_handler { } if (!dd_client.drop_logfile_group(schema->name, - false /* fail_if_not_exists */)) - { + false /* fail_if_not_exists */)) { log_and_clear_THD_conditions(); ndb_log_error("Failed to drop logfile group '%s' from DD", schema->name); ndb_log_error("Distribution of DROP LOGFILE GROUP '%s' 
failed", @@ -5310,25 +4880,19 @@ class Ndb_schema_event_handler { DBUG_VOID_RETURN; } - - int - handle_schema_op(const Ndb_schema_op* schema) - { + int handle_schema_op(const Ndb_schema_op *schema) { DBUG_ENTER("handle_schema_op"); { - const SCHEMA_OP_TYPE schema_type= (SCHEMA_OP_TYPE)schema->type; + const SCHEMA_OP_TYPE schema_type = (SCHEMA_OP_TYPE)schema->type; ndb_log_verbose(19, "got schema event on '%s.%s(%u/%u)' query: '%s' " "type: %s(%d) node: %u slock: %x%08x", - schema->db, schema->name, - schema->id, schema->version, + schema->db, schema->name, schema->id, schema->version, schema->query, Ndb_schema_dist_client::type_name( static_cast(schema->type)), - schema_type, - schema->node_id, - schema->slock.bitmap[1], + schema_type, schema->node_id, schema->slock.bitmap[1], schema->slock.bitmap[0]); DBUG_EXECUTE_IF("ndb_schema_op_start_crash", DBUG_SUICIDE();); @@ -5336,8 +4900,7 @@ class Ndb_schema_event_handler { // Return to simulate schema operation timeout DBUG_EXECUTE_IF("ndb_schema_op_start_timeout", DBUG_RETURN(0);); - if ((schema->db[0] == 0) && (schema->name[0] == 0)) - { + if ((schema->db[0] == 0) && (schema->name[0] == 0)) { /** * This happens if there is a schema event on a table (object) * that this mysqld does not know about. @@ -5346,8 +4909,7 @@ class Ndb_schema_event_handler { DBUG_RETURN(0); } - if (schema_type == SOT_CLEAR_SLOCK) - { + if (schema_type == SOT_CLEAR_SLOCK) { // Handle the ack after epoch to ensure that schema events are inserted // in the binlog after any data events handle_after_epoch(schema); @@ -5385,7 +4947,7 @@ class Ndb_schema_event_handler { // Test schema dist client killed if (DBUG_EVALUATE_IF("ndb_schema_dist_client_killed", true, false)) { // Wait until the Client has set "coordinator completed" - while(!ndb_schema_object->check_coordinator_completed()) + while (!ndb_schema_object->check_coordinator_completed()) ndb_milli_sleep(100); } } @@ -5395,86 +4957,84 @@ class Ndb_schema_event_handler { opt_ndb_schema_dist_lock_wait_timeout); Ndb_schema_op_result schema_op_result; - switch (schema_type) - { - case SOT_CLEAR_SLOCK: - // Already handled above, should never end up here - ndbcluster::ndbrequire(schema_type != SOT_CLEAR_SLOCK); - DBUG_RETURN(0); - - case SOT_ALTER_TABLE_COMMIT: - case SOT_RENAME_TABLE_PREPARE: - case SOT_ONLINE_ALTER_TABLE_PREPARE: - case SOT_ONLINE_ALTER_TABLE_COMMIT: - case SOT_RENAME_TABLE: - case SOT_DROP_TABLE: - case SOT_DROP_DB: - case SOT_DROP_TABLESPACE: - case SOT_DROP_LOGFILE_GROUP: - handle_after_epoch(schema); - DBUG_RETURN(0); + switch (schema_type) { + case SOT_CLEAR_SLOCK: + // Already handled above, should never end up here + ndbcluster::ndbrequire(schema_type != SOT_CLEAR_SLOCK); + DBUG_RETURN(0); - case SOT_TRUNCATE_TABLE: - handle_truncate_table(schema); - break; + case SOT_ALTER_TABLE_COMMIT: + case SOT_RENAME_TABLE_PREPARE: + case SOT_ONLINE_ALTER_TABLE_PREPARE: + case SOT_ONLINE_ALTER_TABLE_COMMIT: + case SOT_RENAME_TABLE: + case SOT_DROP_TABLE: + case SOT_DROP_DB: + case SOT_DROP_TABLESPACE: + case SOT_DROP_LOGFILE_GROUP: + handle_after_epoch(schema); + DBUG_RETURN(0); - case SOT_CREATE_TABLE: - handle_create_table(schema); - break; + case SOT_TRUNCATE_TABLE: + handle_truncate_table(schema); + break; - case SOT_CREATE_DB: - handle_create_db(schema); - break; + case SOT_CREATE_TABLE: + handle_create_table(schema); + break; - case SOT_ALTER_DB: - handle_alter_db(schema); - break; + case SOT_CREATE_DB: + handle_create_db(schema); + break; - case SOT_CREATE_USER: - case SOT_DROP_USER: - case 
SOT_RENAME_USER: - case SOT_GRANT: - case SOT_REVOKE: - case SOT_ACL_SNAPSHOT: - case SOT_ACL_STATEMENT: - case SOT_ACL_STATEMENT_REFRESH: - handle_grant_op(schema); - break; + case SOT_ALTER_DB: + handle_alter_db(schema); + break; - case SOT_TABLESPACE: - case SOT_LOGFILE_GROUP: - if (schema->node_id == own_nodeid()) + case SOT_CREATE_USER: + case SOT_DROP_USER: + case SOT_RENAME_USER: + case SOT_GRANT: + case SOT_REVOKE: + case SOT_ACL_SNAPSHOT: + case SOT_ACL_STATEMENT: + case SOT_ACL_STATEMENT_REFRESH: + handle_grant_op(schema); break; - write_schema_op_to_binlog(m_thd, schema); - break; - case SOT_RENAME_TABLE_NEW: - /* - Only very old MySQL Server connected to the cluster may - send this schema operation, ignore it - */ - ndb_log_error("Skipping old schema operation" - "(RENAME_TABLE_NEW) on %s.%s", - schema->db, schema->name); - DBUG_ASSERT(false); - break; + case SOT_TABLESPACE: + case SOT_LOGFILE_GROUP: + if (schema->node_id == own_nodeid()) break; + write_schema_op_to_binlog(m_thd, schema); + break; - case SOT_CREATE_TABLESPACE: - handle_create_tablespace(schema); - break; + case SOT_RENAME_TABLE_NEW: + /* + Only very old MySQL Server connected to the cluster may + send this schema operation, ignore it + */ + ndb_log_error( + "Skipping old schema operation" + "(RENAME_TABLE_NEW) on %s.%s", + schema->db, schema->name); + DBUG_ASSERT(false); + break; - case SOT_ALTER_TABLESPACE: - handle_alter_tablespace(schema); - break; + case SOT_CREATE_TABLESPACE: + handle_create_tablespace(schema); + break; - case SOT_CREATE_LOGFILE_GROUP: - handle_create_logfile_group(schema); - break; + case SOT_ALTER_TABLESPACE: + handle_alter_tablespace(schema); + break; - case SOT_ALTER_LOGFILE_GROUP: - handle_alter_logfile_group(schema); - break; + case SOT_CREATE_LOGFILE_GROUP: + handle_create_logfile_group(schema); + break; + case SOT_ALTER_LOGFILE_GROUP: + handle_alter_logfile_group(schema); + break; } if (schema->schema_op_id) { @@ -5499,61 +5059,59 @@ class Ndb_schema_event_handler { void handle_schema_op_post_epoch(const Ndb_schema_op *schema, Ndb_schema_op_result &) { DBUG_ENTER("handle_schema_op_post_epoch"); - DBUG_PRINT("enter", ("%s.%s: query: '%s' type: %d", - schema->db, schema->name, - schema->query, schema->type)); + DBUG_PRINT("enter", ("%s.%s: query: '%s' type: %d", schema->db, + schema->name, schema->query, schema->type)); // Set the custom lock_wait_timeout for schema distribution Lock_wait_timeout_guard lwt_guard(m_thd, opt_ndb_schema_dist_lock_wait_timeout); { - const SCHEMA_OP_TYPE schema_type= (SCHEMA_OP_TYPE)schema->type; + const SCHEMA_OP_TYPE schema_type = (SCHEMA_OP_TYPE)schema->type; ndb_log_verbose(9, "%s - %s.%s", Ndb_schema_dist_client::type_name( static_cast(schema->type)), schema->db, schema->name); - switch (schema_type) - { - case SOT_DROP_DB: - handle_drop_db(schema); - break; + switch (schema_type) { + case SOT_DROP_DB: + handle_drop_db(schema); + break; - case SOT_DROP_TABLE: - handle_drop_table(schema); - break; + case SOT_DROP_TABLE: + handle_drop_table(schema); + break; - case SOT_RENAME_TABLE_PREPARE: - handle_rename_table_prepare(schema); - break; + case SOT_RENAME_TABLE_PREPARE: + handle_rename_table_prepare(schema); + break; - case SOT_RENAME_TABLE: - handle_rename_table(schema); - break; + case SOT_RENAME_TABLE: + handle_rename_table(schema); + break; - case SOT_ALTER_TABLE_COMMIT: - handle_offline_alter_table_commit(schema); - break; + case SOT_ALTER_TABLE_COMMIT: + handle_offline_alter_table_commit(schema); + break; - case SOT_ONLINE_ALTER_TABLE_PREPARE: - 
handle_online_alter_table_prepare(schema); - break; + case SOT_ONLINE_ALTER_TABLE_PREPARE: + handle_online_alter_table_prepare(schema); + break; - case SOT_ONLINE_ALTER_TABLE_COMMIT: - handle_online_alter_table_commit(schema); - break; + case SOT_ONLINE_ALTER_TABLE_COMMIT: + handle_online_alter_table_commit(schema); + break; - case SOT_DROP_TABLESPACE: - handle_drop_tablespace(schema); - break; + case SOT_DROP_TABLESPACE: + handle_drop_tablespace(schema); + break; - case SOT_DROP_LOGFILE_GROUP: - handle_drop_logfile_group(schema); - break; + case SOT_DROP_LOGFILE_GROUP: + handle_drop_logfile_group(schema); + break; - default: - DBUG_ASSERT(false); + default: + DBUG_ASSERT(false); } } @@ -5565,9 +5123,9 @@ class Ndb_schema_event_handler { THD *const m_thd; Thd_ndb *const m_thd_ndb; - MEM_ROOT* m_mem_root; + MEM_ROOT *m_mem_root; uint m_own_nodeid; - Ndb_schema_dist_data& m_schema_dist_data; + Ndb_schema_dist_data &m_schema_dist_data; bool m_post_epoch; bool is_post_epoch(void) const { return m_post_epoch; } @@ -5576,18 +5134,18 @@ class Ndb_schema_event_handler { public: Ndb_schema_event_handler() = delete; - Ndb_schema_event_handler(const Ndb_schema_event_handler&) = delete; - - Ndb_schema_event_handler(THD* thd, MEM_ROOT* mem_root, uint own_nodeid, - Ndb_schema_dist_data& schema_dist_data): - m_thd(thd), m_thd_ndb(get_thd_ndb(thd)), m_mem_root(mem_root), m_own_nodeid(own_nodeid), - m_schema_dist_data(schema_dist_data), - m_post_epoch(false) - { - } - - ~Ndb_schema_event_handler() - { + Ndb_schema_event_handler(const Ndb_schema_event_handler &) = delete; + + Ndb_schema_event_handler(THD *thd, MEM_ROOT *mem_root, uint own_nodeid, + Ndb_schema_dist_data &schema_dist_data) + : m_thd(thd), + m_thd_ndb(get_thd_ndb(thd)), + m_mem_root(mem_root), + m_own_nodeid(own_nodeid), + m_schema_dist_data(schema_dist_data), + m_post_epoch(false) {} + + ~Ndb_schema_event_handler() { // There should be no work left todo... 
DBUG_ASSERT(m_post_epoch_handle_list.elements == 0); } @@ -5631,7 +5189,6 @@ class Ndb_schema_event_handler { void handle_schema_result_event(Ndb *s_ndb, NdbEventOperation *pOp, NdbDictionary::Event::TableEvent event_type, const Ndb_event_data *event_data) { - // Test "coordinator abort active" by simulating cluster failure if (DBUG_EVALUATE_IF("ndb_schema_dist_coord_abort_active", true, false)) { ndb_log_info("Simulating cluster failure..."); @@ -5646,25 +5203,25 @@ class Ndb_schema_event_handler { event_data->unpack_string(4)); break; - case NdbDictionary::Event::TE_CLUSTER_FAILURE: - // fall through - case NdbDictionary::Event::TE_DROP: - // Cluster failure or ndb_schema_result table dropped - if (ndb_binlog_tables_inited && ndb_binlog_running) - ndb_log_verbose( - 1, "NDB Binlog: NDB tables initially readonly on reconnect."); + case NdbDictionary::Event::TE_CLUSTER_FAILURE: + // fall through + case NdbDictionary::Event::TE_DROP: + // Cluster failure or ndb_schema_result table dropped + if (ndb_binlog_tables_inited && ndb_binlog_running) + ndb_log_verbose( + 1, "NDB Binlog: NDB tables initially readonly on reconnect."); - // Indicate util tables not ready - mysql_mutex_lock(&injector_data_mutex); - ndb_binlog_tables_inited= false; - ndb_binlog_is_ready= false; - mysql_mutex_unlock(&injector_data_mutex); + // Indicate util tables not ready + mysql_mutex_lock(&injector_data_mutex); + ndb_binlog_tables_inited = false; + ndb_binlog_is_ready = false; + mysql_mutex_unlock(&injector_data_mutex); - ndb_tdc_close_cached_tables(); + ndb_tdc_close_cached_tables(); - // Tear down the event subscription on ndb_schema_result - ndbcluster_binlog_event_operation_teardown(m_thd, s_ndb, pOp); - break; + // Tear down the event subscription on ndb_schema_result + ndbcluster_binlog_event_operation_teardown(m_thd, s_ndb, pOp); + break; default: // Ignore other event types @@ -5673,12 +5230,11 @@ class Ndb_schema_event_handler { return; } - void handle_event(Ndb* s_ndb, NdbEventOperation *pOp) - { + void handle_event(Ndb *s_ndb, NdbEventOperation *pOp) { DBUG_ENTER("handle_event"); - const Ndb_event_data *event_data= - static_cast(pOp->getCustomData()); + const Ndb_event_data *event_data = + static_cast(pOp->getCustomData()); if (Ndb_schema_dist_client::is_schema_dist_result_table( event_data->share->db, event_data->share->table_name)) { // Received event on ndb_schema_result table @@ -5686,10 +5242,9 @@ class Ndb_schema_event_handler { DBUG_VOID_RETURN; } - if (!check_is_ndb_schema_event(event_data)) - DBUG_VOID_RETURN; + if (!check_is_ndb_schema_event(event_data)) DBUG_VOID_RETURN; - NDBEVENT::TableEvent ev_type= pOp->getEventType(); + NDBEVENT::TableEvent ev_type = pOp->getEventType(); // Test "fail all schema ops" by simulating cluster failure // before the schema operation has been registered @@ -5706,85 +5261,80 @@ class Ndb_schema_event_handler { ndbcluster::ndbrequire(NDB_SCHEMA_OBJECT::count_active_schema_ops() == 1); } - switch (ev_type) - { - case NDBEVENT::TE_INSERT: - case NDBEVENT::TE_UPDATE: - { - /* ndb_schema table, row INSERTed or UPDATEed*/ - const Ndb_schema_op* schema_op= - Ndb_schema_op::create(event_data, pOp->getAnyValue()); - handle_schema_op(schema_op); - break; - } - - case NDBEVENT::TE_DELETE: - /* ndb_schema table, row DELETEd */ - break; + switch (ev_type) { + case NDBEVENT::TE_INSERT: + case NDBEVENT::TE_UPDATE: { + /* ndb_schema table, row INSERTed or UPDATEed*/ + const Ndb_schema_op *schema_op = + Ndb_schema_op::create(event_data, pOp->getAnyValue()); + 
handle_schema_op(schema_op); + break; + } - case NDBEVENT::TE_CLUSTER_FAILURE: - ndb_log_verbose(1, "cluster failure at epoch %u/%u.", - (uint)(pOp->getGCI() >> 32), (uint)(pOp->getGCI())); + case NDBEVENT::TE_DELETE: + /* ndb_schema table, row DELETEd */ + break; - // fall through - case NDBEVENT::TE_DROP: - /* ndb_schema table DROPped */ - if (ndb_binlog_tables_inited && ndb_binlog_running) - ndb_log_verbose( - 1, "NDB Binlog: NDB tables initially readonly on reconnect."); + case NDBEVENT::TE_CLUSTER_FAILURE: + ndb_log_verbose(1, "cluster failure at epoch %u/%u.", + (uint)(pOp->getGCI() >> 32), (uint)(pOp->getGCI())); - // Indicate util tables not ready - mysql_mutex_lock(&injector_data_mutex); - ndb_binlog_tables_inited= false; - ndb_binlog_is_ready= false; - mysql_mutex_unlock(&injector_data_mutex); + // fall through + case NDBEVENT::TE_DROP: + /* ndb_schema table DROPped */ + if (ndb_binlog_tables_inited && ndb_binlog_running) + ndb_log_verbose( + 1, "NDB Binlog: NDB tables initially readonly on reconnect."); - ndb_tdc_close_cached_tables(); + // Indicate util tables not ready + mysql_mutex_lock(&injector_data_mutex); + ndb_binlog_tables_inited = false; + ndb_binlog_is_ready = false; + mysql_mutex_unlock(&injector_data_mutex); - ndbcluster_binlog_event_operation_teardown(m_thd, s_ndb, pOp); + ndb_tdc_close_cached_tables(); - if (DBUG_EVALUATE_IF("ndb_schema_dist_client_not_ready", true, false)) { - ndb_log_info("Wait for client to detect not ready..."); - while (NDB_SCHEMA_OBJECT::count_active_schema_ops() > 0) - ndb_milli_sleep(100); - } - break; + ndbcluster_binlog_event_operation_teardown(m_thd, s_ndb, pOp); - case NDBEVENT::TE_ALTER: - /* ndb_schema table ALTERed */ - break; + if (DBUG_EVALUATE_IF("ndb_schema_dist_client_not_ready", true, false)) { + ndb_log_info("Wait for client to detect not ready..."); + while (NDB_SCHEMA_OBJECT::count_active_schema_ops() > 0) + ndb_milli_sleep(100); + } + break; - case NDBEVENT::TE_NODE_FAILURE: - { - /* Remove all subscribers for node */ - m_schema_dist_data.report_data_node_failure(pOp->getNdbdNodeId()); - check_wakeup_clients(Ndb_schema_dist::NODE_FAILURE, "Data node failed"); - break; - } + case NDBEVENT::TE_ALTER: + /* ndb_schema table ALTERed */ + break; - case NDBEVENT::TE_SUBSCRIBE: - { - /* Add node as subscriber */ - m_schema_dist_data.report_subscribe(pOp->getNdbdNodeId(), - pOp->getReqNodeId()); - // No 'check_wakeup_clients', adding subscribers doesn't complete anything - break; - } + case NDBEVENT::TE_NODE_FAILURE: { + /* Remove all subscribers for node */ + m_schema_dist_data.report_data_node_failure(pOp->getNdbdNodeId()); + check_wakeup_clients(Ndb_schema_dist::NODE_FAILURE, "Data node failed"); + break; + } - case NDBEVENT::TE_UNSUBSCRIBE: - { - /* Remove node as subscriber */ - m_schema_dist_data.report_unsubscribe(pOp->getNdbdNodeId(), + case NDBEVENT::TE_SUBSCRIBE: { + /* Add node as subscriber */ + m_schema_dist_data.report_subscribe(pOp->getNdbdNodeId(), pOp->getReqNodeId()); - check_wakeup_clients(Ndb_schema_dist::NODE_UNSUBSCRIBE, - "Node unsubscribed"); - break; - } + // No 'check_wakeup_clients', adding subscribers doesn't complete + // anything + break; + } - default: - { - ndb_log_error("unknown event %u, ignoring...", ev_type); - } + case NDBEVENT::TE_UNSUBSCRIBE: { + /* Remove node as subscriber */ + m_schema_dist_data.report_unsubscribe(pOp->getNdbdNodeId(), + pOp->getReqNodeId()); + check_wakeup_clients(Ndb_schema_dist::NODE_UNSUBSCRIBE, + "Node unsubscribed"); + break; + } + + default: { + 
ndb_log_error("unknown event %u, ignoring...", ev_type); + } } DBUG_VOID_RETURN; @@ -5794,27 +5344,26 @@ class Ndb_schema_event_handler { // This function is called repeatedly as epochs pass but checks should only // be performed at regular intervals. Check if it's time for one now and // calculate the time for next if time is up - if (likely(!m_schema_dist_data.time_for_check())) - return; + if (likely(!m_schema_dist_data.time_for_check())) return; const uint active_ops = m_schema_dist_data.active_schema_ops().size(); if (likely(active_ops == 0)) return; // Nothing to do at this time - ndb_log_info("Coordinator checking active schema operations, " - "epochs: (%u/%u,%u/%u,%u/%u), proc_info: '%s'", - (uint)(ndb_latest_handled_binlog_epoch >> 32), - (uint)(ndb_latest_handled_binlog_epoch), - (uint)(ndb_latest_received_binlog_epoch >> 32), - (uint)(ndb_latest_received_binlog_epoch), - (uint)(current_epoch >> 32), - (uint)(current_epoch), m_thd->proc_info); + ndb_log_info( + "Coordinator checking active schema operations, " + "epochs: (%u/%u,%u/%u,%u/%u), proc_info: '%s'", + (uint)(ndb_latest_handled_binlog_epoch >> 32), + (uint)(ndb_latest_handled_binlog_epoch), + (uint)(ndb_latest_received_binlog_epoch >> 32), + (uint)(ndb_latest_received_binlog_epoch), (uint)(current_epoch >> 32), + (uint)(current_epoch), m_thd->proc_info); for (const NDB_SCHEMA_OBJECT *schema_object : m_schema_dist_data.active_schema_ops()) { // Print into about this schema operation ndb_log_info(" - schema operation active on '%s.%s'", schema_object->db(), schema_object->name()); - if (ndb_log_get_verbose_level() > 30){ + if (ndb_log_get_verbose_level() > 30) { ndb_log_error_dump("%s", schema_object->to_string().c_str()); } @@ -5830,23 +5379,20 @@ class Ndb_schema_event_handler { } } - void post_epoch(ulonglong ndb_latest_epoch) - { - if (unlikely(m_post_epoch_handle_list.elements > 0)) - { + void post_epoch(ulonglong ndb_latest_epoch) { + if (unlikely(m_post_epoch_handle_list.elements > 0)) { // Set the flag used to check that functions are called at correct time - m_post_epoch= true; + m_post_epoch = true; /* process any operations that should be done after the epoch is complete */ - const Ndb_schema_op* schema; - while ((schema= m_post_epoch_handle_list.pop())) - { - if (schema->type == SOT_CLEAR_SLOCK){ + const Ndb_schema_op *schema; + while ((schema = m_post_epoch_handle_list.pop())) { + if (schema->type == SOT_CLEAR_SLOCK) { handle_clear_slock(schema); - continue; // Handled an ack -> don't send new ack + continue; // Handled an ack -> don't send new ack } Ndb_schema_op_result schema_op_result; @@ -5902,36 +5448,30 @@ struct ndb_binlog_index_row { struct ndb_binlog_index_row *next; }; - /** Utility class encapsulating the code which open and writes to the mysql.ndb_binlog_index table */ -class Ndb_binlog_index_table_util -{ - static constexpr const char* const DB_NAME = "mysql"; - static constexpr const char* const TABLE_NAME = "ndb_binlog_index"; +class Ndb_binlog_index_table_util { + static constexpr const char *const DB_NAME = "mysql"; + static constexpr const char *const TABLE_NAME = "ndb_binlog_index"; /* Open the ndb_binlog_index table for writing */ - static int - open_binlog_index_table(THD *thd, - TABLE **ndb_binlog_index) - { - const char *save_proc_info= + static int open_binlog_index_table(THD *thd, TABLE **ndb_binlog_index) { + const char *save_proc_info = thd_proc_info(thd, "Opening 'mysql.ndb_binlog_index'"); - TABLE_LIST tables(DB_NAME, // db - TABLE_NAME, // name, alias - TL_WRITE); // for write + 
TABLE_LIST tables(DB_NAME, // db + TABLE_NAME, // name, alias + TL_WRITE); // for write /* Only allow real table to be opened */ - tables.required_type= dd::enum_table_type::BASE_TABLE; + tables.required_type = dd::enum_table_type::BASE_TABLE; const uint flags = - MYSQL_LOCK_IGNORE_TIMEOUT; /* Wait for lock "infinitely" */ - if (open_and_lock_tables(thd, &tables, flags)) - { + MYSQL_LOCK_IGNORE_TIMEOUT; /* Wait for lock "infinitely" */ + if (open_and_lock_tables(thd, &tables, flags)) { if (thd->killed) DBUG_PRINT("error", ("NDB Binlog: Opening ndb_binlog_index: killed")); else @@ -5941,24 +5481,20 @@ class Ndb_binlog_index_table_util thd_proc_info(thd, save_proc_info); return -1; } - *ndb_binlog_index= tables.table; + *ndb_binlog_index = tables.table; thd_proc_info(thd, save_proc_info); return 0; } - /* Write rows to the ndb_binlog_index table */ - static int - write_rows_impl(THD *thd, - ndb_binlog_index_row *row) - { - int error= 0; - ndb_binlog_index_row *first= row; - TABLE *ndb_binlog_index= 0; + static int write_rows_impl(THD *thd, ndb_binlog_index_row *row) { + int error = 0; + ndb_binlog_index_row *first = row; + TABLE *ndb_binlog_index = 0; // Save previous option settings - ulonglong option_bits= thd->variables.option_bits; + ulonglong option_bits = thd->variables.option_bits; /* Assume this function is not called with an error set in thd @@ -5973,13 +5509,14 @@ class Ndb_binlog_index_table_util */ Disable_binlog_guard binlog_guard(thd); - if (open_binlog_index_table(thd, &ndb_binlog_index)) - { + if (open_binlog_index_table(thd, &ndb_binlog_index)) { if (thd->killed) - DBUG_PRINT("error", ("NDB Binlog: Unable to lock table ndb_binlog_index, killed")); + DBUG_PRINT( + "error", + ("NDB Binlog: Unable to lock table ndb_binlog_index, killed")); else ndb_log_error("NDB Binlog: Unable to lock table ndb_binlog_index"); - error= -1; + error = -1; goto add_ndb_binlog_index_err; } @@ -5987,127 +5524,110 @@ class Ndb_binlog_index_table_util ndb_binlog_index->use_all_columns(); // Turn off autocommit to do all writes in one transaction - thd->variables.option_bits|= OPTION_NOT_AUTOCOMMIT; - do - { - ulonglong epoch= 0, orig_epoch= 0; - uint orig_server_id= 0; + thd->variables.option_bits |= OPTION_NOT_AUTOCOMMIT; + do { + ulonglong epoch = 0, orig_epoch = 0; + uint orig_server_id = 0; // Initialize ndb_binlog_index->record[0] empty_record(ndb_binlog_index); - ndb_binlog_index->field[NBICOL_START_POS] ->store(first->start_master_log_pos, true); - ndb_binlog_index->field[NBICOL_START_FILE] ->store(first->start_master_log_file, (uint)strlen(first->start_master_log_file), &my_charset_bin); - ndb_binlog_index->field[NBICOL_EPOCH] ->store(epoch= first->epoch, true); - if (ndb_binlog_index->s->fields > NBICOL_ORIG_SERVERID) - { + ndb_binlog_index->field[NBICOL_START_POS]->store( first->start_master_log_pos, true); + ndb_binlog_index->field[NBICOL_START_FILE]->store( first->start_master_log_file, (uint)strlen(first->start_master_log_file), &my_charset_bin); + ndb_binlog_index->field[NBICOL_EPOCH]->store(epoch = first->epoch, true); + if (ndb_binlog_index->s->fields > NBICOL_ORIG_SERVERID) { /* Table has ORIG_SERVERID / ORIG_EPOCH columns.
* Write rows with different ORIG_SERVERID / ORIG_EPOCH * separately */ - ndb_binlog_index->field[NBICOL_NUM_INSERTS] - ->store(row->n_inserts, true); - ndb_binlog_index->field[NBICOL_NUM_UPDATES] - ->store(row->n_updates, true); - ndb_binlog_index->field[NBICOL_NUM_DELETES] - ->store(row->n_deletes, true); - ndb_binlog_index->field[NBICOL_NUM_SCHEMAOPS] - ->store(row->n_schemaops, true); - ndb_binlog_index->field[NBICOL_ORIG_SERVERID] - ->store(orig_server_id= row->orig_server_id, true); - ndb_binlog_index->field[NBICOL_ORIG_EPOCH] - ->store(orig_epoch= row->orig_epoch, true); - ndb_binlog_index->field[NBICOL_GCI] - ->store(first->gci, true); - - if (ndb_binlog_index->s->fields > NBICOL_NEXT_POS) - { + ndb_binlog_index->field[NBICOL_NUM_INSERTS]->store(row->n_inserts, + true); + ndb_binlog_index->field[NBICOL_NUM_UPDATES]->store(row->n_updates, + true); + ndb_binlog_index->field[NBICOL_NUM_DELETES]->store(row->n_deletes, + true); + ndb_binlog_index->field[NBICOL_NUM_SCHEMAOPS]->store(row->n_schemaops, + true); + ndb_binlog_index->field[NBICOL_ORIG_SERVERID]->store( + orig_server_id = row->orig_server_id, true); + ndb_binlog_index->field[NBICOL_ORIG_EPOCH]->store( + orig_epoch = row->orig_epoch, true); + ndb_binlog_index->field[NBICOL_GCI]->store(first->gci, true); + + if (ndb_binlog_index->s->fields > NBICOL_NEXT_POS) { /* Table has next log pos fields, fill them in */ - ndb_binlog_index->field[NBICOL_NEXT_POS] - ->store(first->next_master_log_pos, true); - ndb_binlog_index->field[NBICOL_NEXT_FILE] - ->store(first->next_master_log_file, - (uint)strlen(first->next_master_log_file), - &my_charset_bin); + ndb_binlog_index->field[NBICOL_NEXT_POS]->store( + first->next_master_log_pos, true); + ndb_binlog_index->field[NBICOL_NEXT_FILE]->store( + first->next_master_log_file, + (uint)strlen(first->next_master_log_file), &my_charset_bin); } - row= row->next; - } - else - { + row = row->next; + } else { /* Old schema : Table has no separate * ORIG_SERVERID / ORIG_EPOCH columns. 
* Merge operation counts and write one row */ - while ((row= row->next)) - { - first->n_inserts+= row->n_inserts; - first->n_updates+= row->n_updates; - first->n_deletes+= row->n_deletes; - first->n_schemaops+= row->n_schemaops; + while ((row = row->next)) { + first->n_inserts += row->n_inserts; + first->n_updates += row->n_updates; + first->n_deletes += row->n_deletes; + first->n_schemaops += row->n_schemaops; } - ndb_binlog_index->field[NBICOL_NUM_INSERTS] - ->store((ulonglong)first->n_inserts, true); - ndb_binlog_index->field[NBICOL_NUM_UPDATES] - ->store((ulonglong)first->n_updates, true); - ndb_binlog_index->field[NBICOL_NUM_DELETES] - ->store((ulonglong)first->n_deletes, true); - ndb_binlog_index->field[NBICOL_NUM_SCHEMAOPS] - ->store((ulonglong)first->n_schemaops, true); + ndb_binlog_index->field[NBICOL_NUM_INSERTS]->store( + (ulonglong)first->n_inserts, true); + ndb_binlog_index->field[NBICOL_NUM_UPDATES]->store( + (ulonglong)first->n_updates, true); + ndb_binlog_index->field[NBICOL_NUM_DELETES]->store( + (ulonglong)first->n_deletes, true); + ndb_binlog_index->field[NBICOL_NUM_SCHEMAOPS]->store( + (ulonglong)first->n_schemaops, true); } - error= ndb_binlog_index->file->ha_write_row(ndb_binlog_index->record[0]); + error = ndb_binlog_index->file->ha_write_row(ndb_binlog_index->record[0]); /* Fault injection to test logging */ if (DBUG_EVALUATE_IF("ndb_injector_binlog_index_write_fail_random", true, - false)) - { - if ((((uint32)rand()) % 10) == 9) - { + false)) { + if ((((uint32)rand()) % 10) == 9) { ndb_log_error("NDB Binlog: Injecting random write failure"); - error= ndb_binlog_index->file->ha_write_row(ndb_binlog_index->record[0]); + error = + ndb_binlog_index->file->ha_write_row(ndb_binlog_index->record[0]); } } - if (error) - { - ndb_log_error("NDB Binlog: Failed writing to ndb_binlog_index for " - "epoch %u/%u orig_server_id %u orig_epoch %u/%u " - "with error %d.", - uint(epoch >> 32), uint(epoch), - orig_server_id, - uint(orig_epoch >> 32), uint(orig_epoch), - error); - + if (error) { + ndb_log_error( + "NDB Binlog: Failed writing to ndb_binlog_index for " + "epoch %u/%u orig_server_id %u orig_epoch %u/%u " + "with error %d.", + uint(epoch >> 32), uint(epoch), orig_server_id, + uint(orig_epoch >> 32), uint(orig_epoch), error); + bool seen_error_row = false; - ndb_binlog_index_row* cursor= first; - do - { + ndb_binlog_index_row *cursor = first; + do { char tmp[128]; if (ndb_binlog_index->s->fields > NBICOL_ORIG_SERVERID) - snprintf(tmp, sizeof(tmp), "%u/%u,%u,%u/%u", - uint(epoch >> 32), uint(epoch), - uint(cursor->orig_server_id), - uint(cursor->orig_epoch >> 32), - uint(cursor->orig_epoch)); + snprintf(tmp, sizeof(tmp), "%u/%u,%u,%u/%u", uint(epoch >> 32), + uint(epoch), uint(cursor->orig_server_id), + uint(cursor->orig_epoch >> 32), uint(cursor->orig_epoch)); else snprintf(tmp, sizeof(tmp), "%u/%u", uint(epoch >> 32), uint(epoch)); bool error_row = (row == (cursor->next)); - ndb_log_error("NDB Binlog: Writing row (%s) to ndb_binlog_index - %s", - tmp, - (error_row?"ERROR": - (seen_error_row?"Discarded":"OK"))); + ndb_log_error( + "NDB Binlog: Writing row (%s) to ndb_binlog_index - %s", tmp, + (error_row ? "ERROR" : (seen_error_row ? "Discarded" : "OK"))); seen_error_row |= error_row; } while ((cursor = cursor->next)); - error= -1; + error = -1; goto add_ndb_binlog_index_err; } } while (row); @@ -6119,29 +5639,25 @@ class Ndb_binlog_index_table_util Note, trans_rollback_stmt() is defined to never fail. 
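The merge loop above is the whole story for pre-ORIG_SERVERID schemas: every per-origin row is folded into the head row before a single insert is issued. The same aggregation in isolation (Row is a simplified stand-in for ndb_binlog_index_row):

struct Row {
  unsigned n_inserts, n_updates, n_deletes, n_schemaops;
  Row *next;
};

static void merge_counts(Row *first) {
  // Fold every following row's operation counters into the head row,
  // mirroring the while ((row = row->next)) loop in write_rows_impl()
  for (Row *row = first->next; row != nullptr; row = row->next) {
    first->n_inserts += row->n_inserts;
    first->n_updates += row->n_updates;
    first->n_deletes += row->n_deletes;
    first->n_schemaops += row->n_schemaops;
  }
}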
*/ thd->get_stmt_da()->set_overwrite_status(true); - if (error) - { + if (error) { // Error, rollback trans_rollback_stmt(thd); - } - else - { + } else { assert(!thd->is_error()); // Commit - const bool failed= trans_commit_stmt(thd); - if (failed || - thd->transaction_rollback_request) - { + const bool failed = trans_commit_stmt(thd); + if (failed || thd->transaction_rollback_request) { /* Transaction failed to commit or was rolled back internally by the engine print an error message in the log and return the error, which will cause replication to stop. */ - error= thd->get_stmt_da()->mysql_errno(); - ndb_log_error("NDB Binlog: Failed committing transaction to " - "ndb_binlog_index with error %d.", - error); + error = thd->get_stmt_da()->mysql_errno(); + ndb_log_error( + "NDB Binlog: Failed committing transaction to " + "ndb_binlog_index with error %d.", + error); trans_rollback_stmt(thd); } } @@ -6149,7 +5665,7 @@ class Ndb_binlog_index_table_util thd->get_stmt_da()->set_overwrite_status(false); // Restore previous option settings - thd->variables.option_bits= option_bits; + thd->variables.option_bits = option_bits; // Close the tables this thread has opened close_thread_tables(thd); @@ -6164,13 +5680,11 @@ class Ndb_binlog_index_table_util Write rows to the ndb_binlog_index table using a separate THD to avoid the write being killed */ - static - void write_rows_with_new_thd(ndb_binlog_index_row *rows) - { + static void write_rows_with_new_thd(ndb_binlog_index_row *rows) { // Create a new THD and retry the write - THD* new_thd = new THD; + THD *new_thd = new THD; new_thd->set_new_thread_id(); - new_thd->thread_stack = (char*)&new_thd; + new_thd->thread_stack = (char *)&new_thd; new_thd->store_globals(); new_thd->set_command(COM_DAEMON); new_thd->system_thread = SYSTEM_THREAD_NDBCLUSTER_BINLOG; @@ -6180,30 +5694,25 @@ class Ndb_binlog_index_table_util // Retry the write const int retry_result = write_rows_impl(new_thd, rows); - if (retry_result) - { - ndb_log_error("NDB Binlog: Failed writing to ndb_binlog_index table " - "while retrying after kill during shutdown"); - DBUG_ASSERT(false); // Crash in debug compile + if (retry_result) { + ndb_log_error( + "NDB Binlog: Failed writing to ndb_binlog_index table " + "while retrying after kill during shutdown"); + DBUG_ASSERT(false); // Crash in debug compile } new_thd->restore_globals(); delete new_thd; } -public: - + public: /* Write rows to the ndb_binlog_index table */ - static inline - int write_rows(THD *thd, - ndb_binlog_index_row *rows) - { + static inline int write_rows(THD *thd, ndb_binlog_index_row *rows) { return write_rows_impl(thd, rows); } - /* Retry write rows to the ndb_binlog_index table after the THD has been killed (which should only happen during mysqld shutdown). @@ -6216,9 +5725,8 @@ class Ndb_binlog_index_table_util a feature to have the THD in the list of global session since it should show up in SHOW PROCESSLIST. 
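write_rows_impl() saves thd->variables.option_bits, ORs in OPTION_NOT_AUTOCOMMIT so that all index rows land in one transaction, and restores the saved value on the common exit path. A hypothetical RAII formulation of that save/modify/restore (the mask parameter stands in for the real OPTION_NOT_AUTOCOMMIT constant):

class Option_bits_guard {
  unsigned long long &bits_;
  const unsigned long long saved_;

 public:
  Option_bits_guard(unsigned long long &bits, unsigned long long set_mask)
      : bits_(bits), saved_(bits) {
    bits_ |= set_mask;  // e.g. turn off autocommit for the duration
  }
  ~Option_bits_guard() { bits_ = saved_; }  // restored on every exit path
};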
*/ - static - void write_rows_retry_after_kill(THD* orig_thd, ndb_binlog_index_row *rows) - { + static void write_rows_retry_after_kill(THD *orig_thd, + ndb_binlog_index_row *rows) { // Should only be called when original THD has been killed DBUG_ASSERT(orig_thd->is_killed()); @@ -6259,39 +5767,35 @@ class Ndb_binlog_index_table_util const bool ignore_no_such_table = true; std::string where; where.append("File='").append(filename).append("'"); - if (mysqld.delete_rows(DB_NAME, TABLE_NAME, ignore_no_such_table, - where)) { + if (mysqld.delete_rows(DB_NAME, TABLE_NAME, ignore_no_such_table, where)) { // Failed return true; } return false; } }; -constexpr const char * const Ndb_binlog_index_table_util::DB_NAME; -constexpr const char * const Ndb_binlog_index_table_util::TABLE_NAME; - +constexpr const char *const Ndb_binlog_index_table_util::DB_NAME; +constexpr const char *const Ndb_binlog_index_table_util::TABLE_NAME; // Wrapper function allowing Ndb_binlog_index_table_util::remove_rows_for_file() // to be forward declared -static bool ndbcluster_binlog_index_remove_file(THD *thd, const char *filename) -{ +static bool ndbcluster_binlog_index_remove_file(THD *thd, + const char *filename) { return Ndb_binlog_index_table_util::remove_rows_for_file(thd, filename); } - /********************************************************************* Functions for start, stop, wait for ndbcluster binlog thread *********************************************************************/ -int ndbcluster_binlog_start() -{ +int ndbcluster_binlog_start() { DBUG_ENTER("ndbcluster_binlog_start"); - if (::server_id == 0) - { - ndb_log_warning("server id set to zero - changes logged to " "binlog with server id zero will be logged with " "another server id by slave mysqlds"); + if (::server_id == 0) { + ndb_log_warning( + "server id set to zero - changes logged to " + "binlog with server id zero will be logged with " + "another server id by slave mysqlds"); } /* @@ -6301,19 +5805,19 @@ int ndbcluster_binlog_start() if ((::server_id & 0x1 << 31) || // Reserved bit !ndbcluster_anyvalue_is_serverid_in_range(::server_id)) // server_id_bits { - ndb_log_error("server id provided is too large to be represented in " "opt_server_id_bits or is reserved"); + ndb_log_error( + "server id provided is too large to be represented in " + "opt_server_id_bits or is reserved"); DBUG_RETURN(-1); } /* Check that v2 events are enabled if log-transaction-id is set */ - if (opt_ndb_log_transaction_id && log_bin_use_v1_row_events) - { - ndb_log_error("--ndb-log-transaction-id requires v2 Binlog row events " "but server is using v1."); + if (opt_ndb_log_transaction_id && log_bin_use_v1_row_events) { + ndb_log_error( + "--ndb-log-transaction-id requires v2 Binlog row events " + "but server is using v1."); DBUG_RETURN(-1); } @@ -6328,15 +5832,13 @@ int ndbcluster_binlog_start() mysql_mutex_init(PSI_INSTRUMENT_ME, &injector_event_mutex, MY_MUTEX_INIT_SLOW); mysql_cond_init(PSI_INSTRUMENT_ME, &injector_data_cond); - mysql_mutex_init(PSI_INSTRUMENT_ME, &injector_data_mutex, - MY_MUTEX_INIT_FAST); + mysql_mutex_init(PSI_INSTRUMENT_ME, &injector_data_mutex, MY_MUTEX_INIT_FAST); // The binlog thread globals have been initialized and should be freed - ndbcluster_binlog_inited= 1; + ndbcluster_binlog_inited = 1; /* Start ndb binlog thread */ - if (ndb_binlog_thread.start()) - { + if (ndb_binlog_thread.start()) { DBUG_PRINT("error", ("Could not start ndb binlog thread")); DBUG_RETURN(-1); } @@ -6344,89 +5846,75 @@
DBUG_RETURN(0); } - -void ndbcluster_binlog_set_server_started() -{ +void ndbcluster_binlog_set_server_started() { ndb_binlog_thread.set_server_started(); } - -void -NDB_SHARE::set_binlog_flags(Ndb_binlog_type ndb_binlog_type) -{ +void NDB_SHARE::set_binlog_flags(Ndb_binlog_type ndb_binlog_type) { DBUG_ENTER("set_binlog_flags"); - switch (ndb_binlog_type) - { - case NBT_NO_LOGGING: - DBUG_PRINT("info", ("NBT_NO_LOGGING")); - flags |= NDB_SHARE::FLAG_NO_BINLOG; - DBUG_VOID_RETURN; - case NBT_DEFAULT: - DBUG_PRINT("info", ("NBT_DEFAULT")); - if (opt_ndb_log_updated_only) - { + switch (ndb_binlog_type) { + case NBT_NO_LOGGING: + DBUG_PRINT("info", ("NBT_NO_LOGGING")); + flags |= NDB_SHARE::FLAG_NO_BINLOG; + DBUG_VOID_RETURN; + case NBT_DEFAULT: + DBUG_PRINT("info", ("NBT_DEFAULT")); + if (opt_ndb_log_updated_only) { + flags &= ~NDB_SHARE::FLAG_BINLOG_MODE_FULL; + } else { + flags |= NDB_SHARE::FLAG_BINLOG_MODE_FULL; + } + if (opt_ndb_log_update_as_write) { + flags &= ~NDB_SHARE::FLAG_BINLOG_MODE_USE_UPDATE; + } else { + flags |= NDB_SHARE::FLAG_BINLOG_MODE_USE_UPDATE; + } + if (opt_ndb_log_update_minimal) { + flags |= NDB_SHARE::FLAG_BINLOG_MODE_MINIMAL_UPDATE; + } + break; + case NBT_UPDATED_ONLY: + DBUG_PRINT("info", ("NBT_UPDATED_ONLY")); flags &= ~NDB_SHARE::FLAG_BINLOG_MODE_FULL; - } - else - { + flags &= ~NDB_SHARE::FLAG_BINLOG_MODE_USE_UPDATE; + break; + case NBT_USE_UPDATE: + DBUG_PRINT("info", ("NBT_USE_UPDATE")); + // fall through + case NBT_UPDATED_ONLY_USE_UPDATE: + DBUG_PRINT("info", ("NBT_UPDATED_ONLY_USE_UPDATE")); + flags &= ~NDB_SHARE::FLAG_BINLOG_MODE_FULL; + flags |= NDB_SHARE::FLAG_BINLOG_MODE_USE_UPDATE; + break; + case NBT_FULL: + DBUG_PRINT("info", ("NBT_FULL")); flags |= NDB_SHARE::FLAG_BINLOG_MODE_FULL; - } - if (opt_ndb_log_update_as_write) - { flags &= ~NDB_SHARE::FLAG_BINLOG_MODE_USE_UPDATE; - } - else - { + break; + case NBT_FULL_USE_UPDATE: + DBUG_PRINT("info", ("NBT_FULL_USE_UPDATE")); + flags |= NDB_SHARE::FLAG_BINLOG_MODE_FULL; + flags |= NDB_SHARE::FLAG_BINLOG_MODE_USE_UPDATE; + break; + case NBT_UPDATED_ONLY_MINIMAL: + DBUG_PRINT("info", ("NBT_UPDATED_ONLY_MINIMAL")); + flags &= ~NDB_SHARE::FLAG_BINLOG_MODE_FULL; flags |= NDB_SHARE::FLAG_BINLOG_MODE_USE_UPDATE; - } - if (opt_ndb_log_update_minimal) - { flags |= NDB_SHARE::FLAG_BINLOG_MODE_MINIMAL_UPDATE; - } - break; - case NBT_UPDATED_ONLY: - DBUG_PRINT("info", ("NBT_UPDATED_ONLY")); - flags &= ~NDB_SHARE::FLAG_BINLOG_MODE_FULL; - flags &= ~NDB_SHARE::FLAG_BINLOG_MODE_USE_UPDATE; - break; - case NBT_USE_UPDATE: - DBUG_PRINT("info", ("NBT_USE_UPDATE")); - // fall through - case NBT_UPDATED_ONLY_USE_UPDATE: - DBUG_PRINT("info", ("NBT_UPDATED_ONLY_USE_UPDATE")); - flags &= ~NDB_SHARE::FLAG_BINLOG_MODE_FULL; - flags |= NDB_SHARE::FLAG_BINLOG_MODE_USE_UPDATE; - break; - case NBT_FULL: - DBUG_PRINT("info", ("NBT_FULL")); - flags |= NDB_SHARE::FLAG_BINLOG_MODE_FULL; - flags &= ~NDB_SHARE::FLAG_BINLOG_MODE_USE_UPDATE; - break; - case NBT_FULL_USE_UPDATE: - DBUG_PRINT("info", ("NBT_FULL_USE_UPDATE")); - flags |= NDB_SHARE::FLAG_BINLOG_MODE_FULL; - flags |= NDB_SHARE::FLAG_BINLOG_MODE_USE_UPDATE; - break; - case NBT_UPDATED_ONLY_MINIMAL: - DBUG_PRINT("info", ("NBT_UPDATED_ONLY_MINIMAL")); - flags &= ~NDB_SHARE::FLAG_BINLOG_MODE_FULL; - flags |= NDB_SHARE::FLAG_BINLOG_MODE_USE_UPDATE; - flags |= NDB_SHARE::FLAG_BINLOG_MODE_MINIMAL_UPDATE; - break; - case NBT_UPDATED_FULL_MINIMAL: - DBUG_PRINT("info", ("NBT_UPDATED_FULL_MINIMAL")); - flags |= NDB_SHARE::FLAG_BINLOG_MODE_FULL; - flags |= 
NDB_SHARE::FLAG_BINLOG_MODE_USE_UPDATE; - flags |= NDB_SHARE::FLAG_BINLOG_MODE_MINIMAL_UPDATE; - break; - default: - DBUG_VOID_RETURN; + break; + case NBT_UPDATED_FULL_MINIMAL: + DBUG_PRINT("info", ("NBT_UPDATED_FULL_MINIMAL")); + flags |= NDB_SHARE::FLAG_BINLOG_MODE_FULL; + flags |= NDB_SHARE::FLAG_BINLOG_MODE_USE_UPDATE; + flags |= NDB_SHARE::FLAG_BINLOG_MODE_MINIMAL_UPDATE; + break; + default: + DBUG_VOID_RETURN; } flags &= ~NDB_SHARE::FLAG_NO_BINLOG; DBUG_VOID_RETURN; } - /* Ndb_binlog_client::read_replication_info @@ -6436,23 +5924,15 @@ NDB_SHARE::set_binlog_flags(Ndb_binlog_type ndb_binlog_type) If the table is not found, or the table does not exist, then defaults are returned. */ -bool -Ndb_binlog_client::read_replication_info(Ndb *ndb, - const char* db, - const char* table_name, - uint server_id, - uint32* binlog_flags, - const st_conflict_fn_def** conflict_fn, - st_conflict_fn_arg* args, - uint* num_args) -{ +bool Ndb_binlog_client::read_replication_info( + Ndb *ndb, const char *db, const char *table_name, uint server_id, + uint32 *binlog_flags, const st_conflict_fn_def **conflict_fn, + st_conflict_fn_arg *args, uint *num_args) { DBUG_ENTER("Ndb_binlog_client::read_replication_info"); /* Override for ndb_apply_status when logging */ - if (opt_ndb_log_apply_status) - { - if (Ndb_apply_status_table::is_apply_status_table(db, table_name)) - { + if (opt_ndb_log_apply_status) { + if (Ndb_apply_status_table::is_apply_status_table(db, table_name)) { // Ensure to get all columns from ndb_apply_status updates and that events // are always logged as WRITES. ndb_log_info( @@ -6467,122 +5947,87 @@ Ndb_binlog_client::read_replication_info(Ndb *ndb, Ndb_rep_tab_reader rep_tab_reader; - int const rc = rep_tab_reader.lookup(ndb, - db, - table_name, - server_id); - + int const rc = rep_tab_reader.lookup(ndb, db, table_name, server_id); - if (rc == 0) - { + if (rc == 0) { // lookup() may return a warning although it succeeds - const char* msg = rep_tab_reader.get_warning_message(); - if (msg != NULL) - { + const char *msg = rep_tab_reader.get_warning_message(); + if (msg != NULL) { push_warning_printf(m_thd, Sql_condition::SL_WARNING, - ER_NDB_REPLICATION_SCHEMA_ERROR, - ER_THD(m_thd, ER_NDB_REPLICATION_SCHEMA_ERROR), - msg); + ER_NDB_REPLICATION_SCHEMA_ERROR, + ER_THD(m_thd, ER_NDB_REPLICATION_SCHEMA_ERROR), msg); ndb_log_warning("NDB Binlog: %s", msg); } - } - else - { + } else { /* When rep_tab_reader.lookup() returns with non-zero error code, it must give a warning message describing why it failed*/ - const char* msg = rep_tab_reader.get_warning_message(); + const char *msg = rep_tab_reader.get_warning_message(); DBUG_ASSERT(msg); my_error(ER_NDB_REPLICATION_SCHEMA_ERROR, MYF(0), msg); ndb_log_warning("NDB Binlog: %s", msg); DBUG_RETURN(true); } - *binlog_flags= rep_tab_reader.get_binlog_flags(); - const char* conflict_fn_spec= rep_tab_reader.get_conflict_fn_spec(); + *binlog_flags = rep_tab_reader.get_binlog_flags(); + const char *conflict_fn_spec = rep_tab_reader.get_conflict_fn_spec(); - if (conflict_fn_spec != NULL) - { - char msgbuf[ FN_REFLEN ]; - if (parse_conflict_fn_spec(conflict_fn_spec, - conflict_fn, - args, - num_args, - msgbuf, - sizeof(msgbuf)) != 0) - { + if (conflict_fn_spec != NULL) { + char msgbuf[FN_REFLEN]; + if (parse_conflict_fn_spec(conflict_fn_spec, conflict_fn, args, num_args, + msgbuf, sizeof(msgbuf)) != 0) { my_error(ER_CONFLICT_FN_PARSE_ERROR, MYF(0), msgbuf); /* Log as well, useful for contexts where the thd's stack of warnings are ignored */ - 
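The NBT_* switch in NDB_SHARE::set_binlog_flags() earlier in this hunk reduces every logging mode to three independent bits: full vs. updated-only row images, updates logged as updates vs. as writes, and minimal update images. A condensed sketch of that reduction (bit values are illustrative, not the real NDB_SHARE flag constants):

constexpr unsigned FLAG_FULL = 1u << 0;        // log full row images
constexpr unsigned FLAG_USE_UPDATE = 1u << 1;  // log updates as updates
constexpr unsigned FLAG_MINIMAL = 1u << 2;     // minimal update images

static unsigned binlog_mode_bits(bool full, bool use_update, bool minimal) {
  unsigned flags = 0;
  if (full) flags |= FLAG_FULL;
  if (use_update) flags |= FLAG_USE_UPDATE;
  if (minimal) flags |= FLAG_MINIMAL;
  return flags;
}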
ndb_log_warning("NDB Slave: Table %s.%s : Parse error on conflict fn : %s", - db, table_name, - msgbuf); + ndb_log_warning( + "NDB Slave: Table %s.%s : Parse error on conflict fn : %s", db, + table_name, msgbuf); DBUG_RETURN(true); } - } - else - { + } else { /* No conflict function specified */ - conflict_fn= NULL; - num_args= 0; + conflict_fn = NULL; + num_args = 0; } DBUG_RETURN(false); } - -int -Ndb_binlog_client::apply_replication_info(Ndb* ndb, NDB_SHARE *share, - const NdbDictionary::Table* ndbtab, - const st_conflict_fn_def* conflict_fn, - const st_conflict_fn_arg* args, - uint num_args, - uint32 binlog_flags) -{ +int Ndb_binlog_client::apply_replication_info( + Ndb *ndb, NDB_SHARE *share, const NdbDictionary::Table *ndbtab, + const st_conflict_fn_def *conflict_fn, const st_conflict_fn_arg *args, + uint num_args, uint32 binlog_flags) { DBUG_ENTER("Ndb_binlog_client::apply_replication_info"); char tmp_buf[FN_REFLEN]; DBUG_PRINT("info", ("Setting binlog flags to %u", binlog_flags)); share->set_binlog_flags((enum Ndb_binlog_type)binlog_flags); - if (conflict_fn != NULL) - { - if (setup_conflict_fn(ndb, - &share->m_cfn_share, - share->db, - share->table_name, - share->get_binlog_use_update(), - ndbtab, - tmp_buf, sizeof(tmp_buf), - conflict_fn, - args, - num_args) == 0) - { + if (conflict_fn != NULL) { + if (setup_conflict_fn(ndb, &share->m_cfn_share, share->db, + share->table_name, share->get_binlog_use_update(), + ndbtab, tmp_buf, sizeof(tmp_buf), conflict_fn, args, + num_args) == 0) { ndb_log_verbose(1, "NDB Slave: %s", tmp_buf); - } - else - { + } else { /* Dump setup failure message to error log for cases where thd warning stack is ignored */ - ndb_log_warning("NDB Slave: Table %s.%s : %s", - share->db, share->table_name, tmp_buf); + ndb_log_warning("NDB Slave: Table %s.%s : %s", share->db, + share->table_name, tmp_buf); push_warning_printf(m_thd, Sql_condition::SL_WARNING, ER_CONFLICT_FN_PARSE_ERROR, - ER_THD(m_thd, ER_CONFLICT_FN_PARSE_ERROR), - tmp_buf); + ER_THD(m_thd, ER_CONFLICT_FN_PARSE_ERROR), tmp_buf); DBUG_RETURN(-1); } - } - else - { + } else { /* No conflict function specified */ slave_reset_conflict_fn(share->m_cfn_share); } @@ -6590,47 +6035,32 @@ Ndb_binlog_client::apply_replication_info(Ndb* ndb, NDB_SHARE *share, DBUG_RETURN(0); } - -int -Ndb_binlog_client::read_and_apply_replication_info(Ndb *ndb, NDB_SHARE *share, - const NdbDictionary::Table* ndbtab, uint server_id) -{ +int Ndb_binlog_client::read_and_apply_replication_info( + Ndb *ndb, NDB_SHARE *share, const NdbDictionary::Table *ndbtab, + uint server_id) { DBUG_ENTER("Ndb_binlog_client::read_and_apply_replication_info"); uint32 binlog_flags; - const st_conflict_fn_def* conflict_fn= NULL; + const st_conflict_fn_def *conflict_fn = NULL; st_conflict_fn_arg args[MAX_CONFLICT_ARGS]; uint num_args = MAX_CONFLICT_ARGS; - if (read_replication_info(ndb, - share->db, - share->table_name, - server_id, - &binlog_flags, - &conflict_fn, - args, - &num_args) || - apply_replication_info(ndb, share, ndbtab, - conflict_fn, - args, - num_args, - binlog_flags)) - { + if (read_replication_info(ndb, share->db, share->table_name, server_id, + &binlog_flags, &conflict_fn, args, &num_args) || + apply_replication_info(ndb, share, ndbtab, conflict_fn, args, num_args, + binlog_flags)) { DBUG_RETURN(-1); } DBUG_RETURN(0); } - /* Common function for setting up everything for logging a table at create/discover. 
*/ -static -int ndbcluster_setup_binlog_for_share(THD *thd, Ndb *ndb, - NDB_SHARE *share, - const dd::Table* table_def) -{ +static int ndbcluster_setup_binlog_for_share(THD *thd, Ndb *ndb, + NDB_SHARE *share, + const dd::Table *table_def) { DBUG_ENTER("ndbcluster_setup_binlog_for_share"); // This function should not be used to setup binlogging @@ -6638,8 +6068,7 @@ int ndbcluster_setup_binlog_for_share(THD *thd, Ndb *ndb, DBUG_ASSERT(!ndb_name_is_temp(share->table_name)); Mutex_guard share_g(share->mutex); - if (share->op != 0) - { + if (share->op != 0) { DBUG_PRINT("info", ("binlogging already setup")); DBUG_RETURN(0); } @@ -6647,46 +6076,40 @@ Ndb_binlog_client binlog_client(thd, share->db, share->table_name); Ndb_table_guard ndbtab_g(ndb, share->db, share->table_name); - const NDBTAB *ndbtab= ndbtab_g.get_table(); - if (ndbtab == 0) - { + const NDBTAB *ndbtab = ndbtab_g.get_table(); + if (ndbtab == 0) { const NdbError ndb_error = ndb->getDictionary()->getNdbError(); ndb_log_verbose(1, "NDB Binlog: Failed to open table '%s' from NDB, " "error: '%d - %s'", share->key_string(), ndb_error.code, ndb_error.message); - DBUG_RETURN(-1); // error + DBUG_RETURN(-1); // error } if (binlog_client.read_and_apply_replication_info(ndb, share, ndbtab, - ::server_id)) - { - ndb_log_error("NDB Binlog: Failed to read and apply replication " "info for table '%s'", share->key_string()); + ::server_id)) { + ndb_log_error( + "NDB Binlog: Failed to read and apply replication " + "info for table '%s'", + share->key_string()); DBUG_RETURN(-1); } - if (binlog_client.table_should_have_event(share, ndbtab)) - { + if (binlog_client.table_should_have_event(share, ndbtab)) { // Check if the event already exists in NDB, otherwise create it - if (!binlog_client.event_exists_for_table(ndb, share)) - { + if (!binlog_client.event_exists_for_table(ndb, share)) { // The event didn't exist, create the event in NDB - if (binlog_client.create_event(ndb, ndbtab, - share)) - { + if (binlog_client.create_event(ndb, ndbtab, share)) { // Failed to create event DBUG_RETURN(-1); } } - if (binlog_client.table_should_have_event_op(share)) - { + if (binlog_client.table_should_have_event_op(share)) { // Create the NDB event operation on the event - Ndb_event_data* event_data; + Ndb_event_data *event_data; if (!binlog_client.create_event_data(share, table_def, &event_data) || - binlog_client.create_event_op(share, ndbtab, event_data)) - { + binlog_client.create_event_op(share, ndbtab, event_data)) { // Failed to create event data or event operation DBUG_RETURN(-1); } @@ -6696,14 +6119,11 @@ int ndbcluster_setup_binlog_for_share(THD *thd, Ndb *ndb, DBUG_RETURN(0); } - -int ndbcluster_binlog_setup_table(THD *thd, Ndb *ndb, - const char *db, +int ndbcluster_binlog_setup_table(THD *thd, Ndb *ndb, const char *db, const char *table_name, - const dd::Table* table_def) -{ + const dd::Table *table_def) { DBUG_ENTER("ndbcluster_binlog_setup_table"); - DBUG_PRINT("enter",("db: '%s', table_name: '%s'", db, table_name)); + DBUG_PRINT("enter", ("db: '%s', table_name: '%s'", db, table_name)); DBUG_ASSERT(table_def); DBUG_ASSERT(!ndb_name_is_blob_prefix(table_name)); @@ -6711,25 +6131,20 @@ int ndbcluster_binlog_setup_table(THD *thd, Ndb *ndb, // Create key for ndbcluster_open_tables char key[FN_REFLEN + 1]; { - char *end= key + - build_table_filename(key, sizeof(key) - 1, db, "", "", 0); - end+= tablename_to_filename(table_name, end, - (uint)(sizeof(key)-(end-key))); + char *end = key +
build_table_filename(key, sizeof(key) - 1, db, "", "", 0); + end += tablename_to_filename(table_name, end, + (uint)(sizeof(key) - (end - key))); } mysql_mutex_lock(&ndbcluster_mutex); // Check if NDB_SHARE for this table already exist - NDB_SHARE* share = - NDB_SHARE::acquire_reference_by_key_have_lock(key, - "create_binlog_setup"); - if (share == nullptr) - { + NDB_SHARE *share = + NDB_SHARE::acquire_reference_by_key_have_lock(key, "create_binlog_setup"); + if (share == nullptr) { // NDB_SHARE didn't exist, the normal case, try to create it - share = NDB_SHARE::create_and_acquire_reference(key, - "create_binlog_setup"); - if (share == nullptr) - { + share = NDB_SHARE::create_and_acquire_reference(key, "create_binlog_setup"); + if (share == nullptr) { // Could not create the NDB_SHARE. Unlikely, catch in debug DBUG_ASSERT(false); DBUG_RETURN(-1); @@ -6738,26 +6153,23 @@ int ndbcluster_binlog_setup_table(THD *thd, Ndb *ndb, mysql_mutex_unlock(&ndbcluster_mutex); // Before 'schema_dist_is_ready', Thd_ndb::ALLOW_BINLOG_SETUP is required - int ret= 0; + int ret = 0; if (Ndb_schema_dist::is_ready(thd) || - get_thd_ndb(thd)->check_option(Thd_ndb::ALLOW_BINLOG_SETUP)) - { - ret= ndbcluster_setup_binlog_for_share(thd, ndb, share, table_def); + get_thd_ndb(thd)->check_option(Thd_ndb::ALLOW_BINLOG_SETUP)) { + ret = ndbcluster_setup_binlog_for_share(thd, ndb, share, table_def); } - NDB_SHARE::release_reference(share, "create_binlog_setup"); // temporary ref. + NDB_SHARE::release_reference(share, "create_binlog_setup"); // temporary ref. DBUG_RETURN(ret); } - -int -Ndb_binlog_client::create_event(Ndb *ndb, const NdbDictionary::Table*ndbtab, - const NDB_SHARE* share) -{ +int Ndb_binlog_client::create_event(Ndb *ndb, + const NdbDictionary::Table *ndbtab, + const NDB_SHARE *share) { DBUG_ENTER("Ndb_binlog_client::create_event"); - DBUG_PRINT("enter", ("table: '%s', version: %d", - ndbtab->getName(), ndbtab->getObjectVersion())); + DBUG_PRINT("enter", ("table: '%s', version: %d", ndbtab->getName(), + ndbtab->getObjectVersion())); DBUG_PRINT("enter", ("share->key: '%s'", share->key_string())); DBUG_ASSERT(share); @@ -6775,67 +6187,52 @@ Ndb_binlog_client::create_event(Ndb *ndb, const NdbDictionary::Table*ndbtab, NDBEVENT my_event(event_name.c_str()); my_event.setTable(*ndbtab); my_event.addTableEvent(NDBEVENT::TE_ALL); - if (ndb_table_has_hidden_pk(ndbtab)) - { + if (ndb_table_has_hidden_pk(ndbtab)) { /* Hidden primary key, subscribe for all attributes */ - my_event.setReport((NDBEVENT::EventReport) - (NDBEVENT::ER_ALL | NDBEVENT::ER_DDL)); + my_event.setReport( + (NDBEVENT::EventReport)(NDBEVENT::ER_ALL | NDBEVENT::ER_DDL)); DBUG_PRINT("info", ("subscription all")); - } - else - { + } else { if (Ndb_schema_dist_client::is_schema_dist_table(share->db, share->table_name)) { /** * ER_SUBSCRIBE is only needed on schema distribution table */ - my_event.setReport((NDBEVENT::EventReport) - (NDBEVENT::ER_ALL | - NDBEVENT::ER_SUBSCRIBE | - NDBEVENT::ER_DDL)); + my_event.setReport((NDBEVENT::EventReport)( + NDBEVENT::ER_ALL | NDBEVENT::ER_SUBSCRIBE | NDBEVENT::ER_DDL)); DBUG_PRINT("info", ("subscription all and subscribe")); - } - else if (Ndb_schema_dist_client::is_schema_dist_result_table( - share->db, share->table_name)) { - - my_event.setReport((NDBEVENT::EventReport) - (NDBEVENT::ER_ALL | NDBEVENT::ER_DDL)); + } else if (Ndb_schema_dist_client::is_schema_dist_result_table( + share->db, share->table_name)) { + my_event.setReport( + (NDBEVENT::EventReport)(NDBEVENT::ER_ALL | NDBEVENT::ER_DDL)); 
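Condensing the event-report selection being set up here and continued just below: tables with a hidden primary key, the schema distribution tables, and tables configured for full binlogging subscribe to all columns (ER_ALL); the schema distribution table alone adds ER_SUBSCRIBE; everything else asks only for updated columns (ER_UPDATED); ER_DDL is always included. A compact sketch under those assumptions, with stand-in enumerators for the NDBEVENT values and the schema distribution result table treated like a full-logging table:

enum Report : unsigned { ALL = 1u, UPDATED = 2u, SUBSCRIBE = 4u, DDL = 8u };

static unsigned pick_report(bool hidden_pk, bool schema_dist_table,
                            bool binlog_full) {
  if (hidden_pk || schema_dist_table || binlog_full)
    return ALL | DDL | (schema_dist_table ? SUBSCRIBE : 0u);
  return UPDATED | DDL;  // only updated columns are reported
}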
DBUG_PRINT("info", ("subscription all")); - } - else - { - if (share->get_binlog_full()) - { - my_event.setReport((NDBEVENT::EventReport) - (NDBEVENT::ER_ALL | NDBEVENT::ER_DDL)); + } else { + if (share->get_binlog_full()) { + my_event.setReport( + (NDBEVENT::EventReport)(NDBEVENT::ER_ALL | NDBEVENT::ER_DDL)); DBUG_PRINT("info", ("subscription all")); - } - else - { - my_event.setReport((NDBEVENT::EventReport) - (NDBEVENT::ER_UPDATED | NDBEVENT::ER_DDL)); + } else { + my_event.setReport( + (NDBEVENT::EventReport)(NDBEVENT::ER_UPDATED | NDBEVENT::ER_DDL)); DBUG_PRINT("info", ("subscription only updated")); } } } - if (ndb_table_has_blobs(ndbtab)) - my_event.mergeEvents(true); + if (ndb_table_has_blobs(ndbtab)) my_event.mergeEvents(true); /* add all columns to the event */ const int n_cols = ndbtab->getNoOfColumns(); - for(int a= 0; a < n_cols; a++) - my_event.addEventColumn(a); + for (int a = 0; a < n_cols; a++) my_event.addEventColumn(a); - if (dict->createEvent(my_event)) // Add event to database + if (dict->createEvent(my_event)) // Add event to database { - if (dict->getNdbError().classification != NdbError::SchemaObjectExists) - { + if (dict->getNdbError().classification != NdbError::SchemaObjectExists) { // Failed to create event, log warning log_warning(ER_GET_ERRMSG, "Unable to create event in database. " "Event: %s Error Code: %d Message: %s", - event_name.c_str(), - dict->getNdbError().code, dict->getNdbError().message); + event_name.c_str(), dict->getNdbError().code, + dict->getNdbError().message); DBUG_RETURN(-1); } @@ -6844,36 +6241,33 @@ Ndb_binlog_client::create_event(Ndb *ndb, const NdbDictionary::Table*ndbtab, a valid event. Otherwise we have an old event from before */ const NDBEVENT *ev; - if ((ev= dict->getEvent(event_name.c_str()))) - { + if ((ev = dict->getEvent(event_name.c_str()))) { delete ev; DBUG_RETURN(0); } // Old event from before; an error, but try to correct it if (dict->getNdbError().code == NDB_INVALID_SCHEMA_OBJECT && - dict->dropEvent(my_event.getName(), 1)) - { + dict->dropEvent(my_event.getName(), 1)) { // Failed to drop the old event, log warning log_warning(ER_GET_ERRMSG, "Unable to create event in database. " "Attempt to correct with drop failed. " "Event: %s Error Code: %d Message: %s", - event_name.c_str(), - dict->getNdbError().code, dict->getNdbError().message); + event_name.c_str(), dict->getNdbError().code, + dict->getNdbError().message); DBUG_RETURN(-1); } // Try to add the event again - if (dict->createEvent(my_event)) - { + if (dict->createEvent(my_event)) { // Still failed to create the event, log warning log_warning(ER_GET_ERRMSG, "Unable to create event in database. " "Attempt to correct with drop ok, but create failed. 
" "Event: %s Error Code: %d Message: %s", - event_name.c_str(), - dict->getNdbError().code,dict->getNdbError().message); + event_name.c_str(), dict->getNdbError().code, + dict->getNdbError().message); DBUG_RETURN(-1); } } @@ -6884,16 +6278,11 @@ Ndb_binlog_client::create_event(Ndb *ndb, const NdbDictionary::Table*ndbtab, DBUG_RETURN(0); } - -inline int is_ndb_compatible_type(Field *field) -{ - return - !(field->flags & BLOB_FLAG) && - field->type() != MYSQL_TYPE_BIT && - field->pack_length() != 0; +inline int is_ndb_compatible_type(Field *field) { + return !(field->flags & BLOB_FLAG) && field->type() != MYSQL_TYPE_BIT && + field->pack_length() != 0; } - /* - create NdbEventOperation for receiving log events - setup ndb recattrs for reception of log event data @@ -6901,19 +6290,17 @@ inline int is_ndb_compatible_type(Field *field) used at create/discover of tables */ -int -Ndb_binlog_client::create_event_op(NDB_SHARE* share, - const NdbDictionary::Table* ndbtab, - const Ndb_event_data* event_data) -{ +int Ndb_binlog_client::create_event_op(NDB_SHARE *share, + const NdbDictionary::Table *ndbtab, + const Ndb_event_data *event_data) { /* we are in either create table or rename table so table should be locked, hence we can work with the share without locks */ DBUG_ENTER("Ndb_binlog_client::create_event_op"); - DBUG_PRINT("enter", ("table: '%s', share->key: '%s'", - ndbtab->getName(), share->key_string())); + DBUG_PRINT("enter", ("table: '%s', share->key: '%s'", ndbtab->getName(), + share->key_string())); DBUG_ASSERT(share); DBUG_ASSERT(event_data); @@ -6942,41 +6329,34 @@ Ndb_binlog_client::create_event_op(NDB_SHARE* share, // There should be no NdbEventOperation assigned yet DBUG_ASSERT(!share->op); - TABLE *table= event_data->shadow_table; + TABLE *table = event_data->shadow_table; - int retries= 100; - int retry_sleep= 0; - while (1) - { - if (retry_sleep > 0) - { + int retries = 100; + int retry_sleep = 0; + while (1) { + if (retry_sleep > 0) { ndb_retry_sleep(retry_sleep); } Mutex_guard injector_mutex_g(injector_event_mutex); - Ndb *ndb= injector_ndb; - if (is_schema_dist_setup ) - ndb= schema_ndb; + Ndb *ndb = injector_ndb; + if (is_schema_dist_setup) ndb = schema_ndb; - if (ndb == NULL) - DBUG_RETURN(-1); + if (ndb == NULL) DBUG_RETURN(-1); - NdbEventOperation* op; - if (is_schema_dist_setup ) - op= ndb->createEventOperation(event_name.c_str()); - else - { + NdbEventOperation *op; + if (is_schema_dist_setup) + op = ndb->createEventOperation(event_name.c_str()); + else { // set injector_ndb database/schema from table internal name - int ret= ndb->setDatabaseAndSchemaName(ndbtab); + int ret = ndb->setDatabaseAndSchemaName(ndbtab); ndbcluster::ndbrequire(ret == 0); - op= ndb->createEventOperation(event_name.c_str()); + op = ndb->createEventOperation(event_name.c_str()); // reset to catch errors ndb->setDatabaseName(""); } - if (!op) - { - const NdbError& ndb_err = ndb->getNdbError(); - if (ndb_err.code == 4710) - { + if (!op) { + const NdbError &ndb_err = ndb->getNdbError(); + if (ndb_err.code == 4710) { // Error code 4710 is returned when table or event is not found. 
The // generic error message for 4710 says "Event not found" but should // be reported as "table not found" @@ -6993,104 +6373,86 @@ Ndb_binlog_client::create_event_op(NDB_SHARE* share, } if (ndb_table_has_blobs(ndbtab)) - op->mergeEvents(true); // currently not inherited from event + op->mergeEvents(true); // currently not inherited from event - const uint n_columns= ndbtab->getNoOfColumns(); - const uint n_stored_fields= Ndb_table_map::num_stored_fields(table); - const uint val_length= sizeof(NdbValue) * n_columns; + const uint n_columns = ndbtab->getNoOfColumns(); + const uint n_stored_fields = Ndb_table_map::num_stored_fields(table); + const uint val_length = sizeof(NdbValue) * n_columns; /* Allocate memory globally so it can be reused after online alter table */ - if (my_multi_malloc(PSI_INSTRUMENT_ME, - MYF(MY_WME), - &event_data->ndb_value[0], - val_length, - &event_data->ndb_value[1], - val_length, - NULL) == 0) - { + if (my_multi_malloc(PSI_INSTRUMENT_ME, MYF(MY_WME), + &event_data->ndb_value[0], val_length, + &event_data->ndb_value[1], val_length, NULL) == 0) { log_warning(ER_GET_ERRMSG, "Failed to allocate records for event operation"); DBUG_RETURN(-1); } Ndb_table_map map(table); - for (uint j= 0; j < n_columns; j++) - { - const char *col_name= ndbtab->getColumn(j)->getName(); + for (uint j = 0; j < n_columns; j++) { + const char *col_name = ndbtab->getColumn(j)->getName(); NdbValue attr0, attr1; - if (j < n_stored_fields) - { - Field *f= table->field[map.get_field_for_column(j)]; - if (is_ndb_compatible_type(f)) - { + if (j < n_stored_fields) { + Field *f = table->field[map.get_field_for_column(j)]; + if (is_ndb_compatible_type(f)) { DBUG_PRINT("info", ("%s compatible", col_name)); - attr0.rec= op->getValue(col_name, (char*) f->ptr); - attr1.rec= op->getPreValue(col_name, - (f->ptr - table->record[0]) + - (char*) table->record[1]); - } - else if (! 
(f->flags & BLOB_FLAG)) - { + attr0.rec = op->getValue(col_name, (char *)f->ptr); + attr1.rec = op->getPreValue( + col_name, (f->ptr - table->record[0]) + (char *)table->record[1]); + } else if (!(f->flags & BLOB_FLAG)) { DBUG_PRINT("info", ("%s non compatible", col_name)); - attr0.rec= op->getValue(col_name); - attr1.rec= op->getPreValue(col_name); - } - else - { + attr0.rec = op->getValue(col_name); + attr1.rec = op->getPreValue(col_name); + } else { DBUG_PRINT("info", ("%s blob", col_name)); DBUG_ASSERT(ndb_table_has_blobs(ndbtab)); - attr0.blob= op->getBlobHandle(col_name); - attr1.blob= op->getPreBlobHandle(col_name); - if (attr0.blob == NULL || attr1.blob == NULL) - { + attr0.blob = op->getBlobHandle(col_name); + attr1.blob = op->getPreBlobHandle(col_name); + if (attr0.blob == NULL || attr1.blob == NULL) { log_warning(ER_GET_ERRMSG, "Failed to create NdbEventOperation on '%s', " "blob field %u handles failed, error: %d - %s", - event_name.c_str(), j, - op->getNdbError().code, op->getNdbError().message); + event_name.c_str(), j, op->getNdbError().code, + op->getNdbError().message); ndb->dropEventOperation(op); DBUG_RETURN(-1); } } - } - else - { + } else { DBUG_PRINT("info", ("%s hidden key", col_name)); - attr0.rec= op->getValue(col_name); - attr1.rec= op->getPreValue(col_name); - } - event_data->ndb_value[0][j].ptr= attr0.ptr; - event_data->ndb_value[1][j].ptr= attr1.ptr; - DBUG_PRINT("info", ("&event_data->ndb_value[0][%d]: 0x%lx " - "event_data->ndb_value[0][%d]: 0x%lx", - j, (long) &event_data->ndb_value[0][j], - j, (long) attr0.ptr)); - DBUG_PRINT("info", ("&event_data->ndb_value[1][%d]: 0x%lx " - "event_data->ndb_value[1][%d]: 0x%lx", - j, (long) &event_data->ndb_value[0][j], - j, (long) attr1.ptr)); - } - op->setCustomData(const_cast(event_data)); // set before execute - share->op= op; // assign op in NDB_SHARE + attr0.rec = op->getValue(col_name); + attr1.rec = op->getPreValue(col_name); + } + event_data->ndb_value[0][j].ptr = attr0.ptr; + event_data->ndb_value[1][j].ptr = attr1.ptr; + DBUG_PRINT("info", + ("&event_data->ndb_value[0][%d]: 0x%lx " + "event_data->ndb_value[0][%d]: 0x%lx", + j, (long)&event_data->ndb_value[0][j], j, (long)attr0.ptr)); + DBUG_PRINT("info", + ("&event_data->ndb_value[1][%d]: 0x%lx " + "event_data->ndb_value[1][%d]: 0x%lx", + j, (long)&event_data->ndb_value[0][j], j, (long)attr1.ptr)); + } + op->setCustomData( + const_cast(event_data)); // set before execute + share->op = op; // assign op in NDB_SHARE /* Check if user explicitly requires monitoring of empty updates */ - if (opt_ndb_log_empty_update) - op->setAllowEmptyUpdate(true); + if (opt_ndb_log_empty_update) op->setAllowEmptyUpdate(true); - if (op->execute()) - { + if (op->execute()) { // Failed to create the NdbEventOperation - const NdbError& ndb_err = op->getNdbError(); - share->op= NULL; + const NdbError &ndb_err = op->getNdbError(); + share->op = NULL; retries--; if (ndb_err.status != NdbError::TemporaryError && ndb_err.code != 1407) { // Don't retry after these errors retries = 0; } - if (retries == 0) - { + if (retries == 0) { log_warning(ER_GET_ERRMSG, "Failed to activate NdbEventOperation for '%s', " "error: %d - %s", @@ -7098,11 +6460,10 @@ Ndb_binlog_client::create_event_op(NDB_SHARE* share, } op->setCustomData(NULL); ndb->dropEventOperation(op); - if (retries && !m_thd->killed) - { + if (retries && !m_thd->killed) { // fairly high retry sleep, temporary error on schema operation can // take some time to resolve - retry_sleep = 100; // milliseconds + retry_sleep = 100; //
milliseconds continue; } // Delete the event data, caller should create new before calling @@ -7116,77 +6477,60 @@ Ndb_binlog_client::create_event_op(NDB_SHARE* share, /* ndb_share reference binlog */ NDB_SHARE::acquire_reference_on_existing(share, "binlog"); - if (do_ndb_apply_status_share) - { - ndb_apply_status_share = - NDB_SHARE::acquire_reference_on_existing(share, - "ndb_apply_status_share"); + if (do_ndb_apply_status_share) { + ndb_apply_status_share = NDB_SHARE::acquire_reference_on_existing( + share, "ndb_apply_status_share"); DBUG_ASSERT(get_thd_ndb(m_thd)->check_option(Thd_ndb::ALLOW_BINLOG_SETUP)); } - ndb_log_verbose(1, "NDB Binlog: logging %s (%s,%s)", - share->key_string(), + ndb_log_verbose(1, "NDB Binlog: logging %s (%s,%s)", share->key_string(), share->get_binlog_full() ? "FULL" : "UPDATED", share->get_binlog_use_update() ? "USE_UPDATE" : "USE_WRITE"); DBUG_RETURN(0); } - - - -void -Ndb_binlog_client::drop_events_for_table(THD *thd, Ndb *ndb, - const char *db, - const char *table_name) -{ +void Ndb_binlog_client::drop_events_for_table(THD *thd, Ndb *ndb, + const char *db, + const char *table_name) { DBUG_ENTER("Ndb_binlog_client::drop_events_for_table"); DBUG_PRINT("enter", ("db: %s, tabname: %s", db, table_name)); - if (DBUG_EVALUATE_IF("ndb_skip_drop_event", true, false)) - { - ndb_log_verbose(1, - "NDB Binlog: skipping drop event on '%s.%s'", - db, table_name); + if (DBUG_EVALUATE_IF("ndb_skip_drop_event", true, false)) { + ndb_log_verbose(1, "NDB Binlog: skipping drop event on '%s.%s'", db, + table_name); DBUG_VOID_RETURN; } - for (uint i= 0; i < 2; i++) - { - std::string event_name = - event_name_for_table(db, table_name, i); + for (uint i = 0; i < 2; i++) { + std::string event_name = event_name_for_table(db, table_name, i); NdbDictionary::Dictionary *dict = ndb->getDictionary(); - if (dict->dropEvent(event_name.c_str()) == 0) - { + if (dict->dropEvent(event_name.c_str()) == 0) { // Event dropped successfully continue; } - if (dict->getNdbError().code == 4710 || - dict->getNdbError().code == 1419) - { + if (dict->getNdbError().code == 4710 || dict->getNdbError().code == 1419) { // Failed to drop event but return code says it was // because the event didn't exist, ignore continue; } /* Failed to drop event, push warning and write to log */ - push_warning_printf(thd, Sql_condition::SL_WARNING, - ER_GET_ERRMSG, ER_THD(thd, ER_GET_ERRMSG), - dict->getNdbError().code, + push_warning_printf(thd, Sql_condition::SL_WARNING, ER_GET_ERRMSG, + ER_THD(thd, ER_GET_ERRMSG), dict->getNdbError().code, dict->getNdbError().message, "NDB"); - ndb_log_error("NDB Binlog: Unable to drop event for '%s.%s' from NDB, " - "event_name: '%s' error: '%d - %s'", - db, table_name, event_name.c_str(), - dict->getNdbError().code, - dict->getNdbError().message); + ndb_log_error( + "NDB Binlog: Unable to drop event for '%s.%s' from NDB, " + "event_name: '%s' error: '%d - %s'", + db, table_name, event_name.c_str(), dict->getNdbError().code, + dict->getNdbError().message); } DBUG_VOID_RETURN; } - /* Wait for the binlog thread to drop it's NdbEventOperations during a drop table @@ -7199,20 +6543,17 @@ Ndb_binlog_client::drop_events_for_table(THD *thd, Ndb *ndb, drop and release it's resources allocated in the NDB_SHARE. 
*/ -int -ndbcluster_binlog_wait_synch_drop_table(THD *thd, NDB_SHARE *share) -{ +int ndbcluster_binlog_wait_synch_drop_table(THD *thd, NDB_SHARE *share) { DBUG_ENTER("ndbcluster_binlog_synch_drop_table"); DBUG_ASSERT(share); - const char *save_proc_info= thd->proc_info; - thd->proc_info= "Syncing ndb table schema operation and binlog"; + const char *save_proc_info = thd->proc_info; + thd->proc_info = "Syncing ndb table schema operation and binlog"; - int max_timeout= DEFAULT_SYNC_TIMEOUT; + int max_timeout = DEFAULT_SYNC_TIMEOUT; mysql_mutex_lock(&share->mutex); - while (share->op) - { + while (share->op) { struct timespec abstime; set_timespec(&abstime, 1); @@ -7221,68 +6562,53 @@ ndbcluster_binlog_wait_synch_drop_table(THD *thd, NDB_SHARE *share) // only use injector_data_cond with injector_data_mutex) mysql_mutex_unlock(&share->mutex); mysql_mutex_lock(&injector_data_mutex); - const int ret= mysql_cond_timedwait(&injector_data_cond, - &injector_data_mutex, - &abstime); + const int ret = mysql_cond_timedwait(&injector_data_cond, + &injector_data_mutex, &abstime); mysql_mutex_unlock(&injector_data_mutex); mysql_mutex_lock(&share->mutex); - if (thd->killed || - share->op == 0) - break; - if (ret) - { + if (thd->killed || share->op == 0) break; + if (ret) { max_timeout--; - if (max_timeout == 0) - { + if (max_timeout == 0) { ndb_log_error("%s, delete table timed out. Ignoring...", share->key_string()); DBUG_ASSERT(false); break; } if (ndb_log_get_verbose_level()) - ndb_report_waiting("delete table", max_timeout, - "delete table", share->key_string()); + ndb_report_waiting("delete table", max_timeout, "delete table", + share->key_string()); } } mysql_mutex_unlock(&share->mutex); - thd->proc_info= save_proc_info; + thd->proc_info = save_proc_info; DBUG_RETURN(0); } - -void Ndb_binlog_thread::validate_sync_blacklist(THD *thd) -{ +void Ndb_binlog_thread::validate_sync_blacklist(THD *thd) { metadata_sync.validate_blacklist(thd); } - -void ndbcluster_binlog_validate_sync_blacklist(THD *thd) -{ +void ndbcluster_binlog_validate_sync_blacklist(THD *thd) { ndb_binlog_thread.validate_sync_blacklist(thd); } - bool Ndb_binlog_thread::add_table_to_check(const std::string &db_name, - const std::string &table_name) -{ + const std::string &table_name) { return metadata_sync.add_table(db_name, table_name); } - bool ndbcluster_binlog_check_table_async(const std::string &db_name, - const std::string &table_name) -{ - if (db_name.empty()) - { + const std::string &table_name) { + if (db_name.empty()) { ndb_log_error("Database name of object to be synchronized not set"); return false; } - if (table_name.empty()) - { + if (table_name.empty()) { ndb_log_error("Table name of object to be synchronized not set"); return false; } @@ -7290,17 +6616,13 @@ bool ndbcluster_binlog_check_table_async(const std::string &db_name, return ndb_binlog_thread.add_table_to_check(db_name, table_name); } - -bool Ndb_binlog_thread::add_logfile_group_to_check(const std::string &lfg_name) -{ +bool Ndb_binlog_thread::add_logfile_group_to_check( + const std::string &lfg_name) { return metadata_sync.add_logfile_group(lfg_name); } - -bool ndbcluster_binlog_check_logfile_group_async(const std::string &lfg_name) -{ - if (lfg_name.empty()) - { +bool ndbcluster_binlog_check_logfile_group_async(const std::string &lfg_name) { + if (lfg_name.empty()) { ndb_log_error("Name of logfile group to be synchronized not set"); return false; } @@ -7308,19 +6630,14 @@ bool ndbcluster_binlog_check_logfile_group_async(const std::string &lfg_name) return 
ndb_binlog_thread.add_logfile_group_to_check(lfg_name); } - -bool -Ndb_binlog_thread::add_tablespace_to_check(const std::string &tablespace_name) -{ +bool Ndb_binlog_thread::add_tablespace_to_check( + const std::string &tablespace_name) { return metadata_sync.add_tablespace(tablespace_name); } - -bool -ndbcluster_binlog_check_tablespace_async(const std::string &tablespace_name) -{ - if (tablespace_name.empty()) - { +bool ndbcluster_binlog_check_tablespace_async( + const std::string &tablespace_name) { + if (tablespace_name.empty()) { ndb_log_error("Name of tablespace to be synchronized not set"); return false; } @@ -7328,14 +6645,13 @@ ndbcluster_binlog_check_tablespace_async(const std::string &tablespace_name) return ndb_binlog_thread.add_tablespace_to_check(tablespace_name); } - /******************************************************************** Internal helper functions for different events from the storage nodes used by the ndb injector thread ********************************************************************/ /* - Unpack a record read from NDB + Unpack a record read from NDB SYNOPSIS ndb_unpack_record() NOTE The data for each row is read directly into the - destination buffer. This function is primarily - called in order to check if any fields should be + destination buffer. This function is primarily + called in order to check if any fields should be set to null. */ -static void ndb_unpack_record(TABLE *table, NdbValue *value, - MY_BITMAP *defined, uchar *buf) -{ - Field **p_field= table->field, *field= *p_field; - ptrdiff_t row_offset= (ptrdiff_t) (buf - table->record[0]); - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); +static void ndb_unpack_record(TABLE *table, NdbValue *value, MY_BITMAP *defined, + uchar *buf) { + Field **p_field = table->field, *field = *p_field; + ptrdiff_t row_offset = (ptrdiff_t)(buf - table->record[0]); + my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set); DBUG_ENTER("ndb_unpack_record"); /* Set the filler bits of the null byte, since they are not touched in the code below.
- + The filler bits are the MSBs in the last null byte - */ + */ if (table->s->null_bytes > 0) - buf[table->s->null_bytes - 1]|= 256U - (1U << - table->s->last_null_bit_pos); + buf[table->s->null_bytes - 1] |= 256U - (1U << table->s->last_null_bit_pos); /* Set null flag(s) */ - for ( ; field; p_field++, field= *p_field) - { - if(field->is_virtual_gcol()) - { - if (field->flags & BLOB_FLAG) - { + for (; field; p_field++, field = *p_field) { + if (field->is_virtual_gcol()) { + if (field->flags & BLOB_FLAG) { /** * Valgrind shows Server binlog code uses length * of virtual blob fields for allocation decisions * even when the blob is not read */ - Field_blob* field_blob = (Field_blob*) field; + Field_blob *field_blob = (Field_blob *)field; DBUG_PRINT("info", ("[%u] is virtual blob, setting length 0", field->field_index)); Uint32 zerolen = 0; - field_blob->set_ptr((uchar*) &zerolen, NULL); + field_blob->set_ptr((uchar *)&zerolen, NULL); } continue; } field->set_notnull(row_offset); - if ((*value).ptr) - { - if (!(field->flags & BLOB_FLAG)) - { - int is_null= (*value).rec->isNULL(); - if (is_null) - { - if (is_null > 0) - { - DBUG_PRINT("info",("[%u] NULL", field->field_index)); + if ((*value).ptr) { + if (!(field->flags & BLOB_FLAG)) { + int is_null = (*value).rec->isNULL(); + if (is_null) { + if (is_null > 0) { + DBUG_PRINT("info", ("[%u] NULL", field->field_index)); field->set_null(row_offset); - } - else - { - DBUG_PRINT("info",("[%u] UNDEFINED", field->field_index)); + } else { + DBUG_PRINT("info", ("[%u] UNDEFINED", field->field_index)); bitmap_clear_bit(defined, field->field_index); } - } - else if (field->type() == MYSQL_TYPE_BIT) - { - Field_bit *field_bit= static_cast(field); + } else if (field->type() == MYSQL_TYPE_BIT) { + Field_bit *field_bit = static_cast(field); /* Move internal field pointer to point to 'buf'. Calling @@ -7418,30 +6721,25 @@ static void ndb_unpack_record(TABLE *table, NdbValue *value, type of the object. */ field_bit->Field_bit::move_field_offset(row_offset); - if (field->pack_length() < 5) - { - DBUG_PRINT("info", ("bit field H'%.8X", - (*value).rec->u_32_value())); - field_bit->Field_bit::store((longlong) (*value).rec->u_32_value(), + if (field->pack_length() < 5) { + DBUG_PRINT("info", + ("bit field H'%.8X", (*value).rec->u_32_value())); + field_bit->Field_bit::store((longlong)(*value).rec->u_32_value(), true); - } - else - { - DBUG_PRINT("info", ("bit field H'%.8X%.8X", - *(Uint32 *)(*value).rec->aRef(), - *((Uint32 *)(*value).rec->aRef()+1))); + } else { + DBUG_PRINT("info", + ("bit field H'%.8X%.8X", *(Uint32 *)(*value).rec->aRef(), + *((Uint32 *)(*value).rec->aRef() + 1))); #ifdef WORDS_BIGENDIAN /* lsw is stored first */ - Uint32 *buf= (Uint32 *)(*value).rec->aRef(); - field_bit->Field_bit::store((((longlong)*buf) - & 0x00000000FFFFFFFFLL) - | - ((((longlong)*(buf+1)) << 32) - & 0xFFFFFFFF00000000LL), - true); + Uint32 *buf = (Uint32 *)(*value).rec->aRef(); + field_bit->Field_bit::store( + (((longlong)*buf) & 0x00000000FFFFFFFFLL) | + ((((longlong) * (buf + 1)) << 32) & 0xFFFFFFFF00000000LL), + true); #else - field_bit->Field_bit::store((longlong) - (*value).rec->u_64_value(), true); + field_bit->Field_bit::store((longlong)(*value).rec->u_64_value(), + true); #endif } /* @@ -7449,49 +6747,41 @@ static void ndb_unpack_record(TABLE *table, NdbValue *value, value (usually record[0]). 
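A worked example of the filler-bit expression above: with last_null_bit_pos == 3, 256U - (1U << 3) is 0xF8, so the five most significant bits of the last null byte are forced to 1 while the three low bits remain real null flags for the loop that follows. The same computation stand-alone:

#include <cstdint>

static void set_filler_bits(std::uint8_t *null_bytes, unsigned n_null_bytes,
                            unsigned last_null_bit_pos) {
  // 256U - (1U << pos) sets every bit from 'pos' upwards, i.e. the unused
  // filler bits at the most significant end of the last null byte
  if (n_null_bytes > 0)
    null_bytes[n_null_bytes - 1] |=
        static_cast<std::uint8_t>(256u - (1u << last_null_bit_pos));
}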
*/ field_bit->Field_bit::move_field_offset(-row_offset); - DBUG_PRINT("info",("[%u] SET", - (*value).rec->getColumn()->getColumnNo())); - DBUG_DUMP("info", (const uchar*) field->ptr, field->pack_length()); - } - else - { - DBUG_ASSERT(!strcmp((*value).rec->getColumn()->getName(), field->field_name)); - DBUG_PRINT("info",("[%u] SET", - (*value).rec->getColumn()->getColumnNo())); - DBUG_DUMP("info", (const uchar*) field->ptr, field->pack_length()); + DBUG_PRINT("info", + ("[%u] SET", (*value).rec->getColumn()->getColumnNo())); + DBUG_DUMP("info", (const uchar *)field->ptr, field->pack_length()); + } else { + DBUG_ASSERT( + !strcmp((*value).rec->getColumn()->getName(), field->field_name)); + DBUG_PRINT("info", + ("[%u] SET", (*value).rec->getColumn()->getColumnNo())); + DBUG_DUMP("info", (const uchar *)field->ptr, field->pack_length()); } - } - else - { - NdbBlob *ndb_blob= (*value).blob; - const uint field_no= field->field_index; + } else { + NdbBlob *ndb_blob = (*value).blob; + const uint field_no = field->field_index; int isNull; ndb_blob->getDefined(isNull); - if (isNull == 1) - { - DBUG_PRINT("info",("[%u] NULL", field_no)); + if (isNull == 1) { + DBUG_PRINT("info", ("[%u] NULL", field_no)); field->set_null(row_offset); - } - else if (isNull == -1) - { - DBUG_PRINT("info",("[%u] UNDEFINED", field_no)); + } else if (isNull == -1) { + DBUG_PRINT("info", ("[%u] UNDEFINED", field_no)); bitmap_clear_bit(defined, field_no); - } - else - { + } else { #ifndef DBUG_OFF // pointer was set in get_ndb_blobs_value - Field_blob *field_blob= (Field_blob*)field; - const uchar *ptr= field_blob->get_blob_data(row_offset); - uint32 len= field_blob->get_length(row_offset); - DBUG_PRINT("info",("[%u] SET ptr: 0x%lx len: %u", - field_no, (long) ptr, len)); + Field_blob *field_blob = (Field_blob *)field; + const uchar *ptr = field_blob->get_blob_data(row_offset); + uint32 len = field_blob->get_length(row_offset); + DBUG_PRINT("info", ("[%u] SET ptr: 0x%lx len: %u", field_no, + (long)ptr, len)); #endif } - } // else - } // if ((*value).ptr) + } // else + } // if ((*value).ptr) value++; // this field was not virtual - } // for() + } // for() dbug_tmp_restore_column_map(table->write_set, old_map); DBUG_VOID_RETURN; } @@ -7499,89 +6789,78 @@ static void ndb_unpack_record(TABLE *table, NdbValue *value, /* Handle error states on events from the storage nodes */ -static int -handle_error(NdbEventOperation *pOp) -{ - Ndb_event_data *event_data= (Ndb_event_data *) pOp->getCustomData(); - NDB_SHARE *share= event_data->share; +static int handle_error(NdbEventOperation *pOp) { + Ndb_event_data *event_data = (Ndb_event_data *)pOp->getCustomData(); + NDB_SHARE *share = event_data->share; DBUG_ENTER("handle_error"); - ndb_log_error("NDB Binlog: unhandled error %d for table %s", - pOp->hasError(), share->key_string()); + ndb_log_error("NDB Binlog: unhandled error %d for table %s", pOp->hasError(), + share->key_string()); pOp->clearError(); DBUG_RETURN(0); } - /* Handle _non_ data events from the storage nodes */ -static -void -handle_non_data_event(THD *thd, - NdbEventOperation *pOp, - ndb_binlog_index_row &row) -{ - const Ndb_event_data* event_data= - static_cast<const Ndb_event_data*>(pOp->getCustomData()); - NDB_SHARE *share= event_data->share; - const NDBEVENT::TableEvent type= pOp->getEventType(); +static void handle_non_data_event(THD *thd, NdbEventOperation *pOp, + ndb_binlog_index_row &row) { + const Ndb_event_data *event_data = + static_cast<const Ndb_event_data *>(pOp->getCustomData()); + NDB_SHARE *share = event_data->share; + const NDBEVENT::TableEvent type =
pOp->getEventType(); DBUG_ENTER("handle_non_data_event"); - DBUG_PRINT("enter", ("pOp: %p, event_data: %p, share: %p", - pOp, event_data, share)); + DBUG_PRINT("enter", + ("pOp: %p, event_data: %p, share: %p", pOp, event_data, share)); DBUG_PRINT("enter", ("type: %d", type)); - if (type == NDBEVENT::TE_DROP || - type == NDBEVENT::TE_ALTER) - { + if (type == NDBEVENT::TE_DROP || type == NDBEVENT::TE_ALTER) { // Count schema events row.n_schemaops++; } - switch (type) - { - case NDBEVENT::TE_CLUSTER_FAILURE: - ndb_log_verbose(1, - "NDB Binlog: cluster failure for %s at epoch %u/%u.", - share->key_string(), - (uint)(pOp->getGCI() >> 32), - (uint)(pOp->getGCI())); - // fallthrough - case NDBEVENT::TE_DROP: - if (ndb_apply_status_share == share) - { - if (ndb_binlog_tables_inited && ndb_binlog_running) - ndb_log_verbose( - 1, "NDB Binlog: NDB tables initially readonly on reconnect."); + switch (type) { + case NDBEVENT::TE_CLUSTER_FAILURE: + ndb_log_verbose(1, "NDB Binlog: cluster failure for %s at epoch %u/%u.", + share->key_string(), (uint)(pOp->getGCI() >> 32), + (uint)(pOp->getGCI())); + // fallthrough + case NDBEVENT::TE_DROP: + if (ndb_apply_status_share == share) { + if (ndb_binlog_tables_inited && ndb_binlog_running) + ndb_log_verbose( + 1, "NDB Binlog: NDB tables initially readonly on reconnect."); - /* release the ndb_apply_status_share */ - NDB_SHARE::release_reference(ndb_apply_status_share, - "ndb_apply_status_share"); - ndb_apply_status_share= NULL; + /* release the ndb_apply_status_share */ + NDB_SHARE::release_reference(ndb_apply_status_share, + "ndb_apply_status_share"); + ndb_apply_status_share = NULL; - Mutex_guard injector_g(injector_data_mutex); - ndb_binlog_tables_inited= false; - } + Mutex_guard injector_g(injector_data_mutex); + ndb_binlog_tables_inited = false; + } - ndbcluster_binlog_event_operation_teardown(thd, injector_ndb, pOp); - break; + ndbcluster_binlog_event_operation_teardown(thd, injector_ndb, pOp); + break; - case NDBEVENT::TE_ALTER: - DBUG_PRINT("info", ("TE_ALTER")); - break; + case NDBEVENT::TE_ALTER: + DBUG_PRINT("info", ("TE_ALTER")); + break; - case NDBEVENT::TE_NODE_FAILURE: - case NDBEVENT::TE_SUBSCRIBE: - case NDBEVENT::TE_UNSUBSCRIBE: - /* ignore */ - break; + case NDBEVENT::TE_NODE_FAILURE: + case NDBEVENT::TE_SUBSCRIBE: + case NDBEVENT::TE_UNSUBSCRIBE: + /* ignore */ + break; - default: - ndb_log_error("NDB Binlog: unknown non data event %d for %s. " - "Ignoring...", (unsigned) type, share->key_string()); - break; + default: + ndb_log_error( + "NDB Binlog: unknown non data event %d for %s. 
" + "Ignoring...", + (unsigned)type, share->key_string()); + break; } DBUG_VOID_RETURN; @@ -7590,218 +6869,186 @@ handle_non_data_event(THD *thd, /* Handle data events from the storage nodes */ -inline ndb_binlog_index_row * -ndb_find_binlog_index_row(ndb_binlog_index_row **rows, - uint orig_server_id, int flag) -{ - ndb_binlog_index_row *row= *rows; - if (opt_ndb_log_orig) - { - ndb_binlog_index_row *first= row, *found_id= 0; - for (;;) - { - if (row->orig_server_id == orig_server_id) - { +inline ndb_binlog_index_row *ndb_find_binlog_index_row( + ndb_binlog_index_row **rows, uint orig_server_id, int flag) { + ndb_binlog_index_row *row = *rows; + if (opt_ndb_log_orig) { + ndb_binlog_index_row *first = row, *found_id = 0; + for (;;) { + if (row->orig_server_id == orig_server_id) { /* */ - if (!flag || !row->orig_epoch) - return row; - if (!found_id) - found_id= row; + if (!flag || !row->orig_epoch) return row; + if (!found_id) found_id = row; } - if (row->orig_server_id == 0) - break; - row= row->next; - if (row == NULL) - { + if (row->orig_server_id == 0) break; + row = row->next; + if (row == NULL) { // Allocate memory in current MEM_ROOT - row= (ndb_binlog_index_row*)(*THR_MALLOC)->Alloc(sizeof(ndb_binlog_index_row)); + row = (ndb_binlog_index_row *)(*THR_MALLOC) + ->Alloc(sizeof(ndb_binlog_index_row)); memset(row, 0, sizeof(ndb_binlog_index_row)); - row->next= first; - *rows= row; - if (found_id) - { + row->next = first; + *rows = row; + if (found_id) { /* If we found index_row with same server id already that row will contain the current stats. Copy stats over to new and reset old. */ - row->n_inserts= found_id->n_inserts; - row->n_updates= found_id->n_updates; - row->n_deletes= found_id->n_deletes; - found_id->n_inserts= 0; - found_id->n_updates= 0; - found_id->n_deletes= 0; + row->n_inserts = found_id->n_inserts; + row->n_updates = found_id->n_updates; + row->n_deletes = found_id->n_deletes; + found_id->n_inserts = 0; + found_id->n_updates = 0; + found_id->n_deletes = 0; } /* keep track of schema ops only on "first" index_row */ - row->n_schemaops= first->n_schemaops; - first->n_schemaops= 0; + row->n_schemaops = first->n_schemaops; + first->n_schemaops = 0; break; } } - row->orig_server_id= orig_server_id; + row->orig_server_id = orig_server_id; } return row; } - -static int -handle_data_event(NdbEventOperation *pOp, - ndb_binlog_index_row **rows, - injector::transaction &trans, - unsigned &trans_row_count, - unsigned &trans_slave_row_count) -{ - Ndb_event_data *event_data= (Ndb_event_data *) pOp->getCustomData(); - TABLE *table= event_data->shadow_table; - NDB_SHARE *share= event_data->share; +static int handle_data_event(NdbEventOperation *pOp, + ndb_binlog_index_row **rows, + injector::transaction &trans, + unsigned &trans_row_count, + unsigned &trans_slave_row_count) { + Ndb_event_data *event_data = (Ndb_event_data *)pOp->getCustomData(); + TABLE *table = event_data->shadow_table; + NDB_SHARE *share = event_data->share; bool reflected_op = false; bool refresh_op = false; bool read_op = false; - if (pOp != share->op) - { + if (pOp != share->op) { return 0; } - uint32 anyValue= pOp->getAnyValue(); - if (ndbcluster_anyvalue_is_reserved(anyValue)) - { - if (ndbcluster_anyvalue_is_nologging(anyValue)) - return 0; - - if (ndbcluster_anyvalue_is_reflect_op(anyValue)) - { + uint32 anyValue = pOp->getAnyValue(); + if (ndbcluster_anyvalue_is_reserved(anyValue)) { + if (ndbcluster_anyvalue_is_nologging(anyValue)) return 0; + + if (ndbcluster_anyvalue_is_reflect_op(anyValue)) { 
DBUG_PRINT("info", ("Anyvalue -> Reflect (%u)", anyValue)); reflected_op = true; anyValue = 0; - } - else if (ndbcluster_anyvalue_is_refresh_op(anyValue)) - { + } else if (ndbcluster_anyvalue_is_refresh_op(anyValue)) { DBUG_PRINT("info", ("Anyvalue -> Refresh")); refresh_op = true; anyValue = 0; - } - else if (ndbcluster_anyvalue_is_read_op(anyValue)) - { + } else if (ndbcluster_anyvalue_is_read_op(anyValue)) { DBUG_PRINT("info", ("Anyvalue -> Read")); read_op = true; anyValue = 0; - } - else - { - ndb_log_warning("unknown value for binlog signalling 0x%X, " - "event not logged", - anyValue); + } else { + ndb_log_warning( + "unknown value for binlog signalling 0x%X, " + "event not logged", + anyValue); return 0; } } - uint32 originating_server_id= ndbcluster_anyvalue_get_serverid(anyValue); + uint32 originating_server_id = ndbcluster_anyvalue_get_serverid(anyValue); bool log_this_slave_update = g_ndb_log_slave_updates; bool count_this_event = true; - if (share == ndb_apply_status_share) - { - /* - Note that option values are read without synchronisation w.r.t. + if (share == ndb_apply_status_share) { + /* + Note that option values are read without synchronisation w.r.t. thread setting option variable or epoch boundaries. */ - if (opt_ndb_log_apply_status || - opt_ndb_log_orig) - { - Uint32 ndb_apply_status_logging_server_id= originating_server_id; - Uint32 ndb_apply_status_server_id= 0; - Uint64 ndb_apply_status_epoch= 0; + if (opt_ndb_log_apply_status || opt_ndb_log_orig) { + Uint32 ndb_apply_status_logging_server_id = originating_server_id; + Uint32 ndb_apply_status_server_id = 0; + Uint64 ndb_apply_status_epoch = 0; bool event_has_data = false; - switch(pOp->getEventType()) - { - case NDBEVENT::TE_INSERT: - case NDBEVENT::TE_UPDATE: - event_has_data = true; - break; + switch (pOp->getEventType()) { + case NDBEVENT::TE_INSERT: + case NDBEVENT::TE_UPDATE: + event_has_data = true; + break; - case NDBEVENT::TE_DELETE: - break; - default: - /* We should REALLY never get here */ - abort(); + case NDBEVENT::TE_DELETE: + break; + default: + /* We should REALLY never get here */ + abort(); } - - if (likely( event_has_data )) - { + + if (likely(event_has_data)) { /* unpack data to fetch orig_server_id and orig_epoch */ MY_BITMAP b; uint32 bitbuf[128 / (sizeof(uint32) * 8)]; ndb_bitmap_init(b, bitbuf, table->s->fields); bitmap_copy(&b, &event_data->stored_columns); - ndb_unpack_record(table, event_data->ndb_value[0], &b, table->record[0]); - ndb_apply_status_server_id= (uint)((Field_long *)table->field[0])->val_int(); - ndb_apply_status_epoch= ((Field_longlong *)table->field[1])->val_int(); - - if (opt_ndb_log_apply_status) - { - /* + ndb_unpack_record(table, event_data->ndb_value[0], &b, + table->record[0]); + ndb_apply_status_server_id = + (uint)((Field_long *)table->field[0])->val_int(); + ndb_apply_status_epoch = ((Field_longlong *)table->field[1])->val_int(); + + if (opt_ndb_log_apply_status) { + /* Determine if event came from our immediate Master server - Ignore locally manually sourced and reserved events + Ignore locally manually sourced and reserved events */ if ((ndb_apply_status_logging_server_id != 0) && - (! 
ndbcluster_anyvalue_is_reserved(ndb_apply_status_logging_server_id))) - { + (!ndbcluster_anyvalue_is_reserved( + ndb_apply_status_logging_server_id))) { bool isFromImmediateMaster = (ndb_apply_status_server_id == ndb_apply_status_logging_server_id); - - if (isFromImmediateMaster) - { - /* - We log this event with our server-id so that it + + if (isFromImmediateMaster) { + /* + We log this event with our server-id so that it propagates back to the originating Master (our immediate Master) */ assert(ndb_apply_status_logging_server_id != ::server_id); - - originating_server_id= 0; /* Will be set to our ::serverid below */ + + originating_server_id = + 0; /* Will be set to our ::serverid below */ } } } - if (opt_ndb_log_orig) - { + if (opt_ndb_log_orig) { /* store */ - ndb_binlog_index_row *row= ndb_find_binlog_index_row - (rows, ndb_apply_status_server_id, 1); - row->orig_epoch= ndb_apply_status_epoch; + ndb_binlog_index_row *row = + ndb_find_binlog_index_row(rows, ndb_apply_status_server_id, 1); + row->orig_epoch = ndb_apply_status_epoch; } } - } // opt_ndb_log_apply_status || opt_ndb_log_orig) + } // opt_ndb_log_apply_status || opt_ndb_log_orig) - if (opt_ndb_log_apply_status) - { + if (opt_ndb_log_apply_status) { /* We are logging ndb_apply_status changes * Don't count this event as making an epoch non-empty * Log this event in the Binlog */ count_this_event = false; log_this_slave_update = true; - } - else - { + } else { /* Not logging ndb_apply_status updates, discard this event now */ return 0; } } - + if (originating_server_id == 0) - originating_server_id= ::server_id; - else - { + originating_server_id = ::server_id; + else { assert(!reflected_op && !refresh_op); /* Track that we received a replicated row event */ - if (likely( count_this_event )) - trans_slave_row_count++; - - if (!log_this_slave_update) - { + if (likely(count_this_event)) trans_slave_row_count++; + + if (!log_this_slave_update) { /* This event comes from a slave applier since it has an originating server id set. Since option to log slave updates is not set, skip it. @@ -7810,74 +7057,61 @@ handle_data_event(NdbEventOperation *pOp, } } - /* + /* Start with logged_server_id as AnyValue in case it's a composite (server_id_bits < 31). This way any user-values are passed-through to the Binlog in the high bits of the event's Server Id. In future it may be useful to support *not* mapping composite AnyValues to/from Binlogged server-ids. */ - uint32 logged_server_id= anyValue; + uint32 logged_server_id = anyValue; ndbcluster_anyvalue_set_serverid(logged_server_id, originating_server_id); /* Get NdbApi transaction id for this event to put into Binlog */ Ndb_binlog_extra_row_info extra_row_info; - const unsigned char* extra_row_info_ptr = NULL; + const unsigned char *extra_row_info_ptr = NULL; Uint16 erif_flags = 0; - if (opt_ndb_log_transaction_id) - { + if (opt_ndb_log_transaction_id) { erif_flags |= Ndb_binlog_extra_row_info::NDB_ERIF_TRANSID; extra_row_info.setTransactionId(pOp->getTransId()); } /* Set conflict flags member if necessary */ Uint16 event_conflict_flags = 0; - assert(! 
(reflected_op && refresh_op)); - if (reflected_op) - { + assert(!(reflected_op && refresh_op)); + if (reflected_op) { event_conflict_flags |= NDB_ERIF_CFT_REFLECT_OP; - } - else if (refresh_op) - { + } else if (refresh_op) { event_conflict_flags |= NDB_ERIF_CFT_REFRESH_OP; - } - else if (read_op) - { + } else if (read_op) { event_conflict_flags |= NDB_ERIF_CFT_READ_OP; } - - if (DBUG_EVALUATE_IF("ndb_injector_set_event_conflict_flags", true, false)) - { + + if (DBUG_EVALUATE_IF("ndb_injector_set_event_conflict_flags", true, false)) { event_conflict_flags = 0xfafa; } - if (event_conflict_flags != 0) - { + if (event_conflict_flags != 0) { erif_flags |= Ndb_binlog_extra_row_info::NDB_ERIF_CFT_FLAGS; extra_row_info.setConflictFlags(event_conflict_flags); } - if (erif_flags != 0) - { + if (erif_flags != 0) { extra_row_info.setFlags(erif_flags); - if (likely(!log_bin_use_v1_row_events)) - { + if (likely(!log_bin_use_v1_row_events)) { extra_row_info_ptr = extra_row_info.generateBuffer(); - } - else - { + } else { /** * Can't put the metadata in a v1 event * Produce 1 warning at most */ - if (!g_injector_v1_warning_emitted) - { - ndb_log_error("Binlog Injector discarding row event " - "meta data as server is using v1 row events. " - "(%u %x)", - opt_ndb_log_transaction_id, - event_conflict_flags); + if (!g_injector_v1_warning_emitted) { + ndb_log_error( + "Binlog Injector discarding row event " + "meta data as server is using v1 row events. " + "(%u %x)", + opt_ndb_log_transaction_id, event_conflict_flags); g_injector_v1_warning_emitted = true; } @@ -7892,13 +7126,12 @@ handle_data_event(NdbEventOperation *pOp, #endif MY_BITMAP b; - my_bitmap_map bitbuf[(NDB_MAX_ATTRIBUTES_IN_TABLE + - 8*sizeof(my_bitmap_map) - 1) / - (8*sizeof(my_bitmap_map))]; + my_bitmap_map + bitbuf[(NDB_MAX_ATTRIBUTES_IN_TABLE + 8 * sizeof(my_bitmap_map) - 1) / + (8 * sizeof(my_bitmap_map))]; ndb_bitmap_init(b, bitbuf, table->s->fields); bitmap_copy(&b, &event_data->stored_columns); - if (bitmap_is_clear_all(&b)) - { + if (bitmap_is_clear_all(&b)) { DBUG_PRINT("info", ("Skip logging of event without stored columns")); return 0; } @@ -7913,177 +7146,159 @@ handle_data_event(NdbEventOperation *pOp, for now malloc/free blobs buffer each time TODO if possible share single permanent buffer with handlers */ - uchar* blobs_buffer[2] = { 0, 0 }; - uint blobs_buffer_size[2] = { 0, 0 }; + uchar *blobs_buffer[2] = {0, 0}; + uint blobs_buffer_size[2] = {0, 0}; - ndb_binlog_index_row *row= - ndb_find_binlog_index_row(rows, originating_server_id, 0); + ndb_binlog_index_row *row = + ndb_find_binlog_index_row(rows, originating_server_id, 0); - switch(pOp->getEventType()) - { - case NDBEVENT::TE_INSERT: - if (likely( count_this_event )) - { - row->n_inserts++; - trans_row_count++; - } - DBUG_PRINT("info", ("INSERT INTO %s.%s", - table->s->db.str, table->s->table_name.str)); - { - int ret; - (void) ret; // Bug27150740 HANDLE_DATA_EVENT NEED ERROR HANDLING - if (event_data->have_blobs) - { - ptrdiff_t ptrdiff= 0; - ret = get_ndb_blobs_value(table, event_data->ndb_value[0], - blobs_buffer[0], - blobs_buffer_size[0], - ptrdiff); - assert(ret == 0); + switch (pOp->getEventType()) { + case NDBEVENT::TE_INSERT: + if (likely(count_this_event)) { + row->n_inserts++; + trans_row_count++; } - ndb_unpack_record(table, event_data->ndb_value[0], &b, table->record[0]); - ret = trans.write_row(logged_server_id, - injector::transaction::table(table, true), - &b, table->record[0], - extra_row_info_ptr); - assert(ret == 0); - } - break; - case 
NDBEVENT::TE_DELETE: - if (likely( count_this_event )) - { - row->n_deletes++; - trans_row_count++; - } - DBUG_PRINT("info",("DELETE FROM %s.%s", - table->s->db.str, table->s->table_name.str)); - { - /* - table->record[0] contains only the primary key in this case - since we do not have an after image - */ - int n; - if (!share->get_binlog_full() && table->s->primary_key != MAX_KEY) - n= 0; /* - use the primary key only as it save time and space and - it is the only thing needed to log the delete - */ - else - n= 1; /* - we use the before values since we don't have a primary key - since the mysql server does not handle the hidden primary - key - */ - - int ret; - (void) ret; // Bug27150740 HANDLE_DATA_EVENT NEED ERROR HANDLING - if (event_data->have_blobs) + DBUG_PRINT("info", ("INSERT INTO %s.%s", table->s->db.str, + table->s->table_name.str)); { - ptrdiff_t ptrdiff= table->record[n] - table->record[0]; - ret = get_ndb_blobs_value(table, event_data->ndb_value[n], - blobs_buffer[n], - blobs_buffer_size[n], - ptrdiff); + int ret; + (void)ret; // Bug27150740 HANDLE_DATA_EVENT NEED ERROR HANDLING + if (event_data->have_blobs) { + ptrdiff_t ptrdiff = 0; + ret = get_ndb_blobs_value(table, event_data->ndb_value[0], + blobs_buffer[0], blobs_buffer_size[0], + ptrdiff); + assert(ret == 0); + } + ndb_unpack_record(table, event_data->ndb_value[0], &b, + table->record[0]); + ret = trans.write_row(logged_server_id, + injector::transaction::table(table, true), &b, + table->record[0], extra_row_info_ptr); assert(ret == 0); } - ndb_unpack_record(table, event_data->ndb_value[n], &b, table->record[n]); - DBUG_EXECUTE("info", Ndb_table_map::print_record(table, - table->record[n]);); - ret = trans.delete_row(logged_server_id, - injector::transaction::table(table, true), - &b, table->record[n], - extra_row_info_ptr); - assert(ret == 0); - } - break; - case NDBEVENT::TE_UPDATE: - if (likely( count_this_event )) - { - row->n_updates++; - trans_row_count++; - } - DBUG_PRINT("info", ("UPDATE %s.%s", - table->s->db.str, table->s->table_name.str)); - { - int ret; - (void) ret; // Bug27150740 HANDLE_DATA_EVENT NEED ERROR HANDLING - if (event_data->have_blobs) - { - ptrdiff_t ptrdiff= 0; - ret = get_ndb_blobs_value(table, event_data->ndb_value[0], - blobs_buffer[0], - blobs_buffer_size[0], - ptrdiff); - assert(ret == 0); + break; + case NDBEVENT::TE_DELETE: + if (likely(count_this_event)) { + row->n_deletes++; + trans_row_count++; } - ndb_unpack_record(table, event_data->ndb_value[0], - &b, table->record[0]); - DBUG_EXECUTE("info", Ndb_table_map::print_record(table, - table->record[0]);); - if (table->s->primary_key != MAX_KEY && - !share->get_binlog_use_update()) + DBUG_PRINT("info", ("DELETE FROM %s.%s", table->s->db.str, + table->s->table_name.str)); { /* - since table has a primary key, we can do a write - using only after values + table->record[0] contains only the primary key in this case + since we do not have an after image */ - ret = trans.write_row(logged_server_id, - injector::transaction::table(table, true), - &b, table->record[0],// after values - extra_row_info_ptr); + int n; + if (!share->get_binlog_full() && table->s->primary_key != MAX_KEY) + n = 0; /* + use the primary key only as it saves time and space and + it is the only thing needed to log the delete + */ + else + n = 1; /* + we use the before values since we don't have a primary key + since the mysql server does not handle the hidden primary + key + */ + + int ret; + (void)ret; // Bug27150740 HANDLE_DATA_EVENT NEED ERROR HANDLING + if
(event_data->have_blobs) { + ptrdiff_t ptrdiff = table->record[n] - table->record[0]; + ret = get_ndb_blobs_value(table, event_data->ndb_value[n], + blobs_buffer[n], blobs_buffer_size[n], + ptrdiff); + assert(ret == 0); + } + ndb_unpack_record(table, event_data->ndb_value[n], &b, + table->record[n]); + DBUG_EXECUTE("info", + Ndb_table_map::print_record(table, table->record[n]);); + ret = trans.delete_row(logged_server_id, + injector::transaction::table(table, true), &b, + table->record[n], extra_row_info_ptr); assert(ret == 0); } - else + break; + case NDBEVENT::TE_UPDATE: + if (likely(count_this_event)) { + row->n_updates++; + trans_row_count++; + } + DBUG_PRINT("info", + ("UPDATE %s.%s", table->s->db.str, table->s->table_name.str)); { - /* - mysql server cannot handle the ndb hidden key and - therefore needs the before image as well - */ - if (event_data->have_blobs) - { - ptrdiff_t ptrdiff= table->record[1] - table->record[0]; - ret = get_ndb_blobs_value(table, event_data->ndb_value[1], - blobs_buffer[1], - blobs_buffer_size[1], + int ret; + (void)ret; // Bug27150740 HANDLE_DATA_EVENT NEED ERROR HANDLING + if (event_data->have_blobs) { + ptrdiff_t ptrdiff = 0; + ret = get_ndb_blobs_value(table, event_data->ndb_value[0], + blobs_buffer[0], blobs_buffer_size[0], ptrdiff); assert(ret == 0); } - ndb_unpack_record(table, event_data->ndb_value[1], &b, table->record[1]); - DBUG_EXECUTE("info", Ndb_table_map::print_record(table, - table->record[1]);); - - MY_BITMAP col_bitmap_before_update; - my_bitmap_map bitbuf[(NDB_MAX_ATTRIBUTES_IN_TABLE + - 8*sizeof(my_bitmap_map) - 1) / - (8*sizeof(my_bitmap_map))]; - ndb_bitmap_init(col_bitmap_before_update, bitbuf, table->s->fields); - if (share->get_binlog_update_minimal()) - { - event_data->generate_minimal_bitmap(&col_bitmap_before_update, &b); - } - else - { - bitmap_copy(&col_bitmap_before_update, &b); - } + ndb_unpack_record(table, event_data->ndb_value[0], &b, + table->record[0]); + DBUG_EXECUTE("info", + Ndb_table_map::print_record(table, table->record[0]);); + if (table->s->primary_key != MAX_KEY && + !share->get_binlog_use_update()) { + /* + since table has a primary key, we can do a write + using only after values + */ + ret = trans.write_row(logged_server_id, + injector::transaction::table(table, true), &b, + table->record[0], // after values + extra_row_info_ptr); + assert(ret == 0); + } else { + /* + mysql server cannot handle the ndb hidden key and + therefore needs the before image as well + */ + if (event_data->have_blobs) { + ptrdiff_t ptrdiff = table->record[1] - table->record[0]; + ret = get_ndb_blobs_value(table, event_data->ndb_value[1], + blobs_buffer[1], blobs_buffer_size[1], + ptrdiff); + assert(ret == 0); + } + ndb_unpack_record(table, event_data->ndb_value[1], &b, + table->record[1]); + DBUG_EXECUTE("info", + Ndb_table_map::print_record(table, table->record[1]);); + + MY_BITMAP col_bitmap_before_update; + my_bitmap_map bitbuf[(NDB_MAX_ATTRIBUTES_IN_TABLE + + 8 * sizeof(my_bitmap_map) - 1) / + (8 * sizeof(my_bitmap_map))]; + ndb_bitmap_init(col_bitmap_before_update, bitbuf, table->s->fields); + if (share->get_binlog_update_minimal()) { + event_data->generate_minimal_bitmap(&col_bitmap_before_update, &b); + } else { + bitmap_copy(&col_bitmap_before_update, &b); + } - ret = trans.update_row(logged_server_id, - injector::transaction::table(table, true), - &col_bitmap_before_update, &b, - table->record[1], // before values - table->record[0], // after values - extra_row_info_ptr); - assert(ret == 0); + ret = 
trans.update_row(logged_server_id, + injector::transaction::table(table, true), + &col_bitmap_before_update, &b, + table->record[1], // before values + table->record[0], // after values + extra_row_info_ptr); + assert(ret == 0); + } } - } - break; - default: - /* We should REALLY never get here. */ - DBUG_PRINT("info", ("default - uh oh, a brain exploded.")); - break; + break; + default: + /* We should REALLY never get here. */ + DBUG_PRINT("info", ("default - uh oh, a brain exploded.")); + break; } - if (event_data->have_blobs) - { + if (event_data->have_blobs) { my_free(blobs_buffer[0]); my_free(blobs_buffer[1]); } @@ -8091,33 +7306,30 @@ handle_data_event(NdbEventOperation *pOp, return 0; } - /**************************************************************** Injector thread main loop ****************************************************************/ -void -Ndb_binlog_thread::remove_event_operations(Ndb* ndb) const -{ +void Ndb_binlog_thread::remove_event_operations(Ndb *ndb) const { DBUG_ENTER("remove_event_operations"); NdbEventOperation *op; - while ((op= ndb->getEventOperation())) - { - DBUG_ASSERT(!ndb_name_is_blob_prefix(op->getEvent()->getTable()->getName())); - DBUG_PRINT("info", ("removing event operation on %s", - op->getEvent()->getName())); + while ((op = ndb->getEventOperation())) { + DBUG_ASSERT( + !ndb_name_is_blob_prefix(op->getEvent()->getTable()->getName())); + DBUG_PRINT("info", + ("removing event operation on %s", op->getEvent()->getName())); - Ndb_event_data *event_data= (Ndb_event_data *) op->getCustomData(); + Ndb_event_data *event_data = (Ndb_event_data *)op->getCustomData(); DBUG_ASSERT(event_data); - NDB_SHARE *share= event_data->share; + NDB_SHARE *share = event_data->share; DBUG_ASSERT(share != NULL); DBUG_ASSERT(share->op == op); Ndb_event_data::destroy(event_data); op->setCustomData(NULL); mysql_mutex_lock(&share->mutex); - share->op= 0; + share->op = 0; mysql_mutex_unlock(&share->mutex); NDB_SHARE::release_reference(share, "binlog"); @@ -8131,21 +7343,17 @@ void Ndb_binlog_thread::remove_all_event_operations(Ndb *s_ndb, Ndb *i_ndb) const { DBUG_ENTER("remove_all_event_operations"); - if (ndb_apply_status_share) - { + if (ndb_apply_status_share) { NDB_SHARE::release_reference(ndb_apply_status_share, "ndb_apply_status_share"); - ndb_apply_status_share= NULL; + ndb_apply_status_share = NULL; } - if (s_ndb) - remove_event_operations(s_ndb); + if (s_ndb) remove_event_operations(s_ndb); - if (i_ndb) - remove_event_operations(i_ndb); + if (i_ndb) remove_event_operations(i_ndb); - if (ndb_log_get_verbose_level() > 15) - { + if (ndb_log_get_verbose_level() > 15) { NDB_SHARE::print_remaining_open_tables(); } DBUG_VOID_RETURN; @@ -8157,15 +7365,13 @@ static long long g_event_bytes_count = 0; static void update_injector_stats(Ndb *schemaNdb, Ndb *dataNdb) { // Update globals to sum of totals from each listening Ndb object - g_event_data_count = - schemaNdb->getClientStat(Ndb::DataEventsRecvdCount) + - dataNdb->getClientStat(Ndb::DataEventsRecvdCount); - g_event_nondata_count = - schemaNdb->getClientStat(Ndb::NonDataEventsRecvdCount) + - dataNdb->getClientStat(Ndb::NonDataEventsRecvdCount); - g_event_bytes_count = - schemaNdb->getClientStat(Ndb::EventBytesRecvdCount) + - dataNdb->getClientStat(Ndb::EventBytesRecvdCount); + g_event_data_count = schemaNdb->getClientStat(Ndb::DataEventsRecvdCount) + + dataNdb->getClientStat(Ndb::DataEventsRecvdCount); + g_event_nondata_count = + schemaNdb->getClientStat(Ndb::NonDataEventsRecvdCount) + + 
dataNdb->getClientStat(Ndb::NonDataEventsRecvdCount); + g_event_bytes_count = schemaNdb->getClientStat(Ndb::EventBytesRecvdCount) + + dataNdb->getClientStat(Ndb::EventBytesRecvdCount); } static SHOW_VAR ndb_status_vars_injector[] = { @@ -8195,39 +7401,31 @@ int show_ndb_status_injector(THD *, SHOW_VAR *var, char *) { When applied on the Slave it gives a transactional position marker */ -static -bool -injectApplyStatusWriteRow(injector::transaction& trans, - ulonglong gci) -{ +static bool injectApplyStatusWriteRow(injector::transaction &trans, + ulonglong gci) { DBUG_ENTER("injectApplyStatusWriteRow"); - if (ndb_apply_status_share == NULL) - { + if (ndb_apply_status_share == NULL) { ndb_log_error("Could not get apply status share"); DBUG_ASSERT(ndb_apply_status_share != NULL); DBUG_RETURN(false); } - longlong gci_to_store = (longlong) gci; + longlong gci_to_store = (longlong)gci; #ifndef DBUG_OFF - if (DBUG_EVALUATE_IF("ndb_binlog_injector_cycle_gcis", true, false)) - { - ulonglong gciHi = ((gci_to_store >> 32) - & 0xffffffff); + if (DBUG_EVALUATE_IF("ndb_binlog_injector_cycle_gcis", true, false)) { + ulonglong gciHi = ((gci_to_store >> 32) & 0xffffffff); ulonglong gciLo = (gci_to_store & 0xffffffff); gciHi = (gciHi % 3); - ndb_log_warning("Binlog injector cycling gcis (%llu -> %llu)", - gci_to_store, (gciHi << 32) + gciLo); + ndb_log_warning("Binlog injector cycling gcis (%llu -> %llu)", gci_to_store, + (gciHi << 32) + gciLo); gci_to_store = (gciHi << 32) + gciLo; } - if (DBUG_EVALUATE_IF("ndb_binlog_injector_repeat_gcis", true, false)) - { - ulonglong gciHi = ((gci_to_store >> 32) - & 0xffffffff); + if (DBUG_EVALUATE_IF("ndb_binlog_injector_repeat_gcis", true, false)) { + ulonglong gciHi = ((gci_to_store >> 32) & 0xffffffff); ulonglong gciLo = (gci_to_store & 0xffffffff); - gciHi=0xffffff00; - gciLo=0; + gciHi = 0xffffff00; + gciLo = 0; ndb_log_warning("Binlog injector repeating gcis (%llu -> %llu)", gci_to_store, (gciHi << 32) + gciLo); gci_to_store = (gciHi << 32) + gciLo; @@ -8239,11 +7437,11 @@ injectApplyStatusWriteRow(injector::transaction& trans, First get the relevant table structure. */ DBUG_ASSERT(ndb_apply_status_share->op); - Ndb_event_data* event_data= - (Ndb_event_data *) ndb_apply_status_share->op->getCustomData(); + Ndb_event_data *event_data = + (Ndb_event_data *)ndb_apply_status_share->op->getCustomData(); DBUG_ASSERT(event_data); DBUG_ASSERT(event_data->shadow_table); - TABLE* apply_status_table= event_data->shadow_table; + TABLE *apply_status_table = event_data->shadow_table; /* Initialize apply_status_table->record[0] @@ -8253,8 +7451,8 @@ injectApplyStatusWriteRow(injector::transaction& trans, in record[0] would be overwritten here by a subsequent event on a normal table. So save and restore its record[0].
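The fixed sav_buf below is sized for this: sav_max (512) comfortably covers the current 284-byte reclength, and the DBUG_ASSERT guards against the table definition growing past it.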
*/ - static const ulong sav_max= 512; // current is 284 - const ulong sav_len= apply_status_table->s->reclength; + static const ulong sav_max = 512; // current is 284 + const ulong sav_len = apply_status_table->s->reclength; DBUG_ASSERT(sav_len <= sav_max); uchar sav_buf[sav_max]; memcpy(sav_buf, apply_status_table->record[0], sav_len); @@ -8266,19 +7464,16 @@ injectApplyStatusWriteRow(injector::transaction& trans, apply_status_table->field[3]->store((longlong)0, true); apply_status_table->field[4]->store((longlong)0, true); #ifndef DBUG_OFF - const LEX_CSTRING &name= apply_status_table->s->table_name; - DBUG_PRINT("info", ("use_table: %.*s", - (int) name.length, name.str)); + const LEX_CSTRING &name = apply_status_table->s->table_name; + DBUG_PRINT("info", ("use_table: %.*s", (int)name.length, name.str)); #endif injector::transaction::table tbl(apply_status_table, true); int ret = trans.use_table(::server_id, tbl); ndbcluster::ndbrequire(ret == 0); - ret= trans.write_row(::server_id, - injector::transaction::table(apply_status_table, - true), - &apply_status_table->s->all_set, - apply_status_table->record[0]); + ret = trans.write_row( + ::server_id, injector::transaction::table(apply_status_table, true), + &apply_status_table->s->all_set, apply_status_table->record[0]); assert(ret == 0); @@ -8286,25 +7481,16 @@ injectApplyStatusWriteRow(injector::transaction& trans, DBUG_RETURN(true); } - extern ulong opt_ndb_report_thresh_binlog_epoch_slip; extern ulong opt_ndb_report_thresh_binlog_mem_usage; extern ulong opt_ndb_eventbuffer_max_alloc; extern uint opt_ndb_eventbuffer_free_percent; -Ndb_binlog_thread::Ndb_binlog_thread() - : Ndb_component("Binlog") -{ -} - - -Ndb_binlog_thread::~Ndb_binlog_thread() -{ -} +Ndb_binlog_thread::Ndb_binlog_thread() : Ndb_component("Binlog") {} +Ndb_binlog_thread::~Ndb_binlog_thread() {} -void Ndb_binlog_thread::do_wakeup() -{ +void Ndb_binlog_thread::do_wakeup() { log_info("Wakeup"); /* @@ -8318,31 +7504,25 @@ void Ndb_binlog_thread::do_wakeup() */ } -bool -Ndb_binlog_thread::check_reconnect_incident(THD* thd, injector *inj, - Reconnect_type incident_id) const -{ +bool Ndb_binlog_thread::check_reconnect_incident( + THD *thd, injector *inj, Reconnect_type incident_id) const { log_verbose(1, "Check for incidents"); - if (incident_id == MYSQLD_STARTUP) - { + if (incident_id == MYSQLD_STARTUP) { LOG_INFO log_info; mysql_bin_log.get_current_log(&log_info); - log_verbose(60, " - current binlog file: %s", - log_info.log_file_name); + log_verbose(60, " - current binlog file: %s", log_info.log_file_name); uint log_number = 0; - if ((sscanf(strend(log_info.log_file_name) - 6, "%u", - &log_number) == 1) && - log_number == 1) - { + if ((sscanf(strend(log_info.log_file_name) - 6, "%u", &log_number) == 1) && + log_number == 1) { /* This is the first binlog file, skip writing incident since there is really no log to have a gap in */ log_verbose(60, " - skipping incident for first log, log_number: %u", log_number); - return false; // No incident written + return false; // No incident written } log_verbose(60, " - current binlog file number: %u", log_number); } @@ -8362,7 +7542,7 @@ Ndb_binlog_thread::check_reconnect_incident(THD* thd, injector *inj, (void)inj->record_incident( thd, binary_log::Incident_event::INCIDENT_LOST_EVENTS, msg); - return true; // Incident written + return true; // Incident written } bool Ndb_binlog_thread::handle_purge(const char *filename) { @@ -8401,24 +7581,17 @@ void Ndb_binlog_thread::recall_pending_purges(THD *thd) { Events are handled one epoch
at a time. Handle the lowest available epoch first. */ -static -Uint64 -find_epoch_to_handle(const NdbEventOperation *s_pOp, - const NdbEventOperation *i_pOp) -{ - if (i_pOp != NULL) - { - if (s_pOp != NULL) - { - return std::min(i_pOp->getEpoch(),s_pOp->getEpoch()); +static Uint64 find_epoch_to_handle(const NdbEventOperation *s_pOp, + const NdbEventOperation *i_pOp) { + if (i_pOp != NULL) { + if (s_pOp != NULL) { + return std::min(i_pOp->getEpoch(), s_pOp->getEpoch()); } return i_pOp->getEpoch(); } - if (s_pOp != NULL) - { - if (ndb_binlog_running) - { - return std::min(ndb_latest_received_binlog_epoch,s_pOp->getEpoch()); + if (s_pOp != NULL) { + if (ndb_binlog_running) { + return std::min(ndb_latest_received_binlog_epoch, s_pOp->getEpoch()); } return s_pOp->getEpoch(); } @@ -8426,7 +7599,6 @@ find_epoch_to_handle(const NdbEventOperation *s_pOp, return ndb_latest_received_binlog_epoch; } - static long long g_metadata_synced_count = 0; static void increment_metadata_synced_count() { g_metadata_synced_count++; } @@ -8442,19 +7614,14 @@ int show_ndb_metadata_synced(THD *, SHOW_VAR *var, char *) { return 0; } - -void -Ndb_binlog_thread::synchronize_detected_object(THD *thd) -{ - if (metadata_sync.object_queue_empty()) - { +void Ndb_binlog_thread::synchronize_detected_object(THD *thd) { + if (metadata_sync.object_queue_empty()) { // No objects pending sync return; } Ndb_global_schema_lock_guard global_schema_lock_guard(thd); - if (!global_schema_lock_guard.try_lock()) - { + if (!global_schema_lock_guard.try_lock()) { // Failed to obtain GSL return; } @@ -8463,99 +7630,81 @@ Ndb_binlog_thread::synchronize_detected_object(THD *thd) std::string db_name, object_name; object_detected_type object_type; metadata_sync.get_next_object(db_name, object_name, object_type); - switch (object_type) - { - case object_detected_type::LOGFILE_GROUP_OBJECT: - { + switch (object_type) { + case object_detected_type::LOGFILE_GROUP_OBJECT: { bool temp_error; - if (metadata_sync.sync_logfile_group(thd, object_name, temp_error)) - { + if (metadata_sync.sync_logfile_group(thd, object_name, temp_error)) { log_info("Logfile group '%s' successfully synchronized", object_name.c_str()); increment_metadata_synced_count(); - } - else if (temp_error) - { - log_info("Failed to synchronize logfile group '%s' due to a temporary " - "error", object_name.c_str()); - } - else - { + } else if (temp_error) { + log_info( + "Failed to synchronize logfile group '%s' due to a temporary " + "error", + object_name.c_str()); + } else { log_error("Failed to synchronize logfile group '%s'", object_name.c_str()); metadata_sync.add_object_to_blacklist(db_name, object_name, object_type); increment_metadata_synced_count(); } - } - break; - case object_detected_type::TABLESPACE_OBJECT: - { + } break; + case object_detected_type::TABLESPACE_OBJECT: { bool temp_error; - if (metadata_sync.sync_tablespace(thd, object_name, temp_error)) - { + if (metadata_sync.sync_tablespace(thd, object_name, temp_error)) { log_info("Tablespace '%s' successfully synchronized", object_name.c_str()); increment_metadata_synced_count(); - } - else if (temp_error) - { - log_info("Failed to synchronize tablespace '%s' due to a temporary " - "error", object_name.c_str()); - } - else - { + } else if (temp_error) { + log_info( + "Failed to synchronize tablespace '%s' due to a temporary " + "error", + object_name.c_str()); + } else { log_error("Failed to synchronize tablespace '%s'", object_name.c_str()); metadata_sync.add_object_to_blacklist(db_name, object_name, object_type); 
increment_metadata_synced_count(); } - } - break; - case object_detected_type::TABLE_OBJECT: - { + } break; + case object_detected_type::TABLE_OBJECT: { bool temp_error; - if (metadata_sync.sync_table(thd, db_name, object_name, temp_error)) - { + if (metadata_sync.sync_table(thd, db_name, object_name, temp_error)) { log_info("Table '%s.%s' successfully synchronized", db_name.c_str(), object_name.c_str()); increment_metadata_synced_count(); - } - else if (temp_error) - { + } else if (temp_error) { log_info("Failed to synchronize table '%s.%s' due to a temporary error", db_name.c_str(), object_name.c_str()); - } - else - { + } else { log_error("Failed to synchronize table '%s.%s'", db_name.c_str(), object_name.c_str()); metadata_sync.add_object_to_blacklist(db_name, object_name, object_type); increment_metadata_synced_count(); } - } - break; - default: - { + } break; + default: { // Unexpected type, should never happen DBUG_ASSERT(false); } } } - -void -Ndb_binlog_thread::do_run() -{ +void Ndb_binlog_thread::do_run() { THD *thd; /* needs to be first for thread_stack */ - Ndb *i_ndb= NULL; - Ndb *s_ndb= NULL; - Thd_ndb *thd_ndb=NULL; - injector *inj= injector::instance(); - Global_THD_manager *thd_manager= Global_THD_manager::get_instance(); - - enum { BCCC_starting, BCCC_running, BCCC_restart, } binlog_thread_state; + Ndb *i_ndb = NULL; + Ndb *s_ndb = NULL; + Thd_ndb *thd_ndb = NULL; + injector *inj = injector::instance(); + Global_THD_manager *thd_manager = Global_THD_manager::get_instance(); + + enum { + BCCC_starting, + BCCC_running, + BCCC_restart, + } binlog_thread_state; /* Controls that only one incident is written per reconnect */ bool do_reconnect_incident = true; @@ -8566,7 +7715,7 @@ Ndb_binlog_thread::do_run() log_info("Starting..."); - thd= new THD; /* note that constructor of THD uses DBUG_ */ + thd = new THD; /* note that constructor of THD uses DBUG_ */ THD_CHECK_SENTRY(thd); /* We need to set thd->thread_id before thd->store_globals, or it will @@ -8574,22 +7723,22 @@ Ndb_binlog_thread::do_run() */ thd->set_new_thread_id(); - thd->thread_stack= (char*) &thd; /* remember where our stack is */ + thd->thread_stack = (char *)&thd; /* remember where our stack is */ thd->store_globals(); thd->set_command(COM_DAEMON); - thd->system_thread= SYSTEM_THREAD_NDBCLUSTER_BINLOG; + thd->system_thread = SYSTEM_THREAD_NDBCLUSTER_BINLOG; thd->get_protocol_classic()->set_client_capabilities(0); thd->security_context()->skip_grants(); // Create thd->net without vio - thd->get_protocol_classic()->init_net((Vio *) 0); + thd->get_protocol_classic()->init_net((Vio *)0); // Ndb binlog thread always uses row format thd->set_current_stmt_binlog_format_row(); - thd->real_id= my_thread_self(); + thd->real_id = my_thread_self(); thd_manager->add_thd(thd); - thd->lex->start_transaction_opt= 0; + thd->lex->start_transaction_opt = 0; log_info("Started"); @@ -8602,23 +7751,20 @@ Ndb_binlog_thread::do_run() * s_pOp and s_ndb handle events from the 'ndb_schema' dist table, * while i_pOp and i_ndb are for binlogging 'everything else'.
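* Keeping schema distribution on a separate Ndb object lets schema events be polled and handled ahead of ordinary data events for the same epoch; find_epoch_to_handle() above picks the lowest pending epoch across the two.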
*/ - NdbEventOperation *s_pOp= NULL; - NdbEventOperation *i_pOp= NULL; - binlog_thread_state= BCCC_starting; + NdbEventOperation *s_pOp = NULL; + NdbEventOperation *i_pOp = NULL; + binlog_thread_state = BCCC_starting; log_verbose(1, "Setting up"); - if (!(thd_ndb= Thd_ndb::seize(thd))) - { + if (!(thd_ndb = Thd_ndb::seize(thd))) { log_error("Creating Thd_ndb object failed"); goto err; } - thd_ndb->set_option(Thd_ndb::NO_LOG_SCHEMA_OP); + thd_ndb->set_option(Thd_ndb::NO_LOG_SCHEMA_OP); - if (!(s_ndb= new (std::nothrow) Ndb(g_ndb_cluster_connection)) || - s_ndb->setNdbObjectName("schema change monitoring") || - s_ndb->init()) - { + if (!(s_ndb = new (std::nothrow) Ndb(g_ndb_cluster_connection)) || + s_ndb->setNdbObjectName("schema change monitoring") || s_ndb->init()) { log_error("Creating schema Ndb object failed"); goto err; } @@ -8626,10 +7772,8 @@ Ndb_binlog_thread::do_run() s_ndb->getReference(), s_ndb->getNdbObjectName()); // empty database - if (!(i_ndb= new (std::nothrow) Ndb(g_ndb_cluster_connection)) || - i_ndb->setNdbObjectName("data change monitoring") || - i_ndb->init()) - { + if (!(i_ndb = new (std::nothrow) Ndb(g_ndb_cluster_connection)) || + i_ndb->setNdbObjectName("data change monitoring") || i_ndb->init()) { log_error("Creating injector Ndb object failed"); goto err; } @@ -8637,8 +7781,7 @@ Ndb_binlog_thread::do_run() i_ndb->getReference(), i_ndb->getNdbObjectName()); /* Set free percent event buffer needed to resume buffering */ - if (i_ndb->set_eventbuffer_free_percent(opt_ndb_eventbuffer_free_percent)) - { + if (i_ndb->set_eventbuffer_free_percent(opt_ndb_eventbuffer_free_percent)) { log_error("Setting eventbuffer free percent failed"); goto err; } @@ -8651,17 +7794,16 @@ Ndb_binlog_thread::do_run() with the storage */ mysql_mutex_lock(&injector_event_mutex); - injector_thd= thd; - injector_ndb= i_ndb; - schema_ndb= s_ndb; + injector_thd = thd; + injector_ndb = i_ndb; + schema_ndb = s_ndb; DBUG_PRINT("info", ("set schema_ndb to s_ndb")); mysql_mutex_unlock(&injector_event_mutex); - if (opt_bin_log && opt_ndb_log_bin) - { + if (opt_bin_log && opt_ndb_log_bin) { // Binary log has been enabled for the server and changes // to NDB tables should be logged - ndb_binlog_running= true; + ndb_binlog_running = true; } log_verbose(1, "Setup completed"); @@ -8669,8 +7811,7 @@ Ndb_binlog_thread::do_run() Wait for the MySQL Server to start (so that the binlog is started and thus can receive the first GAP event) */ - if (!wait_for_server_started()) - { + if (!wait_for_server_started()) { goto err; } @@ -8680,27 +7821,24 @@ Ndb_binlog_thread::do_run() thd->init_query_mem_roots(); lex_start(thd); - if (do_reconnect_incident && ndb_binlog_running) - { - if (check_reconnect_incident(thd, inj, reconnect_incident_id)) - { + if (do_reconnect_incident && ndb_binlog_running) { + if (check_reconnect_incident(thd, inj, reconnect_incident_id)) { // Incident written, don't report incident again unless Ndb_binlog_thread // is restarted do_reconnect_incident = false; } } - reconnect_incident_id= CLUSTER_DISCONNECT; + reconnect_incident_id = CLUSTER_DISCONNECT; // Handle pending purge requests from before "server started" state recall_pending_purges(thd); { log_verbose(1, "Wait for cluster to start"); - thd->proc_info= "Waiting for ndbcluster to start"; + thd->proc_info = "Waiting for ndbcluster to start"; thd_set_thd_ndb(thd, thd_ndb); - while (!ndbcluster_is_connected(1) || !binlog_setup.setup(thd_ndb)) - { + while (!ndbcluster_is_connected(1) || !binlog_setup.setup(thd_ndb)) { // Failed to complete 
binlog_setup, remove all existing event // operations from potential partial setup remove_all_event_operations(s_ndb, i_ndb); @@ -8710,34 +7848,32 @@ Ndb_binlog_thread::do_run() NDB_SCHEMA_OBJECT::fail_all_schema_ops(Ndb_schema_dist::COORD_ABORT, "Aborted after setup"); - if (!thd_ndb->valid_ndb()) - { + if (!thd_ndb->valid_ndb()) { /* Cluster has gone away before setup was completed. Restart binlog thread to get rid of any garbage on the ndb objects */ - binlog_thread_state= BCCC_restart; + binlog_thread_state = BCCC_restart; goto err; } - if (is_stop_requested()) - { + if (is_stop_requested()) { goto err; } - if (thd->killed == THD::KILL_CONNECTION) - { + if (thd->killed == THD::KILL_CONNECTION) { /* Since the ndb binlog thread adds itself to the "global thread list" it needs to look at the "killed" flag and stop the thread to avoid that the server hangs during shutdown while waiting for the "global thread list" to be empty. */ - log_info("Server shutdown detected while " - "waiting for ndbcluster to start..."); + log_info( + "Server shutdown detected while " + "waiting for ndbcluster to start..."); goto err; } ndb_milli_sleep(1000); - } //while (!ndb_binlog_setup()) + } // while (!ndb_binlog_setup()) DBUG_ASSERT(ndbcluster_hton->slot != ~(uint)0); @@ -8749,7 +7885,7 @@ Ndb_binlog_thread::do_run() } /* Apply privilege statements stored in snapshot */ - if(! Ndb_stored_grants::apply_stored_grants(thd)) { + if (!Ndb_stored_grants::apply_stored_grants(thd)) { ndb_log_error("stored grants: failed to apply stored grants."); } @@ -8758,14 +7894,12 @@ Ndb_binlog_thread::do_run() { log_verbose(1, "Wait for first event"); // wait for the first event - thd->proc_info= "Waiting for first event from ndbcluster"; + thd->proc_info = "Waiting for first event from ndbcluster"; Uint64 schema_gci; - do - { + do { DBUG_PRINT("info", ("Waiting for the first event")); - if (is_stop_requested()) - goto err; + if (is_stop_requested()) goto err; my_thread_yield(); mysql_mutex_lock(&injector_event_mutex); @@ -8773,54 +7907,49 @@ Ndb_binlog_thread::do_run() mysql_mutex_unlock(&injector_event_mutex); } while (schema_gci == 0 || ndb_latest_received_binlog_epoch == schema_gci); - if (ndb_binlog_running) - { - Uint64 gci= i_ndb->getLatestGCI(); - while (gci < schema_gci || gci == ndb_latest_received_binlog_epoch) - { - if (is_stop_requested()) - goto err; + if (ndb_binlog_running) { + Uint64 gci = i_ndb->getLatestGCI(); + while (gci < schema_gci || gci == ndb_latest_received_binlog_epoch) { + if (is_stop_requested()) goto err; my_thread_yield(); mysql_mutex_lock(&injector_event_mutex); (void)i_ndb->pollEvents(10, &gci); mysql_mutex_unlock(&injector_event_mutex); } - if (gci > schema_gci) - { - schema_gci= gci; + if (gci > schema_gci) { + schema_gci = gci; } } - // now check that we have epochs consistent with what we had before the restart + // now check that we have epochs consistent with what we had before the + // restart DBUG_PRINT("info", ("schema_gci: %u/%u", (uint)(schema_gci >> 32), (uint)(schema_gci))); { i_ndb->flushIncompleteEvents(schema_gci); s_ndb->flushIncompleteEvents(schema_gci); - if (schema_gci < ndb_latest_handled_binlog_epoch) - { - log_error("cluster has been restarted --initial or with older filesystem. " - "ndb_latest_handled_binlog_epoch: %u/%u, while current epoch: %u/%u. " - "RESET MASTER should be issued.
Resetting ndb_latest_handled_binlog_epoch.", - (uint)(ndb_latest_handled_binlog_epoch >> 32), - (uint)(ndb_latest_handled_binlog_epoch), - (uint)(schema_gci >> 32), - (uint)(schema_gci)); + if (schema_gci < ndb_latest_handled_binlog_epoch) { + log_error( + "cluster has been restarted --initial or with older filesystem. " + "ndb_latest_handled_binlog_epoch: %u/%u, while current epoch: " + "%u/%u. " + "RESET MASTER should be issued. Resetting " + "ndb_latest_handled_binlog_epoch.", + (uint)(ndb_latest_handled_binlog_epoch >> 32), + (uint)(ndb_latest_handled_binlog_epoch), (uint)(schema_gci >> 32), + (uint)(schema_gci)); ndb_set_latest_trans_gci(0); - ndb_latest_handled_binlog_epoch= 0; - ndb_latest_applied_binlog_epoch= 0; - ndb_latest_received_binlog_epoch= 0; + ndb_latest_handled_binlog_epoch = 0; + ndb_latest_applied_binlog_epoch = 0; + ndb_latest_received_binlog_epoch = 0; ndb_index_stat_restart(); + } else if (ndb_latest_applied_binlog_epoch > 0) { + log_warning( + "cluster has reconnected. " + "Changes to the database that occurred while " + "disconnected will not be in the binlog"); } - else if (ndb_latest_applied_binlog_epoch > 0) - { - log_warning("cluster has reconnected. " - "Changes to the database that occurred while " - "disconnected will not be in the binlog"); - } - log_verbose(1, - "starting log at epoch %u/%u", - (uint)(schema_gci >> 32), + log_verbose(1, "starting log at epoch %u/%u", (uint)(schema_gci >> 32), (uint)(schema_gci)); } log_verbose(1, "Got first event"); @@ -8831,20 +7960,20 @@ Ndb_binlog_thread::do_run() no longer read only */ mysql_mutex_lock(&injector_data_mutex); - ndb_binlog_is_ready= true; + ndb_binlog_is_ready = true; mysql_mutex_unlock(&injector_data_mutex); log_verbose(1, "ndb tables writable"); ndb_tdc_close_cached_tables(); - /* + /* Signal any waiting thread that ndb table setup is now complete */ ndb_notify_tables_writable(); { - static LEX_CSTRING db_lex_cstr= EMPTY_CSTR; + static LEX_CSTRING db_lex_cstr = EMPTY_CSTR; thd->reset_db(db_lex_cstr); } @@ -8853,8 +7982,8 @@ Ndb_binlog_thread::do_run() /* Main NDB Injector loop */ - do_reconnect_incident = true; // Report incident if disconnected - binlog_thread_state= BCCC_running; + do_reconnect_incident = true; // Report incident if disconnected + binlog_thread_state = BCCC_running; /** * Injector loop runs until itself bring it out of 'BCCC_running' state, @@ -8862,9 +7991,8 @@ Ndb_binlog_thread::do_run() * all ongoing transaction epochs are completed first. */ while (binlog_thread_state == BCCC_running && - (!is_stop_requested() || - ndb_latest_handled_binlog_epoch < ndb_get_latest_trans_gci())) - { + (!is_stop_requested() || + ndb_latest_handled_binlog_epoch < ndb_get_latest_trans_gci())) { #ifndef DBUG_OFF /** * As the Binlog thread is not a client thread, the 'set debug' commands @@ -8880,7 +8008,7 @@ Ndb_binlog_thread::do_run() /* now we don't want any events before next gci is complete */ - thd->proc_info= "Waiting for event from ndbcluster"; + thd->proc_info = "Waiting for event from ndbcluster"; thd->set_time(); /** @@ -8898,53 +8026,48 @@ Ndb_binlog_thread::do_run() my_thread_yield(); /* Can't hold mutex too long, so wait for events in 10ms steps */ - int tot_poll_wait= 10; + int tot_poll_wait = 10; // If there are remaining unhandled injector eventOp we continue // handling of these, else poll for more. 
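// (With binlogging on, the 10 ms budget in tot_poll_wait is consumed by this data-event poll and then zeroed, so the schema poll further down returns at once; with binlogging off, poll_wait is 0 here and the schema poll gets the full wait.)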
- if (i_pOp == NULL) - { + if (i_pOp == NULL) { // Capture any dynamic changes to max_alloc i_ndb->set_eventbuf_max_alloc(opt_ndb_eventbuffer_max_alloc); mysql_mutex_lock(&injector_event_mutex); - Uint64 latest_epoch= 0; - const int poll_wait= (ndb_binlog_running) ? tot_poll_wait : 0; - const int res= i_ndb->pollEvents(poll_wait, &latest_epoch); - (void)res; // Unused except DBUG_PRINT + Uint64 latest_epoch = 0; + const int poll_wait = (ndb_binlog_running) ? tot_poll_wait : 0; + const int res = i_ndb->pollEvents(poll_wait, &latest_epoch); + (void)res; // Unused except DBUG_PRINT mysql_mutex_unlock(&injector_event_mutex); i_pOp = i_ndb->nextEvent(); - if (ndb_binlog_running) - { - ndb_latest_received_binlog_epoch= latest_epoch; - tot_poll_wait= 0; + if (ndb_binlog_running) { + ndb_latest_received_binlog_epoch = latest_epoch; + tot_poll_wait = 0; } DBUG_PRINT("info", ("pollEvents res: %d", res)); } // Epoch to handle from i_ndb. Use latest 'empty epoch' if no events. - const Uint64 i_epoch = (i_pOp != NULL) - ? i_pOp->getEpoch() - : ndb_latest_received_binlog_epoch; + const Uint64 i_epoch = + (i_pOp != NULL) ? i_pOp->getEpoch() : ndb_latest_received_binlog_epoch; // If there are remaining unhandled schema eventOp we continue // handling of these, else poll for more. - if (s_pOp == NULL) - { + if (s_pOp == NULL) { if (DBUG_EVALUATE_IF("ndb_binlog_injector_yield_before_schema_pollEvent", - true, false)) - { + true, false)) { /** - * Simulate that the binlog thread yields the CPU inbetween + * Simulate that the binlog thread yields the CPU in between * these two pollEvents, which can result in reading a * 'schema_gci > gci'. (Likely due to mutex locking) */ ndb_milli_sleep(50); } - - Uint64 schema_epoch= 0; + + Uint64 schema_epoch = 0; mysql_mutex_lock(&injector_event_mutex); - int schema_res= s_ndb->pollEvents(tot_poll_wait, &schema_epoch); + int schema_res = s_ndb->pollEvents(tot_poll_wait, &schema_epoch); mysql_mutex_unlock(&injector_event_mutex); s_pOp = s_ndb->nextEvent(); @@ -8952,20 +8075,18 @@ Ndb_binlog_thread::do_run() Make sure we have seen any schema epochs up to the injector epoch, or we have an earlier schema event to handle. */ - while (s_pOp == NULL && i_epoch > schema_epoch && schema_res >= 0) - { + while (s_pOp == NULL && i_epoch > schema_epoch && schema_res >= 0) { static char buf[64]; - thd->proc_info= "Waiting for schema epoch"; + thd->proc_info = "Waiting for schema epoch"; snprintf(buf, sizeof(buf), "%s %u/%u(%u/%u)", thd->proc_info, - (uint)(schema_epoch >> 32), - (uint)(schema_epoch), - (uint)(ndb_latest_received_binlog_epoch >> 32), - (uint)(ndb_latest_received_binlog_epoch)); - thd->proc_info= buf; + (uint)(schema_epoch >> 32), (uint)(schema_epoch), + (uint)(ndb_latest_received_binlog_epoch >> 32), + (uint)(ndb_latest_received_binlog_epoch)); + thd->proc_info = buf; my_thread_yield(); mysql_mutex_lock(&injector_event_mutex); - schema_res= s_ndb->pollEvents(10, &schema_epoch); + schema_res = s_ndb->pollEvents(10, &schema_epoch); mysql_mutex_unlock(&injector_event_mutex); s_pOp = s_ndb->nextEvent(); } @@ -8981,7 +8102,7 @@ Ndb_binlog_thread::do_run() */ // Calculate the epoch to handle events from in this iteration. - const Uint64 current_epoch = find_epoch_to_handle(s_pOp,i_pOp); + const Uint64 current_epoch = find_epoch_to_handle(s_pOp, i_pOp); DBUG_ASSERT(current_epoch != 0 || !ndb_binlog_running); // Did someone else request injector thread to stop?
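// (A stop request alone does not end the loop: the condition at the top also requires ndb_latest_handled_binlog_epoch to catch up with ndb_get_latest_trans_gci(), so ongoing transaction epochs are binlogged completely before shutdown.)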
@@ -8991,8 +8112,7 @@ Ndb_binlog_thread::do_run() !ndb_binlog_running)) break; /* Stopping thread */ - if (thd->killed == THD::KILL_CONNECTION) - { + if (thd->killed == THD::KILL_CONNECTION) { /* Since the ndb binlog thread adds itself to the "global thread list" it needs to look at the "killed" flag and stop the thread to avoid @@ -9009,103 +8129,99 @@ Ndb_binlog_thread::do_run() break; } - MEM_ROOT **root_ptr= THR_MALLOC; - MEM_ROOT *old_root= *root_ptr; + MEM_ROOT **root_ptr = THR_MALLOC; + MEM_ROOT *old_root = *root_ptr; MEM_ROOT mem_root; init_sql_alloc(PSI_INSTRUMENT_ME, &mem_root, 4096, 0); // The Ndb_schema_event_handler does not necessarily need // to use the same memroot(or vice versa) - Ndb_schema_event_handler - schema_event_handler(thd, &mem_root, - g_ndb_cluster_connection->node_id(), - schema_dist_data); + Ndb_schema_event_handler schema_event_handler( + thd, &mem_root, g_ndb_cluster_connection->node_id(), schema_dist_data); - *root_ptr= &mem_root; + *root_ptr = &mem_root; - if (unlikely(s_pOp != NULL && s_pOp->getEpoch() == current_epoch)) - { - thd->proc_info= "Processing events from schema table"; - g_ndb_log_slave_updates= opt_log_slave_updates; - s_ndb-> - setReportThreshEventGCISlip(opt_ndb_report_thresh_binlog_epoch_slip); - s_ndb-> - setReportThreshEventFreeMem(opt_ndb_report_thresh_binlog_mem_usage); + if (unlikely(s_pOp != NULL && s_pOp->getEpoch() == current_epoch)) { + thd->proc_info = "Processing events from schema table"; + g_ndb_log_slave_updates = opt_log_slave_updates; + s_ndb->setReportThreshEventGCISlip( + opt_ndb_report_thresh_binlog_epoch_slip); + s_ndb->setReportThreshEventFreeMem( + opt_ndb_report_thresh_binlog_mem_usage); // Handle all schema events, limit within 'current_epoch' - while (s_pOp != NULL && s_pOp->getEpoch() == current_epoch) - { - if (!s_pOp->hasError()) - { + while (s_pOp != NULL && s_pOp->getEpoch() == current_epoch) { + if (!s_pOp->hasError()) { schema_event_handler.handle_event(s_ndb, s_pOp); - if (DBUG_EVALUATE_IF("ndb_binlog_slow_failure_handling", true, false)) - { - if (!ndb_binlog_is_ready) - { - log_info("Just lost schema connection, hanging around"); - ndb_milli_sleep(10*1000); // seconds * 1000 + if (DBUG_EVALUATE_IF("ndb_binlog_slow_failure_handling", true, + false)) { + if (!ndb_binlog_is_ready) { + log_info("Just lost schema connection, hanging around"); + ndb_milli_sleep(10 * 1000); // seconds * 1000 /* There could be a race where client side reconnects before we * are able to detect 's_ndb->getEventOperation() == NULL'. * Thus, we never restart the binlog thread as supposed to. - * -> 'ndb_binlog_is_ready' remains false and we get stuck in RO-mode + * -> 'ndb_binlog_is_ready' remains false and we get stuck in + * RO-mode */ - log_info("...and on our way"); + log_info("...and on our way"); } } - DBUG_PRINT("info", ("s_ndb first: %s", s_ndb->getEventOperation() ? - s_ndb->getEventOperation()->getEvent()->getTable()->getName() : - "")); - DBUG_PRINT("info", ("i_ndb first: %s", i_ndb->getEventOperation() ? - i_ndb->getEventOperation()->getEvent()->getTable()->getName() : - "")); - } - else - { + DBUG_PRINT("info", + ("s_ndb first: %s", s_ndb->getEventOperation() + ? s_ndb->getEventOperation() + ->getEvent() + ->getTable() + ->getName() + : "")); + DBUG_PRINT("info", + ("i_ndb first: %s", i_ndb->getEventOperation() + ?
i_ndb->getEventOperation() + ->getEvent() + ->getTable() + ->getName() + : "")); + } else { log_error("error %d (%s) on handling binlog schema event", - s_pOp->getNdbError().code, - s_pOp->getNdbError().message); + s_pOp->getNdbError().code, s_pOp->getNdbError().message); } s_pOp = s_ndb->nextEvent(); } update_injector_stats(s_ndb, i_ndb); } - Uint64 inconsistent_epoch= 0; - if (!ndb_binlog_running) - { + Uint64 inconsistent_epoch = 0; + if (!ndb_binlog_running) { /* Just consume any events, not used if no binlogging e.g. node failure events */ - while (i_pOp != NULL && i_pOp->getEpoch() == current_epoch) - { - if ((unsigned) i_pOp->getEventType() >= - (unsigned) NDBEVENT::TE_FIRST_NON_DATA_EVENT) - { + while (i_pOp != NULL && i_pOp->getEpoch() == current_epoch) { + if ((unsigned)i_pOp->getEventType() >= + (unsigned)NDBEVENT::TE_FIRST_NON_DATA_EVENT) { ndb_binlog_index_row row; handle_non_data_event(thd, i_pOp, row); } - i_pOp= i_ndb->nextEvent(); + i_pOp = i_ndb->nextEvent(); } update_injector_stats(s_ndb, i_ndb); } // i_pOp == NULL means an inconsistent epoch or the queue is empty - else if (i_pOp == NULL && !i_ndb->isConsistent(inconsistent_epoch)) - { + else if (i_pOp == NULL && !i_ndb->isConsistent(inconsistent_epoch)) { char errmsg[72]; snprintf(errmsg, sizeof(errmsg), "Detected missing data in GCI %llu, " - "inserting GAP event", inconsistent_epoch); - DBUG_PRINT("info", - ("Detected missing data in GCI %llu, " - "inserting GAP event", inconsistent_epoch)); - LEX_CSTRING const msg= { errmsg, strlen(errmsg) }; - inj->record_incident(thd, - binary_log::Incident_event::INCIDENT_LOST_EVENTS, - msg); + "inserting GAP event", + inconsistent_epoch); + DBUG_PRINT("info", ("Detected missing data in GCI %llu, " + "inserting GAP event", + inconsistent_epoch)); + LEX_CSTRING const msg = {errmsg, strlen(errmsg)}; + inj->record_incident( + thd, binary_log::Incident_event::INCIDENT_LOST_EVENTS, msg); } /* Handle all events within 'current_epoch', or possible @@ -9113,17 +8229,15 @@ Ndb_binlog_thread::do_run() */ else if ((i_pOp != NULL && i_pOp->getEpoch() == current_epoch) || (ndb_log_empty_epochs() && - current_epoch > ndb_latest_handled_binlog_epoch)) - { - thd->proc_info= "Processing events"; + current_epoch > ndb_latest_handled_binlog_epoch)) { + thd->proc_info = "Processing events"; ndb_binlog_index_row _row; - ndb_binlog_index_row *rows= &_row; + ndb_binlog_index_row *rows = &_row; injector::transaction trans; - unsigned trans_row_count= 0; - unsigned trans_slave_row_count= 0; + unsigned trans_row_count = 0; + unsigned trans_slave_row_count = 0; - if (i_pOp == NULL || i_pOp->getEpoch() != current_epoch) - { + if (i_pOp == NULL || i_pOp->getEpoch() != current_epoch) { /* Must be an empty epoch since the condition (ndb_log_empty_epochs() && @@ -9136,22 +8250,21 @@ Ndb_binlog_thread::do_run() DBUG_PRINT("info", ("Writing empty epoch for gci %llu", current_epoch)); DBUG_PRINT("info", ("Initializing transaction")); inj->new_trans(thd, &trans); - rows= &_row; + rows = &_row; memset(&_row, 0, sizeof(_row)); - thd->variables.character_set_client= &my_charset_latin1; + thd->variables.character_set_client = &my_charset_latin1; goto commit_to_binlog; - } - else - { + } else { assert(i_pOp != NULL && i_pOp->getEpoch() == current_epoch); - rows= &_row; + rows = &_row; - DBUG_PRINT("info", ("Handling epoch: %u/%u", - (uint)(current_epoch >> 32), - (uint)(current_epoch))); + DBUG_PRINT("info", + ("Handling epoch: %u/%u", (uint)(current_epoch >> 32), + (uint)(current_epoch))); // sometimes get TE_ALTER
with invalid table - DBUG_ASSERT(i_pOp->getEventType() == NdbDictionary::Event::TE_ALTER || - !ndb_name_is_blob_prefix(i_pOp->getEvent()->getTable()->getName())); + DBUG_ASSERT( + i_pOp->getEventType() == NdbDictionary::Event::TE_ALTER || + !ndb_name_is_blob_prefix(i_pOp->getEvent()->getTable()->getName())); DBUG_ASSERT(current_epoch <= ndb_latest_received_binlog_epoch); /* Update our thread-local debug settings based on the global */ @@ -9169,90 +8282,84 @@ Ndb_binlog_thread::do_run() /* initialize some variables for this epoch */ i_ndb->set_eventbuf_max_alloc(opt_ndb_eventbuffer_max_alloc); - g_ndb_log_slave_updates= opt_log_slave_updates; - i_ndb-> - setReportThreshEventGCISlip(opt_ndb_report_thresh_binlog_epoch_slip); - i_ndb->setReportThreshEventFreeMem(opt_ndb_report_thresh_binlog_mem_usage); + g_ndb_log_slave_updates = opt_log_slave_updates; + i_ndb->setReportThreshEventGCISlip( + opt_ndb_report_thresh_binlog_epoch_slip); + i_ndb->setReportThreshEventFreeMem( + opt_ndb_report_thresh_binlog_mem_usage); memset(&_row, 0, sizeof(_row)); - thd->variables.character_set_client= &my_charset_latin1; + thd->variables.character_set_client = &my_charset_latin1; DBUG_PRINT("info", ("Initializing transaction")); inj->new_trans(thd, &trans); - trans_row_count= 0; - trans_slave_row_count= 0; + trans_row_count = 0; + trans_slave_row_count = 0; // pass table map before epoch { - Uint32 iter= 0; + Uint32 iter = 0; const NdbEventOperation *gci_op; Uint32 event_types; Uint32 cumulative_any_value; - while ((gci_op= i_ndb->getNextEventOpInEpoch3(&iter, &event_types, &cumulative_any_value)) - != NULL) - { - Ndb_event_data *event_data= - (Ndb_event_data *) gci_op->getCustomData(); - NDB_SHARE *share= (event_data)?event_data->share:NULL; - DBUG_PRINT("info", ("per gci_op: 0x%lx share: 0x%lx event_types: 0x%x", - (long) gci_op, (long) share, event_types)); + while ((gci_op = i_ndb->getNextEventOpInEpoch3( + &iter, &event_types, &cumulative_any_value)) != NULL) { + Ndb_event_data *event_data = + (Ndb_event_data *)gci_op->getCustomData(); + NDB_SHARE *share = (event_data) ? 
event_data->share : NULL; + DBUG_PRINT("info", + ("per gci_op: 0x%lx share: 0x%lx event_types: 0x%x", + (long)gci_op, (long)share, event_types)); // workaround for interface returning TE_STOP events // which are normally filtered out below in the nextEvent loop - if ((event_types & ~NdbDictionary::Event::TE_STOP) == 0) - { + if ((event_types & ~NdbDictionary::Event::TE_STOP) == 0) { DBUG_PRINT("info", ("Skipped TE_STOP on table %s", gci_op->getEvent()->getTable()->getName())); continue; } // this should not happen - if (share == NULL || event_data->shadow_table == NULL) - { + if (share == NULL || event_data->shadow_table == NULL) { DBUG_PRINT("info", ("no share or table %s!", gci_op->getEvent()->getTable()->getName())); continue; } - if (share == ndb_apply_status_share) - { + if (share == ndb_apply_status_share) { // skip this table, it is handled specially continue; } - TABLE *table= event_data->shadow_table; + TABLE *table = event_data->shadow_table; #ifndef DBUG_OFF - const LEX_CSTRING &name= table->s->table_name; + const LEX_CSTRING &name = table->s->table_name; #endif if ((event_types & (NdbDictionary::Event::TE_INSERT | NdbDictionary::Event::TE_UPDATE | - NdbDictionary::Event::TE_DELETE)) == 0) - { + NdbDictionary::Event::TE_DELETE)) == 0) { DBUG_PRINT("info", ("skipping non data event table: %.*s", - (int) name.length, name.str)); + (int)name.length, name.str)); continue; } - if (!trans.good()) - { + if (!trans.good()) { DBUG_PRINT("info", ("Found new data event, initializing transaction")); inj->new_trans(thd, &trans); } { - bool use_table= true; - if (ndbcluster_anyvalue_is_reserved(cumulative_any_value)) - { + bool use_table = true; + if (ndbcluster_anyvalue_is_reserved(cumulative_any_value)) { /* - All events for this table in this epoch are marked as nologging, - therefore we do not include the table in the epoch transaction. + All events for this table in this epoch are marked as + nologging, therefore we do not include the table in the epoch + transaction. */ - if (ndbcluster_anyvalue_is_nologging(cumulative_any_value)) - { + if (ndbcluster_anyvalue_is_nologging(cumulative_any_value)) { DBUG_PRINT("info", ("Skip binlogging table: %.*s", - (int) name.length, name.str)); - use_table= false; + (int)name.length, name.str)); + use_table = false; } } - if (use_table) - { - DBUG_PRINT("info", ("use_table: %.*s, cols %u", - (int) name.length, name.str, - table->s->fields)); + if (use_table) { + DBUG_PRINT("info", + ("use_table: %.*s, cols %u", (int)name.length, + name.str, table->s->fields)); injector::transaction::table tbl(table, true); int ret = trans.use_table(::server_id, tbl); ndbcluster::ndbrequire(ret == 0); @@ -9260,68 +8367,67 @@ Ndb_binlog_thread::do_run() } } } - if (trans.good()) - { + if (trans.good()) { /* Inject ndb_apply_status WRITE_ROW event */ - if (!injectApplyStatusWriteRow(trans, current_epoch)) - { + if (!injectApplyStatusWriteRow(trans, current_epoch)) { log_error("Failed to inject apply status write row"); } } - do - { - if (i_pOp->hasError() && - handle_error(i_pOp) < 0) - goto err; + do { + if (i_pOp->hasError() && handle_error(i_pOp) < 0) goto err; #ifndef DBUG_OFF { - Ndb_event_data *event_data= - (Ndb_event_data *) i_pOp->getCustomData(); - NDB_SHARE *share= (event_data)?event_data->share:NULL; + Ndb_event_data *event_data = + (Ndb_event_data *)i_pOp->getCustomData(); + NDB_SHARE *share = (event_data) ?
event_data->share : NULL; DBUG_PRINT("info", ("EVENT TYPE: %d Epoch: %u/%u last applied: %u/%u " - "share: 0x%lx (%s.%s)", i_pOp->getEventType(), - (uint)(current_epoch >> 32), + "share: 0x%lx (%s.%s)", + i_pOp->getEventType(), (uint)(current_epoch >> 32), (uint)(current_epoch), (uint)(ndb_latest_applied_binlog_epoch >> 32), - (uint)(ndb_latest_applied_binlog_epoch), - (long) share, - share ? share->db : "'NULL'", + (uint)(ndb_latest_applied_binlog_epoch), (long)share, + share ? share->db : "'NULL'", share ? share->table_name : "'NULL'")); DBUG_ASSERT(share != 0); } // assert that there is consistency between gci op list // and event list { - Uint32 iter= 0; + Uint32 iter = 0; const NdbEventOperation *gci_op; Uint32 event_types; - while ((gci_op= i_ndb->getGCIEventOperations(&iter, &event_types)) - != NULL) - { - if (gci_op == i_pOp) - break; + while ((gci_op = i_ndb->getGCIEventOperations( + &iter, &event_types)) != NULL) { + if (gci_op == i_pOp) break; } DBUG_ASSERT(gci_op == i_pOp); DBUG_ASSERT((event_types & i_pOp->getEventType()) != 0); } #endif - if ((unsigned) i_pOp->getEventType() < - (unsigned) NDBEVENT::TE_FIRST_NON_DATA_EVENT) - handle_data_event(i_pOp, &rows, trans, - trans_row_count, trans_slave_row_count); - else - { + if ((unsigned)i_pOp->getEventType() < + (unsigned)NDBEVENT::TE_FIRST_NON_DATA_EVENT) + handle_data_event(i_pOp, &rows, trans, trans_row_count, + trans_slave_row_count); + else { handle_non_data_event(thd, i_pOp, *rows); - DBUG_PRINT("info", ("s_ndb first: %s", s_ndb->getEventOperation() ? - s_ndb->getEventOperation()->getEvent()->getTable()->getName() : - "")); - DBUG_PRINT("info", ("i_ndb first: %s", i_ndb->getEventOperation() ? - i_ndb->getEventOperation()->getEvent()->getTable()->getName() : - "")); + DBUG_PRINT("info", + ("s_ndb first: %s", s_ndb->getEventOperation() + ? s_ndb->getEventOperation() + ->getEvent() + ->getTable() + ->getName() + : "")); + DBUG_PRINT("info", + ("i_ndb first: %s", i_ndb->getEventOperation() + ? i_ndb->getEventOperation() + ->getEvent() + ->getTable() + ->getName() + : "")); } // Capture any dynamic changes to max_alloc @@ -9331,19 +8437,17 @@ Ndb_binlog_thread::do_run() } while (i_pOp && i_pOp->getEpoch() == current_epoch); update_injector_stats(s_ndb, i_ndb); - + /* NOTE: i_pOp is now referring to an event in the next epoch or is == NULL */ - while (trans.good()) - { - commit_to_binlog: - if (!ndb_log_empty_epochs()) - { + while (trans.good()) { + commit_to_binlog: + if (!ndb_log_empty_epochs()) { /* - If + If - We did not add any 'real' rows to the Binlog AND - We did not apply any slave row updates, only ndb_apply_status updates @@ -9355,12 +8459,9 @@ Ndb_binlog_thread::do_run() is propagating) */ if ((trans_row_count == 0) && - (! (opt_ndb_log_apply_status && - trans_slave_row_count) )) - { + (!(opt_ndb_log_apply_status && trans_slave_row_count))) { /* nothing to commit, rollback instead */ - if (int r= trans.rollback()) - { + if (int r = trans.rollback()) { log_error("Error during ROLLBACK of GCI %u/%u. Error: %d", uint(current_epoch >> 32), uint(current_epoch), r); /* TODO: Further handling? */ @@ -9368,52 +8469,46 @@ Ndb_binlog_thread::do_run() } } - thd->proc_info= "Committing events to binlog"; - if (int r= trans.commit()) - { + thd->proc_info = "Committing events to binlog"; + if (int r = trans.commit()) { log_error("Error during COMMIT of GCI. Error: %d", r); /* TODO: Further handling?
*/ } - injector::transaction::binlog_pos start= trans.start_pos(); + injector::transaction::binlog_pos start = trans.start_pos(); injector::transaction::binlog_pos next = trans.next_pos(); - rows->gci= (Uint32)(current_epoch >> 32); // Expose gci hi/lo - rows->epoch= current_epoch; - rows->start_master_log_file= start.file_name(); - rows->start_master_log_pos= start.file_pos(); - if ((next.file_pos() == 0) && - ndb_log_empty_epochs()) - { + rows->gci = (Uint32)(current_epoch >> 32); // Expose gci hi/lo + rows->epoch = current_epoch; + rows->start_master_log_file = start.file_name(); + rows->start_master_log_pos = start.file_pos(); + if ((next.file_pos() == 0) && ndb_log_empty_epochs()) { /* Empty transaction 'committed' due to log_empty_epochs * therefore no next position */ - rows->next_master_log_file= start.file_name(); - rows->next_master_log_pos= start.file_pos(); - } - else - { - rows->next_master_log_file= next.file_name(); - rows->next_master_log_pos= next.file_pos(); + rows->next_master_log_file = start.file_name(); + rows->next_master_log_pos = start.file_pos(); + } else { + rows->next_master_log_file = next.file_name(); + rows->next_master_log_pos = next.file_pos(); } - DBUG_PRINT("info", ("COMMIT epoch: %lu", (ulong) current_epoch)); - if (opt_ndb_log_binlog_index) - { - if (Ndb_binlog_index_table_util::write_rows(thd, rows)) - { - /* - Writing to ndb_binlog_index failed, check if it's because THD has - been killed and retry in such case + DBUG_PRINT("info", ("COMMIT epoch: %lu", (ulong)current_epoch)); + if (opt_ndb_log_binlog_index) { + if (Ndb_binlog_index_table_util::write_rows(thd, rows)) { + /* + Writing to ndb_binlog_index failed, check if it's because THD + has been killed and retry in such case */ - if (thd->killed) - { - DBUG_PRINT("error", ("Failed to write to ndb_binlog_index at shutdown, retrying")); - Ndb_binlog_index_table_util::write_rows_retry_after_kill(thd, rows); + if (thd->killed) { + DBUG_PRINT("error", ("Failed to write to ndb_binlog_index at " + "shutdown, retrying")); + Ndb_binlog_index_table_util::write_rows_retry_after_kill(thd, + rows); } } } - ndb_latest_applied_binlog_epoch= current_epoch; + ndb_latest_applied_binlog_epoch = current_epoch; break; - } //while (trans.good()) + } // while (trans.good()) /* NOTE: There are possibly more i_pOp available. @@ -9421,19 +8516,18 @@ Ndb_binlog_thread::do_run() in next iteration of the binlog injector loop. */ } - } //end: 'handled a 'current_epoch' of i_pOp's + } // end: 'handled a 'current_epoch' of i_pOp's // Notify the schema event handler about post_epoch so it may finish // any outstanding business schema_event_handler.post_epoch(current_epoch); free_root(&mem_root, MYF(0)); - *root_ptr= old_root; + *root_ptr = old_root; - if (current_epoch > ndb_latest_handled_binlog_epoch) - { + if (current_epoch > ndb_latest_handled_binlog_epoch) { Mutex_guard injector_mutex_g(injector_data_mutex); - ndb_latest_handled_binlog_epoch= current_epoch; + ndb_latest_handled_binlog_epoch = current_epoch; // Signal ndbcluster_binlog_wait'ers mysql_cond_broadcast(&injector_data_cond); } @@ -9445,18 +8539,17 @@ Ndb_binlog_thread::do_run() // When all event operations have been removed from their respective Ndb // object, the thread should restart and try to connect to NDB again.
if (i_ndb->getEventOperation() == NULL && - s_ndb->getEventOperation() == NULL) - { + s_ndb->getEventOperation() == NULL) { log_error("All event operations gone, restarting thread"); - binlog_thread_state= BCCC_restart; + binlog_thread_state = BCCC_restart; break; } - if (!ndb_binlog_tables_inited /* relaxed read without lock */ ) { + if (!ndb_binlog_tables_inited /* relaxed read without lock */) { // One (or more) of the ndbcluster util tables have been dropped, restart // the thread in order to create or set up the util table(s) again log_error("The util tables have been lost, restarting thread"); - binlog_thread_state= BCCC_restart; + binlog_thread_state = BCCC_restart; break; } @@ -9468,40 +8561,38 @@ Ndb_binlog_thread::do_run() // Check if loop has been terminated without properly handling all events if (ndb_binlog_running && ndb_latest_handled_binlog_epoch < ndb_get_latest_trans_gci()) { - log_error("latest transaction in epoch %u/%u not in binlog " - "as latest handled epoch is %u/%u", - (uint)(ndb_get_latest_trans_gci() >> 32), - (uint)(ndb_get_latest_trans_gci()), - (uint)(ndb_latest_handled_binlog_epoch >> 32), - (uint)(ndb_latest_handled_binlog_epoch)); + log_error( + "latest transaction in epoch %u/%u not in binlog " + "as latest handled epoch is %u/%u", + (uint)(ndb_get_latest_trans_gci() >> 32), + (uint)(ndb_get_latest_trans_gci()), + (uint)(ndb_latest_handled_binlog_epoch >> 32), + (uint)(ndb_latest_handled_binlog_epoch)); } - err: - if (binlog_thread_state != BCCC_restart) - { +err: + if (binlog_thread_state != BCCC_restart) { log_info("Shutting down"); - thd->proc_info= "Shutting down"; - } - else - { + thd->proc_info = "Shutting down"; + } else { log_info("Restarting"); - thd->proc_info= "Restarting"; + thd->proc_info = "Restarting"; } mysql_mutex_lock(&injector_event_mutex); /* don't mess with the injector_ndb anymore from other threads */ - injector_thd= NULL; - injector_ndb= NULL; - schema_ndb= NULL; + injector_thd = NULL; + injector_ndb = NULL; + schema_ndb = NULL; mysql_mutex_unlock(&injector_event_mutex); mysql_mutex_lock(&injector_data_mutex); - ndb_binlog_tables_inited= false; + ndb_binlog_tables_inited = false; mysql_mutex_unlock(&injector_data_mutex); Ndb_stored_grants::shutdown(thd_ndb); - thd->reset_db(NULL_CSTR); // so as not to try to free memory + thd->reset_db(NULL_CSTR); // so as not to try to free memory remove_all_event_operations(s_ndb, i_ndb); schema_dist_data.release(); @@ -9512,16 +8603,15 @@ Ndb_binlog_thread::do_run() "Aborted during shutdown"); delete s_ndb; - s_ndb= NULL; + s_ndb = NULL; delete i_ndb; - i_ndb= NULL; + i_ndb = NULL; - if (thd_ndb) - { + if (thd_ndb) { Thd_ndb::release(thd_ndb); thd_set_thd_ndb(thd, NULL); - thd_ndb= NULL; + thd_ndb = NULL; } /** @@ -9533,13 +8623,11 @@ Ndb_binlog_thread::do_run() log_info("Stopping..."); ndb_tdc_close_cached_tables(); - if (ndb_log_get_verbose_level() > 15) - { + if (ndb_log_get_verbose_level() > 15) { NDB_SHARE::print_remaining_open_tables(); } - if (binlog_thread_state == BCCC_restart) - { + if (binlog_thread_state == BCCC_restart) { goto restart_cluster_failure; } @@ -9549,7 +8637,7 @@ Ndb_binlog_thread::do_run() thd_manager->remove_thd(thd); delete thd; - ndb_binlog_running= false; + ndb_binlog_running = false; mysql_cond_broadcast(&injector_data_cond); log_info("Stopped"); @@ -9558,7 +8646,6 @@ Ndb_binlog_thread::do_run() DBUG_VOID_RETURN; } - /* Return string containing current status of ndb binlog as comma separated name value pairs.
@@ -9573,35 +8660,28 @@ Ndb_binlog_thread::do_run() is printed */ -size_t -ndbcluster_show_status_binlog(char *buf, size_t buf_size) -{ +size_t ndbcluster_show_status_binlog(char *buf, size_t buf_size) { DBUG_ENTER("ndbcluster_show_status_binlog"); - + mysql_mutex_lock(&injector_event_mutex); - if (injector_ndb) - { - const ulonglong latest_epoch= injector_ndb->getLatestGCI(); + if (injector_ndb) { + const ulonglong latest_epoch = injector_ndb->getLatestGCI(); mysql_mutex_unlock(&injector_event_mutex); // Get highest trans gci seen by the cluster connections const ulonglong latest_trans_epoch = ndb_get_latest_trans_gci(); - const size_t buf_len = - snprintf(buf, buf_size, - "latest_epoch=%llu, " - "latest_trans_epoch=%llu, " - "latest_received_binlog_epoch=%llu, " - "latest_handled_binlog_epoch=%llu, " - "latest_applied_binlog_epoch=%llu", - latest_epoch, - latest_trans_epoch, - ndb_latest_received_binlog_epoch, - ndb_latest_handled_binlog_epoch, - ndb_latest_applied_binlog_epoch); - DBUG_RETURN(buf_len); - } - else + const size_t buf_len = snprintf( + buf, buf_size, + "latest_epoch=%llu, " + "latest_trans_epoch=%llu, " + "latest_received_binlog_epoch=%llu, " + "latest_handled_binlog_epoch=%llu, " + "latest_applied_binlog_epoch=%llu", + latest_epoch, latest_trans_epoch, ndb_latest_received_binlog_epoch, + ndb_latest_handled_binlog_epoch, ndb_latest_applied_binlog_epoch); + DBUG_RETURN(buf_len); + } else mysql_mutex_unlock(&injector_event_mutex); DBUG_RETURN(0); } diff --git a/storage/ndb/plugin/ha_ndbcluster_binlog.h b/storage/ndb/plugin/ha_ndbcluster_binlog.h index b443a935906b..d048a32615cb 100644 --- a/storage/ndb/plugin/ha_ndbcluster_binlog.h +++ b/storage/ndb/plugin/ha_ndbcluster_binlog.h @@ -34,14 +34,13 @@ class Table; /* Initialize the binlog part of the ndbcluster plugin */ -void ndbcluster_binlog_init(struct handlerton* hton); +void ndbcluster_binlog_init(struct handlerton *hton); -int ndbcluster_binlog_setup_table(THD* thd, class Ndb* ndb, - const char* db, const char* table_name, - const dd::Table* table_def); +int ndbcluster_binlog_setup_table(THD *thd, class Ndb *ndb, const char *db, + const char *table_name, + const dd::Table *table_def); -int ndbcluster_binlog_wait_synch_drop_table(THD* thd, - struct NDB_SHARE* share); +int ndbcluster_binlog_wait_synch_drop_table(THD *thd, struct NDB_SHARE *share); int ndbcluster_binlog_start(); @@ -58,7 +57,7 @@ int ndbcluster_binlog_end(); bool ndb_binlog_is_read_only(void); /* Prints ndb binlog status string in buf */ -size_t ndbcluster_show_status_binlog(char* buf, size_t buf_size); +size_t ndbcluster_show_status_binlog(char *buf, size_t buf_size); /* Called as part of SHOW STATUS or performance_schema @@ -66,7 +65,6 @@ size_t ndbcluster_show_status_binlog(char* buf, size_t buf_size); */ int show_ndb_status_injector(THD *, SHOW_VAR *var, char *); - /** @brief Validate the blacklist of objects @param thd Thread handle @@ -97,8 +95,8 @@ bool ndbcluster_binlog_check_logfile_group_async(const std::string &lfg_name); @param tablespace_name The name of tablespace to check. This cannot be empty @return true if the workitem was accepted, false if not */ -bool -ndbcluster_binlog_check_tablespace_async(const std::string &tablespace_name); +bool ndbcluster_binlog_check_tablespace_async( + const std::string &tablespace_name); /* Called as part of SHOW STATUS or performance_schema queries. 
Returns diff --git a/storage/ndb/plugin/ha_ndbcluster_cond.cc b/storage/ndb/plugin/ha_ndbcluster_cond.cc index 3ba91c75bc77..f23ca037e47b 100644 --- a/storage/ndb/plugin/ha_ndbcluster_cond.cc +++ b/storage/ndb/plugin/ha_ndbcluster_cond.cc @@ -30,22 +30,22 @@ #include "my_dbug.h" #include "sql/current_thd.h" -#include "sql/item.h" // Item -#include "sql/item_cmpfunc.h" // Item_func_like etc. -#include "sql/item_func.h" // Item_func +#include "sql/item.h" // Item +#include "sql/item_cmpfunc.h" // Item_func_like etc. +#include "sql/item_func.h" // Item_func #include "storage/ndb/plugin/ha_ndbcluster.h" #include "storage/ndb/plugin/ndb_log.h" #include "storage/ndb/plugin/ndb_thd.h" -// Typedefs for long names +// Typedefs for long names typedef NdbDictionary::Column NDBCOL; typedef NdbDictionary::Table NDBTAB; typedef enum ndb_item_type { - NDB_VALUE = 0, // Qualified more with Item::Type - NDB_FIELD = 1, // Qualified from table definition - NDB_FUNCTION = 2,// Qualified from Item_func::Functype - NDB_END_COND = 3 // End marker for condition group + NDB_VALUE = 0, // Qualified more with Item::Type + NDB_FIELD = 1, // Qualified from table definition + NDB_FUNCTION = 2, // Qualified from Item_func::Functype + NDB_END_COND = 3 // End marker for condition group } NDB_ITEM_TYPE; typedef enum ndb_func_type { @@ -65,14 +65,13 @@ typedef enum ndb_func_type { NDB_UNSUPPORTED_FUNC = 13 } NDB_FUNC_TYPE; - typedef union ndb_item_value { - const Item *item; // NDB_VALUE - struct { // NDB_FIELD - Field* field; + const Item *item; // NDB_VALUE + struct { // NDB_FIELD + Field *field; int column_no; }; - struct { // NDB_FUNCTION + struct { // NDB_FUNCTION NDB_FUNC_TYPE func_type; uint arg_count; }; @@ -83,8 +82,7 @@ typedef union ndb_item_value { - 'not op1 func op2' -> 'op1 neg_func op2' - 'op1 func op2' -> 'op2 swap_func op1' */ -struct function_mapping -{ +struct function_mapping { NDB_FUNC_TYPE func; NDB_FUNC_TYPE neg_func; NDB_FUNC_TYPE swap_func; @@ -94,24 +92,21 @@ struct function_mapping Define what functions can be negated in condition pushdown.
Note, these HAVE to be in the same order as in definition enum */ -static const function_mapping func_map[]= -{ - {NDB_EQ_FUNC, NDB_NE_FUNC, NDB_EQ_FUNC}, - {NDB_NE_FUNC, NDB_EQ_FUNC, NDB_NE_FUNC}, - {NDB_LT_FUNC, NDB_GE_FUNC, NDB_GT_FUNC}, - {NDB_LE_FUNC, NDB_GT_FUNC, NDB_GE_FUNC}, - {NDB_GT_FUNC, NDB_LE_FUNC, NDB_LT_FUNC}, - {NDB_GE_FUNC, NDB_LT_FUNC, NDB_LE_FUNC}, - {NDB_ISNULL_FUNC, NDB_ISNOTNULL_FUNC, NDB_UNSUPPORTED_FUNC}, - {NDB_ISNOTNULL_FUNC, NDB_ISNULL_FUNC, NDB_UNSUPPORTED_FUNC}, - {NDB_LIKE_FUNC, NDB_NOTLIKE_FUNC, NDB_UNSUPPORTED_FUNC}, - {NDB_NOTLIKE_FUNC, NDB_LIKE_FUNC, NDB_UNSUPPORTED_FUNC}, - {NDB_NOT_FUNC, NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC}, - {NDB_COND_AND_FUNC, NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC}, - {NDB_COND_OR_FUNC, NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC}, - {NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC} -}; - +static const function_mapping func_map[] = { + {NDB_EQ_FUNC, NDB_NE_FUNC, NDB_EQ_FUNC}, + {NDB_NE_FUNC, NDB_EQ_FUNC, NDB_NE_FUNC}, + {NDB_LT_FUNC, NDB_GE_FUNC, NDB_GT_FUNC}, + {NDB_LE_FUNC, NDB_GT_FUNC, NDB_GE_FUNC}, + {NDB_GT_FUNC, NDB_LE_FUNC, NDB_LT_FUNC}, + {NDB_GE_FUNC, NDB_LT_FUNC, NDB_LE_FUNC}, + {NDB_ISNULL_FUNC, NDB_ISNOTNULL_FUNC, NDB_UNSUPPORTED_FUNC}, + {NDB_ISNOTNULL_FUNC, NDB_ISNULL_FUNC, NDB_UNSUPPORTED_FUNC}, + {NDB_LIKE_FUNC, NDB_NOTLIKE_FUNC, NDB_UNSUPPORTED_FUNC}, + {NDB_NOTLIKE_FUNC, NDB_LIKE_FUNC, NDB_UNSUPPORTED_FUNC}, + {NDB_NOT_FUNC, NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC}, + {NDB_COND_AND_FUNC, NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC}, + {NDB_COND_OR_FUNC, NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC}, + {NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC}}; /* This class is the construction element for serialization of Item tree @@ -126,138 +121,140 @@ static const function_mapping func_map[]= Ndb_item with type == NDB_END_COND. NOT items represent negated conditions and generate NAND/NOR groups. 
*/ -class Ndb_item -{ -public: +class Ndb_item { + public: Ndb_item(NDB_ITEM_TYPE item_type) : type(item_type) {} // A Ndb_Item where an Item expression defines the value (a const) - Ndb_item(const Item *item_value) : type(NDB_VALUE) - { - value.item= item_value; + Ndb_item(const Item *item_value) : type(NDB_VALUE) { + value.item = item_value; } // A Ndb_Item referring a Field from 'this' table - Ndb_item(Field *field, int column_no) : type(NDB_FIELD) - { - value.field= field; - value.column_no= column_no; + Ndb_item(Field *field, int column_no) : type(NDB_FIELD) { + value.field = field; + value.column_no = column_no; } Ndb_item(Item_func::Functype func_type, const Item_func *item_func) - : type(NDB_FUNCTION) - { - value.func_type= item_func_to_ndb_func(func_type); - value.arg_count= item_func->argument_count(); + : type(NDB_FUNCTION) { + value.func_type = item_func_to_ndb_func(func_type); + value.arg_count = item_func->argument_count(); } - Ndb_item(Item_func::Functype func_type, uint no_args) - : type(NDB_FUNCTION) - { - value.func_type= item_func_to_ndb_func(func_type); - value.arg_count= no_args; + Ndb_item(Item_func::Functype func_type, uint no_args) : type(NDB_FUNCTION) { + value.func_type = item_func_to_ndb_func(func_type); + value.arg_count = no_args; } - ~Ndb_item() - {} + ~Ndb_item() {} - Field *get_field() const - { + Field *get_field() const { DBUG_ASSERT(type == NDB_FIELD); return value.field; } - int get_field_no() const - { + int get_field_no() const { DBUG_ASSERT(type == NDB_FIELD); return value.column_no; } - NDB_FUNC_TYPE get_func_type() const - { + NDB_FUNC_TYPE get_func_type() const { DBUG_ASSERT(type == NDB_FUNCTION); return value.func_type; } - int get_argument_count() const - { + int get_argument_count() const { DBUG_ASSERT(type == NDB_FUNCTION); return value.arg_count; } - uint32 pack_length() const - { - return get_field()->pack_length(); - } + uint32 pack_length() const { return get_field()->pack_length(); } - const uchar* get_val() const - { - return get_field()->ptr; - } + const uchar *get_val() const { return get_field()->ptr; } - const CHARSET_INFO *get_field_charset() const - { - const Field *field= get_field(); - if (field) - return field->charset(); + const CHARSET_INFO *get_field_charset() const { + const Field *field = get_field(); + if (field) return field->charset(); return NULL; } - const Item *get_item() const - { + const Item *get_item() const { DBUG_ASSERT(this->type == NDB_VALUE); return value.item; } - int save_in_field(const Ndb_item *field_item) const - { + int save_in_field(const Ndb_item *field_item) const { DBUG_ENTER("save_in_field"); Field *field = field_item->get_field(); - const Item *item= get_item(); - if (unlikely(item == nullptr || field == nullptr)) - DBUG_RETURN(-1); + const Item *item = get_item(); + if (unlikely(item == nullptr || field == nullptr)) DBUG_RETURN(-1); - my_bitmap_map *old_map= - dbug_tmp_use_all_columns(field->table, field->table->write_set); - const type_conversion_status status = const_cast<Item *>(item)->save_in_field(field, false); + my_bitmap_map *old_map = + dbug_tmp_use_all_columns(field->table, field->table->write_set); + const type_conversion_status status = + const_cast<Item *>(item)->save_in_field(field, false); dbug_tmp_restore_column_map(field->table->write_set, old_map); - if (unlikely(status != TYPE_OK)) - DBUG_RETURN(-1); - - DBUG_RETURN(0); //OK + if (unlikely(status != TYPE_OK)) DBUG_RETURN(-1); + + DBUG_RETURN(0); // OK } - static NDB_FUNC_TYPE item_func_to_ndb_func(Item_func::Functype fun) - { + static
NDB_FUNC_TYPE item_func_to_ndb_func(Item_func::Functype fun) { switch (fun) { - case (Item_func::EQ_FUNC): { return NDB_EQ_FUNC; } - case (Item_func::NE_FUNC): { return NDB_NE_FUNC; } - case (Item_func::LT_FUNC): { return NDB_LT_FUNC; } - case (Item_func::LE_FUNC): { return NDB_LE_FUNC; } - case (Item_func::GT_FUNC): { return NDB_GT_FUNC; } - case (Item_func::GE_FUNC): { return NDB_GE_FUNC; } - case (Item_func::ISNULL_FUNC): { return NDB_ISNULL_FUNC; } - case (Item_func::ISNOTNULL_FUNC): { return NDB_ISNOTNULL_FUNC; } - case (Item_func::LIKE_FUNC): { return NDB_LIKE_FUNC; } - case (Item_func::NOT_FUNC): { return NDB_NOT_FUNC; } - case (Item_func::COND_AND_FUNC): { return NDB_COND_AND_FUNC; } - case (Item_func::COND_OR_FUNC): { return NDB_COND_OR_FUNC; } - default: { return NDB_UNSUPPORTED_FUNC; } + case (Item_func::EQ_FUNC): { + return NDB_EQ_FUNC; + } + case (Item_func::NE_FUNC): { + return NDB_NE_FUNC; + } + case (Item_func::LT_FUNC): { + return NDB_LT_FUNC; + } + case (Item_func::LE_FUNC): { + return NDB_LE_FUNC; + } + case (Item_func::GT_FUNC): { + return NDB_GT_FUNC; + } + case (Item_func::GE_FUNC): { + return NDB_GE_FUNC; + } + case (Item_func::ISNULL_FUNC): { + return NDB_ISNULL_FUNC; + } + case (Item_func::ISNOTNULL_FUNC): { + return NDB_ISNOTNULL_FUNC; + } + case (Item_func::LIKE_FUNC): { + return NDB_LIKE_FUNC; + } + case (Item_func::NOT_FUNC): { + return NDB_NOT_FUNC; + } + case (Item_func::COND_AND_FUNC): { + return NDB_COND_AND_FUNC; + } + case (Item_func::COND_OR_FUNC): { + return NDB_COND_OR_FUNC; + } + default: { + return NDB_UNSUPPORTED_FUNC; + } } } - static NDB_FUNC_TYPE negate(NDB_FUNC_TYPE fun) - { - uint i= (uint) fun; + static NDB_FUNC_TYPE negate(NDB_FUNC_TYPE fun) { + uint i = (uint)fun; DBUG_ASSERT(fun == func_map[i].func); - return func_map[i].neg_func; + return func_map[i].neg_func; } - static NDB_FUNC_TYPE swap(NDB_FUNC_TYPE fun) - { - uint i= (uint) fun; + static NDB_FUNC_TYPE swap(NDB_FUNC_TYPE fun) { + uint i = (uint)fun; DBUG_ASSERT(fun == func_map[i].func); - return func_map[i].swap_func; + return func_map[i].swap_func; } const NDB_ITEM_TYPE type; + private: NDB_ITEM_VALUE value; }; @@ -272,117 +269,86 @@ class Ndb_item to check specific order (currently used for detecting support for <field> LIKE <string>|<func>, but not <string>|<func> LIKE <field>).
*/ -class Ndb_expect_stack -{ +class Ndb_expect_stack { static const uint MAX_EXPECT_ITEMS = Item::VIEW_FIXER_ITEM + 1; static const uint MAX_EXPECT_FIELD_TYPES = MYSQL_TYPE_GEOMETRY + 1; static const uint MAX_EXPECT_FIELD_RESULTS = DECIMAL_RESULT + 1; + public: - Ndb_expect_stack(): other_field(nullptr), collation(nullptr), - length(0), max_length(0), next(nullptr) - { + Ndb_expect_stack() + : other_field(nullptr), + collation(nullptr), + length(0), + max_length(0), + next(nullptr) { // Allocate type checking bitmaps using fixed size buffers // since max size is known at compile time - bitmap_init(&expect_mask, m_expect_buf, - MAX_EXPECT_ITEMS, false); + bitmap_init(&expect_mask, m_expect_buf, MAX_EXPECT_ITEMS, false); bitmap_init(&expect_field_type_mask, m_expect_field_type_buf, MAX_EXPECT_FIELD_TYPES, false); bitmap_init(&expect_field_result_mask, m_expect_field_result_buf, MAX_EXPECT_FIELD_RESULTS, false); } - ~Ndb_expect_stack() - { - if (next) - destroy(next); - next= NULL; - } - void push(Ndb_expect_stack* expect_next) - { - next= expect_next; + ~Ndb_expect_stack() { + if (next) destroy(next); + next = NULL; } - void pop() - { - if (next) - { - Ndb_expect_stack* expect_next= next; + void push(Ndb_expect_stack *expect_next) { next = expect_next; } + void pop() { + if (next) { + Ndb_expect_stack *expect_next = next; bitmap_copy(&expect_mask, &next->expect_mask); bitmap_copy(&expect_field_type_mask, &next->expect_field_type_mask); bitmap_copy(&expect_field_result_mask, &next->expect_field_result_mask); - other_field= next->other_field; - collation= next->collation; - next= next->next; + other_field = next->other_field; + collation = next->collation; + next = next->next; destroy(expect_next); } } - void expect(Item::Type type) - { - bitmap_set_bit(&expect_mask, (uint) type); - } - void dont_expect(Item::Type type) - { - bitmap_clear_bit(&expect_mask, (uint) type); + void expect(Item::Type type) { bitmap_set_bit(&expect_mask, (uint)type); } + void dont_expect(Item::Type type) { + bitmap_clear_bit(&expect_mask, (uint)type); } - bool expecting(Item::Type type) - { - if (unlikely((uint)type > MAX_EXPECT_ITEMS)) - { + bool expecting(Item::Type type) { + if (unlikely((uint)type > MAX_EXPECT_ITEMS)) { // Unknown type, can't be expected return false; } - return bitmap_is_set(&expect_mask, (uint) type); - } - void expect_nothing() - { - bitmap_clear_all(&expect_mask); + return bitmap_is_set(&expect_mask, (uint)type); } - bool expecting_nothing() - { - return bitmap_is_clear_all(&expect_mask); - } - void expect_only(Item::Type type) - { + void expect_nothing() { bitmap_clear_all(&expect_mask); } + bool expecting_nothing() { return bitmap_is_clear_all(&expect_mask); } + void expect_only(Item::Type type) { expect_nothing(); expect(type); } - bool expecting_only(Item::Type type) - { + bool expecting_only(Item::Type type) { return (expecting(type) && bitmap_bits_set(&expect_mask) == 1); } - void expect_field_type(enum_field_types type) - { - bitmap_set_bit(&expect_field_type_mask, (uint) type); - } - void dont_expect_field_type(enum_field_types type) - { - bitmap_clear_bit(&expect_field_type_mask, (uint) type); + void expect_field_type(enum_field_types type) { + bitmap_set_bit(&expect_field_type_mask, (uint)type); } - void expect_all_field_types() - { - bitmap_set_all(&expect_field_type_mask); + void dont_expect_field_type(enum_field_types type) { + bitmap_clear_bit(&expect_field_type_mask, (uint)type); } - bool expecting_field_type(enum_field_types type) - { - if (unlikely((uint)type > 
MAX_EXPECT_FIELD_TYPES)) - { + void expect_all_field_types() { bitmap_set_all(&expect_field_type_mask); } + bool expecting_field_type(enum_field_types type) { + if (unlikely((uint)type > MAX_EXPECT_FIELD_TYPES)) { // Unknown type, can't be expected return false; } - return bitmap_is_set(&expect_field_type_mask, (uint) type); + return bitmap_is_set(&expect_field_type_mask, (uint)type); } - void expect_only_field_type(enum_field_types type) - { + void expect_only_field_type(enum_field_types type) { bitmap_clear_all(&expect_field_type_mask); expect_field_type(type); } - void expect_comparable_field(const Field *field) - { - other_field = field; - } - bool expecting_comparable_field(const Field *field) - { + void expect_comparable_field(const Field *field) { other_field = field; } + bool expecting_comparable_field(const Field *field) { if (other_field == nullptr) // No Field to be comparable with return true; @@ -391,86 +357,56 @@ class Ndb_expect_stack return other_field->eq_def(field); } - void expect_field_result(Item_result result) - { - bitmap_set_bit(&expect_field_result_mask, (uint) result); + void expect_field_result(Item_result result) { + bitmap_set_bit(&expect_field_result_mask, (uint)result); } - bool expecting_field_result(Item_result result) - { - if (unlikely((uint)result > MAX_EXPECT_FIELD_RESULTS)) - { + bool expecting_field_result(Item_result result) { + if (unlikely((uint)result > MAX_EXPECT_FIELD_RESULTS)) { // Unknown result, can't be expected return false; } - return bitmap_is_set(&expect_field_result_mask, - (uint) result); - } - void expect_no_field_result() - { - bitmap_clear_all(&expect_field_result_mask); + return bitmap_is_set(&expect_field_result_mask, (uint)result); } - bool expecting_no_field_result() - { + void expect_no_field_result() { bitmap_clear_all(&expect_field_result_mask); } + bool expecting_no_field_result() { return bitmap_is_clear_all(&expect_field_result_mask); } - void expect_collation(const CHARSET_INFO* col) - { - collation= col; - } - bool expecting_collation(const CHARSET_INFO* col) - { - bool matching= (!collation) - ? true - : (collation == col); - collation= NULL; + void expect_collation(const CHARSET_INFO *col) { collation = col; } + bool expecting_collation(const CHARSET_INFO *col) { + bool matching = (!collation) ? 
true : (collation == col); + collation = NULL; return matching; } - void expect_length(Uint32 len) - { - length= len; - } - void expect_max_length(Uint32 max) - { - max_length= max; - } - bool expecting_length(Uint32 len) - { + void expect_length(Uint32 len) { length = len; } + void expect_max_length(Uint32 max) { max_length = max; } + bool expecting_length(Uint32 len) { return max_length == 0 || len <= max_length; } - bool expecting_max_length(Uint32 max) - { - return max >= length; - } - void expect_no_length() - { - length= max_length= 0; - } + bool expecting_max_length(Uint32 max) { return max >= length; } + void expect_no_length() { length = max_length = 0; } -private: - my_bitmap_map - m_expect_buf[bitmap_buffer_size(MAX_EXPECT_ITEMS)]; + private: + my_bitmap_map m_expect_buf[bitmap_buffer_size(MAX_EXPECT_ITEMS)]; my_bitmap_map - m_expect_field_type_buf[bitmap_buffer_size(MAX_EXPECT_FIELD_TYPES)]; + m_expect_field_type_buf[bitmap_buffer_size(MAX_EXPECT_FIELD_TYPES)]; my_bitmap_map - m_expect_field_result_buf[bitmap_buffer_size(MAX_EXPECT_FIELD_RESULTS)]; + m_expect_field_result_buf[bitmap_buffer_size(MAX_EXPECT_FIELD_RESULTS)]; MY_BITMAP expect_mask; MY_BITMAP expect_field_type_mask; MY_BITMAP expect_field_result_mask; - const Field* other_field; - const CHARSET_INFO* collation; + const Field *other_field; + const CHARSET_INFO *collation; Uint32 length; Uint32 max_length; - Ndb_expect_stack* next; + Ndb_expect_stack *next; }; -class Ndb_rewrite_context -{ -public: +class Ndb_rewrite_context { + public: Ndb_rewrite_context(const Item_func *func) - : func_item(func), left_hand_item(NULL), count(0) {} - ~Ndb_rewrite_context() - { + : func_item(func), left_hand_item(NULL), count(0) {} + ~Ndb_rewrite_context() { if (next) destroy(next); } const Item_func *func_item; @@ -486,126 +422,90 @@ class Ndb_rewrite_context if the condition found is supported, and information about what is expected next in the tree in order for the condition to be supported.
*/ -class Ndb_cond_traverse_context -{ -public: +class Ndb_cond_traverse_context { + public: Ndb_cond_traverse_context(TABLE *tab, const NdbDictionary::Table *ndb_tab) - : table(tab), ndb_table(ndb_tab), - supported(true), skip(0), rewrite_stack(NULL) - {} - ~Ndb_cond_traverse_context() - { + : table(tab), + ndb_table(ndb_tab), + supported(true), + skip(0), + rewrite_stack(NULL) {} + ~Ndb_cond_traverse_context() { if (rewrite_stack) destroy(rewrite_stack); } - inline void expect_field_from_table() - { + inline void expect_field_from_table() { expect_stack.expect(Item::FIELD_ITEM); expect_stack.expect_all_field_types(); expect_stack.expect_comparable_field(nullptr); } - inline void expect_only_field_from_table() - { + inline void expect_only_field_from_table() { expect_stack.expect_nothing(); expect_field_from_table(); } - inline void expect(Item::Type type) - { - expect_stack.expect(type); - } - inline void dont_expect(Item::Type type) - { - expect_stack.dont_expect(type); - } - inline bool expecting(Item::Type type) - { + inline void expect(Item::Type type) { expect_stack.expect(type); } + inline void dont_expect(Item::Type type) { expect_stack.dont_expect(type); } + inline bool expecting(Item::Type type) { return expect_stack.expecting(type); } - inline void expect_nothing() - { - expect_stack.expect_nothing(); - } - inline bool expecting_nothing() - { - return expect_stack.expecting_nothing(); - } - inline void expect_only(Item::Type type) - { - expect_stack.expect_only(type); - } + inline void expect_nothing() { expect_stack.expect_nothing(); } + inline bool expecting_nothing() { return expect_stack.expecting_nothing(); } + inline void expect_only(Item::Type type) { expect_stack.expect_only(type); } - inline void expect_field_type(enum_field_types type) - { + inline void expect_field_type(enum_field_types type) { expect_stack.expect_field_type(type); } - inline void dont_expect_field_type(enum_field_types type) - { + inline void dont_expect_field_type(enum_field_types type) { expect_stack.dont_expect_field_type(type); } - inline void expect_only_field_type(enum_field_types result) - { + inline void expect_only_field_type(enum_field_types result) { expect_stack.expect_only_field_type(result); } - inline void expect_comparable_field(const Field *field) - { + inline void expect_comparable_field(const Field *field) { expect_stack.expect_only_field_type(field->real_type()); expect_stack.expect_comparable_field(field); } - inline bool expecting_comparable_field(const Field *field) - { + inline bool expecting_comparable_field(const Field *field) { return expect_stack.expecting_field_type(field->real_type()) && expect_stack.expecting_comparable_field(field); } - inline void expect_field_result(Item_result result) - { + inline void expect_field_result(Item_result result) { expect_stack.expect_field_result(result); } - inline bool expecting_field_result(Item_result result) - { + inline bool expecting_field_result(Item_result result) { return expect_stack.expecting_field_result(result); } - inline void expect_no_field_result() - { + inline void expect_no_field_result() { expect_stack.expect_no_field_result(); } - inline bool expecting_no_field_result() - { + inline bool expecting_no_field_result() { return expect_stack.expecting_no_field_result(); } - inline void expect_collation(const CHARSET_INFO* col) - { + inline void expect_collation(const CHARSET_INFO *col) { expect_stack.expect_collation(col); } - inline bool expecting_collation(const CHARSET_INFO* col) - { + inline bool 
expecting_collation(const CHARSET_INFO *col) { return expect_stack.expecting_collation(col); } - inline void expect_length(Uint32 length) - { + inline void expect_length(Uint32 length) { expect_stack.expect_length(length); } - inline void expect_max_length(Uint32 max) - { + inline void expect_max_length(Uint32 max) { expect_stack.expect_max_length(max); } - inline bool expecting_length(Uint32 length) - { + inline bool expecting_length(Uint32 length) { return expect_stack.expecting_length(length); } - inline bool expecting_max_length(Uint32 max) - { + inline bool expecting_max_length(Uint32 max) { return expect_stack.expecting_max_length(max); } - inline void expect_no_length() - { - expect_stack.expect_no_length(); - } + inline void expect_no_length() { expect_stack.expect_no_length(); } - TABLE* const table; - const NdbDictionary::Table* const ndb_table; + TABLE *const table; + const NdbDictionary::Table *const ndb_table; bool supported; List<const Ndb_item> items; Ndb_expect_stack expect_stack; @@ -613,20 +513,18 @@ class Ndb_cond_traverse_context Ndb_rewrite_context *rewrite_stack; }; -static bool -is_supported_temporal_type(enum_field_types type) -{ - switch(type) { - case MYSQL_TYPE_TIME: - case MYSQL_TYPE_TIME2: - case MYSQL_TYPE_DATE: - case MYSQL_TYPE_NEWDATE: - case MYSQL_TYPE_YEAR: - case MYSQL_TYPE_DATETIME: - case MYSQL_TYPE_DATETIME2: - return true; - default: - return false; +static bool is_supported_temporal_type(enum_field_types type) { + switch (type) { + case MYSQL_TYPE_TIME: + case MYSQL_TYPE_TIME2: + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_NEWDATE: + case MYSQL_TYPE_YEAR: + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_DATETIME2: + return true; + default: + return false; } } @@ -635,18 +533,15 @@ is_supported_temporal_type(enum_field_types type) Note that traverse_cond() only traverses any operands for FUNC_ITEM and COND_ITEM, which is reflected by operand_count(). */ -static uint -operand_count(const Item *item) -{ +static uint operand_count(const Item *item) { switch (item->type()) { - case Item::FUNC_ITEM: - { - const Item_func *func_item= static_cast<const Item_func *>(item); + case Item::FUNC_ITEM: { + const Item_func *func_item = static_cast<const Item_func *>(item); return func_item->argument_count(); } - case Item::COND_ITEM: - { - Item_cond *cond_item= const_cast<Item_cond *>(static_cast<const Item_cond *>(item)); + case Item::COND_ITEM: { + Item_cond *cond_item = + const_cast<Item_cond *>(static_cast<const Item_cond *>(item)); List<Item> *arguments = cond_item->argument_list(); // A COND_ITEM (And/or) is visited both infix and postfix, so need '+1' return arguments->elements + 1; @@ -662,133 +557,119 @@ operand_count(const Item *item) position of fields that is not directly available in the Item tree. Also checks if condition is supported.
-static void -ndb_serialize_cond(const Item *item, void *arg) -{ - Ndb_cond_traverse_context *context= (Ndb_cond_traverse_context *) arg; +static void ndb_serialize_cond(const Item *item, void *arg) { + Ndb_cond_traverse_context *context = (Ndb_cond_traverse_context *)arg; DBUG_ENTER("ndb_serialize_cond"); // Check if we are skipping arguments to a function to be evaluated - if (context->skip) - { + if (context->skip) { DBUG_PRINT("info", ("Skipping argument %d", context->skip)); context->skip--; - if (item != nullptr) - { - context->skip+= operand_count(item); + if (item != nullptr) { + context->skip += operand_count(item); } DBUG_VOID_RETURN; } - - if (context->supported) - { - Ndb_rewrite_context *rewrite_context= context->rewrite_stack; + + if (context->supported) { + Ndb_rewrite_context *rewrite_context = context->rewrite_stack; // Check if we are rewriting some unsupported function call - if (rewrite_context) - { + if (rewrite_context) { rewrite_context->count++; - if (rewrite_context->count == 1) - { + if (rewrite_context->count == 1) { // This is the <left_hand_item>, save it in the rewrite context - rewrite_context->left_hand_item= item; - } - else - { + rewrite_context->left_hand_item = item; + } else { // Has already seen the 'left_hand_item', this 'item' is one of // the right hand items in the in/between predicate to be rewritten. Item *cmp_func = nullptr; - const Item_func *rewrite_func_item= rewrite_context->func_item; + const Item_func *rewrite_func_item = rewrite_context->func_item; switch (rewrite_func_item->functype()) { - case Item_func::BETWEEN: - { - /* - Rewrite <field> BETWEEN <value1> AND <value2> - to <field> >= <value1> AND - <field> <= <value2> - */ - if (rewrite_context->count == 2) // Lower 'between-limit' - { - // Lower limit of BETWEEN - DBUG_PRINT("info", ("GE_FUNC")); - cmp_func = new (*THR_MALLOC) Item_func_ge(const_cast<Item *>(rewrite_context->left_hand_item), - const_cast<Item *>(item)); + case Item_func::BETWEEN: { + /* + Rewrite <field> BETWEEN <value1> AND <value2> + to <field> >= <value1> AND + <field> <= <value2> + */ + if (rewrite_context->count == 2) // Lower 'between-limit' + { + // Lower limit of BETWEEN + DBUG_PRINT("info", ("GE_FUNC")); + cmp_func = new (*THR_MALLOC) Item_func_ge( + const_cast<Item *>(rewrite_context->left_hand_item), + const_cast<Item *>(item)); + } else if (rewrite_context->count == 3) // Upper 'between-limit' + { + // Upper limit of BETWEEN + DBUG_PRINT("info", ("LE_FUNC")); + cmp_func = new (*THR_MALLOC) Item_func_le( + const_cast<Item *>(rewrite_context->left_hand_item), + const_cast<Item *>(item)); + } else { + // Illegal BETWEEN expression + DBUG_PRINT("info", ("Illegal BETWEEN expression")); + context->supported = false; + DBUG_VOID_RETURN; + } + break; } - else if (rewrite_context->count == 3) // Upper 'between-limit' - { - // Upper limit of BETWEEN - DBUG_PRINT("info", ("LE_FUNC")); - cmp_func = new (*THR_MALLOC) Item_func_le(const_cast<Item *>(rewrite_context->left_hand_item), - const_cast<Item *>(item)); + case Item_func::IN_FUNC: { + /* + Rewrite <field> IN (<value1>, <value2>, ..) + to <field> = <value1> OR + <field> = <value2> ... + */ + DBUG_PRINT("info", ("EQ_FUNC")); + cmp_func = new (*THR_MALLOC) Item_func_eq( + const_cast<Item *>(rewrite_context->left_hand_item), + const_cast<Item *>(item)); + break; + } + default: + // Only BETWEEN/IN can be rewritten. + // If we add support for rewrite of others, handling must be added + // above + DBUG_ASSERT(false); + context->supported = false; DBUG_VOID_RETURN; - } - break; - } - case Item_func::IN_FUNC: - { - /* - Rewrite <field> IN (<value1>, <value2>, ..) - to <field> = <value1> OR - <field> = <value2> ...
- */ - DBUG_PRINT("info", ("EQ_FUNC")); - cmp_func = new (*THR_MALLOC) Item_func_eq(const_cast<Item *>(rewrite_context->left_hand_item), - const_cast<Item *>(item)); - break; - } - default: - // Only BETWEEN/IN can be rewritten. - // If we add support for rewrite of others, handling must be added above - DBUG_ASSERT(false); - context->supported= false; - DBUG_VOID_RETURN; - } cmp_func->fix_fields(current_thd, &cmp_func); cmp_func->update_used_tables(); // Traverse and serialize the rewritten predicate - context->rewrite_stack= NULL; // Disable rewrite mode + context->rewrite_stack = NULL; // Disable rewrite mode context->expect_only(Item::FUNC_ITEM); cmp_func->traverse_cond(&ndb_serialize_cond, context, Item::PREFIX); - context->rewrite_stack= rewrite_context; // Re-enable rewrite mode + context->rewrite_stack = rewrite_context; // Re-enable rewrite mode // Possibly terminate the rewrite_context if (context->supported && rewrite_context->count == - rewrite_context->func_item->argument_count()) - { + rewrite_context->func_item->argument_count()) { // Rewrite is done, wrap an END() at the end DBUG_PRINT("info", ("End of rewrite condition group")); context->items.push_back(new (*THR_MALLOC) Ndb_item(NDB_END_COND)); // Pop rewrite stack - context->rewrite_stack= rewrite_context->next; - rewrite_context->next= NULL; + context->rewrite_stack = rewrite_context->next; + rewrite_context->next = NULL; destroy(rewrite_context); } } - DBUG_PRINT("info", ("Skip 'item' (to be) handled in rewritten predicate")); - context->skip= operand_count(item); + DBUG_PRINT("info", + ("Skip 'item' (to be) handled in rewritten predicate")); + context->skip = operand_count(item); DBUG_VOID_RETURN; - } - else //not in a 'rewrite_context' + } else // not in a 'rewrite_context' { const Ndb_item *ndb_item = nullptr; // Check for end of AND/OR expression - if (!item) - { + if (!item) { // End marker for condition group DBUG_PRINT("info", ("End of condition group")); context->expect_no_length(); - ndb_item= new (*THR_MALLOC) Ndb_item(NDB_END_COND); - } - else - { - bool pop= true; + ndb_item = new (*THR_MALLOC) Ndb_item(NDB_END_COND); + } else { + bool pop = true; /* Based on which tables are being used from an item expression, we might be able to evaluate its value immediately. @@ -796,8 +677,7 @@ ndb_serialize_cond(const Item *item, void *arg) now, same is true for expressions being entirely 'const'. */ const table_map this_table = context->table->pos_in_table_list->map(); - if (!(item->used_tables() & this_table)) - { + if (!(item->used_tables() & this_table)) { /* Item value can be evaluated right away, and its value used in the condition, instead of the Item-expression. Note that this will @@ -808,43 +688,35 @@ ndb_serialize_cond(const Item *item, void *arg) String str; item->print(current_thd, &str, QT_ORDINARY); #endif - if (item->is_bool_func()) - { + if (item->is_bool_func()) { // Item is a boolean func, (e.g. an EQ_FUNC) DBUG_ASSERT(item->result_type() == INT_RESULT); - DBUG_PRINT("info", ("BOOLEAN 'VALUE' expression: '%s'", str.c_ptr_safe())); - ndb_item= new (*THR_MALLOC) Ndb_item(item); + DBUG_PRINT("info", + ("BOOLEAN 'VALUE' expression: '%s'", str.c_ptr_safe())); + ndb_item = new (*THR_MALLOC) Ndb_item(item); // Expect another logical expression context->expect_only(Item::FUNC_ITEM); context->expect(Item::COND_ITEM); - } - else if (item->type() == Item::VARBIN_ITEM) - { - // VARBIN_ITEM is special as no similar VARBIN_RESULT type is defined, - // so it needs to be explicitly handled here.
- DBUG_PRINT("info", ("VARBIN_ITEM 'VALUE' expression: '%s'", str.c_ptr_safe())); - if (context->expecting(Item::VARBIN_ITEM)) - { - ndb_item= new (*THR_MALLOC) Ndb_item(item); - if (context->expecting_no_field_result()) - { + } else if (item->type() == Item::VARBIN_ITEM) { + // VARBIN_ITEM is special as no similar VARBIN_RESULT type is + // defined, so it need to be explicitely handled here. + DBUG_PRINT("info", ("VARBIN_ITEM 'VALUE' expression: '%s'", + str.c_ptr_safe())); + if (context->expecting(Item::VARBIN_ITEM)) { + ndb_item = new (*THR_MALLOC) Ndb_item(item); + if (context->expecting_no_field_result()) { // We have not seen the field argument referring this table yet context->expect_only_field_from_table(); context->expect_field_result(STRING_RESULT); - } - else - { + } else { // Expect another logical expression context->expect_only(Item::FUNC_ITEM); context->expect(Item::COND_ITEM); } - } - else - context->supported= false; - } - else - { + } else + context->supported = false; + } else { // For the INT, REAL, DECIMAL and STRING Item type, we use // the similar result_type() as a 'catch it all' synonym to // handle both an Item and any expression of the specific type. @@ -860,556 +732,510 @@ ndb_serialize_cond(const Item *item, void *arg) item->result_type() == STRING_RESULT); switch (item->result_type()) { - case INT_RESULT: - DBUG_PRINT("info", ("INTEGER 'VALUE' expression: '%s'", str.c_ptr_safe())); - if (context->expecting(Item::INT_ITEM)) - { - ndb_item= new (*THR_MALLOC) Ndb_item(item); - if (context->expecting_no_field_result()) - { - // We have not seen the field argument yet - context->expect_only_field_from_table(); - context->expect_field_result(INT_RESULT); - context->expect_field_result(REAL_RESULT); - context->expect_field_result(DECIMAL_RESULT); - } - else - { - // Expect another logical expression - context->expect_only(Item::FUNC_ITEM); - context->expect(Item::COND_ITEM); - } - } - else - context->supported= false; - break; + case INT_RESULT: + DBUG_PRINT("info", ("INTEGER 'VALUE' expression: '%s'", + str.c_ptr_safe())); + if (context->expecting(Item::INT_ITEM)) { + ndb_item = new (*THR_MALLOC) Ndb_item(item); + if (context->expecting_no_field_result()) { + // We have not seen the field argument yet + context->expect_only_field_from_table(); + context->expect_field_result(INT_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(DECIMAL_RESULT); + } else { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + } else + context->supported = false; + break; - case REAL_RESULT: - DBUG_PRINT("info", ("REAL 'VALUE' expression: '%s'", str.c_ptr_safe())); - if (context->expecting(Item::REAL_ITEM)) - { - ndb_item= new (*THR_MALLOC) Ndb_item(item); - if (context->expecting_no_field_result()) - { - // We have not seen the field argument yet - context->expect_only_field_from_table(); - context->expect_field_result(REAL_RESULT); - } - else - { - // Expect another logical expression - context->expect_only(Item::FUNC_ITEM); - context->expect(Item::COND_ITEM); - } - } - else - context->supported= false; - break; + case REAL_RESULT: + DBUG_PRINT("info", + ("REAL 'VALUE' expression: '%s'", str.c_ptr_safe())); + if (context->expecting(Item::REAL_ITEM)) { + ndb_item = new (*THR_MALLOC) Ndb_item(item); + if (context->expecting_no_field_result()) { + // We have not seen the field argument yet + context->expect_only_field_from_table(); + context->expect_field_result(REAL_RESULT); + } else 
{ + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + } else + context->supported = false; + break; - case DECIMAL_RESULT: - DBUG_PRINT("info", ("DECIMAL 'VALUE' expression: '%s'", str.c_ptr_safe())); - if (context->expecting(Item::DECIMAL_ITEM)) - { - ndb_item= new (*THR_MALLOC) Ndb_item(item); - if (context->expecting_no_field_result()) - { - // We have not seen the field argument yet - context->expect_only_field_from_table(); - context->expect_field_result(REAL_RESULT); - context->expect_field_result(DECIMAL_RESULT); - } - else - { - // Expect another logical expression - context->expect_only(Item::FUNC_ITEM); - context->expect(Item::COND_ITEM); - } - } - else - context->supported= false; - break; + case DECIMAL_RESULT: + DBUG_PRINT("info", ("DECIMAL 'VALUE' expression: '%s'", + str.c_ptr_safe())); + if (context->expecting(Item::DECIMAL_ITEM)) { + ndb_item = new (*THR_MALLOC) Ndb_item(item); + if (context->expecting_no_field_result()) { + // We have not seen the field argument yet + context->expect_only_field_from_table(); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(DECIMAL_RESULT); + } else { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + } else + context->supported = false; + break; - case STRING_RESULT: - DBUG_PRINT("info", ("STRING 'VALUE' expression: '%s'", str.c_ptr_safe())); - // Check that we do support pushing the item value length - if (context->expecting(Item::STRING_ITEM) && - context->expecting_length(item->max_length)) - { - ndb_item= new (*THR_MALLOC) Ndb_item(item); - if (context->expecting_no_field_result()) - { - // We have not seen the field argument yet - context->expect_only_field_from_table(); - context->expect_field_result(STRING_RESULT); - context->expect_collation(item->collation.collation); - context->expect_length(item->max_length); - } - else - { - // Expect another logical expression - context->expect_only(Item::FUNC_ITEM); - context->expect(Item::COND_ITEM); - context->expect_no_length(); - // Check that we are comparing with a field with same collation - if (!context->expecting_collation(item->collation.collation)) - { - DBUG_PRINT("info", ("Found non-matching collation %s", - item->collation.collation->name)); - context->supported= false; + case STRING_RESULT: + DBUG_PRINT("info", ("STRING 'VALUE' expression: '%s'", + str.c_ptr_safe())); + // Check that we do support pushing the item value length + if (context->expecting(Item::STRING_ITEM) && + context->expecting_length(item->max_length)) { + ndb_item = new (*THR_MALLOC) Ndb_item(item); + if (context->expecting_no_field_result()) { + // We have not seen the field argument yet + context->expect_only_field_from_table(); + context->expect_field_result(STRING_RESULT); + context->expect_collation(item->collation.collation); + context->expect_length(item->max_length); + } else { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + context->expect_no_length(); + // Check that we are comparing with a field with same + // collation + if (!context->expecting_collation( + item->collation.collation)) { + DBUG_PRINT("info", ("Found non-matching collation %s", + item->collation.collation->name)); + context->supported = false; + } } - } - } - else - context->supported= false; - break; + } else + context->supported = false; + break; - default: - DBUG_ASSERT(false); - 
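Editor's note: the hunk above is easier to follow with the serialization model in mind. ndb_serialize_cond() is called for every Item of the condition tree in prefix order and appends an Ndb_item to the context's output list, while the context's "expect" set records which Item types may legally appear next; context->skip then swallows operands that were already consumed. A minimal sketch of that idea (all names are hypothetical illustrations, not the handler's actual types):

// Prefix serialization guarded by an "expect" mask -- illustrative only.
#include <cassert>
#include <vector>

enum TokType { FUNC = 0, FIELD = 1, VALUE = 2 };

struct Ctx {
  std::vector<TokType> out;      // serialized prefix list
  unsigned expect = 1u << FUNC;  // only a function may start a predicate
  bool supported = true;

  void visit(TokType t, unsigned next) {
    if (!(expect & (1u << t))) {  // unexpected operand: reject pushdown
      supported = false;
      return;
    }
    out.push_back(t);
    expect = next;  // what may legally follow this token
  }
};

int main() {
  Ctx ctx;  // WHERE col > 7 arrives in prefix order: FUNC, FIELD, VALUE
  ctx.visit(FUNC, (1u << FIELD) | (1u << VALUE));
  ctx.visit(FIELD, 1u << VALUE);
  ctx.visit(VALUE, 1u << FUNC);
  assert(ctx.supported && ctx.out.size() == 3);
  return 0;
}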
   switch (item->type()) {
-    case Item::FIELD_ITEM:
-    {
-      const Item_field *field_item= down_cast<const Item_field*>(item);
-      Field *field= field_item->field;
-      const enum_field_types type= field->real_type();
-
-      /* Check whether field is computed at MySQL layer */
-      if (field->is_virtual_gcol())
-      {
-        context->supported= false;
-        break;
-      }
-
-      DBUG_PRINT("info", ("FIELD_ITEM"));
-      DBUG_PRINT("info", ("table %s", field->table->alias));
-      DBUG_PRINT("info", ("column %s", field->field_name));
-      DBUG_PRINT("info", ("column length %u", field->field_length));
-      DBUG_PRINT("info", ("type %d", type));
-      DBUG_PRINT("info", ("result type %d", field->result_type()));
-
-      // Check that we are expecting a field with the correct
-      // type, and possibly being 'comparable' with a previous Field.
-      if (context->expecting(Item::FIELD_ITEM) &&
-          context->expecting_comparable_field(field) &&
-          // Bit fields not yet supported in scan filter
-          type != MYSQL_TYPE_BIT &&
-          /* Char(0) field is treated as Bit fields inside NDB
-             Hence not supported in scan filter */
-          (!(type == MYSQL_TYPE_STRING && field->pack_length() == 0)) &&
-          // No BLOB support in scan filter
-          type != MYSQL_TYPE_TINY_BLOB &&
-          type != MYSQL_TYPE_MEDIUM_BLOB &&
-          type != MYSQL_TYPE_LONG_BLOB &&
-          type != MYSQL_TYPE_BLOB &&
-          type != MYSQL_TYPE_JSON &&
-          type != MYSQL_TYPE_GEOMETRY)
-      {
-        // Found a Field_item of a supported type. and from 'this' table
-        DBUG_ASSERT(context->table == field->table);
-
-        const NDBCOL *col= context->ndb_table->getColumn(field->field_name);
-        DBUG_ASSERT(col);
-        ndb_item= new (*THR_MALLOC) Ndb_item(field, col->getColumnNo());
-
-        /*
-          Check, or set, further expectations for the operand(s).
-          For an operation taking multiple operands, the first operand
-          sets the requirement for the next to be compatible.
-          'expecting_*_field_result' is used to check if this is the
-          first operand or not: If there are no 'field_result' expectations
-          set yet, this is the first operand, and it is used to set expectations
-          for the next one(s).
-        */
-        if (!context->expecting_no_field_result())
-        {
-          // Have some result type expectations to check.
-          // Note that STRING and INT(Year) are always allowed
-          // to be used together with temporal data types.
-          if (!(context->expecting_field_result(field->result_type()) ||
-                // Date and year can be written as string or int
-                (is_supported_temporal_type(type) &&
-                 (context->expecting_field_result(STRING_RESULT) ||
-                  context->expecting_field_result(INT_RESULT)))))
-          {
-            DBUG_PRINT("info", ("Was not expecting field of result_type %u(%u)",
-                                field->result_type(), type));
-            context->supported= false;
-            break;
-          }
+      case Item::FIELD_ITEM: {
+        const Item_field *field_item = down_cast<const Item_field *>(item);
+        Field *field = field_item->field;
+        const enum_field_types type = field->real_type();
+
+        /* Check whether field is computed at MySQL layer */
+        if (field->is_virtual_gcol()) {
+          context->supported = false;
+          break;
+        }
+
+        DBUG_PRINT("info", ("FIELD_ITEM"));
+        DBUG_PRINT("info", ("table %s", field->table->alias));
+        DBUG_PRINT("info", ("column %s", field->field_name));
+        DBUG_PRINT("info", ("column length %u", field->field_length));
+        DBUG_PRINT("info", ("type %d", type));
+        DBUG_PRINT("info", ("result type %d", field->result_type()));
+
+        // Check that we are expecting a field with the correct
+        // type, and possibly being 'comparable' with a previous Field.
+        if (context->expecting(Item::FIELD_ITEM) &&
+            context->expecting_comparable_field(field) &&
+            // Bit fields not yet supported in scan filter
+            type != MYSQL_TYPE_BIT &&
+            /* Char(0) field is treated as Bit fields inside NDB
+               Hence not supported in scan filter */
+            (!(type == MYSQL_TYPE_STRING && field->pack_length() == 0)) &&
+            // No BLOB support in scan filter
+            type != MYSQL_TYPE_TINY_BLOB &&
+            type != MYSQL_TYPE_MEDIUM_BLOB &&
+            type != MYSQL_TYPE_LONG_BLOB && type != MYSQL_TYPE_BLOB &&
+            type != MYSQL_TYPE_JSON && type != MYSQL_TYPE_GEOMETRY) {
+          // Found a Field_item of a supported type, and from 'this' table
+          DBUG_ASSERT(context->table == field->table);
+
+          const NDBCOL *col =
+              context->ndb_table->getColumn(field->field_name);
+          DBUG_ASSERT(col);
+          ndb_item = new (*THR_MALLOC) Ndb_item(field, col->getColumnNo());
+
+          /*
+            Check, or set, further expectations for the operand(s).
+            For an operation taking multiple operands, the first operand
+            sets the requirement for the next to be compatible.
+            'expecting_*_field_result' is used to check if this is the
+            first operand or not: If there are no 'field_result'
+            expectations set yet, this is the first operand, and it is used
+            to set expectations for the next one(s).
+          */
+          if (!context->expecting_no_field_result()) {
+            // Have some result type expectations to check.
+            // Note that STRING and INT(Year) are always allowed
+            // to be used together with temporal data types.
+            if (!(context->expecting_field_result(field->result_type()) ||
+                  // Date and year can be written as string or int
+                  (is_supported_temporal_type(type) &&
+                   (context->expecting_field_result(STRING_RESULT) ||
+                    context->expecting_field_result(INT_RESULT))))) {
+              DBUG_PRINT("info",
+                         ("Was not expecting field of result_type %u(%u)",
+                          field->result_type(), type));
+              context->supported = false;
+              break;
+            }
+
+            // STRING results have to be checked for correct 'length' and
+            // collation, except if it is a result from a temporal data
+            // type.
+            if (field->result_type() == STRING_RESULT &&
+                !is_supported_temporal_type(type)) {
+              if (!context->expecting_max_length(field->field_length)) {
+                DBUG_PRINT("info", ("Found non-matching string length %s",
+                                    field->field_name));
+                context->supported = false;
+                break;
+              }
+              // Check that field and string constant collations are the
+              // same
+              if (!context->expecting_collation(
+                      item->collation.collation)) {
+                DBUG_PRINT("info", ("Found non-matching collation %s",
+                                    item->collation.collation->name));
+                context->supported = false;
+                break;
+              }
+            }
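The length and collation guards above are what keep a pushed string compare from diverging from mysqld's own comparison semantics. A hedged illustration of the rule (plain C++ with invented types; the real checks live in the handler's Ndb_expect_stack):

// Mirrors the intent of expecting_max_length()/expecting_collation():
// a string constant is only pushable when its collation matches the
// column's and its length fits the column. Illustrative sketch only.
#include <iostream>

struct Collation { const char *name; };

static bool pushable(const Collation *col_cs, unsigned col_len,
                     const Collation *val_cs, unsigned val_len) {
  return col_cs == val_cs && val_len <= col_len;
}

int main() {
  Collation utf8{"utf8mb4_0900_ai_ci"}, latin1{"latin1_swedish_ci"};
  std::cout << pushable(&utf8, 32, &utf8, 3) << '\n';    // 1: compatible
  std::cout << pushable(&utf8, 32, &latin1, 3) << '\n';  // 0: collation differs
  return 0;
}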
-          if (field->result_type() == STRING_RESULT &&
-              !is_supported_temporal_type(type))
-          {
-            if (!context->expecting_max_length(field->field_length))
-            {
-              DBUG_PRINT("info", ("Found non-matching string length %s",
-                                  field->field_name));
-              context->supported= false;
-              break;
-            }
-            // Check that field and string constant collations are the same
-            if (!context->expecting_collation(item->collation.collation))
-            {
-              DBUG_PRINT("info", ("Found non-matching collation %s",
-                                  item->collation.collation->name));
-              context->supported= false;
-              break;
-            }
-          }
-
-          // Seen expected arguments, expect another logical expression
-          context->expect_only(Item::FUNC_ITEM);
-          context->expect(Item::COND_ITEM);
-        }
-        else //is not 'expecting_field_result'
-        {
-          // This is the first operand, it decides expectations for
-          // the next operand, required to be compatible with this one.
-          if (is_supported_temporal_type(type))
-          {
-            context->expect_only(Item::STRING_ITEM);
-            context->expect(Item::INT_ITEM);
-          }
-          else
-          {
-            switch (field->result_type()) {
-            case STRING_RESULT:
-              // Expect char string or binary string
-              context->expect_only(Item::STRING_ITEM);
-              context->expect(Item::VARBIN_ITEM);
-              context->expect_collation(field_item->collation.collation);
-              context->expect_max_length(field->field_length);
-              break;
-            case REAL_RESULT:
-              context->expect_only(Item::REAL_ITEM);
-              context->expect(Item::DECIMAL_ITEM);
-              context->expect(Item::INT_ITEM);
-              break;
-            case INT_RESULT:
-              context->expect_only(Item::INT_ITEM);
-              context->expect(Item::VARBIN_ITEM);
-              break;
-            case DECIMAL_RESULT:
-              context->expect_only(Item::DECIMAL_ITEM);
-              context->expect(Item::REAL_ITEM);
-              context->expect(Item::INT_ITEM);
-              break;
-            default:
-              DBUG_ASSERT(false);
-              break;
-            }
-          }
-          const Ndb *ndb = get_thd_ndb(current_thd)->ndb;
-          if (ndbd_support_column_cmp(ndb->getMinDbNodeVersion()))
-          {
-            // Since WL#13120: Two columns may be compared in NdbScanFilter:
-            // -> Second argument can also be a FIELD_ITEM, referring
-            // another Field from this table. Need to ensure that these Fields
-            // are of identical type, length, precision etc.
-            context->expect(Item::FIELD_ITEM);
-            context->expect_comparable_field(field);
-          }
-          context->expect_field_result(field->result_type());
-        }
-      }
-      else
-      {
-        DBUG_PRINT("info", ("Was not expecting field of type %u(%u)",
-                            field->result_type(), type));
-        context->supported= false;
-      }
-      break;
-    }
+            // Seen expected arguments, expect another logical expression
+            context->expect_only(Item::FUNC_ITEM);
+            context->expect(Item::COND_ITEM);
+          } else  // is not 'expecting_field_result'
+          {
+            // This is the first operand, it decides expectations for
+            // the next operand, required to be compatible with this one.
+            if (is_supported_temporal_type(type)) {
+              context->expect_only(Item::STRING_ITEM);
+              context->expect(Item::INT_ITEM);
+            } else {
+              switch (field->result_type()) {
+                case STRING_RESULT:
+                  // Expect char string or binary string
+                  context->expect_only(Item::STRING_ITEM);
+                  context->expect(Item::VARBIN_ITEM);
+                  context->expect_collation(
+                      field_item->collation.collation);
+                  context->expect_max_length(field->field_length);
+                  break;
+                case REAL_RESULT:
+                  context->expect_only(Item::REAL_ITEM);
+                  context->expect(Item::DECIMAL_ITEM);
+                  context->expect(Item::INT_ITEM);
+                  break;
+                case INT_RESULT:
+                  context->expect_only(Item::INT_ITEM);
+                  context->expect(Item::VARBIN_ITEM);
+                  break;
+                case DECIMAL_RESULT:
+                  context->expect_only(Item::DECIMAL_ITEM);
+                  context->expect(Item::REAL_ITEM);
+                  context->expect(Item::INT_ITEM);
+                  break;
+                default:
+                  DBUG_ASSERT(false);
+                  break;
+              }
+            }
+            const Ndb *ndb = get_thd_ndb(current_thd)->ndb;
+            if (ndbd_support_column_cmp(ndb->getMinDbNodeVersion())) {
+              // Since WL#13120: Two columns may be compared in
+              // NdbScanFilter:
+              // -> Second argument can also be a FIELD_ITEM, referring
+              // another Field from this table. Need to ensure that these
+              // Fields are of identical type, length, precision etc.
+              context->expect(Item::FIELD_ITEM);
+              context->expect_comparable_field(field);
+            }
+            context->expect_field_result(field->result_type());
+          }
+        } else {
+          DBUG_PRINT("info", ("Was not expecting field of type %u(%u)",
+                              field->result_type(), type));
+          context->supported = false;
+        }
+        break;
+      }

-    case Item::FUNC_ITEM:
-    {
-      // Check that we expect a function here
-      if (!context->expecting(Item::FUNC_ITEM))
-      {
-        context->supported= false;
-        break;
-      }
-
-      context->expect_nothing();
-      context->expect_no_length();
-
-      const Item_func *func_item= static_cast<const Item_func*>(item);
-      switch (func_item->functype()) {
-      case Item_func::EQ_FUNC:
-      {
-        DBUG_PRINT("info", ("EQ_FUNC"));
-        ndb_item=
-          new (*THR_MALLOC) Ndb_item(func_item->functype(), func_item);
-        context->expect(Item::STRING_ITEM);
-        context->expect(Item::INT_ITEM);
-        context->expect(Item::REAL_ITEM);
-        context->expect(Item::DECIMAL_ITEM);
-        context->expect(Item::VARBIN_ITEM);
-        context->expect_field_from_table();
-        context->expect_no_field_result();
-        break;
-      }
-      case Item_func::NE_FUNC:
-      {
-        DBUG_PRINT("info", ("NE_FUNC"));
-        ndb_item=
-          new (*THR_MALLOC) Ndb_item(func_item->functype(), func_item);
-        context->expect(Item::STRING_ITEM);
-        context->expect(Item::INT_ITEM);
-        context->expect(Item::REAL_ITEM);
-        context->expect(Item::DECIMAL_ITEM);
-        context->expect(Item::VARBIN_ITEM);
-        context->expect_field_from_table();
-        context->expect_no_field_result();
-        break;
-      }
-      case Item_func::LT_FUNC:
-      {
-        DBUG_PRINT("info", ("LT_FUNC"));
-        ndb_item=
-          new (*THR_MALLOC) Ndb_item(func_item->functype(), func_item);
-        context->expect(Item::STRING_ITEM);
-        context->expect(Item::INT_ITEM);
-        context->expect(Item::REAL_ITEM);
-        context->expect(Item::DECIMAL_ITEM);
-        context->expect(Item::VARBIN_ITEM);
-        context->expect_field_from_table();
-        context->expect_no_field_result();
-        // Enum can only be compared by equality.
-        context->dont_expect_field_type(MYSQL_TYPE_ENUM);
-        break;
-      }
-      case Item_func::LE_FUNC:
-      {
-        DBUG_PRINT("info", ("LE_FUNC"));
-        ndb_item=
-          new (*THR_MALLOC) Ndb_item(func_item->functype(), func_item);
-        context->expect(Item::STRING_ITEM);
-        context->expect(Item::INT_ITEM);
-        context->expect(Item::REAL_ITEM);
-        context->expect(Item::DECIMAL_ITEM);
-        context->expect(Item::VARBIN_ITEM);
-        context->expect_field_from_table();
-        context->expect_no_field_result();
-        // Enum can only be compared by equality.
-        context->dont_expect_field_type(MYSQL_TYPE_ENUM);
-        break;
-      }
-      case Item_func::GE_FUNC:
-      {
-        DBUG_PRINT("info", ("GE_FUNC"));
-        ndb_item=
-          new (*THR_MALLOC) Ndb_item(func_item->functype(), func_item);
-        context->expect(Item::STRING_ITEM);
-        context->expect(Item::INT_ITEM);
-        context->expect(Item::REAL_ITEM);
-        context->expect(Item::DECIMAL_ITEM);
-        context->expect(Item::VARBIN_ITEM);
-        context->expect_field_from_table();
-        context->expect_no_field_result();
-        // Enum can only be compared by equality.
-        context->dont_expect_field_type(MYSQL_TYPE_ENUM);
-        break;
-      }
+      case Item::FUNC_ITEM: {
+        // Check that we expect a function here
+        if (!context->expecting(Item::FUNC_ITEM)) {
+          context->supported = false;
+          break;
+        }
+
+        context->expect_nothing();
+        context->expect_no_length();
+
+        const Item_func *func_item = static_cast<const Item_func *>(item);
+        switch (func_item->functype()) {
+          case Item_func::EQ_FUNC: {
+            DBUG_PRINT("info", ("EQ_FUNC"));
+            ndb_item = new (*THR_MALLOC)
+                Ndb_item(func_item->functype(), func_item);
+            context->expect(Item::STRING_ITEM);
+            context->expect(Item::INT_ITEM);
+            context->expect(Item::REAL_ITEM);
+            context->expect(Item::DECIMAL_ITEM);
+            context->expect(Item::VARBIN_ITEM);
+            context->expect_field_from_table();
+            context->expect_no_field_result();
+            break;
+          }
+          case Item_func::NE_FUNC: {
+            DBUG_PRINT("info", ("NE_FUNC"));
+            ndb_item = new (*THR_MALLOC)
+                Ndb_item(func_item->functype(), func_item);
+            context->expect(Item::STRING_ITEM);
+            context->expect(Item::INT_ITEM);
+            context->expect(Item::REAL_ITEM);
+            context->expect(Item::DECIMAL_ITEM);
+            context->expect(Item::VARBIN_ITEM);
+            context->expect_field_from_table();
+            context->expect_no_field_result();
+            break;
+          }
+          case Item_func::LT_FUNC: {
+            DBUG_PRINT("info", ("LT_FUNC"));
+            ndb_item = new (*THR_MALLOC)
+                Ndb_item(func_item->functype(), func_item);
+            context->expect(Item::STRING_ITEM);
+            context->expect(Item::INT_ITEM);
+            context->expect(Item::REAL_ITEM);
+            context->expect(Item::DECIMAL_ITEM);
+            context->expect(Item::VARBIN_ITEM);
+            context->expect_field_from_table();
+            context->expect_no_field_result();
+            // Enum can only be compared by equality.
+            context->dont_expect_field_type(MYSQL_TYPE_ENUM);
+            break;
+          }
+          case Item_func::LE_FUNC: {
+            DBUG_PRINT("info", ("LE_FUNC"));
+            ndb_item = new (*THR_MALLOC)
+                Ndb_item(func_item->functype(), func_item);
+            context->expect(Item::STRING_ITEM);
+            context->expect(Item::INT_ITEM);
+            context->expect(Item::REAL_ITEM);
+            context->expect(Item::DECIMAL_ITEM);
+            context->expect(Item::VARBIN_ITEM);
+            context->expect_field_from_table();
+            context->expect_no_field_result();
+            // Enum can only be compared by equality.
+            context->dont_expect_field_type(MYSQL_TYPE_ENUM);
+            break;
+          }
+          case Item_func::GE_FUNC: {
+            DBUG_PRINT("info", ("GE_FUNC"));
+            ndb_item = new (*THR_MALLOC)
+                Ndb_item(func_item->functype(), func_item);
+            context->expect(Item::STRING_ITEM);
+            context->expect(Item::INT_ITEM);
+            context->expect(Item::REAL_ITEM);
+            context->expect(Item::DECIMAL_ITEM);
+            context->expect(Item::VARBIN_ITEM);
+            context->expect_field_from_table();
+            context->expect_no_field_result();
+            // Enum can only be compared by equality.
+            context->dont_expect_field_type(MYSQL_TYPE_ENUM);
+            break;
+          }
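The comparison cases above register the operand kinds they accept; the ordering operators additionally ban ENUM columns, since only equality is considered safe to push for them. A sketch of that banned-type mechanism (illustrative names only, not the handler's real context class):

// Sketch of the dont_expect_field_type() idea used above: an operator case
// bans a field type, and the later FIELD_ITEM check refuses such a field.
#include <cassert>
#include <set>

enum FieldType { TYPE_LONG, TYPE_ENUM, TYPE_VARCHAR };

struct Ctx {
  std::set<FieldType> banned;
  void dont_expect_field_type(FieldType t) { banned.insert(t); }
  bool expecting_comparable_field(FieldType t) const {
    return banned.count(t) == 0;
  }
};

int main() {
  Ctx lt_ctx;                                // e.g. serializing "col < 3"
  lt_ctx.dont_expect_field_type(TYPE_ENUM);  // '<' not pushable for ENUM
  assert(lt_ctx.expecting_comparable_field(TYPE_LONG));
  assert(!lt_ctx.expecting_comparable_field(TYPE_ENUM));
  return 0;
}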
-      case Item_func::GT_FUNC:
-      {
-        DBUG_PRINT("info", ("GT_FUNC"));
-        ndb_item=
-          new (*THR_MALLOC) Ndb_item(func_item->functype(), func_item);
-        context->expect(Item::STRING_ITEM);
-        context->expect(Item::REAL_ITEM);
-        context->expect(Item::DECIMAL_ITEM);
-        context->expect(Item::INT_ITEM);
-        context->expect(Item::VARBIN_ITEM);
-        context->expect_field_from_table();
-        context->expect_no_field_result();
-        // Enum can only be compared by equality.
-        context->dont_expect_field_type(MYSQL_TYPE_ENUM);
-        break;
-      }
-      case Item_func::LIKE_FUNC:
-      {
-        Ndb_expect_stack* expect_next= new (*THR_MALLOC) Ndb_expect_stack();
-        DBUG_PRINT("info", ("LIKE_FUNC"));
-
-        const Item_func_like *like_func =
-          static_cast<const Item_func_like*>(func_item);
-        if (like_func->escape_was_used_in_parsing())
-        {
-          DBUG_PRINT("info", ("LIKE expressions with ESCAPE not supported"));
-          context->supported= false;
-        }
-        ndb_item=
-          new (*THR_MALLOC) Ndb_item(func_item->functype(), func_item);
-
-        /*
-          Ndb currently only supports pushing
-          <field> LIKE <string> | <func>
-          we thus push <string> | <func>
-          on the expect stack to catch that we
-          don't support <field> LIKE <field>.
-        */
-        context->expect_field_from_table();
-        context->expect_only_field_type(MYSQL_TYPE_STRING);
-        context->expect_field_type(MYSQL_TYPE_VAR_STRING);
-        context->expect_field_type(MYSQL_TYPE_VARCHAR);
-        context->expect_field_result(STRING_RESULT);
-        expect_next->expect(Item::STRING_ITEM);
-        expect_next->expect(Item::FUNC_ITEM);
-        context->expect_stack.push(expect_next);
-        pop= false;
-        break;
-      }
-      case Item_func::ISNULL_FUNC:
-      {
-        DBUG_PRINT("info", ("ISNULL_FUNC"));
-        ndb_item=
-          new (*THR_MALLOC) Ndb_item(func_item->functype(), func_item);
-        context->expect_field_from_table();
-        context->expect_field_result(STRING_RESULT);
-        context->expect_field_result(REAL_RESULT);
-        context->expect_field_result(INT_RESULT);
-        context->expect_field_result(DECIMAL_RESULT);
-        break;
-      }
-      case Item_func::ISNOTNULL_FUNC:
-      {
-        DBUG_PRINT("info", ("ISNOTNULL_FUNC"));
-        ndb_item=
-          new (*THR_MALLOC) Ndb_item(func_item->functype(), func_item);
-        context->expect_field_from_table();
-        context->expect_field_result(STRING_RESULT);
-        context->expect_field_result(REAL_RESULT);
-        context->expect_field_result(INT_RESULT);
-        context->expect_field_result(DECIMAL_RESULT);
-        break;
-      }
-      case Item_func::NOT_FUNC:
-      {
-        DBUG_PRINT("info", ("NOT_FUNC"));
-        ndb_item=
-          new (*THR_MALLOC) Ndb_item(func_item->functype(), func_item);
-        context->expect(Item::FUNC_ITEM);
-        context->expect(Item::COND_ITEM);
-        break;
-      }
-      case Item_func::BETWEEN:
-      {
-        DBUG_PRINT("info", ("BETWEEN, rewriting using AND"));
-        const Item_func_between *between_func =
-          static_cast<const Item_func_between*>(func_item);
-        Ndb_rewrite_context *rewrite_context=
-          new (*THR_MALLOC) Ndb_rewrite_context(func_item);
-        rewrite_context->next= context->rewrite_stack;
-        context->rewrite_stack= rewrite_context;
-        if (between_func->negated)
-        {
-          DBUG_PRINT("info", ("NOT_FUNC"));
-          context->items.push_back(
-            new (*THR_MALLOC) Ndb_item(Item_func::NOT_FUNC, 1));
-        }
-        DBUG_PRINT("info", ("COND_AND_FUNC"));
-        ndb_item=
-          new (*THR_MALLOC) Ndb_item(Item_func::COND_AND_FUNC,
-                                     func_item->argument_count() - 1);
-        // We do not 'expect' anything yet, added later as part of rewrite,
-        break;
-      }
-      case Item_func::IN_FUNC:
-      {
-        DBUG_PRINT("info", ("IN_FUNC, rewriting using OR"));
-        const Item_func_in *in_func =
-          static_cast<const Item_func_in*>(func_item);
-        Ndb_rewrite_context *rewrite_context=
-          new (*THR_MALLOC) Ndb_rewrite_context(func_item);
-        rewrite_context->next= context->rewrite_stack;
-        context->rewrite_stack= rewrite_context;
-        if (in_func->negated)
-        {
-          DBUG_PRINT("info", ("NOT_FUNC"));
-          context->items.push_back(
-            new (*THR_MALLOC) Ndb_item(Item_func::NOT_FUNC, 1));
-        }
-        DBUG_PRINT("info", ("COND_OR_FUNC"));
-        ndb_item= new (*THR_MALLOC) Ndb_item(
-          Item_func::COND_OR_FUNC, func_item->argument_count() - 1);
-        // We do not 'expect' anything yet, added later as part of rewrite,
-        break;
-      }
-      default:
-      {
-        DBUG_PRINT("info", ("Found func_item of type %d",
-                            func_item->functype()));
-        context->supported= false;
-      }
-      }
-      break;
-    }
+          case Item_func::GT_FUNC: {
+            DBUG_PRINT("info", ("GT_FUNC"));
+            ndb_item = new (*THR_MALLOC)
+                Ndb_item(func_item->functype(), func_item);
+            context->expect(Item::STRING_ITEM);
+            context->expect(Item::REAL_ITEM);
+            context->expect(Item::DECIMAL_ITEM);
+            context->expect(Item::INT_ITEM);
+            context->expect(Item::VARBIN_ITEM);
+            context->expect_field_from_table();
+            context->expect_no_field_result();
+            // Enum can only be compared by equality.
+            context->dont_expect_field_type(MYSQL_TYPE_ENUM);
+            break;
+          }
+          case Item_func::LIKE_FUNC: {
+            Ndb_expect_stack *expect_next =
+                new (*THR_MALLOC) Ndb_expect_stack();
+            DBUG_PRINT("info", ("LIKE_FUNC"));
+
+            const Item_func_like *like_func =
+                static_cast<const Item_func_like *>(func_item);
+            if (like_func->escape_was_used_in_parsing()) {
+              DBUG_PRINT("info",
+                         ("LIKE expressions with ESCAPE not supported"));
+              context->supported = false;
+            }
+            ndb_item = new (*THR_MALLOC)
+                Ndb_item(func_item->functype(), func_item);
+
+            /*
+              Ndb currently only supports pushing
+              <field> LIKE <string> | <func>
+              we thus push <string> | <func>
+              on the expect stack to catch that we
+              don't support <field> LIKE <field>.
+            */
+            context->expect_field_from_table();
+            context->expect_only_field_type(MYSQL_TYPE_STRING);
+            context->expect_field_type(MYSQL_TYPE_VAR_STRING);
+            context->expect_field_type(MYSQL_TYPE_VARCHAR);
+            context->expect_field_result(STRING_RESULT);
+            expect_next->expect(Item::STRING_ITEM);
+            expect_next->expect(Item::FUNC_ITEM);
+            context->expect_stack.push(expect_next);
+            pop = false;
+            break;
+          }
+          case Item_func::ISNULL_FUNC: {
+            DBUG_PRINT("info", ("ISNULL_FUNC"));
+            ndb_item = new (*THR_MALLOC)
+                Ndb_item(func_item->functype(), func_item);
+            context->expect_field_from_table();
+            context->expect_field_result(STRING_RESULT);
+            context->expect_field_result(REAL_RESULT);
+            context->expect_field_result(INT_RESULT);
+            context->expect_field_result(DECIMAL_RESULT);
+            break;
+          }
+          case Item_func::ISNOTNULL_FUNC: {
+            DBUG_PRINT("info", ("ISNOTNULL_FUNC"));
+            ndb_item = new (*THR_MALLOC)
+                Ndb_item(func_item->functype(), func_item);
+            context->expect_field_from_table();
+            context->expect_field_result(STRING_RESULT);
+            context->expect_field_result(REAL_RESULT);
+            context->expect_field_result(INT_RESULT);
+            context->expect_field_result(DECIMAL_RESULT);
+            break;
+          }
+          case Item_func::NOT_FUNC: {
+            DBUG_PRINT("info", ("NOT_FUNC"));
+            ndb_item = new (*THR_MALLOC)
+                Ndb_item(func_item->functype(), func_item);
+            context->expect(Item::FUNC_ITEM);
+            context->expect(Item::COND_ITEM);
+            break;
+          }
+          case Item_func::BETWEEN: {
+            DBUG_PRINT("info", ("BETWEEN, rewriting using AND"));
+            const Item_func_between *between_func =
+                static_cast<const Item_func_between *>(func_item);
+            Ndb_rewrite_context *rewrite_context =
+                new (*THR_MALLOC) Ndb_rewrite_context(func_item);
+            rewrite_context->next = context->rewrite_stack;
+            context->rewrite_stack = rewrite_context;
+            if (between_func->negated) {
+              DBUG_PRINT("info", ("NOT_FUNC"));
+              context->items.push_back(
+                  new (*THR_MALLOC) Ndb_item(Item_func::NOT_FUNC, 1));
+            }
+            DBUG_PRINT("info", ("COND_AND_FUNC"));
+            ndb_item = new (*THR_MALLOC) Ndb_item(
+                Item_func::COND_AND_FUNC, func_item->argument_count() - 1);
+            // We do not 'expect' anything yet, added later as part of
+            // rewrite.
+            break;
+          }
+          case Item_func::IN_FUNC: {
+            DBUG_PRINT("info", ("IN_FUNC, rewriting using OR"));
+            const Item_func_in *in_func =
+                static_cast<const Item_func_in *>(func_item);
+            Ndb_rewrite_context *rewrite_context =
+                new (*THR_MALLOC) Ndb_rewrite_context(func_item);
+            rewrite_context->next = context->rewrite_stack;
+            context->rewrite_stack = rewrite_context;
+            if (in_func->negated) {
+              DBUG_PRINT("info", ("NOT_FUNC"));
+              context->items.push_back(
+                  new (*THR_MALLOC) Ndb_item(Item_func::NOT_FUNC, 1));
+            }
+            DBUG_PRINT("info", ("COND_OR_FUNC"));
+            ndb_item = new (*THR_MALLOC) Ndb_item(
+                Item_func::COND_OR_FUNC, func_item->argument_count() - 1);
+            // We do not 'expect' anything yet, added later as part of
+            // rewrite.
+            break;
+          }
+          default: {
+            DBUG_PRINT("info",
+                       ("Found func_item of type %d", func_item->functype()));
+            context->supported = false;
+          }
+        }
+        break;
+      }
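As the two cases above show, BETWEEN and IN have no direct NdbScanFilter operation, so they are rewritten into AND/OR chains of simple comparisons before code generation, with a NOT prefix for the negated forms: roughly, `a BETWEEN x AND y` behaves like `a >= x AND a <= y`, and `a IN (1,2,3)` like `a = 1 OR a = 2 OR a = 3`. A toy model of the IN expansion (not the handler's real rewrite code):

// Rough model of the rewrite performed above -- illustrative only.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

std::string rewrite_in(const std::string &col, const std::vector<int> &vals,
                       bool negated) {
  std::ostringstream out;
  if (negated) out << "NOT (";
  for (size_t i = 0; i < vals.size(); i++) {
    if (i) out << " OR ";
    out << col << " = " << vals[i];
  }
  if (negated) out << ")";
  return out.str();
}

int main() {
  // "a IN (1,2,3)" -> "a = 1 OR a = 2 OR a = 3"
  std::cout << rewrite_in("a", {1, 2, 3}, false) << "\n";
  return 0;
}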
-    case Item::COND_ITEM:
-    {
-      const Item_cond *cond_item = static_cast<const Item_cond*>(item);
-      if (context->expecting(Item::COND_ITEM))
-      {
-        switch (cond_item->functype()) {
-        case Item_func::COND_AND_FUNC:
-          DBUG_PRINT("info", ("COND_AND_FUNC"));
-          ndb_item= new (*THR_MALLOC) Ndb_item(cond_item->functype(),
-                                               cond_item);
-          break;
-        case Item_func::COND_OR_FUNC:
-          DBUG_PRINT("info", ("COND_OR_FUNC"));
-          ndb_item= new (*THR_MALLOC) Ndb_item(cond_item->functype(),
-                                               cond_item);
-          break;
-        default:
-          DBUG_PRINT("info", ("COND_ITEM %d", cond_item->functype()));
-          context->supported= false;
-          break;
-        }
-      }
-      else
-      {
-        /* Did not expect condition */
-        context->supported= false;
-      }
-      break;
-    }
-    case Item::STRING_ITEM:
-    case Item::INT_ITEM:
-    case Item::REAL_ITEM:
-    case Item::VARBIN_ITEM:
-    case Item::DECIMAL_ITEM:
-    case Item::CACHE_ITEM:
-      DBUG_ASSERT(false);  // Expression folded under 'used_tables'
-      // Fall through
-    default:
-      DBUG_PRINT("info", ("Found unsupported item of type %d",
-                          item->type()));
-      context->supported= false;
-    }
-    if (pop)
-      context->expect_stack.pop();
+      case Item::COND_ITEM: {
+        const Item_cond *cond_item = static_cast<const Item_cond *>(item);
+        if (context->expecting(Item::COND_ITEM)) {
+          switch (cond_item->functype()) {
+            case Item_func::COND_AND_FUNC:
+              DBUG_PRINT("info", ("COND_AND_FUNC"));
+              ndb_item = new (*THR_MALLOC)
+                  Ndb_item(cond_item->functype(), cond_item);
+              break;
+            case Item_func::COND_OR_FUNC:
+              DBUG_PRINT("info", ("COND_OR_FUNC"));
+              ndb_item = new (*THR_MALLOC)
+                  Ndb_item(cond_item->functype(), cond_item);
+              break;
+            default:
+              DBUG_PRINT("info", ("COND_ITEM %d", cond_item->functype()));
+              context->supported = false;
+              break;
+          }
+        } else {
+          /* Did not expect condition */
+          context->supported = false;
+        }
+        break;
+      }
+      case Item::STRING_ITEM:
+      case Item::INT_ITEM:
+      case Item::REAL_ITEM:
+      case Item::VARBIN_ITEM:
+      case Item::DECIMAL_ITEM:
+      case Item::CACHE_ITEM:
+        DBUG_ASSERT(false);  // Expression folded under 'used_tables'
+        // Fall through
+      default:
+        DBUG_PRINT("info",
+                   ("Found unsupported item of type %d", item->type()));
+        context->supported = false;
+    }
+    if (pop) context->expect_stack.pop();
   }
-  if (context->supported)
-  {
+  if (context->supported) {
     DBUG_ASSERT(ndb_item != nullptr);
     context->items.push_back(ndb_item);
   }
@@ -1418,25 +1244,15 @@ ndb_serialize_cond(const Item *item, void *arg)
   DBUG_VOID_RETURN;
 }

-
 ha_ndbcluster_cond::ha_ndbcluster_cond()
-  : m_ndb_cond(), m_scan_filter_code(nullptr), m_unpushed_cond(nullptr)
-{
-}
-
-
-ha_ndbcluster_cond::~ha_ndbcluster_cond()
-{
-  m_ndb_cond.destroy_elements();
-}
+    : m_ndb_cond(), m_scan_filter_code(nullptr), m_unpushed_cond(nullptr) {}

+ha_ndbcluster_cond::~ha_ndbcluster_cond() { m_ndb_cond.destroy_elements(); }

 /*
   Clear the condition stack
 */
-void
-ha_ndbcluster_cond::cond_clear()
-{
+void ha_ndbcluster_cond::cond_clear() {
   DBUG_ENTER("cond_clear");
   m_ndb_cond.destroy_elements();
   m_scan_filter_code.reset();
@@ -1444,7 +1260,6 @@ ha_ndbcluster_cond::cond_clear()
   DBUG_VOID_RETURN;
 }

-
 /**
   Construct the AND conjunction of the pushed- and remainder
   predicate terms. If the original condition was either
@@ -1459,20 +1274,16 @@ ha_ndbcluster_cond::cond_clear()

   @return '1' in case of failure, else '0'.
 */
-static int
-create_and_conditions(Item_cond *cond,
-                      List<Item> pushed_list, List<Item> remainder_list,
-                      Item *&pushed_cond, Item *&remainder_cond)
-{
-  if (remainder_list.is_empty())
-  {
+static int create_and_conditions(Item_cond *cond, List<Item> pushed_list,
+                                 List<Item> remainder_list, Item *&pushed_cond,
+                                 Item *&remainder_cond) {
+  if (remainder_list.is_empty()) {
     // Entire cond pushed, no remainder
     pushed_cond = cond;
     remainder_cond = nullptr;
     return 0;
   }
-  if (pushed_list.is_empty())
-  {
+  if (pushed_list.is_empty()) {
     // Nothing pushed, entire 'cond' is remainder
     pushed_cond = nullptr;
     remainder_cond = cond;
@@ -1480,32 +1291,24 @@ create_and_conditions(Item_cond *cond,
   }

   // Condition was partly pushed, with some remainder
-  if (pushed_list.elements == 1)
-  {
+  if (pushed_list.elements == 1) {
     // Single boolean term pushed, return it
     pushed_cond = pushed_list.head();
-  }
-  else
-  {
+  } else {
     // Construct an AND'ed condition of pushed boolean terms
     pushed_cond = new Item_cond_and(pushed_list);
-    if (unlikely(pushed_cond == nullptr))
-      return 1;
+    if (unlikely(pushed_cond == nullptr)) return 1;
     pushed_cond->quick_fix_field();
     pushed_cond->update_used_tables();
   }

-  if (remainder_list.elements == 1)
-  {
+  if (remainder_list.elements == 1) {
     // A single boolean term as remainder, return it
     remainder_cond = remainder_list.head();
-  }
-  else
-  {
+  } else {
     // Construct a remainder as an AND'ed condition of the boolean terms
     remainder_cond = new Item_cond_and(remainder_list);
-    if (unlikely(remainder_cond == nullptr))
-      return 1;
+    if (unlikely(remainder_cond == nullptr)) return 1;
     remainder_cond->quick_fix_field();
     remainder_cond->update_used_tables();
   }
@@ -1536,29 +1339,23 @@ create_and_conditions(Item_cond *cond,

   @return '1' in case of failure, else '0'.
 */
-static int
-create_or_conditions(Item_cond *cond,
-                     List<Item> pushed_list, List<Item> remainder_list,
-                     Item *&pushed_cond, Item *&remainder_cond)
-{
+static int create_or_conditions(Item_cond *cond, List<Item> pushed_list,
+                                List<Item> remainder_list, Item *&pushed_cond,
+                                Item *&remainder_cond) {
   DBUG_ASSERT(pushed_list.elements == cond->argument_list()->elements);

-  if (remainder_list.is_empty())
-  {
+  if (remainder_list.is_empty()) {
     // Entire cond pushed, no remainder
     pushed_cond = cond;
     remainder_cond = nullptr;
-  }
-  else
-  {
+  } else {
     // When condition was partially pushed, we need to reevaluate
     // original OR-cond on the server side:
     remainder_cond = cond;

     // Construct an OR'ed condition of pushed boolean terms
     pushed_cond = new Item_cond_or(pushed_list);
-    if (unlikely(pushed_cond == nullptr))
-      return 1;
+    if (unlikely(pushed_cond == nullptr)) return 1;
     pushed_cond->quick_fix_field();
     pushed_cond->update_used_tables();
   }
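The asymmetry between the two helpers above is worth spelling out: under AND, any subset of the boolean terms may be pushed and the leftovers re-checked by the server, while under OR a partly-pushed condition still forces the whole original OR to be re-evaluated server-side (a pushed subset alone could wrongly filter rows that only satisfy an unpushed term). A small model of the AND split, with hypothetical types:

// Pushable terms become the pushed condition; the rest stays server-side.
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct Split {
  std::vector<std::string> pushed, remainder;
};

Split split_and(const std::vector<std::pair<std::string, bool>> &terms) {
  Split s;
  for (const auto &t : terms)
    (t.second ? s.pushed : s.remainder).push_back(t.first);
  return s;
}

int main() {
  // WHERE a = 1 AND sha2(b,256) = '...' : only "a = 1" can be pushed.
  Split s = split_and({{"a = 1", true}, {"sha2(b,256) = '...'", false}});
  std::cout << "pushed: " << s.pushed[0]
            << ", remainder: " << s.remainder[0] << "\n";
  return 0;
}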
@@ -1582,82 +1379,69 @@ create_or_conditions(Item_cond *cond,
   @return a List of Ndb_item objects representing the serialized
           form of the 'pushed_cond'.
 */
-static List<const Ndb_item>
-cond_push_boolean_term(Item *term,
-                       TABLE *table, const NDBTAB *ndb_table,
-                       bool other_tbls_ok,
-                       Item *&pushed_cond, Item *&remainder_cond)
+static List<const Ndb_item> cond_push_boolean_term(Item *term, TABLE *table,
+                                                   const NDBTAB *ndb_table,
+                                                   bool other_tbls_ok,
+                                                   Item *&pushed_cond,
+                                                   Item *&remainder_cond)
 {
   DBUG_ENTER("ha_ndbcluster::cond_push_boolean_term");
   static const List<const Ndb_item> empty_list;

-  if (term->type() == Item::COND_ITEM)
-  {
+  if (term->type() == Item::COND_ITEM) {
     // Build lists of the boolean terms either 'pushed', or being a 'remainder'
     List<Item> pushed_list;
     List<Item> remainder_list;
     List<const Ndb_item> code;

-    Item_cond *cond = (Item_cond *) term;
-    if (cond->functype() == Item_func::COND_AND_FUNC)
-    {
+    Item_cond *cond = (Item_cond *)term;
+    if (cond->functype() == Item_func::COND_AND_FUNC) {
       DBUG_PRINT("info", ("COND_AND_FUNC"));

       List_iterator<Item> li(*cond->argument_list());
       Item *boolean_term;
-      while ((boolean_term = li++))
-      {
+      while ((boolean_term = li++)) {
         Item *pushed = nullptr, *remainder = nullptr;
-        List<const Ndb_item> code_stub =
-          cond_push_boolean_term(boolean_term, table, ndb_table,
-                                 other_tbls_ok, pushed, remainder);
+        List<const Ndb_item> code_stub = cond_push_boolean_term(
+            boolean_term, table, ndb_table, other_tbls_ok, pushed, remainder);

         // Collect all bits we pushed, and its leftovers.
-        if (!code_stub.is_empty())
-          code.concat(&code_stub);
-        if (pushed != nullptr)
-          pushed_list.push_back(pushed);
-        if (remainder != nullptr)
-          remainder_list.push_back(remainder);
+        if (!code_stub.is_empty()) code.concat(&code_stub);
+        if (pushed != nullptr) pushed_list.push_back(pushed);
+        if (remainder != nullptr) remainder_list.push_back(remainder);
       }

       // Transform the list of pushed and the remainder conditions
       // into its respective AND'ed conditions.
-      if (create_and_conditions(cond, pushed_list, remainder_list,
-                                pushed_cond, remainder_cond))
-      {
-        //Failed, discard pushed conditions and generated code.
+      if (create_and_conditions(cond, pushed_list, remainder_list, pushed_cond,
+                                remainder_cond)) {
+        // Failed, discard pushed conditions and generated code.
         pushed_cond = nullptr;
         remainder_cond = cond;
         code.destroy_elements();
         DBUG_RETURN(empty_list);
       }
       // Serialized code has to be embedded in an AND-group
-      if (!code.is_empty())
-      {
-        code.push_front(new (*THR_MALLOC) Ndb_item(Item_func::COND_AND_FUNC, cond));
-        code.push_back (new (*THR_MALLOC) Ndb_item(NDB_END_COND));
+      if (!code.is_empty()) {
+        code.push_front(new (*THR_MALLOC)
+                            Ndb_item(Item_func::COND_AND_FUNC, cond));
+        code.push_back(new (*THR_MALLOC) Ndb_item(NDB_END_COND));
       }
       DBUG_PRINT("info", ("COND_AND_FUNC, end"));
-    }
-    else
-    {
+    } else {
       DBUG_ASSERT(cond->functype() == Item_func::COND_OR_FUNC);
       DBUG_PRINT("info", ("COND_OR_FUNC"));

       List_iterator<Item> li(*cond->argument_list());
       Item *boolean_term;
-      while ((boolean_term = li++))
-      {
+      while ((boolean_term = li++)) {
         Item *pushed = nullptr, *remainder = nullptr;
-        List<const Ndb_item> code_stub =
-          cond_push_boolean_term(boolean_term, table, ndb_table,
-                                 other_tbls_ok, pushed, remainder);
+        List<const Ndb_item> code_stub = cond_push_boolean_term(
+            boolean_term, table, ndb_table, other_tbls_ok, pushed, remainder);

-        if (pushed == nullptr)
-        {
-          //Failure of pushing one of the OR-terms fails entire OR'ed cond
+        if (pushed == nullptr) {
+          // Failure of pushing one of the OR-terms fails entire OR'ed cond
           //(Else the rows matching that term would be missing in result set)
           // Also see comments in create_or_conditions().
           pushed_cond = nullptr;
           remainder_cond = cond;
           code.destroy_elements();
           DBUG_RETURN(empty_list);
         }

         // Collect all bits we pushed, and its leftovers.
-        if (!code_stub.is_empty())
-          code.concat(&code_stub);
-        if (pushed != nullptr)
-          pushed_list.push_back(pushed);
-        if (remainder != nullptr)
-          remainder_list.push_back(remainder);
+        if (!code_stub.is_empty()) code.concat(&code_stub);
+        if (pushed != nullptr) pushed_list.push_back(pushed);
+        if (remainder != nullptr) remainder_list.push_back(remainder);
       }

       // Transform the list of pushed and the remainder conditions
       // into its respective OR'ed conditions.
-      if (create_or_conditions(cond, pushed_list, remainder_list,
-                               pushed_cond, remainder_cond))
-      {
-        //Failed, discard pushed conditions and generated code.
+      if (create_or_conditions(cond, pushed_list, remainder_list, pushed_cond,
+                               remainder_cond)) {
+        // Failed, discard pushed conditions and generated code.
         pushed_cond = nullptr;
         remainder_cond = cond;
         code.destroy_elements();
         DBUG_RETURN(empty_list);
       }
       // Serialized code has to be embedded in an OR-group
-      if (!code.is_empty())
-      {
-        code.push_front(new (*THR_MALLOC) Ndb_item(Item_func::COND_OR_FUNC, cond));
-        code.push_back (new (*THR_MALLOC) Ndb_item(NDB_END_COND));
+      if (!code.is_empty()) {
+        code.push_front(new (*THR_MALLOC)
+                            Ndb_item(Item_func::COND_OR_FUNC, cond));
+        code.push_back(new (*THR_MALLOC) Ndb_item(NDB_END_COND));
       }
       DBUG_PRINT("info", ("COND_OR_FUNC, end"));
     }
     DBUG_RETURN(code);
-  }
-  else if (term->type() == Item::FUNC_ITEM)
-  {
+  } else if (term->type() == Item::FUNC_ITEM) {
     const Item_func *item_func = static_cast<const Item_func *>(term);
-    if (item_func->functype() == Item_func::TRIG_COND_FUNC)
-    {
+    if (item_func->functype() == Item_func::TRIG_COND_FUNC) {
       const Item_func_trig_cond *func_trig =
-        static_cast<const Item_func_trig_cond*>(item_func);
+          static_cast<const Item_func_trig_cond *>(item_func);

-      if (func_trig->get_trig_type() == Item_func_trig_cond::IS_NOT_NULL_COMPL)
-      {
+      if (func_trig->get_trig_type() ==
+          Item_func_trig_cond::IS_NOT_NULL_COMPL) {
         DBUG_ASSERT(item_func->argument_count() == 1);
         Item *cond_arg = item_func->arguments()[0];
         Item *remainder = nullptr;
-        List<const Ndb_item> code =
-          cond_push_boolean_term(cond_arg, table, ndb_table,
-                                 other_tbls_ok, pushed_cond, remainder);
-        if (remainder != nullptr)
-        {
+        List<const Ndb_item> code = cond_push_boolean_term(
+            cond_arg, table, ndb_table, other_tbls_ok, pushed_cond, remainder);
+        if (remainder != nullptr) {
           item_func->arguments()[0] = remainder;
           remainder_cond = term;
         }
@@ -1726,14 +1501,12 @@ cond_push_boolean_term(Item *term,
     *_TABLE_BIT. These can not be referred from a pushed condition.
   */
   const table_map dont_use_tables =
-    INNER_TABLE_BIT |  // Condition contain a subquery
-    RAND_TABLE_BIT;    // 'non-stable' value
+      INNER_TABLE_BIT |  // Condition contains a subquery
+      RAND_TABLE_BIT;    // 'non-stable' value

-  if (term->used_tables() & dont_use_tables)
-  {}
-  else if (other_tbls_ok ||
-           !(term->used_tables() & ~table->pos_in_table_list->map()))
-  {
+  if (term->used_tables() & dont_use_tables) {
+  } else if (other_tbls_ok ||
+             !(term->used_tables() & ~table->pos_in_table_list->map())) {
     // Has broken down the condition into predicate terms, or sub conditions,
     // which either has to be accepted or rejected for pushdown
     Ndb_cond_traverse_context context(table, ndb_table);
@@ -1741,7 +1514,7 @@ cond_push_boolean_term(Item *term,
     context.expect(Item::COND_ITEM);
     term->traverse_cond(&ndb_serialize_cond, &context, Item::PREFIX);

-    if (context.supported) // 'term' was pushed
+    if (context.supported)  // 'term' was pushed
     {
       pushed_cond = term;
       remainder_cond = nullptr;
@@ -1753,33 +1526,29 @@ cond_push_boolean_term(Item *term,
   // Failed to push
   pushed_cond = nullptr;
   remainder_cond = term;
-  DBUG_RETURN(empty_list);  //Discard any generated Ndb_cond's
+  DBUG_RETURN(empty_list);  // Discard any generated Ndb_cond's
 }

-
 /*
   Push a condition, return any remainder condition
  */
-const Item*
-ha_ndbcluster_cond::cond_push(const Item *cond,
-                              TABLE *table, const NDBTAB *ndb_table,
-                              bool other_tbls_ok,
-                              Item *&pushed_cond)
-{
+const Item *ha_ndbcluster_cond::cond_push(const Item *cond, TABLE *table,
                                          const NDBTAB *ndb_table,
+                                          bool other_tbls_ok,
+                                          Item *&pushed_cond) {
   DBUG_ENTER("ha_ndbcluster_cond::cond_push");

   // Build lists of the boolean terms either 'pushed', or being a 'remainder'
-  Item *item= const_cast<Item*>(cond);
+  Item *item = const_cast<Item *>(cond);
   Item *remainder = nullptr;
-  List<const Ndb_item> code =
-    cond_push_boolean_term(item, table, ndb_table, other_tbls_ok, pushed_cond,
-                           remainder);
+  List<const Ndb_item> code = cond_push_boolean_term(
+      item, table, ndb_table, other_tbls_ok, pushed_cond, remainder);

   // Save the serialized representation of the code
   m_ndb_cond = code;

   if (pushed_cond != nullptr &&
-      !(pushed_cond->used_tables() & ~table->pos_in_table_list->map()))
-  {
+      !(pushed_cond->used_tables() & ~table->pos_in_table_list->map())) {
     /**
      * pushed_cond had no dependencies outside of this 'table'.
      * Code for pushed condition can be generated now, and reused
@@ -1787,16 +1556,13 @@ ha_ndbcluster_cond::cond_push(const Item *cond,
      */
     NdbInterpretedCode code(ndb_table);
     NdbScanFilter filter(&code);
-    const int ret= generate_scan_filter_from_cond(filter);
-    if (unlikely(ret != 0))
-    {
+    const int ret = generate_scan_filter_from_cond(filter);
+    if (unlikely(ret != 0)) {
       // Failed to 'generate' the pushed code.
       pushed_cond = nullptr;
       m_ndb_cond.destroy_elements();
       remainder = item;
-    }
-    else
-    {
+    } else {
       // Success, save the generated code.
       DBUG_ASSERT(code.getWordsUsed() > 0);
       m_scan_filter_code.copy(code);
     }
   }
   DBUG_RETURN(remainder);
 }
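For context, this is roughly what the generated program amounts to for a pushed `a = 10 AND b IS NOT NULL`, written against the public NdbScanFilter API (column numbers invented, error handling condensed; consult the NDB API documentation for the authoritative signatures):

// Condensed sketch of what generate_scan_filter_from_cond() builds.
#include <NdbApi.hpp>

int make_filter(const NdbDictionary::Table *tab) {
  NdbInterpretedCode code(tab);
  NdbScanFilter filter(&code);
  Uint32 val = 10;
  if (filter.begin(NdbScanFilter::AND) == -1) return -1;
  // Compare column 0 ("a", assumed) against the constant 10
  if (filter.cmp(NdbScanFilter::COND_EQ, /*colno a=*/0, &val, sizeof(val)) ==
      -1)
    return -1;
  // Require column 1 ("b", assumed) to be non-NULL
  if (filter.isnotnull(/*colno b=*/1) == -1) return -1;
  if (filter.end() == -1) return -1;
  return 0;  // 'code' now holds the interpreted program for the scan
}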
@@ -1805,456 +1571,395 @@ ha_ndbcluster_cond::cond_push(const Item *cond,
-int
-ha_ndbcluster_cond::build_scan_filter_predicate(List_iterator<const Ndb_item> &cond,
-                                                NdbScanFilter *filter,
-                                                bool negated) const
-{
-  DBUG_ENTER("build_scan_filter_predicate");
+int ha_ndbcluster_cond::build_scan_filter_predicate(
+    List_iterator<const Ndb_item> &cond, NdbScanFilter *filter,
+    bool negated) const {
+  DBUG_ENTER("build_scan_filter_predicate");
   const Ndb_item *ndb_item = *cond.ref();
   switch (ndb_item->type) {
-  case NDB_FUNCTION:
-  {
-    const Ndb_item *b, *field1, *field2 = nullptr, *value = nullptr;
-    const Ndb_item *a = cond++;
-    if (a == nullptr)
-      break;
-
-    enum ndb_func_type function_type = (negated)
-      ? Ndb_item::negate(ndb_item->get_func_type())
-      : ndb_item->get_func_type();
-
-    switch (ndb_item->get_argument_count()) {
-    case 1:
-      field1 = (a->type == NDB_FIELD)? a : NULL;
-      break;
-    case 2:
-      b = cond++;
-      if (b == nullptr)
-      {
-        field1 = nullptr;
-        break;
-      }
-      if (a->type == NDB_FIELD)
-      {
-        field1 = a;
-        if (b->type == NDB_VALUE)
-          value = b;
-        else if (b->type == NDB_FIELD)
-          field2 = b;
-      }
-      else
-      {
-        DBUG_ASSERT(a->type == NDB_VALUE);
-        DBUG_ASSERT(b->type == NDB_FIELD);
-        field1 = b;
-        value = a;
-      }
-      if (value == a)
-        function_type = Ndb_item::swap(function_type);
-      break;
-    default:
-      DBUG_PRINT("info", ("condition had unexpected number of arguments"));
-      DBUG_RETURN(1);
-    }
-    if (field1 == nullptr)
-    {
-      DBUG_PRINT("info", ("condition missing 'field' argument"));
-      DBUG_RETURN(1);
-    }
+    case NDB_FUNCTION: {
+      const Ndb_item *b, *field1, *field2 = nullptr, *value = nullptr;
+      const Ndb_item *a = cond++;
+      if (a == nullptr) break;
+
+      enum ndb_func_type function_type =
+          (negated) ? Ndb_item::negate(ndb_item->get_func_type())
+                    : ndb_item->get_func_type();
+
+      switch (ndb_item->get_argument_count()) {
+        case 1:
+          field1 = (a->type == NDB_FIELD) ? a : NULL;
+          break;
+        case 2:
+          b = cond++;
+          if (b == nullptr) {
+            field1 = nullptr;
+            break;
+          }
+          if (a->type == NDB_FIELD) {
+            field1 = a;
+            if (b->type == NDB_VALUE)
+              value = b;
+            else if (b->type == NDB_FIELD)
+              field2 = b;
+          } else {
+            DBUG_ASSERT(a->type == NDB_VALUE);
+            DBUG_ASSERT(b->type == NDB_FIELD);
+            field1 = b;
+            value = a;
+          }
+          if (value == a) function_type = Ndb_item::swap(function_type);
+          break;
+        default:
+          DBUG_PRINT("info", ("condition had unexpected number of arguments"));
+          DBUG_RETURN(1);
+      }
+      if (field1 == nullptr) {
+        DBUG_PRINT("info", ("condition missing 'field' argument"));
+        DBUG_RETURN(1);
+      }

-    if (value != nullptr)
-    {
-      const Item *item = value->get_item();
-#ifndef DBUG_OFF
-      if (!item->basic_const_item())
-      {
-        String expr;
-        String buf, *val = const_cast<Item*>(item)->val_str(&buf);
-        item->print(current_thd, &expr, QT_ORDINARY);
-        DBUG_PRINT("info", ("Value evaluated to: '%s', expression '%s'",
-                            val ? val->c_ptr_safe() : "NULL",
-                            expr.c_ptr_safe()));
-      }
-#endif
-      /*
-        The NdbInterpreter handles a NULL value as being less than any
-        non-NULL value. However, MySQL server (and SQL std spec) specifies
-        that a NULL-value in a comparison predicate should result in an
-        UNKNOWN boolean result, which is 'not TRUE' -> the row being eliminated.
-
-        Thus, extra checks for both 'field' and 'value' being a
-        NULL-value has to be added to mitigate this semantic difference.
-      */
-      if (const_cast<Item*>(item)->is_null())
-      {
-        /*
-          'value' known to be a NULL-value.
-          Condition will be 'not TRUE' -> false, independent of the 'field'
-          value. Encapsulate in own group, as only this predicate become
-          'false', not entire group it is part of.
-        */
-        if (filter->begin() == -1 ||
-            filter->isfalse() == -1 ||
-            filter->end() == -1)
-          DBUG_RETURN(1);
-        DBUG_RETURN(0);
-      }
-    }
+      if (value != nullptr) {
+        const Item *item = value->get_item();
+#ifndef DBUG_OFF
+        if (!item->basic_const_item()) {
+          String expr;
+          String buf, *val = const_cast<Item *>(item)->val_str(&buf);
+          item->print(current_thd, &expr, QT_ORDINARY);
+          DBUG_PRINT("info",
+                     ("Value evaluated to: '%s', expression '%s'",
+                      val ? val->c_ptr_safe() : "NULL", expr.c_ptr_safe()));
+        }
+#endif
+        /*
+          The NdbInterpreter handles a NULL value as being less than any
+          non-NULL value. However, MySQL server (and SQL std spec) specifies
+          that a NULL-value in a comparison predicate should result in an
+          UNKNOWN boolean result, which is 'not TRUE' -> the row being
+          eliminated.
+
+          Thus, extra checks for both 'field' and 'value' being a
+          NULL-value have to be added to mitigate this semantic difference.
+        */
+        if (const_cast<Item *>(item)->is_null()) {
+          /*
+            'value' known to be a NULL-value.
+            Condition will be 'not TRUE' -> false, independent of the 'field'
+            value. Encapsulate in own group, as only this predicate become
+            'false', not entire group it is part of.
+          */
+          if (filter->begin() == -1 || filter->isfalse() == -1 ||
+              filter->end() == -1)
+            DBUG_RETURN(1);
+          DBUG_RETURN(0);
+        }
+      }

-    const bool field1_maybe_null = field1->get_field()->maybe_null();
-    const bool field2_maybe_null = field2 && field2->get_field()->maybe_null();
-    bool added_null_check = false;
+      const bool field1_maybe_null = field1->get_field()->maybe_null();
+      const bool field2_maybe_null =
+          field2 && field2->get_field()->maybe_null();
+      bool added_null_check = false;

-    if (field1_maybe_null || field2_maybe_null)
-    {
-      switch (function_type) {
-      /*
-        The NdbInterpreter handles a NULL value as being less than any
-        non-NULL value. Thus any NULL value columns will evaluate to
-        'TRUE' (and pass the filter) in the predicate expression:
-            <column> <cond> <non-NULL value>
-
-        This is not according to how the server expect NULL valued
-        predicates to be evaluated: Any NULL values in a comparison
-        predicate should result in an UNKNOWN boolean result
-        and the row being eliminated.
-
-        This is mitigated by adding an extra isnotnull-check to
-        eliminate NULL valued rows which otherwise would have passed
-        a '<column> < <value>' check in the ScanFilter.
-      */
-      case NDB_LT_FUNC:
-      case NDB_LE_FUNC:
-        // NdbInterpreter incorrectly compare '<NULL> < f2' as 'true'
-        // -> NULL filter f1
-
-      case NDB_LIKE_FUNC:
-      case NDB_NOTLIKE_FUNC:
-        // NdbInterpreter incorrectly compare '<NULL> [not] like <value>' as 'true'
-        // -> NULL filter f1
-        if (field1_maybe_null)
-        {
-          DBUG_PRINT("info", ("Appending extra field1 ISNOTNULL check"));
-          if (filter->begin(NdbScanFilter::AND) == -1 ||
-              filter->isnotnull(field1->get_field_no()) == -1)
-            DBUG_RETURN(1);
-          added_null_check = true;
-        }
-        break;
-
-      case NDB_EQ_FUNC:
-        // NdbInterpreter incorrectly compare '<NULL> = <NULL>' as 'true'
-        // -> At least either f1 or f2 need a NULL filter to ensure
-        //    not both are NULL.
-        if (!field1_maybe_null)
-          break;
-        // Fall through to check 'field2_maybe_null'
-
-      case NDB_GE_FUNC:
-      case NDB_GT_FUNC:
-        // NdbInterpreter incorrectly compare 'f1 > <NULL>' as true -> NULL filter f2
-        if (field2_maybe_null)
-        {
-          DBUG_PRINT("info", ("Appending extra field2 ISNOTNULL check"));
-          if (filter->begin(NdbScanFilter::AND) == -1 ||
-              filter->isnotnull(field2->get_field_no()) == -1)
-            DBUG_RETURN(1);
-          added_null_check = true;
-        }
-        break;
-
-      case NDB_NE_FUNC:
-        // f1 '<>' f2 -> f1 < f2 or f2 < f1: Both f1 and f2 need NULL filters
-        DBUG_PRINT("info", ("Appending extra field1 & field2 ISNOTNULL check"));
-        if (filter->begin(NdbScanFilter::AND) == -1 ||
-            (field1_maybe_null && filter->isnotnull(field1->get_field_no()) == -1) ||
-            (field2_maybe_null && filter->isnotnull(field2->get_field_no()) == -1))
-          DBUG_RETURN(1);
-        added_null_check = true;
-        break;
-
-      default:
-        break;
-      }
-    }
+      if (field1_maybe_null || field2_maybe_null) {
+        switch (function_type) {
+          /*
+            The NdbInterpreter handles a NULL value as being less than any
+            non-NULL value. Thus any NULL value columns will evaluate to
+            'TRUE' (and pass the filter) in the predicate expression:
+                <column> <cond> <non-NULL value>
+
+            This is not according to how the server expects NULL valued
+            predicates to be evaluated: Any NULL values in a comparison
+            predicate should result in an UNKNOWN boolean result
+            and the row being eliminated.
+
+            This is mitigated by adding an extra isnotnull-check to
+            eliminate NULL valued rows which otherwise would have passed
+            a '<column> < <value>' check in the ScanFilter.
+          */
+          case NDB_LT_FUNC:
+          case NDB_LE_FUNC:
+            // NdbInterpreter incorrectly compare '<NULL> < f2' as 'true'
+            // -> NULL filter f1
+
+          case NDB_LIKE_FUNC:
+          case NDB_NOTLIKE_FUNC:
+            // NdbInterpreter incorrectly compare '<NULL> [not] like <value>'
+            // as 'true'
+            // -> NULL filter f1
+            if (field1_maybe_null) {
+              DBUG_PRINT("info", ("Appending extra field1 ISNOTNULL check"));
+              if (filter->begin(NdbScanFilter::AND) == -1 ||
+                  filter->isnotnull(field1->get_field_no()) == -1)
+                DBUG_RETURN(1);
+              added_null_check = true;
+            }
+            break;
+
+          case NDB_EQ_FUNC:
+            // NdbInterpreter incorrectly compare '<NULL> = <NULL>' as 'true'
+            // -> At least either f1 or f2 need a NULL filter to ensure
+            //    not both are NULL.
+            if (!field1_maybe_null) break;
+            // Fall through to check 'field2_maybe_null'
+
+          case NDB_GE_FUNC:
+          case NDB_GT_FUNC:
+            // NdbInterpreter incorrectly compare 'f1 > <NULL>' as true
+            // -> NULL filter f2
+            if (field2_maybe_null) {
+              DBUG_PRINT("info", ("Appending extra field2 ISNOTNULL check"));
+              if (filter->begin(NdbScanFilter::AND) == -1 ||
+                  filter->isnotnull(field2->get_field_no()) == -1)
+                DBUG_RETURN(1);
+              added_null_check = true;
+            }
+            break;
+
+          case NDB_NE_FUNC:
+            // f1 '<>' f2 -> f1 < f2 or f2 < f1: Both f1 and f2 need NULL
+            // filters
+            DBUG_PRINT("info",
+                       ("Appending extra field1 & field2 ISNOTNULL check"));
+            if (filter->begin(NdbScanFilter::AND) == -1 ||
+                (field1_maybe_null &&
+                 filter->isnotnull(field1->get_field_no()) == -1) ||
+                (field2_maybe_null &&
+                 filter->isnotnull(field2->get_field_no()) == -1))
+              DBUG_RETURN(1);
+            added_null_check = true;
+            break;

+          default:
+            break;
+        }
+      }

-    NdbScanFilter::BinaryCondition cond;
-    switch (function_type) {
-    case NDB_EQ_FUNC:
-    {
-      DBUG_PRINT("info", ("Generating EQ filter"));
-      cond = NdbScanFilter::COND_EQ;
-      break;
-    }
-    case NDB_NE_FUNC:
-    {
-      DBUG_PRINT("info", ("Generating NE filter"));
-      cond = NdbScanFilter::COND_NE;
-      break;
-    }
-    case NDB_LT_FUNC:
-    {
-      DBUG_PRINT("info", ("Generating LT filter"));
-      cond = NdbScanFilter::COND_LT;
-      break;
-    }
-    case NDB_LE_FUNC:
-    {
-      DBUG_PRINT("info", ("Generating LE filter"));
-      cond = NdbScanFilter::COND_LE;
-      break;
-    }
-    case NDB_GE_FUNC:
-    {
-      DBUG_PRINT("info", ("Generating GE filter"));
-      cond = NdbScanFilter::COND_GE;
-      break;
-    }
-    case NDB_GT_FUNC:
-    {
-      DBUG_PRINT("info", ("Generating GT filter"));
-      cond = NdbScanFilter::COND_GT;
-      break;
-    }
-    case NDB_LIKE_FUNC:
-    {
-      DBUG_PRINT("info", ("Generating LIKE filter"));
-      cond = NdbScanFilter::COND_LIKE;
-      break;
-    }
-    case NDB_NOTLIKE_FUNC:
-    {
-      DBUG_PRINT("info", ("Generating NOT LIKE filter"));
-      cond = NdbScanFilter::COND_NOT_LIKE;
-      break;
-    }
-    case NDB_ISNULL_FUNC:
-    {
-      DBUG_PRINT("info", ("Generating ISNULL filter"));
-      if (filter->isnull(field1->get_field_no()) == -1)
-        DBUG_RETURN(1);
-      DBUG_RETURN(0);
-    }
-    case NDB_ISNOTNULL_FUNC:
-    {
-      DBUG_PRINT("info", ("Generating ISNOTNULL filter"));
-      if (filter->isnotnull(field1->get_field_no()) == -1)
-        DBUG_RETURN(1);
-      DBUG_RETURN(0);
-    }
-    default:
-      DBUG_ASSERT(false);
-      DBUG_RETURN(1);
-    }
+      NdbScanFilter::BinaryCondition cond;
+      switch (function_type) {
+        case NDB_EQ_FUNC: {
+          DBUG_PRINT("info", ("Generating EQ filter"));
+          cond = NdbScanFilter::COND_EQ;
+          break;
+        }
+        case NDB_NE_FUNC: {
+          DBUG_PRINT("info", ("Generating NE filter"));
+          cond = NdbScanFilter::COND_NE;
+          break;
+        }
+        case NDB_LT_FUNC: {
+          DBUG_PRINT("info", ("Generating LT filter"));
+          cond = NdbScanFilter::COND_LT;
+          break;
+        }
+        case NDB_LE_FUNC: {
+          DBUG_PRINT("info", ("Generating LE filter"));
+          cond = NdbScanFilter::COND_LE;
+          break;
+        }
+        case NDB_GE_FUNC: {
+          DBUG_PRINT("info", ("Generating GE filter"));
+          cond = NdbScanFilter::COND_GE;
+          break;
+        }
+        case NDB_GT_FUNC: {
+          DBUG_PRINT("info", ("Generating GT filter"));
+          cond = NdbScanFilter::COND_GT;
+          break;
+        }
+        case NDB_LIKE_FUNC: {
+          DBUG_PRINT("info", ("Generating LIKE filter"));
+          cond = NdbScanFilter::COND_LIKE;
+          break;
+        }
+        case NDB_NOTLIKE_FUNC: {
+          DBUG_PRINT("info", ("Generating NOT LIKE filter"));
+          cond = NdbScanFilter::COND_NOT_LIKE;
+          break;
+        }
+        case NDB_ISNULL_FUNC: {
+          DBUG_PRINT("info", ("Generating ISNULL filter"));
+          if (filter->isnull(field1->get_field_no()) == -1) DBUG_RETURN(1);
+          DBUG_RETURN(0);
+        }
+        case NDB_ISNOTNULL_FUNC: {
+          DBUG_PRINT("info", ("Generating ISNOTNULL filter"));
+          if (filter->isnotnull(field1->get_field_no()) == -1) DBUG_RETURN(1);
+          DBUG_RETURN(0);
+        }
+        default:
+          DBUG_ASSERT(false);
+          DBUG_RETURN(1);
+      }
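The isnotnull() wrapping in the hunk above exists because of a genuine semantic gap: in SQL a comparison against NULL yields UNKNOWN and the row is filtered out, whereas NDB's interpreter simply orders NULL below every value. A self-contained illustration of the gap (pseudo-values only, not NDB code):

// SQL three-valued logic vs. a "NULL sorts first" interpreter.
#include <climits>
#include <iostream>
#include <optional>

bool sql_less(std::optional<int> a, std::optional<int> b) {
  if (!a || !b) return false;  // UNKNOWN -> row eliminated
  return *a < *b;
}

bool ndb_less(std::optional<int> a, std::optional<int> b) {
  int av = a ? *a : INT_MIN;  // NULL treated as smaller than everything
  int bv = b ? *b : INT_MIN;
  return av < bv;
}

int main() {
  std::optional<int> null_col, seven = 7;
  std::cout << sql_less(null_col, seven)    // 0: SQL eliminates the row
            << ndb_less(null_col, seven)    // 1: raw filter would keep it
            << "\n";
  // AND'ing in an extra isnotnull(col), as the handler does, makes the
  // pushed filter agree with the SQL result again.
  return 0;
}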
NdbScanFilter::COND_NOT_LIKE); + DBUG_ASSERT(field1 == a && value == b); - case NDB_NE_FUNC: - // f1 '<>' f2 -> f1 < f2 or f2 < f1: Both f1 and f2 need NULL filters - DBUG_PRINT("info", ("Appending extra field1 & field2 ISNOTNULL check")); - if (filter->begin(NdbScanFilter::AND) == -1 || - (field1_maybe_null && filter->isnotnull(field1->get_field_no()) == -1) || - (field2_maybe_null && filter->isnotnull(field2->get_field_no()) == -1)) - DBUG_RETURN(1); - added_null_check = true; - break; + char buff[MAX_FIELD_WIDTH]; + String str(buff, sizeof(buff), field1->get_field_charset()); + Item *value_item = const_cast(value->get_item()); + const String *pattern = value_item->val_str(&str); - default: - break; + if (filter->cmp(cond, field1->get_field_no(), pattern->ptr(), + pattern->length()) == -1) + DBUG_RETURN(1); } - } - NdbScanFilter::BinaryCondition cond; - switch (function_type) { - case NDB_EQ_FUNC: - { - DBUG_PRINT("info", ("Generating EQ filter")); - cond = NdbScanFilter::COND_EQ; - break; - } - case NDB_NE_FUNC: - { - DBUG_PRINT("info", ("Generating NE filter")); - cond = NdbScanFilter::COND_NE; - break; - } - case NDB_LT_FUNC: - { - DBUG_PRINT("info", ("Generating LT filter")); - cond = NdbScanFilter::COND_LT; - break; - } - case NDB_LE_FUNC: - { - DBUG_PRINT("info", ("Generating LE filter")); - cond = NdbScanFilter::COND_LE; - break; - } - case NDB_GE_FUNC: - { - DBUG_PRINT("info", ("Generating GE filter")); - cond = NdbScanFilter::COND_GE; - break; - } - case NDB_GT_FUNC: - { - DBUG_PRINT("info", ("Generating GT filter")); - cond = NdbScanFilter::COND_GT; - break; - } - case NDB_LIKE_FUNC: - { - DBUG_PRINT("info", ("Generating LIKE filter")); - cond = NdbScanFilter::COND_LIKE; - break; - } - case NDB_NOTLIKE_FUNC: - { - DBUG_PRINT("info", ("Generating NOT LIKE filter")); - cond = NdbScanFilter::COND_NOT_LIKE; - break; - } - case NDB_ISNULL_FUNC: - { - DBUG_PRINT("info", ("Generating ISNULL filter")); - if (filter->isnull(field1->get_field_no()) == -1) + if (added_null_check && filter->end() == -1) // Local AND group DBUG_RETURN(1); DBUG_RETURN(0); } - case NDB_ISNOTNULL_FUNC: - { - DBUG_PRINT("info", ("Generating ISNOTNULL filter")); - if (filter->isnotnull(field1->get_field_no()) == -1) - DBUG_RETURN(1); - DBUG_RETURN(0); - } default: - DBUG_ASSERT(false); - DBUG_RETURN(1); - } - - if (cond <= NdbScanFilter::COND_NE) - { - if (value != nullptr) - { - // Save value in right format for the field type - if (unlikely(value->save_in_field(field1) == -1)) - DBUG_RETURN(1); - if (filter->cmp(cond, - field1->get_field_no(), - field1->get_val(), - field1->pack_length()) == -1) - DBUG_RETURN(1); - } - else - { - DBUG_ASSERT(field2 != nullptr); - DBUG_ASSERT(ndbd_support_column_cmp( - get_thd_ndb(current_thd)->ndb->getMinDbNodeVersion())); - if (filter->cmp(cond, - field1->get_field_no(), - field2->get_field_no()) == -1) - DBUG_RETURN(1); - } - } - else // [NOT] LIKE - { - DBUG_ASSERT(cond == NdbScanFilter::COND_LIKE || - cond == NdbScanFilter::COND_NOT_LIKE); - DBUG_ASSERT(field1 == a && value == b); - - char buff[MAX_FIELD_WIDTH]; - String str(buff, sizeof(buff), field1->get_field_charset()); - Item *value_item = const_cast(value->get_item()); - const String *pattern = value_item->val_str(&str); - - if (filter->cmp(cond, - field1->get_field_no(), - pattern->ptr(), - pattern->length()) == -1) - DBUG_RETURN(1); - } - - if (added_null_check && filter->end() == -1) //Local AND group - DBUG_RETURN(1); - DBUG_RETURN(0); - } - default: - break; + break; } DBUG_PRINT("info", ("Found illegal 
condition")); DBUG_RETURN(1); } - -int -ha_ndbcluster_cond::build_scan_filter_group(List_iterator &cond, - NdbScanFilter *filter, - const bool negated) const -{ - uint level=0; +int ha_ndbcluster_cond::build_scan_filter_group( + List_iterator &cond, NdbScanFilter *filter, + const bool negated) const { + uint level = 0; DBUG_ENTER("build_scan_filter_group"); - do - { + do { const Ndb_item *ndb_item = cond++; - if (ndb_item == nullptr) - DBUG_RETURN(1); + if (ndb_item == nullptr) DBUG_RETURN(1); switch (ndb_item->type) { - case NDB_FUNCTION: - { - switch (ndb_item->get_func_type()) { - case NDB_COND_AND_FUNC: - { - level++; - DBUG_PRINT("info", ("Generating %s group %u", (negated)?"OR":"AND", - level)); - if ((negated) ? filter->begin(NdbScanFilter::OR) - : filter->begin(NdbScanFilter::AND) == -1) - DBUG_RETURN(1); - break; - } - case NDB_COND_OR_FUNC: - { - level++; - DBUG_PRINT("info", ("Generating %s group %u", (negated)?"AND":"OR", - level)); - if ((negated) ? filter->begin(NdbScanFilter::AND) - : filter->begin(NdbScanFilter::OR) == -1) - DBUG_RETURN(1); - break; - } - case NDB_NOT_FUNC: - { - DBUG_PRINT("info", ("Generating negated query")); - if (build_scan_filter_group(cond, filter, !negated)) - DBUG_RETURN(1); - break; - } - default: - if (build_scan_filter_predicate(cond, filter, negated)) - DBUG_RETURN(1); + case NDB_FUNCTION: { + switch (ndb_item->get_func_type()) { + case NDB_COND_AND_FUNC: { + level++; + DBUG_PRINT("info", ("Generating %s group %u", + (negated) ? "OR" : "AND", level)); + if ((negated) ? filter->begin(NdbScanFilter::OR) + : filter->begin(NdbScanFilter::AND) == -1) + DBUG_RETURN(1); + break; + } + case NDB_COND_OR_FUNC: { + level++; + DBUG_PRINT("info", ("Generating %s group %u", + (negated) ? "AND" : "OR", level)); + if ((negated) ? 
filter->begin(NdbScanFilter::AND) + : filter->begin(NdbScanFilter::OR) == -1) + DBUG_RETURN(1); + break; + } + case NDB_NOT_FUNC: { + DBUG_PRINT("info", ("Generating negated query")); + if (build_scan_filter_group(cond, filter, !negated)) DBUG_RETURN(1); + break; + } + default: + if (build_scan_filter_predicate(cond, filter, negated)) + DBUG_RETURN(1); + break; + } break; } - break; - } - case NDB_VALUE: - { - // (Boolean-)VALUE known at generate - const Item *item= ndb_item->get_item(); + case NDB_VALUE: { + // (Boolean-)VALUE known at generate + const Item *item = ndb_item->get_item(); #ifndef DBUG_OFF - String str; - item->print(current_thd, &str, QT_ORDINARY); + String str; + item->print(current_thd, &str, QT_ORDINARY); #endif - if (const_cast<Item *>(item)->is_null()) - { - // Note that boolean 'unknown' -> 'not true' - DBUG_PRINT("info", ("BOOLEAN value 'UNKNOWN', expression '%s'", str.c_ptr_safe())); - if (filter->begin(NdbScanFilter::AND) == -1 || - filter->isfalse() == -1 || - filter->end() == -1) - DBUG_RETURN(1); - } - else if (const_cast<Item *>(item)->val_bool() == !negated) - { - DBUG_PRINT("info", ("BOOLEAN value 'TRUE', expression '%s'", str.c_ptr_safe())); - if (filter->begin(NdbScanFilter::OR) == -1 || - filter->istrue() == -1 || - filter->end() == -1) - DBUG_RETURN(1); - } - else - { - DBUG_PRINT("info", ("BOOLEAN value 'FALSE', expression '%s'", str.c_ptr_safe())); - if (filter->begin(NdbScanFilter::AND) == -1 || - filter->isfalse() == -1 || - filter->end() == -1) - DBUG_RETURN(1); + if (const_cast<Item *>(item)->is_null()) { + // Note that boolean 'unknown' -> 'not true' + DBUG_PRINT("info", ("BOOLEAN value 'UNKNOWN', expression '%s'", + str.c_ptr_safe())); + if (filter->begin(NdbScanFilter::AND) == -1 || + filter->isfalse() == -1 || filter->end() == -1) + DBUG_RETURN(1); + } else if (const_cast<Item *>(item)->val_bool() == !negated) { + DBUG_PRINT("info", ("BOOLEAN value 'TRUE', expression '%s'", + str.c_ptr_safe())); + if (filter->begin(NdbScanFilter::OR) == -1 || + filter->istrue() == -1 || filter->end() == -1) + DBUG_RETURN(1); + } else { + DBUG_PRINT("info", ("BOOLEAN value 'FALSE', expression '%s'", + str.c_ptr_safe())); + if (filter->begin(NdbScanFilter::AND) == -1 || + filter->isfalse() == -1 || filter->end() == -1) + DBUG_RETURN(1); + } + break; } - break; - } - case NDB_END_COND: - DBUG_PRINT("info", ("End of group %u", level)); - level--; - if (filter->end() == -1) + case NDB_END_COND: + DBUG_PRINT("info", ("End of group %u", level)); + level--; + if (filter->end() == -1) DBUG_RETURN(1); + break; + default: { + DBUG_PRINT("info", ("Illegal scan filter")); + DBUG_ASSERT(false); DBUG_RETURN(1); - break; - default: - { - DBUG_PRINT("info", ("Illegal scan filter")); - DBUG_ASSERT(false); - DBUG_RETURN(1); - } + } } - } while (level > 0); - + } while (level > 0); + DBUG_RETURN(0); } - -int -ha_ndbcluster_cond::generate_scan_filter_from_cond(NdbScanFilter& filter) -{ +int ha_ndbcluster_cond::generate_scan_filter_from_cond(NdbScanFilter &filter) { bool need_group = true; DBUG_ENTER("generate_scan_filter_from_cond"); // Determine if we need to wrap an AND group around condition(s) const Ndb_item *ndb_item = m_ndb_cond.head(); - if (ndb_item->type == NDB_FUNCTION) - { + if (ndb_item->type == NDB_FUNCTION) { switch (ndb_item->get_func_type()) { - case NDB_COND_AND_FUNC: - case NDB_COND_OR_FUNC: - // A single AND/OR condition has its own AND/OR-group - // .. 
in all other cases we start a AND group now - need_group = false; - break; - default: - break; + case NDB_COND_AND_FUNC: + case NDB_COND_OR_FUNC: + // A single AND/OR condition has its own AND/OR-group + // .. in all other cases we start a AND group now + need_group = false; + break; + default: + break; } } - if (need_group && filter.begin() == -1) - DBUG_RETURN(1); + if (need_group && filter.begin() == -1) DBUG_RETURN(1); List_iterator<const Ndb_item> cond(m_ndb_cond); - if (build_scan_filter_group(cond, &filter, false)) - { + if (build_scan_filter_group(cond, &filter, false)) { DBUG_PRINT("info", ("build_scan_filter_group failed")); - const NdbError& err= filter.getNdbError(); - if (err.code == NdbScanFilter::FilterTooLarge) - { + const NdbError &err = filter.getNdbError(); + if (err.code == NdbScanFilter::FilterTooLarge) { DBUG_PRINT("info", ("%s", err.message)); - push_warning(current_thd, Sql_condition::SL_WARNING, - err.code, err.message); + push_warning(current_thd, Sql_condition::SL_WARNING, err.code, + err.message); } DBUG_RETURN(1); } - if (need_group && filter.end() == -1) - DBUG_RETURN(1); + if (need_group && filter.end() == -1) DBUG_RETURN(1); DBUG_RETURN(0); } - /* Optimizer sometimes does hash index lookup of a key where some key parts are null. The set of cases where this happens makes @@ -2262,41 +1967,33 @@ ha_ndbcluster_cond::generate_scan_filter_from_cond(NdbScanFilter& filter) to be filtered accordingly. The scan is actually on the table and the index bounds are pushed down. */ -int ha_ndbcluster_cond::generate_scan_filter_from_key(NdbScanFilter &filter, - const KEY *key_info, - const key_range *start_key, - const key_range *end_key) -{ +int ha_ndbcluster_cond::generate_scan_filter_from_key( + NdbScanFilter &filter, const KEY *key_info, const key_range *start_key, + const key_range *end_key) { DBUG_ENTER("generate_scan_filter_from_key"); #ifndef DBUG_OFF { - DBUG_PRINT("info", ("key parts:%u length:%u", - key_info->user_defined_key_parts, key_info->key_length)); - const key_range* keylist[2]={ start_key, end_key }; - for (uint j=0; j <= 1; j++) - { + DBUG_PRINT("info", + ("key parts:%u length:%u", key_info->user_defined_key_parts, + key_info->key_length)); + const key_range *keylist[2] = {start_key, end_key}; + for (uint j = 0; j <= 1; j++) { char buf[8192]; - const key_range* key=keylist[j]; - if (key == 0) - { + const key_range *key = keylist[j]; + if (key == 0) { sprintf(buf, "key range %u: none", j); - } - else - { + } else { sprintf(buf, "key range %u: flag:%u part", j, key->flag); - const KEY_PART_INFO* key_part=key_info->key_part; - const uchar* ptr=key->key; - for (uint i=0; i < key_info->user_defined_key_parts; i++) - { - sprintf(buf+strlen(buf), " %u:", i); - for (uint k=0; k < key_part->store_length; k++) - { - sprintf(buf+strlen(buf), " %02x", ptr[k]); + const KEY_PART_INFO *key_part = key_info->key_part; + const uchar *ptr = key->key; + for (uint i = 0; i < key_info->user_defined_key_parts; i++) { + sprintf(buf + strlen(buf), " %u:", i); + for (uint k = 0; k < key_part->store_length; k++) { + sprintf(buf + strlen(buf), " %02x", ptr[k]); } - ptr+=key_part->store_length; - if (ptr - key->key >= (ptrdiff_t)key->length) - { + ptr += key_part->store_length; + if (ptr - key->key >= (ptrdiff_t)key->length) { /* key_range has no count of parts so must test byte length. But this is not the place for following assert. @@ -2312,29 +2009,24 @@ int ha_ndbcluster_cond::generate_scan_filter_from_key(NdbScanFilter &filter, } #endif - do - { + do { /* Case "x is not null". 
Seen with index(x) where it becomes range "null < x". Not seen with index(x,y) for any combination of bounds which include "is not null". */ - if (start_key != 0 && - start_key->flag == HA_READ_AFTER_KEY && - end_key == 0 && - key_info->user_defined_key_parts == 1) - { - const KEY_PART_INFO* key_part=key_info->key_part; - if (key_part->null_bit != 0) // nullable (must be) + if (start_key != 0 && start_key->flag == HA_READ_AFTER_KEY && + end_key == 0 && key_info->user_defined_key_parts == 1) { + const KEY_PART_INFO *key_part = key_info->key_part; + if (key_part->null_bit != 0) // nullable (must be) { - const uchar* ptr= start_key->key; - if (ptr[0] != 0) // null (in "null < x") + const uchar *ptr = start_key->key; + if (ptr[0] != 0) // null (in "null < x") { DBUG_PRINT("info", ("Generating ISNOTNULL filter for nullable %s", key_part->field->field_name)); - if (filter.isnotnull(key_part->fieldnr-1) == -1) - DBUG_RETURN(1); + if (filter.isnotnull(key_part->fieldnr - 1) == -1) DBUG_RETURN(1); break; } } @@ -2348,51 +2040,38 @@ int ha_ndbcluster_cond::generate_scan_filter_from_key(NdbScanFilter &filter, Seen only when all key parts are present (but there is no reason to limit the code to this case). */ - if (start_key != 0 && - start_key->flag == HA_READ_KEY_EXACT && - end_key != 0 && - end_key->flag == HA_READ_AFTER_KEY && + if (start_key != 0 && start_key->flag == HA_READ_KEY_EXACT && + end_key != 0 && end_key->flag == HA_READ_AFTER_KEY && start_key->length == end_key->length && - memcmp(start_key->key, end_key->key, start_key->length) == 0) - { - const KEY_PART_INFO* key_part=key_info->key_part; - const uchar* ptr=start_key->key; - for (uint i=0; i < key_info->user_defined_key_parts; i++) - { - const Field* field=key_part->field; - if (key_part->null_bit) // nullable + memcmp(start_key->key, end_key->key, start_key->length) == 0) { + const KEY_PART_INFO *key_part = key_info->key_part; + const uchar *ptr = start_key->key; + for (uint i = 0; i < key_info->user_defined_key_parts; i++) { + const Field *field = key_part->field; + if (key_part->null_bit) // nullable { - if (ptr[0] != 0) // null + if (ptr[0] != 0) // null { DBUG_PRINT("info", ("Generating ISNULL filter for nullable %s", field->field_name)); - if (filter.isnull(key_part->fieldnr-1) == -1) - DBUG_RETURN(1); - } - else - { + if (filter.isnull(key_part->fieldnr - 1) == -1) DBUG_RETURN(1); + } else { DBUG_PRINT("info", ("Generating EQ filter for nullable %s", field->field_name)); - if (filter.cmp(NdbScanFilter::COND_EQ, - key_part->fieldnr-1, - ptr + 1, // skip null-indicator byte + if (filter.cmp(NdbScanFilter::COND_EQ, key_part->fieldnr - 1, + ptr + 1, // skip null-indicator byte field->pack_length()) == -1) DBUG_RETURN(1); } - } - else - { + } else { DBUG_PRINT("info", ("Generating EQ filter for non-nullable %s", field->field_name)); - if (filter.cmp(NdbScanFilter::COND_EQ, - key_part->fieldnr-1, - ptr, + if (filter.cmp(NdbScanFilter::COND_EQ, key_part->fieldnr - 1, ptr, field->pack_length()) == -1) DBUG_RETURN(1); } - ptr+=key_part->store_length; - if (ptr - start_key->key >= (ptrdiff_t)start_key->length) - { + ptr += key_part->store_length; + if (ptr - start_key->key >= (ptrdiff_t)start_key->length) { break; } key_part++; @@ -2403,8 +2082,7 @@ int ha_ndbcluster_cond::generate_scan_filter_from_key(NdbScanFilter &filter, DBUG_PRINT("info", ("Unknown hash index scan")); // Catch new cases when optimizer changes DBUG_ASSERT(false); - } - while (0); + } while (0); DBUG_RETURN(0); } @@ -2416,9 +2094,7 @@ int 
ha_ndbcluster_cond::generate_scan_filter_from_key(NdbScanFilter &filter, @param cond The condition to be evaluated by the handler */ -void -ha_ndbcluster_cond::set_condition(const Item *cond) -{ +void ha_ndbcluster_cond::set_condition(const Item *cond) { m_unpushed_cond = cond; } @@ -2428,58 +2104,45 @@ ha_ndbcluster_cond::set_condition(const Item *cond) @return true if the condition is evaluated to true. */ -bool -ha_ndbcluster_cond::eval_condition() const -{ - return const_cast<Item *>(m_unpushed_cond)->val_int()==1; +bool ha_ndbcluster_cond::eval_condition() const { + return const_cast<Item *>(m_unpushed_cond)->val_int() == 1; } - /** Add any columns referred by 'cond' to the read_set of the table. @param table The table to update the read_set for. @param cond The condition referring columns in 'table' */ -void -ha_ndbcluster_cond::add_read_set(TABLE *table, const Item *cond) -{ - if (cond != nullptr) - { +void ha_ndbcluster_cond::add_read_set(TABLE *table, const Item *cond) { + if (cond != nullptr) { Mark_field mf(table, MARK_COLUMNS_READ); const_cast<Item *>(cond)->walk(&Item::mark_field_in_map, enum_walk::PREFIX, - (uchar *)&mf); + (uchar *)&mf); } } /* Interface layer between ha_ndbcluster and ha_ndbcluster_cond */ -void -ha_ndbcluster::generate_scan_filter(NdbInterpretedCode *code, - NdbScanOperation::ScanOptions *options) -{ +void ha_ndbcluster::generate_scan_filter( + NdbInterpretedCode *code, NdbScanOperation::ScanOptions *options) { DBUG_ENTER("generate_scan_filter"); - if (pushed_cond == nullptr) - { + if (pushed_cond == nullptr) { DBUG_PRINT("info", ("Empty stack")); DBUG_VOID_RETURN; } - if (m_cond.get_interpreter_code().getWordsUsed() > 0) - { + if (m_cond.get_interpreter_code().getWordsUsed() > 0) { /** * We had already generated the NdbInterpreterCode for the scan_filter. * Just use what we had. */ - if (options != nullptr) - { - options->interpretedCode= &m_cond.get_interpreter_code(); - options->optionsPresent|= NdbScanOperation::ScanOptions::SO_INTERPRETED; - } - else - { + if (options != nullptr) { + options->interpretedCode = &m_cond.get_interpreter_code(); + options->optionsPresent |= NdbScanOperation::ScanOptions::SO_INTERPRETED; + } else { code->copy(m_cond.get_interpreter_code()); } DBUG_VOID_RETURN; @@ -2487,45 +2150,36 @@ ha_ndbcluster::generate_scan_filter(NdbInterpretedCode *code, // Generate the scan_filter from previously 'serialized' condition code NdbScanFilter filter(code); - const int ret= m_cond.generate_scan_filter_from_cond(filter); - if (unlikely(ret != 0)) - { + const int ret = m_cond.generate_scan_filter_from_cond(filter); + if (unlikely(ret != 0)) { /** * Failed to generate a scan filter, fallback to let * ha_ndbcluster evaluate the condition. 
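 * (With this fallback, set_condition() keeps the unpushed Item so that eval_condition(), defined earlier in this file, can evaluate it with val_int() on the mysqld side instead of inside the NDB interpreter.)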
*/ m_cond.set_condition(pushed_cond); - } - else if (options != nullptr) - { - options->interpretedCode= code; - options->optionsPresent|= NdbScanOperation::ScanOptions::SO_INTERPRETED; + } else if (options != nullptr) { + options->interpretedCode = code; + options->optionsPresent |= NdbScanOperation::ScanOptions::SO_INTERPRETED; } DBUG_VOID_RETURN; } -int ha_ndbcluster::generate_scan_filter_with_key(NdbInterpretedCode *code, - NdbScanOperation::ScanOptions *options, - const KEY *key_info, - const key_range *start_key, - const key_range *end_key) -{ +int ha_ndbcluster::generate_scan_filter_with_key( + NdbInterpretedCode *code, NdbScanOperation::ScanOptions *options, + const KEY *key_info, const key_range *start_key, const key_range *end_key) { DBUG_ENTER("generate_scan_filter_with_key"); NdbScanFilter filter(code); - if (filter.begin(NdbScanFilter::AND) == -1) - DBUG_RETURN(1); + if (filter.begin(NdbScanFilter::AND) == -1) DBUG_RETURN(1); // Generate a scanFilter from a prepared pushed conditions - if (pushed_cond != nullptr) - { + if (pushed_cond != nullptr) { /** * Note, that in this case we can not use the pre-generated scan_filter, * as it does not contain the code for the additional 'key'. */ const int ret = m_cond.generate_scan_filter_from_cond(filter); - if (unlikely(ret != 0)) - { + if (unlikely(ret != 0)) { /** * Failed to generate a scan filter, fallback to let * ha_ndbcluster evaluate the condition. @@ -2534,27 +2188,22 @@ int ha_ndbcluster::generate_scan_filter_with_key(NdbInterpretedCode *code, // Discard the failed scanFilter and prepare for 'key' filter.reset(); - if (filter.begin(NdbScanFilter::AND) == -1) - DBUG_RETURN(1); + if (filter.begin(NdbScanFilter::AND) == -1) DBUG_RETURN(1); } } // Generate a scanFilter from the key definition - if (key_info != nullptr) - { + if (key_info != nullptr) { const int ret = ha_ndbcluster_cond::generate_scan_filter_from_key( - filter, key_info, start_key, end_key); - if (unlikely(ret != 0)) - DBUG_RETURN(ret); + filter, key_info, start_key, end_key); + if (unlikely(ret != 0)) DBUG_RETURN(ret); } - if (filter.end() == -1) - DBUG_RETURN(1); + if (filter.end() == -1) DBUG_RETURN(1); - if (options != nullptr) - { - options->interpretedCode= code; - options->optionsPresent|= NdbScanOperation::ScanOptions::SO_INTERPRETED; + if (options != nullptr) { + options->interpretedCode = code; + options->optionsPresent |= NdbScanOperation::ScanOptions::SO_INTERPRETED; } DBUG_RETURN(0); diff --git a/storage/ndb/plugin/ha_ndbcluster_cond.h b/storage/ndb/plugin/ha_ndbcluster_cond.h index 22a6ab85b44a..3df9e7ec4e3e 100644 --- a/storage/ndb/plugin/ha_ndbcluster_cond.h +++ b/storage/ndb/plugin/ha_ndbcluster_cond.h @@ -38,51 +38,41 @@ struct key_range; struct TABLE; class Ndb_item; -class ha_ndbcluster_cond -{ -public: +class ha_ndbcluster_cond { + public: ha_ndbcluster_cond(); ~ha_ndbcluster_cond(); - const Item *cond_push(const Item *cond, - TABLE *table, const NdbDictionary::Table *ndb_table, - bool other_tbls_ok, - Item *&pushed_cond); + const Item *cond_push(const Item *cond, TABLE *table, + const NdbDictionary::Table *ndb_table, + bool other_tbls_ok, Item *&pushed_cond); void cond_clear(); - int generate_scan_filter_from_cond(NdbScanFilter& filter); + int generate_scan_filter_from_cond(NdbScanFilter &filter); - static - int generate_scan_filter_from_key(NdbScanFilter& filter, - const class KEY* key_info, - const key_range *start_key, - const key_range *end_key); + static int generate_scan_filter_from_key(NdbScanFilter &filter, + const class KEY 
*key_info, + const key_range *start_key, + const key_range *end_key); // Get a possibly pre-generated Interpreter code for the pushed condition - const NdbInterpretedCode& get_interpreter_code() - { + const NdbInterpretedCode &get_interpreter_code() { return m_scan_filter_code; } void set_condition(const Item *cond); - bool check_condition() const - { + bool check_condition() const { return (m_unpushed_cond == nullptr || eval_condition()); } static void add_read_set(TABLE *table, const Item *cond); - void add_read_set(TABLE *table) - { - add_read_set(table, m_unpushed_cond); - } + void add_read_set(TABLE *table) { add_read_set(table, m_unpushed_cond); } -private: + private: int build_scan_filter_predicate(List_iterator<const Ndb_item> &cond, - NdbScanFilter* filter, - bool negated) const; + NdbScanFilter *filter, bool negated) const; int build_scan_filter_group(List_iterator<const Ndb_item> &cond, - NdbScanFilter* filter, - bool negated) const; + NdbScanFilter *filter, bool negated) const; bool eval_condition() const; diff --git a/storage/ndb/plugin/ha_ndbcluster_connection.cc b/storage/ndb/plugin/ha_ndbcluster_connection.cc index b2bdbfe04adc..2d0760f1d25f 100644 --- a/storage/ndb/plugin/ha_ndbcluster_connection.cc +++ b/storage/ndb/plugin/ha_ndbcluster_connection.cc @@ -30,7 +30,7 @@ #include "mysql/plugin.h" #include "mysqld_error.h" #include "sql/auth/auth_acls.h" -#include "sql/mysqld.h" // server_id, connection_events_loop_aborted +#include "sql/mysqld.h" // server_id, connection_events_loop_aborted #include "sql/sql_class.h" #include "sql/sql_lex.h" #include "storage/ndb/include/kernel/ndb_limits.h" @@ -39,21 +39,20 @@ #include "storage/ndb/include/util/BaseString.hpp" #include "storage/ndb/include/util/Vector.hpp" #ifndef _WIN32 -#include <netdb.h> // getservbyname +#include <netdb.h> // getservbyname #endif #include "sql/table.h" #include "storage/ndb/plugin/ndb_log.h" #include "storage/ndb/plugin/ndb_sleep.h" -Ndb* g_ndb= NULL; -Ndb_cluster_connection* g_ndb_cluster_connection= NULL; -static Ndb_cluster_connection **g_pool= NULL; -static uint g_pool_alloc= 0; -static uint g_pool_pos= 0; +Ndb *g_ndb = NULL; +Ndb_cluster_connection *g_ndb_cluster_connection = NULL; +static Ndb_cluster_connection **g_pool = NULL; +static uint g_pool_alloc = 0; +static uint g_pool_pos = 0; static mysql_mutex_t g_pool_mutex; - /** @brief Parse the --ndb-cluster-connection-pool-nodeids=nodeid[,nodeidN] comma separated list of nodeids to use for the pool @@ -65,14 +64,9 @@ static mysql_mutex_t g_pool_mutex; @return true or false when option parsing failed. Error message describing the problem has been printed to error log. */ -static -bool parse_pool_nodeids(const char* opt_str, - uint pool_size, - uint force_nodeid, - Vector<Uint32>& nodeids) -{ - if (!opt_str) - { +static bool parse_pool_nodeids(const char *opt_str, uint pool_size, + uint force_nodeid, Vector<Uint32> &nodeids) { + if (!opt_str) { // The option was not specified. 
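// (nodeids is then left empty; ndbcluster_connect() below falls back to force_nodeid, or to the default zero nodeid for the remaining pool connections.)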
return true; } @@ -81,47 +75,44 @@ bool parse_pool_nodeids(const char *opt_str, Vector<BaseString> list(pool_size); tmp.split(list, ","); - for (unsigned i = 0; i<list.size(); i++) - { + for (unsigned i = 0; i < list.size(); i++) { [...] - if (nodeid <= 0 || nodeid > MAX_NODES_ID) - { - ndb_log_error("Invalid nodeid %d in " - "--ndb-cluster-connection-pool-nodeids='%s'.", - nodeid, opt_str); + if (nodeid <= 0 || nodeid > MAX_NODES_ID) { + ndb_log_error( + "Invalid nodeid %d in " + "--ndb-cluster-connection-pool-nodeids='%s'.", + nodeid, opt_str); return false; } // Check that nodeid is unique(not already in the list) - for(unsigned j = 0; j<i; j++) - { [...] - port= ntohs((u_short)serv_ptr->s_port); /* purecov: inspected */ - } - else if(env) + port = ntohs((u_short)serv_ptr->s_port); /* purecov: inspected */ + } else if (env) port = atoi(env); else port = MYSQL_PORT; @@ -232,17 +215,14 @@ static const char *get_processinfo_host() { #define URI_PATH_SOCKET MYSQL_UNIX_ADDR; #endif -static const char * -get_processinfo_path() -{ - const char * uri_path = mysqld_unix_port; - char * env; - if (!uri_path) - { +static const char *get_processinfo_path() { + const char *uri_path = mysqld_unix_port; + char *env; + if (!uri_path) { if ((env = getenv("MYSQL_UNIX_PORT"))) - uri_path= env; /* purecov: inspected */ + uri_path = env; /* purecov: inspected */ else - uri_path= URI_PATH_SOCKET; + uri_path = URI_PATH_SOCKET; } return uri_path; } @@ -256,31 +236,27 @@ get_processinfo_path() */ extern int global_flag_skip_waiting_for_clean_cache; -int -ndbcluster_connect(int (*connect_callback)(void), - ulong wait_connected, // Timeout in seconds - uint connection_pool_size, - const char* connection_pool_nodeids_str, - bool optimized_node_select, - const char* connect_string, - uint force_nodeid, - uint recv_thread_activation_threshold, - uint data_node_neighbour) -{ - const char mysqld_name[]= "mysqld"; +int ndbcluster_connect(int (*connect_callback)(void), + ulong wait_connected, // Timeout in seconds + uint connection_pool_size, + const char *connection_pool_nodeids_str, + bool optimized_node_select, const char *connect_string, + uint force_nodeid, uint recv_thread_activation_threshold, + uint data_node_neighbour) { + const char mysqld_name[] = "mysqld"; int res; DBUG_ENTER("ndbcluster_connect"); - DBUG_PRINT("enter", ("connect_string: %s, force_nodeid: %d", - connect_string, force_nodeid)); + DBUG_PRINT("enter", ("connect_string: %s, force_nodeid: %d", connect_string, + force_nodeid)); /* For Service URI in ndbinfo */ const int processinfo_port = get_processinfo_port(); - const char * processinfo_host = get_processinfo_host(); - const char * processinfo_path = processinfo_port ? "" : get_processinfo_path(); + const char *processinfo_host = get_processinfo_host(); + const char *processinfo_path = processinfo_port ? "" : get_processinfo_path(); char server_id_string[64]; - if(server_id > 0) + if (server_id > 0) snprintf(server_id_string, sizeof(server_id_string), "?server-id=%lu", - server_id); + server_id); else server_id_string[0] = '\0'; @@ -288,36 +264,31 @@ ndbcluster_connect(int (*connect_callback)(void), // comma separated list of nodeids to use for the pool Vector<Uint32> nodeids; if (!parse_pool_nodeids(connection_pool_nodeids_str, connection_pool_size, - force_nodeid, nodeids)) - { + force_nodeid, nodeids)) { // Error message already printed DBUG_RETURN(-1); } // Find specified nodeid for first connection and let it override // force_nodeid(if both has been specified they are equal). 
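// (Illustrative, hypothetical values: with --ndb-cluster-connection-pool=3 and --ndb-cluster-connection-pool-nodeids=230,231,232 the pool gets three connections using nodeids 230, 231 and 232, and any --ndb-nodeid setting must then equal 230, the first entry.)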
- if (nodeids.size()) - { + if (nodeids.size()) { assert(force_nodeid == 0 || force_nodeid == nodeids[0]); force_nodeid = nodeids[0]; ndb_log_info("using nodeid %u", force_nodeid); } - global_flag_skip_waiting_for_clean_cache= 1; + global_flag_skip_waiting_for_clean_cache = 1; g_ndb_cluster_connection = - new (std::nothrow) Ndb_cluster_connection(connect_string, - force_nodeid); - if (g_ndb_cluster_connection == nullptr) - { + new (std::nothrow) Ndb_cluster_connection(connect_string, force_nodeid); + if (g_ndb_cluster_connection == nullptr) { ndb_log_error("failed to allocate global ndb cluster connection"); DBUG_PRINT("error", ("Ndb_cluster_connection(%s)", connect_string)); DBUG_RETURN(-1); } { char buf[128]; - snprintf(buf, sizeof(buf), "%s --server-id=%lu", - mysqld_name, server_id); + snprintf(buf, sizeof(buf), "%s --server-id=%lu", mysqld_name, server_id); g_ndb_cluster_connection->set_name(buf); snprintf(buf, sizeof(buf), "%s%s", processinfo_path, server_id_string); g_ndb_cluster_connection->set_service_uri("mysql", processinfo_host, @@ -325,358 +296,266 @@ ndbcluster_connect(int (*connect_callback)(void), } g_ndb_cluster_connection->set_optimized_node_selection(optimized_node_select); g_ndb_cluster_connection->set_recv_thread_activation_threshold( - recv_thread_activation_threshold); + recv_thread_activation_threshold); g_ndb_cluster_connection->set_data_node_neighbour(data_node_neighbour); // Create a Ndb object to open the connection to NDB - g_ndb = - new (std::nothrow) Ndb(g_ndb_cluster_connection, - "sys"); - if (g_ndb == nullptr) - { + g_ndb = new (std::nothrow) Ndb(g_ndb_cluster_connection, "sys"); + if (g_ndb == nullptr) { ndb_log_error("failed to allocate global ndb object"); DBUG_PRINT("error", ("failed to create global ndb object")); DBUG_RETURN(-1); } - if (g_ndb->init() != 0) - { - DBUG_PRINT("error", ("%d message: %s", - g_ndb->getNdbError().code, + if (g_ndb->init() != 0) { + DBUG_PRINT("error", ("%d message: %s", g_ndb->getNdbError().code, g_ndb->getNdbError().message)); DBUG_RETURN(-1); } /* Connect to management server */ - const NDB_TICKS start= NdbTick_getCurrentTicks(); + const NDB_TICKS start = NdbTick_getCurrentTicks(); - while ((res= g_ndb_cluster_connection->connect(0,0,0)) == 1) - { + while ((res = g_ndb_cluster_connection->connect(0, 0, 0)) == 1) { const NDB_TICKS now = NdbTick_getCurrentTicks(); - if (NdbTick_Elapsed(start,now).seconds() > wait_connected) - break; + if (NdbTick_Elapsed(start, now).seconds() > wait_connected) break; ndb_retry_sleep(100); - if (connection_events_loop_aborted()) - DBUG_RETURN(-1); + if (connection_events_loop_aborted()) DBUG_RETURN(-1); } { - g_pool_alloc= connection_pool_size; - g_pool= (Ndb_cluster_connection**) - my_malloc(PSI_INSTRUMENT_ME, - g_pool_alloc * sizeof(Ndb_cluster_connection*), - MYF(MY_WME | MY_ZEROFILL)); - mysql_mutex_init(PSI_INSTRUMENT_ME, - &g_pool_mutex, - MY_MUTEX_INIT_FAST); - g_pool[0]= g_ndb_cluster_connection; - for (uint i= 1; i < g_pool_alloc; i++) - { + g_pool_alloc = connection_pool_size; + g_pool = (Ndb_cluster_connection **)my_malloc( + PSI_INSTRUMENT_ME, g_pool_alloc * sizeof(Ndb_cluster_connection *), + MYF(MY_WME | MY_ZEROFILL)); + mysql_mutex_init(PSI_INSTRUMENT_ME, &g_pool_mutex, MY_MUTEX_INIT_FAST); + g_pool[0] = g_ndb_cluster_connection; + for (uint i = 1; i < g_pool_alloc; i++) { // Find specified nodeid for this connection or use default zero uint nodeid = 0; - if (i < nodeids.size()) - { + if (i < nodeids.size()) { nodeid = nodeids[i]; ndb_log_info("connection[%u], using nodeid 
%u", i, nodeid); } - g_pool[i] = - new (std::nothrow) Ndb_cluster_connection(connect_string, - g_ndb_cluster_connection, - nodeid); - if (g_pool[i] == nullptr) - { + g_pool[i] = new (std::nothrow) Ndb_cluster_connection( + connect_string, g_ndb_cluster_connection, nodeid); + if (g_pool[i] == nullptr) { ndb_log_error("connection[%u], failed to allocate connect object", i); - DBUG_PRINT("error",("Ndb_cluster_connection[%u](%s)", - i, connect_string)); + DBUG_PRINT("error", + ("Ndb_cluster_connection[%u](%s)", i, connect_string)); DBUG_RETURN(-1); } { char buf[128]; snprintf(buf, sizeof(buf), "%s --server-id=%lu (connection %u)", - mysqld_name, server_id, i+1); + mysqld_name, server_id, i + 1); g_pool[i]->set_name(buf); - const char * uri_sep = server_id ? ";" : "?"; - snprintf(buf, sizeof(buf), "%s%s%sconnection=%u", - processinfo_path, server_id_string, uri_sep, i+1); - g_pool[i]->set_service_uri("mysql", processinfo_host, processinfo_port, buf); + const char *uri_sep = server_id ? ";" : "?"; + snprintf(buf, sizeof(buf), "%s%s%sconnection=%u", processinfo_path, + server_id_string, uri_sep, i + 1); + g_pool[i]->set_service_uri("mysql", processinfo_host, processinfo_port, + buf); } g_pool[i]->set_optimized_node_selection(optimized_node_select); - g_pool[i]->set_recv_thread_activation_threshold(recv_thread_activation_threshold); + g_pool[i]->set_recv_thread_activation_threshold( + recv_thread_activation_threshold); g_pool[i]->set_data_node_neighbour(data_node_neighbour); } } - if (res == 0) - { + if (res == 0) { connect_callback(); - for (uint i= 0; i < g_pool_alloc; i++) - { - int node_id= g_pool[i]->node_id(); - if (node_id == 0) - { + for (uint i = 0; i < g_pool_alloc; i++) { + int node_id = g_pool[i]->node_id(); + if (node_id == 0) { // not connected to mgmd yet, try again - g_pool[i]->connect(0,0,0); - if (g_pool[i]->node_id() == 0) - { + g_pool[i]->connect(0, 0, 0); + if (g_pool[i]->node_id() == 0) { ndb_log_info("connection[%u], starting connect thread", i); g_pool[i]->start_connect_thread(); continue; } - node_id= g_pool[i]->node_id(); + node_id = g_pool[i]->node_id(); } - DBUG_PRINT("info", - ("NDBCLUSTER storage engine (%u) at %s on port %d", i, - g_pool[i]->get_connected_host(), - g_pool[i]->get_connected_port())); + DBUG_PRINT("info", ("NDBCLUSTER storage engine (%u) at %s on port %d", i, + g_pool[i]->get_connected_host(), + g_pool[i]->get_connected_port())); Uint64 waited; - do - { - res= g_pool[i]->wait_until_ready(1, 1); + do { + res = g_pool[i]->wait_until_ready(1, 1); const NDB_TICKS now = NdbTick_getCurrentTicks(); - waited = NdbTick_Elapsed(start,now).seconds(); + waited = NdbTick_Elapsed(start, now).seconds(); } while (res != 0 && waited < wait_connected); - const char *msg= 0; - if (res == 0) - { - msg= "all storage nodes connected"; - } - else if (res > 0) - { - msg= "some storage nodes connected"; - } - else if (res < 0) - { - msg= "no storage nodes connected (timed out)"; + const char *msg = 0; + if (res == 0) { + msg = "all storage nodes connected"; + } else if (res > 0) { + msg = "some storage nodes connected"; + } else if (res < 0) { + msg = "no storage nodes connected (timed out)"; } ndb_log_info("connection[%u], NodeID: %d, %s", i, node_id, msg); } - } - else if (res == 1) - { - for (uint i= 0; i < g_pool_alloc; i++) - { - if (g_pool[i]-> - start_connect_thread(i == 0 ? connect_callback : NULL)) - { + } else if (res == 1) { + for (uint i = 0; i < g_pool_alloc; i++) { + if (g_pool[i]->start_connect_thread(i == 0 ? 
connect_callback : NULL)) { ndb_log_error("connection[%u], failed to start connect thread", i); - DBUG_PRINT("error", ("g_ndb_cluster_connection->start_connect_thread()")); + DBUG_PRINT("error", + ("g_ndb_cluster_connection->start_connect_thread()")); DBUG_RETURN(-1); } } #ifndef DBUG_OFF { char buf[1024]; - DBUG_PRINT("info", - ("NDBCLUSTER storage engine not started, " - "will connect using %s", - g_ndb_cluster_connection-> - get_connectstring(buf,sizeof(buf)))); + DBUG_PRINT("info", ("NDBCLUSTER storage engine not started, " + "will connect using %s", + g_ndb_cluster_connection->get_connectstring( + buf, sizeof(buf)))); } #endif - } - else - { + } else { DBUG_ASSERT(res == -1); DBUG_PRINT("error", ("permanent error")); - ndb_log_error("error (%u) %s", - g_ndb_cluster_connection->get_latest_error(), + ndb_log_error("error (%u) %s", g_ndb_cluster_connection->get_latest_error(), g_ndb_cluster_connection->get_latest_error_msg()); DBUG_RETURN(-1); } DBUG_RETURN(0); } -void ndbcluster_disconnect(void) -{ +void ndbcluster_disconnect(void) { DBUG_ENTER("ndbcluster_disconnect"); - if (g_ndb) - delete g_ndb; - g_ndb= NULL; + if (g_ndb) delete g_ndb; + g_ndb = NULL; { - if (g_pool) - { + if (g_pool) { /* first in pool is the main one, wait with release */ - for (uint i= 1; i < g_pool_alloc; i++) - { - if (g_pool[i]) - delete g_pool[i]; + for (uint i = 1; i < g_pool_alloc; i++) { + if (g_pool[i]) delete g_pool[i]; } my_free(g_pool); mysql_mutex_destroy(&g_pool_mutex); - g_pool= 0; + g_pool = 0; } - g_pool_alloc= 0; - g_pool_pos= 0; + g_pool_alloc = 0; + g_pool_pos = 0; } - if (g_ndb_cluster_connection) - delete g_ndb_cluster_connection; - g_ndb_cluster_connection= NULL; + if (g_ndb_cluster_connection) delete g_ndb_cluster_connection; + g_ndb_cluster_connection = NULL; DBUG_VOID_RETURN; } -Ndb_cluster_connection *ndb_get_cluster_connection() -{ +Ndb_cluster_connection *ndb_get_cluster_connection() { mysql_mutex_lock(&g_pool_mutex); - Ndb_cluster_connection *connection= g_pool[g_pool_pos]; + Ndb_cluster_connection *connection = g_pool[g_pool_pos]; g_pool_pos++; - if (g_pool_pos == g_pool_alloc) - g_pool_pos= 0; + if (g_pool_pos == g_pool_alloc) g_pool_pos = 0; mysql_mutex_unlock(&g_pool_mutex); return connection; } -ulonglong ndb_get_latest_trans_gci() -{ - ulonglong val= *g_ndb_cluster_connection->get_latest_trans_gci(); - for (uint i= 1; i < g_pool_alloc; i++) - { - ulonglong tmp= *g_pool[i]->get_latest_trans_gci(); - if (tmp > val) - val= tmp; +ulonglong ndb_get_latest_trans_gci() { + ulonglong val = *g_ndb_cluster_connection->get_latest_trans_gci(); + for (uint i = 1; i < g_pool_alloc; i++) { + ulonglong tmp = *g_pool[i]->get_latest_trans_gci(); + if (tmp > val) val = tmp; } return val; } -void ndb_set_latest_trans_gci(ulonglong val) -{ - for (uint i= 0; i < g_pool_alloc; i++) - { - *g_pool[i]->get_latest_trans_gci()= val; +void ndb_set_latest_trans_gci(ulonglong val) { + for (uint i = 0; i < g_pool_alloc; i++) { + *g_pool[i]->get_latest_trans_gci() = val; } } -int ndb_has_node_id(uint id) -{ - for (uint i= 0; i < g_pool_alloc; i++) - { - if (id == g_pool[i]->node_id()) - return 1; +int ndb_has_node_id(uint id) { + for (uint i = 0; i < g_pool_alloc; i++) { + if (id == g_pool[i]->node_id()) return 1; } return 0; } -int ndb_set_recv_thread_activation_threshold(Uint32 threshold) -{ - for (uint i= 0; i < g_pool_alloc; i++) - { +int ndb_set_recv_thread_activation_threshold(Uint32 threshold) { + for (uint i = 0; i < g_pool_alloc; i++) { g_pool[i]->set_recv_thread_activation_threshold(threshold); } 
return 0; } -int -ndb_set_recv_thread_cpu(Uint16 *cpuid_array, - Uint32 cpuid_array_size) -{ +int ndb_set_recv_thread_cpu(Uint16 *cpuid_array, Uint32 cpuid_array_size) { int ret_code = 0; Uint32 num_cpu_needed = g_pool_alloc; - if (cpuid_array_size == 0) - { - for (Uint32 i = 0; i < g_pool_alloc; i++) - { + if (cpuid_array_size == 0) { + for (Uint32 i = 0; i < g_pool_alloc; i++) { ret_code = g_pool[i]->unset_recv_thread_cpu(0); } return ret_code; } - if (cpuid_array_size < num_cpu_needed) - { + if (cpuid_array_size < num_cpu_needed) { /* Ignore cpu masks that is too short */ - ndb_log_info("Ignored receive thread CPU mask, mask too short," - " %u CPUs needed in mask, only %u CPUs provided", - num_cpu_needed, cpuid_array_size); + ndb_log_info( + "Ignored receive thread CPU mask, mask too short," + " %u CPUs needed in mask, only %u CPUs provided", + num_cpu_needed, cpuid_array_size); return 1; } - for (Uint32 i = 0; i < g_pool_alloc; i++) - { - ret_code = g_pool[i]->set_recv_thread_cpu(&cpuid_array[i], - (Uint32)1, - 0); + for (Uint32 i = 0; i < g_pool_alloc; i++) { + ret_code = g_pool[i]->set_recv_thread_cpu(&cpuid_array[i], (Uint32)1, 0); } return ret_code; } -void -ndb_set_data_node_neighbour(ulong data_node_neighbour) -{ - for (uint i= 0; i < g_pool_alloc; i++) +void ndb_set_data_node_neighbour(ulong data_node_neighbour) { + for (uint i = 0; i < g_pool_alloc; i++) g_pool[i]->set_data_node_neighbour(data_node_neighbour); } -void ndb_get_connection_stats(Uint64* statsArr) -{ - Uint64 connectionStats[ Ndb::NumClientStatistics ]; +void ndb_get_connection_stats(Uint64 *statsArr) { + Uint64 connectionStats[Ndb::NumClientStatistics]; memset(statsArr, 0, sizeof(connectionStats)); - - for (uint i=0; i < g_pool_alloc; i++) - { + + for (uint i = 0; i < g_pool_alloc; i++) { g_pool[i]->collect_client_stats(connectionStats, Ndb::NumClientStatistics); - - for (Uint32 s=0; s < Ndb::NumClientStatistics; s++) - statsArr[s]+= connectionStats[s]; + + for (Uint32 s = 0; s < Ndb::NumClientStatistics; s++) + statsArr[s] += connectionStats[s]; } } -static ST_FIELD_INFO ndb_transid_mysql_connection_map_fields_info[] = -{ - { - "mysql_connection_id", - MY_INT64_NUM_DECIMAL_DIGITS, - MYSQL_TYPE_LONGLONG, - 0, - MY_I_S_UNSIGNED, - "", - 0 - }, +static ST_FIELD_INFO ndb_transid_mysql_connection_map_fields_info[] = { + {"mysql_connection_id", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0, + MY_I_S_UNSIGNED, "", 0}, - { - "node_id", - MY_INT64_NUM_DECIMAL_DIGITS, - MYSQL_TYPE_LONG, - 0, - MY_I_S_UNSIGNED, - "", - 0 - }, - { - "ndb_transid", - MY_INT64_NUM_DECIMAL_DIGITS, - MYSQL_TYPE_LONGLONG, - 0, - MY_I_S_UNSIGNED, - "", - 0 - }, - - { 0, 0, MYSQL_TYPE_NULL, 0, 0, "", 0 } -}; - -static -int -ndb_transid_mysql_connection_map_fill_table(THD* thd, TABLE_LIST* tables, - Item*) -{ + {"node_id", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, + MY_I_S_UNSIGNED, "", 0}, + {"ndb_transid", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG, 0, + MY_I_S_UNSIGNED, "", 0}, + + {0, 0, MYSQL_TYPE_NULL, 0, 0, "", 0}}; + +static int ndb_transid_mysql_connection_map_fill_table(THD *thd, + TABLE_LIST *tables, + Item *) { DBUG_ENTER("ndb_transid_mysql_connection_map_fill_table"); const bool all = (check_global_access(thd, PROCESS_ACL) == 0); const ulonglong self = thd_get_thread_id(thd); - TABLE* table= tables->table; - for (uint i = 0; i<g_pool_alloc; i++) - { - if (g_pool[i]) - { + TABLE *table = tables->table; + for (uint i = 0; i < g_pool_alloc; i++) { + if (g_pool[i]) { g_pool[i]->lock_ndb_objects(); - const Ndb * p = g_pool[i]->get_next_ndb_object(0); - while (p) - { + const Ndb *p = 
g_pool[i]->get_next_ndb_object(0); + while (p) { Uint64 connection_id = p->getCustomData64(); - if ((connection_id == self) || all) - { + if ((connection_id == self) || all) { table->field[0]->set_notnull(); table->field[0]->store(p->getCustomData64(), true); table->field[1]->set_notnull(); @@ -694,40 +573,33 @@ ndb_transid_mysql_connection_map_fill_table(THD* thd, TABLE_LIST* tables, DBUG_RETURN(0); } -static -int -ndb_transid_mysql_connection_map_init(void *p) -{ +static int ndb_transid_mysql_connection_map_init(void *p) { DBUG_ENTER("ndb_transid_mysql_connection_map_init"); - ST_SCHEMA_TABLE* schema = reinterpret_cast<ST_SCHEMA_TABLE*>(p); + ST_SCHEMA_TABLE *schema = reinterpret_cast<ST_SCHEMA_TABLE *>(p); schema->fields_info = ndb_transid_mysql_connection_map_fields_info; schema->fill_table = ndb_transid_mysql_connection_map_fill_table; DBUG_RETURN(0); } -static struct st_mysql_information_schema i_s_info = -{ - MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION -}; +static struct st_mysql_information_schema i_s_info = { + MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION}; /* information_schema table plugin providing a list of MySQL connection ID's and their corresponding NDB transaction ID */ -struct st_mysql_plugin ndb_transid_mysql_connection_map_table = -{ - MYSQL_INFORMATION_SCHEMA_PLUGIN, - &i_s_info, - "ndb_transid_mysql_connection_map", - "Oracle Corporation", - "Map between MySQL connection ID and NDB transaction ID", - PLUGIN_LICENSE_GPL, - ndb_transid_mysql_connection_map_init, - NULL, - NULL, - 0x0001, - NULL, - NULL, - NULL, - 0 -}; +struct st_mysql_plugin ndb_transid_mysql_connection_map_table = { + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &i_s_info, + "ndb_transid_mysql_connection_map", + "Oracle Corporation", + "Map between MySQL connection ID and NDB transaction ID", + PLUGIN_LICENSE_GPL, + ndb_transid_mysql_connection_map_init, + NULL, + NULL, + 0x0001, + NULL, + NULL, + NULL, + 0}; diff --git a/storage/ndb/plugin/ha_ndbcluster_connection.h b/storage/ndb/plugin/ha_ndbcluster_connection.h index 7ed51c475cee..ec2c4249e76a 100644 --- a/storage/ndb/plugin/ha_ndbcluster_connection.h +++ b/storage/ndb/plugin/ha_ndbcluster_connection.h @@ -27,14 +27,11 @@ class Ndb_cluster_connection; -int ndbcluster_connect(int (*connect_callback)(void), - ulong wait_connected, +int ndbcluster_connect(int (*connect_callback)(void), ulong wait_connected, uint connection_pool_size, - const char* connection_pool_nodeids_str, - bool optimized_node_select, - const char* connect_string, - uint force_nodeid, - uint recv_thread_activation_threshold, + const char *connection_pool_nodeids_str, + bool optimized_node_select, const char *connect_string, + uint force_nodeid, uint recv_thread_activation_threshold, uint data_node_neighbour); bool ndbcluster_is_connected(uint max_wait_sec); @@ -45,11 +42,9 @@ ulonglong ndb_get_latest_trans_gci(); void ndb_set_latest_trans_gci(ulonglong val); int ndb_has_node_id(uint id); int ndb_set_recv_thread_activation_threshold(Uint32 threshold); -int ndb_set_recv_thread_cpu(Uint16 *cpuid_array, - Uint32 cpuid_array_size); +int ndb_set_recv_thread_cpu(Uint16 *cpuid_array, Uint32 cpuid_array_size); void ndb_set_data_node_neighbour(ulong data_node_neighbour); -void ndb_get_connection_stats(Uint64* statsArr); +void ndb_get_connection_stats(Uint64 *statsArr); // The information_schema.ndb_transid_mysql_connection_map table plugin extern struct st_mysql_plugin ndb_transid_mysql_connection_map_table; - diff --git a/storage/ndb/plugin/ha_ndbcluster_push.cc b/storage/ndb/plugin/ha_ndbcluster_push.cc index 4f099301fa46..744e6d4ea9ff 
100644 --- a/storage/ndb/plugin/ha_ndbcluster_push.cc +++ b/storage/ndb/plugin/ha_ndbcluster_push.cc @@ -199,12 +199,9 @@ bool ndb_pushed_join::match_definition(int type, // NdbQueryOperationDef::Type, * Determine if a specific column type is represented in a format which is * sensitive to the endian format of the underlying platform. */ -static bool -is_endian_sensible_type(const Field *field) -{ +static bool is_endian_sensible_type(const Field *field) { const enum_field_types type = field->real_type(); - switch(type) - { + switch (type) { // Most numerics are endian sensible, note the int24 though. // Note: Enum dont have its own type, represented as an int. case MYSQL_TYPE_SHORT: @@ -272,21 +269,20 @@ NdbQuery *ndb_pushed_join::make_query_instance( for (uint i = 0; i < outer_fields; i++) { Field *field = m_referred_fields[i]; DBUG_ASSERT(!field->is_real_null()); // Checked by ::check_if_pushable() - uchar* raw = field->ptr; + uchar *raw = field->ptr; #ifdef WORDS_BIGENDIAN - if (field->table->s->db_low_byte_first && is_endian_sensible_type(field)) - { + if (field->table->s->db_low_byte_first && + is_endian_sensible_type(field)) { const uint32 field_length = field->pack_length(); - raw = static_cast<uchar*>(my_alloca(field_length)); + raw = static_cast<uchar *>(my_alloca(field_length)); // Byte order is swapped to get the correct endian format. - const uchar *last = field->ptr+field_length; - for (uint pos = 0; pos < field_length; pos++) - raw[pos] = *(--last); + const uchar *last = field->ptr + field_length; + for (uint pos = 0; pos < field_length; pos++) raw[pos] = *(--last); } #else - //Little endian platforms are expected to be only 'low_byte_first' + // Little endian platforms are expected to be only 'low_byte_first' DBUG_ASSERT(field->table->s->db_low_byte_first); #endif diff --git a/storage/ndb/plugin/ha_ndbcluster_push.h b/storage/ndb/plugin/ha_ndbcluster_push.h index b58dc21a125f..6451bed65983 100644 --- a/storage/ndb/plugin/ha_ndbcluster_push.h +++ b/storage/ndb/plugin/ha_ndbcluster_push.h @@ -34,156 +34,135 @@ class NdbQueryOperationDef; class ndb_pushed_builder_ctx; struct NdbError; -namespace AQP{ - class Join_plan; - class Table_access; -} +namespace AQP { +class Join_plan; +class Table_access; +} // namespace AQP -void ndbcluster_build_key_map(const NdbDictionary::Table* table, - const NDB_INDEX_DATA& index, - const KEY *key_def, - uint ix_map[]); +void ndbcluster_build_key_map(const NdbDictionary::Table *table, + const NDB_INDEX_DATA &index, const KEY *key_def, + uint ix_map[]); - -/** - * This type is used in conjunction with AQP::Join_plan and represents a set - * of the table access operations of the join plan. +/** + * This type is used in conjunction with AQP::Join_plan and represents a set + * of the table access operations of the join plan. * Had to subclass Bitmap as the default Bitmap<64> c'tor didn't initialize its * map. */ typedef Bitmap<(MAX_TABLES > 64 ? 
MAX_TABLES : 64)> table_bitmap; -class ndb_table_access_map : public table_bitmap -{ -public: - explicit ndb_table_access_map() - : table_bitmap(0) - {} - explicit ndb_table_access_map(uint table_no) - : table_bitmap(0) - { set_bit(table_no); +class ndb_table_access_map : public table_bitmap { + public: + explicit ndb_table_access_map() : table_bitmap(0) {} + explicit ndb_table_access_map(uint table_no) : table_bitmap(0) { + set_bit(table_no); } - void add(const ndb_table_access_map& table_map) - { // Require const_cast as signature of class Bitmap::merge is not const correct - merge(const_cast<ndb_table_access_map&>(table_map)); - } - void add(uint table_no) - { - set_bit(table_no); + void add(const ndb_table_access_map + &table_map) { // Require const_cast as signature of class + // Bitmap::merge is not const correct + merge(const_cast<ndb_table_access_map &>(table_map)); } + void add(uint table_no) { set_bit(table_no); } - bool contain(const ndb_table_access_map& table_map) const - { + bool contain(const ndb_table_access_map &table_map) const { return table_map.is_subset(*this); } - bool contain(uint table_no) const - { - return is_set(table_no); - } - - uint first_table(uint start= 0) const; - uint last_table(uint start= MAX_TABLES) const; + bool contain(uint table_no) const { return is_set(table_no); } -}; // class ndb_table_access_map + uint first_table(uint start = 0) const; + uint last_table(uint start = MAX_TABLES) const; +}; // class ndb_table_access_map /** This class represents a prepared pushed (N-way) join operation. * * It might be instantiated multiple times whenever the query, * or this subpart of the query, is being (re-)executed by - * ::createQuery() or it's wrapper method + * ::createQuery() or it's wrapper method * ha_ndbcluster::create_pushed_join(). */ -class ndb_pushed_join -{ -public: - explicit ndb_pushed_join(const ndb_pushed_builder_ctx& builder_ctx, - const NdbQueryDef* query_def); - - ~ndb_pushed_join(); +class ndb_pushed_join { + public: + explicit ndb_pushed_join(const ndb_pushed_builder_ctx &builder_ctx, + const NdbQueryDef *query_def); + + ~ndb_pushed_join(); /** * Check that this prepared pushed query matches the type * of operation specified by the arguments. */ - bool match_definition(int type, //NdbQueryOperationDef::Type, - const NDB_INDEX_DATA* idx) const; + bool match_definition(int type, // NdbQueryOperationDef::Type, + const NDB_INDEX_DATA *idx) const; /** Create an executable instance of this defined query. */ - NdbQuery* make_query_instance( - NdbTransaction* trans, - const NdbQueryParamValue* keyFieldParams, - uint paramCnt) const; + NdbQuery *make_query_instance(NdbTransaction *trans, + const NdbQueryParamValue *keyFieldParams, + uint paramCnt) const; /** Get the number of pushed table access operations.*/ - uint get_operation_count() const - { return m_operation_count; } + uint get_operation_count() const { return m_operation_count; } /** - * In a pushed join, fields in lookup keys and scan bounds may refer to + * In a pushed join, fields in lookup keys and scan bounds may refer to * result fields of table access operation that execute prior to the pushed * join. This method returns the number of such references. 
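 * (For example, under a hypothetical plan where t0 is executed by mysqld and 't1 JOIN t2 ON t2.pk = t0.a' is the pushed part, the lookup key of t2 refers to t0.a, which is produced prior to the pushed join and is counted here as one such reference.)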
*/ - uint get_field_referrences_count() const - { return m_field_count; } + uint get_field_referrences_count() const { return m_field_count; } - const NdbQueryDef& get_query_def() const - { return *m_query_def; } + const NdbQueryDef &get_query_def() const { return *m_query_def; } /** Get the table that is accessed by the i'th table access operation.*/ - TABLE* get_table(uint i) const - { + TABLE *get_table(uint i) const { DBUG_ASSERT(i < m_operation_count); return m_tables[i]; } - /** + /** * This is the maximal number of fields in the key of any pushed table * access operation. */ - static const uint MAX_KEY_PART= MAX_KEY; + static const uint MAX_KEY_PART = MAX_KEY; /** - * In a pushed join, fields in lookup keys and scan bounds may refer to + * In a pushed join, fields in lookup keys and scan bounds may refer to * result fields of table access operation that execute prior to the pushed - * join. This constant specifies the maximal number of such references for + * join. This constant specifies the maximal number of such references for * a query. */ - static const uint MAX_REFERRED_FIELDS= 16; + static const uint MAX_REFERRED_FIELDS = 16; /** - * For each table access operation in a pushed join, this is the maximal + * For each table access operation in a pushed join, this is the maximal * number of key fields that may refer to the fields of the parent operation. */ - static const uint MAX_LINKED_KEYS= MAX_KEY; - /** - * This is the maximal number of table access operations there can be in a + static const uint MAX_LINKED_KEYS = MAX_KEY; + /** + * This is the maximal number of table access operations there can be in a * single pushed join. */ - static const uint MAX_PUSHED_OPERATIONS= MAX_TABLES; + static const uint MAX_PUSHED_OPERATIONS = MAX_TABLES; -private: - const NdbQueryDef* const m_query_def; // Definition of pushed join query + private: + const NdbQueryDef *const m_query_def; // Definition of pushed join query /** This is the number of table access operations in the pushed join.*/ uint m_operation_count; /** This is the tables that are accessed by the pushed join.*/ - TABLE* m_tables[MAX_PUSHED_OPERATIONS]; + TABLE *m_tables[MAX_PUSHED_OPERATIONS]; /** - * This is the number of referred fields of table access operation that + * This is the number of referred fields of table access operation that * execute prior to the pushed join. */ const uint m_field_count; /** - * These are the referred fields of table access operation that execute + * These are the referred fields of table access operation that execute * prior to the pushed join. */ - Field* m_referred_fields[MAX_REFERRED_FIELDS]; -}; // class ndb_pushed_join - - + Field *m_referred_fields[MAX_REFERRED_FIELDS]; +}; // class ndb_pushed_join /** * Contains the context and helper methods used during ::make_pushed_join(). @@ -191,16 +170,15 @@ class ndb_pushed_join * Interacts with the AQP which provides interface to the query prepared by * the mysqld optimizer. * - * Execution plans built for pushed joins are stored inside this builder context. + * Execution plans built for pushed joins are stored inside this builder + * context. 
*/ -class ndb_pushed_builder_ctx -{ +class ndb_pushed_builder_ctx { friend ndb_pushed_join::ndb_pushed_join( - const ndb_pushed_builder_ctx& builder_ctx, - const NdbQueryDef* query_def); + const ndb_pushed_builder_ctx &builder_ctx, const NdbQueryDef *query_def); -public: - ndb_pushed_builder_ctx(const AQP::Join_plan& plan); + public: + ndb_pushed_builder_ctx(const AQP::Join_plan &plan); ~ndb_pushed_builder_ctx(); /** @@ -210,48 +188,43 @@ class ndb_pushed_builder_ctx * > 0: Returned value is the error code. * < 0: There is a pending NdbError to be retrieved with getNdbError() */ - int make_pushed_join(const AQP::Table_access* join_root, - const ndb_pushed_join* &pushed_join); + int make_pushed_join(const AQP::Table_access *join_root, + const ndb_pushed_join *&pushed_join); - const NdbError& getNdbError() const; + const NdbError &getNdbError() const; -private: + private: /** * Collect all tables which may be pushed together with 'root'. * Returns 'true' if anything is pushable. */ - bool is_pushable_with_root( - const AQP::Table_access* root); + bool is_pushable_with_root(const AQP::Table_access *root); - bool is_pushable_as_child( - const AQP::Table_access* table); + bool is_pushable_as_child(const AQP::Table_access *table); - bool is_const_item_pushable( - const Item* key_item, - const KEY_PART_INFO* key_part); + bool is_const_item_pushable(const Item *key_item, + const KEY_PART_INFO *key_part); - bool is_field_item_pushable( - const AQP::Table_access* table, - const Item* key_item, - const KEY_PART_INFO* key_part, - ndb_table_access_map& parents); + bool is_field_item_pushable(const AQP::Table_access *table, + const Item *key_item, + const KEY_PART_INFO *key_part, + ndb_table_access_map &parents); int optimize_query_plan(); int build_query(); - void collect_key_refs( - const AQP::Table_access* table, - const Item* key_refs[]) const; + void collect_key_refs(const AQP::Table_access *table, + const Item *key_refs[]) const; - int build_key(const AQP::Table_access* table, - const NdbQueryOperand* op_key[]); + int build_key(const AQP::Table_access *table, + const NdbQueryOperand *op_key[]); - uint get_table_no(const Item* key_item) const; + uint get_table_no(const Item *key_item) const; -private: - const AQP::Join_plan& m_plan; - const AQP::Table_access* m_join_root; + private: + const AQP::Join_plan &m_plan; + const AQP::Table_access *m_join_root; // Scope of tables covered by this pushed join ndb_table_access_map m_join_scope; @@ -264,40 +237,37 @@ class ndb_pushed_builder_ctx uint m_internal_op_count; uint m_fld_refs; - Field* m_referred_fields[ndb_pushed_join::MAX_REFERRED_FIELDS]; + Field *m_referred_fields[ndb_pushed_join::MAX_REFERRED_FIELDS]; // Handle to the NdbQuery factory. // Possibly reused if multiple NdbQuery's are pushed. 
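// A rough flow sketch (illustrative only, written in terms of the declarations above): ndb_pushed_builder_ctx ctx(plan); const ndb_pushed_join *pushed = nullptr; if (ctx.make_pushed_join(join_root, pushed) == 0 && pushed != nullptr) { NdbQuery *query = pushed->make_query_instance(trans, keyFieldParams, paramCnt); } The NdbQueryDef wrapped by 'pushed' is the one prepared through this builder.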
- NdbQueryBuilder* m_builder; + NdbQueryBuilder *m_builder; - enum pushability - { - PUSHABLE_AS_PARENT= 0x01, - PUSHABLE_AS_CHILD= 0x02 + enum pushability { + PUSHABLE_AS_PARENT = 0x01, + PUSHABLE_AS_CHILD = 0x02 } enum_pushability; - struct pushed_tables - { - pushed_tables() : - m_maybe_pushable(0), - m_common_parents(), - m_extend_parents(), - m_depend_parents(), - m_parent(MAX_TABLES), - m_ancestors(), - m_fanout(1.0), - m_child_fanout(1.0), - m_op(NULL) - {} + struct pushed_tables { + pushed_tables() + : m_maybe_pushable(0), + m_common_parents(), + m_extend_parents(), + m_depend_parents(), + m_parent(MAX_TABLES), + m_ancestors(), + m_fanout(1.0), + m_child_fanout(1.0), + m_op(NULL) {} int m_maybe_pushable; // OR'ed bits from 'enum_pushability' /** - * We maintain two sets of parent candidates for each table: + * We maintain two sets of parent candidates for each table: * - 'common' are those parents for which ::collect_key_refs() * will find key_refs[] (possibly through the EQ-sets) such that all * linkedValues() refer fields from the same parent. - * - 'extended' are those parents refered from some of the + * - 'extended' are those parents refered from some of the * key_refs[], and having the rest of the key_refs[] available as * 'grandparent refs'. */ @@ -309,14 +279,14 @@ class ndb_pushed_builder_ctx * due to dependencies on these parents tables. * * NOTE1: When the 'm_parent' has been choosen by - * ::optimize_query_plan(), any remaining grandparent - * dependencies has to be added the 'depend_parents' + * ::optimize_query_plan(), any remaining grandparent + * dependencies has to be added the 'depend_parents' * of the choosen parents such that it is taken into account * when calculating the ancestor tables. * * NOTE2: These 'depend_parents' place restrictions on which of the * 'common', 'extend' parents above we actually may use: - * As all 'depend_parents' must be joined as (grand)parents + * As all 'depend_parents' must be joined as (grand)parents * prior to one of the selected common/extend parents, only * parents >= the last 'depend_parents' are the real candidates. * @@ -347,18 +317,15 @@ class ndb_pushed_builder_ctx */ double m_child_fanout; - const NdbQueryOperationDef* m_op; + const NdbQueryOperationDef *m_op; } m_tables[MAX_TABLES]; /** * There are two different table enumerations used: */ - struct table_remap - { + struct table_remap { Uint16 to_external; // m_remap[] is indexed with internal table_no Uint16 to_internal; // m_remap[] is indexed with external tablenr } m_remap[MAX_TABLES]; -}; // class ndb_pushed_builder_ctx - - +}; // class ndb_pushed_builder_ctx diff --git a/storage/ndb/plugin/ha_ndbinfo.cc b/storage/ndb/plugin/ha_ndbinfo.cc index dfdfe1b8d6ff..dbcba6555e36 100644 --- a/storage/ndb/plugin/ha_ndbinfo.cc +++ b/storage/ndb/plugin/ha_ndbinfo.cc @@ -29,7 +29,7 @@ #include "my_compiler.h" #include "my_dbug.h" #include "sql/current_thd.h" -#include "sql/derror.h" // ER_THD +#include "sql/derror.h" // ER_THD #include "sql/field.h" #include "sql/sql_class.h" #include "sql/sql_table.h" // build_table_filename @@ -40,86 +40,77 @@ #include "storage/ndb/src/ndbapi/NdbInfo.hpp" static MYSQL_THDVAR_UINT( - max_rows, /* name */ - PLUGIN_VAR_RQCMDARG, - "Specify max number of rows to fetch per roundtrip to cluster", - NULL, /* check func. */ - NULL, /* update func. */ - 10, /* default */ - 1, /* min */ - 256, /* max */ - 0 /* block */ + max_rows, /* name */ + PLUGIN_VAR_RQCMDARG, + "Specify max number of rows to fetch per roundtrip to cluster", + NULL, /* check func. 
+ NULL, /* update func. */
+ 10, /* default */
+ 1, /* min */
+ 256, /* max */
+ 0 /* block */
);
static MYSQL_THDVAR_UINT(
- max_bytes, /* name */
- PLUGIN_VAR_RQCMDARG,
- "Specify approx. max number of bytes to fetch per roundtrip to cluster",
- NULL, /* check func. */
- NULL, /* update func. */
- 0, /* default */
- 0, /* min */
- 65535, /* max */
- 0 /* block */
+ max_bytes, /* name */
+ PLUGIN_VAR_RQCMDARG,
+ "Specify approx. max number of bytes to fetch per roundtrip to cluster",
+ NULL, /* check func. */
+ NULL, /* update func. */
+ 0, /* default */
+ 0, /* min */
+ 65535, /* max */
+ 0 /* block */
);
-static MYSQL_THDVAR_BOOL(
- show_hidden, /* name */
- PLUGIN_VAR_RQCMDARG,
- "Control if tables should be visible or not",
- NULL, /* check func. */
- NULL, /* update func. */
- false /* default */
+static MYSQL_THDVAR_BOOL(show_hidden, /* name */
+ PLUGIN_VAR_RQCMDARG,
+ "Control if tables should be visible or not",
+ NULL, /* check func. */
+ NULL, /* update func. */
+ false /* default */
);
-static char* opt_ndbinfo_dbname = const_cast<char*>("ndbinfo");
-static MYSQL_SYSVAR_STR(
- database, /* name */
- opt_ndbinfo_dbname, /* var */
- PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
- "Name of the database used by ndbinfo",
- NULL, /* check func. */
- NULL, /* update func. */
- NULL /* default */
+static char *opt_ndbinfo_dbname = const_cast<char *>("ndbinfo");
+static MYSQL_SYSVAR_STR(database, /* name */
+ opt_ndbinfo_dbname, /* var */
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Name of the database used by ndbinfo",
+ NULL, /* check func. */
+ NULL, /* update func. */
+ NULL /* default */
);
-static char* opt_ndbinfo_table_prefix = const_cast<char*>("ndb$");
-static MYSQL_SYSVAR_STR(
- table_prefix, /* name */
- opt_ndbinfo_table_prefix, /* var */
- PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
- "Prefix to use for all virtual tables loaded from NDB",
- NULL, /* check func. */
- NULL, /* update func. */
- NULL /* default */
+static char *opt_ndbinfo_table_prefix = const_cast<char *>("ndb$");
+static MYSQL_SYSVAR_STR(table_prefix, /* name */
+ opt_ndbinfo_table_prefix, /* var */
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Prefix to use for all virtual tables loaded from NDB",
+ NULL, /* check func. */
+ NULL, /* update func. */
+ NULL /* default */
);
static Uint32 opt_ndbinfo_version = NDB_VERSION_D;
-static MYSQL_SYSVAR_UINT(
- version, /* name */
- opt_ndbinfo_version, /* var */
- PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_READONLY | PLUGIN_VAR_NOPERSIST,
- "Compile version for ndbinfo",
- NULL, /* check func. */
- NULL, /* update func. */
- 0, /* default */
- 0, /* min */
- 0, /* max */
- 0 /* block */
+static MYSQL_SYSVAR_UINT(version, /* name */
+ opt_ndbinfo_version, /* var */
+ PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_READONLY |
+ PLUGIN_VAR_NOPERSIST,
+ "Compile version for ndbinfo", NULL, /* check func. */
+ NULL, /* update func. */
+ 0, /* default */
+ 0, /* min */
+ 0, /* max */
+ 0 /* block */
);
static bool opt_ndbinfo_offline;
-static
-void
-offline_update(THD*, SYS_VAR*, void*, const void* save)
-{
+static void offline_update(THD *, SYS_VAR *, void *, const void *save) {
DBUG_ENTER("offline_update");
- const bool new_offline =
- (*(static_cast<const bool*>(save)) != 0);
- if (new_offline == opt_ndbinfo_offline)
- {
+ const bool new_offline = (*(static_cast<const bool *>(save)) != 0);
+ if (new_offline == opt_ndbinfo_offline) {
// No change
DBUG_VOID_RETURN;
}
@@ -134,47 +125,40 @@ offline_update(THD*, SYS_VAR*, void*, const void* save)
DBUG_VOID_RETURN;
}
-static MYSQL_SYSVAR_BOOL(
- offline, /* name */
- opt_ndbinfo_offline, /* var */
- PLUGIN_VAR_NOCMDOPT,
- "Set ndbinfo in offline mode, tables and views can "
- "be opened even if they don't exist or have different "
- "definition in NDB. No rows will be returned.",
- NULL, /* check func. */
- offline_update, /* update func. */
- 0 /* default */
+static MYSQL_SYSVAR_BOOL(offline, /* name */
+ opt_ndbinfo_offline, /* var */
+ PLUGIN_VAR_NOCMDOPT,
+ "Set ndbinfo in offline mode, tables and views can "
+ "be opened even if they don't exist or have different "
+ "definition in NDB. No rows will be returned.",
+ NULL, /* check func. */
+ offline_update, /* update func. */
+ 0 /* default */
);
+static NdbInfo *g_ndbinfo;
-static NdbInfo* g_ndbinfo;
-
-extern Ndb_cluster_connection* g_ndb_cluster_connection;
+extern Ndb_cluster_connection *g_ndb_cluster_connection;
-static bool
-ndbcluster_is_disabled(void)
-{
+static bool ndbcluster_is_disabled(void) {
/*
ndbinfo uses the same connection as ndbcluster
to avoid using up another nodeid, this also means
that if ndbcluster is not enabled, ndbinfo won't
start
*/
- if (g_ndb_cluster_connection)
- return false;
+ if (g_ndb_cluster_connection) return false;
assert(g_ndbinfo == NULL);
return true;
}
-static handler*
-create_handler(handlerton *hton, TABLE_SHARE *table, bool, MEM_ROOT *mem_root)
-{
+static handler *create_handler(handlerton *hton, TABLE_SHARE *table, bool,
+ MEM_ROOT *mem_root) {
return new (mem_root) ha_ndbinfo(hton, table);
}
-struct ha_ndbinfo_impl
-{
- const NdbInfo::Table* m_table;
- NdbInfoScanOperation* m_scan_op;
+struct ha_ndbinfo_impl {
+ const NdbInfo::Table *m_table;
+ NdbInfoScanOperation *m_scan_op;
Vector<const NdbInfoRecAttr*> m_columns;
bool m_first_use;
@@ -182,48 +166,30 @@ struct ha_ndbinfo_impl
// can only be reset by closing the table
bool m_offline;
- ha_ndbinfo_impl() :
- m_table(NULL),
- m_scan_op(NULL),
- m_first_use(true),
- m_offline(false)
- {
- }
+ ha_ndbinfo_impl()
+ : m_table(NULL), m_scan_op(NULL), m_first_use(true), m_offline(false) {}
};
ha_ndbinfo::ha_ndbinfo(handlerton *hton, TABLE_SHARE *table_arg)
-: handler(hton, table_arg), m_impl(*new ha_ndbinfo_impl)
-{
-}
+ : handler(hton, table_arg), m_impl(*new ha_ndbinfo_impl) {}
-ha_ndbinfo::~ha_ndbinfo()
-{
- delete &m_impl;
-}
+ha_ndbinfo::~ha_ndbinfo() { delete &m_impl; }
-enum ndbinfo_error_codes {
- ERR_INCOMPAT_TABLE_DEF = 40001
-};
+enum ndbinfo_error_codes { ERR_INCOMPAT_TABLE_DEF = 40001 };
-static
-struct error_message {
+static struct error_message {
int error;
- const char* message;
+ const char *message;
} error_messages[] = {
- { ERR_INCOMPAT_TABLE_DEF, "Incompatible table definitions" },
- { HA_ERR_NO_CONNECTION, "Connection to NDB failed" },
+ {ERR_INCOMPAT_TABLE_DEF, "Incompatible table definitions"},
+ {HA_ERR_NO_CONNECTION, "Connection to NDB failed"},
- { 0, 0 }
-};
+ {0, 0}};
-static
-const char* find_error_message(int error)
-{
- struct error_message*
err = error_messages; - while (err->error && err->message) - { - if (err->error == error) - { +static const char *find_error_message(int error) { + struct error_message *err = error_messages; + while (err->error && err->message) { + if (err->error == error) { assert(err->message); return err->message; } @@ -232,21 +198,19 @@ const char* find_error_message(int error) return NULL; } -static int err2mysql(int error) -{ +static int err2mysql(int error) { DBUG_ENTER("err2mysql"); DBUG_PRINT("enter", ("error: %d", error)); assert(error != 0); - switch(error) - { - case NdbInfo::ERR_ClusterFailure: - DBUG_RETURN(HA_ERR_NO_CONNECTION); - break; - case NdbInfo::ERR_OutOfMemory: - DBUG_RETURN(HA_ERR_OUT_OF_MEM); - break; - default: - break; + switch (error) { + case NdbInfo::ERR_ClusterFailure: + DBUG_RETURN(HA_ERR_NO_CONNECTION); + break; + case NdbInfo::ERR_OutOfMemory: + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + break; + default: + break; } { char errbuf[MYSQL_ERRMSG_SIZE]; @@ -257,51 +221,45 @@ static int err2mysql(int error) DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } -bool ha_ndbinfo::get_error_message(int error, String *buf) -{ +bool ha_ndbinfo::get_error_message(int error, String *buf) { DBUG_ENTER("ha_ndbinfo::get_error_message"); DBUG_PRINT("enter", ("error: %d", error)); - const char* message = find_error_message(error); - if (!message) - DBUG_RETURN(false); + const char *message = find_error_message(error); + if (!message) DBUG_RETURN(false); buf->set(message, (uint32)strlen(message), &my_charset_bin); DBUG_PRINT("exit", ("message: %s", buf->ptr())); DBUG_RETURN(false); } -static void -generate_sql(const NdbInfo::Table* ndb_tab, BaseString& sql) -{ - sql.appfmt("'CREATE TABLE `%s`.`%s%s` (", - opt_ndbinfo_dbname, opt_ndbinfo_table_prefix, ndb_tab->getName()); +static void generate_sql(const NdbInfo::Table *ndb_tab, BaseString &sql) { + sql.appfmt("'CREATE TABLE `%s`.`%s%s` (", opt_ndbinfo_dbname, + opt_ndbinfo_table_prefix, ndb_tab->getName()); - const char* separator = ""; - for (unsigned i = 0; i < ndb_tab->columns(); i++) - { - const NdbInfo::Column* col = ndb_tab->getColumn(i); + const char *separator = ""; + for (unsigned i = 0; i < ndb_tab->columns(); i++) { + const NdbInfo::Column *col = ndb_tab->getColumn(i); sql.appfmt("%s", separator); separator = ", "; sql.appfmt("`%s` ", col->m_name.c_str()); - switch(col->m_type) - { - case NdbInfo::Column::Number: - sql.appfmt("INT UNSIGNED"); - break; - case NdbInfo::Column::Number64: - sql.appfmt("BIGINT UNSIGNED"); - break; - case NdbInfo::Column::String: - sql.appfmt("VARCHAR(512)"); - break; - default: - sql.appfmt("UNKNOWN"); - assert(false); - break; + switch (col->m_type) { + case NdbInfo::Column::Number: + sql.appfmt("INT UNSIGNED"); + break; + case NdbInfo::Column::Number64: + sql.appfmt("BIGINT UNSIGNED"); + break; + case NdbInfo::Column::String: + sql.appfmt("VARCHAR(512)"); + break; + default: + sql.appfmt("UNKNOWN"); + assert(false); + break; } } sql.appfmt(") ENGINE=NDBINFO'"); @@ -312,87 +270,71 @@ generate_sql(const NdbInfo::Table* ndb_tab, BaseString& sql) proper SQL so the user can regenerate the table definition */ -static void -warn_incompatible(const NdbInfo::Table* ndb_tab, bool fatal, - const char* format, ...) - MY_ATTRIBUTE((format(printf, 3, 4))); +static void warn_incompatible(const NdbInfo::Table *ndb_tab, bool fatal, + const char *format, ...) + MY_ATTRIBUTE((format(printf, 3, 4))); -static void -warn_incompatible(const NdbInfo::Table* ndb_tab, bool fatal, - const char* format, ...) 
-{ +static void warn_incompatible(const NdbInfo::Table *ndb_tab, bool fatal, + const char *format, ...) { BaseString msg; DBUG_ENTER("warn_incompatible"); - DBUG_PRINT("enter",("table_name: %s, fatal: %d", ndb_tab->getName(), fatal)); + DBUG_PRINT("enter", ("table_name: %s, fatal: %d", ndb_tab->getName(), fatal)); DBUG_ASSERT(format != NULL); va_list args; char explanation[128]; - va_start(args,format); + va_start(args, format); vsnprintf(explanation, sizeof(explanation), format, args); va_end(args); - msg.assfmt("Table '%s%s' is defined differently in NDB, %s. The " - "SQL to regenerate is: ", - opt_ndbinfo_table_prefix, ndb_tab->getName(), explanation); + msg.assfmt( + "Table '%s%s' is defined differently in NDB, %s. The " + "SQL to regenerate is: ", + opt_ndbinfo_table_prefix, ndb_tab->getName(), explanation); generate_sql(ndb_tab, msg); const Sql_condition::enum_severity_level level = - (fatal ? Sql_condition::SL_WARNING : Sql_condition::SL_NOTE); + (fatal ? Sql_condition::SL_WARNING : Sql_condition::SL_NOTE); push_warning(current_thd, level, ERR_INCOMPAT_TABLE_DEF, msg.c_str()); DBUG_VOID_RETURN; } -int ha_ndbinfo::create(const char*, TABLE*, HA_CREATE_INFO*, dd::Table*) -{ +int ha_ndbinfo::create(const char *, TABLE *, HA_CREATE_INFO *, dd::Table *) { DBUG_ENTER("ha_ndbinfo::create"); DBUG_RETURN(0); } -bool ha_ndbinfo::is_open(void) const -{ - return m_impl.m_table != NULL; -} +bool ha_ndbinfo::is_open(void) const { return m_impl.m_table != NULL; } -bool ha_ndbinfo::is_offline(void) const -{ - return m_impl.m_offline; -} +bool ha_ndbinfo::is_offline(void) const { return m_impl.m_offline; } -int ha_ndbinfo::open(const char* name, int mode, uint, const dd::Table*) -{ +int ha_ndbinfo::open(const char *name, int mode, uint, const dd::Table *) { DBUG_ENTER("ha_ndbinfo::open"); DBUG_PRINT("enter", ("name: %s, mode: %d", name, mode)); assert(is_closed()); - assert(!is_offline()); // Closed table can not be offline + assert(!is_offline()); // Closed table can not be offline - if (mode == O_RDWR) - { - if (table->db_stat & HA_TRY_READ_ONLY) - { + if (mode == O_RDWR) { + if (table->db_stat & HA_TRY_READ_ONLY) { DBUG_PRINT("info", ("Telling server to use readonly mode")); - DBUG_RETURN(EROFS); // Read only fs + DBUG_RETURN(EROFS); // Read only fs } // Find any commands that does not allow open readonly DBUG_ASSERT(false); } - if (opt_ndbinfo_offline || - ndbcluster_is_disabled()) - { + if (opt_ndbinfo_offline || ndbcluster_is_disabled()) { // Mark table as being offline and allow it to be opened m_impl.m_offline = true; DBUG_RETURN(0); } int err = g_ndbinfo->openTable(name, &m_impl.m_table); - if (err) - { + if (err) { assert(m_impl.m_table == 0); - if (err == NdbInfo::ERR_NoSuchTable) - DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); + if (err == NdbInfo::ERR_NoSuchTable) DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); DBUG_RETURN(err2mysql(err)); } @@ -402,57 +344,49 @@ int ha_ndbinfo::open(const char* name, int mode, uint, const dd::Table*) is checked on first use */ DBUG_PRINT("info", ("Comparing MySQL's table def against NDB")); - const NdbInfo::Table* ndb_tab = m_impl.m_table; - for (uint i = 0; i < table->s->fields; i++) - { - const Field* field = table->field[i]; + const NdbInfo::Table *ndb_tab = m_impl.m_table; + for (uint i = 0; i < table->s->fields; i++) { + const Field *field = table->field[i]; // Check if field is NULLable - if (const_cast(field)->real_maybe_null() == false) - { + if (const_cast(field)->real_maybe_null() == false) { // Only NULLable fields supported - warn_incompatible(ndb_tab, true, - 
"column '%s' is NOT NULL", + warn_incompatible(ndb_tab, true, "column '%s' is NOT NULL", field->field_name); - delete m_impl.m_table; m_impl.m_table= 0; + delete m_impl.m_table; + m_impl.m_table = 0; DBUG_RETURN(ERR_INCOMPAT_TABLE_DEF); } // Check if column exist in NDB - const NdbInfo::Column* col = ndb_tab->getColumn(field->field_name); - if (!col) - { + const NdbInfo::Column *col = ndb_tab->getColumn(field->field_name); + if (!col) { // The column didn't exist continue; } // Check compatible field and column type bool compatible = false; - switch(col->m_type) - { - case NdbInfo::Column::Number: - if (field->type() == MYSQL_TYPE_LONG) - compatible = true; - break; - case NdbInfo::Column::Number64: - if (field->type() == MYSQL_TYPE_LONGLONG) - compatible = true; - break; - case NdbInfo::Column::String: - if (field->type() == MYSQL_TYPE_VARCHAR) - compatible = true; - break; - default: - assert(false); - break; + switch (col->m_type) { + case NdbInfo::Column::Number: + if (field->type() == MYSQL_TYPE_LONG) compatible = true; + break; + case NdbInfo::Column::Number64: + if (field->type() == MYSQL_TYPE_LONGLONG) compatible = true; + break; + case NdbInfo::Column::String: + if (field->type() == MYSQL_TYPE_VARCHAR) compatible = true; + break; + default: + assert(false); + break; } - if (!compatible) - { + if (!compatible) { // The column type is not compatible - warn_incompatible(ndb_tab, true, - "column '%s' is not compatible", + warn_incompatible(ndb_tab, true, "column '%s' is not compatible", field->field_name); - delete m_impl.m_table; m_impl.m_table= 0; + delete m_impl.m_table; + m_impl.m_table = 0; DBUG_RETURN(ERR_INCOMPAT_TABLE_DEF); } } @@ -466,29 +400,24 @@ int ha_ndbinfo::open(const char* name, int mode, uint, const dd::Table*) DBUG_RETURN(0); } -int ha_ndbinfo::close(void) -{ +int ha_ndbinfo::close(void) { DBUG_ENTER("ha_ndbinfo::close"); - if (is_offline()) - DBUG_RETURN(0); + if (is_offline()) DBUG_RETURN(0); assert(is_open()); - if (m_impl.m_table) - { + if (m_impl.m_table) { g_ndbinfo->closeTable(m_impl.m_table); m_impl.m_table = NULL; } DBUG_RETURN(0); } -int ha_ndbinfo::rnd_init(bool scan) -{ +int ha_ndbinfo::rnd_init(bool scan) { DBUG_ENTER("ha_ndbinfo::rnd_init"); DBUG_PRINT("info", ("scan: %d", scan)); - if (is_offline()) - { + if (is_offline()) { push_warning(current_thd, Sql_condition::SL_NOTE, 1, "'NDBINFO' has been started in offline mode " "since the 'NDBCLUSTER' engine is disabled " @@ -499,8 +428,7 @@ int ha_ndbinfo::rnd_init(bool scan) assert(is_open()); - if (m_impl.m_scan_op) - { + if (m_impl.m_scan_op) { /* It should be impossible to come here with an already open scan, assumption is that rnd_end() would be called to indicate @@ -513,7 +441,7 @@ int ha_ndbinfo::rnd_init(bool scan) it back to first row. For ha_ndbinfo this means closing the scan and letting it be reopened. */ - assert(scan); // "only makes sense if scan=1" (from rnd_init() description) + assert(scan); // "only makes sense if scan=1" (from rnd_init() description) DBUG_PRINT("info", ("Closing scan to position it back to first row")); @@ -525,10 +453,9 @@ int ha_ndbinfo::rnd_init(bool scan) m_impl.m_columns.clear(); } - assert(m_impl.m_scan_op == NULL); // No scan already ongoing + assert(m_impl.m_scan_op == NULL); // No scan already ongoing - if (m_impl.m_first_use) - { + if (m_impl.m_first_use) { m_impl.m_first_use = false; /* @@ -539,56 +466,47 @@ int ha_ndbinfo::rnd_init(bool scan) are lost). 
*/ uint fields_found_in_ndb = 0; - const NdbInfo::Table* ndb_tab = m_impl.m_table; - for (uint i = 0; i < table->s->fields; i++) - { - const Field* field = table->field[i]; - const NdbInfo::Column* col = ndb_tab->getColumn(field->field_name); - if (!col) - { + const NdbInfo::Table *ndb_tab = m_impl.m_table; + for (uint i = 0; i < table->s->fields; i++) { + const Field *field = table->field[i]; + const NdbInfo::Column *col = ndb_tab->getColumn(field->field_name); + if (!col) { // The column didn't exist - warn_incompatible(ndb_tab, true, - "column '%s' does not exist", + warn_incompatible(ndb_tab, true, "column '%s' does not exist", field->field_name); continue; } fields_found_in_ndb++; } - if (fields_found_in_ndb < ndb_tab->columns()) - { + if (fields_found_in_ndb < ndb_tab->columns()) { // There are more columns available in NDB - warn_incompatible(ndb_tab, false, - "there are more columns available"); + warn_incompatible(ndb_tab, false, "there are more columns available"); } } - if (!scan) - { + if (!scan) { // Just an init to read using 'rnd_pos' DBUG_PRINT("info", ("not scan")); DBUG_RETURN(0); } - THD* thd = current_thd; + THD *thd = current_thd; int err; - NdbInfoScanOperation* scan_op = NULL; - if ((err = g_ndbinfo->createScanOperation(m_impl.m_table, - &scan_op, + NdbInfoScanOperation *scan_op = NULL; + if ((err = g_ndbinfo->createScanOperation(m_impl.m_table, &scan_op, THDVAR(thd, max_rows), THDVAR(thd, max_bytes))) != 0) DBUG_RETURN(err2mysql(err)); - if ((err = scan_op->readTuples()) != 0) - { + if ((err = scan_op->readTuples()) != 0) { // Release the scan operation g_ndbinfo->releaseScanOperation(scan_op); DBUG_RETURN(err2mysql(err)); } /* Read all columns specified in read_set */ - for (uint i = 0; i < table->s->fields; i++) - { + for (uint i = 0; i < table->s->fields; i++) { Field *field = table->field[i]; if (bitmap_is_set(table->read_set, i)) m_impl.m_columns.push_back(scan_op->getValue(field->field_name)); @@ -596,8 +514,7 @@ int ha_ndbinfo::rnd_init(bool scan) m_impl.m_columns.push_back(NULL); } - if ((err = scan_op->execute()) != 0) - { + if ((err = scan_op->execute()) != 0) { // Release pointers to the columns m_impl.m_columns.clear(); // Release the scan operation @@ -609,17 +526,14 @@ int ha_ndbinfo::rnd_init(bool scan) DBUG_RETURN(0); } -int ha_ndbinfo::rnd_end() -{ +int ha_ndbinfo::rnd_end() { DBUG_ENTER("ha_ndbinfo::rnd_end"); - if (is_offline()) - DBUG_RETURN(0); + if (is_offline()) DBUG_RETURN(0); assert(is_open()); - if (m_impl.m_scan_op) - { + if (m_impl.m_scan_op) { g_ndbinfo->releaseScanOperation(m_impl.m_scan_op); m_impl.m_scan_op = NULL; } @@ -628,18 +542,15 @@ int ha_ndbinfo::rnd_end() DBUG_RETURN(0); } -int ha_ndbinfo::rnd_next(uchar *buf) -{ +int ha_ndbinfo::rnd_next(uchar *buf) { int err; DBUG_ENTER("ha_ndbinfo::rnd_next"); - if (is_offline()) - DBUG_RETURN(HA_ERR_END_OF_FILE); + if (is_offline()) DBUG_RETURN(HA_ERR_END_OF_FILE); assert(is_open()); - if (!m_impl.m_scan_op) - { + if (!m_impl.m_scan_op) { /* It should be impossible to come here without a scan operation. 
But apparently it's not safe to assume that rnd_next() isn't @@ -652,30 +563,26 @@ int ha_ndbinfo::rnd_next(uchar *buf) if ((err = m_impl.m_scan_op->nextResult()) == 0) DBUG_RETURN(HA_ERR_END_OF_FILE); - if (err != 1) - DBUG_RETURN(err2mysql(err)); + if (err != 1) DBUG_RETURN(err2mysql(err)); unpack_record(buf); DBUG_RETURN(0); } -int ha_ndbinfo::rnd_pos(uchar *buf, uchar *pos) -{ +int ha_ndbinfo::rnd_pos(uchar *buf, uchar *pos) { DBUG_ENTER("ha_ndbinfo::rnd_pos"); assert(is_open()); - assert(m_impl.m_scan_op == NULL); // No scan started + assert(m_impl.m_scan_op == NULL); // No scan started /* Copy the saved row into "buf" and set all fields to not null */ memcpy(buf, pos, ref_length); - for (uint i = 0; i < table->s->fields; i++) - table->field[i]->set_notnull(); + for (uint i = 0; i < table->s->fields; i++) table->field[i]->set_notnull(); DBUG_RETURN(0); } -void ha_ndbinfo::position(const uchar *record) -{ +void ha_ndbinfo::position(const uchar *record) { DBUG_ENTER("ha_ndbinfo::position"); assert(is_open()); assert(m_impl.m_scan_op); @@ -686,95 +593,76 @@ void ha_ndbinfo::position(const uchar *record) DBUG_VOID_RETURN; } -int ha_ndbinfo::info(uint) -{ +int ha_ndbinfo::info(uint) { DBUG_ENTER("ha_ndbinfo::info"); DBUG_RETURN(0); } -void -ha_ndbinfo::unpack_record(uchar *dst_row) -{ +void ha_ndbinfo::unpack_record(uchar *dst_row) { DBUG_ENTER("ha_ndbinfo::unpack_record"); ptrdiff_t dst_offset = dst_row - table->record[0]; - for (uint i = 0; i < table->s->fields; i++) - { + for (uint i = 0; i < table->s->fields; i++) { Field *field = table->field[i]; - const NdbInfoRecAttr* record = m_impl.m_columns[i]; - if (record && !record->isNULL()) - { + const NdbInfoRecAttr *record = m_impl.m_columns[i]; + if (record && !record->isNULL()) { field->set_notnull(); field->move_field_offset(dst_offset); switch (field->type()) { - - case (MYSQL_TYPE_VARCHAR): - { - DBUG_PRINT("info", ("str: %s", record->c_str())); - Field_varstring* vfield = (Field_varstring *) field; - /* Field_bit in DBUG requires the bit set in write_set for store(). */ - my_bitmap_map *old_map = - dbug_tmp_use_all_columns(table, table->write_set); - (void)vfield->store(record->c_str(), - MIN(record->length(), field->field_length)-1, - field->charset()); - dbug_tmp_restore_column_map(table->write_set, old_map); - break; - } - - case (MYSQL_TYPE_LONG): - { - memcpy(field->ptr, record->ptr(), sizeof(Uint32)); - break; - } - - case (MYSQL_TYPE_LONGLONG): - { - memcpy(field->ptr, record->ptr(), sizeof(Uint64)); - break; - } - - default: - ndb_log_error("Found unexpected field type %u", field->type()); - break; + case (MYSQL_TYPE_VARCHAR): { + DBUG_PRINT("info", ("str: %s", record->c_str())); + Field_varstring *vfield = (Field_varstring *)field; + /* Field_bit in DBUG requires the bit set in write_set for store(). 
*/
+ my_bitmap_map *old_map =
+ dbug_tmp_use_all_columns(table, table->write_set);
+ (void)vfield->store(record->c_str(),
+ MIN(record->length(), field->field_length) - 1,
+ field->charset());
+ dbug_tmp_restore_column_map(table->write_set, old_map);
+ break;
+ }
+
+ case (MYSQL_TYPE_LONG): {
+ memcpy(field->ptr, record->ptr(), sizeof(Uint32));
+ break;
+ }
+
+ case (MYSQL_TYPE_LONGLONG): {
+ memcpy(field->ptr, record->ptr(), sizeof(Uint64));
+ break;
+ }
+
+ default:
+ ndb_log_error("Found unexpected field type %u", field->type());
+ break;
}
field->move_field_offset(-dst_offset);
- }
- else
- {
+ } else {
field->set_null();
}
}
DBUG_VOID_RETURN;
}
-
-static int
-ndbinfo_find_files(handlerton*, THD* thd,
- const char *db, const char*,
- const char*, bool dir, List<LEX_STRING> *files)
-{
+static int ndbinfo_find_files(handlerton *, THD *thd, const char *db,
+ const char *, const char *, bool dir,
+ List<LEX_STRING> *files) {
DBUG_ENTER("ndbinfo_find_files");
DBUG_PRINT("enter", ("db: '%s', dir: %d", db, dir));
const bool show_hidden = THDVAR(thd, show_hidden);
- if(show_hidden)
- DBUG_RETURN(0); // Don't filter out anything
+ if (show_hidden) DBUG_RETURN(0); // Don't filter out anything
- if (dir)
- {
- if (!ndbcluster_is_disabled())
- DBUG_RETURN(0);
+ if (dir) {
+ if (!ndbcluster_is_disabled()) DBUG_RETURN(0);
// Hide our database when ndbcluster is disabled
LEX_STRING *dir_name;
List_iterator<LEX_STRING> it(*files);
- while ((dir_name=it++))
- {
- if (strcmp(dir_name->str, opt_ndbinfo_dbname))
- continue;
+ while ((dir_name = it++)) {
+ if (strcmp(dir_name->str, opt_ndbinfo_dbname)) continue;
DBUG_PRINT("info", ("Hiding own database '%s'", dir_name->str));
it.remove();
@@ -785,15 +673,13 @@ ndbinfo_find_files(handlerton*, THD* thd,
DBUG_ASSERT(db);
if (strcmp(db, opt_ndbinfo_dbname))
- DBUG_RETURN(0); // Only hide files in "our" db
+ DBUG_RETURN(0); // Only hide files in "our" db
/* Hide all files that start with "our" prefix */
LEX_STRING *file_name;
List_iterator<LEX_STRING> it(*files);
- while ((file_name=it++))
- {
- if (is_prefix(file_name->str, opt_ndbinfo_table_prefix))
- {
+ while ((file_name = it++)) {
+ if (is_prefix(file_name->str, opt_ndbinfo_table_prefix)) {
DBUG_PRINT("info", ("Hiding '%s'", file_name->str));
it.remove();
}
@@ -802,17 +688,12 @@ ndbinfo_find_files(handlerton*, THD* thd,
DBUG_RETURN(0);
}
-static
-int
-ndbinfo_init(void *plugin)
-{
+static int ndbinfo_init(void *plugin) {
DBUG_ENTER("ndbinfo_init");
- handlerton *hton = (handlerton *) plugin;
+ handlerton *hton = (handlerton *)plugin;
hton->create = create_handler;
- hton->flags =
- HTON_TEMPORARY_NOT_SUPPORTED |
- HTON_ALTER_NOT_SUPPORTED;
+ hton->flags = HTON_TEMPORARY_NOT_SUPPORTED | HTON_ALTER_NOT_SUPPORTED;
hton->find_files = ndbinfo_find_files;
{
@@ -827,28 +708,25 @@ ndbinfo_init(void *plugin)
hton->sdi_delete = ndb_dummy_ts::sdi_delete;
}
- if (ndbcluster_is_disabled())
- {
+ if (ndbcluster_is_disabled()) {
// Starting in limited mode since ndbcluster is disabled
- DBUG_RETURN(0);
+ DBUG_RETURN(0);
}
char prefix[FN_REFLEN];
- build_table_filename(prefix, sizeof(prefix) - 1,
- opt_ndbinfo_dbname, opt_ndbinfo_table_prefix, "", 0);
+ build_table_filename(prefix, sizeof(prefix) - 1, opt_ndbinfo_dbname,
+ opt_ndbinfo_table_prefix, "", 0);
DBUG_PRINT("info", ("prefix: '%s'", prefix));
assert(g_ndb_cluster_connection);
- g_ndbinfo = new (std::nothrow) NdbInfo(g_ndb_cluster_connection, prefix,
- opt_ndbinfo_dbname,
- opt_ndbinfo_table_prefix);
- if (!g_ndbinfo)
- {
+ g_ndbinfo =
+ new (std::nothrow) NdbInfo(g_ndb_cluster_connection, prefix,
+ opt_ndbinfo_dbname, opt_ndbinfo_table_prefix);
+ if (!g_ndbinfo) {
ndb_log_error("Failed to create NdbInfo");
DBUG_RETURN(1);
}
- if (!g_ndbinfo->init())
- {
+ if (!g_ndbinfo->init()) {
ndb_log_error("Failed to init NdbInfo");
delete g_ndbinfo;
@@ -860,14 +738,10 @@ ndbinfo_init(void *plugin)
DBUG_RETURN(0);
}
-static
-int
-ndbinfo_deinit(void*)
-{
+static int ndbinfo_deinit(void *) {
DBUG_ENTER("ndbinfo_deinit");
- if (g_ndbinfo)
- {
+ if (g_ndbinfo) {
delete g_ndbinfo;
g_ndbinfo = NULL;
}
@@ -875,39 +749,33 @@
DBUG_RETURN(0);
}
-SYS_VAR* ndbinfo_system_variables[]= {
- MYSQL_SYSVAR(max_rows),
- MYSQL_SYSVAR(max_bytes),
- MYSQL_SYSVAR(show_hidden),
- MYSQL_SYSVAR(database),
- MYSQL_SYSVAR(table_prefix),
- MYSQL_SYSVAR(version),
- MYSQL_SYSVAR(offline),
-
- NULL
-};
-
-struct st_mysql_storage_engine ndbinfo_storage_engine=
-{
- MYSQL_HANDLERTON_INTERFACE_VERSION
-};
-
-struct st_mysql_plugin ndbinfo_plugin =
-{
- MYSQL_STORAGE_ENGINE_PLUGIN,
- &ndbinfo_storage_engine,
- "ndbinfo",
- "Sun Microsystems Inc.",
- "MySQL Cluster system information storage engine",
- PLUGIN_LICENSE_GPL,
- ndbinfo_init, /* plugin init */
- NULL, /* plugin uninstall check */
- ndbinfo_deinit, /* plugin deinit */
- 0x0001, /* plugin version */
- NULL, /* status variables */
- ndbinfo_system_variables, /* system variables */
- NULL, /* config options */
- 0
-};
-
-template class Vector<const NdbInfoRecAttr*>;
+SYS_VAR *ndbinfo_system_variables[] = {MYSQL_SYSVAR(max_rows),
+ MYSQL_SYSVAR(max_bytes),
+ MYSQL_SYSVAR(show_hidden),
+ MYSQL_SYSVAR(database),
+ MYSQL_SYSVAR(table_prefix),
+ MYSQL_SYSVAR(version),
+ MYSQL_SYSVAR(offline),
+
+ NULL};
+
+struct st_mysql_storage_engine ndbinfo_storage_engine = {
+ MYSQL_HANDLERTON_INTERFACE_VERSION};
+
+struct st_mysql_plugin ndbinfo_plugin = {
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &ndbinfo_storage_engine,
+ "ndbinfo",
+ "Sun Microsystems Inc.",
+ "MySQL Cluster system information storage engine",
+ PLUGIN_LICENSE_GPL,
+ ndbinfo_init, /* plugin init */
+ NULL, /* plugin uninstall check */
+ ndbinfo_deinit, /* plugin deinit */
+ 0x0001, /* plugin version */
+ NULL, /* status variables */
+ ndbinfo_system_variables, /* system variables */
+ NULL, /* config options */
+ 0};
+
+template class Vector<const NdbInfoRecAttr *>;
diff --git a/storage/ndb/plugin/ha_ndbinfo.h b/storage/ndb/plugin/ha_ndbinfo.h
index d1f661168013..cea168e3e19f 100644
--- a/storage/ndb/plugin/ha_ndbinfo.h
+++ b/storage/ndb/plugin/ha_ndbinfo.h
@@ -25,9 +25,8 @@
#include "sql/handler.h"
-class ha_ndbinfo: public handler
-{
-public:
+class ha_ndbinfo : public handler {
+ public:
ha_ndbinfo(handlerton *hton, TABLE_SHARE *table_arg);
~ha_ndbinfo() override;
@@ -35,12 +34,9 @@ class ha_ndbinfo: public handler
ulonglong table_flags() const override {
return HA_NO_TRANSACTIONS | HA_NO_BLOBS | HA_NO_AUTO_INCREMENT;
}
- ulong index_flags(uint, uint, bool) const override {
- return 0;
- }
+ ulong index_flags(uint, uint, bool) const override { return 0; }
- int create(const char *name, TABLE *form,
- HA_CREATE_INFO *create_info,
+ int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info,
dd::Table *table_def) override;
int open(const char *name, int mode, uint test_if_locked,
@@ -78,16 +74,15 @@ class ha_ndbinfo: public handler
return HA_POS_ERROR;
}
-private:
+ private:
void unpack_record(uchar *dst_row);
bool is_open(void) const;
- bool is_closed(void) const { return !
is_open(); } + bool is_closed(void) const { return !is_open(); } bool is_offline(void) const; - struct ha_ndbinfo_impl& m_impl; - + struct ha_ndbinfo_impl &m_impl; }; #endif diff --git a/storage/ndb/plugin/ndb_anyvalue.cc b/storage/ndb/plugin/ndb_anyvalue.cc index db14348667a0..018ee4c87543 100644 --- a/storage/ndb/plugin/ndb_anyvalue.cc +++ b/storage/ndb/plugin/ndb_anyvalue.cc @@ -29,15 +29,15 @@ Bits from opt_server_id_bits to 30 may carry other data so we ignore them when reading/setting AnyValue. - The idea with supporting 'other data' is to allow NdbApi + The idea with supporting 'other data' is to allow NdbApi users to tag their NdbApi operations in some way that can be picked up at NdbApi event receivers, *without* interacting badly with / disabling normal binlogging and replication. - + To achieve this, we have a variable sized mask of bits in the *middle* of the AnyValue word which can be used to mask out the user data for the purpose of the MySQL Server. - + A better future approach would be to support > 1 tag word per operation. @@ -57,12 +57,12 @@ At least 7 bits will be available for serverid or reserved codes - Implications : + Implications : Reserved codes can use values between 0x80000000 and 0x8000007f inclusive (256 values). 0x8000007f was always the 'nologging' - code, so the others have started + code, so the others have started 'counting' down from there Examples : @@ -75,10 +75,10 @@ opt_server_id_bits= 7 - ServerIds can be up to 2^7-1 - User specific data can be up to 2^24-1 - - ServerIds have 0 top bit, 24 user bits, then + - ServerIds have 0 top bit, 24 user bits, then the serverid - Reserved codes have 1 top bit, 24 user bits (prob - not used much), then the bottom lsbs of the + not used much), then the bottom lsbs of the reserved code. 
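Working the masks through for the opt_server_id_bits = 7 example above makes the layout concrete. The variable names in this sketch are illustrative; the real code uses opt_server_id_mask, which is computed elsewhere from opt_server_id_bits:

  const Uint32 server_id_mask = (1u << 7) - 1;  // 0x0000007f, low 7 bits
  const Uint32 reserved_bit = 0x80000000;       // NDB_ANYVALUE_RESERVED_BIT
  const Uint32 user_data_mask =
      ~(server_id_mask | reserved_bit);         // 0x7fffff80, the 24 middle bits
  // 1 reserved bit + 24 user data bits + 7 serverid bits == 32 bits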
*/
@@ -88,106 +88,89 @@
extern ulong opt_server_id_mask;
-#define NDB_ANYVALUE_RESERVED_BIT 0x80000000
-#define NDB_ANYVALUE_RESERVED_MASK 0x8000007f
+#define NDB_ANYVALUE_RESERVED_BIT 0x80000000
+#define NDB_ANYVALUE_RESERVED_MASK 0x8000007f
#define NDB_ANYVALUE_NOLOGGING_CODE 0x8000007f
#define NDB_ANYVALUE_REFRESH_OP_CODE 0x8000007e
#define NDB_ANYVALUE_REFLECT_OP_CODE 0x8000007d
-#define NDB_ANYVALUE_READ_OP_CODE 0x8000007c
+#define NDB_ANYVALUE_READ_OP_CODE 0x8000007c
/* Next reserved code : 0x8000007b */
-
#ifndef DBUG_OFF
-void dbug_ndbcluster_anyvalue_set_userbits(Uint32& anyValue)
-{
+void dbug_ndbcluster_anyvalue_set_userbits(Uint32 &anyValue) {
/*
Set userData part of AnyValue (if there is one)
to all 1s to test that it is ignored
*/
- const Uint32 userDataMask = ~(opt_server_id_mask |
- NDB_ANYVALUE_RESERVED_BIT);
+ const Uint32 userDataMask = ~(opt_server_id_mask | NDB_ANYVALUE_RESERVED_BIT);
anyValue |= userDataMask;
}
#endif
-bool ndbcluster_anyvalue_is_reserved(Uint32 anyValue)
-{
+bool ndbcluster_anyvalue_is_reserved(Uint32 anyValue) {
return ((anyValue & NDB_ANYVALUE_RESERVED_BIT) != 0);
}
-bool ndbcluster_anyvalue_is_nologging(Uint32 anyValue)
-{
+bool ndbcluster_anyvalue_is_nologging(Uint32 anyValue) {
return ((anyValue & NDB_ANYVALUE_RESERVED_MASK) ==
NDB_ANYVALUE_NOLOGGING_CODE);
}
-void ndbcluster_anyvalue_set_nologging(Uint32& anyValue)
-{
+void ndbcluster_anyvalue_set_nologging(Uint32 &anyValue) {
anyValue |= NDB_ANYVALUE_NOLOGGING_CODE;
}
-bool ndbcluster_anyvalue_is_refresh_op(Uint32 anyValue)
-{
+bool ndbcluster_anyvalue_is_refresh_op(Uint32 anyValue) {
return ((anyValue & NDB_ANYVALUE_RESERVED_MASK) ==
NDB_ANYVALUE_REFRESH_OP_CODE);
}
-void ndbcluster_anyvalue_set_refresh_op(Uint32& anyValue)
-{
+void ndbcluster_anyvalue_set_refresh_op(Uint32 &anyValue) {
anyValue &= ~NDB_ANYVALUE_RESERVED_MASK;
anyValue |= NDB_ANYVALUE_REFRESH_OP_CODE;
}
-bool ndbcluster_anyvalue_is_read_op(Uint32 anyValue)
-{
- return ((anyValue & NDB_ANYVALUE_RESERVED_MASK) ==
- NDB_ANYVALUE_READ_OP_CODE);
+bool ndbcluster_anyvalue_is_read_op(Uint32 anyValue) {
+ return ((anyValue & NDB_ANYVALUE_RESERVED_MASK) == NDB_ANYVALUE_READ_OP_CODE);
}
-void ndbcluster_anyvalue_set_read_op(Uint32& anyValue)
-{
+void ndbcluster_anyvalue_set_read_op(Uint32 &anyValue) {
anyValue &= ~NDB_ANYVALUE_RESERVED_MASK;
anyValue |= NDB_ANYVALUE_READ_OP_CODE;
}
-bool ndbcluster_anyvalue_is_reflect_op(Uint32 anyValue)
-{
+bool ndbcluster_anyvalue_is_reflect_op(Uint32 anyValue) {
return ((anyValue & NDB_ANYVALUE_RESERVED_MASK) ==
NDB_ANYVALUE_REFLECT_OP_CODE);
}
-void ndbcluster_anyvalue_set_reflect_op(Uint32& anyValue)
-{
+void ndbcluster_anyvalue_set_reflect_op(Uint32 &anyValue) {
anyValue &= ~NDB_ANYVALUE_RESERVED_MASK;
anyValue |= NDB_ANYVALUE_REFLECT_OP_CODE;
}
-void ndbcluster_anyvalue_set_normal(Uint32& anyValue)
-{
+void ndbcluster_anyvalue_set_normal(Uint32 &anyValue) {
/* Clear reserved bit and serverid bits */
anyValue &= ~(NDB_ANYVALUE_RESERVED_BIT);
anyValue &= ~(opt_server_id_mask);
}
-bool ndbcluster_anyvalue_is_serverid_in_range(Uint32 serverId)
-{
+bool ndbcluster_anyvalue_is_serverid_in_range(Uint32 serverId) {
return ((serverId & ~opt_server_id_mask) == 0);
}
-Uint32 ndbcluster_anyvalue_get_serverid(Uint32 anyValue)
-{
- assert(!
(anyValue & NDB_ANYVALUE_RESERVED_BIT) ); +Uint32 ndbcluster_anyvalue_get_serverid(Uint32 anyValue) { + assert(!(anyValue & NDB_ANYVALUE_RESERVED_BIT)); return (anyValue & opt_server_id_mask); } -void ndbcluster_anyvalue_set_serverid(Uint32& anyValue, Uint32 serverId) -{ - assert(! (anyValue & NDB_ANYVALUE_RESERVED_BIT) ); +void ndbcluster_anyvalue_set_serverid(Uint32 &anyValue, Uint32 serverId) { + assert(!(anyValue & NDB_ANYVALUE_RESERVED_BIT)); anyValue &= ~(opt_server_id_mask); anyValue |= (serverId & opt_server_id_mask); } diff --git a/storage/ndb/plugin/ndb_anyvalue.h b/storage/ndb/plugin/ndb_anyvalue.h index 15c5ea45f632..667bfe7b5468 100644 --- a/storage/ndb/plugin/ndb_anyvalue.h +++ b/storage/ndb/plugin/ndb_anyvalue.h @@ -30,24 +30,24 @@ bool ndbcluster_anyvalue_is_reserved(Uint32 anyValue); bool ndbcluster_anyvalue_is_nologging(Uint32 anyValue); -void ndbcluster_anyvalue_set_nologging(Uint32& anyValue); +void ndbcluster_anyvalue_set_nologging(Uint32 &anyValue); bool ndbcluster_anyvalue_is_refresh_op(Uint32 anyValue); -void ndbcluster_anyvalue_set_refresh_op(Uint32& anyValue); +void ndbcluster_anyvalue_set_refresh_op(Uint32 &anyValue); bool ndbcluster_anyvalue_is_reflect_op(Uint32 anyValue); -void ndbcluster_anyvalue_set_reflect_op(Uint32& anyValue); +void ndbcluster_anyvalue_set_reflect_op(Uint32 &anyValue); bool ndbcluster_anyvalue_is_read_op(Uint32 anyValue); -void ndbcluster_anyvalue_set_read_op(Uint32& anyValue); +void ndbcluster_anyvalue_set_read_op(Uint32 &anyValue); bool ndbcluster_anyvalue_is_serverid_in_range(Uint32 serverId); -void ndbcluster_anyvalue_set_normal(Uint32& anyValue); +void ndbcluster_anyvalue_set_normal(Uint32 &anyValue); Uint32 ndbcluster_anyvalue_get_serverid(Uint32 anyValue); -void ndbcluster_anyvalue_set_serverid(Uint32& anyValue, Uint32 serverId); +void ndbcluster_anyvalue_set_serverid(Uint32 &anyValue, Uint32 serverId); #ifndef DBUG_OFF -void dbug_ndbcluster_anyvalue_set_userbits(Uint32& anyValue); +void dbug_ndbcluster_anyvalue_set_userbits(Uint32 &anyValue); #endif #endif diff --git a/storage/ndb/plugin/ndb_apply_status_table.cc b/storage/ndb/plugin/ndb_apply_status_table.cc index 22e519e9fe8b..9ef5f5e983bd 100644 --- a/storage/ndb/plugin/ndb_apply_status_table.cc +++ b/storage/ndb/plugin/ndb_apply_status_table.cc @@ -32,13 +32,13 @@ const std::string Ndb_apply_status_table::DB_NAME = "mysql"; const std::string Ndb_apply_status_table::TABLE_NAME = "ndb_apply_status"; -static const char* COL_SERVER_ID = "server_id"; -static const char* COL_EPOCH = "epoch"; -static const char* COL_LOG_NAME = "log_name"; -static const char* COL_START_POS = "start_pos"; -static const char* COL_END_POS = "end_pos"; +static const char *COL_SERVER_ID = "server_id"; +static const char *COL_EPOCH = "epoch"; +static const char *COL_LOG_NAME = "log_name"; +static const char *COL_START_POS = "start_pos"; +static const char *COL_END_POS = "end_pos"; -Ndb_apply_status_table::Ndb_apply_status_table(Thd_ndb* thd_ndb) +Ndb_apply_status_table::Ndb_apply_status_table(Thd_ndb *thd_ndb) : Ndb_util_table(thd_ndb, DB_NAME, TABLE_NAME, false) {} Ndb_apply_status_table::~Ndb_apply_status_table() {} @@ -173,23 +173,20 @@ bool Ndb_apply_status_table::define_table_ndb(NdbDictionary::Table &new_table, if (!define_table_add_column(new_table, col_end_pos)) return false; } - (void)mysql_version; // Only one version can be created + (void)mysql_version; // Only one version can be created return true; } -bool Ndb_apply_status_table::drop_events_in_NDB() const -{ +bool 
Ndb_apply_status_table::drop_events_in_NDB() const { // Drop the default event - if (!drop_event_in_NDB("REPL$mysql/ndb_apply_status")) - return false; + if (!drop_event_in_NDB("REPL$mysql/ndb_apply_status")) return false; return true; } bool Ndb_apply_status_table::need_upgrade() const { return false; } -std::string Ndb_apply_status_table::define_table_dd() const -{ +std::string Ndb_apply_status_table::define_table_dd() const { std::stringstream ss; ss << "CREATE TABLE " << db_name() << "." << table_name() << "(\n"; ss << "server_id INT UNSIGNED NOT NULL," @@ -202,8 +199,8 @@ std::string Ndb_apply_status_table::define_table_dd() const return ss.str(); } -bool Ndb_apply_status_table::is_apply_status_table(const char* db, - const char* table_name) { +bool Ndb_apply_status_table::is_apply_status_table(const char *db, + const char *table_name) { if (db == Ndb_apply_status_table::DB_NAME && table_name == Ndb_apply_status_table::TABLE_NAME) { // This is the NDB table used for apply status information diff --git a/storage/ndb/plugin/ndb_apply_status_table.h b/storage/ndb/plugin/ndb_apply_status_table.h index d6ba5799e6a1..5b7d678e87bc 100644 --- a/storage/ndb/plugin/ndb_apply_status_table.h +++ b/storage/ndb/plugin/ndb_apply_status_table.h @@ -60,7 +60,7 @@ class Ndb_apply_status_table : public Ndb_util_table { @param table_name table name @return true if table is the apply status table */ - static bool is_apply_status_table(const char* db, const char* table_name); + static bool is_apply_status_table(const char *db, const char *table_name); }; #endif diff --git a/storage/ndb/plugin/ndb_binlog_client.cc b/storage/ndb/plugin/ndb_binlog_client.cc index 108432798d06..4c00314909d5 100644 --- a/storage/ndb/plugin/ndb_binlog_client.cc +++ b/storage/ndb/plugin/ndb_binlog_client.cc @@ -35,14 +35,14 @@ #include "storage/ndb/plugin/ndb_schema_dist.h" #include "storage/ndb/plugin/ndb_share.h" -Ndb_binlog_client::Ndb_binlog_client(THD* thd, const char* dbname, - const char* tabname) +Ndb_binlog_client::Ndb_binlog_client(THD *thd, const char *dbname, + const char *tabname) : m_thd(thd), m_dbname(dbname), m_tabname(tabname) {} Ndb_binlog_client::~Ndb_binlog_client() {} bool Ndb_binlog_client::table_should_have_event( - NDB_SHARE* share, const NdbDictionary::Table* ndbtab) const { + NDB_SHARE *share, const NdbDictionary::Table *ndbtab) const { DBUG_ENTER("table_should_have_event"); // Never create event(or event operation) for legacy distributed @@ -80,7 +80,7 @@ bool Ndb_binlog_client::table_should_have_event( extern bool ndb_binlog_running; -bool Ndb_binlog_client::table_should_have_event_op(const NDB_SHARE* share) { +bool Ndb_binlog_client::table_should_have_event_op(const NDB_SHARE *share) { DBUG_ENTER("table_should_have_event_op"); if (!share->get_have_event()) { @@ -133,8 +133,8 @@ bool Ndb_binlog_client::table_should_have_event_op(const NDB_SHARE* share) { DBUG_RETURN(true); } -std::string Ndb_binlog_client::event_name_for_table(const char* db, - const char* table_name, +std::string Ndb_binlog_client::event_name_for_table(const char *db, + const char *table_name, bool full) { if (Ndb_schema_dist_client::is_schema_dist_table(db, table_name) || Ndb_schema_dist_client::is_schema_dist_result_table(db, table_name)) { @@ -157,8 +157,8 @@ std::string Ndb_binlog_client::event_name_for_table(const char* db, return name; } -bool Ndb_binlog_client::event_exists_for_table(Ndb* ndb, - const NDB_SHARE* share) const { +bool Ndb_binlog_client::event_exists_for_table(Ndb *ndb, + const NDB_SHARE *share) const { 
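  // A usage note on the dictionary lookup below: getEvent() asks the NDB
  // dictionary for the named event and returns an event object which, by
  // NdbApi convention, is owned by the caller and must be deleted (an
  // assumption here; the delete on the found path is outside this hunk).
  // Minimal sketch, using the REPL$<db>/<table> naming seen for
  // ndb_apply_status above:
  //
  //   NdbDictionary::Dictionary *dict = ndb->getDictionary();
  //   const NdbDictionary::Event *ev =
  //       dict->getEvent("REPL$mysql/ndb_apply_status");
  //   if (ev != nullptr) {
  //     // the event exists in NDB
  //     delete ev;  // assumed: caller owns the returned object
  //   }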
DBUG_ENTER("Ndb_binlog_client::event_exists_for_table()");
// Generate event name
@@ -166,8 +166,8 @@ bool Ndb_binlog_client::event_exists_for_table(Ndb* ndb,
event_name_for_table(m_dbname, m_tabname, share->get_binlog_full());
// Get event from NDB
- NdbDictionary::Dictionary* dict = ndb->getDictionary();
- const NdbDictionary::Event* existing_event =
+ NdbDictionary::Dictionary *dict = ndb->getDictionary();
+ const NdbDictionary::Event *existing_event =
dict->getEvent(event_name.c_str());
if (existing_event) {
// The event exists
@@ -181,7 +181,7 @@ bool Ndb_binlog_client::event_exists_for_table(Ndb* ndb,
DBUG_RETURN(false); // Does not exist
}
-void Ndb_binlog_client::log_warning(uint code, const char* fmt, ...) const {
+void Ndb_binlog_client::log_warning(uint code, const char *fmt, ...) const {
char buf[1024];
va_list args;
va_start(args, fmt);
diff --git a/storage/ndb/plugin/ndb_binlog_client.h b/storage/ndb/plugin/ndb_binlog_client.h
index 7523870272db..fd1df7e2f45b 100644
--- a/storage/ndb/plugin/ndb_binlog_client.h
+++ b/storage/ndb/plugin/ndb_binlog_client.h
@@ -51,7 +51,7 @@ class Ndb_binlog_client {
* @param fmt
*/
void log_warning(uint code, const char *fmt, ...) const
- MY_ATTRIBUTE((format(printf, 3, 4)));
+ MY_ATTRIBUTE((format(printf, 3, 4)));
/**
* @brief event_name_for_table, generate name for the event for this table
diff --git a/storage/ndb/plugin/ndb_binlog_extra_row_info.cc b/storage/ndb/plugin/ndb_binlog_extra_row_info.cc
index fdf47a9f0f1b..0629892cacfd 100644
--- a/storage/ndb/plugin/ndb_binlog_extra_row_info.cc
+++ b/storage/ndb/plugin/ndb_binlog_extra_row_info.cc
@@ -24,126 +24,91 @@
#include "storage/ndb/plugin/ndb_binlog_extra_row_info.h"
-#include <string.h> // memcpy
+#include <string.h> // memcpy
#include "my_byteorder.h"
-Ndb_binlog_extra_row_info::
-Ndb_binlog_extra_row_info()
-{
+Ndb_binlog_extra_row_info::Ndb_binlog_extra_row_info() {
flags = 0;
transactionId = InvalidTransactionId;
conflictFlags = UnsetConflictFlags;
/* Prepare buffer with extra row info buffer bytes */
- buff[ EXTRA_ROW_INFO_LEN_OFFSET ] = 0;
- buff[ EXTRA_ROW_INFO_FORMAT_OFFSET ] = ERIF_NDB;
+ buff[EXTRA_ROW_INFO_LEN_OFFSET] = 0;
+ buff[EXTRA_ROW_INFO_FORMAT_OFFSET] = ERIF_NDB;
}
-void
-Ndb_binlog_extra_row_info::
-setFlags(Uint16 _flags)
-{
- flags = _flags;
-}
+void Ndb_binlog_extra_row_info::setFlags(Uint16 _flags) { flags = _flags; }
-void
-Ndb_binlog_extra_row_info::
-setTransactionId(Uint64 _transactionId)
-{
+void Ndb_binlog_extra_row_info::setTransactionId(Uint64 _transactionId) {
assert(_transactionId != InvalidTransactionId);
transactionId = _transactionId;
}
-void
-Ndb_binlog_extra_row_info::
-setConflictFlags(Uint16 _conflictFlags)
-{
+void Ndb_binlog_extra_row_info::setConflictFlags(Uint16 _conflictFlags) {
conflictFlags = _conflictFlags;
}
-int
-Ndb_binlog_extra_row_info::
-loadFromBuffer(const uchar* extra_row_info)
-{
+int Ndb_binlog_extra_row_info::loadFromBuffer(const uchar *extra_row_info) {
assert(extra_row_info);
- Uint8 length = extra_row_info[ EXTRA_ROW_INFO_LEN_OFFSET ];
+ Uint8 length = extra_row_info[EXTRA_ROW_INFO_LEN_OFFSET];
assert(length >= EXTRA_ROW_INFO_HEADER_LENGTH);
Uint8 payload_length = length - EXTRA_ROW_INFO_HEADER_LENGTH;
- Uint8 format = extra_row_info[ EXTRA_ROW_INFO_FORMAT_OFFSET ];
+ Uint8 format = extra_row_info[EXTRA_ROW_INFO_FORMAT_OFFSET];
- if (likely(format == ERIF_NDB))
- {
- if (likely(payload_length >= FLAGS_SIZE))
- {
- const unsigned char* data = &extra_row_info[ EXTRA_ROW_INFO_HEADER_LENGTH ];
+ if (likely(format == ERIF_NDB)) {
+ if
(likely(payload_length >= FLAGS_SIZE)) { + const unsigned char *data = &extra_row_info[EXTRA_ROW_INFO_HEADER_LENGTH]; Uint8 nextPos = 0; /* Have flags at least */ bool error = false; Uint16 netFlags; - memcpy(&netFlags, &data[ nextPos ], FLAGS_SIZE); + memcpy(&netFlags, &data[nextPos], FLAGS_SIZE); nextPos += FLAGS_SIZE; - flags = uint2korr((const char*) &netFlags); + flags = uint2korr((const char *)&netFlags); - if (flags & NDB_ERIF_TRANSID) - { - if (likely((nextPos + TRANSID_SIZE) <= payload_length)) - { + if (flags & NDB_ERIF_TRANSID) { + if (likely((nextPos + TRANSID_SIZE) <= payload_length)) { /* Correct length, retrieve transaction id, converting from little endian if necessary. */ Uint64 netTransId; - memcpy(&netTransId, - &data[ nextPos ], - TRANSID_SIZE); + memcpy(&netTransId, &data[nextPos], TRANSID_SIZE); nextPos += TRANSID_SIZE; - transactionId = uint8korr((const char*) &netTransId); - } - else - { + transactionId = uint8korr((const char *)&netTransId); + } else { flags = 0; /* No more processing */ error = true; } } - - if (flags & NDB_ERIF_CFT_FLAGS) - { - if (likely((nextPos + CFT_FLAGS_SIZE) <= payload_length)) - { + + if (flags & NDB_ERIF_CFT_FLAGS) { + if (likely((nextPos + CFT_FLAGS_SIZE) <= payload_length)) { /** - * Correct length, retrieve conflict flags, converting if + * Correct length, retrieve conflict flags, converting if * necessary */ Uint16 netCftFlags; - memcpy(&netCftFlags, - &data[ nextPos ], - CFT_FLAGS_SIZE); + memcpy(&netCftFlags, &data[nextPos], CFT_FLAGS_SIZE); nextPos += CFT_FLAGS_SIZE; - conflictFlags = uint2korr((const char*) & netCftFlags); - } - else - { + conflictFlags = uint2korr((const char *)&netCftFlags); + } else { flags = 0; /* No more processing */ error = true; } } - if (likely(!error)) - { + if (likely(!error)) { return 0; - } - else - { + } else { /* Error - malformed buffer, dump some debugging info */ - fprintf(stderr, + fprintf(stderr, "Ndb_binlog_extra_row_info::loadFromBuffer()" "malformed buffer - flags : %x nextPos %u " "payload_length %u\n", - uint2korr((const char*) &netFlags), - nextPos, - payload_length); + uint2korr((const char *)&netFlags), nextPos, payload_length); return -1; } } @@ -156,40 +121,35 @@ loadFromBuffer(const uchar* extra_row_info) return 0; } -uchar* -Ndb_binlog_extra_row_info::generateBuffer() -{ +uchar *Ndb_binlog_extra_row_info::generateBuffer() { /* Here we write out the buffer in network format, based on the current member settings. 
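Concretely, assuming the two header bytes live at EXTRA_ROW_INFO_LEN_OFFSET and EXTRA_ROW_INFO_FORMAT_OFFSET, a fully populated buffer is laid out as:

  byte 0        total length (header plus payload)
  byte 1        format byte, always ERIF_NDB here
  next 2 bytes  flags (FLAGS_SIZE, normalized via uint2korr)
  next 8 bytes  transactionId (TRANSID_SIZE, only with NDB_ERIF_TRANSID)
  next 2 bytes  conflictFlags (CFT_FLAGS_SIZE, only with NDB_ERIF_CFT_FLAGS)

which is exactly MaxLen = EXTRA_ROW_INFO_HEADER_LENGTH + FLAGS_SIZE + TRANSID_SIZE + CFT_FLAGS_SIZE bytes, matching the assert(nextPos <= MaxLen) below. Optional fields are packed back to back, so when NDB_ERIF_TRANSID is clear the conflict flags follow the flags word directly.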
*/ Uint8 nextPos = EXTRA_ROW_INFO_HEADER_LENGTH; - if (flags) - { + if (flags) { /* Write current flags into buff */ - Uint16 netFlags = uint2korr((const char*) &flags); - memcpy(&buff[ nextPos ], &netFlags, FLAGS_SIZE); + Uint16 netFlags = uint2korr((const char *)&flags); + memcpy(&buff[nextPos], &netFlags, FLAGS_SIZE); nextPos += FLAGS_SIZE; - if (flags & NDB_ERIF_TRANSID) - { - Uint64 netTransactionId = uint8korr((const char*) &transactionId); - memcpy(&buff[ nextPos ], &netTransactionId, TRANSID_SIZE); + if (flags & NDB_ERIF_TRANSID) { + Uint64 netTransactionId = uint8korr((const char *)&transactionId); + memcpy(&buff[nextPos], &netTransactionId, TRANSID_SIZE); nextPos += TRANSID_SIZE; } - - if (flags & NDB_ERIF_CFT_FLAGS) - { - Uint16 netCftFlags = uint2korr((const char*) &conflictFlags); - memcpy(&buff[ nextPos ], &netCftFlags, CFT_FLAGS_SIZE); + + if (flags & NDB_ERIF_CFT_FLAGS) { + Uint16 netCftFlags = uint2korr((const char *)&conflictFlags); + memcpy(&buff[nextPos], &netCftFlags, CFT_FLAGS_SIZE); nextPos += CFT_FLAGS_SIZE; } - assert( nextPos <= MaxLen ); + assert(nextPos <= MaxLen); /* Set length */ - assert( buff[ EXTRA_ROW_INFO_FORMAT_OFFSET ] == ERIF_NDB ); - buff[ EXTRA_ROW_INFO_LEN_OFFSET ] = nextPos; + assert(buff[EXTRA_ROW_INFO_FORMAT_OFFSET] == ERIF_NDB); + buff[EXTRA_ROW_INFO_LEN_OFFSET] = nextPos; return buff; } diff --git a/storage/ndb/plugin/ndb_binlog_extra_row_info.h b/storage/ndb/plugin/ndb_binlog_extra_row_info.h index 44a8c558d351..e528c775e38a 100644 --- a/storage/ndb/plugin/ndb_binlog_extra_row_info.h +++ b/storage/ndb/plugin/ndb_binlog_extra_row_info.h @@ -37,30 +37,22 @@ in the thd variable when writing binlog entries if the object stays in scope around the write. */ -class Ndb_binlog_extra_row_info -{ -public: +class Ndb_binlog_extra_row_info { + public: static const Uint32 FLAGS_SIZE = sizeof(Uint16); static const Uint32 TRANSID_SIZE = sizeof(Uint64); static const Uint32 CFT_FLAGS_SIZE = sizeof(Uint16); static const Uint32 MaxLen = - EXTRA_ROW_INFO_HEADER_LENGTH + - FLAGS_SIZE + - TRANSID_SIZE + - CFT_FLAGS_SIZE; + EXTRA_ROW_INFO_HEADER_LENGTH + FLAGS_SIZE + TRANSID_SIZE + CFT_FLAGS_SIZE; static const Uint64 InvalidTransactionId = ~Uint64(0); static const Uint16 UnsetConflictFlags = 0; - enum Flags - { - NDB_ERIF_TRANSID = 0x1, - NDB_ERIF_CFT_FLAGS = 0x2 - }; + enum Flags { NDB_ERIF_TRANSID = 0x1, NDB_ERIF_CFT_FLAGS = 0x2 }; Ndb_binlog_extra_row_info(); - int loadFromBuffer(const uchar* extra_row_info_ptr); + int loadFromBuffer(const uchar *extra_row_info_ptr); Uint16 getFlags() const { return flags; } void setFlags(Uint16 _flags); @@ -71,14 +63,14 @@ class Ndb_binlog_extra_row_info Uint16 getConflictFlags() const { return conflictFlags; } void setConflictFlags(Uint16 _conflictFlags); - uchar* getBuffPtr() { return buff; } - uchar* generateBuffer(); -private: + uchar *getBuffPtr() { return buff; } + uchar *generateBuffer(); + + private: uchar buff[MaxLen]; Uint16 flags; Uint64 transactionId; Uint16 conflictFlags; }; - #endif diff --git a/storage/ndb/plugin/ndb_binlog_hooks.cc b/storage/ndb/plugin/ndb_binlog_hooks.cc index 1d0fc20ab076..a4e381f33a51 100644 --- a/storage/ndb/plugin/ndb_binlog_hooks.cc +++ b/storage/ndb/plugin/ndb_binlog_hooks.cc @@ -32,7 +32,7 @@ #include "storage/ndb/plugin/ndb_plugin_reference.h" bool Ndb_binlog_hooks::register_hooks( - after_reset_master_hook_t* after_reset_master) { + after_reset_master_hook_t *after_reset_master) { // Only allow hooks to be installed once DBUG_ASSERT(!m_binlog_transmit_observer); @@ -45,11 +45,11 @@ bool 
Ndb_binlog_hooks::register_hooks(
m_binlog_transmit_observer = new Binlog_transmit_observer{
sizeof(Binlog_transmit_observer),
- nullptr, // transmit_start
- nullptr, // transmit_stop
- nullptr, // reserve_header
- nullptr, // before_send_event
- nullptr, // after_send_event
+ nullptr, // transmit_start
+ nullptr, // transmit_stop
+ nullptr, // reserve_header
+ nullptr, // before_send_event
+ nullptr, // after_send_event
(after_reset_master_t)after_reset_master, // after_reset_master
};
diff --git a/storage/ndb/plugin/ndb_binlog_hooks.h b/storage/ndb/plugin/ndb_binlog_hooks.h
index eb8fdc610977..e1fec465a280 100644
--- a/storage/ndb/plugin/ndb_binlog_hooks.h
+++ b/storage/ndb/plugin/ndb_binlog_hooks.h
@@ -26,14 +26,14 @@
#define NDB_BINLOG_HOOKS_H
class Ndb_binlog_hooks {
+ using after_reset_master_hook_t = int(void *);
- using after_reset_master_hook_t = int(void*);
+ struct Binlog_transmit_observer *m_binlog_transmit_observer = nullptr;
- struct Binlog_transmit_observer* m_binlog_transmit_observer = nullptr;
public:
~Ndb_binlog_hooks();
- bool register_hooks(after_reset_master_hook_t* after_reset_master);
+ bool register_hooks(after_reset_master_hook_t *after_reset_master);
void unregister_all(void);
};
diff --git a/storage/ndb/plugin/ndb_binlog_thread.cc b/storage/ndb/plugin/ndb_binlog_thread.cc
index f3685e052e4b..45cb364b5eb4 100644
--- a/storage/ndb/plugin/ndb_binlog_thread.cc
+++ b/storage/ndb/plugin/ndb_binlog_thread.cc
@@ -50,8 +50,7 @@ int Ndb_binlog_thread::do_deinit() {
@return 0 on success
*/
-int Ndb_binlog_thread::do_after_reset_master(void*)
-{
+int Ndb_binlog_thread::do_after_reset_master(void *) {
DBUG_ENTER("Ndb_binlog_thread::do_after_reset_master");
// Truncate the mysql.ndb_binlog_index table
@@ -60,8 +59,7 @@
Ndb_local_connection mysqld(current_thd);
const bool ignore_no_such_table = true;
if (mysqld.truncate_table("mysql", "ndb_binlog_index",
- ignore_no_such_table))
- {
+ ignore_no_such_table)) {
// Failed to truncate table
DBUG_RETURN(1);
}
diff --git a/storage/ndb/plugin/ndb_binlog_thread.h b/storage/ndb/plugin/ndb_binlog_thread.h
index edeb280b9849..ae2d1b1454bf 100644
--- a/storage/ndb/plugin/ndb_binlog_thread.h
+++ b/storage/ndb/plugin/ndb_binlog_thread.h
@@ -35,12 +35,12 @@
class Ndb;
-class Ndb_binlog_thread : public Ndb_component
-{
+class Ndb_binlog_thread : public Ndb_component {
Ndb_binlog_hooks binlog_hooks;
- static int do_after_reset_master(void*);
+ static int do_after_reset_master(void *);
Ndb_metadata_sync metadata_sync;
-public:
+
+ public:
Ndb_binlog_thread();
virtual ~Ndb_binlog_thread();
@@ -97,7 +97,8 @@ class Ndb_binlog_thread : public Ndb_component
*/
bool add_table_to_check(const std::string &db_name,
const std::string &table_name);
-private:
+
+ private:
virtual int do_init();
virtual void do_run();
virtual int do_deinit();
@@ -119,7 +120,7 @@ class Ndb_binlog_thread : public Ndb_component
// from the cluster
CLUSTER_DISCONNECT
};
- bool check_reconnect_incident(THD* thd, class injector* inj,
+ bool check_reconnect_incident(THD *thd, class injector *inj,
Reconnect_type incident_id) const;
/**
@@ -128,8 +129,8 @@ class Ndb_binlog_thread : public Ndb_component
@param thd Thread handle
*/
void recall_pending_purges(THD *thd);
- std::mutex m_purge_mutex; // Protects m_pending_purges
- std::vector<std::string> m_pending_purges; // List of pending purges
+ std::mutex m_purge_mutex; // Protects m_pending_purges
+ std::vector<std::string> m_pending_purges; // List of pending purges
/**
@brief Remove event operations
belonging to one Ndb object
@@ -159,7 +160,6 @@ class Ndb_binlog_thread : public Ndb_component
@return void
*/
void synchronize_detected_object(THD *thd);
-
};
#endif
diff --git a/storage/ndb/plugin/ndb_bitmap.cc b/storage/ndb/plugin/ndb_bitmap.cc
index 4c207898d000..8fe7297c77db 100644
--- a/storage/ndb/plugin/ndb_bitmap.cc
+++ b/storage/ndb/plugin/ndb_bitmap.cc
@@ -31,7 +31,7 @@ std::string ndb_bitmap_to_hex_string(const MY_BITMAP *bitmap) {
std::ostringstream os;
os << "{";
- const char* separator = "";
+ const char *separator = "";
// The MY_BITMAP buffer size is always rounded up to 32 bit words, print
// word by word
for (size_t i = no_words_in_map(bitmap); i-- > 0;) {
diff --git a/storage/ndb/plugin/ndb_bitmap.h b/storage/ndb/plugin/ndb_bitmap.h
index 6d69088274a4..81d5111d1660 100644
--- a/storage/ndb/plugin/ndb_bitmap.h
+++ b/storage/ndb/plugin/ndb_bitmap.h
@@ -29,7 +29,6 @@
#include "my_bitmap.h"
-
/**
Initialize bitmap using provided buffer.
@param bitmap The MY_BITMAP to initialize
@param buf The buffer to use
@param num_bits Number of bits
*/
-template <size_t sz>
-static inline
-void ndb_bitmap_init(MY_BITMAP& bitmap,
- my_bitmap_map (&buf)[sz],
- uint num_bits)
-{
+template <size_t sz>
+static inline void ndb_bitmap_init(MY_BITMAP &bitmap, my_bitmap_map (&buf)[sz],
+ uint num_bits) {
assert(num_bits > 0);
assert(bitmap_buffer_size(num_bits) <= (sz * sizeof(my_bitmap_map)));
@@ -63,6 +59,6 @@ void ndb_bitmap_init(MY_BITMAP& bitmap,
* @param bitmap The bitmap to format
* @return string representation of the bitmap
*/
-std::string ndb_bitmap_to_hex_string(const MY_BITMAP* bitmap);
+std::string ndb_bitmap_to_hex_string(const MY_BITMAP *bitmap);
#endif
diff --git a/storage/ndb/plugin/ndb_component.cc b/storage/ndb/plugin/ndb_component.cc
index 169a711bf538..1655141edc4a 100644
--- a/storage/ndb/plugin/ndb_component.cc
+++ b/storage/ndb/plugin/ndb_component.cc
@@ -26,63 +26,46 @@
#include
-#include "my_systime.h" // set_timespec
+#include "my_systime.h" // set_timespec
Ndb_component::Ndb_component(const char *name)
- : m_thread_state(TS_UNINIT),
- m_server_started(false),
- m_name(name)
-{
-}
-
-Ndb_component::~Ndb_component()
-{
+ : m_thread_state(TS_UNINIT), m_server_started(false), m_name(name) {}
-}
+Ndb_component::~Ndb_component() {}
-int
-Ndb_component::init()
-{
+int Ndb_component::init() {
assert(m_thread_state == TS_UNINIT);
- mysql_mutex_init(PSI_INSTRUMENT_ME, &m_start_stop_mutex,
- MY_MUTEX_INIT_FAST);
+ mysql_mutex_init(PSI_INSTRUMENT_ME, &m_start_stop_mutex, MY_MUTEX_INIT_FAST);
mysql_cond_init(PSI_INSTRUMENT_ME, &m_start_stop_cond);
- int res= do_init();
- if (res == 0)
- {
- m_thread_state= TS_INIT;
+ int res = do_init();
+ if (res == 0) {
+ m_thread_state = TS_INIT;
}
return res;
}
-extern "C" void *
-Ndb_component_run_C(void * arg)
-{
+extern "C" void *Ndb_component_run_C(void *arg) {
my_thread_init();
- Ndb_component * self = reinterpret_cast<Ndb_component*>(arg);
+ Ndb_component *self = reinterpret_cast<Ndb_component *>(arg);
self->run_impl();
my_thread_end();
my_thread_exit(0);
- return NULL; // Avoid compiler warnings
+ return NULL; // Avoid compiler warnings
}
-extern my_thread_attr_t connection_attrib; // mysql global pthread attr
+extern my_thread_attr_t connection_attrib; // mysql global pthread attr
-int
-Ndb_component::start()
-{
+int Ndb_component::start() {
assert(m_thread_state == TS_INIT);
mysql_mutex_lock(&m_start_stop_mutex);
- m_thread_state= TS_STARTING;
- int res= my_thread_create(&m_thread, &connection_attrib, Ndb_component_run_C,
- this);
-
- if (res == 0)
- {
- while (m_thread_state == TS_STARTING)
- {
+ m_thread_state = TS_STARTING;
+
int res = my_thread_create(&m_thread, &connection_attrib, Ndb_component_run_C, + this); + + if (res == 0) { + while (m_thread_state == TS_STARTING) { mysql_cond_wait(&m_start_stop_cond, &m_start_stop_mutex); } mysql_mutex_unlock(&m_start_stop_mutex); @@ -93,13 +76,10 @@ Ndb_component::start() return res; } -void -Ndb_component::run_impl() -{ +void Ndb_component::run_impl() { mysql_mutex_lock(&m_start_stop_mutex); - if (m_thread_state == TS_STARTING) - { - m_thread_state= TS_RUNNING; + if (m_thread_state == TS_STARTING) { + m_thread_state = TS_RUNNING; mysql_cond_signal(&m_start_stop_cond); mysql_mutex_unlock(&m_start_stop_mutex); do_run(); @@ -110,9 +90,7 @@ Ndb_component::run_impl() mysql_mutex_unlock(&m_start_stop_mutex); } -bool -Ndb_component::is_stop_requested() -{ +bool Ndb_component::is_stop_requested() { bool res = false; mysql_mutex_lock(&m_start_stop_mutex); res = m_thread_state != TS_RUNNING; @@ -120,18 +98,14 @@ Ndb_component::is_stop_requested() return res; } -int -Ndb_component::stop() -{ +int Ndb_component::stop() { log_info("Stop"); mysql_mutex_lock(&m_start_stop_mutex); - assert(m_thread_state == TS_RUNNING || - m_thread_state == TS_STOPPING || + assert(m_thread_state == TS_RUNNING || m_thread_state == TS_STOPPING || m_thread_state == TS_STOPPED); - if (m_thread_state == TS_RUNNING) - { - m_thread_state= TS_STOPPING; + if (m_thread_state == TS_RUNNING) { + m_thread_state = TS_STOPPING; } // Give subclass a call, should wake itself up to quickly @@ -143,10 +117,8 @@ Ndb_component::stop() do_wakeup(); mysql_mutex_lock(&m_start_stop_mutex); - if (m_thread_state == TS_STOPPING) - { - while (m_thread_state != TS_STOPPED) - { + if (m_thread_state == TS_STOPPING) { + while (m_thread_state != TS_STOPPED) { mysql_cond_signal(&m_start_stop_cond); mysql_cond_wait(&m_start_stop_cond, &m_start_stop_mutex); } @@ -157,18 +129,14 @@ Ndb_component::stop() return 0; } -int -Ndb_component::deinit() -{ +int Ndb_component::deinit() { assert(m_thread_state == TS_STOPPED); mysql_mutex_destroy(&m_start_stop_mutex); mysql_cond_destroy(&m_start_stop_cond); return do_deinit(); } - -void Ndb_component::set_server_started() -{ +void Ndb_component::set_server_started() { mysql_mutex_lock(&m_start_stop_mutex); // Can only transition to "server started" once @@ -179,8 +147,7 @@ void Ndb_component::set_server_started() mysql_mutex_unlock(&m_start_stop_mutex); } -bool Ndb_component::is_server_started() -{ +bool Ndb_component::is_server_started() { bool server_started; mysql_mutex_lock(&m_start_stop_mutex); server_started = m_server_started; @@ -188,23 +155,19 @@ bool Ndb_component::is_server_started() return server_started; } -bool Ndb_component::wait_for_server_started(void) -{ +bool Ndb_component::wait_for_server_started(void) { log_verbose(1, "Wait for server start"); mysql_mutex_lock(&m_start_stop_mutex); - while (!m_server_started) - { + while (!m_server_started) { // Wait max one second before checking again if server has been // started or shutdown has been requested struct timespec abstime; set_timespec(&abstime, 1); - mysql_cond_timedwait(&m_start_stop_cond, &m_start_stop_mutex, - &abstime); + mysql_cond_timedwait(&m_start_stop_cond, &m_start_stop_mutex, &abstime); // Has shutdown been requested - if (m_thread_state != TS_RUNNING) - { + if (m_thread_state != TS_RUNNING) { mysql_mutex_unlock(&m_start_stop_mutex); return false; } @@ -216,16 +179,12 @@ bool Ndb_component::wait_for_server_started(void) return true; } - #include "storage/ndb/plugin/ndb_log.h" - -void Ndb_component::log_verbose(unsigned 
verbose_level, - const char *fmt, ...) const -{ +void Ndb_component::log_verbose(unsigned verbose_level, const char *fmt, + ...) const { // Print message only if verbose level is set high enough - if (ndb_log_get_verbose_level() < verbose_level) - return; + if (ndb_log_get_verbose_level() < verbose_level) return; va_list args; va_start(args, fmt); @@ -233,27 +192,21 @@ void Ndb_component::log_verbose(unsigned verbose_level, va_end(args); } - -void Ndb_component::log_error(const char *fmt, ...) const -{ +void Ndb_component::log_error(const char *fmt, ...) const { va_list args; va_start(args, fmt); ndb_log_print(NDB_LOG_ERROR_LEVEL, m_name, fmt, args); va_end(args); } - -void Ndb_component::log_warning(const char *fmt, ...) const -{ +void Ndb_component::log_warning(const char *fmt, ...) const { va_list args; va_start(args, fmt); ndb_log_print(NDB_LOG_WARNING_LEVEL, m_name, fmt, args); va_end(args); } - -void Ndb_component::log_info(const char *fmt, ...) const -{ +void Ndb_component::log_info(const char *fmt, ...) const { va_list args; va_start(args, fmt); ndb_log_print(NDB_LOG_INFORMATION_LEVEL, m_name, fmt, args); diff --git a/storage/ndb/plugin/ndb_component.h b/storage/ndb/plugin/ndb_component.h index 5c4ea52a1275..9642bee04517 100644 --- a/storage/ndb/plugin/ndb_component.h +++ b/storage/ndb/plugin/ndb_component.h @@ -29,7 +29,7 @@ #include "mysql/psi/mysql_mutex.h" #include "mysql/psi/mysql_thread.h" -extern "C" void * Ndb_component_run_C(void *); +extern "C" void *Ndb_component_run_C(void *); /** * Baseclass encapsulating the different components @@ -40,9 +40,8 @@ extern "C" void * Ndb_component_run_C(void *); * component. */ -class Ndb_component -{ -public: +class Ndb_component { + public: virtual int init(); virtual int start(); virtual int stop(); @@ -55,7 +54,7 @@ class Ndb_component */ void set_server_started(); -protected: + protected: /* Check if the server has started. This checks if the Ndb_component has been informed that the server has started. @@ -65,7 +64,7 @@ class Ndb_component /** * Con/de-structor is protected...so that sub-class needs to provide own */ - Ndb_component(const char* name); + Ndb_component(const char *name); virtual ~Ndb_component(); /** @@ -95,15 +94,15 @@ class Ndb_component */ bool is_stop_requested(); -protected: - void log_verbose(unsigned verbose_level, const char* fmt, ...) - const MY_ATTRIBUTE((format(printf, 3, 4))); + protected: + void log_verbose(unsigned verbose_level, const char *fmt, ...) const + MY_ATTRIBUTE((format(printf, 3, 4))); void log_error(const char *fmt, ...) const - MY_ATTRIBUTE((format(printf, 2, 3))); + MY_ATTRIBUTE((format(printf, 2, 3))); void log_warning(const char *fmt, ...) const - MY_ATTRIBUTE((format(printf, 2, 3))); + MY_ATTRIBUTE((format(printf, 2, 3))); void log_info(const char *fmt, ...) const - MY_ATTRIBUTE((format(printf, 2, 3))); + MY_ATTRIBUTE((format(printf, 2, 3))); /* Wait for the server started. 
The Ndb_component(and its thread(s)) @@ -114,28 +113,26 @@ class Ndb_component */ bool wait_for_server_started(void); -private: - - enum ThreadState - { - TS_UNINIT = 0, - TS_INIT = 1, + private: + enum ThreadState { + TS_UNINIT = 0, + TS_INIT = 1, TS_STARTING = 2, - TS_RUNNING = 3, + TS_RUNNING = 3, TS_STOPPING = 4, - TS_STOPPED = 5 + TS_STOPPED = 5 }; ThreadState m_thread_state; my_thread_handle m_thread; mysql_mutex_t m_start_stop_mutex; mysql_cond_t m_start_stop_cond; - bool m_server_started; // Protected by m_start_stop_mutex + bool m_server_started; // Protected by m_start_stop_mutex - const char* m_name; + const char *m_name; void run_impl(); - friend void * Ndb_component_run_C(void *); + friend void *Ndb_component_run_C(void *); }; #endif diff --git a/storage/ndb/plugin/ndb_conflict.cc b/storage/ndb/plugin/ndb_conflict.cc index 4f7847f4f518..2e4e14530f6b 100644 --- a/storage/ndb/plugin/ndb_conflict.cc +++ b/storage/ndb/plugin/ndb_conflict.cc @@ -26,9 +26,9 @@ #include -#include "my_base.h" // HA_ERR_ROWS_EVENT_APPLY +#include "my_base.h" // HA_ERR_ROWS_EVENT_APPLY #include "my_dbug.h" -#include "sql/mysqld.h" // lower_case_table_names +#include "sql/mysqld.h" // lower_case_table_names #include "storage/ndb/plugin/ndb_binlog_extra_row_info.h" #include "storage/ndb/plugin/ndb_log.h" #include "storage/ndb/plugin/ndb_ndbapi_util.h" @@ -53,25 +53,19 @@ typedef NdbDictionary::Column NDBCOL; #define NDB_EXCEPTIONS_TABLE_COLUMN_OLD_SUFFIX "$OLD" #define NDB_EXCEPTIONS_TABLE_COLUMN_NEW_SUFFIX "$NEW" - /* Return true if a column has a specific prefix. */ -bool -ExceptionsTableWriter::has_prefix_ci(const char *col_name, - const char *prefix, - CHARSET_INFO *cs) -{ - uint col_len= strlen(col_name); - uint prefix_len= strlen(prefix); - if (col_len < prefix_len) - return false; +bool ExceptionsTableWriter::has_prefix_ci(const char *col_name, + const char *prefix, + CHARSET_INFO *cs) { + uint col_len = strlen(col_name); + uint prefix_len = strlen(prefix); + if (col_len < prefix_len) return false; char col_name_prefix[FN_HEADLEN]; strncpy(col_name_prefix, col_name, prefix_len); - col_name_prefix[prefix_len]= '\0'; - return (my_strcasecmp(cs, - col_name_prefix, - prefix) == 0); + col_name_prefix[prefix_len] = '\0'; + return (my_strcasecmp(cs, col_name_prefix, prefix) == 0); } /* @@ -79,22 +73,16 @@ ExceptionsTableWriter::has_prefix_ci(const char *col_name, and sets the column_real_name to the column name without the suffix. */ -bool -ExceptionsTableWriter::has_suffix_ci(const char *col_name, - const char *suffix, - CHARSET_INFO *cs, - char *col_name_real) -{ - uint col_len= strlen(col_name); - uint suffix_len= strlen(suffix); - const char *col_name_endp= col_name + col_len; +bool ExceptionsTableWriter::has_suffix_ci(const char *col_name, + const char *suffix, CHARSET_INFO *cs, + char *col_name_real) { + uint col_len = strlen(col_name); + uint suffix_len = strlen(suffix); + const char *col_name_endp = col_name + col_len; strcpy(col_name_real, col_name); if (col_len > suffix_len && - my_strcasecmp(cs, - col_name_endp - suffix_len, - suffix) == 0) - { - col_name_real[col_len - suffix_len]= '\0'; + my_strcasecmp(cs, col_name_endp - suffix_len, suffix) == 0) { + col_name_real[col_len - suffix_len] = '\0'; return true; } return false; @@ -106,33 +94,24 @@ ExceptionsTableWriter::has_suffix_ci(const char *col_name, position column was found in pos and possible position in the primary key in key_pos. 
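The has_prefix_ci() and has_suffix_ci() helpers reformatted above implement the exceptions-table naming convention: an NDB$ prefix marks the extended format, and a $OLD or $NEW suffix selects which row image a data column refers to. A rough standalone sketch of the suffix test, using plain std::string and ASCII tolower in place of the server's charset-aware my_strcasecmp() (names below are illustrative, not the server API):

~~~~~~~~~~~~~~~~
#include <cctype>
#include <string>

// ASCII-only case-insensitive equality; the real helpers compare through
// my_strcasecmp() with the system charset.
static bool eq_ci(const std::string &a, const std::string &b) {
  if (a.size() != b.size()) return false;
  for (size_t i = 0; i < a.size(); i++) {
    if (std::tolower(static_cast<unsigned char>(a[i])) !=
        std::tolower(static_cast<unsigned char>(b[i])))
      return false;
  }
  return true;
}

// If col_name ends with suffix (e.g. "$OLD"), store the name with the
// suffix stripped in *real_name and return true, as has_suffix_ci() does.
static bool strip_suffix_ci(const std::string &col_name,
                            const std::string &suffix,
                            std::string *real_name) {
  *real_name = col_name;
  if (col_name.size() > suffix.size() &&
      eq_ci(col_name.substr(col_name.size() - suffix.size()), suffix)) {
    real_name->resize(col_name.size() - suffix.size());
    return true;
  }
  return false;
}
~~~~~~~~~~~~~~~~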
*/ -bool -ExceptionsTableWriter::find_column_name_ci(CHARSET_INFO *cs, - const char *col_name, - const NdbDictionary::Table* table, - int *pos, - int *key_pos) -{ - int ncol= table->getNoOfColumns(); - for(int m= 0; m < ncol; m++) - { - const NdbDictionary::Column* col= table->getColumn(m); - const char *tcol_name= col->getName(); - if (col->getPrimaryKey()) - (*key_pos)++; - if (my_strcasecmp(cs, col_name, tcol_name) == 0) - { - *pos= m; +bool ExceptionsTableWriter::find_column_name_ci( + CHARSET_INFO *cs, const char *col_name, const NdbDictionary::Table *table, + int *pos, int *key_pos) { + int ncol = table->getNoOfColumns(); + for (int m = 0; m < ncol; m++) { + const NdbDictionary::Column *col = table->getColumn(m); + const char *tcol_name = col->getName(); + if (col->getPrimaryKey()) (*key_pos)++; + if (my_strcasecmp(cs, col_name, tcol_name) == 0) { + *pos = m; return true; } } return false; } - -bool -ExceptionsTableWriter::check_mandatory_columns(const NdbDictionary::Table* exceptionsTable) -{ +bool ExceptionsTableWriter::check_mandatory_columns( + const NdbDictionary::Table *exceptionsTable) { DBUG_ENTER("ExceptionsTableWriter::check_mandatory_columns"); if (/* server id */ exceptionsTable->getColumn(0)->getType() == NDBCOL::Unsigned && @@ -145,70 +124,60 @@ ExceptionsTableWriter::check_mandatory_columns(const NdbDictionary::Table* excep exceptionsTable->getColumn(2)->getPrimaryKey() && /* count */ exceptionsTable->getColumn(3)->getType() == NDBCOL::Unsigned && - exceptionsTable->getColumn(3)->getPrimaryKey() - ) + exceptionsTable->getColumn(3)->getPrimaryKey()) DBUG_RETURN(true); else - DBUG_RETURN(false); + DBUG_RETURN(false); } -bool -ExceptionsTableWriter::check_pk_columns(const NdbDictionary::Table* mainTable, - const NdbDictionary::Table* exceptionsTable, - int &k) -{ +bool ExceptionsTableWriter::check_pk_columns( + const NdbDictionary::Table *mainTable, + const NdbDictionary::Table *exceptionsTable, int &k) { DBUG_ENTER("ExceptionsTableWriter::check_pk_columns"); - const int fixed_cols= 4; - int ncol= mainTable->getNoOfColumns(); - int nkey= mainTable->getNoOfPrimaryKeys(); + const int fixed_cols = 4; + int ncol = mainTable->getNoOfColumns(); + int nkey = mainTable->getNoOfPrimaryKeys(); /* Check columns that are part of the primary key */ - for (int i= k= 0; i < ncol && k < nkey; i++) - { - const NdbDictionary::Column* col= mainTable->getColumn(i); - if (col->getPrimaryKey()) - { - const NdbDictionary::Column* ex_col= + for (int i = k = 0; i < ncol && k < nkey; i++) { + const NdbDictionary::Column *col = mainTable->getColumn(i); + if (col->getPrimaryKey()) { + const NdbDictionary::Column *ex_col = exceptionsTable->getColumn(fixed_cols + k); - if(!(ex_col != NULL && - col->getType() == ex_col->getType() && - col->getLength() == ex_col->getLength() && - col->getNullable() == ex_col->getNullable())) - { - /* - Primary key type of the original table doesn't match - the primary key column of the execption table. - Assume that the table format has been extended and - check more below. - */ - DBUG_PRINT("info", ("Primary key column columns don't match, assume extended table")); - m_extended= true; - break; - } + if (!(ex_col != NULL && col->getType() == ex_col->getType() && + col->getLength() == ex_col->getLength() && + col->getNullable() == ex_col->getNullable())) { /* - Store mapping of Exception table key# to - orig table attrid + Primary key type of the original table doesn't match + the primary key column of the execption table. 
+ Assume that the table format has been extended and + check more below. */ - DBUG_PRINT("info", ("%u: Setting m_key_attrids[%i]= %i", __LINE__, k, i)); - m_key_attrids[k]= i; - k++; + DBUG_PRINT( + "info", + ("Primary key column columns don't match, assume extended table")); + m_extended = true; + break; } + /* + Store mapping of Exception table key# to + orig table attrid + */ + DBUG_PRINT("info", ("%u: Setting m_key_attrids[%i]= %i", __LINE__, k, i)); + m_key_attrids[k] = i; + k++; } + } DBUG_RETURN(true); } -bool -ExceptionsTableWriter::check_optional_columns(const NdbDictionary::Table* mainTable, - const NdbDictionary::Table* exceptionsTable, - char* msg_buf, - uint msg_buf_len, - const char** msg, - int &k, - char *error_details, - uint error_details_len) -{ +bool ExceptionsTableWriter::check_optional_columns( + const NdbDictionary::Table *mainTable, + const NdbDictionary::Table *exceptionsTable, char *msg_buf, + uint msg_buf_len, const char **msg, int &k, char *error_details, + uint error_details_len) { DBUG_ENTER("ExceptionsTableWriter::check_optional_columns"); /* - Check optional columns. + Check optional columns. Check if table has been extended by looking for the NDB$ prefix. By looking at the columns in reverse order we can determine if table has been @@ -219,104 +188,86 @@ ExceptionsTableWriter::check_optional_columns(const NdbDictionary::Table* mainTa original table then table is also assumed to be extended. */ - const char* ex_tab_name= exceptionsTable->getName(); - const int fixed_cols= 4; - bool ok= true; - int xncol= exceptionsTable->getNoOfColumns(); + const char *ex_tab_name = exceptionsTable->getName(); + const int fixed_cols = 4; + bool ok = true; + int xncol = exceptionsTable->getNoOfColumns(); int i; - for (i= xncol - 1; i >= 0; i--) - { - const NdbDictionary::Column* col= exceptionsTable->getColumn(i); - const char* col_name= col->getName(); + for (i = xncol - 1; i >= 0; i--) { + const NdbDictionary::Column *col = exceptionsTable->getColumn(i); + const char *col_name = col->getName(); /* We really need the CHARSET_INFO from when the table was created but NdbDictionary::Table doesn't save this. This means we cannot handle tables and execption tables defined with a charset different than the system charset. 
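check_optional_columns() decides whether the table uses the extended format by walking the column list in reverse and looking for the NDB$ prefix. A minimal sketch of that reverse scan (a plain case-sensitive comparison here; the real test goes through the charset-aware has_prefix_ci()):

~~~~~~~~~~~~~~~~
#include <string>
#include <vector>

// Illustrative only: report whether any column name carries the "NDB$"
// prefix, scanning from the last column backwards as the real code does.
static bool looks_extended(const std::vector<std::string> &column_names) {
  for (size_t i = column_names.size(); i-- > 0;) {
    if (column_names[i].compare(0, 4, "NDB$") == 0) return true;
  }
  return false;
}
~~~~~~~~~~~~~~~~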
*/ - CHARSET_INFO *cs= system_charset_info; - bool has_prefix= false; - - if (has_prefix_ci(col_name, NDB_EXCEPTIONS_TABLE_COLUMN_PREFIX, cs)) - { - has_prefix= true; - m_extended= true; - DBUG_PRINT("info", - ("Exceptions table %s is extended with column %s", - ex_tab_name, col_name)); + CHARSET_INFO *cs = system_charset_info; + bool has_prefix = false; + + if (has_prefix_ci(col_name, NDB_EXCEPTIONS_TABLE_COLUMN_PREFIX, cs)) { + has_prefix = true; + m_extended = true; + DBUG_PRINT("info", ("Exceptions table %s is extended with column %s", + ex_tab_name, col_name)); } /* Check that mandatory columns have NDB$ prefix */ - if (i < 4) - { - if (m_extended && !has_prefix) - { + if (i < 4) { + if (m_extended && !has_prefix) { snprintf(msg_buf, msg_buf_len, - "Exceptions table %s is extended, but mandatory column %s doesn't have the \'%s\' prefix", - ex_tab_name, - col_name, - NDB_EXCEPTIONS_TABLE_COLUMN_PREFIX); - *msg= msg_buf; + "Exceptions table %s is extended, but mandatory column %s " + "doesn't have the \'%s\' prefix", + ex_tab_name, col_name, NDB_EXCEPTIONS_TABLE_COLUMN_PREFIX); + *msg = msg_buf; DBUG_RETURN(false); } } - k= i - fixed_cols; + k = i - fixed_cols; /* Check for extended columns */ - if (my_strcasecmp(cs, - col_name, - NDB_EXCEPTIONS_TABLE_OP_TYPE) == 0) - { + if (my_strcasecmp(cs, col_name, NDB_EXCEPTIONS_TABLE_OP_TYPE) == 0) { /* Check if ENUM or INT UNSIGNED */ if (exceptionsTable->getColumn(i)->getType() != NDBCOL::Char && - exceptionsTable->getColumn(i)->getType() != NDBCOL::Unsigned) - { + exceptionsTable->getColumn(i)->getType() != NDBCOL::Unsigned) { snprintf(error_details, error_details_len, - "Table %s has incorrect type %u for NDB$OP_TYPE", - exceptionsTable->getName(), - exceptionsTable->getColumn(i)->getType()); + "Table %s has incorrect type %u for NDB$OP_TYPE", + exceptionsTable->getName(), + exceptionsTable->getColumn(i)->getType()); DBUG_PRINT("info", ("%s", error_details)); - ok= false; + ok = false; break; } - m_extended= true; - m_op_type_pos= i; + m_extended = true; + m_op_type_pos = i; continue; } - if (my_strcasecmp(cs, - col_name, - NDB_EXCEPTIONS_TABLE_CONFLICT_CAUSE) == 0) - { + if (my_strcasecmp(cs, col_name, NDB_EXCEPTIONS_TABLE_CONFLICT_CAUSE) == 0) { /* Check if ENUM or INT UNSIGNED */ if (exceptionsTable->getColumn(i)->getType() != NDBCOL::Char && - exceptionsTable->getColumn(i)->getType() != NDBCOL::Unsigned) - { + exceptionsTable->getColumn(i)->getType() != NDBCOL::Unsigned) { snprintf(error_details, error_details_len, - "Table %s has incorrect type %u for NDB$CFT_CAUSE", - exceptionsTable->getName(), - exceptionsTable->getColumn(i)->getType()); + "Table %s has incorrect type %u for NDB$CFT_CAUSE", + exceptionsTable->getName(), + exceptionsTable->getColumn(i)->getType()); DBUG_PRINT("info", ("%s", error_details)); - ok= false; + ok = false; break; } - m_extended= true; - m_conflict_cause_pos= i; + m_extended = true; + m_conflict_cause_pos = i; continue; } - if (my_strcasecmp(cs, - col_name, - NDB_EXCEPTIONS_TABLE_ORIG_TRANSID) == 0) - { - if (exceptionsTable->getColumn(i)->getType() != NDBCOL::Bigunsigned) - { + if (my_strcasecmp(cs, col_name, NDB_EXCEPTIONS_TABLE_ORIG_TRANSID) == 0) { + if (exceptionsTable->getColumn(i)->getType() != NDBCOL::Bigunsigned) { snprintf(error_details, error_details_len, - "Table %s has incorrect type %u for NDB$ORIG_TRANSID", - exceptionsTable->getName(), - exceptionsTable->getColumn(i)->getType()); + "Table %s has incorrect type %u for NDB$ORIG_TRANSID", + exceptionsTable->getName(), + 
exceptionsTable->getColumn(i)->getType()); DBUG_PRINT("info", ("%s", error_details)); - ok= false; + ok = false; break; } - m_extended= true; - m_orig_transid_pos= i; + m_extended = true; + m_orig_transid_pos = i; continue; } /* @@ -325,28 +276,20 @@ ExceptionsTableWriter::check_optional_columns(const NdbDictionary::Table* mainTa the original table. If a non-primary key column is found we assume that the table is extended. */ - if (i >= fixed_cols) - { - int match= -1; - int match_k= -1; - COLUMN_VERSION column_version= DEFAULT; + if (i >= fixed_cols) { + int match = -1; + int match_k = -1; + COLUMN_VERSION column_version = DEFAULT; char col_name_real[FN_HEADLEN]; /* Check for old or new column reference */ - if (has_suffix_ci(col_name, - NDB_EXCEPTIONS_TABLE_COLUMN_OLD_SUFFIX, - cs, - col_name_real)) - { + if (has_suffix_ci(col_name, NDB_EXCEPTIONS_TABLE_COLUMN_OLD_SUFFIX, cs, + col_name_real)) { DBUG_PRINT("info", ("Found reference to old column %s", col_name)); - column_version= OLD; - } - else if (has_suffix_ci(col_name, - NDB_EXCEPTIONS_TABLE_COLUMN_NEW_SUFFIX, - cs, - col_name_real)) - { + column_version = OLD; + } else if (has_suffix_ci(col_name, NDB_EXCEPTIONS_TABLE_COLUMN_NEW_SUFFIX, + cs, col_name_real)) { DBUG_PRINT("info", ("Found reference to new column %s", col_name)); - column_version= NEW; + column_version = NEW; } DBUG_PRINT("info", ("Checking for original column %s", col_name_real)); /* @@ -355,20 +298,19 @@ ExceptionsTableWriter::check_optional_columns(const NdbDictionary::Table* mainTa means we cannot handle tables end execption tables defined with a charset different than the system charset. */ - CHARSET_INFO *mcs= system_charset_info; - if (! find_column_name_ci(mcs, col_name_real, mainTable, &match, &match_k)) - { - if (! strcmp(col_name, col_name_real)) - { + CHARSET_INFO *mcs = system_charset_info; + if (!find_column_name_ci(mcs, col_name_real, mainTable, &match, + &match_k)) { + if (!strcmp(col_name, col_name_real)) { /* Column did have $OLD or $NEW suffix, but it didn't match. Check if that is the real name of the column. */ - match_k= -1; - if (find_column_name_ci(mcs, col_name, mainTable, &match, &match_k)) - { - DBUG_PRINT("info", ("Column %s in main table %s has an unfortunate name", - col_name, mainTable->getName())); + match_k = -1; + if (find_column_name_ci(mcs, col_name, mainTable, &match, &match_k)) { + DBUG_PRINT("info", + ("Column %s in main table %s has an unfortunate name", + col_name, mainTable->getName())); } } } @@ -376,207 +318,183 @@ ExceptionsTableWriter::check_optional_columns(const NdbDictionary::Table* mainTa Check that old or new references are nullable or have a default value. */ - if (column_version != DEFAULT && - match_k != -1) - { - if ((! col->getNullable()) && - col->getDefaultValue() == NULL) - { + if (column_version != DEFAULT && match_k != -1) { + if ((!col->getNullable()) && col->getDefaultValue() == NULL) { snprintf(error_details, error_details_len, - "Old or new column reference %s in table %s is not nullable and doesn't have a default value", - col->getName(), exceptionsTable->getName()); + "Old or new column reference %s in table %s is not nullable " + "and doesn't have a default value", + col->getName(), exceptionsTable->getName()); DBUG_PRINT("info", ("%s", error_details)); - ok= false; + ok = false; break; } } - - if (match == -1) - { - /* + + if (match == -1) { + /* Column do not have the same name, could be allowed if column is nullable or has a default value, continue checking, but give a warning to user */ - if ((! 
col->getNullable()) && - col->getDefaultValue() == NULL) - { + if ((!col->getNullable()) && col->getDefaultValue() == NULL) { snprintf(error_details, error_details_len, - "Extra column %s in table %s is not nullable and doesn't have a default value", - col->getName(), exceptionsTable->getName()); + "Extra column %s in table %s is not nullable and doesn't " + "have a default value", + col->getName(), exceptionsTable->getName()); DBUG_PRINT("info", ("%s", error_details)); - ok= false; + ok = false; break; } snprintf(error_details, error_details_len, - "Column %s in extension table %s not found in %s", - col->getName(), exceptionsTable->getName(), - mainTable->getName()); + "Column %s in extension table %s not found in %s", + col->getName(), exceptionsTable->getName(), + mainTable->getName()); DBUG_PRINT("info", ("%s", error_details)); snprintf(msg_buf, msg_buf_len, - "exceptions table %s has suspicious " - "definition ((column %d): %s", - ex_tab_name, fixed_cols + k, error_details); + "exceptions table %s has suspicious " + "definition ((column %d): %s", + ex_tab_name, fixed_cols + k, error_details); continue; } /* We have a matching name */ - const NdbDictionary::Column* mcol= mainTable->getColumn(match); - if (col->getType() == mcol->getType()) - { - DBUG_PRINT("info", ("Comparing column %s in exceptions table with column %s in main table", col->getName(), mcol->getName())); + const NdbDictionary::Column *mcol = mainTable->getColumn(match); + if (col->getType() == mcol->getType()) { + DBUG_PRINT("info", ("Comparing column %s in exceptions table with " + "column %s in main table", + col->getName(), mcol->getName())); /* We have matching type */ - if (!mcol->getPrimaryKey()) - { + if (!mcol->getPrimaryKey()) { /* Matching non-key column found. Check that column is nullable or has a default value. */ - if (col->getNullable() || - col->getDefaultValue() != NULL) - { + if (col->getNullable() || col->getDefaultValue() != NULL) { DBUG_PRINT("info", ("Mapping column %s %s(%i) to %s(%i)", - col->getName(), - mainTable->getName(), match, + col->getName(), mainTable->getName(), match, exceptionsTable->getName(), i)); /* Save position */ - m_data_pos[i]= match; - m_column_version[i]= column_version; - } - else - { + m_data_pos[i] = match; + m_column_version[i] = column_version; + } else { snprintf(error_details, error_details_len, - "Data column %s in table %s is not nullable and doesn't have a default value", - col->getName(), exceptionsTable->getName()); + "Data column %s in table %s is not nullable and doesn't " + "have a default value", + col->getName(), exceptionsTable->getName()); DBUG_PRINT("info", ("%s", error_details)); - ok= false; + ok = false; break; } - } - else - { + } else { /* Column is part of the primary key */ - if (column_version != DEFAULT) - { - snprintf(error_details, error_details_len, - "Old or new values of primary key columns cannot be referenced since primary keys cannot be updated, column %s in table %s", - col->getName(), exceptionsTable->getName()); + if (column_version != DEFAULT) { + snprintf( + error_details, error_details_len, + "Old or new values of primary key columns cannot be referenced " + "since primary keys cannot be updated, column %s in table %s", + col->getName(), exceptionsTable->getName()); DBUG_PRINT("info", ("%s", error_details)); - ok= false; + ok = false; break; } - if (col->getNullable() == mcol->getNullable()) - { + if (col->getNullable() == mcol->getNullable()) { /* Columns are both nullable or not nullable. Save position. 
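A rule that recurs throughout these checks: any exceptions-table column that conflict handling may leave unset (an extra column, or an old/new reference that does not apply to the operation at hand) must be nullable or carry a default value, or the insert into the exceptions table could itself fail. A tiny sketch of the predicate and the row-image tags involved (a plain struct standing in for NdbDictionary::Column):

~~~~~~~~~~~~~~~~
// Which row image a data column refers to; mirrors the writer's
// DEFAULT/OLD/NEW column-version tags.
enum ColumnVersion { VERSION_DEFAULT, VERSION_OLD, VERSION_NEW };

struct ColumnInfo {  // illustrative stand-in for NdbDictionary::Column
  bool nullable;
  bool has_default_value;
};

// A column that may be skipped for some operations must still be
// insertable, i.e. nullable or defaulted.
static bool can_be_left_unset(const ColumnInfo &col) {
  return col.nullable || col.has_default_value;
}
~~~~~~~~~~~~~~~~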
*/ - if (m_key_data_pos[match_k] != -1) - { - snprintf(error_details, error_details_len, - "Multiple references to the same key column %s in table %s", - col->getName(), exceptionsTable->getName()); + if (m_key_data_pos[match_k] != -1) { + snprintf( + error_details, error_details_len, + "Multiple references to the same key column %s in table %s", + col->getName(), exceptionsTable->getName()); DBUG_PRINT("info", ("%s", error_details)); - ok= false; + ok = false; break; } DBUG_PRINT("info", ("Setting m_key_data_pos[%i]= %i", match_k, i)); - m_key_data_pos[match_k]= i; - - if (i == fixed_cols + match_k) - { + m_key_data_pos[match_k] = i; + + if (i == fixed_cols + match_k) { /* Found key column in correct position */ - if (!m_extended) - continue; + if (!m_extended) continue; } /* Store mapping of Exception table key# to orig table attrid */ - DBUG_PRINT("info", ("%u: Setting m_key_attrids[%i]= %i", __LINE__, match_k, match)); - m_key_attrids[match_k]= match; - m_extended= true; - } - else if (column_version == DEFAULT) - { - /* + DBUG_PRINT("info", ("%u: Setting m_key_attrids[%i]= %i", __LINE__, + match_k, match)); + m_key_attrids[match_k] = match; + m_extended = true; + } else if (column_version == DEFAULT) { + /* Columns have same name and same type Column with this name is part of primary key, but both columns are not declared not null - */ + */ snprintf(error_details, error_details_len, - "Pk column %s not declared not null in both tables", - col->getName()); + "Pk column %s not declared not null in both tables", + col->getName()); DBUG_PRINT("info", ("%s", error_details)); - ok= false; + ok = false; break; } } - } - else - { - /* + } else { + /* Columns have same name, but not the same type - */ + */ snprintf(error_details, error_details_len, - "Column %s has matching name to column %s for table %s, but wrong type, %u versus %u", - col->getName(), mcol->getName(), - mainTable->getName(), - col->getType(), mcol->getType()); + "Column %s has matching name to column %s for table %s, but " + "wrong type, %u versus %u", + col->getName(), mcol->getName(), mainTable->getName(), + col->getType(), mcol->getType()); DBUG_PRINT("info", ("%s", error_details)); - ok= false; + ok = false; break; } } } - + DBUG_RETURN(ok); } -int -ExceptionsTableWriter::init(const NdbDictionary::Table* mainTable, - const NdbDictionary::Table* exceptionsTable, - char* msg_buf, - uint msg_buf_len, - const char** msg) -{ +int ExceptionsTableWriter::init(const NdbDictionary::Table *mainTable, + const NdbDictionary::Table *exceptionsTable, + char *msg_buf, uint msg_buf_len, + const char **msg) { DBUG_ENTER("ExceptionsTableWriter::init"); - const char* ex_tab_name= exceptionsTable->getName(); - const int fixed_cols= 4; - *msg= NULL; - *msg_buf= '\0'; + const char *ex_tab_name = exceptionsTable->getName(); + const int fixed_cols = 4; + *msg = NULL; + *msg_buf = '\0'; - DBUG_PRINT("info", ("Checking definition of exceptions table %s", - ex_tab_name)); + DBUG_PRINT("info", + ("Checking definition of exceptions table %s", ex_tab_name)); /* Check that the table has the correct number of columns and the mandatory columns.
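For reference, the four mandatory leading columns that this check expects, all of which must be part of the primary key (the names follow the documented NDB conflict-resolution convention and carry the NDB$ prefix in the extended format; the type strings stand in for NdbDictionary::Column::Type values):

~~~~~~~~~~~~~~~~
// Expected fixed header of an exceptions table; all four columns are
// primary key parts. The types correspond to the NDBCOL checks above.
struct MandatoryColumn {
  const char *name;      // documented convention, not read from this code
  const char *ndb_type;  // stand-in for NdbDictionary::Column::Type
};
static const MandatoryColumn mandatory_columns[4] = {
    {"server_id", "Unsigned"},         // slave's own server id
    {"master_server_id", "Unsigned"},  // immediate master's server id
    {"master_epoch", "Bigunsigned"},   // epoch applied on the master
    {"count", "Unsigned"},             // writer's running row counter
};
~~~~~~~~~~~~~~~~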
*/ - bool ok= - exceptionsTable->getNoOfColumns() >= fixed_cols && - exceptionsTable->getNoOfPrimaryKeys() == 4 && - check_mandatory_columns(exceptionsTable); - - if (ok) - { - char error_details[ FN_REFLEN ]; - uint error_details_len= sizeof(error_details); - error_details[0]= '\0'; - int ncol= mainTable->getNoOfColumns(); - int nkey= mainTable->getNoOfPrimaryKeys(); - int xncol= exceptionsTable->getNoOfColumns(); + bool ok = exceptionsTable->getNoOfColumns() >= fixed_cols && + exceptionsTable->getNoOfPrimaryKeys() == 4 && + check_mandatory_columns(exceptionsTable); + + if (ok) { + char error_details[FN_REFLEN]; + uint error_details_len = sizeof(error_details); + error_details[0] = '\0'; + int ncol = mainTable->getNoOfColumns(); + int nkey = mainTable->getNoOfPrimaryKeys(); + int xncol = exceptionsTable->getNoOfColumns(); int i, k; /* Initialize position arrays */ - for(k=0; k < nkey; k++) - m_key_data_pos[k]= -1; - for(i=0; i < xncol; i++) - m_data_pos[i]= -1; + for (k = 0; k < nkey; k++) m_key_data_pos[k] = -1; + for (i = 0; i < xncol; i++) m_data_pos[i] = -1; /* Initialize nullability information */ - for(i=0; i < ncol; i++) - { - const NdbDictionary::Column* col= mainTable->getColumn(i); - m_col_nullable[i]= col->getNullable(); + for (i = 0; i < ncol; i++) { + const NdbDictionary::Column *col = mainTable->getColumn(i); + m_col_nullable[i] = col->getNullable(); } /* @@ -585,179 +503,136 @@ ExceptionsTableWriter::init(const NdbDictionary::Table* mainTable, Then check if the table is extended with optional columns. */ - ok= - check_pk_columns(mainTable, exceptionsTable, k) && - check_optional_columns(mainTable, - exceptionsTable, - msg_buf, - msg_buf_len, - msg, - k, - error_details, - error_details_len); - if (ok) - { - m_ex_tab= exceptionsTable; - m_pk_cols= nkey; - m_cols= ncol; - m_xcols= xncol; - if (m_extended && strlen(msg_buf) > 0) - *msg= msg_buf; + ok = + check_pk_columns(mainTable, exceptionsTable, k) && + check_optional_columns(mainTable, exceptionsTable, msg_buf, msg_buf_len, + msg, k, error_details, error_details_len); + if (ok) { + m_ex_tab = exceptionsTable; + m_pk_cols = nkey; + m_cols = ncol; + m_xcols = xncol; + if (m_extended && strlen(msg_buf) > 0) *msg = msg_buf; DBUG_RETURN(0); - } - else + } else snprintf(msg_buf, msg_buf_len, - "exceptions table %s has wrong " - "definition (column %d): %s", - ex_tab_name, fixed_cols + k, error_details); - } - else + "exceptions table %s has wrong " + "definition (column %d): %s", + ex_tab_name, fixed_cols + k, error_details); + } else snprintf(msg_buf, msg_buf_len, - "exceptions table %s has wrong " - "definition (initial %d columns)", - ex_tab_name, fixed_cols); + "exceptions table %s has wrong " + "definition (initial %d columns)", + ex_tab_name, fixed_cols); - *msg= msg_buf; + *msg = msg_buf; DBUG_RETURN(-1); } -void -ExceptionsTableWriter::mem_free(Ndb* ndb) -{ - if (m_ex_tab) - { - NdbDictionary::Dictionary* dict = ndb->getDictionary(); +void ExceptionsTableWriter::mem_free(Ndb *ndb) { + if (m_ex_tab) { + NdbDictionary::Dictionary *dict = ndb->getDictionary(); dict->removeTableGlobal(*m_ex_tab, 0); - m_ex_tab= 0; + m_ex_tab = 0; } } -int -ExceptionsTableWriter::writeRow(NdbTransaction* trans, - const NdbRecord* keyRecord, - const NdbRecord* dataRecord, - uint32 server_id, - uint32 master_server_id, - uint64 master_epoch, - const uchar* oldRowPtr, - const uchar* newRowPtr, - enum_conflicting_op_type op_type, - enum_conflict_cause conflict_cause, - uint64 orig_transid, - const MY_BITMAP *write_set, - NdbError& err) -{ +int 
ExceptionsTableWriter::writeRow( + NdbTransaction *trans, const NdbRecord *keyRecord, + const NdbRecord *dataRecord, uint32 server_id, uint32 master_server_id, + uint64 master_epoch, const uchar *oldRowPtr, const uchar *newRowPtr, + enum_conflicting_op_type op_type, enum_conflict_cause conflict_cause, + uint64 orig_transid, const MY_BITMAP *write_set, NdbError &err) { DBUG_ENTER("ExceptionsTableWriter::writeRow"); - DBUG_PRINT("info", ("op_type(pos):%u(%u), conflict_cause(pos):%u(%u), orig_transid:%" PRIu64 "(%u)", - op_type, m_op_type_pos, - conflict_cause, m_conflict_cause_pos, - orig_transid, m_orig_transid_pos)); + DBUG_PRINT( + "info", + ("op_type(pos):%u(%u), conflict_cause(pos):%u(%u), orig_transid:%" PRIu64 + "(%u)", + op_type, m_op_type_pos, conflict_cause, m_conflict_cause_pos, + orig_transid, m_orig_transid_pos)); DBUG_ASSERT(write_set != NULL); assert(err.code == 0); - const uchar* rowPtr= (op_type == DELETE_ROW)? oldRowPtr : newRowPtr; + const uchar *rowPtr = (op_type == DELETE_ROW) ? oldRowPtr : newRowPtr; - do - { + do { /* Have exceptions table, add row to it */ - const NDBTAB *ex_tab= m_ex_tab; + const NDBTAB *ex_tab = m_ex_tab; /* get insert op */ - NdbOperation *ex_op= trans->getNdbOperation(ex_tab); - if (ex_op == NULL) - { - err= trans->getNdbError(); + NdbOperation *ex_op = trans->getNdbOperation(ex_tab); + if (ex_op == NULL) { + err = trans->getNdbError(); break; } - if (ex_op->insertTuple() == -1) - { - err= ex_op->getNdbError(); + if (ex_op->insertTuple() == -1) { + err = ex_op->getNdbError(); break; } { - uint32 count= (uint32)++m_count; + uint32 count = (uint32)++m_count; /* Set mandatory columns */ if (ex_op->setValue((Uint32)0, (const char *)&(server_id)) || ex_op->setValue((Uint32)1, (const char *)&(master_server_id)) || ex_op->setValue((Uint32)2, (const char *)&(master_epoch)) || - ex_op->setValue((Uint32)3, (const char *)&(count))) - { - err= ex_op->getNdbError(); + ex_op->setValue((Uint32)3, (const char *)&(count))) { + err = ex_op->getNdbError(); break; } /* Set optional columns */ - if (m_extended) - { - if (m_op_type_pos) - { - if (m_ex_tab->getColumn(m_op_type_pos)->getType() - == NDBCOL::Char) - { + if (m_extended) { + if (m_op_type_pos) { + if (m_ex_tab->getColumn(m_op_type_pos)->getType() == NDBCOL::Char) { /* Defined as ENUM */ - char op_type_val= (char)op_type; + char op_type_val = (char)op_type; if (ex_op->setValue((Uint32)m_op_type_pos, - (const char *)&(op_type_val))) - { - err= ex_op->getNdbError(); + (const char *)&(op_type_val))) { + err = ex_op->getNdbError(); break; } - } - else - { - uint32 op_type_val= op_type; + } else { + uint32 op_type_val = op_type; if (ex_op->setValue((Uint32)m_op_type_pos, - (const char *)&(op_type_val))) - { - err= ex_op->getNdbError(); + (const char *)&(op_type_val))) { + err = ex_op->getNdbError(); break; } } } - if (m_conflict_cause_pos) - { - if (m_ex_tab->getColumn(m_conflict_cause_pos)->getType() - == NDBCOL::Char) - { + if (m_conflict_cause_pos) { + if (m_ex_tab->getColumn(m_conflict_cause_pos)->getType() == + NDBCOL::Char) { /* Defined as ENUM */ - char conflict_cause_val= (char)conflict_cause; + char conflict_cause_val = (char)conflict_cause; if (ex_op->setValue((Uint32)m_conflict_cause_pos, - (const char *)&(conflict_cause_val))) - { - err= ex_op->getNdbError(); + (const char *)&(conflict_cause_val))) { + err = ex_op->getNdbError(); break; } - } - else - { - uint32 conflict_cause_val= conflict_cause; + } else { + uint32 conflict_cause_val = conflict_cause; if (ex_op->setValue((Uint32)m_conflict_cause_pos, 
- (const char *)&(conflict_cause_val))) - { - err= ex_op->getNdbError(); + (const char *)&(conflict_cause_val))) { + err = ex_op->getNdbError(); break; } } } - if (m_orig_transid_pos != 0) - { - const NdbDictionary::Column* col= m_ex_tab->getColumn(m_orig_transid_pos); - if (orig_transid == Ndb_binlog_extra_row_info::InvalidTransactionId - && - col->getNullable()) - { - if (ex_op->setValue((Uint32) m_orig_transid_pos, (char*)NULL)) - { - err= ex_op->getNdbError(); + if (m_orig_transid_pos != 0) { + const NdbDictionary::Column *col = + m_ex_tab->getColumn(m_orig_transid_pos); + if (orig_transid == Ndb_binlog_extra_row_info::InvalidTransactionId && + col->getNullable()) { + if (ex_op->setValue((Uint32)m_orig_transid_pos, (char *)NULL)) { + err = ex_op->getNdbError(); break; } - } - else - { - DBUG_PRINT("info", ("Setting orig_transid (%u) for table %s", m_orig_transid_pos, ex_tab->getName())); - uint64 orig_transid_val= orig_transid; + } else { + DBUG_PRINT("info", ("Setting orig_transid (%u) for table %s", + m_orig_transid_pos, ex_tab->getName())); + uint64 orig_transid_val = orig_transid; if (ex_op->setValue((Uint32)m_orig_transid_pos, - (const char *)&(orig_transid_val))) - { - err= ex_op->getNdbError(); + (const char *)&(orig_transid_val))) { + err = ex_op->getNdbError(); break; } } @@ -766,94 +641,79 @@ ExceptionsTableWriter::writeRow(NdbTransaction* trans, } /* copy primary keys */ { - int nkey= m_pk_cols; + int nkey = m_pk_cols; int k; - for (k= 0; k < nkey; k++) - { + for (k = 0; k < nkey; k++) { DBUG_ASSERT(rowPtr != NULL); - if (m_key_data_pos[k] != -1) - { - const uchar* data= - (const uchar*) NdbDictionary::getValuePtr(keyRecord, - (const char*) rowPtr, - m_key_attrids[k]); - if (ex_op->setValue((Uint32) m_key_data_pos[k], (const char*)data) == -1) - { - err= ex_op->getNdbError(); + if (m_key_data_pos[k] != -1) { + const uchar *data = (const uchar *)NdbDictionary::getValuePtr( + keyRecord, (const char *)rowPtr, m_key_attrids[k]); + if (ex_op->setValue((Uint32)m_key_data_pos[k], (const char *)data) == + -1) { + err = ex_op->getNdbError(); break; } } } } /* Copy additional data */ - if (m_extended) - { - int xncol= m_xcols; + if (m_extended) { + int xncol = m_xcols; int i; - for (i= 0; i < xncol; i++) - { - const NdbDictionary::Column* col= m_ex_tab->getColumn(i); - const uchar* default_value= (const uchar*) col->getDefaultValue(); + for (i = 0; i < xncol; i++) { + const NdbDictionary::Column *col = m_ex_tab->getColumn(i); + const uchar *default_value = (const uchar *)col->getDefaultValue(); DBUG_PRINT("info", ("Checking column %s(%i)%s", col->getName(), i, - (default_value)?", has default value":"")); + (default_value) ? 
", has default value" : "")); DBUG_ASSERT(rowPtr != NULL); - if (m_data_pos[i] != -1) - { - const uchar* row_vPtr= NULL; + if (m_data_pos[i] != -1) { + const uchar *row_vPtr = NULL; switch (m_column_version[i]) { - case DEFAULT: - row_vPtr= rowPtr; - break; - case OLD: - if (op_type != WRITE_ROW) - row_vPtr= oldRowPtr; - break; - case NEW: - if (op_type != DELETE_ROW) - row_vPtr= newRowPtr; + case DEFAULT: + row_vPtr = rowPtr; + break; + case OLD: + if (op_type != WRITE_ROW) row_vPtr = oldRowPtr; + break; + case NEW: + if (op_type != DELETE_ROW) row_vPtr = newRowPtr; } if (row_vPtr == NULL || (m_col_nullable[m_data_pos[i]] && - NdbDictionary::isNull(dataRecord, - (const char*) row_vPtr, - m_data_pos[i]))) - { - DBUG_PRINT("info", ("Column %s is set to NULL because it is NULL", col->getName())); - if (ex_op->setValue((Uint32) i, (char*)NULL)) - { - err= ex_op->getNdbError(); + NdbDictionary::isNull(dataRecord, (const char *)row_vPtr, + m_data_pos[i]))) { + DBUG_PRINT("info", ("Column %s is set to NULL because it is NULL", + col->getName())); + if (ex_op->setValue((Uint32)i, (char *)NULL)) { + err = ex_op->getNdbError(); break; } - } - else if (write_set != NULL && bitmap_is_set(write_set, m_data_pos[i])) - { + } else if (write_set != NULL && + bitmap_is_set(write_set, m_data_pos[i])) { DBUG_PRINT("info", ("Column %s is set", col->getName())); - const uchar* data= - (const uchar*) NdbDictionary::getValuePtr(dataRecord, - (const char*) row_vPtr, - m_data_pos[i]); - if (ex_op->setValue((Uint32) i, (const char*)data) == -1) - { - err= ex_op->getNdbError(); + const uchar *data = (const uchar *)NdbDictionary::getValuePtr( + dataRecord, (const char *)row_vPtr, m_data_pos[i]); + if (ex_op->setValue((Uint32)i, (const char *)data) == -1) { + err = ex_op->getNdbError(); break; } - } - else if (default_value != NULL) - { - DBUG_PRINT("info", ("Column %s is not set to NULL because it has a default value", col->getName())); + } else if (default_value != NULL) { + DBUG_PRINT( + "info", + ("Column %s is not set to NULL because it has a default value", + col->getName())); /* * Column has a default value * Since no value was set in write_set * we let the default value be set from * Ndb instead. */ - } - else - { - DBUG_PRINT("info", ("Column %s is set to NULL because it not in write_set", col->getName())); - if (ex_op->setValue((Uint32) i, (char*)NULL)) - { - err= ex_op->getNdbError(); + } else { + DBUG_PRINT("info", + ("Column %s is set to NULL because it not in write_set", + col->getName())); + if (ex_op->setValue((Uint32)i, (char *)NULL)) { + err = ex_op->getNdbError(); break; } } @@ -862,18 +722,16 @@ ExceptionsTableWriter::writeRow(NdbTransaction* trans, } } while (0); - if (err.code != 0) - { - if (err.classification == NdbError::SchemaError) - { - /* + if (err.code != 0) { + if (err.classification == NdbError::SchemaError) { + /* * Something up with Exceptions table schema, forget it. * No further exceptions will be recorded. * Caller will log this and slave will stop. 
*/ - NdbDictionary::Dictionary* dict= trans->getNdb()->getDictionary(); + NdbDictionary::Dictionary *dict = trans->getNdb()->getDictionary(); dict->removeTableGlobal(*m_ex_tab, false); - m_ex_tab= NULL; + m_ex_tab = NULL; DBUG_RETURN(0); } DBUG_RETURN(-1); @@ -881,63 +739,56 @@ ExceptionsTableWriter::writeRow(NdbTransaction* trans, DBUG_RETURN(0); } - /** st_ndb_slave_state constructor Initialise Ndb Slave state object */ st_ndb_slave_state::st_ndb_slave_state() - : current_delete_delete_count(0), - current_reflect_op_prepare_count(0), - current_reflect_op_discard_count(0), - current_refresh_op_count(0), - current_master_server_epoch(0), - current_master_server_epoch_committed(false), - current_max_rep_epoch(0), - conflict_flags(0), - retry_trans_count(0), - current_trans_row_conflict_count(0), - current_trans_row_reject_count(0), - current_trans_in_conflict_count(0), - last_conflicted_epoch(0), - last_stable_epoch(0), - total_delete_delete_count(0), - total_reflect_op_prepare_count(0), - total_reflect_op_discard_count(0), - total_refresh_op_count(0), - max_rep_epoch(0), - sql_run_id(~Uint32(0)), - trans_row_conflict_count(0), - trans_row_reject_count(0), - trans_detect_iter_count(0), - trans_in_conflict_count(0), - trans_conflict_commit_count(0), - trans_conflict_apply_state(SAS_NORMAL), - trans_dependency_tracker(NULL) -{ + : current_delete_delete_count(0), + current_reflect_op_prepare_count(0), + current_reflect_op_discard_count(0), + current_refresh_op_count(0), + current_master_server_epoch(0), + current_master_server_epoch_committed(false), + current_max_rep_epoch(0), + conflict_flags(0), + retry_trans_count(0), + current_trans_row_conflict_count(0), + current_trans_row_reject_count(0), + current_trans_in_conflict_count(0), + last_conflicted_epoch(0), + last_stable_epoch(0), + total_delete_delete_count(0), + total_reflect_op_prepare_count(0), + total_reflect_op_discard_count(0), + total_refresh_op_count(0), + max_rep_epoch(0), + sql_run_id(~Uint32(0)), + trans_row_conflict_count(0), + trans_row_reject_count(0), + trans_detect_iter_count(0), + trans_in_conflict_count(0), + trans_conflict_commit_count(0), + trans_conflict_apply_state(SAS_NORMAL), + trans_dependency_tracker(NULL) { memset(current_violation_count, 0, sizeof(current_violation_count)); memset(total_violation_count, 0, sizeof(total_violation_count)); /* Init conflict handling state memroot */ const size_t CONFLICT_MEMROOT_BLOCK_SIZE = 32768; - init_alloc_root(PSI_INSTRUMENT_ME, - &conflict_mem_root, CONFLICT_MEMROOT_BLOCK_SIZE, 0); + init_alloc_root(PSI_INSTRUMENT_ME, &conflict_mem_root, + CONFLICT_MEMROOT_BLOCK_SIZE, 0); } -st_ndb_slave_state::~st_ndb_slave_state() -{ - free_root(&conflict_mem_root, 0); -} +st_ndb_slave_state::~st_ndb_slave_state() { free_root(&conflict_mem_root, 0); } /** resetPerAttemptCounters Reset the per-epoch-transaction-application-attempt counters */ -void -st_ndb_slave_state::resetPerAttemptCounters() -{ +void st_ndb_slave_state::resetPerAttemptCounters() { memset(current_violation_count, 0, sizeof(current_violation_count)); current_delete_delete_count = 0; current_reflect_op_prepare_count = 0; @@ -956,9 +807,7 @@ st_ndb_slave_state::resetPerAttemptCounters() Called by Slave SQL thread during transaction abort. 
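st_ndb_slave_state keeps two tiers of counters: current_* values for the epoch-transaction attempt in flight, and total_* values that only absorb them when the transaction commits; resetPerAttemptCounters() clears the current tier on abort or retry. A compact sketch of that merge-then-reset pattern (field names abbreviated, not the real struct):

~~~~~~~~~~~~~~~~
#include <cstdint>

struct SlaveCounters {  // illustrative, not the real st_ndb_slave_state
  // Per-attempt tier: discarded on abort, merged on commit.
  uint64_t current_violations = 0;
  uint64_t current_delete_delete = 0;
  // Totals tier: only ever grows, and only at commit time.
  uint64_t total_violations = 0;
  uint64_t total_delete_delete = 0;

  void at_commit() {  // merge the attempt into the totals, then reset
    total_violations += current_violations;
    total_delete_delete += current_delete_delete;
    reset_attempt();
  }
  void at_abort() { reset_attempt(); }  // a failed attempt counts nothing

 private:
  void reset_attempt() {
    current_violations = 0;
    current_delete_delete = 0;
  }
};
~~~~~~~~~~~~~~~~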
*/ -void -st_ndb_slave_state::atTransactionAbort() -{ +void st_ndb_slave_state::atTransactionAbort() { /* Reset any gathered transaction dependency information */ atEndTransConflictHandling(); trans_conflict_apply_state = SAS_NORMAL; @@ -967,100 +816,83 @@ st_ndb_slave_state::atTransactionAbort() resetPerAttemptCounters(); } - - /** atTransactionCommit() Called by Slave SQL thread after transaction commit */ -void -st_ndb_slave_state::atTransactionCommit(Uint64 epoch) -{ - assert( ((trans_dependency_tracker == NULL) && - (trans_conflict_apply_state == SAS_NORMAL)) || - ((trans_dependency_tracker != NULL) && - (trans_conflict_apply_state == SAS_TRACK_TRANS_DEPENDENCIES)) ); - assert( trans_conflict_apply_state != SAS_APPLY_TRANS_DEPENDENCIES ); +void st_ndb_slave_state::atTransactionCommit(Uint64 epoch) { + assert(((trans_dependency_tracker == NULL) && + (trans_conflict_apply_state == SAS_NORMAL)) || + ((trans_dependency_tracker != NULL) && + (trans_conflict_apply_state == SAS_TRACK_TRANS_DEPENDENCIES))); + assert(trans_conflict_apply_state != SAS_APPLY_TRANS_DEPENDENCIES); /* Merge committed transaction counters into total state * Then reset current transaction counters */ Uint32 total_conflicts = 0; - for (int i=0; i < CFT_NUMBER_OF_CFTS; i++) - { - total_conflicts+= current_violation_count[i]; - total_violation_count[i]+= current_violation_count[i]; + for (int i = 0; i < CFT_NUMBER_OF_CFTS; i++) { + total_conflicts += current_violation_count[i]; + total_violation_count[i] += current_violation_count[i]; } - total_delete_delete_count+= current_delete_delete_count; - total_reflect_op_prepare_count+= current_reflect_op_prepare_count; - total_reflect_op_discard_count+= current_reflect_op_discard_count; - total_refresh_op_count+= current_refresh_op_count; - trans_row_conflict_count+= current_trans_row_conflict_count; - trans_row_reject_count+= current_trans_row_reject_count; - trans_in_conflict_count+= current_trans_in_conflict_count; - - if (current_trans_in_conflict_count) - trans_conflict_commit_count++; - - if (current_max_rep_epoch > max_rep_epoch) - { + total_delete_delete_count += current_delete_delete_count; + total_reflect_op_prepare_count += current_reflect_op_prepare_count; + total_reflect_op_discard_count += current_reflect_op_discard_count; + total_refresh_op_count += current_refresh_op_count; + trans_row_conflict_count += current_trans_row_conflict_count; + trans_row_reject_count += current_trans_row_reject_count; + trans_in_conflict_count += current_trans_in_conflict_count; + + if (current_trans_in_conflict_count) trans_conflict_commit_count++; + + if (current_max_rep_epoch > max_rep_epoch) { DBUG_PRINT("info", ("Max replicated epoch increases from %llu to %llu", - max_rep_epoch, - current_max_rep_epoch)); + max_rep_epoch, current_max_rep_epoch)); max_rep_epoch = current_max_rep_epoch; } { bool hadConflict = false; - if (total_conflicts > 0) - { + if (total_conflicts > 0) { /** * Conflict detected locally */ DBUG_PRINT("info", ("Last conflicted epoch increases from %llu to %llu", - last_conflicted_epoch, - epoch)); + last_conflicted_epoch, epoch)); hadConflict = true; - } - else - { + } else { /** * Update last_conflicted_epoch if we applied reflected or refresh ops * (Implies Secondary role in asymmetric algorithms) */ - assert(current_reflect_op_prepare_count >= current_reflect_op_discard_count); - Uint32 current_reflect_op_apply_count = current_reflect_op_prepare_count - - current_reflect_op_discard_count; - if (current_reflect_op_apply_count > 0 || - current_refresh_op_count 
> 0) - { - DBUG_PRINT("info", ("Reflected (%u) or Refresh (%u) operations applied this " - "epoch, increasing last conflicted epoch from %llu to %llu.", - current_reflect_op_apply_count, - current_refresh_op_count, - last_conflicted_epoch, - epoch)); + assert(current_reflect_op_prepare_count >= + current_reflect_op_discard_count); + Uint32 current_reflect_op_apply_count = + current_reflect_op_prepare_count - current_reflect_op_discard_count; + if (current_reflect_op_apply_count > 0 || current_refresh_op_count > 0) { + DBUG_PRINT( + "info", + ("Reflected (%u) or Refresh (%u) operations applied this " + "epoch, increasing last conflicted epoch from %llu to %llu.", + current_reflect_op_apply_count, current_refresh_op_count, + last_conflicted_epoch, epoch)); hadConflict = true; } } /* Update status vars */ - if (hadConflict) - { + if (hadConflict) { last_conflicted_epoch = epoch; - } - else - { - if (max_rep_epoch >= last_conflicted_epoch) - { + } else { + if (max_rep_epoch >= last_conflicted_epoch) { /** - * This epoch which has looped the circle was stable - + * This epoch which has looped the circle was stable - * no new conflicts have been found / corrected since * it was logged */ last_stable_epoch = max_rep_epoch; - + /** * Note that max_rep_epoch >= last_conflicted_epoch * implies that there are no currently known-about @@ -1068,7 +900,7 @@ st_ndb_slave_state::atTransactionCommit(Uint64 epoch) * On the primary this is a definitive fact as it * finds out about all conflicts immediately. * On the secondary it does not mean that there - * are not committed conflicts, just that they + * are not committed conflicts, just that they * have not started being corrected yet. */ } @@ -1082,8 +914,7 @@ st_ndb_slave_state::atTransactionCommit(Uint64 epoch) current_master_server_epoch_committed = true; - if (DBUG_EVALUATE_IF("ndb_slave_fail_marking_epoch_committed", true, false)) - { + if (DBUG_EVALUATE_IF("ndb_slave_fail_marking_epoch_committed", true, false)) { fprintf(stderr, "Slave clearing epoch committed flag " "for epoch %llu/%llu (%llu)\n", @@ -1103,10 +934,8 @@ st_ndb_slave_state::atTransactionCommit(Uint64 epoch) This is checking Generic replication errors, with a user warning thrown in too. */ -bool -st_ndb_slave_state::verifyNextEpoch(Uint64 next_epoch, - Uint32 master_server_id) const -{ +bool st_ndb_slave_state::verifyNextEpoch(Uint64 next_epoch, + Uint32 master_server_id) const { DBUG_ENTER("verifyNextEpoch"); /** @@ -1117,74 +946,61 @@ st_ndb_slave_state::verifyNextEpoch(Uint64 next_epoch, epoch - to make sure that we are getting a sensible sequence of epochs. */ - bool first_epoch_since_slave_start = (ndb_mi_get_slave_run_id() != sql_run_id); - - DBUG_PRINT("info", ("ndb_apply_status write from upstream master." - "ServerId %u, Epoch %llu/%llu (%llu) " - "Current master server epoch %llu/%llu (%llu)" - "Current master server epoch committed? %u", - master_server_id, - next_epoch >> 32, - next_epoch & 0xffffffff, - next_epoch, - current_master_server_epoch >> 32, - current_master_server_epoch & 0xffffffff, - current_master_server_epoch, - current_master_server_epoch_committed)); + bool first_epoch_since_slave_start = + (ndb_mi_get_slave_run_id() != sql_run_id); + + DBUG_PRINT( + "info", + ("ndb_apply_status write from upstream master." + "ServerId %u, Epoch %llu/%llu (%llu) " + "Current master server epoch %llu/%llu (%llu)" + "Current master server epoch committed? 
%u", + master_server_id, next_epoch >> 32, next_epoch & 0xffffffff, next_epoch, + current_master_server_epoch >> 32, + current_master_server_epoch & 0xffffffff, current_master_server_epoch, + current_master_server_epoch_committed)); DBUG_PRINT("info", ("mi_slave_run_id=%u, ndb_slave_state_run_id=%u", ndb_mi_get_slave_run_id(), sql_run_id)); DBUG_PRINT("info", ("First epoch since slave start : %u", first_epoch_since_slave_start)); - + /* Analysis of nextEpoch generally depends on whether it's the first or not */ - if (first_epoch_since_slave_start) - { + if (first_epoch_since_slave_start) { /** First epoch since slave start - might've had a CHANGE MASTER command, since we were last running, so we are not too strict about epoch changes, but we will warn. */ - if (next_epoch < current_master_server_epoch) - { - ndb_log_warning("NDB Slave: At SQL thread start " - "applying epoch %llu/%llu (%llu) from " - "Master ServerId %u which is lower than " - "previously applied epoch %llu/%llu (%llu). " - "Group Master Log : %s " - "Group Master Log Pos : %" PRIu64 ". " - "Slave run id from slave's master info %u, " - "Slave run id %u. " - "Check slave positioning. ", - next_epoch >> 32, - next_epoch & 0xffffffff, - next_epoch, - master_server_id, - current_master_server_epoch >> 32, - current_master_server_epoch & 0xffffffff, - current_master_server_epoch, - ndb_mi_get_group_master_log_name(), - ndb_mi_get_group_master_log_pos(), - ndb_mi_get_slave_run_id(), - sql_run_id); + if (next_epoch < current_master_server_epoch) { + ndb_log_warning( + "NDB Slave: At SQL thread start " + "applying epoch %llu/%llu (%llu) from " + "Master ServerId %u which is lower than " + "previously applied epoch %llu/%llu (%llu). " + "Group Master Log : %s " + "Group Master Log Pos : %" PRIu64 + ". " + "Slave run id from slave's master info %u, " + "Slave run id %u. " + "Check slave positioning. ", + next_epoch >> 32, next_epoch & 0xffffffff, next_epoch, + master_server_id, current_master_server_epoch >> 32, + current_master_server_epoch & 0xffffffff, current_master_server_epoch, + ndb_mi_get_group_master_log_name(), ndb_mi_get_group_master_log_pos(), + ndb_mi_get_slave_run_id(), sql_run_id); /* Slave not stopped */ - } - else if (next_epoch == current_master_server_epoch) - { + } else if (next_epoch == current_master_server_epoch) { /** Could warn that started on already applied epoch, but this is often harmless. */ - } - else - { + } else { /* next_epoch > current_master_server_epoch - fine. */ } - } - else - { + } else { /** ! first_epoch_since_slave_start - + Slave has already applied some epoch in this run, so we expect either : a) previous epoch committed ok and next epoch is higher @@ -1192,102 +1008,85 @@ st_ndb_slave_state::verifyNextEpoch(Uint64 next_epoch, b) previous epoch not committed and next epoch is the same (Retry case) */ - if (next_epoch < current_master_server_epoch) - { + if (next_epoch < current_master_server_epoch) { /* Should never happen */ - ndb_log_error("NDB Slave: SQL thread stopped as " - "applying epoch %llu/%llu (%llu) from " - "Master ServerId %u which is lower than " - "previously applied epoch %llu/%llu (%llu). " - "Group Master Log : %s " - "Group Master Log Pos : %" PRIu64 ". " - "Slave run id from slave's master info %u, " - "Slave run id %u. 
", - next_epoch >> 32, - next_epoch & 0xffffffff, - next_epoch, - master_server_id, - current_master_server_epoch >> 32, - current_master_server_epoch & 0xffffffff, - current_master_server_epoch, - ndb_mi_get_group_master_log_name(), - ndb_mi_get_group_master_log_pos(), - ndb_mi_get_slave_run_id(), - sql_run_id); + ndb_log_error( + "NDB Slave: SQL thread stopped as " + "applying epoch %llu/%llu (%llu) from " + "Master ServerId %u which is lower than " + "previously applied epoch %llu/%llu (%llu). " + "Group Master Log : %s " + "Group Master Log Pos : %" PRIu64 + ". " + "Slave run id from slave's master info %u, " + "Slave run id %u. ", + next_epoch >> 32, next_epoch & 0xffffffff, next_epoch, + master_server_id, current_master_server_epoch >> 32, + current_master_server_epoch & 0xffffffff, current_master_server_epoch, + ndb_mi_get_group_master_log_name(), ndb_mi_get_group_master_log_pos(), + ndb_mi_get_slave_run_id(), sql_run_id); /* Stop the slave */ DBUG_RETURN(false); - } - else if (next_epoch == current_master_server_epoch) - { + } else if (next_epoch == current_master_server_epoch) { /** - This is ok if we are retrying - e.g. the + This is ok if we are retrying - e.g. the last epoch was not committed */ - if (current_master_server_epoch_committed) - { + if (current_master_server_epoch_committed) { /* This epoch is committed already, why are we replaying it? */ - ndb_log_error("NDB Slave: SQL thread stopped as attempted to " - "reapply already committed epoch %llu/%llu (%llu) " - "from server id %u. " - "Group Master Log : %s " - "Group Master Log Pos : %" PRIu64 ". " - "Slave run id from slave's master info %u, " - "Slave run id %u. ", - current_master_server_epoch >> 32, - current_master_server_epoch & 0xffffffff, - current_master_server_epoch, - master_server_id, - ndb_mi_get_group_master_log_name(), - ndb_mi_get_group_master_log_pos(), - ndb_mi_get_slave_run_id(), - sql_run_id); + ndb_log_error( + "NDB Slave: SQL thread stopped as attempted to " + "reapply already committed epoch %llu/%llu (%llu) " + "from server id %u. " + "Group Master Log : %s " + "Group Master Log Pos : %" PRIu64 + ". " + "Slave run id from slave's master info %u, " + "Slave run id %u. ", + current_master_server_epoch >> 32, + current_master_server_epoch & 0xffffffff, + current_master_server_epoch, master_server_id, + ndb_mi_get_group_master_log_name(), + ndb_mi_get_group_master_log_pos(), ndb_mi_get_slave_run_id(), + sql_run_id); /* Stop the slave */ DBUG_RETURN(false); - } - else - { + } else { /* Probably a retry, no problem. */ } - } - else - { + } else { /** next_epoch > current_master_server_epoch - + This is the normal case, *unless* the previous epoch - did not commit - in which case it may be a bug in + did not commit - in which case it may be a bug in transaction retry. */ - if (!current_master_server_epoch_committed) - { + if (!current_master_server_epoch_committed) { /** We've moved onto a new epoch without committing the last - probably a bug in transaction retry */ - ndb_log_error("NDB Slave: SQL thread stopped as attempting to " - "apply new epoch %llu/%llu (%llu) while lower " - "received epoch %llu/%llu (%llu) has not been " - "committed. Master server id : %u. " - "Group Master Log : %s " - "Group Master Log Pos : %" PRIu64 ". " - "Slave run id from slave's master info %u, " - "Slave run id %u. 
", - next_epoch >> 32, - next_epoch & 0xffffffff, - next_epoch, - current_master_server_epoch >> 32, - current_master_server_epoch & 0xffffffff, - current_master_server_epoch, - master_server_id, - ndb_mi_get_group_master_log_name(), - ndb_mi_get_group_master_log_pos(), - ndb_mi_get_slave_run_id(), - sql_run_id); + ndb_log_error( + "NDB Slave: SQL thread stopped as attempting to " + "apply new epoch %llu/%llu (%llu) while lower " + "received epoch %llu/%llu (%llu) has not been " + "committed. Master server id : %u. " + "Group Master Log : %s " + "Group Master Log Pos : %" PRIu64 + ". " + "Slave run id from slave's master info %u, " + "Slave run id %u. ", + next_epoch >> 32, next_epoch & 0xffffffff, next_epoch, + current_master_server_epoch >> 32, + current_master_server_epoch & 0xffffffff, + current_master_server_epoch, master_server_id, + ndb_mi_get_group_master_log_name(), + ndb_mi_get_group_master_log_pos(), ndb_mi_get_slave_run_id(), + sql_run_id); /* Stop the slave */ DBUG_RETURN(false); - } - else - { + } else { /* Normal case of next epoch after committing last */ } } @@ -1303,20 +1102,15 @@ st_ndb_slave_state::verifyNextEpoch(Uint64 next_epoch, Called by Slave SQL thread when applying an event to the ndb_apply_status table */ -int -st_ndb_slave_state::atApplyStatusWrite(Uint32 master_server_id, - Uint32 row_server_id, - Uint64 row_epoch, - bool is_row_server_id_local) -{ +int st_ndb_slave_state::atApplyStatusWrite(Uint32 master_server_id, + Uint32 row_server_id, + Uint64 row_epoch, + bool is_row_server_id_local) { DBUG_ENTER("atApplyStatusWrite"); - if (row_server_id == master_server_id) - { + if (row_server_id == master_server_id) { /* This is an apply status write from the immediate master */ - if (!verifyNextEpoch(row_epoch, - master_server_id)) - { + if (!verifyNextEpoch(row_epoch, master_server_id)) { /* Problem with the next epoch, stop the slave SQL thread */ DBUG_RETURN(HA_ERR_ROWS_EVENT_APPLY); } @@ -1325,17 +1119,15 @@ st_ndb_slave_state::atApplyStatusWrite(Uint32 master_server_id, current_master_server_epoch = row_epoch; current_master_server_epoch_committed = false; - assert(! is_row_server_id_local); - } - else if (is_row_server_id_local) - { + assert(!is_row_server_id_local); + } else if (is_row_server_id_local) { DBUG_PRINT("info", ("Recording application of local server %u epoch %llu " " which is %s.", row_server_id, row_epoch, - (row_epoch > current_max_rep_epoch)? - " new highest." : " older than previously applied")); - if (row_epoch > current_max_rep_epoch) - { + (row_epoch > current_max_rep_epoch) + ? " new highest." + : " older than previously applied")); + if (row_epoch > current_max_rep_epoch) { /* Store new highest epoch in thdvar. If we commit successfully then this can become the new global max @@ -1351,9 +1143,7 @@ st_ndb_slave_state::atApplyStatusWrite(Uint32 master_server_id, Called when RESET SLAVE command issued - in context of command client. */ -void -st_ndb_slave_state::atResetSlave() -{ +void st_ndb_slave_state::atResetSlave() { /* Reset the Maximum replicated epoch vars * on slave reset * No need to touch the sql_run_id as that @@ -1376,18 +1166,14 @@ st_ndb_slave_state::atResetSlave() current_master_server_epoch_committed = false; } - /** atStartSlave() Called by Slave SQL thread when first applying a row to Ndb after a START SLAVE command. 
*/ -void -st_ndb_slave_state::atStartSlave() -{ - if (trans_conflict_apply_state != SAS_NORMAL) - { +void st_ndb_slave_state::atStartSlave() { + if (trans_conflict_apply_state != SAS_NORMAL) { /* Remove conflict handling state on a SQL thread restart @@ -1397,14 +1183,11 @@ st_ndb_slave_state::atStartSlave() } } -bool -st_ndb_slave_state::checkSlaveConflictRoleChange(enum_slave_conflict_role old_role, - enum_slave_conflict_role new_role, - const char** failure_cause) -{ - if (old_role == new_role) - return true; - +bool st_ndb_slave_state::checkSlaveConflictRoleChange( + enum_slave_conflict_role old_role, enum_slave_conflict_role new_role, + const char **failure_cause) { + if (old_role == new_role) return true; + /** * Initial role is SCR_NONE * Allowed transitions : @@ -1426,56 +1209,49 @@ st_ndb_slave_state::checkSlaveConflictRoleChange(enum_slave_conflict_role old_ro bool bad_transition = false; *failure_cause = "Internal error"; - switch (old_role) - { - case SCR_NONE: - break; - case SCR_PRIMARY: - case SCR_SECONDARY: - bad_transition = (new_role == SCR_PASS); - break; - case SCR_PASS: - bad_transition = ((new_role == SCR_PRIMARY) || - (new_role == SCR_SECONDARY)); - break; - default: - assert(false); - return false; + switch (old_role) { + case SCR_NONE: + break; + case SCR_PRIMARY: + case SCR_SECONDARY: + bad_transition = (new_role == SCR_PASS); + break; + case SCR_PASS: + bad_transition = + ((new_role == SCR_PRIMARY) || (new_role == SCR_SECONDARY)); + break; + default: + assert(false); + return false; } - if (bad_transition) - { + if (bad_transition) { *failure_cause = "Invalid role change."; return false; } - + /* Check that Slave SQL thread is not running */ - if (ndb_mi_get_slave_sql_running()) - { - *failure_cause = "Cannot change role while Slave SQL " - "thread is running. Use STOP SLAVE first."; + if (ndb_mi_get_slave_sql_running()) { + *failure_cause = + "Cannot change role while Slave SQL " + "thread is running. Use STOP SLAVE first."; return false; } return true; } - - /** atEndTransConflictHandling Called when transactional conflict handling has completed. */ -void -st_ndb_slave_state::atEndTransConflictHandling() -{ +void st_ndb_slave_state::atEndTransConflictHandling() { DBUG_ENTER("atEndTransConflictHandling"); /* Release any conflict handling state */ - if (trans_dependency_tracker) - { + if (trans_dependency_tracker) { current_trans_in_conflict_count = - trans_dependency_tracker->get_conflict_count(); + trans_dependency_tracker->get_conflict_count(); trans_dependency_tracker = NULL; free_root(&conflict_mem_root, MY_MARK_BLOCKS_FREE); } @@ -1488,16 +1264,15 @@ st_ndb_slave_state::atEndTransConflictHandling() Called by Slave SQL thread when it determines that Transactional Conflict handling is required */ -void -st_ndb_slave_state::atBeginTransConflictHandling() -{ +void st_ndb_slave_state::atBeginTransConflictHandling() { DBUG_ENTER("atBeginTransConflictHandling"); /* Allocate and initialise Transactional Conflict Resolution Handling Structures */ assert(trans_dependency_tracker == NULL); - trans_dependency_tracker = DependencyTracker::newDependencyTracker(&conflict_mem_root); + trans_dependency_tracker = + DependencyTracker::newDependencyTracker(&conflict_mem_root); DBUG_VOID_RETURN; } @@ -1507,78 +1282,68 @@ st_ndb_slave_state::atBeginTransConflictHandling() Called by Slave SQL thread prior to defining an operation on a table with conflict detection defined. 
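In SAS_TRACK_TRANS_DEPENDENCIES state the operation is only recorded against its transaction id. In SAS_APPLY_TRANS_DEPENDENCIES state it is checked against the set of in-conflict transactions, and handle_conflict_now is set when the caller should perform conflict handling instead of applying the operation. Returns 0 on success, or an error code from the dependency tracker.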
*/ -int -st_ndb_slave_state::atPrepareConflictDetection(const NdbDictionary::Table* table, - const NdbRecord* key_rec, - const uchar* row_data, - Uint64 transaction_id, - bool& handle_conflict_now) -{ +int st_ndb_slave_state::atPrepareConflictDetection( + const NdbDictionary::Table *table, const NdbRecord *key_rec, + const uchar *row_data, Uint64 transaction_id, bool &handle_conflict_now) { DBUG_ENTER("atPrepareConflictDetection"); /* Slave is preparing to apply an operation with conflict detection. If we're performing Transactional Conflict Resolution, take extra steps */ - switch( trans_conflict_apply_state ) - { - case SAS_NORMAL: - DBUG_PRINT("info", ("SAS_NORMAL : No special handling")); - /* No special handling */ - break; - case SAS_TRACK_TRANS_DEPENDENCIES: - { - DBUG_PRINT("info", ("SAS_TRACK_TRANS_DEPENDENCIES : Tracking operation")); - /* - Track this operation and its transaction id, to determine - inter-transaction dependencies by {table, primary key} - */ - assert( trans_dependency_tracker ); - - int res = trans_dependency_tracker - ->track_operation(table, - key_rec, - row_data, - transaction_id); - if (res != 0) - { - ndb_log_error("%s", trans_dependency_tracker->get_error_text()); - DBUG_RETURN(res); + switch (trans_conflict_apply_state) { + case SAS_NORMAL: + DBUG_PRINT("info", ("SAS_NORMAL : No special handling")); + /* No special handling */ + break; + case SAS_TRACK_TRANS_DEPENDENCIES: { + DBUG_PRINT("info", ("SAS_TRACK_TRANS_DEPENDENCIES : Tracking operation")); + /* + Track this operation and its transaction id, to determine + inter-transaction dependencies by {table, primary key} + */ + assert(trans_dependency_tracker); + + int res = trans_dependency_tracker->track_operation( + table, key_rec, row_data, transaction_id); + if (res != 0) { + ndb_log_error("%s", trans_dependency_tracker->get_error_text()); + DBUG_RETURN(res); + } + /* Proceed as normal */ + break; } - /* Proceed as normal */ - break; - } - case SAS_APPLY_TRANS_DEPENDENCIES: - { - DBUG_PRINT("info", ("SAS_APPLY_TRANS_DEPENDENCIES : Deciding whether to apply")); - /* - Check if this operation's transaction id is marked in-conflict. - If it is, we tell the caller to perform conflict resolution now instead - of attempting to apply the operation. - */ - assert( trans_dependency_tracker ); + case SAS_APPLY_TRANS_DEPENDENCIES: { + DBUG_PRINT("info", + ("SAS_APPLY_TRANS_DEPENDENCIES : Deciding whether to apply")); + /* + Check if this operation's transaction id is marked in-conflict. + If it is, we tell the caller to perform conflict resolution now instead + of attempting to apply the operation. + */ + assert(trans_dependency_tracker); + + if (trans_dependency_tracker->in_conflict(transaction_id)) { + DBUG_PRINT("info", + ("Event for transaction %llu is conflicting. Handling.", + transaction_id)); + current_trans_row_reject_count++; + handle_conflict_now = true; + DBUG_RETURN(0); + } - if (trans_dependency_tracker->in_conflict(transaction_id)) - { - DBUG_PRINT("info", ("Event for transaction %llu is conflicting. Handling.", + /* + This transaction is not marked in-conflict, so continue with normal + processing. + Note that normal processing may subsequently detect a conflict which + didn't exist at the time of the previous TRACK_DEPENDENCIES pass. + In this case, we will rollback and repeat the TRACK_DEPENDENCIES + stage. 
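+        For example, a row changed by some other transaction between the + two passes can make an operation conflict here even though it was + clean during the previous TRACK_DEPENDENCIES pass.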
+ */ + DBUG_PRINT("info", ("Event for transaction %llu is OK, applying", transaction_id)); - current_trans_row_reject_count++; - handle_conflict_now = true; - DBUG_RETURN(0); + break; } - - /* - This transaction is not marked in-conflict, so continue with normal - processing. - Note that normal processing may subsequently detect a conflict which - didn't exist at the time of the previous TRACK_DEPENDENCIES pass. - In this case, we will rollback and repeat the TRACK_DEPENDENCIES - stage. - */ - DBUG_PRINT("info", ("Event for transaction %llu is OK, applying", - transaction_id)); - break; - } } DBUG_RETURN(0); } @@ -1589,9 +1354,7 @@ st_ndb_slave_state::atPrepareConflictDetection(const NdbDictionary::Table* table Called by the Slave SQL thread when a conflict is detected on an executed operation. */ -int -st_ndb_slave_state::atTransConflictDetected(Uint64 transaction_id) -{ +int st_ndb_slave_state::atTransConflictDetected(Uint64 transaction_id) { DBUG_ENTER("atTransConflictDetected"); /* @@ -1602,62 +1365,59 @@ st_ndb_slave_state::atTransConflictDetected(Uint64 transaction_id) conflict_flags |= SCS_TRANS_CONFLICT_DETECTED_THIS_PASS; current_trans_row_conflict_count++; - switch (trans_conflict_apply_state) - { - case SAS_NORMAL: - { - DBUG_PRINT("info", ("SAS_NORMAL : Conflict on op on table with trans detection." - "Requires multi-pass resolution. Will transition to " - "SAS_TRACK_TRANS_DEPENDENCIES at Commit.")); - /* - Conflict on table with transactional conflict resolution - defined. - This is the trigger that we will do transactional conflict - resolution. - Record that we need to do multiple passes to correctly - perform resolution. - TODO : Early exit from applying epoch? - */ - break; - } - case SAS_TRACK_TRANS_DEPENDENCIES: - { - DBUG_PRINT("info", ("SAS_TRACK_TRANS_DEPENDENCIES : Operation in transaction %llu " - "had conflict", - transaction_id)); - /* - Conflict on table with transactional conflict resolution - defined. - We will mark the operation's transaction_id as in-conflict, - so that any other operations on the transaction are also - considered in-conflict, and any dependent transactions are also - considered in-conflict. - */ - assert(trans_dependency_tracker != NULL); - int res = trans_dependency_tracker - ->mark_conflict(transaction_id); + switch (trans_conflict_apply_state) { + case SAS_NORMAL: { + DBUG_PRINT("info", + ("SAS_NORMAL : Conflict on op on table with trans detection." + "Requires multi-pass resolution. Will transition to " + "SAS_TRACK_TRANS_DEPENDENCIES at Commit.")); + /* + Conflict on table with transactional conflict resolution + defined. + This is the trigger that we will do transactional conflict + resolution. + Record that we need to do multiple passes to correctly + perform resolution. + TODO : Early exit from applying epoch? + */ + break; + } + case SAS_TRACK_TRANS_DEPENDENCIES: { + DBUG_PRINT( + "info", + ("SAS_TRACK_TRANS_DEPENDENCIES : Operation in transaction %llu " + "had conflict", + transaction_id)); + /* + Conflict on table with transactional conflict resolution + defined. + We will mark the operation's transaction_id as in-conflict, + so that any other operations on the transaction are also + considered in-conflict, and any dependent transactions are also + considered in-conflict. 
+ */ + assert(trans_dependency_tracker != NULL); + int res = trans_dependency_tracker->mark_conflict(transaction_id); - if (res != 0) - { - ndb_log_error("%s", trans_dependency_tracker->get_error_text()); - DBUG_RETURN(res); + if (res != 0) { + ndb_log_error("%s", trans_dependency_tracker->get_error_text()); + DBUG_RETURN(res); + } + break; } - break; - } - case SAS_APPLY_TRANS_DEPENDENCIES: - { - /* - This must be a new conflict, not noticed on the previous - pass. - */ - DBUG_PRINT("info", ("SAS_APPLY_TRANS_DEPENDENCIES : Conflict detected. " - "Must be further conflict. Will return to " - "SAS_TRACK_TRANS_DEPENDENCIES state at commit.")); - // TODO : Early exit from applying epoch - break; - } - default: - break; + case SAS_APPLY_TRANS_DEPENDENCIES: { + /* + This must be a new conflict, not noticed on the previous + pass. + */ + DBUG_PRINT("info", ("SAS_APPLY_TRANS_DEPENDENCIES : Conflict detected. " + "Must be further conflict. Will return to " + "SAS_TRACK_TRANS_DEPENDENCIES state at commit.")); + // TODO : Early exit from applying epoch + break; + } + default: + break; } DBUG_RETURN(0); @@ -1732,8 +1492,8 @@ st_ndb_slave_state::atTransConflictDetected(Uint64 transaction_id) 1) Normally, there is no transaction dependency tracking overhead paid by the slave. - 2) On first detecting a transactional conflict, the epoch transaction must be - applied at least three times, with two rollbacks. + 2) On first detecting a transactional conflict, the epoch transaction must + be applied at least three times, with two rollbacks. 3) Transactional conflicts detected in subsequent epochs require the epoch transaction to be applied two times, with one rollback. @@ -1741,23 +1501,21 @@ st_ndb_slave_state::atTransConflictDetected(Uint64 transaction_id) 4) A loop between states SAS_TRACK_TRANS_DEPENDENCIES and SAS_APPLY_TRANS_ DEPENDENCIES occurs when further transactional conflicts are discovered in SAS_APPLY_TRANS_DEPENDENCIES state. This implies that the conflicts - discovered in the SAS_TRACK_TRANS_DEPENDENCIES state must not be complete, - so we revisit that state to get a more complete picture. + discovered in the SAS_TRACK_TRANS_DEPENDENCIES state must not be + complete, so we revisit that state to get a more complete picture. - 5) The number of iterations of this loop is fixed to a hard coded limit, after - which the Slave will stop with an error. This should be an unlikely - occurrence, as it requires not just n conflicts, but at least 1 new conflict - appearing between the transactions in the epoch transaction and the + 5) The number of iterations of this loop is fixed to a hard coded limit, + after which the Slave will stop with an error. This should be an unlikely + occurrence, as it requires not just n conflicts, but at least 1 new + conflict appearing between the transactions in the epoch transaction and the database between the two states, n times in a row. - 6) Where conflicts are occasional, as expected, the post-commit transition to - SAS_TRACK_TRANS_DEPENDENCIES rather than SAS_NORMAL results in one epoch + 6) Where conflicts are occasional, as expected, the post-commit transition + to SAS_TRACK_TRANS_DEPENDENCIES rather than SAS_NORMAL results in one epoch transaction having its transaction dependencies needlessly tracked. 
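In summary, the commit-time transitions implemented below are : SAS_NORMAL : conflict(s) seen this pass -> SAS_TRACK_TRANS_DEPENDENCIES, retry. SAS_TRACK_TRANS_DEPENDENCIES : conflict(s) seen this pass -> SAS_APPLY_TRANS_DEPENDENCIES, retry; no conflicts seen this pass -> SAS_NORMAL, commit. SAS_APPLY_TRANS_DEPENDENCIES : further conflict(s) seen -> SAS_TRACK_TRANS_DEPENDENCIES, retry; no further conflicts -> SAS_TRACK_TRANS_DEPENDENCIES, commit.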
*/ -int -st_ndb_slave_state::atConflictPreCommit(bool& retry_slave_trans) -{ +int st_ndb_slave_state::atConflictPreCommit(bool &retry_slave_trans) { DBUG_ENTER("atConflictPreCommit"); /* @@ -1766,102 +1524,92 @@ st_ndb_slave_state::atConflictPreCommit(bool& retry_slave_trans) us to retry the slave transaction */ retry_slave_trans = false; - switch(trans_conflict_apply_state) - { - case SAS_NORMAL: - { - DBUG_PRINT("info", ("SAS_NORMAL")); - /* - Normal case. Only if we defined conflict detection on a table - with transactional conflict detection, and saw conflicts (on any table) - do we go to another state - */ - if (conflict_flags & SCS_TRANS_CONFLICT_DETECTED_THIS_PASS) - { - DBUG_PRINT("info", ("Conflict(s) detected this pass, transitioning to " - "SAS_TRACK_TRANS_DEPENDENCIES.")); - assert(conflict_flags & SCS_OPS_DEFINED); - /* Transactional conflict resolution required, switch state */ - atBeginTransConflictHandling(); - resetPerAttemptCounters(); - trans_conflict_apply_state = SAS_TRACK_TRANS_DEPENDENCIES; - retry_slave_trans = true; + switch (trans_conflict_apply_state) { + case SAS_NORMAL: { + DBUG_PRINT("info", ("SAS_NORMAL")); + /* + Normal case. Only if we defined conflict detection on a table + with transactional conflict detection, and saw conflicts (on any table) + do we go to another state + */ + if (conflict_flags & SCS_TRANS_CONFLICT_DETECTED_THIS_PASS) { + DBUG_PRINT("info", ("Conflict(s) detected this pass, transitioning to " + "SAS_TRACK_TRANS_DEPENDENCIES.")); + assert(conflict_flags & SCS_OPS_DEFINED); + /* Transactional conflict resolution required, switch state */ + atBeginTransConflictHandling(); + resetPerAttemptCounters(); + trans_conflict_apply_state = SAS_TRACK_TRANS_DEPENDENCIES; + retry_slave_trans = true; + } + break; } - break; - } - case SAS_TRACK_TRANS_DEPENDENCIES: - { - DBUG_PRINT("info", ("SAS_TRACK_TRANS_DEPENDENCIES")); + case SAS_TRACK_TRANS_DEPENDENCIES: { + DBUG_PRINT("info", ("SAS_TRACK_TRANS_DEPENDENCIES")); - if (conflict_flags & SCS_TRANS_CONFLICT_DETECTED_THIS_PASS) - { - /* - Conflict on table with transactional detection - this pass, we have collected the details and - dependencies, now transition to - SAS_APPLY_TRANS_DEPENDENCIES and - reapply the epoch transaction without the - conflicting transactions. - */ - assert(conflict_flags & SCS_OPS_DEFINED); - DBUG_PRINT("info", ("Transactional conflicts, transitioning to " - "SAS_APPLY_TRANS_DEPENDENCIES")); + if (conflict_flags & SCS_TRANS_CONFLICT_DETECTED_THIS_PASS) { + /* + Conflict on table with transactional detection + this pass, we have collected the details and + dependencies, now transition to + SAS_APPLY_TRANS_DEPENDENCIES and + reapply the epoch transaction without the + conflicting transactions. 
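+        The epoch transaction as a whole is rolled back and re-applied, + which is why retry_slave_trans is requested below.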
+ */ + assert(conflict_flags & SCS_OPS_DEFINED); + DBUG_PRINT("info", ("Transactional conflicts, transitioning to " + "SAS_APPLY_TRANS_DEPENDENCIES")); - trans_conflict_apply_state = SAS_APPLY_TRANS_DEPENDENCIES; - trans_detect_iter_count++; - retry_slave_trans = true; - break; + trans_conflict_apply_state = SAS_APPLY_TRANS_DEPENDENCIES; + trans_detect_iter_count++; + retry_slave_trans = true; + break; + } else { + /* + No transactional conflicts detected this pass, lets + return to SAS_NORMAL state after commit for more efficient + application of epoch transactions + */ + DBUG_PRINT("info", ("No transactional conflicts, transitioning to " + "SAS_NORMAL")); + atEndTransConflictHandling(); + trans_conflict_apply_state = SAS_NORMAL; + break; + } } - else - { + case SAS_APPLY_TRANS_DEPENDENCIES: { + DBUG_PRINT("info", ("SAS_APPLY_TRANS_DEPENDENCIES")); + assert(conflict_flags & SCS_OPS_DEFINED); /* - No transactional conflicts detected this pass, lets - return to SAS_NORMAL state after commit for more efficient - application of epoch transactions + We've applied the Slave epoch transaction subject to the + conflict detection. If any further transactional + conflicts have been observed, then we must repeat the + process. */ - DBUG_PRINT("info", ("No transactional conflicts, transitioning to " - "SAS_NORMAL")); atEndTransConflictHandling(); - trans_conflict_apply_state = SAS_NORMAL; - break; - } - } - case SAS_APPLY_TRANS_DEPENDENCIES: - { - DBUG_PRINT("info", ("SAS_APPLY_TRANS_DEPENDENCIES")); - assert(conflict_flags & SCS_OPS_DEFINED); - /* - We've applied the Slave epoch transaction subject to the - conflict detection. If any further transactional - conflicts have been observed, then we must repeat the - process. - */ - atEndTransConflictHandling(); - atBeginTransConflictHandling(); - trans_conflict_apply_state = SAS_TRACK_TRANS_DEPENDENCIES; + atBeginTransConflictHandling(); + trans_conflict_apply_state = SAS_TRACK_TRANS_DEPENDENCIES; - if (unlikely(conflict_flags & SCS_TRANS_CONFLICT_DETECTED_THIS_PASS)) - { - DBUG_PRINT("info", ("Further conflict(s) detected, repeating the " - "TRACK_TRANS_DEPENDENCIES pass")); + if (unlikely(conflict_flags & SCS_TRANS_CONFLICT_DETECTED_THIS_PASS)) { + DBUG_PRINT("info", ("Further conflict(s) detected, repeating the " + "TRACK_TRANS_DEPENDENCIES pass")); + /* + Further conflict observed when applying, need + to re-determine dependencies + */ + resetPerAttemptCounters(); + retry_slave_trans = true; + break; + } + + DBUG_PRINT("info", ("No further conflicts detected, committing and " + "returning to SAS_TRACK_TRANS_DEPENDENCIES state")); /* - Further conflict observed when applying, need - to re-determine dependencies + With dependencies taken into account, no further + conflicts detected, can now proceed to commit */ - resetPerAttemptCounters(); - retry_slave_trans = true; break; } - - - DBUG_PRINT("info", ("No further conflicts detected, committing and " - "returning to SAS_TRACK_TRANS_DEPENDENCIES state")); - /* - With dependencies taken into account, no further - conflicts detected, can now proceed to commit - */ - break; - } } /* @@ -1869,8 +1617,7 @@ st_ndb_slave_state::atConflictPreCommit(bool& retry_slave_trans) */ conflict_flags = 0; - if (retry_slave_trans) - { + if (retry_slave_trans) { DBUG_PRINT("info", ("Requesting transaction restart")); DBUG_RETURN(1); } @@ -1879,13 +1626,10 @@ st_ndb_slave_state::atConflictPreCommit(bool& retry_slave_trans) DBUG_RETURN(0); } - - /** * Conflict function interpreted programs */ - /** CFT_NDB_OLD @@ -1899,39 
+1643,33 @@ st_ndb_slave_state::atConflictPreCommit(bool& retry_slave_trans) As an independent feature, phase 2 also saves the conflicts into the table's exceptions table. */ -static int -row_conflict_fn_old(NDB_CONFLICT_FN_SHARE* cfn_share, - enum_conflicting_op_type, - const NdbRecord* data_record, - const uchar* old_data, - const uchar*, - const MY_BITMAP* bi_cols, - const MY_BITMAP*, - NdbInterpretedCode* code) -{ +static int row_conflict_fn_old(NDB_CONFLICT_FN_SHARE *cfn_share, + enum_conflicting_op_type, + const NdbRecord *data_record, + const uchar *old_data, const uchar *, + const MY_BITMAP *bi_cols, const MY_BITMAP *, + NdbInterpretedCode *code) { DBUG_ENTER("row_conflict_fn_old"); - uint32 resolve_column= cfn_share->m_resolve_column; - uint32 resolve_size= cfn_share->m_resolve_size; - const uchar* field_ptr = (const uchar*) - NdbDictionary::getValuePtr(data_record, - (const char*) old_data, - cfn_share->m_resolve_column); + uint32 resolve_column = cfn_share->m_resolve_column; + uint32 resolve_size = cfn_share->m_resolve_size; + const uchar *field_ptr = (const uchar *)NdbDictionary::getValuePtr( + data_record, (const char *)old_data, cfn_share->m_resolve_column); assert((resolve_size == 4) || (resolve_size == 8)); - if (unlikely(!bitmap_is_set(bi_cols, resolve_column))) - { + if (unlikely(!bitmap_is_set(bi_cols, resolve_column))) { ndb_log_info("NDB Slave: missing data for %s timestamp column %u.", cfn_share->m_conflict_fn->name, resolve_column); DBUG_RETURN(1); } - const uint label_0= 0; - const Uint32 RegOldValue= 1, RegCurrentValue= 2; + const uint label_0 = 0; + const Uint32 RegOldValue = 1, RegCurrentValue = 2; int r; - DBUG_PRINT("info", - ("Adding interpreted filter, existing value must eq event old value")); + DBUG_PRINT( + "info", + ("Adding interpreted filter, existing value must eq event old value")); /* * read old value from record */ @@ -1940,16 +1678,13 @@ row_conflict_fn_old(NDB_CONFLICT_FN_SHARE* cfn_share, uint64 old_value_64; }; { - if (resolve_size == 4) - { + if (resolve_size == 4) { memcpy(&old_value_32, field_ptr, resolve_size); DBUG_PRINT("info", (" old_value_32: %u", old_value_32)); - } - else - { + } else { memcpy(&old_value_64, field_ptr, resolve_size); - DBUG_PRINT("info", (" old_value_64: %llu", - (unsigned long long) old_value_64)); + DBUG_PRINT("info", + (" old_value_64: %llu", (unsigned long long)old_value_64)); } } @@ -1957,62 +1692,54 @@ row_conflict_fn_old(NDB_CONFLICT_FN_SHARE* cfn_share, * Load registers RegOldValue and RegCurrentValue */ if (resolve_size == 4) - r= code->load_const_u32(RegOldValue, old_value_32); + r = code->load_const_u32(RegOldValue, old_value_32); else - r= code->load_const_u64(RegOldValue, old_value_64); + r = code->load_const_u64(RegOldValue, old_value_64); DBUG_ASSERT(r == 0); - r= code->read_attr(RegCurrentValue, resolve_column); + r = code->read_attr(RegCurrentValue, resolve_column); DBUG_ASSERT(r == 0); /* * if RegOldValue == RegCurrentValue goto label_0 * else raise error for this row */ - r= code->branch_eq(RegOldValue, RegCurrentValue, label_0); + r = code->branch_eq(RegOldValue, RegCurrentValue, label_0); DBUG_ASSERT(r == 0); - r= code->interpret_exit_nok(error_conflict_fn_violation); + r = code->interpret_exit_nok(error_conflict_fn_violation); DBUG_ASSERT(r == 0); - r= code->def_label(label_0); + r = code->def_label(label_0); DBUG_ASSERT(r == 0); - r= code->interpret_exit_ok(); + r = code->interpret_exit_ok(); DBUG_ASSERT(r == 0); - r= code->finalise(); + r = code->finalise(); DBUG_ASSERT(r == 0); DBUG_RETURN(r); 
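  /* A sketch of the finalised interpreted program, using the register and label names above (illustrative only) : load_const RegOldValue, <value from the event's old image>; read_attr RegCurrentValue, <resolve column>; branch_eq RegOldValue, RegCurrentValue, label_0; interpret_exit_nok error_conflict_fn_violation (conflict); label_0 : interpret_exit_ok (no conflict, row may be applied). */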
} -static int -row_conflict_fn_max_update_only(NDB_CONFLICT_FN_SHARE* cfn_share, - enum_conflicting_op_type, - const NdbRecord* data_record, - const uchar*, - const uchar* new_data, - const MY_BITMAP*, - const MY_BITMAP* ai_cols, - NdbInterpretedCode* code) -{ +static int row_conflict_fn_max_update_only( + NDB_CONFLICT_FN_SHARE *cfn_share, enum_conflicting_op_type, + const NdbRecord *data_record, const uchar *, const uchar *new_data, + const MY_BITMAP *, const MY_BITMAP *ai_cols, NdbInterpretedCode *code) { DBUG_ENTER("row_conflict_fn_max_update_only"); - uint32 resolve_column= cfn_share->m_resolve_column; - uint32 resolve_size= cfn_share->m_resolve_size; - const uchar* field_ptr = (const uchar*) - NdbDictionary::getValuePtr(data_record, - (const char*) new_data, - cfn_share->m_resolve_column); + uint32 resolve_column = cfn_share->m_resolve_column; + uint32 resolve_size = cfn_share->m_resolve_size; + const uchar *field_ptr = (const uchar *)NdbDictionary::getValuePtr( + data_record, (const char *)new_data, cfn_share->m_resolve_column); assert((resolve_size == 4) || (resolve_size == 8)); - if (unlikely(!bitmap_is_set(ai_cols, resolve_column))) - { + if (unlikely(!bitmap_is_set(ai_cols, resolve_column))) { ndb_log_info("NDB Slave: missing data for %s timestamp column %u.", cfn_share->m_conflict_fn->name, resolve_column); DBUG_RETURN(1); } - const uint label_0= 0; - const Uint32 RegNewValue= 1, RegCurrentValue= 2; + const uint label_0 = 0; + const Uint32 RegNewValue = 1, RegCurrentValue = 2; int r; - DBUG_PRINT("info", - ("Adding interpreted filter, existing value must be lt event new")); + DBUG_PRINT( + "info", + ("Adding interpreted filter, existing value must be lt event new")); /* * read new value from record */ @@ -2021,41 +1748,38 @@ row_conflict_fn_max_update_only(NDB_CONFLICT_FN_SHARE* cfn_share, uint64 new_value_64; }; { - if (resolve_size == 4) - { + if (resolve_size == 4) { memcpy(&new_value_32, field_ptr, resolve_size); DBUG_PRINT("info", (" new_value_32: %u", new_value_32)); - } - else - { + } else { memcpy(&new_value_64, field_ptr, resolve_size); - DBUG_PRINT("info", (" new_value_64: %llu", - (unsigned long long) new_value_64)); + DBUG_PRINT("info", + (" new_value_64: %llu", (unsigned long long)new_value_64)); } } /* * Load registers RegNewValue and RegCurrentValue */ if (resolve_size == 4) - r= code->load_const_u32(RegNewValue, new_value_32); + r = code->load_const_u32(RegNewValue, new_value_32); else - r= code->load_const_u64(RegNewValue, new_value_64); + r = code->load_const_u64(RegNewValue, new_value_64); DBUG_ASSERT(r == 0); - r= code->read_attr(RegCurrentValue, resolve_column); + r = code->read_attr(RegCurrentValue, resolve_column); DBUG_ASSERT(r == 0); /* * if RegNewValue > RegCurrentValue goto label_0 * else raise error for this row */ - r= code->branch_gt(RegNewValue, RegCurrentValue, label_0); + r = code->branch_gt(RegNewValue, RegCurrentValue, label_0); DBUG_ASSERT(r == 0); - r= code->interpret_exit_nok(error_conflict_fn_violation); + r = code->interpret_exit_nok(error_conflict_fn_violation); DBUG_ASSERT(r == 0); - r= code->def_label(label_0); + r = code->def_label(label_0); DBUG_ASSERT(r == 0); - r= code->interpret_exit_ok(); + r = code->interpret_exit_ok(); DBUG_ASSERT(r == 0); - r= code->finalise(); + r = code->finalise(); DBUG_ASSERT(r == 0); DBUG_RETURN(r); } @@ -2072,50 +1796,34 @@ row_conflict_fn_max_update_only(NDB_CONFLICT_FN_SHARE* cfn_share, Note that for delete, this algorithm reverts to the OLD algorithm. 
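For example, with a resolve column carrying a version number : an incoming update with version 12 against a row whose current version is 9 is applied, while an incoming update with version 7 is rejected as a conflict.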
*/ -static int -row_conflict_fn_max(NDB_CONFLICT_FN_SHARE* cfn_share, - enum_conflicting_op_type op_type, - const NdbRecord* data_record, - const uchar* old_data, - const uchar* new_data, - const MY_BITMAP* bi_cols, - const MY_BITMAP* ai_cols, - NdbInterpretedCode* code) -{ - switch(op_type) - { - case WRITE_ROW: - abort(); - return 1; - case UPDATE_ROW: - return row_conflict_fn_max_update_only(cfn_share, - op_type, - data_record, - old_data, - new_data, - bi_cols, - ai_cols, - code); - case DELETE_ROW: - /* Can't use max of new image, as there's no new image - * for DELETE - * Use OLD instead - */ - return row_conflict_fn_old(cfn_share, - op_type, - data_record, - old_data, - new_data, - bi_cols, - ai_cols, - code); - default: - abort(); - return 1; +static int row_conflict_fn_max(NDB_CONFLICT_FN_SHARE *cfn_share, + enum_conflicting_op_type op_type, + const NdbRecord *data_record, + const uchar *old_data, const uchar *new_data, + const MY_BITMAP *bi_cols, + const MY_BITMAP *ai_cols, + NdbInterpretedCode *code) { + switch (op_type) { + case WRITE_ROW: + abort(); + return 1; + case UPDATE_ROW: + return row_conflict_fn_max_update_only(cfn_share, op_type, data_record, + old_data, new_data, bi_cols, + ai_cols, code); + case DELETE_ROW: + /* Can't use max of new image, as there's no new image + * for DELETE + * Use OLD instead + */ + return row_conflict_fn_old(cfn_share, op_type, data_record, old_data, + new_data, bi_cols, ai_cols, code); + default: + abort(); + return 1; } } - /** CFT_NDB_MAX_DEL_WIN @@ -2130,108 +1838,89 @@ row_conflict_fn_max(NDB_CONFLICT_FN_SHARE* cfn_share, to them. */ -static int -row_conflict_fn_max_del_win(NDB_CONFLICT_FN_SHARE* cfn_share, - enum_conflicting_op_type op_type, - const NdbRecord* data_record, - const uchar* old_data, - const uchar* new_data, - const MY_BITMAP* bi_cols, - const MY_BITMAP* ai_cols, - NdbInterpretedCode* code) -{ - switch(op_type) - { - case WRITE_ROW: - abort(); - return 1; - case UPDATE_ROW: - return row_conflict_fn_max_update_only(cfn_share, - op_type, - data_record, - old_data, - new_data, - bi_cols, - ai_cols, - code); - case DELETE_ROW: - /* This variant always lets a received DELETE_ROW - * succeed. - */ - return 0; - default: - abort(); - return 1; +static int row_conflict_fn_max_del_win( + NDB_CONFLICT_FN_SHARE *cfn_share, enum_conflicting_op_type op_type, + const NdbRecord *data_record, const uchar *old_data, const uchar *new_data, + const MY_BITMAP *bi_cols, const MY_BITMAP *ai_cols, + NdbInterpretedCode *code) { + switch (op_type) { + case WRITE_ROW: + abort(); + return 1; + case UPDATE_ROW: + return row_conflict_fn_max_update_only(cfn_share, op_type, data_record, + old_data, new_data, bi_cols, + ai_cols, code); + case DELETE_ROW: + /* This variant always lets a received DELETE_ROW + * succeed. 
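+       * No interpreted program is attached in this case (we return 0 + * without adding any code), so the incoming delete wins + * unconditionally.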
+ */ + return 0; + default: + abort(); + return 1; } } - /** CFT_NDB_EPOCH */ -static int -row_conflict_fn_epoch(NDB_CONFLICT_FN_SHARE*, - enum_conflicting_op_type op_type, - const NdbRecord*, - const uchar*, - const uchar*, - const MY_BITMAP*, - const MY_BITMAP*, - NdbInterpretedCode* code) -{ +static int row_conflict_fn_epoch(NDB_CONFLICT_FN_SHARE *, + enum_conflicting_op_type op_type, + const NdbRecord *, const uchar *, + const uchar *, const MY_BITMAP *, + const MY_BITMAP *, NdbInterpretedCode *code) { DBUG_ENTER("row_conflict_fn_epoch"); - switch(op_type) - { - case WRITE_ROW: - abort(); - DBUG_RETURN(1); - case UPDATE_ROW: - case DELETE_ROW: - case READ_ROW: /* Read tracking */ - { - const uint label_0= 0; - const Uint32 - RegAuthor= 1, RegZero= 2, - RegMaxRepEpoch= 1, RegRowEpoch= 2; - int r; - - r= code->load_const_u32(RegZero, 0); - assert(r == 0); - r= code->read_attr(RegAuthor, NdbDictionary::Column::ROW_AUTHOR); - assert(r == 0); - /* If last author was not local, assume no conflict */ - r= code->branch_ne(RegZero, RegAuthor, label_0); - assert(r == 0); + switch (op_type) { + case WRITE_ROW: + abort(); + DBUG_RETURN(1); + case UPDATE_ROW: + case DELETE_ROW: + case READ_ROW: /* Read tracking */ + { + const uint label_0 = 0; + const Uint32 RegAuthor = 1, RegZero = 2, RegMaxRepEpoch = 1, + RegRowEpoch = 2; + int r; + + r = code->load_const_u32(RegZero, 0); + assert(r == 0); + r = code->read_attr(RegAuthor, NdbDictionary::Column::ROW_AUTHOR); + assert(r == 0); + /* If last author was not local, assume no conflict */ + r = code->branch_ne(RegZero, RegAuthor, label_0); + assert(r == 0); - /* - * Load registers RegMaxRepEpoch and RegRowEpoch - */ - r= code->load_const_u64(RegMaxRepEpoch, g_ndb_slave_state.max_rep_epoch); - assert(r == 0); - r= code->read_attr(RegRowEpoch, NdbDictionary::Column::ROW_GCI64); - assert(r == 0); + /* + * Load registers RegMaxRepEpoch and RegRowEpoch + */ + r = code->load_const_u64(RegMaxRepEpoch, g_ndb_slave_state.max_rep_epoch); + assert(r == 0); + r = code->read_attr(RegRowEpoch, NdbDictionary::Column::ROW_GCI64); + assert(r == 0); - /* - * if RegRowEpoch <= RegMaxRepEpoch goto label_0 - * else raise error for this row - */ - r= code->branch_le(RegRowEpoch, RegMaxRepEpoch, label_0); - assert(r == 0); - r= code->interpret_exit_nok(error_conflict_fn_violation); - assert(r == 0); - r= code->def_label(label_0); - assert(r == 0); - r= code->interpret_exit_ok(); - assert(r == 0); - r= code->finalise(); - assert(r == 0); - DBUG_RETURN(r); - } - default: - abort(); - DBUG_RETURN(1); + /* + * if RegRowEpoch <= RegMaxRepEpoch goto label_0 + * else raise error for this row + */ + r = code->branch_le(RegRowEpoch, RegMaxRepEpoch, label_0); + assert(r == 0); + r = code->interpret_exit_nok(error_conflict_fn_violation); + assert(r == 0); + r = code->def_label(label_0); + assert(r == 0); + r = code->interpret_exit_ok(); + assert(r == 0); + r = code->finalise(); + assert(r == 0); + DBUG_RETURN(r); + } + default: + abort(); + DBUG_RETURN(1); } } @@ -2239,132 +1928,99 @@ row_conflict_fn_epoch(NDB_CONFLICT_FN_SHARE*, * CFT_NDB_EPOCH2 */ -static int -row_conflict_fn_epoch2_primary(NDB_CONFLICT_FN_SHARE* cfn_share, - enum_conflicting_op_type op_type, - const NdbRecord* data_record, - const uchar* old_data, - const uchar* new_data, - const MY_BITMAP* bi_cols, - const MY_BITMAP* ai_cols, - NdbInterpretedCode* code) -{ +static int row_conflict_fn_epoch2_primary( + NDB_CONFLICT_FN_SHARE *cfn_share, enum_conflicting_op_type op_type, + const NdbRecord *data_record, const uchar 
*old_data, const uchar *new_data, + const MY_BITMAP *bi_cols, const MY_BITMAP *ai_cols, + NdbInterpretedCode *code) { DBUG_ENTER("row_conflict_fn_epoch2_primary"); - + /* We use the normal NDB$EPOCH detection function */ - DBUG_RETURN(row_conflict_fn_epoch(cfn_share, - op_type, - data_record, - old_data, - new_data, - bi_cols, - ai_cols, - code)); + DBUG_RETURN(row_conflict_fn_epoch(cfn_share, op_type, data_record, old_data, + new_data, bi_cols, ai_cols, code)); } -static int -row_conflict_fn_epoch2_secondary(NDB_CONFLICT_FN_SHARE*, - enum_conflicting_op_type op_type, - const NdbRecord*, - const uchar*, - const uchar*, - const MY_BITMAP*, - const MY_BITMAP*, - NdbInterpretedCode* code) -{ +static int row_conflict_fn_epoch2_secondary(NDB_CONFLICT_FN_SHARE *, + enum_conflicting_op_type op_type, + const NdbRecord *, const uchar *, + const uchar *, const MY_BITMAP *, + const MY_BITMAP *, + NdbInterpretedCode *code) { DBUG_ENTER("row_conflict_fn_epoch2_secondary"); /* Only called for reflected update and delete operations * on the secondary. - * These are returning operations which should only be + * These are returning operations which should only be * applied if the row in the database was last written * remotely (by the Primary) */ - switch(op_type) - { - case WRITE_ROW: - abort(); - DBUG_RETURN(1); - case UPDATE_ROW: - case DELETE_ROW: - { - const uint label_0= 0; - const Uint32 - RegAuthor= 1, RegZero= 2; - int r; - - r= code->load_const_u32(RegZero, 0); - assert(r == 0); - r= code->read_attr(RegAuthor, NdbDictionary::Column::ROW_AUTHOR); - assert(r == 0); - r= code->branch_eq(RegZero, RegAuthor, label_0); - assert(r == 0); - /* Last author was not local, no conflict, apply */ - r= code->interpret_exit_ok(); - assert(r == 0); - r= code->def_label(label_0); - assert(r == 0); - /* Last author was secondary-local, conflict, do not apply */ - r= code->interpret_exit_nok(error_conflict_fn_violation); - assert(r == 0); - - - r= code->finalise(); - assert(r == 0); - DBUG_RETURN(r); - } - default: - abort(); - DBUG_RETURN(1); + switch (op_type) { + case WRITE_ROW: + abort(); + DBUG_RETURN(1); + case UPDATE_ROW: + case DELETE_ROW: { + const uint label_0 = 0; + const Uint32 RegAuthor = 1, RegZero = 2; + int r; + + r = code->load_const_u32(RegZero, 0); + assert(r == 0); + r = code->read_attr(RegAuthor, NdbDictionary::Column::ROW_AUTHOR); + assert(r == 0); + r = code->branch_eq(RegZero, RegAuthor, label_0); + assert(r == 0); + /* Last author was not local, no conflict, apply */ + r = code->interpret_exit_ok(); + assert(r == 0); + r = code->def_label(label_0); + assert(r == 0); + /* Last author was secondary-local, conflict, do not apply */ + r = code->interpret_exit_nok(error_conflict_fn_violation); + assert(r == 0); + + r = code->finalise(); + assert(r == 0); + DBUG_RETURN(r); + } + default: + abort(); + DBUG_RETURN(1); } } -static int -row_conflict_fn_epoch2(NDB_CONFLICT_FN_SHARE* cfn_share, - enum_conflicting_op_type op_type, - const NdbRecord* data_record, - const uchar* old_data, - const uchar* new_data, - const MY_BITMAP* bi_cols, - const MY_BITMAP* ai_cols, - NdbInterpretedCode* code) -{ +static int row_conflict_fn_epoch2(NDB_CONFLICT_FN_SHARE *cfn_share, + enum_conflicting_op_type op_type, + const NdbRecord *data_record, + const uchar *old_data, const uchar *new_data, + const MY_BITMAP *bi_cols, + const MY_BITMAP *ai_cols, + NdbInterpretedCode *code) { DBUG_ENTER("row_conflict_fn_epoch2"); - + /** * NdbEpoch2 behaviour depends on the Slave conflict role variable * */ - 
switch(opt_ndb_slave_conflict_role) - { - case SCR_NONE: - /* This is a problem */ - DBUG_RETURN(1); - case SCR_PRIMARY: - DBUG_RETURN(row_conflict_fn_epoch2_primary(cfn_share, - op_type, - data_record, - old_data, - new_data, - bi_cols, - ai_cols, - code)); - case SCR_SECONDARY: - DBUG_RETURN(row_conflict_fn_epoch2_secondary(cfn_share, - op_type, - data_record, - old_data, - new_data, - bi_cols, - ai_cols, - code)); - case SCR_PASS: - /* Do nothing */ - DBUG_RETURN(0); - - default: - break; + switch (opt_ndb_slave_conflict_role) { + case SCR_NONE: + /* This is a problem */ + DBUG_RETURN(1); + case SCR_PRIMARY: + DBUG_RETURN(row_conflict_fn_epoch2_primary( + cfn_share, op_type, data_record, old_data, new_data, bi_cols, ai_cols, + code)); + case SCR_SECONDARY: + DBUG_RETURN(row_conflict_fn_epoch2_secondary( + cfn_share, op_type, data_record, old_data, new_data, bi_cols, ai_cols, + code)); + case SCR_PASS: + /* Do nothing */ + DBUG_RETURN(0); + + default: + break; } abort(); @@ -2375,196 +2031,162 @@ row_conflict_fn_epoch2(NDB_CONFLICT_FN_SHARE* cfn_share, /** * Conflict function setup infrastructure */ - -static const st_conflict_fn_arg_def resolve_col_args[]= -{ - /* Arg type Optional */ - { CFAT_COLUMN_NAME, false }, - { CFAT_END, false } -}; - -static const st_conflict_fn_arg_def epoch_fn_args[]= -{ - /* Arg type Optional */ - { CFAT_EXTRA_GCI_BITS, true }, - { CFAT_END, false } -}; - -static const st_conflict_fn_def conflict_fns[]= -{ - { "NDB$MAX_DELETE_WIN", CFT_NDB_MAX_DEL_WIN, - &resolve_col_args[0], row_conflict_fn_max_del_win, 0 }, - { "NDB$MAX", CFT_NDB_MAX, - &resolve_col_args[0], row_conflict_fn_max, 0 }, - { "NDB$OLD", CFT_NDB_OLD, - &resolve_col_args[0], row_conflict_fn_old, 0 }, - { "NDB$EPOCH2_TRANS", CFT_NDB_EPOCH2_TRANS, - &epoch_fn_args[0], row_conflict_fn_epoch2, - CF_REFLECT_SEC_OPS | CF_USE_ROLE_VAR | - CF_TRANSACTIONAL | CF_DEL_DEL_CFT }, - { "NDB$EPOCH2", CFT_NDB_EPOCH2, - &epoch_fn_args[0], row_conflict_fn_epoch2, - CF_REFLECT_SEC_OPS | CF_USE_ROLE_VAR - }, - { "NDB$EPOCH_TRANS", CFT_NDB_EPOCH_TRANS, - &epoch_fn_args[0], row_conflict_fn_epoch, CF_TRANSACTIONAL}, - { "NDB$EPOCH", CFT_NDB_EPOCH, - &epoch_fn_args[0], row_conflict_fn_epoch, 0 } -}; - -static unsigned n_conflict_fns= - sizeof(conflict_fns) / sizeof(struct st_conflict_fn_def); - - -int -parse_conflict_fn_spec(const char* conflict_fn_spec, - const st_conflict_fn_def** conflict_fn, - st_conflict_fn_arg* args, - Uint32* max_args, - char *msg, uint msg_len) -{ + +static const st_conflict_fn_arg_def resolve_col_args[] = { + /* Arg type Optional */ + {CFAT_COLUMN_NAME, false}, + {CFAT_END, false}}; + +static const st_conflict_fn_arg_def epoch_fn_args[] = { + /* Arg type Optional */ + {CFAT_EXTRA_GCI_BITS, true}, + {CFAT_END, false}}; + +static const st_conflict_fn_def conflict_fns[] = { + {"NDB$MAX_DELETE_WIN", CFT_NDB_MAX_DEL_WIN, &resolve_col_args[0], + row_conflict_fn_max_del_win, 0}, + {"NDB$MAX", CFT_NDB_MAX, &resolve_col_args[0], row_conflict_fn_max, 0}, + {"NDB$OLD", CFT_NDB_OLD, &resolve_col_args[0], row_conflict_fn_old, 0}, + {"NDB$EPOCH2_TRANS", CFT_NDB_EPOCH2_TRANS, &epoch_fn_args[0], + row_conflict_fn_epoch2, + CF_REFLECT_SEC_OPS | CF_USE_ROLE_VAR | CF_TRANSACTIONAL | CF_DEL_DEL_CFT}, + {"NDB$EPOCH2", CFT_NDB_EPOCH2, &epoch_fn_args[0], row_conflict_fn_epoch2, + CF_REFLECT_SEC_OPS | CF_USE_ROLE_VAR}, + {"NDB$EPOCH_TRANS", CFT_NDB_EPOCH_TRANS, &epoch_fn_args[0], + row_conflict_fn_epoch, CF_TRANSACTIONAL}, + {"NDB$EPOCH", CFT_NDB_EPOCH, &epoch_fn_args[0], row_conflict_fn_epoch, 0}}; + +static unsigned 
n_conflict_fns = + sizeof(conflict_fns) / sizeof(struct st_conflict_fn_def); + +int parse_conflict_fn_spec(const char *conflict_fn_spec, + const st_conflict_fn_def **conflict_fn, + st_conflict_fn_arg *args, Uint32 *max_args, + char *msg, uint msg_len) { DBUG_ENTER("parse_conflict_fn_spec"); Uint32 no_args = 0; - const char *ptr= conflict_fn_spec; - const char *error_str= "unknown conflict resolution function"; + const char *ptr = conflict_fn_spec; + const char *error_str = "unknown conflict resolution function"; /* remove whitespace */ while (*ptr == ' ' && *ptr != '\0') ptr++; DBUG_PRINT("info", ("parsing %s", conflict_fn_spec)); - for (unsigned i= 0; i < n_conflict_fns; i++) - { - const st_conflict_fn_def &fn= conflict_fns[i]; + for (unsigned i = 0; i < n_conflict_fns; i++) { + const st_conflict_fn_def &fn = conflict_fns[i]; - uint len= (uint)strlen(fn.name); - if (strncmp(ptr, fn.name, len)) - continue; + uint len = (uint)strlen(fn.name); + if (strncmp(ptr, fn.name, len)) continue; DBUG_PRINT("info", ("found function %s", fn.name)); /* skip function name */ - ptr+= len; + ptr += len; /* remove whitespace */ while (*ptr == ' ' && *ptr != '\0') ptr++; /* next '(' */ - if (*ptr != '(') - { - error_str= "missing '('"; + if (*ptr != '(') { + error_str = "missing '('"; DBUG_PRINT("info", ("parse error %s", error_str)); break; } ptr++; /* find all arguments */ - for (;;) - { - if (no_args >= *max_args) - { - error_str= "too many arguments"; + for (;;) { + if (no_args >= *max_args) { + error_str = "too many arguments"; DBUG_PRINT("info", ("parse error %s", error_str)); break; } /* expected type */ - enum enum_conflict_fn_arg_type type= - conflict_fns[i].arg_defs[no_args].arg_type; + enum enum_conflict_fn_arg_type type = + conflict_fns[i].arg_defs[no_args].arg_type; /* remove whitespace */ while (*ptr == ' ' && *ptr != '\0') ptr++; - if (type == CFAT_END) - { - args[no_args].type= type; - error_str= NULL; + if (type == CFAT_END) { + args[no_args].type = type; + error_str = NULL; break; } /* arg */ /* Todo : Should support comma as an arg separator? */ - const char *start_arg= ptr; + const char *start_arg = ptr; while (*ptr != ')' && *ptr != ' ' && *ptr != '\0') ptr++; - const char *end_arg= ptr; + const char *end_arg = ptr; bool optional_arg = conflict_fns[i].arg_defs[no_args].optional; /* any arg given? */ - if (start_arg == end_arg) - { - if (!optional_arg) - { - error_str= "missing function argument"; + if (start_arg == end_arg) { + if (!optional_arg) { + error_str = "missing function argument"; DBUG_PRINT("info", ("parse error %s", error_str)); break; - } - else - { + } else { /* Arg was optional, and not present * Must be at end of args, finish parsing */ - args[no_args].type= CFAT_END; - error_str= NULL; + args[no_args].type = CFAT_END; + error_str = NULL; break; } } - uint len= (uint)(end_arg - start_arg); - args[no_args].type= type; - + uint len = (uint)(end_arg - start_arg); + args[no_args].type = type; + DBUG_PRINT("info", ("found argument %s %u", start_arg, len)); bool arg_processing_error = false; - switch (type) - { - case CFAT_COLUMN_NAME: - { - /* Copy column name out into argument's buffer */ - char* dest= &args[no_args].resolveColNameBuff[0]; - - memcpy(dest, start_arg, (len < (uint) NAME_CHAR_LEN ? 
- len : - NAME_CHAR_LEN)); - dest[len]= '\0'; - break; - } - case CFAT_EXTRA_GCI_BITS: - { - /* Map string to number and check it's in range etc */ - char* end_of_arg = const_cast<char*>(end_arg); - Uint32 bits = strtoul(start_arg, &end_of_arg, 0); - DBUG_PRINT("info", ("Using %u as the number of extra bits", bits)); - - if (bits > 31) - { - arg_processing_error= true; - error_str= "Too many extra Gci bits"; - DBUG_PRINT("info", ("%s", error_str)); + switch (type) { + case CFAT_COLUMN_NAME: { + /* Copy column name out into argument's buffer */ + char *dest = &args[no_args].resolveColNameBuff[0]; + + memcpy(dest, start_arg, + (len < (uint)NAME_CHAR_LEN ? len : NAME_CHAR_LEN)); + dest[len] = '\0'; break; } - /* Num bits seems ok */ - args[no_args].extraGciBits = bits; - break; - } - case CFAT_END: - abort(); + case CFAT_EXTRA_GCI_BITS: { + /* Map string to number and check it's in range etc */ + char *end_of_arg = const_cast<char *>(end_arg); + Uint32 bits = strtoul(start_arg, &end_of_arg, 0); + DBUG_PRINT("info", ("Using %u as the number of extra bits", bits)); + + if (bits > 31) { + arg_processing_error = true; + error_str = "Too many extra Gci bits"; + DBUG_PRINT("info", ("%s", error_str)); + break; + } + /* Num bits seems ok */ + args[no_args].extraGciBits = bits; + break; + } + case CFAT_END: + abort(); } - if (arg_processing_error) - break; + if (arg_processing_error) break; no_args++; } - if (error_str) - break; + if (error_str) break; /* remove whitespace */ while (*ptr == ' ' && *ptr != '\0') ptr++; /* next ')' */ - if (*ptr != ')') - { - error_str= "missing ')'"; + if (*ptr != ')') { + error_str = "missing ')'"; break; } ptr++; @@ -2573,9 +2195,8 @@ parse_conflict_fn_spec, /* remove whitespace */ while (*ptr == ' ' && *ptr != '\0') ptr++; /* garbage in the end? 
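(e.g. trailing characters after the final ')', as in "NDB$OLD(col) x")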
*/ - if (*ptr != '\0') - { - error_str= "garbage in the end"; + if (*ptr != '\0') { + error_str = "garbage in the end"; break; } @@ -2586,111 +2207,84 @@ parse_conflict_fn_spec(const char* conflict_fn_spec, DBUG_RETURN(0); } /* parse error */ - snprintf(msg, msg_len, "%s, %s at '%s'", - conflict_fn_spec, error_str, ptr); + snprintf(msg, msg_len, "%s, %s at '%s'", conflict_fn_spec, error_str, ptr); DBUG_PRINT("info", ("%s", msg)); DBUG_RETURN(-1); } -static uint -slave_check_resolve_col_type(const NDBTAB *ndbtab, - uint field_index) -{ +static uint slave_check_resolve_col_type(const NDBTAB *ndbtab, + uint field_index) { DBUG_ENTER("slave_check_resolve_col_type"); - const NDBCOL *c= ndbtab->getColumn(field_index); - uint sz= 0; - switch (c->getType()) - { - case NDBCOL::Unsigned: - sz= sizeof(Uint32); - DBUG_PRINT("info", ("resolve column Uint32 %u", - field_index)); - break; - case NDBCOL::Bigunsigned: - sz= sizeof(Uint64); - DBUG_PRINT("info", ("resolve column Uint64 %u", - field_index)); - break; - default: - DBUG_PRINT("info", ("resolve column %u has wrong type", - field_index)); - break; + const NDBCOL *c = ndbtab->getColumn(field_index); + uint sz = 0; + switch (c->getType()) { + case NDBCOL::Unsigned: + sz = sizeof(Uint32); + DBUG_PRINT("info", ("resolve column Uint32 %u", field_index)); + break; + case NDBCOL::Bigunsigned: + sz = sizeof(Uint64); + DBUG_PRINT("info", ("resolve column Uint64 %u", field_index)); + break; + default: + DBUG_PRINT("info", ("resolve column %u has wrong type", field_index)); + break; } DBUG_RETURN(sz); } -static int -slave_set_resolve_fn(Ndb* ndb, - NDB_CONFLICT_FN_SHARE** ppcfn_share, - const char* dbName, - const char* tabName, - const NDBTAB *ndbtab, uint field_index, - uint resolve_col_sz, - const st_conflict_fn_def* conflict_fn, - uint8 flags) -{ +static int slave_set_resolve_fn(Ndb *ndb, NDB_CONFLICT_FN_SHARE **ppcfn_share, + const char *dbName, const char *tabName, + const NDBTAB *ndbtab, uint field_index, + uint resolve_col_sz, + const st_conflict_fn_def *conflict_fn, + uint8 flags) { DBUG_ENTER("slave_set_resolve_fn"); - NdbDictionary::Dictionary *dict= ndb->getDictionary(); - NDB_CONFLICT_FN_SHARE *cfn_share= *ppcfn_share; - const char *ex_suffix= NDB_EXCEPTIONS_TABLE_SUFFIX; - if (cfn_share == NULL) - { - *ppcfn_share= cfn_share= - (NDB_CONFLICT_FN_SHARE*) my_malloc(PSI_INSTRUMENT_ME, - sizeof(NDB_CONFLICT_FN_SHARE), - MYF(MY_WME | ME_FATALERROR)); + NdbDictionary::Dictionary *dict = ndb->getDictionary(); + NDB_CONFLICT_FN_SHARE *cfn_share = *ppcfn_share; + const char *ex_suffix = NDB_EXCEPTIONS_TABLE_SUFFIX; + if (cfn_share == NULL) { + *ppcfn_share = cfn_share = (NDB_CONFLICT_FN_SHARE *)my_malloc( + PSI_INSTRUMENT_ME, sizeof(NDB_CONFLICT_FN_SHARE), + MYF(MY_WME | ME_FATALERROR)); slave_reset_conflict_fn(cfn_share); } - cfn_share->m_conflict_fn= conflict_fn; + cfn_share->m_conflict_fn = conflict_fn; /* Calculate resolve col stuff (if relevant) */ - cfn_share->m_resolve_size= resolve_col_sz; - cfn_share->m_resolve_column= field_index; + cfn_share->m_resolve_size = resolve_col_sz; + cfn_share->m_resolve_column = field_index; cfn_share->m_flags = flags; /* Init Exceptions Table Writer */ new (&cfn_share->m_ex_tab_writer) ExceptionsTableWriter(); /* Check for '$EX' or '$ex' suffix in table name */ - for (int tries= 2; - tries-- > 0; - ex_suffix= - (tries == 1) - ? (const char *)NDB_EXCEPTIONS_TABLE_SUFFIX_LOWER - : NullS) - { + for (int tries = 2; tries-- > 0; + ex_suffix = (tries == 1) + ? 
(const char *)NDB_EXCEPTIONS_TABLE_SUFFIX_LOWER + : NullS) { /* get exceptions table */ char ex_tab_name[FN_REFLEN]; - strxnmov(ex_tab_name, sizeof(ex_tab_name), tabName, - ex_suffix, NullS); + strxnmov(ex_tab_name, sizeof(ex_tab_name), tabName, ex_suffix, NullS); ndb->setDatabaseName(dbName); Ndb_table_guard ndbtab_g(dict, ex_tab_name); - const NDBTAB *ex_tab= ndbtab_g.get_table(); - if (ex_tab) - { - char msgBuf[ FN_REFLEN ]; - const char* msg = NULL; - if (cfn_share->m_ex_tab_writer.init(ndbtab, - ex_tab, - msgBuf, - sizeof(msgBuf), - &msg) == 0) - { + const NDBTAB *ex_tab = ndbtab_g.get_table(); + if (ex_tab) { + char msgBuf[FN_REFLEN]; + const char *msg = NULL; + if (cfn_share->m_ex_tab_writer.init(ndbtab, ex_tab, msgBuf, + sizeof(msgBuf), &msg) == 0) { /* Ok */ /* Hold our table reference outside the table_guard scope */ ndbtab_g.release(); /* Table looked suspicious, warn user */ - if (msg) - ndb_log_warning("NDB Slave: %s", msg); + if (msg) ndb_log_warning("NDB Slave: %s", msg); - ndb_log_verbose(1, - "NDB Slave: Table %s.%s logging exceptions to %s.%s", - dbName, tabName, - dbName, ex_tab_name); - } - else - { + ndb_log_verbose(1, "NDB Slave: Table %s.%s logging exceptions to %s.%s", + dbName, tabName, dbName, ex_tab_name); + } else { ndb_log_warning("NDB Slave: %s", msg); } break; @@ -2699,213 +2293,158 @@ slave_set_resolve_fn(Ndb* ndb, DBUG_RETURN(0); } - -bool -is_exceptions_table(const char *table_name) -{ +bool is_exceptions_table(const char *table_name) { size_t len = strlen(table_name); size_t suffixlen = strlen(NDB_EXCEPTIONS_TABLE_SUFFIX); - if(len > suffixlen && - (strcmp(table_name + len - suffixlen, - lower_case_table_names ? NDB_EXCEPTIONS_TABLE_SUFFIX_LOWER : - NDB_EXCEPTIONS_TABLE_SUFFIX) == 0)) - { - return true; + if (len > suffixlen && + (strcmp(table_name + len - suffixlen, + lower_case_table_names ? 
NDB_EXCEPTIONS_TABLE_SUFFIX_LOWER + : NDB_EXCEPTIONS_TABLE_SUFFIX) == 0)) { + return true; } return false; } -int -setup_conflict_fn(Ndb* ndb, - NDB_CONFLICT_FN_SHARE** ppcfn_share, - const char* dbName, - const char* tabName, - bool tableBinlogUseUpdate, - const NdbDictionary::Table* ndbtab, - char *msg, uint msg_len, - const st_conflict_fn_def* conflict_fn, - const st_conflict_fn_arg* args, - const Uint32 num_args) -{ +int setup_conflict_fn(Ndb *ndb, NDB_CONFLICT_FN_SHARE **ppcfn_share, + const char *dbName, const char *tabName, + bool tableBinlogUseUpdate, + const NdbDictionary::Table *ndbtab, char *msg, + uint msg_len, const st_conflict_fn_def *conflict_fn, + const st_conflict_fn_arg *args, const Uint32 num_args) { DBUG_ENTER("setup_conflict_fn"); - if(is_exceptions_table(tabName)) - { - snprintf(msg, msg_len, - "Table %s.%s is exceptions table: not using conflict function %s", - dbName, - tabName, - conflict_fn->name); + if (is_exceptions_table(tabName)) { + snprintf(msg, msg_len, + "Table %s.%s is exceptions table: not using conflict function %s", + dbName, tabName, conflict_fn->name); DBUG_PRINT("info", ("%s", msg)); DBUG_RETURN(0); - } - + } + /* setup the function */ - switch (conflict_fn->type) - { - case CFT_NDB_MAX: - case CFT_NDB_OLD: - case CFT_NDB_MAX_DEL_WIN: - { - if (num_args != 1) - { - snprintf(msg, msg_len, - "Incorrect arguments to conflict function"); - DBUG_PRINT("info", ("%s", msg)); - DBUG_RETURN(-1); - } + switch (conflict_fn->type) { + case CFT_NDB_MAX: + case CFT_NDB_OLD: + case CFT_NDB_MAX_DEL_WIN: { + if (num_args != 1) { + snprintf(msg, msg_len, "Incorrect arguments to conflict function"); + DBUG_PRINT("info", ("%s", msg)); + DBUG_RETURN(-1); + } - /* Now try to find the column in the table */ - int colNum = -1; - const char* resolveColName = args[0].resolveColNameBuff; - int resolveColNameLen = (int)strlen(resolveColName); + /* Now try to find the column in the table */ + int colNum = -1; + const char *resolveColName = args[0].resolveColNameBuff; + int resolveColNameLen = (int)strlen(resolveColName); - for (int j=0; j< ndbtab->getNoOfColumns(); j++) - { - const char* colName = ndbtab->getColumn(j)->getName(); - - if (strncmp(colName, - resolveColName, - resolveColNameLen) == 0 && - colName[resolveColNameLen] == '\0') - { - colNum = j; - break; + for (int j = 0; j < ndbtab->getNoOfColumns(); j++) { + const char *colName = ndbtab->getColumn(j)->getName(); + + if (strncmp(colName, resolveColName, resolveColNameLen) == 0 && + colName[resolveColNameLen] == '\0') { + colNum = j; + break; + } + } + if (colNum == -1) { + snprintf(msg, msg_len, "Could not find resolve column %s.", + resolveColName); + DBUG_PRINT("info", ("%s", msg)); + DBUG_RETURN(-1); } - } - if (colNum == -1) - { - snprintf(msg, msg_len, - "Could not find resolve column %s.", - resolveColName); - DBUG_PRINT("info", ("%s", msg)); - DBUG_RETURN(-1); - } - const uint resolve_col_sz= slave_check_resolve_col_type(ndbtab, colNum); - if (resolve_col_sz == 0) - { - /* wrong data type */ - slave_reset_conflict_fn(*ppcfn_share); - snprintf(msg, msg_len, - "Column '%s' has wrong datatype", - resolveColName); - DBUG_PRINT("info", ("%s", msg)); - DBUG_RETURN(-1); - } + const uint resolve_col_sz = slave_check_resolve_col_type(ndbtab, colNum); + if (resolve_col_sz == 0) { + /* wrong data type */ + slave_reset_conflict_fn(*ppcfn_share); + snprintf(msg, msg_len, "Column '%s' has wrong datatype", + resolveColName); + DBUG_PRINT("info", ("%s", msg)); + DBUG_RETURN(-1); + } - if (slave_set_resolve_fn(ndb, - 
ppcfn_share, - dbName, - tabName, - ndbtab, - colNum, resolve_col_sz, - conflict_fn, CFF_NONE)) - { - snprintf(msg, msg_len, - "Unable to setup conflict resolution using column '%s'", - resolveColName); - DBUG_PRINT("info", ("%s", msg)); - DBUG_RETURN(-1); - } + if (slave_set_resolve_fn(ndb, ppcfn_share, dbName, tabName, ndbtab, + colNum, resolve_col_sz, conflict_fn, CFF_NONE)) { + snprintf(msg, msg_len, + "Unable to setup conflict resolution using column '%s'", + resolveColName); + DBUG_PRINT("info", ("%s", msg)); + DBUG_RETURN(-1); + } - /* Success, update message */ - snprintf(msg, msg_len, - "Table %s.%s using conflict_fn %s on attribute %s.", - dbName, - tabName, - conflict_fn->name, - resolveColName); - break; - } - case CFT_NDB_EPOCH2: - case CFT_NDB_EPOCH2_TRANS: - { - /* Check how updates will be logged... */ - const bool log_update_as_write = (!tableBinlogUseUpdate); - if (log_update_as_write) { + /* Success, update message */ snprintf(msg, msg_len, - "Table %s.%s configured to log updates as writes. " - "Not suitable for %s.", - dbName, tabName, conflict_fn->name); - DBUG_PRINT("info", ("%s", msg)); - DBUG_RETURN(-1); + "Table %s.%s using conflict_fn %s on attribute %s.", dbName, + tabName, conflict_fn->name, resolveColName); + break; } - } - /* Fall through - for the rest of the EPOCH* processing... */ - case CFT_NDB_EPOCH: - case CFT_NDB_EPOCH_TRANS: - { - if (num_args > 1) - { - snprintf(msg, msg_len, - "Too many arguments to conflict function"); - DBUG_PRINT("info", ("%s", msg)); - DBUG_RETURN(-1); + case CFT_NDB_EPOCH2: + case CFT_NDB_EPOCH2_TRANS: { + /* Check how updates will be logged... */ + const bool log_update_as_write = (!tableBinlogUseUpdate); + if (log_update_as_write) { + snprintf(msg, msg_len, + "Table %s.%s configured to log updates as writes. " + "Not suitable for %s.", + dbName, tabName, conflict_fn->name); + DBUG_PRINT("info", ("%s", msg)); + DBUG_RETURN(-1); + } } + /* Fall through - for the rest of the EPOCH* processing... 
*/ + case CFT_NDB_EPOCH: + case CFT_NDB_EPOCH_TRANS: { + if (num_args > 1) { + snprintf(msg, msg_len, "Too many arguments to conflict function"); + DBUG_PRINT("info", ("%s", msg)); + DBUG_RETURN(-1); + } - /* Check that table doesn't have Blobs as we don't support that */ - if (ndb_table_has_blobs(ndbtab)) - { - snprintf(msg, msg_len, "Table has Blob column(s), not suitable for %s.", - conflict_fn->name); - DBUG_PRINT("info", ("%s", msg)); - DBUG_RETURN(-1); - } + /* Check that table doesn't have Blobs as we don't support that */ + if (ndb_table_has_blobs(ndbtab)) { + snprintf(msg, msg_len, "Table has Blob column(s), not suitable for %s.", + conflict_fn->name); + DBUG_PRINT("info", ("%s", msg)); + DBUG_RETURN(-1); + } - /* Check that table has required extra meta-columns */ - /* Todo : Could warn if extra gcibits is insufficient to - * represent SavePeriod/EpochPeriod - */ - if (ndbtab->getExtraRowGciBits() == 0) - ndb_log_info("NDB Slave: Table %s.%s : %s, low epoch resolution", - dbName, tabName, conflict_fn->name); + /* Check that table has required extra meta-columns */ + /* Todo : Could warn if extra gcibits is insufficient to + * represent SavePeriod/EpochPeriod + */ + if (ndbtab->getExtraRowGciBits() == 0) + ndb_log_info("NDB Slave: Table %s.%s : %s, low epoch resolution", + dbName, tabName, conflict_fn->name); + + if (ndbtab->getExtraRowAuthorBits() == 0) { + snprintf(msg, msg_len, "No extra row author bits in table."); + DBUG_PRINT("info", ("%s", msg)); + DBUG_RETURN(-1); + } - if (ndbtab->getExtraRowAuthorBits() == 0) - { - snprintf(msg, msg_len, "No extra row author bits in table."); - DBUG_PRINT("info", ("%s", msg)); - DBUG_RETURN(-1); - } + if (slave_set_resolve_fn(ndb, ppcfn_share, dbName, tabName, ndbtab, + 0, // field_no + 0, // resolve_col_sz + conflict_fn, CFF_REFRESH_ROWS)) { + snprintf(msg, msg_len, "unable to setup conflict resolution"); + DBUG_PRINT("info", ("%s", msg)); + DBUG_RETURN(-1); + } + /* Success, update message */ + snprintf(msg, msg_len, "Table %s.%s using conflict_fn %s.", dbName, + tabName, conflict_fn->name); - if (slave_set_resolve_fn(ndb, - ppcfn_share, - dbName, - tabName, - ndbtab, - 0, // field_no - 0, // resolve_col_sz - conflict_fn, CFF_REFRESH_ROWS)) - { - snprintf(msg, msg_len, - "unable to setup conflict resolution"); - DBUG_PRINT("info", ("%s", msg)); - DBUG_RETURN(-1); + break; } - /* Success, update message */ - snprintf(msg, msg_len, - "Table %s.%s using conflict_fn %s.", - dbName, - tabName, - conflict_fn->name); - - break; - } - case CFT_NUMBER_OF_CFTS: - case CFT_NDB_UNDEF: - abort(); + case CFT_NUMBER_OF_CFTS: + case CFT_NDB_UNDEF: + abort(); } DBUG_RETURN(0); } - -void -teardown_conflict_fn(Ndb* ndb, NDB_CONFLICT_FN_SHARE* cfn_share) -{ - if (cfn_share && - cfn_share->m_ex_tab_writer.hasTable() && - ndb) - { +void teardown_conflict_fn(Ndb *ndb, NDB_CONFLICT_FN_SHARE *cfn_share) { + if (cfn_share && cfn_share->m_ex_tab_writer.hasTable() && ndb) { cfn_share->m_ex_tab_writer.mem_free(ndb); } @@ -2914,52 +2453,73 @@ teardown_conflict_fn(Ndb* ndb, NDB_CONFLICT_FN_SHARE* cfn_share) my_free(cfn_share); } - -void slave_reset_conflict_fn(NDB_CONFLICT_FN_SHARE *cfn_share) -{ - if (cfn_share) - { - cfn_share->m_conflict_fn= nullptr; - cfn_share->m_resolve_size= 0; - cfn_share->m_resolve_column= 0; +void slave_reset_conflict_fn(NDB_CONFLICT_FN_SHARE *cfn_share) { + if (cfn_share) { + cfn_share->m_conflict_fn = nullptr; + cfn_share->m_resolve_size = 0; + cfn_share->m_resolve_column = 0; cfn_share->m_flags = 0; } } - /** * Variables related to 
conflict handling * All prefixed 'ndb_conflict' */ -SHOW_VAR ndb_status_conflict_variables[]= { - {"fn_max", (char*) &g_ndb_slave_state.total_violation_count[CFT_NDB_MAX], SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"fn_old", (char*) &g_ndb_slave_state.total_violation_count[CFT_NDB_OLD], SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"fn_max_del_win", (char*) &g_ndb_slave_state.total_violation_count[CFT_NDB_MAX_DEL_WIN], SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"fn_epoch", (char*) &g_ndb_slave_state.total_violation_count[CFT_NDB_EPOCH], SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"fn_epoch_trans", (char*) &g_ndb_slave_state.total_violation_count[CFT_NDB_EPOCH_TRANS], SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"fn_epoch2", (char*) &g_ndb_slave_state.total_violation_count[CFT_NDB_EPOCH2], SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"fn_epoch2_trans", (char*) &g_ndb_slave_state.total_violation_count[CFT_NDB_EPOCH2_TRANS], SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"trans_row_conflict_count", (char*) &g_ndb_slave_state.trans_row_conflict_count, SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"trans_row_reject_count", (char*) &g_ndb_slave_state.trans_row_reject_count, SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"trans_reject_count", (char*) &g_ndb_slave_state.trans_in_conflict_count, SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"trans_detect_iter_count", (char*) &g_ndb_slave_state.trans_detect_iter_count, SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"trans_conflict_commit_count", - (char*) &g_ndb_slave_state.trans_conflict_commit_count, SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"epoch_delete_delete_count", (char*) &g_ndb_slave_state.total_delete_delete_count, SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"reflected_op_prepare_count", (char*) &g_ndb_slave_state.total_reflect_op_prepare_count, SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"reflected_op_discard_count", (char*) &g_ndb_slave_state.total_reflect_op_discard_count, SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"refresh_op_count", (char*) &g_ndb_slave_state.total_refresh_op_count, SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"last_conflict_epoch", (char*) &g_ndb_slave_state.last_conflicted_epoch, SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {"last_stable_epoch", (char*) &g_ndb_slave_state.last_stable_epoch, SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, - {NullS, NullS, SHOW_LONG, SHOW_SCOPE_GLOBAL} -}; - -int -show_ndb_status_conflict(THD*, SHOW_VAR* var, char*) -{ +SHOW_VAR ndb_status_conflict_variables[] = { + {"fn_max", (char *)&g_ndb_slave_state.total_violation_count[CFT_NDB_MAX], + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {"fn_old", (char *)&g_ndb_slave_state.total_violation_count[CFT_NDB_OLD], + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {"fn_max_del_win", + (char *)&g_ndb_slave_state.total_violation_count[CFT_NDB_MAX_DEL_WIN], + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {"fn_epoch", + (char *)&g_ndb_slave_state.total_violation_count[CFT_NDB_EPOCH], + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {"fn_epoch_trans", + (char *)&g_ndb_slave_state.total_violation_count[CFT_NDB_EPOCH_TRANS], + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {"fn_epoch2", + (char *)&g_ndb_slave_state.total_violation_count[CFT_NDB_EPOCH2], + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {"fn_epoch2_trans", + (char *)&g_ndb_slave_state.total_violation_count[CFT_NDB_EPOCH2_TRANS], + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {"trans_row_conflict_count", + (char *)&g_ndb_slave_state.trans_row_conflict_count, SHOW_LONGLONG, + SHOW_SCOPE_GLOBAL}, + {"trans_row_reject_count", + (char *)&g_ndb_slave_state.trans_row_reject_count, SHOW_LONGLONG, + SHOW_SCOPE_GLOBAL}, + {"trans_reject_count", (char 
*)&g_ndb_slave_state.trans_in_conflict_count, + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {"trans_detect_iter_count", + (char *)&g_ndb_slave_state.trans_detect_iter_count, SHOW_LONGLONG, + SHOW_SCOPE_GLOBAL}, + {"trans_conflict_commit_count", + (char *)&g_ndb_slave_state.trans_conflict_commit_count, SHOW_LONGLONG, + SHOW_SCOPE_GLOBAL}, + {"epoch_delete_delete_count", + (char *)&g_ndb_slave_state.total_delete_delete_count, SHOW_LONGLONG, + SHOW_SCOPE_GLOBAL}, + {"reflected_op_prepare_count", + (char *)&g_ndb_slave_state.total_reflect_op_prepare_count, SHOW_LONGLONG, + SHOW_SCOPE_GLOBAL}, + {"reflected_op_discard_count", + (char *)&g_ndb_slave_state.total_reflect_op_discard_count, SHOW_LONGLONG, + SHOW_SCOPE_GLOBAL}, + {"refresh_op_count", (char *)&g_ndb_slave_state.total_refresh_op_count, + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {"last_conflict_epoch", (char *)&g_ndb_slave_state.last_conflicted_epoch, + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {"last_stable_epoch", (char *)&g_ndb_slave_state.last_stable_epoch, + SHOW_LONGLONG, SHOW_SCOPE_GLOBAL}, + {NullS, NullS, SHOW_LONG, SHOW_SCOPE_GLOBAL}}; + +int show_ndb_status_conflict(THD *, SHOW_VAR *var, char *) { var->type = SHOW_ARRAY; - var->value = (char*) &ndb_status_conflict_variables; + var->value = (char *)&ndb_status_conflict_variables; return 0; } - diff --git a/storage/ndb/plugin/ndb_conflict.h b/storage/ndb/plugin/ndb_conflict.h index 352395c9ca07..e2c5c88936b7 100644 --- a/storage/ndb/plugin/ndb_conflict.h +++ b/storage/ndb/plugin/ndb_conflict.h @@ -26,24 +26,23 @@ #define NDB_CONFLICT_H #include "my_bitmap.h" -#include "mysql/plugin.h" // SHOW_VAR -#include "mysql_com.h" // NAME_CHAR_LEN -#include "sql/sql_const.h" // MAX_REF_PARTS +#include "mysql/plugin.h" // SHOW_VAR +#include "mysql_com.h" // NAME_CHAR_LEN +#include "sql/sql_const.h" // MAX_REF_PARTS #include "storage/ndb/include/ndbapi/NdbDictionary.hpp" #include "storage/ndb/include/ndbapi/NdbTransaction.hpp" #include "storage/ndb/plugin/ndb_conflict_trans.h" -enum enum_conflict_fn_type -{ - CFT_NDB_UNDEF = 0 - ,CFT_NDB_MAX - ,CFT_NDB_OLD - ,CFT_NDB_MAX_DEL_WIN - ,CFT_NDB_EPOCH - ,CFT_NDB_EPOCH_TRANS - ,CFT_NDB_EPOCH2 - ,CFT_NDB_EPOCH2_TRANS - ,CFT_NUMBER_OF_CFTS /* End marker */ +enum enum_conflict_fn_type { + CFT_NDB_UNDEF = 0, + CFT_NDB_MAX, + CFT_NDB_OLD, + CFT_NDB_MAX_DEL_WIN, + CFT_NDB_EPOCH, + CFT_NDB_EPOCH_TRANS, + CFT_NDB_EPOCH2, + CFT_NDB_EPOCH2_TRANS, + CFT_NUMBER_OF_CFTS /* End marker */ }; /** @@ -51,46 +50,40 @@ enum enum_conflict_fn_type * member of the 'extra row info' on a Binlog row * event */ -enum enum_binlog_extra_info_conflict_flags -{ +enum enum_binlog_extra_info_conflict_flags { NDB_ERIF_CFT_REFLECT_OP = 0x1, NDB_ERIF_CFT_REFRESH_OP = 0x2, NDB_ERIF_CFT_READ_OP = 0x4 }; -static const uint MAX_CONFLICT_ARGS= 8; +static const uint MAX_CONFLICT_ARGS = 8; -enum enum_conflict_fn_arg_type -{ - CFAT_END - ,CFAT_COLUMN_NAME - ,CFAT_EXTRA_GCI_BITS +enum enum_conflict_fn_arg_type { + CFAT_END, + CFAT_COLUMN_NAME, + CFAT_EXTRA_GCI_BITS }; -struct st_conflict_fn_arg -{ +struct st_conflict_fn_arg { enum_conflict_fn_arg_type type; - union - { - char resolveColNameBuff[ NAME_CHAR_LEN + 1 ]; // CFAT_COLUMN_NAME - uint32 extraGciBits; // CFAT_EXTRA_GCI_BITS + union { + char resolveColNameBuff[NAME_CHAR_LEN + 1]; // CFAT_COLUMN_NAME + uint32 extraGciBits; // CFAT_EXTRA_GCI_BITS }; }; -struct st_conflict_fn_arg_def -{ +struct st_conflict_fn_arg_def { enum enum_conflict_fn_arg_type arg_type; bool optional; }; /* What type of operation was issued */ -enum enum_conflicting_op_type -{ /* 
NdbApi */ - WRITE_ROW = 1, /* insert (!write) */ - UPDATE_ROW = 2, /* update */ - DELETE_ROW = 3, /* delete */ - REFRESH_ROW = 4, /* refresh */ - READ_ROW = 5 /* read tracking */ +enum enum_conflicting_op_type { /* NdbApi */ + WRITE_ROW = 1, /* insert (!write) */ + UPDATE_ROW = 2, /* update */ + DELETE_ROW = 3, /* delete */ + REFRESH_ROW = 4, /* refresh */ + READ_ROW = 5 /* read tracking */ }; /* @@ -105,16 +98,15 @@ enum enum_conflicting_op_type Type of function used to prepare for conflict detection on an NdbApi operation */ -typedef int (* prepare_detect_func) (struct NDB_CONFLICT_FN_SHARE* cfn_share, - enum_conflicting_op_type op_type, - const NdbRecord* data_record, - const uchar* old_data, - const uchar* new_data, - /* Before image columns bitmap */ - const MY_BITMAP* bi_cols, - /* After image columns bitmap */ - const MY_BITMAP* ai_cols, - class NdbInterpretedCode* code); +typedef int (*prepare_detect_func)(struct NDB_CONFLICT_FN_SHARE *cfn_share, + enum_conflicting_op_type op_type, + const NdbRecord *data_record, + const uchar *old_data, const uchar *new_data, + /* Before image columns bitmap */ + const MY_BITMAP *bi_cols, + /* After image columns bitmap */ + const MY_BITMAP *ai_cols, + class NdbInterpretedCode *code); /** * enum_conflict_fn_flags @@ -123,51 +115,44 @@ typedef int (* prepare_detect_func) (struct NDB_CONFLICT_FN_SHARE* cfn_share, * controlled on a per-table basis. * TODO : Encapsulate all these per-algorithm details inside the algorithm */ -enum enum_conflict_fn_flags -{ - CF_TRANSACTIONAL = 0x1, /* Conflicts are handled per transaction */ - CF_REFLECT_SEC_OPS = 0x2, /* Secondary operations are reflected back */ - CF_USE_ROLE_VAR = 0x4, /* Functionality controlled by role variable */ - CF_DEL_DEL_CFT = 0x8 /* Delete finding no row is a conflict */ +enum enum_conflict_fn_flags { + CF_TRANSACTIONAL = 0x1, /* Conflicts are handled per transaction */ + CF_REFLECT_SEC_OPS = 0x2, /* Secondary operations are reflected back */ + CF_USE_ROLE_VAR = 0x4, /* Functionality controlled by role variable */ + CF_DEL_DEL_CFT = 0x8 /* Delete finding no row is a conflict */ }; -struct st_conflict_fn_def -{ +struct st_conflict_fn_def { const char *name; enum_conflict_fn_type type; - const st_conflict_fn_arg_def* arg_defs; + const st_conflict_fn_arg_def *arg_defs; prepare_detect_func prep_func; uint8 flags; /* enum_conflict_fn_flags */ }; /* What sort of conflict was found */ -enum enum_conflict_cause -{ +enum enum_conflict_cause { ROW_ALREADY_EXISTS = 1, /* On insert */ ROW_DOES_NOT_EXIST = 2, /* On Update, Delete */ - ROW_IN_CONFLICT = 3, /* On Update, Delete */ - TRANS_IN_CONFLICT = 4 /* Any of above, or implied by transaction */ + ROW_IN_CONFLICT = 3, /* On Update, Delete */ + TRANS_IN_CONFLICT = 4 /* Any of above, or implied by transaction */ }; /* NdbOperation custom data which points out handler and record. 
*/ struct Ndb_exceptions_data { - struct NDB_SHARE* share; - const NdbRecord* key_rec; - const NdbRecord* data_rec; - const uchar* old_row; - const uchar* new_row; - my_bitmap_map* bitmap_buf; /* Buffer for write_set */ - MY_BITMAP* write_set; + struct NDB_SHARE *share; + const NdbRecord *key_rec; + const NdbRecord *data_rec; + const uchar *old_row; + const uchar *new_row; + my_bitmap_map *bitmap_buf; /* Buffer for write_set */ + MY_BITMAP *write_set; enum_conflicting_op_type op_type; bool reflected_operation; Uint64 trans_id; }; -enum enum_conflict_fn_table_flags -{ - CFF_NONE = 0, - CFF_REFRESH_ROWS = 1 -}; +enum enum_conflict_fn_table_flags { CFF_NONE = 0, CFF_REFRESH_ROWS = 1 }; /* Maximum supported key parts (16) @@ -181,33 +166,29 @@ static const int NDB_MAX_KEY_PARTS = MAX_REF_PARTS; Helper class for inserting entries into an exceptions table */ -class ExceptionsTableWriter -{ - enum COLUMN_VERSION { - DEFAULT = 0, - OLD = 1, - NEW = 2 - }; +class ExceptionsTableWriter { + enum COLUMN_VERSION { DEFAULT = 0, OLD = 1, NEW = 2 }; -public: + public: ExceptionsTableWriter() - : m_pk_cols(0), m_cols(0), m_xcols(0), m_ex_tab(NULL), - m_count(0), m_extended(false), m_op_type_pos(0), m_conflict_cause_pos(0), - m_orig_transid_pos(0) - {} - - ~ExceptionsTableWriter() - {} + : m_pk_cols(0), + m_cols(0), + m_xcols(0), + m_ex_tab(NULL), + m_count(0), + m_extended(false), + m_op_type_pos(0), + m_conflict_cause_pos(0), + m_orig_transid_pos(0) {} + + ~ExceptionsTableWriter() {} /** hasTable Returns true if there is an Exceptions table */ - bool hasTable() const - { - return m_ex_tab != NULL; - } + bool hasTable() const { return m_ex_tab != NULL; } /** init @@ -217,18 +198,16 @@ class ExceptionsTableWriter May set a warning message on success or error. */ - int init(const NdbDictionary::Table* mainTable, - const NdbDictionary::Table* exceptionsTable, - char* msg_buf, - uint msg_buf_len, - const char** msg); + int init(const NdbDictionary::Table *mainTable, + const NdbDictionary::Table *exceptionsTable, char *msg_buf, + uint msg_buf_len, const char **msg); /** free Release reference to exceptions table */ - void mem_free(Ndb* ndb); + void mem_free(Ndb *ndb); /** writeRow @@ -236,40 +215,30 @@ class ExceptionsTableWriter Write a row to the Exceptions Table for the given key */ - int writeRow(NdbTransaction* trans, - const NdbRecord* keyRecord, - const NdbRecord* dataRecord, - uint32 server_id, - uint32 master_server_id, - uint64 master_epoch, - const uchar* oldRowPtr, - const uchar* newRowPtr, + int writeRow(NdbTransaction *trans, const NdbRecord *keyRecord, + const NdbRecord *dataRecord, uint32 server_id, + uint32 master_server_id, uint64 master_epoch, + const uchar *oldRowPtr, const uchar *newRowPtr, enum_conflicting_op_type op_type, - enum_conflict_cause conflict_cause, - uint64 orig_transid, - const MY_BITMAP *write_set, - NdbError& err); + enum_conflict_cause conflict_cause, uint64 orig_transid, + const MY_BITMAP *write_set, NdbError &err); -private: + private: /* Help methods for checking exception table definition */ - bool check_mandatory_columns(const NdbDictionary::Table* exceptionsTable); - bool check_pk_columns(const NdbDictionary::Table* mainTable, - const NdbDictionary::Table* exceptionsTable, - int &k); - bool check_optional_columns(const NdbDictionary::Table* mainTable, - const NdbDictionary::Table* exceptionsTable, - char* msg_buf, - uint msg_buf_len, - const char** msg, - int &k, - char *error_details, + bool check_mandatory_columns(const NdbDictionary::Table *exceptionsTable); + bool 
check_pk_columns(const NdbDictionary::Table *mainTable, + const NdbDictionary::Table *exceptionsTable, int &k); + bool check_optional_columns(const NdbDictionary::Table *mainTable, + const NdbDictionary::Table *exceptionsTable, + char *msg_buf, uint msg_buf_len, const char **msg, + int &k, char *error_details, uint error_details_len); /* info about original table */ uint8 m_pk_cols; uint16 m_cols; /* Specifies if a column in the original table is nullable */ - bool m_col_nullable[ NDB_MAX_ATTRIBUTES_IN_TABLE ]; + bool m_col_nullable[NDB_MAX_ATTRIBUTES_IN_TABLE]; /* info about exceptions table */ uint16 m_xcols; @@ -288,50 +257,47 @@ class ExceptionsTableWriter Mapping of where the referenced primary key fields are in the original table. Doesn't have to include all fields. */ - uint16 m_key_attrids[ NDB_MAX_KEY_PARTS ]; + uint16 m_key_attrids[NDB_MAX_KEY_PARTS]; /* Mapping of pk columns in original table to conflict table */ - int m_key_data_pos[ NDB_MAX_KEY_PARTS ]; + int m_key_data_pos[NDB_MAX_KEY_PARTS]; /* Mapping of non-pk columns in original table to conflict table */ - int m_data_pos[ NDB_MAX_ATTRIBUTES_IN_TABLE ]; + int m_data_pos[NDB_MAX_ATTRIBUTES_IN_TABLE]; /* Specifies what version of a column is reference (before- or after-image) */ - COLUMN_VERSION m_column_version[ NDB_MAX_ATTRIBUTES_IN_TABLE ]; + COLUMN_VERSION m_column_version[NDB_MAX_ATTRIBUTES_IN_TABLE]; /* has_prefix_ci Return true if a column has a specific prefix. */ - bool has_prefix_ci(const char *col_name, const char *prefix, CHARSET_INFO *cs); + bool has_prefix_ci(const char *col_name, const char *prefix, + CHARSET_INFO *cs); -/* - has_suffix_ci - - Return true if a column has a specific suffix - and sets the column_real_name to the column name - without the suffix. -*/ -bool has_suffix_ci(const char *col_name, - const char *suffix, - CHARSET_INFO *cs, - char *col_name_real); + /* + has_suffix_ci -/* - find_column_name_ci + Return true if a column has a specific suffix + and sets the column_real_name to the column name + without the suffix. + */ + bool has_suffix_ci(const char *col_name, const char *suffix, CHARSET_INFO *cs, + char *col_name_real); - Search for column_name in table and - return true if found. Also return what - position column was found in pos and possible - position in the primary key in key_pos. - */ -bool find_column_name_ci(CHARSET_INFO *cs, - const char *col_name, - const NdbDictionary::Table* table, - int *pos, - int *no_key_cols); + /* + find_column_name_ci + + Search for column_name in table and + return true if found. Also return what + position column was found in pos and possible + position in the primary key in key_pos. 
+ */ + bool find_column_name_ci(CHARSET_INFO *cs, const char *col_name, + const NdbDictionary::Table *table, int *pos, + int *no_key_cols); }; -struct NDB_CONFLICT_FN_SHARE{ - const st_conflict_fn_def* m_conflict_fn; +struct NDB_CONFLICT_FN_SHARE { + const st_conflict_fn_def *m_conflict_fn; /* info about original table */ uint16 m_resolve_column; @@ -341,7 +307,6 @@ struct NDB_CONFLICT_FN_SHARE{ ExceptionsTableWriter m_ex_tab_writer; }; - /** * enum_slave_conflict_role * @@ -349,16 +314,14 @@ struct NDB_CONFLICT_FN_SHARE{ * in asymmetric conflict algorithms */ -enum enum_slave_conflict_role -{ +enum enum_slave_conflict_role { SCR_NONE = 0, SCR_SECONDARY = 1, SCR_PRIMARY = 2, SCR_PASS = 3 }; -enum enum_slave_trans_conflict_apply_state -{ +enum enum_slave_trans_conflict_apply_state { /* Normal with optional row-level conflict detection */ SAS_NORMAL, @@ -375,8 +338,7 @@ enum enum_slave_trans_conflict_apply_state SAS_APPLY_TRANS_DEPENDENCIES }; -enum enum_slave_conflict_flags -{ +enum enum_slave_conflict_flags { /* Conflict detection Ops defined */ SCS_OPS_DEFINED = 1, /* Conflict detected on table with transactional resolution */ @@ -387,8 +349,7 @@ enum enum_slave_conflict_flags State associated with the Slave thread (From the Ndb handler's point of view) */ -struct st_ndb_slave_state -{ +struct st_ndb_slave_state { /* Counter values for current slave transaction */ Uint32 current_violation_count[CFT_NUMBER_OF_CFTS]; @@ -403,27 +364,27 @@ struct st_ndb_slave_state * prepared (defined) to be executed. */ Uint32 current_reflect_op_prepare_count; - + /** * Number of reflected operations that were not applied as * they hit some error during execution */ Uint32 current_reflect_op_discard_count; - + /** * Number of refresh operations that have been prepared */ Uint32 current_refresh_op_count; - + /* Track the current epoch from the immediate master, * and whether we've committed it */ Uint64 current_master_server_epoch; bool current_master_server_epoch_committed; - + Uint64 current_max_rep_epoch; uint8 conflict_flags; /* enum_slave_conflict_flags */ - /* Transactional conflict detection */ + /* Transactional conflict detection */ Uint32 retry_trans_count; Uint32 current_trans_row_conflict_count; Uint32 current_trans_row_reject_count; @@ -460,17 +421,16 @@ struct st_ndb_slave_state enum_slave_trans_conflict_apply_state trans_conflict_apply_state; MEM_ROOT conflict_mem_root; - class DependencyTracker* trans_dependency_tracker; + class DependencyTracker *trans_dependency_tracker; /* Methods */ void atStartSlave(); - int atPrepareConflictDetection(const NdbDictionary::Table* table, - const NdbRecord* key_rec, - const uchar* row_data, - Uint64 transaction_id, - bool& handle_conflict_now); - int atTransConflictDetected(Uint64 transaction_id); - int atConflictPreCommit(bool& retry_slave_trans); + int atPrepareConflictDetection(const NdbDictionary::Table *table, + const NdbRecord *key_rec, + const uchar *row_data, Uint64 transaction_id, + bool &handle_conflict_now); + int atTransConflictDetected(Uint64 transaction_id); + int atConflictPreCommit(bool &retry_slave_trans); void atBeginTransConflictHandling(); void atEndTransConflictHandling(); @@ -479,58 +439,41 @@ struct st_ndb_slave_state void atTransactionAbort(); void atResetSlave(); - int atApplyStatusWrite(Uint32 master_server_id, - Uint32 row_server_id, - Uint64 row_epoch, - bool is_row_server_id_local); - bool verifyNextEpoch(Uint64 next_epoch, - Uint32 master_server_id) const; + int atApplyStatusWrite(Uint32 master_server_id, Uint32 row_server_id, 
+ Uint64 row_epoch, bool is_row_server_id_local); + bool verifyNextEpoch(Uint64 next_epoch, Uint32 master_server_id) const; void resetPerAttemptCounters(); - static - bool checkSlaveConflictRoleChange(enum_slave_conflict_role old_role, - enum_slave_conflict_role new_role, - const char** failure_cause); + static bool checkSlaveConflictRoleChange(enum_slave_conflict_role old_role, + enum_slave_conflict_role new_role, + const char **failure_cause); st_ndb_slave_state(); ~st_ndb_slave_state(); }; - -const uint error_conflict_fn_violation= 9999; +const uint error_conflict_fn_violation = 9999; /** * Conflict function setup infrastructure */ -int -parse_conflict_fn_spec(const char* conflict_fn_spec, - const st_conflict_fn_def** conflict_fn, - st_conflict_fn_arg* args, - Uint32* max_args, - char *msg, uint msg_len); -int -setup_conflict_fn(Ndb* ndb, - NDB_CONFLICT_FN_SHARE** ppcfn_share, - const char* dbName, - const char* tabName, - bool tableBinlogUseUpdate, - const NdbDictionary::Table *ndbtab, - char *msg, uint msg_len, - const st_conflict_fn_def* conflict_fn, - const st_conflict_fn_arg* args, - const Uint32 num_args); - -void -teardown_conflict_fn(Ndb* ndb, - NDB_CONFLICT_FN_SHARE* cfn_share); - -void -slave_reset_conflict_fn(NDB_CONFLICT_FN_SHARE *cfn_share); - -bool -is_exceptions_table(const char *table_name); +int parse_conflict_fn_spec(const char *conflict_fn_spec, + const st_conflict_fn_def **conflict_fn, + st_conflict_fn_arg *args, Uint32 *max_args, + char *msg, uint msg_len); +int setup_conflict_fn(Ndb *ndb, NDB_CONFLICT_FN_SHARE **ppcfn_share, + const char *dbName, const char *tabName, + bool tableBinlogUseUpdate, + const NdbDictionary::Table *ndbtab, char *msg, + uint msg_len, const st_conflict_fn_def *conflict_fn, + const st_conflict_fn_arg *args, const Uint32 num_args); + +void teardown_conflict_fn(Ndb *ndb, NDB_CONFLICT_FN_SHARE *cfn_share); + +void slave_reset_conflict_fn(NDB_CONFLICT_FN_SHARE *cfn_share); +bool is_exceptions_table(const char *table_name); /** show_ndb_status_conflict @@ -539,7 +482,6 @@ is_exceptions_table(const char *table_name); queries. Returns info about ndb_conflict related status variables. 
*/ -int -show_ndb_status_conflict(THD* thd, SHOW_VAR* var, char* buff); +int show_ndb_status_conflict(THD *thd, SHOW_VAR *var, char *buff); #endif diff --git a/storage/ndb/plugin/ndb_conflict_trans.cc b/storage/ndb/plugin/ndb_conflict_trans.cc index d0a6eba4ff8a..2cd423a90bb5 100644 --- a/storage/ndb/plugin/ndb_conflict_trans.cc +++ b/storage/ndb/plugin/ndb_conflict_trans.cc @@ -41,201 +41,127 @@ /* st_row_event_key_info implementation */ -st_row_event_key_info:: -st_row_event_key_info(const NdbDictionary::Table* _table, - const uchar* _key_buff, - Uint32 _key_buff_len, - Uint64 _transaction_id): - tableObj(_table), - packed_key(_key_buff), - packed_key_len(_key_buff_len), - transaction_id(_transaction_id), - hash_next(NULL) -{ -} - -Uint64 -st_row_event_key_info::getTransactionId() const -{ +st_row_event_key_info::st_row_event_key_info(const NdbDictionary::Table *_table, + const uchar *_key_buff, + Uint32 _key_buff_len, + Uint64 _transaction_id) + : tableObj(_table), + packed_key(_key_buff), + packed_key_len(_key_buff_len), + transaction_id(_transaction_id), + hash_next(NULL) {} + +Uint64 st_row_event_key_info::getTransactionId() const { return transaction_id; } -void -st_row_event_key_info::updateRowTransactionId(Uint64 mostRecentTransId) -{ +void st_row_event_key_info::updateRowTransactionId(Uint64 mostRecentTransId) { transaction_id = mostRecentTransId; } -Uint32 -st_row_event_key_info::hashValue() const -{ +Uint32 st_row_event_key_info::hashValue() const { /* Include Table Object Id + primary key */ Uint32 h = (17 * 37) + tableObj->getObjectId(); - for (Uint32 i=0; i < packed_key_len; i++) - h = (37 * h) + packed_key[i]; + for (Uint32 i = 0; i < packed_key_len; i++) h = (37 * h) + packed_key[i]; return h; } -bool -st_row_event_key_info::equal(const st_row_event_key_info* other) const -{ +bool st_row_event_key_info::equal(const st_row_event_key_info *other) const { /* Check same table + same PK */ - return - ((tableObj == other->tableObj) && - (packed_key_len == other->packed_key_len) && - (memcmp(packed_key, other->packed_key, packed_key_len) == 0)); + return ((tableObj == other->tableObj) && + (packed_key_len == other->packed_key_len) && + (memcmp(packed_key, other->packed_key, packed_key_len) == 0)); } -st_row_event_key_info* -st_row_event_key_info::getNext() const -{ +st_row_event_key_info *st_row_event_key_info::getNext() const { return hash_next; } -void -st_row_event_key_info::setNext(st_row_event_key_info* _next) -{ +void st_row_event_key_info::setNext(st_row_event_key_info *_next) { hash_next = _next; } - /* st_trans_dependency implementation */ -st_trans_dependency:: -st_trans_dependency(st_transaction* _target_transaction, - st_transaction* _dependent_transaction, - const st_trans_dependency* _next) - : target_transaction(_target_transaction), - dependent_transaction(_dependent_transaction), - next_entry(_next), - hash_next(NULL) -{ -} +st_trans_dependency::st_trans_dependency(st_transaction *_target_transaction, + st_transaction *_dependent_transaction, + const st_trans_dependency *_next) + : target_transaction(_target_transaction), + dependent_transaction(_dependent_transaction), + next_entry(_next), + hash_next(NULL) {} -st_transaction* -st_trans_dependency::getTargetTransaction() const -{ +st_transaction *st_trans_dependency::getTargetTransaction() const { return target_transaction; } -st_transaction* -st_trans_dependency::getDependentTransaction() const -{ +st_transaction *st_trans_dependency::getDependentTransaction() const { return dependent_transaction; } -const 
st_trans_dependency* -st_trans_dependency::getNextDependency() const -{ +const st_trans_dependency *st_trans_dependency::getNextDependency() const { return next_entry; } -Uint32 -st_trans_dependency::hashValue() const -{ +Uint32 st_trans_dependency::hashValue() const { /* Hash the ptrs in a rather nasty way */ - UintPtr p = - ((UintPtr) target_transaction) ^ - ((UintPtr) dependent_transaction);; + UintPtr p = ((UintPtr)target_transaction) ^ ((UintPtr)dependent_transaction); + ; - if (sizeof(p) == 8) - { + if (sizeof(p) == 8) { /* Xor two words of 64 bit ptr */ - p = - (p & 0xffffffff) ^ - ((((Uint64) p) >> 32) & 0xffffffff); + p = (p & 0xffffffff) ^ ((((Uint64)p) >> 32) & 0xffffffff); } return 17 + (37 * (Uint32)p); } -bool -st_trans_dependency::equal(const st_trans_dependency* other) const -{ +bool st_trans_dependency::equal(const st_trans_dependency *other) const { return ((target_transaction == other->target_transaction) && (dependent_transaction == other->dependent_transaction)); } -st_trans_dependency* -st_trans_dependency::getNext() const -{ - return hash_next; -} +st_trans_dependency *st_trans_dependency::getNext() const { return hash_next; } -void -st_trans_dependency::setNext(st_trans_dependency* _next) -{ +void st_trans_dependency::setNext(st_trans_dependency *_next) { hash_next = _next; } - /* st_transaction implementation */ st_transaction::st_transaction(Uint64 _transaction_id) - : transaction_id(_transaction_id), - in_conflict(false), - dependency_list_head(NULL), - hash_next(NULL) -{ -} + : transaction_id(_transaction_id), + in_conflict(false), + dependency_list_head(NULL), + hash_next(NULL) {} -Uint64 -st_transaction::getTransactionId() const -{ - return transaction_id; -} +Uint64 st_transaction::getTransactionId() const { return transaction_id; } -bool -st_transaction::getInConflict() const -{ - return in_conflict; -} +bool st_transaction::getInConflict() const { return in_conflict; } -void -st_transaction::setInConflict() -{ - in_conflict = true; -} +void st_transaction::setInConflict() { in_conflict = true; } -const st_trans_dependency* -st_transaction::getDependencyListHead() const -{ +const st_trans_dependency *st_transaction::getDependencyListHead() const { return dependency_list_head; } -void -st_transaction::setDependencyListHead(st_trans_dependency* head) -{ +void st_transaction::setDependencyListHead(st_trans_dependency *head) { dependency_list_head = head; } /* Hash Api */ -Uint32 -st_transaction::hashValue() const -{ +Uint32 st_transaction::hashValue() const { return 17 + (37 * ((transaction_id & 0xffffffff) ^ (transaction_id >> 32 & 0xffffffff))); } -bool -st_transaction::equal(const st_transaction* other) const -{ +bool st_transaction::equal(const st_transaction *other) const { return transaction_id == other->transaction_id; } -st_transaction* -st_transaction::getNext() const -{ - return hash_next; -} - -void -st_transaction::setNext(st_transaction* _next) -{ - hash_next = _next; -} - +st_transaction *st_transaction::getNext() const { return hash_next; } +void st_transaction::setNext(st_transaction *_next) { hash_next = _next; } /* Unique HashMap(Set) of st_row_event_key_info ptrs, with bucket storage @@ -246,7 +172,6 @@ template class HashMap2; template class HashMap2; template class LinkedStack; - /** * pack_key_to_buffer * @@ -255,14 +180,9 @@ template class LinkedStack; * and if a buffer is passed, will copy the bytes into the * buffer. 
*/ -static -int -pack_key_to_buffer(const NdbDictionary::Table* table, - const NdbRecord* key_rec, - const uchar* record, - uchar* buffer, - Uint32& buff_len) -{ +static int pack_key_to_buffer(const NdbDictionary::Table *table, + const NdbRecord *key_rec, const uchar *record, + uchar *buffer, Uint32 &buff_len) { /* Loop over attributes in key record, determining their actual * size based on column type and contents of record * If buffer supplied, copy them contiguously to buffer @@ -272,176 +192,137 @@ pack_key_to_buffer(const NdbDictionary::Table* table, Uint32 buff_offset = 0; NdbDictionary::getFirstAttrId(key_rec, attr_id); - do - { + do { Uint32 from_offset = 0; Uint32 byte_len = 0; - const NdbDictionary::Column* key_col = table->getColumn(attr_id); + const NdbDictionary::Column *key_col = table->getColumn(attr_id); NdbDictionary::getOffset(key_rec, attr_id, from_offset); - assert( ! NdbDictionary::isNull(key_rec, (const char*) record, attr_id)); - - switch(key_col->getArrayType()) - { - case NDB_ARRAYTYPE_FIXED: - byte_len = key_col->getSizeInBytes(); - break; - case NDB_ARRAYTYPE_SHORT_VAR: - byte_len = record[from_offset]; - from_offset++; - break; - case NDB_ARRAYTYPE_MEDIUM_VAR: - byte_len = uint2korr(&record[from_offset]); - from_offset+= 2; - break; + assert(!NdbDictionary::isNull(key_rec, (const char *)record, attr_id)); + + switch (key_col->getArrayType()) { + case NDB_ARRAYTYPE_FIXED: + byte_len = key_col->getSizeInBytes(); + break; + case NDB_ARRAYTYPE_SHORT_VAR: + byte_len = record[from_offset]; + from_offset++; + break; + case NDB_ARRAYTYPE_MEDIUM_VAR: + byte_len = uint2korr(&record[from_offset]); + from_offset += 2; + break; }; - assert( (buff_offset + byte_len) <= buff_len ); + assert((buff_offset + byte_len) <= buff_len); - if (buffer) - memcpy(&buffer[buff_offset], &record[from_offset], byte_len); + if (buffer) memcpy(&buffer[buff_offset], &record[from_offset], byte_len); - buff_offset+= byte_len; + buff_offset += byte_len; } while (NdbDictionary::getNextAttrId(key_rec, attr_id)); buff_len = buff_offset; return 0; } -static -Uint32 determine_packed_key_size(const NdbDictionary::Table* table, - const NdbRecord* key_rec, - const uchar* record) -{ +static Uint32 determine_packed_key_size(const NdbDictionary::Table *table, + const NdbRecord *key_rec, + const uchar *record) { Uint32 key_size = ~Uint32(0); /* Use pack_key_to_buffer to calculate length required */ - pack_key_to_buffer(table, - key_rec, - record, - NULL, - key_size); + pack_key_to_buffer(table, key_rec, record, NULL, key_size); return key_size; } /* st_mem_root_allocator implementation */ -void* -st_mem_root_allocator::alloc(void* ctx, size_t bytes) -{ - st_mem_root_allocator* a = (st_mem_root_allocator*) ctx; +void *st_mem_root_allocator::alloc(void *ctx, size_t bytes) { + st_mem_root_allocator *a = (st_mem_root_allocator *)ctx; return a->mem_root->Alloc(bytes); } -void* -st_mem_root_allocator::mem_calloc(void* ctx, size_t nelem, size_t bytes) -{ - st_mem_root_allocator* a = (st_mem_root_allocator*) ctx; +void *st_mem_root_allocator::mem_calloc(void *ctx, size_t nelem, size_t bytes) { + st_mem_root_allocator *a = (st_mem_root_allocator *)ctx; return a->mem_root->Alloc(nelem * bytes); } -void st_mem_root_allocator::mem_free(void*, void*) { +void st_mem_root_allocator::mem_free(void *, void *) { /* Do nothing, will be globally freed when arena (mem_root) * released */ } -st_mem_root_allocator::st_mem_root_allocator(MEM_ROOT* _mem_root) - : mem_root(_mem_root) -{ -} - 
+st_mem_root_allocator::st_mem_root_allocator(MEM_ROOT *_mem_root) + : mem_root(_mem_root) {} /* DependencyTracker implementation */ -DependencyTracker* -DependencyTracker::newDependencyTracker(MEM_ROOT* mem_root) -{ - DependencyTracker* dt = NULL; +DependencyTracker *DependencyTracker::newDependencyTracker(MEM_ROOT *mem_root) { + DependencyTracker *dt = NULL; // Allocate memory from MEM_ROOT - void* mem = mem_root->Alloc(sizeof(DependencyTracker)); - if (mem) - { + void *mem = mem_root->Alloc(sizeof(DependencyTracker)); + if (mem) { dt = new (mem) DependencyTracker(mem_root); } return dt; } - -DependencyTracker:: -DependencyTracker(MEM_ROOT* mem_root) - : mra(mem_root), key_hash(&mra), trans_hash(&mra), - dependency_hash(&mra), - iteratorTodo(ITERATOR_STACK_BLOCKSIZE, &mra), - conflicting_trans_count(0), - error_text(NULL) -{ +DependencyTracker::DependencyTracker(MEM_ROOT *mem_root) + : mra(mem_root), + key_hash(&mra), + trans_hash(&mra), + dependency_hash(&mra), + iteratorTodo(ITERATOR_STACK_BLOCKSIZE, &mra), + conflicting_trans_count(0), + error_text(NULL) { /* TODO Get sizes from somewhere */ key_hash.setSize(1024); trans_hash.setSize(100); dependency_hash.setSize(100); } - -int -DependencyTracker:: -track_operation(const NdbDictionary::Table* table, - const NdbRecord* key_rec, - const uchar* row, - Uint64 transaction_id) -{ +int DependencyTracker::track_operation(const NdbDictionary::Table *table, + const NdbRecord *key_rec, + const uchar *row, + Uint64 transaction_id) { DBUG_ENTER("track_operation"); - Uint32 required_buff_size = determine_packed_key_size(table, - key_rec, - row); + Uint32 required_buff_size = determine_packed_key_size(table, key_rec, row); DBUG_PRINT("info", ("Required length for key : %u", required_buff_size)); /* Alloc space for packed key and struct in MEM_ROOT */ - uchar* packed_key_buff = (uchar*) mra.mem_root->Alloc(required_buff_size); - void* element_mem = mra.mem_root->Alloc(sizeof(st_row_event_key_info)); - - if (pack_key_to_buffer(table, - key_rec, - row, - packed_key_buff, - required_buff_size)) - { - if (!error_text) - error_text="track_operation : Failed packing key"; + uchar *packed_key_buff = (uchar *)mra.mem_root->Alloc(required_buff_size); + void *element_mem = mra.mem_root->Alloc(sizeof(st_row_event_key_info)); + + if (pack_key_to_buffer(table, key_rec, row, packed_key_buff, + required_buff_size)) { + if (!error_text) error_text = "track_operation : Failed packing key"; DBUG_RETURN(-1); } - if (TRACK_ALL_TRANSACTIONS) - { - st_transaction* transEntry = get_or_create_transaction(transaction_id); - if (!transEntry) - { + if (TRACK_ALL_TRANSACTIONS) { + st_transaction *transEntry = get_or_create_transaction(transaction_id); + if (!transEntry) { error_text = "track_operation : Failed to get or create transaction"; DBUG_RETURN(HA_ERR_OUT_OF_MEM); } } - st_row_event_key_info* key_info = new (element_mem) - st_row_event_key_info(table, - packed_key_buff, - required_buff_size, - transaction_id); + st_row_event_key_info *key_info = new (element_mem) st_row_event_key_info( + table, packed_key_buff, required_buff_size, transaction_id); /* Now try to add element to hash */ - if (! 
key_hash.add(key_info)) - { + if (!key_hash.add(key_info)) { /* Already an element in the keyhash with this primary key If it's for the same transaction then ignore, otherwise it's an inter-transaction dependency */ - st_row_event_key_info* existing = key_hash.get(key_info); + st_row_event_key_info *existing = key_hash.get(key_info); Uint64 existingTransIdOnRow = existing->getTransactionId(); Uint64 newTransIdOnRow = key_info->getTransactionId(); - if (existingTransIdOnRow != newTransIdOnRow) - { - int res = add_dependency(existingTransIdOnRow, - newTransIdOnRow); + if (existingTransIdOnRow != newTransIdOnRow) { + int res = add_dependency(existingTransIdOnRow, newTransIdOnRow); /* Update stored transaction_id to be latest for key. Further key operations on this row will depend on this @@ -453,9 +334,7 @@ track_operation(const NdbDictionary::Table* table, assert(res == 0 || error_text != NULL); DBUG_RETURN(res); - } - else - { + } else { /* How can we have two updates to the same row with the same transaction id? Only if the transaction id @@ -468,10 +347,10 @@ track_operation(const NdbDictionary::Table* table, This could be relaxed for more complex upstream topologies, but acts as a sanity guard currently. */ - if (existingTransIdOnRow != InvalidTransactionId) - { + if (existingTransIdOnRow != InvalidTransactionId) { assert(false); - error_text= "Two row operations to same key sharing user transaction id"; + error_text = + "Two row operations to same key sharing user transaction id"; DBUG_RETURN(-1); } } @@ -480,104 +359,86 @@ track_operation(const NdbDictionary::Table* table, DBUG_RETURN(0); } -int -DependencyTracker:: -mark_conflict(Uint64 trans_id) -{ +int DependencyTracker::mark_conflict(Uint64 trans_id) { DBUG_ENTER("mark_conflict"); DBUG_PRINT("info", ("trans_id : %llu", trans_id)); - st_transaction* entry = get_or_create_transaction(trans_id); - if (!entry) - { + st_transaction *entry = get_or_create_transaction(trans_id); + if (!entry) { error_text = "mark_conflict : get_or_create_transaction() failure"; DBUG_RETURN(HA_ERR_OUT_OF_MEM); } - if (entry->getInConflict()) - { + if (entry->getInConflict()) { /* Nothing to do here */ DBUG_RETURN(0); } /* Have entry, mark it, and any dependents */ bool fetch_node_dependents; - st_transaction* dependent = entry; + st_transaction *dependent = entry; reset_dependency_iterator(); - do - { - DBUG_PRINT("info", ("Visiting transaction %llu, conflict : %u", - dependent->getTransactionId(), - dependent->getInConflict())); + do { + DBUG_PRINT("info", + ("Visiting transaction %llu, conflict : %u", + dependent->getTransactionId(), dependent->getInConflict())); /* If marked already, don't fetch dependents, as they will also be marked already */ fetch_node_dependents = false; - if (!dependent->getInConflict()) - { + if (!dependent->getInConflict()) { dependent->setInConflict(); conflicting_trans_count++; fetch_node_dependents = true; } } while ((dependent = get_next_dependency(dependent, fetch_node_dependents))); - assert( verify_graph() ); + assert(verify_graph()); DBUG_RETURN(0); } -bool -DependencyTracker::in_conflict(Uint64 trans_id) -{ +bool DependencyTracker::in_conflict(Uint64 trans_id) { DBUG_ENTER("in_conflict"); DBUG_PRINT("info", ("trans_id %llu", trans_id)); st_transaction key(trans_id); - const st_transaction* entry = NULL; + const st_transaction *entry = NULL; /* If transaction hash entry exists, check it for conflicts. 
If it doesn't exist, no conflict */ - if ((entry = trans_hash.get(&key))) - { + if ((entry = trans_hash.get(&key))) { DBUG_PRINT("info", ("in_conflict : %u", entry->getInConflict())); DBUG_RETURN(entry->getInConflict()); - } - else - { - assert(! TRACK_ALL_TRANSACTIONS); + } else { + assert(!TRACK_ALL_TRANSACTIONS); } DBUG_RETURN(false); } -st_transaction* -DependencyTracker:: -get_or_create_transaction(Uint64 trans_id) -{ +st_transaction *DependencyTracker::get_or_create_transaction(Uint64 trans_id) { DBUG_ENTER("get_or_create_transaction"); st_transaction transKey(trans_id); - st_transaction* transEntry = NULL; + st_transaction *transEntry = NULL; - if (! (transEntry = trans_hash.get(&transKey))) - { + if (!(transEntry = trans_hash.get(&transKey))) { /* Transaction does not exist. Allocate it and add to the hash */ - DBUG_PRINT("info", ("Creating new hash entry for transaction (%llu)", - trans_id)); + DBUG_PRINT("info", + ("Creating new hash entry for transaction (%llu)", trans_id)); - transEntry = (st_transaction*) - st_mem_root_allocator::alloc(&mra, sizeof(st_transaction)); + transEntry = (st_transaction *)st_mem_root_allocator::alloc( + &mra, sizeof(st_transaction)); - if (transEntry) - { + if (transEntry) { new (transEntry) st_transaction(trans_id); - if (!trans_hash.add(transEntry)) - { + if (!trans_hash.add(transEntry)) { st_mem_root_allocator::mem_free(&mra, transEntry); /* For show */ transEntry = NULL; } @@ -587,39 +448,35 @@ get_or_create_transaction(Uint64 trans_id) DBUG_RETURN(transEntry); } -int -DependencyTracker:: -add_dependency(Uint64 trans_id, Uint64 dependent_trans_id) -{ +int DependencyTracker::add_dependency(Uint64 trans_id, + Uint64 dependent_trans_id) { DBUG_ENTER("add_dependency"); DBUG_PRINT("info", ("Recording dependency of %llu on %llu", dependent_trans_id, trans_id)); - st_transaction* targetEntry = get_or_create_transaction(trans_id); - if (!targetEntry) - { + st_transaction *targetEntry = get_or_create_transaction(trans_id); + if (!targetEntry) { error_text = "add_dependency : Failed get_or_create_transaction"; DBUG_RETURN(HA_ERR_OUT_OF_MEM); } - st_transaction* dependentEntry = get_or_create_transaction(dependent_trans_id); - if (!dependentEntry) - { + st_transaction *dependentEntry = + get_or_create_transaction(dependent_trans_id); + if (!dependentEntry) { error_text = "add_dependency : Failed get_or_create_transaction"; DBUG_RETURN(HA_ERR_OUT_OF_MEM); } /* Now lookup dependency. Add it if not already present */ st_trans_dependency depKey(targetEntry, dependentEntry, NULL); - st_trans_dependency* dep = NULL; - if (! 
(dep = dependency_hash.get(&depKey))) - { + st_trans_dependency *dep = NULL; + if (!(dep = dependency_hash.get(&depKey))) { DBUG_PRINT("info", ("Creating new dependency hash entry for " "dependency of %llu on %llu.", dependentEntry->getTransactionId(), targetEntry->getTransactionId())); - dep = (st_trans_dependency*) - st_mem_root_allocator::alloc(&mra, sizeof(st_trans_dependency)); + dep = (st_trans_dependency *)st_mem_root_allocator::alloc( + &mra, sizeof(st_trans_dependency)); new (dep) st_trans_dependency(targetEntry, dependentEntry, targetEntry->getDependencyListHead()); @@ -627,8 +484,7 @@ add_dependency(Uint64 trans_id, Uint64 dependent_trans_id) targetEntry->setDependencyListHead(dep); /* New dependency, propagate in_conflict if necessary */ - if (targetEntry->getInConflict()) - { + if (targetEntry->getInConflict()) { DBUG_PRINT("info", ("Marking new dependent as in-conflict")); DBUG_RETURN(mark_conflict(dependentEntry->getTransactionId())); } @@ -639,49 +495,38 @@ add_dependency(Uint64 trans_id, Uint64 dependent_trans_id) DBUG_RETURN(0); } -void -DependencyTracker:: -reset_dependency_iterator() -{ - iteratorTodo.reset(); -} +void DependencyTracker::reset_dependency_iterator() { iteratorTodo.reset(); } -st_transaction* -DependencyTracker:: -get_next_dependency(const st_transaction* current, - bool include_dependents_of_current) -{ +st_transaction *DependencyTracker::get_next_dependency( + const st_transaction *current, bool include_dependents_of_current) { DBUG_ENTER("get_next_dependency"); DBUG_PRINT("info", ("node : %llu", current->getTransactionId())); /* Depth first traverse, with option to ignore sub graphs. */ - if (include_dependents_of_current) - { + if (include_dependents_of_current) { /* Add dependents to stack */ - const st_trans_dependency* dependency = current->getDependencyListHead(); + const st_trans_dependency *dependency = current->getDependencyListHead(); - while (dependency) - { + while (dependency) { assert(dependency->getTargetTransaction() == current); - DBUG_PRINT("info", ("Adding dependency %llu->%llu", - dependency->getDependentTransaction()->getTransactionId(), - dependency->getTargetTransaction()->getTransactionId())); + DBUG_PRINT("info", + ("Adding dependency %llu->%llu", + dependency->getDependentTransaction()->getTransactionId(), + dependency->getTargetTransaction()->getTransactionId())); Uint64 dependentTransactionId = - dependency->getDependentTransaction()->getTransactionId(); + dependency->getDependentTransaction()->getTransactionId(); iteratorTodo.push(dependentTransactionId); - dependency= dependency->getNextDependency(); + dependency = dependency->getNextDependency(); } } Uint64 nextId; - if (iteratorTodo.pop(nextId)) - { - DBUG_PRINT("info", ("Returning transaction id %llu", - nextId)); + if (iteratorTodo.pop(nextId)) { + DBUG_PRINT("info", ("Returning transaction id %llu", nextId)); st_transaction key(nextId); - st_transaction* dependent = trans_hash.get(&key); + st_transaction *dependent = trans_hash.get(&key); assert(dependent); DBUG_RETURN(dependent); } @@ -691,42 +536,31 @@ get_next_dependency(const st_transaction* current, DBUG_RETURN(NULL); } -void -DependencyTracker:: -dump_dependents(Uint64 trans_id) -{ +void DependencyTracker::dump_dependents(Uint64 trans_id) { fprintf(stderr, "Dumping dependents of transid %llu : ", trans_id); st_transaction key(trans_id); - const st_transaction* dependent = NULL; + const st_transaction *dependent = NULL; - if ((dependent = trans_hash.get(&key))) - { + if ((dependent = trans_hash.get(&key))) { 
reset_dependency_iterator(); - const char* comma = ", "; - const char* sep = ""; - do - { + const char *comma = ", "; + const char *sep = ""; + do { { fprintf(stderr, "%s%llu%s", sep, dependent->getTransactionId(), - (dependent->getInConflict()?"-C":"")); + (dependent->getInConflict() ? "-C" : "")); sep = comma; } } while ((dependent = get_next_dependency(dependent))); fprintf(stderr, "\n"); - } - else - { + } else { fprintf(stderr, "None\n"); } } -bool -DependencyTracker:: -verify_graph() -{ - if (! CHECK_TRANS_GRAPH) - return true; +bool DependencyTracker::verify_graph() { + if (!CHECK_TRANS_GRAPH) return true; /* Check the graph structure obeys its invariants @@ -740,32 +574,29 @@ verify_graph() This is expensive to verify, so not always on */ - HashMap2::Iterator it(trans_hash); + HashMap2::Iterator it( + trans_hash); - st_transaction* root = NULL; + st_transaction *root = NULL; - while ((root = it.next())) - { + while ((root = it.next())) { bool in_conflict = root->getInConflict(); /* Now visit all dependents */ - st_transaction* dependent = root; + st_transaction *dependent = root; reset_dependency_iterator(); - while((dependent = get_next_dependency(dependent, true))) - { - if (dependent == root) - { + while ((dependent = get_next_dependency(dependent, true))) { + if (dependent == root) { /* Must exit, or we'll be here forever */ fprintf(stderr, "Error : Cycle discovered in graph\n"); abort(); return false; } - if (in_conflict && - ! dependent->getInConflict()) - { - fprintf(stderr, "Error : Dependent transaction not marked in-conflict\n"); + if (in_conflict && !dependent->getInConflict()) { + fprintf(stderr, + "Error : Dependent transaction not marked in-conflict\n"); abort(); return false; } @@ -775,15 +606,8 @@ verify_graph() return true; } +const char *DependencyTracker::get_error_text() const { return error_text; } -const char* -DependencyTracker::get_error_text() const -{ - return error_text; -} - -Uint32 -DependencyTracker::get_conflict_count() const -{ +Uint32 DependencyTracker::get_conflict_count() const { return conflicting_trans_count; } diff --git a/storage/ndb/plugin/ndb_conflict_trans.h b/storage/ndb/plugin/ndb_conflict_trans.h index 0384369bacfe..309ef8a71eab 100644 --- a/storage/ndb/plugin/ndb_conflict_trans.h +++ b/storage/ndb/plugin/ndb_conflict_trans.h @@ -25,7 +25,6 @@ #ifndef NDB_CONFLICT_TRANS_H #define NDB_CONFLICT_TRANS_H - #include "storage/ndb/include/ndbapi/NdbApi.hpp" #include "storage/ndb/include/util/HashMap2.hpp" #include "storage/ndb/include/util/LinkedStack.hpp" @@ -48,15 +47,13 @@ * epoch affect the same row, which implies a dependency between * the transactions. 
*/ -class st_row_event_key_info -{ -public: +class st_row_event_key_info { + public: /** * User api */ - st_row_event_key_info(const NdbDictionary::Table* _table, - const uchar* _key_buff, - Uint32 _key_buff_len, + st_row_event_key_info(const NdbDictionary::Table *_table, + const uchar *_key_buff, Uint32 _key_buff_len, Uint64 _transaction_id); Uint64 getTransactionId() const; void updateRowTransactionId(Uint64 mostRecentTransId); @@ -65,24 +62,23 @@ class st_row_event_key_info * Hash Api */ Uint32 hashValue() const; - bool equal(const st_row_event_key_info* other) const; - st_row_event_key_info* getNext() const; - void setNext(st_row_event_key_info* _next); + bool equal(const st_row_event_key_info *other) const; + st_row_event_key_info *getNext() const; + void setNext(st_row_event_key_info *_next); -private: + private: /* Key : Table and Primary Key */ - const NdbDictionary::Table* tableObj; - const uchar* packed_key; + const NdbDictionary::Table *tableObj; + const uchar *packed_key; Uint32 packed_key_len; /* Data : Transaction id */ Uint64 transaction_id; /* Next ptr for hash */ - st_row_event_key_info* hash_next; + st_row_event_key_info *hash_next; }; - class st_transaction; /** @@ -91,62 +87,57 @@ class st_transaction; Describes inter-transaction dependency, and comprises part of list of other dependents of target_transaction */ -class st_trans_dependency -{ -public: +class st_trans_dependency { + public: /* User Api */ - st_trans_dependency(st_transaction* _target_transaction, - st_transaction* _dependent_transaction, - const st_trans_dependency* _next); - - st_transaction* getTargetTransaction() const; - st_transaction* getDependentTransaction() const; - const st_trans_dependency* getNextDependency() const; + st_trans_dependency(st_transaction *_target_transaction, + st_transaction *_dependent_transaction, + const st_trans_dependency *_next); + st_transaction *getTargetTransaction() const; + st_transaction *getDependentTransaction() const; + const st_trans_dependency *getNextDependency() const; /* Hash Api */ Uint32 hashValue() const; - bool equal(const st_trans_dependency* other) const; - st_trans_dependency* getNext() const; - void setNext(st_trans_dependency* _next); + bool equal(const st_trans_dependency *other) const; + st_trans_dependency *getNext() const; + void setNext(st_trans_dependency *_next); -private: + private: /* Key */ - st_transaction* target_transaction; - st_transaction* dependent_transaction; + st_transaction *target_transaction; + st_transaction *dependent_transaction; /* Rest of co-dependents of target_transaction */ - const st_trans_dependency* next_entry; + const st_trans_dependency *next_entry; - st_trans_dependency* hash_next; + st_trans_dependency *hash_next; }; - - /** st_transaction Entry in transaction hash, indicates whether transaction is in conflict, and has list of dependents */ -class st_transaction -{ -public: +class st_transaction { + public: /* User Api */ st_transaction(Uint64 _transaction_id); Uint64 getTransactionId() const; bool getInConflict() const; void setInConflict(); - const st_trans_dependency* getDependencyListHead() const; - void setDependencyListHead(st_trans_dependency* head); + const st_trans_dependency *getDependencyListHead() const; + void setDependencyListHead(st_trans_dependency *head); /* Hash Api */ Uint32 hashValue() const; - bool equal(const st_transaction* other) const; - st_transaction* getNext() const; - void setNext(st_transaction* _next); + bool equal(const st_transaction *other) const; + st_transaction *getNext() 
const; + void setNext(st_transaction *_next); -private: + private: /* Key */ Uint64 transaction_id; @@ -154,10 +145,10 @@ class st_transaction /* Is this transaction (and therefore its dependents) in conflict? */ bool in_conflict; /* Head of list of dependencies */ - st_trans_dependency* dependency_list_head; + st_trans_dependency *dependency_list_head; /* Hash ptr */ - st_transaction* hash_next; + st_transaction *hash_next; }; struct MEM_ROOT; @@ -166,20 +157,17 @@ struct MEM_ROOT; * Allocator type which internally uses a MySQLD mem_root * Used as a template parameter for Ndb ADTs */ -struct st_mem_root_allocator -{ - MEM_ROOT* mem_root; - - static void* alloc(void* ctx, size_t bytes); - static void* mem_calloc(void* ctx, size_t nelem, size_t bytes); - static void mem_free(void* ctx, void* mem); - st_mem_root_allocator(MEM_ROOT* _mem_root); -}; +struct st_mem_root_allocator { + MEM_ROOT *mem_root; + static void *alloc(void *ctx, size_t bytes); + static void *mem_calloc(void *ctx, size_t nelem, size_t bytes); + static void mem_free(void *ctx, void *mem); + st_mem_root_allocator(MEM_ROOT *_mem_root); +}; -class DependencyTracker -{ -public: +class DependencyTracker { + public: static const Uint64 InvalidTransactionId = ~Uint64(0); /** @@ -189,7 +177,7 @@ class DependencyTracker memory from the passed mem_root. To discard dependency tracker, just free the passed mem_root. */ - static DependencyTracker* newDependencyTracker(MEM_ROOT* mem_root); + static DependencyTracker *newDependencyTracker(MEM_ROOT *mem_root); /** track_operation @@ -202,9 +190,8 @@ class DependencyTracker passed table + primary key from a different transaction then a transaction dependency is recorded. */ - int track_operation(const NdbDictionary::Table* table, - const NdbRecord* key_rec, - const uchar* row, + int track_operation(const NdbDictionary::Table *table, + const NdbRecord *key_rec, const uchar *row, Uint64 transaction_id); /** @@ -230,7 +217,7 @@ class DependencyTracker Returns string containing error description. NULL if no error. */ - const char* get_error_text() const; + const char *get_error_text() const; /** get_conflict_count @@ -239,8 +226,8 @@ class DependencyTracker */ Uint32 get_conflict_count() const; -private: - DependencyTracker(MEM_ROOT* mem_root); + private: + DependencyTracker(MEM_ROOT *mem_root); /** get_or_create_transaction @@ -249,7 +236,7 @@ class DependencyTracker given transaction id. Returns Null on allocation failure. */ - st_transaction* get_or_create_transaction(Uint64 trans_id); + st_transaction *get_or_create_transaction(Uint64 trans_id); /** add_dependency @@ -275,8 +262,8 @@ class DependencyTracker include_dependents_of_current = false causes the traversal to skip dependents of the current node. */ - st_transaction* get_next_dependency(const st_transaction* current, - bool include_dependents_of_current = true); + st_transaction *get_next_dependency( + const st_transaction *current, bool include_dependents_of_current = true); /** dump_dependents @@ -332,7 +319,7 @@ class DependencyTracker Uint32 conflicting_trans_count; - const char* error_text; + const char *error_text; }; #endif diff --git a/storage/ndb/plugin/ndb_create_helper.h b/storage/ndb/plugin/ndb_create_helper.h index e489d2bb7c6e..80a1694606c4 100644 --- a/storage/ndb/plugin/ndb_create_helper.h +++ b/storage/ndb/plugin/ndb_create_helper.h @@ -57,7 +57,7 @@ class Ndb_create_helper { * and message will be pushed as warning before setting * the "Can't create table" error. 
   * @return error code to be returned as command result
-   */
+   */
  int failed(uint code, const char *message) const;

 public:
@@ -69,7 +69,7 @@ class Ndb_create_helper {
   * should already have been pushed. The "Can't create table" error
   * will be set
   * @return error code to be returned as command result
-   */
+   */
  int failed_warning_already_pushed() const;

  /**
@@ -77,15 +77,15 @@ class Ndb_create_helper {
   * error will be pushed as warning before setting the
   * "Can't create table" error.
   * @return error code to be returned as command result
-   */
-  int failed_in_NDB(const NdbError& ndb_err) const;
+   */
+  int failed_in_NDB(const NdbError &ndb_err) const;

  /**
   * @brief Failed to create the table due to some internal error.
   * The internal error code and message will be pushed as
   * warning before setting the "Can't create table" error.
   * @return error code to be returned as command result
-   */
+   */
  int failed_internal_error(const char *message) const;

  /**
@@ -93,7 +93,7 @@ class Ndb_create_helper {
   * The out of memory error code and message will be pushed as
   * warning before setting the "Can't create table" error.
   * @return error code to be returned as command result
-   */
+   */
  int failed_oom(const char *message) const;

  /**
@@ -107,7 +107,7 @@ class Ndb_create_helper {
   * longer, simply push a warning before calling this method.
   *
   * @return error code to be returned as command result
-   */
+   */
  int failed_illegal_create_option(const char *reason) const;

  /**
@@ -119,7 +119,7 @@ class Ndb_create_helper {
   * @description Describes which create option is missing.
   *
   * @return error code to be returned as command result
-   */
+   */
  int failed_missing_create_option(const char *description) const;

  /**
@@ -128,7 +128,6 @@ class Ndb_create_helper {
   * @return error code to be returned as command result
   */
  int succeeded();
-
};

#endif
diff --git a/storage/ndb/plugin/ndb_dd.cc b/storage/ndb/plugin/ndb_dd.cc
index 28960c2ea4a4..3c865d144084 100644
--- a/storage/ndb/plugin/ndb_dd.cc
+++ b/storage/ndb/plugin/ndb_dd.cc
@@ -42,11 +42,8 @@
 #include "storage/ndb/plugin/ndb_dd_table.h"
 #include "storage/ndb/plugin/ndb_name_util.h"

-bool ndb_sdi_serialize(THD *thd,
-                       const dd::Table *table_def,
-                       const char* schema_name_str,
-                       dd::sdi_t& sdi)
-{
+bool ndb_sdi_serialize(THD *thd, const dd::Table *table_def,
+                       const char *schema_name_str, dd::sdi_t &sdi) {
  const dd::String_type schema_name(schema_name_str);
  // Require the table to be visible, hidden by SE(like mysql.ndb_schema)
  // or else have temporary name
@@ -74,10 +71,9 @@ bool ndb_sdi_serialize(THD *thd,
  if (sdi.empty()) {
    return false;  // Failed to serialize
  }
-  return true; // OK
+  return true;  // OK
}

-
/*
  Workaround for BUG#25657041

@@ -91,9 +87,8 @@ bool ndb_sdi_serialize(THD *thd,
  visible table and restoring the original table name
*/

-void ndb_dd_fix_inplace_alter_table_def(dd::Table* table_def,
-                                        const char* proper_table_name)
-{
+void ndb_dd_fix_inplace_alter_table_def(dd::Table *table_def,
+                                        const char *proper_table_name) {
  DBUG_ENTER("ndb_dd_fix_inplace_alter_table_def");
  DBUG_PRINT("enter", ("table_name: %s", table_def->name().c_str()));
  DBUG_PRINT("enter", ("proper_table_name: %s", proper_table_name));
@@ -107,7 +102,6 @@ void ndb_dd_fix_inplace_alter_table_def(dd::Table* table_def,
  DBUG_VOID_RETURN;
}

-
/**
  Update the version of the Schema object in DD. All the DDLs
  creating/altering a database will be associated with a unique counter
@@ -127,14 +121,12 @@ void ndb_dd_fix_inplace_alter_table_def(dd::Table* table_def,

  @return true        On success.
@return false On failure */ -bool -ndb_dd_update_schema_version(THD *thd, const char* schema_name, - unsigned int counter, unsigned int node_id, - bool skip_commit) -{ +bool ndb_dd_update_schema_version(THD *thd, const char *schema_name, + unsigned int counter, unsigned int node_id, + bool skip_commit) { DBUG_ENTER("ndb_dd_update_schema_version"); - DBUG_PRINT("enter", ("Schema : %s, counter : %u, node_id : %u", - schema_name, counter, node_id)); + DBUG_PRINT("enter", ("Schema : %s, counter : %u, node_id : %u", schema_name, + counter, node_id)); Ndb_dd_client dd_client(thd); @@ -157,27 +149,23 @@ ndb_dd_update_schema_version(THD *thd, const char* schema_name, DBUG_RETURN(true); } - -bool ndb_dd_has_local_tables_in_schema(THD *thd, const char* schema_name, - bool &tables_exist_in_database) -{ +bool ndb_dd_has_local_tables_in_schema(THD *thd, const char *schema_name, + bool &tables_exist_in_database) { DBUG_ENTER("ndb_dd_has_tables_in_schema"); - DBUG_PRINT("enter", ("Checking if schema '%s' has local tables", - schema_name)); + DBUG_PRINT("enter", + ("Checking if schema '%s' has local tables", schema_name)); Ndb_dd_client dd_client(thd); /* Lock the schema in DD */ - if (!dd_client.mdl_lock_schema(schema_name)) - { + if (!dd_client.mdl_lock_schema(schema_name)) { DBUG_PRINT("error", ("Failed to MDL lock schema : '%s'", schema_name)); DBUG_RETURN(false); } /* Check if there are any local tables */ if (!dd_client.have_local_tables_in_schema(schema_name, - &tables_exist_in_database)) - { + &tables_exist_in_database)) { DBUG_PRINT("error", ("Failed to check if the Schema '%s' has any tables", schema_name)); DBUG_RETURN(false); @@ -186,11 +174,9 @@ bool ndb_dd_has_local_tables_in_schema(THD *thd, const char* schema_name, DBUG_RETURN(true); } - -const std::string ndb_dd_fs_name_case(const dd::String_type &name) -{ +const std::string ndb_dd_fs_name_case(const dd::String_type &name) { char name_buf[NAME_LEN + 1]; - const std::string lc_name = dd::Object_table_definition_impl::fs_name_case( - name, name_buf); + const std::string lc_name = + dd::Object_table_definition_impl::fs_name_case(name, name_buf); return lc_name; } diff --git a/storage/ndb/plugin/ndb_dd.h b/storage/ndb/plugin/ndb_dd.h index 478735f0e0f9..92d8771d0965 100644 --- a/storage/ndb/plugin/ndb_dd.h +++ b/storage/ndb/plugin/ndb_dd.h @@ -30,27 +30,23 @@ #include "sql/dd/string_type.h" namespace dd { - class Table; - typedef String_type sdi_t; -} +class Table; +typedef String_type sdi_t; +} // namespace dd class THD; -bool ndb_sdi_serialize(THD *thd, - const dd::Table *table_def, - const char* schema_name, - dd::sdi_t& sdi); - +bool ndb_sdi_serialize(THD *thd, const dd::Table *table_def, + const char *schema_name, dd::sdi_t &sdi); void ndb_dd_fix_inplace_alter_table_def(dd::Table *table_def, - const char* proper_table_name); + const char *proper_table_name); -bool ndb_dd_update_schema_version(THD *thd, const char* schema_name, +bool ndb_dd_update_schema_version(THD *thd, const char *schema_name, unsigned int counter, unsigned int node_id, bool skip_commit = false); -bool ndb_dd_has_local_tables_in_schema(THD *thd, - const char* schema_name, +bool ndb_dd_has_local_tables_in_schema(THD *thd, const char *schema_name, bool &tables_exist_in_database); const std::string ndb_dd_fs_name_case(const dd::String_type &name); diff --git a/storage/ndb/plugin/ndb_dd_client.cc b/storage/ndb/plugin/ndb_dd_client.cc index 971a6a51b022..db8d51cf21f0 100644 --- a/storage/ndb/plugin/ndb_dd_client.cc +++ b/storage/ndb/plugin/ndb_dd_client.cc @@ -39,8 +39,8 @@ 
#include "sql/sql_class.h" // THD #include "sql/sql_trigger.h" // remove_all_triggers_from_perfschema #include "sql/system_variables.h" -#include "sql/transaction.h" // trans_* -#include "storage/ndb/plugin/ndb_dd.h" // ndb_dd_fs_name_case +#include "sql/transaction.h" // trans_* +#include "storage/ndb/plugin/ndb_dd.h" // ndb_dd_fs_name_case #include "storage/ndb/plugin/ndb_dd_disk_data.h" #include "storage/ndb/plugin/ndb_dd_schema.h" #include "storage/ndb/plugin/ndb_dd_sdi.h" @@ -50,65 +50,51 @@ #include "storage/ndb/plugin/ndb_log.h" #include "storage/ndb/plugin/ndb_tdc.h" -Ndb_dd_client::Ndb_dd_client(THD* thd) : - m_thd(thd), - m_client(thd->dd_client()) -{ +Ndb_dd_client::Ndb_dd_client(THD *thd) + : m_thd(thd), m_client(thd->dd_client()) { disable_autocommit(); // Create dictionary client auto releaser, stored as // opaque pointer in order to avoid including all of // Dictionary_client in the ndb_dd_client header file m_auto_releaser = - (void*)new dd::cache::Dictionary_client::Auto_releaser(m_client); + (void *)new dd::cache::Dictionary_client::Auto_releaser(m_client); } - -Ndb_dd_client::~Ndb_dd_client() -{ - +Ndb_dd_client::~Ndb_dd_client() { // Automatically release acquired MDL locks mdl_locks_release(); // Automatically restore the option_bits in THD if they have // been modified - if (m_save_option_bits) - m_thd->variables.option_bits = m_save_option_bits; + if (m_save_option_bits) m_thd->variables.option_bits = m_save_option_bits; - if (m_auto_rollback) - { + if (m_auto_rollback) { // Automatically rollback unless commit has been called - if (!m_comitted) - rollback(); + if (!m_comitted) rollback(); } // Free the dictionary client auto releaser - dd::cache::Dictionary_client::Auto_releaser* ar = - (dd::cache::Dictionary_client::Auto_releaser*)m_auto_releaser; + dd::cache::Dictionary_client::Auto_releaser *ar = + (dd::cache::Dictionary_client::Auto_releaser *)m_auto_releaser; delete ar; } - -bool -Ndb_dd_client::mdl_lock_table(const char* schema_name, - const char* table_name) -{ +bool Ndb_dd_client::mdl_lock_table(const char *schema_name, + const char *table_name) { MDL_request_list mdl_requests; MDL_request schema_request; MDL_request mdl_request; - MDL_REQUEST_INIT(&schema_request, - MDL_key::SCHEMA, schema_name, "", MDL_INTENTION_EXCLUSIVE, - MDL_EXPLICIT); - MDL_REQUEST_INIT(&mdl_request, - MDL_key::TABLE, schema_name, table_name, MDL_SHARED, - MDL_EXPLICIT); + MDL_REQUEST_INIT(&schema_request, MDL_key::SCHEMA, schema_name, "", + MDL_INTENTION_EXCLUSIVE, MDL_EXPLICIT); + MDL_REQUEST_INIT(&mdl_request, MDL_key::TABLE, schema_name, table_name, + MDL_SHARED, MDL_EXPLICIT); mdl_requests.push_front(&schema_request); mdl_requests.push_front(&mdl_request); if (m_thd->mdl_context.acquire_locks(&mdl_requests, - m_thd->variables.lock_wait_timeout)) - { + m_thd->variables.lock_wait_timeout)) { return false; } @@ -119,7 +105,6 @@ Ndb_dd_client::mdl_lock_table(const char* schema_name, return true; } - /** Acquire MDL locks for the Schema. @@ -131,9 +116,8 @@ Ndb_dd_client::mdl_lock_table(const char* schema_name, @return true On success. 
@return false On failure */ -bool -Ndb_dd_client::mdl_lock_schema(const char* schema_name, bool exclusive_lock) -{ +bool Ndb_dd_client::mdl_lock_schema(const char *schema_name, + bool exclusive_lock) { MDL_request_list mdl_requests; MDL_request schema_request; MDL_request backup_lock_request; @@ -158,8 +142,7 @@ Ndb_dd_client::mdl_lock_schema(const char* schema_name, bool exclusive_lock) mdl_requests.push_front(&schema_request); if (m_thd->mdl_context.acquire_locks(&mdl_requests, - m_thd->variables.lock_wait_timeout)) - { + m_thd->variables.lock_wait_timeout)) { return false; } @@ -173,28 +156,23 @@ Ndb_dd_client::mdl_lock_schema(const char* schema_name, bool exclusive_lock) return true; } -bool -Ndb_dd_client::mdl_lock_logfile_group_exclusive(const char* logfile_group_name, - bool custom_lock_wait, - ulong lock_wait_timeout) -{ +bool Ndb_dd_client::mdl_lock_logfile_group_exclusive( + const char *logfile_group_name, bool custom_lock_wait, + ulong lock_wait_timeout) { MDL_request_list mdl_requests; MDL_request logfile_group_request; MDL_request backup_lock_request; MDL_request grl_request; // If protection against GRL can't be acquired, err out early. - if (m_thd->global_read_lock.can_acquire_protection()) - { + if (m_thd->global_read_lock.can_acquire_protection()) { return false; } - MDL_REQUEST_INIT(&logfile_group_request, - MDL_key::TABLESPACE, "", logfile_group_name, - MDL_EXCLUSIVE, MDL_EXPLICIT); - MDL_REQUEST_INIT(&backup_lock_request, - MDL_key::BACKUP_LOCK, "", "", MDL_INTENTION_EXCLUSIVE, - MDL_EXPLICIT); + MDL_REQUEST_INIT(&logfile_group_request, MDL_key::TABLESPACE, "", + logfile_group_name, MDL_EXCLUSIVE, MDL_EXPLICIT); + MDL_REQUEST_INIT(&backup_lock_request, MDL_key::BACKUP_LOCK, "", "", + MDL_INTENTION_EXCLUSIVE, MDL_EXPLICIT); MDL_REQUEST_INIT(&grl_request, MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE, MDL_EXPLICIT); @@ -202,13 +180,11 @@ Ndb_dd_client::mdl_lock_logfile_group_exclusive(const char* logfile_group_name, mdl_requests.push_front(&backup_lock_request); mdl_requests.push_front(&grl_request); - if (!custom_lock_wait) - { + if (!custom_lock_wait) { lock_wait_timeout = m_thd->variables.lock_wait_timeout; } - if (m_thd->mdl_context.acquire_locks(&mdl_requests, lock_wait_timeout)) - { + if (m_thd->mdl_context.acquire_locks(&mdl_requests, lock_wait_timeout)) { return false; } @@ -220,25 +196,20 @@ Ndb_dd_client::mdl_lock_logfile_group_exclusive(const char* logfile_group_name, return true; } - -bool -Ndb_dd_client::mdl_lock_logfile_group(const char* logfile_group_name, - bool intention_exclusive) -{ +bool Ndb_dd_client::mdl_lock_logfile_group(const char *logfile_group_name, + bool intention_exclusive) { MDL_request_list mdl_requests; MDL_request logfile_group_request; - enum_mdl_type mdl_type = intention_exclusive ? MDL_INTENTION_EXCLUSIVE : - MDL_SHARED_READ; - MDL_REQUEST_INIT(&logfile_group_request, - MDL_key::TABLESPACE, "", logfile_group_name, - mdl_type, MDL_EXPLICIT); + enum_mdl_type mdl_type = + intention_exclusive ? 
MDL_INTENTION_EXCLUSIVE : MDL_SHARED_READ; + MDL_REQUEST_INIT(&logfile_group_request, MDL_key::TABLESPACE, "", + logfile_group_name, mdl_type, MDL_EXPLICIT); mdl_requests.push_front(&logfile_group_request); if (m_thd->mdl_context.acquire_locks(&mdl_requests, - m_thd->variables.lock_wait_timeout)) - { + m_thd->variables.lock_wait_timeout)) { return false; } @@ -248,29 +219,23 @@ Ndb_dd_client::mdl_lock_logfile_group(const char* logfile_group_name, return true; } - -bool -Ndb_dd_client::mdl_lock_tablespace_exclusive(const char* tablespace_name, - bool custom_lock_wait, - ulong lock_wait_timeout) -{ +bool Ndb_dd_client::mdl_lock_tablespace_exclusive(const char *tablespace_name, + bool custom_lock_wait, + ulong lock_wait_timeout) { MDL_request_list mdl_requests; MDL_request tablespace_request; MDL_request backup_lock_request; MDL_request grl_request; // If protection against GRL can't be acquired, err out early. - if (m_thd->global_read_lock.can_acquire_protection()) - { + if (m_thd->global_read_lock.can_acquire_protection()) { return false; } - MDL_REQUEST_INIT(&tablespace_request, - MDL_key::TABLESPACE, "", tablespace_name, - MDL_EXCLUSIVE, MDL_EXPLICIT); - MDL_REQUEST_INIT(&backup_lock_request, - MDL_key::BACKUP_LOCK, "", "", MDL_INTENTION_EXCLUSIVE, - MDL_EXPLICIT); + MDL_REQUEST_INIT(&tablespace_request, MDL_key::TABLESPACE, "", + tablespace_name, MDL_EXCLUSIVE, MDL_EXPLICIT); + MDL_REQUEST_INIT(&backup_lock_request, MDL_key::BACKUP_LOCK, "", "", + MDL_INTENTION_EXCLUSIVE, MDL_EXPLICIT); MDL_REQUEST_INIT(&grl_request, MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE, MDL_EXPLICIT); @@ -278,13 +243,11 @@ Ndb_dd_client::mdl_lock_tablespace_exclusive(const char* tablespace_name, mdl_requests.push_front(&backup_lock_request); mdl_requests.push_front(&grl_request); - if (!custom_lock_wait) - { + if (!custom_lock_wait) { lock_wait_timeout = m_thd->variables.lock_wait_timeout; } - if (m_thd->mdl_context.acquire_locks(&mdl_requests, lock_wait_timeout)) - { + if (m_thd->mdl_context.acquire_locks(&mdl_requests, lock_wait_timeout)) { return false; } @@ -296,25 +259,20 @@ Ndb_dd_client::mdl_lock_tablespace_exclusive(const char* tablespace_name, return true; } - -bool -Ndb_dd_client::mdl_lock_tablespace(const char* tablespace_name, - bool intention_exclusive) -{ +bool Ndb_dd_client::mdl_lock_tablespace(const char *tablespace_name, + bool intention_exclusive) { MDL_request_list mdl_requests; MDL_request tablespace_request; - enum_mdl_type mdl_type = intention_exclusive ? MDL_INTENTION_EXCLUSIVE : - MDL_SHARED_READ; - MDL_REQUEST_INIT(&tablespace_request, - MDL_key::TABLESPACE, "", tablespace_name, - mdl_type, MDL_EXPLICIT); + enum_mdl_type mdl_type = + intention_exclusive ? 
MDL_INTENTION_EXCLUSIVE : MDL_SHARED_READ; + MDL_REQUEST_INIT(&tablespace_request, MDL_key::TABLESPACE, "", + tablespace_name, mdl_type, MDL_EXPLICIT); mdl_requests.push_front(&tablespace_request); if (m_thd->mdl_context.acquire_locks(&mdl_requests, - m_thd->variables.lock_wait_timeout)) - { + m_thd->variables.lock_wait_timeout)) { return false; } @@ -324,13 +282,10 @@ Ndb_dd_client::mdl_lock_tablespace(const char* tablespace_name, return true; } - -bool -Ndb_dd_client::mdl_locks_acquire_exclusive(const char* schema_name, - const char* table_name, - bool custom_lock_wait, - ulong lock_wait_timeout) -{ +bool Ndb_dd_client::mdl_locks_acquire_exclusive(const char *schema_name, + const char *table_name, + bool custom_lock_wait, + ulong lock_wait_timeout) { MDL_request_list mdl_requests; MDL_request schema_request; MDL_request mdl_request; @@ -340,15 +295,12 @@ Ndb_dd_client::mdl_locks_acquire_exclusive(const char* schema_name, // If we cannot acquire protection against GRL, err out early. if (m_thd->global_read_lock.can_acquire_protection()) return false; - MDL_REQUEST_INIT(&schema_request, - MDL_key::SCHEMA, schema_name, "", MDL_INTENTION_EXCLUSIVE, - MDL_EXPLICIT); - MDL_REQUEST_INIT(&mdl_request, - MDL_key::TABLE, schema_name, table_name, MDL_EXCLUSIVE, - MDL_EXPLICIT); - MDL_REQUEST_INIT(&backup_lock_request, - MDL_key::BACKUP_LOCK, "", "", MDL_INTENTION_EXCLUSIVE, - MDL_EXPLICIT); + MDL_REQUEST_INIT(&schema_request, MDL_key::SCHEMA, schema_name, "", + MDL_INTENTION_EXCLUSIVE, MDL_EXPLICIT); + MDL_REQUEST_INIT(&mdl_request, MDL_key::TABLE, schema_name, table_name, + MDL_EXCLUSIVE, MDL_EXPLICIT); + MDL_REQUEST_INIT(&backup_lock_request, MDL_key::BACKUP_LOCK, "", "", + MDL_INTENTION_EXCLUSIVE, MDL_EXPLICIT); MDL_REQUEST_INIT(&grl_request, MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE, MDL_EXPLICIT); @@ -357,13 +309,11 @@ Ndb_dd_client::mdl_locks_acquire_exclusive(const char* schema_name, mdl_requests.push_front(&backup_lock_request); mdl_requests.push_front(&grl_request); - if (!custom_lock_wait) - { + if (!custom_lock_wait) { lock_wait_timeout = m_thd->variables.lock_wait_timeout; } - if (m_thd->mdl_context.acquire_locks(&mdl_requests, lock_wait_timeout)) - { + if (m_thd->mdl_context.acquire_locks(&mdl_requests, lock_wait_timeout)) { return false; } @@ -376,18 +326,13 @@ Ndb_dd_client::mdl_locks_acquire_exclusive(const char* schema_name, return true; } - -void Ndb_dd_client::mdl_locks_release() -{ - for (MDL_ticket* ticket : m_acquired_mdl_tickets) - { +void Ndb_dd_client::mdl_locks_release() { + for (MDL_ticket *ticket : m_acquired_mdl_tickets) { m_thd->mdl_context.release_lock(ticket); } } - -void Ndb_dd_client::disable_autocommit() -{ +void Ndb_dd_client::disable_autocommit() { /* Implementation details from which storage the DD uses leaks out and the user of these functions magically need to turn auto commit @@ -403,40 +348,29 @@ void Ndb_dd_client::disable_autocommit() assert(m_thd->variables.option_bits); m_save_option_bits = m_thd->variables.option_bits; - m_thd->variables.option_bits&= ~OPTION_AUTOCOMMIT; - m_thd->variables.option_bits|= OPTION_NOT_AUTOCOMMIT; - + m_thd->variables.option_bits &= ~OPTION_AUTOCOMMIT; + m_thd->variables.option_bits |= OPTION_NOT_AUTOCOMMIT; } - -void Ndb_dd_client::commit() -{ +void Ndb_dd_client::commit() { trans_commit_stmt(m_thd); trans_commit(m_thd); m_comitted = true; } - -void Ndb_dd_client::rollback() -{ +void Ndb_dd_client::rollback() { trans_rollback_stmt(m_thd); trans_rollback(m_thd); } - -bool -Ndb_dd_client::get_engine(const char* 
schema_name, - const char* table_name, - dd::String_type* engine) -{ - const dd::Table *existing= nullptr; - if (m_client->acquire(schema_name, table_name, &existing)) - { +bool Ndb_dd_client::get_engine(const char *schema_name, const char *table_name, + dd::String_type *engine) { + const dd::Table *existing = nullptr; + if (m_client->acquire(schema_name, table_name, &existing)) { return false; } - if (existing == nullptr) - { + if (existing == nullptr) { // Table does not exist in DD return false; } @@ -446,40 +380,30 @@ Ndb_dd_client::get_engine(const char* schema_name, return true; } - -bool -Ndb_dd_client::rename_table(const char* old_schema_name, - const char* old_table_name, - const char* new_schema_name, - const char* new_table_name, - int new_table_id, int new_table_version, - Ndb_referenced_tables_invalidator *invalidator) -{ +bool Ndb_dd_client::rename_table( + const char *old_schema_name, const char *old_table_name, + const char *new_schema_name, const char *new_table_name, int new_table_id, + int new_table_version, Ndb_referenced_tables_invalidator *invalidator) { // Read new schema from DD - const dd::Schema *new_schema= nullptr; - if (m_client->acquire(new_schema_name, &new_schema)) - { + const dd::Schema *new_schema = nullptr; + if (m_client->acquire(new_schema_name, &new_schema)) { return false; } - if (new_schema == nullptr) - { + if (new_schema == nullptr) { // Database does not exist, unexpected DBUG_ASSERT(false); return false; } // Read table from DD - dd::Table *to_table_def= nullptr; + dd::Table *to_table_def = nullptr; if (m_client->acquire_for_modification(old_schema_name, old_table_name, &to_table_def)) return false; if (invalidator != nullptr && - !invalidator->fetch_referenced_tables_to_invalidate(old_schema_name, - old_table_name, - to_table_def, - true)) - { + !invalidator->fetch_referenced_tables_to_invalidate( + old_schema_name, old_table_name, to_table_def, true)) { return false; } @@ -487,21 +411,19 @@ Ndb_dd_client::rename_table(const char* old_schema_name, to_table_def->set_schema_id(new_schema->id()); to_table_def->set_name(new_table_name); - ndb_dd_table_set_object_id_and_version(to_table_def, - new_table_id, new_table_version); + ndb_dd_table_set_object_id_and_version(to_table_def, new_table_id, + new_table_version); // Rename foreign keys if (dd::rename_foreign_keys(m_thd, old_schema_name, old_table_name, - new_schema_name, to_table_def)) - { + new_schema_name, to_table_def)) { // Failed to rename foreign keys or commit/rollback, unexpected DBUG_ASSERT(false); return false; } // Save table in DD - if (m_client->update(to_table_def)) - { + if (m_client->update(to_table_def)) { // Failed to save, unexpected DBUG_ASSERT(false); return false; @@ -510,34 +432,28 @@ Ndb_dd_client::rename_table(const char* old_schema_name, return true; } - -bool -Ndb_dd_client::remove_table(const char* schema_name, - const char* table_name, - Ndb_referenced_tables_invalidator* invalidator) +bool Ndb_dd_client::remove_table(const char *schema_name, + const char *table_name, + Ndb_referenced_tables_invalidator *invalidator) { DBUG_ENTER("Ndb_dd_client::remove_table"); DBUG_PRINT("enter", ("schema_name: '%s', table_name: '%s'", schema_name, table_name)); - const dd::Table *existing= nullptr; - if (m_client->acquire(schema_name, table_name, &existing)) - { + const dd::Table *existing = nullptr; + if (m_client->acquire(schema_name, table_name, &existing)) { DBUG_RETURN(false); } - if (existing == nullptr) - { + if (existing == nullptr) { // Table does not exist 
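    // in the DD, so there is nothing to remove; returning true below
    // keeps the remove operation idempotent for callers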
DBUG_RETURN(true); } if (invalidator != nullptr && - !invalidator->fetch_referenced_tables_to_invalidate(schema_name, - table_name, - existing, true)) - { + !invalidator->fetch_referenced_tables_to_invalidate( + schema_name, table_name, existing, true)) { DBUG_RETURN(false); } @@ -547,33 +463,27 @@ Ndb_dd_client::remove_table(const char* schema_name, #endif DBUG_PRINT("info", ("removing existing table")); - if (m_client->drop(existing)) - { + if (m_client->drop(existing)) { // Failed to remove existing - DBUG_ASSERT(false); // Catch in debug, unexpected error + DBUG_ASSERT(false); // Catch in debug, unexpected error DBUG_RETURN(false); } DBUG_RETURN(true); } - -bool -Ndb_dd_client::store_table(dd::Table* install_table, int ndb_table_id) -{ +bool Ndb_dd_client::store_table(dd::Table *install_table, int ndb_table_id) { DBUG_ENTER("Ndb_dd_client::store_table"); - if (!m_client->store(install_table)) - { - DBUG_RETURN(true); // OK + if (!m_client->store(install_table)) { + DBUG_RETURN(true); // OK } DBUG_PRINT("error", ("Failed to store table, error: '%d %s'", m_thd->get_stmt_da()->mysql_errno(), m_thd->get_stmt_da()->message_text())); - if (m_thd->get_stmt_da()->mysql_errno() == ER_DUP_ENTRY) - { + if (m_thd->get_stmt_da()->mysql_errno() == ER_DUP_ENTRY) { // Try to handle the failure which may occur when the DD already // have a table definition from an old NDB table which used the // same table id but with a different name. @@ -588,56 +498,48 @@ Ndb_dd_client::store_table(dd::Table* install_table, int ndb_table_id) m_thd->clear_error(); // Find old table using the NDB tables id - dd::Table* old_table_def; - if (m_client->acquire_uncached_table_by_se_private_id("ndbcluster", - ndb_table_id, - &old_table_def)) - { + dd::Table *old_table_def; + if (m_client->acquire_uncached_table_by_se_private_id( + "ndbcluster", ndb_table_id, &old_table_def)) { // There was no old table DBUG_RETURN(false); } // Double check that old table is in NDB - if (old_table_def->engine() != "ndbcluster") - { + if (old_table_def->engine() != "ndbcluster") { DBUG_ASSERT(false); DBUG_RETURN(false); } // Lookup schema name of old table dd::Schema *old_schema; - if (m_client->acquire_uncached(old_table_def->schema_id(), &old_schema)) - { + if (m_client->acquire_uncached(old_table_def->schema_id(), &old_schema)) { DBUG_RETURN(false); } - if (old_schema == nullptr) - { - DBUG_ASSERT(false); // Database does not exist + if (old_schema == nullptr) { + DBUG_ASSERT(false); // Database does not exist DBUG_RETURN(false); } - const char* old_schema_name = old_schema->name().c_str(); - const char* old_table_name = old_table_def->name().c_str(); + const char *old_schema_name = old_schema->name().c_str(); + const char *old_table_name = old_table_def->name().c_str(); DBUG_PRINT("info", ("Found old table '%s.%s', will try to remove it", old_schema_name, old_table_name)); // Take exclusive locks on old table - if (!mdl_locks_acquire_exclusive(old_schema_name, old_table_name)) - { + if (!mdl_locks_acquire_exclusive(old_schema_name, old_table_name)) { // Failed to MDL lock old table DBUG_RETURN(false); } - if (!remove_table(old_schema_name, old_table_name)) - { + if (!remove_table(old_schema_name, old_table_name)) { // Failed to remove old table from DD DBUG_RETURN(false); } // Try to store the new table again - if (m_client->store(install_table)) - { + if (m_client->store(install_table)) { DBUG_PRINT("error", ("Failed to store table, error: '%d %s'", m_thd->get_stmt_da()->mysql_errno(), m_thd->get_stmt_da()->message_text())); @@ -652,32 
+554,23 @@ Ndb_dd_client::store_table(dd::Table* install_table, int ndb_table_id)
  DBUG_RETURN(false);
}

+bool Ndb_dd_client::install_table(
+    const char *schema_name, const char *table_name, const dd::sdi_t &sdi,
+    int ndb_table_id, int ndb_table_version, size_t ndb_num_partitions,
+    const std::string &tablespace_name, bool force_overwrite,
+    Ndb_referenced_tables_invalidator *invalidator) {
+  const dd::Schema *schema = nullptr;
-
-bool
-Ndb_dd_client::install_table(const char* schema_name, const char* table_name,
-                             const dd::sdi_t& sdi,
-                             int ndb_table_id, int ndb_table_version,
-                             size_t ndb_num_partitions,
-                             const std::string &tablespace_name,
-                             bool force_overwrite,
-                             Ndb_referenced_tables_invalidator *invalidator)
-{
-  const dd::Schema *schema= nullptr;
-
-  if (m_client->acquire(schema_name, &schema))
-  {
+  if (m_client->acquire(schema_name, &schema)) {
    return false;
  }
-  if (schema == nullptr)
-  {
-    DBUG_ASSERT(false); // Database does not exist
+  if (schema == nullptr) {
+    DBUG_ASSERT(false);  // Database does not exist
    return false;
  }

  std::unique_ptr<dd::Table> install_table{dd::create_object<dd::Table>()};
-  if (ndb_dd_sdi_deserialize(m_thd, sdi, install_table.get()))
-  {
+  if (ndb_dd_sdi_deserialize(m_thd, sdi, install_table.get())) {
    return false;
  }

@@ -695,53 +588,43 @@ Ndb_dd_client::install_table(const char* schema_name, const char* table_name,
  install_table->set_schema_id(schema->id());

  // Assign NDB id and version of the table
-  ndb_dd_table_set_object_id_and_version(install_table.get(),
-                                         ndb_table_id, ndb_table_version);
+  ndb_dd_table_set_object_id_and_version(install_table.get(), ndb_table_id,
+                                         ndb_table_version);

  // Check if the DD table object has the correct number of partitions.
  // Correct the number of partitions in the DD table object in case of
  // a mismatch
-  const bool check_partition_count_result =
-      ndb_dd_table_check_partition_count(install_table.get(),
-                                         ndb_num_partitions);
-  if (!check_partition_count_result)
-  {
-    ndb_dd_table_fix_partition_count(install_table.get(),
-                                     ndb_num_partitions);
+  const bool check_partition_count_result = ndb_dd_table_check_partition_count(
+      install_table.get(), ndb_num_partitions);
+  if (!check_partition_count_result) {
+    ndb_dd_table_fix_partition_count(install_table.get(), ndb_num_partitions);
  }

  // Set the tablespace id if applicable
-  if (!tablespace_name.empty())
-  {
+  if (!tablespace_name.empty()) {
    dd::Object_id tablespace_id;
-    if (!lookup_tablespace_id(tablespace_name.c_str(), &tablespace_id))
-    {
+    if (!lookup_tablespace_id(tablespace_name.c_str(), &tablespace_id)) {
      return false;
    }
    ndb_dd_table_set_tablespace_id(install_table.get(), tablespace_id);
  }

-  const dd::Table *existing= nullptr;
-  if (m_client->acquire(schema_name, table_name, &existing))
-  {
+  const dd::Table *existing = nullptr;
+  if (m_client->acquire(schema_name, table_name, &existing)) {
    return false;
  }

  if (invalidator != nullptr &&
-      !invalidator->fetch_referenced_tables_to_invalidate(schema_name,
-                                                          table_name,
-                                                          existing))
-  {
+      !invalidator->fetch_referenced_tables_to_invalidate(
+          schema_name, table_name, existing)) {
    return false;
  }

-  if (existing != nullptr)
-  {
+  if (existing != nullptr) {
    // Get id and version of existing table
    int object_id, object_version;
-    if (!ndb_dd_table_get_object_id_and_version(existing,
-                                                object_id, object_version))
-    {
+    if (!ndb_dd_table_get_object_id_and_version(existing, object_id,
+                                                object_version)) {
      DBUG_PRINT("error", ("Could not extract object_id and object_version "
                           "from table definition"));
      DBUG_ASSERT(false);
@@ -751,18 +634,14 @@ Ndb_dd_client::install_table(const char* schema_name, const char* table_name,
    // Check that id and version of the existing table in DD
    // matches NDB, otherwise it's a programming error
    // not to request "force_overwrite"
-    if (ndb_table_id == object_id &&
-        ndb_table_version == object_version)
-    {
-
+    if (ndb_table_id == object_id && ndb_table_version == object_version) {
      // Table is already installed, with same id and version
      // return success
      return true;
    }

    // Table already exists
-    if (!force_overwrite)
-    {
+    if (!force_overwrite) {
      // Don't overwrite existing table
      DBUG_ASSERT(false);
      return false;
@@ -771,16 +650,14 @@ Ndb_dd_client::install_table(const char* schema_name, const char* table_name,
    // Continue and remove the old table before
    // installing the new
    DBUG_PRINT("info", ("dropping existing table"));
-    if (m_client->drop(existing))
-    {
+    if (m_client->drop(existing)) {
      // Failed to drop existing
-      DBUG_ASSERT(false); // Catch in debug, unexpected error
+      DBUG_ASSERT(false);  // Catch in debug, unexpected error
      return false;
    }
  }

-  if (!store_table(install_table.get(), ndb_table_id))
-  {
+  if (!store_table(install_table.get(), ndb_table_id)) {
    ndb_log_error("Failed to store table: '%s.%s'", schema_name, table_name);
    ndb_log_error_dump("sdi for new table: %s",
                       ndb_dd_sdi_prettify(sdi).c_str());
@@ -794,69 +671,52 @@ Ndb_dd_client::install_table(const char* schema_name, const char* table_name,
    return false;
  }

-  return true; // OK
+  return true;  // OK
}

-
-bool
-Ndb_dd_client::migrate_table(const char* schema_name, const char* table_name,
-                             const unsigned char* frm_data,
-                             unsigned int unpacked_len,
-                             bool force_overwrite)
-{
-  if (force_overwrite)
-  {
+bool Ndb_dd_client::migrate_table(const char *schema_name,
+                                  const char *table_name,
+                                  const unsigned char *frm_data,
+                                  unsigned int unpacked_len,
+                                  bool force_overwrite) {
+  if (force_overwrite) {
    // Remove the old table before migrating
    DBUG_PRINT("info", ("dropping existing table"));
-    if (!mdl_locks_acquire_exclusive(schema_name, table_name))
-    {
+    if (!mdl_locks_acquire_exclusive(schema_name, table_name)) {
      return false;
    }
-    if (!remove_table(schema_name, table_name))
-    {
+    if (!remove_table(schema_name, table_name)) {
      return false;
    }
    commit();
-
  }

-  const bool migrate_result=
-      dd::ndb_upgrade::migrate_table_to_dd(m_thd, schema_name,
-                                           table_name, frm_data,
-                                           unpacked_len,
-                                           false);
+  const bool migrate_result = dd::ndb_upgrade::migrate_table_to_dd(
+      m_thd, schema_name, table_name, frm_data, unpacked_len, false);

  return migrate_result;
}

-bool
-Ndb_dd_client::get_table(const char *schema_name, const char *table_name,
-                         const dd::Table **table_def)
-{
-  if (m_client->acquire(schema_name, table_name, table_def))
-  {
+bool Ndb_dd_client::get_table(const char *schema_name, const char *table_name,
+                              const dd::Table **table_def) {
+  if (m_client->acquire(schema_name, table_name, table_def)) {
    my_error(ER_NO_SUCH_TABLE, MYF(0), schema_name, table_name);
    return false;
  }
  return true;
}

-
-bool
-Ndb_dd_client::table_exists(const char *schema_name, const char *table_name,
-                            bool &exists)
-{
+bool Ndb_dd_client::table_exists(const char *schema_name,
+                                 const char *table_name, bool &exists) {
  const dd::Table *table;
-  if (m_client->acquire(schema_name, table_name, &table))
-  {
+  if (m_client->acquire(schema_name, table_name, &table)) {
    // Failed to acquire the requested table
    return false;
  }
-  if (table == nullptr)
-  {
+  if (table == nullptr) {
    // The table doesn't exist
    exists = false;
    return true;
@@ -867,27 +727,21 @@ Ndb_dd_client::table_exists(const char *schema_name, const char *table_name,
  return true;
}

-
-bool
-Ndb_dd_client::set_tablespace_id_in_table(const char *schema_name,
-                                          const char *table_name,
-                                          dd::Object_id tablespace_id)
-{
+bool Ndb_dd_client::set_tablespace_id_in_table(const char *schema_name,
+                                               const char *table_name,
+                                               dd::Object_id tablespace_id) {
  dd::Table *table_def = nullptr;
-  if (m_client->acquire_for_modification(schema_name, table_name, &table_def))
-  {
+  if (m_client->acquire_for_modification(schema_name, table_name, &table_def)) {
    return false;
  }
-  if (table_def == nullptr)
-  {
+  if (table_def == nullptr) {
    DBUG_ASSERT(false);
    return false;
  }

  ndb_dd_table_set_tablespace_id(table_def, tablespace_id);

-  if (m_client->update(table_def))
-  {
+  if (m_client->update(table_def)) {
    return false;
  }
  return true;
@@ -917,81 +771,64 @@ bool Ndb_dd_client::set_object_id_and_version_in_table(const char *schema_name,
  return true;
}

-bool
-Ndb_dd_client::fetch_all_schemas(
-    std::map<std::string, const dd::Schema *> &schemas) {
+bool Ndb_dd_client::fetch_all_schemas(
+    std::map<std::string, const dd::Schema *> &schemas) {
  DBUG_ENTER("Ndb_dd_client::fetch_all_schemas");

-  std::vector<const dd::Schema *> schemas_list;
-  if (m_client->fetch_global_components(&schemas_list))
-  {
+  std::vector<const dd::Schema *> schemas_list;
+  if (m_client->fetch_global_components(&schemas_list)) {
    DBUG_PRINT("error", ("Failed to fetch all schemas"));
    DBUG_RETURN(false);
  }

-  for (const dd::Schema* schema : schemas_list)
-  {
+  for (const dd::Schema *schema : schemas_list) {
    schemas.insert(std::make_pair(schema->name().c_str(), schema));
  }
  DBUG_RETURN(true);
}

-
-bool
-Ndb_dd_client::fetch_schema_names(std::vector<std::string>* names)
-{
+bool Ndb_dd_client::fetch_schema_names(std::vector<std::string> *names) {
  DBUG_ENTER("Ndb_dd_client::fetch_schema_names");

-  std::vector<const dd::Schema*> schemas;
-  if (m_client->fetch_global_components(&schemas))
-  {
+  std::vector<const dd::Schema *> schemas;
+  if (m_client->fetch_global_components(&schemas)) {
    DBUG_RETURN(false);
  }

-  for (const dd::Schema* schema : schemas)
-  {
+  for (const dd::Schema *schema : schemas) {
    names->push_back(schema->name().c_str());
  }
  DBUG_RETURN(true);
}

-
-bool
-Ndb_dd_client::get_ndb_table_names_in_schema(const char* schema_name,
-                                             std::unordered_set<std::string>* names)
-{
+bool Ndb_dd_client::get_ndb_table_names_in_schema(
+    const char *schema_name, std::unordered_set<std::string> *names) {
  DBUG_ENTER("Ndb_dd_client::get_ndb_table_names_in_schema");

-  const dd::Schema* schema;
-  if (m_client->acquire(schema_name, &schema))
-  {
+  const dd::Schema *schema;
+  if (m_client->acquire(schema_name, &schema)) {
    // Failed to open the requested Schema object
    DBUG_RETURN(false);
  }

-  if (schema == nullptr)
-  {
+  if (schema == nullptr) {
    // Database does not exist
    DBUG_RETURN(false);
  }

-  std::vector<const dd::Table*> tables;
-  if (m_client->fetch_schema_components(schema, &tables))
-  {
+  std::vector<const dd::Table *> tables;
+  if (m_client->fetch_schema_components(schema, &tables)) {
    DBUG_RETURN(false);
  }

-  for (const dd::Table* table: tables)
-  {
-    if (table->engine() != "ndbcluster")
-    {
+  for (const dd::Table *table : tables) {
+    if (table->engine() != "ndbcluster") {
      // Skip non NDB tables
      continue;
    }

    // Lock the table in DD
-    if (!mdl_lock_table(schema_name, table->name().c_str()))
-    {
+    if (!mdl_lock_table(schema_name, table->name().c_str())) {
      // Failed to MDL lock table
      DBUG_RETURN(false);
    }
@@ -1004,57 +841,45 @@ Ndb_dd_client::get_ndb_table_names_in_schema(const char* schema_name,
  DBUG_RETURN(true);
}

-
-bool
-Ndb_dd_client::get_table_names_in_schema(
-    const char* schema_name, std::unordered_set<std::string> *ndb_tables,
-    std::unordered_set<std::string> *local_tables)
-{
+bool Ndb_dd_client::get_table_names_in_schema(
+    const char *schema_name, std::unordered_set<std::string> *ndb_tables,
+    std::unordered_set<std::string> *local_tables) {
  DBUG_ENTER("Ndb_dd_client::get_table_names_in_schema");

  const dd::Schema *schema;
-  if (m_client->acquire(schema_name, &schema))
-  {
+  if (m_client->acquire(schema_name, &schema)) {
    // Failed to open the requested Schema object
    DBUG_RETURN(false);
  }
-  if (schema == nullptr)
-  {
+  if (schema == nullptr) {
    // Database does not exist
    DBUG_RETURN(false);
  }

-  std::vector<const dd::Table*> tables;
-  if (m_client->fetch_schema_components(schema, &tables))
-  {
+  std::vector<const dd::Table *> tables;
+  if (m_client->fetch_schema_components(schema, &tables)) {
    DBUG_RETURN(false);
  }

-  for (const dd::Table *table: tables)
-  {
+  for (const dd::Table *table : tables) {
    // Lock the table in DD
-    if (!mdl_lock_table(schema_name, table->name().c_str()))
-    {
+    if (!mdl_lock_table(schema_name, table->name().c_str())) {
      // Failed to acquire MDL
      DBUG_RETURN(false);
    }
    // Convert the table name to lower case on platforms that have
    // lower_case_table_names set to 2
    const std::string table_name = ndb_dd_fs_name_case(table->name());
-    if (table->engine() == "ndbcluster")
-    {
+    if (table->engine() == "ndbcluster") {
      ndb_tables->insert(table_name);
-    }
-    else
-    {
+    } else {
      local_tables->insert(table_name);
    }
  }

  DBUG_RETURN(true);
}

-
/*
  Check given schema for local tables (i.e. not in NDB)

@@ -1066,29 +891,24 @@ Ndb_dd_client::get_table_names_in_schema(
  @return true Success.
*/
-bool
-Ndb_dd_client::have_local_tables_in_schema(const char* schema_name,
-                                           bool* found_local_tables)
-{
+bool Ndb_dd_client::have_local_tables_in_schema(const char *schema_name,
+                                                bool *found_local_tables) {
  DBUG_ENTER("Ndb_dd_client::have_local_tables_in_schema");

-  const dd::Schema* schema;
-  if (m_client->acquire(schema_name, &schema))
-  {
+  const dd::Schema *schema;
+  if (m_client->acquire(schema_name, &schema)) {
    // Failed to open the requested schema
    DBUG_RETURN(false);
  }

-  if (schema == nullptr)
-  {
+  if (schema == nullptr) {
    // The schema didn't exist, thus it can't have any local tables
    *found_local_tables = false;
    DBUG_RETURN(true);
  }

-  std::vector<const dd::Table*> tables;
-  if (m_client->fetch_schema_components(schema, &tables))
-  {
+  std::vector<const dd::Table *> tables;
+  if (m_client->fetch_schema_components(schema, &tables)) {
    DBUG_RETURN(false);
  }

@@ -1096,10 +916,8 @@ Ndb_dd_client::have_local_tables_in_schema(const char* schema_name,
  // return on first table not in NDB
  *found_local_tables = false;
-  for (const dd::Table* table: tables)
-  {
-    if (table->engine() != "ndbcluster")
-    {
+  for (const dd::Table *table : tables) {
+    if (table->engine() != "ndbcluster") {
      // Found local table
      *found_local_tables = true;
      break;
@@ -1109,18 +927,14 @@ Ndb_dd_client::have_local_tables_in_schema(const char* schema_name,
  DBUG_RETURN(true);
}

-
-bool Ndb_dd_client::is_local_table(const char* schema_name,
-                                   const char* table_name, bool &local_table)
-{
+bool Ndb_dd_client::is_local_table(const char *schema_name,
+                                   const char *table_name, bool &local_table) {
  const dd::Table *table;
-  if (m_client->acquire(schema_name, table_name, &table))
-  {
+  if (m_client->acquire(schema_name, table_name, &table)) {
    // Failed to acquire the requested table
    return false;
  }
-  if (table == nullptr)
-  {
+  if (table == nullptr) {
    // The table doesn't exist
    DBUG_ASSERT(false);
    return false;
@@ -1129,22 +943,17 @@ bool Ndb_dd_client::is_local_table(const char* schema_name,
  return true;
}

-
-bool
-Ndb_dd_client::schema_exists(const char* schema_name,
-                             bool* schema_exists)
-{
+bool
Ndb_dd_client::schema_exists(const char *schema_name, + bool *schema_exists) { DBUG_ENTER("Ndb_dd_client::schema_exists"); - const dd::Schema* schema; - if (m_client->acquire(schema_name, &schema)) - { + const dd::Schema *schema; + if (m_client->acquire(schema_name, &schema)) { // Failed to open the requested schema DBUG_RETURN(false); } - if (schema == nullptr) - { + if (schema == nullptr) { // The schema didn't exist *schema_exists = false; DBUG_RETURN(true); @@ -1155,24 +964,20 @@ Ndb_dd_client::schema_exists(const char* schema_name, DBUG_RETURN(true); } - -bool -Ndb_dd_client::update_schema_version(const char* schema_name, - unsigned int counter, - unsigned int node_id) -{ +bool Ndb_dd_client::update_schema_version(const char *schema_name, + unsigned int counter, + unsigned int node_id) { DBUG_ENTER("Ndb_dd_client::update_schema_version"); - DBUG_PRINT("enter", ("Schema : %s, counter : %u, node_id : %u", - schema_name, counter, node_id)); + DBUG_PRINT("enter", ("Schema : %s, counter : %u, node_id : %u", schema_name, + counter, node_id)); DBUG_ASSERT(m_thd->mdl_context.owns_equal_or_stronger_lock( - MDL_key::SCHEMA, schema_name, "", MDL_EXCLUSIVE)); + MDL_key::SCHEMA, schema_name, "", MDL_EXCLUSIVE)); dd::Schema *schema; if (m_client->acquire_for_modification(schema_name, &schema) || - schema == nullptr) - { + schema == nullptr) { DBUG_PRINT("error", ("Failed to fetch the Schema object")); DBUG_RETURN(false); } @@ -1181,8 +986,7 @@ Ndb_dd_client::update_schema_version(const char* schema_name, ndb_dd_schema_set_counter_and_nodeid(schema, counter, node_id); // Update Schema in DD - if (m_client->update(schema)) - { + if (m_client->update(schema)) { DBUG_PRINT("error", ("Failed to update the Schema in DD")); DBUG_RETURN(false); } @@ -1190,28 +994,22 @@ Ndb_dd_client::update_schema_version(const char* schema_name, DBUG_RETURN(true); } - -bool Ndb_dd_client::lookup_tablespace_id(const char* tablespace_name, - dd::Object_id* tablespace_id) -{ +bool Ndb_dd_client::lookup_tablespace_id(const char *tablespace_name, + dd::Object_id *tablespace_id) { DBUG_ENTER("lookup_tablespace_id"); DBUG_PRINT("enter", ("tablespace_name: %s", tablespace_name)); DBUG_ASSERT(m_thd->mdl_context.owns_equal_or_stronger_lock( - MDL_key::TABLESPACE, - "", tablespace_name, - MDL_INTENTION_EXCLUSIVE)); + MDL_key::TABLESPACE, "", tablespace_name, MDL_INTENTION_EXCLUSIVE)); // Acquire tablespace. - const dd::Tablespace* ts_obj= NULL; - if (m_client->acquire(tablespace_name, &ts_obj)) - { + const dd::Tablespace *ts_obj = NULL; + if (m_client->acquire(tablespace_name, &ts_obj)) { // acquire() always fails with an error being reported. 
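  // (the failure is already recorded in the diagnostics area, so this
  // path just propagates it; only the "tablespace missing" case below
  // needs to raise its own error)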
    DBUG_RETURN(false);
  }
-  if (!ts_obj)
-  {
+  if (!ts_obj) {
    my_error(ER_TABLESPACE_MISSING_WITH_NAME, MYF(0), tablespace_name);
    DBUG_RETURN(false);
  }
@@ -1222,31 +1020,23 @@ bool Ndb_dd_client::lookup_tablespace_id(const char* tablespace_name,
  DBUG_RETURN(true);
}

-
-bool
-Ndb_dd_client::get_tablespace(const char *tablespace_name,
-                              const dd::Tablespace **tablespace_def)
-{
-  if (m_client->acquire(tablespace_name, tablespace_def))
-  {
+bool Ndb_dd_client::get_tablespace(const char *tablespace_name,
+                                   const dd::Tablespace **tablespace_def) {
+  if (m_client->acquire(tablespace_name, tablespace_def)) {
    return false;
  }
  return true;
}

-
-bool
-Ndb_dd_client::tablespace_exists(const char* tablespace_name, bool& exists)
-{
-  const dd::Tablespace* tablespace;
-  if (m_client->acquire(tablespace_name, &tablespace))
-  {
+bool Ndb_dd_client::tablespace_exists(const char *tablespace_name,
+                                      bool &exists) {
+  const dd::Tablespace *tablespace;
+  if (m_client->acquire(tablespace_name, &tablespace)) {
    // Failed to acquire the requested tablespace
    return false;
  }
-  if (tablespace == nullptr)
-  {
+  if (tablespace == nullptr) {
    // The tablespace doesn't exist
    exists = false;
    return true;
@@ -1257,22 +1047,17 @@ Ndb_dd_client::tablespace_exists(const char* tablespace_name, bool& exists)
  return true;
}

-
bool Ndb_dd_client::fetch_ndb_tablespace_names(
-    std::unordered_set<std::string>& names)
-{
+    std::unordered_set<std::string> &names) {
  DBUG_ENTER("Ndb_dd_client::fetch_ndb_tablespace_names");

-  std::vector<const dd::Tablespace*> tablespaces;
-  if (m_client->fetch_global_components(&tablespaces))
-  {
+  std::vector<const dd::Tablespace *> tablespaces;
+  if (m_client->fetch_global_components(&tablespaces)) {
    DBUG_RETURN(false);
  }

-  for (const dd::Tablespace* tablespace: tablespaces)
-  {
-    if (tablespace->engine() != "ndbcluster")
-    {
+  for (const dd::Tablespace *tablespace : tablespaces) {
+    if (tablespace->engine() != "ndbcluster") {
      // Skip non-NDB objects
      continue;
    }
@@ -1282,16 +1067,14 @@ bool Ndb_dd_client::fetch_ndb_tablespace_names(
    ndb_dd_disk_data_get_object_type(tablespace->se_private_data(), type);

-    if (type != object_type::TABLESPACE)
-    {
+    if (type != object_type::TABLESPACE) {
      // Skip logfile groups
      continue;
    }

    // Acquire lock in DD
    if (!mdl_lock_tablespace(tablespace->name().c_str(),
-                             false /* intention_exclusive */))
-    {
+                             false /* intention_exclusive */)) {
      // Failed to acquire MDL lock
      DBUG_RETURN(false);
    }
@@ -1301,44 +1084,33 @@ bool Ndb_dd_client::fetch_ndb_tablespace_names(
  DBUG_RETURN(true);
}

-
-bool
-Ndb_dd_client::install_tablespace(const char* tablespace_name,
-                                  const std::vector<std::string>&
-                                      data_file_names,
-                                  int tablespace_id,
-                                  int tablespace_version,
-                                  bool force_overwrite)
-{
+bool Ndb_dd_client::install_tablespace(
+    const char *tablespace_name,
+    const std::vector<std::string> &data_file_names, int tablespace_id,
+    int tablespace_version, bool force_overwrite) {
  DBUG_ENTER("Ndb_dd_client::install_tablespace");

  bool exists;
-  if (!tablespace_exists(tablespace_name, exists))
-  {
+  if (!tablespace_exists(tablespace_name, exists)) {
    // Could not detect if the tablespace exists or not
    DBUG_RETURN(false);
  }

-  if (exists)
-  {
-    if (force_overwrite)
-    {
-      if (!drop_tablespace(tablespace_name))
-      {
+  if (exists) {
+    if (force_overwrite) {
+      if (!drop_tablespace(tablespace_name)) {
        // Failed to drop tablespace
        DBUG_RETURN(false);
      }
-    }
-    else
-    {
+    } else {
      // Error since tablespace exists but force_overwrite not set by caller
      // No point continuing since the subsequent store() will fail
      DBUG_RETURN(false);
    }
  }

-  std::unique_ptr<dd::Tablespace>
-      tablespace(dd::create_object<dd::Tablespace>());
+  std::unique_ptr<dd::Tablespace> tablespace(
+      dd::create_object<dd::Tablespace>());

  // Set name
  tablespace->set_name(tablespace_name);
@@ -1347,15 +1119,12 @@ Ndb_dd_client::install_tablespace(const char* tablespace_name,
  tablespace->set_engine("ndbcluster");

  // Add data files
-  for (const auto data_file_name : data_file_names)
-  {
-    ndb_dd_disk_data_add_file(tablespace.get(),
-                              data_file_name.c_str());
+  for (const auto data_file_name : data_file_names) {
+    ndb_dd_disk_data_add_file(tablespace.get(), data_file_name.c_str());
  }

  // Assign id and version
-  ndb_dd_disk_data_set_object_id_and_version(tablespace.get(),
-                                             tablespace_id,
+  ndb_dd_disk_data_set_object_id_and_version(tablespace.get(), tablespace_id,
                                             tablespace_version);

  // Assign object type as tablespace
@@ -1363,73 +1132,56 @@ Ndb_dd_client::install_tablespace(const char* tablespace_name,
                                   object_type::TABLESPACE);

  // Write changes to dictionary.
-  if (m_client->store(tablespace.get()))
-  {
+  if (m_client->store(tablespace.get())) {
    DBUG_RETURN(false);
  }

  DBUG_RETURN(true);
-
}

-
-bool
-Ndb_dd_client::drop_tablespace(const char* tablespace_name,
-                               bool fail_if_not_exists)
+bool Ndb_dd_client::drop_tablespace(const char *tablespace_name,
+                                    bool fail_if_not_exists)
{
  DBUG_ENTER("Ndb_dd_client::drop_tablespace");

  const dd::Tablespace *existing = nullptr;
-  if (m_client->acquire(tablespace_name, &existing))
-  {
+  if (m_client->acquire(tablespace_name, &existing)) {
    DBUG_RETURN(false);
  }

-  if (existing == nullptr)
-  {
+  if (existing == nullptr) {
    // Tablespace does not exist
-    if (fail_if_not_exists)
-    {
+    if (fail_if_not_exists) {
      DBUG_RETURN(false);
    }
    DBUG_RETURN(true);
  }

-  if (m_client->drop(existing))
-  {
+  if (m_client->drop(existing)) {
    DBUG_RETURN(false);
  }

  DBUG_RETURN(true);
}

-
-bool
-Ndb_dd_client::get_logfile_group(const char *logfile_group_name,
-                                 const dd::Tablespace **logfile_group_def)
-{
-  if (m_client->acquire(logfile_group_name, logfile_group_def))
-  {
+bool Ndb_dd_client::get_logfile_group(
+    const char *logfile_group_name, const dd::Tablespace **logfile_group_def) {
+  if (m_client->acquire(logfile_group_name, logfile_group_def)) {
    return false;
  }
  return true;
}

-
-bool
-Ndb_dd_client::logfile_group_exists(const char* logfile_group_name,
-                                    bool& exists)
-{
-  const dd::Tablespace* logfile_group;
-  if (m_client->acquire(logfile_group_name, &logfile_group))
-  {
+bool Ndb_dd_client::logfile_group_exists(const char *logfile_group_name,
+                                         bool &exists) {
+  const dd::Tablespace *logfile_group;
+  if (m_client->acquire(logfile_group_name, &logfile_group)) {
    // Failed to acquire the requested logfile group
    return false;
  }
-  if (logfile_group == nullptr)
-  {
+  if (logfile_group == nullptr) {
    // The logfile group doesn't exist
    exists = false;
    return true;
@@ -1440,22 +1192,17 @@ Ndb_dd_client::logfile_group_exists(const char* logfile_group_name,
  return true;
}

-
bool Ndb_dd_client::fetch_ndb_logfile_group_names(
-    std::unordered_set<std::string>& names)
-{
+    std::unordered_set<std::string> &names) {
  DBUG_ENTER("Ndb_dd_client::fetch_ndb_logfile_group_names");

-  std::vector<const dd::Tablespace*> tablespaces;
-  if (m_client->fetch_global_components(&tablespaces))
-  {
+  std::vector<const dd::Tablespace *> tablespaces;
+  if (m_client->fetch_global_components(&tablespaces)) {
    DBUG_RETURN(false);
  }

-  for (const dd::Tablespace* tablespace: tablespaces)
-  {
-    if (tablespace->engine() != "ndbcluster")
-    {
+  for (const dd::Tablespace *tablespace : tablespaces) {
+    if (tablespace->engine() != "ndbcluster") {
      // Skip non-NDB objects
      continue;
    }
@@ -1465,16 +1212,14 @@
    ndb_dd_disk_data_get_object_type(tablespace->se_private_data(), type);

-    if (type != object_type::LOGFILE_GROUP)
-    {
+    if (type != object_type::LOGFILE_GROUP) {
      // Skip tablespaces
      continue;
    }

    // Acquire lock in DD
    if (!mdl_lock_logfile_group(tablespace->name().c_str(),
-                                false /* intention_exclusive */))
-    {
+                                false /* intention_exclusive */)) {
      // Failed to acquire MDL lock
      DBUG_RETURN(false);
    }
@@ -1484,15 +1229,10 @@ bool Ndb_dd_client::fetch_ndb_logfile_group_names(
  DBUG_RETURN(true);
}

-
-bool
-Ndb_dd_client::install_logfile_group(const char* logfile_group_name,
-                                     const std::vector<std::string>&
-                                         undo_file_names,
-                                     int logfile_group_id,
-                                     int logfile_group_version,
-                                     bool force_overwrite)
-{
+bool Ndb_dd_client::install_logfile_group(
+    const char *logfile_group_name,
+    const std::vector<std::string> &undo_file_names, int logfile_group_id,
+    int logfile_group_version, bool force_overwrite) {
  DBUG_ENTER("Ndb_dd_client::install_logfile_group");

  /*
@@ -1505,32 +1245,26 @@ Ndb_dd_client::install_logfile_group(const char* logfile_group_name,
  */

  bool exists;
-  if (!logfile_group_exists(logfile_group_name, exists))
-  {
+  if (!logfile_group_exists(logfile_group_name, exists)) {
    // Could not detect if the logfile group exists or not
    DBUG_RETURN(false);
  }

-  if (exists)
-  {
-    if (force_overwrite)
-    {
-      if (!drop_logfile_group(logfile_group_name))
-      {
+  if (exists) {
+    if (force_overwrite) {
+      if (!drop_logfile_group(logfile_group_name)) {
        // Failed to drop logfile group
        DBUG_RETURN(false);
      }
-    }
-    else
-    {
+    } else {
      // Error since logfile group exists but force_overwrite not set to true by
      // caller. No point continuing since the subsequent store() will fail
      DBUG_RETURN(false);
    }
  }

-  std::unique_ptr<dd::Tablespace>
-      logfile_group(dd::create_object<dd::Tablespace>());
+  std::unique_ptr<dd::Tablespace> logfile_group(
+      dd::create_object<dd::Tablespace>());

  // Set name
  logfile_group->set_name(logfile_group_name);
@@ -1539,62 +1273,50 @@ Ndb_dd_client::install_logfile_group(const char* logfile_group_name,
  logfile_group->set_engine("ndbcluster");

  // Add undofiles
-  for (const auto undo_file_name : undo_file_names)
-  {
-    ndb_dd_disk_data_add_file(logfile_group.get(),
-                              undo_file_name.c_str());
+  for (const auto undo_file_name : undo_file_names) {
+    ndb_dd_disk_data_add_file(logfile_group.get(), undo_file_name.c_str());
  }

  // Assign id and version
-  ndb_dd_disk_data_set_object_id_and_version(logfile_group.get(),
-                                             logfile_group_id,
-                                             logfile_group_version);
+  ndb_dd_disk_data_set_object_id_and_version(
+      logfile_group.get(), logfile_group_id, logfile_group_version);

  // Assign object type as logfile group
  ndb_dd_disk_data_set_object_type(logfile_group.get()->se_private_data(),
                                   object_type::LOGFILE_GROUP);

  // Write changes to dictionary.
-  if (m_client->store(logfile_group.get()))
-  {
+  if (m_client->store(logfile_group.get())) {
    DBUG_RETURN(false);
  }

  DBUG_RETURN(true);
-
}

-bool
-Ndb_dd_client::install_undo_file(const char* logfile_group_name,
-                                 const char* undo_file_name)
-{
+bool Ndb_dd_client::install_undo_file(const char *logfile_group_name,
+                                      const char *undo_file_name) {
  DBUG_ENTER("Ndb_dd_client::install_undo_file");

  // Read logfile group from DD
-  dd::Tablespace *new_logfile_group_def= nullptr;
+  dd::Tablespace *new_logfile_group_def = nullptr;
  if (m_client->acquire_for_modification(logfile_group_name,
                                         &new_logfile_group_def))
    DBUG_RETURN(false);

-  if (!new_logfile_group_def)
-    DBUG_RETURN(false);
+  if (!new_logfile_group_def) DBUG_RETURN(false);

  ndb_dd_disk_data_add_file(new_logfile_group_def, undo_file_name);

  // Write changes to dictionary.
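  // (as autocommit was switched off by the Ndb_dd_client constructor,
  // the update only becomes permanent once the caller invokes commit();
  // the destructor would otherwise roll it back)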
- if (m_client->update(new_logfile_group_def)) - { + if (m_client->update(new_logfile_group_def)) { DBUG_RETURN(false); } DBUG_RETURN(true); - } -bool -Ndb_dd_client::drop_logfile_group(const char* logfile_group_name, - bool fail_if_not_exists) -{ +bool Ndb_dd_client::drop_logfile_group(const char *logfile_group_name, + bool fail_if_not_exists) { DBUG_ENTER("Ndb_dd_client::drop_logfile_group"); /* @@ -1607,30 +1329,25 @@ Ndb_dd_client::drop_logfile_group(const char* logfile_group_name, */ const dd::Tablespace *existing = nullptr; - if (m_client->acquire(logfile_group_name, &existing)) - { + if (m_client->acquire(logfile_group_name, &existing)) { DBUG_RETURN(false); } - if (existing == nullptr) - { + if (existing == nullptr) { // Logfile group does not exist - if (fail_if_not_exists) - { + if (fail_if_not_exists) { DBUG_RETURN(false); } DBUG_RETURN(true); } - if (m_client->drop(existing)) - { + if (m_client->drop(existing)) { DBUG_RETURN(false); } DBUG_RETURN(true); } - /** Lock and add the given referenced table to the set of referenced tables maintained by the invalidator. @@ -1641,21 +1358,14 @@ Ndb_dd_client::drop_logfile_group(const char* logfile_group_name, @return true On success. @return false Unable to lock the table to the list. */ -bool -Ndb_referenced_tables_invalidator::add_and_lock_referenced_table( - const char* schema_name, const char* table_name) -{ +bool Ndb_referenced_tables_invalidator::add_and_lock_referenced_table( + const char *schema_name, const char *table_name) { auto result = - m_referenced_tables.insert(std::make_pair(schema_name, - table_name)); - if (result.second) - { + m_referenced_tables.insert(std::make_pair(schema_name, table_name)); + if (result.second) { // New parent added to invalidator. Lock it down - DBUG_PRINT("info", ("Locking '%s.%s'", - schema_name, table_name)); - if (!m_dd_client.mdl_locks_acquire_exclusive(schema_name, - table_name)) - { + DBUG_PRINT("info", ("Locking '%s.%s'", schema_name, table_name)); + if (!m_dd_client.mdl_locks_acquire_exclusive(schema_name, table_name)) { DBUG_PRINT("error", ("Unable to acquire lock to parent table '%s.%s'", schema_name, table_name)); return false; @@ -1664,7 +1374,6 @@ Ndb_referenced_tables_invalidator::add_and_lock_referenced_table( return true; } - /** Fetch the list of referenced tables to add from the local Data Dictionary if available and also from the NDB Dictionary if available. Then lock @@ -1685,55 +1394,44 @@ Ndb_referenced_tables_invalidator::add_and_lock_referenced_table( @return true On success. @return false Fetching failed. */ -bool -Ndb_referenced_tables_invalidator::fetch_referenced_tables_to_invalidate( - const char* schema_name, const char* table_name, - const dd::Table* table_def, bool skip_ndb_dict_fetch) -{ +bool Ndb_referenced_tables_invalidator::fetch_referenced_tables_to_invalidate( + const char *schema_name, const char *table_name, const dd::Table *table_def, + bool skip_ndb_dict_fetch) { DBUG_ENTER("Ndb_dd_client::fetch_referenced_tables_to_invalidate"); DBUG_PRINT("info", ("Collecting parent tables of '%s.%s' that are to be invalidated", schema_name, table_name)); - if (table_def != nullptr) - { + if (table_def != nullptr) { /* Table exists in DD already. 
   Lock and add the parents */
-    for (const dd::Foreign_key *fk : table_def->foreign_keys())
-    {
-      const char* parent_db = fk->referenced_table_schema_name().c_str();
-      const char* parent_table = fk->referenced_table_name().c_str();
+    for (const dd::Foreign_key *fk : table_def->foreign_keys()) {
+      const char *parent_db = fk->referenced_table_schema_name().c_str();
+      const char *parent_table = fk->referenced_table_name().c_str();
 
       if (strcmp(parent_db, schema_name) == 0 &&
-          strcmp(parent_table, table_name) == 0)
-      {
+          strcmp(parent_table, table_name) == 0) {
         // Given table is the parent of this FK. Skip adding.
         continue;
       }
 
-      if (!add_and_lock_referenced_table(parent_db, parent_table))
-      {
+      if (!add_and_lock_referenced_table(parent_db, parent_table)) {
         DBUG_RETURN(false);
       }
     }
   }
 
-  if(!skip_ndb_dict_fetch)
-  {
+  if (!skip_ndb_dict_fetch) {
     std::set<std::pair<std::string, std::string>> referenced_tables;
 
     /* fetch the foreign key definitions from NDB dictionary */
-    if (!fetch_referenced_tables_from_ndb_dictionary(m_thd,
-                                                     schema_name, table_name,
-                                                     referenced_tables))
-    {
+    if (!fetch_referenced_tables_from_ndb_dictionary(
+            m_thd, schema_name, table_name, referenced_tables)) {
       DBUG_RETURN(false);
     }
 
     /* lock and add any missing parents */
-    for (auto const& parent_name : referenced_tables)
-    {
+    for (auto const &parent_name : referenced_tables) {
       if (!add_and_lock_referenced_table(parent_name.first.c_str(),
-                                         parent_name.second.c_str()))
-      {
+                                         parent_name.second.c_str())) {
         DBUG_RETURN(false);
       }
     }
@@ -1742,7 +1440,6 @@ Ndb_referenced_tables_invalidator::fetch_referenced_tables_to_invalidate(
   DBUG_RETURN(true);
 }
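The two invalidator methods above are designed to be called on opposite sides of a DDL change: parents are collected and MDL-locked before the change, and their cached definitions are invalidated afterwards. A minimal sketch of that call pattern, using only the names shown in this diff; the wrapper function `drop_with_parent_invalidation` and its arguments are hypothetical, and the DDL step itself is elided:

```cpp
// Sketch only: how Ndb_referenced_tables_invalidator is meant to be driven.
bool drop_with_parent_invalidation(THD *thd, Ndb_dd_client &dd_client,
                                   const char *schema, const char *table,
                                   const dd::Table *table_def) {
  Ndb_referenced_tables_invalidator invalidator(thd, dd_client);

  // Collect and MDL-lock the FK parents, from both the DD and NDB dictionary
  if (!invalidator.fetch_referenced_tables_to_invalidate(schema, table,
                                                         table_def))
    return false;

  // ... perform the actual DDL change here (elided) ...

  // Close cached TABLE instances and invalidate the DD cache entries
  return invalidator.invalidate();
}
```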
-
 /**
   Invalidate all the tables in the referenced_tables set by closing
   any cached instances in the table definition cache and invalidating
@@ -1751,21 +1448,18 @@ Ndb_referenced_tables_invalidator::fetch_referenced_tables_to_invalidate(
   @return true On success.
   @return false Invalidation failed.
 */
-bool
-Ndb_referenced_tables_invalidator::invalidate() const
-{
+bool Ndb_referenced_tables_invalidator::invalidate() const {
   DBUG_ENTER("Ndb_foreign_key_parents_invalidator::invalidate");
   for (auto parent_it : m_referenced_tables) {
     // Invalidate Table and Table Definition Caches too.
-    const char* schema_name = parent_it.first.c_str();
-    const char* table_name = parent_it.second.c_str();
-    DBUG_PRINT("info", ("Invalidating parent table '%s.%s'",
-                        schema_name, table_name));
+    const char *schema_name = parent_it.first.c_str();
+    const char *table_name = parent_it.second.c_str();
+    DBUG_PRINT("info",
+               ("Invalidating parent table '%s.%s'", schema_name, table_name));
     if (ndb_tdc_close_cached_table(m_thd, schema_name, table_name) ||
-        m_thd->dd_client()->invalidate(schema_name, table_name) != 0)
-    {
-      DBUG_PRINT("error", ("Unable to invalidate table '%s.%s'",
-                           schema_name, table_name));
+        m_thd->dd_client()->invalidate(schema_name, table_name) != 0) {
+      DBUG_PRINT("error", ("Unable to invalidate table '%s.%s'", schema_name,
+                           table_name));
       DBUG_RETURN(false);
     }
   }
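The install functions in ndb_dd_client.cc above share one convention: `install_*` fails if the object already exists in the DD, unless `force_overwrite` is set, in which case the old entry is dropped and re-created. A sketch of a caller that wants create-or-replace semantics; the function name, the file name, the id/version values, and the `commit()` call (implied by the class's commit/rollback members but not visible in these hunks) are assumptions:

```cpp
// Sketch only: the "install with optional overwrite" pattern of
// install_logfile_group() (install_tablespace() behaves the same way).
bool install_lfg_example(Ndb_dd_client &dd_client) {
  const std::vector<std::string> undo_files = {"lg1_undo0.dat"};

  // With force_overwrite == false this fails if "lg1" already exists in DD;
  // with true, an existing entry is dropped first and then stored anew.
  if (!dd_client.install_logfile_group("lg1", undo_files, 37 /* id */,
                                       1 /* version */,
                                       true /* force_overwrite */))
    return false;

  dd_client.commit();  // assumed API: persist the DD change
  return true;
}
```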
diff --git a/storage/ndb/plugin/ndb_dd_client.h b/storage/ndb/plugin/ndb_dd_client.h
index fdd568b5af74..3008044e0b0d 100644
--- a/storage/ndb/plugin/ndb_dd_client.h
+++ b/storage/ndb/plugin/ndb_dd_client.h
@@ -36,14 +36,14 @@
 #include "sql/dd/string_type.h"
 
 namespace dd {
-  typedef String_type sdi_t;
-  namespace cache {
-    class Dictionary_client;
-  }
-  class Schema;
-  class Table;
-  class Tablespace;
+typedef String_type sdi_t;
+namespace cache {
+class Dictionary_client;
 }
+class Schema;
+class Table;
+class Tablespace;
+}  // namespace dd
 
 /*
  * Helper class to Ndb_dd_client to fetch and
@@ -52,22 +52,23 @@ class Ndb_referenced_tables_invalidator {
   std::set<std::pair<std::string, std::string>> m_referenced_tables;
-  class THD* const m_thd;
-  class Ndb_dd_client& m_dd_client;
-
-  bool add_and_lock_referenced_table(const char* schema_name,
-                                     const char* table_name);
-public:
-  Ndb_referenced_tables_invalidator(class THD* thd,
-                                    class Ndb_dd_client& dd_client)
-    :m_thd(thd), m_dd_client(dd_client) {}
-  bool fetch_referenced_tables_to_invalidate(
-      const char* schema_name, const char* table_name,
-      const dd::Table* table_def, bool skip_ndb_dict_fetch = false);
+  class THD *const m_thd;
+  class Ndb_dd_client &m_dd_client;
+
+  bool add_and_lock_referenced_table(const char *schema_name,
+                                     const char *table_name);
+
+ public:
+  Ndb_referenced_tables_invalidator(class THD *thd,
+                                    class Ndb_dd_client &dd_client)
+      : m_thd(thd), m_dd_client(dd_client) {}
+  bool fetch_referenced_tables_to_invalidate(const char *schema_name,
+                                             const char *table_name,
+                                             const dd::Table *table_def,
+                                             bool skip_ndb_dict_fetch = false);
   bool invalidate() const;
 };
-
 /*
   Class encapsulating the code for accessing the DD from ndbcluster
@@ -81,38 +82,38 @@ class Ndb_referenced_tables_invalidator {
 */
 class Ndb_dd_client {
-  class THD* const m_thd;
-  dd::cache::Dictionary_client* m_client;
-  void* m_auto_releaser; // Opaque pointer
-  std::vector<MDL_ticket*> m_acquired_mdl_tickets;
+  class THD *const m_thd;
+  dd::cache::Dictionary_client *m_client;
+  void *m_auto_releaser;  // Opaque pointer
+  std::vector<MDL_ticket *> m_acquired_mdl_tickets;
   ulonglong m_save_option_bits{0};
   bool m_comitted{false};
   bool m_auto_rollback{true};
 
   void disable_autocommit();
 
-  bool store_table(dd::Table* install_table, int ndb_table_id);
+  bool store_table(dd::Table *install_table, int ndb_table_id);
 
-public:
-  Ndb_dd_client(class THD* thd);
+ public:
+  Ndb_dd_client(class THD *thd);
 
   ~Ndb_dd_client();
 
   // Metadata lock functions
-  bool mdl_lock_schema(const char* schema_name, bool exclusive_lock = false);
-  bool mdl_lock_table(const char* schema_name, const char* table_name);
-  bool mdl_locks_acquire_exclusive(const char* schema_name,
-                                   const char* table_name,
+  bool mdl_lock_schema(const char *schema_name, bool exclusive_lock = false);
+  bool mdl_lock_table(const char *schema_name, const char *table_name);
+  bool mdl_locks_acquire_exclusive(const char *schema_name,
+                                   const char *table_name,
                                    bool custom_lock_wait = false,
                                    ulong lock_wait_timeout = 0);
-  bool mdl_lock_logfile_group(const char* logfile_group_name,
+  bool mdl_lock_logfile_group(const char *logfile_group_name,
                               bool intention_exclusive);
-  bool mdl_lock_logfile_group_exclusive(const char* logfile_group_name,
+  bool mdl_lock_logfile_group_exclusive(const char *logfile_group_name,
                                         bool custom_lock_wait = false,
                                         ulong lock_wait_timeout = 0);
-  bool mdl_lock_tablespace(const char* tablespace_name,
+  bool mdl_lock_tablespace(const char *tablespace_name,
                            bool intention_exclusive);
-  bool mdl_lock_tablespace_exclusive(const char* tablespace_name,
+  bool mdl_lock_tablespace_exclusive(const char *tablespace_name,
                                      bool custom_lock_wait = false,
                                      ulong lock_wait_timeout = 0);
   void mdl_locks_release();
@@ -130,26 +131,24 @@ class Ndb_dd_client {
   */
   void disable_auto_rollback() { m_auto_rollback = false; }
 
-  bool get_engine(const char* schema_name, const char* table_name,
-                  dd::String_type* engine);
+  bool get_engine(const char *schema_name, const char *table_name,
+                  dd::String_type *engine);
   bool rename_table(const char *old_schema_name, const char *old_table_name,
                     const char *new_schema_name, const char *new_table_name,
                     int new_table_id, int new_table_version,
-                    Ndb_referenced_tables_invalidator *invalidator= nullptr);
-  bool remove_table(const char* schema_name, const char* table_name,
-                    Ndb_referenced_tables_invalidator *invalidator= nullptr);
-  bool install_table(const char* schema_name, const char* table_name,
-                     const dd::sdi_t &sdi,
-                     int ndb_table_id, int ndb_table_version,
-                     size_t ndb_num_partitions,
+                    Ndb_referenced_tables_invalidator *invalidator = nullptr);
+  bool remove_table(const char *schema_name, const char *table_name,
+                    Ndb_referenced_tables_invalidator *invalidator = nullptr);
+  bool install_table(const char *schema_name, const char *table_name,
+                     const dd::sdi_t &sdi, int ndb_table_id,
+                     int ndb_table_version, size_t ndb_num_partitions,
                      const std::string &tablespace_name, bool force_overwrite,
-                     Ndb_referenced_tables_invalidator *invalidator= nullptr);
-  bool migrate_table(const char* schema_name, const char* table_name,
-                     const unsigned char* frm_data,
-                     unsigned int unpacked_len,
+                     Ndb_referenced_tables_invalidator *invalidator = nullptr);
+  bool migrate_table(const char *schema_name, const char *table_name,
+                     const unsigned char *frm_data, unsigned int unpacked_len,
                      bool force_overwrite);
-  bool get_table(const char* schema_name, const char* table_name,
+  bool get_table(const char *schema_name, const char *table_name,
                  const dd::Table **table_def);
   bool table_exists(const char *schema_name, const char *table_name,
                     bool &exists);
@@ -157,23 +156,23 @@ class Ndb_dd_client {
                                  const char *table_name,
                                  dd::Object_id tablespace_id);
   bool set_object_id_and_version_in_table(const char *schema_name,
-                                          const char *table_name,
-                                          int object_id, int object_version);
+                                          const char *table_name, int object_id,
+                                          int object_version);
 
-  bool fetch_all_schemas(std::map<std::string, const dd::Schema *>&);
-  bool fetch_schema_names(std::vector<std::string>*);
-  bool get_ndb_table_names_in_schema(const char* schema_name,
+  bool fetch_all_schemas(std::map<std::string, const dd::Schema *> &);
+  bool fetch_schema_names(std::vector<std::string> *);
+  bool get_ndb_table_names_in_schema(const char *schema_name,
                                      std::unordered_set<std::string> *names);
-  bool get_table_names_in_schema(const char* schema_name,
+  bool get_table_names_in_schema(const char *schema_name,
                                  std::unordered_set<std::string> *ndb_tables,
                                  std::unordered_set<std::string> *local_tables);
-  bool have_local_tables_in_schema(const char* schema_name,
-                                   bool* found_local_tables);
-  bool is_local_table(const char* schema_name, const char* table_name,
+  bool have_local_tables_in_schema(const char *schema_name,
+                                   bool *found_local_tables);
+  bool is_local_table(const char *schema_name, const char *table_name,
                       bool &local_table);
-  bool schema_exists(const char* schema_name, bool* schema_exists);
-  bool update_schema_version(const char* schema_name,
-                             unsigned int counter, unsigned int node_id);
+  bool schema_exists(const char *schema_name, bool *schema_exists);
+  bool update_schema_version(const char *schema_name, unsigned int counter,
+                             unsigned int node_id);
 
   /*
     @brief Lookup tablespace id from tablespace name
@@ -183,35 +182,30 @@ class Ndb_dd_client {
 
     @return true if tablespace found
   */
-  bool lookup_tablespace_id(const char* tablespace_name,
-                            dd::Object_id* tablespace_id);
-  bool get_tablespace(const char* tablespace_name,
+  bool lookup_tablespace_id(const char *tablespace_name,
+                            dd::Object_id *tablespace_id);
+  bool get_tablespace(const char *tablespace_name,
                       const dd::Tablespace **tablespace_def);
-  bool tablespace_exists(const char* tablespace_name, bool& exists);
-  bool fetch_ndb_tablespace_names(std::unordered_set<std::string>& names);
-  bool install_tablespace(const char* tablespace_name,
-                          const std::vector<std::string>& data_file_names,
-                          int tablespace_id,
-                          int tablespace_version,
+  bool tablespace_exists(const char *tablespace_name, bool &exists);
+  bool fetch_ndb_tablespace_names(std::unordered_set<std::string> &names);
+  bool install_tablespace(const char *tablespace_name,
+                          const std::vector<std::string> &data_file_names,
+                          int tablespace_id, int tablespace_version,
                           bool force_overwrite);
-  bool drop_tablespace(const char* tablespace_name,
+  bool drop_tablespace(const char *tablespace_name,
                        bool fail_if_not_exists = true);
-  bool get_logfile_group(const char* logfile_group_name,
+  bool get_logfile_group(const char *logfile_group_name,
                          const dd::Tablespace **logfile_group_def);
-  bool logfile_group_exists(const char* logfile_group_name, bool& exists);
-  bool fetch_ndb_logfile_group_names(std::unordered_set<std::string>& names);
-  bool install_logfile_group(const char* logfile_group_name,
-                             const std::vector<std::string>& undo_file_names,
-                             int logfile_group_id,
-                             int logfile_group_version,
+  bool logfile_group_exists(const char *logfile_group_name, bool &exists);
+  bool fetch_ndb_logfile_group_names(std::unordered_set<std::string> &names);
+  bool install_logfile_group(const char *logfile_group_name,
+                             const std::vector<std::string> &undo_file_names,
+                             int logfile_group_id, int logfile_group_version,
                              bool force_overwrite);
-  bool install_undo_file(const char* logfile_group_name,
-                         const char* undo_file_name);
-  bool drop_logfile_group(const char* logfile_group_name,
+  bool install_undo_file(const char *logfile_group_name,
+                         const char *undo_file_name);
+  bool drop_logfile_group(const char *logfile_group_name,
                           bool fail_if_not_exists = true);
 };
-
-
-
 #endif
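Per the class comment and members above, an Ndb_dd_client disables autocommit on the THD, collects MDL tickets as locks are taken, and (via `m_auto_rollback`) rolls the DD transaction back on destruction unless it was committed. A small read-only usage sketch based purely on this header; the wrapper function and the schema/table names are hypothetical:

```cpp
// Sketch only: typical Ndb_dd_client lifetime for a read-only DD lookup.
void read_table_engine(THD *thd) {
  Ndb_dd_client dd_client(thd);  // saves option bits, disables autocommit

  // Acquire the MDL needed before reading the table from DD
  if (!dd_client.mdl_lock_table("test", "t1")) return;

  dd::String_type engine;
  if (!dd_client.get_engine("test", "t1", &engine)) return;

  // Read-only work: nothing to commit. The destructor releases the
  // acquired MDL tickets and restores the THD's option bits.
}
```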
diff --git a/storage/ndb/plugin/ndb_dd_disk_data.cc b/storage/ndb/plugin/ndb_dd_disk_data.cc
index 595dfeb1a4ed..572459aac093 100644
--- a/storage/ndb/plugin/ndb_dd_disk_data.cc
+++ b/storage/ndb/plugin/ndb_dd_disk_data.cc
@@ -31,135 +31,106 @@
 
 // The keys used to store the id, version, and type of object
 // in se_private_data field of DD
-static const char* object_id_key = "object_id";
-static const char* object_version_key = "object_version";
-static const char* object_type_key = "object_type";
+static const char *object_id_key = 
"object_id"; +static const char *object_version_key = "object_version"; +static const char *object_type_key = "object_type"; - -void -ndb_dd_disk_data_set_object_id_and_version(dd::Tablespace* object_def, - int object_id, - int object_version) -{ +void ndb_dd_disk_data_set_object_id_and_version(dd::Tablespace *object_def, + int object_id, + int object_version) { DBUG_ENTER("ndb_dd_disk_data_set_object_id_and_version"); - DBUG_PRINT("enter", ("object_id: %d, object_version: %d", - object_id, object_version)); + DBUG_PRINT("enter", + ("object_id: %d, object_version: %d", object_id, object_version)); object_def->se_private_data().set(object_id_key, object_id); object_def->se_private_data().set(object_version_key, object_version); DBUG_VOID_RETURN; } - -bool -ndb_dd_disk_data_get_object_id_and_version(const dd::Tablespace* object_def, - int& object_id, - int& object_version) -{ +bool ndb_dd_disk_data_get_object_id_and_version( + const dd::Tablespace *object_def, int &object_id, int &object_version) { DBUG_ENTER("ndb_dd_disk_data_get_object_id_and_version"); - if (!object_def->se_private_data().exists(object_id_key)) - { + if (!object_def->se_private_data().exists(object_id_key)) { DBUG_PRINT("error", ("Disk data definition didn't contain property '%s'", object_id_key)); DBUG_RETURN(false); } - if (object_def->se_private_data().get(object_id_key, &object_id)) - { + if (object_def->se_private_data().get(object_id_key, &object_id)) { DBUG_PRINT("error", ("Disk data definition didn't have a valid number " - "for '%s'", object_id_key)); + "for '%s'", + object_id_key)); DBUG_RETURN(false); } - if (!object_def->se_private_data().exists(object_version_key)) - { + if (!object_def->se_private_data().exists(object_version_key)) { DBUG_PRINT("error", ("Disk data definition didn't contain property '%s'", object_version_key)); DBUG_RETURN(false); } - if (object_def->se_private_data().get(object_version_key, &object_version)) - { + if (object_def->se_private_data().get(object_version_key, &object_version)) { DBUG_PRINT("error", ("Disk data definition didn't have a valid number " - "for '%s'", object_version_key)); + "for '%s'", + object_version_key)); DBUG_RETURN(false); } - DBUG_PRINT("exit", ("object_id: %d, object_version: %d", - object_id, object_version)); + DBUG_PRINT("exit", + ("object_id: %d, object_version: %d", object_id, object_version)); DBUG_RETURN(true); } - void ndb_dd_disk_data_set_object_type(dd::Properties &se_private_data, - const enum object_type type) -{ + const enum object_type type) { DBUG_ENTER("ndb_dd_disk_data_set_object_type"); dd::String_type type_str; - if (type == object_type::TABLESPACE) - { + if (type == object_type::TABLESPACE) { type_str = "tablespace"; - } - else if (type == object_type::LOGFILE_GROUP) - { + } else if (type == object_type::LOGFILE_GROUP) { type_str = "logfile_group"; - } - else - { + } else { // Should never reach here DBUG_ASSERT(false); } DBUG_PRINT("enter", ("object_type: %s", type_str.c_str())); - se_private_data.set(object_type_key, - type_str.c_str()); + se_private_data.set(object_type_key, type_str.c_str()); DBUG_VOID_RETURN; } - void ndb_dd_disk_data_set_object_type(dd::Tablespace *object_def, - enum object_type type) -{ + enum object_type type) { ndb_dd_disk_data_set_object_type(object_def->se_private_data(), type); } - -bool -ndb_dd_disk_data_get_object_type(const dd::Properties &se_private_data, - enum object_type &type) -{ +bool ndb_dd_disk_data_get_object_type(const dd::Properties &se_private_data, + enum object_type &type) { 
DBUG_ENTER("ndb_dd_disk_data_get_object_type"); - if (!se_private_data.exists(object_type_key)) - { + if (!se_private_data.exists(object_type_key)) { DBUG_PRINT("error", ("Disk data definition didn't contain property '%s'", object_type_key)); DBUG_RETURN(false); } dd::String_type type_str; - if (se_private_data.get(object_type_key, - &type_str)) - { + if (se_private_data.get(object_type_key, &type_str)) { DBUG_PRINT("error", ("Disk data definition didn't have a valid value for" - " '%s'", object_type_key)); + " '%s'", + object_type_key)); DBUG_RETURN(false); } - if (type_str == "tablespace") - { + if (type_str == "tablespace") { type = object_type::TABLESPACE; - } - else if (type_str == "logfile_group") - { + } else if (type_str == "logfile_group") { type = object_type::LOGFILE_GROUP; - } - else - { + } else { // Should never reach here DBUG_ASSERT(false); DBUG_RETURN(false); @@ -170,33 +141,23 @@ ndb_dd_disk_data_get_object_type(const dd::Properties &se_private_data, DBUG_RETURN(true); } - -void -ndb_dd_disk_data_add_file(dd::Tablespace* object_def, - const char* file_name) -{ +void ndb_dd_disk_data_add_file(dd::Tablespace *object_def, + const char *file_name) { object_def->add_file()->set_filename(file_name); } - -void ndb_dd_disk_data_get_file_names(const dd::Tablespace* object_def, - std::vector& file_names) -{ - for(const auto file : object_def->files()) - { +void ndb_dd_disk_data_get_file_names(const dd::Tablespace *object_def, + std::vector &file_names) { + for (const auto file : object_def->files()) { file_names.push_back((file->filename()).c_str()); } } - -bool ndb_dd_disk_data_get_table_refs(THD *thd, const dd::Tablespace &object_def, - std::vector - &table_refs) -{ - if (dd::fetch_tablespace_table_refs(thd, object_def, &table_refs)) - { +bool ndb_dd_disk_data_get_table_refs( + THD *thd, const dd::Tablespace &object_def, + std::vector &table_refs) { + if (dd::fetch_tablespace_table_refs(thd, object_def, &table_refs)) { return false; } return true; } - diff --git a/storage/ndb/plugin/ndb_dd_disk_data.h b/storage/ndb/plugin/ndb_dd_disk_data.h index 5d228b639cf5..aee8ebb4d732 100644 --- a/storage/ndb/plugin/ndb_dd_disk_data.h +++ b/storage/ndb/plugin/ndb_dd_disk_data.h @@ -30,9 +30,9 @@ #include "sql/dd/properties.h" namespace dd { - class Tablespace; - struct Tablespace_table_ref; -} +class Tablespace; +struct Tablespace_table_ref; +} // namespace dd class THD; @@ -42,63 +42,52 @@ class THD; prefixed with ndb_dd_disk_data */ - /* Save the disk data object's id and version in the definition */ -void -ndb_dd_disk_data_set_object_id_and_version(dd::Tablespace* object_def, - int object_id, int object_version); - +void ndb_dd_disk_data_set_object_id_and_version(dd::Tablespace *object_def, + int object_id, + int object_version); /* Return the definition's object id and version */ -bool -ndb_dd_disk_data_get_object_id_and_version( - const dd::Tablespace* object_def, - int& object_id, int& object_version); +bool ndb_dd_disk_data_get_object_id_and_version( + const dd::Tablespace *object_def, int &object_id, int &object_version); - -enum object_type -{ - TABLESPACE, - LOGFILE_GROUP -}; +enum object_type { TABLESPACE, LOGFILE_GROUP }; /* Save the type of the disk data object */ void ndb_dd_disk_data_set_object_type(dd::Properties &se_private_data, const enum object_type type); -void ndb_dd_disk_data_set_object_type(dd::Tablespace* object_def, +void ndb_dd_disk_data_set_object_type(dd::Tablespace *object_def, const enum object_type type); /* Return the disk data object type */ -bool 
diff --git a/storage/ndb/plugin/ndb_dd_disk_data.h b/storage/ndb/plugin/ndb_dd_disk_data.h
index 5d228b639cf5..aee8ebb4d732 100644
--- a/storage/ndb/plugin/ndb_dd_disk_data.h
+++ b/storage/ndb/plugin/ndb_dd_disk_data.h
@@ -30,9 +30,9 @@
 #include "sql/dd/properties.h"
 
 namespace dd {
-  class Tablespace;
-  struct Tablespace_table_ref;
-}
+class Tablespace;
+struct Tablespace_table_ref;
+}  // namespace dd
 
 class THD;
 
@@ -42,63 +42,52 @@ class THD;
   prefixed with ndb_dd_disk_data
 */
 
-
 /* Save the disk data object's id and version in the definition */
-void
-ndb_dd_disk_data_set_object_id_and_version(dd::Tablespace* object_def,
-                                           int object_id, int object_version);
-
+void ndb_dd_disk_data_set_object_id_and_version(dd::Tablespace *object_def,
+                                                int object_id,
+                                                int object_version);
 
 /* Return the definition's object id and version */
-bool
-ndb_dd_disk_data_get_object_id_and_version(
-    const dd::Tablespace* object_def,
-    int& object_id, int& object_version);
+bool ndb_dd_disk_data_get_object_id_and_version(
+    const dd::Tablespace *object_def, int &object_id, int &object_version);
 
-
-enum object_type
-{
-  TABLESPACE,
-  LOGFILE_GROUP
-};
+enum object_type { TABLESPACE, LOGFILE_GROUP };
 
 /* Save the type of the disk data object */
 void ndb_dd_disk_data_set_object_type(dd::Properties &se_private_data,
                                       const enum object_type type);
 
-void ndb_dd_disk_data_set_object_type(dd::Tablespace* object_def,
+void ndb_dd_disk_data_set_object_type(dd::Tablespace *object_def,
                                       const enum object_type type);
 
 /* Return the disk data object type */
-bool
-ndb_dd_disk_data_get_object_type(const dd::Properties &se_private_data,
-                                 enum object_type &type);
-
+bool ndb_dd_disk_data_get_object_type(const dd::Properties &se_private_data,
+                                      enum object_type &type);
 
 /* Add undo/data file to logfile group/tablespace */
-void ndb_dd_disk_data_add_file(dd::Tablespace* object_def,
-                               const char* file_name);
+void ndb_dd_disk_data_add_file(dd::Tablespace *object_def,
+                               const char *file_name);
 
 /* Retrieve file names belonging to the disk data object */
-void ndb_dd_disk_data_get_file_names(const dd::Tablespace* object_def,
-                                     std::vector<std::string>& file_names);
+void ndb_dd_disk_data_get_file_names(const dd::Tablespace *object_def,
+                                     std::vector<std::string> &file_names);
 
 /* Fetch information about tables in a tablespace */
-bool ndb_dd_disk_data_get_table_refs(THD *thd, const dd::Tablespace &object_def,
-                                     std::vector<dd::Tablespace_table_ref>
-                                     &table_refs);
+bool ndb_dd_disk_data_get_table_refs(
+    THD *thd, const dd::Tablespace &object_def,
+    std::vector<dd::Tablespace_table_ref> &table_refs);
 
 #endif
diff --git a/storage/ndb/plugin/ndb_dd_sdi.cc b/storage/ndb/plugin/ndb_dd_sdi.cc
index cea0e4d50614..36f4f470c2dc 100644
--- a/storage/ndb/plugin/ndb_dd_sdi.cc
+++ b/storage/ndb/plugin/ndb_dd_sdi.cc
@@ -26,12 +26,12 @@
 
 #include "storage/ndb/plugin/ndb_dd_sdi.h"
 
 // Using
-#include "my_rapidjson_size_t.h" // IWYU pragma: keep
+#include "my_rapidjson_size_t.h"  // IWYU pragma: keep
 
-#include <rapidjson/document.h>     // rapidjson::Document
-#include <rapidjson/writer.h>       // rapidjson::Writer
-#include <rapidjson/prettywriter.h> // rapidjson::PrettyWriter
+#include <rapidjson/document.h>      // rapidjson::Document
+#include <rapidjson/prettywriter.h>  // rapidjson::PrettyWriter
 #include <rapidjson/stringbuffer.h>
+#include <rapidjson/writer.h>  // rapidjson::Writer
 
 #include "sql/dd/impl/sdi.h"
 #include "sql/dd/sdi_fwd.h"
@@ -57,20 +57,17 @@ typedef rapidjson::PrettyWriter<dd::RJ_StringBuffer, dd::RJ_Encoding,
   doc.Parse<0>(sdi.c_str());
-  if (doc.HasParseError())
-  {
+  if (doc.HasParseError()) {
     return "";
   }
 
   dd::RJ_StringBuffer buf;
   MinifyWriter w(buf);
-  if (!doc.Accept(w))
-  {
+  if (!doc.Accept(w)) {
     return "";
   }
 
@@ -95,17 +92,12 @@ dd::sdi_t ndb_dd_sdi_prettify(dd::sdi_t sdi) {
   return buf.GetString();
 }
 
-bool
-ndb_dd_sdi_deserialize(THD* thd, const dd::sdi_t& sdi, dd::Table* table)
-{
+bool ndb_dd_sdi_deserialize(THD *thd, const dd::sdi_t &sdi, dd::Table *table) {
   return dd::deserialize(thd, sdi, table);
 }
 
-
-dd::sdi_t
-ndb_dd_sdi_serialize(THD* thd, const dd::Table& table,
-                     const dd::String_type& schema_name)
-{
+dd::sdi_t ndb_dd_sdi_serialize(THD *thd, const dd::Table &table,
+                               const dd::String_type &schema_name) {
 #ifndef DBUG_OFF
   // Verify that dd::serialize generates SDI in minimized format
   dd::sdi_t sdi = dd::serialize(thd, table, schema_name);
diff --git a/storage/ndb/plugin/ndb_dd_sdi.h b/storage/ndb/plugin/ndb_dd_sdi.h
index 924659b4944c..683979992bc9 100644
--- a/storage/ndb/plugin/ndb_dd_sdi.h
+++ b/storage/ndb/plugin/ndb_dd_sdi.h
@@ -34,11 +34,11 @@
 #include "sql/dd/string_type.h"
 
 namespace dd {
-  class Table;
-  typedef String_type sdi_t;
-}
+class Table;
+typedef String_type sdi_t;
+}  // namespace dd
 
-bool ndb_dd_sdi_deserialize(class THD* thd, const dd::sdi_t &sdi,
+bool ndb_dd_sdi_deserialize(class THD *thd, const dd::sdi_t &sdi,
                             dd::Table *table);
 
 dd::sdi_t ndb_dd_sdi_serialize(class THD *thd, const dd::Table &table,
diff --git a/storage/ndb/plugin/ndb_dd_table.cc b/storage/ndb/plugin/ndb_dd_table.cc
index eba9b184d7fc..f67bb4d8ad55 100644
--- a/storage/ndb/plugin/ndb_dd_table.cc
+++ b/storage/ndb/plugin/ndb_dd_table.cc
@@ -37,61 +37,49 @@
 
 // The key used to store the NDB tables object version in the
 // se_private_data field of DD
-static const char* object_version_key = "object_version";
+static const char *object_version_key = 
"object_version"; -void -ndb_dd_table_set_object_id_and_version(dd::Table* table_def, - int object_id, int object_version) -{ +void ndb_dd_table_set_object_id_and_version(dd::Table *table_def, int object_id, + int object_version) { DBUG_ENTER("ndb_dd_table_set_object_id_and_version"); - DBUG_PRINT("enter", ("object_id: %d, object_version: %d", - object_id, object_version)); + DBUG_PRINT("enter", + ("object_id: %d, object_version: %d", object_id, object_version)); table_def->set_se_private_id(object_id); - table_def->se_private_data().set(object_version_key, - object_version); + table_def->se_private_data().set(object_version_key, object_version); DBUG_VOID_RETURN; } - -bool -ndb_dd_table_get_object_id_and_version(const dd::Table* table_def, - int& object_id, int& object_version) -{ +bool ndb_dd_table_get_object_id_and_version(const dd::Table *table_def, + int &object_id, + int &object_version) { DBUG_ENTER("ndb_dd_table_get_object_id_and_version"); - if (table_def->se_private_id() == dd::INVALID_OBJECT_ID) - { + if (table_def->se_private_id() == dd::INVALID_OBJECT_ID) { DBUG_PRINT("error", ("Table definition contained an invalid object id")); DBUG_RETURN(false); } object_id = table_def->se_private_id(); - if (!table_def->se_private_data().exists(object_version_key)) - { + if (!table_def->se_private_data().exists(object_version_key)) { DBUG_PRINT("error", ("Table definition didn't contain property '%s'", object_version_key)); DBUG_RETURN(false); } - if (table_def->se_private_data().get(object_version_key, - &object_version)) - { + if (table_def->se_private_data().get(object_version_key, &object_version)) { DBUG_PRINT("error", ("Table definition didn't have a valid number for '%s'", object_version_key)); DBUG_RETURN(false); } - DBUG_PRINT("exit", ("object_id: %d, object_version: %d", - object_id, object_version)); + DBUG_PRINT("exit", + ("object_id: %d, object_version: %d", object_id, object_version)); DBUG_RETURN(true); } - -void -ndb_dd_table_mark_as_hidden(dd::Table* table_def) -{ +void ndb_dd_table_mark_as_hidden(dd::Table *table_def) { DBUG_ENTER("ndb_dd_table_mark_as_hidden"); DBUG_PRINT("enter", ("table_name: %s", table_def->name().c_str())); @@ -102,71 +90,56 @@ ndb_dd_table_mark_as_hidden(dd::Table* table_def) DBUG_VOID_RETURN; } - -dd::String_type ndb_dd_table_get_engine(const dd::Table* table_def) -{ +dd::String_type ndb_dd_table_get_engine(const dd::Table *table_def) { return table_def->engine(); } -size_t ndb_dd_table_get_num_columns(const dd::Table* table_def) -{ - const dd::Abstract_table::Column_collection& cols = table_def->columns(); +size_t ndb_dd_table_get_num_columns(const dd::Table *table_def) { + const dd::Abstract_table::Column_collection &cols = table_def->columns(); return cols.size(); } -bool ndb_dd_table_is_using_fixed_row_format(const dd::Table* table_def) -{ +bool ndb_dd_table_is_using_fixed_row_format(const dd::Table *table_def) { return table_def->row_format() == dd::Table::RF_FIXED; } -void -ndb_dd_table_set_row_format(dd::Table* table_def, - const bool force_var_part) -{ - if (force_var_part == false) - { +void ndb_dd_table_set_row_format(dd::Table *table_def, + const bool force_var_part) { + if (force_var_part == false) { table_def->set_row_format(dd::Table::RF_FIXED); - } - else - { + } else { table_def->set_row_format(dd::Table::RF_DYNAMIC); } } -bool ndb_dd_table_check_partition_count(const dd::Table* table_def, - size_t ndb_num_partitions) -{ +bool ndb_dd_table_check_partition_count(const dd::Table *table_def, + size_t ndb_num_partitions) { return 
table_def->partitions().size() == ndb_num_partitions;
 }
 
-void ndb_dd_table_fix_partition_count(dd::Table* table_def,
-                                      size_t ndb_num_partitions)
-{
-
+void ndb_dd_table_fix_partition_count(dd::Table *table_def,
+                                      size_t ndb_num_partitions) {
   DBUG_ENTER("ndb_dd_table_fix_partition_count");
   DBUG_PRINT("enter", ("ndb_num_partitions: %zu", ndb_num_partitions));
 
   const size_t dd_num_partitions = table_def->partitions()->size();
 
-  if (ndb_num_partitions < dd_num_partitions)
-  {
+  if (ndb_num_partitions < dd_num_partitions) {
     // Remove extra partitions from DD
-    dd::Collection<dd::Partition*>* dd_partitions = table_def->partitions();
+    dd::Collection<dd::Partition *> *dd_partitions = table_def->partitions();
 
     // Check if the extra partitions have been stored in the DD
     // Checking only one of the partitions is sufficient
     const bool partition_object_stored_in_DD =
-      dd_partitions->at(ndb_num_partitions)->is_persistent();
+        dd_partitions->at(ndb_num_partitions)->is_persistent();
 
-    for (size_t i = ndb_num_partitions; i < dd_num_partitions; i++)
-    {
+    for (size_t i = ndb_num_partitions; i < dd_num_partitions; i++) {
       auto partition = dd_partitions->at(ndb_num_partitions);
       dd_partitions->remove(dynamic_cast<dd::Partition_impl *>(partition));
     }
 
-    if (!partition_object_stored_in_DD)
-    {
+    if (!partition_object_stored_in_DD) {
      // This case has to be handled differently. When the partitions
      // are removed from the collection above, they are dropped
      // from the DD later. In case the partitions have not
@@ -178,12 +151,9 @@ void ndb_dd_table_fix_partition_count(dd::Table* table_def,
       // DD
       dd_partitions->clear_removed_items();
     }
-  }
-  else if (dd_num_partitions < ndb_num_partitions)
-  {
+  } else if (dd_num_partitions < ndb_num_partitions) {
     // Add missing partitions to DD
-    for (size_t i = dd_num_partitions; i < ndb_num_partitions; i++)
-    {
+    for (size_t i = dd_num_partitions; i < ndb_num_partitions; i++) {
       dd::Partition *partition_def = table_def->add_partition();
       const std::string partition_name = "p" + std::to_string(i);
       partition_def->set_name(partition_name.c_str());
@@ -198,11 +168,10 @@ void ndb_dd_table_fix_partition_count(dd::Table* table_def,
 
 // The key used to store the NDB table's previous mysql version in the
 // se_private_data field of DD
-static const char* previous_mysql_version_key = "previous_mysql_version";
+static const char *previous_mysql_version_key = "previous_mysql_version";
 
-void ndb_dd_table_set_previous_mysql_version(dd::Table* table_def,
-                                             ulong previous_mysql_version)
-{
+void ndb_dd_table_set_previous_mysql_version(dd::Table *table_def,
+                                             ulong previous_mysql_version) {
   DBUG_ENTER("ndb_dd_table_set_previous_mysql_version");
   DBUG_PRINT("enter", ("previous_mysql_version: %lu", previous_mysql_version));
 
@@ -211,19 +180,16 @@ void ndb_dd_table_set_previous_mysql_version(dd::Table* table_def,
   DBUG_VOID_RETURN;
 }
 
-bool ndb_dd_table_get_previous_mysql_version(const dd::Table* table_def,
-                                             ulong& previous_mysql_version)
-{
+bool ndb_dd_table_get_previous_mysql_version(const dd::Table *table_def,
+                                             ulong &previous_mysql_version) {
   DBUG_ENTER("ndb_dd_table_get_previous_mysql_version");
 
-  if (!table_def->se_private_data().exists(previous_mysql_version_key))
-  {
+  if (!table_def->se_private_data().exists(previous_mysql_version_key)) {
     DBUG_RETURN(false);
   }
 
   if (table_def->se_private_data().get(previous_mysql_version_key,
-                                       &previous_mysql_version))
-  {
+                                       &previous_mysql_version)) {
     DBUG_PRINT("error",
                ("Table definition didn't have a valid number for '%s'",
                 previous_mysql_version_key));
     DBUG_RETURN(false);
@@ -232,10 +198,8 @@ bool ndb_dd_table_get_previous_mysql_version(const dd::Table* table_def,
   DBUG_RETURN(true);
 }
 
-
 void ndb_dd_table_set_tablespace_id(dd::Table *table_def,
-                                    dd::Object_id tablespace_id)
-{
+                                    dd::Object_id tablespace_id) {
   DBUG_ENTER("ndb_dd_table_set_tablespace_id");
   DBUG_PRINT("enter", ("tablespace_id: %llu", tablespace_id));
 
@@ -243,16 +207,13 @@ void ndb_dd_table_set_tablespace_id(dd::Table *table_def,
   DBUG_VOID_RETURN;
 }
 
-
-Ndb_dd_table::Ndb_dd_table(THD* thd)
+Ndb_dd_table::Ndb_dd_table(THD *thd)
     : m_thd(thd), m_table_def{dd::create_object<dd::Table>()} {}
 
 Ndb_dd_table::~Ndb_dd_table() { delete m_table_def; }
 
-bool Ndb_dd_table::deserialize(const dd::sdi_t &sdi)
-{
-  if (ndb_dd_sdi_deserialize(m_thd, sdi, m_table_def))
-  {
+bool Ndb_dd_table::deserialize(const dd::sdi_t &sdi) {
+  if (ndb_dd_sdi_deserialize(m_thd, sdi, m_table_def)) {
    return false;
  }
  return true;
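The Ndb_dd_table wrapper at the end of this file owns a heap-allocated `dd::Table` for its whole lifetime, which makes it a natural vehicle for unpacking an SDI blob received from NDB. A sketch of such a use, combining it with the free functions declared in the header below; the wrapper function `unpack_table_def` is hypothetical:

```cpp
// Sketch only: RAII use of Ndb_dd_table while unpacking table metadata.
bool unpack_table_def(THD *thd, const dd::sdi_t &sdi) {
  Ndb_dd_table dd_table(thd);  // constructor allocates an empty dd::Table

  if (!dd_table.deserialize(sdi)) return false;  // parse the SDI into it

  dd::Table *table_def = dd_table.get_table_def();
  int id, version;
  if (!ndb_dd_table_get_object_id_and_version(table_def, id, version))
    return false;  // the SDI did not carry NDB's se_private_data

  return true;  // destructor deletes the dd::Table
}
```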
diff --git a/storage/ndb/plugin/ndb_dd_table.h b/storage/ndb/plugin/ndb_dd_table.h
index 478ce419a2e0..2a9cff0c3d50 100644
--- a/storage/ndb/plugin/ndb_dd_table.h
+++ b/storage/ndb/plugin/ndb_dd_table.h
@@ -30,52 +30,51 @@
 #include "sql/dd/string_type.h"
 
 namespace dd {
-  class Table;
-  typedef String_type sdi_t;
-  typedef unsigned long long Object_id;
-}
+class Table;
+typedef String_type sdi_t;
+typedef unsigned long long Object_id;
+}  // namespace dd
 
 /* Functions operating on dd::Table*, prefixed with ndb_dd_table_ */
 
 /* Save the tables object id and version in table definition */
-void ndb_dd_table_set_object_id_and_version(dd::Table* table_def,
-                                            int object_id, int object_version);
+void ndb_dd_table_set_object_id_and_version(dd::Table *table_def, int object_id,
+                                            int object_version);
 
 /* Return table definitions object id and version */
-bool
-ndb_dd_table_get_object_id_and_version(const dd::Table* table_def,
-                                       int& object_id, int& object_version);
+bool ndb_dd_table_get_object_id_and_version(const dd::Table *table_def,
+                                            int &object_id,
+                                            int &object_version);
 
 /* Return engine of table definition */
-dd::String_type ndb_dd_table_get_engine(const dd::Table* table_def);
-
+dd::String_type ndb_dd_table_get_engine(const dd::Table *table_def);
 
 /*
   Mark the table as being hidden, thus avoiding that it shows
   up in SHOW TABLES and information_schema queries.
 */
-void ndb_dd_table_mark_as_hidden(dd::Table* table_def);
+void ndb_dd_table_mark_as_hidden(dd::Table *table_def);
 
 /* Return number of columns in the table definition */
-size_t ndb_dd_table_get_num_columns(const dd::Table* table_def);
+size_t ndb_dd_table_get_num_columns(const dd::Table *table_def);
 
 /* Return true if table is using fixed row format */
-bool ndb_dd_table_is_using_fixed_row_format(const dd::Table* table_def);
+bool ndb_dd_table_is_using_fixed_row_format(const dd::Table *table_def);
 
 /* Set the row format of the table */
-void ndb_dd_table_set_row_format(dd::Table* table_def,
+void ndb_dd_table_set_row_format(dd::Table *table_def,
                                  const bool force_var_part);
 
 /*
@@ -83,7 +82,7 @@ void ndb_dd_table_set_row_format(dd::Table* table_def,
   partitions in NDB Dictionary. 
Return true if they are equal, false if not */ -bool ndb_dd_table_check_partition_count(const dd::Table* table_def, +bool ndb_dd_table_check_partition_count(const dd::Table *table_def, size_t ndb_num_partitions); /* @@ -95,22 +94,22 @@ bool ndb_dd_table_check_partition_count(const dd::Table* table_def, NOTE: Whether the number of partitions should be decided upstream at all is another question */ -void ndb_dd_table_fix_partition_count(dd::Table* table_def, +void ndb_dd_table_fix_partition_count(dd::Table *table_def, size_t ndb_num_partitions); /* Save the previous mysql version of the table. Applicable only for tables that have been upgraded */ -void ndb_dd_table_set_previous_mysql_version(dd::Table* table_def, +void ndb_dd_table_set_previous_mysql_version(dd::Table *table_def, ulong previous_mysql_version); /* Return the previous mysql version of the table. Returns false if previous_mysql_version is not set or invalid, true on success */ -bool ndb_dd_table_get_previous_mysql_version(const dd::Table* table_def, - ulong& previous_mysql_version); +bool ndb_dd_table_get_previous_mysql_version(const dd::Table *table_def, + ulong &previous_mysql_version); /* Set tablespace id for the table @@ -118,15 +117,15 @@ bool ndb_dd_table_get_previous_mysql_version(const dd::Table* table_def, void ndb_dd_table_set_tablespace_id(dd::Table *table_def, dd::Object_id tablespace_id); - /* interface for working with opaque dd::Table RAII-style */ class Ndb_dd_table { - class THD * const m_thd; - dd::Table* m_table_def; -public: + class THD *const m_thd; + dd::Table *m_table_def; + + public: Ndb_dd_table() = delete; - Ndb_dd_table(const Ndb_dd_table&) = delete; - Ndb_dd_table(THD* thd); + Ndb_dd_table(const Ndb_dd_table &) = delete; + Ndb_dd_table(THD *thd); ~Ndb_dd_table(); @@ -135,5 +134,4 @@ class Ndb_dd_table { dd::Table *get_table_def() { return m_table_def; } }; - #endif diff --git a/storage/ndb/plugin/ndb_dd_upgrade_table.cc b/storage/ndb/plugin/ndb_dd_upgrade_table.cc index 523e50176568..ec435d69016a 100644 --- a/storage/ndb/plugin/ndb_dd_upgrade_table.cc +++ b/storage/ndb/plugin/ndb_dd_upgrade_table.cc @@ -66,8 +66,8 @@ #include "sql/lock.h" // Tablespace_hash_set #include "sql/log.h" #include "sql/mdl.h" -#include "sql/mysqld.h" // mysql_real_data_home -#include "sql/parse_file.h" // File_option +#include "sql/mysqld.h" // mysql_real_data_home +#include "sql/parse_file.h" // File_option #include "sql/partition_element.h" #include "sql/partition_info.h" // partition_info #include "sql/psi_memory_key.h" // key_memory_TABLE diff --git a/storage/ndb/plugin/ndb_dd_upgrade_table.h b/storage/ndb/plugin/ndb_dd_upgrade_table.h index 2ad0a3852c91..439206f70178 100644 --- a/storage/ndb/plugin/ndb_dd_upgrade_table.h +++ b/storage/ndb/plugin/ndb_dd_upgrade_table.h @@ -44,15 +44,13 @@ namespace ndb_upgrade { @retval true ON SUCCESS @retval false ON FAILURE */ -bool migrate_table_to_dd(THD *thd, - const String_type &schema_name, +bool migrate_table_to_dd(THD *thd, const String_type &schema_name, const String_type &table_name, - const unsigned char* frm_data, + const unsigned char *frm_data, const unsigned int unpacked_len, bool is_fix_view_cols_and_deps); +} // namespace ndb_upgrade +} // namespace dd -} // namespace ndb_upgrade -} // namespace dd - -#endif // NDB_DD_UPGRADE_TABLE_H_INCLUDED +#endif // NDB_DD_UPGRADE_TABLE_H_INCLUDED diff --git a/storage/ndb/plugin/ndb_ddl_definitions.h b/storage/ndb/plugin/ndb_ddl_definitions.h index c3d7d59020e1..ea384d4534a4 100644 --- a/storage/ndb/plugin/ndb_ddl_definitions.h 
+++ b/storage/ndb/plugin/ndb_ddl_definitions.h @@ -25,10 +25,10 @@ #ifndef NDB_DDL_DEFINITIONS_H #define NDB_DDL_DEFINITIONS_H -#include "ndbapi/NdbDictionary.hpp" // Required for NdbDictionary::Table +#include "ndbapi/NdbDictionary.hpp" // Required for NdbDictionary::Table namespace dd { - class Table; +class Table; } int rename_table_impl(class THD *thd, class Ndb *ndb, diff --git a/storage/ndb/plugin/ndb_dist_priv_util.h b/storage/ndb/plugin/ndb_dist_priv_util.h index 221c312e45f1..1e1104237d87 100644 --- a/storage/ndb/plugin/ndb_dist_priv_util.h +++ b/storage/ndb/plugin/ndb_dist_priv_util.h @@ -29,29 +29,30 @@ class THD; class Ndb_dist_priv_util { size_t m_iter_curr_table; -public: - Ndb_dist_priv_util() - { - iter_reset(); - } - const char* database() const { return "mysql"; } + public: + Ndb_dist_priv_util() { iter_reset(); } + + const char *database() const { return "mysql"; } // Iterator for distributed priv tables name - const char* iter_next_table() - { - static const char* tables[] = - { "user", "db", "tables_priv", - "columns_priv", "procs_priv", "proxies_priv" + const char *iter_next_table() { + static const char *tables[] = { + "user", + "db", + "tables_priv", + "columns_priv", + "procs_priv", + "proxies_priv" #ifndef DBUG_OFF - , "mtr__acl_test_table" // For test ndb_ddl.dist_priv_migration + , + "mtr__acl_test_table" // For test ndb_ddl.dist_priv_migration #endif - }; + }; - if (m_iter_curr_table >= (sizeof(tables) / sizeof(tables[0]))) - return NULL; + if (m_iter_curr_table >= (sizeof(tables) / sizeof(tables[0]))) return NULL; m_iter_curr_table++; - return tables[m_iter_curr_table-1]; + return tables[m_iter_curr_table - 1]; } // Reset iterator to start at first table name @@ -59,27 +60,21 @@ class Ndb_dist_priv_util { // Determine if a given table name is in the list // of distributed priv tables - static - bool - is_distributed_priv_table(const char *db, const char *table) - { + static bool is_distributed_priv_table(const char *db, const char *table) { Ndb_dist_priv_util dist_priv; - if (strcmp(db, dist_priv.database())) - { - return false; // Ignore tables not in dist_priv database + if (strcmp(db, dist_priv.database())) { + return false; // Ignore tables not in dist_priv database } - const char* priv_table_name; - while((priv_table_name= dist_priv.iter_next_table())) - { - if (strcmp(table, priv_table_name) == 0) - { + const char *priv_table_name; + while ((priv_table_name = dist_priv.iter_next_table())) { + if (strcmp(table, priv_table_name) == 0) { return true; } } return false; } - static bool priv_tables_are_in_ndb(THD*); + static bool priv_tables_are_in_ndb(THD *); }; #endif diff --git a/storage/ndb/plugin/ndb_dummy_ts.h b/storage/ndb/plugin/ndb_dummy_ts.h index b9abe2cac5f4..7c3ca0973f39 100644 --- a/storage/ndb/plugin/ndb_dummy_ts.h +++ b/storage/ndb/plugin/ndb_dummy_ts.h @@ -37,103 +37,86 @@ namespace ndb_dummy_ts { - /** - Create SDI in a tablespace. This API should be used when - upgrading a tablespace with no SDI. - @param[in,out] tablespace tablespace object - @retval false success - @retval true failure - */ - static - bool sdi_create(dd::Tablespace*) - { - DBUG_ASSERT(false); // Never called - return false; // Success - } - - - /** - Drop SDI in a tablespace. This API should be used only - when SDI is corrupted. 
- @param[in,out] tablespace tablespace object - @retval false success - @retval true failure - */ - static - bool sdi_drop(dd::Tablespace*) - { - DBUG_ASSERT(false); // Never called - return false; // Success - } - +/** + Create SDI in a tablespace. This API should be used when + upgrading a tablespace with no SDI. + @param[in,out] tablespace tablespace object + @retval false success + @retval true failure +*/ +static bool sdi_create(dd::Tablespace *) { + DBUG_ASSERT(false); // Never called + return false; // Success +} - /** - Get the SDI keys in a tablespace into the vector provided. - @param[in] tablespace tablespace object - @param[in,out] vector vector to hold SDI keys - @retval false success - @retval true failure - */ - static - bool sdi_get_keys(const dd::Tablespace&, sdi_vector_t&) - { - DBUG_ASSERT(false); // Never called - return false; // Success - } +/** + Drop SDI in a tablespace. This API should be used only + when SDI is corrupted. + @param[in,out] tablespace tablespace object + @retval false success + @retval true failure +*/ +static bool sdi_drop(dd::Tablespace *) { + DBUG_ASSERT(false); // Never called + return false; // Success +} +/** + Get the SDI keys in a tablespace into the vector provided. + @param[in] tablespace tablespace object + @param[in,out] vector vector to hold SDI keys + @retval false success + @retval true failure +*/ +static bool sdi_get_keys(const dd::Tablespace &, sdi_vector_t &) { + DBUG_ASSERT(false); // Never called + return false; // Success +} - /** Retrieve SDI from tablespace - @param[in] tablespace tablespace object - @param[in] sdi_key SDI key - @param[in,out] sdi SDI retrieved from tablespace - @param[in,out] sdi_len in: size of memory allocated - out: actual length of SDI - @retval false success - @retval true failure - */ - static - bool sdi_get(const dd::Tablespace&, const sdi_key_t*, - void*, uint64*) - { - DBUG_ASSERT(false); // Never called - return false; // Success - } +/** Retrieve SDI from tablespace + @param[in] tablespace tablespace object + @param[in] sdi_key SDI key + @param[in,out] sdi SDI retrieved from tablespace + @param[in,out] sdi_len in: size of memory allocated + out: actual length of SDI + @retval false success + @retval true failure +*/ +static bool sdi_get(const dd::Tablespace &, const sdi_key_t *, void *, + uint64 *) { + DBUG_ASSERT(false); // Never called + return false; // Success +} +/** Insert/Update SDI in tablespace + @param[in] hton handlerton object + @param[in] tablespace tablespace object + @param[in] table table object + @param[in] sdi_key SDI key to uniquely identify the tablespace + object + @param[in] sdi SDI to be stored in tablespace + @param[in] sdi_len SDI length + @retval false success + @retval true failure +*/ +static bool sdi_set(handlerton *, const dd::Tablespace &, const dd::Table *, + const sdi_key_t *, const void *, uint64) { + return false; // Success +} - /** Insert/Update SDI in tablespace - @param[in] hton handlerton object - @param[in] tablespace tablespace object - @param[in] table table object - @param[in] sdi_key SDI key to uniquely identify the tablespace +/** + Delete SDI from tablespace + @param[in] tablespace tablespace object + @param[in] table table object + @param[in] sdi_key SDI key to uniquely identify the tablespace object - @param[in] sdi SDI to be stored in tablespace - @param[in] sdi_len SDI length - @retval false success - @retval true failure - */ - static - bool sdi_set(handlerton *, const dd::Tablespace&, const dd::Table*, - const sdi_key_t*, const void*, uint64) - 
{ - return false; // Success - } - - - /** - Delete SDI from tablespace - @param[in] tablespace tablespace object - @param[in] table table object - @param[in] sdi_key SDI key to uniquely identify the tablespace - object - @retval false success - @retval true failure - */ - static - bool sdi_delete(const dd::Tablespace&, const dd::Table*, - const sdi_key_t*) - { - return false; // Success - } + @retval false success + @retval true failure +*/ +static bool sdi_delete(const dd::Tablespace &, const dd::Table *, + const sdi_key_t *) { + return false; // Success } +} // namespace ndb_dummy_ts #endif diff --git a/storage/ndb/plugin/ndb_event_data.cc b/storage/ndb/plugin/ndb_event_data.cc index 5992597d150a..6e15a2775a3b 100644 --- a/storage/ndb/plugin/ndb_event_data.cc +++ b/storage/ndb/plugin/ndb_event_data.cc @@ -34,11 +34,8 @@ #include "storage/ndb/plugin/ndb_ndbapi_util.h" #include "storage/ndb/plugin/ndb_table_map.h" - -Ndb_event_data::Ndb_event_data(NDB_SHARE *the_share, size_t num_columns) : - shadow_table(nullptr), - share(the_share) -{ +Ndb_event_data::Ndb_event_data(NDB_SHARE *the_share, size_t num_columns) + : shadow_table(nullptr), share(the_share) { ndb_value[0] = nullptr; ndb_value[1] = nullptr; @@ -50,11 +47,8 @@ Ndb_event_data::Ndb_event_data(NDB_SHARE *the_share, size_t num_columns) : init_sql_alloc(PSI_INSTRUMENT_ME, &mem_root, 1024, 0); } - -Ndb_event_data::~Ndb_event_data() -{ - if (shadow_table) - closefrm(shadow_table, 1); +Ndb_event_data::~Ndb_event_data() { + if (shadow_table) closefrm(shadow_table, 1); shadow_table = nullptr; bitmap_free(&stored_columns); @@ -69,7 +63,6 @@ Ndb_event_data::~Ndb_event_data() my_free(ndb_value[0]); } - /* * While writing an UPDATE_ROW event to the binlog, a bitmap is * used to indicate which columns should be written. An @@ -85,25 +78,21 @@ Ndb_event_data::~Ndb_event_data() * columns from the After Image. A bitmap of primary key columns is * created for this purpose. */ -void Ndb_event_data::init_pk_bitmap() -{ - if (shadow_table->s->primary_key == MAX_KEY) - { +void Ndb_event_data::init_pk_bitmap() { + if (shadow_table->s->primary_key == MAX_KEY) { // Table without pk, no need for pk_bitmap since minimal is full return; } - KEY* key = shadow_table->key_info + shadow_table->s->primary_key; - KEY_PART_INFO* key_part_info = key->key_part; + KEY *key = shadow_table->key_info + shadow_table->s->primary_key; + KEY_PART_INFO *key_part_info = key->key_part; const uint key_parts = key->user_defined_key_parts; - for (uint i = 0; i < key_parts; i++, key_part_info++) - { + for (uint i = 0; i < key_parts; i++, key_part_info++) { bitmap_set_bit(&pk_bitmap, key_part_info->fieldnr - 1); } assert(!bitmap_is_clear_all(&pk_bitmap)); } - /* * Modify the column bitmaps generated for UPDATE_ROW as per * the MINIMAL binlog format type. 
Expected arguments: @@ -117,10 +106,8 @@ void Ndb_event_data::init_pk_bitmap() * - bitmaps contain all/updated cols as per ndb_log_updated_only */ void Ndb_event_data::generate_minimal_bitmap(MY_BITMAP *before, - MY_BITMAP *after) const -{ - if (shadow_table->s->primary_key == MAX_KEY) - { + MY_BITMAP *after) const { + if (shadow_table->s->primary_key == MAX_KEY) { // no usable PK bitmap, set Before Image = After Image bitmap_copy(before, after); return; @@ -133,37 +120,29 @@ void Ndb_event_data::generate_minimal_bitmap(MY_BITMAP *before, bitmap_subtract(after, &pk_bitmap); } - - -void Ndb_event_data::init_stored_columns() -{ - if (Ndb_table_map::has_virtual_gcol(shadow_table)) - { - for(uint i = 0 ; i < shadow_table->s->fields; i++) - { - Field * field = shadow_table->field[i]; - if (field->stored_in_db) - bitmap_set_bit(&stored_columns, i); +void Ndb_event_data::init_stored_columns() { + if (Ndb_table_map::has_virtual_gcol(shadow_table)) { + for (uint i = 0; i < shadow_table->s->fields; i++) { + Field *field = shadow_table->field[i]; + if (field->stored_in_db) bitmap_set_bit(&stored_columns, i); } - } - else - { + } else { bitmap_set_all(&stored_columns); // all columns are stored } } -TABLE* Ndb_event_data::open_shadow_table(THD* thd, const char* db, - const char* table_name, - const char* key, - const dd::Table* table_def, - THD* owner_thd) { +TABLE *Ndb_event_data::open_shadow_table(THD *thd, const char *db, + const char *table_name, + const char *key, + const dd::Table *table_def, + THD *owner_thd) { DBUG_ENTER("Ndb_event_data::open_shadow_table"); DBUG_ASSERT(table_def); // Allocate memory for shadow table from MEM_ROOT - TABLE_SHARE* shadow_table_share = - (TABLE_SHARE*)mem_root.Alloc(sizeof(TABLE_SHARE)); - TABLE* shadow_table = (TABLE*)mem_root.Alloc(sizeof(TABLE)); + TABLE_SHARE *shadow_table_share = + (TABLE_SHARE *)mem_root.Alloc(sizeof(TABLE_SHARE)); + TABLE *shadow_table = (TABLE *)mem_root.Alloc(sizeof(TABLE)); init_tmp_table_share(thd, shadow_table_share, db, 0, table_name, key, nullptr); @@ -172,8 +151,8 @@ TABLE* Ndb_event_data::open_shadow_table(THD* thd, const char* db, if ((error = open_table_def(thd, shadow_table_share, *table_def)) || (error = open_table_from_share( thd, shadow_table_share, "", 0, - (uint)(SKIP_NEW_HANDLER | DELAYED_OPEN | READ_ALL), 0, - shadow_table, false, table_def))) { + (uint)(SKIP_NEW_HANDLER | DELAYED_OPEN | READ_ALL), 0, shadow_table, + false, table_def))) { DBUG_PRINT("error", ("failed to open shadow table, error: %d", error)); free_table_share(shadow_table_share); DBUG_RETURN(nullptr); @@ -186,7 +165,8 @@ TABLE* Ndb_event_data::open_shadow_table(THD* thd, const char* db, // Allocate strings for db and table_name for shadow_table // in event_data's MEM_ROOT(where the shadow_table itself is allocated) lex_string_strmake(&mem_root, &shadow_table->s->db, db, strlen(db)); - lex_string_strmake(&mem_root, &shadow_table->s->table_name, table_name, strlen(table_name)); + lex_string_strmake(&mem_root, &shadow_table->s->table_name, table_name, + strlen(table_name)); shadow_table->in_use = owner_thd; @@ -205,24 +185,24 @@ TABLE* Ndb_event_data::open_shadow_table(THD* thd, const char* db, to the binlog injector. 
*/ -Ndb_event_data* Ndb_event_data::create_event_data( - THD* thd, NDB_SHARE* share, const char* db, const char* table_name, - const char* key, THD* owner_thd, const dd::Table* table_def) { +Ndb_event_data *Ndb_event_data::create_event_data( + THD *thd, NDB_SHARE *share, const char *db, const char *table_name, + const char *key, THD *owner_thd, const dd::Table *table_def) { DBUG_ENTER("Ndb_event_data::create_event_data"); DBUG_ASSERT(table_def); const size_t num_columns = ndb_dd_table_get_num_columns(table_def); - Ndb_event_data* event_data = new Ndb_event_data(share, num_columns); + Ndb_event_data *event_data = new Ndb_event_data(share, num_columns); // Setup THR_MALLOC to allocate memory from the MEM_ROOT in the // newly created Ndb_event_data - MEM_ROOT** root_ptr = THR_MALLOC; - MEM_ROOT* old_root = *root_ptr; + MEM_ROOT **root_ptr = THR_MALLOC; + MEM_ROOT *old_root = *root_ptr; *root_ptr = &event_data->mem_root; // Create the shadow table - TABLE* shadow_table = event_data->open_shadow_table(thd, db, table_name, key, + TABLE *shadow_table = event_data->open_shadow_table(thd, db, table_name, key, table_def, owner_thd); if (!shadow_table) { DBUG_PRINT("error", ("failed to open shadow table")); @@ -251,8 +231,7 @@ Ndb_event_data* Ndb_event_data::create_event_data( DBUG_RETURN(event_data); } -void Ndb_event_data::destroy(const Ndb_event_data* event_data) -{ +void Ndb_event_data::destroy(const Ndb_event_data *event_data) { DBUG_ENTER("Ndb_event_data::destroy"); delete event_data; @@ -264,6 +243,6 @@ uint32 Ndb_event_data::unpack_uint32(unsigned attr_id) const { return ndb_value[0][attr_id].rec->u_32_value(); } -const char* Ndb_event_data::unpack_string(unsigned attr_id) const { +const char *Ndb_event_data::unpack_string(unsigned attr_id) const { return ndb_value[0][attr_id].rec->aRef(); } diff --git a/storage/ndb/plugin/ndb_event_data.h b/storage/ndb/plugin/ndb_event_data.h index a1856fab8ebd..42a96f60fe38 100644 --- a/storage/ndb/plugin/ndb_event_data.h +++ b/storage/ndb/plugin/ndb_event_data.h @@ -25,11 +25,11 @@ #ifndef NDB_EVENT_DATA_H #define NDB_EVENT_DATA_H -#include "my_alloc.h" // MEM_ROOT +#include "my_alloc.h" // MEM_ROOT #include "storage/ndb/plugin/ndb_bitmap.h" namespace dd { - class Table; +class Table; } struct NDB_SHARE; @@ -43,21 +43,20 @@ union NdbValue; distribution. */ -class Ndb_event_data -{ +class Ndb_event_data { Ndb_event_data() = delete; - Ndb_event_data(const Ndb_event_data&) = delete; + Ndb_event_data(const Ndb_event_data &) = delete; - Ndb_event_data(NDB_SHARE* the_share, size_t num_columns); + Ndb_event_data(NDB_SHARE *the_share, size_t num_columns); ~Ndb_event_data(); void init_stored_columns(); void init_pk_bitmap(); - TABLE *open_shadow_table(class THD* thd, - const char *db, const char *table_name, - const char *key, const dd::Table *table_def, - class THD *owner_thd); -public: + TABLE *open_shadow_table(class THD *thd, const char *db, + const char *table_name, const char *key, + const dd::Table *table_def, class THD *owner_thd); + + public: MEM_ROOT mem_root; TABLE *shadow_table; NDB_SHARE *share; @@ -74,19 +73,15 @@ class Ndb_event_data // Factory function to create Ndb_event_data, open the shadow_table and // initialize bitmaps. 
-  static Ndb_event_data* create_event_data(class THD* thd,
-                                           NDB_SHARE *share,
-                                           const char *db,
-                                           const char *table_name,
-                                           const char *key,
-                                           class THD* owner_thd,
-                                           const dd::Table *table_def);
-  static void destroy(const Ndb_event_data*);
+  static Ndb_event_data *create_event_data(
+      class THD *thd, NDB_SHARE *share, const char *db, const char *table_name,
+      const char *key, class THD *owner_thd, const dd::Table *table_def);
+  static void destroy(const Ndb_event_data *);
 
   // Read uint32 value directly from NdbRecAttr in received event
   uint32 unpack_uint32(unsigned attr_id) const;
 
   // Read string value directly from NdbRecAttr in received event
-  const char* unpack_string(unsigned attr_id) const;
+  const char *unpack_string(unsigned attr_id) const;
 };
 
 #endif
diff --git a/storage/ndb/plugin/ndb_fk_util.cpp b/storage/ndb/plugin/ndb_fk_util.cpp
index 2fc210b2d061..5e3cd55d0e3e 100644
--- a/storage/ndb/plugin/ndb_fk_util.cpp
+++ b/storage/ndb/plugin/ndb_fk_util.cpp
@@ -28,7 +28,6 @@
 #include "storage/ndb/plugin/ndb_thd.h"
 #include "storage/ndb/plugin/ndb_thd_ndb.h"
 
-
 /**
   Split the given internal ndb object name into usable format.
   The object may be a table, index or a foreign key.
@@ -43,9 +42,7 @@
   @return On success, the actual name of the table, index or
           the FK is returned.
 */
-const char *
-fk_split_name(char dst[], const char * src, bool index)
-{
+const char *fk_split_name(char dst[], const char *src, bool index) {
   DBUG_PRINT("info", ("fk_split_name: %s index=%d", src, index));
 
   /**
@@ -53,17 +50,15 @@ fk_split_name(char dst[], const char * src, bool index)
    *
    * Store result in dst
    */
-  char * dstptr = dst;
-  const char * save = src;
-  while (src[0] != 0 && src[0] != '/')
-  {
-    * dstptr = * src;
+  char *dstptr = dst;
+  const char *save = src;
+  while (src[0] != 0 && src[0] != '/') {
+    *dstptr = *src;
     dstptr++;
     src++;
   }
 
-  if (src[0] == 0)
-  {
+  if (src[0] == 0) {
     /**
      * No '/' found
      *  set db to ''
@@ -79,12 +74,11 @@ fk_split_name(char dst[], const char * src, bool index)
   assert(src[0] == '/');
   src++;
-  * dstptr = 0;
+  *dstptr = 0;
   dstptr++;
 
   // Skip over catalog (not implemented)
-  while (src[0] != '/')
-  {
+  while (src[0] != '/') {
     src++;
   }
 
@@ -94,10 +88,8 @@ fk_split_name(char dst[], const char * src, bool index)
   /**
    * Indexes contain an extra /
    */
-  if (index)
-  {
-    while (src[0] != '/')
-    {
+  if (index) {
+    while (src[0] != '/') {
       src++;
     }
     assert(src[0] == '/');
@@ -108,7 +100,6 @@ fk_split_name(char dst[], const char * src, bool index)
   return dstptr;
 }
 
-
 /**
   Fetch all tables that are referenced by the given table as a part
   of a foreign key relationship.
@@ -123,62 +114,51 @@ fk_split_name(char dst[], const char * src, bool index)
   @return true    On success
           false   On failure
 */
-bool
-fetch_referenced_tables_from_ndb_dictionary(
-    THD* thd, const char* schema_name, const char* table_name,
-    std::set<std::pair<std::string, std::string>> &referenced_tables)
-{
+bool fetch_referenced_tables_from_ndb_dictionary(
+    THD *thd, const char *schema_name, const char *table_name,
+    std::set<std::pair<std::string, std::string>> &referenced_tables) {
   DBUG_ENTER("fetch_referenced_tables_from_ndb_dictionary");
 
-  Thd_ndb* thd_ndb = get_thd_ndb(thd);
-  Ndb* ndb = thd_ndb->ndb;
+  Thd_ndb *thd_ndb = get_thd_ndb(thd);
+  Ndb *ndb = thd_ndb->ndb;
 
   // save db
   Ndb_db_guard db_guard(ndb);
 
-  if (ndb->setDatabaseName(schema_name) != 0)
-  {
-    DBUG_PRINT("error", ("Error setting database '%s'. Error : %s",
-                         schema_name, ndb->getNdbError().message));
+  if (ndb->setDatabaseName(schema_name) != 0) {
+    DBUG_PRINT("error", ("Error setting database '%s'. 
Error : %s", schema_name, + ndb->getNdbError().message)); DBUG_RETURN(false); } - NdbDictionary::Dictionary *dict= ndb->getDictionary(); + NdbDictionary::Dictionary *dict = ndb->getDictionary(); Ndb_table_guard tab_guard(dict, table_name); - const NdbDictionary::Table *table= tab_guard.get_table(); - if (table == NULL) - { + const NdbDictionary::Table *table = tab_guard.get_table(); + if (table == NULL) { DBUG_PRINT("error", ("Unable to load table '%s.%s' from ndb. Error : %s", - schema_name, table_name, - dict->getNdbError().message)); + schema_name, table_name, dict->getNdbError().message)); DBUG_RETURN(false); } NdbDictionary::Dictionary::List obj_list; - if (dict->listDependentObjects(obj_list, *table) != 0) - { + if (dict->listDependentObjects(obj_list, *table) != 0) { DBUG_PRINT("error", ("Unable to list dependents of '%s.%s'. Error : %s", - schema_name, table_name, - dict->getNdbError().message)); + schema_name, table_name, dict->getNdbError().message)); DBUG_RETURN(false); } DBUG_PRINT("info", ("found %u dependent objects", obj_list.count)); - for (unsigned i = 0; i < obj_list.count; i++) - { - const NdbDictionary::Dictionary::List::Element &element= + for (unsigned i = 0; i < obj_list.count; i++) { + const NdbDictionary::Dictionary::List::Element &element = obj_list.elements[i]; - if (element.type != NdbDictionary::Object::ForeignKey) - { - DBUG_PRINT("info", ("skip non-FK '%s' type %d", - element.name, element.type)); + if (element.type != NdbDictionary::Object::ForeignKey) { + DBUG_PRINT("info", + ("skip non-FK '%s' type %d", element.name, element.type)); continue; } NdbDictionary::ForeignKey fk; - if (dict->getForeignKey(fk, element.name) != 0) - { + if (dict->getForeignKey(fk, element.name) != 0) { DBUG_PRINT("error", ("Unable to fetch foreign key '%s'. Error : %s", - element.name, - dict->getNdbError().message)); + element.name, dict->getNdbError().message)); DBUG_RETURN(false); } @@ -186,15 +166,14 @@ fetch_referenced_tables_from_ndb_dictionary( const char *parent_name = fk_split_name(parent_db, fk.getParentTable()); if (strcmp(parent_db, schema_name) == 0 && - strcmp(parent_name, table_name) == 0) - { + strcmp(parent_name, table_name) == 0) { // Given table is the parent of this FK. Skip adding. 
DBUG_PRINT("info", ("skip FK '%s'", element.name)); continue; } - DBUG_PRINT("info", ("Adding referenced tables '%s.%s'", - parent_db, parent_name)); + DBUG_PRINT("info", + ("Adding referenced tables '%s.%s'", parent_db, parent_name)); referenced_tables.insert( std::pair(parent_db, parent_name)); } diff --git a/storage/ndb/plugin/ndb_fk_util.h b/storage/ndb/plugin/ndb_fk_util.h index 2e724c66347e..ea307022833c 100644 --- a/storage/ndb/plugin/ndb_fk_util.h +++ b/storage/ndb/plugin/ndb_fk_util.h @@ -31,28 +31,24 @@ #include "storage/ndb/include/ndbapi/NdbApi.hpp" // Database name guard for Ndb objects -struct Ndb_db_guard -{ - Ndb_db_guard(class Ndb* ndb) { +struct Ndb_db_guard { + Ndb_db_guard(class Ndb *ndb) { this->ndb = ndb; - save_db= ndb->getDatabaseName(); + save_db = ndb->getDatabaseName(); } - void restore() { - ndb->setDatabaseName(save_db.c_str()); - } + void restore() { ndb->setDatabaseName(save_db.c_str()); } - ~Ndb_db_guard() { - ndb->setDatabaseName(save_db.c_str()); - } -private: - Ndb* ndb; + ~Ndb_db_guard() { ndb->setDatabaseName(save_db.c_str()); } + + private: + Ndb *ndb; std::string save_db; }; -const char* fk_split_name(char dst[], const char * src, bool index= false); +const char *fk_split_name(char dst[], const char *src, bool index = false); bool fetch_referenced_tables_from_ndb_dictionary( - class THD* thd, const char* schema_name, const char* table_name, + class THD *thd, const char *schema_name, const char *table_name, std::set> &referenced_tables); #endif diff --git a/storage/ndb/plugin/ndb_global_schema_lock.cc b/storage/ndb/plugin/ndb_global_schema_lock.cc index 411baa218622..3f1ea81df82a 100644 --- a/storage/ndb/plugin/ndb_global_schema_lock.cc +++ b/storage/ndb/plugin/ndb_global_schema_lock.cc @@ -30,7 +30,7 @@ #include "mysql/plugin.h" #include "sql/debug_sync.h" #include "sql/sql_class.h" -#include "sql/sql_thd_internal_api.h" // thd_query_unsafe +#include "sql/sql_thd_internal_api.h" // thd_query_unsafe #include "storage/ndb/include/ndbapi/NdbApi.hpp" #include "storage/ndb/plugin/ndb_schema_dist.h" #include "storage/ndb/plugin/ndb_sleep.h" @@ -48,7 +48,7 @@ * the schema change op before the coordinator will release the GSL. * As part of that, the participants will request a MDL-X-lock which blocks * due to the other client thread holding an MDL-IX-lock. Thus, we - * have effectively a deadlock between the client thread and the + * have effectively a deadlock between the client thread and the * schema change participant. 
* * We detect, and break, such deadlock by recording whether we @@ -77,17 +77,12 @@ static class Ndb_thd_gsl_participant { } } thd_gsl_participant; -static void ndb_set_gsl_participant(THD* thd) -{ - thd_gsl_participant= thd; -} +static void ndb_set_gsl_participant(THD *thd) { thd_gsl_participant = thd; } -static bool ndb_is_gsl_participant_active() -{ +static bool ndb_is_gsl_participant_active() { return (thd_gsl_participant != nullptr); } - /** * Another potential scenario for a deadlock between MDL and GSL locks is as * follows: @@ -120,30 +115,29 @@ static bool ndb_is_gsl_participant_active() */ static class Ndb_tablespace_gsl_guard { - std::mutex m_tablespace_gsl_acquired_mutex; // for m_tablespace_gsl_acquired + std::mutex m_tablespace_gsl_acquired_mutex;  // for m_tablespace_gsl_acquired bool m_tablespace_gsl_acquired{false}; -public: + public: void tablespace_gsl_acquired() { - std::lock_guard<std::mutex> - lock_gsl_acquired(m_tablespace_gsl_acquired_mutex); + std::lock_guard<std::mutex> lock_gsl_acquired( + m_tablespace_gsl_acquired_mutex); m_tablespace_gsl_acquired = true; } void tablespace_gsl_released() { - std::lock_guard<std::mutex> - lock_gsl_acquired(m_tablespace_gsl_acquired_mutex); + std::lock_guard<std::mutex> lock_gsl_acquired( + m_tablespace_gsl_acquired_mutex); m_tablespace_gsl_acquired = false; } bool is_tablespace_gsl_acquired() { - std::lock_guard<std::mutex> - lock_gsl_acquired(m_tablespace_gsl_acquired_mutex); + std::lock_guard<std::mutex> lock_gsl_acquired( + m_tablespace_gsl_acquired_mutex); return m_tablespace_gsl_acquired; } } tablespace_gsl_guard; - /* The lock/unlock functions use the BACKUP_SEQUENCE row in SYSTAB_0 @@ -157,10 +151,9 @@ static class Ndb_tablespace_gsl_guard { lock failed due to some NdbError. If there is no error code set, lock was rejected by lock manager, likely due to deadlock. 
*/ -static NdbTransaction * -gsl_lock_ext(THD *thd, Ndb *ndb, NdbError &ndb_error, bool no_retry = false, - bool no_wait = false) -{ +static NdbTransaction *gsl_lock_ext(THD *thd, Ndb *ndb, NdbError &ndb_error, + bool no_retry = false, + bool no_wait = false) { ndb->setDatabaseName("sys"); ndb->setDatabaseSchemaName("def"); NdbDictionary::Dictionary *dict = ndb->getDictionary(); @@ -169,58 +162,44 @@ gsl_lock_ext(THD *thd, Ndb *ndb, NdbError &ndb_error, bool no_retry = false, NdbOperation *op = nullptr; NdbTransaction *trans = nullptr; - while (1) - { - if (!ndbtab) - { - if (!(ndbtab= ndbtab_g.get_table())) - { - if (dict->getNdbError().status == NdbError::TemporaryError) - goto retry; + while (1) { + if (!ndbtab) { + if (!(ndbtab = ndbtab_g.get_table())) { + if (dict->getNdbError().status == NdbError::TemporaryError) goto retry; ndb_error = dict->getNdbError(); goto error_handler; } } trans = ndb->startTransaction(); - if (trans == nullptr) - { + if (trans == nullptr) { ndb_error = ndb->getNdbError(); goto error_handler; } op = trans->getNdbOperation(ndbtab); - if (op == nullptr) - { - if (dict->getNdbError().status == NdbError::TemporaryError) - goto retry; + if (op == nullptr) { + if (dict->getNdbError().status == NdbError::TemporaryError) goto retry; ndb_error = dict->getNdbError(); goto error_handler; } - if (op->readTuple(NdbOperation::LM_Exclusive)) - goto error_handler; - if (no_wait) - { - if (op->setNoWait()) - goto error_handler; + if (op->readTuple(NdbOperation::LM_Exclusive)) goto error_handler; + if (no_wait) { + if (op->setNoWait()) goto error_handler; } - if (op->equal("SYSKEY_0", NDB_BACKUP_SEQUENCE)) - goto error_handler; - if (trans->execute(NdbTransaction::NoCommit) == 0) - { + if (op->equal("SYSKEY_0", NDB_BACKUP_SEQUENCE)) goto error_handler; + if (trans->execute(NdbTransaction::NoCommit) == 0) { // The transaction is successful but still check if the operation has // failed since the abort mode is set to AO_IgnoreError. Error 635 // is the expected error when no_wait has been set and the row could not // be locked immediately - if (trans->getNdbError().code == 635) - goto error_handler; + if (trans->getNdbError().code == 635) goto error_handler; break; } if (trans->getNdbError().status != NdbError::TemporaryError) goto error_handler; - if (thd_killed(thd)) - goto error_handler; + if (thd_killed(thd)) goto error_handler; /** * Check for MDL / GSL deadlock. 
A deadlock is assumed if: @@ -244,14 +223,12 @@ gsl_lock_ext(THD *thd, Ndb *ndb, NdbError &ndb_error, bool no_retry = false, goto error_handler; retry: - if (trans) - { + if (trans) { ndb->closeTransaction(trans); trans = nullptr; } - if (no_retry) - { + if (no_retry) { break; } @@ -259,23 +236,18 @@ gsl_lock_ext(THD *thd, Ndb *ndb, NdbError &ndb_error, bool no_retry = false, } return trans; - error_handler: - if (trans) - { +error_handler: + if (trans) { ndb_error = trans->getNdbError(); ndb->closeTransaction(trans); } return nullptr; } - -static bool -gsl_unlock_ext(Ndb *ndb, NdbTransaction *trans, - NdbError &ndb_error) -{ - if (trans->execute(NdbTransaction::Commit)) - { - ndb_error= trans->getNdbError(); +static bool gsl_unlock_ext(Ndb *ndb, NdbTransaction *trans, + NdbError &ndb_error) { + if (trans->execute(NdbTransaction::Commit)) { + ndb_error = trans->getNdbError(); ndb->closeTransaction(trans); return false; } @@ -283,27 +255,22 @@ gsl_unlock_ext(Ndb *ndb, NdbTransaction *trans, return true; } -class Thd_proc_info_guard -{ -public: - Thd_proc_info_guard(THD *thd) - : m_thd(thd), m_proc_info(NULL) {} - void set(const char* message) - { - const char* old= thd_proc_info(m_thd, message); - if (!m_proc_info) - { +class Thd_proc_info_guard { + public: + Thd_proc_info_guard(THD *thd) : m_thd(thd), m_proc_info(NULL) {} + void set(const char *message) { + const char *old = thd_proc_info(m_thd, message); + if (!m_proc_info) { // Save the original on first change m_proc_info = old; } } - ~Thd_proc_info_guard() - { - if (m_proc_info) - thd_proc_info(m_thd, m_proc_info); + ~Thd_proc_info_guard() { + if (m_proc_info) thd_proc_info(m_thd, m_proc_info); } -private: - THD* const m_thd; + + private: + THD *const m_thd; const char *m_proc_info; }; @@ -315,27 +282,21 @@ class Thd_proc_info_guard lock/unlock calls are reference counted, so calls to lock must be matched to a call to unlock if the lock call succeeded */ -static -int -ndbcluster_global_schema_lock(THD *thd, - bool report_cluster_disconnected, - bool is_tablespace, - bool *victimized) -{ - Ndb *ndb= check_ndb_in_thd(thd); - Thd_ndb *thd_ndb= get_thd_ndb(thd); +static int ndbcluster_global_schema_lock(THD *thd, + bool report_cluster_disconnected, + bool is_tablespace, bool *victimized) { + Ndb *ndb = check_ndb_in_thd(thd); + Thd_ndb *thd_ndb = get_thd_ndb(thd); NdbError ndb_error; - *victimized= false; + *victimized = false; - if (thd_ndb->check_option(Thd_ndb::IS_SCHEMA_DIST_PARTICIPANT)) - { + if (thd_ndb->check_option(Thd_ndb::IS_SCHEMA_DIST_PARTICIPANT)) { ndb_set_gsl_participant(thd); return 0; } DBUG_ENTER("ndbcluster_global_schema_lock"); - if (thd_ndb->global_schema_lock_count) - { + if (thd_ndb->global_schema_lock_count) { // Remember that GSL was locked for tablespace if (is_tablespace) tablespace_gsl_guard.tablespace_gsl_acquired(); @@ -349,33 +310,29 @@ ndbcluster_global_schema_lock(THD *thd, DBUG_RETURN(0); } DBUG_ASSERT(thd_ndb->global_schema_lock_count == 0); - thd_ndb->global_schema_lock_count= 1; - thd_ndb->global_schema_lock_error= 0; + thd_ndb->global_schema_lock_count = 1; + thd_ndb->global_schema_lock_error = 0; DBUG_PRINT("exit", ("global_schema_lock_count: %d", thd_ndb->global_schema_lock_count)); - /* Take the lock */ Thd_proc_info_guard proc_info(thd); proc_info.set("Waiting for ndbcluster global schema lock"); - thd_ndb->global_schema_lock_trans= gsl_lock_ext(thd, ndb, ndb_error); + thd_ndb->global_schema_lock_trans = gsl_lock_ext(thd, ndb, ndb_error); - if (DBUG_EVALUATE_IF("sleep_after_global_schema_lock", 
true, false)) - { + if (DBUG_EVALUATE_IF("sleep_after_global_schema_lock", true, false)) { ndb_milli_sleep(6000); } - if (thd_ndb->global_schema_lock_trans) - { + if (thd_ndb->global_schema_lock_trans) { ndb_log_verbose(19, "Global schema lock acquired"); // Count number of global schema locks taken by this thread thd_ndb->schema_locks_count++; thd_ndb->global_schema_lock_count = 1; - DBUG_PRINT("info", ("schema_locks_count: %d", - thd_ndb->schema_locks_count)); + DBUG_PRINT("info", ("schema_locks_count: %d", thd_ndb->schema_locks_count)); // Remember that GSL was locked for tablespace if (is_tablespace) tablespace_gsl_guard.tablespace_gsl_acquired(); @@ -391,20 +348,19 @@ ndbcluster_global_schema_lock(THD *thd, * If GSL request failed due to no cluster connection (4009), * we consider the lock granted, else GSL request failed. */ - if (ndb_error.code != 4009) //No cluster connection + if (ndb_error.code != 4009) // No cluster connection { DBUG_ASSERT(thd_ndb->global_schema_lock_count == 1); // This reset triggers the special case in ndbcluster_global_schema_unlock() thd_ndb->global_schema_lock_count = 0; } - if (ndb_error.code == 266) //Deadlock resolution - { - ndb_log_info("Failed to acquire global schema lock due to deadlock resolution"); - *victimized= true; - } - else if (ndb_error.code != 4009 || report_cluster_disconnected) + if (ndb_error.code == 266) // Deadlock resolution { + ndb_log_info( + "Failed to acquire global schema lock due to deadlock resolution"); + *victimized = true; + } else if (ndb_error.code != 4009 || report_cluster_disconnected) { if (ndb_thd_is_background_thread(thd)) { // Don't push any warning when background thread fails to acquire GSL } else { @@ -412,30 +368,23 @@ ndbcluster_global_schema_lock(THD *thd, thd_ndb->push_warning("Could not acquire global schema lock"); } } - thd_ndb->global_schema_lock_error= ndb_error.code ? ndb_error.code : -1; + thd_ndb->global_schema_lock_error = ndb_error.code ? ndb_error.code : -1; DBUG_RETURN(-1); } - -static -int -ndbcluster_global_schema_unlock(THD *thd, bool is_tablespace) -{ - Thd_ndb *thd_ndb= get_thd_ndb(thd); - if (unlikely(thd_ndb == NULL)) - { +static int ndbcluster_global_schema_unlock(THD *thd, bool is_tablespace) { + Thd_ndb *thd_ndb = get_thd_ndb(thd); + if (unlikely(thd_ndb == NULL)) { return 0; } - if (thd_ndb->check_option(Thd_ndb::IS_SCHEMA_DIST_PARTICIPANT)) - { + if (thd_ndb->check_option(Thd_ndb::IS_SCHEMA_DIST_PARTICIPANT)) { ndb_set_gsl_participant(NULL); return 0; } if (thd_ndb->global_schema_lock_error != 4009 && - thd_ndb->global_schema_lock_count == 0) - { + thd_ndb->global_schema_lock_count == 0) { // Special case to handle unlock after failure to acquire GSL due to // any error other than 4009. 
// - when error 4009 occurs the lock is granted anyway and the lock count is @@ -448,36 +397,32 @@ ndbcluster_global_schema_unlock(THD *thd, bool is_tablespace) thd_ndb->global_schema_lock_count++; } - Ndb *ndb= thd_ndb->ndb; + Ndb *ndb = thd_ndb->ndb; DBUG_ENTER("ndbcluster_global_schema_unlock"); - NdbTransaction *trans= thd_ndb->global_schema_lock_trans; + NdbTransaction *trans = thd_ndb->global_schema_lock_trans; // Don't allow decrementing from zero DBUG_ASSERT(thd_ndb->global_schema_lock_count > 0); thd_ndb->global_schema_lock_count--; DBUG_PRINT("exit", ("global_schema_lock_count: %d", thd_ndb->global_schema_lock_count)); DBUG_ASSERT(ndb != NULL); - if (ndb == NULL) - { + if (ndb == NULL) { DBUG_RETURN(0); } DBUG_ASSERT(trans != NULL || thd_ndb->global_schema_lock_error != 0); - if (thd_ndb->global_schema_lock_count != 0) - { + if (thd_ndb->global_schema_lock_count != 0) { DBUG_RETURN(0); } - thd_ndb->global_schema_lock_error= 0; + thd_ndb->global_schema_lock_error = 0; - if (trans) - { - thd_ndb->global_schema_lock_trans= NULL; + if (trans) { + thd_ndb->global_schema_lock_trans = NULL; // Remember GSL for tablespace released if (is_tablespace) tablespace_gsl_guard.tablespace_gsl_released(); NdbError ndb_error; - if (!gsl_unlock_ext(ndb, trans, ndb_error)) - { + if (!gsl_unlock_ext(ndb, trans, ndb_error)) { ndb_log_warning("Failed to release global schema lock, error: (%d)%s", ndb_error.code, ndb_error.message); thd_ndb->push_ndb_error_warning(ndb_error); @@ -490,71 +435,58 @@ ndbcluster_global_schema_unlock(THD *thd, bool is_tablespace) DBUG_RETURN(0); } - -bool -ndb_gsl_lock(THD *thd, bool lock, bool is_tablespace, bool *victimized) -{ +bool ndb_gsl_lock(THD *thd, bool lock, bool is_tablespace, bool *victimized) { DBUG_ENTER("ndb_gsl_lock"); - if (lock) - { + if (lock) { if (ndbcluster_global_schema_lock(thd, true, is_tablespace, victimized) != 0) { DBUG_PRINT("error", ("Failed to lock global schema lock")); - DBUG_RETURN(true); // Error + DBUG_RETURN(true);  // Error } - DBUG_RETURN(false); // OK + DBUG_RETURN(false);  // OK } - *victimized= false; - if (ndbcluster_global_schema_unlock(thd, is_tablespace) != 0) - { + *victimized = false; + if (ndbcluster_global_schema_unlock(thd, is_tablespace) != 0) { DBUG_PRINT("error", ("Failed to unlock global schema lock")); - DBUG_RETURN(true); // Error + DBUG_RETURN(true);  // Error } - DBUG_RETURN(false); // OK + DBUG_RETURN(false);  // OK } -bool -Thd_ndb::has_required_global_schema_lock(const char* func) const -{ - if (global_schema_lock_error) - { +bool Thd_ndb::has_required_global_schema_lock(const char *func) const { + if (global_schema_lock_error) { // An error occurred while locking, either because // no connection to cluster or another user has locked // the lock -> ok, but caller should not allow to continue return false; } - if (global_schema_lock_trans) - { + if (global_schema_lock_trans) { global_schema_lock_trans->refresh(); - return true; // All OK + return true;  // All OK } // No attempt at taking global schema lock has been done, neither // error nor trans set -> programming error - LEX_CSTRING query= thd_query_unsafe(m_thd); - ndb_log_error("programming error, no lock taken while running " - "query '%*s' in function '%s'", - (int)query.length, query.str, func); + LEX_CSTRING query = thd_query_unsafe(m_thd); + ndb_log_error( + "programming error, no lock taken while running " + "query '%*s' in function '%s'", + (int)query.length, query.str, func); abort(); return false; } - #include 
"storage/ndb/plugin/ndb_global_schema_lock_guard.h" Ndb_global_schema_lock_guard::Ndb_global_schema_lock_guard(THD *thd) - : m_thd(thd), m_locked(false), m_try_locked(false) -{ -} + : m_thd(thd), m_locked(false), m_try_locked(false) {} - -Ndb_global_schema_lock_guard::~Ndb_global_schema_lock_guard() -{ +Ndb_global_schema_lock_guard::~Ndb_global_schema_lock_guard() { if (m_try_locked) unlock(); else if (m_locked) @@ -567,8 +499,7 @@ Ndb_global_schema_lock_guard::~Ndb_global_schema_lock_guard() * 'victimized' as part of deadlock resolution. In the later case we * retry the GSL locking. */ -int Ndb_global_schema_lock_guard::lock(void) -{ +int Ndb_global_schema_lock_guard::lock(void) { /* only one lock call allowed */ assert(!m_locked); @@ -577,25 +508,22 @@ int Ndb_global_schema_lock_guard::lock(void) lock/unlock calls are reference counted, the number of calls to lock and unlock need to match up. */ - m_locked= true; - bool victimized= false; + m_locked = true; + bool victimized = false; bool ret; - do - { + do { ret = ndbcluster_global_schema_lock(m_thd, false, false /* is_tablespace */, &victimized); if (ret && thd_killed(m_thd)) { // Failed to acuire GSL and THD is killed -> give up! - break; // Terminate loop + break; // Terminate loop } - } - while (victimized); + } while (victimized); return ret; } -bool Ndb_global_schema_lock_guard::try_lock(void) -{ +bool Ndb_global_schema_lock_guard::try_lock(void) { /* Always set m_locked, even if lock fails. Since the lock/unlock calls are reference counted, the number of calls to lock and unlock need to match up. @@ -604,27 +532,24 @@ bool Ndb_global_schema_lock_guard::try_lock(void) m_try_locked = true; Thd_ndb *thd_ndb = get_thd_ndb(m_thd); // Check if this thd has acquired GSL already - if (thd_ndb->global_schema_lock_count) - return false; + if (thd_ndb->global_schema_lock_count) return false; thd_ndb->global_schema_lock_error = 0; Ndb *ndb = check_ndb_in_thd(m_thd); NdbError ndb_error; // Attempt to take the GSL with no_retry and no_wait both set - thd_ndb->global_schema_lock_trans = gsl_lock_ext(m_thd, ndb, ndb_error, - true, /* no_retry */ - true /* no_wait */); + thd_ndb->global_schema_lock_trans = + gsl_lock_ext(m_thd, ndb, ndb_error, true, /* no_retry */ + true /* no_wait */); - if (thd_ndb->global_schema_lock_trans != nullptr) - { + if (thd_ndb->global_schema_lock_trans != nullptr) { ndb_log_verbose(19, "Global schema lock acquired"); // Count number of global schema locks taken by this thread thd_ndb->schema_locks_count++; thd_ndb->global_schema_lock_count = 1; - DBUG_PRINT("info", ("schema_locks_count: %d", - thd_ndb->schema_locks_count)); + DBUG_PRINT("info", ("schema_locks_count: %d", thd_ndb->schema_locks_count)); return true; } @@ -632,33 +557,27 @@ bool Ndb_global_schema_lock_guard::try_lock(void) return false; } - -bool Ndb_global_schema_lock_guard::unlock() -{ +bool Ndb_global_schema_lock_guard::unlock() { // This function should only be called in conjunction with try_lock() DBUG_ASSERT(m_try_locked); - Thd_ndb *thd_ndb= get_thd_ndb(m_thd); - if (unlikely(thd_ndb == nullptr)) - { + Thd_ndb *thd_ndb = get_thd_ndb(m_thd); + if (unlikely(thd_ndb == nullptr)) { return true; } Ndb *ndb = thd_ndb->ndb; - if (ndb == nullptr) - { + if (ndb == nullptr) { return true; } NdbTransaction *trans = thd_ndb->global_schema_lock_trans; thd_ndb->global_schema_lock_error = 0; - if (trans != nullptr) - { + if (trans != nullptr) { thd_ndb->global_schema_lock_trans = nullptr; thd_ndb->global_schema_lock_count = 0; NdbError ndb_error; - if 
(!gsl_unlock_ext(ndb, trans, ndb_error)) - { + if (!gsl_unlock_ext(ndb, trans, ndb_error)) { ndb_log_warning("Failed to release global schema lock, error: (%d)%s", ndb_error.code, ndb_error.message); thd_ndb->push_ndb_error_warning(ndb_error); diff --git a/storage/ndb/plugin/ndb_global_schema_lock_guard.h b/storage/ndb/plugin/ndb_global_schema_lock_guard.h index ebe641b6e09d..26d74eddcdc4 100644 --- a/storage/ndb/plugin/ndb_global_schema_lock_guard.h +++ b/storage/ndb/plugin/ndb_global_schema_lock_guard.h @@ -27,19 +27,17 @@ class THD; -class Ndb_global_schema_lock_guard -{ -public: +class Ndb_global_schema_lock_guard { + public: Ndb_global_schema_lock_guard(THD *thd); ~Ndb_global_schema_lock_guard(); int lock(void); bool try_lock(void); - bool unlock(); // Should be called only in conjunction with try_lock() -private: - THD* const m_thd; + bool unlock();  // Should be called only in conjunction with try_lock() + private: + THD *const m_thd; bool m_locked; bool m_try_locked; }; #endif - diff --git a/storage/ndb/plugin/ndb_local_connection.cc b/storage/ndb/plugin/ndb_local_connection.cc index 57ebddd4acec..50f3474bf9ed 100644 --- a/storage/ndb/plugin/ndb_local_connection.cc +++ b/storage/ndb/plugin/ndb_local_connection.cc @@ -24,21 +24,19 @@ #include "storage/ndb/plugin/ndb_local_connection.h" -#include "sql/mysqld.h" // next_query_id() +#include "sql/mysqld.h"  // next_query_id() #include "sql/sql_class.h" #include "sql/sql_prepare.h" #include "storage/ndb/plugin/ndb_log.h" class Ndb_local_connection::Impl { -public: - Impl(THD * thd_arg) : connection(thd_arg) {} + public: + Impl(THD *thd_arg) : connection(thd_arg) {} Ed_connection connection; }; -Ndb_local_connection::Ndb_local_connection(THD* thd_arg): - m_thd(thd_arg), - impl(std::make_unique<Impl>(thd_arg)) -{ +Ndb_local_connection::Ndb_local_connection(THD *thd_arg) + : m_thd(thd_arg), impl(std::make_unique<Impl>(thd_arg)) { assert(thd_arg); /* @@ -50,57 +48,46 @@ Ndb_local_connection::Ndb_local_connection(THD* thd_arg): Ndb_local_connection::~Ndb_local_connection() = default; -static inline bool -should_ignore_error(const uint* ignore_error_list, uint error) -{ +static inline bool should_ignore_error(const uint *ignore_error_list, + uint error) { DBUG_ENTER("should_ignore_error"); DBUG_PRINT("enter", ("error: %u", error)); - const uint* ignore_error = ignore_error_list; - while(*ignore_error) - { + const uint *ignore_error = ignore_error_list; + while (*ignore_error) { DBUG_PRINT("info", ("ignore_error: %u", *ignore_error)); - if (*ignore_error == error) - DBUG_RETURN(true); + if (*ignore_error == error) DBUG_RETURN(true); ignore_error++; } DBUG_PRINT("info", ("Don't ignore error")); DBUG_RETURN(false); } - class Suppressor { -public: + public: virtual ~Suppressor() {} - virtual bool should_ignore_error(Ed_connection& con) const = 0; + virtual bool should_ignore_error(Ed_connection &con) const = 0; }; - -bool -Ndb_local_connection::execute_query(MYSQL_LEX_STRING sql_text, - const uint* ignore_mysql_errors, - const Suppressor* suppressor) -{ +bool Ndb_local_connection::execute_query(MYSQL_LEX_STRING sql_text, + const uint *ignore_mysql_errors, + const Suppressor *suppressor) { DBUG_ENTER("Ndb_local_connection::execute_query"); - if (impl->connection.execute_direct(sql_text)) - { + if (impl->connection.execute_direct(sql_text)) { /* Error occurred while executing the query */ const uint last_errno = impl->connection.get_last_errno(); - assert(last_errno); // last_errno must have been set - const char* last_errmsg = impl->connection.get_last_error(); 
+ assert(last_errno);  // last_errno must have been set + const char *last_errmsg = impl->connection.get_last_error(); - DBUG_PRINT("error", ("Query '%s' failed, error: '%d: %s'", - sql_text.str, + DBUG_PRINT("error", ("Query '%s' failed, error: '%d: %s'", sql_text.str, last_errno, last_errmsg)); // catch some SQL parse errors in debug - assert(last_errno != ER_PARSE_ERROR && - last_errno != ER_EMPTY_QUERY); + assert(last_errno != ER_PARSE_ERROR && last_errno != ER_EMPTY_QUERY); /* Check if this is a MySQL level error that should be ignored */ if (ignore_mysql_errors && - should_ignore_error(ignore_mysql_errors, last_errno)) - { + should_ignore_error(ignore_mysql_errors, last_errno)) { /* MySQL level error suppressed -> return success */ m_thd->clear_error(); DBUG_RETURN(false); @@ -110,55 +97,46 @@ Ndb_local_connection::execute_query(MYSQL_LEX_STRING sql_text, Call the suppressor to check if it wants to silence this error */ - if (suppressor && - suppressor->should_ignore_error(impl->connection)) - { + if (suppressor && suppressor->should_ignore_error(impl->connection)) { /* Error suppressed -> return success */ m_thd->clear_error(); DBUG_RETURN(false); } - if (m_push_warnings) - { + if (m_push_warnings) { // Append the error which caused the error to thd's warning list - push_warning(m_thd, Sql_condition::SL_WARNING, - last_errno, last_errmsg); - } - else - { + push_warning(m_thd, Sql_condition::SL_WARNING, last_errno, last_errmsg); + } else { // Print the error to log file - ndb_log_error("Query '%s' failed, error: %d: %s", - sql_text.str, last_errno, last_errmsg); + ndb_log_error("Query '%s' failed, error: %d: %s", sql_text.str, + last_errno, last_errmsg); } DBUG_RETURN(true); } - DBUG_RETURN(false); // Success + DBUG_RETURN(false);  // Success } - /* Execute the query with even higher isolation than what execute_query provides to avoid that for example THD's status variables are changed */ -bool -Ndb_local_connection::execute_query_iso(MYSQL_LEX_STRING sql_text, - const uint* ignore_mysql_errors, - const Suppressor* suppressor) -{ +bool Ndb_local_connection::execute_query_iso(MYSQL_LEX_STRING sql_text, + const uint *ignore_mysql_errors, + const Suppressor *suppressor) { /* Don't allow queries to affect THD's status variables */ - struct System_status_var save_thd_status_var= m_thd->status_var; + struct System_status_var save_thd_status_var = m_thd->status_var; /* Check modified_non_trans_table is false (check if actually needed) */ assert(!m_thd->get_transaction()->has_modified_non_trans_table( - Transaction_ctx::STMT)); + Transaction_ctx::STMT)); /* Turn off binlogging */ - ulonglong save_thd_options= m_thd->variables.option_bits; + ulonglong save_thd_options = m_thd->variables.option_bits; assert(sizeof(save_thd_options) == sizeof(m_thd->variables.option_bits)); - m_thd->variables.option_bits&= ~OPTION_BIN_LOG; + m_thd->variables.option_bits &= ~OPTION_BIN_LOG; /* Increment query_id, the query_id is used when generating @@ -167,21 +145,17 @@ Ndb_local_connection::execute_query_iso(MYSQL_LEX_STRING sql_text, */ m_thd->set_query_id(next_query_id()); - bool result = execute_query(sql_text, - ignore_mysql_errors, - suppressor); + bool result = execute_query(sql_text, ignore_mysql_errors, suppressor); /* Restore THD settings */ - m_thd->variables.option_bits= save_thd_options; - m_thd->status_var= save_thd_status_var; + m_thd->variables.option_bits = save_thd_options; + m_thd->status_var = save_thd_status_var; return result; } -bool -Ndb_local_connection::truncate_table(const char* db, const 
char* table, - bool ignore_no_such_table) -{ +bool Ndb_local_connection::truncate_table(const char *db, const char *table, + bool ignore_no_such_table) { DBUG_ENTER("Ndb_local_connection::truncate_table"); DBUG_PRINT("enter", ("db: '%s', table: '%s'", db, table)); @@ -194,12 +168,10 @@ Ndb_local_connection::truncate_table(const char* db, const char* table, // Setup list of errors to ignore uint ignore_mysql_errors[2] = {0, 0}; - if (ignore_no_such_table) - ignore_mysql_errors[0] = ER_NO_SUCH_TABLE; + if (ignore_no_such_table) ignore_mysql_errors[0] = ER_NO_SUCH_TABLE; - DBUG_RETURN(execute_query_iso(sql_text.lex_string(), - ignore_mysql_errors, - NULL)); + DBUG_RETURN( + execute_query_iso(sql_text.lex_string(), ignore_mysql_errors, NULL)); } bool Ndb_local_connection::delete_rows(const std::string &db, @@ -218,8 +190,7 @@ bool Ndb_local_connection::delete_rows(const std::string &db, // Setup list of errors to ignore uint ignore_mysql_errors[2] = {0, 0}; - if (ignore_no_such_table) - ignore_mysql_errors[0] = ER_NO_SUCH_TABLE; + if (ignore_no_such_table) ignore_mysql_errors[0] = ER_NO_SUCH_TABLE; const LEX_STRING lex_string = {const_cast<char *>(sql_text.c_str()), sql_text.length()}; @@ -236,21 +207,17 @@ bool Ndb_local_connection::create_util_table(const std::string &table_def_sql) { DBUG_RETURN(execute_query_iso(sql_text, ignore_mysql_errors, nullptr)); } -bool Ndb_local_connection::run_acl_statement(const std::string & acl_sql) { +bool Ndb_local_connection::run_acl_statement(const std::string &acl_sql) { DBUG_ENTER("Ndb_local_connection::run_acl_statement"); - uint ignore_mysql_errors[2] = - { - ER_NO_SUCH_TABLE , - ER_NONEXISTING_TABLE_GRANT - }; + uint ignore_mysql_errors[2] = {ER_NO_SUCH_TABLE, ER_NONEXISTING_TABLE_GRANT}; ndb_log_verbose(30, "run_acl_statement: %s", acl_sql.c_str()); - MYSQL_LEX_STRING sql_text = { const_cast<char *>(acl_sql.c_str()), - acl_sql.length() }; + MYSQL_LEX_STRING sql_text = {const_cast<char *>(acl_sql.c_str()), + acl_sql.length()}; DBUG_RETURN(execute_query_iso(sql_text, ignore_mysql_errors, nullptr)); } -bool Ndb_local_connection::create_database(const std::string& database_name) { +bool Ndb_local_connection::create_database(const std::string &database_name) { DBUG_ENTER("Ndb_local_connection::create_database"); // Don't ignore any errors uint ignore_mysql_errors[1] = {0}; @@ -261,8 +228,7 @@ bool Ndb_local_connection::create_database(const std::string& database_name) { DBUG_RETURN(execute_query_iso(sql_text, ignore_mysql_errors, nullptr)); } - -bool Ndb_local_connection::drop_database(const std::string& database_name) { +bool Ndb_local_connection::drop_database(const std::string &database_name) { DBUG_ENTER("Ndb_local_connection::drop_database"); // Don't ignore any errors uint ignore_mysql_errors[1] = {0}; @@ -273,8 +239,7 @@ bool Ndb_local_connection::drop_database(const std::string& database_name) { DBUG_RETURN(execute_query_iso(sql_text, ignore_mysql_errors, nullptr)); } - -bool Ndb_local_connection::execute_database_ddl(const std::string& ddl_query) { +bool Ndb_local_connection::execute_database_ddl(const std::string &ddl_query) { DBUG_ENTER("Ndb_local_connection::execute_database_ddl"); // Don't ignore any errors uint ignore_mysql_errors[1] = {0}; @@ -284,23 +249,15 @@ bool Ndb_local_connection::execute_database_ddl(const std::string& ddl_query) { DBUG_RETURN(execute_query_iso(sql_text, ignore_mysql_errors, nullptr)); } - -bool -Ndb_local_connection::raw_run_query(const char* query, size_t query_length, - const int* suppress_errors) -{ +bool 
Ndb_local_connection::raw_run_query(const char *query, size_t query_length, + const int *suppress_errors) { DBUG_ENTER("Ndb_local_connection::raw_run_query"); - LEX_STRING sql_text = { const_cast<char *>(query), query_length }; + LEX_STRING sql_text = {const_cast<char *>(query), query_length}; - DBUG_RETURN(execute_query_iso(sql_text, - (const uint*)suppress_errors, - NULL)); + DBUG_RETURN(execute_query_iso(sql_text, (const uint *)suppress_errors, NULL)); } -Ed_result_set * -Ndb_local_connection::get_results() -{ +Ed_result_set *Ndb_local_connection::get_results() { return impl->connection.get_result_sets(); } - diff --git a/storage/ndb/plugin/ndb_local_connection.h b/storage/ndb/plugin/ndb_local_connection.h index f90c222b1d1a..e00cab8230e3 100644 --- a/storage/ndb/plugin/ndb_local_connection.h +++ b/storage/ndb/plugin/ndb_local_connection.h @@ -47,50 +47,48 @@ class THD; */ class Ndb_local_connection { -public: - Ndb_local_connection(THD* thd); + public: + Ndb_local_connection(THD *thd); ~Ndb_local_connection(); - bool truncate_table(const char* db, const char* table, + bool truncate_table(const char *db, const char *table, bool ignore_no_such_table); bool delete_rows(const std::string &db, const std::string &table, int ignore_no_such_table, const std::string &where); - bool create_util_table(const std::string& table_def_sql); + bool create_util_table(const std::string &table_def_sql); - bool create_database(const std::string& database_name); + bool create_database(const std::string &database_name); - bool drop_database(const std::string& database_name); + bool drop_database(const std::string &database_name); - bool execute_database_ddl(const std::string& ddl_query); + bool execute_database_ddl(const std::string &ddl_query); bool run_acl_statement(const std::string &acl_sql); /* Don't use this function for new implementation, backward compat. 
only */ - bool raw_run_query(const char* query, size_t query_length, - const int* suppress_errors); + bool raw_run_query(const char *query, size_t query_length, + const int *suppress_errors); -protected: + protected: bool execute_query_iso(MYSQL_LEX_STRING sql_text, - const uint* ignore_mysql_errors, - const class Suppressor* suppressor = NULL); + const uint *ignore_mysql_errors, + const class Suppressor *suppressor = NULL); - class Ed_result_set * get_results(); + class Ed_result_set *get_results(); -private: - bool execute_query(MYSQL_LEX_STRING sql_text, - const uint* ignore_mysql_errors, - const class Suppressor* suppressor = NULL); + private: + bool execute_query(MYSQL_LEX_STRING sql_text, const uint *ignore_mysql_errors, + const class Suppressor *suppressor = NULL); -protected: - THD* m_thd; + protected: + THD *m_thd; -private: + private: class Impl; std::unique_ptr<Impl> impl; bool m_push_warnings; }; - #endif diff --git a/storage/ndb/plugin/ndb_log.cc b/storage/ndb/plugin/ndb_log.cc index c8e5c8107218..b73be6966a99 100644 --- a/storage/ndb/plugin/ndb_log.cc +++ b/storage/ndb/plugin/ndb_log.cc @@ -25,7 +25,7 @@ #include "storage/ndb/plugin/ndb_log.h" #include <stdarg.h> -#include <stdio.h> // vfprintf, stderr +#include <stdio.h>  // vfprintf, stderr #include "my_dbug.h" #include "mysqld_error.h" @@ -48,10 +48,8 @@ */ -void -ndb_log_print(enum ndb_log_loglevel loglevel, - const char* prefix, const char* fmt, va_list args) -{ +void ndb_log_print(enum ndb_log_loglevel loglevel, const char *prefix, + const char *fmt, va_list args) { DBUG_ASSERT(fmt); int prio; @@ -61,16 +59,15 @@ ndb_log_print(enum ndb_log_loglevel loglevel, (void)vsnprintf(msg_buf, sizeof(msg_buf), fmt, args); // Print message to MySQL error log - switch (loglevel) - { + switch (loglevel) { case NDB_LOG_ERROR_LEVEL: - prio= ERROR_LEVEL; + prio = ERROR_LEVEL; break; case NDB_LOG_WARNING_LEVEL: - prio= WARNING_LEVEL; + prio = WARNING_LEVEL; break; case NDB_LOG_INFORMATION_LEVEL: - prio= INFORMATION_LEVEL; + prio = INFORMATION_LEVEL; break; default: // Should never happen, crash in debug @@ -85,9 +82,6 @@ ndb_log_print(enum ndb_log_loglevel loglevel, LogErr(prio, ER_NDB_LOG_ENTRY, msg_buf); } - - - /* Automatically detect any log message prefix used by the caller, these are important in order to distinguish which subsystem of ndbcluster @@ -114,39 +108,29 @@ ndb_log_print(enum ndb_log_loglevel loglevel, of Ndb_component where the prefix will be automatically set correctly. 
*/ -static -void -ndb_log_detect_prefix(const char* fmt, - const char** prefix, const char** fmt_start) -{ +static void ndb_log_detect_prefix(const char *fmt, const char **prefix, + const char **fmt_start) { // Check if string starts with "NDB <subsystem>:" by reading // at most 15 chars without colon, then a colon and space char subsystem[16], colon[2]; - if (sscanf(fmt, "NDB %15[^:]%1[:] ", subsystem, colon) == 2) - { - static - const char* allowed_prefixes[] = - { - "Binlog", // "NDB Binlog: " - "Slave" // "NDB Slave: " + if (sscanf(fmt, "NDB %15[^:]%1[:] ", subsystem, colon) == 2) { + static const char *allowed_prefixes[] = { + "Binlog",  // "NDB Binlog: " + "Slave"    // "NDB Slave: " }; const size_t num_allowed_prefixes = - sizeof(allowed_prefixes)/sizeof(allowed_prefixes[0]); + sizeof(allowed_prefixes) / sizeof(allowed_prefixes[0]); // Check if subsystem is in the list of allowed subsystem - for (size_t i = 0; i < num_allowed_prefixes; i++) - { - const char* allowed_prefix = allowed_prefixes[i]; + for (size_t i = 0; i < num_allowed_prefixes; i++) { + const char *allowed_prefix = allowed_prefixes[i]; - if (strncmp(subsystem, allowed_prefix, strlen(allowed_prefix)) == 0) - { + if (strncmp(subsystem, allowed_prefix, strlen(allowed_prefix)) == 0) { // String started with an allowed subsystem prefix, return // pointer to prefix and new start of format string *prefix = allowed_prefix; - *fmt_start = fmt + - 4 + /* "NDB " */ - strlen(allowed_prefix) + - 2; /* ": " */ + *fmt_start = fmt + 4 + /* "NDB " */ + strlen(allowed_prefix) + 2; /* ": " */ return; } } @@ -170,11 +154,9 @@ ndb_log_detect_prefix(const char* fmt, return; } -void -ndb_log_info(const char* fmt, ...) -{ - const char* prefix; - const char* fmt_start; +void ndb_log_info(const char *fmt, ...) { + const char *prefix; + const char *fmt_start; ndb_log_detect_prefix(fmt, &prefix, &fmt_start); va_list args; @@ -183,12 +165,9 @@ ndb_log_info(const char* fmt, ...) va_end(args); } - -void -ndb_log_warning(const char* fmt, ...) -{ - const char* prefix; - const char* fmt_start; +void ndb_log_warning(const char *fmt, ...) { + const char *prefix; + const char *fmt_start; ndb_log_detect_prefix(fmt, &prefix, &fmt_start); va_list args; @@ -197,12 +176,9 @@ ndb_log_warning(const char* fmt, ...) va_end(args); } - -void -ndb_log_error(const char* fmt, ...) -{ - const char* prefix; - const char* fmt_start; +void ndb_log_error(const char *fmt, ...) { + const char *prefix; + const char *fmt_start; ndb_log_detect_prefix(fmt, &prefix, &fmt_start); va_list args; @@ -214,22 +190,14 @@ ndb_log_error(const char* fmt, ...) // the verbose level is currently controlled by "ndb_extra_logging" extern ulong opt_ndb_extra_logging; -unsigned -ndb_log_get_verbose_level(void) -{ - return opt_ndb_extra_logging; -} - +unsigned ndb_log_get_verbose_level(void) { return opt_ndb_extra_logging; } -void -ndb_log_verbose(unsigned verbose_level, const char* fmt, ...) -{ +void ndb_log_verbose(unsigned verbose_level, const char *fmt, ...) { // Print message only if verbose level is set high enough - if (ndb_log_get_verbose_level() < verbose_level) - return; + if (ndb_log_get_verbose_level() < verbose_level) return; - const char* prefix; - const char* fmt_start; + const char *prefix; + const char *fmt_start; ndb_log_detect_prefix(fmt, &prefix, &fmt_start); va_list args; @@ -238,9 +206,7 @@ ndb_log_verbose(unsigned verbose_level, const char* fmt, ...) va_end(args); } -void -ndb_log_error_dump(const char* fmt, ...) -{ +void ndb_log_error_dump(const char *fmt, ...) 
{ va_list args; va_start(args, fmt); // Dump the message verbatim to stderr @@ -249,8 +215,4 @@ ndb_log_error_dump(const char* fmt, ...) fprintf(stderr, "\n"); } -void -ndb_log_flush_buffered_messages() -{ - flush_error_log_messages(); -} +void ndb_log_flush_buffered_messages() { flush_error_log_messages(); } diff --git a/storage/ndb/plugin/ndb_log.h b/storage/ndb/plugin/ndb_log.h index b43a3781d139..3f698a26a8de 100644 --- a/storage/ndb/plugin/ndb_log.h +++ b/storage/ndb/plugin/ndb_log.h @@ -38,27 +38,23 @@ unsigned ndb_log_get_verbose_level(void); NOTE! Messages will always be prefixed with "NDB:" and "NDB <prefix>:" if one of the prefix functions is used */ -void ndb_log_info(const char* fmt, ...) - MY_ATTRIBUTE((format(printf, 1, 2))); +void ndb_log_info(const char *fmt, ...) MY_ATTRIBUTE((format(printf, 1, 2))); -void ndb_log_warning(const char* fmt, ...) - MY_ATTRIBUTE((format(printf, 1, 2))); +void ndb_log_warning(const char *fmt, ...) MY_ATTRIBUTE((format(printf, 1, 2))); -void ndb_log_error(const char* fmt, ...) - MY_ATTRIBUTE((format(printf, 1, 2))); - -void ndb_log_verbose(unsigned verbose_level, const char* fmt, ...) - MY_ATTRIBUTE((format(printf, 2, 3))); +void ndb_log_error(const char *fmt, ...) MY_ATTRIBUTE((format(printf, 1, 2))); +void ndb_log_verbose(unsigned verbose_level, const char *fmt, ...) + MY_ATTRIBUTE((format(printf, 2, 3))); enum ndb_log_loglevel { - NDB_LOG_ERROR_LEVEL= 0, - NDB_LOG_WARNING_LEVEL= 1, - NDB_LOG_INFORMATION_LEVEL= 2 + NDB_LOG_ERROR_LEVEL = 0, + NDB_LOG_WARNING_LEVEL = 1, + NDB_LOG_INFORMATION_LEVEL = 2 }; -void ndb_log_print(enum ndb_log_loglevel loglevel, - const char* prefix, const char* fmt, va_list va_args) - MY_ATTRIBUTE((format(printf, 3, 0))); +void ndb_log_print(enum ndb_log_loglevel loglevel, const char *prefix, + const char *fmt, va_list va_args) + MY_ATTRIBUTE((format(printf, 3, 0))); /* @brief Write potentially long message to standard error. @@ -71,7 +67,7 @@ void ndb_log_print(enum ndb_log_loglevel loglevel, @param[in] fmt printf-like format string @param[in] ... Variable arguments matching format string */ -void ndb_log_error_dump(const char* fmt, ...) 
MY_ATTRIBUTE((format(printf, 1, 2))); /* diff --git a/storage/ndb/plugin/ndb_metadata.cc b/storage/ndb/plugin/ndb_metadata.cc index b77caffa044f..c5bdbe9753b2 100644 --- a/storage/ndb/plugin/ndb_metadata.cc +++ b/storage/ndb/plugin/ndb_metadata.cc @@ -29,7 +29,7 @@ #include #include -#include "my_base.h" // For HA_SM_DISK and HA_SM_MEMORY, fix by bug27309072 +#include "my_base.h" // For HA_SM_DISK and HA_SM_MEMORY, fix by bug27309072 #include "sql/dd/dd.h" #include "sql/dd/impl/properties_impl.h" #include "sql/dd/object_id.h" @@ -42,34 +42,28 @@ #include "storage/ndb/plugin/ndb_ndbapi_util.h" // Key used for magic flag "explicit_tablespace" in table options -static const char* magic_key_explicit_tablespace = "explicit_tablespace"; +static const char *magic_key_explicit_tablespace = "explicit_tablespace"; // Key used for flag "storage" in table options -const char* key_storage = "storage"; +const char *key_storage = "storage"; // Check also partitioning properties -constexpr bool check_partitioning = false; // disabled +constexpr bool check_partitioning = false; // disabled -dd::String_type -Ndb_metadata::partition_expression() -{ +dd::String_type Ndb_metadata::partition_expression() { dd::String_type expr; if (m_ndbtab->getFragmentType() == NdbDictionary::Table::HashMapPartition && m_ndbtab->getDefaultNoPartitionsFlag() && - m_ndbtab->getFragmentCount() == 0 && - m_ndbtab->getLinearFlag() == false) - { + m_ndbtab->getFragmentCount() == 0 && m_ndbtab->getLinearFlag() == false) { // Default partitioning return expr; } - const char* separator = ""; + const char *separator = ""; const int num_columns = m_ndbtab->getNoOfColumns(); - for (int i = 0; i < num_columns; i++) - { - const NdbDictionary::Column* column = m_ndbtab->getColumn(i); - if (column->getPartitionKey()) - { + for (int i = 0; i < num_columns; i++) { + const NdbDictionary::Column *column = m_ndbtab->getColumn(i); + if (column->getPartitionKey()) { expr.append(separator); expr.append(column->getName()); separator = ";"; @@ -78,14 +72,11 @@ Ndb_metadata::partition_expression() return expr; } - -bool -Ndb_metadata::create_table_def(dd::Table* table_def) -{ +bool Ndb_metadata::create_table_def(dd::Table *table_def) { DBUG_ENTER("Ndb_metadata::create_table_def"); // name - const char* table_name = m_ndbtab->getName(); + const char *table_name = m_ndbtab->getName(); table_def->set_name(table_name); DBUG_PRINT("info", ("table_name: '%s'", table_name)); @@ -96,18 +87,15 @@ Ndb_metadata::create_table_def(dd::Table* table_def) // then get their collation from the table. 
Each existing column which // needs a collation already has the correct value set as a property // on the column - //table_def->set_collation_id(some_collation_id); + // table_def->set_collation_id(some_collation_id); // engine table_def->set_engine("ndbcluster"); // row_format - if (m_ndbtab->getForceVarPart() == false) - { + if (m_ndbtab->getForceVarPart() == false) { table_def->set_row_format(dd::Table::RF_FIXED); - } - else - { + } else { table_def->set_row_format(dd::Table::RF_DYNAMIC); } @@ -121,53 +109,45 @@ Ndb_metadata::create_table_def(dd::Table* table_def) // table_def->set_comment(some_comment); // se_private_id, se_private_data - ndb_dd_table_set_object_id_and_version(table_def, - m_ndbtab->getObjectId(), + ndb_dd_table_set_object_id_and_version(table_def, m_ndbtab->getObjectId(), m_ndbtab->getObjectVersion()); // storage // no DD API setters or types available -> hardcode { - const NdbDictionary::Column::StorageType type = - m_ndbtab->getStorageType(); - switch (type) - { + const NdbDictionary::Column::StorageType type = m_ndbtab->getStorageType(); + switch (type) { case NdbDictionary::Column::StorageTypeDisk: - table_def->options().set(key_storage, - HA_SM_DISK); + table_def->options().set(key_storage, HA_SM_DISK); break; case NdbDictionary::Column::StorageTypeMemory: - table_def->options().set(key_storage, - HA_SM_MEMORY); - break; + table_def->options().set(key_storage, HA_SM_MEMORY); + break; case NdbDictionary::Column::StorageTypeDefault: // Not set break; } } - if (check_partitioning) - { + if (check_partitioning) { // partition_type dd::Table::enum_partition_type partition_type = dd::Table::PT_AUTO; - switch (m_ndbtab->getFragmentType()) - { - case NdbDictionary::Table::UserDefined: - DBUG_PRINT("info", ("UserDefined")); - // BY KEY - partition_type = dd::Table::PT_KEY_55; - break; - case NdbDictionary::Table::HashMapPartition: - DBUG_PRINT("info", ("HashMapPartition")); - if (m_ndbtab->getFragmentCount() != 0) - { + switch (m_ndbtab->getFragmentType()) { + case NdbDictionary::Table::UserDefined: + DBUG_PRINT("info", ("UserDefined")); + // BY KEY partition_type = dd::Table::PT_KEY_55; - } - break; - default: - // ndbcluster uses only two different FragmentTypes - DBUG_ASSERT(false); - break; + break; + case NdbDictionary::Table::HashMapPartition: + DBUG_PRINT("info", ("HashMapPartition")); + if (m_ndbtab->getFragmentCount() != 0) { + partition_type = dd::Table::PT_KEY_55; + } + break; + default: + // ndbcluster uses only two different FragmentTypes + DBUG_ASSERT(false); + break; } table_def->set_partition_type(partition_type); @@ -190,10 +170,7 @@ Ndb_metadata::create_table_def(dd::Table* table_def) DBUG_RETURN(true); } - -bool -Ndb_metadata::lookup_tablespace_id(THD* thd, dd::Table* table_def) -{ +bool Ndb_metadata::lookup_tablespace_id(THD *thd, dd::Table *table_def) { DBUG_ENTER("Ndb_metadata::lookup_tablespace_id"); Ndb_dd_client dd_client(thd); @@ -202,8 +179,7 @@ Ndb_metadata::lookup_tablespace_id(THD* thd, dd::Table* table_def) // tablespace_id // The id of the tablespace in DD. 
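Both the "storage" setting above and the "explicit_tablespace" flag used by compare_table_def() below go through the dd options() property bag, where a key may be absent, so the code always guards options().get() with options().exists(). A condensed sketch of that pattern; the helper name and its fallback semantics are illustrative assumptions, not part of this patch:

#include "my_inttypes.h"  // uint32
#include "sql/dd/types/table.h"

// Hypothetical helper: read an optional uint32 table option such as
// "storage", returning a caller-supplied default when the key was never
// set. Mirrors the exists()/get() guard used throughout this patch.
static uint32 get_option_or(const dd::Table *table_def, const char *key,
                            uint32 fallback) {
  uint32 value = fallback;
  if (table_def->options().exists(key)) {
    table_def->options().get(key, &value);
  }
  return value;
}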
- if (!ndb_table_has_tablespace(m_ndbtab)) - { + if (!ndb_table_has_tablespace(m_ndbtab)) { // No tablespace DBUG_RETURN(true); } @@ -213,13 +189,11 @@ Ndb_metadata::lookup_tablespace_id(THD* thd, dd::Table* table_def) table_def->options().set(magic_key_explicit_tablespace, true); // Lookup tablespace_by name if name is available - const char* tablespace_name = ndb_table_tablespace_name(m_ndbtab); - if (tablespace_name) - { + const char *tablespace_name = ndb_table_tablespace_name(m_ndbtab); + if (tablespace_name) { DBUG_PRINT("info", ("tablespace_name: '%s'", tablespace_name)); dd::Object_id tablespace_id; - if (!dd_client.lookup_tablespace_id(tablespace_name, &tablespace_id)) - { + if (!dd_client.lookup_tablespace_id(tablespace_name, &tablespace_id)) { // Failed DBUG_RETURN(false); } @@ -231,14 +205,13 @@ Ndb_metadata::lookup_tablespace_id(THD* thd, dd::Table* table_def) // Lookup tablespace_id by object id Uint32 object_id, object_version; - if (m_ndbtab->getTablespace(&object_id, &object_version)) - { - DBUG_PRINT("info", ("tablespace_id: %u, tablespace_version: %u", - object_id, object_version)); + if (m_ndbtab->getTablespace(&object_id, &object_version)) { + DBUG_PRINT("info", ("tablespace_id: %u, tablespace_version: %u", object_id, + object_version)); // NOTE! Need to store the object id and version of tablespace // in se_private_data to be able to lookup a tablespace by object id - m_compare_tablespace_id = false; // Skip comparing tablespace_id for now + m_compare_tablespace_id = false;  // Skip comparing tablespace_id for now DBUG_RETURN(true); } @@ -248,53 +221,45 @@ Ndb_metadata::lookup_tablespace_id(THD* thd, dd::Table* table_def) DBUG_RETURN(false); } - -bool Ndb_metadata::compare_table_def(const dd::Table* t1, const dd::Table* t2) -{ +bool Ndb_metadata::compare_table_def(const dd::Table *t1, const dd::Table *t2) { DBUG_ENTER("Ndb_metadata::compare_table_def"); class Compare_context { std::vector<std::string> diffs; - void add_diff(const char* property, std::string a, - std::string b) - { + void add_diff(const char *property, std::string a, std::string b) { std::string diff; - diff.append("Diff in '").append(property).append("' detected, '") - .append(a).append("' != '").append(b).append("'"); + diff.append("Diff in '") + .append(property) + .append("' detected, '") + .append(a) + .append("' != '") + .append(b) + .append("'"); diffs.push_back(diff); } - public: - - void compare(const char* property, - dd::String_type a, dd::String_type b) - { - if (a == b) - return; + public: + void compare(const char *property, dd::String_type a, dd::String_type b) { + if (a == b) return; add_diff(property, a.c_str(), b.c_str()); } - void compare(const char* property, - unsigned long long a, unsigned long long b) - { - if (a == b) - return; - add_diff(property, std::to_string(a), std::to_string(b)); + void compare(const char *property, unsigned long long a, + unsigned long long b) { + if (a == b) return; + add_diff(property, std::to_string(a), std::to_string(b)); } - bool equal(){ - if (diffs.size() == 0) - return true; + bool equal() { + if (diffs.size() == 0) return true; // Print the list of diffs - for (std::string diff : diffs) - std::cout << diff << std::endl; + for (std::string diff : diffs) std::cout << diff << std::endl; return false; } } ctx; - // name // When using lower_case_table_names==2 the table will be // created using lowercase in NDB while still being in original case in DD. 
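Compare_context above is a small collect-then-report helper: each mismatch is recorded as a string and everything is printed only when equal() is finally consulted, so a single pass reports all differences instead of stopping at the first. A stripped-down, self-contained sketch of the same idea; the class and member names here are illustrative, not from the patch:

#include <iostream>
#include <string>
#include <vector>

class Diff_collector {
  std::vector<std::string> m_diffs;

 public:
  // Record a mismatch instead of failing fast.
  void compare(const char *property, const std::string &a,
               const std::string &b) {
    if (a == b) return;
    m_diffs.push_back(std::string("Diff in '") + property + "' detected, '" +
                      a + "' != '" + b + "'");
  }

  // Print all recorded diffs and answer whether the objects were equal.
  bool equal() const {
    for (const std::string &diff : m_diffs) std::cout << diff << std::endl;
    return m_diffs.empty();
  }
};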
@@ -304,13 +269,10 @@ bool Ndb_metadata::compare_table_def(const dd::Table* t1, const dd::Table* t2) // ctx.compare("collation_id", t1->collation_id(), t2->collation_id()); // tablespace_id (local) - if (m_compare_tablespace_id) - { + if (m_compare_tablespace_id) { // The id has been looked up from DD ctx.compare("tablespace_id", t1->tablespace_id(), t2->tablespace_id()); - } - else - { + } else { // It's known that the table has a tablespace but it could not be // looked up (yet), just check that the DD definition has a tablespace_id DBUG_ASSERT(t1->tablespace_id()); @@ -319,13 +281,11 @@ bool Ndb_metadata::compare_table_def(const dd::Table* t1, const dd::Table* t2) // Check magic flag "options.explicit_tablespace" { bool t1_explicit = false; - bool t2_explicit= false; - if (t1->options().exists(magic_key_explicit_tablespace)) - { + bool t2_explicit = false; + if (t1->options().exists(magic_key_explicit_tablespace)) { t1->options().get(magic_key_explicit_tablespace, &t1_explicit); } - if (t2->options().exists(magic_key_explicit_tablespace)) - { + if (t2->options().exists(magic_key_explicit_tablespace)) { t2->options().get(magic_key_explicit_tablespace, &t2_explicit); } ctx.compare("options.explicit_tablespace", t1_explicit, t2_explicit); @@ -355,12 +315,10 @@ bool Ndb_metadata::compare_table_def(const dd::Table* t1, const dd::Table* t2) { uint32 t1_storage = UINT_MAX32; uint32 t2_storage = UINT_MAX32; - if (t1->options().exists(key_storage)) - { + if (t1->options().exists(key_storage)) { t1->options().get(key_storage, &t1_storage); } - if (t2->options().exists(key_storage)) - { + if (t2->options().exists(key_storage)) { t2->options().get(key_storage, &t2_storage); } // There's a known bug in tables created in mysql versions <= 5.1.57 where @@ -369,15 +327,14 @@ bool Ndb_metadata::compare_table_def(const dd::Table* t1, const dd::Table* t2) // skip the comparison of this attribute for tables created using earlier // versions ulong t1_previous_mysql_version = UINT_MAX32; - if (!ndb_dd_table_get_previous_mysql_version(t1, t1_previous_mysql_version) - || t1_previous_mysql_version > 50157) - { + if (!ndb_dd_table_get_previous_mysql_version(t1, + t1_previous_mysql_version) || + t1_previous_mysql_version > 50157) { ctx.compare("options.storage", t1_storage, t2_storage); } } - if (check_partitioning) - { + if (check_partitioning) { // partition_type ctx.compare("partition_type", t1->partition_type(), t2->partition_type()); // default_partitioning @@ -404,99 +361,82 @@ bool Ndb_metadata::compare_table_def(const dd::Table* t1, const dd::Table* t2) t2->subpartition_expression_utf8()); } - - if (ctx.equal()) - DBUG_RETURN(true); // Tables are identical + if (ctx.equal()) DBUG_RETURN(true);  // Tables are identical DBUG_RETURN(false); } - -bool Ndb_metadata::check_partition_info(const dd::Table* table_def) -{ +bool Ndb_metadata::check_partition_info(const dd::Table *table_def) { DBUG_ENTER("Ndb_metadata::check_partition_info"); // Compare the partition count of the NDB table with the partition // count of the table definition used by the caller const size_t dd_num_partitions = table_def->partitions().size(); const size_t ndb_num_partitions = m_ndbtab->getPartitionCount(); - if (ndb_num_partitions != dd_num_partitions) - { + if (ndb_num_partitions != dd_num_partitions) { std::cout << "Diff in 'partition count' detected, '" - << std::to_string(ndb_num_partitions) - << "' != '" << std::to_string(dd_num_partitions) - << "'" << std::endl; + << std::to_string(ndb_num_partitions) << "' != '" + << 
std::to_string(dd_num_partitions) << "'" << std::endl; DBUG_RETURN(false); } // Check if the engines of the partitions are as expected std::vector<std::string> diffs; - for (size_t i = 0; i < dd_num_partitions; i++) - { + for (size_t i = 0; i < dd_num_partitions; i++) { auto partition = table_def->partitions().at(i); // engine - if (table_def->engine() != partition->engine()) - { + if (table_def->engine() != partition->engine()) { std::string diff; diff.append("Diff in 'engine' for partition '") - .append(partition->name().c_str()).append("' detected, '") - .append(table_def->engine().c_str()).append("' != '") - .append(partition->engine().c_str()).append("'"); + .append(partition->name().c_str()) + .append("' detected, '") + .append(table_def->engine().c_str()) + .append("' != '") + .append(partition->engine().c_str()) + .append("'"); diffs.push_back(diff); } } - if (diffs.size() != 0) - { + if (diffs.size() != 0) { // Print the list of diffs - for (std::string diff : diffs) - { + for (std::string diff : diffs) { std::cout << diff << std::endl; } DBUG_RETURN(false); } DBUG_RETURN(true); - } - -bool Ndb_metadata::compare(THD* thd, - const NdbDictionary::Table* m_ndbtab, - const dd::Table* table_def) -{ +bool Ndb_metadata::compare(THD *thd, const NdbDictionary::Table *m_ndbtab, + const dd::Table *table_def) { Ndb_metadata ndb_metadata(m_ndbtab); // Transform NDB table to DD table def std::unique_ptr<dd::Table> ndb_table_def{dd::create_object<dd::Table>()}; - if (!ndb_metadata.create_table_def(ndb_table_def.get())) - { + if (!ndb_metadata.create_table_def(ndb_table_def.get())) { DBUG_ASSERT(false); return false; } // Lookup tablespace id from DD - if (!ndb_metadata.lookup_tablespace_id(thd, ndb_table_def.get())) - { + if (!ndb_metadata.lookup_tablespace_id(thd, ndb_table_def.get())) { DBUG_ASSERT(false); return false; } // Compare the table definition generated from the NDB table // with the table definition used by caller - if (!ndb_metadata.compare_table_def(table_def, ndb_table_def.get())) - { + if (!ndb_metadata.compare_table_def(table_def, ndb_table_def.get())) { DBUG_ASSERT(false); return false; } // Check the partition information of the table definition used by caller - if (!ndb_metadata.check_partition_info(table_def)) - { + if (!ndb_metadata.check_partition_info(table_def)) { DBUG_ASSERT(false); return false; } return true; } - - diff --git a/storage/ndb/plugin/ndb_metadata.h b/storage/ndb/plugin/ndb_metadata.h index b8696f68c94a..1f258f88b77d 100644 --- a/storage/ndb/plugin/ndb_metadata.h +++ b/storage/ndb/plugin/ndb_metadata.h @@ -30,7 +30,7 @@ #include "storage/ndb/include/ndbapi/NdbDictionary.hpp" namespace dd { - class Table; +class Table; } /* @@ -38,9 +38,9 @@ namespace dd { */ class Ndb_metadata { bool m_compare_tablespace_id{true}; - const NdbDictionary::Table* m_ndbtab; + const NdbDictionary::Table *m_ndbtab; - Ndb_metadata(const NdbDictionary::Table* ndbtab) : m_ndbtab(ndbtab) {} + Ndb_metadata(const NdbDictionary::Table *ndbtab) : m_ndbtab(ndbtab) {} /* @@ -58,7 +58,7 @@ class Ndb_metadata { @return ? 
*/ - bool create_table_def(dd::Table* table_def); + bool create_table_def(dd::Table *table_def); /* @brief lookup tablespace_id from DD @@ -66,8 +66,7 @@ class Ndb_metadata { @thd Thread context @table_def[out] DD table definition */ - bool lookup_tablespace_id(class THD* thd, dd::Table* table_def); - + bool lookup_tablespace_id(class THD *thd, dd::Table *table_def); /* @brief Compare two DD table definitions @@ -79,8 +78,7 @@ class Ndb_metadata { @note Only compares the properties which can be stored in NDB dictionary */ - bool compare_table_def(const dd::Table* t1, const dd::Table* t2); - + bool compare_table_def(const dd::Table *t1, const dd::Table *t2); /* @brief Check partition information in DD table definition @@ -90,9 +88,9 @@ class Ndb_metadata { @return true if the table's partition information is as expected */ - bool check_partition_info(const dd::Table* t1); + bool check_partition_info(const dd::Table *t1); -public: + public: /* @brief Compare the NdbApi table with the DD table definition @@ -102,9 +100,8 @@ class Ndb_metadata { @return true if the NdbApi table is identical to the DD table def. */ - static bool compare(class THD* thd, - const NdbDictionary::Table* ndbtab, - const dd::Table* table_def); + static bool compare(class THD *thd, const NdbDictionary::Table *ndbtab, + const dd::Table *table_def); }; #endif diff --git a/storage/ndb/plugin/ndb_metadata_change_monitor.cc b/storage/ndb/plugin/ndb_metadata_change_monitor.cc index 929ec47edd47..f1ecb13ddfb9 100644 --- a/storage/ndb/plugin/ndb_metadata_change_monitor.cc +++ b/storage/ndb/plugin/ndb_metadata_change_monitor.cc @@ -32,12 +32,12 @@ #include #include "my_dbug.h" -#include "mysql/psi/mysql_cond.h" // mysql_cond_t -#include "mysql/psi/mysql_mutex.h" // mysql_mutex_t -#include "sql/sql_class.h" // THD -#include "sql/table.h" // is_infoschema_db() / is_perfschema_db() -#include "storage/ndb/include/ndbapi/NdbError.hpp" // NdbError -#include "storage/ndb/plugin/ha_ndbcluster_binlog.h" // ndb_binlog_is_read_only +#include "mysql/psi/mysql_cond.h" // mysql_cond_t +#include "mysql/psi/mysql_mutex.h" // mysql_mutex_t +#include "sql/sql_class.h" // THD +#include "sql/table.h" // is_infoschema_db() / is_perfschema_db() +#include "storage/ndb/include/ndbapi/NdbError.hpp" // NdbError +#include "storage/ndb/plugin/ha_ndbcluster_binlog.h" // ndb_binlog_is_read_only #include "storage/ndb/plugin/ha_ndbcluster_connection.h" // ndbcluster_is_connected #include "storage/ndb/plugin/ndb_dd_client.h" // Ndb_dd_client #include "storage/ndb/plugin/ndb_ndbapi_util.h" // ndb_get_*_names diff --git a/storage/ndb/plugin/ndb_metadata_sync.cc b/storage/ndb/plugin/ndb_metadata_sync.cc index af8c4324c6a3..9004114de629 100644 --- a/storage/ndb/plugin/ndb_metadata_sync.cc +++ b/storage/ndb/plugin/ndb_metadata_sync.cc @@ -25,20 +25,20 @@ // Implements #include "storage/ndb/plugin/ndb_metadata_sync.h" -#include "sql/sql_class.h" // THD -#include "sql/sql_table.h" // build_table_filename -#include "storage/ndb/include/ndbapi/Ndb.hpp" // Ndb +#include "sql/sql_class.h" // THD +#include "sql/sql_table.h" // build_table_filename +#include "storage/ndb/include/ndbapi/Ndb.hpp" // Ndb #include "storage/ndb/plugin/ha_ndbcluster_binlog.h" // ndbcluster_binlog_setup_table #include "storage/ndb/plugin/ndb_dd_client.h" // Ndb_dd_client #include "storage/ndb/plugin/ndb_dd_disk_data.h" // ndb_dd_disk_data_get_object_id_and_version -#include "storage/ndb/plugin/ndb_dd_table.h" // ndb_dd_table_get_object_id_and_version -#include "storage/ndb/plugin/ndb_log.h" // 
ndb_log_* -#include "storage/ndb/plugin/ndb_ndbapi_util.h" // ndb_logfile_group_exists -#include "storage/ndb/plugin/ndb_schema_dist.h" // Ndb_schema_dist -#include "storage/ndb/plugin/ndb_table_guard.h" // Ndb_table_guard -#include "storage/ndb/plugin/ndb_tdc.h" // ndb_tdc_close_cached_table -#include "storage/ndb/plugin/ndb_thd.h" // get_thd_ndb -#include "storage/ndb/plugin/ndb_thd_ndb.h" // Thd_ndb +#include "storage/ndb/plugin/ndb_dd_table.h" // ndb_dd_table_get_object_id_and_version +#include "storage/ndb/plugin/ndb_log.h" // ndb_log_* +#include "storage/ndb/plugin/ndb_ndbapi_util.h" // ndb_logfile_group_exists +#include "storage/ndb/plugin/ndb_schema_dist.h" // Ndb_schema_dist +#include "storage/ndb/plugin/ndb_table_guard.h" // Ndb_table_guard +#include "storage/ndb/plugin/ndb_tdc.h" // ndb_tdc_close_cached_table +#include "storage/ndb/plugin/ndb_thd.h" // get_thd_ndb +#include "storage/ndb/plugin/ndb_thd_ndb.h" // Thd_ndb const char *Ndb_metadata_sync::object_type_str( object_detected_type type) const { diff --git a/storage/ndb/plugin/ndb_mi.cc b/storage/ndb/plugin/ndb_mi.cc index 2b111a490492..9e7f0caff4db 100644 --- a/storage/ndb/plugin/ndb_mi.cc +++ b/storage/ndb/plugin/ndb_mi.cc @@ -29,7 +29,6 @@ #include "sql/rpl_msr.h" #include "sql/rpl_rli.h" - /* Utility class for interacting with the global structure which holds information about the current multi source replication setup. @@ -41,100 +40,82 @@ So far the cluster replication only works with the default channel. */ class Multisource_info_guard { - Multisource_info_guard(const Multisource_info_guard&); - Multisource_info_guard& operator=(const Multisource_info_guard&); -public: - Multisource_info_guard() - { - channel_map.rdlock(); - } + Multisource_info_guard(const Multisource_info_guard &); + Multisource_info_guard &operator=(const Multisource_info_guard &); + + public: + Multisource_info_guard() { channel_map.rdlock(); } // Return the default channels Master_info* - Master_info* get_default_mi() const - { - Master_info* default_mi = channel_map.get_default_channel_mi(); + Master_info *get_default_mi() const { + Master_info *default_mi = channel_map.get_default_channel_mi(); // There should always be a default Master_info at this point DBUG_ASSERT(default_mi); return default_mi; } - ~Multisource_info_guard() - { + ~Multisource_info_guard() { // Unlock channel map channel_map.unlock(); } }; - -uint32 ndb_mi_get_master_server_id() -{ +uint32 ndb_mi_get_master_server_id() { Multisource_info_guard msi; - return (uint32) msi.get_default_mi()->master_id; + return (uint32)msi.get_default_mi()->master_id; } -const char* ndb_mi_get_group_master_log_name() -{ +const char *ndb_mi_get_group_master_log_name() { Multisource_info_guard msi; return msi.get_default_mi()->rli->get_group_master_log_name(); } -uint64 ndb_mi_get_group_master_log_pos() -{ +uint64 ndb_mi_get_group_master_log_pos() { Multisource_info_guard msi; - return (uint64) msi.get_default_mi()->rli->get_group_master_log_pos(); + return (uint64)msi.get_default_mi()->rli->get_group_master_log_pos(); } -uint64 ndb_mi_get_future_event_relay_log_pos() -{ +uint64 ndb_mi_get_future_event_relay_log_pos() { Multisource_info_guard msi; - return (uint64) msi.get_default_mi()->rli->get_future_event_relay_log_pos(); + return (uint64)msi.get_default_mi()->rli->get_future_event_relay_log_pos(); } -uint64 ndb_mi_get_group_relay_log_pos() -{ +uint64 ndb_mi_get_group_relay_log_pos() { Multisource_info_guard msi; - return (uint64) msi.get_default_mi()->rli->get_group_relay_log_pos(); + return 
(uint64)msi.get_default_mi()->rli->get_group_relay_log_pos(); } -bool ndb_mi_get_ignore_server_id(uint32 server_id) -{ +bool ndb_mi_get_ignore_server_id(uint32 server_id) { Multisource_info_guard msi; return (msi.get_default_mi()->shall_ignore_server_id(server_id) != 0); } -uint32 ndb_mi_get_slave_run_id() -{ +uint32 ndb_mi_get_slave_run_id() { Multisource_info_guard msi; return msi.get_default_mi()->rli->slave_run_id; } -ulong ndb_mi_get_relay_log_trans_retries() -{ +ulong ndb_mi_get_relay_log_trans_retries() { Multisource_info_guard msi; return msi.get_default_mi()->rli->trans_retries; } -void ndb_mi_set_relay_log_trans_retries(ulong number) -{ +void ndb_mi_set_relay_log_trans_retries(ulong number) { Multisource_info_guard msi; msi.get_default_mi()->rli->trans_retries = number; } -bool ndb_mi_get_slave_sql_running() -{ +bool ndb_mi_get_slave_sql_running() { Multisource_info_guard msi; return msi.get_default_mi()->rli->slave_running; } -ulong ndb_mi_get_slave_parallel_workers() -{ +ulong ndb_mi_get_slave_parallel_workers() { Multisource_info_guard msi; return msi.get_default_mi()->rli->opt_slave_parallel_workers; } -uint32 ndb_get_number_of_channels() -{ +uint32 ndb_get_number_of_channels() { Multisource_info_guard msi; return channel_map.get_num_instances(); } - diff --git a/storage/ndb/plugin/ndb_mi.h b/storage/ndb/plugin/ndb_mi.h index 440c8dd397f9..13bcc88569cb 100644 --- a/storage/ndb/plugin/ndb_mi.h +++ b/storage/ndb/plugin/ndb_mi.h @@ -38,7 +38,7 @@ Accessors */ uint32 ndb_mi_get_master_server_id(); -const char* ndb_mi_get_group_master_log_name(); +const char *ndb_mi_get_group_master_log_name(); uint64 ndb_mi_get_group_master_log_pos(); uint64 ndb_mi_get_future_event_relay_log_pos(); uint64 ndb_mi_get_group_relay_log_pos(); diff --git a/storage/ndb/plugin/ndb_modifiers.cc b/storage/ndb/plugin/ndb_modifiers.cc index 3c87e8d139a2..38c50d45bf90 100644 --- a/storage/ndb/plugin/ndb_modifiers.cc +++ b/storage/ndb/plugin/ndb_modifiers.cc @@ -27,22 +27,18 @@ #include #include "m_string.h" -#include "my_dbug.h" // DBUG_PRINT +#include "my_dbug.h" // DBUG_PRINT -static -bool -end_of_token(const char * str) -{ +static bool end_of_token(const char *str) { return str[0] == 0 || str[0] == ' ' || str[0] == ','; } -NDB_Modifiers::NDB_Modifiers(const char* prefix, - const NDB_Modifier modifiers[]) -{ +NDB_Modifiers::NDB_Modifiers(const char *prefix, + const NDB_Modifier modifiers[]) { m_prefix = prefix; m_prefixLen = strlen(prefix); - for (m_len = 0; modifiers[m_len].m_name != 0; m_len++) - {} + for (m_len = 0; modifiers[m_len].m_name != 0; m_len++) { + } m_modifiers = new NDB_Modifier[m_len + 1]; memcpy(m_modifiers, modifiers, (m_len + 1) * sizeof(NDB_Modifier)); m_comment_buf = NULL; @@ -52,106 +48,79 @@ NDB_Modifiers::NDB_Modifiers(const char* prefix, m_errmsg[0] = 0; } -NDB_Modifiers::~NDB_Modifiers() -{ - for (uint32 i = 0; i < m_len; i++) - { +NDB_Modifiers::~NDB_Modifiers() { + for (uint32 i = 0; i < m_len; i++) { if (m_modifiers[i].m_type == NDB_Modifier::M_STRING && - m_modifiers[i].m_val_str.str != NULL) - { - delete [] m_modifiers[i].m_val_str.str; + m_modifiers[i].m_val_str.str != NULL) { + delete[] m_modifiers[i].m_val_str.str; m_modifiers[i].m_val_str.str = NULL; } } - delete [] m_modifiers; - - delete [] m_comment_buf; + delete[] m_modifiers; + + delete[] m_comment_buf; } -int -NDB_Modifiers::parse_modifier(struct NDB_Modifier* m, - const char * str) -{ - if (m->m_found) - { - snprintf(m_errmsg, - sizeof(m_errmsg), - "%s : modifier %s specified twice", +int 
NDB_Modifiers::parse_modifier(struct NDB_Modifier *m, const char *str) { + if (m->m_found) { + snprintf(m_errmsg, sizeof(m_errmsg), "%s : modifier %s specified twice", m_prefix, m->m_name); return -1; } - switch(m->m_type){ - case NDB_Modifier::M_BOOL: - if (end_of_token(str)) - { - m->m_val_bool = true; - goto found; - } - if (str[0] != '=') - break; - - str++; - if (str[0] == '1' && end_of_token(str+1)) - { - m->m_val_bool = true; - goto found; - } + switch (m->m_type) { + case NDB_Modifier::M_BOOL: + if (end_of_token(str)) { + m->m_val_bool = true; + goto found; + } + if (str[0] != '=') break; - if (str[0] == '0' && end_of_token(str+1)) - { - m->m_val_bool = false; - goto found; - } - break; - case NDB_Modifier::M_STRING:{ - if (end_of_token(str)) - { - m->m_val_str.str = ""; - m->m_val_str.len = 0; - goto found; - } + str++; + if (str[0] == '1' && end_of_token(str + 1)) { + m->m_val_bool = true; + goto found; + } - if (str[0] != '=') + if (str[0] == '0' && end_of_token(str + 1)) { + m->m_val_bool = false; + goto found; + } break; + case NDB_Modifier::M_STRING: { + if (end_of_token(str)) { + m->m_val_str.str = ""; + m->m_val_str.len = 0; + goto found; + } - str++; - const char *start_str = str; - while (!end_of_token(str)) - str++; + if (str[0] != '=') break; - uint32 len = str - start_str; - char * tmp = new (std::nothrow) char[len+1]; - if (tmp == 0) - { - snprintf(m_errmsg, - sizeof(m_errmsg), - "Memory allocation error"); - return -1; + str++; + const char *start_str = str; + while (!end_of_token(str)) str++; + + uint32 len = str - start_str; + char *tmp = new (std::nothrow) char[len + 1]; + if (tmp == 0) { + snprintf(m_errmsg, sizeof(m_errmsg), "Memory allocation error"); + return -1; + } + memcpy(tmp, start_str, len); + tmp[len] = 0; // Null terminate for safe printing + m->m_val_str.len = len; + m->m_val_str.str = tmp; + goto found; } - memcpy(tmp, start_str, len); - tmp[len] = 0; // Null terminate for safe printing - m->m_val_str.len = len; - m->m_val_str.str = tmp; - goto found; - } } - { - const char * end = strpbrk(str, " ,"); - if (end) - { - snprintf(m_errmsg, - sizeof(m_errmsg), - "%s : invalid value '%.*s' for %s", + const char *end = strpbrk(str, " ,"); + if (end) { + snprintf(m_errmsg, sizeof(m_errmsg), "%s : invalid value '%.*s' for %s", m_prefix, (int)(end - str), str, m->m_name); - } - else - { - snprintf(m_errmsg, - sizeof(m_errmsg), - "%s : invalid value '%s' for %s", + } else { + snprintf(m_errmsg, sizeof(m_errmsg), "%s : invalid value '%s' for %s", m_prefix, str, m->m_name); } } @@ -161,33 +130,25 @@ NDB_Modifiers::parse_modifier(struct NDB_Modifier* m, return 0; } - -int -NDB_Modifiers::parseModifierListString(const char* string) -{ - const char* pos = string; +int NDB_Modifiers::parseModifierListString(const char *string) { + const char *pos = string; /* Attempt to extract modifiers */ - while (pos && pos[0] != 0 && pos[0] != ' ') - { - const char * end = strpbrk(pos, " ,"); // end of current modifier + while (pos && pos[0] != 0 && pos[0] != ' ') { + const char *end = strpbrk(pos, " ,"); // end of current modifier bool valid = false; /* Attempt to match modifier name */ - for (uint i = 0; i < m_len; i++) - { + for (uint i = 0; i < m_len; i++) { size_t l = m_modifiers[i].m_name_len; - if (native_strncasecmp(pos, m_modifiers[i].m_name, l) == 0) - { + if (native_strncasecmp(pos, m_modifiers[i].m_name, l) == 0) { /** * Found modifier... 
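* A name match alone is not enough: as checked just below, the name must also be followed by end-of-token or '=' before its value is parsed.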
*/ - if ((end_of_token(pos + l) || pos[l] == '=')) - { + if ((end_of_token(pos + l) || pos[l] == '=')) { pos += l; - if (parse_modifier(m_modifiers+i, pos) != 0) - { + if (parse_modifier(m_modifiers + i, pos) != 0) { DBUG_PRINT("info", ("Parse modifier failed")); return -1; } @@ -195,80 +156,57 @@ NDB_Modifiers::parseModifierListString(const char* string) valid = true; pos = end; - if (pos && pos[0] == ',') - pos++; - + if (pos && pos[0] == ',') pos++; + break; - } - else - { + } else { break; } } - } // for (modifier_name) - - if (!valid) - { - if (end) - { - snprintf(m_errmsg, - sizeof(m_errmsg), - "%s : unknown modifier: %.*s", + } // for (modifier_name) + + if (!valid) { + if (end) { + snprintf(m_errmsg, sizeof(m_errmsg), "%s : unknown modifier: %.*s", m_prefix, (int)(end - pos), pos); - } - else - { - snprintf(m_errmsg, - sizeof(m_errmsg), - "%s : unknown modifier: %s", + } else { + snprintf(m_errmsg, sizeof(m_errmsg), "%s : unknown modifier: %s", m_prefix, pos); } DBUG_PRINT("info", ("Error : %s", m_errmsg)); return -1; } - } // while pos + } // while pos - if (pos) - { + if (pos) { return pos - string; - } - else - { + } else { return strlen(string); } } - -int -NDB_Modifiers::loadComment(const char* str, - size_t len) -{ - if (m_comment_buf != NULL) - return -1; - if (len == 0) - { +int NDB_Modifiers::loadComment(const char *str, size_t len) { + if (m_comment_buf != NULL) return -1; + + if (len == 0) { return 0; } /* Load into internal string buffer */ - m_comment_buf = new (std::nothrow) char[len+1]; - if (m_comment_buf == NULL) - { - snprintf(m_errmsg, - sizeof(m_errmsg), - "Memory allocation failed"); + m_comment_buf = new (std::nothrow) char[len + 1]; + if (m_comment_buf == NULL) { + snprintf(m_errmsg, sizeof(m_errmsg), "Memory allocation failed"); return -1; } memcpy(m_comment_buf, str, len); m_comment_buf[len] = 0; uint32 inputLen = strlen(m_comment_buf); m_comment_len = inputLen; - - const char* pos = m_comment_buf; - + + const char *pos = m_comment_buf; + /* Check for comment prefix */ - if ((pos = strstr(pos, m_prefix)) == 0) - { + if ((pos = strstr(pos, m_prefix)) == 0) { /* No prefix - nothing to do */ return 0; } @@ -278,57 +216,41 @@ NDB_Modifiers::loadComment(const char* str, pos += m_prefixLen; int mod_len = parseModifierListString(pos); - if (mod_len > 0) - { + if (mod_len > 0) { m_mod_len = mod_len + m_prefixLen; return 0; - } - else - { - DBUG_PRINT("info", ("parseModifierListString (%s) returned %d", - pos, - mod_len)); + } else { + DBUG_PRINT("info", + ("parseModifierListString (%s) returned %d", pos, mod_len)); /* Parse error */ return -1; } } - -NDB_Modifier* -NDB_Modifiers::find(const char* name) const -{ - for (uint i = 0; i < m_len; i++) - { - if (native_strncasecmp(name, m_modifiers[i].m_name, m_modifiers[i].m_name_len) == 0) - { +NDB_Modifier *NDB_Modifiers::find(const char *name) const { + for (uint i = 0; i < m_len; i++) { + if (native_strncasecmp(name, m_modifiers[i].m_name, + m_modifiers[i].m_name_len) == 0) { return m_modifiers + i; } } return 0; } -const NDB_Modifier * -NDB_Modifiers::get(const char * name) const -{ +const NDB_Modifier *NDB_Modifiers::get(const char *name) const { return find(name); } -const NDB_Modifier * -NDB_Modifiers::notfound() const -{ - const NDB_Modifier * last = m_modifiers + m_len; +const NDB_Modifier *NDB_Modifiers::notfound() const { + const NDB_Modifier *last = m_modifiers + m_len; assert(last->m_found == false); - return last; // last has m_found == false + return last; // last has m_found == false } -bool 
-NDB_Modifiers::set(const char* name, bool value) -{ - NDB_Modifier* mod = find(name); - if (mod != NULL) - { - if (mod->m_type == NDB_Modifier::M_BOOL) - { +bool NDB_Modifiers::set(const char *name, bool value) { + NDB_Modifier *mod = find(name); + if (mod != NULL) { + if (mod->m_type == NDB_Modifier::M_BOOL) { mod->m_val_bool = value; mod->m_found = true; return true; @@ -337,115 +259,85 @@ NDB_Modifiers::set(const char* name, bool value) return false; } -bool -NDB_Modifiers::set(const char* name, const char* string) -{ - NDB_Modifier* mod = find(name); - if (mod != NULL) - { - if (mod->m_type == NDB_Modifier::M_STRING) - { +bool NDB_Modifiers::set(const char *name, const char *string) { + NDB_Modifier *mod = find(name); + if (mod != NULL) { + if (mod->m_type == NDB_Modifier::M_STRING) { uint32 len = strlen(string); - char* tmp = new (std::nothrow) char[len+1]; - if (tmp == NULL) - { + char *tmp = new (std::nothrow) char[len + 1]; + if (tmp == NULL) { return false; } memcpy(tmp, string, len); tmp[len] = 0; - - if (mod->m_found) - { + + if (mod->m_found) { /* Delete old */ - delete [] mod->m_val_str.str; + delete[] mod->m_val_str.str; } - + mod->m_val_str.str = tmp; mod->m_val_str.len = len; mod->m_found = true; - + return true; } } - return false; - + return false; } - -uint32 -NDB_Modifiers::generateModifierListString(char* buf, size_t buflen) const -{ +uint32 NDB_Modifiers::generateModifierListString(char *buf, + size_t buflen) const { size_t length = 0; bool first = true; /* if buf == NULL, we just calculate the length */ - - for (uint i = 0; i < m_len; i++) - { - const NDB_Modifier& mod = m_modifiers[i]; - if (mod.m_found) - { - if (!first) - { - if (buf) - { - snprintf(buf + length, - buflen - length, - ","); + + for (uint i = 0; i < m_len; i++) { + const NDB_Modifier &mod = m_modifiers[i]; + if (mod.m_found) { + if (!first) { + if (buf) { + snprintf(buf + length, buflen - length, ","); } - - length ++; + + length++; } first = false; - if (buf) - { - snprintf(buf + length, - buflen - length, - "%s", mod.m_name); + if (buf) { + snprintf(buf + length, buflen - length, "%s", mod.m_name); } - + length += mod.m_name_len; - - switch(mod.m_type) - { - case NDB_Modifier::M_BOOL: - { - if (buf) - { - snprintf(buf + length, - buflen - length, - "=%u", - mod.m_val_bool?1:0); + + switch (mod.m_type) { + case NDB_Modifier::M_BOOL: { + if (buf) { + snprintf(buf + length, buflen - length, "=%u", + mod.m_val_bool ? 
1 : 0); + } + length += 2; + break; } - length += 2; - break; - } - case NDB_Modifier::M_STRING: - { - if (buf) - { - snprintf(buf+length, - buflen - length, - "=%s", - mod.m_val_str.str); + case NDB_Modifier::M_STRING: { + if (buf) { + snprintf(buf + length, buflen - length, "=%s", mod.m_val_str.str); + } + length += (mod.m_val_str.len + 1); + break; } - length += (mod.m_val_str.len + 1); - break; - } } } } - + return length; } -const char* -NDB_Modifiers::generateCommentString() -{ +const char *NDB_Modifiers::generateCommentString() { assert(m_comment_len >= m_mod_start_offset + m_mod_len); - const uint32 postCommentLen = m_comment_len - - (m_mod_start_offset + m_mod_len); + const uint32 postCommentLen = + m_comment_len - (m_mod_start_offset + m_mod_len); /* Calculate new comment length */ @@ -453,46 +345,35 @@ NDB_Modifiers::generateCommentString() const uint32 newModListStringLen = generateModifierListString(NULL, 0); uint32 newModLen = 0; bool extraSpace = false; - if (newModListStringLen > 0) - { + if (newModListStringLen > 0) { newModLen += m_prefixLen; newModLen += newModListStringLen; - - if ((m_mod_len == 0) && - (postCommentLen > 0)) - { + + if ((m_mod_len == 0) && (postCommentLen > 0)) { /* Extra space to separate post comment */ extraSpace = true; newModLen++; } } - - const uint32 newCommentLen = (m_comment_len - m_mod_len) + - newModLen; + + const uint32 newCommentLen = (m_comment_len - m_mod_len) + newModLen; DBUG_PRINT("info", ("getCommentString : old comment %s len %d " "start %d len %d postLen %d", - m_comment_buf, - m_comment_len, - m_mod_start_offset, - m_mod_len, - postCommentLen)); - DBUG_PRINT("info", ("getCommentString : newModListStringLen : %u newModLen : %u " - "newCommentLen : %u", - newModListStringLen, - newModLen, - newCommentLen)); - - char* newBuf = new (std::nothrow) char[newCommentLen + 1]; - if (newBuf == NULL) - { - snprintf(m_errmsg, - sizeof(m_errmsg), - "Memory allocation failed"); + m_comment_buf, m_comment_len, m_mod_start_offset, + m_mod_len, postCommentLen)); + DBUG_PRINT("info", + ("getCommentString : newModListStringLen : %u newModLen : %u " + "newCommentLen : %u", + newModListStringLen, newModLen, newCommentLen)); + + char *newBuf = new (std::nothrow) char[newCommentLen + 1]; + if (newBuf == NULL) { + snprintf(m_errmsg, sizeof(m_errmsg), "Memory allocation failed"); return NULL; } - - char* insertPos = newBuf; + + char *insertPos = newBuf; uint32 remain = newCommentLen + 1; /* Copy pre-comment if any */ @@ -502,47 +383,40 @@ NDB_Modifiers::generateCommentString() const uint32 newStartOffset = insertPos - m_comment_buf; - if (newModListStringLen > 0) - { + if (newModListStringLen > 0) { /* Add prefix */ - memcpy(insertPos, - m_prefix, - m_prefixLen); + memcpy(insertPos, m_prefix, m_prefixLen); insertPos += m_prefixLen; remain -= m_prefixLen; - + /* Add modifier list */ - generateModifierListString(insertPos, - remain); - insertPos+= newModListStringLen; + generateModifierListString(insertPos, remain); + insertPos += newModListStringLen; remain -= newModListStringLen; } - if (postCommentLen) - { - if (extraSpace) - { + if (postCommentLen) { + if (extraSpace) { /* No modifiers before, but some comment content. 
Add a space */ *insertPos = ' '; insertPos++; remain--; } - memcpy(insertPos, - m_comment_buf + m_mod_start_offset + m_mod_len, + memcpy(insertPos, m_comment_buf + m_mod_start_offset + m_mod_len, postCommentLen); - insertPos+= postCommentLen; - remain-= postCommentLen; + insertPos += postCommentLen; + remain -= postCommentLen; } /* Add trailing 0 */ *insertPos = 0; - + assert(strlen(newBuf) == newCommentLen); DBUG_PRINT("info", ("comment = %s", newBuf)); - - delete [] m_comment_buf; + + delete[] m_comment_buf; /* Update stored state */ m_comment_buf = newBuf; @@ -552,4 +426,3 @@ NDB_Modifiers::generateCommentString() return m_comment_buf; } - diff --git a/storage/ndb/plugin/ndb_modifiers.h b/storage/ndb/plugin/ndb_modifiers.h index d7ec36008cba..607a5fcca2e9 100644 --- a/storage/ndb/plugin/ndb_modifiers.h +++ b/storage/ndb/plugin/ndb_modifiers.h @@ -31,16 +31,15 @@ * Support for create table/column modifiers * by exploiting the comment field */ -struct NDB_Modifier -{ +struct NDB_Modifier { enum { M_BOOL, M_STRING } m_type; - const char * m_name; + const char *m_name; size_t m_name_len; bool m_found; union { bool m_val_bool; struct { - const char * str; + const char *str; size_t len; } m_val_str; }; @@ -54,73 +53,65 @@ struct NDB_Modifier * and then allowing the string to be regenerated with the * modified values */ -class NDB_Modifiers -{ -public: - NDB_Modifiers(const char* prefix, - const NDB_Modifier modifiers[]); +class NDB_Modifiers { + public: + NDB_Modifiers(const char *prefix, const NDB_Modifier modifiers[]); ~NDB_Modifiers(); /** - * Load comment, with length - * (not necessailly a null terminated string - * returns negative in case of errors, + * Load comment, with length + * (not necessarily a null-terminated string); + * returns negative in case of errors, * details from getErrMsg() */ - int loadComment(const char* str, - size_t len); + int loadComment(const char *str, size_t len); /** * Get modifier...returns NULL if unknown */ - const NDB_Modifier * get(const char * name) const; + const NDB_Modifier *get(const char *name) const; /** * return a modifier which has m_found == false */ - const NDB_Modifier * notfound() const; + const NDB_Modifier *notfound() const; /** * Set value of modifier */ - bool set(const char* name, bool value); - bool set(const char* name, const char* string); + bool set(const char *name, bool value); + bool set(const char *name, const char *string); /** * Generate comment string with current set modifiers - * Returns null in case of errors, + * Returns null in case of errors, * details from getErrMsg() */ - const char* generateCommentString(); + const char *generateCommentString(); /** * Get error detail string */ - const char* getErrMsg() const - { - return m_errmsg; - } -private: - const char* m_prefix; + const char *getErrMsg() const { return m_errmsg; } + + private: + const char *m_prefix; uint32 m_prefixLen; uint m_len; - struct NDB_Modifier * m_modifiers; + struct NDB_Modifier *m_modifiers; - char* m_comment_buf; + char *m_comment_buf; uint32 m_comment_len; uint32 m_mod_start_offset; uint32 m_mod_len; char m_errmsg[100]; - NDB_Modifier* find(const char* name) const; - - int parse_modifier(struct NDB_Modifier* m, const char * str); - int parseModifierListString(const char* string); - uint32 generateModifierListString(char 
*buf, size_t buflen) const; }; - #endif diff --git a/storage/ndb/plugin/ndb_name_util.cc b/storage/ndb/plugin/ndb_name_util.cc index a3f6a6f5e08b..59d946f67b01 100644 --- a/storage/ndb/plugin/ndb_name_util.cc +++ b/storage/ndb/plugin/ndb_name_util.cc @@ -22,7 +22,6 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - #include "storage/ndb/plugin/ndb_name_util.h" #include "m_string.h" // strend() @@ -32,66 +31,56 @@ Set a given location from full pathname to database name. */ -void ndb_set_dbname(const char *path_name, char *dbname) -{ +void ndb_set_dbname(const char *path_name, char *dbname) { const char *end, *ptr; char tmp_buff[FN_REFLEN + 1]; - char *tmp_name= tmp_buff; + char *tmp_name = tmp_buff; /* Scan name from the end */ - ptr= strend(path_name)-1; + ptr = strend(path_name) - 1; while (ptr >= path_name && *ptr != '\\' && *ptr != '/') { ptr--; } ptr--; - end= ptr; + end = ptr; while (ptr >= path_name && *ptr != '\\' && *ptr != '/') { ptr--; } - uint name_len= (uint)(end - ptr); + uint name_len = (uint)(end - ptr); memcpy(tmp_name, ptr + 1, name_len); - tmp_name[name_len]= '\0'; + tmp_name[name_len] = '\0'; filename_to_tablename(tmp_name, dbname, sizeof(tmp_buff) - 1); } - - /** Set a given location from full pathname to table file. */ -void -ndb_set_tabname(const char *path_name, char * tabname) -{ +void ndb_set_tabname(const char *path_name, char *tabname) { const char *end, *ptr; char tmp_buff[FN_REFLEN + 1]; - char *tmp_name= tmp_buff; + char *tmp_name = tmp_buff; /* Scan name from the end */ - end= strend(path_name)-1; - ptr= end; + end = strend(path_name) - 1; + ptr = end; while (ptr >= path_name && *ptr != '\\' && *ptr != '/') { ptr--; } - uint name_len= (uint)(end - ptr); + uint name_len = (uint)(end - ptr); memcpy(tmp_name, ptr + 1, end - ptr); - tmp_name[name_len]= '\0'; + tmp_name[name_len] = '\0'; filename_to_tablename(tmp_name, tabname, sizeof(tmp_buff) - 1); } -bool ndb_name_is_temp(const char *name) -{ +bool ndb_name_is_temp(const char *name) { return is_prefix(name, tmp_file_prefix) == 1; } - -bool ndb_name_is_blob_prefix(const char* name) -{ +bool ndb_name_is_blob_prefix(const char *name) { return is_prefix(name, "NDB$BLOB"); } - -bool ndb_name_is_index_stat(const char* name) -{ +bool ndb_name_is_index_stat(const char *name) { return is_prefix(name, "ndb_index_stat"); } diff --git a/storage/ndb/plugin/ndb_name_util.h b/storage/ndb/plugin/ndb_name_util.h index cb71a86b3777..5c1018bf5182 100644 --- a/storage/ndb/plugin/ndb_name_util.h +++ b/storage/ndb/plugin/ndb_name_util.h @@ -25,7 +25,6 @@ #ifndef NDB_NAME_UTIL_H #define NDB_NAME_UTIL_H - void ndb_set_dbname(const char *pathname, char *dbname); void ndb_set_tabname(const char *pathname, char *tabname); @@ -33,7 +32,7 @@ void ndb_set_tabname(const char *pathname, char *tabname); Return true if name starts with the prefix used for temporary name (normally this is "#sql") */ -bool ndb_name_is_temp(const char* name); +bool ndb_name_is_temp(const char *name); /* Return true if name starts with the prefix used for NDB blob @@ -42,7 +41,7 @@ bool ndb_name_is_temp(const char* name); NOTE! Those tables are internal but still returned in the public parts of NdbApi so they may need to be filtered in various places. */ -bool ndb_name_is_blob_prefix(const char* name); +bool ndb_name_is_blob_prefix(const char *name); /* Return true if name starts with the prefix used for Index @@ -51,6 +50,6 @@ bool ndb_name_is_blob_prefix(const char* name); NOTE! 
These tables are not installed in DD at this point of time so they need to be filtered out */ -bool ndb_name_is_index_stat(const char* name); +bool ndb_name_is_index_stat(const char *name); #endif diff --git a/storage/ndb/plugin/ndb_ndbapi_util.cc b/storage/ndb/plugin/ndb_ndbapi_util.cc index 0cfacec8c67c..dc9306c635e2 100644 --- a/storage/ndb/plugin/ndb_ndbapi_util.cc +++ b/storage/ndb/plugin/ndb_ndbapi_util.cc @@ -24,29 +24,26 @@ #include "storage/ndb/plugin/ndb_ndbapi_util.h" -#include // memcpy +#include // memcpy #include "my_byteorder.h" -#include "storage/ndb/plugin/ndb_name_util.h" // ndb_name_is_temp - +#include "storage/ndb/plugin/ndb_name_util.h" // ndb_name_is_temp void ndb_pack_varchar(const NdbDictionary::Table *ndbtab, unsigned column_index, - char (&buf)[512], const char *str, size_t sz) -{ + char (&buf)[512], const char *str, size_t sz) { // Get the column, cast to int to help compiler choose // the "const int" overload rather than "const char*" - const NdbDictionary::Column* col = + const NdbDictionary::Column *col = ndbtab->getColumn(static_cast(column_index)); assert(col->getLength() <= (int)sizeof(buf)); - switch (col->getArrayType()) - { + switch (col->getArrayType()) { case NdbDictionary::Column::ArrayTypeFixed: memcpy(buf, str, sz); break; case NdbDictionary::Column::ArrayTypeShortVar: - *(uchar*)buf= (uchar)sz; + *(uchar *)buf = (uchar)sz; memcpy(buf + 1, str, sz); break; case NdbDictionary::Column::ArrayTypeMediumVar: @@ -56,18 +53,15 @@ void ndb_pack_varchar(const NdbDictionary::Table *ndbtab, unsigned column_index, } } -void -ndb_pack_varchar(const NdbDictionary::Column* col, size_t offset, - const char* str, size_t str_length, char *buf) -{ +void ndb_pack_varchar(const NdbDictionary::Column *col, size_t offset, + const char *str, size_t str_length, char *buf) { buf += offset; - switch (col->getArrayType()) - { + switch (col->getArrayType()) { case NdbDictionary::Column::ArrayTypeFixed: memcpy(buf, str, str_length); break; case NdbDictionary::Column::ArrayTypeShortVar: - *(uchar*)buf= (uchar)str_length; + *(uchar *)buf = (uchar)str_length; memcpy(buf + 1, str, str_length); break; case NdbDictionary::Column::ArrayTypeMediumVar: @@ -77,10 +71,8 @@ ndb_pack_varchar(const NdbDictionary::Column* col, size_t offset, } } -void -ndb_unpack_varchar(const NdbDictionary::Column* col, size_t offset, - const char ** str, size_t * str_length, const char *buf) -{ +void ndb_unpack_varchar(const NdbDictionary::Column *col, size_t offset, + const char **str, size_t *str_length, const char *buf) { buf += offset; switch (col->getArrayType()) { @@ -101,19 +93,15 @@ ndb_unpack_varchar(const NdbDictionary::Column* col, size_t offset, } } -Uint32 -ndb_get_extra_metadata_version(const NdbDictionary::Table *ndbtab) -{ +Uint32 ndb_get_extra_metadata_version(const NdbDictionary::Table *ndbtab) { DBUG_ENTER("ndb_get_extra_metadata_version"); Uint32 version; - void* unpacked_data; + void *unpacked_data; Uint32 unpacked_length; const int get_result = - ndbtab->getExtraMetadata(version, - &unpacked_data, &unpacked_length); - if (get_result != 0) - { + ndbtab->getExtraMetadata(version, &unpacked_data, &unpacked_length); + if (get_result != 0) { // Could not get extra metadata, return 0 DBUG_RETURN(0); } @@ -121,7 +109,6 @@ ndb_get_extra_metadata_version(const NdbDictionary::Table *ndbtab) free(unpacked_data); DBUG_RETURN(version); - } bool ndb_table_get_serialized_metadata(const NdbDictionary::Table *ndbtab, @@ -144,17 +131,13 @@ bool ndb_table_get_serialized_metadata(const NdbDictionary::Table 
*ndbtab, @@ -144,17 +131,13 @@ bool ndb_table_get_serialized_metadata(const NdbDictionary::Table *ndbtab, return true; } -bool -ndb_table_has_blobs(const NdbDictionary::Table *ndbtab) -{ +bool ndb_table_has_blobs(const NdbDictionary::Table *ndbtab) { const int num_columns = ndbtab->getNoOfColumns(); - for (int i = 0; i < num_columns; i++) - { + for (int i = 0; i < num_columns; i++) { const NdbDictionary::Column::Type column_type = ndbtab->getColumn(i)->getType(); if (column_type == NdbDictionary::Column::Blob || - column_type == NdbDictionary::Column::Text) - { + column_type == NdbDictionary::Column::Text) { // Found at least one blob column, the table has blobs return true; } @@ -162,46 +145,32 @@ ndb_table_has_blobs(const NdbDictionary::Table *ndbtab) return false; } - -bool -ndb_table_has_hidden_pk(const NdbDictionary::Table *ndbtab) -{ - const char* hidden_pk_name = "$PK"; - if (ndbtab->getNoOfPrimaryKeys() == 1) - { - const NdbDictionary::Column* ndbcol = ndbtab->getColumn(hidden_pk_name); - if (ndbcol && - ndbcol->getType() == NdbDictionary::Column::Bigunsigned && - ndbcol->getLength() == 1 && - ndbcol->getNullable() == false && - ndbcol->getPrimaryKey() == true && - ndbcol->getAutoIncrement() == true && - ndbcol->getDefaultValue() == nullptr) - { +bool ndb_table_has_hidden_pk(const NdbDictionary::Table *ndbtab) { + const char *hidden_pk_name = "$PK"; + if (ndbtab->getNoOfPrimaryKeys() == 1) { + const NdbDictionary::Column *ndbcol = ndbtab->getColumn(hidden_pk_name); + if (ndbcol && ndbcol->getType() == NdbDictionary::Column::Bigunsigned && + ndbcol->getLength() == 1 && ndbcol->getNullable() == false && + ndbcol->getPrimaryKey() == true && ndbcol->getAutoIncrement() == true && + ndbcol->getDefaultValue() == nullptr) { return true; } } return false; } - - -bool -ndb_table_has_tablespace(const NdbDictionary::Table* ndbtab) -{ +bool ndb_table_has_tablespace(const NdbDictionary::Table *ndbtab) { // NOTE! There is a slight ambiguity in the NdbDictionary::Table. // Depending on whether it has been retrieved from NDB or created // by user as part of defining a new table in NDB, different methods // need to be used for determining if table has tablespace - if (ndb_table_tablespace_name(ndbtab) != nullptr) - { + if (ndb_table_tablespace_name(ndbtab) != nullptr) { // Has tablespace return true; } - if (ndbtab->getTablespace()) - { + if (ndbtab->getTablespace()) { // Retrieved from NDB, the tablespace id and version // are available in the table definition -> has tablespace. // NOTE! Fetching the name would require another roundtrip to NDB @@ -210,42 +179,31 @@ ndb_table_has_tablespace(const NdbDictionary::Table* ndbtab) // Neither name nor id of tablespace is set -> no tablespace return false; - } - -const char* -ndb_table_tablespace_name(const NdbDictionary::Table* ndbtab) -{ +const char *ndb_table_tablespace_name(const NdbDictionary::Table *ndbtab) { // NOTE! The getTablespaceName() returns zero length string // to indicate no tablespace - const char* tablespace_name = ndbtab->getTablespaceName(); - if (strlen(tablespace_name) == 0) - { + const char *tablespace_name = ndbtab->getTablespaceName(); + if (strlen(tablespace_name) == 0) { // Just the zero length name, no tablespace name return nullptr; } return tablespace_name; } - -std::string -ndb_table_tablespace_name(NdbDictionary::Dictionary *dict, - const NdbDictionary::Table *ndbtab) -{ +std::string ndb_table_tablespace_name(NdbDictionary::Dictionary *dict, + const NdbDictionary::Table *ndbtab) { // NOTE! 
The getTablespaceName() returns zero length string // to indicate no tablespace std::string tablespace_name = ndbtab->getTablespaceName(); - if (tablespace_name.empty()) - { + if (tablespace_name.empty()) { // Just the zero length name, no tablespace name // Try and retrieve it using the id as a fallback mechanism Uint32 tablespace_id; - if (ndbtab->getTablespace(&tablespace_id)) - { + if (ndbtab->getTablespace(&tablespace_id)) { const NdbDictionary::Tablespace ts = dict->getTablespace(tablespace_id); - if (!ndb_dict_check_NDB_error(dict)) - { + if (!ndb_dict_check_NDB_error(dict)) { tablespace_name = ts.getName(); } } @@ -253,82 +211,64 @@ ndb_table_tablespace_name(NdbDictionary::Dictionary *dict, return tablespace_name; } - -bool -ndb_dict_check_NDB_error(NdbDictionary::Dictionary* dict) -{ +bool ndb_dict_check_NDB_error(NdbDictionary::Dictionary *dict) { return (dict->getNdbError().code != 0); } - -bool ndb_get_logfile_group_names(const NdbDictionary::Dictionary* dict, - std::unordered_set& lfg_names) -{ +bool ndb_get_logfile_group_names(const NdbDictionary::Dictionary *dict, + std::unordered_set &lfg_names) { NdbDictionary::Dictionary::List lfg_list; - if (dict->listObjects(lfg_list, NdbDictionary::Object::LogfileGroup) != 0) - { + if (dict->listObjects(lfg_list, NdbDictionary::Object::LogfileGroup) != 0) { return false; } - for (uint i = 0; i < lfg_list.count; i++) - { + for (uint i = 0; i < lfg_list.count; i++) { NdbDictionary::Dictionary::List::Element &elmt = lfg_list.elements[i]; lfg_names.insert(elmt.name); } return true; } - -bool ndb_get_tablespace_names(const NdbDictionary::Dictionary* dict, - std::unordered_set& tablespace_names) -{ +bool ndb_get_tablespace_names( + const NdbDictionary::Dictionary *dict, + std::unordered_set &tablespace_names) { NdbDictionary::Dictionary::List tablespace_list; - if (dict->listObjects(tablespace_list, NdbDictionary::Object::Tablespace) - != 0) - { + if (dict->listObjects(tablespace_list, NdbDictionary::Object::Tablespace) != + 0) { return false; } - for (uint i = 0; i < tablespace_list.count; i++) - { + for (uint i = 0; i < tablespace_list.count; i++) { NdbDictionary::Dictionary::List::Element &elmt = - tablespace_list.elements[i]; + tablespace_list.elements[i]; tablespace_names.insert(elmt.name); } return true; } - -bool ndb_get_table_names_in_schema(NdbDictionary::Dictionary* dict, - const std::string &schema_name, - std::unordered_set& table_names) -{ +bool ndb_get_table_names_in_schema( + NdbDictionary::Dictionary *dict, const std::string &schema_name, + std::unordered_set &table_names) { NdbDictionary::Dictionary::List list; - if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0) - { + if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0) { return false; } - for (uint i = 0; i < list.count; i++) - { + for (uint i = 0; i < list.count; i++) { NdbDictionary::Dictionary::List::Element &elmt = list.elements[i]; - if (schema_name != elmt.database) - { + if (schema_name != elmt.database) { continue; } - if (ndb_name_is_temp(elmt.name) || - ndb_name_is_blob_prefix(elmt.name) || - ndb_name_is_index_stat(elmt.name)) - { + if (ndb_name_is_temp(elmt.name) || ndb_name_is_blob_prefix(elmt.name) || + ndb_name_is_index_stat(elmt.name)) { continue; } if (elmt.state == NdbDictionary::Object::StateOnline || elmt.state == NdbDictionary::Object::ObsoleteStateBackup || - elmt.state == NdbDictionary::Object::StateBuilding) - { + elmt.state == NdbDictionary::Object::StateBuilding) { // Only return the table if they're already usable 
i.e. StateOnline or // StateBackup or if they're expected to be usable soon which is denoted // by StateBuilding @@ -338,56 +278,45 @@ bool ndb_get_table_names_in_schema(NdbDictionary::Dictionary* dict, return true; } - bool ndb_get_undofile_names(NdbDictionary::Dictionary *dict, const std::string &logfile_group_name, - std::vector &undofile_names) -{ + std::vector &undofile_names) { NdbDictionary::Dictionary::List undofile_list; - if (dict->listObjects(undofile_list, NdbDictionary::Object::Undofile) != 0) - { + if (dict->listObjects(undofile_list, NdbDictionary::Object::Undofile) != 0) { return false; } - for (uint i = 0; i < undofile_list.count; i++) - { + for (uint i = 0; i < undofile_list.count; i++) { NdbDictionary::Dictionary::List::Element &elmt = undofile_list.elements[i]; NdbDictionary::Undofile uf = dict->getUndofile(-1, elmt.name); - if (logfile_group_name.compare(uf.getLogfileGroup()) == 0) - { + if (logfile_group_name.compare(uf.getLogfileGroup()) == 0) { undofile_names.push_back(elmt.name); } } return true; } - bool ndb_get_datafile_names(NdbDictionary::Dictionary *dict, const std::string &tablespace_name, - std::vector &datafile_names) -{ + std::vector &datafile_names) { NdbDictionary::Dictionary::List datafile_list; - if (dict->listObjects(datafile_list, NdbDictionary::Object::Datafile) != 0) - { + if (dict->listObjects(datafile_list, NdbDictionary::Object::Datafile) != 0) { return false; } - for (uint i = 0; i < datafile_list.count; i++) - { + for (uint i = 0; i < datafile_list.count; i++) { NdbDictionary::Dictionary::List::Element &elmt = datafile_list.elements[i]; NdbDictionary::Datafile df = dict->getDatafile(-1, elmt.name); - if (tablespace_name.compare(df.getTablespace()) == 0) - { + if (tablespace_name.compare(df.getTablespace()) == 0) { datafile_names.push_back(elmt.name); } } return true; } -bool -ndb_get_database_names_in_dictionary( - NdbDictionary::Dictionary* dict, - std::unordered_set& database_names) { +bool ndb_get_database_names_in_dictionary( + NdbDictionary::Dictionary *dict, + std::unordered_set &database_names) { DBUG_ENTER("ndb_get_database_names_in_dictionary"); /* Get all the list of tables from NDB and read the database names */ @@ -395,86 +324,71 @@ ndb_get_database_names_in_dictionary( if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0) DBUG_RETURN(false); - for (uint i= 0 ; i < list.count ; i++) { - NdbDictionary::Dictionary::List::Element& elmt= list.elements[i]; + for (uint i = 0; i < list.count; i++) { + NdbDictionary::Dictionary::List::Element &elmt = list.elements[i]; /* Skip the table if it is not in an expected state or if it is a temporary or blob table.*/ if ((elmt.state != NdbDictionary::Object::StateOnline && elmt.state != NdbDictionary::Object::StateBuilding) || ndb_name_is_temp(elmt.name) || ndb_name_is_blob_prefix(elmt.name)) { - DBUG_PRINT("debug", - ("Skipping table %s.%s", elmt.database, elmt.name)); + DBUG_PRINT("debug", ("Skipping table %s.%s", elmt.database, elmt.name)); continue; } - DBUG_PRINT("debug", - ("Found %s.%s in NDB", elmt.database, elmt.name)); + DBUG_PRINT("debug", ("Found %s.%s in NDB", elmt.database, elmt.name)); database_names.insert(elmt.database); } DBUG_RETURN(true); } - bool ndb_logfile_group_exists(NdbDictionary::Dictionary *dict, const std::string &logfile_group_name, - bool &exists) -{ + bool &exists) { NdbDictionary::LogfileGroup lfg = - dict->getLogfileGroup(logfile_group_name.c_str()); + dict->getLogfileGroup(logfile_group_name.c_str()); const int dict_error_code = 
dict->getNdbError().code; - if (dict_error_code == 0) - { + if (dict_error_code == 0) { exists = true; return true; } - if (dict_error_code == 723) - { + if (dict_error_code == 723) { exists = false; return true; } return false; } - bool ndb_tablespace_exists(NdbDictionary::Dictionary *dict, - const std::string &tablespace_name, bool &exists) -{ + const std::string &tablespace_name, bool &exists) { NdbDictionary::Tablespace tablespace = - dict->getTablespace(tablespace_name.c_str()); + dict->getTablespace(tablespace_name.c_str()); const int dict_error_code = dict->getNdbError().code; - if (dict_error_code == 0) - { + if (dict_error_code == 0) { exists = true; return true; } - if (dict_error_code == 723) - { + if (dict_error_code == 723) { exists = false; return true; } return false; } - bool ndb_table_exists(NdbDictionary::Dictionary *dict, - const std::string &db_name, - const std::string &table_name, bool &exists) -{ + const std::string &db_name, const std::string &table_name, + bool &exists) { NdbDictionary::Dictionary::List list; - if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0) - { + if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0) { // List objects failed return false; } - for (unsigned int i = 0; i < list.count; i++) - { + for (unsigned int i = 0; i < list.count; i++) { NdbDictionary::Dictionary::List::Element &elmt = list.elements[i]; if (db_name == elmt.database && table_name == elmt.name && (elmt.state == NdbDictionary::Object::StateOnline || elmt.state == NdbDictionary::Object::ObsoleteStateBackup || - elmt.state == NdbDictionary::Object::StateBuilding)) - { + elmt.state == NdbDictionary::Object::StateBuilding)) { exists = true; return true; } @@ -483,15 +397,12 @@ bool ndb_table_exists(NdbDictionary::Dictionary *dict, return true; } - bool ndb_get_logfile_group_id_and_version(NdbDictionary::Dictionary *dict, const std::string &logfile_group_name, - int &id, int &version) -{ + int &id, int &version) { NdbDictionary::LogfileGroup lfg = - dict->getLogfileGroup(logfile_group_name.c_str()); - if (dict->getNdbError().code != 0) - { + dict->getLogfileGroup(logfile_group_name.c_str()); + if (dict->getNdbError().code != 0) { return false; } id = lfg.getObjectId(); @@ -499,15 +410,11 @@ bool ndb_get_logfile_group_id_and_version(NdbDictionary::Dictionary *dict, return true; } - bool ndb_get_tablespace_id_and_version(NdbDictionary::Dictionary *dict, const std::string &tablespace_name, - int &id, int &version) -{ - NdbDictionary::Tablespace ts = - dict->getTablespace(tablespace_name.c_str()); - if (dict->getNdbError().code != 0) - { + int &id, int &version) { + NdbDictionary::Tablespace ts = dict->getTablespace(tablespace_name.c_str()); + if (dict->getNdbError().code != 0) { return false; } id = ts.getObjectId(); diff --git a/storage/ndb/plugin/ndb_ndbapi_util.h b/storage/ndb/plugin/ndb_ndbapi_util.h index e203b45b23cf..489c874219a5 100644 --- a/storage/ndb/plugin/ndb_ndbapi_util.h +++ b/storage/ndb/plugin/ndb_ndbapi_util.h @@ -34,14 +34,12 @@ #include "storage/ndb/include/ndbapi/NdbDictionary.hpp" #include "storage/ndb/include/ndbapi/NdbRecAttr.hpp" -union NdbValue -{ +union NdbValue { const NdbRecAttr *rec; NdbBlob *blob; void *ptr; }; - /** * @brief ndb_pack_varchar, pack the given string using "MySQL Server varchar * format" into a buffer suitable for the given column of the NDB table @@ -53,8 +51,8 @@ union NdbValue * @note The hardcoded value 512 is the current size of FN_REFLEN, only buffers * of that size is currently supported by this function 
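* @note The packed layout depends on the column's array type: ArrayTypeFixed stores the bytes with no length prefix, ArrayTypeShortVar prepends a one byte length and ArrayTypeMediumVar a two byte length.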
*/ -void ndb_pack_varchar(const NdbDictionary::Table* ndbtab, unsigned column_index, - char (&buf)[512], const char* str, size_t str_length); +void ndb_pack_varchar(const NdbDictionary::Table *ndbtab, unsigned column_index, + char (&buf)[512], const char *str, size_t str_length); /** * @brief ndb_pack_varchar, pack the given string using "MySQL Server varchar @@ -65,8 +63,8 @@ void ndb_pack_varchar(const NdbDictionary::Table* ndbtab, unsigned column_index, * @param str_length length of string to pack * @param buf pointer to data buffer of any size */ -void ndb_pack_varchar(const NdbDictionary::Column* column, size_t offset, - const char* str, size_t str_length, char *buf); +void ndb_pack_varchar(const NdbDictionary::Column *column, size_t offset, + const char *str, size_t str_length, char *buf); /** * @brief ndb_unpack_varchar, retrieve a pointer and string length from @@ -78,9 +76,8 @@ void ndb_pack_varchar(const NdbDictionary::Column* column, size_t offset, * @param str_length string length destination (out) * @param buf pointer to filled data buffer */ -void ndb_unpack_varchar(const NdbDictionary::Column* column, size_t offset, - const char ** str, size_t * str_length, const char *buf); - +void ndb_unpack_varchar(const NdbDictionary::Column *column, size_t offset, + const char **str, size_t *str_length, const char *buf); /** @brief ndb_get_extra_metadata_version, returns the version of the @@ -88,7 +85,7 @@ void ndb_unpack_varchar(const NdbDictionary::Column* column, size_t offset, @param ndbtab @return version of extra metadata or 0 if none */ -Uint32 ndb_get_extra_metadata_version(const NdbDictionary::Table* ndbtab); +Uint32 ndb_get_extra_metadata_version(const NdbDictionary::Table *ndbtab); /** @brief returns serialized metadata attached to the @@ -107,8 +104,7 @@ bool ndb_table_get_serialized_metadata(const NdbDictionary::Table *ndbtab, * @param ndbtab * @return true if the table have blobs */ -bool ndb_table_has_blobs(const NdbDictionary::Table* ndbtab); - +bool ndb_table_has_blobs(const NdbDictionary::Table *ndbtab); /** * @brief ndb_table_has_hidden_pk, check if the NDB table has a hidden @@ -117,8 +113,7 @@ bool ndb_table_has_blobs(const NdbDictionary::Table* ndbtab); * @param ndbtab * @return true if the table has a hidden primary key */ -bool ndb_table_has_hidden_pk(const NdbDictionary::Table* ndbtab); - +bool ndb_table_has_hidden_pk(const NdbDictionary::Table *ndbtab); /** * @brief check if the NDB table has tablespace @@ -128,8 +123,7 @@ bool ndb_table_has_hidden_pk(const NdbDictionary::Table* ndbtab); * @note This is indicated either by the table having a tablespace name * or id+version of the tablespace */ -bool ndb_table_has_tablespace(const NdbDictionary::Table* ndbtab); - +bool ndb_table_has_tablespace(const NdbDictionary::Table *ndbtab); /** * @brief check if the NDB table has tablespace name indicating @@ -143,8 +137,7 @@ bool ndb_table_has_tablespace(const NdbDictionary::Table* ndbtab); * from a function returning "const char*" * */ -const char* ndb_table_tablespace_name(const NdbDictionary::Table* ndbtab); - +const char *ndb_table_tablespace_name(const NdbDictionary::Table *ndbtab); /** * @brief Return the tablespace name of an NDB table @@ -155,14 +148,12 @@ const char* ndb_table_tablespace_name(const NdbDictionary::Table* ndbtab); std::string ndb_table_tablespace_name(NdbDictionary::Dictionary *dict, const NdbDictionary::Table *ndbtab); - /** * @brief Checks if an error has occurred in a ndbapi call * @param dict NDB Dictionary * @return true if error has 
occurred, false if not */ -bool ndb_dict_check_NDB_error(NdbDictionary::Dictionary* dict); - +bool ndb_dict_check_NDB_error(NdbDictionary::Dictionary *dict); /** * @brief Retrieves list of logfile group names from NDB Dictionary @@ -170,9 +161,8 @@ bool ndb_dict_check_NDB_error(NdbDictionary::Dictionary* dict); * @param lfg_names [out] List of logfile group names * @return true on success, false on failure */ -bool ndb_get_logfile_group_names(const NdbDictionary::Dictionary* dict, - std::unordered_set& lfg_names); - +bool ndb_get_logfile_group_names(const NdbDictionary::Dictionary *dict, + std::unordered_set &lfg_names); /** * @brief Retrieves list of tablespace names from NDB Dictionary @@ -180,10 +170,9 @@ bool ndb_get_logfile_group_names(const NdbDictionary::Dictionary* dict, * @param tablespace_names [out] List of tablespace names * @return true on success, false on failure */ -bool -ndb_get_tablespace_names(const NdbDictionary::Dictionary* dict, - std::unordered_set& tablespace_names); - +bool ndb_get_tablespace_names( + const NdbDictionary::Dictionary *dict, + std::unordered_set &tablespace_names); /** * @brief Retrieves list of table names in the given schema from NDB Dictionary @@ -192,11 +181,9 @@ ndb_get_tablespace_names(const NdbDictionary::Dictionary* dict, * @param table_names [out] List of table names * @return true on success, false on failure */ -bool -ndb_get_table_names_in_schema(NdbDictionary::Dictionary* dict, - const std::string &schema_name, - std::unordered_set& table_names); - +bool ndb_get_table_names_in_schema( + NdbDictionary::Dictionary *dict, const std::string &schema_name, + std::unordered_set &table_names); /** * @brief Retrieves list of undofile names assigned to a logfile group from NDB @@ -210,7 +197,6 @@ bool ndb_get_undofile_names(NdbDictionary::Dictionary *dict, const std::string &logfile_group_name, std::vector &undofile_names); - /** * @brief Retrieves list of datafile names assigned to a tablespace from NDB * Dictionary @@ -219,21 +205,19 @@ bool ndb_get_undofile_names(NdbDictionary::Dictionary *dict, * @param datafile_names [out] Datafile names * @return true on success, false on failure */ -bool ndb_get_datafile_names(NdbDictionary::Dictionary* dict, +bool ndb_get_datafile_names(NdbDictionary::Dictionary *dict, const std::string &tablespace_name, std::vector &datafile_names); - /** * @brief Retrieves list of database names in NDB Dictionary * @param dict NDB Dictionary * @param database_names [out] List of database names in Dictionary * @return true on success, false on failure */ -bool -ndb_get_database_names_in_dictionary(NdbDictionary::Dictionary* dict, - std::unordered_set& database_names); - +bool ndb_get_database_names_in_dictionary( + NdbDictionary::Dictionary *dict, + std::unordered_set &database_names); /** * @brief Check if a logfile group exists in NDB Dictionary @@ -247,7 +231,6 @@ bool ndb_logfile_group_exists(NdbDictionary::Dictionary *dict, const std::string &logfile_group_name, bool &exists); - /** * @brief Check if a tablespace exists in NDB Dictionary * @param dict NDB Dictionary @@ -259,7 +242,6 @@ bool ndb_logfile_group_exists(NdbDictionary::Dictionary *dict, bool ndb_tablespace_exists(NdbDictionary::Dictionary *dict, const std::string &tablespace_name, bool &exists); - /** * @brief Check if a table exists in NDB Dictionary * @param dict NDB Dictionary @@ -270,9 +252,8 @@ bool ndb_tablespace_exists(NdbDictionary::Dictionary *dict, * @return true on success, false on failure */ bool ndb_table_exists(NdbDictionary::Dictionary 
*dict, - const std::string &db_name, - const std::string &table_name, bool &exists); - + const std::string &db_name, const std::string &table_name, + bool &exists); /** * @brief Retrieve the id and version of the logfile group definition in the NDB @@ -287,7 +268,6 @@ bool ndb_get_logfile_group_id_and_version(NdbDictionary::Dictionary *dict, const std::string &logfile_group_name, int &id, int &version); - /** * @brief Retrieve the id and version of the tablespace definition in the NDB * Dictionary diff --git a/storage/ndb/plugin/ndb_plugin_reference.cc b/storage/ndb/plugin/ndb_plugin_reference.cc index 3f18623c96b0..7b3bd9a87b73 100644 --- a/storage/ndb/plugin/ndb_plugin_reference.cc +++ b/storage/ndb/plugin/ndb_plugin_reference.cc @@ -31,39 +31,24 @@ // Using #include "sql/sql_plugin.h" +Ndb_plugin_reference::Ndb_plugin_reference() : plugin(nullptr) {} -Ndb_plugin_reference::Ndb_plugin_reference() : - plugin(nullptr) -{ -} - - -bool Ndb_plugin_reference::lock() -{ - const LEX_CSTRING plugin_name = { STRING_WITH_LEN("ndbcluster") }; +bool Ndb_plugin_reference::lock() { + const LEX_CSTRING plugin_name = {STRING_WITH_LEN("ndbcluster")}; // Resolve reference to "ndbcluster plugin" - plugin = plugin_lock_by_name(NULL, - plugin_name, - MYSQL_STORAGE_ENGINE_PLUGIN); - if (!plugin) - return false; + plugin = plugin_lock_by_name(NULL, plugin_name, MYSQL_STORAGE_ENGINE_PLUGIN); + if (!plugin) return false; return true; } - -st_plugin_int* -Ndb_plugin_reference::handle() const { - +st_plugin_int *Ndb_plugin_reference::handle() const { return plugin_ref_to_int(plugin); } - -Ndb_plugin_reference::~Ndb_plugin_reference() -{ - if (plugin) - { +Ndb_plugin_reference::~Ndb_plugin_reference() { + if (plugin) { // Unlock the "ndbcluster_plugin" reference plugin_unlock(NULL, plugin); } diff --git a/storage/ndb/plugin/ndb_plugin_reference.h b/storage/ndb/plugin/ndb_plugin_reference.h index 72820bd83b0b..bcca6a7b3d24 100644 --- a/storage/ndb/plugin/ndb_plugin_reference.h +++ b/storage/ndb/plugin/ndb_plugin_reference.h @@ -32,14 +32,14 @@ it's handle */ -class Ndb_plugin_reference -{ +class Ndb_plugin_reference { plugin_ref plugin; -public: + + public: Ndb_plugin_reference(); bool lock(); - st_plugin_int* handle() const; + st_plugin_int *handle() const; ~Ndb_plugin_reference(); }; diff --git a/storage/ndb/plugin/ndb_record_layout.cc b/storage/ndb/plugin/ndb_record_layout.cc index c297b78272dd..186d4505d6cd 100644 --- a/storage/ndb/plugin/ndb_record_layout.cc +++ b/storage/ndb/plugin/ndb_record_layout.cc @@ -113,11 +113,10 @@ void Ndb_record_layout::setValue(int idx, unsigned int *value, void Ndb_record_layout::packValue(int idx, std::string value, char *data) const { - ndb_pack_varchar(record_specs[idx].column, 0, - value.c_str(), value.length(), data); + ndb_pack_varchar(record_specs[idx].column, 0, value.c_str(), value.length(), + data); } - bool Ndb_record_layout::getValue(const char *data, int idx, unsigned short *value) const { DBUG_ASSERT(idx < (int)m_columns); diff --git a/storage/ndb/plugin/ndb_record_layout.h b/storage/ndb/plugin/ndb_record_layout.h index 5adb861fc083..ebfe4ef7e57d 100644 --- a/storage/ndb/plugin/ndb_record_layout.h +++ b/storage/ndb/plugin/ndb_record_layout.h @@ -30,14 +30,14 @@ #include "NdbApi.hpp" class Ndb_record_layout { -public: - NdbDictionary::RecordSpecification * const record_specs; + public: + NdbDictionary::RecordSpecification *const record_specs; unsigned int record_size; Ndb_record_layout(int number_of_columns); ~Ndb_record_layout(); - void clear(); /* Reset object, 
allowing it to be reused */ + void clear(); /* Reset object, allowing it to be reused */ void addColumn(const NdbDictionary::Column *); @@ -54,25 +54,22 @@ class Ndb_record_layout { /* Getters for nullable columns return false if the stored value is null. */ bool getValue(const char *data, int idx, unsigned short *value) const; - bool getValue(const char *data, int idx, - size_t *length, const char **str) const; + bool getValue(const char *data, int idx, size_t *length, + const char **str) const; bool getValue(const char *data, int idx, unsigned int *value) const; -private: + private: unsigned int m_columns, m_seq; }; -inline void -Ndb_record_layout::setNull(int idx, char * data) const { +inline void Ndb_record_layout::setNull(int idx, char *data) const { *(data + record_specs[idx].nullbit_byte_offset) |= - (char) (1 << record_specs[idx].nullbit_bit_in_byte); + (char)(1 << record_specs[idx].nullbit_bit_in_byte); } -inline void -Ndb_record_layout::setNotNull(int idx, char * data) const { - *(data +record_specs[idx].nullbit_byte_offset) &= - (char) (0xFF ^ (1 << record_specs[idx].nullbit_bit_in_byte)); +inline void Ndb_record_layout::setNotNull(int idx, char *data) const { + *(data + record_specs[idx].nullbit_byte_offset) &= + (char)(0xFF ^ (1 << record_specs[idx].nullbit_bit_in_byte)); } - #endif diff --git a/storage/ndb/plugin/ndb_repl_tab.cc b/storage/ndb/plugin/ndb_repl_tab.cc index e2af49d204d4..e2e84ef2ebe9 100644 --- a/storage/ndb/plugin/ndb_repl_tab.cc +++ b/storage/ndb/plugin/ndb_repl_tab.cc @@ -27,63 +27,48 @@ #include #include "mf_wcomp.h" -#include "sql/mysqld.h" // system_charset_info +#include "sql/mysqld.h" // system_charset_info #include "storage/ndb/plugin/ndb_share.h" #include "storage/ndb/plugin/ndb_sleep.h" #include "storage/ndb/plugin/ndb_table_guard.h" -Ndb_rep_tab_key::Ndb_rep_tab_key(const char* _db, - const char* _table_name, - uint _server_id) -{ - uint db_len= (uint) strlen(_db); - uint tabname_len = (uint) strlen(_table_name); +Ndb_rep_tab_key::Ndb_rep_tab_key(const char *_db, const char *_table_name, + uint _server_id) { + uint db_len = (uint)strlen(_db); + uint tabname_len = (uint)strlen(_table_name); assert(DB_MAXLEN < 256); /* Fits in Varchar */ assert(db_len <= DB_MAXLEN); assert(tabname_len <= TABNAME_MAXLEN); memcpy(&db[1], _db, db_len); - db[ 0 ]= db_len; + db[0] = db_len; memcpy(&table_name[1], _table_name, tabname_len); - table_name[ 0 ]= tabname_len; + table_name[0] = tabname_len; - server_id= _server_id; + server_id = _server_id; null_terminate_strings(); } -void Ndb_rep_tab_key::null_terminate_strings() -{ - assert((uint) db[0] <= DB_MAXLEN); - assert((uint) table_name[0] <= TABNAME_MAXLEN); - db[ db[0] + 1] = '\0'; - table_name[ table_name[0] + 1] = '\0'; +void Ndb_rep_tab_key::null_terminate_strings() { + assert((uint)db[0] <= DB_MAXLEN); + assert((uint)table_name[0] <= TABNAME_MAXLEN); + db[db[0] + 1] = '\0'; + table_name[table_name[0] + 1] = '\0'; } -int -Ndb_rep_tab_key::attempt_match(const char* keyptr, - const uint keylen, - const char* candidateptr, - const uint candidatelen, - const int exactmatchvalue) -{ - if (my_strnncoll(system_charset_info, - (const uchar*) keyptr, - keylen, - (const uchar*) candidateptr, - candidatelen) == 0) - { +int Ndb_rep_tab_key::attempt_match(const char *keyptr, const uint keylen, + const char *candidateptr, + const uint candidatelen, + const int exactmatchvalue) { + if (my_strnncoll(system_charset_info, (const uchar *)keyptr, keylen, + (const uchar *)candidateptr, candidatelen) == 0) { /* Exact match */ return 
exactmatchvalue; - } - else if (my_wildcmp(system_charset_info, - keyptr, - keyptr + keylen, - candidateptr, - candidateptr + candidatelen, - '\\', wild_one, wild_many) == 0) - { + } else if (my_wildcmp(system_charset_info, keyptr, keyptr + keylen, + candidateptr, candidateptr + candidatelen, '\\', + wild_one, wild_many) == 0) { /* Wild match */ return 0; } @@ -92,10 +77,8 @@ Ndb_rep_tab_key::attempt_match(const char* keyptr, return -1; } -int -Ndb_rep_tab_key::get_match_quality(const Ndb_rep_tab_key* key, - const Ndb_rep_tab_key* candidate_row) -{ +int Ndb_rep_tab_key::get_match_quality(const Ndb_rep_tab_key *key, + const Ndb_rep_tab_key *candidate_row) { /* 0= No match 1= Loosest match 8= Best match @@ -114,35 +97,26 @@ Ndb_rep_tab_key::get_match_quality(const Ndb_rep_tab_key* key, int quality = MIN_MATCH_VAL; int rc; - if ((rc = attempt_match(&key->db[1], - key->db[0], - &candidate_row->db[1], - candidate_row->db[0], - EXACT_MATCH_DB)) == -1) - { + if ((rc = attempt_match(&key->db[1], key->db[0], &candidate_row->db[1], + candidate_row->db[0], EXACT_MATCH_DB)) == -1) { /* No match, drop out now */ return 0; } - quality+= rc; + quality += rc; - if ((rc = attempt_match(&key->table_name[1], - key->table_name[0], + if ((rc = attempt_match(&key->table_name[1], key->table_name[0], &candidate_row->table_name[1], candidate_row->table_name[0], - EXACT_MATCH_TABLE_NAME)) == -1) - { + EXACT_MATCH_TABLE_NAME)) == -1) { /* No match, drop out now */ return 0; } - quality+= rc; + quality += rc; - if (candidate_row->server_id == key->server_id) - { + if (candidate_row->server_id == key->server_id) { /* Exact match */ quality += EXACT_MATCH_SERVER_ID; - } - else if (candidate_row->server_id != 0) - { + } else if (candidate_row->server_id != 0) { /* No match */ return 0; } @@ -150,62 +124,49 @@ Ndb_rep_tab_key::get_match_quality(const Ndb_rep_tab_key* key, return quality; } -Ndb_rep_tab_row::Ndb_rep_tab_row() - : binlog_type(0), cfs_is_null(true) -{ +Ndb_rep_tab_row::Ndb_rep_tab_row() : binlog_type(0), cfs_is_null(true) { memset(conflict_fn_spec, 0, sizeof(conflict_fn_spec)); } -const char* Ndb_rep_tab_reader::ndb_rep_db= "mysql"; -const char* Ndb_rep_tab_reader::ndb_replication_table = "ndb_replication"; -const char* Ndb_rep_tab_reader::nrt_db= "db"; -const char* Ndb_rep_tab_reader::nrt_table_name= "table_name"; -const char* Ndb_rep_tab_reader::nrt_server_id= "server_id"; -const char* Ndb_rep_tab_reader::nrt_binlog_type= "binlog_type"; -const char* Ndb_rep_tab_reader::nrt_conflict_fn= "conflict_fn"; +const char *Ndb_rep_tab_reader::ndb_rep_db = "mysql"; +const char *Ndb_rep_tab_reader::ndb_replication_table = "ndb_replication"; +const char *Ndb_rep_tab_reader::nrt_db = "db"; +const char *Ndb_rep_tab_reader::nrt_table_name = "table_name"; +const char *Ndb_rep_tab_reader::nrt_server_id = "server_id"; +const char *Ndb_rep_tab_reader::nrt_binlog_type = "binlog_type"; +const char *Ndb_rep_tab_reader::nrt_conflict_fn = "conflict_fn"; Ndb_rep_tab_reader::Ndb_rep_tab_reader() - : binlog_flags(NBT_DEFAULT), - conflict_fn_spec(NULL), - warning_msg(NULL) -{ -} + : binlog_flags(NBT_DEFAULT), conflict_fn_spec(NULL), warning_msg(NULL) {} -int Ndb_rep_tab_reader::check_schema(const NdbDictionary::Table* reptab, - const char** error_str) -{ +int Ndb_rep_tab_reader::check_schema(const NdbDictionary::Table *reptab, + const char **error_str) { DBUG_ENTER("check_schema"); - *error_str= NULL; + *error_str = NULL; - const NdbDictionary::Column - *col_db, *col_table_name, *col_server_id, *col_binlog_type, *col_conflict_fn; - 
if (reptab->getNoOfPrimaryKeys() != 3) - { - *error_str= "Wrong number of primary key parts, expected 3"; + const NdbDictionary::Column *col_db, *col_table_name, *col_server_id, + *col_binlog_type, *col_conflict_fn; + if (reptab->getNoOfPrimaryKeys() != 3) { + *error_str = "Wrong number of primary key parts, expected 3"; DBUG_RETURN(-2); } - col_db= reptab->getColumn(*error_str= nrt_db); - if (col_db == NULL || - !col_db->getPrimaryKey() || + col_db = reptab->getColumn(*error_str = nrt_db); + if (col_db == NULL || !col_db->getPrimaryKey() || col_db->getType() != NdbDictionary::Column::Varbinary) DBUG_RETURN(-1); - col_table_name= reptab->getColumn(*error_str= nrt_table_name); - if (col_table_name == NULL || - !col_table_name->getPrimaryKey() || + col_table_name = reptab->getColumn(*error_str = nrt_table_name); + if (col_table_name == NULL || !col_table_name->getPrimaryKey() || col_table_name->getType() != NdbDictionary::Column::Varbinary) DBUG_RETURN(-1); - col_server_id= reptab->getColumn(*error_str= nrt_server_id); - if (col_server_id == NULL || - !col_server_id->getPrimaryKey() || + col_server_id = reptab->getColumn(*error_str = nrt_server_id); + if (col_server_id == NULL || !col_server_id->getPrimaryKey() || col_server_id->getType() != NdbDictionary::Column::Unsigned) DBUG_RETURN(-1); - col_binlog_type= reptab->getColumn(*error_str= nrt_binlog_type); - if (col_binlog_type == NULL || - col_binlog_type->getPrimaryKey() || + col_binlog_type = reptab->getColumn(*error_str = nrt_binlog_type); + if (col_binlog_type == NULL || col_binlog_type->getPrimaryKey() || col_binlog_type->getType() != NdbDictionary::Column::Unsigned) DBUG_RETURN(-1); - col_conflict_fn= reptab->getColumn(*error_str= nrt_conflict_fn); - if (col_conflict_fn != NULL) - { + col_conflict_fn = reptab->getColumn(*error_str = nrt_conflict_fn); + if (col_conflict_fn != NULL) { if ((col_conflict_fn->getPrimaryKey()) || (col_conflict_fn->getType() != NdbDictionary::Column::Varbinary)) DBUG_RETURN(-1); @@ -214,70 +175,64 @@ int Ndb_rep_tab_reader::check_schema(const NdbDictionary::Table* reptab, DBUG_RETURN(0); } -int -Ndb_rep_tab_reader::scan_candidates(Ndb* ndb, - const NdbDictionary::Table* reptab, - const char* db, - const char* table_name, - uint server_id, - Ndb_rep_tab_row& best_match) -{ - uint retries= 100; - int best_match_quality= 0; +int Ndb_rep_tab_reader::scan_candidates(Ndb *ndb, + const NdbDictionary::Table *reptab, + const char *db, const char *table_name, + uint server_id, + Ndb_rep_tab_row &best_match) { + uint retries = 100; + int best_match_quality = 0; NdbError ok; NdbError ndberror; /* Loop to enable temporary error retries */ - while(true) - { + while (true) { ndberror = ok; /* reset */ - NdbTransaction *trans= ndb->startTransaction(); - if (trans == NULL) - { - ndberror= ndb->getNdbError(); + NdbTransaction *trans = ndb->startTransaction(); + if (trans == NULL) { + ndberror = ndb->getNdbError(); - if (ndberror.status == NdbError::TemporaryError) - { - if (retries--) - { + if (ndberror.status == NdbError::TemporaryError) { + if (retries--) { ndb_trans_retry_sleep(); continue; } } break; } - NdbRecAttr* ra_binlog_type= NULL; - NdbRecAttr* ra_conflict_fn_spec= NULL; + NdbRecAttr *ra_binlog_type = NULL; + NdbRecAttr *ra_conflict_fn_spec = NULL; Ndb_rep_tab_row row; bool have_conflict_fn_col = (reptab->getColumn(nrt_conflict_fn) != NULL); /* Define scan op on ndb_replication */ - NdbScanOperation* scanOp = trans->getNdbScanOperation(reptab); - if (scanOp == NULL) { ndberror= trans->getNdbError(); break; } + 
NdbScanOperation *scanOp = trans->getNdbScanOperation(reptab); + if (scanOp == NULL) { + ndberror = trans->getNdbError(); + break; + } if ((scanOp->readTuples(NdbScanOperation::LM_CommittedRead) != 0) || - (scanOp->getValue(nrt_db, (char*) row.key.db) == NULL) || - (scanOp->getValue(nrt_table_name, (char*) row.key.table_name) == NULL) || - (scanOp->getValue(nrt_server_id, (char*) &row.key.server_id) == NULL) || - ((ra_binlog_type = scanOp->getValue(nrt_binlog_type, (char*) &row.binlog_type)) == NULL) || + (scanOp->getValue(nrt_db, (char *)row.key.db) == NULL) || + (scanOp->getValue(nrt_table_name, (char *)row.key.table_name) == + NULL) || + (scanOp->getValue(nrt_server_id, (char *)&row.key.server_id) == NULL) || + ((ra_binlog_type = scanOp->getValue( + nrt_binlog_type, (char *)&row.binlog_type)) == NULL) || (have_conflict_fn_col && - ((ra_conflict_fn_spec= - scanOp->getValue(nrt_conflict_fn, (char*) row.conflict_fn_spec)) == NULL))) - { - ndberror= scanOp->getNdbError(); + ((ra_conflict_fn_spec = scanOp->getValue( + nrt_conflict_fn, (char *)row.conflict_fn_spec)) == NULL))) { + ndberror = scanOp->getNdbError(); break; } if (trans->execute(NdbTransaction::NoCommit, - NdbOperation::AO_IgnoreError)) - { - ndberror= trans->getNdbError(); + NdbOperation::AO_IgnoreError)) { + ndberror = trans->getNdbError(); ndb->closeTransaction(trans); - if (ndberror.status == NdbError::TemporaryError) - { - if (retries--) - { + if (ndberror.status == NdbError::TemporaryError) { + if (retries--) { ndb_trans_retry_sleep(); continue; } @@ -286,83 +241,66 @@ Ndb_rep_tab_reader::scan_candidates(Ndb* ndb, } /* Scroll through results, looking for best match */ - DBUG_PRINT("info", ("Searching ndb_replication for %s.%s %u", - db, table_name, server_id)); + DBUG_PRINT("info", ("Searching ndb_replication for %s.%s %u", db, + table_name, server_id)); bool ambiguous_match = false; Ndb_rep_tab_key searchkey(db, table_name, server_id); int scan_rc; - while ((scan_rc= scanOp->nextResult(true)) == 0) - { - if (ra_binlog_type->isNULL() == 1) - { - row.binlog_type= NBT_DEFAULT; + while ((scan_rc = scanOp->nextResult(true)) == 0) { + if (ra_binlog_type->isNULL() == 1) { + row.binlog_type = NBT_DEFAULT; } - if (ra_conflict_fn_spec) - { + if (ra_conflict_fn_spec) { row.set_conflict_fn_spec_null(ra_conflict_fn_spec->isNULL() == 1); } /* Compare row to searchkey to get quality of match */ - int match_quality= Ndb_rep_tab_key::get_match_quality(&searchkey, - &row.key); + int match_quality = + Ndb_rep_tab_key::get_match_quality(&searchkey, &row.key); #ifndef DBUG_OFF { row.null_terminate_strings(); DBUG_PRINT("info", ("Candidate : %s.%s %u : %u %s" " Match quality : %u.", - row.key.get_db(), - row.key.get_table_name(), - row.key.server_id, - row.binlog_type, - row.get_conflict_fn_spec(), - match_quality)); + row.key.get_db(), row.key.get_table_name(), + row.key.server_id, row.binlog_type, + row.get_conflict_fn_spec(), match_quality)); } #endif - if (match_quality > 0) - { - if (match_quality == best_match_quality) - { + if (match_quality > 0) { + if (match_quality == best_match_quality) { ambiguous_match = true; /* Ambiguous matches...*/ snprintf(warning_msg_buffer, sizeof(warning_msg_buffer), - "Ambiguous matches in %s.%s for %s.%s (%u)." 
- "Candidates : %s.%s (%u), %s.%s (%u).", - ndb_rep_db, ndb_replication_table, - db, table_name, server_id, - &best_match.key.db[1], - &best_match.key.table_name[1], - best_match.key.server_id, - &row.key.db[1], - &row.key.table_name[1], - row.key.server_id); + "Ambiguous matches in %s.%s for %s.%s (%u)." + "Candidates : %s.%s (%u), %s.%s (%u).", + ndb_rep_db, ndb_replication_table, db, table_name, server_id, + &best_match.key.db[1], &best_match.key.table_name[1], + best_match.key.server_id, &row.key.db[1], + &row.key.table_name[1], row.key.server_id); DBUG_PRINT("info", ("%s", warning_msg_buffer)); } - if (match_quality > best_match_quality) - { + if (match_quality > best_match_quality) { /* New best match */ - best_match= row; + best_match = row; best_match_quality = match_quality; ambiguous_match = false; - if (best_match_quality == Ndb_rep_tab_key::EXACT_MATCH_QUALITY) - { + if (best_match_quality == Ndb_rep_tab_key::EXACT_MATCH_QUALITY) { /* We're done */ break; } } } /* if (match_quality > 0) */ - } /* while ((scan_rc= scanOp->nextResult(true)) */ + } /* while ((scan_rc= scanOp->nextResult(true)) */ - if (scan_rc < 0) - { - ndberror= scanOp->getNdbError(); - if (ndberror.status == NdbError::TemporaryError) - { - if (retries--) - { + if (scan_rc < 0) { + ndberror = scanOp->getNdbError(); + if (ndberror.status == NdbError::TemporaryError) { + if (retries--) { ndb->closeTransaction(trans); ndb_trans_retry_sleep(); continue; @@ -372,121 +310,98 @@ Ndb_rep_tab_reader::scan_candidates(Ndb* ndb, ndb->closeTransaction(trans); - if (ambiguous_match) - { - warning_msg= warning_msg_buffer; + if (ambiguous_match) { + warning_msg = warning_msg_buffer; best_match_quality = -1; } break; } /* while(true) */ - if (ndberror.code != 0) - { + if (ndberror.code != 0) { snprintf(warning_msg_buffer, sizeof(warning_msg_buffer), - "Unable to retrieve %s.%s, logging and " - "conflict resolution may not function " - "as intended (ndberror %u)", - ndb_rep_db, ndb_replication_table, - ndberror.code); - warning_msg= warning_msg_buffer; + "Unable to retrieve %s.%s, logging and " + "conflict resolution may not function " + "as intended (ndberror %u)", + ndb_rep_db, ndb_replication_table, ndberror.code); + warning_msg = warning_msg_buffer; best_match_quality = -1; } return best_match_quality; } -int -Ndb_rep_tab_reader::lookup(Ndb* ndb, - /* Keys */ - const char* db, - const char* table_name, - uint server_id) -{ +int Ndb_rep_tab_reader::lookup(Ndb *ndb, + /* Keys */ + const char *db, const char *table_name, + uint server_id) { DBUG_ENTER("lookup"); - int error= 0; + int error = 0; NdbError ndberror; - const char *error_str= ""; + const char *error_str = ""; /* Set results to defaults */ - binlog_flags= NBT_DEFAULT; - conflict_fn_spec= NULL; - warning_msg= NULL; + binlog_flags = NBT_DEFAULT; + conflict_fn_spec = NULL; + warning_msg = NULL; ndb->setDatabaseName(ndb_rep_db); - NdbDictionary::Dictionary *dict= ndb->getDictionary(); + NdbDictionary::Dictionary *dict = ndb->getDictionary(); Ndb_table_guard ndbtab_g(dict, ndb_replication_table); - const NdbDictionary::Table *reptab= ndbtab_g.get_table(); + const NdbDictionary::Table *reptab = ndbtab_g.get_table(); - do - { - if (reptab == NULL) - { + do { + if (reptab == NULL) { if (dict->getNdbError().classification == NdbError::SchemaError || - dict->getNdbError().code == 4009) - { - DBUG_PRINT("info", ("No %s.%s table", ndb_rep_db, ndb_replication_table)); + dict->getNdbError().code == 4009) { + DBUG_PRINT("info", + ("No %s.%s table", ndb_rep_db, 
ndb_replication_table)); DBUG_RETURN(0); - } - else - { - error= 0; - ndberror= dict->getNdbError(); + } else { + error = 0; + ndberror = dict->getNdbError(); break; } } - if ((error = check_schema(reptab, &error_str)) != 0) - { - DBUG_PRINT("info", ("check_schema failed : %u, error_str : %s", - error, error_str)); + if ((error = check_schema(reptab, &error_str)) != 0) { + DBUG_PRINT("info", ("check_schema failed : %u, error_str : %s", error, + error_str)); break; } Ndb_rep_tab_row best_match_row; - int best_match_quality = scan_candidates(ndb, - reptab, - db, - table_name, - server_id, - best_match_row); + int best_match_quality = + scan_candidates(ndb, reptab, db, table_name, server_id, best_match_row); DBUG_PRINT("info", ("Best match at quality : %u", best_match_quality)); - if (best_match_quality == -1) - { + if (best_match_quality == -1) { /* Problem in matching, message already set */ assert(warning_msg != NULL); - error= -3; + error = -3; break; } - if (best_match_quality == 0) - { + if (best_match_quality == 0) { /* No match : Use defaults */ - } - else - { + } else { /* Have a matching row, copy out values */ /* Ensure VARCHARs are usable as strings */ best_match_row.null_terminate_strings(); - binlog_flags= (enum Ndb_binlog_type) best_match_row.binlog_type; + binlog_flags = (enum Ndb_binlog_type)best_match_row.binlog_type; - if (best_match_row.cfs_is_null) - { + if (best_match_row.cfs_is_null) { DBUG_PRINT("info", ("Conflict FN SPEC is Null")); /* No conflict fn spec */ - conflict_fn_spec= NULL; - } - else - { - const char* conflict_fn = best_match_row.get_conflict_fn_spec(); - uint len= (uint) strlen(conflict_fn); - if ((len + 1) > sizeof(conflict_fn_buffer)) - { - error= -2; - error_str= "Conflict function specification too long."; + conflict_fn_spec = NULL; + } else { + const char *conflict_fn = best_match_row.get_conflict_fn_spec(); + uint len = (uint)strlen(conflict_fn); + if ((len + 1) > sizeof(conflict_fn_buffer)) { + error = -2; + error_str = "Conflict function specification too long."; break; } memcpy(conflict_fn_buffer, conflict_fn, len); @@ -494,65 +409,53 @@ Ndb_rep_tab_reader::lookup(Ndb* ndb, conflict_fn_spec = conflict_fn_buffer; } } - } while(0); + } while (0); /* Error handling */ - if (error == 0) - { - if (ndberror.code != 0) - { + if (error == 0) { + if (ndberror.code != 0) { snprintf(warning_msg_buffer, sizeof(warning_msg_buffer), - "Unable to retrieve %s.%s, logging and " - "conflict resolution may not function " - "as intended (ndberror %u)", - ndb_rep_db, ndb_replication_table, - ndberror.code); - warning_msg= warning_msg_buffer; - error= -4; + "Unable to retrieve %s.%s, logging and " + "conflict resolution may not function " + "as intended (ndberror %u)", + ndb_rep_db, ndb_replication_table, ndberror.code); + warning_msg = warning_msg_buffer; + error = -4; } - } - else - { - switch (error) - { - case -1: - snprintf(warning_msg_buffer, sizeof(warning_msg_buffer), - "Missing or wrong type for column '%s'", error_str); - break; - case -2: - snprintf(warning_msg_buffer, sizeof(warning_msg_buffer), "%s", error_str); - break; - case -3: - /* Message already set */ - break; - default: - abort(); + } else { + switch (error) { + case -1: + snprintf(warning_msg_buffer, sizeof(warning_msg_buffer), + "Missing or wrong type for column '%s'", error_str); + break; + case -2: + snprintf(warning_msg_buffer, sizeof(warning_msg_buffer), "%s", + error_str); + break; + case -3: + /* Message already set */ + break; + default: + abort(); } - warning_msg= warning_msg_buffer; - 
error= 0; /* No real error, just use defaults */ + warning_msg = warning_msg_buffer; + error = 0; /* No real error, just use defaults */ } - DBUG_PRINT("info", ("Rc : %d Retrieved Binlog flags : %u and function spec : %s", - error, binlog_flags, (conflict_fn_spec != NULL ?conflict_fn_spec: - "NULL"))); + DBUG_PRINT( + "info", + ("Rc : %d Retrieved Binlog flags : %u and function spec : %s", error, + binlog_flags, (conflict_fn_spec != NULL ? conflict_fn_spec : "NULL"))); DBUG_RETURN(error); } -Uint32 -Ndb_rep_tab_reader::get_binlog_flags() const -{ - return binlog_flags; -} +Uint32 Ndb_rep_tab_reader::get_binlog_flags() const { return binlog_flags; } -const char* -Ndb_rep_tab_reader::get_conflict_fn_spec() const -{ +const char *Ndb_rep_tab_reader::get_conflict_fn_spec() const { return conflict_fn_spec; } -const char* -Ndb_rep_tab_reader::get_warning_message() const -{ +const char *Ndb_rep_tab_reader::get_warning_message() const { return warning_msg; } diff --git a/storage/ndb/plugin/ndb_repl_tab.h b/storage/ndb/plugin/ndb_repl_tab.h index 341ce8ee0d95..608bba426508 100644 --- a/storage/ndb/plugin/ndb_repl_tab.h +++ b/storage/ndb/plugin/ndb_repl_tab.h @@ -28,7 +28,7 @@ #include #include "my_inttypes.h" -#include "mysql_com.h" /* NAME_CHAR_LEN */ +#include "mysql_com.h" /* NAME_CHAR_LEN */ #include "storage/ndb/include/ndbapi/NdbApi.hpp" /* @@ -38,54 +38,42 @@ mysql.ndb_replication system table It is used when reading values from that table */ -class Ndb_rep_tab_key -{ -public: - static const uint DB_MAXLEN= NAME_CHAR_LEN - 1; - static const uint TABNAME_MAXLEN= NAME_CHAR_LEN - 1; +class Ndb_rep_tab_key { + public: + static const uint DB_MAXLEN = NAME_CHAR_LEN - 1; + static const uint TABNAME_MAXLEN = NAME_CHAR_LEN - 1; /* Char arrays in varchar format with 1 length byte and * trailing 0 */ - char db[ DB_MAXLEN + 2 ]; - char table_name[ TABNAME_MAXLEN + 2 ]; + char db[DB_MAXLEN + 2]; + char table_name[TABNAME_MAXLEN + 2]; uint server_id; - Ndb_rep_tab_key() - { + Ndb_rep_tab_key() { db[0] = 0; table_name[0] = 0; server_id = 0; } /* Constructor from normal null terminated strings */ - Ndb_rep_tab_key(const char* _db, - const char* _table_name, - uint _server_id); + Ndb_rep_tab_key(const char *_db, const char *_table_name, uint _server_id); /* Add null terminators to VARCHAR format string values */ void null_terminate_strings(); - const char* get_db() const - { - return &db[1]; - } + const char *get_db() const { return &db[1]; } - const char* get_table_name() const - { - return &table_name[1]; - } + const char *get_table_name() const { return &table_name[1]; } static const int MIN_MATCH_VAL = 1; static const int EXACT_MATCH_DB = 4; static const int EXACT_MATCH_TABLE_NAME = 2; static const int EXACT_MATCH_SERVER_ID = 1; - static const int EXACT_MATCH_QUALITY = - MIN_MATCH_VAL + - EXACT_MATCH_DB + - EXACT_MATCH_TABLE_NAME + - EXACT_MATCH_SERVER_ID; + static const int EXACT_MATCH_QUALITY = MIN_MATCH_VAL + EXACT_MATCH_DB + + EXACT_MATCH_TABLE_NAME + + EXACT_MATCH_SERVER_ID; /* This static method attempts an exact, then a wild @@ -96,10 +84,8 @@ class Ndb_rep_tab_key 0 : Wild match -1 : No match */ - static int attempt_match(const char* keyptr, - const uint keylen, - const char* candidateptr, - const uint candidatelen, + static int attempt_match(const char *keyptr, const uint keylen, + const char *candidateptr, const uint candidatelen, const int exactmatchvalue); /* This static method compares a fixed key value with @@ -109,8 +95,8 @@ class Ndb_rep_tab_key * indicating a better match quality. 
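* (Editor's worked example, added for clarity using the constants defined
* above: matching the key ("db1", "t1", 11) against the candidate row
* ("db1", "%", 0) scores MIN_MATCH_VAL(1) + EXACT_MATCH_DB(4) + 0 for the
* wildcard table name + 0 for the wildcard server_id 0, i.e. quality 5,
* while the candidate ("db1", "t1", 11) scores 1 + 4 + 2 + 1 = 8.)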
* An exact match returns EXACT_MATCH_QUALITY */ - static int get_match_quality(const Ndb_rep_tab_key* key, - const Ndb_rep_tab_key* candidate_row); + static int get_match_quality(const Ndb_rep_tab_key *key, + const Ndb_rep_tab_key *candidate_row); }; /* @@ -118,46 +104,37 @@ class Ndb_rep_tab_key This class represents a row in the mysql.ndb_replication table */ -class Ndb_rep_tab_row -{ -public: +class Ndb_rep_tab_row { + public: static const uint MAX_CONFLICT_FN_SPEC_LEN = 255; static const uint CONFLICT_FN_SPEC_BUF_LEN = - MAX_CONFLICT_FN_SPEC_LEN + 1; /* Trailing '\0' */ + MAX_CONFLICT_FN_SPEC_LEN + 1; /* Trailing '\0' */ Ndb_rep_tab_key key; uint binlog_type; bool cfs_is_null; /* Buffer has space for leading length byte */ - char conflict_fn_spec[ CONFLICT_FN_SPEC_BUF_LEN + 1 ]; + char conflict_fn_spec[CONFLICT_FN_SPEC_BUF_LEN + 1]; Ndb_rep_tab_row(); - void null_terminate_strings() - { + void null_terminate_strings() { key.null_terminate_strings(); - uint speclen= 0; + uint speclen = 0; speclen = conflict_fn_spec[0]; assert(speclen <= MAX_CONFLICT_FN_SPEC_LEN); conflict_fn_spec[1 + speclen] = '\0'; } - const char* get_conflict_fn_spec() - { - return &conflict_fn_spec[1]; - } + const char *get_conflict_fn_spec() { return &conflict_fn_spec[1]; } - void set_conflict_fn_spec_null(bool null) - { - if (null) - { + void set_conflict_fn_spec_null(bool null) { + if (null) { cfs_is_null = true; conflict_fn_spec[0] = 0; conflict_fn_spec[1] = 0; - } - else - { + } else { cfs_is_null = false; } } @@ -169,9 +146,8 @@ class Ndb_rep_tab_row A helper class for accessing the mysql.ndb_replication table */ -class Ndb_rep_tab_reader -{ -private: +class Ndb_rep_tab_reader { + private: static const char *ndb_rep_db; static const char *ndb_replication_table; static const char *nrt_db; @@ -181,11 +157,11 @@ class Ndb_rep_tab_reader static const char *nrt_conflict_fn; Uint32 binlog_flags; - char conflict_fn_buffer[ Ndb_rep_tab_row::CONFLICT_FN_SPEC_BUF_LEN ]; - char warning_msg_buffer[ FN_REFLEN ]; + char conflict_fn_buffer[Ndb_rep_tab_row::CONFLICT_FN_SPEC_BUF_LEN]; + char warning_msg_buffer[FN_REFLEN]; - const char* conflict_fn_spec; - const char* warning_msg; + const char *conflict_fn_spec; + const char *warning_msg; /** check_schema @@ -198,9 +174,8 @@ class Ndb_rep_tab_reader -2 if there's a more general error. Error description in error_str */ - static - int check_schema(const NdbDictionary::Table* reptab, - const char** error_str); + static int check_schema(const NdbDictionary::Table *reptab, + const char **error_str); /** scan_candidates @@ -216,13 +191,11 @@ class Ndb_rep_tab_reader if msg is set on return it contains a warning. Warnings may be produces in non error scenarios */ - int scan_candidates(Ndb* ndb, - const NdbDictionary::Table* reptab, - const char* db, - const char* table_name, - uint server_id, - Ndb_rep_tab_row& best_match); -public: + int scan_candidates(Ndb *ndb, const NdbDictionary::Table *reptab, + const char *db, const char *table_name, uint server_id, + Ndb_rep_tab_row &best_match); + + public: Ndb_rep_tab_reader(); ~Ndb_rep_tab_reader() {} @@ -241,16 +214,14 @@ class Ndb_rep_tab_reader 0 : Success. <0 : Error. 
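(Editor's illustration of the intended call sequence; "ndb" and
"own_server_id" are assumptions, not names from this patch:)
Ndb_rep_tab_reader reader;
if (reader.lookup(ndb, "db1", "t1", own_server_id) == 0) {
Uint32 flags = reader.get_binlog_flags();
const char *spec = reader.get_conflict_fn_spec(); // NULL if unset
const char *msg = reader.get_warning_message(); // NULL if none
}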
*/ - int lookup(Ndb* ndb, + int lookup(Ndb *ndb, /* Keys */ - const char* db, - const char* table_name, - uint server_id); + const char *db, const char *table_name, uint server_id); /* Following only valid after a call to lookup() */ Uint32 get_binlog_flags() const; - const char* get_conflict_fn_spec() const; - const char* get_warning_message() const; + const char *get_conflict_fn_spec() const; + const char *get_warning_message() const; }; #endif diff --git a/storage/ndb/plugin/ndb_require.h b/storage/ndb/plugin/ndb_require.h index 85cff15a61cd..ab6ca80b7fda 100644 --- a/storage/ndb/plugin/ndb_require.h +++ b/storage/ndb/plugin/ndb_require.h @@ -24,25 +24,21 @@ #include <cstdlib> - namespace ndbcluster { - /** - * @brief ndbrequire() aborts the process, when the condition passed evaluates to - * false. Unlike assert(), it fails in both debug and release libraries. "ndbrequire" - * is contained inside the "ndbcluster" namespace and is supposed to be used only - * within ndbcluster code. - * - * @parma expr the condition to be evaluated. - */ - static inline - void ndbrequire(bool expr) - { - if (unlikely(!expr)) - { - std::abort(); - } +/** + * @brief ndbrequire() aborts the process, when the condition passed evaluates + * to false. Unlike assert(), it fails in both debug and release libraries. + * "ndbrequire" is contained inside the "ndbcluster" namespace and is supposed + * to be used only within ndbcluster code. + * + * @param expr the condition to be evaluated. + */ +static inline void ndbrequire(bool expr) { + if (unlikely(!expr)) { + std::abort(); } } +} // namespace ndbcluster #endif diff --git a/storage/ndb/plugin/ndb_retry.h b/storage/ndb/plugin/ndb_retry.h index 2bc2ea410441..ac0a5073d7f0 100644 --- a/storage/ndb/plugin/ndb_retry.h +++ b/storage/ndb/plugin/ndb_retry.h @@ -27,9 +27,9 @@ #include <functional> // std::function -#include "ndbapi/NdbApi.hpp" // NdbError -#include "sql/sql_class.h" // THD -#include "storage/ndb/plugin/ndb_sleep.h" // ndb_retry_sleep +#include "ndbapi/NdbApi.hpp" // NdbError +#include "sql/sql_class.h" // THD +#include "storage/ndb/plugin/ndb_sleep.h" // ndb_retry_sleep /** A wrapper to execute the given std::function instance that uses NdbTransaction @@ -64,7 +64,7 @@ */ template <typename... FunctionArgs> bool ndb_execute_and_retry( - Ndb *ndb, const THD *thd, unsigned int retry_sleep, NdbError& last_ndb_err, + Ndb *ndb, const THD *thd, unsigned int retry_sleep, NdbError &last_ndb_err, std::function<const NdbError *(NdbTransaction *, FunctionArgs...)> ndb_func, FunctionArgs... args) { @@ -127,7 +127,8 @@ bool ndb_trans_retry( ndb_func, FunctionArgs... args) { return ndb_execute_and_retry( - ndb, thd, 30, last_ndb_err, ndb_func, std::forward<FunctionArgs>(args)...); + ndb, thd, 30, last_ndb_err, ndb_func, + std::forward<FunctionArgs>(args)...); } #endif /* NDB_RETRY_H */ diff --git a/storage/ndb/plugin/ndb_schema_dist.cc b/storage/ndb/plugin/ndb_schema_dist.cc index 8b78de61e4bc..40f1729ceee1 100644 --- a/storage/ndb/plugin/ndb_schema_dist.cc +++ b/storage/ndb/plugin/ndb_schema_dist.cc @@ -30,7 +30,7 @@ #include "my_dbug.h" #include "ndbapi/ndb_cluster_connection.hpp" -#include "sql/query_options.h" // OPTION_BIN_LOG +#include "sql/query_options.h" // OPTION_BIN_LOG #include "sql/sql_thd_internal_api.h" #include "storage/ndb/plugin/ndb_anyvalue.h" #include "storage/ndb/plugin/ndb_name_util.h" @@ -46,38 +46,33 @@ // be removed when a NDB_SHARE can be acquired using db+table_name and the // key is formatted behind the curtains in NDB_SHARE without using // build_table_filename() etc.
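// Editor's note (illustration only, not part of this patch): the retry
// helper declared in ndb_retry.h above is used by wrapping all
// NdbTransaction work in a callable that returns nullptr on success and a
// pointer to the failing NdbError otherwise. All names in this sketch are
// assumptions:
//
//   const NdbError *write_row(NdbTransaction *trans, int value) {
//     // ...define operations on trans using value, then:
//     if (trans->execute(NdbTransaction::Commit) != 0)
//       return &trans->getNdbError();
//     return nullptr;  // success, makes ndb_trans_retry() return true
//   }
//
//   NdbError last_err;
//   std::function<const NdbError *(NdbTransaction *, int)> func = write_row;
//   if (!ndb_trans_retry(ndb, thd, last_err, func, 42)) {
//     // temporary errors were retried (retry_sleep = 30) until exhausted;
//     // last_err holds the final error
//   }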
-static constexpr const char* NDB_SCHEMA_TABLE_KEY = +static constexpr const char *NDB_SCHEMA_TABLE_KEY = IF_WIN(".\\mysql\\ndb_schema", "./mysql/ndb_schema"); -bool Ndb_schema_dist::is_ready(void* requestor) { +bool Ndb_schema_dist::is_ready(void *requestor) { DBUG_TRACE; std::stringstream ss; ss << "is_ready_" << std::hex << requestor; const std::string reference = ss.str(); - NDB_SHARE* schema_share = - NDB_SHARE::acquire_reference_by_key(NDB_SCHEMA_TABLE_KEY, - reference.c_str()); - if (schema_share == nullptr) - return false; // Not ready + NDB_SHARE *schema_share = NDB_SHARE::acquire_reference_by_key( + NDB_SCHEMA_TABLE_KEY, reference.c_str()); + if (schema_share == nullptr) return false; // Not ready if (!schema_share->have_event_operation()) { NDB_SHARE::release_reference(schema_share, reference.c_str()); - return false; // Not ready + return false; // Not ready } NDB_SHARE::release_reference(schema_share, reference.c_str()); return true; } - -bool Ndb_schema_dist_client::is_schema_dist_table(const char* db, - const char* table_name) -{ +bool Ndb_schema_dist_client::is_schema_dist_table(const char *db, + const char *table_name) { if (db == Ndb_schema_dist_table::DB_NAME && - table_name == Ndb_schema_dist_table::TABLE_NAME) - { + table_name == Ndb_schema_dist_table::TABLE_NAME) { // This is the NDB table used for schema distribution return true; } @@ -85,7 +80,7 @@ bool Ndb_schema_dist_client::is_schema_dist_table(const char* db, } bool Ndb_schema_dist_client::is_schema_dist_result_table( - const char* db, const char* table_name) { + const char *db, const char *table_name) { if (db == Ndb_schema_result_table::DB_NAME && table_name == Ndb_schema_result_table::TABLE_NAME) { // This is the NDB table used for schema distribution results @@ -101,29 +96,24 @@ bool Ndb_schema_dist_client::is_schema_dist_result_table( */ static std::mutex acl_change_mutex; -void Ndb_schema_dist_client::acquire_acl_lock() -{ +void Ndb_schema_dist_client::acquire_acl_lock() { acl_change_mutex.lock(); m_holding_acl_mutex = true; } -Ndb_schema_dist_client::Ndb_schema_dist_client(THD* thd) +Ndb_schema_dist_client::Ndb_schema_dist_client(THD *thd) : m_thd(thd), m_thd_ndb(get_thd_ndb(thd)), m_holding_acl_mutex(false) {} -bool Ndb_schema_dist_client::prepare(const char* db, const char* tabname) -{ +bool Ndb_schema_dist_client::prepare(const char *db, const char *tabname) { DBUG_ENTER("Ndb_schema_dist_client::prepare"); // Acquire reference on mysql.ndb_schema // NOTE! Using fixed "reference", assuming only one Ndb_schema_dist_client // is started at a time since it requires GSL. 
This may have to be revisited - m_share = - NDB_SHARE::acquire_reference_by_key(NDB_SCHEMA_TABLE_KEY, - "ndb_schema_dist_client"); - if (m_share == nullptr || - m_share->have_event_operation() == false || - DBUG_EVALUATE_IF("ndb_schema_dist_not_ready_early", true, false)) - { + m_share = NDB_SHARE::acquire_reference_by_key(NDB_SCHEMA_TABLE_KEY, + "ndb_schema_dist_client"); + if (m_share == nullptr || m_share->have_event_operation() == false || + DBUG_EVALUATE_IF("ndb_schema_dist_not_ready_early", true, false)) { // The NDB_SHARE for mysql.ndb_schema hasn't been created or not setup // yet -> schema distribution is not ready m_thd_ndb->push_warning("Schema distribution is not ready"); @@ -158,14 +148,13 @@ bool Ndb_schema_dist_client::prepare(const char* db, const char* tabname) DBUG_RETURN(true); } -bool Ndb_schema_dist_client::prepare_rename(const char* db, const char* tabname, - const char* new_db, - const char* new_tabname) { +bool Ndb_schema_dist_client::prepare_rename(const char *db, const char *tabname, + const char *new_db, + const char *new_tabname) { DBUG_ENTER("Ndb_schema_dist_client::prepare_rename"); // Normal prepare first - if (!prepare(db, tabname)) - { + if (!prepare(db, tabname)) { DBUG_RETURN(false); } @@ -180,7 +169,7 @@ bool Ndb_schema_dist_client::prepare_rename(const char* db, const char* tabname, bool Ndb_schema_dist_client::prepare_acl_change(uint node_id) { /* Acquire the ACL change mutex. It will be released by the destructor. - */ + */ acquire_acl_lock(); /* @@ -200,7 +189,7 @@ bool Ndb_schema_dist_client::prepare_acl_change(uint node_id) { } bool Ndb_schema_dist_client::check_identifier_limits( - std::string& invalid_identifier) { + std::string &invalid_identifier) { DBUG_ENTER("Ndb_schema_dist_client::check_identifier_limits"); Ndb_schema_dist_table schema_dist_table(m_thd_ndb); @@ -211,8 +200,7 @@ bool Ndb_schema_dist_client::check_identifier_limits( // Check that identifiers does not exceed the limits imposed // by the ndb_schema table layout - for (auto key: m_prepared_keys.keys()) - { + for (auto key : m_prepared_keys.keys()) { // db if (!schema_dist_table.check_column_identifier_limit( Ndb_schema_dist_table::COL_DB, key.first)) { @@ -229,13 +217,13 @@ bool Ndb_schema_dist_client::check_identifier_limits( DBUG_RETURN(true); } -void Ndb_schema_dist_client::Prepared_keys::add_key(const char* db, - const char* tabname) { +void Ndb_schema_dist_client::Prepared_keys::add_key(const char *db, + const char *tabname) { m_keys.emplace_back(db, tabname); } bool Ndb_schema_dist_client::Prepared_keys::check_key( - const char* db, const char* tabname) const { + const char *db, const char *tabname) const { for (auto key : m_keys) { if (key.first == db && key.second == tabname) { return true; // OK, key has been prepared @@ -244,18 +232,15 @@ bool Ndb_schema_dist_client::Prepared_keys::check_key( return false; } -extern void update_slave_api_stats(const Ndb*); +extern void update_slave_api_stats(const Ndb *); -Ndb_schema_dist_client::~Ndb_schema_dist_client() -{ - if (m_share) - { +Ndb_schema_dist_client::~Ndb_schema_dist_client() { + if (m_share) { // Release the reference to mysql.ndb_schema table NDB_SHARE::release_reference(m_share, "ndb_schema_dist_client"); } - if (m_thd_ndb->is_slave_thread()) - { + if (m_thd_ndb->is_slave_thread()) { // Copy-out slave thread statistics // NOTE! 
This is just a "convenient place" to call this // function, it could be moved to "end of statement"(if there @@ -263,8 +248,7 @@ Ndb_schema_dist_client::~Ndb_schema_dist_client() update_slave_api_stats(m_thd_ndb->ndb); } - if(m_holding_acl_mutex) - { + if (m_holding_acl_mutex) { acl_change_mutex.unlock(); } } @@ -296,9 +280,9 @@ uint32 Ndb_schema_dist_client::unique_version() const { return ver; } -bool Ndb_schema_dist_client::log_schema_op(const char* query, - size_t query_length, const char* db, - const char* table_name, uint32 id, +bool Ndb_schema_dist_client::log_schema_op(const char *query, + size_t query_length, const char *db, + const char *table_name, uint32 id, uint32 version, SCHEMA_OP_TYPE type, bool log_query_on_participant) { DBUG_ENTER("Ndb_schema_dist_client::log_schema_op"); @@ -307,8 +291,7 @@ bool Ndb_schema_dist_client::log_schema_op(const char* query, DBUG_ASSERT(m_thd_ndb); // Never allow temporary names when communicating with participant - if (ndb_name_is_temp(db) || ndb_name_is_temp(table_name)) - { + if (ndb_name_is_temp(db) || ndb_name_is_temp(table_name)) { DBUG_ASSERT(false); DBUG_RETURN(false); } @@ -318,8 +301,7 @@ bool Ndb_schema_dist_client::log_schema_op(const char* query, ndbcluster::ndbrequire(m_share); // Check that prepared keys match - if (!m_prepared_keys.check_key(db, table_name)) - { + if (!m_prepared_keys.check_key(db, table_name)) { m_thd_ndb->push_warning("INTERNAL ERROR: prepared keys didn't match"); DBUG_ASSERT(false); // Catch in debug DBUG_RETURN(false); @@ -328,16 +310,15 @@ bool Ndb_schema_dist_client::log_schema_op(const char* query, // Don't distribute if thread has turned off schema distribution if (m_thd_ndb->check_option(Thd_ndb::NO_LOG_SCHEMA_OP)) { DBUG_PRINT("info", ("NO_LOG_SCHEMA_OP set - > skip schema distribution")); - DBUG_RETURN(true); // Ok, skipped + DBUG_RETURN(true); // Ok, skipped } // Verify identifier limits, this should already have been caught earlier { std::string invalid_identifier; - if (!check_identifier_limits(invalid_identifier)) - { + if (!check_identifier_limits(invalid_identifier)) { m_thd_ndb->push_warning("INTERNAL ERROR: identifier limits exceeded"); - DBUG_ASSERT(false); // Catch in debug + DBUG_ASSERT(false); // Catch in debug DBUG_RETURN(false); } } @@ -345,9 +326,9 @@ bool Ndb_schema_dist_client::log_schema_op(const char* query, // Calculate anyvalue const Uint32 anyvalue = calculate_anyvalue(log_query_on_participant); - const int result = log_schema_op_impl( - m_thd_ndb->ndb, query, static_cast(query_length), db, table_name, id, - version, type, anyvalue); + const int result = + log_schema_op_impl(m_thd_ndb->ndb, query, static_cast(query_length), + db, table_name, id, version, type, anyvalue); if (result != 0) { // Schema distribution failed m_thd_ndb->push_warning("Schema distribution failed!"); @@ -356,13 +337,12 @@ bool Ndb_schema_dist_client::log_schema_op(const char* query, DBUG_RETURN(true); } -bool Ndb_schema_dist_client::create_table(const char* db, - const char* table_name, int id, +bool Ndb_schema_dist_client::create_table(const char *db, + const char *table_name, int id, int version) { DBUG_ENTER("Ndb_schema_dist_client::create_table"); - if (is_schema_dist_table(db, table_name)) - { + if (is_schema_dist_table(db, table_name)) { // Create of the schema distribution table is not distributed. 
Instead, // every MySQL Server have special handling to create it if not // exists and then open it as first step of connecting to the cluster @@ -373,17 +353,15 @@ bool Ndb_schema_dist_client::create_table(const char* db, db, table_name, id, version, SOT_CREATE_TABLE)); } -bool Ndb_schema_dist_client::truncate_table(const char* db, - const char* table_name, int id, +bool Ndb_schema_dist_client::truncate_table(const char *db, + const char *table_name, int id, int version) { DBUG_ENTER("Ndb_schema_dist_client::truncate_table"); DBUG_RETURN(log_schema_op(ndb_thd_query(m_thd), ndb_thd_query_length(m_thd), db, table_name, id, version, SOT_TRUNCATE_TABLE)); } - - -bool Ndb_schema_dist_client::alter_table(const char* db, const char* table_name, +bool Ndb_schema_dist_client::alter_table(const char *db, const char *table_name, int id, int version, bool log_on_participant) { DBUG_ENTER("Ndb_schema_dist_client::alter_table"); @@ -392,8 +370,8 @@ bool Ndb_schema_dist_client::alter_table(const char* db, const char* table_name, log_on_participant)); } -bool Ndb_schema_dist_client::alter_table_inplace_prepare(const char* db, - const char* table_name, +bool Ndb_schema_dist_client::alter_table_inplace_prepare(const char *db, + const char *table_name, int id, int version) { DBUG_ENTER("Ndb_schema_dist_client::alter_table_inplace_prepare"); DBUG_RETURN(log_schema_op(ndb_thd_query(m_thd), ndb_thd_query_length(m_thd), @@ -401,8 +379,8 @@ bool Ndb_schema_dist_client::alter_table_inplace_prepare(const char* db, SOT_ONLINE_ALTER_TABLE_PREPARE)); } -bool Ndb_schema_dist_client::alter_table_inplace_commit(const char* db, - const char* table_name, +bool Ndb_schema_dist_client::alter_table_inplace_commit(const char *db, + const char *table_name, int id, int version) { DBUG_ENTER("Ndb_schema_dist_client::alter_table_inplace_commit"); DBUG_RETURN(log_schema_op(ndb_thd_query(m_thd), ndb_thd_query_length(m_thd), @@ -411,8 +389,8 @@ bool Ndb_schema_dist_client::alter_table_inplace_commit(const char* db, } bool Ndb_schema_dist_client::rename_table_prepare( - const char* db, const char* table_name, int id, int version, - const char* new_key_for_table) { + const char *db, const char *table_name, int id, int version, + const char *new_key_for_table) { DBUG_ENTER("Ndb_schema_dist_client::rename_table_prepare"); // NOTE! 
The rename table prepare phase is primarily done in order to // pass the "new key"(i.e db/table_name) for the table to be renamed, @@ -422,10 +400,10 @@ bool Ndb_schema_dist_client::rename_table_prepare( table_name, id, version, SOT_RENAME_TABLE_PREPARE)); } -bool Ndb_schema_dist_client::rename_table(const char* db, - const char* table_name, int id, - int version, const char* new_dbname, - const char* new_tabname, +bool Ndb_schema_dist_client::rename_table(const char *db, + const char *table_name, int id, + int version, const char *new_dbname, + const char *new_tabname, bool log_on_participant) { DBUG_ENTER("Ndb_schema_dist_client::rename_table"); @@ -494,54 +472,51 @@ bool Ndb_schema_dist_client::drop_table(const char *db, const char *table_name, log_on_participant)); } -bool Ndb_schema_dist_client::create_db(const char* query, uint query_length, - const char* db, unsigned int id, +bool Ndb_schema_dist_client::create_db(const char *query, uint query_length, + const char *db, unsigned int id, unsigned int version) { DBUG_ENTER("Ndb_schema_dist_client::create_db"); // Checking identifier limits "late", there is no way to return // an error to fail the CREATE DATABASE command std::string invalid_identifier; - if (!check_identifier_limits(invalid_identifier)) - { + if (!check_identifier_limits(invalid_identifier)) { // Check of db name limit failed m_thd_ndb->push_warning("Identifier name '%-.100s' is too long", invalid_identifier.c_str()); DBUG_RETURN(false); } - DBUG_RETURN(log_schema_op(query, query_length, db, "", - id, version, SOT_CREATE_DB)); + DBUG_RETURN( + log_schema_op(query, query_length, db, "", id, version, SOT_CREATE_DB)); } -bool Ndb_schema_dist_client::alter_db(const char* query, uint query_length, - const char* db, unsigned int id, +bool Ndb_schema_dist_client::alter_db(const char *query, uint query_length, + const char *db, unsigned int id, unsigned int version) { DBUG_ENTER("Ndb_schema_dist_client::alter_db"); // Checking identifier limits "late", there is no way to return // an error to fail the ALTER DATABASE command std::string invalid_identifier; - if (!check_identifier_limits(invalid_identifier)) - { + if (!check_identifier_limits(invalid_identifier)) { // Check of db name limit failed m_thd_ndb->push_warning("Identifier name '%-.100s' is too long", invalid_identifier.c_str()); DBUG_RETURN(false); } - DBUG_RETURN(log_schema_op(query, query_length, db, "", - id, version, SOT_ALTER_DB)); + DBUG_RETURN( + log_schema_op(query, query_length, db, "", id, version, SOT_ALTER_DB)); } -bool Ndb_schema_dist_client::drop_db(const char* db) { +bool Ndb_schema_dist_client::drop_db(const char *db) { DBUG_ENTER("Ndb_schema_dist_client::drop_db"); // Checking identifier limits "late", there is no way to return // an error to fail the DROP DATABASE command std::string invalid_identifier; - if (!check_identifier_limits(invalid_identifier)) - { + if (!check_identifier_limits(invalid_identifier)) { // Check of db name limit failed m_thd_ndb->push_warning("Identifier name '%-.100s' is too long", invalid_identifier.c_str()); @@ -554,23 +529,23 @@ bool Ndb_schema_dist_client::drop_db(const char* db) { } /* STATEMENT-style ACL change distribution */ -bool Ndb_schema_dist_client::acl_notify(const char *database, - const char* query, uint query_length, +bool Ndb_schema_dist_client::acl_notify(const char *database, const char *query, + uint query_length, bool participant_refresh) { DBUG_ENTER("Ndb_schema_dist_client::acl_notify"); DBUG_ASSERT(m_holding_acl_mutex); auto key = 
m_prepared_keys.keys()[0]; std::string new_query("use "); - if(database != nullptr && strcmp(database, "mysql")) { + if (database != nullptr && strcmp(database, "mysql")) { new_query.append(database).append(";").append(query, query_length); query = new_query.c_str(); query_length = new_query.size(); } SCHEMA_OP_TYPE type = - participant_refresh ? SOT_ACL_STATEMENT : SOT_ACL_STATEMENT_REFRESH; - DBUG_RETURN(log_schema_op(query, query_length, - key.first.c_str(), key.second.c_str(), - unique_id(), unique_version(), type)); + participant_refresh ? SOT_ACL_STATEMENT : SOT_ACL_STATEMENT_REFRESH; + DBUG_RETURN(log_schema_op(query, query_length, key.first.c_str(), + key.second.c_str(), unique_id(), unique_version(), + type)); } /* SNAPSHOT-style ACL change distribution */ @@ -580,18 +555,18 @@ bool Ndb_schema_dist_client::acl_notify(std::string user_list) { auto key = m_prepared_keys.keys()[0]; DBUG_RETURN(log_schema_op(user_list.c_str(), user_list.length(), - key.first.c_str(), key.second.c_str(), - unique_id(), unique_version(), SOT_ACL_SNAPSHOT)); + key.first.c_str(), key.second.c_str(), unique_id(), + unique_version(), SOT_ACL_SNAPSHOT)); } -bool Ndb_schema_dist_client::tablespace_changed(const char* tablespace_name, +bool Ndb_schema_dist_client::tablespace_changed(const char *tablespace_name, int id, int version) { DBUG_ENTER("Ndb_schema_dist_client::tablespace_changed"); DBUG_RETURN(log_schema_op(ndb_thd_query(m_thd), ndb_thd_query_length(m_thd), "", tablespace_name, id, version, SOT_TABLESPACE)); } -bool Ndb_schema_dist_client::logfilegroup_changed(const char* logfilegroup_name, +bool Ndb_schema_dist_client::logfilegroup_changed(const char *logfilegroup_name, int id, int version) { DBUG_ENTER("Ndb_schema_dist_client::logfilegroup_changed"); DBUG_RETURN(log_schema_op(ndb_thd_query(m_thd), ndb_thd_query_length(m_thd), @@ -599,7 +574,7 @@ bool Ndb_schema_dist_client::logfilegroup_changed(const char* logfilegroup_name, SOT_LOGFILE_GROUP)); } -bool Ndb_schema_dist_client::create_tablespace(const char* tablespace_name, +bool Ndb_schema_dist_client::create_tablespace(const char *tablespace_name, int id, int version) { DBUG_ENTER("Ndb_schema_dist_client::create_tablespace"); DBUG_RETURN(log_schema_op(ndb_thd_query(m_thd), ndb_thd_query_length(m_thd), @@ -607,7 +582,7 @@ bool Ndb_schema_dist_client::create_tablespace(const char* tablespace_name, SOT_CREATE_TABLESPACE)); } -bool Ndb_schema_dist_client::alter_tablespace(const char* tablespace_name, +bool Ndb_schema_dist_client::alter_tablespace(const char *tablespace_name, int id, int version) { DBUG_ENTER("Ndb_schema_dist_client::alter_tablespace"); DBUG_RETURN(log_schema_op(ndb_thd_query(m_thd), ndb_thd_query_length(m_thd), @@ -615,7 +590,7 @@ bool Ndb_schema_dist_client::alter_tablespace(const char* tablespace_name, SOT_ALTER_TABLESPACE)); } -bool Ndb_schema_dist_client::drop_tablespace(const char* tablespace_name, +bool Ndb_schema_dist_client::drop_tablespace(const char *tablespace_name, int id, int version) { DBUG_ENTER("Ndb_schema_dist_client::drop_tablespace"); DBUG_RETURN(log_schema_op(ndb_thd_query(m_thd), ndb_thd_query_length(m_thd), @@ -623,95 +598,90 @@ bool Ndb_schema_dist_client::drop_tablespace(const char* tablespace_name, SOT_DROP_TABLESPACE)); } -bool -Ndb_schema_dist_client::create_logfile_group(const char* logfile_group_name, - int id, int version) { +bool Ndb_schema_dist_client::create_logfile_group( + const char *logfile_group_name, int id, int version) { DBUG_ENTER("Ndb_schema_dist_client::create_logfile_group"); 
DBUG_RETURN(log_schema_op(ndb_thd_query(m_thd), ndb_thd_query_length(m_thd), "", logfile_group_name, id, version, SOT_CREATE_LOGFILE_GROUP)); } -bool -Ndb_schema_dist_client::alter_logfile_group(const char* logfile_group_name, - int id, int version) { +bool Ndb_schema_dist_client::alter_logfile_group(const char *logfile_group_name, + int id, int version) { DBUG_ENTER("Ndb_schema_dist_client::alter_logfile_group"); DBUG_RETURN(log_schema_op(ndb_thd_query(m_thd), ndb_thd_query_length(m_thd), "", logfile_group_name, id, version, SOT_ALTER_LOGFILE_GROUP)); } -bool -Ndb_schema_dist_client::drop_logfile_group(const char* logfile_group_name, - int id, int version) { +bool Ndb_schema_dist_client::drop_logfile_group(const char *logfile_group_name, + int id, int version) { DBUG_ENTER("Ndb_schema_dist_client::drop_logfile_group"); DBUG_RETURN(log_schema_op(ndb_thd_query(m_thd), ndb_thd_query_length(m_thd), "", logfile_group_name, id, version, SOT_DROP_LOGFILE_GROUP)); } -const char* -Ndb_schema_dist_client::type_name(SCHEMA_OP_TYPE type) -{ - switch(type){ - case SOT_DROP_TABLE: - return "DROP_TABLE"; - case SOT_CREATE_TABLE: - return "CREATE_TABLE"; - case SOT_ALTER_TABLE_COMMIT: - return "ALTER_TABLE_COMMIT"; - case SOT_DROP_DB: - return "DROP_DB"; - case SOT_CREATE_DB: - return "CREATE_DB"; - case SOT_ALTER_DB: - return "ALTER_DB"; - case SOT_CLEAR_SLOCK: - return "CLEAR_SLOCK"; - case SOT_TABLESPACE: - return "TABLESPACE"; - case SOT_LOGFILE_GROUP: - return "LOGFILE_GROUP"; - case SOT_RENAME_TABLE: - return "RENAME_TABLE"; - case SOT_TRUNCATE_TABLE: - return "TRUNCATE_TABLE"; - case SOT_RENAME_TABLE_PREPARE: - return "RENAME_TABLE_PREPARE"; - case SOT_ONLINE_ALTER_TABLE_PREPARE: - return "ONLINE_ALTER_TABLE_PREPARE"; - case SOT_ONLINE_ALTER_TABLE_COMMIT: - return "ONLINE_ALTER_TABLE_COMMIT"; - case SOT_CREATE_USER: - return "CREATE_USER"; - case SOT_DROP_USER: - return "DROP_USER"; - case SOT_RENAME_USER: - return "RENAME_USER"; - case SOT_GRANT: - return "GRANT"; - case SOT_REVOKE: - return "REVOKE"; - case SOT_CREATE_TABLESPACE: - return "CREATE_TABLESPACE"; - case SOT_ALTER_TABLESPACE: - return "ALTER_TABLESPACE"; - case SOT_DROP_TABLESPACE: - return "DROP_TABLESPACE"; - case SOT_CREATE_LOGFILE_GROUP: - return "CREATE_LOGFILE_GROUP"; - case SOT_ALTER_LOGFILE_GROUP: - return "ALTER_LOGFILE_GROUP"; - case SOT_DROP_LOGFILE_GROUP: - return "DROP_LOGFILE_GROUP"; - case SOT_ACL_SNAPSHOT: - return "ACL_SNAPSHOT"; - case SOT_ACL_STATEMENT: - return "ACL_STATEMENT"; - case SOT_ACL_STATEMENT_REFRESH: - return "ACL_STATEMENT_REFRESH"; - default: - break; +const char *Ndb_schema_dist_client::type_name(SCHEMA_OP_TYPE type) { + switch (type) { + case SOT_DROP_TABLE: + return "DROP_TABLE"; + case SOT_CREATE_TABLE: + return "CREATE_TABLE"; + case SOT_ALTER_TABLE_COMMIT: + return "ALTER_TABLE_COMMIT"; + case SOT_DROP_DB: + return "DROP_DB"; + case SOT_CREATE_DB: + return "CREATE_DB"; + case SOT_ALTER_DB: + return "ALTER_DB"; + case SOT_CLEAR_SLOCK: + return "CLEAR_SLOCK"; + case SOT_TABLESPACE: + return "TABLESPACE"; + case SOT_LOGFILE_GROUP: + return "LOGFILE_GROUP"; + case SOT_RENAME_TABLE: + return "RENAME_TABLE"; + case SOT_TRUNCATE_TABLE: + return "TRUNCATE_TABLE"; + case SOT_RENAME_TABLE_PREPARE: + return "RENAME_TABLE_PREPARE"; + case SOT_ONLINE_ALTER_TABLE_PREPARE: + return "ONLINE_ALTER_TABLE_PREPARE"; + case SOT_ONLINE_ALTER_TABLE_COMMIT: + return "ONLINE_ALTER_TABLE_COMMIT"; + case SOT_CREATE_USER: + return "CREATE_USER"; + case SOT_DROP_USER: + return "DROP_USER"; + case SOT_RENAME_USER: + return 
"RENAME_USER"; + case SOT_GRANT: + return "GRANT"; + case SOT_REVOKE: + return "REVOKE"; + case SOT_CREATE_TABLESPACE: + return "CREATE_TABLESPACE"; + case SOT_ALTER_TABLESPACE: + return "ALTER_TABLESPACE"; + case SOT_DROP_TABLESPACE: + return "DROP_TABLESPACE"; + case SOT_CREATE_LOGFILE_GROUP: + return "CREATE_LOGFILE_GROUP"; + case SOT_ALTER_LOGFILE_GROUP: + return "ALTER_LOGFILE_GROUP"; + case SOT_DROP_LOGFILE_GROUP: + return "DROP_LOGFILE_GROUP"; + case SOT_ACL_SNAPSHOT: + return "ACL_SNAPSHOT"; + case SOT_ACL_STATEMENT: + return "ACL_STATEMENT"; + case SOT_ACL_STATEMENT_REFRESH: + return "ACL_STATEMENT_REFRESH"; + default: + break; } DBUG_ASSERT(false); return ""; diff --git a/storage/ndb/plugin/ndb_schema_dist.h b/storage/ndb/plugin/ndb_schema_dist.h index e70765c9ec74..fe8ad2f2e279 100644 --- a/storage/ndb/plugin/ndb_schema_dist.h +++ b/storage/ndb/plugin/ndb_schema_dist.h @@ -36,37 +36,36 @@ protocol. Changes would break compatibility between versions. Add new numbers to the end. */ -enum SCHEMA_OP_TYPE -{ - SOT_DROP_TABLE= 0, - SOT_CREATE_TABLE= 1, - SOT_RENAME_TABLE_NEW= 2, // Unused, but still reserved - SOT_ALTER_TABLE_COMMIT= 3, - SOT_DROP_DB= 4, - SOT_CREATE_DB= 5, - SOT_ALTER_DB= 6, - SOT_CLEAR_SLOCK= 7, - SOT_TABLESPACE= 8, - SOT_LOGFILE_GROUP= 9, - SOT_RENAME_TABLE= 10, - SOT_TRUNCATE_TABLE= 11, - SOT_RENAME_TABLE_PREPARE= 12, - SOT_ONLINE_ALTER_TABLE_PREPARE= 13, - SOT_ONLINE_ALTER_TABLE_COMMIT= 14, - SOT_CREATE_USER= 15, - SOT_DROP_USER= 16, - SOT_RENAME_USER= 17, - SOT_GRANT= 18, - SOT_REVOKE= 19, - SOT_CREATE_TABLESPACE= 20, - SOT_ALTER_TABLESPACE= 21, - SOT_DROP_TABLESPACE= 22, - SOT_CREATE_LOGFILE_GROUP= 23, - SOT_ALTER_LOGFILE_GROUP= 24, - SOT_DROP_LOGFILE_GROUP= 25, - SOT_ACL_SNAPSHOT= 26, - SOT_ACL_STATEMENT= 27, - SOT_ACL_STATEMENT_REFRESH= 28, +enum SCHEMA_OP_TYPE { + SOT_DROP_TABLE = 0, + SOT_CREATE_TABLE = 1, + SOT_RENAME_TABLE_NEW = 2, // Unused, but still reserved + SOT_ALTER_TABLE_COMMIT = 3, + SOT_DROP_DB = 4, + SOT_CREATE_DB = 5, + SOT_ALTER_DB = 6, + SOT_CLEAR_SLOCK = 7, + SOT_TABLESPACE = 8, + SOT_LOGFILE_GROUP = 9, + SOT_RENAME_TABLE = 10, + SOT_TRUNCATE_TABLE = 11, + SOT_RENAME_TABLE_PREPARE = 12, + SOT_ONLINE_ALTER_TABLE_PREPARE = 13, + SOT_ONLINE_ALTER_TABLE_COMMIT = 14, + SOT_CREATE_USER = 15, + SOT_DROP_USER = 16, + SOT_RENAME_USER = 17, + SOT_GRANT = 18, + SOT_REVOKE = 19, + SOT_CREATE_TABLESPACE = 20, + SOT_ALTER_TABLESPACE = 21, + SOT_DROP_TABLESPACE = 22, + SOT_CREATE_LOGFILE_GROUP = 23, + SOT_ALTER_LOGFILE_GROUP = 24, + SOT_DROP_LOGFILE_GROUP = 25, + SOT_ACL_SNAPSHOT = 26, + SOT_ACL_STATEMENT = 27, + SOT_ACL_STATEMENT_REFRESH = 28, }; namespace Ndb_schema_dist { @@ -89,7 +88,7 @@ enum Schema_op_result_code { @return true schema distribution is ready */ -bool is_ready(void* requestor); +bool is_ready(void *requestor); } // namespace Ndb_schema_dist @@ -124,17 +123,16 @@ class Ndb; */ class Ndb_schema_dist_client { class THD *const m_thd; - class Thd_ndb* const m_thd_ndb; + class Thd_ndb *const m_thd_ndb; struct NDB_SHARE *m_share{nullptr}; class Prepared_keys { using Key = std::pair; std::vector m_keys; + public: - const std::vector& keys() { - return m_keys; - } - void add_key(const char* db, const char* tabname); - bool check_key(const char* db, const char* tabname) const; + const std::vector &keys() { return m_keys; } + void add_key(const char *db, const char *tabname); + bool check_key(const char *db, const char *tabname) const; } m_prepared_keys; bool m_holding_acl_mutex; @@ -147,11 +145,10 @@ class Ndb_schema_dist_client { }; std::vector 
m_schema_op_results; - int log_schema_op_impl(Ndb* ndb, const char *query, int query_length, + int log_schema_op_impl(Ndb *ndb, const char *query, int query_length, const char *db, const char *table_name, uint32 ndb_table_id, uint32 ndb_table_version, - SCHEMA_OP_TYPE type, - uint32 anyvalue); + SCHEMA_OP_TYPE type, uint32 anyvalue); /** @brief Write row to ndb_schema to initiate the schema operation @@ -170,8 +167,7 @@ class Ndb_schema_dist_client { */ bool log_schema_op(const char *query, size_t query_length, const char *db, const char *table_name, uint32 id, uint32 version, - SCHEMA_OP_TYPE type, - bool log_query_on_participant = true); + SCHEMA_OP_TYPE type, bool log_query_on_participant = true); /** @brief Calculate the anyvalue to use for this schema change. The anyvalue @@ -219,7 +215,7 @@ class Ndb_schema_dist_client { hard to rollback at a later stage. @return true if prepare succeed */ - bool prepare(const char* db, const char* tabname); + bool prepare(const char *db, const char *tabname); /** @brief Prepare client for rename schema operation, check that @@ -257,7 +253,7 @@ class Ndb_schema_dist_client { @param invalid_identifer The name of the identifier that failed the check @return true if check succeed */ - bool check_identifier_limits(std::string& invalid_identifier); + bool check_identifier_limits(std::string &invalid_identifier); /** * @brief Check if given name is the schema distribution table, special @@ -266,7 +262,7 @@ class Ndb_schema_dist_client { @param table_name table name @return true if table is the schema distribution table */ - static bool is_schema_dist_table(const char* db, const char* table_name); + static bool is_schema_dist_table(const char *db, const char *table_name); /** * @brief Check if given name is the schema distribution result table, special @@ -283,7 +279,7 @@ class Ndb_schema_dist_client { * @param type * @return string describing the type */ - static const char* type_name(SCHEMA_OP_TYPE type); + static const char *type_name(SCHEMA_OP_TYPE type); bool create_table(const char *db, const char *table_name, int id, int version); @@ -316,14 +312,14 @@ class Ndb_schema_dist_client { bool tablespace_changed(const char *tablespace_name, int id, int version); bool logfilegroup_changed(const char *logfilegroup_name, int id, int version); - bool create_tablespace(const char* tablespace_name, int id, int version); - bool alter_tablespace(const char* tablespace_name, int id, int version); - bool drop_tablespace(const char* tablespace_name, int id, int version); + bool create_tablespace(const char *tablespace_name, int id, int version); + bool alter_tablespace(const char *tablespace_name, int id, int version); + bool drop_tablespace(const char *tablespace_name, int id, int version); - bool create_logfile_group(const char* logfile_group_name, int id, + bool create_logfile_group(const char *logfile_group_name, int id, int version); - bool alter_logfile_group(const char* logfile_group_name, int id, int version); - bool drop_logfile_group(const char* logfile_group_name, int id, int version); + bool alter_logfile_group(const char *logfile_group_name, int id, int version); + bool drop_logfile_group(const char *logfile_group_name, int id, int version); }; #endif diff --git a/storage/ndb/plugin/ndb_schema_dist_table.cc b/storage/ndb/plugin/ndb_schema_dist_table.cc index f2c61edfb384..a1aa1a791104 100644 --- a/storage/ndb/plugin/ndb_schema_dist_table.cc +++ b/storage/ndb/plugin/ndb_schema_dist_table.cc @@ -32,18 +32,18 @@ const std::string 
Ndb_schema_dist_table::DB_NAME = "mysql"; const std::string Ndb_schema_dist_table::TABLE_NAME = "ndb_schema"; -const char* Ndb_schema_dist_table::COL_DB = "db"; -const char* Ndb_schema_dist_table::COL_NAME = "name"; -const char* Ndb_schema_dist_table::COL_QUERY = "query"; -const char* Ndb_schema_dist_table::COL_ID = "id"; -const char* Ndb_schema_dist_table::COL_VERSION = "version"; -static const char* COL_SLOCK = "slock"; -static const char* COL_NODEID = "node_id"; -static const char* COL_EPOCH = "epoch"; -static const char* COL_TYPE = "type"; -static const char* COL_SCHEMA_OP_ID = "schema_op_id"; - -Ndb_schema_dist_table::Ndb_schema_dist_table(Thd_ndb* thd_ndb) +const char *Ndb_schema_dist_table::COL_DB = "db"; +const char *Ndb_schema_dist_table::COL_NAME = "name"; +const char *Ndb_schema_dist_table::COL_QUERY = "query"; +const char *Ndb_schema_dist_table::COL_ID = "id"; +const char *Ndb_schema_dist_table::COL_VERSION = "version"; +static const char *COL_SLOCK = "slock"; +static const char *COL_NODEID = "node_id"; +static const char *COL_EPOCH = "epoch"; +static const char *COL_TYPE = "type"; +static const char *COL_SCHEMA_OP_ID = "schema_op_id"; + +Ndb_schema_dist_table::Ndb_schema_dist_table(Thd_ndb *thd_ndb) : Ndb_util_table(thd_ndb, DB_NAME, TABLE_NAME, true) {} Ndb_schema_dist_table::~Ndb_schema_dist_table() {} @@ -290,7 +290,7 @@ bool Ndb_schema_dist_table::define_table_ndb(NdbDictionary::Table &new_table, // schema_op_id INT UNSIGNED NULL NdbDictionary::Column col_schema_op_id(COL_SCHEMA_OP_ID); col_schema_op_id.setType(NdbDictionary::Column::Unsigned); - col_schema_op_id.setNullable(true); // NULL! + col_schema_op_id.setNullable(true); // NULL! if (!define_table_add_column(new_table, col_schema_op_id)) return false; } @@ -307,16 +307,13 @@ bool Ndb_schema_dist_table::need_upgrade() const { return false; } -bool Ndb_schema_dist_table::drop_events_in_NDB() const -{ +bool Ndb_schema_dist_table::drop_events_in_NDB() const { // Drop the default event on ndb_schema table - if (!drop_event_in_NDB("REPL$mysql/ndb_schema")) - return false; + if (!drop_event_in_NDB("REPL$mysql/ndb_schema")) return false; // Legacy event on ndb_schema table, drop since it might // have been created(although ages ago) - if (!drop_event_in_NDB("REPLF$mysql/ndb_schema")) - return false; + if (!drop_event_in_NDB("REPLF$mysql/ndb_schema")) return false; return true; } diff --git a/storage/ndb/plugin/ndb_schema_dist_table.h b/storage/ndb/plugin/ndb_schema_dist_table.h index ca4b6e873cb9..0ac8bafa9828 100644 --- a/storage/ndb/plugin/ndb_schema_dist_table.h +++ b/storage/ndb/plugin/ndb_schema_dist_table.h @@ -33,7 +33,7 @@ // RAII style class for working with the schema distribution table in NDB class Ndb_schema_dist_table : public Ndb_util_table { Ndb_schema_dist_table() = delete; - Ndb_schema_dist_table(const Ndb_schema_dist_table&) = delete; + Ndb_schema_dist_table(const Ndb_schema_dist_table &) = delete; bool define_table_ndb(NdbDictionary::Table &table, unsigned mysql_version) const override; @@ -44,19 +44,19 @@ class Ndb_schema_dist_table : public Ndb_util_table { static const std::string DB_NAME; static const std::string TABLE_NAME; - static const char* COL_DB; - static const char* COL_NAME; - static const char* COL_QUERY; - static const char* COL_ID; - static const char* COL_VERSION; + static const char *COL_DB; + static const char *COL_NAME; + static const char *COL_QUERY; + static const char *COL_ID; + static const char *COL_VERSION; - Ndb_schema_dist_table(class Thd_ndb*); + Ndb_schema_dist_table(class 
Thd_ndb *); virtual ~Ndb_schema_dist_table(); bool check_schema() const override; - bool check_column_identifier_limit(const char* column_name, - const std::string& identifier) const; + bool check_column_identifier_limit(const char *column_name, + const std::string &identifier) const; bool need_upgrade() const override; diff --git a/storage/ndb/plugin/ndb_schema_object.cc b/storage/ndb/plugin/ndb_schema_object.cc index 252f7ee7c802..884d93622ff1 100644 --- a/storage/ndb/plugin/ndb_schema_object.cc +++ b/storage/ndb/plugin/ndb_schema_object.cc @@ -37,9 +37,8 @@ // List keeping track of active NDB_SCHEMA_OBJECTs. The list is used // by the schema distribution coordinator to find the correct NDB_SCHEMA_OBJECT // in order to communicate with the schema dist client. -class Ndb_schema_objects -{ -public: +class Ndb_schema_objects { + public: // Nodeid of this node uint32 m_own_nodeid{0}; @@ -77,10 +76,9 @@ class Ndb_schema_objects return nullptr; } - for (const auto entry : m_hash){ - NDB_SCHEMA_OBJECT* schema_object = entry.second; - if (schema_object->schema_op_id() == schema_op_id) - return schema_object; + for (const auto entry : m_hash) { + NDB_SCHEMA_OBJECT *schema_object = entry.second; + if (schema_object->schema_op_id() == schema_op_id) return schema_object; } return nullptr; } @@ -94,11 +92,10 @@ void NDB_SCHEMA_OBJECT::init(uint32 nodeid) { active_schema_clients.m_own_nodeid = nodeid; } -void NDB_SCHEMA_OBJECT::get_schema_op_ids(std::vector<uint32>& ids) -{ +void NDB_SCHEMA_OBJECT::get_schema_op_ids(std::vector<uint32> &ids) { std::lock_guard<std::mutex> lock_hash(active_schema_clients.m_lock); - for (const auto entry : active_schema_clients.m_hash){ - NDB_SCHEMA_OBJECT* schema_object = entry.second; + for (const auto entry : active_schema_clients.m_hash) { + NDB_SCHEMA_OBJECT *schema_object = entry.second; ids.push_back(schema_object->schema_op_id()); } } @@ -152,8 +149,8 @@ NDB_SCHEMA_OBJECT *NDB_SCHEMA_OBJECT::get(const char *db, const char *table_name, uint32 id, uint32 version, bool create) { DBUG_ENTER("NDB_SCHEMA_OBJECT::get"); - DBUG_PRINT("enter", ("db: '%s', table_name: '%s', id: %u, version: %u", - db, table_name, id, version)); + DBUG_PRINT("enter", ("db: '%s', table_name: '%s', id: %u, version: %u", db, + table_name, id, version)); // Build a key on the form "./<db>/<table_name>_<id>_<version>" const std::string key = std::string("./") + db + "/" + table_name + "_" + @@ -163,8 +160,7 @@ NDB_SCHEMA_OBJECT *NDB_SCHEMA_OBJECT::get(const char *db, std::lock_guard<std::mutex> lock_hash(active_schema_clients.m_lock); NDB_SCHEMA_OBJECT *ndb_schema_object = active_schema_clients.find(key); - if (ndb_schema_object) - { + if (ndb_schema_object) { // Don't allow reuse of existing NDB_SCHEMA_OBJECT when requesting to // create, only the Ndb_schema_dist_client will create NDB_SCHEMA_OBJECT // and it should wait until previous schema operation with @@ -192,7 +188,7 @@ NDB_SCHEMA_OBJECT *NDB_SCHEMA_OBJECT::get(const char *db, DBUG_RETURN(ndb_schema_object); } -NDB_SCHEMA_OBJECT* NDB_SCHEMA_OBJECT::get(uint32 nodeid, uint32 schema_op_id) { +NDB_SCHEMA_OBJECT *NDB_SCHEMA_OBJECT::get(uint32 nodeid, uint32 schema_op_id) { DBUG_ENTER("NDB_SCHEMA_OBJECT::get"); DBUG_PRINT("enter", ("nodeid: %d, schema_op_id: %u", nodeid, schema_op_id)); @@ -222,9 +218,7 @@ NDB_SCHEMA_OBJECT *NDB_SCHEMA_OBJECT::get(NDB_SCHEMA_OBJECT *schema_object) { DBUG_RETURN(schema_object); } -void -NDB_SCHEMA_OBJECT::release(NDB_SCHEMA_OBJECT *ndb_schema_object) -{ +void NDB_SCHEMA_OBJECT::release(NDB_SCHEMA_OBJECT *ndb_schema_object) { DBUG_ENTER("NDB_SCHEMA_OBJECT::release");
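Aside on the registry reformatted above: Ndb_schema_objects is a mutex-guarded hash of the active schema operations, looked up either by the packed "./<db>/<table_name>_<id>_<version>" key or, for the coordinator, by schema_op_id via a linear scan. A minimal self-contained sketch of that shape, using hypothetical SchemaOp/SchemaOpRegistry names rather than the real NDB_SCHEMA_OBJECT API:

    #include <cstdint>
    #include <mutex>
    #include <string>
    #include <unordered_map>

    struct SchemaOp {
      std::string key;        // "./<db>/<table_name>_<id>_<version>"
      uint32_t schema_op_id;  // unique per client node
    };

    class SchemaOpRegistry {
      std::mutex m_lock;
      std::unordered_map<std::string, SchemaOp *> m_hash;

     public:
      SchemaOp *find(const std::string &key) {
        std::lock_guard<std::mutex> guard(m_lock);
        const auto it = m_hash.find(key);
        return it == m_hash.end() ? nullptr : it->second;
      }
      // Coordinator-style lookup: the map stays small, so a scan is fine.
      SchemaOp *find(uint32_t schema_op_id) {
        std::lock_guard<std::mutex> guard(m_lock);
        for (const auto &entry : m_hash)
          if (entry.second->schema_op_id == schema_op_id) return entry.second;
        return nullptr;
      }
    };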
DBUG_PRINT("enter", ("key: '%s'", ndb_schema_object->m_key.c_str())); @@ -252,11 +246,10 @@ size_t NDB_SCHEMA_OBJECT::count_active_schema_ops() { std::string NDB_SCHEMA_OBJECT::waiting_participants_to_string() const { std::lock_guard lock_state(state.m_lock); - const char* separator = ""; + const char *separator = ""; std::string participants("["); - for (const auto& it: state.m_participants){ - if (it.second.m_completed == true) - continue; // Don't show completed + for (const auto &it : state.m_participants) { + if (it.second.m_completed == true) continue; // Don't show completed participants.append(separator).append(std::to_string(it.first)); separator = ","; } @@ -264,18 +257,17 @@ std::string NDB_SCHEMA_OBJECT::waiting_participants_to_string() const { return participants; } -std::string NDB_SCHEMA_OBJECT::to_string(const char* line_separator) const -{ +std::string NDB_SCHEMA_OBJECT::to_string(const char *line_separator) const { std::stringstream ss; - ss << "NDB_SCHEMA_OBJECT { " << line_separator - << " '" << m_db << "'.'" << m_name << "', " << line_separator - << " id: " << m_id << ", version: " << m_version << ", " << line_separator + ss << "NDB_SCHEMA_OBJECT { " << line_separator << " '" << m_db << "'.'" + << m_name << "', " << line_separator << " id: " << m_id + << ", version: " << m_version << ", " << line_separator << " schema_op_id: " << m_schema_op_id << ", " << line_separator; // Dump state std::lock_guard lock_state(state.m_lock); { - ss << " use_count: " << state.m_use_count << ", " << line_separator; + ss << " use_count: " << state.m_use_count << ", " << line_separator; // Print the participant list ss << " participants: " << state.m_participants.size() << " [ " << line_separator; @@ -298,7 +290,7 @@ std::string NDB_SCHEMA_OBJECT::to_string(const char* line_separator) const size_t NDB_SCHEMA_OBJECT::count_completed_participants() const { size_t count = 0; - for (const auto& it : state.m_participants) { + for (const auto &it : state.m_participants) { const State::Participant &participant = it.second; if (participant.m_completed) count++; } @@ -315,8 +307,7 @@ void NDB_SCHEMA_OBJECT::register_participants( ndbcluster::ndbrequire(!state.m_coordinator_completed); // Insert new participants as specified by nodes list - for (const uint32 node: nodes) - state.m_participants[node]; + for (const uint32 node : nodes) state.m_participants[node]; // Double check that there are as many participants as nodes ndbcluster::ndbrequire(nodes.size() == state.m_participants.size()); @@ -336,7 +327,7 @@ void NDB_SCHEMA_OBJECT::result_received_from_node( } // Mark participant as completed and save result - State::Participant& participant = it->second; + State::Participant &participant = it->second; participant.m_completed = true; participant.m_result = result; participant.m_message = message; @@ -347,7 +338,7 @@ void NDB_SCHEMA_OBJECT::result_received_from_nodes( std::unique_lock lock_state(state.m_lock); // Mark the listed nodes as completed - for(auto node : nodes) { + for (auto node : nodes) { const auto it = state.m_participants.find(node); if (it == state.m_participants.end()) { // Received reply from node not registered as participant, may happen @@ -357,7 +348,7 @@ void NDB_SCHEMA_OBJECT::result_received_from_nodes( } // Participant is not in list, mark it as failed - State::Participant& participant = it->second; + State::Participant &participant = it->second; participant.m_completed = true; // No result or message provided in old protocol } @@ -371,14 +362,14 @@ bool 
NDB_SCHEMA_OBJECT::check_all_participants_completed() const { void NDB_SCHEMA_OBJECT::fail_participants_not_in_list( const std::unordered_set<uint32> &nodes, uint32 result, const char *message) const { - for (auto& it : state.m_participants) { + for (auto &it : state.m_participants) { if (nodes.find(it.first) != nodes.end()) { // Participant still exists in list continue; } // Participant is not in list, mark it as failed - State::Participant& participant = it.second; + State::Participant &participant = it.second; participant.m_completed = true; participant.m_result = result; participant.m_message = message; @@ -411,8 +402,8 @@ bool NDB_SCHEMA_OBJECT::check_timeout(int timeout_seconds, uint32 result, return false; // Timeout has not occurred // Mark all participants who haven't already completed as timed out - for (auto& it : state.m_participants) { - State::Participant& participant = it.second; + for (auto &it : state.m_participants) { + State::Participant &participant = it.second; if (participant.m_completed) continue; participant.m_completed = true; @@ -426,8 +417,8 @@ bool NDB_SCHEMA_OBJECT::check_timeout(int timeout_seconds, uint32 result, return true; } -void NDB_SCHEMA_OBJECT::fail_schema_op(uint32 result, const char* message) const -{ +void NDB_SCHEMA_OBJECT::fail_schema_op(uint32 result, + const char *message) const { std::unique_lock<std::mutex> lock_state(state.m_lock); if (state.m_participants.size() == 0) { @@ -437,8 +428,8 @@ void NDB_SCHEMA_OBJECT::fail_schema_op(uint32 result, const char* message) const } // Mark all participants who haven't already completed as failed - for (auto& it : state.m_participants) { - State::Participant& participant = it.second; + for (auto &it : state.m_participants) { + State::Participant &participant = it.second; if (participant.m_completed) continue; participant.m_completed = true; @@ -453,11 +444,11 @@ void NDB_SCHEMA_OBJECT::fail_schema_op(uint32 result, const char* message) const state.m_coordinator_completed = true; } -void NDB_SCHEMA_OBJECT::fail_all_schema_ops(uint32 result, const char* message) -{ +void NDB_SCHEMA_OBJECT::fail_all_schema_ops(uint32 result, + const char *message) { std::lock_guard<std::mutex> lock_hash(active_schema_clients.m_lock); - for (const auto entry : active_schema_clients.m_hash){ - const NDB_SCHEMA_OBJECT* schema_object = entry.second; + for (const auto entry : active_schema_clients.m_hash) { + const NDB_SCHEMA_OBJECT *schema_object = entry.second; schema_object->fail_schema_op(result, message); } } @@ -492,7 +483,7 @@ void NDB_SCHEMA_OBJECT::client_get_schema_op_results( // Make sure that coordinator has completed ndbcluster::ndbrequire(state.m_coordinator_completed); - for (const auto& it : state.m_participants) { + for (const auto &it : state.m_participants) { const State::Participant &participant = it.second; if (participant.m_result) results.push_back({ diff --git a/storage/ndb/plugin/ndb_schema_object.h b/storage/ndb/plugin/ndb_schema_object.h index 812ac3099251..82007b4a14f0 100644 --- a/storage/ndb/plugin/ndb_schema_object.h +++ b/storage/ndb/plugin/ndb_schema_object.h @@ -100,7 +100,7 @@ class NDB_SCHEMA_OBJECT { std::string m_message; Participant() = default; Participant(const Participant &) = delete; - Participant& operator=(const Participant&) = delete; + Participant &operator=(const Participant &) = delete; }; std::unordered_map<uint32, Participant> m_participants; @@ -113,7 +113,7 @@ class NDB_SCHEMA_OBJECT { uint decremement_use_count() const; NDB_SCHEMA_OBJECT() = delete; - NDB_SCHEMA_OBJECT(const NDB_SCHEMA_OBJECT&) = delete; + NDB_SCHEMA_OBJECT(const
NDB_SCHEMA_OBJECT &) = delete; NDB_SCHEMA_OBJECT(const char *key, const char *db, const char *name, uint32 id, uint32 version); ~NDB_SCHEMA_OBJECT(); @@ -122,10 +122,10 @@ class NDB_SCHEMA_OBJECT { uint32 result, const char *message) const; size_t count_completed_participants() const; - public: - const char * db() const { return m_db.c_str(); } - const char * name() const { return m_name.c_str(); } + public: + const char *db() const { return m_db.c_str(); } + const char *name() const { return m_name.c_str(); } uint32 id() const { return m_id; } uint32 version() const { return m_version; } @@ -135,7 +135,7 @@ class NDB_SCHEMA_OBJECT { // Return current list of waiting participants as human readable string std::string waiting_participants_to_string() const; - std::string to_string(const char* line_separator = "\n") const; + std::string to_string(const char *line_separator = "\n") const; /** @brief Initialize the NDB_SCHEMA_OBJECT facility @@ -172,8 +172,7 @@ class NDB_SCHEMA_OBJECT { @return nullptr if NDB_SCHEMA_OBJECT didn't exist */ static NDB_SCHEMA_OBJECT *get(const char *db, const char *table_name, - uint32 id, uint32 version, - bool create = false); + uint32 id, uint32 version, bool create = false); /** @brief Get NDB_SCHEMA_OBJECT by schema operation id @@ -198,8 +197,7 @@ class NDB_SCHEMA_OBJECT { @return pointer to existing NDB_SCHEMA_OBJECT */ - static NDB_SCHEMA_OBJECT *get(NDB_SCHEMA_OBJECT* schema_object); - + static NDB_SCHEMA_OBJECT *get(NDB_SCHEMA_OBJECT *schema_object); /** @brief Release NDB_SCHEMA_OBJECT which has been acquired with get() @@ -219,8 +217,7 @@ class NDB_SCHEMA_OBJECT { @brief Add list of nodes to participants @param nodes List of nodes to add */ - void register_participants(const std::unordered_set<uint32>& nodes) const; - + void register_participants(const std::unordered_set<uint32> &nodes) const; /** @brief Save the result received from a node @@ -290,7 +287,7 @@ class NDB_SCHEMA_OBJECT { @param result The result to set on the participant @param message The message to set on the participant */ - static void fail_all_schema_ops(uint32 result, const char* message); + static void fail_all_schema_ops(uint32 result, const char *message); /** @brief Wait until coordinator indicates that all participants have completed diff --git a/storage/ndb/plugin/ndb_schema_result_table.cc b/storage/ndb/plugin/ndb_schema_result_table.cc index f9c103ba8295..e50498bdc148 100644 --- a/storage/ndb/plugin/ndb_schema_result_table.cc +++ b/storage/ndb/plugin/ndb_schema_result_table.cc @@ -32,19 +32,19 @@ const std::string Ndb_schema_result_table::DB_NAME = "mysql"; const std::string Ndb_schema_result_table::TABLE_NAME = "ndb_schema_result"; -const char* Ndb_schema_result_table::COL_NODEID = "nodeid"; -const char* Ndb_schema_result_table::COL_SCHEMA_OP_ID = "schema_op_id"; -const char* Ndb_schema_result_table::COL_PARTICIPANT_NODEID = "participant_nodeid"; -const char* Ndb_schema_result_table::COL_RESULT = "result"; -const char* Ndb_schema_result_table::COL_MESSAGE = "message"; - -Ndb_schema_result_table::Ndb_schema_result_table(Thd_ndb* thd_ndb) +const char *Ndb_schema_result_table::COL_NODEID = "nodeid"; +const char *Ndb_schema_result_table::COL_SCHEMA_OP_ID = "schema_op_id"; +const char *Ndb_schema_result_table::COL_PARTICIPANT_NODEID = + "participant_nodeid"; +const char *Ndb_schema_result_table::COL_RESULT = "result"; +const char *Ndb_schema_result_table::COL_MESSAGE = "message"; + +Ndb_schema_result_table::Ndb_schema_result_table(Thd_ndb *thd_ndb) : Ndb_util_table(thd_ndb, DB_NAME, TABLE_NAME,
true) {} Ndb_schema_result_table::~Ndb_schema_result_table() {} bool Ndb_schema_result_table::check_schema() const { - // nodeid // unsigned int if (!(check_column_exist(COL_NODEID) && check_column_unsigned(COL_NODEID))) { @@ -73,14 +73,14 @@ bool Ndb_schema_result_table::check_schema() const { // result // unsigned int - if (!(check_column_exist(COL_RESULT) && - check_column_unsigned(COL_RESULT))) { + if (!(check_column_exist(COL_RESULT) && check_column_unsigned(COL_RESULT))) { return false; } // message // varbinary, at least 255 bytes long - if (!(check_column_exist(COL_MESSAGE) && check_column_varbinary(COL_MESSAGE) && + if (!(check_column_exist(COL_MESSAGE) && + check_column_varbinary(COL_MESSAGE) && check_column_minlength(COL_MESSAGE, 255))) { return false; } @@ -89,7 +89,7 @@ bool Ndb_schema_result_table::check_schema() const { } bool Ndb_schema_result_table::define_table_ndb(NdbDictionary::Table &new_table, - unsigned mysql_version) const { + unsigned mysql_version) const { // Allow later online add column new_table.setForceVarPart(true); @@ -120,7 +120,8 @@ bool Ndb_schema_result_table::define_table_ndb(NdbDictionary::Table &new_table, col_participant_nodeid.setType(NdbDictionary::Column::Unsigned); col_participant_nodeid.setNullable(false); col_participant_nodeid.setPrimaryKey(true); - if (!define_table_add_column(new_table, col_participant_nodeid)) return false; + if (!define_table_add_column(new_table, col_participant_nodeid)) + return false; } { @@ -140,7 +141,7 @@ bool Ndb_schema_result_table::define_table_ndb(NdbDictionary::Table &new_table, if (!define_table_add_column(new_table, col_message)) return false; } - (void)mysql_version; // Only one version can be created + (void)mysql_version; // Only one version can be created return true; } @@ -160,11 +161,9 @@ std::string Ndb_schema_result_table::define_table_dd() const { return ss.str(); } -bool Ndb_schema_result_table::drop_events_in_NDB() const -{ +bool Ndb_schema_result_table::drop_events_in_NDB() const { // Drop the default event - if (!drop_event_in_NDB("REPL$mysql/ndb_schema_result")) - return false; + if (!drop_event_in_NDB("REPL$mysql/ndb_schema_result")) return false; return true; } diff --git a/storage/ndb/plugin/ndb_schema_result_table.h b/storage/ndb/plugin/ndb_schema_result_table.h index b6a6c722823c..436c0fbfe0ff 100644 --- a/storage/ndb/plugin/ndb_schema_result_table.h +++ b/storage/ndb/plugin/ndb_schema_result_table.h @@ -33,23 +33,24 @@ // RAII style class for working with the schema result table in NDB class Ndb_schema_result_table : public Ndb_util_table { Ndb_schema_result_table() = delete; - Ndb_schema_result_table(const Ndb_schema_result_table&) = delete; + Ndb_schema_result_table(const Ndb_schema_result_table &) = delete; bool define_table_ndb(NdbDictionary::Table &table, unsigned mysql_version) const override; bool drop_events_in_NDB() const override; + public: static const std::string DB_NAME; static const std::string TABLE_NAME; - static const char* COL_NODEID; - static const char* COL_SCHEMA_OP_ID; - static const char* COL_PARTICIPANT_NODEID; - static const char* COL_RESULT; - static const char* COL_MESSAGE; + static const char *COL_NODEID; + static const char *COL_SCHEMA_OP_ID; + static const char *COL_PARTICIPANT_NODEID; + static const char *COL_RESULT; + static const char *COL_MESSAGE; - Ndb_schema_result_table(class Thd_ndb*); + Ndb_schema_result_table(class Thd_ndb *); virtual ~Ndb_schema_result_table(); bool check_schema() const override; diff --git a/storage/ndb/plugin/ndb_server_hooks.cc 
b/storage/ndb/plugin/ndb_server_hooks.cc index 2af9d2aad6d0..c0fa64e64395 100644 --- a/storage/ndb/plugin/ndb_server_hooks.cc +++ b/storage/ndb/plugin/ndb_server_hooks.cc @@ -31,9 +31,7 @@ #include "storage/ndb/plugin/ndb_log.h" #include "storage/ndb/plugin/ndb_plugin_reference.h" - -bool Ndb_server_hooks::register_server_started(hook_t* hook_func) -{ +bool Ndb_server_hooks::register_server_started(hook_t *hook_func) { // Only allow one server_started hook to be installed DBUG_ASSERT(!m_server_state_observer); @@ -41,27 +39,24 @@ bool Ndb_server_hooks::register_server_started(hook_t* hook_func) // Resolve pointer to the ndbcluster plugin, it may // not resolve in case plugin has failed to init() - if (!ndbcluster_plugin.lock()) - return false; + if (!ndbcluster_plugin.lock()) return false; - m_server_state_observer = - new Server_state_observer { - sizeof(Server_state_observer), + m_server_state_observer = new Server_state_observer{ + sizeof(Server_state_observer), - // before clients are allowed to connect - (before_handle_connection_t)hook_func, - NULL, // before recovery - NULL, // after engine recovery - NULL, // after recovery - NULL, // before shutdown - NULL, // after shutdown - }; + // before clients are allowed to connect + (before_handle_connection_t)hook_func, + NULL, // before recovery + NULL, // after engine recovery + NULL, // after recovery + NULL, // before shutdown + NULL, // after shutdown + }; // Install server state observer to be called // before the server allows incoming connections if (register_server_state_observer(m_server_state_observer, - ndbcluster_plugin.handle())) - { + ndbcluster_plugin.handle())) { ndb_log_error("Failed to register server state observer"); return false; } @@ -69,21 +64,17 @@ bool Ndb_server_hooks::register_server_started(hook_t* hook_func) return true; } - -bool Ndb_server_hooks::register_applier_start(hook_t* hook_func) -{ +bool Ndb_server_hooks::register_applier_start(hook_t *hook_func) { // Only allow one applier_start hook to be installed DBUG_ASSERT(!m_binlog_relay_io_observer); Ndb_plugin_reference ndbcluster_plugin; // Resolve pointer to the ndbcluster plugin - if (!ndbcluster_plugin.lock()) - return false; + if (!ndbcluster_plugin.lock()) return false; - m_binlog_relay_io_observer= - new Binlog_relay_IO_observer { - sizeof(Binlog_relay_IO_observer), + m_binlog_relay_io_observer = new Binlog_relay_IO_observer{ + sizeof(Binlog_relay_IO_observer), NULL, // thread_start NULL, // thread_stop @@ -94,13 +85,11 @@ bool Ndb_server_hooks::register_applier_start(hook_t* hook_func) NULL, // after_queue_event NULL, // after_reset NULL // applier_log_event - }; - + }; // Install replication observer to be called when applier thread start if (register_binlog_relay_io_observer(m_binlog_relay_io_observer, - ndbcluster_plugin.handle())) - { + ndbcluster_plugin.handle())) { ndb_log_error("Failed to register binlog relay io observer"); return false; } @@ -108,20 +97,14 @@ bool Ndb_server_hooks::register_applier_start(hook_t* hook_func) return true; } - -void Ndb_server_hooks::unregister_all(void) -{ +void Ndb_server_hooks::unregister_all(void) { if (m_server_state_observer) - unregister_server_state_observer(m_server_state_observer, - nullptr); + unregister_server_state_observer(m_server_state_observer, nullptr); if (m_binlog_relay_io_observer) - unregister_binlog_relay_io_observer(m_binlog_relay_io_observer, - nullptr); + unregister_binlog_relay_io_observer(m_binlog_relay_io_observer, nullptr); } - -Ndb_server_hooks::~Ndb_server_hooks() -{ 
+Ndb_server_hooks::~Ndb_server_hooks() { delete m_server_state_observer; delete m_binlog_relay_io_observer; } diff --git a/storage/ndb/plugin/ndb_server_hooks.h b/storage/ndb/plugin/ndb_server_hooks.h index b090bccc67b9..18aae110bbdb 100644 --- a/storage/ndb/plugin/ndb_server_hooks.h +++ b/storage/ndb/plugin/ndb_server_hooks.h @@ -25,19 +25,17 @@ #ifndef NDB_SERVER_HOOKS_H #define NDB_SERVER_HOOKS_H +class Ndb_server_hooks { + using hook_t = int(void *); -class Ndb_server_hooks -{ - using hook_t = int (void*); + struct Server_state_observer *m_server_state_observer = nullptr; + struct Binlog_relay_IO_observer *m_binlog_relay_io_observer = nullptr; - struct Server_state_observer* m_server_state_observer = nullptr; - struct Binlog_relay_IO_observer* m_binlog_relay_io_observer = nullptr; - -public: + public: ~Ndb_server_hooks(); - bool register_server_started(hook_t*); - bool register_applier_start(hook_t*); + bool register_server_started(hook_t *); + bool register_applier_start(hook_t *); void unregister_all(void); }; diff --git a/storage/ndb/plugin/ndb_share.cc b/storage/ndb/plugin/ndb_share.cc index cd9b5fb654b6..2059d07d812b 100644 --- a/storage/ndb/plugin/ndb_share.cc +++ b/storage/ndb/plugin/ndb_share.cc @@ -40,47 +40,43 @@ #include "storage/ndb/plugin/ndb_require.h" #include "storage/ndb/plugin/ndb_table_map.h" -extern Ndb* g_ndb; +extern Ndb *g_ndb; extern mysql_mutex_t ndbcluster_mutex; // List of NDB_SHARE's which correspond to an open table. std::unique_ptr<collation_unordered_map<std::string, NDB_SHARE *>> - ndbcluster_open_tables; + ndbcluster_open_tables; // List of NDB_SHARE's which have been dropped, they are kept in this list // until all references to them have been released. -static std::unordered_set<NDB_SHARE*> dropped_shares; +static std::unordered_set<NDB_SHARE *> dropped_shares; -NDB_SHARE* -NDB_SHARE::create(const char* key) -{ - if (DBUG_EVALUATE_IF("ndb_share_create_fail1", true, false)) - { +NDB_SHARE *NDB_SHARE::create(const char *key) { + if (DBUG_EVALUATE_IF("ndb_share_create_fail1", true, false)) { // Simulate failure to create NDB_SHARE return nullptr; } - NDB_SHARE* share; - if (!(share= (NDB_SHARE*) my_malloc(PSI_INSTRUMENT_ME, - sizeof(*share), - MYF(MY_WME | MY_ZEROFILL)))) + NDB_SHARE *share; + if (!(share = (NDB_SHARE *)my_malloc(PSI_INSTRUMENT_ME, sizeof(*share), + MYF(MY_WME | MY_ZEROFILL)))) return nullptr; - share->flags= 0; - share->state= NSS_INITIAL; + share->flags = 0; + share->state = NSS_INITIAL; /* Allocates enough space for key, db, and table_name */ - share->key= NDB_SHARE::create_key(key); + share->key = NDB_SHARE::create_key(key); - share->db= NDB_SHARE::key_get_db_name(share->key); - share->table_name= NDB_SHARE::key_get_table_name(share->key); + share->db = NDB_SHARE::key_get_db_name(share->key); + share->table_name = NDB_SHARE::key_get_table_name(share->key); thr_lock_init(&share->lock); mysql_mutex_init(PSI_INSTRUMENT_ME, &share->mutex, MY_MUTEX_INIT_FAST); - share->m_cfn_share= nullptr; + share->m_cfn_share = nullptr; - share->op= 0; + share->op = 0; #ifndef DBUG_OFF DBUG_ASSERT(share->m_use_count == 0); @@ -92,9 +88,7 @@ NDB_SHARE::create(const char* key) return share; } -void -NDB_SHARE::destroy(NDB_SHARE* share) -{ +void NDB_SHARE::destroy(NDB_SHARE *share) { thr_lock_delete(&share->lock); mysql_mutex_destroy(&share->mutex); @@ -133,9 +127,7 @@ struct NDB_SHARE_KEY { char m_buffer[1]; }; -NDB_SHARE_KEY* -NDB_SHARE::create_key(const char *new_key) -{ +NDB_SHARE_KEY *NDB_SHARE::create_key(const char *new_key) { const size_t new_key_length = strlen(new_key); char db_name_buf[FN_HEADLEN]; @@ -147,21 +139,16
@@ NDB_SHARE::create_key(const char *new_key) const size_t table_name_len = strlen(table_name_buf); // Calculate total size needed for the variable length strings - const size_t size= - sizeof(NDB_SHARE_KEY) + - new_key_length + - db_name_len + 1 + - table_name_len + 1; + const size_t size = sizeof(NDB_SHARE_KEY) + new_key_length + db_name_len + 1 + + table_name_len + 1; - NDB_SHARE_KEY* allocated_key= - (NDB_SHARE_KEY*) my_malloc(PSI_INSTRUMENT_ME, - size, - MYF(MY_WME | ME_FATALERROR)); + NDB_SHARE_KEY *allocated_key = (NDB_SHARE_KEY *)my_malloc( + PSI_INSTRUMENT_ME, size, MYF(MY_WME | ME_FATALERROR)); allocated_key->m_key_length = new_key_length; // Copy key into the buffer - char* buf_ptr = allocated_key->m_buffer; + char *buf_ptr = allocated_key->m_buffer; my_stpcpy(buf_ptr, new_key); buf_ptr += new_key_length + 1; @@ -174,89 +161,68 @@ NDB_SHARE::create_key(const char *new_key) buf_ptr += table_name_len; // Check that writing has not occurred beyond end of allocated memory - assert(buf_ptr < reinterpret_cast<char*>(allocated_key) + size); + assert(buf_ptr < reinterpret_cast<char *>(allocated_key) + size); DBUG_PRINT("info", ("size: %lu", (unsigned long)size)); - DBUG_PRINT("info", ("new_key: '%s', %lu", - new_key, (unsigned long)new_key_length)); - DBUG_PRINT("info", ("db_name: '%s', %lu", - db_name_buf, (unsigned long)db_name_len)); + DBUG_PRINT("info", + ("new_key: '%s', %lu", new_key, (unsigned long)new_key_length)); + DBUG_PRINT("info", + ("db_name: '%s', %lu", db_name_buf, (unsigned long)db_name_len)); DBUG_PRINT("info", ("table_name: '%s', %lu", table_name_buf, (unsigned long)table_name_len)); - DBUG_DUMP("NDB_SHARE_KEY: ", (const uchar*)allocated_key->m_buffer, size); + DBUG_DUMP("NDB_SHARE_KEY: ", (const uchar *)allocated_key->m_buffer, size); return allocated_key; } +void NDB_SHARE::free_key(NDB_SHARE_KEY *key) { my_free(key); } -void NDB_SHARE::free_key(NDB_SHARE_KEY* key) -{ - my_free(key); -} - - -std::string NDB_SHARE::key_get_key(NDB_SHARE_KEY* key) -{ +std::string NDB_SHARE::key_get_key(NDB_SHARE_KEY *key) { assert(key->m_key_length == strlen(key->m_buffer)); return key->m_buffer; } - -char* NDB_SHARE::key_get_db_name(NDB_SHARE_KEY* key) -{ - char* buf_ptr = key->m_buffer; +char *NDB_SHARE::key_get_db_name(NDB_SHARE_KEY *key) { + char *buf_ptr = key->m_buffer; // Step past the key string and its zero terminator buf_ptr += key->m_key_length + 1; return buf_ptr; } - -char* NDB_SHARE::key_get_table_name(NDB_SHARE_KEY* key) -{ - char* buf_ptr = key_get_db_name(key); +char *NDB_SHARE::key_get_table_name(NDB_SHARE_KEY *key) { + char *buf_ptr = key_get_db_name(key); const size_t db_name_len = strlen(buf_ptr); // Step past the db name string and its zero terminator buf_ptr += db_name_len + 1; return buf_ptr; } - -size_t NDB_SHARE::key_length() const -{ +size_t NDB_SHARE::key_length() const { assert(key->m_key_length == strlen(key->m_buffer)); return key->m_key_length; } - -const char* NDB_SHARE::key_string() const -{ +const char *NDB_SHARE::key_string() const { assert(strlen(key->m_buffer) == key->m_key_length); return key->m_buffer; } - -const char* -NDB_SHARE::share_state_string(void) const -{ - switch(state) { - case NSS_INITIAL: - return "NSS_INITIAL"; - case NSS_DROPPED: - return "NSS_DROPPED"; +const char *NDB_SHARE::share_state_string(void) const { + switch (state) { + case NSS_INITIAL: + return "NSS_INITIAL"; + case NSS_DROPPED: + return "NSS_DROPPED"; } assert(false); return ""; } - -void -NDB_SHARE::free_share(NDB_SHARE **share) -{ +void NDB_SHARE::free_share(NDB_SHARE **share) {
DBUG_ENTER("NDB_SHARE::free_share"); mysql_mutex_assert_owner(&ndbcluster_mutex); - if (!(*share)->decrement_use_count()) - { + if (!(*share)->decrement_use_count()) { // Noone is using the NDB_SHARE anymore, release it NDB_SHARE::real_free_share(share); } @@ -264,11 +230,8 @@ NDB_SHARE::free_share(NDB_SHARE **share) DBUG_VOID_RETURN; } - -NDB_SHARE* -NDB_SHARE::create_and_acquire_reference(const char *key, - const char *reference) -{ +NDB_SHARE *NDB_SHARE::create_and_acquire_reference(const char *key, + const char *reference) { DBUG_ENTER("create_and_acquire_reference"); DBUG_PRINT("enter", ("key: '%s'", key)); @@ -277,9 +240,8 @@ NDB_SHARE::create_and_acquire_reference(const char *key, // Make sure that the SHARE does not already exist DBUG_ASSERT(!acquire_reference_impl(key)); - NDB_SHARE* share = NDB_SHARE::create(key); - if (share == nullptr) - { + NDB_SHARE *share = NDB_SHARE::create(key); + if (share == nullptr) { DBUG_PRINT("error", ("failed to create NDB_SHARE")); DBUG_RETURN(nullptr); } @@ -298,33 +260,26 @@ NDB_SHARE::create_and_acquire_reference(const char *key, DBUG_RETURN(share); } - -NDB_SHARE* -NDB_SHARE::acquire_for_handler(const char* key, - const class ha_ndbcluster* reference) -{ +NDB_SHARE *NDB_SHARE::acquire_for_handler( + const char *key, const class ha_ndbcluster *reference) { DBUG_ENTER("acquire_for_handler"); mysql_mutex_lock(&ndbcluster_mutex); - NDB_SHARE* share = acquire_reference_impl(key); - if (share) - { + NDB_SHARE *share = acquire_reference_impl(key); + if (share) { share->refs_insert(reference); - DBUG_PRINT("NDB_SHARE", ("'%s' reference: 'ha_ndbcluster(%p)', " - "use_count: %u", - share->key_string(), reference, - share->use_count())); + DBUG_PRINT("NDB_SHARE", + ("'%s' reference: 'ha_ndbcluster(%p)', " + "use_count: %u", + share->key_string(), reference, share->use_count())); } mysql_mutex_unlock(&ndbcluster_mutex); DBUG_RETURN(share); } - -void -NDB_SHARE::release_for_handler(NDB_SHARE* share, - const ha_ndbcluster* reference) -{ +void NDB_SHARE::release_for_handler(NDB_SHARE *share, + const ha_ndbcluster *reference) { DBUG_ENTER("release_for_handler"); mysql_mutex_lock(&ndbcluster_mutex); @@ -340,15 +295,12 @@ NDB_SHARE::release_for_handler(NDB_SHARE* share, DBUG_VOID_RETURN; } - /* Acquire another reference using existing share reference. */ -NDB_SHARE* -NDB_SHARE::acquire_reference_on_existing(NDB_SHARE *share, - const char* reference) -{ +NDB_SHARE *NDB_SHARE::acquire_reference_on_existing(NDB_SHARE *share, + const char *reference) { mysql_mutex_lock(&ndbcluster_mutex); // Should already be referenced @@ -366,53 +318,42 @@ NDB_SHARE::acquire_reference_on_existing(NDB_SHARE *share, return share; } - /* Acquire reference using key. 
*/ -NDB_SHARE* -NDB_SHARE::acquire_reference_by_key(const char* key, - const char* reference) -{ +NDB_SHARE *NDB_SHARE::acquire_reference_by_key(const char *key, + const char *reference) { mysql_mutex_lock(&ndbcluster_mutex); - NDB_SHARE* share = acquire_reference_impl(key); - if (share) - { + NDB_SHARE *share = acquire_reference_impl(key); + if (share) { share->refs_insert(reference); - DBUG_PRINT("NDB_SHARE", ("'%s', reference: '%s', use_count: %u", - share->key_string(), reference, - share->use_count())); + DBUG_PRINT("NDB_SHARE", + ("'%s', reference: '%s', use_count: %u", share->key_string(), + reference, share->use_count())); } mysql_mutex_unlock(&ndbcluster_mutex); return share; } - -NDB_SHARE* -NDB_SHARE::acquire_reference_by_key_have_lock(const char* key, - const char* reference) -{ +NDB_SHARE *NDB_SHARE::acquire_reference_by_key_have_lock( + const char *key, const char *reference) { mysql_mutex_assert_owner(&ndbcluster_mutex); - NDB_SHARE* share = acquire_reference_impl(key); - if (share) - { + NDB_SHARE *share = acquire_reference_impl(key); + if (share) { share->refs_insert(reference); - DBUG_PRINT("NDB_SHARE", ("'%s', reference: '%s', use_count: %u", - share->key_string(), reference, - share->use_count())); + DBUG_PRINT("NDB_SHARE", + ("'%s', reference: '%s', use_count: %u", share->key_string(), + reference, share->use_count())); } return share; } -void -NDB_SHARE::release_reference(NDB_SHARE* share, - const char* reference) -{ +void NDB_SHARE::release_reference(NDB_SHARE *share, const char *reference) { mysql_mutex_lock(&ndbcluster_mutex); DBUG_PRINT("NDB_SHARE", ("release '%s', reference: '%s', use_count: %u", @@ -424,11 +365,8 @@ NDB_SHARE::release_reference(NDB_SHARE* share, mysql_mutex_unlock(&ndbcluster_mutex); } - -void -NDB_SHARE::release_reference_have_lock(NDB_SHARE* share, - const char* reference) -{ +void NDB_SHARE::release_reference_have_lock(NDB_SHARE *share, + const char *reference) { mysql_mutex_assert_owner(&ndbcluster_mutex); DBUG_PRINT("NDB_SHARE", ("release '%s', reference: '%s', use_count: %u", @@ -438,20 +376,17 @@ NDB_SHARE::release_reference_have_lock(NDB_SHARE* share, NDB_SHARE::free_share(&share); } - #ifndef DBUG_OFF -bool -NDB_SHARE::Ndb_share_references::check_empty() const -{ - if (size() == 0) - { +bool NDB_SHARE::Ndb_share_references::check_empty() const { + if (size() == 0) { // There are no references, all OK return true; } - ndb_log_error("Consistency check of NDB_SHARE references failed, the list " - "of references should be empty at this time"); + ndb_log_error( + "Consistency check of NDB_SHARE references failed, the list " + "of references should be empty at this time"); std::string s; debug_print(s); @@ -460,18 +395,15 @@ NDB_SHARE::Ndb_share_references::check_empty() const return false; } -void -NDB_SHARE::Ndb_share_references::debug_print(std::string& out, - const char* line_separator) const -{ +void NDB_SHARE::Ndb_share_references::debug_print( + std::string &out, const char *line_separator) const { std::stringstream ss; // Print the handler list { - const char* separator = ""; + const char *separator = ""; ss << " handlers: " << handlers.size() << " [ "; - for (const auto& key : handlers) - { + for (const auto &key : handlers) { ss << separator << "'" << key << "'"; separator = ","; } @@ -481,10 +413,9 @@ NDB_SHARE::Ndb_share_references::debug_print(std::string& out, // Print the strings list { - const char* separator = ""; + const char *separator = ""; ss << " strings: " << strings.size() << " [ "; - for (const auto& key : strings) - 
{ + for (const auto &key : strings) { ss << separator << "'" << key.c_str() << "'"; separator = ","; } @@ -493,19 +424,16 @@ NDB_SHARE::Ndb_share_references::debug_print(std::string& out, ss << ", " << line_separator; out = ss.str(); - } #endif -void -NDB_SHARE::debug_print(std::string& out, const char* line_separator) const -{ +void NDB_SHARE::debug_print(std::string &out, + const char *line_separator) const { std::stringstream ss; - ss << "NDB_SHARE { " << line_separator - << " db: '" << db << "'," << line_separator - << " table_name: '" << table_name << "', " << line_separator - << " key: '" << key_string() << "', " << line_separator + ss << "NDB_SHARE { " << line_separator << " db: '" << db << "'," + << line_separator << " table_name: '" << table_name << "', " + << line_separator << " key: '" << key_string() << "', " << line_separator << " use_count: " << use_count() << ", " << line_separator << " state: " << share_state_string() << ", " << line_separator << " op: " << op << ", " << line_separator; @@ -524,22 +452,20 @@ NDB_SHARE::debug_print(std::string& out, const char* line_separator) const out = ss.str(); } - -void -NDB_SHARE::debug_print_shares(std::string& out) -{ +void NDB_SHARE::debug_print_shares(std::string &out) { std::stringstream ss; - ss << "ndbcluster_open_tables {" << "\n"; + ss << "ndbcluster_open_tables {" + << "\n"; - for (const auto &key_and_value : *ndbcluster_open_tables) - { - const NDB_SHARE* share = key_and_value.second; + for (const auto &key_and_value : *ndbcluster_open_tables) { + const NDB_SHARE *share = key_and_value.second; std::string s; share->debug_print(s, "\n"); - ss << s << "\n"; + ss << s << "\n"; } - ss << "}" << "\n"; + ss << "}" + << "\n"; out = ss.str(); } @@ -549,12 +475,9 @@ uint NDB_SHARE::decrement_use_count() { return --m_use_count; } -void -NDB_SHARE::print_remaining_open_tables(void) -{ +void NDB_SHARE::print_remaining_open_tables(void) { mysql_mutex_lock(&ndbcluster_mutex); - if (!ndbcluster_open_tables->empty()) - { + if (!ndbcluster_open_tables->empty()) { std::string s; NDB_SHARE::debug_print_shares(s); std::cerr << s << std::endl; @@ -562,26 +485,22 @@ NDB_SHARE::print_remaining_open_tables(void) mysql_mutex_unlock(&ndbcluster_mutex); } - -int -NDB_SHARE::rename_share(NDB_SHARE *share, NDB_SHARE_KEY* new_key) -{ +int NDB_SHARE::rename_share(NDB_SHARE *share, NDB_SHARE_KEY *new_key) { DBUG_ENTER("NDB_SHARE::rename_share"); DBUG_PRINT("enter", ("share->key: '%s'", share->key_string())); - DBUG_PRINT("enter", ("new_key: '%s'", - NDB_SHARE::key_get_key(new_key).c_str())); + DBUG_PRINT("enter", + ("new_key: '%s'", NDB_SHARE::key_get_key(new_key).c_str())); mysql_mutex_lock(&ndbcluster_mutex); // Make sure that no NDB_SHARE with new_key already exists if (find_or_nullptr(*ndbcluster_open_tables, - NDB_SHARE::key_get_key(new_key))) - { + NDB_SHARE::key_get_key(new_key))) { // Dump the list of open NDB_SHARE's since new_key already exists - ndb_log_error("INTERNAL ERROR: Found existing NDB_SHARE for " - "new key: '%s' while renaming: '%s'", - NDB_SHARE::key_get_key(new_key).c_str(), - share->key_string()); + ndb_log_error( + "INTERNAL ERROR: Found existing NDB_SHARE for " + "new key: '%s' while renaming: '%s'", + NDB_SHARE::key_get_key(new_key).c_str(), share->key_string()); std::string s; NDB_SHARE::debug_print_shares(s); std::cerr << s << std::endl; @@ -589,8 +508,8 @@ NDB_SHARE::rename_share(NDB_SHARE *share, NDB_SHARE_KEY* new_key) } /* Update the share hash key. 
*/ - NDB_SHARE_KEY *old_key= share->key; - share->key= new_key; + NDB_SHARE_KEY *old_key = share->key; + share->key = new_key; ndbcluster_open_tables->erase(NDB_SHARE::key_get_key(old_key)); ndbcluster_open_tables->emplace(NDB_SHARE::key_get_key(new_key), share); @@ -602,17 +521,14 @@ NDB_SHARE::rename_share(NDB_SHARE *share, NDB_SHARE_KEY* new_key) NDB_SHARE::key_get_key(new_key))); DBUG_PRINT("info", ("setting db and table_name to point at new key")); - share->db= NDB_SHARE::key_get_db_name(share->key); - share->table_name= NDB_SHARE::key_get_table_name(share->key); + share->db = NDB_SHARE::key_get_db_name(share->key); + share->table_name = NDB_SHARE::key_get_table_name(share->key); - if (share->op) - { + if (share->op) { Ndb_event_data *event_data = - static_cast<Ndb_event_data*>(share->op->getCustomData()); - if (event_data && event_data->shadow_table) - { - if (!ndb_name_is_temp(share->table_name)) - { + static_cast<Ndb_event_data *>(share->op->getCustomData()); + if (event_data && event_data->shadow_table) { + if (!ndb_name_is_temp(share->table_name)) { DBUG_PRINT("info", ("Renaming shadow table")); // Allocate new strings for db and table_name for shadow_table // in event_data's MEM_ROOT (where the shadow_table itself is allocated) @@ -620,16 +536,12 @@ NDB_SHARE::rename_share(NDB_SHARE *share, NDB_SHARE_KEY* new_key) // strings are not released until the mem_root is eventually // released. lex_string_strmake(&event_data->mem_root, - &event_data->shadow_table->s->db, - share->db, + &event_data->shadow_table->s->db, share->db, strlen(share->db)); lex_string_strmake(&event_data->mem_root, &event_data->shadow_table->s->table_name, - share->table_name, - strlen(share->table_name)); - } - else - { + share->table_name, strlen(share->table_name)); + } else { DBUG_PRINT("info", ("Name is temporary, skip rename of shadow table")); // don't rename the shadow table here, it's used by injector and all // events might not have been processed.
It will be dropped anyway @@ -640,7 +552,6 @@ NDB_SHARE::rename_share(NDB_SHARE *share, NDB_SHARE_KEY* new_key) DBUG_RETURN(0); } - /** Acquire NDB_SHARE for key @@ -649,17 +560,14 @@ NDB_SHARE::rename_share(NDB_SHARE *share, NDB_SHARE_KEY* new_key) @param key The key for NDB_SHARE to acquire */ -NDB_SHARE * -NDB_SHARE::acquire_reference_impl(const char *key) -{ +NDB_SHARE *NDB_SHARE::acquire_reference_impl(const char *key) { DBUG_ENTER("NDB_SHARE::acquire_reference_impl"); DBUG_PRINT("enter", ("key: '%s'", key)); mysql_mutex_assert_owner(&ndbcluster_mutex); - auto it= ndbcluster_open_tables->find(key); - if (it == ndbcluster_open_tables->end()) - { + auto it = ndbcluster_open_tables->find(key); + if (it == ndbcluster_open_tables->end()) { DBUG_PRINT("error", ("%s does not exist", key)); DBUG_RETURN(nullptr); } @@ -672,19 +580,13 @@ NDB_SHARE::acquire_reference_impl(const char *key) DBUG_RETURN(share); } - -void -NDB_SHARE::initialize(CHARSET_INFO* charset) -{ - ndbcluster_open_tables.reset - (new collation_unordered_map<std::string, NDB_SHARE *> - (charset, PSI_INSTRUMENT_ME)); +void NDB_SHARE::initialize(CHARSET_INFO *charset) { + ndbcluster_open_tables.reset( + new collation_unordered_map<std::string, NDB_SHARE *>(charset, + PSI_INSTRUMENT_ME)); } - -void -NDB_SHARE::deinitialize(void) -{ +void NDB_SHARE::deinitialize(void) { mysql_mutex_lock(&ndbcluster_mutex); // There should not be any NDB_SHARE's left -> crash after logging in debug @@ -721,14 +623,10 @@ NDB_SHARE::deinitialize(void) mysql_mutex_unlock(&ndbcluster_mutex); } - -void -NDB_SHARE::release_extra_share_references(void) -{ +void NDB_SHARE::release_extra_share_references(void) { mysql_mutex_lock(&ndbcluster_mutex); - while (!ndbcluster_open_tables->empty()) - { - NDB_SHARE * share = ndbcluster_open_tables->begin()->second; + while (!ndbcluster_open_tables->empty()) { + NDB_SHARE *share = ndbcluster_open_tables->begin()->second; /* The share kept by the server has not been freed, free it Will also take it out of _open_tables list @@ -741,7 +639,7 @@ NDB_SHARE::release_extra_share_references(void) } void NDB_SHARE::real_free_share(NDB_SHARE **share_ptr) { - NDB_SHARE* share = *share_ptr; + NDB_SHARE *share = *share_ptr; DBUG_TRACE; mysql_mutex_assert_owner(&ndbcluster_mutex); @@ -760,10 +658,10 @@ void NDB_SHARE::real_free_share(NDB_SHARE **share_ptr) { NDB_SHARE::destroy(share); } -extern void ndb_index_stat_free(NDB_SHARE*); +extern void ndb_index_stat_free(NDB_SHARE *); void NDB_SHARE::mark_share_dropped(NDB_SHARE **share_ptr) { - NDB_SHARE* share = *share_ptr; + NDB_SHARE *share = *share_ptr; DBUG_TRACE; mysql_mutex_assert_owner(&ndbcluster_mutex); @@ -814,23 +712,18 @@ void NDB_SHARE::mark_share_dropped(NDB_SHARE **share_ptr) { } #ifndef DBUG_OFF -void -NDB_SHARE::dbg_check_shares_update() -{ +void NDB_SHARE::dbg_check_shares_update() { ndb_log_info("dbug_check_shares open:"); - for (const auto &key_and_value : *ndbcluster_open_tables) - { - const NDB_SHARE *share= key_and_value.second; - ndb_log_info(" %s.%s: state: %s(%u) use_count: %u", - share->db, share->table_name, - share->share_state_string(), - (unsigned)share->state, - share->use_count()); + for (const auto &key_and_value : *ndbcluster_open_tables) { + const NDB_SHARE *share = key_and_value.second; + ndb_log_info(" %s.%s: state: %s(%u) use_count: %u", share->db, + share->table_name, share->share_state_string(), + (unsigned)share->state, share->use_count()); assert(share->state != NSS_DROPPED); } ndb_log_info("dbug_check_shares dropped:"); - for (const NDB_SHARE
*share : dropped_shares) { ndb_log_info(" %s.%s: state: %s(%u) use_count: %u", share->db, share->table_name, share->share_state_string(), (unsigned)share->state, share->use_count()); @@ -840,9 +733,8 @@ NDB_SHARE::dbg_check_shares_update() /** * Only shares in mysql database may be open... */ - for (const auto &key_and_value : *ndbcluster_open_tables) - { - const NDB_SHARE *share= key_and_value.second; + for (const auto &key_and_value : *ndbcluster_open_tables) { + const NDB_SHARE *share = key_and_value.second; assert(strcmp(share->db, "mysql") == 0); } diff --git a/storage/ndb/plugin/ndb_share.h b/storage/ndb/plugin/ndb_share.h index c8556e4c5076..928c9471212e 100644 --- a/storage/ndb/plugin/ndb_share.h +++ b/storage/ndb/plugin/ndb_share.h @@ -30,24 +30,22 @@ #include #endif -#include "my_alloc.h" // MEM_ROOT -#include "storage/ndb/include/ndbapi/Ndb.hpp" // Ndb::TupleIdRange -#include "thr_lock.h" // THR_LOCK - -enum Ndb_binlog_type -{ - NBT_DEFAULT = 0 - ,NBT_NO_LOGGING = 1 - ,NBT_UPDATED_ONLY = 2 - ,NBT_FULL = 3 - ,NBT_USE_UPDATE = 4 - ,NBT_UPDATED_ONLY_USE_UPDATE = 6 - ,NBT_FULL_USE_UPDATE = 7 - ,NBT_UPDATED_ONLY_MINIMAL = 8 - ,NBT_UPDATED_FULL_MINIMAL = 9 +#include "my_alloc.h" // MEM_ROOT +#include "storage/ndb/include/ndbapi/Ndb.hpp" // Ndb::TupleIdRange +#include "thr_lock.h" // THR_LOCK + +enum Ndb_binlog_type { + NBT_DEFAULT = 0, + NBT_NO_LOGGING = 1, + NBT_UPDATED_ONLY = 2, + NBT_FULL = 3, + NBT_USE_UPDATE = 4, + NBT_UPDATED_ONLY_USE_UPDATE = 6, + NBT_FULL_USE_UPDATE = 7, + NBT_UPDATED_ONLY_MINIMAL = 8, + NBT_UPDATED_FULL_MINIMAL = 9 }; - /* Stats that can be retrieved from ndb */ @@ -55,19 +53,18 @@ struct Ndb_statistics { Uint64 row_count; ulong row_size; Uint64 fragment_memory; - Uint64 fragment_extent_space; + Uint64 fragment_extent_space; Uint64 fragment_extent_free_space; }; - struct NDB_SHARE { THR_LOCK lock; mutable mysql_mutex_t mutex; - struct NDB_SHARE_KEY* key; + struct NDB_SHARE_KEY *key; char *db; char *table_name; -private: + private: /* The current TupleIdRange for the table is stored in NDB_SHARE in order for the auto_increment value of a table to be consecutive between @@ -82,87 +79,74 @@ struct NDB_SHARE { */ Ndb::TupleIdRange tuple_id_range; -public: + public: // RAII style class for accessing tuple_id_range class Tuple_id_range_guard { - NDB_SHARE * m_share; - public: - Ndb::TupleIdRange& range; - - Tuple_id_range_guard(NDB_SHARE* share) : - m_share(share), - range(share->tuple_id_range) - { + NDB_SHARE *m_share; + + public: + Ndb::TupleIdRange &range; + + Tuple_id_range_guard(NDB_SHARE *share) + : m_share(share), range(share->tuple_id_range) { mysql_mutex_lock(&m_share->mutex); } - ~Tuple_id_range_guard() - { - mysql_mutex_unlock(&m_share->mutex); - } + ~Tuple_id_range_guard() { mysql_mutex_unlock(&m_share->mutex); } }; // Reset the tuple_id_range - void reset_tuple_id_range() - { + void reset_tuple_id_range() { Tuple_id_range_guard g(this); g.range.reset(); } struct Ndb_statistics stat; - struct Ndb_index_stat* index_stat_list; + struct Ndb_index_stat *index_stat_list; -private: + private: enum Ndb_share_flags : uint { // Flag describing binlogging ON or OFF // - table should not be binlogged - FLAG_NO_BINLOG = 1UL << 2, + FLAG_NO_BINLOG = 1UL << 2, // Flags describing the binlog mode // - table should be binlogged with full rows - FLAG_BINLOG_MODE_FULL = 1UL << 3, + FLAG_BINLOG_MODE_FULL = 1UL << 3, // - table update should be binlogged using update log event - FLAG_BINLOG_MODE_USE_UPDATE = 1UL << 4, + FLAG_BINLOG_MODE_USE_UPDATE = 1UL << 4, // - table update
should be binlogged using minimal format: // i.e. before(primary key(s)):after(changed column(s)) - FLAG_BINLOG_MODE_MINIMAL_UPDATE = 1UL << 5, + FLAG_BINLOG_MODE_MINIMAL_UPDATE = 1UL << 5, // Flag describing if table has an event // NOTE! The decision whether or not a table has an event is decided // only once by Ndb_binlog_client::table_should_have_event() - FLAG_TABLE_HAVE_EVENT = 1UL << 6, + FLAG_TABLE_HAVE_EVENT = 1UL << 6, }; uint flags; -public: - bool get_binlog_nologging() const - { + + public: + bool get_binlog_nologging() const { return flags & NDB_SHARE::FLAG_NO_BINLOG; } - bool get_binlog_full() const - { + bool get_binlog_full() const { return flags & NDB_SHARE::FLAG_BINLOG_MODE_FULL; } - bool get_binlog_use_update() const - { + bool get_binlog_use_update() const { return flags & NDB_SHARE::FLAG_BINLOG_MODE_USE_UPDATE; } - bool get_binlog_update_minimal() const - { + bool get_binlog_update_minimal() const { return flags & NDB_SHARE::FLAG_BINLOG_MODE_MINIMAL_UPDATE; } void set_binlog_flags(Ndb_binlog_type ndb_binlog_type); - void set_have_event() - { - flags |= NDB_SHARE::FLAG_TABLE_HAVE_EVENT; - } - bool get_have_event() const - { + void set_have_event() { flags |= NDB_SHARE::FLAG_TABLE_HAVE_EVENT; } + bool get_have_event() const { return flags & NDB_SHARE::FLAG_TABLE_HAVE_EVENT; } - struct NDB_CONFLICT_FN_SHARE *m_cfn_share; // The event operation used for listening to changes in NDB for this @@ -180,22 +164,21 @@ struct NDB_SHARE { // Raw pointer for passing table definition from schema dist client to // participant in the same node to avoid that the participant has to access // the DD to open the table definition. - const void* inplace_alter_new_table_def; + const void *inplace_alter_new_table_def; - static NDB_SHARE* create(const char* key); - static void destroy(NDB_SHARE* share); + static NDB_SHARE *create(const char *key); + static void destroy(NDB_SHARE *share); // Functions for working with the opaque NDB_SHARE_KEY - static struct NDB_SHARE_KEY* create_key(const char *new_key); - static void free_key(struct NDB_SHARE_KEY*); + static struct NDB_SHARE_KEY *create_key(const char *new_key); + static void free_key(struct NDB_SHARE_KEY *); - static std::string key_get_key(struct NDB_SHARE_KEY*); - static char* key_get_db_name(struct NDB_SHARE_KEY*); - static char* key_get_table_name(struct NDB_SHARE_KEY*); + static std::string key_get_key(struct NDB_SHARE_KEY *); + static char *key_get_db_name(struct NDB_SHARE_KEY *); + static char *key_get_table_name(struct NDB_SHARE_KEY *); size_t key_length() const; - const char* key_string() const; - + const char *key_string() const; /** * Note about acquire_reference() / release_reference() functions: @@ -216,25 +199,24 @@ struct NDB_SHARE { */ // Acquire NDB_SHARE reference for use by ha_ndbcluster - static NDB_SHARE* acquire_for_handler(const char* key, - const class ha_ndbcluster* reference); + static NDB_SHARE *acquire_for_handler(const char *key, + const class ha_ndbcluster *reference); // Release NDB_SHARE reference acquired by ha_ndbcluster - static void release_for_handler(NDB_SHARE* share, - const class ha_ndbcluster* reference); + static void release_for_handler(NDB_SHARE *share, + const class ha_ndbcluster *reference); // Create NDB_SHARE and acquire reference - static NDB_SHARE* create_and_acquire_reference(const char* key, - const char* reference); + static NDB_SHARE *create_and_acquire_reference(const char *key, + const char *reference); // Acquire reference by key - static NDB_SHARE* acquire_reference_by_key(const char* key, -
const char* reference); - static NDB_SHARE* acquire_reference_by_key_have_lock(const char* key, - const char* reference); + static NDB_SHARE *acquire_reference_by_key(const char *key, + const char *reference); + static NDB_SHARE *acquire_reference_by_key_have_lock(const char *key, + const char *reference); // Acquire reference to existing NDB_SHARE - static NDB_SHARE* acquire_reference_on_existing(NDB_SHARE *share, - const char* reference); + static NDB_SHARE *acquire_reference_on_existing(NDB_SHARE *share, + const char *reference); // Release NDB_SHARE reference - static void release_reference(NDB_SHARE *share, - const char* reference); + static void release_reference(NDB_SHARE *share, const char *reference); static void release_reference_have_lock(NDB_SHARE *share, const char *reference); @@ -244,17 +226,16 @@ struct NDB_SHARE { @param share_ptr Pointer to the NDB_SHARE pointer */ - static void mark_share_dropped(NDB_SHARE** share_ptr); + static void mark_share_dropped(NDB_SHARE **share_ptr); // Rename share, rename in list of tables - static int rename_share(NDB_SHARE *share, - struct NDB_SHARE_KEY* new_key); + static int rename_share(NDB_SHARE *share, struct NDB_SHARE_KEY *new_key); #ifndef DBUG_OFF static void dbg_check_shares_update(void); #endif - static void initialize(CHARSET_INFO* charset); + static void initialize(CHARSET_INFO *charset); static void deinitialize(void); static void release_extra_share_references(void); @@ -262,23 +243,21 @@ struct NDB_SHARE { static void print_remaining_open_tables(); // Debug print the NDB_SHARE to string - void debug_print(std::string& out, const char *line_separator = "") const; -private: + void debug_print(std::string &out, const char *line_separator = "") const; + + private: // Debug print the list of open NDB_SHARE's to string - static void debug_print_shares(std::string& out); + static void debug_print_shares(std::string &out); -private: + private: uint m_use_count; uint increment_use_count() { return ++m_use_count; } uint decrement_use_count(); uint use_count() const { return m_use_count; } - enum { - NSS_INITIAL= 0, - NSS_DROPPED - } state; + enum { NSS_INITIAL = 0, NSS_DROPPED } state; - const char* share_state_string() const; + const char *share_state_string() const; /** @brief Permanently free a NDB_SHARE which is no longer referred. @@ -288,31 +267,26 @@ struct NDB_SHARE { @param share_ptr Pointer to NDB_SHARE pointer */ static void real_free_share(NDB_SHARE **share_ptr); - static void free_share(NDB_SHARE** share); + static void free_share(NDB_SHARE **share); - static NDB_SHARE* acquire_reference_impl(const char *key); + static NDB_SHARE *acquire_reference_impl(const char *key); #ifndef DBUG_OFF // Lists of the different "users" who have acquired a reference to // this NDB_SHARE, used for checking the reference counter "m_use_count" // in a programmatic way. // Protected by "ndbcluster_mutex" in the same way as "m_use_count". 
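The Ndb_share_references struct that follows exists only in debug builds: every acquire/release of an NDB_SHARE records a named owner alongside the raw m_use_count, so a share still referenced at shutdown can be attributed to a specific handler or subsystem instead of showing up as an anonymous counter. A minimal sketch of the same debug-tracking idea, using hypothetical RefTracker/acquire/release names rather than the real NDB_SHARE API:

    #include <cassert>
    #include <string>
    #include <unordered_set>

    // Debug companion to a plain reference counter: each increment is
    // paired with a named entry, so leaked references can be identified.
    class RefTracker {
      unsigned m_use_count = 0;
      std::unordered_set<std::string> m_owners;  // who holds a reference

     public:
      void acquire(const std::string &owner) {
        const bool inserted = m_owners.insert(owner).second;
        assert(inserted);  // owner must not already hold a reference
        (void)inserted;
        ++m_use_count;
      }
      void release(const std::string &owner) {
        const bool erased = m_owners.erase(owner) == 1;
        assert(erased);  // owner must currently hold a reference
        (void)erased;
        --m_use_count;
      }
      // At shutdown both views must agree that nothing is left referenced.
      bool check_empty() const { return m_use_count == 0 && m_owners.empty(); }
    };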
- struct Ndb_share_references - { - std::unordered_set handlers; + struct Ndb_share_references { + std::unordered_set handlers; std::unordered_set strings; - size_t size() const { - return handlers.size() + strings.size(); - } + size_t size() const { return handlers.size() + strings.size(); } - bool exists(const class ha_ndbcluster* ref) const - { + bool exists(const class ha_ndbcluster *ref) const { return handlers.find(ref) != handlers.end(); } - bool insert(const class ha_ndbcluster* ref) - { + bool insert(const class ha_ndbcluster *ref) { // The reference should not already exist DBUG_ASSERT(!exists(ref)); @@ -323,8 +297,7 @@ struct NDB_SHARE { return result.second; } - bool erase(const class ha_ndbcluster* ref) - { + bool erase(const class ha_ndbcluster *ref) { // The reference must already exist DBUG_ASSERT(exists(ref)); @@ -335,13 +308,9 @@ struct NDB_SHARE { return erased == 1; } - bool exists(const char* ref) - { - return strings.find(ref) != strings.end(); - } + bool exists(const char *ref) { return strings.find(ref) != strings.end(); } - bool insert(const char* ref) - { + bool insert(const char *ref) { // The reference should not already exist DBUG_ASSERT(!exists(ref)); @@ -352,8 +321,7 @@ struct NDB_SHARE { return result.second; } - bool erase(const char* ref) - { + bool erase(const char *ref) { // The reference must already exist DBUG_ASSERT(exists(ref)); @@ -367,30 +335,26 @@ struct NDB_SHARE { bool check_empty() const; // Debug print the reference lists to string - void debug_print(std::string& out, const char *line_separator = "") const; - + void debug_print(std::string &out, const char *line_separator = "") const; }; - Ndb_share_references* refs; + Ndb_share_references *refs; #endif - void refs_insert(const char* reference MY_ATTRIBUTE((unused))) - { + void refs_insert(const char *reference MY_ATTRIBUTE((unused))) { DBUG_ASSERT(refs->insert(reference)); } - void refs_insert(const class ha_ndbcluster* reference MY_ATTRIBUTE((unused))) - { + void refs_insert( + const class ha_ndbcluster *reference MY_ATTRIBUTE((unused))) { DBUG_ASSERT(refs->insert(reference)); } - void refs_erase(const char* reference MY_ATTRIBUTE((unused))) - { + void refs_erase(const char *reference MY_ATTRIBUTE((unused))) { DBUG_ASSERT(refs->erase(reference)); } - void refs_erase(const class ha_ndbcluster* reference MY_ATTRIBUTE((unused))) - { + void refs_erase(const class ha_ndbcluster *reference MY_ATTRIBUTE((unused))) { DBUG_ASSERT(refs->erase(reference)); } -public: - bool refs_exists(const char* reference MY_ATTRIBUTE((unused))) const - { + + public: + bool refs_exists(const char *reference MY_ATTRIBUTE((unused))) const { #ifndef DBUG_OFF return refs->exists(reference); #else @@ -407,47 +371,41 @@ struct NDB_SHARE { reference and release it when going out of scope. 
*/ class Ndb_share_temp_ref { - NDB_SHARE* m_share; + NDB_SHARE *m_share; const std::string m_reference; - Ndb_share_temp_ref(const Ndb_share_temp_ref&); // prevent - Ndb_share_temp_ref& operator=(const Ndb_share_temp_ref&); // prevent -public: - Ndb_share_temp_ref(const char* key, const char* reference) : - m_reference(reference) - { - m_share = NDB_SHARE::acquire_reference_by_key(key, - m_reference.c_str()); - // Should always exist + Ndb_share_temp_ref(const Ndb_share_temp_ref &); // prevent + Ndb_share_temp_ref &operator=(const Ndb_share_temp_ref &); // prevent + public: + Ndb_share_temp_ref(const char *key, const char *reference) + : m_reference(reference) { + m_share = NDB_SHARE::acquire_reference_by_key(key, m_reference.c_str()); + // Should always exist assert(m_share); } - Ndb_share_temp_ref(NDB_SHARE* share, const char* reference) : - m_reference(reference) - { + Ndb_share_temp_ref(NDB_SHARE *share, const char *reference) + : m_reference(reference) { // The share and a reference should exist assert(share); assert(share->refs_exists(reference)); m_share = share; } - ~Ndb_share_temp_ref() - { + ~Ndb_share_temp_ref() { assert(m_share); /* release the temporary reference */ NDB_SHARE::release_reference(m_share, m_reference.c_str()); } // Return the NDB_SHARE* by type conversion operator - operator NDB_SHARE*() const - { + operator NDB_SHARE *() const { assert(m_share); return m_share; } // Return the NDB_SHARE* when using pointer operator - const NDB_SHARE* operator->() const - { + const NDB_SHARE *operator->() const { assert(m_share); return m_share; } diff --git a/storage/ndb/plugin/ndb_sleep.h b/storage/ndb/plugin/ndb_sleep.h index f684a066fa22..85ff934f6726 100644 --- a/storage/ndb/plugin/ndb_sleep.h +++ b/storage/ndb/plugin/ndb_sleep.h @@ -34,31 +34,25 @@ #include /* Wait a given number of milliseconds */ -static inline -void ndb_milli_sleep(time_t milliseconds) -{ +static inline void ndb_milli_sleep(time_t milliseconds) { #if defined(_WIN32) - Sleep((DWORD)milliseconds+1); /* Sleep() has millisecond arg */ + Sleep((DWORD)milliseconds + 1); /* Sleep() has millisecond arg */ #else struct timeval t; - t.tv_sec= milliseconds / 1000L; - t.tv_usec= milliseconds % 1000L; - select(0,0,0,0,&t); /* sleep */ + t.tv_sec = milliseconds / 1000L; + t.tv_usec = milliseconds % 1000L; + select(0, 0, 0, 0, &t); /* sleep */ #endif } - /* perform random sleep in the range milli_sleep to 5*milli_sleep */ -static inline -void ndb_retry_sleep(unsigned milli_sleep) -{ - ndb_milli_sleep(milli_sleep + 5*(rand()%(milli_sleep/5))); +static inline void ndb_retry_sleep(unsigned milli_sleep) { + ndb_milli_sleep(milli_sleep + 5 * (rand() % (milli_sleep / 5))); } /* perform random sleep while processing transaction */ -static inline -void ndb_trans_retry_sleep() { - ndb_retry_sleep(30); // milliseconds +static inline void ndb_trans_retry_sleep() { + ndb_retry_sleep(30); // milliseconds } #endif diff --git a/storage/ndb/plugin/ndb_sql_metadata_table.cc b/storage/ndb/plugin/ndb_sql_metadata_table.cc index 816d35229d69..8667c278f75d 100644 --- a/storage/ndb/plugin/ndb_sql_metadata_table.cc +++ b/storage/ndb/plugin/ndb_sql_metadata_table.cc @@ -29,8 +29,7 @@ #include "storage/ndb/plugin/ndb_thd_ndb.h" Ndb_sql_metadata_table::Ndb_sql_metadata_table(Thd_ndb *thd_ndb) - : Ndb_util_table(thd_ndb, "mysql", "ndb_sql_metadata", true, false) - {} + : Ndb_util_table(thd_ndb, "mysql", "ndb_sql_metadata", true, false) {} bool Ndb_sql_metadata_table::define_table_ndb(NdbDictionary::Table &new_table, unsigned) const { @@ -117,7 
+116,6 @@ std::string Ndb_sql_metadata_table::define_table_dd() const { Ndb_sql_metadata_table::~Ndb_sql_metadata_table() {} - // Ndb_sql_metadata_api /* Map the table. @@ -141,12 +139,12 @@ void Ndb_sql_metadata_api::setup(NdbDictionary::Dictionary *dict, m_record_layout.addColumn(table->getColumn("sql_ddl_text")); m_full_record_size = m_record_layout.record_size; - m_row_rec = dict->createRecord(table, m_record_layout.record_specs, - 5, // ALL FIVE COLUMNS - sizeof(m_record_layout.record_specs[0])); - m_note_rec = dict->createRecord(table, m_record_layout.record_specs, - 4, // FIRST FOUR COLUMNS - sizeof(m_record_layout.record_specs[0])); + m_row_rec = dict->createRecord(table, m_record_layout.record_specs, + 5, // ALL FIVE COLUMNS + sizeof(m_record_layout.record_specs[0])); + m_note_rec = dict->createRecord(table, m_record_layout.record_specs, + 4, // FIRST FOUR COLUMNS + sizeof(m_record_layout.record_specs[0])); m_hash_key_rec = dict->createRecord(table, m_record_layout.record_specs, 3, // FIRST THREE COLUMNS sizeof(m_record_layout.record_specs[0])); @@ -158,18 +156,17 @@ void Ndb_sql_metadata_api::setup(NdbDictionary::Dictionary *dict, the index is not. Do not handle those conditions here. They are detected later, when isInitialized() returns false. */ - if(primary) { - m_ordered_index_rec - = dict->createRecord(primary, - table, m_record_layout.record_specs, - 3, // FIRST THREE COLUMNS - sizeof(m_record_layout.record_specs[0])); + if (primary) { + m_ordered_index_rec = + dict->createRecord(primary, table, m_record_layout.record_specs, + 3, // FIRST THREE COLUMNS + sizeof(m_record_layout.record_specs[0])); dict->removeIndexGlobal(*primary, false); } } void Ndb_sql_metadata_api::clear(NdbDictionary::Dictionary *dict) { - if(m_full_record_size) { + if (m_full_record_size) { dict->releaseRecord(m_row_rec); m_row_rec = nullptr; dict->releaseRecord(m_note_rec); @@ -181,7 +178,7 @@ void Ndb_sql_metadata_api::clear(NdbDictionary::Dictionary *dict) { m_full_record_size = 0; } - if(m_ordered_index_rec) { + if (m_ordered_index_rec) { dict->releaseRecord(m_ordered_index_rec); m_ordered_index_rec = nullptr; } diff --git a/storage/ndb/plugin/ndb_sql_metadata_table.h b/storage/ndb/plugin/ndb_sql_metadata_table.h index ee9e7d6f86a6..8bed36a997ef 100644 --- a/storage/ndb/plugin/ndb_sql_metadata_table.h +++ b/storage/ndb/plugin/ndb_sql_metadata_table.h @@ -35,7 +35,7 @@ // RAII style class for creating and updating the table class Ndb_sql_metadata_table : public Ndb_util_table { Ndb_sql_metadata_table() = delete; - Ndb_sql_metadata_table(const Ndb_sql_metadata_table&) = delete; + Ndb_sql_metadata_table(const Ndb_sql_metadata_table &) = delete; bool define_table_ndb(NdbDictionary::Table &table, unsigned mysql_version) const override; @@ -44,74 +44,78 @@ class Ndb_sql_metadata_table : public Ndb_util_table { unsigned int mysql_version) const override; public: - Ndb_sql_metadata_table(class Thd_ndb*); + Ndb_sql_metadata_table(class Thd_ndb *); virtual ~Ndb_sql_metadata_table(); bool check_schema() const override; bool need_upgrade() const override; std::string define_table_dd() const override; - const NdbDictionary::Index * get_index() const; - bool drop_events_in_NDB() const override { return true; } + const NdbDictionary::Index *get_index() const; + bool drop_events_in_NDB() const override { return true; } }; - /* Class provides an API for using the table, NdbRecord-style. It has a default constructor, so it can be statically allocated, but it cannot be used until after setup_records() is called. 
*/ class Ndb_sql_metadata_api { -public: - Ndb_sql_metadata_api() : - m_record_layout(5) // five columns in table + public: + Ndb_sql_metadata_api() + : m_record_layout(5) // five columns in table {} ~Ndb_sql_metadata_api() = default; Ndb_sql_metadata_api(const Ndb_sql_metadata_api &) = delete; - Ndb_sql_metadata_api & operator=(const Ndb_sql_metadata_api &) = delete; + Ndb_sql_metadata_api &operator=(const Ndb_sql_metadata_api &) = delete; /* Record Types */ - static constexpr short TYPE_USER = 11; - static constexpr short TYPE_GRANT = 12; + static constexpr short TYPE_USER = 11; + static constexpr short TYPE_GRANT = 12; void setup(NdbDictionary::Dictionary *, const NdbDictionary::Table *); void clear(NdbDictionary::Dictionary *); - bool isInitialized() const { return m_ordered_index_rec; } - - NdbRecord * rowNdbRecord() const { return m_row_rec; } - NdbRecord * noteNdbRecord() const { return m_note_rec; } - NdbRecord * keyNdbRecord() const { return m_hash_key_rec; } - NdbRecord * orderedNdbRecord() const { return m_ordered_index_rec; } - - size_t getRowSize() const { return m_full_record_size; } - size_t getNoteSize() const { return m_note_record_size; } - size_t getKeySize() const { return m_key_record_size; } - - void setType(char *buf, short a) { layout().setValue(0, a, buf); } - void setName(char *buf, std::string a) { layout().setValue(1, a, buf); } - void packName(char *buf, std::string a) { layout().packValue(1, a, buf); } - void setSeq(char *buf, short a) { layout().setValue(2, a, buf); } - void setNote(char *buf, uint32_t *a) { layout().setValue(3, a, buf); } - void setSql(char *buf, std::string a) { layout().setValue(4, a, buf); } - - void getType(const char *buf, - unsigned short *a) { layout().getValue(buf, 0, a); } - void getName(const char *buf, - size_t *a, const char **b) { layout().getValue(buf, 1, a, b); } - void getSeq(const char *buf, - unsigned short *a) { layout().getValue(buf, 2, a); } + bool isInitialized() const { return m_ordered_index_rec; } + + NdbRecord *rowNdbRecord() const { return m_row_rec; } + NdbRecord *noteNdbRecord() const { return m_note_rec; } + NdbRecord *keyNdbRecord() const { return m_hash_key_rec; } + NdbRecord *orderedNdbRecord() const { return m_ordered_index_rec; } + + size_t getRowSize() const { return m_full_record_size; } + size_t getNoteSize() const { return m_note_record_size; } + size_t getKeySize() const { return m_key_record_size; } + + void setType(char *buf, short a) { layout().setValue(0, a, buf); } + void setName(char *buf, std::string a) { layout().setValue(1, a, buf); } + void packName(char *buf, std::string a) { layout().packValue(1, a, buf); } + void setSeq(char *buf, short a) { layout().setValue(2, a, buf); } + void setNote(char *buf, uint32_t *a) { layout().setValue(3, a, buf); } + void setSql(char *buf, std::string a) { layout().setValue(4, a, buf); } + + void getType(const char *buf, unsigned short *a) { + layout().getValue(buf, 0, a); + } + void getName(const char *buf, size_t *a, const char **b) { + layout().getValue(buf, 1, a, b); + } + void getSeq(const char *buf, unsigned short *a) { + layout().getValue(buf, 2, a); + } /* Getter for nullable column returns bool; true = NOT NULL */ - bool getNote(const char *buf, - uint32_t *a) { return layout().getValue(buf, 3, a); } - void getSql(const char *buf, - size_t *a, const char **b) { layout().getValue(buf, 4, a, b); } - -private: - Ndb_record_layout & layout() { return m_record_layout; } + bool getNote(const char *buf, uint32_t *a) { + return layout().getValue(buf, 3, a); + 
} + void getSql(const char *buf, size_t *a, const char **b) { + layout().getValue(buf, 4, a, b); + } + + private: + Ndb_record_layout &layout() { return m_record_layout; } Ndb_record_layout m_record_layout; - NdbRecord * m_row_rec{nullptr}; - NdbRecord * m_note_rec{nullptr}; - NdbRecord * m_hash_key_rec{nullptr}; - NdbRecord * m_ordered_index_rec{nullptr}; + NdbRecord *m_row_rec{nullptr}; + NdbRecord *m_note_rec{nullptr}; + NdbRecord *m_hash_key_rec{nullptr}; + NdbRecord *m_ordered_index_rec{nullptr}; size_t m_full_record_size{0}; size_t m_note_record_size{0}; diff --git a/storage/ndb/plugin/ndb_stored_grants.cc b/storage/ndb/plugin/ndb_stored_grants.cc index 0d8400178d48..88d9cc0f1c11 100644 --- a/storage/ndb/plugin/ndb_stored_grants.cc +++ b/storage/ndb/plugin/ndb_stored_grants.cc @@ -24,7 +24,7 @@ #include "storage/ndb/plugin/ndb_stored_grants.h" -#include // std::find() +#include // std::find() #include #include @@ -534,7 +534,7 @@ bool ThreadContext::write_snapshot() { void ThreadContext::update_user(std::string user) { int ngrants = get_grants_for_user(user); - if(ngrants) { + if (ngrants) { get_create_user(user, ngrants); if (local_granted_users.count(user)) { m_read_keys.push_back(Key(TYPE_USER, user, 0)); @@ -763,11 +763,11 @@ Ndb_stored_grants::Strategy ThreadContext::handle_change(ChangeNotice *notice) { } else if (operation == SQLCOM_DROP_USER) { /* DROP user or role. DROP ROLE can have a cascading effect upon the grants of other users, so this requires a full snapshot update. */ - if(m_intersection.size()) { + if (m_intersection.size()) { drop_list = &m_intersection; m_statement_users.clear(); - for(std::string user : local_granted_users) - if(! std::find(drop_list->begin(), drop_list->end(), user)) + for (std::string user : local_granted_users) + if (!std::find(drop_list->begin(), drop_list->end(), user)) m_statement_users.push_back(user); update_list = &m_statement_users; } @@ -777,8 +777,7 @@ Ndb_stored_grants::Strategy ThreadContext::handle_change(ChangeNotice *notice) { update_list = &m_intersection; /* Distribute ALTER USER and SET PASSWORD as snapshot refreshes in order to avoid transmitting plaintext passwords. 
*/ - if (operation == SQLCOM_ALTER_USER || - operation == SQLCOM_SET_PASSWORD) + if (operation == SQLCOM_ALTER_USER || operation == SQLCOM_SET_PASSWORD) dist_as_snapshot = true; } @@ -853,8 +852,8 @@ bool Ndb_stored_grants::apply_stored_grants(THD *thd) { } Ndb_stored_grants::Strategy Ndb_stored_grants::handle_local_acl_change( - THD *thd, const Acl_change_notification *notice, - std::string *user_list, bool *schema_dist_use_db, bool *must_refresh) { + THD *thd, const Acl_change_notification *notice, std::string *user_list, + bool *schema_dist_use_db, bool *must_refresh) { if (!metadata_table.isInitialized()) { ndb_log_error("stored grants: initialization has failed."); return Strategy::ERROR; diff --git a/storage/ndb/plugin/ndb_stored_grants.h b/storage/ndb/plugin/ndb_stored_grants.h index 2791e509edfd..f4dc7f186a07 100644 --- a/storage/ndb/plugin/ndb_stored_grants.h +++ b/storage/ndb/plugin/ndb_stored_grants.h @@ -36,24 +36,23 @@ class Acl_change_notification; */ namespace Ndb_stored_grants { - bool initialize(THD *, Thd_ndb *); +bool initialize(THD *, Thd_ndb *); - void shutdown(Thd_ndb *); +void shutdown(Thd_ndb *); - bool apply_stored_grants(THD *); +bool apply_stored_grants(THD *); - enum class Strategy { ERROR, NONE, STATEMENT, SNAPSHOT }; +enum class Strategy { ERROR, NONE, STATEMENT, SNAPSHOT }; - Strategy handle_local_acl_change(THD *, - const class Acl_change_notification *, - std::string * user_list, - bool * schema_dist_use_db, - bool * particpants_must_refresh); +Strategy handle_local_acl_change(THD *, const class Acl_change_notification *, + std::string *user_list, + bool *schema_dist_use_db, + bool *particpants_must_refresh); - bool update_users_from_snapshot(THD *, std::string user_list); +bool update_users_from_snapshot(THD *, std::string user_list); - void maintain_cache(THD *); +void maintain_cache(THD *); -} // namespace +} // namespace Ndb_stored_grants #endif diff --git a/storage/ndb/plugin/ndb_table_guard.h b/storage/ndb/plugin/ndb_table_guard.h index d656805fb323..8b1e7ae0061a 100644 --- a/storage/ndb/plugin/ndb_table_guard.h +++ b/storage/ndb/plugin/ndb_table_guard.h @@ -31,27 +31,23 @@ #include "storage/ndb/include/ndbapi/Ndb.hpp" #include "storage/ndb/include/ndbapi/NdbDictionary.hpp" -class Ndb_table_guard -{ +class Ndb_table_guard { NdbDictionary::Dictionary *const m_dict; const NdbDictionary::Table *m_ndbtab{nullptr}; int m_invalidate{0}; -public: - Ndb_table_guard(NdbDictionary::Dictionary *dict) - : m_dict(dict) - {} + + public: + Ndb_table_guard(NdbDictionary::Dictionary *dict) : m_dict(dict) {} Ndb_table_guard(NdbDictionary::Dictionary *dict, const char *tabname) - : m_dict(dict) - { + : m_dict(dict) { DBUG_ENTER("Ndb_table_guard"); init(tabname); DBUG_VOID_RETURN; } - Ndb_table_guard(Ndb* ndb, const char* dbname, const char *tabname) - : m_dict(ndb->getDictionary()) - { + Ndb_table_guard(Ndb *ndb, const char *dbname, const char *tabname) + : m_dict(ndb->getDictionary()) { const std::string save_dbname(ndb->getDatabaseName()); - if (ndb->setDatabaseName(dbname) != 0){ + if (ndb->setDatabaseName(dbname) != 0) { // Failed to set databasname, indicate error by returning // without initializing the table pointer return; @@ -59,35 +55,31 @@ class Ndb_table_guard init(tabname); (void)ndb->setDatabaseName(save_dbname.c_str()); } - ~Ndb_table_guard() - { + ~Ndb_table_guard() { DBUG_ENTER("~Ndb_table_guard"); - if (m_ndbtab) - { - DBUG_PRINT("info", ("m_ndbtab: %p m_invalidate: %d", - m_ndbtab, m_invalidate)); + if (m_ndbtab) { + DBUG_PRINT("info", + ("m_ndbtab: 
%p m_invalidate: %d", m_ndbtab, m_invalidate)); m_dict->removeTableGlobal(*m_ndbtab, m_invalidate); - m_ndbtab= NULL; - m_invalidate= 0; + m_ndbtab = NULL; + m_invalidate = 0; } DBUG_VOID_RETURN; } - void init(const char *tabname) - { + void init(const char *tabname) { DBUG_ENTER("Ndb_table_guard::init"); /* Don't allow init() if already initialized */ DBUG_ASSERT(m_ndbtab == NULL); - m_ndbtab= m_dict->getTableGlobal(tabname); - m_invalidate= 0; + m_ndbtab = m_dict->getTableGlobal(tabname); + m_invalidate = 0; DBUG_PRINT("info", ("m_ndbtab: %p", m_ndbtab)); DBUG_VOID_RETURN; } const NdbDictionary::Table *get_table() const { return m_ndbtab; } - void invalidate() { m_invalidate= 1; } - const NdbDictionary::Table *release() - { + void invalidate() { m_invalidate = 1; } + const NdbDictionary::Table *release() { DBUG_ENTER("Ndb_table_guard::release"); - const NdbDictionary::Table *tmp= m_ndbtab; + const NdbDictionary::Table *tmp = m_ndbtab; DBUG_PRINT("info", ("m_ndbtab: %p", m_ndbtab)); m_ndbtab = 0; DBUG_RETURN(tmp); diff --git a/storage/ndb/plugin/ndb_table_map.cc b/storage/ndb/plugin/ndb_table_map.cc index 2dda913d4e16..d36602bdaa29 100644 --- a/storage/ndb/plugin/ndb_table_map.cc +++ b/storage/ndb/plugin/ndb_table_map.cc @@ -29,87 +29,71 @@ #include "sql/table.h" #include "storage/ndb/include/ndbapi/NdbApi.hpp" -Ndb_table_map::Ndb_table_map(struct TABLE * mysqlTable, - const NdbDictionary::Table * ndbTable) : - m_ndb_table(ndbTable), - m_array_size(mysqlTable->s->fields), - m_stored_fields(num_stored_fields(mysqlTable)), - m_hidden_pk((mysqlTable->s->primary_key == MAX_KEY) ? 1 : 0), - m_trivial(m_array_size == m_stored_fields) -{ - if(! m_trivial) - { +Ndb_table_map::Ndb_table_map(struct TABLE *mysqlTable, + const NdbDictionary::Table *ndbTable) + : m_ndb_table(ndbTable), + m_array_size(mysqlTable->s->fields), + m_stored_fields(num_stored_fields(mysqlTable)), + m_hidden_pk((mysqlTable->s->primary_key == MAX_KEY) ? 1 : 0), + m_trivial(m_array_size == m_stored_fields) { + if (!m_trivial) { /* Allocate arrays */ m_map_by_field = new int[m_array_size]; m_map_by_col = new int[m_array_size]; /* Initialize the two bitmaps */ - bitmap_init(& m_moved_fields, 0, m_array_size, 0); - bitmap_init(& m_rewrite_set, 0, m_array_size, 0); + bitmap_init(&m_moved_fields, 0, m_array_size, 0); + bitmap_init(&m_rewrite_set, 0, m_array_size, 0); /* Initialize both arrays full of -1 */ - for(uint i = 0 ; i < m_array_size ; i++) - { + for (uint i = 0; i < m_array_size; i++) { m_map_by_field[i] = m_map_by_col[i] = -1; } /* Build mappings, and set bits in m_moved_fields */ - for(uint fieldId= 0, colId= 0; fieldId < m_array_size ; fieldId ++) - { - if(colId != fieldId) - { - bitmap_set_bit(& m_moved_fields, fieldId); + for (uint fieldId = 0, colId = 0; fieldId < m_array_size; fieldId++) { + if (colId != fieldId) { + bitmap_set_bit(&m_moved_fields, fieldId); } - if(mysqlTable->field[fieldId]->stored_in_db) - { + if (mysqlTable->field[fieldId]->stored_in_db) { m_map_by_field[fieldId] = colId; m_map_by_col[colId] = fieldId; colId++; } - } // for(uint fieldId ... - } // if(! m_trivial ... + } // for(uint fieldId ... + } // if(! m_trivial ... 
} - -uint Ndb_table_map::get_column_for_field(uint fieldId) const -{ +uint Ndb_table_map::get_column_for_field(uint fieldId) const { assert(fieldId < m_array_size); - if(m_trivial) return fieldId; + if (m_trivial) return fieldId; const int colId = m_map_by_field[fieldId]; assert(colId >= 0); // The user must not ask for virtual fields - return (uint) colId; + return (uint)colId; } - -uint Ndb_table_map::get_field_for_column(uint colId) const -{ +uint Ndb_table_map::get_field_for_column(uint colId) const { assert(colId < m_stored_fields); // The user must not ask for hidden columns - if(m_trivial) return colId; + if (m_trivial) return colId; const int fieldId = m_map_by_col[colId]; DBUG_ASSERT(fieldId >= 0); // We do not expect any non-final hidden columns - return (uint) fieldId; + return (uint)fieldId; } - -unsigned char * Ndb_table_map::get_column_mask(const MY_BITMAP *field_mask) -{ - unsigned char * map = 0; - if(field_mask) - { +unsigned char *Ndb_table_map::get_column_mask(const MY_BITMAP *field_mask) { + unsigned char *map = 0; + if (field_mask) { map = (unsigned char *)(field_mask->bitmap); - if((! m_trivial) && bitmap_is_overlapping(& m_moved_fields, field_mask)) - { + if ((!m_trivial) && bitmap_is_overlapping(&m_moved_fields, field_mask)) { map = (unsigned char *)(m_rewrite_set.bitmap); - bitmap_clear_all(& m_rewrite_set); - for(uint i = 0 ; i < m_array_size ; i++) - { - int & colId = m_map_by_field[i]; - if(bitmap_is_set(field_mask, i) && colId >= 0) - { - bitmap_set_bit(& m_rewrite_set, colId); + bitmap_clear_all(&m_rewrite_set); + for (uint i = 0; i < m_array_size; i++) { + int &colId = m_map_by_field[i]; + if (bitmap_is_set(field_mask, i) && colId >= 0) { + bitmap_set_bit(&m_rewrite_set, colId); } } } @@ -117,36 +101,25 @@ unsigned char * Ndb_table_map::get_column_mask(const MY_BITMAP *field_mask) return map; } - -Ndb_table_map::~Ndb_table_map() -{ - if(! 
m_trivial) - { +Ndb_table_map::~Ndb_table_map() { + if (!m_trivial) { delete[] m_map_by_field; delete[] m_map_by_col; - bitmap_free(& m_moved_fields); - bitmap_free(& m_rewrite_set); + bitmap_free(&m_moved_fields); + bitmap_free(&m_rewrite_set); } } - -bool Ndb_table_map::has_virtual_gcol(const TABLE* table) -{ - if (table->vfield == NULL) - return false; - for (Field **gc= table->vfield; *gc; gc++) - { - if (!(*gc)->stored_in_db) - return true; +bool Ndb_table_map::has_virtual_gcol(const TABLE *table) { + if (table->vfield == NULL) return false; + for (Field **gc = table->vfield; *gc; gc++) { + if (!(*gc)->stored_in_db) return true; } return false; } - -uint Ndb_table_map::num_stored_fields(const TABLE* table) -{ - if (table->vfield == NULL) - { +uint Ndb_table_map::num_stored_fields(const TABLE *table) { + if (table->vfield == NULL) { // Table has no virtual fields, just return number of fields return table->s->fields; } @@ -154,29 +127,21 @@ uint Ndb_table_map::num_stored_fields(const TABLE* table) // Table has virtual fields, loop through and subtract those // which are not stored uint num_stored_fields = table->s->fields; - for (Field **vfield_ptr= table->vfield; *vfield_ptr; vfield_ptr++) - { - if (!(*vfield_ptr)->stored_in_db) - num_stored_fields--; + for (Field **vfield_ptr = table->vfield; *vfield_ptr; vfield_ptr++) { + if (!(*vfield_ptr)->stored_in_db) num_stored_fields--; } return num_stored_fields; } - -bool -Ndb_table_map::have_physical_blobs(const TABLE *table) -{ - for(uint i = 0 ; i < table->s->fields; i++) - { - Field * field = table->field[i]; - if (!field->stored_in_db) - { +bool Ndb_table_map::have_physical_blobs(const TABLE *table) { + for (uint i = 0; i < table->s->fields; i++) { + Field *field = table->field[i]; + if (!field->stored_in_db) { // Not stored continue; } - if (field->flags & BLOB_FLAG) - { + if (field->flags & BLOB_FLAG) { // Double check that TABLE_SHARE thinks that table had some // blobs(physical or not) DBUG_ASSERT(table->s->blob_fields > 0); @@ -186,81 +151,60 @@ Ndb_table_map::have_physical_blobs(const TABLE *table) return false; } - #ifndef DBUG_OFF -void -Ndb_table_map::print_record(const TABLE *table, const uchar *record) -{ - for (uint j= 0; j < table->s->fields; j++) - { +void Ndb_table_map::print_record(const TABLE *table, const uchar *record) { + for (uint j = 0; j < table->s->fields; j++) { char buf[40]; - int pos= 0; - Field *field= table->field[j]; - const uchar* field_ptr= field->ptr - table->record[0] + record; - int pack_len= field->pack_length(); - int n= pack_len < 10 ? pack_len : 10; - - for (int i= 0; i < n && pos < 20; i++) - { - pos+= sprintf(&buf[pos]," %x", (int) (uchar) field_ptr[i]); + int pos = 0; + Field *field = table->field[j]; + const uchar *field_ptr = field->ptr - table->record[0] + record; + int pack_len = field->pack_length(); + int n = pack_len < 10 ? 
pack_len : 10; + + for (int i = 0; i < n && pos < 20; i++) { + pos += sprintf(&buf[pos], " %x", (int)(uchar)field_ptr[i]); } - buf[pos]= 0; - DBUG_PRINT("info",("[%u]field_ptr[0->%d]: %s", j, n, buf)); + buf[pos] = 0; + DBUG_PRINT("info", ("[%u]field_ptr[0->%d]: %s", j, n, buf)); } } - -void -Ndb_table_map::print_table(const char *info, const TABLE *table) -{ - if (table == 0) - { - DBUG_PRINT("info",("%s: (null)", info)); +void Ndb_table_map::print_table(const char *info, const TABLE *table) { + if (table == 0) { + DBUG_PRINT("info", ("%s: (null)", info)); return; } DBUG_PRINT("info", ("%s: %s.%s s->fields: %d " "reclength: %lu rec_buff_length: %u record[0]: 0x%lx " "record[1]: 0x%lx", - info, - table->s->db.str, - table->s->table_name.str, - table->s->fields, - table->s->reclength, - table->s->rec_buff_length, - (long) table->record[0], - (long) table->record[1])); - - for (unsigned int i= 0; i < table->s->fields; i++) - { - Field *f= table->field[i]; - DBUG_PRINT("info", - ("[%d] \"%s\"(0x%lx:%s%s%s%s%s%s) type: %d pack_length: %d " - "ptr: 0x%lx[+%d] null_bit: %u null_ptr: 0x%lx[+%d]", - i, - f->field_name, - (long) f->flags, - (f->flags & PRI_KEY_FLAG) ? "pri" : "attr", - (f->flags & NOT_NULL_FLAG) ? "" : ",nullable", - (f->flags & UNSIGNED_FLAG) ? ",unsigned" : ",signed", - (f->flags & ZEROFILL_FLAG) ? ",zerofill" : "", - (f->flags & BLOB_FLAG) ? ",blob" : "", - (f->flags & BINARY_FLAG) ? ",binary" : "", - f->real_type(), - f->pack_length(), - (long) f->ptr, (int) (f->ptr - table->record[0]), - f->null_bit, - (long) f->null_offset(0), - (int) f->null_offset())); - if (f->type() == MYSQL_TYPE_BIT) - { - Field_bit *g= (Field_bit*) f; - DBUG_PRINT("MYSQL_TYPE_BIT",("field_length: %d bit_ptr: 0x%lx[+%d] " - "bit_ofs: %d bit_len: %u", - g->field_length, (long) g->bit_ptr, - (int) ((uchar*) g->bit_ptr - - table->record[0]), - g->bit_ofs, g->bit_len)); + info, table->s->db.str, table->s->table_name.str, + table->s->fields, table->s->reclength, table->s->rec_buff_length, + (long)table->record[0], (long)table->record[1])); + + for (unsigned int i = 0; i < table->s->fields; i++) { + Field *f = table->field[i]; + DBUG_PRINT( + "info", + ("[%d] \"%s\"(0x%lx:%s%s%s%s%s%s) type: %d pack_length: %d " + "ptr: 0x%lx[+%d] null_bit: %u null_ptr: 0x%lx[+%d]", + i, f->field_name, (long)f->flags, + (f->flags & PRI_KEY_FLAG) ? "pri" : "attr", + (f->flags & NOT_NULL_FLAG) ? "" : ",nullable", + (f->flags & UNSIGNED_FLAG) ? ",unsigned" : ",signed", + (f->flags & ZEROFILL_FLAG) ? ",zerofill" : "", + (f->flags & BLOB_FLAG) ? ",blob" : "", + (f->flags & BINARY_FLAG) ? ",binary" : "", f->real_type(), + f->pack_length(), (long)f->ptr, (int)(f->ptr - table->record[0]), + f->null_bit, (long)f->null_offset(0), (int)f->null_offset())); + if (f->type() == MYSQL_TYPE_BIT) { + Field_bit *g = (Field_bit *)f; + DBUG_PRINT("MYSQL_TYPE_BIT", + ("field_length: %d bit_ptr: 0x%lx[+%d] " + "bit_ofs: %d bit_len: %u", + g->field_length, (long)g->bit_ptr, + (int)((uchar *)g->bit_ptr - table->record[0]), g->bit_ofs, + g->bit_len)); } } } diff --git a/storage/ndb/plugin/ndb_table_map.h b/storage/ndb/plugin/ndb_table_map.h index 2fb7835e4774..f7a3f08f2a00 100644 --- a/storage/ndb/plugin/ndb_table_map.h +++ b/storage/ndb/plugin/ndb_table_map.h @@ -30,26 +30,26 @@ #include "storage/ndb/include/ndbapi/NdbApi.hpp" /** Ndb_table_map -* -* An Ndb_table_map for a table provides a map between MySQL fields and -* NDB columns. Some MySQL fields, such as virtual generated columns, do -* not exist in NDB. 
Some NDB columns, such as hidden primary keys and -* partition ID columns, are not visible as MySQL fields. -* -* Ndb_table_map provides a getColumn() method that wraps -* NdbDictionary::Table::getColumn(), translating from a field number to the -* appropriate column number. It also provides a method get_column_mask() -* for wholesale translation, when needed, of an entire bitmap of field -* numbers to column numbers. -* -* The introduction of virtual generated columns from WL#411 requires the -* handler to understand that some fields are not stored, and to map between -* MySQL Field Ids and NDB Column Ids (which are no longer equivalent). -* -*/ + * + * An Ndb_table_map for a table provides a map between MySQL fields and + * NDB columns. Some MySQL fields, such as virtual generated columns, do + * not exist in NDB. Some NDB columns, such as hidden primary keys and + * partition ID columns, are not visible as MySQL fields. + * + * Ndb_table_map provides a getColumn() method that wraps + * NdbDictionary::Table::getColumn(), translating from a field number to the + * appropriate column number. It also provides a method get_column_mask() + * for wholesale translation, when needed, of an entire bitmap of field + * numbers to column numbers. + * + * The introduction of virtual generated columns from WL#411 requires the + * handler to understand that some fields are not stored, and to map between + * MySQL Field Ids and NDB Column Ids (which are no longer equivalent). + * + */ class Ndb_table_map { -public: - Ndb_table_map(struct TABLE*, const NdbDictionary::Table * ndb_table = 0); + public: + Ndb_table_map(struct TABLE *, const NdbDictionary::Table *ndb_table = 0); ~Ndb_table_map(); /* Get the NDB column number for a MySQL field. @@ -60,18 +60,18 @@ class Ndb_table_map { /* Get an NDB column by MySQL field number. The user must check field->stored_in_db, and only look up stored fields. */ - const NdbDictionary::Column * getColumn(uint mysql_field_number) const; + const NdbDictionary::Column *getColumn(uint mysql_field_number) const; /* Get column by field number; non-const version for CREATE TABLE. The user must check field->stored_in_db, and only look up stored fields. */ - NdbDictionary::Column * getColumn(NdbDictionary::Table &, - uint mysql_field_number) const; + NdbDictionary::Column *getColumn(NdbDictionary::Table &, + uint mysql_field_number) const; /* Get Blob handle by MySQL field number. The user must check field->stored_in_db, and only look up stored fields. */ - NdbBlob * getBlobHandle(const NdbOperation *, uint mysql_field_number) const; + NdbBlob *getBlobHandle(const NdbOperation *, uint mysql_field_number) const; /* Get NDB column numbers for special columns that are hidden from MySQL */ uint get_hidden_key_column() const; @@ -81,7 +81,7 @@ class Ndb_table_map { uint get_field_for_column(uint ndb_col_number) const; /* get_column_mask(): - Takes a pointer to a MySQL bitmask. + Takes a pointer to a MySQL bitmask. Returns a pointer which can be used as a record mask when building an NdbRecord operation. @@ -96,79 +96,70 @@ class Ndb_table_map { for this internal bitmask is owned by Ndb_table_map and will be reused by subsequent calls to rewrite_bitmap(). */ - unsigned char * get_column_mask(const MY_BITMAP * mysql_field_map); - + unsigned char *get_column_mask(const MY_BITMAP *mysql_field_map); /* Adapter function for checking wheter a TABLE* has virtual generated columns. 
Function existed in 5.7 as table->has_virtual_gcol() */ - static bool has_virtual_gcol(const struct TABLE* table); + static bool has_virtual_gcol(const struct TABLE *table); /* Adapter function for returning the number of stored fields in the TABLE*(i.e those who are not virtual). */ - static uint num_stored_fields(const struct TABLE* table); + static uint num_stored_fields(const struct TABLE *table); /* Check if the table has physical blob columns(i.e actually stored in the engine) */ - static bool have_physical_blobs(const struct TABLE* table); + static bool have_physical_blobs(const struct TABLE *table); #ifndef DBUG_OFF static void print_record(const struct TABLE *table, const uchar *record); static void print_table(const char *info, const struct TABLE *table); #endif -private: - const NdbDictionary::Table * m_ndb_table; + private: + const NdbDictionary::Table *m_ndb_table; MY_BITMAP m_moved_fields; MY_BITMAP m_rewrite_set; - int * m_map_by_field; - int * m_map_by_col; + int *m_map_by_field; + int *m_map_by_col; const uint m_array_size; const uint m_stored_fields; const unsigned short m_hidden_pk; const bool m_trivial; }; - // inline implementations -inline const NdbDictionary::Column * Ndb_table_map::getColumn(uint field) const -{ +inline const NdbDictionary::Column *Ndb_table_map::getColumn(uint field) const { return m_ndb_table->getColumn(get_column_for_field(field)); } -inline NdbDictionary::Column * - Ndb_table_map::getColumn(NdbDictionary::Table & create_table, uint field) const -{ +inline NdbDictionary::Column *Ndb_table_map::getColumn( + NdbDictionary::Table &create_table, uint field) const { return create_table.getColumn(get_column_for_field(field)); } -inline NdbBlob * Ndb_table_map::getBlobHandle(const NdbOperation *ndb_op, - uint mysql_field_number) const -{ +inline NdbBlob *Ndb_table_map::getBlobHandle(const NdbOperation *ndb_op, + uint mysql_field_number) const { return ndb_op->getBlobHandle(get_column_for_field(mysql_field_number)); } -inline uint Ndb_table_map::get_hidden_key_column() const -{ +inline uint Ndb_table_map::get_hidden_key_column() const { DBUG_ASSERT(m_hidden_pk); // The hidden primary key is just after the final stored, visible column return m_stored_fields; } -inline uint Ndb_table_map::get_partition_id_column() const -{ +inline uint Ndb_table_map::get_partition_id_column() const { // The hidden partition id, if present, is the final column return m_stored_fields + m_hidden_pk; } - - #endif diff --git a/storage/ndb/plugin/ndb_tdc.cc b/storage/ndb/plugin/ndb_tdc.cc index a7c7ca6bbc85..ce1a00182c36 100644 --- a/storage/ndb/plugin/ndb_tdc.cc +++ b/storage/ndb/plugin/ndb_tdc.cc @@ -24,28 +24,25 @@ #include "storage/ndb/plugin/ndb_tdc.h" -#include "sql/sql_base.h" // close_cached_tables() -#include "sql/table.h" // TABLE_LIST - +#include "sql/sql_base.h" // close_cached_tables() +#include "sql/table.h" // TABLE_LIST /* Close all tables in MySQL Server's table definition cache which aren't in use by any thread */ -bool ndb_tdc_close_cached_tables(void) -{ +bool ndb_tdc_close_cached_tables(void) { DBUG_ENTER("ndb_tdc_close_cached_tables"); - const int res = close_cached_tables(NULL, // No need for thd pointer - NULL, // Close all tables - false, // Don't wait - 0 // Timeout unused when not waiting - ); + const int res = close_cached_tables(NULL, // No need for thd pointer + NULL, // Close all tables + false, // Don't wait + 0 // Timeout unused when not waiting + ); DBUG_RETURN(res); } - /* Close table in MySQL Server's table definition cache which aren't in use 
by any thread @@ -55,23 +52,20 @@ bool ndb_tdc_close_cached_tables(void) @param[in] tabname Name of table */ -bool -ndb_tdc_close_cached_table(THD* thd, const char* dbname, const char* tabname) -{ - +bool ndb_tdc_close_cached_table(THD *thd, const char *dbname, + const char *tabname) { DBUG_ENTER("ndb_tdc_close_cached_table"); DBUG_PRINT("enter", ("dbname: %s, tabname: %s", dbname, tabname)); // NOTE! initializes only the minimal part of TABLE_LIST // required for calling close_cached_tables() TABLE_LIST table_list; - table_list.db= dbname; - table_list.alias= table_list.table_name= tabname; + table_list.db = dbname; + table_list.alias = table_list.table_name = tabname; - const int res = close_cached_tables(thd, - &table_list, - false, // Don't wait - 0 // Timeout unused when not waiting - ); + const int res = close_cached_tables(thd, &table_list, + false, // Don't wait + 0 // Timeout unused when not waiting + ); DBUG_RETURN(res); } diff --git a/storage/ndb/plugin/ndb_tdc.h b/storage/ndb/plugin/ndb_tdc.h index db3d4deaec94..4705ebceb484 100644 --- a/storage/ndb/plugin/ndb_tdc.h +++ b/storage/ndb/plugin/ndb_tdc.h @@ -33,7 +33,7 @@ bool ndb_tdc_close_cached_tables(void); // Close one named table -bool ndb_tdc_close_cached_table(class THD* thd, - const char* dbname, const char* tabname); +bool ndb_tdc_close_cached_table(class THD *thd, const char *dbname, + const char *tabname); #endif diff --git a/storage/ndb/plugin/ndb_thd.cc b/storage/ndb/plugin/ndb_thd.cc index 895ab3bbc4fa..329e4d1ee56e 100644 --- a/storage/ndb/plugin/ndb_thd.cc +++ b/storage/ndb/plugin/ndb_thd.cc @@ -28,7 +28,7 @@ #include "mysql/thread_type.h" #include "sql/handler.h" #include "sql/sql_class.h" -#include "storage/ndb/plugin/ndb_log.h" // ndb_log_* +#include "storage/ndb/plugin/ndb_log.h" // ndb_log_* #include "storage/ndb/plugin/ndb_thd_ndb.h" /* @@ -37,20 +37,15 @@ - validate_ndb, check if the Ndb object need to be recycled */ -Ndb* check_ndb_in_thd(THD* thd, bool validate_ndb) -{ - Thd_ndb *thd_ndb= get_thd_ndb(thd); - if (!thd_ndb) - { - if (!(thd_ndb= Thd_ndb::seize(thd))) - return NULL; +Ndb *check_ndb_in_thd(THD *thd, bool validate_ndb) { + Thd_ndb *thd_ndb = get_thd_ndb(thd); + if (!thd_ndb) { + if (!(thd_ndb = Thd_ndb::seize(thd))) return NULL; thd_set_thd_ndb(thd, thd_ndb); } - else if (validate_ndb && !thd_ndb->valid_ndb()) - { - if (!thd_ndb->recycle_ndb()) - return NULL; + else if (validate_ndb && !thd_ndb->valid_ndb()) { + if (!thd_ndb->recycle_ndb()) return NULL; } DBUG_ASSERT(thd_ndb->is_slave_thread() == thd->slave_thread); @@ -58,18 +53,13 @@ Ndb* check_ndb_in_thd(THD* thd, bool validate_ndb) return thd_ndb->ndb; } - -bool -applying_binlog(const THD* thd) -{ - if (thd->slave_thread) - { +bool applying_binlog(const THD *thd) { + if (thd->slave_thread) { DBUG_PRINT("info", ("THD is slave thread")); return true; } - if (thd->rli_fake) - { + if (thd->rli_fake) { /* Thread is in "pseudo_slave_mode" which is entered implicitly when the first BINLOG statement is executed (see 'mysql_client_binlog_statement') @@ -84,30 +74,25 @@ applying_binlog(const THD* thd) extern ulong opt_server_id_mask; -uint32 -thd_unmasked_server_id(const THD* thd) -{ +uint32 thd_unmasked_server_id(const THD *thd) { const uint32 unmasked_server_id = thd->unmasked_server_id; assert(thd->server_id == (thd->unmasked_server_id & opt_server_id_mask)); return unmasked_server_id; } -const char* ndb_thd_query(const THD* thd) { return thd->query().str; } +const char *ndb_thd_query(const THD *thd) { return thd->query().str; } -size_t 
ndb_thd_query_length(const THD* thd) { return thd->query().length; } +size_t ndb_thd_query_length(const THD *thd) { return thd->query().length; } -bool ndb_thd_is_binlog_thread(const THD* thd) -{ +bool ndb_thd_is_binlog_thread(const THD *thd) { return thd->system_thread == SYSTEM_THREAD_NDBCLUSTER_BINLOG; } -bool ndb_thd_is_background_thread(const THD* thd) -{ +bool ndb_thd_is_background_thread(const THD *thd) { return thd->system_thread == SYSTEM_THREAD_BACKGROUND; } -void ndb_thd_register_trans(THD *thd, bool register_trans) -{ +void ndb_thd_register_trans(THD *thd, bool register_trans) { // Always register for the statement trans_register_ha(thd, false, ndbcluster_hton, nullptr); @@ -124,11 +109,11 @@ void clear_thd_conditions(THD *thd) { thd->get_stmt_da()->reset_condition_info(thd); } -void log_and_clear_thd_conditions( - THD *thd, condition_logging_level logging_level) { +void log_and_clear_thd_conditions(THD *thd, + condition_logging_level logging_level) { // Print THD's list of conditions to error log Diagnostics_area::Sql_condition_iterator it( - thd->get_stmt_da()->sql_conditions()); + thd->get_stmt_da()->sql_conditions()); const Sql_condition *err; while ((err = it++)) { switch (logging_level) { diff --git a/storage/ndb/plugin/ndb_thd.h b/storage/ndb/plugin/ndb_thd.h index f7d3e665b6d7..3a4b67beff7e 100644 --- a/storage/ndb/plugin/ndb_thd.h +++ b/storage/ndb/plugin/ndb_thd.h @@ -27,70 +27,59 @@ #include "mysql/plugin.h" -extern handlerton* ndbcluster_hton; +extern handlerton *ndbcluster_hton; - /* Get Thd_ndb pointer from THD */ -static inline -class Thd_ndb* -get_thd_ndb(THD* thd) -{ - return (class Thd_ndb *) thd_get_ha_data(thd, ndbcluster_hton); +static inline class Thd_ndb *get_thd_ndb(THD *thd) { + return (class Thd_ndb *)thd_get_ha_data(thd, ndbcluster_hton); } - /* Set Thd_ndb pointer for THD */ -static inline -void -thd_set_thd_ndb(THD *thd, class Thd_ndb *thd_ndb) -{ +static inline void thd_set_thd_ndb(THD *thd, class Thd_ndb *thd_ndb) { thd_set_ha_data(thd, ndbcluster_hton, thd_ndb); } - /* Make sure THD has a Thd_ndb struct assigned */ -class Ndb* check_ndb_in_thd(THD* thd, bool validate_ndb= false); +class Ndb *check_ndb_in_thd(THD *thd, bool validate_ndb = false); /* Determine if THD is applying binlog. ie. either marked as slave thread or being in "pseudo slave mode" */ -bool -applying_binlog(const THD* thd); - +bool applying_binlog(const THD *thd); /* Return the THD's unmasked server id */ -uint32 thd_unmasked_server_id(const THD* thd); +uint32 thd_unmasked_server_id(const THD *thd); /* @brief Return the THD's current query string @note It's safe for own thread to read it's query string */ -const char* ndb_thd_query(const THD* thd); +const char *ndb_thd_query(const THD *thd); /* @brief Return the length of THD's current query @note It's safe for own thread to read it's query string length */ -size_t ndb_thd_query_length(const THD* thd); +size_t ndb_thd_query_length(const THD *thd); /* @brief Check if THD is the "binlog injector thread" @return true if thread matches condition */ -bool ndb_thd_is_binlog_thread(const THD* thd); +bool ndb_thd_is_binlog_thread(const THD *thd); /* @brief Check if THD is a "background thread" @return true if thread matches condition */ -bool ndb_thd_is_background_thread(const THD* thd); +bool ndb_thd_is_background_thread(const THD *thd); /* @brief Register ndbcluster for a statement and optionally a transaction. 
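Taken together, check_ndb_in_thd() and the get_thd_ndb()/thd_set_thd_ndb() wrappers implement lazily created per-session plugin state: the Thd_ndb object lives in the THD's ha_data slot for the ndbcluster handlerton and is allocated on first use. A sketch of a typical call site follows; the function name and error handling are illustrative, not taken from the actual handler code:

#include "sql/sql_class.h"                   // THD
#include "storage/ndb/plugin/ndb_thd.h"      // check_ndb_in_thd(), get_thd_ndb()
#include "storage/ndb/plugin/ndb_thd_ndb.h"  // Thd_ndb

static bool example_ndb_entry_point(THD *thd) {
  // Make sure this session has a Thd_ndb and a usable Ndb object,
  // recycling the Ndb object if the cluster connection was restarted
  Ndb *ndb = check_ndb_in_thd(thd, true /* validate_ndb */);
  if (ndb == nullptr) return false;  // allocation or Ndb::init() failed

  Thd_ndb *thd_ndb = get_thd_ndb(thd);  // now guaranteed to exist
  // ... use thd_ndb and ndb for NDB API work ...
  return thd_ndb != nullptr;
}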
@@ -118,7 +107,7 @@ enum condition_logging_level { INFO, WARNING, ERROR }; @param thd Thread handle @param logging_level Level of the log messages i.e. info, warning, or error */ -void log_and_clear_thd_conditions( - THD *thd, condition_logging_level logging_level); +void log_and_clear_thd_conditions(THD *thd, + condition_logging_level logging_level); #endif diff --git a/storage/ndb/plugin/ndb_thd_ndb.cc b/storage/ndb/plugin/ndb_thd_ndb.cc index e6d3060c0b0e..471994b304ee 100644 --- a/storage/ndb/plugin/ndb_thd_ndb.cc +++ b/storage/ndb/plugin/ndb_thd_ndb.cc @@ -25,7 +25,7 @@ #include "storage/ndb/plugin/ndb_thd_ndb.h" #include "my_dbug.h" -#include "mysql/plugin.h" // thd_get_thread_id +#include "mysql/plugin.h" // thd_get_thread_id #include "mysqld_error.h" #include "sql/derror.h" #include "sql/sql_error.h" @@ -38,47 +38,34 @@ the handler. Should really be 2 but there is a transaction to much allocated when lock table is used, and one extra to used for global schema lock. */ -static const int MAX_TRANSACTIONS= 4; +static const int MAX_TRANSACTIONS = 4; - -Thd_ndb* -Thd_ndb::seize(THD* thd) -{ +Thd_ndb *Thd_ndb::seize(THD *thd) { DBUG_ENTER("seize_thd_ndb"); - Thd_ndb* thd_ndb= new Thd_ndb(thd); - if (thd_ndb == NULL) - DBUG_RETURN(NULL); + Thd_ndb *thd_ndb = new Thd_ndb(thd); + if (thd_ndb == NULL) DBUG_RETURN(NULL); - if (thd_ndb->ndb->init(MAX_TRANSACTIONS) != 0) - { + if (thd_ndb->ndb->init(MAX_TRANSACTIONS) != 0) { DBUG_PRINT("error", ("Ndb::init failed, error: %d message: %s", thd_ndb->ndb->getNdbError().code, thd_ndb->ndb->getNdbError().message)); - + delete thd_ndb; - thd_ndb= NULL; - } - else - { + thd_ndb = NULL; + } else { thd_ndb->ndb->setCustomData64(thd_get_thread_id(thd)); } DBUG_RETURN(thd_ndb); } - -void -Thd_ndb::release(Thd_ndb* thd_ndb) -{ +void Thd_ndb::release(Thd_ndb *thd_ndb) { DBUG_ENTER("release_thd_ndb"); delete thd_ndb; DBUG_VOID_RETURN; } - -bool -Thd_ndb::recycle_ndb(void) -{ +bool Thd_ndb::recycle_ndb(void) { DBUG_ENTER("recycle_ndb"); DBUG_PRINT("enter", ("ndb: 0x%lx", (long)ndb)); @@ -86,23 +73,18 @@ Thd_ndb::recycle_ndb(void) DBUG_ASSERT(trans == NULL); delete ndb; - if ((ndb= new Ndb(connection, "")) == NULL) - { - DBUG_PRINT("error",("failed to allocate Ndb object")); + if ((ndb = new Ndb(connection, "")) == NULL) { + DBUG_PRINT("error", ("failed to allocate Ndb object")); DBUG_RETURN(false); } - if (ndb->init(MAX_TRANSACTIONS) != 0) - { + if (ndb->init(MAX_TRANSACTIONS) != 0) { delete ndb; - ndb= NULL; + ndb = NULL; DBUG_PRINT("error", ("Ndb::init failed, %d message: %s", - ndb->getNdbError().code, - ndb->getNdbError().message)); + ndb->getNdbError().code, ndb->getNdbError().message)); DBUG_RETURN(false); - } - else - { + } else { ndb->setCustomData64(thd_get_thread_id(m_thd)); } @@ -110,24 +92,19 @@ Thd_ndb::recycle_ndb(void) m_last_commit_epoch_session = 0; /* Update m_connect_count to avoid false failures of ::valid_ndb() */ - m_connect_count= connection->get_connect_count(); + m_connect_count = connection->get_connect_count(); DBUG_RETURN(true); } - -bool -Thd_ndb::valid_ndb(void) const -{ +bool Thd_ndb::valid_ndb(void) const { // The ndb object should be valid as long as a // global schema lock transaction is ongoing - if (global_schema_lock_trans) - return true; + if (global_schema_lock_trans) return true; // The ndb object should be valid as long as a // transaction is ongoing - if (trans) - return true; + if (trans) return true; if (unlikely(m_connect_count != connection->get_connect_count())) return false; @@ -135,58 +112,35 @@ 
Thd_ndb::valid_ndb(void) const return true; } - -void -Thd_ndb::init_open_tables() -{ - count= 0; - m_error= false; +void Thd_ndb::init_open_tables() { + count = 0; + m_error = false; open_tables.clear(); } +bool Thd_ndb::check_option(Options option) const { return (options & option); } -bool -Thd_ndb::check_option(Options option) const -{ - return (options & option); -} - - -void -Thd_ndb::set_option(Options option) -{ - options |= option; -} - +void Thd_ndb::set_option(Options option) { options |= option; } /* Used for every additional row operation, to update the guesstimate of pending bytes to send, and to check if it is now time to flush a batch. */ -bool -Thd_ndb::add_row_check_if_batch_full(uint size) -{ - if (m_unsent_bytes == 0) - free_root(&m_batch_mem_root, MY_MARK_BLOCKS_FREE); +bool Thd_ndb::add_row_check_if_batch_full(uint size) { + if (m_unsent_bytes == 0) free_root(&m_batch_mem_root, MY_MARK_BLOCKS_FREE); - uint unsent= m_unsent_bytes; - unsent+= size; - m_unsent_bytes= unsent; + uint unsent = m_unsent_bytes; + unsent += size; + m_unsent_bytes = unsent; return unsent >= m_batch_size; } - -bool -Thd_ndb::check_trans_option(Trans_options option) const -{ +bool Thd_ndb::check_trans_option(Trans_options option) const { return (trans_options & option); } - -void -Thd_ndb::set_trans_option(Trans_options option) -{ +void Thd_ndb::set_trans_option(Trans_options option) { #ifndef DBUG_OFF if (check_trans_option(TRANS_TRANSACTIONS_OFF)) DBUG_PRINT("info", ("Disabling transactions")); @@ -198,15 +152,11 @@ Thd_ndb::set_trans_option(Trans_options option) trans_options |= option; } - -void -Thd_ndb::reset_trans_options(void) -{ +void Thd_ndb::reset_trans_options(void) { DBUG_PRINT("info", ("Resetting trans_options")); trans_options = 0; } - /* Push to THD's condition stack @@ -215,14 +165,14 @@ Thd_ndb::reset_trans_options(void) @param[in] fmt printf-like format string @param[in] args Arguments */ -static void push_condition(THD* thd, +static void push_condition(THD *thd, Sql_condition::enum_severity_level severity, - uint code, const char* fmt, va_list args) - MY_ATTRIBUTE((format(printf, 4, 0))); + uint code, const char *fmt, va_list args) + MY_ATTRIBUTE((format(printf, 4, 0))); -static void push_condition(THD* thd, +static void push_condition(THD *thd, Sql_condition::enum_severity_level severity, - uint code, const char* fmt, va_list args) { + uint code, const char *fmt, va_list args) { DBUG_ASSERT(fmt); // Assemble the message @@ -242,7 +192,7 @@ static void push_condition(THD* thd, } } -void Thd_ndb::push_warning(const char* fmt, ...) const { +void Thd_ndb::push_warning(const char *fmt, ...) const { const uint code = ER_GET_ERRMSG; va_list args; va_start(args, fmt); @@ -250,14 +200,14 @@ void Thd_ndb::push_warning(const char* fmt, ...) const { va_end(args); } -void Thd_ndb::push_warning(uint code, const char* fmt, ...) const { +void Thd_ndb::push_warning(uint code, const char *fmt, ...) 
const { va_list args; va_start(args, fmt); push_condition(m_thd, Sql_condition::SL_WARNING, code, fmt, args); va_end(args); } -void Thd_ndb::push_ndb_error_warning(const NdbError& ndberr) const { +void Thd_ndb::push_ndb_error_warning(const NdbError &ndberr) const { if (ndberr.status == NdbError::TemporaryError) { push_warning_printf(m_thd, Sql_condition::SL_WARNING, ER_GET_TEMPORARY_ERRMSG, @@ -270,8 +220,7 @@ void Thd_ndb::push_ndb_error_warning(const NdbError& ndberr) const { } } -void Thd_ndb::set_ndb_error(const NdbError& ndberr, - const char* message) const { +void Thd_ndb::set_ndb_error(const NdbError &ndberr, const char *message) const { push_ndb_error_warning(ndberr); my_printf_error(ER_GET_ERRMSG, "%s", MYF(0), message); } diff --git a/storage/ndb/plugin/ndb_thd_ndb.h b/storage/ndb/plugin/ndb_thd_ndb.h index 0d274f4a3d1f..c846a39d9059 100644 --- a/storage/ndb/plugin/ndb_thd_ndb.h +++ b/storage/ndb/plugin/ndb_thd_ndb.h @@ -34,20 +34,20 @@ class THD; /* Class for ndbcluster thread specific data */ -class Thd_ndb -{ - THD* const m_thd; +class Thd_ndb { + THD *const m_thd; - Thd_ndb(THD*); + Thd_ndb(THD *); ~Thd_ndb(); - const bool m_slave_thread; // cached value of thd->slave_thread + const bool m_slave_thread; // cached value of thd->slave_thread uint32 options; uint32 trans_options; class Ndb_DDL_transaction_ctx *m_ddl_ctx; -public: - static Thd_ndb* seize(THD*); - static void release(Thd_ndb* thd_ndb); + + public: + static Thd_ndb *seize(THD *); + static void release(Thd_ndb *thd_ndb); void init_open_tables(); @@ -63,14 +63,13 @@ class Thd_ndb bool m_slow_path; bool m_force_send; - enum Options - { + enum Options { /* Don't distribute schema operations for this thread. NOTE! Flag is _only_ set by the binlog injector thread and thus any DDL operations it performs are not distributed. */ - NO_LOG_SCHEMA_OP= 1 << 0, + NO_LOG_SCHEMA_OP = 1 << 0, /* This Thd_ndb is a participant in a global schema distribution. Whenver a GSL lock is required, it is acquired by the coordinator. @@ -78,12 +77,12 @@ class Thd_ndb for the schema operation it is part of. Thus it should not take any GSL locks itself. */ - IS_SCHEMA_DIST_PARTICIPANT= 1 << 1, + IS_SCHEMA_DIST_PARTICIPANT = 1 << 1, /* Allow Thd_ndb to setup schema distribution and apply status */ - ALLOW_BINLOG_SETUP= 1 << 2, + ALLOW_BINLOG_SETUP = 1 << 2, /* Create a ndbcluster util table in DD. 
The table already exists @@ -107,30 +106,23 @@ class Thd_ndb // Guard class for automatically restoring the state of // Thd_ndb::options when the guard goes out of scope - class Options_guard - { - Thd_ndb* const m_thd_ndb; + class Options_guard { + Thd_ndb *const m_thd_ndb; const uint32 m_save_options; - public: - Options_guard(Thd_ndb* thd_ndb) - : m_thd_ndb(thd_ndb), - m_save_options(thd_ndb->options) - { + + public: + Options_guard(Thd_ndb *thd_ndb) + : m_thd_ndb(thd_ndb), m_save_options(thd_ndb->options) { assert(sizeof(m_save_options) == sizeof(thd_ndb->options)); } - ~Options_guard() - { + ~Options_guard() { // Restore the saved options - m_thd_ndb->options= m_save_options; - } - void set(Options option) - { - m_thd_ndb->set_option(option); + m_thd_ndb->options = m_save_options; } + void set(Options option) { m_thd_ndb->set_option(option); } }; - enum Trans_options - { + enum Trans_options { /* Remember that statement has written to ndb_apply_status and subsequent writes need to do updates @@ -141,13 +133,13 @@ class Thd_ndb Indicator that no looging is performd by this MySQL Server ans thus the anyvalue should have the nologging bit turned on */ - TRANS_NO_LOGGING = 1 << 1, + TRANS_NO_LOGGING = 1 << 1, /* Turn off transactional behaviour for the duration of this transaction/statement */ - TRANS_TRANSACTIONS_OFF = 1 << 2 + TRANS_TRANSACTIONS_OFF = 1 << 2 }; // Check if given trans option is set @@ -160,8 +152,8 @@ class Thd_ndb // Start of transaction check, to automatically detect which // trans options should be enabled void transaction_checks(void); - malloc_unordered_map - open_tables{PSI_INSTRUMENT_ME}; + malloc_unordered_map open_tables{ + PSI_INSTRUMENT_ME}; /* This is a memroot used to buffer rows for batched execution. It is reset after every execute(). @@ -221,8 +213,8 @@ class Thd_ndb NdbTransaction *global_schema_lock_trans; uint global_schema_lock_count; uint global_schema_lock_error; - uint schema_locks_count; // Number of global schema locks taken by thread - bool has_required_global_schema_lock(const char* func) const; + uint schema_locks_count; // Number of global schema locks taken by thread + bool has_required_global_schema_lock(const char *func) const; /** Epoch of last committed transaction in this session, 0 if none so far @@ -242,8 +234,8 @@ class Thd_ndb @param[in] fmt printf-like format string @param[in] ... Variable arguments matching format string */ - void push_warning(const char* fmt, ...) const - MY_ATTRIBUTE((format(printf, 2, 3))); + void push_warning(const char *fmt, ...) const + MY_ATTRIBUTE((format(printf, 2, 3))); /* @brief Push a warning message onto THD's condition stack. @@ -253,8 +245,8 @@ class Thd_ndb @param[in] fmt printf-like format string @param[in] ... Variable arguments matching format string */ - void push_warning(uint code, const char* fmt, ...) const - MY_ATTRIBUTE((format(printf, 3, 4))); + void push_warning(uint code, const char *fmt, ...) const + MY_ATTRIBUTE((format(printf, 3, 4))); /* @brief Push an error from NDB as warning message onto THD's condition stack. 
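The Options_guard above is a small RAII helper: its constructor snapshots Thd_ndb::options and its destructor unconditionally writes the saved value back, so option bits set through the guard cannot leak past the scope even on an early return. A usage sketch, where the surrounding function is hypothetical:

void example_local_ddl(Thd_ndb *thd_ndb) {
  // Snapshot the current options; restored when 'guard' goes out of scope
  Thd_ndb::Options_guard guard(thd_ndb);

  // Suppress schema distribution for any DDL done in this scope
  guard.set(Thd_ndb::NO_LOG_SCHEMA_OP);

  // ... perform DDL that must not be distributed to other MySQL servers ...
}  // ~Options_guard() restores the previous Thd_ndb::options value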
diff --git a/storage/ndb/plugin/ndb_util_table.cc b/storage/ndb/plugin/ndb_util_table.cc index c98ed45867fe..26686848c765 100644 --- a/storage/ndb/plugin/ndb_util_table.cc +++ b/storage/ndb/plugin/ndb_util_table.cc @@ -28,10 +28,10 @@ #include #include "my_base.h" -#include "my_byteorder.h" // uint2korr +#include "my_byteorder.h" // uint2korr #include "mysql_version.h" -#include "ndbapi/NdbRecAttr.hpp" // NdbRecAttr -#include "sql/sql_class.h" // THD +#include "ndbapi/NdbRecAttr.hpp" // NdbRecAttr +#include "sql/sql_class.h" // THD #include "storage/ndb/plugin/ha_ndbcluster_binlog.h" #include "storage/ndb/plugin/ndb_dd_client.h" #include "storage/ndb/plugin/ndb_dd_table.h" @@ -42,7 +42,7 @@ #include "storage/ndb/plugin/ndb_thd_ndb.h" class Db_name_guard { - Ndb * const m_ndb; + Ndb *const m_ndb; const std::string m_save_old_dbname; Db_name_guard() = delete; Db_name_guard(const Db_name_guard &) = delete; @@ -68,7 +68,7 @@ class Util_table_creator { const char *db_name() const { return m_util_table.db_name(); } const char *table_name() const { return m_util_table.table_name(); } - bool create_or_upgrade_in_NDB(bool upgrade_allowed, bool& reinstall) const; + bool create_or_upgrade_in_NDB(bool upgrade_allowed, bool &reinstall) const; bool install_in_DD(bool reinstall); @@ -82,10 +82,8 @@ class Util_table_creator { bool create_or_upgrade(bool upgrade_allowed, bool create_events); }; - -Ndb_util_table::Ndb_util_table(Thd_ndb* thd_ndb, std::string db_name, - std::string table_name, bool hidden, - bool events) +Ndb_util_table::Ndb_util_table(Thd_ndb *thd_ndb, std::string db_name, + std::string table_name, bool hidden, bool events) : m_thd_ndb(thd_ndb), m_table_guard(thd_ndb->ndb->getDictionary()), m_db_name(std::move(db_name)), @@ -100,7 +98,7 @@ bool Ndb_util_table::create_or_upgrade(THD *thd, bool upgrade_flag) { return creator.create_or_upgrade(upgrade_flag, m_create_events); } -void Ndb_util_table::push_warning(const char* fmt, ...) const { +void Ndb_util_table::push_warning(const char *fmt, ...) 
const { // Assemble the message char message[512]; va_list args; @@ -135,7 +133,7 @@ bool Ndb_util_table::exists() const { } bool Ndb_util_table::open() { - Ndb* ndb = m_thd_ndb->ndb; + Ndb *ndb = m_thd_ndb->ndb; // Set correct database name on the Ndb object Db_name_guard db_guard(ndb, m_db_name.c_str()); @@ -143,7 +141,7 @@ bool Ndb_util_table::open() { // Load up the table definition from NDB dictionary m_table_guard.init(m_table_name.c_str()); - const NdbDictionary::Table* tab = m_table_guard.get_table(); + const NdbDictionary::Table *tab = m_table_guard.get_table(); if (!tab) { push_warning("Failed to open table from NDB"); return false; @@ -152,17 +150,16 @@ bool Ndb_util_table::open() { return true; } -const NdbDictionary::Table* Ndb_util_table::get_table() const { +const NdbDictionary::Table *Ndb_util_table::get_table() const { return m_table_guard.get_table(); } -const NdbDictionary::Column* Ndb_util_table::get_column( - const char* name) const { +const NdbDictionary::Column *Ndb_util_table::get_column( + const char *name) const { return get_table()->getColumn(name); } - -bool Ndb_util_table::check_column_exist(const char* name) const { +bool Ndb_util_table::check_column_exist(const char *name) const { if (get_column(name) == nullptr) { push_warning("Could not find expected column '%s'", name); return false; @@ -171,33 +168,30 @@ bool Ndb_util_table::check_column_exist(const char* name) const { } bool Ndb_util_table::check_primary_key( - const std::vector<const char*> columns) const { + const std::vector<const char *> columns) const { // Check that the primary key of the table matches the given columns int keys = 0; - for (const char* name : columns) { - if (!get_column(name)->getPrimaryKey()) - { + for (const char *name : columns) { + if (!get_column(name)->getPrimaryKey()) { push_warning("Column '%s' is not part of primary key", name); return false; } keys++; } - if (keys != get_table()->getNoOfPrimaryKeys()) - { + if (keys != get_table()->getNoOfPrimaryKeys()) { push_warning("Invalid primary key"); return false; } return true; } -int Ndb_util_table::get_column_max_length(const char* name) const -{ +int Ndb_util_table::get_column_max_length(const char *name) const { return get_column(name)->getLength(); } -bool Ndb_util_table::check_column_type(const NdbDictionary::Column* col, +bool Ndb_util_table::check_column_type(const NdbDictionary::Column *col, NdbDictionary::Column::Type type, - const char* type_name) const { + const char *type_name) const { if (col->getType() != type) { push_warning("Column '%s' must be defined as '%s'", col->getName(), type_name); @@ -206,7 +200,7 @@ bool Ndb_util_table::check_column_type(const NdbDictionary::Column* col, return true; } -bool Ndb_util_table::check_column_minlength(const char* name, +bool Ndb_util_table::check_column_minlength(const char *name, int min_length) const { if (get_column(name)->getLength() < min_length) { push_warning("Column '%s' is too short, need at least %d bytes", name, @@ -216,32 +210,33 @@ bool Ndb_util_table::check_column_minlength(const char* name, return true; } -bool Ndb_util_table::check_column_varbinary(const char* name) const { +bool Ndb_util_table::check_column_varbinary(const char *name) const { return check_column_type(get_column(name), NdbDictionary::Column::Varbinary, "VARBINARY"); } -bool Ndb_util_table::check_column_binary(const char* name) const { +bool Ndb_util_table::check_column_binary(const char *name) const { return check_column_type(get_column(name), NdbDictionary::Column::Binary, "BINARY"); } -bool
Ndb_util_table::check_column_unsigned(const char* name) const { +bool Ndb_util_table::check_column_unsigned(const char *name) const { return check_column_type(get_column(name), NdbDictionary::Column::Unsigned, "INT UNSIGNED "); } -bool Ndb_util_table::check_column_bigunsigned(const char* name) const { +bool Ndb_util_table::check_column_bigunsigned(const char *name) const { return check_column_type(get_column(name), NdbDictionary::Column::Bigunsigned, "BIGINT UNSIGNED"); } -bool Ndb_util_table::check_column_blob(const char* name) const { +bool Ndb_util_table::check_column_blob(const char *name) const { return check_column_type(get_column(name), NdbDictionary::Column::Blob, "BLOB"); } -bool Ndb_util_table::check_column_nullable(const char* name, bool nullable) const { +bool Ndb_util_table::check_column_nullable(const char *name, + bool nullable) const { if (get_column(name)->getNullable() != nullable) { push_warning("Column '%s' must be defined to %sallow NULL values", name, nullable ? "" : "not "); @@ -266,8 +261,8 @@ bool Ndb_util_table::define_indexes(const NdbDictionary::Table &, return true; } -bool Ndb_util_table::create_index(const NdbDictionary::Table & table, - const NdbDictionary::Index & idx) const { +bool Ndb_util_table::create_index(const NdbDictionary::Table &table, + const NdbDictionary::Index &idx) const { Db_name_guard db_guard(m_thd_ndb->ndb, m_db_name.c_str()); NdbDictionary::Dictionary *dict = m_thd_ndb->ndb->getDictionary(); @@ -280,13 +275,13 @@ bool Ndb_util_table::create_index(const NdbDictionary::Table & table, } bool Ndb_util_table::create_primary_ordered_index( - const NdbDictionary::Table & table) const { + const NdbDictionary::Table &table) const { NdbDictionary::Index index("PRIMARY"); index.setType(NdbDictionary::Index::OrderedIndex); index.setLogging(false); - for(int i = 0 ; i < table.getNoOfPrimaryKeys() ; i++) { + for (int i = 0; i < table.getNoOfPrimaryKeys(); i++) { index.addColumnName(table.getPrimaryKey(i)); } return create_index(table, index); @@ -312,10 +307,8 @@ bool Ndb_util_table::drop_table_in_NDB( Db_name_guard db_guard(m_thd_ndb->ndb, m_db_name.c_str()); NdbDictionary::Dictionary *dict = m_thd_ndb->ndb->getDictionary(); - if (!drop_events_in_NDB()) - { - push_warning("Failed to drop events for table '%s'", - m_table_name.c_str()); + if (!drop_events_in_NDB()) { + push_warning("Failed to drop events for table '%s'", m_table_name.c_str()); return false; } @@ -354,19 +347,15 @@ bool Ndb_util_table::create() const { mysql_version = 50725; } #endif - if (!define_table_ndb(new_table, mysql_version)) - return false; + if (!define_table_ndb(new_table, mysql_version)) return false; - if (!create_table_in_NDB(new_table)) - return false; + if (!create_table_in_NDB(new_table)) return false; - if(! 
define_indexes(new_table, mysql_version)) - return false; + if (!define_indexes(new_table, mysql_version)) return false; return true; } - // Upgrade table bool Ndb_util_table::upgrade() const { NdbDictionary::Table new_table(m_table_name.c_str()); @@ -391,9 +380,7 @@ bool Ndb_util_table::upgrade() const { return true; } - -std::string -Ndb_util_table::unpack_varbinary(NdbRecAttr* ndbRecAttr) { +std::string Ndb_util_table::unpack_varbinary(NdbRecAttr *ndbRecAttr) { DBUG_ENTER("Ndb_util_table::unpack_varbinary"); // Function should be called only on a varbinary column DBUG_ASSERT(ndbRecAttr->getType() == NdbDictionary::Column::Varbinary || @@ -402,7 +389,7 @@ Ndb_util_table::unpack_varbinary(NdbRecAttr* ndbRecAttr) { const char *value_start; size_t value_length; ndb_unpack_varchar(ndbRecAttr->getColumn(), 0, &value_start, &value_length, - ndbRecAttr->aRef()); + ndbRecAttr->aRef()); DBUG_RETURN(std::string(value_start, value_length)); } @@ -411,18 +398,14 @@ Ndb_util_table::unpack_varbinary(NdbRecAttr* ndbRecAttr) { // Util_table_creator // -Util_table_creator::Util_table_creator(THD *thd, - Thd_ndb *thd_ndb, - Ndb_util_table &util_table) : - m_thd(thd), - m_thd_ndb(thd_ndb), - m_util_table(util_table) -{ +Util_table_creator::Util_table_creator(THD *thd, Thd_ndb *thd_ndb, + Ndb_util_table &util_table) + : m_thd(thd), m_thd_ndb(thd_ndb), m_util_table(util_table) { m_name.append(db_name()).append(".").append(table_name()); } bool Util_table_creator::create_or_upgrade_in_NDB(bool upgrade_allowed, - bool& reinstall) const { + bool &reinstall) const { ndb_log_verbose(50, "Checking '%s' table", m_name.c_str()); if (!m_util_table.exists()) { @@ -459,7 +442,7 @@ bool Util_table_creator::create_or_upgrade_in_NDB(bool upgrade_allowed, ndb_log_error("Upgrade of '%s' table failed!", m_name.c_str()); return false; } - reinstall= true; + reinstall = true; ndb_log_info("Upgrade of '%s' table completed", m_name.c_str()); } @@ -542,8 +525,7 @@ bool Util_table_creator::setup_table_for_binlog() const { // Acquire exclusive MDL lock on schema and table Ndb_dd_client dd_client(m_thd); if (!dd_client.mdl_locks_acquire_exclusive(db_name(), table_name())) { - ndb_log_error("Failed to acquire MDL lock for '%s' table", - m_name.c_str()); + ndb_log_error("Failed to acquire MDL lock for '%s' table", m_name.c_str()); m_thd->clear_error(); return false; } @@ -576,7 +558,7 @@ bool Util_table_creator::create_or_upgrade(bool upgrade_allowed, return false; } - if(create_events) { + if (create_events) { if (!setup_table_for_binlog()) { return false; } diff --git a/storage/ndb/plugin/ndb_util_table.h b/storage/ndb/plugin/ndb_util_table.h index 77aec6cb3451..8fa22934eb42 100644 --- a/storage/ndb/plugin/ndb_util_table.h +++ b/storage/ndb/plugin/ndb_util_table.h @@ -38,44 +38,43 @@ class Thd_ndb; // Base class used for working with tables created in NDB by the // ndbcluster plugin class Ndb_util_table { - Thd_ndb* const m_thd_ndb; + Thd_ndb *const m_thd_ndb; Ndb_table_guard m_table_guard; const std::string m_db_name; const std::string m_table_name; const bool m_hidden; const bool m_create_events; - bool check_column_type(const NdbDictionary::Column*, + bool check_column_type(const NdbDictionary::Column *, NdbDictionary::Column::Type type, - const char* type_name) const; + const char *type_name) const; - void push_ndb_error_warning(const NdbError& ndb_err) const; + void push_ndb_error_warning(const NdbError &ndb_err) const; protected: - const NdbDictionary::Column* get_column(const char* name) const; - void push_warning(const char* 
fmt, ...) const + const NdbDictionary::Column *get_column(const char *name) const; + void push_warning(const char *fmt, ...) const MY_ATTRIBUTE((format(printf, 2, 3))); - Ndb_util_table(class Thd_ndb*, std::string db_name, - std::string table_name, bool hidden, - bool create_events = true); + Ndb_util_table(class Thd_ndb *, std::string db_name, std::string table_name, + bool hidden, bool create_events = true); ~Ndb_util_table(); - bool check_column_exist(const char* name) const; + bool check_column_exist(const char *name) const; - bool check_column_varbinary(const char* name) const; - bool check_column_binary(const char* name) const; - bool check_column_unsigned(const char* name) const; - bool check_column_bigunsigned(const char* name) const; - bool check_column_blob(const char* name) const; + bool check_column_varbinary(const char *name) const; + bool check_column_binary(const char *name) const; + bool check_column_unsigned(const char *name) const; + bool check_column_bigunsigned(const char *name) const; + bool check_column_blob(const char *name) const; - bool check_column_nullable(const char* name, bool nullable) const; + bool check_column_nullable(const char *name, bool nullable) const; - bool check_column_minlength(const char* name, int min_length) const; + bool check_column_minlength(const char *name, int min_length) const; - bool check_primary_key(const std::vector<const char*> columns) const; + bool check_primary_key(const std::vector<const char *> columns) const; - int get_column_max_length(const char* name) const; + int get_column_max_length(const char *name) const; /** @brief Define the NdbApi table definition @@ -108,10 +107,10 @@ class Ndb_util_table { @brief Drop one event from NDB @return true if the event was dropped successfully */ - bool drop_event_in_NDB(const char* event_name) const; + bool drop_event_in_NDB(const char *event_name) const; public: - /** + /** @brief Create or upgrade the table in NDB, and in the local Data Dictionary, and set up NDB binlog events if enabled @return true on success @@ -137,19 +136,19 @@ class Ndb_util_table { functionality which does not exist after or during an upgrade. @return true if table definition fulfills minimal functionality. */ - virtual bool check_schema() const = 0; + virtual bool check_schema() const = 0; /** @brief Get name of the table @return name of the table */ - const char* table_name() const { return m_table_name.c_str(); } + const char *table_name() const { return m_table_name.c_str(); } /** @brief Get database of the table @return database of the table */ - const char* db_name() const { return m_db_name.c_str(); } + const char *db_name() const { return m_db_name.c_str(); } /** @brief Get hidden status of table @@ -192,7 +191,7 @@ class Ndb_util_table { @brief Unpack the varbinary column value @return the value stored in the varbinary column */ - static std::string unpack_varbinary(NdbRecAttr* ndbRecAttr); + static std::string unpack_varbinary(NdbRecAttr *ndbRecAttr); }; #endif
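A note on the unpack_varbinary() hunks above: the buffer returned by NdbRecAttr::aRef() for a VARBINARY column is length-prefixed, and ndb_unpack_varchar() recovers the payload start and length from it; the two-byte case is what the uint2korr comment on the my_byteorder.h include earlier in this patch refers to. The sketch below illustrates that decoding shape under the assumption of a one-byte prefix for Varbinary and a two-byte little-endian prefix for Longvarbinary; unpack_length_prefixed is an illustrative helper, not a function from the plugin.

#include <cassert>
#include <cstddef>
#include <string>

// Decode a length-prefixed byte string, assuming a 1-byte prefix
// (Varbinary-like) or a 2-byte little-endian prefix (Longvarbinary-like).
std::string unpack_length_prefixed(const char *buf, int prefix_bytes) {
  assert(prefix_bytes == 1 || prefix_bytes == 2);
  const unsigned char *p = reinterpret_cast<const unsigned char *>(buf);
  size_t length;
  if (prefix_bytes == 1)
    length = p[0];
  else  // little-endian, i.e. what uint2korr computes in the real code
    length = static_cast<size_t>(p[0]) | (static_cast<size_t>(p[1]) << 8);
  return std::string(buf + prefix_bytes, length);
}

int main() {
  const char packed[] = "\x05hello";  // 1-byte length prefix, then payload
  assert(unpack_length_prefixed(packed, 1) == "hello");
  return 0;
}

Returning std::string(value_start, value_length), as the real function does, copies the payload out so the result stays valid after the NdbRecAttr buffer is reused.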