diff --git a/tests/sql/src/main/java/sql/SQLTest.java b/tests/sql/src/main/java/sql/SQLTest.java
index fba98cd08..aea660946 100644
--- a/tests/sql/src/main/java/sql/SQLTest.java
+++ b/tests/sql/src/main/java/sql/SQLTest.java
@@ -19,6 +19,8 @@
  */
 package sql;
 
+import com.gemstone.gemfire.cache.Region;
+import com.pivotal.gemfirexd.internal.engine.distributed.GfxdMessage;
 import hydra.ClientVmInfo;
 import hydra.DerbyServerHelper;
 import hydra.EnvHelper;
@@ -3280,7 +3282,7 @@ public void verifyResultSetsStandalone(){
     }
   }
 
-  protected void verifyResultSets(Connection dConn, Connection gConn) { 
+  protected void verifyResultSets(Connection dConn, Connection gConn) {
    if (dConn == null) {
      Log.getLogWriter().info("Connection to disc db is null, could not verify results");
      return;
@@ -3291,23 +3293,149 @@ protected void verifyResultSets(Connection dConn, Connection gConn) {
    for (String[] table: tables) {
      try {
        Log.getLogWriter().info("verifyResultSets-verifyResultSets-schema " + table[0] + " and table " + table[1]);
-        //if (SQLPrms.isSnappyMode() && !(table[0].equalsIgnoreCase("SNAPPY_HIVE_METASTORE")))
-          verifyResultSets(dConn, gConn, table[0], table[1]);
+        verifyResultSets(dConn, gConn, table[0], table[1]);
+        if(RemoteTestModule.getCurrentThread().getCurrentTask().getTaskTypeString().equals
+            ("CLOSETASK")) {
+          dumpDiagnostics(gConn, table[0], table[1]);
+        }
      }catch (TestException te) {
-        if (verifyUsingOrderBy) throw te; //avoid OOME on accessor due to failure with large resultset 
-
+        if (verifyUsingOrderBy) throw te; //avoid OOME on accessor due to failure with large resultset
        Log.getLogWriter().info("verifyResultSets-do not throw Exception yet, until all tables are verified");
        throwException = true;
        //Log.getLogWriter().info("Logged test failure:\n" + te.getMessage());
        str.append(te.getMessage() + "\n");
     }  //temporary
+
    }
    if (throwException) {
      throw new TestException ("verifyResultSets-verify results failed: " + str);
    }
  }
-  
+
+  protected void dumpDiagnostics(Connection gConn, String schema, String table) {
+    boolean throwException = false, dumpIndex = false;
+    StringBuffer exceptionStr = new StringBuffer();
+    String fullyQualifiedTableName = (schema + "." + table).toUpperCase();
+    int numRecordsInTable = 0, numRecordsWithSecondary = 0, redundantCopies, numCopies = 0, numExpectedRowCount = 0;
+    int numRowsInReplTable = 0;
+
+    Log.getLogWriter().info("dumpDiagnostics-Dumping diagnostics report for : " + fullyQualifiedTableName);
+
+    Region region = Misc.getRegionForTable(fullyQualifiedTableName, true);
+    int numDataStores = GfxdMessage.getAllDataStores().size();
+
+    try {
+      if (region instanceof PartitionedRegion) { // if partition table get redundancy level
+        redundantCopies = ((PartitionedRegion)region).getRedundantCopies();
+        Log.getLogWriter().info("DataPolicy is partition and redundancy is :" + redundantCopies);
+        String sql = "select count(*), dsid() from sys.members m --GEMFIREXD-PROPERTIES withSecondaries=true \n , "
+            + fullyQualifiedTableName + " where dsid() = m.id group by dsid()";
+        Log.getLogWriter().info("Executing query : " + sql);
+        PreparedStatement ps1 = gConn.prepareStatement(sql);
+        ResultSet rsWithSecondary = ps1.executeQuery();
+
+        String sql1 = "select count(*), dsid() from sys.members m --GEMFIREXD-PROPERTIES withSecondaries=false \n , "
+            + fullyQualifiedTableName + " where dsid() = m.id group by dsid()";
+        Log.getLogWriter().info("Executing query : " + sql1);
+        ps1 = gConn.prepareStatement(sql1);
+        ResultSet rsWithoutSecondary = ps1.executeQuery();
+
+        while (rsWithoutSecondary.next()) {
+          numRecordsInTable += rsWithoutSecondary.getInt(1);
+        }
+        rsWithoutSecondary.close();
+
+        while (rsWithSecondary.next()) {
+          numRecordsWithSecondary += rsWithSecondary.getInt(1);
+        }
+        rsWithSecondary.close();
+
+        numExpectedRowCount = numRecordsInTable * (redundantCopies + 1);
+        if (numExpectedRowCount != numRecordsWithSecondary) {
+          exceptionStr.append("Number of rows in primary and secondary did not match. \n NumRows in primary are " + numRecordsInTable
+              + "and with redundancy " + redundantCopies + " , expected row count is " + numExpectedRowCount
+              + " but found " + numRecordsWithSecondary);
+          throwException = true;
+        }
+        numCopies = redundantCopies + 1;
+      } else { // if replicated table
+        Log.getLogWriter().info("DataPolicy is replicate and numdataStores is :" + numDataStores);
+        String sql2 = "select count(*), dsid() from sys.members m --GEMFIREXD-PROPERTIES withSecondaries=false \n , "
+            + fullyQualifiedTableName + " where dsid() = m.id group by dsid()";
+        Log.getLogWriter().info("Executing query :" + sql2);
+        PreparedStatement ps1 = gConn.prepareStatement(sql2);
+        ResultSet rsWithoutSecondary = ps1.executeQuery();
+        while (rsWithoutSecondary.next()) {
+          numRowsInReplTable += rsWithoutSecondary.getInt(1);
+        }
+
+        ResultSet rs = gConn.createStatement().executeQuery("select count(*) from " + fullyQualifiedTableName);
+        if (rs.next())
+          numRecordsInTable = rs.getInt(1);
+        rs.close();
+
+        numExpectedRowCount = numRecordsInTable * numDataStores;
+        if (numExpectedRowCount != numRowsInReplTable) {
+          exceptionStr.append("Number of rows in replicated table across datastores do not match.\n Num rows in table are "
+              + numRecordsInTable + " and with " + numDataStores + " dataStores, expected total row count is "
+              + numExpectedRowCount + " but found " + numRowsInReplTable);
+          throwException = true;
+        }
+        numCopies = numDataStores;
+      }
+
+      //compare number of rows in indexes
+      String sql3 = "select indexname,indextype from sys.indexes where schemaname=? and tablename=? and indextype not in ('PRIMARY KEY','GLOBAL:HASH')";
+      Log.getLogWriter().info("Executing query :" + sql3);
+      PreparedStatement ps2 = gConn.prepareStatement(sql3);
+      ps2.setString(1, schema.toUpperCase());
+      ps2.setString(2, table.toUpperCase());
+      ResultSet rsIndex = ps2.executeQuery();
+      Log.getLogWriter().info("From system table, index names for " + fullyQualifiedTableName + " in gfxd: ");
+      while (rsIndex.next()) {
+        int numRows = 0;
+        String indexName = rsIndex.getString("indexname");
+        Log.getLogWriter().info("indexName :: " + indexName + "::" + rsIndex.getString("indextype"));
+        String sql4 = "select count(*), dsid() from sys.members m , " + fullyQualifiedTableName + " --GEMFIREXD-PROPERTIES index=" + indexName + " \n "
+            + "where dsid() = m.id group by dsid()";
+        Log.getLogWriter().info("Executing query : " + sql4);
+        PreparedStatement ps3 = gConn.prepareStatement(sql4);
+        ResultSet rs = ps3.executeQuery();
+        while (rs.next()) {
+          numRows += rs.getInt(1);
+        }
+        rs.close();
+        numExpectedRowCount = numRecordsInTable * numCopies;
+        if (numRows != numExpectedRowCount) {// validation for index
+          exceptionStr.append("\n Number of rows in index and table did not match. Num rows in table are "
+              + numRecordsInTable + " and with " + numCopies + " copies, expected total row count is "
+              + numExpectedRowCount + " but found " + numRows);
+          throwException = true;
+          dumpIndex = true;
+        }
+      }
+      rsIndex.close();
+    } catch (SQLException se) {
+      if (!(se.getSQLState().equals("X0Y55") || se.getSQLState().equals("X0Y60"))) {
+        throwException = true;
+        exceptionStr.append(se.getMessage() + TestHelper.getStackTrace(se) + "\n");
+      }
+    }
+    if (throwException) {
+      if (dumpIndex) {
+        try {
+          Log.getLogWriter().info("Dumping index data for " + schema + "." + table);
+          ResultSet rs = gConn.createStatement().executeQuery("VALUES SYS.CHECK_TABLE_EX('" + schema + "','" + table + "')");
+          rs.close();
+        } catch (SQLException se) {
+          exceptionStr.append("\n Query execution failed for CHECK_TABLE_EX : " + se.getMessage());
+        }
+      }
+      throw new TestException("dumpDiagnostics-diagnostics failed for : " + fullyQualifiedTableName + " \n " + exceptionStr);
+    }
+  }
+
  protected void jsonVerification(Connection gConn){
    boolean throwException = false;
@@ -3478,7 +3606,6 @@ protected void verifyResultSets(Connection dConn, Connection gConn, String schem
    if ( (verifyByTid && getMyTid() == 0 ) || (!verifyByTid) )
      verifyResultSets(dConn, gConn, schema, table, select, hasHdfs);
 
-    //TODO temp work around large number of rows returned using heap dump to clear heap
    if (verifyUsingOrderBy && (table.contains("customers".toUpperCase()) ||
        table.contains("networth".toUpperCase()))) {
@@ -6999,8 +7126,7 @@ protected boolean partitionedOnSymbol() {
 
  protected void removeUniqueKeyContraint(Connection conn) {
    try {
-      String sql = "alter table trade.securities "
-          + "drop unique SEC_UQ ";
+      String sql = "alter table trade.securities " + "drop unique SEC_UQ ";
      Log.getLogWriter().info(sql);
      conn.createStatement().execute(sql);
    } catch (SQLException se) {
diff --git a/tests/sql/src/main/java/sql/dmlStatements/TradeBuyOrdersDMLStmt.java b/tests/sql/src/main/java/sql/dmlStatements/TradeBuyOrdersDMLStmt.java
index 1e70cc4d1..05733fe00 100644
--- a/tests/sql/src/main/java/sql/dmlStatements/TradeBuyOrdersDMLStmt.java
+++ b/tests/sql/src/main/java/sql/dmlStatements/TradeBuyOrdersDMLStmt.java
@@ -94,7 +94,7 @@ public class TradeBuyOrdersDMLStmt extends AbstractDMLStmt {
    //no uniqkey queries
    "select * from trade.buyorders",
    "select cid, bid, cid, sid from trade.buyorders where cid >? and sid ? and orderTime=1",
+    "select sid, CAST(count(*) as Integer) as COUNT from trade.buyorders where status =? GROUP BY sid HAVING count(*) >=1",
    "select cid, CAST(count(distinct sid) as integer) as DIST_SID from trade.buyorders where status =? GROUP BY cid",
    "select cid, cast (avg(qty*bid) as decimal (30, 20)) as amount from trade.buyorders where status =? GROUP BY cid ORDER BY amount",
    "select cid, max(qty*bid) as largest_order from trade.buyorders where status =? GROUP BY cid HAVING max(qty*bid) > 20000 ORDER BY largest_order, cid DESC ",
diff --git a/tests/sql/src/main/java/sql/sqlutil/ResultSetHelper.java b/tests/sql/src/main/java/sql/sqlutil/ResultSetHelper.java
index 1211eb970..853d924c9 100644
--- a/tests/sql/src/main/java/sql/sqlutil/ResultSetHelper.java
+++ b/tests/sql/src/main/java/sql/sqlutil/ResultSetHelper.java
@@ -437,8 +437,8 @@ public static boolean compareResultSets(ResultSet derbyResultSet, ResultSet gfxd
    StructTypeImpl gfxdsti = null;
    if (SQLDAPTest.cidByRange || SQLDAPTest.tidByList) gfxdsti = sti; //reuse derby structure to work around #46311
    else gfxdsti = getStructType(gfxdResultSet);
-    Log.getLogWriter().info("[Sonal]Derby sti is : " + sti.toString());
-    Log.getLogWriter().info("[Sonal]Snappy sti is :" + gfxdsti.toString());
+    //Log.getLogWriter().info("Derby sti is : " + sti.toString());
+    //Log.getLogWriter().info("Snappy sti is :" + gfxdsti.toString());
 
    List GFEList = asList(gfxdResultSet, gfxdsti, false);
    if (GFEList == null && SQLTest.isHATest) { //due to #41471 in HA && possible #42443 for offline exception test coverage
@@ -558,7 +558,13 @@ public static void compareResultSets(List firstResultSet,
          if (!missingCol.equals(unexpectedCol)) {
            if (missingCol.getClass().getName().contains("BigDecimal") && unexpectedCol
                .getClass().getName().contains("BigDecimal")) {
-              Double diff = (((BigDecimal)missingCol).subtract((BigDecimal)unexpectedCol)).doubleValue();
+              BigDecimal value1 = (BigDecimal)missingCol;
+              BigDecimal value2 = (BigDecimal)unexpectedCol;
+              Double diff;
+              if(value1.doubleValue() > value2.doubleValue())
+                diff = value1.subtract(value2).doubleValue();
+              else
+                diff = value2.subtract(value1).doubleValue();
              Log.getLogWriter().info("diff is " + diff);
              if (diff <= 0.01) {
                isGenuineMismatch = false;
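
Note on the ResultSetHelper hunk above: the new value1/value2 branch makes the 0.01 tolerance check symmetric by always subtracting the smaller value from the larger one. The fragment below is an illustrative, standalone sketch of that comparison (it is not part of the patch; the class and method names are made up for the example) showing that BigDecimal.subtract(...).abs() yields the same absolute difference without the explicit branch.

import java.math.BigDecimal;

// Standalone sketch only -- not part of SQLTest or ResultSetHelper.
public class BigDecimalToleranceSketch {

  // True when the two values differ by at most 'tolerance', regardless of
  // which operand is larger (same effect as the value1/value2 branch above).
  static boolean withinTolerance(BigDecimal missingCol, BigDecimal unexpectedCol, double tolerance) {
    double diff = missingCol.subtract(unexpectedCol).abs().doubleValue();
    return diff <= tolerance;
  }

  public static void main(String[] args) {
    BigDecimal derbyValue = new BigDecimal("20.4560");
    BigDecimal gfxdValue = new BigDecimal("20.4523");
    // |20.4560 - 20.4523| = 0.0037, so this prints true for the 0.01 tolerance.
    System.out.println(withinTolerance(derbyValue, gfxdValue, 0.01));
  }
}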
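
For reference, the row-count validation introduced in dumpDiagnostics boils down to two expectations: a partitioned table should report rowsInTable * (redundantCopies + 1) rows when secondaries are included, and a replicated table should report rowsInTable * numDataStores rows summed across dsid(). The sketch below restates that arithmetic detached from any GemFireXD or Hydra classes; the helper name and its methods are hypothetical and only illustrate the expected counts the diagnostics compare against.

// Hypothetical helper -- illustrates the expected-count arithmetic only,
// with no GemFireXD or Hydra dependencies.
final class RowCountExpectation {

  // Partitioned table: one primary copy plus one copy per redundant copy.
  static int expectedWithSecondaries(int rowsInTable, int redundantCopies) {
    return rowsInTable * (redundantCopies + 1);
  }

  // Replicated table: every data store holds the full table.
  static int expectedAcrossDataStores(int rowsInTable, int numDataStores) {
    return rowsInTable * numDataStores;
  }

  public static void main(String[] args) {
    // 1000 rows at redundancy 1 -> 2000 rows expected over primaries and secondaries.
    System.out.println(expectedWithSecondaries(1000, 1));
    // 1000 rows replicated to 3 data stores -> 3000 rows expected summed over dsid().
    System.out.println(expectedAcrossDataStores(1000, 3));
  }
}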