org.apache.parquet.Strings Java Examples
The following examples show how to use org.apache.parquet.Strings. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
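As a quick orientation, the sketch below exercises the four methods these examples revolve around: isNullOrEmpty, join, expandGlob, and expandGlobToWildCardPaths. It is a minimal sketch, assuming parquet-mr's parquet-common module is on the classpath and that WildcardPath lives in org.apache.parquet.glob; the class name is invented, so check signatures against your version.

import java.util.List;
import org.apache.parquet.Strings;
import org.apache.parquet.glob.WildcardPath;

public class StringsOverview {
  public static void main(String[] args) {
    // Null-safe emptiness check
    System.out.println(Strings.isNullOrEmpty(null));  // true
    System.out.println(Strings.isNullOrEmpty("x"));   // false

    // Join an array (an Iterable overload also appears in the examples below)
    System.out.println(Strings.join(new String[] {"a", "b", "c"}, ","));  // a,b,c

    // Bash-style {} glob expansion into concrete strings
    List<String> expanded = Strings.expandGlob("foo{bar,baz}");
    System.out.println(expanded);  // [foobar, foobaz]

    // Glob expansion into WildcardPath matchers over '.'-delimited paths
    List<WildcardPath> paths = Strings.expandGlobToWildCardPaths("a.{b,c}", '.');
    System.out.println(paths.size());  // 2
  }
}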
Example #1
Source File: PathUtils.java From Bats with Apache License 2.0
/**
 * Normalizes the given path eliminating repeated forward slashes.
 *
 * @return normalized path
 */
public static final String normalize(final String path) {
  if (Strings.isNullOrEmpty(Preconditions.checkNotNull(path))) {
    return path;
  }
  final StringBuilder builder = new StringBuilder();
  char last = path.charAt(0);
  builder.append(last);
  for (int i = 1; i < path.length(); i++) {
    char cur = path.charAt(i);
    if (last == '/' && cur == last) {
      continue;
    }
    builder.append(cur);
    last = cur;
  }
  return builder.toString();
}
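A hypothetical invocation (not part of the Bats source, assuming the PathUtils class above is on the classpath): repeated slashes collapse to one, while single leading and trailing slashes survive.

public class NormalizeDemo {
  public static void main(String[] args) {
    // Expected: /a/b/
    System.out.println(PathUtils.normalize("//a//b/"));
    // Expected: a/b (already normal, passes through unchanged)
    System.out.println(PathUtils.normalize("a/b"));
  }
}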
Example #2
Source File: WholeFileTransformerProcessor.java From datacollector with Apache License 2.0
@Override
protected List<ConfigIssue> init() {
  List<ConfigIssue> issues = super.init();
  if (Strings.isNullOrEmpty(jobConfig.tempDir)) {
    issues.add(getContext().createConfigIssue(Groups.JOB.name(), JobConfig.TEMPDIR, Errors.CONVERT_02));
  }
  this.context = getContext();
  this.errorRecordHandler = new DefaultErrorRecordHandler(getContext());
  tempDirElEval = context.createELEval("tempDir");
  compressionElEval = context.createELEval("compressionCodec");
  rateLimitElEval = FileRefUtil.createElEvalForRateLimit(getContext());
  variables = context.createELVars();
  return issues;
}
Example #3
Source File: TestGlob.java From parquet-mr with Apache License 2.0
private void assertTooManyCloseBraces(String s) {
  String expected = "Unexpected closing }:";
  try {
    Strings.expandGlob(s);
    fail("this should throw");
  } catch (GlobParseException e) {
    Assert.assertEquals(expected, e.getMessage().substring(0, expected.length()));
  }
}
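A hypothetical test inside the same class could drive this helper with globs whose closing braces outnumber the opening ones (inputs invented for illustration):

@Test
public void testUnmatchedCloseBrace() {
  assertTooManyCloseBraces("foo}bar");       // '}' with no matching '{'
  assertTooManyCloseBraces("foo{a,b}}bar");  // one '}' too many
}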
Example #4
Source File: PathUtils.java From Bats with Apache License 2.0
/**
 * Returns a normalized, combined path out of the given path segments.
 *
 * @param parts path segments to combine
 * @see #normalize(String)
 */
public static final String join(final String... parts) {
  final StringBuilder sb = new StringBuilder();
  for (final String part : parts) {
    Preconditions.checkNotNull(part, "parts cannot contain null");
    if (!Strings.isNullOrEmpty(part)) {
      sb.append(part).append("/");
    }
  }
  if (sb.length() > 0) {
    sb.deleteCharAt(sb.length() - 1);
  }
  final String path = sb.toString();
  return normalize(path);
}
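A hypothetical call, again assuming the PathUtils class above: empty segments are skipped, the rest are glued with '/', and normalize() then collapses any doubled slashes.

public class JoinDemo {
  public static void main(String[] args) {
    // "a/" plus the appended "/" leaves a double slash that normalize()
    // collapses; the empty segment is skipped entirely. Expected: a/b/c
    System.out.println(PathUtils.join("a/", "b", "", "c"));
  }
}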
Example #5
Source File: TestGlob.java From parquet-mr with Apache License 2.0
private void assertNotEnoughCloseBraces(String s) {
  String expected = "Not enough close braces in: ";
  try {
    Strings.expandGlob(s);
    fail("this should throw");
  } catch (GlobParseException e) {
    Assert.assertEquals(expected, e.getMessage().substring(0, expected.length()));
  }
}
Example #6
Source File: TestGlob.java From parquet-mr with Apache License 2.0
@Test
public void testCommaCornerCases() {
  // single empty string in each location
  assertEquals(Arrays.asList("foobar", "foo", "foobaz"), Strings.expandGlob("foo{bar,,baz}"));
  assertEquals(Arrays.asList("foo", "foobar", "foobaz"), Strings.expandGlob("foo{,bar,baz}"));
  assertEquals(Arrays.asList("foobar", "foobaz", "foo"), Strings.expandGlob("foo{bar,baz,}"));

  // multiple empty strings
  assertEquals(Arrays.asList("foobar", "foo", "foo", "foobaz"), Strings.expandGlob("foo{bar,,,baz}"));
  assertEquals(Arrays.asList("foo", "foo", "foobar", "foobaz"), Strings.expandGlob("foo{,,bar,baz}"));
  assertEquals(Arrays.asList("foobar", "foobaz", "foo", "foo"), Strings.expandGlob("foo{bar,baz,,}"));

  // between groups
  assertEquals(Arrays.asList("x", "y", "", "a", "b"), Strings.expandGlob("{{x,y},,{a,b}}"));
}
Example #7
Source File: TestGlob.java From parquet-mr with Apache License 2.0
@Test
public void testCommaInTopLevel() {
  try {
    Strings.expandGlob("foo,bar");
    fail("This should throw");
  } catch (GlobParseException e) {
    Assert.assertEquals("Unexpected comma outside of a {} group:\n" + "foo,bar\n" + "---^", e.getMessage());
  }
}
Example #8
Source File: TestGlob.java From parquet-mr with Apache License 2.0
@Test
public void testNested() {
  assertEquals(Arrays.asList(
      "startoneend", "startpretwopostend", "startprethreepostend",
      "startfourend", "startfiveend", "a", "b", "foox", "fooy"),
      Strings.expandGlob("{start{one,pre{two,three}post,{four,five}}end,a,b,foo{x,y}}"));
}
Example #9
Source File: TestGlob.java From parquet-mr with Apache License 2.0
@Test
public void testSingleLevel() {
  assertEquals(Arrays.asList("foobar", "foobaz"), Strings.expandGlob("foo{bar,baz}"));
  assertEquals(Arrays.asList("startfooend", "startbarend"), Strings.expandGlob("start{foo,bar}end"));
  assertEquals(Arrays.asList("fooend", "barend"), Strings.expandGlob("{foo,bar}end"));
  assertEquals(Arrays.asList(
      "startfooenda", "startfooendb", "startfooendc", "startfooendd",
      "startbarenda", "startbarendb", "startbarendc", "startbarendd"),
      Strings.expandGlob("start{foo,bar}end{a,b,c,d}"));
  assertEquals(Arrays.asList("xa", "xb", "xc", "ya", "yb", "yc"), Strings.expandGlob("{x,y}{a,b,c}"));
  assertEquals(Arrays.asList("x", "y", "z"), Strings.expandGlob("{x,y,z}"));
}
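These cases show that expandGlob implements bash-style brace expansion: adjacent groups combine as a cross product, ordered left to right with the leftmost group varying slowest. A standalone sketch (the class name is illustrative):

import java.util.List;
import org.apache.parquet.Strings;

public class CrossProductDemo {
  public static void main(String[] args) {
    // 2 choices x 3 choices = 6 expansions
    List<String> out = Strings.expandGlob("{x,y}{a,b,c}");
    System.out.println(out);  // [xa, xb, xc, ya, yb, yc]
  }
}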
Example #10
Source File: TestGlob.java From parquet-mr with Apache License 2.0
@Test
public void testEmptyGroup() {
  assertEquals(Arrays.asList(""), Strings.expandGlob(""));
  assertEquals(Arrays.asList(""), Strings.expandGlob("{}"));
  assertEquals(Arrays.asList("a"), Strings.expandGlob("a{}"));
  assertEquals(Arrays.asList("ab"), Strings.expandGlob("a{}b"));
  assertEquals(Arrays.asList("a"), Strings.expandGlob("{}a"));
  assertEquals(Arrays.asList("a"), Strings.expandGlob("a{}"));
  assertEquals(Arrays.asList("", ""), Strings.expandGlob("{,}"));
  assertEquals(Arrays.asList("ab", "a", "ac"), Strings.expandGlob("a{b,{},c}"));
}
Example #11
Source File: StrictFieldProjectionFilter.java From parquet-mr with Apache License 2.0
/**
 * Construct a StrictFieldProjectionFilter from a list of Strings in the format expected by
 * {@link Strings#expandGlobToWildCardPaths(String, char)}
 *
 * @param columnsToKeepGlobs glob patterns for columns to keep
 */
public StrictFieldProjectionFilter(List<String> columnsToKeepGlobs) {
  this.columnsToKeep = new ArrayList<WildcardPathStatus>();
  for (String glob : columnsToKeepGlobs) {
    for (WildcardPath wp : Strings.expandGlobToWildCardPaths(glob, '.')) {
      columnsToKeep.add(new WildcardPathStatus(wp));
    }
  }
}
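A hedged usage sketch (the column names are invented): each glob is first brace-expanded, and every expansion is compiled into a WildcardPath matched against dot-delimited column paths. The import assumes the class sits in org.apache.parquet.thrift.projection, as in parquet-mr.

import java.util.Arrays;
import org.apache.parquet.thrift.projection.StrictFieldProjectionFilter;

public class KeepColumnsDemo {
  public static void main(String[] args) {
    // "address.{street,zip}" expands to address.street and address.zip,
    // so this filter keeps those two paths plus "name".
    StrictFieldProjectionFilter filter = new StrictFieldProjectionFilter(
        Arrays.asList("name", "address.{street,zip}"));
  }
}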
Example #12
Source File: ThriftReadSupport.java From parquet-mr with Apache License 2.0
public static FieldProjectionFilter getFieldProjectionFilter(Configuration conf) {
  String deprecated = conf.get(THRIFT_COLUMN_FILTER_KEY);
  String strict = conf.get(STRICT_THRIFT_COLUMN_FILTER_KEY);

  if (Strings.isNullOrEmpty(deprecated) && Strings.isNullOrEmpty(strict)) {
    return null;
  }

  if (!Strings.isNullOrEmpty(deprecated) && !Strings.isNullOrEmpty(strict)) {
    throw new ThriftProjectionException(
        "You cannot provide both " + THRIFT_COLUMN_FILTER_KEY + " and " + STRICT_THRIFT_COLUMN_FILTER_KEY + "! "
            + THRIFT_COLUMN_FILTER_KEY + " is deprecated.");
  }

  if (!Strings.isNullOrEmpty(deprecated)) {
    LOG.warn("Using {} is deprecated. Please see the docs for {}!",
        THRIFT_COLUMN_FILTER_KEY, STRICT_THRIFT_COLUMN_FILTER_KEY);
    return new DeprecatedFieldProjectionFilter(deprecated);
  }

  return StrictFieldProjectionFilter.fromSemicolonDelimitedString(strict);
}
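To drive this from job setup, one would set the strict filter key to a semicolon-delimited list of globs. A sketch, assuming the key constants are public as in parquet-mr (the column names are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.parquet.thrift.ThriftReadSupport;
import org.apache.parquet.thrift.projection.FieldProjectionFilter;

public class ProjectionConfigDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Semicolon-delimited column globs; referencing the constant avoids
    // hard-coding its literal value here.
    conf.set(ThriftReadSupport.STRICT_THRIFT_COLUMN_FILTER_KEY, "name;address.{street,zip}");
    FieldProjectionFilter filter = ThriftReadSupport.getFieldProjectionFilter(conf);
    System.out.println(filter != null);  // true: a strict filter was built
  }
}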
Example #13
Source File: SparkExecutableLivy.java From kylin with Apache License 2.0
@Override
protected int killAppRetry(String appId) throws IOException, InterruptedException {
  String status = getAppState(appId);
  if (Strings.isNullOrEmpty(status)
      || LivyStateEnum.dead.name().equalsIgnoreCase(status)
      || LivyStateEnum.error.name().equalsIgnoreCase(status)
      || LivyStateEnum.shutting_down.name().equalsIgnoreCase(status)) {
    logger.warn(appId + " is final state, no need to kill");
    return 0;
  }

  killApp(appId);

  status = getAppState(appId);
  int retry = 0;
  // Note: '&&' binds tighter than '||', so "retry < 5" only guards the
  // shutting_down clause; an empty, dead, or error status can loop indefinitely.
  while (Strings.isNullOrEmpty(status)
      || LivyStateEnum.dead.name().equalsIgnoreCase(status)
      || LivyStateEnum.error.name().equalsIgnoreCase(status)
      || LivyStateEnum.shutting_down.name().equalsIgnoreCase(status) && retry < 5) {
    killApp(appId);
    Thread.sleep(1000);
    status = getAppState(appId);
    retry++;
  }

  if (Strings.isNullOrEmpty(status)) {
    logger.info(appId + " killed successfully");
    return 0;
  } else {
    logger.info(appId + " kill failed");
    return 1;
  }
}
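If the retry bound is meant to cover the whole condition rather than only the shutting_down clause, a parenthesized variant would cap all retries at five. This is a sketch of that reading, not the project's code:

// Sketch: bound every retry path, not just the shutting_down case.
while ((Strings.isNullOrEmpty(status)
    || LivyStateEnum.dead.name().equalsIgnoreCase(status)
    || LivyStateEnum.error.name().equalsIgnoreCase(status)
    || LivyStateEnum.shutting_down.name().equalsIgnoreCase(status)) && retry < 5) {
  killApp(appId);
  Thread.sleep(1000);
  status = getAppState(appId);
  retry++;
}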
Example #14
Source File: SparkExecutableLivy.java From kylin with Apache License 2.0
@Override
protected ExecuteResult onResumed(String appId, ExecutableManager mgr) throws ExecuteException {
  Map<String, String> info = new HashMap<>();
  try {
    logger.info("livy spark_job_id:" + appId + " resumed");
    info.put(ExecutableConstants.SPARK_JOB_ID, appId);

    while (!isPaused() && !isDiscarded()) {
      String status = getAppState(appId);

      if (Strings.isNullOrEmpty(status)
          || LivyStateEnum.dead.name().equalsIgnoreCase(status)
          || LivyStateEnum.error.name().equalsIgnoreCase(status)
          || LivyStateEnum.shutting_down.name().equalsIgnoreCase(status)) {
        mgr.updateJobOutput(getId(), ExecutableState.ERROR, null, appId + " has failed");
        return new ExecuteResult(ExecuteResult.State.FAILED, appId + " has failed");
      }

      if (LivyStateEnum.success.name().equalsIgnoreCase(status)) {
        mgr.addJobInfo(getId(), info);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, appId + " has finished");
      }

      Thread.sleep(5000);
    }

    killAppRetry(appId);

    if (isDiscarded()) {
      return new ExecuteResult(ExecuteResult.State.DISCARDED, appId + " is discarded");
    } else {
      return new ExecuteResult(ExecuteResult.State.STOPPED, appId + " is stopped");
    }
  } catch (Exception e) {
    logger.error("error run spark job:", e);
    return new ExecuteResult(ExecuteResult.State.ERROR, e.getLocalizedMessage());
  }
}
Example #15
Source File: SparkExecutableLivy.java From kylin with Apache License 2.0
@Override
protected void onExecuteStart(ExecutableContext executableContext) {
  final Output output = getOutput();
  if (output.getExtra().containsKey(START_TIME)) {
    final String sparkJobID = output.getExtra().get(ExecutableConstants.SPARK_JOB_ID);
    if (sparkJobID == null) {
      getManager().updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
      return;
    }
    try {
      String status = getAppState(sparkJobID);
      if (Strings.isNullOrEmpty(status)
          || LivyStateEnum.dead.name().equalsIgnoreCase(status)
          || LivyStateEnum.error.name().equalsIgnoreCase(status)
          || LivyStateEnum.shutting_down.name().equalsIgnoreCase(status)) {
        // remove previous mr job info
        super.onExecuteStart(executableContext);
      } else {
        getManager().updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
      }
    } catch (IOException e) {
      logger.warn("error getting hadoop status");
      super.onExecuteStart(executableContext);
    }
  } else {
    super.onExecuteStart(executableContext);
  }
}
Example #16
Source File: HiveDB.java From Alink with Apache License 2.0
@Override
public Table getBatchTable(String tableName, Params parameter, Long sessionId) throws Exception {
  ExecutionEnvironment env = MLEnvironmentFactory.get(sessionId).getExecutionEnvironment();
  HiveBatchSource hiveTableSource = getHiveBatchSource(tableName, parameter);
  DataSet<BaseRow> dataSet = hiveTableSource.getDataSet(env);
  TableSchema schema = hiveTableSource.getTableSchema();
  final DataType[] dataTypes = schema.getFieldDataTypes();
  DataSet<Row> rows = dataSet.map(new BaseRowToRow(dataTypes));
  Table tbl = DataSetConversionUtil.toTable(sessionId, rows, schema);
  if (getPartitionCols(tableName).size() > 0) { // remove static partition columns
    String[] fieldNames = getColNames(tableName);
    tbl = tbl.select(Strings.join(fieldNames, ","));
  }
  return tbl;
}
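The Strings.join call above just builds the comma-separated select list. In isolation (column names invented for illustration):

import org.apache.parquet.Strings;

public class SelectListDemo {
  public static void main(String[] args) {
    String[] fieldNames = {"id", "name", "score"};
    System.out.println(Strings.join(fieldNames, ","));  // id,name,score
  }
}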
Example #17
Source File: SparkExecutableLivy.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected int killAppRetry(String appId) throws IOException, InterruptedException {
  String status = getAppState(appId);
  if (Strings.isNullOrEmpty(status)
      || LivyStateEnum.dead.name().equalsIgnoreCase(status)
      || LivyStateEnum.error.name().equalsIgnoreCase(status)
      || LivyStateEnum.shutting_down.name().equalsIgnoreCase(status)) {
    logger.warn(appId + " is final state, no need to kill");
    return 0;
  }

  killApp(appId);

  status = getAppState(appId);
  int retry = 0;
  // Note: as in Example #13, "retry < 5" only guards the shutting_down clause.
  while (Strings.isNullOrEmpty(status)
      || LivyStateEnum.dead.name().equalsIgnoreCase(status)
      || LivyStateEnum.error.name().equalsIgnoreCase(status)
      || LivyStateEnum.shutting_down.name().equalsIgnoreCase(status) && retry < 5) {
    killApp(appId);
    Thread.sleep(1000);
    status = getAppState(appId);
    retry++;
  }

  if (Strings.isNullOrEmpty(status)) {
    logger.info(appId + " killed successfully");
    return 0;
  } else {
    logger.info(appId + " kill failed");
    return 1;
  }
}
Example #18
Source File: SparkExecutableLivy.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected ExecuteResult onResumed(String appId, ExecutableManager mgr) throws ExecuteException {
  Map<String, String> info = new HashMap<>();
  try {
    logger.info("livy spark_job_id:" + appId + " resumed");
    info.put(ExecutableConstants.SPARK_JOB_ID, appId);

    while (!isPaused() && !isDiscarded()) {
      String status = getAppState(appId);

      if (Strings.isNullOrEmpty(status)
          || LivyStateEnum.dead.name().equalsIgnoreCase(status)
          || LivyStateEnum.error.name().equalsIgnoreCase(status)
          || LivyStateEnum.shutting_down.name().equalsIgnoreCase(status)) {
        mgr.updateJobOutput(getId(), ExecutableState.ERROR, null, appId + " has failed");
        return new ExecuteResult(ExecuteResult.State.FAILED, appId + " has failed");
      }

      if (LivyStateEnum.success.name().equalsIgnoreCase(status)) {
        mgr.addJobInfo(getId(), info);
        return new ExecuteResult(ExecuteResult.State.SUCCEED, appId + " has finished");
      }

      Thread.sleep(5000);
    }

    killAppRetry(appId);

    if (isDiscarded()) {
      return new ExecuteResult(ExecuteResult.State.DISCARDED, appId + " is discarded");
    } else {
      return new ExecuteResult(ExecuteResult.State.STOPPED, appId + " is stopped");
    }
  } catch (Exception e) {
    logger.error("error run spark job:", e);
    return new ExecuteResult(ExecuteResult.State.ERROR, e.getLocalizedMessage());
  }
}
Example #19
Source File: SparkExecutableLivy.java From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected void onExecuteStart(ExecutableContext executableContext) {
  final Output output = getOutput();
  if (output.getExtra().containsKey(START_TIME)) {
    final String sparkJobID = output.getExtra().get(ExecutableConstants.SPARK_JOB_ID);
    if (sparkJobID == null) {
      getManager().updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
      return;
    }
    try {
      String status = getAppState(sparkJobID);
      if (Strings.isNullOrEmpty(status)
          || LivyStateEnum.dead.name().equalsIgnoreCase(status)
          || LivyStateEnum.error.name().equalsIgnoreCase(status)
          || LivyStateEnum.shutting_down.name().equalsIgnoreCase(status)) {
        // remove previous mr job info
        super.onExecuteStart(executableContext);
      } else {
        getManager().updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
      }
    } catch (IOException e) {
      logger.warn("error getting hadoop status");
      super.onExecuteStart(executableContext);
    }
  } else {
    super.onExecuteStart(executableContext);
  }
}
Example #20
Source File: KylinTestBase.java From kylin with Apache License 2.0
protected void execAndCompPlan(String queryFolder, String[] exclusiveQuerys, boolean needSort,
    ICompareQueryTranslator translator) throws Exception {
  logger.info("---------- test folder: " + new File(queryFolder).getAbsolutePath());
  Set<String> exclusiveSet = buildExclusiveSet(exclusiveQuerys);
  List<File> sqlFiles = getFilesFromFolder(new File(queryFolder), ".sql");
  for (File sqlFile : sqlFiles) {
    String queryName = StringUtils.split(sqlFile.getName(), '.')[0];
    if (exclusiveSet.contains(queryName)) {
      continue;
    }
    String sql1 = getTextFromFile(sqlFile);
    String sql2 = translator.transform(sqlFile);

    // execute Kylin
    logger.info("Query Result from Kylin - " + queryName + " (" + queryFolder + ")");
    IDatabaseConnection kylinConn = new DatabaseConnection(cubeConnection);
    ITable kylinTable = executeQuery(kylinConn, queryName, sql1, needSort);
    RelNode calcitePlan = (RelNode) QueryContextFacade.current().getCalcitePlan();
    if (calcitePlan == null) {
      throw new NullPointerException();
    }

    // execute H2
    logger.info("Query Result from H2 - " + queryName);
    long currentTime = System.currentTimeMillis();
    ITable h2Table = executeQuery(newH2Connection(), queryName, sql2, needSort);
    logger.info("H2 spent " + (System.currentTimeMillis() - currentTime) + " milliseconds.");

    try {
      // compare the result
      assertTableEquals(h2Table, kylinTable);
    } catch (Throwable t) {
      logger.info("execAndCompQuery failed on: " + sqlFile.getAbsolutePath());
      throw t;
    }

    RelToSqlConverter converter = new RelToSqlConverter(CALCITE);
    SqlNode sqlNode = converter.visitChild(0, calcitePlan.getInput(0)).asStatement();
    String optimizedSQL = sqlNode.toSqlString(CALCITE).getSql();
    String expectedSQL = Strings.join(Files.readLines(
        new File(sqlFile.getParent(), sqlFile.getName() + ".expected"), Charset.forName("utf-8")), "\n");
    Assert.assertEquals(expectedSQL, optimizedSQL);

    compQueryCount++;
    if (kylinTable.getRowCount() == 0) {
      zeroResultQueries.add(sql1);
    }
  }
}
Example #21
Source File: TestGlob.java From parquet-mr with Apache License 2.0
@Test
public void testNoGlobs() {
  assertEquals(Arrays.asList("foo"), Strings.expandGlob("foo"));
}
Example #22
Source File: HiveMetadataUtils.java From dremio-oss with Apache License 2.0
public static HiveStorageCapabilities getHiveStorageCapabilities(final StorageDescriptor storageDescriptor) {
  final String location = storageDescriptor.getLocation();

  if (null != location) {
    final URI uri;
    try {
      uri = URI.create(location);
    } catch (IllegalArgumentException e) {
      // unknown table source, default to HDFS.
      return HiveStorageCapabilities.DEFAULT_HDFS;
    }

    final String scheme = uri.getScheme();
    if (!Strings.isNullOrEmpty(scheme)) {
      if (scheme.regionMatches(true, 0, "s3", 0, 2)) {
        /* AWS S3 does not support impersonation, last modified times or orc split file ids. */
        return HiveStorageCapabilities.newBuilder()
            .supportsImpersonation(false)
            .supportsLastModifiedTime(false)
            .supportsOrcSplitFileIds(false)
            .build();
      } else if (scheme.regionMatches(true, 0, "wasb", 0, 4)
          || scheme.regionMatches(true, 0, "abfs", 0, 4)
          || scheme.regionMatches(true, 0, "wasbs", 0, 5)
          || scheme.regionMatches(true, 0, "abfss", 0, 5)) {
        /* DX-17365: Azure Storage does not report correct last modified times. Azure returns last
         * modified times, but the timestamps reference the folder's create time rather than the
         * folder content's last modified time. Please see Prototype.java for Azure storage fs uri schemes. */
        return HiveStorageCapabilities.newBuilder()
            .supportsImpersonation(true)
            .supportsLastModifiedTime(false)
            .supportsOrcSplitFileIds(true)
            .build();
      } else if (!scheme.regionMatches(true, 0, "hdfs", 0, 4)) {
        /* Most Hive-supported non-HDFS file systems allow impersonation and last modified times,
         * but not orc split file ids. */
        return HiveStorageCapabilities.newBuilder()
            .supportsImpersonation(true)
            .supportsLastModifiedTime(true)
            .supportsOrcSplitFileIds(false)
            .build();
      }
    }
  }

  // Default to HDFS.
  return HiveStorageCapabilities.DEFAULT_HDFS;
}
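The scheme tests rely on java.lang.String.regionMatches(boolean ignoreCase, int toffset, String other, int ooffset, int len), which here acts as a case-insensitive prefix check. A small sketch with an invented scheme value:

public class SchemePrefixDemo {
  public static void main(String[] args) {
    String scheme = "S3a";  // illustrative URI scheme
    // Compares the first two chars of scheme with "s3", ignoring case: true
    System.out.println(scheme.regionMatches(true, 0, "s3", 0, 2));
    // "hdfs" does not start with "s3": false
    System.out.println("hdfs".regionMatches(true, 0, "s3", 0, 2));
  }
}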
Example #23
Source File: TestGlob.java From parquet-mr with Apache License 2.0
@Test
public void testExtraBraces() {
  assertEquals(Arrays.asList("x", "y", "z"), Strings.expandGlob("{{x,y,z}}"));
  assertEquals(Arrays.asList("x", "y", "z"), Strings.expandGlob("{{{x,y,z}}}"));
  assertEquals(Arrays.asList("startx", "starta", "startb", "starty"), Strings.expandGlob("start{x,{a,b},y}"));
}
Example #24
Source File: KylinTestBase.java From kylin-on-parquet-v2 with Apache License 2.0
protected void execAndCompPlan(String queryFolder, String[] exclusiveQuerys, boolean needSort,
    ICompareQueryTranslator translator) throws Exception {
  logger.info("---------- test folder: " + new File(queryFolder).getAbsolutePath());
  Set<String> exclusiveSet = buildExclusiveSet(exclusiveQuerys);
  List<File> sqlFiles = getFilesFromFolder(new File(queryFolder), ".sql");
  for (File sqlFile : sqlFiles) {
    String queryName = StringUtils.split(sqlFile.getName(), '.')[0];
    if (exclusiveSet.contains(queryName)) {
      continue;
    }
    String sql1 = getTextFromFile(sqlFile);
    String sql2 = translator.transform(sqlFile);

    // execute Kylin
    logger.info("Query Result from Kylin - " + queryName + " (" + queryFolder + ")");
    IDatabaseConnection kylinConn = new DatabaseConnection(cubeConnection);
    ITable kylinTable = executeQuery(kylinConn, queryName, sql1, needSort);
    RelNode calcitePlan = (RelNode) QueryContextFacade.current().getCalcitePlan();
    if (calcitePlan == null) {
      throw new NullPointerException();
    }

    // execute H2
    logger.info("Query Result from H2 - " + queryName);
    long currentTime = System.currentTimeMillis();
    ITable h2Table = executeQuery(newH2Connection(), queryName, sql2, needSort);
    logger.info("H2 spent " + (System.currentTimeMillis() - currentTime) + " milliseconds.");

    try {
      // compare the result
      assertTableEquals(h2Table, kylinTable);
    } catch (Throwable t) {
      logger.info("execAndCompQuery failed on: " + sqlFile.getAbsolutePath());
      throw t;
    }

    RelToSqlConverter converter = new RelToSqlConverter(CALCITE);
    SqlNode sqlNode = converter.visitChild(0, calcitePlan.getInput(0)).asStatement();
    String optimizedSQL = sqlNode.toSqlString(CALCITE).getSql();
    String expectedSQL = Strings.join(Files.readLines(
        new File(sqlFile.getParent(), sqlFile.getName() + ".expected"), Charset.forName("utf-8")), "\n");
    Assert.assertEquals(expectedSQL, optimizedSQL);

    compQueryCount++;
    if (kylinTable.getRowCount() == 0) {
      zeroResultQueries.add(sql1);
    }
  }
}