Java Code Examples for org.apache.zeppelin.interpreter.Interpreter#interpret()
The following examples show how to use org.apache.zeppelin.interpreter.Interpreter#interpret(). All examples are taken from the Apache Zeppelin project; the source file is noted above each snippet.
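Every example below follows the same basic call pattern: obtain an Interpreter, build an InterpreterContext, pass a code snippet to interpret(), and inspect the returned InterpreterResult. The sketch below distills that pattern into a self-contained helper; the class name, the runOnce helper, and the note/paragraph ids are illustrative placeholders, not part of the Zeppelin API.

import java.util.Properties;
import org.apache.zeppelin.interpreter.Interpreter;
import org.apache.zeppelin.interpreter.InterpreterContext;
import org.apache.zeppelin.interpreter.InterpreterException;
import org.apache.zeppelin.interpreter.InterpreterResult;

public class InterpretUsageSketch {
  // Runs a single snippet through an already-constructed Interpreter and
  // prints the first result message on success.
  public static void runOnce(Interpreter interpreter, String code) throws InterpreterException {
    interpreter.open();  // must be called before the first interpret()
    InterpreterContext context = new InterpreterContext.Builder()
        .setNoteId("note1")             // placeholder ids, mirroring the tests below
        .setParagraphId("paragraph_1")
        .build();
    try {
      InterpreterResult result = interpreter.interpret(code, context);
      if (result.code() == InterpreterResult.Code.SUCCESS) {
        // message() is a list of result messages; getData() holds the rendered output
        System.out.println(result.message().get(0).getData());
      }
    } finally {
      interpreter.close();
    }
  }
}

In the tests that follow, the Interpreter and InterpreterContext are usually supplied by test fixtures such as interpreterFactory, interpreterSetting, and createDummyInterpreterContext() rather than built by hand.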
Example 1
Source File: RemoteInterpreterTest.java From zeppelin with Apache License 2.0
@Test
public void testConvertDynamicForms() throws InterpreterException {
  GUI gui = new GUI();
  OptionInput.ParamOption[] paramOptions = {
      new OptionInput.ParamOption("value1", "param1"),
      new OptionInput.ParamOption("value2", "param2")
  };
  List<Object> defaultValues = new ArrayList<>();
  defaultValues.add("default1");
  defaultValues.add("default2");
  gui.checkbox("checkbox_id", paramOptions, defaultValues);
  gui.select("select_id", paramOptions, "default");
  gui.textbox("textbox_id");
  Map<String, Input> expected = new LinkedHashMap<>(gui.getForms());
  Interpreter interpreter = interpreterSetting.getDefaultInterpreter("user1", "note1");
  InterpreterContext context = createDummyInterpreterContext();
  interpreter.interpret("text", context);
  assertArrayEquals(expected.values().toArray(), gui.getForms().values().toArray());
}
Example 2
Source File: RemoteInterpreterTest.java From zeppelin with Apache License 2.0
@Test
public void testFailToLaunchInterpreterProcess_InvalidRunner() {
  try {
    System.setProperty(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_REMOTE_RUNNER.getVarName(),
        "invalid_runner");
    final Interpreter interpreter1 = interpreterSetting.getInterpreter("user1", "note1", "sleep");
    final InterpreterContext context1 = createDummyInterpreterContext();
    // run this dummy interpret method first to launch the RemoteInterpreterProcess to avoid the
    // time overhead of launching the process.
    try {
      interpreter1.interpret("1", context1);
      fail("Should not be able to launch interpreter process");
    } catch (InterpreterException e) {
      assertTrue(ExceptionUtils.getStackTrace(e).contains("No such file or directory"));
    }
  } finally {
    System.clearProperty(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_REMOTE_RUNNER.getVarName());
  }
}
Example 3
Source File: RemoteInterpreterTest.java From zeppelin with Apache License 2.0
@Test
public void testFailToLaunchInterpreterProcess_ErrorInRunner() {
  try {
    System.setProperty(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_REMOTE_RUNNER.getVarName(),
        zeppelinHome.getAbsolutePath() + "/zeppelin-zengine/src/test/resources/bin/interpreter_invalid.sh");
    final Interpreter interpreter1 = interpreterSetting.getInterpreter("user1", "note1", "sleep");
    final InterpreterContext context1 = createDummyInterpreterContext();
    // run this dummy interpret method first to launch the RemoteInterpreterProcess to avoid the
    // time overhead of launching the process.
    try {
      interpreter1.interpret("1", context1);
      fail("Should not be able to launch interpreter process");
    } catch (InterpreterException e) {
      assertTrue(ExceptionUtils.getStackTrace(e).contains("invalid_command: command not found"));
    }
  } finally {
    System.clearProperty(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_REMOTE_RUNNER.getVarName());
  }
}
Example 4
Source File: RemoteInterpreterTest.java From zeppelin with Apache License 2.0
@Test
public void testFailToLaunchInterpreterProcess_Timeout() {
  try {
    System.setProperty(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_REMOTE_RUNNER.getVarName(),
        zeppelinHome.getAbsolutePath() + "/zeppelin-zengine/src/test/resources/bin/interpreter_timeout.sh");
    System.setProperty(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_CONNECT_TIMEOUT.getVarName(),
        "10000");
    final Interpreter interpreter1 = interpreterSetting.getInterpreter("user1", "note1", "sleep");
    final InterpreterContext context1 = createDummyInterpreterContext();
    // run this dummy interpret method first to launch the RemoteInterpreterProcess to avoid the
    // time overhead of launching the process.
    try {
      interpreter1.interpret("1", context1);
      fail("Should not be able to launch interpreter process");
    } catch (InterpreterException e) {
      assertTrue(ExceptionUtils.getStackTrace(e).contains("Interpreter Process creation is time out"));
    }
  } finally {
    System.clearProperty(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_REMOTE_RUNNER.getVarName());
    System.clearProperty(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_CONNECT_TIMEOUT.getVarName());
  }
}
Example 5
Source File: YarnInterpreterLauncherIntegrationTest.java From zeppelin with Apache License 2.0
@Test
public void testLaunchShellInYarn() throws YarnException, InterpreterException, InterruptedException {
  InterpreterSetting shellInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("sh");
  shellInterpreterSetting.setProperty("zeppelin.interpreter.launcher", "yarn");
  shellInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());

  Interpreter shellInterpreter = interpreterFactory.getInterpreter("sh",
      new ExecutionContextBuilder().setUser("user1").setNoteId("note1").setDefaultInterpreterGroup("sh").createExecutionContext());

  InterpreterContext context = new InterpreterContext.Builder()
      .setNoteId("note1")
      .setParagraphId("paragraph_1")
      .build();
  InterpreterResult interpreterResult = shellInterpreter.interpret("pwd", context);
  assertEquals(InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertTrue(interpreterResult.toString(), interpreterResult.message().get(0).getData().contains("/usercache/"));

  Thread.sleep(1000);
  // 1 yarn application launched
  GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
  GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
  assertEquals(1, response.getApplicationList().size());

  interpreterSettingManager.close();
}
Example 6
Source File: CassandraInterpreterTest.java From zeppelin with Apache License 2.0
@Test
@Ignore // TODO(n.a.) activate test when using Java 8 and C* 3.x
public void should_describe_materialized_view() throws Exception {
  // Given
  Properties properties = new Properties();
  properties.setProperty(CASSANDRA_HOSTS, "127.0.0.1");
  properties.setProperty(CASSANDRA_PORT, "9042");
  Interpreter interpreter = new CassandraInterpreter(properties);
  interpreter.open();

  final String query = "DESCRIBE MATERIALIZED VIEWS;";

  // When
  final InterpreterResult actual = interpreter.interpret(query, intrContext);

  // Then
  assertThat(actual.code()).isEqualTo(Code.SUCCESS);
}
Example 7
Source File: CassandraInterpreterTest.java From zeppelin with Apache License 2.0
@Test
@Ignore // TODO(n.a.) activate test when using Java 8 and C* 3.x
public void should_describe_aggregate() throws Exception {
  // Given
  Properties properties = new Properties();
  properties.setProperty(CASSANDRA_HOSTS, "127.0.0.1");
  properties.setProperty(CASSANDRA_PORT, "9042");
  Interpreter interpreter = new CassandraInterpreter(properties);
  interpreter.open();

  final String query = "DESCRIBE AGGREGATES;";

  // When
  final InterpreterResult actual = interpreter.interpret(query, intrContext);

  // Then
  assertThat(actual.code()).isEqualTo(Code.SUCCESS);
}
Example 8
Source File: RInterpreterTest.java From zeppelin with Apache License 2.0
@Test
public void testInvalidR() throws InterpreterException {
  tearDown();

  Properties properties = new Properties();
  properties.setProperty("zeppelin.R.cmd", "invalid_r");
  properties.setProperty("spark.master", "local");
  properties.setProperty("spark.app.name", "test");

  InterpreterGroup interpreterGroup = new InterpreterGroup();
  Interpreter rInterpreter = new LazyOpenInterpreter(new RInterpreter(properties));
  interpreterGroup.addInterpreterToSession(rInterpreter, "session_1");
  rInterpreter.setInterpreterGroup(interpreterGroup);

  InterpreterContext context = getInterpreterContext();
  InterpreterContext.set(context);

  try {
    rInterpreter.interpret("1+1", getInterpreterContext());
    fail("Should fail to open SparkRInterpreter");
  } catch (InterpreterException e) {
    String stacktrace = ExceptionUtils.getStackTrace(e);
    assertTrue(stacktrace, stacktrace.contains("No such file or directory"));
  }
}
Example 9
Source File: FlinkIntegrationTest.java From zeppelin with Apache License 2.0
private void testInterpreterBasics() throws IOException, InterpreterException {
  // test FlinkInterpreter
  Interpreter flinkInterpreter = interpreterFactory.getInterpreter("flink",
      new ExecutionContextBuilder().setUser("user1").setNoteId("note1").setDefaultInterpreterGroup("flink").createExecutionContext());

  InterpreterContext context = new InterpreterContext.Builder()
      .setNoteId("note1")
      .setParagraphId("paragraph_1")
      .build();
  InterpreterResult interpreterResult = flinkInterpreter.interpret("1+1", context);
  assertEquals(InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertTrue(interpreterResult.message().get(0).getData().contains("2"));

  interpreterResult = flinkInterpreter.interpret("val data = benv.fromElements(1, 2, 3)\ndata.collect()", context);
  assertEquals(InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertTrue(interpreterResult.message().get(0).getData().contains("1, 2, 3"));
}
Example 10
Source File: CassandraInterpreterTest.java From zeppelin with Apache License 2.0
@Test
@Ignore // TODO(n.a.) activate test when using Java 8 and C* 3.x
public void should_describe_function() throws Exception {
  // Given
  Properties properties = new Properties();
  properties.setProperty(CASSANDRA_HOSTS, "127.0.0.1");
  properties.setProperty(CASSANDRA_PORT, "9042");
  Interpreter interpreter = new CassandraInterpreter(properties);
  interpreter.open();

  String createFunction = "CREATE FUNCTION zeppelin.maxof(val1 int,val2 int) " +
      "RETURNS NULL ON NULL INPUT " +
      "RETURNS int " +
      "LANGUAGE java " +
      "AS $$" +
      " return Math.max(val1, val2);\n" +
      "$$;";
  interpreter.interpret(createFunction, intrContext);
  String query = "DESCRIBE FUNCTION zeppelin.maxOf;";

  // When
  final InterpreterResult actual = interpreter.interpret(query, intrContext);

  // Then
  assertThat(actual.code()).isEqualTo(Code.SUCCESS);
  assertThat(actual.message()).isEqualTo("xxxxx");
}
Example 11
Source File: IPyFlinkInterpreterTest.java From zeppelin with Apache License 2.0
public static void testStreamPyFlink(Interpreter interpreter, Interpreter flinkScalaInterpreter)
    throws InterpreterException, IOException {
  InterpreterContext context = createInterpreterContext();
  InterpreterResult result = interpreter.interpret(
      "import tempfile\n" +
      "import os\n" +
      "import shutil\n" +
      "sink_path = tempfile.gettempdir() + '/streaming.csv'\n" +
      "if os.path.exists(sink_path):\n" +
      "  if os.path.isfile(sink_path):\n" +
      "    os.remove(sink_path)\n" +
      "  else:\n" +
      "    shutil.rmtree(sink_path)\n" +
      "s_env.set_parallelism(1)\n" +
      "t = st_env.from_elements([(1, 'hi', 'hello'), (2, 'hi', 'hello')], ['a', 'b', 'c'])\n" +
      "st_env.connect(FileSystem().path(sink_path)) \\\n" +
      "  .with_format(OldCsv()\n" +
      "    .field_delimiter(',')\n" +
      "    .field(\"a\", DataTypes.BIGINT())\n" +
      "    .field(\"b\", DataTypes.STRING())\n" +
      "    .field(\"c\", DataTypes.STRING())) \\\n" +
      "  .with_schema(Schema()\n" +
      "    .field(\"a\", DataTypes.BIGINT())\n" +
      "    .field(\"b\", DataTypes.STRING())\n" +
      "    .field(\"c\", DataTypes.STRING())) \\\n" +
      "  .create_temporary_table(\"stream_sink\")\n" +
      "t.select(\"a + 1, b, c\").insert_into(\"stream_sink\")\n" +
      "st_env.execute(\"stream_job\")",
      context);
  assertEquals(context.out.toString(), InterpreterResult.Code.SUCCESS, result.code());
}
Example 12
Source File: SparkRInterpreterTest.java From zeppelin with Apache License 2.0
@Test
public void testInvalidR() throws InterpreterException {
  tearDown();

  Properties properties = new Properties();
  properties.setProperty("zeppelin.R.cmd", "invalid_r");
  properties.setProperty(SparkStringConstants.MASTER_PROP_NAME, "local");
  properties.setProperty(SparkStringConstants.APP_NAME_PROP_NAME, "test");

  InterpreterGroup interpreterGroup = new InterpreterGroup();
  Interpreter sparkRInterpreter = new LazyOpenInterpreter(new SparkRInterpreter(properties));
  Interpreter sparkInterpreter = new LazyOpenInterpreter(new SparkInterpreter(properties));
  interpreterGroup.addInterpreterToSession(sparkRInterpreter, "session_1");
  interpreterGroup.addInterpreterToSession(sparkInterpreter, "session_1");
  sparkRInterpreter.setInterpreterGroup(interpreterGroup);
  sparkInterpreter.setInterpreterGroup(interpreterGroup);

  InterpreterContext context = getInterpreterContext();
  InterpreterContext.set(context);

  try {
    sparkRInterpreter.interpret("1+1", getInterpreterContext());
    fail("Should fail to open SparkRInterpreter");
  } catch (InterpreterException e) {
    String stacktrace = ExceptionUtils.getStackTrace(e);
    assertTrue(stacktrace, stacktrace.contains("No such file or directory"));
  }
}
Example 13
Source File: RemoteInterpreterTest.java From zeppelin with Apache License 2.0
@Test
public void should_push_local_angular_repo_to_remote() throws Exception {
  final AngularObjectRegistry registry = new AngularObjectRegistry("spark", null);
  registry.add("name_1", "value_1", "note_1", "paragraphId_1");
  registry.add("name_2", "value_2", "node_2", "paragraphId_2");
  Interpreter interpreter = interpreterSetting.getInterpreter("user1", "note1", "angular_obj");
  interpreter.getInterpreterGroup().setAngularObjectRegistry(registry);

  final InterpreterContext context = createDummyInterpreterContext();
  InterpreterResult result = interpreter.interpret("dummy", context);
  assertEquals(Code.SUCCESS, result.code());
  assertEquals("2", result.message().get(0).getData());
}
Example 14
Source File: JdbcIntegrationTest.java From zeppelin with Apache License 2.0
@Test
public void testMySql() throws InterpreterException, InterruptedException {
  InterpreterSetting interpreterSetting = interpreterSettingManager.getInterpreterSettingByName("jdbc");
  interpreterSetting.setProperty("default.driver", "com.mysql.jdbc.Driver");
  interpreterSetting.setProperty("default.url", "jdbc:mysql://localhost:3306/");
  interpreterSetting.setProperty("default.user", "root");

  Dependency dependency = new Dependency("mysql:mysql-connector-java:5.1.46");
  interpreterSetting.setDependencies(Lists.newArrayList(dependency));
  interpreterSettingManager.restart(interpreterSetting.getId());
  interpreterSetting.waitForReady(60 * 1000);

  Interpreter jdbcInterpreter = interpreterFactory.getInterpreter("jdbc",
      new ExecutionContextBuilder().setUser("user1").setNoteId("note1").setDefaultInterpreterGroup("test").createExecutionContext());
  assertNotNull("JdbcInterpreter is null", jdbcInterpreter);

  InterpreterContext context = new InterpreterContext.Builder()
      .setNoteId("note1")
      .setParagraphId("paragraph_1")
      .setAuthenticationInfo(AuthenticationInfo.ANONYMOUS)
      .build();
  InterpreterResult interpreterResult = jdbcInterpreter.interpret("show databases;", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());

  context.getLocalProperties().put("saveAs", "table_1");
  interpreterResult = jdbcInterpreter.interpret("SELECT 1 as c1, 2 as c2;", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertEquals(1, interpreterResult.message().size());
  assertEquals(InterpreterResult.Type.TABLE, interpreterResult.message().get(0).getType());
  assertEquals("c1\tc2\n1\t2\n", interpreterResult.message().get(0).getData());

  // read table_1 from python interpreter
  Interpreter pythonInterpreter = interpreterFactory.getInterpreter("python",
      new ExecutionContextBuilder().setUser("user1").setNoteId("note1").setDefaultInterpreterGroup("test").createExecutionContext());
  assertNotNull("PythonInterpreter is null", pythonInterpreter);

  context = new InterpreterContext.Builder()
      .setNoteId("note1")
      .setParagraphId("paragraph_1")
      .setAuthenticationInfo(AuthenticationInfo.ANONYMOUS)
      .build();
  interpreterResult = pythonInterpreter.interpret("df=z.getAsDataFrame('table_1')\nz.show(df)", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertEquals(1, interpreterResult.message().size());
  assertEquals(InterpreterResult.Type.TABLE, interpreterResult.message().get(0).getType());
  assertEquals("c1\tc2\n1\t2\n", interpreterResult.message().get(0).getData());
}
Example 15
Source File: SparkIntegrationTest.java From zeppelin with Apache License 2.0
private void testInterpreterBasics() throws IOException, InterpreterException, XmlPullParserException {
  // add jars & packages for testing
  InterpreterSetting sparkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("spark");
  sparkInterpreterSetting.setProperty("spark.jars.packages", "com.maxmind.geoip2:geoip2:2.5.0");
  sparkInterpreterSetting.setProperty("SPARK_PRINT_LAUNCH_COMMAND", "true");

  MavenXpp3Reader reader = new MavenXpp3Reader();
  Model model = reader.read(new FileReader("pom.xml"));
  sparkInterpreterSetting.setProperty("spark.jars",
      new File("target/zeppelin-interpreter-integration-" + model.getVersion() + ".jar").getAbsolutePath());

  // test SparkInterpreter
  Interpreter sparkInterpreter = interpreterFactory.getInterpreter("spark.spark",
      new ExecutionContextBuilder().setUser("user1").setNoteId("note1").setDefaultInterpreterGroup("test").createExecutionContext());

  InterpreterContext context = new InterpreterContext.Builder()
      .setNoteId("note1")
      .setParagraphId("paragraph_1")
      .build();
  InterpreterResult interpreterResult = sparkInterpreter.interpret("sc.version", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
  String detectedSparkVersion = interpreterResult.message().get(0).getData();
  assertTrue(detectedSparkVersion + " doesn't contain " + this.sparkVersion, detectedSparkVersion.contains(this.sparkVersion));

  interpreterResult = sparkInterpreter.interpret("sc.range(1,10).sum()", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertTrue(interpreterResult.toString(), interpreterResult.message().get(0).getData().contains("45"));

  // test jars & packages can be loaded correctly
  interpreterResult = sparkInterpreter.interpret("import org.apache.zeppelin.interpreter.integration.DummyClass\n" +
      "import com.maxmind.geoip2._", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());

  // test PySparkInterpreter
  Interpreter pySparkInterpreter = interpreterFactory.getInterpreter("spark.pyspark",
      new ExecutionContextBuilder().setUser("user1").setNoteId("note1").setDefaultInterpreterGroup("test").createExecutionContext());
  interpreterResult = pySparkInterpreter.interpret("sqlContext.createDataFrame([(1,'a'),(2,'b')], ['id','name']).registerTempTable('test')", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());

  // test IPySparkInterpreter
  Interpreter ipySparkInterpreter = interpreterFactory.getInterpreter("spark.ipyspark",
      new ExecutionContextBuilder().setUser("user1").setNoteId("note1").setDefaultInterpreterGroup("test").createExecutionContext());
  interpreterResult = ipySparkInterpreter.interpret("sqlContext.table('test').show()", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());

  // test SparkSQLInterpreter
  Interpreter sqlInterpreter = interpreterFactory.getInterpreter("spark.sql",
      new ExecutionContextBuilder().setUser("user1").setNoteId("note1").setDefaultInterpreterGroup("test").createExecutionContext());
  interpreterResult = sqlInterpreter.interpret("select count(1) as c from test", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertEquals(interpreterResult.toString(), InterpreterResult.Type.TABLE, interpreterResult.message().get(0).getType());
  assertEquals(interpreterResult.toString(), "c\n2\n", interpreterResult.message().get(0).getData());

  // test SparkRInterpreter
  Interpreter sparkrInterpreter = interpreterFactory.getInterpreter("spark.r",
      new ExecutionContextBuilder().setUser("user1").setNoteId("note1").setDefaultInterpreterGroup("test").createExecutionContext());
  if (isSpark2() || isSpark3()) {
    interpreterResult = sparkrInterpreter.interpret("df <- as.DataFrame(faithful)\nhead(df)", context);
  } else {
    interpreterResult = sparkrInterpreter.interpret("df <- createDataFrame(sqlContext, faithful)\nhead(df)", context);
  }
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertEquals(interpreterResult.toString(), InterpreterResult.Type.TEXT, interpreterResult.message().get(0).getType());
  assertTrue(interpreterResult.toString(), interpreterResult.message().get(0).getData().contains("eruptions waiting"));
}
Example 16
Source File: YarnInterpreterLauncherIntegrationTest.java From zeppelin with Apache License 2.0
@Test
public void testJdbcPython_YarnLauncher() throws InterpreterException, YarnException, InterruptedException {
  InterpreterSetting jdbcInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("jdbc");
  jdbcInterpreterSetting.setProperty("default.driver", "com.mysql.jdbc.Driver");
  jdbcInterpreterSetting.setProperty("default.url", "jdbc:mysql://localhost:3306/");
  jdbcInterpreterSetting.setProperty("default.user", "root");
  jdbcInterpreterSetting.setProperty("zeppelin.interpreter.launcher", "yarn");
  jdbcInterpreterSetting.setProperty("zeppelin.interpreter.yarn.resource.memory", "512");
  jdbcInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());

  Dependency dependency = new Dependency("mysql:mysql-connector-java:5.1.46");
  jdbcInterpreterSetting.setDependencies(Lists.newArrayList(dependency));
  interpreterSettingManager.restart(jdbcInterpreterSetting.getId());
  jdbcInterpreterSetting.waitForReady(60 * 1000);

  InterpreterSetting pythonInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("python");
  pythonInterpreterSetting.setProperty("zeppelin.interpreter.launcher", "yarn");
  pythonInterpreterSetting.setProperty("zeppelin.interpreter.yarn.resource.memory", "512");
  pythonInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());

  Interpreter jdbcInterpreter = interpreterFactory.getInterpreter("jdbc",
      new ExecutionContextBuilder().setUser("user1").setNoteId("note1").setDefaultInterpreterGroup("test").createExecutionContext());
  assertNotNull("JdbcInterpreter is null", jdbcInterpreter);

  InterpreterContext context = new InterpreterContext.Builder()
      .setNoteId("note1")
      .setParagraphId("paragraph_1")
      .setAuthenticationInfo(AuthenticationInfo.ANONYMOUS)
      .build();
  InterpreterResult interpreterResult = jdbcInterpreter.interpret("show databases;", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());

  context.getLocalProperties().put("saveAs", "table_1");
  interpreterResult = jdbcInterpreter.interpret("SELECT 1 as c1, 2 as c2;", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertEquals(1, interpreterResult.message().size());
  assertEquals(InterpreterResult.Type.TABLE, interpreterResult.message().get(0).getType());
  assertEquals("c1\tc2\n1\t2\n", interpreterResult.message().get(0).getData());

  // read table_1 from python interpreter
  Interpreter pythonInterpreter = interpreterFactory.getInterpreter("python",
      new ExecutionContextBuilder().setUser("user1").setNoteId("note1").setDefaultInterpreterGroup("test").createExecutionContext());
  assertNotNull("PythonInterpreter is null", pythonInterpreter);

  context = new InterpreterContext.Builder()
      .setNoteId("note1")
      .setParagraphId("paragraph_1")
      .setAuthenticationInfo(AuthenticationInfo.ANONYMOUS)
      .build();
  interpreterResult = pythonInterpreter.interpret("df=z.getAsDataFrame('table_1')\nz.show(df)", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertEquals(1, interpreterResult.message().size());
  assertEquals(InterpreterResult.Type.TABLE, interpreterResult.message().get(0).getType());
  assertEquals("c1\tc2\n1\t2\n", interpreterResult.message().get(0).getData());

  // 2 yarn application launched
  GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
  GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
  assertEquals(2, response.getApplicationList().size());

  interpreterSettingManager.close();

  // sleep for 5 seconds to make sure yarn apps are finished
  Thread.sleep(5 * 1000);
  request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
  response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
  assertEquals(0, response.getApplicationList().size());
}
Example 17
Source File: KSQLInterpreterTest.java From zeppelin with Apache License 2.0
@Test
public void shouldRenderKSQLSelectAsTable() throws InterpreterException, IOException, InterruptedException {
  // given
  Properties p = new Properties();
  p.putAll(PROPS);
  KSQLRestService service = Mockito.mock(KSQLRestService.class);
  Stubber stubber = Mockito.doAnswer((invocation) -> {
    Consumer<KSQLResponse> callback = (Consumer<KSQLResponse>) invocation.getArguments()[2];
    IntStream.range(1, 5)
        .forEach(i -> {
          Map<String, Object> map = new HashMap<>();
          if (i == 4) {
            map.put("row", null);
            map.put("terminal", true);
          } else {
            map.put("row", Collections.singletonMap("columns", Arrays.asList("value " + i)));
            map.put("terminal", false);
          }
          callback.accept(new KSQLResponse(Arrays.asList("fieldName"), map));
          try {
            Thread.sleep(3000);
          } catch (InterruptedException e) {
            e.printStackTrace();
          }
        });
    return null;
  });
  stubber.when(service).executeQuery(Mockito.any(String.class), Mockito.anyString(), Mockito.any(Consumer.class));
  Interpreter interpreter = new KSQLInterpreter(p, service);

  // when
  String query = "select * from orders";
  interpreter.interpret(query, context);

  // then
  String expected = "%table fieldName\n" +
      "value 1\n" +
      "value 2\n" +
      "value 3\n";
  assertEquals(1, context.out.toInterpreterResultMessage().size());
  assertEquals(expected, context.out.toInterpreterResultMessage().get(0).toString());
  assertEquals(InterpreterResult.Type.TABLE, context.out.toInterpreterResultMessage().get(0).getType());
  interpreter.close();
}
Example 18
Source File: KSQLInterpreterTest.java From zeppelin with Apache License 2.0
@Test
public void shouldRenderKSQLNonSelectAsTable() throws InterpreterException, IOException, InterruptedException {
  // given
  Properties p = new Properties();
  p.putAll(PROPS);
  KSQLRestService service = Mockito.mock(KSQLRestService.class);
  Map<String, Object> row1 = new HashMap<>();
  row1.put("name", "orders");
  row1.put("registered", "false");
  row1.put("replicaInfo", "[1]");
  row1.put("consumerCount", "0");
  row1.put("consumerGroupCount", "0");
  Map<String, Object> row2 = new HashMap<>();
  row2.put("name", "orders");
  row2.put("registered", "false");
  row2.put("replicaInfo", "[1]");
  row2.put("consumerCount", "0");
  row2.put("consumerGroupCount", "0");
  Stubber stubber = Mockito.doAnswer((invocation) -> {
    Consumer<KSQLResponse> callback = (Consumer<KSQLResponse>) invocation.getArguments()[2];
    callback.accept(new KSQLResponse(row1));
    callback.accept(new KSQLResponse(row2));
    return null;
  });
  stubber.when(service).executeQuery(Mockito.any(String.class), Mockito.anyString(), Mockito.any(Consumer.class));
  Interpreter interpreter = new KSQLInterpreter(p, service);

  // when
  String query = "show topics";
  interpreter.interpret(query, context);

  // then
  List<Map<String, Object>> expected = Arrays.asList(row1, row2);
  String[] lines = context.out.toInterpreterResultMessage()
      .get(0).toString()
      .replace("%table ", "")
      .trim()
      .split("\n");
  List<String[]> rows = Stream.of(lines)
      .map(line -> line.split("\t"))
      .collect(Collectors.toList());
  List<Map<String, String>> actual = rows.stream()
      .skip(1)
      .map(row -> IntStream.range(0, row.length)
          .mapToObj(index -> new AbstractMap.SimpleEntry<>(rows.get(0)[index], row[index]))
          .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue())))
      .collect(Collectors.toList());
  assertEquals(1, context.out.toInterpreterResultMessage().size());
  assertEquals(expected, actual);
  assertEquals(InterpreterResult.Type.TABLE, context.out.toInterpreterResultMessage().get(0).getType());
}