Java Code Examples for org.apache.sqoop.model.MJob#getJobConfig()
The following examples show how to use org.apache.sqoop.model.MJob#getJobConfig().
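As a quick orientation before the examples: getJobConfig(Direction) returns the MConfigList holding either the FROM-side or the TO-side job configuration, and individual inputs are then looked up by name and set before the job is saved. The sketch below is a minimal, hypothetical illustration of that pattern; it assumes a SqoopClient instance named client and existing link ids fromLinkId and toLinkId for the generic-jdbc and hdfs connectors, and it reuses input names that also appear in the examples that follow.

// Minimal sketch of the MJob#getJobConfig(Direction) pattern. The client variable,
// link ids, table name, and output directory are illustrative assumptions,
// not taken from any single example below.
MJob job = client.createJob(fromLinkId, toLinkId);

// FROM side: generic-jdbc connector inputs
MConfigList fromConfig = job.getJobConfig(Direction.FROM);
fromConfig.getStringInput("fromJobConfig.tableName").setValue("cities");
fromConfig.getStringInput("fromJobConfig.partitionColumn").setValue("id");

// TO side: hdfs connector inputs
MConfigList toConfig = job.getJobConfig(Direction.TO);
toConfig.getEnumInput("toJobConfig.outputFormat").setValue(ToFormat.TEXT_FILE);
toConfig.getStringInput("toJobConfig.outputDirectory").setValue("/tmp/sqoop-output");

// Persist and run the job through the client
client.saveJob(job);
client.startJob(job.getPersistenceId());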
Example 1
Source File: KafkaConnectorTestCase.java (from sqoop-on-spark, Apache License 2.0)
protected void fillKafkaToConfig(MJob job) {
  // Point the Kafka "TO" config at the test topic
  MConfigList toConfig = job.getJobConfig(Direction.TO);
  toConfig.getStringInput("toJobConfig.topic").setValue(TOPIC);

  // Make the topic available to the test Kafka setup
  List<String> topics = new ArrayList<String>(1);
  topics.add(TOPIC);
  testUtil.initTopicList(topics);
}
Example 2
Source File: SparkJobTest.java (from sqoop-on-spark, Apache License 2.0)
@Test
public void test() throws Exception {
  createAndLoadTableCities();

  // RDBMS link
  MLink rdbmsConnection = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsConnection);
  saveLink(rdbmsConnection);

  // HDFS link
  MLink hdfsConnection = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsConnection);
  saveLink(hdfsConnection);

  // Job creation
  MJob job = getClient().createJob(rdbmsConnection.getPersistenceId(), hdfsConnection.getPersistenceId());

  // Set rdbms "FROM" config
  fillRdbmsFromConfig(job, "id");

  // Fill the hdfs "TO" config
  fillHdfsToConfig(job, ToFormat.TEXT_FILE);
  MConfigList toConfig = job.getJobConfig(Direction.TO);
  toConfig.getBooleanInput("toJobConfig.appendMode").setValue(true);

  saveJob(job);

  // First execution
  executeJob(job);
}
Example 3
Source File: FromRDBMSToKiteHiveTest.java (from sqoop-on-spark, Apache License 2.0)
@Test
public void testCities() throws Exception {
  // Job creation
  MJob job = getClient().createJob(rdbmsLink.getPersistenceId(), kiteLink.getPersistenceId());

  // Set rdbms "FROM" config
  MConfigList fromConfig = job.getJobConfig(Direction.FROM);
  fillRdbmsFromConfig(job, "id");
  // TODO: Kite has trouble with some data types, so we're limiting the columns to int only
  fromConfig.getStringInput("fromJobConfig.columns").setValue(provider.escapeColumnName("id"));

  // Fill the Kite "TO" config
  MConfigList toConfig = job.getJobConfig(Direction.TO);
  toConfig.getStringInput("toJobConfig.uri").setValue("dataset:hive:testtable");
  toConfig.getEnumInput("toJobConfig.fileFormat").setValue(FileFormat.AVRO);

  // driver config
  MDriverConfig driverConfig = job.getDriverConfig();
  driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(1);

  saveJob(job);
  executeJob(job);

  // Assert correct output
  ProviderAsserts.assertRow(hiveProvider, new TableName("testtable"), new Object[]{"id", 1}, "1");
  ProviderAsserts.assertRow(hiveProvider, new TableName("testtable"), new Object[]{"id", 2}, "2");
  ProviderAsserts.assertRow(hiveProvider, new TableName("testtable"), new Object[]{"id", 3}, "3");
  ProviderAsserts.assertRow(hiveProvider, new TableName("testtable"), new Object[]{"id", 4}, "4");

  hiveProvider.dropTable(new TableName("testtable"));
}
Example 4
Source File: FromRDBMSToKiteTest.java (from sqoop-on-spark, Apache License 2.0)
@Test
public void testCities() throws Exception {
  // RDBMS link
  MLink rdbmsLink = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsLink);
  saveLink(rdbmsLink);

  // Kite link
  MLink kiteLink = getClient().createLink("kite-connector");
  kiteLink.getConnectorLinkConfig().getStringInput("linkConfig.authority").setValue(hdfsClient.getUri().getAuthority());
  saveLink(kiteLink);

  // Job creation
  MJob job = getClient().createJob(rdbmsLink.getPersistenceId(), kiteLink.getPersistenceId());

  // Set rdbms "FROM" config
  fillRdbmsFromConfig(job, "id");
  // TODO: Kite has trouble with some data types, so we're limiting the columns to int only
  MConfigList fromConfig = job.getJobConfig(Direction.FROM);
  fromConfig.getStringInput("fromJobConfig.columns").setValue(provider.escapeColumnName("id"));

  // Fill the Kite "TO" config
  MConfigList toConfig = job.getJobConfig(Direction.TO);
  toConfig.getStringInput("toJobConfig.uri").setValue("dataset:hdfs:" + getHadoopTestDirectory());
  toConfig.getEnumInput("toJobConfig.fileFormat").setValue(FileFormat.CSV);

  // driver config
  MDriverConfig driverConfig = job.getDriverConfig();
  driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(1);

  saveJob(job);
  executeJob(job);

  // Assert correct output
  assertHdfsTo(
      "\"1\"",
      "\"2\"",
      "\"3\"",
      "\"4\""
  );
}
Example 5
Source File: SqoopJDBCHDFSJob.java (from sqoop-on-spark, Apache License 2.0)
public static void main(String[] args) throws Exception {
  final SqoopSparkJob sparkJob = new SqoopSparkJob();
  CommandLine cArgs = SqoopSparkJob.parseArgs(createOptions(), args);
  SparkConf conf = sparkJob.init(cArgs);
  JavaSparkContext context = new JavaSparkContext(conf);

  MConnector fromConnector = RepositoryManager.getInstance().getRepository()
      .findConnector("generic-jdbc-connector");
  MConnector toConnector = RepositoryManager.getInstance().getRepository()
      .findConnector("hdfs-connector");

  MLinkConfig fromLinkConfig = fromConnector.getLinkConfig();
  MLinkConfig toLinkConfig = toConnector.getLinkConfig();

  MLink fromLink = new MLink(fromConnector.getPersistenceId(), fromLinkConfig);
  fromLink.setName("jdbcLink-" + System.currentTimeMillis());
  fromLink.getConnectorLinkConfig().getStringInput("linkConfig.jdbcDriver")
      .setValue("com.mysql.jdbc.Driver");
  fromLink.getConnectorLinkConfig().getStringInput("linkConfig.connectionString")
      .setValue(cArgs.getOptionValue("jdbcString"));
  fromLink.getConnectorLinkConfig().getStringInput("linkConfig.username")
      .setValue(cArgs.getOptionValue("u"));
  fromLink.getConnectorLinkConfig().getStringInput("linkConfig.password")
      .setValue(cArgs.getOptionValue("p"));
  RepositoryManager.getInstance().getRepository().createLink(fromLink);

  MLink toLink = new MLink(toConnector.getPersistenceId(), toLinkConfig);
  toLink.setName("hdfsLink-" + System.currentTimeMillis());
  toLink.getConnectorLinkConfig().getStringInput("linkConfig.confDir")
      .setValue(cArgs.getOptionValue("outputDir"));
  RepositoryManager.getInstance().getRepository().createLink(toLink);

  MFromConfig fromJobConfig = fromConnector.getFromConfig();
  MToConfig toJobConfig = toConnector.getToConfig();

  MJob sqoopJob = new MJob(fromConnector.getPersistenceId(), toConnector.getPersistenceId(),
      fromLink.getPersistenceId(), toLink.getPersistenceId(), fromJobConfig, toJobConfig,
      Driver.getInstance().getDriver().getDriverConfig());

  MConfigList fromConfig = sqoopJob.getJobConfig(Direction.FROM);
  fromConfig.getStringInput("fromJobConfig.tableName").setValue(cArgs.getOptionValue("table"));
  fromConfig.getStringInput("fromJobConfig.partitionColumn").setValue(
      cArgs.getOptionValue("paritionCol"));

  MToConfig toConfig = sqoopJob.getToJobConfig();
  toConfig.getStringInput("toJobConfig.outputDirectory").setValue(
      cArgs.getOptionValue("outputDir") + System.currentTimeMillis());

  MDriverConfig driverConfig = sqoopJob.getDriverConfig();
  if (cArgs.getOptionValue("numE") != null) {
    driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(
        Integer.valueOf(cArgs.getOptionValue("numE")));
  }
  if (cArgs.getOptionValue("numL") != null) {
    driverConfig.getIntegerInput("throttlingConfig.numLoaders").setValue(
        Integer.valueOf(cArgs.getOptionValue("numL")));
  }

  RepositoryManager.getInstance().getRepository().createJob(sqoopJob);
  sparkJob.setJob(sqoopJob);
  sparkJob.execute(conf, context);
}
Example 6
Source File: AppendModeTest.java (from sqoop-on-spark, Apache License 2.0)
@Test
public void test() throws Exception {
  createAndLoadTableCities();

  // RDBMS link
  MLink rdbmsConnection = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsConnection);
  saveLink(rdbmsConnection);

  // HDFS link
  MLink hdfsConnection = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsConnection);
  saveLink(hdfsConnection);

  // Job creation
  MJob job = getClient().createJob(rdbmsConnection.getPersistenceId(), hdfsConnection.getPersistenceId());

  // Set rdbms "FROM" config
  fillRdbmsFromConfig(job, "id");

  // Fill the hdfs "TO" config
  fillHdfsToConfig(job, ToFormat.TEXT_FILE);
  MConfigList toConfig = job.getJobConfig(Direction.TO);
  toConfig.getBooleanInput("toJobConfig.appendMode").setValue(true);

  saveJob(job);

  // First execution
  executeJob(job);
  assertHdfsTo(
      "1,'USA','2004-10-23','San Francisco'",
      "2,'USA','2004-10-24','Sunnyvale'",
      "3,'Czech Republic','2004-10-25','Brno'",
      "4,'USA','2004-10-26','Palo Alto'"
  );

  // Second execution
  executeJob(job);
  assertHdfsTo(
      "1,'USA','2004-10-23','San Francisco'",
      "2,'USA','2004-10-24','Sunnyvale'",
      "3,'Czech Republic','2004-10-25','Brno'",
      "4,'USA','2004-10-26','Palo Alto'",
      "1,'USA','2004-10-23','San Francisco'",
      "2,'USA','2004-10-24','Sunnyvale'",
      "3,'Czech Republic','2004-10-25','Brno'",
      "4,'USA','2004-10-26','Palo Alto'"
  );

  dropTable();
}
Example 7
Source File: FromRDBMSToHDFSTest.java (from sqoop-on-spark, Apache License 2.0)
@Test
public void testDuplicateColumns() throws Exception {
  createAndLoadTableCities();

  // RDBMS link
  MLink rdbmsLink = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsLink);
  saveLink(rdbmsLink);

  // HDFS link
  MLink hdfsLink = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsLink);
  saveLink(hdfsLink);

  // Job creation
  MJob job = getClient().createJob(rdbmsLink.getPersistenceId(), hdfsLink.getPersistenceId());

  // Connector values
  String partitionColumn = provider.escapeTableName(getTableName().getTableName()) + "."
      + provider.escapeColumnName("id");
  MConfigList configs = job.getJobConfig(Direction.FROM);
  configs.getStringInput("fromJobConfig.sql").setValue(
      "SELECT " + provider.escapeColumnName("id") + " as " + provider.escapeColumnName("i") + ", "
          + provider.escapeColumnName("id") + " as " + provider.escapeColumnName("j")
          + " FROM " + provider.escapeTableName(getTableName().getTableName())
          + " WHERE ${CONDITIONS}");
  configs.getStringInput("fromJobConfig.partitionColumn").setValue(partitionColumn);
  configs.getStringInput("fromJobConfig.boundaryQuery").setValue(
      "SELECT MIN(" + partitionColumn + "), MAX(" + partitionColumn + ") FROM "
          + provider.escapeTableName(getTableName().getTableName()));
  fillHdfsToConfig(job, ToFormat.TEXT_FILE);
  saveJob(job);

  MSubmission submission = getClient().startJob(job.getPersistenceId());
  assertTrue(submission.getStatus().isRunning());

  // Wait until the job finishes - this active waiting will be removed once
  // the Sqoop client API gets blocking support.
  do {
    Thread.sleep(5000);
    submission = getClient().getJobStatus(job.getPersistenceId());
  } while (submission.getStatus().isRunning());

  // Assert correct output
  assertHdfsTo(
      "1,1",
      "2,2",
      "3,3",
      "4,4"
  );

  // Clean up testing table
  dropTable();
}
Example 8
Source File: FromRDBMSToHDFSTest.java (from sqoop-on-spark, Apache License 2.0)
@Test
public void testSql() throws Exception {
  createAndLoadTableCities();

  // RDBMS link
  MLink rdbmsLink = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsLink);
  saveLink(rdbmsLink);

  // HDFS link
  MLink hdfsLink = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsLink);
  saveLink(hdfsLink);

  // Job creation
  MJob job = getClient().createJob(rdbmsLink.getPersistenceId(), hdfsLink.getPersistenceId());

  // Connector values
  MConfigList configs = job.getJobConfig(Direction.FROM);
  configs.getStringInput("fromJobConfig.sql").setValue(
      "SELECT " + provider.escapeColumnName("id") + " FROM "
          + provider.escapeTableName(getTableName().getTableName()) + " WHERE ${CONDITIONS}");
  configs.getStringInput("fromJobConfig.partitionColumn").setValue(provider.escapeColumnName("id"));
  fillHdfsToConfig(job, ToFormat.TEXT_FILE);
  saveJob(job);

  MSubmission submission = getClient().startJob(job.getPersistenceId());
  assertTrue(submission.getStatus().isRunning());

  // Wait until the job finishes - this active waiting will be removed once
  // the Sqoop client API gets blocking support.
  do {
    Thread.sleep(5000);
    submission = getClient().getJobStatus(job.getPersistenceId());
  } while (submission.getStatus().isRunning());

  // Assert correct output
  assertHdfsTo(
      "1",
      "2",
      "3",
      "4"
  );

  // Clean up testing table
  dropTable();
}
Example 9
Source File: FromRDBMSToHDFSTest.java (from sqoop-on-spark, Apache License 2.0)
@Test
public void testColumns() throws Exception {
  createAndLoadTableCities();

  // RDBMS link
  MLink rdbmsLink = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsLink);
  saveLink(rdbmsLink);

  // HDFS link
  MLink hdfsLink = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsLink);
  saveLink(hdfsLink);

  // Job creation
  MJob job = getClient().createJob(rdbmsLink.getPersistenceId(), hdfsLink.getPersistenceId());

  // Connector values
  fillRdbmsFromConfig(job, "id");
  MConfigList configs = job.getJobConfig(Direction.FROM);
  configs.getStringInput("fromJobConfig.columns").setValue(
      provider.escapeColumnName("id") + "," + provider.escapeColumnName("country"));
  fillHdfsToConfig(job, ToFormat.TEXT_FILE);
  saveJob(job);

  MSubmission submission = getClient().startJob(job.getPersistenceId());
  assertTrue(submission.getStatus().isRunning());

  // Wait until the job finishes - this active waiting will be removed once
  // the Sqoop client API gets blocking support.
  do {
    Thread.sleep(5000);
    submission = getClient().getJobStatus(job.getPersistenceId());
  } while (submission.getStatus().isRunning());

  // Assert correct output
  assertHdfsTo(
      "1,'USA'",
      "2,'USA'",
      "3,'Czech Republic'",
      "4,'USA'"
  );

  // Clean up testing table
  dropTable();
}
Example 10
Source File: FromRDBMSToHDFSTest.java (from sqoop-on-spark, Apache License 2.0)
@Test
public void testStories() throws Exception {
  createAndLoadTableShortStories();

  // RDBMS link
  MLink rdbmsLink = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsLink);
  saveLink(rdbmsLink);

  // HDFS link
  MLink hdfsLink = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsLink);
  saveLink(hdfsLink);

  // Job creation
  MJob job = getClient().createJob(rdbmsLink.getPersistenceId(), hdfsLink.getPersistenceId());

  // Connector values
  fillRdbmsFromConfig(job, "id");
  MConfigList configs = job.getJobConfig(Direction.FROM);
  configs.getStringInput("fromJobConfig.columns").setValue(
      provider.escapeColumnName("id") + "," + provider.escapeColumnName("name") + "," + provider.escapeColumnName("story"));
  fillHdfsToConfig(job, ToFormat.TEXT_FILE);
  saveJob(job);

  MSubmission submission = getClient().startJob(job.getPersistenceId());
  assertTrue(submission.getStatus().isRunning());

  // Wait until the job finishes - this active waiting will be removed once
  // the Sqoop client API gets blocking support.
  do {
    Thread.sleep(5000);
    submission = getClient().getJobStatus(job.getPersistenceId());
  } while (submission.getStatus().isRunning());

  // Assert correct output
  assertHdfsTo(
      "1,'The Gift of the Magi','ONE DOLLAR AND EIGHTY-SEVEN CENTS. THAT WAS ALL. AND SIXTY CENTS of it was in pennies. Pennies saved one and two at a time by bulldozing the grocer and the vegetable man and the butcher until ones cheeks burned with the silent imputation of parsimony that such close dealing implied. Three times Della counted it. One dollar and eighty-seven cents. And the next day would be Christmas.\\n\\nThere was clearly nothing left to do but flop down on the shabby little couch and howl. So Della did it. Which instigates the moral reflection that life is made up of sobs, sniffles, and smiles, with sniffles predominating.'",
      "2,'The Little Match Girl','Most terribly cold it was; it snowed, and was nearly quite dark, and evening-- the last evening of the year. In this cold and darkness there went along the street a poor little girl, bareheaded, and with naked feet. When she left home she had slippers on, it is true; but what was the good of that? They were very large slippers, which her mother had hitherto worn; so large were they; and the poor little thing lost them as she scuffled away across the street, because of two carriages that rolled by dreadfully fast.'",
      "3,'To Build a Fire','Day had broken cold and grey, exceedingly cold and grey, when the man turned aside from the main Yukon trail and climbed the high earth- bank, where a dim and little-travelled trail led eastward through the fat spruce timberland. It was a steep bank, and he paused for breath at the top, excusing the act to himself by looking at his watch. It was nine oclock. There was no sun nor hint of sun, though there was not a cloud in the sky. It was a clear day, and yet there seemed an intangible pall over the face of things, a subtle gloom that made the day dark, and that was due to the absence of sun. This fact did not worry the man. He was used to the lack of sun. It had been days since he had seen the sun, and he knew that a few more days must pass before that cheerful orb, due south, would just peep above the sky- line and dip immediately from view.'"
  );

  // Clean up testing table
  dropTable();
}
Example 11
Source File: AllTypesTest.java (from sqoop-on-spark, Apache License 2.0)
@Test
public void testFrom() throws Exception {
  createTable("id", "id", "INT", "value", type.name);

  int i = 1;
  for (ExampleValue value : type.values) {
    insertRow(false, Integer.toString(i++), value.insertStatement);
  }

  // RDBMS link
  MLink rdbmsConnection = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsConnection);
  saveLink(rdbmsConnection);

  // HDFS link
  MLink hdfsConnection = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsConnection);
  saveLink(hdfsConnection);

  // Job creation
  MJob job = getClient().createJob(rdbmsConnection.getPersistenceId(), hdfsConnection.getPersistenceId());

  // Fill rdbms "FROM" config
  fillRdbmsFromConfig(job, "id");
  MConfigList fromConfig = job.getJobConfig(Direction.FROM);
  fromConfig.getStringInput("fromJobConfig.columns").setValue(provider.escapeColumnName("value"));

  // Fill the hdfs "TO" config
  fillHdfsToConfig(job, ToFormat.TEXT_FILE);

  // driver config
  MDriverConfig driverConfig = job.getDriverConfig();
  driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(1);

  saveJob(job);
  executeJob(job);

  // Assert correct output
  assertHdfsTo(type.escapedStringValues());

  // Clean up testing table
  dropTable();
}
Example 12
Source File: TableStagedRDBMSTest.java (from sqoop-on-spark, Apache License 2.0)
@Test
public void testStagedTransfer() throws Exception {
  final TableName stageTableName = new TableName("STAGE_" + getTableName());
  createTableCities();
  createHdfsFromFile("input-0001",
      "1,'USA','2004-10-23','San Francisco'",
      "2,'USA','2004-10-24','Sunnyvale'",
      "3,'Czech Republic','2004-10-25','Brno'",
      "4,'USA','2004-10-26','Palo Alto'"
  );
  new Cities(provider, stageTableName).createTables();

  // RDBMS link
  MLink rdbmsLink = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsLink);
  saveLink(rdbmsLink);

  // HDFS link
  MLink hdfsLink = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsLink);
  saveLink(hdfsLink);

  // Job creation
  MJob job = getClient().createJob(hdfsLink.getPersistenceId(), rdbmsLink.getPersistenceId());

  // fill HDFS "FROM" config
  fillHdfsFromConfig(job);

  // fill rdbms "TO" config here
  fillRdbmsToConfig(job);
  MConfigList configs = job.getJobConfig(Direction.TO);
  configs.getStringInput("toJobConfig.stageTableName").setValue(provider.escapeTableName(stageTableName.getTableName()));

  // driver config
  MConfigList driverConfig = job.getDriverConfig();
  driverConfig.getIntegerInput("throttlingConfig.numExtractors").setValue(3);

  saveJob(job);
  executeJob(job);

  assertEquals(0L, provider.rowCount(stageTableName));
  assertEquals(4L, provider.rowCount(getTableName()));
  assertRowInCities(1, "USA", "2004-10-23", "San Francisco");
  assertRowInCities(2, "USA", "2004-10-24", "Sunnyvale");
  assertRowInCities(3, "Czech Republic", "2004-10-25", "Brno");
  assertRowInCities(4, "USA", "2004-10-26", "Palo Alto");

  // Clean up testing table
  provider.dropTable(stageTableName);
  dropTable();
}
Example 13
Source File: IncrementalReadTest.java (from sqoop-on-spark, Apache License 2.0)
@Test
public void testQuery() throws Exception {
  createAndLoadTableUbuntuReleases();

  // RDBMS link
  MLink rdbmsLink = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsLink);
  saveLink(rdbmsLink);

  // HDFS link
  MLink hdfsLink = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsLink);
  saveLink(hdfsLink);

  // Job creation
  MJob job = getClient().createJob(rdbmsLink.getPersistenceId(), hdfsLink.getPersistenceId());

  String query = "SELECT * FROM " + provider.escapeTableName(getTableName().getTableName()) + " WHERE ${CONDITIONS}";

  // Set the rdbms "FROM" config
  MConfigList fromConfig = job.getJobConfig(Direction.FROM);
  fromConfig.getStringInput("fromJobConfig.sql").setValue(query);
  fromConfig.getStringInput("fromJobConfig.partitionColumn").setValue(provider.escapeColumnName("id"));
  fromConfig.getStringInput("incrementalRead.checkColumn").setValue(provider.escapeColumnName(checkColumn));
  fromConfig.getStringInput("incrementalRead.lastValue").setValue(lastValue);

  // Fill hdfs "TO" config
  fillHdfsToConfig(job, ToFormat.TEXT_FILE);

  saveJob(job);
  executeJob(job);

  // Assert correct output
  assertHdfsTo(
      "10,'Jaunty Jackalope',9.04,'2009-04-23',false",
      "11,'Karmic Koala',9.10,'2009-10-29',false",
      "12,'Lucid Lynx',10.04,'2010-04-29',true",
      "13,'Maverick Meerkat',10.10,'2010-10-10',false",
      "14,'Natty Narwhal',11.04,'2011-04-28',false",
      "15,'Oneiric Ocelot',11.10,'2011-10-10',false",
      "16,'Precise Pangolin',12.04,'2012-04-26',true",
      "17,'Quantal Quetzal',12.10,'2012-10-18',false",
      "18,'Raring Ringtail',13.04,'2013-04-25',false",
      "19,'Saucy Salamander',13.10,'2013-10-17',false"
  );

  // TODO: Once Sqoop properly updates configuration objects, we need to verify the new max value

  // Clean up testing table
  dropTable();
}
Example 14
Source File: IncrementalReadTest.java (from sqoop-on-spark, Apache License 2.0)
@Test
public void testTable() throws Exception {
  createAndLoadTableUbuntuReleases();

  // RDBMS link
  MLink rdbmsLink = getClient().createLink("generic-jdbc-connector");
  fillRdbmsLinkConfig(rdbmsLink);
  saveLink(rdbmsLink);

  // HDFS link
  MLink hdfsLink = getClient().createLink("hdfs-connector");
  fillHdfsLink(hdfsLink);
  saveLink(hdfsLink);

  // Job creation
  MJob job = getClient().createJob(rdbmsLink.getPersistenceId(), hdfsLink.getPersistenceId());

  // Set the rdbms "FROM" config
  fillRdbmsFromConfig(job, "id");
  MConfigList fromConfig = job.getJobConfig(Direction.FROM);
  fromConfig.getStringInput("incrementalRead.checkColumn").setValue(provider.escapeColumnName(checkColumn));
  fromConfig.getStringInput("incrementalRead.lastValue").setValue(lastValue);

  // Fill hdfs "TO" config
  fillHdfsToConfig(job, ToFormat.TEXT_FILE);

  saveJob(job);
  executeJob(job);

  // Assert correct output
  assertHdfsTo(
      "10,'Jaunty Jackalope',9.04,'2009-04-23',false",
      "11,'Karmic Koala',9.10,'2009-10-29',false",
      "12,'Lucid Lynx',10.04,'2010-04-29',true",
      "13,'Maverick Meerkat',10.10,'2010-10-10',false",
      "14,'Natty Narwhal',11.04,'2011-04-28',false",
      "15,'Oneiric Ocelot',11.10,'2011-10-10',false",
      "16,'Precise Pangolin',12.04,'2012-04-26',true",
      "17,'Quantal Quetzal',12.10,'2012-10-18',false",
      "18,'Raring Ringtail',13.04,'2013-04-25',false",
      "19,'Saucy Salamander',13.10,'2013-10-17',false"
  );

  // TODO: Once Sqoop properly updates configuration objects, we need to verify the new max value

  // Clean up testing table
  dropTable();
}
Example 15
Source File: ConnectorTestCase.java (from sqoop-on-spark, Apache License 2.0)
protected void fillRdbmsToConfig(MJob job) {
  // Point the generic JDBC "TO" config at the shared test table
  MConfigList toConfig = job.getJobConfig(Direction.TO);
  toConfig.getStringInput("toJobConfig.tableName").setValue(provider.escapeTableName(getTableName().getTableName()));
}
Example 16
Source File: ConnectorTestCase.java (from sqoop-on-spark, Apache License 2.0)
protected void fillRdbmsFromConfig(MJob job, String partitionColumn) {
  // Read from the shared test table, partitioned on the given column
  MConfigList fromConfig = job.getJobConfig(Direction.FROM);
  fromConfig.getStringInput("fromJobConfig.tableName").setValue(provider.escapeTableName(getTableName().getTableName()));
  fromConfig.getStringInput("fromJobConfig.partitionColumn").setValue(provider.escapeColumnName(partitionColumn));
}
Example 17
Source File: ConnectorTestCase.java (from sqoop-on-spark, Apache License 2.0)
/**
 * Fill FROM config
 *
 * @param job MJob object to fill
 */
protected void fillHdfsFromConfig(MJob job) {
  MConfigList fromConfig = job.getJobConfig(Direction.FROM);
  fromConfig.getStringInput("fromJobConfig.inputDirectory").setValue(getHadoopTestDirectory());
}
Example 18
Source File: ConnectorTestCase.java (from sqoop-on-spark, Apache License 2.0)
/**
 * Fill TO config with specific storage and output type.
 *
 * @param job MJob object to fill
 * @param output Output type that should be set
 */
protected void fillHdfsToConfig(MJob job, ToFormat output) {
  MConfigList toConfig = job.getJobConfig(Direction.TO);
  toConfig.getEnumInput("toJobConfig.outputFormat").setValue(output);
  toConfig.getStringInput("toJobConfig.outputDirectory").setValue(getHadoopTestDirectory());
}