Java Code Examples for org.pentaho.di.trans.TransMeta#setStepLogTable()
The following examples show how to use
org.pentaho.di.trans.TransMeta#setStepLogTable().
You can vote up the examples you find useful or vote down those you don't,
and follow the links above each example to go to the original project or source file. You may also check out the related API usage on the sidebar.
Example 1
Source File: SpoonExportXmlTest.java From pentaho-kettle with Apache License 2.0 | 6 votes |
/**
 * Populates the given transformation metadata with all five log table types,
 * each initialized with sample parameters. The trans and performance tables
 * additionally get the GLOBAL_PARAM value for their interval/size settings.
 */
private void initTables( TransMeta transMeta ) {
  // Transformation log table: sample params plus interval and size-limit globals.
  TransLogTable transLog = TransLogTable.getDefault( mockedVariableSpace, mockedHasDbInterface, null );
  initTableWithSampleParams( transLog );
  transLog.setLogInterval( GLOBAL_PARAM );
  transLog.setLogSizeLimit( GLOBAL_PARAM );
  transMeta.setTransLogTable( transLog );

  // Step log table: sample params only.
  StepLogTable stepLog = StepLogTable.getDefault( mockedVariableSpace, mockedHasDbInterface );
  initTableWithSampleParams( stepLog );
  transMeta.setStepLogTable( stepLog );

  // Performance log table: sample params plus interval global.
  PerformanceLogTable perfLog = PerformanceLogTable.getDefault( mockedVariableSpace, mockedHasDbInterface );
  initTableWithSampleParams( perfLog );
  perfLog.setLogInterval( GLOBAL_PARAM );
  transMeta.setPerformanceLogTable( perfLog );

  // Channel log table: sample params only.
  ChannelLogTable channelLog = ChannelLogTable.getDefault( mockedVariableSpace, mockedHasDbInterface );
  initTableWithSampleParams( channelLog );
  transMeta.setChannelLogTable( channelLog );

  // Metrics log table: sample params only.
  MetricsLogTable metricsLog = MetricsLogTable.getDefault( mockedVariableSpace, mockedHasDbInterface );
  initTableWithSampleParams( metricsLog );
  transMeta.setMetricsLogTable( metricsLog );
}
Example 2
Source File: PentahoMapReduceJobBuilderImpl.java From pentaho-hadoop-shims with Apache License 2.0 | 5 votes |
/**
 * Clears the database connection name on every log table of the transformation
 * held by the given configuration, so no logging connection survives.
 * Silently returns when the configuration is absent or carries no TransMeta.
 */
private void deleteLogging( Optional<TransConfiguration> transConfiguration ) {
  if ( !transConfiguration.isPresent() ) {
    return;
  }
  TransMeta meta = transConfiguration.get().getTransMeta();
  if ( meta == null ) {
    return;
  }

  // Each table is fetched, its connection cleared, and stored back via the
  // type-specific setter (hence the downcasts from BaseLogTable).
  BaseLogTable stepTable = meta.getStepLogTable();
  stepTable.setConnectionName( null );
  meta.setStepLogTable( (StepLogTable) stepTable );

  BaseLogTable metricsTable = meta.getMetricsLogTable();
  metricsTable.setConnectionName( null );
  meta.setMetricsLogTable( (MetricsLogTable) metricsTable );

  BaseLogTable performanceTable = meta.getPerformanceLogTable();
  performanceTable.setConnectionName( null );
  meta.setPerformanceLogTable( (PerformanceLogTable) performanceTable );

  BaseLogTable transTable = meta.getTransLogTable();
  transTable.setConnectionName( null );
  meta.setTransLogTable( (TransLogTable) transTable );

  BaseLogTable channelTable = meta.getChannelLogTable();
  channelTable.setConnectionName( null );
  meta.setChannelLogTable( (ChannelLogTable) channelTable );
}
Example 3
Source File: XmlExportHelper.java From pentaho-kettle with Apache License 2.0 | 5 votes |
/**
 * Prepares a TransMeta for XML export by replacing each non-null log table
 * with a clone whose user-global parameters have been cleared. Clones are
 * used so the live tables held by the transformation are never mutated.
 *
 * @param transMeta
 *          transformation metadata whose log tables are swapped before export
 */
public static void swapTables( TransMeta transMeta ) {
  TransLogTable transLog = transMeta.getTransLogTable();
  if ( transLog != null ) {
    TransLogTable sanitizedTransLog = (TransLogTable) transLog.clone();
    sanitizedTransLog.setAllGlobalParametersToNull();
    transMeta.setTransLogTable( sanitizedTransLog );
  }

  StepLogTable stepLog = transMeta.getStepLogTable();
  if ( stepLog != null ) {
    StepLogTable sanitizedStepLog = (StepLogTable) stepLog.clone();
    sanitizedStepLog.setAllGlobalParametersToNull();
    transMeta.setStepLogTable( sanitizedStepLog );
  }

  PerformanceLogTable performanceLog = transMeta.getPerformanceLogTable();
  if ( performanceLog != null ) {
    PerformanceLogTable sanitizedPerformanceLog = (PerformanceLogTable) performanceLog.clone();
    sanitizedPerformanceLog.setAllGlobalParametersToNull();
    transMeta.setPerformanceLogTable( sanitizedPerformanceLog );
  }

  ChannelLogTable channelLog = transMeta.getChannelLogTable();
  if ( channelLog != null ) {
    ChannelLogTable sanitizedChannelLog = (ChannelLogTable) channelLog.clone();
    sanitizedChannelLog.setAllGlobalParametersToNull();
    transMeta.setChannelLogTable( sanitizedChannelLog );
  }

  MetricsLogTable metricsLog = transMeta.getMetricsLogTable();
  if ( metricsLog != null ) {
    MetricsLogTable sanitizedMetricsLog = (MetricsLogTable) metricsLog.clone();
    sanitizedMetricsLog.setAllGlobalParametersToNull();
    transMeta.setMetricsLogTable( sanitizedMetricsLog );
  }
}
Example 4
Source File: PentahoMapReduceJobBuilderImplTest.java From pentaho-hadoop-shims with Apache License 2.0 | 4 votes |
/**
 * Verifies that configuring the PMR job strips logging connections from the
 * mapper transformation XML: a TransMeta is round-tripped through XML once
 * without logging tables (the expected output), then decorated with log
 * tables that all reference "logging-connection" and fed to the builder.
 * The builder must hand Hadoop the logging-free XML.
 */
@Test public void testDeleteLogging() throws Exception {
  // Stub shim class lookups used during configure().
  when( hadoopShim.getPentahoMapReduceMapRunnerClass() ).thenReturn( "" );
  when( hadoopShim.getPentahoMapReduceCombinerClass() )
    .thenReturn( "org.pentaho.hadoop.mapreduce.GenericTransCombiner" );
  when( hadoopShim.getPentahoMapReduceReducerClass() )
    .thenReturn( "org.pentaho.hadoop.mapreduce.GenericTransReduce" );
  pentahoMapReduceJobBuilder.setLogLevel( LogLevel.BASIC );
  pentahoMapReduceJobBuilder.setInputPaths( new String[ 0 ] );
  pentahoMapReduceJobBuilder.setOutputPath( "test" );
  Configuration configuration = mock( Configuration.class );
  when( hadoopShim.getFileSystem( configuration ) ).thenReturn( mock( FileSystem.class ) );
  String testMrInput = "testMrInput";
  String testMrOutput = "testMrOutput";
  // Round-trip a clean TransMeta through XML; this is the expected
  // "without logging" payload the builder should end up writing.
  TransMeta meta = new TransMeta();
  TransConfiguration transConfig = new TransConfiguration( meta, new TransExecutionConfiguration() );
  String transXmlWOLogging = TransConfiguration.fromXML( transConfig.getXML() ).getXML();
  meta = transConfig.getTransMeta();
  HasDatabasesInterface dbIf = mock( HasDatabasesInterface.class );
  VariableSpace vsLogging = variableSpace;
  // Attach all five log table types, each pointing at a logging connection
  // that the builder is expected to delete.
  MetricsLogTable metricsLogTable = MetricsLogTable.getDefault( vsLogging, dbIf );
  metricsLogTable.setConnectionName( "logging-connection" );
  meta.setMetricsLogTable( metricsLogTable );
  PerformanceLogTable performanceLogTable = PerformanceLogTable.getDefault( vsLogging, dbIf );
  performanceLogTable.setConnectionName( "logging-connection" );
  meta.setPerformanceLogTable( performanceLogTable );
  StepLogTable stepLogTable = StepLogTable.getDefault( vsLogging, dbIf );
  stepLogTable.setConnectionName( "logging-connection" );
  meta.setStepLogTable( stepLogTable );
  TransLogTable transLogTable = TransLogTable.getDefault( vsLogging, dbIf, null );
  transLogTable.setConnectionName( "logging-connection" );
  meta.setTransLogTable( transLogTable );
  ChannelLogTable channelLogTable = ChannelLogTable.getDefault( vsLogging, mock( HasDatabasesInterface.class ) );
  channelLogTable.setConnectionName( "logging-connection" );
  meta.setChannelLogTable( channelLogTable );
  transConfig.setTransMeta( meta );
  // XML that DOES contain logging tables — this is what we hand the builder.
  String logTransXml = TransConfiguration.fromXML( transConfig.getXML() ).getXML();
  pentahoMapReduceJobBuilder.setMapperInfo( logTransXml, testMrInput, testMrOutput );
  pentahoMapReduceJobBuilder.configure( configuration );
  verify( configuration ).setMapRunnerClass( "" );
  // Key assertion: the map XML written to the Hadoop configuration equals
  // the logging-free variant, proving the log connections were removed.
  verify( configuration ).set( PentahoMapReduceJobBuilderImpl.TRANSFORMATION_MAP_XML, transXmlWOLogging );
  verify( configuration ).set( PentahoMapReduceJobBuilderImpl.TRANSFORMATION_MAP_INPUT_STEPNAME, testMrInput );
  verify( configuration ).set( PentahoMapReduceJobBuilderImpl.TRANSFORMATION_MAP_OUTPUT_STEPNAME, testMrOutput );
  verify( configuration ).setJarByClass( Class.forName( "org.pentaho.hadoop.mapreduce.PentahoMapReduceJarMarker" ) );
  verify( configuration ).set( PentahoMapReduceJobBuilderImpl.LOG_LEVEL, LogLevel.BASIC.toString() );
}
Example 5
Source File: RepositoryTestBase.java From pentaho-kettle with Apache License 2.0 | 4 votes |
/**
 * Builds a fully-populated TransMeta fixture for repository round-trip tests.
 * Every EXP_* constant set here is later compared against what the repository
 * returns. Side effects: saves two DatabaseMeta objects, a slave server and a
 * partition schema to the repository, pushing each onto deleteStack for
 * cleanup (save order matters — dependent objects need repository IDs first).
 *
 * @param dbName suffix used to make the transformation and database names unique
 * @return the populated transformation metadata
 * @throws Exception if any repository save fails
 */
protected TransMeta createTransMeta( final String dbName ) throws Exception {
  RepositoryDirectoryInterface rootDir = loadStartDirectory();
  // Basic identity and audit fields.
  TransMeta transMeta = new TransMeta();
  transMeta.setName( EXP_TRANS_NAME.concat( dbName ) );
  transMeta.setDescription( EXP_TRANS_DESC );
  transMeta.setExtendedDescription( EXP_TRANS_EXTENDED_DESC );
  transMeta.setRepositoryDirectory( rootDir.findDirectory( DIR_TRANSFORMATIONS ) );
  transMeta.setTransversion( EXP_TRANS_VERSION );
  transMeta.setTransstatus( EXP_TRANS_STATUS );
  transMeta.setCreatedUser( EXP_TRANS_CREATED_USER );
  transMeta.setCreatedDate( EXP_TRANS_CREATED_DATE );
  transMeta.setModifiedUser( EXP_TRANS_MOD_USER );
  transMeta.setModifiedDate( EXP_TRANS_MOD_DATE );
  transMeta.addParameterDefinition( EXP_TRANS_PARAM_1_NAME, EXP_TRANS_PARAM_1_DEF, EXP_TRANS_PARAM_1_DESC );
  // TODO mlowery other transLogTable fields could be set for testing here
  TransLogTable transLogTable = TransLogTable.getDefault( transMeta, transMeta, new ArrayList<StepMeta>( 0 ) );
  transLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  transLogTable.setLogInterval( EXP_TRANS_LOG_TABLE_INTERVAL );
  transLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  transLogTable.setLogSizeLimit( EXP_TRANS_LOG_TABLE_SIZE_LIMIT );
  transLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  transLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setTransLogTable( transLogTable );
  // TODO mlowery other perfLogTable fields could be set for testing here
  PerformanceLogTable perfLogTable = PerformanceLogTable.getDefault( transMeta, transMeta );
  perfLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  perfLogTable.setLogInterval( EXP_TRANS_LOG_TABLE_INTERVAL );
  perfLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  perfLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  perfLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setPerformanceLogTable( perfLogTable );
  // TODO mlowery other channelLogTable fields could be set for testing here
  ChannelLogTable channelLogTable = ChannelLogTable.getDefault( transMeta, transMeta );
  channelLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  channelLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  channelLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  channelLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setChannelLogTable( channelLogTable );
  // TODO mlowery other stepLogTable fields could be set for testing here
  StepLogTable stepLogTable = StepLogTable.getDefault( transMeta, transMeta );
  stepLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  stepLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  stepLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  stepLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setStepLogTable( stepLogTable );
  DatabaseMeta dbMeta = createDatabaseMeta( dbName );
  // dbMeta must be saved so that it gets an ID
  repository.save( dbMeta, VERSION_COMMENT_V1, null );
  deleteStack.push( dbMeta );
  // Max-date and throughput tuning fields.
  transMeta.setMaxDateConnection( dbMeta );
  transMeta.setMaxDateTable( EXP_TRANS_MAX_DATE_TABLE );
  transMeta.setMaxDateField( EXP_TRANS_MAX_DATE_FIELD );
  transMeta.setMaxDateOffset( EXP_TRANS_MAX_DATE_OFFSET );
  transMeta.setMaxDateDifference( EXP_TRANS_MAX_DATE_DIFF );
  transMeta.setSizeRowset( EXP_TRANS_SIZE_ROWSET );
  transMeta.setSleepTimeEmpty( EXP_TRANS_SLEEP_TIME_EMPTY );
  transMeta.setSleepTimeFull( EXP_TRANS_SLEEP_TIME_FULL );
  transMeta.setUsingUniqueConnections( EXP_TRANS_USING_UNIQUE_CONN );
  transMeta.setFeedbackShown( EXP_TRANS_FEEDBACK_SHOWN );
  transMeta.setFeedbackSize( EXP_TRANS_FEEDBACK_SIZE );
  transMeta.setUsingThreadPriorityManagment( EXP_TRANS_USING_THREAD_PRIORITY_MGMT );
  transMeta.setSharedObjectsFile( EXP_TRANS_SHARED_OBJECTS_FILE );
  transMeta.setCapturingStepPerformanceSnapShots( EXP_TRANS_CAPTURE_STEP_PERF_SNAPSHOTS );
  transMeta.setStepPerformanceCapturingDelay( EXP_TRANS_STEP_PERF_CAP_DELAY );
  transMeta.addDependency( new TransDependency( dbMeta, EXP_TRANS_DEP_TABLE_NAME, EXP_TRANS_DEP_FIELD_NAME ) );
  // Two steps connected by a hop, backed by a second saved database.
  DatabaseMeta stepDbMeta = createDatabaseMeta( EXP_DBMETA_NAME_STEP.concat( dbName ) );
  repository.save( stepDbMeta, VERSION_COMMENT_V1, null );
  deleteStack.push( stepDbMeta );
  Condition cond = new Condition();
  StepMeta step1 = createStepMeta1( transMeta, stepDbMeta, cond );
  transMeta.addStep( step1 );
  StepMeta step2 = createStepMeta2( stepDbMeta, cond );
  transMeta.addStep( step2 );
  transMeta.addTransHop( createTransHopMeta( step1, step2 ) );
  SlaveServer slaveServer = createSlaveServer( dbName );
  PartitionSchema partSchema = createPartitionSchema( dbName );
  // slaveServer, partSchema must be saved so that they get IDs
  repository.save( slaveServer, VERSION_COMMENT_V1, null );
  deleteStack.push( slaveServer );
  repository.save( partSchema, VERSION_COMMENT_V1, null );
  deleteStack.push( partSchema );
  // Clustered/partitioned execution metadata.
  SlaveStepCopyPartitionDistribution slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution();
  slaveStepCopyPartitionDistribution.addPartition( EXP_SLAVE_NAME, EXP_PART_SCHEMA_NAME, 0 );
  slaveStepCopyPartitionDistribution.setOriginalPartitionSchemas( Arrays
    .asList( new PartitionSchema[] { partSchema } ) );
  transMeta.setSlaveStepCopyPartitionDistribution( slaveStepCopyPartitionDistribution );
  transMeta.setSlaveTransformation( EXP_TRANS_SLAVE_TRANSFORMATION );
  return transMeta;
}