Java Code Examples for org.apache.hadoop.test.GenericTestUtils#getTempPath()
The following examples show how to use org.apache.hadoop.test.GenericTestUtils#getTempPath(). All examples are taken from the hadoop-ozone project; each one is listed with its source file.
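GenericTestUtils#getTempPath(String) only computes a path under the test data directory used by Hadoop's test utilities (a location under the build tree, typically overridable via the test.build.data system property); it does not create anything on disk. That is why the examples below call mkdirs() on the result and delete the directory when they are done. A minimal sketch of that pattern, assuming JUnit 4, commons-io, and the hadoop-common test artifact on the classpath (the class name TestTempPathUsage is hypothetical):

import java.io.File;
import java.util.UUID;

import org.apache.commons.io.FileUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import static org.junit.Assert.assertTrue;

public class TestTempPathUsage {

  private File testDir;

  @Before
  public void setUp() {
    // getTempPath only computes a path; the test creates the directory.
    // A random suffix keeps concurrent or repeated runs from colliding.
    String path = GenericTestUtils.getTempPath(
        TestTempPathUsage.class.getSimpleName() + "-" + UUID.randomUUID());
    testDir = new File(path);
    assertTrue(testDir.mkdirs());
  }

  @Test
  public void testTempDirIsUsable() {
    assertTrue(new File(testDir, "scratch").mkdir());
  }

  @After
  public void tearDown() throws Exception {
    // Remove the directory so later runs start clean.
    FileUtils.deleteDirectory(testDir);
  }
}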
Example 1
Source File: TestStorageContainerManager.java From hadoop-ozone with Apache License 2.0
@Test
public void testSCMInitialization() throws Exception {
  OzoneConfiguration conf = new OzoneConfiguration();
  final String path = GenericTestUtils.getTempPath(
      UUID.randomUUID().toString());
  Path scmPath = Paths.get(path, "scm-meta");
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());

  // This will initialize SCM
  StorageContainerManager.scmInit(conf, "testClusterId");
  SCMStorageConfig scmStore = new SCMStorageConfig(conf);
  Assert.assertEquals(NodeType.SCM, scmStore.getNodeType());
  Assert.assertEquals("testClusterId", scmStore.getClusterID());

  // A second init must not overwrite the existing cluster id.
  StorageContainerManager.scmInit(conf, "testClusterIdNew");
  Assert.assertEquals(NodeType.SCM, scmStore.getNodeType());
  Assert.assertEquals("testClusterId", scmStore.getClusterID());
}
Example 2
Source File: TestOzoneManagerConfiguration.java From hadoop-ozone with Apache License 2.0
@Before
public void init() throws IOException {
  conf = new OzoneConfiguration();
  omId = UUID.randomUUID().toString();
  clusterId = UUID.randomUUID().toString();
  scmId = UUID.randomUUID().toString();
  final String path = GenericTestUtils.getTempPath(omId);
  Path metaDirPath = Paths.get(path, "om-meta");
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
  conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
  conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true);
  conf.setTimeDuration(
      OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
      LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS);

  OMStorage omStore = new OMStorage(conf);
  omStore.setClusterId("testClusterId");
  omStore.setScmId("testScmId");
  // writes the version file properties
  omStore.initialize();
}
Example 3
Source File: TestSCMNodeStorageStatMap.java From hadoop-ozone with Apache License 2.0
@Test
public void testUpdateUnknownDatanode() throws SCMException {
  SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
  UUID unknownNode = UUID.randomUUID();
  String path = GenericTestUtils.getTempPath(
      TestSCMNodeStorageStatMap.class.getSimpleName() + "-"
          + unknownNode.toString());
  Set<StorageLocationReport> reportSet = new HashSet<>();
  StorageLocationReport.Builder builder =
      StorageLocationReport.newBuilder();
  builder.setStorageType(StorageType.DISK).setId(unknownNode.toString())
      .setStorageLocation(path).setScmUsed(used).setRemaining(remaining)
      .setCapacity(capacity).setFailed(false);
  reportSet.add(builder.build());

  thrown.expect(SCMException.class);
  thrown.expectMessage("No such datanode");
  map.updateDatanodeMap(unknownNode, reportSet);
}
Example 4
Source File: TestStorageContainerManager.java From hadoop-ozone with Apache License 2.0
@Test
public void testSCMReinitialization() throws Exception {
  OzoneConfiguration conf = new OzoneConfiguration();
  final String path = GenericTestUtils.getTempPath(
      UUID.randomUUID().toString());
  Path scmPath = Paths.get(path, "scm-meta");
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
  // This will set the cluster id in the version file
  MiniOzoneCluster cluster =
      MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
  cluster.waitForClusterToBeReady();
  try {
    // This will initialize SCM
    StorageContainerManager.scmInit(conf, "testClusterId");
    SCMStorageConfig scmStore = new SCMStorageConfig(conf);
    Assert.assertEquals(NodeType.SCM, scmStore.getNodeType());
    Assert.assertNotEquals("testClusterId", scmStore.getClusterID());
  } finally {
    cluster.shutdown();
  }
}
Example 5
Source File: TestDeadNodeHandler.java From hadoop-ozone with Apache License 2.0
@Before
public void setup() throws IOException, AuthenticationException {
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.setTimeDuration(
      HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
      0, TimeUnit.SECONDS);
  storageDir = GenericTestUtils.getTempPath(
      TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID());
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
  eventQueue = new EventQueue();
  scm = HddsTestUtils.getScm(conf);
  nodeManager = (SCMNodeManager) scm.getScmNodeManager();
  pipelineManager = (SCMPipelineManager) scm.getPipelineManager();
  PipelineProvider mockRatisProvider =
      new MockRatisPipelineProvider(nodeManager,
          pipelineManager.getStateManager(), conf);
  pipelineManager.setPipelineProvider(RATIS, mockRatisProvider);
  containerManager = scm.getContainerManager();
  deadNodeHandler = new DeadNodeHandler(nodeManager,
      Mockito.mock(PipelineManager.class), containerManager);
  eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
  publisher = Mockito.mock(EventPublisher.class);
  nodeReportHandler = new NodeReportHandler(nodeManager);
}
Example 6
Source File: TestHadoopDirTreeGenerator.java From hadoop-ozone with Apache License 2.0
@Before
public void setup() {
  path = GenericTestUtils
      .getTempPath(TestOzoneClientKeyGenerator.class.getSimpleName());
  GenericTestUtils.setLogLevel(RaftLog.LOG, Level.DEBUG);
  GenericTestUtils.setLogLevel(RaftServerImpl.LOG, Level.DEBUG);
  File baseDir = new File(path);
  baseDir.mkdirs();
}
Example 7
Source File: TestDefaultCertificateClient.java From hadoop-ozone with Apache License 2.0
@Before
public void setUp() throws Exception {
  OzoneConfiguration config = new OzoneConfiguration();
  config.setStrings(OZONE_SCM_NAMES, "localhost");
  config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 2);
  final String omPath = GenericTestUtils
      .getTempPath(UUID.randomUUID().toString());
  final String dnPath = GenericTestUtils
      .getTempPath(UUID.randomUUID().toString());
  omMetaDirPath = Paths.get(omPath, "test");
  dnMetaDirPath = Paths.get(dnPath, "test");
  config.set(HDDS_METADATA_DIR_NAME, omMetaDirPath.toString());
  omSecurityConfig = new SecurityConfig(config);
  config.set(HDDS_METADATA_DIR_NAME, dnMetaDirPath.toString());
  dnSecurityConfig = new SecurityConfig(config);

  keyGenerator = new HDDSKeyGenerator(omSecurityConfig);
  omKeyCodec = new KeyCodec(omSecurityConfig, OM_COMPONENT);
  dnKeyCodec = new KeyCodec(dnSecurityConfig, DN_COMPONENT);

  Files.createDirectories(omSecurityConfig.getKeyLocation(OM_COMPONENT));
  Files.createDirectories(dnSecurityConfig.getKeyLocation(DN_COMPONENT));
  x509Certificate = generateX509Cert(null);
  certSerialId = x509Certificate.getSerialNumber().toString();
  getCertClient();
}
Example 8
Source File: TestContainerStateMachine.java From hadoop-ozone with Apache License 2.0
/**
 * Create a MiniOzoneCluster for testing.
 *
 * @throws Exception
 */
@Before
public void setup() throws Exception {
  path = GenericTestUtils
      .getTempPath(TestContainerStateMachine.class.getSimpleName());
  File baseDir = new File(path);
  baseDir.mkdirs();

  conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true);
  // conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
  conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
  conf.setQuietMode(false);
  OzoneManager.setTestSecureOmFlag(true);
  conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
  conf.setBoolean(OZONE_CLIENT_STREAM_BUFFER_FLUSH_DELAY, false);
  // conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString());
  cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1)
      .setHbInterval(200)
      .setCertificateClient(new CertificateClientTestImpl(conf))
      .build();
  cluster.waitForClusterToBeReady();
  cluster.getOzoneManager().startSecretManager();
  // the easiest way to create an open container is creating a key
  client = OzoneClientFactory.getRpcClient(conf);
  objectStore = client.getObjectStore();
  volumeName = "testcontainerstatemachinefailures";
  bucketName = volumeName;
  objectStore.createVolume(volumeName);
  objectStore.getVolume(volumeName).createBucket(bucketName);
}
Example 9
Source File: TestHddsDispatcher.java From hadoop-ozone with Apache License 2.0
@Test
public void testWriteChunkWithCreateContainerFailure() throws IOException {
  String testDir = GenericTestUtils.getTempPath(
      TestHddsDispatcher.class.getSimpleName());
  try {
    UUID scmId = UUID.randomUUID();
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(HDDS_DATANODE_DIR_KEY, testDir);
    DatanodeDetails dd = randomDatanodeDetails();
    HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);

    ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest(
        dd.getUuidString(), 1L, 1L);

    HddsDispatcher mockDispatcher = Mockito.spy(hddsDispatcher);
    ContainerCommandResponseProto.Builder builder =
        getContainerCommandResponse(writeChunkRequest,
            ContainerProtos.Result.DISK_OUT_OF_SPACE, "");
    // Return DISK_OUT_OF_SPACE response when writing chunk
    // with container creation.
    Mockito.doReturn(builder.build()).when(mockDispatcher)
        .createContainer(writeChunkRequest);

    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
        .captureLogs(HddsDispatcher.LOG);
    // send write chunk request without sending create container
    mockDispatcher.dispatch(writeChunkRequest, null);
    // verify the error log
    assertTrue(logCapturer.getOutput()
        .contains("ContainerID " + writeChunkRequest.getContainerID()
            + " creation failed , Result: DISK_OUT_OF_SPACE"));
  } finally {
    ContainerMetrics.remove();
    FileUtils.deleteDirectory(new File(testDir));
  }
}
Example 10
Source File: TestBlockDeletion.java From hadoop-ozone with Apache License 2.0
@BeforeClass
public static void init() throws Exception {
  conf = new OzoneConfiguration();
  GenericTestUtils.setLogLevel(DeletedBlockLogImpl.LOG, Level.DEBUG);
  GenericTestUtils.setLogLevel(SCMBlockDeletingService.LOG, Level.DEBUG);

  String path =
      GenericTestUtils.getTempPath(TestBlockDeletion.class.getSimpleName());
  File baseDir = new File(path);
  baseDir.mkdirs();

  conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
  conf.setQuietMode(false);
  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(3)
      .setHbInterval(200)
      .build();
  cluster.waitForClusterToBeReady();
  store = OzoneClientFactory.getRpcClient(conf).getObjectStore();
  om = cluster.getOzoneManager();
  scm = cluster.getStorageContainerManager();
  containerIdsWithDeletedBlocks = new HashSet<>();
}
Example 11
Source File: TestStorageContainerManager.java From hadoop-ozone with Apache License 2.0
@Test
public void testScmInfo() throws Exception {
  OzoneConfiguration conf = new OzoneConfiguration();
  final String path =
      GenericTestUtils.getTempPath(UUID.randomUUID().toString());
  try {
    Path scmPath = Paths.get(path, "scm-meta");
    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
    SCMStorageConfig scmStore = new SCMStorageConfig(conf);
    String clusterId = UUID.randomUUID().toString();
    String scmId = UUID.randomUUID().toString();
    scmStore.setClusterId(clusterId);
    scmStore.setScmId(scmId);
    // writes the version file properties
    scmStore.initialize();
    StorageContainerManager scm = StorageContainerManager.createSCM(conf);
    // Reads the SCM Info from SCM instance
    ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
    Assert.assertEquals(clusterId, scmInfo.getClusterId());
    Assert.assertEquals(scmId, scmInfo.getScmId());

    String expectedVersion = HddsVersionInfo.HDDS_VERSION_INFO.getVersion();
    String actualVersion = scm.getSoftwareVersion();
    Assert.assertEquals(expectedVersion, actualVersion);
  } finally {
    FileUtils.deleteQuietly(new File(path));
  }
}
Example 12
Source File: TestOzoneShellHA.java From hadoop-ozone with Apache License 2.0
/**
 * Create a MiniOzoneCluster for testing, using the distributed Ozone
 * handler type.
 *
 * @throws Exception
 */
@BeforeClass
public static void init() throws Exception {
  conf = new OzoneConfiguration();
  String path = GenericTestUtils.getTempPath(
      TestOzoneShellHA.class.getSimpleName());
  baseDir = new File(path);
  baseDir.mkdirs();

  testFile = new File(path + OzoneConsts.OZONE_URI_DELIMITER + "testFile");
  testFile.getParentFile().mkdirs();
  testFile.createNewFile();

  ozoneShell = new OzoneShell();

  // Init HA cluster
  omServiceId = "om-service-test1";
  numOfOMs = 3;
  clusterId = UUID.randomUUID().toString();
  scmId = UUID.randomUUID().toString();
  cluster = MiniOzoneCluster.newHABuilder(conf)
      .setClusterId(clusterId)
      .setScmId(scmId)
      .setOMServiceId(omServiceId)
      .setNumOfOzoneManagers(numOfOMs)
      .build();
  conf.setQuietMode(false);
  cluster.waitForClusterToBeReady();
}
Example 13
Source File: TestHealthyPipelineSafeModeRule.java From hadoop-ozone with Apache License 2.0
@Test
public void testHealthyPipelineSafeModeRuleWithMixedPipelines()
    throws Exception {

  String storageDir = GenericTestUtils.getTempPath(
      TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID());

  EventQueue eventQueue = new EventQueue();
  List<ContainerInfo> containers =
      new ArrayList<>(HddsTestUtils.getContainerInfo(1));

  OzoneConfiguration config = new OzoneConfiguration();

  // In MockNodeManager, the first 8 nodes are healthy, the next 2 are
  // stale and the last one is dead, and this repeats. So for 12 nodes:
  // 9 healthy, 2 stale and 1 dead.
  MockNodeManager nodeManager = new MockNodeManager(true, 12);
  config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
  // enable pipeline check
  config.setBoolean(
      HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
  config.setBoolean(
      HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);

  SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(config);
  try {
    SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
        nodeManager, scmMetadataStore.getPipelineTable(), eventQueue);
    pipelineManager.allowPipelineCreation();

    PipelineProvider mockRatisProvider =
        new MockRatisPipelineProvider(nodeManager,
            pipelineManager.getStateManager(), config, true);
    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
        mockRatisProvider);

    // Create 3 pipelines
    Pipeline pipeline1 =
        pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.ONE);
    Pipeline pipeline2 =
        pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE);
    Pipeline pipeline3 =
        pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE);

    SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager(
        config, containers, pipelineManager, eventQueue);

    HealthyPipelineSafeModeRule healthyPipelineSafeModeRule =
        scmSafeModeManager.getHealthyPipelineSafeModeRule();

    // No pipeline event has been sent to SCMSafeModeManager yet.
    Assert.assertFalse(healthyPipelineSafeModeRule.validate());

    GenericTestUtils.LogCapturer logCapturer =
        GenericTestUtils.LogCapturer.captureLogs(LoggerFactory.getLogger(
            SCMSafeModeManager.class));

    // Fire a pipeline-create event for the RATIS factor-1 pipeline;
    // validate() should still return false.
    firePipelineEvent(pipeline1, eventQueue);
    GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains(
        "reported count is 1"), 1000, 5000);
    Assert.assertFalse(healthyPipelineSafeModeRule.validate());

    firePipelineEvent(pipeline2, eventQueue);
    firePipelineEvent(pipeline3, eventQueue);

    GenericTestUtils.waitFor(() -> healthyPipelineSafeModeRule.validate(),
        1000, 5000);
  } finally {
    scmMetadataStore.getStore().close();
    FileUtil.fullyDelete(new File(storageDir));
  }
}
Example 14
Source File: TestContainerStateMachineFailureOnRead.java From hadoop-ozone with Apache License 2.0
@Before
public void setup() throws Exception {
  conf = new OzoneConfiguration();
  String path = GenericTestUtils
      .getTempPath(TestContainerStateMachineFailures.class.getSimpleName());
  File baseDir = new File(path);
  baseDir.mkdirs();

  conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 1200,
      TimeUnit.SECONDS);
  conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000,
      TimeUnit.SECONDS);
  conf.setTimeDuration(
      RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "."
          + DatanodeRatisServerConfig.RATIS_FOLLOWER_SLOWNESS_TIMEOUT_KEY,
      1000, TimeUnit.SECONDS);
  conf.setTimeDuration(
      RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "."
          + DatanodeRatisServerConfig.RATIS_SERVER_NO_LEADER_TIMEOUT_KEY,
      1000, TimeUnit.SECONDS);
  conf.setTimeDuration(
      RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "."
          + DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
      3, TimeUnit.SECONDS);
  conf.setTimeDuration(
      RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "."
          + DatanodeRatisServerConfig.RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
      3, TimeUnit.SECONDS);
  conf.setTimeDuration(
      RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "."
          + "rpc.request.timeout",
      3, TimeUnit.SECONDS);
  conf.setTimeDuration(
      RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "."
          + "watch.request.timeout",
      3, TimeUnit.SECONDS);
  conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
      + ".client.request.write.timeout", 30, TimeUnit.SECONDS);
  conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
      + ".client.request.watch.timeout", 30, TimeUnit.SECONDS);
  conf.setQuietMode(false);

  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(3)
      .setHbInterval(200)
      .build();
  cluster.waitForClusterToBeReady();

  OzoneClient client = OzoneClientFactory.getRpcClient(conf);
  objectStore = client.getObjectStore();

  volumeName = "testcontainerstatemachinefailures";
  bucketName = volumeName;
  objectStore.createVolume(volumeName);
  objectStore.getVolume(volumeName).createBucket(bucketName);
  Logger.getLogger(GrpcLogAppender.class).setLevel(Level.WARN);
}
Example 15
Source File: TestHealthyPipelineSafeModeRule.java From hadoop-ozone with Apache License 2.0
@Test
public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception {

  String storageDir = GenericTestUtils.getTempPath(
      TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID());

  EventQueue eventQueue = new EventQueue();
  List<ContainerInfo> containers =
      new ArrayList<>(HddsTestUtils.getContainerInfo(1));

  OzoneConfiguration config = new OzoneConfiguration();

  // In MockNodeManager, the first 8 nodes are healthy, the next 2 are
  // stale and the last one is dead, and this repeats. So for 12 nodes:
  // 9 healthy, 2 stale and 1 dead.
  MockNodeManager nodeManager = new MockNodeManager(true, 12);
  config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
  // enable pipeline check
  config.setBoolean(
      HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
  config.setBoolean(
      HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);

  SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(config);
  try {
    SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
        nodeManager, scmMetadataStore.getPipelineTable(), eventQueue);
    pipelineManager.allowPipelineCreation();

    PipelineProvider mockRatisProvider =
        new MockRatisPipelineProvider(nodeManager,
            pipelineManager.getStateManager(), config, true);
    pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
        mockRatisProvider);

    // Create 3 pipelines
    Pipeline pipeline1 =
        pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE);
    Pipeline pipeline2 =
        pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE);
    Pipeline pipeline3 =
        pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE);

    SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager(
        config, containers, pipelineManager, eventQueue);

    HealthyPipelineSafeModeRule healthyPipelineSafeModeRule =
        scmSafeModeManager.getHealthyPipelineSafeModeRule();

    // No datanode has sent a pipeline report yet.
    Assert.assertFalse(healthyPipelineSafeModeRule.validate());

    // Fire a pipeline report from all datanodes in the first pipeline.
    // With 3 pipelines, the 10% threshold is 0.3, which rounds up to 1,
    // so validate() should return true after this single event.
    // This test runs without the pipeline report handler, so the created
    // pipelines are not moved to the allocated state (marking pipelines
    // healthy is the report handler's job); they stay open in the pipeline
    // manager for test-case simplicity.
    firePipelineEvent(pipeline1, eventQueue);

    GenericTestUtils.waitFor(() -> healthyPipelineSafeModeRule.validate(),
        1000, 5000);
  } finally {
    scmMetadataStore.getStore().close();
    FileUtil.fullyDelete(new File(storageDir));
  }
}
Example 16
Source File: TestContainerDeletionChoosingPolicy.java From hadoop-ozone with Apache License 2.0
@Before
public void init() throws Throwable {
  conf = new OzoneConfiguration();
  path = GenericTestUtils
      .getTempPath(TestContainerDeletionChoosingPolicy.class.getSimpleName());
}
Example 17
Source File: TestContainerReplicationEndToEnd.java From hadoop-ozone with Apache License 2.0
/**
 * Create a MiniOzoneCluster for testing.
 *
 * @throws Exception
 */
@BeforeClass
public static void init() throws Exception {
  conf = new OzoneConfiguration();
  path = GenericTestUtils
      .getTempPath(TestContainerStateMachineFailures.class.getSimpleName());
  File baseDir = new File(path);
  baseDir.mkdirs();

  containerReportInterval = 2000;
  conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL,
      containerReportInterval, TimeUnit.MILLISECONDS);
  conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL,
      containerReportInterval, TimeUnit.MILLISECONDS);
  conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL,
      2 * containerReportInterval, TimeUnit.MILLISECONDS);
  conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000,
      TimeUnit.SECONDS);
  conf.setTimeDuration(
      RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "."
          + DatanodeRatisServerConfig.RATIS_FOLLOWER_SLOWNESS_TIMEOUT_KEY,
      1000, TimeUnit.SECONDS);
  conf.setTimeDuration(
      RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "."
          + DatanodeRatisServerConfig.RATIS_SERVER_NO_LEADER_TIMEOUT_KEY,
      1000, TimeUnit.SECONDS);
  conf.setLong("hdds.scm.replication.thread.interval",
      containerReportInterval);
  conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2);
  conf.setQuietMode(false);

  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(4)
      .setTotalPipelineNumLimit(6)
      .setHbInterval(200)
      .build();
  cluster.waitForClusterToBeReady();
  cluster.getStorageContainerManager().getReplicationManager().start();

  // the easiest way to create an open container is creating a key
  client = OzoneClientFactory.getRpcClient(conf);
  objectStore = client.getObjectStore();
  xceiverClientManager = new XceiverClientManager(conf);
  volumeName = "testcontainerstatemachinefailures";
  bucketName = volumeName;
  objectStore.createVolume(volumeName);
  objectStore.getVolume(volumeName).createBucket(bucketName);
}
Example 18
Source File: TestContainerStateMachineFlushDelay.java From hadoop-ozone with Apache License 2.0
/**
 * Create a MiniOzoneCluster for testing.
 *
 * @throws Exception
 */
@Before
public void setup() throws Exception {
  chunkSize = 100;
  flushSize = 2 * chunkSize;
  maxFlushSize = 2 * flushSize;
  blockSize = 2 * maxFlushSize;
  keyString = UUID.randomUUID().toString();
  path = GenericTestUtils
      .getTempPath(TestContainerStateMachineFlushDelay.class.getSimpleName());
  File baseDir = new File(path);
  baseDir.mkdirs();

  conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true);
  // conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
  conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
  conf.setQuietMode(false);
  OzoneManager.setTestSecureOmFlag(true);
  conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
  // conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString());
  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(1)
      .setBlockSize(blockSize)
      .setChunkSize(chunkSize)
      .setStreamBufferFlushSize(flushSize)
      .setStreamBufferMaxSize(maxFlushSize)
      .setStreamBufferSizeUnit(StorageUnit.BYTES)
      .setHbInterval(200)
      .setCertificateClient(new CertificateClientTestImpl(conf))
      .build();
  cluster.waitForClusterToBeReady();
  cluster.getOzoneManager().startSecretManager();
  // the easiest way to create an open container is creating a key
  client = OzoneClientFactory.getRpcClient(conf);
  objectStore = client.getObjectStore();
  volumeName = "testcontainerstatemachinefailures";
  bucketName = volumeName;
  objectStore.createVolume(volumeName);
  objectStore.getVolume(volumeName).createBucket(bucketName);
}
Example 19
Source File: TestOzoneFsHAURLs.java From hadoop-ozone with Apache License 2.0
@Before
public void init() throws Exception {
  conf = new OzoneConfiguration();
  omId = UUID.randomUUID().toString();
  omServiceId = "om-service-test1";
  numOfOMs = 3;
  clusterId = UUID.randomUUID().toString();
  scmId = UUID.randomUUID().toString();
  final String path = GenericTestUtils.getTempPath(omId);
  java.nio.file.Path metaDirPath = java.nio.file.Paths.get(path, "om-meta");
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
  conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
  conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3);

  OMStorage omStore = new OMStorage(conf);
  omStore.setClusterId(clusterId);
  omStore.setScmId(scmId);
  // writes the version file properties
  omStore.initialize();

  // Start the cluster
  cluster = MiniOzoneCluster.newHABuilder(conf)
      .setNumDatanodes(7)
      .setTotalPipelineNumLimit(10)
      .setClusterId(clusterId)
      .setScmId(scmId)
      .setOMServiceId(omServiceId)
      .setNumOfOzoneManagers(numOfOMs)
      .build();
  cluster.waitForClusterToBeReady();

  om = cluster.getOzoneManager();
  Assert.assertEquals(LifeCycle.State.RUNNING, om.getOmRatisServerState());

  volumeName = "volume" + RandomStringUtils.randomNumeric(5);
  ObjectStore objectStore =
      OzoneClientFactory.getRpcClient(omServiceId, conf).getObjectStore();
  objectStore.createVolume(volumeName);

  OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName);
  bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
  retVolumeinfo.createBucket(bucketName);

  rootPath = String.format("%s://%s.%s.%s/", OzoneConsts.OZONE_URI_SCHEME,
      bucketName, volumeName, omServiceId);
  // Set fs.defaultFS
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
  FileSystem fs = FileSystem.get(conf);
  // Create some dirs
  Path root = new Path("/");
  Path dir1 = new Path(root, "dir1");
  Path dir12 = new Path(dir1, "dir12");
  Path dir2 = new Path(root, "dir2");
  fs.mkdirs(dir12);
  fs.mkdirs(dir2);
}
Example 20
Source File: TestDeleteWithSlowFollower.java From hadoop-ozone with Apache License 2.0
/**
 * Create a MiniOzoneCluster for testing.
 *
 * @throws Exception
 */
@BeforeClass
public static void init() throws Exception {
  conf = new OzoneConfiguration();
  path = GenericTestUtils
      .getTempPath(TestContainerStateMachineFailures.class.getSimpleName());
  File baseDir = new File(path);
  baseDir.mkdirs();

  conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
      TimeUnit.MILLISECONDS);
  // Make the stale, dead and server failure timeouts higher so that a dead
  // node is not detected at SCM, and the pipeline close action never gets
  // initiated early at the datanode in this test.
  conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000,
      TimeUnit.MILLISECONDS);
  conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 1000,
      TimeUnit.SECONDS);
  conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, 2000,
      TimeUnit.SECONDS);
  conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000,
      TimeUnit.SECONDS);
  conf.setTimeDuration(
      RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "."
          + DatanodeRatisServerConfig.RATIS_FOLLOWER_SLOWNESS_TIMEOUT_KEY,
      1000, TimeUnit.SECONDS);
  conf.setTimeDuration(
      RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "."
          + DatanodeRatisServerConfig.RATIS_SERVER_NO_LEADER_TIMEOUT_KEY,
      1000, TimeUnit.SECONDS);
  conf.setTimeDuration(
      RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "."
          + DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
      3, TimeUnit.SECONDS);
  conf.setTimeDuration(
      RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "."
          + DatanodeRatisServerConfig.RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
      3, TimeUnit.SECONDS);
  conf.setTimeDuration(
      RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "."
          + "rpc.request.timeout",
      3, TimeUnit.SECONDS);
  conf.setTimeDuration(
      RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "."
          + "watch.request.timeout",
      10, TimeUnit.SECONDS);
  conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
      1, TimeUnit.SECONDS);
  conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
      + ".client.request.write.timeout", 30, TimeUnit.SECONDS);
  conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
      + ".client.request.watch.timeout", 30, TimeUnit.SECONDS);
  conf.setQuietMode(false);

  int numOfDatanodes = 3;
  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(numOfDatanodes)
      .setTotalPipelineNumLimit(numOfDatanodes + FACTOR_THREE_PIPELINE_COUNT)
      .setHbInterval(100)
      .build();
  cluster.waitForClusterToBeReady();
  cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE,
      60000);
  // the easiest way to create an open container is creating a key
  client = OzoneClientFactory.getRpcClient(conf);
  objectStore = client.getObjectStore();
  xceiverClientManager = new XceiverClientManager(conf);
  volumeName = "testcontainerstatemachinefailures";
  bucketName = volumeName;
  objectStore.createVolume(volumeName);
  objectStore.getVolume(volumeName).createBucket(bucketName);
}