Java Code Examples for org.apache.hadoop.util.Time#now()
The following examples show how to use org.apache.hadoop.util.Time#now(). Each example is drawn from an open-source project; the source file, the project it comes from, and the project's license are noted above each listing.
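All of the examples share one idiom: capture Time.now() before and after an operation and treat the difference as elapsed wall-clock milliseconds. As a minimal, self-contained sketch of that pattern (the doWork method and its sleep are placeholders, not taken from any Hadoop source):

import org.apache.hadoop.util.Time;

public class ElapsedTimeSketch {
  // Placeholder workload; any operation to be timed goes here.
  private static void doWork() throws InterruptedException {
    Thread.sleep(25);
  }

  public static void main(String[] args) throws InterruptedException {
    long start = Time.now();               // wall-clock ms since the epoch
    doWork();
    long elapsed = Time.now() - start;
    System.out.println("doWork took " + elapsed + " ms");
  }
}

Note that Time.now() is wall-clock time and can jump if the system clock is adjusted; for pure duration measurement Hadoop also provides Time.monotonicNow().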
Example 1
Source File: MiniRPCBenchmark.java, from big-c (Apache License 2.0)
long connectToServerUsingDelegationToken(
    final Configuration conf, final InetSocketAddress addr) throws IOException {
  MiniProtocol client = null;
  try {
    long start = Time.now();
    try {
      client = currentUgi.doAs(new PrivilegedExceptionAction<MiniProtocol>() {
        @Override
        public MiniProtocol run() throws IOException {
          return RPC.getProxy(MiniProtocol.class,
              MiniProtocol.versionID, addr, conf);
        }
      });
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
    long end = Time.now();
    return end - start;
  } finally {
    RPC.stopProxy(client);
  }
}
Example 2
Source File: TestMultithreadedTestUtil.java, from hadoop (Apache License 2.0)
@Test
public void testThreadFails() throws Exception {
  TestContext ctx = new TestContext();
  ctx.addThread(new TestingThread(ctx) {
    @Override
    public void doWork() throws Exception {
      fail(FAIL_MSG);
    }
  });
  ctx.startThreads();
  long st = Time.now();
  try {
    ctx.waitFor(30000);
    fail("waitFor did not throw");
  } catch (RuntimeException rte) {
    // expected
    assertEquals(FAIL_MSG, rte.getCause().getMessage());
  }
  long et = Time.now();
  // Test shouldn't have waited the full 30 seconds, since
  // the thread throws faster than that
  assertTrue("Test took " + (et - st) + "ms", et - st < 5000);
}
Example 3
Source File: LoadGenerator.java, from hadoop (Apache License 2.0)
/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = Time.now();
  FSDataOutputStream out = null;
  try {
    out = fc.create(file,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent(), CreateOpts.bufferSize(4096),
        CreateOpts.repFac((short) 3));
    executionTime[CREATE] += (Time.now() - startTime);
    numOfOps[CREATE]++;

    long i = fileSize;
    while (i > 0) {
      // Note: as written this compares against fileSize rather than the
      // bytes remaining (i), so the final chunk can overshoot when
      // fileSize is not a multiple of WRITE_CONTENTS.length.
      long s = Math.min(fileSize, WRITE_CONTENTS.length);
      out.write(WRITE_CONTENTS, 0, (int) s);
      i -= s;
    }

    // The stream is closed in the finally block below, so as excerpted
    // this WRITE_CLOSE timer brackets no work.
    startTime = Time.now();
    executionTime[WRITE_CLOSE] += (Time.now() - startTime);
    numOfOps[WRITE_CLOSE]++;
  } finally {
    IOUtils.cleanup(LOG, out);
  }
}
Example 4
Source File: AbstractDelegationTokenSecretManager.java, from hadoop (Apache License 2.0)
@Override
protected synchronized byte[] createPassword(TokenIdent identifier) {
  int sequenceNum;
  long now = Time.now();
  sequenceNum = incrementDelegationTokenSeqNum();
  identifier.setIssueDate(now);
  identifier.setMaxDate(now + tokenMaxLifetime);
  identifier.setMasterKeyId(currentKey.getKeyId());
  identifier.setSequenceNumber(sequenceNum);
  LOG.info("Creating password for identifier: " + identifier
      + ", currentKey: " + currentKey.getKeyId());
  byte[] password = createPassword(identifier.getBytes(), currentKey.getKey());
  DelegationTokenInformation tokenInfo = new DelegationTokenInformation(
      now + tokenRenewInterval, password, getTrackingIdIfEnabled(identifier));
  try {
    storeToken(identifier, tokenInfo);
  } catch (IOException ioe) {
    LOG.error("Could not store token !!", ioe);
  }
  return password;
}
Example 5
Source File: GenericTestUtils.java, from big-c (Apache License 2.0)
public static void waitFor(Supplier<Boolean> check,
    int checkEveryMillis, int waitForMillis)
    throws TimeoutException, InterruptedException {
  long st = Time.now();
  do {
    boolean result = check.get();
    if (result) {
      return;
    }
    Thread.sleep(checkEveryMillis);
  } while (Time.now() - st < waitForMillis);
  throw new TimeoutException("Timed out waiting for condition. "
      + "Thread diagnostics:\n"
      + TimedOutTestsListener.buildThreadDiagnosticString());
}
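A call site typically hands waitFor a condition closed over test state. Here is a hypothetical, self-contained sketch; the AtomicBoolean flag and the background thread are invented for illustration, and it assumes GenericTestUtils and the Guava Supplier the signature above uses:

import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForSketch {
  public static void main(String[] args) throws Exception {
    final AtomicBoolean done = new AtomicBoolean(false);
    // Simulate asynchronous work that finishes after ~500 ms.
    new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          Thread.sleep(500);
        } catch (InterruptedException ignored) {
        }
        done.set(true);
      }
    }).start();
    // Poll every 100 ms; throw TimeoutException after 10 s.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return done.get();
      }
    }, 100, 10000);
    System.out.println("Condition met.");
  }
}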
Example 6
Source File: BlockTokenSecretManager.java, from hadoop (Apache License 2.0)
/** Initialize block keys */
private synchronized void generateKeys() {
  if (!isMaster) {
    return;
  }
  /*
   * Need to set estimated expiry dates for currentKey and nextKey so that if
   * NN crashes, DN can still expire those keys. NN will stop using the newly
   * generated currentKey after the first keyUpdateInterval, however it may
   * still be used by DN and Balancer to generate new tokens before they get
   * a chance to sync their keys with NN. Since we require keyUpdateInterval
   * to be long enough so that all live DNs and the Balancer will sync their
   * keys with NN at least once during the period, the estimated expiry date
   * for currentKey is set to now() + 2 * keyUpdateInterval + tokenLifetime.
   * Similarly, the estimated expiry date for nextKey is one
   * keyUpdateInterval more.
   */
  setSerialNo(serialNo + 1);
  currentKey = new BlockKey(serialNo,
      Time.now() + 2 * keyUpdateInterval + tokenLifetime, generateSecret());
  setSerialNo(serialNo + 1);
  nextKey = new BlockKey(serialNo,
      Time.now() + 3 * keyUpdateInterval + tokenLifetime, generateSecret());
  allKeys.put(currentKey.getKeyId(), currentKey);
  allKeys.put(nextKey.getKeyId(), nextKey);
}
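To make the expiry arithmetic in that comment concrete, here is a small sketch with illustrative values; the 10-hour intervals are hypothetical, not necessarily any cluster's configured defaults:

import org.apache.hadoop.util.Time;

public class BlockKeyExpirySketch {
  public static void main(String[] args) {
    long hour = 60L * 60 * 1000;
    long keyUpdateInterval = 10 * hour;  // how often the NN rolls keys
    long tokenLifetime = 10 * hour;      // how long an issued token is valid
    long now = Time.now();

    // currentKey: one interval of NN use, one more interval during which
    // out-of-sync DNs/Balancer may still issue tokens with it, plus the
    // lifetime of the last such token: now + 30 h in this setup.
    long currentKeyExpiry = now + 2 * keyUpdateInterval + tokenLifetime;

    // nextKey becomes current one roll later, so add one more interval:
    // now + 40 h in this setup.
    long nextKeyExpiry = now + 3 * keyUpdateInterval + tokenLifetime;

    System.out.println("currentKey expires at " + currentKeyExpiry);
    System.out.println("nextKey expires at " + nextKeyExpiry);
  }
}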
Example 7
Source File: TestZKFailoverControllerStress.java, from big-c (Apache License 2.0)
/**
 * Randomly expire the ZK sessions of the two ZKFCs. This differs
 * from the above test in that it is not a controlled failover -
 * we just do random expirations and expect neither one to ever
 * generate fatal exceptions.
 */
@Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
public void testRandomExpirations() throws Exception {
  cluster.start();
  long st = Time.now();
  long runFor = STRESS_RUNTIME_SECS * 1000;

  Random r = new Random();
  while (Time.now() - st < runFor) {
    cluster.getTestContext().checkException();
    int targetIdx = r.nextInt(2);
    ActiveStandbyElector target = cluster.getElector(targetIdx);
    long sessId = target.getZKSessionIdForTests();
    if (sessId != -1) {
      LOG.info(String.format("Expiring session %x for svc %d",
          sessId, targetIdx));
      getServer(serverFactory).closeSession(sessId);
    }
    Thread.sleep(r.nextInt(300));
  }
}
Example 8
Source File: TestZKFailoverControllerStress.java, from hadoop (Apache License 2.0)
/**
 * Have the services fail their health checks half the time,
 * causing the master role to bounce back and forth in the
 * cluster. Meanwhile, causes ZK to disconnect clients every
 * 50ms, to trigger the retry code and failures to become active.
 */
@Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
public void testRandomHealthAndDisconnects() throws Exception {
  long runFor = STRESS_RUNTIME_SECS * 1000;
  Mockito.doAnswer(new RandomlyThrow(0))
      .when(cluster.getService(0).proxy).monitorHealth();
  Mockito.doAnswer(new RandomlyThrow(1))
      .when(cluster.getService(1).proxy).monitorHealth();
  conf.setInt(CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_KEY, 100);
  // Don't start until after the above mocking. Otherwise we can get
  // Mockito errors if the HM calls the proxy in the middle of
  // setting up the mock.
  cluster.start();

  long st = Time.now();
  while (Time.now() - st < runFor) {
    cluster.getTestContext().checkException();
    serverFactory.closeAll();
    Thread.sleep(50);
  }
}
Example 9
Source File: TestHFSTestCase.java, from big-c (Apache License 2.0)
@Test
public void sleepRatio1() {
  setWaitForRatio(1);
  long start = Time.now();
  sleep(100);
  long end = Time.now();
  // Asserts the measured duration is within 50 ms of the requested 100 ms.
  assertEquals(end - start, 100, 50);
}
Example 10
Source File: OMVolumeSetQuotaRequest.java, from hadoop-ozone (Apache License 2.0)
@Override
public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
  long modificationTime = Time.now();
  SetVolumePropertyRequest modifiedRequest = getOmRequest()
      .getSetVolumePropertyRequest().toBuilder()
      .setModificationTime(modificationTime).build();

  return getOmRequest().toBuilder()
      .setSetVolumePropertyRequest(modifiedRequest.toBuilder())
      .setUserInfo(getUserInfo())
      .build();
}
Example 11
Source File: OMBucketCreateRequest.java, from hadoop-ozone (Apache License 2.0)
@Override
public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
  // Get original request.
  CreateBucketRequest createBucketRequest =
      getOmRequest().getCreateBucketRequest();
  BucketInfo bucketInfo = createBucketRequest.getBucketInfo();
  // Verify resource name
  OmUtils.validateBucketName(bucketInfo.getBucketName());

  // Get KMS provider.
  KeyProviderCryptoExtension kmsProvider = ozoneManager.getKmsProvider();

  // Create new Bucket request with new bucket info.
  CreateBucketRequest.Builder newCreateBucketRequest =
      createBucketRequest.toBuilder();

  BucketInfo.Builder newBucketInfo = bucketInfo.toBuilder();
  // Set creation time & modification time.
  long initialTime = Time.now();
  newBucketInfo.setCreationTime(initialTime)
      .setModificationTime(initialTime);

  if (bucketInfo.hasBeinfo()) {
    newBucketInfo.setBeinfo(getBeinfo(kmsProvider, bucketInfo));
  }

  newCreateBucketRequest.setBucketInfo(newBucketInfo.build());

  return getOmRequest().toBuilder().setUserInfo(getUserInfo())
      .setCreateBucketRequest(newCreateBucketRequest.build()).build();
}
Example 12
Source File: LoadGenerator.java, from hadoop (Apache License 2.0)
/** The list operation randomly picks a directory in the test space and
 * lists the directory content.
 */
private void list() throws IOException {
  String dirName = dirs.get(r.nextInt(dirs.size()));
  long startTime = Time.now();
  fc.listStatus(new Path(dirName));
  executionTime[LIST] += (Time.now() - startTime);
  totalNumOfOps[LIST]++;
}
Example 13
Source File: Server.java, from hadoop (Apache License 2.0)
Connection register(SocketChannel channel) {
  if (isFull()) {
    return null;
  }
  Connection connection = new Connection(channel, Time.now());
  add(connection);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Server connection from " + connection
        + "; # active connections: " + size()
        + "; # queued calls: " + callQueue.size());
  }
  return connection;
}
Example 14
Source File: FsVolumeImpl.java, from big-c (Apache License 2.0)
BlockIteratorState() {
  lastSavedMs = iterStartMs = Time.now();
  curFinalizedDir = null;
  curFinalizedSubDir = null;
  curEntry = null;
  atEnd = false;
}
Example 15
Source File: AbstractDelegationTokenSecretManager.java, from big-c (Apache License 2.0)
private synchronized void removeExpiredKeys() {
  long now = Time.now();
  for (Iterator<Map.Entry<Integer, DelegationKey>> it = allKeys.entrySet()
      .iterator(); it.hasNext();) {
    Map.Entry<Integer, DelegationKey> e = it.next();
    if (e.getValue().getExpiryDate() < now) {
      it.remove();
      // ensure the tokens generated by this current key can be recovered
      // with this current key after this current key is rolled
      if (!e.getValue().equals(currentKey)) {
        removeStoredMasterKey(e.getValue());
      }
    }
  }
}
Example 16
Source File: EventQueue.java, from hadoop-ozone (Apache License 2.0)
/**
 * This is just for unit testing, don't use it for production code.
 * <p>
 * It waits for all messages to be processed. If one event handler invokes
 * another one, the latter should also be finished.
 * <p>
 * Long counter overflow is not handled, therefore it's safe only for unit
 * testing.
 * <p>
 * This method is only eventually consistent. In some cases it could return
 * even if there are new messages in some of the handlers. But in a simple
 * case (one message) it will return only when the message is processed and
 * all the dependent messages (messages which are sent by current handlers)
 * are processed.
 *
 * @param timeout Timeout in milliseconds to wait for the processing.
 */
@VisibleForTesting
public void processAll(long timeout) {
  long currentTime = Time.now();
  while (true) {
    if (!isRunning) {
      LOG.warn("Processing of event skipped. EventQueue is not running");
      return;
    }

    // Note: never updated in this excerpt, so the error message below
    // always reports "Processed: 0".
    long processed = 0;

    Stream<EventExecutor> allExecutor = this.executors.values().stream()
        .flatMap(handlerMap -> handlerMap.keySet().stream());

    boolean allIdle = allExecutor.allMatch(
        executor -> executor.queuedEvents()
            == executor.successfulEvents() + executor.failedEvents());

    if (allIdle) {
      return;
    }

    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      LOG.warn("Interrupted exception while sleeping.", e);
      Thread.currentThread().interrupt();
    }

    if (Time.now() > currentTime + timeout) {
      throw new AssertionError(
          "Messages are not processed in the given timeframe. Queued: "
              + queuedCount.get() + " Processed: " + processed);
    }
  }
}
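In a test, processAll is typically called after firing an event so that assertions run only once the asynchronous handlers have drained. A hypothetical fragment; WATCH_EVENT, watchHandler, and payload are invented names, and the sketch assumes EventQueue's addHandler/fireEvent API:

// Hypothetical test usage; WATCH_EVENT, watchHandler and payload are
// stand-ins, not names from the example above.
EventQueue queue = new EventQueue();
queue.addHandler(WATCH_EVENT, watchHandler);
queue.fireEvent(WATCH_EVENT, payload);
// Block until every queued event is handled, or fail after 5 s.
queue.processAll(5000);
// Safe to assert on watchHandler's side effects from here on.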
Example 17
Source File: DFSClient.java, from hadoop (Apache License 2.0)
@Override
public DataEncryptionKey newDataEncryptionKey() throws IOException {
  if (shouldEncryptData()) {
    synchronized (this) {
      if (encryptionKey == null
          || encryptionKey.expiryDate < Time.now()) {
        LOG.debug("Getting new encryption token from NN");
        encryptionKey = namenode.getDataEncryptionKey();
      }
      return encryptionKey;
    }
  } else {
    return null;
  }
}
Example 18
Source File: Client.java, from hadoop (Apache License 2.0)
private synchronized void sendPing() throws IOException {
  long curTime = Time.now();
  if (curTime - lastActivity.get() >= pingInterval) {
    lastActivity.set(curTime);
    synchronized (out) {
      out.writeInt(pingRequest.size());
      pingRequest.writeTo(out);
      out.flush();
    }
  }
}
Example 19
Source File: RPC.java, from hadoop (Apache License 2.0)
/**
 * Get a protocol proxy that contains a proxy connection to a remote server
 * and a set of methods that are supported by the server.
 *
 * @param protocol protocol class
 * @param clientVersion client version
 * @param addr remote address
 * @param conf configuration to use
 * @param rpcTimeout timeout for each RPC
 * @param connectionRetryPolicy retry policy for the connection
 * @param timeout time in milliseconds before giving up
 * @return the proxy
 * @throws IOException if the far end threw a RemoteException
 */
public static <T> ProtocolProxy<T> waitForProtocolProxy(Class<T> protocol,
    long clientVersion, InetSocketAddress addr, Configuration conf,
    int rpcTimeout, RetryPolicy connectionRetryPolicy,
    long timeout) throws IOException {
  long startTime = Time.now();
  IOException ioe;
  while (true) {
    try {
      return getProtocolProxy(protocol, clientVersion, addr,
          UserGroupInformation.getCurrentUser(), conf,
          NetUtils.getDefaultSocketFactory(conf), rpcTimeout,
          connectionRetryPolicy);
    } catch (ConnectException se) {
      // namenode has not been started
      LOG.info("Server at " + addr + " not available yet, Zzzzz...");
      ioe = se;
    } catch (SocketTimeoutException te) {
      // namenode is busy
      LOG.info("Problem connecting to server: " + addr);
      ioe = te;
    } catch (NoRouteToHostException nrthe) {
      // perhaps a VIP is failing over
      LOG.info("No route to host for server: " + addr);
      ioe = nrthe;
    }
    // check if timed out; equivalent to (Time.now() - startTime >= timeout)
    if (Time.now() - timeout >= startTime) {
      throw ioe;
    }
    if (Thread.currentThread().isInterrupted()) {
      // interrupted during some IO; this may not have been caught
      throw new InterruptedIOException("Interrupted waiting for the proxy");
    }
    // wait for retry
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
      throw (IOException) new InterruptedIOException(
          "Interrupted waiting for the proxy").initCause(ioe);
    }
  }
}
Example 20
Source File: TestShortCircuitLocalRead.java, from hadoop (Apache License 2.0)
/**
 * Test to run benchmarks between short circuit read vs regular read with
 * specified number of threads simultaneously reading.
 * <br>
 * Run this using the following command:
 * bin/hadoop --config confdir \
 * org.apache.hadoop.hdfs.TestShortCircuitLocalRead \
 * <shortcircuit on?> <checksum on?> <Number of threads>
 */
public static void main(String[] args) throws Exception {
  if (args.length != 3) {
    System.out.println("Usage: test shortcircuit checksum threadCount");
    System.exit(1);
  }
  boolean shortcircuit = Boolean.valueOf(args[0]);
  boolean checksum = Boolean.valueOf(args[1]);
  int threadCount = Integer.parseInt(args[2]);

  // Setup: create a file
  final Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
      shortcircuit);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      "/tmp/TestShortCircuitLocalRead._PORT");
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
      checksum);

  // Override fileSize and DATA_TO_WRITE to much larger values for benchmark test
  int fileSize = 1000 * blockSize + 100;  // File with 1000 blocks
  final byte[] dataToWrite = AppendTestUtil.randomBytes(seed, fileSize);

  // create a new file in home directory. Do not close it.
  final Path file1 = new Path("filelocal.dat");
  final FileSystem fs = FileSystem.get(conf);
  FSDataOutputStream stm = createFile(fs, file1, 1);
  stm.write(dataToWrite);
  stm.close();

  long start = Time.now();
  final int iteration = 20;
  Thread[] threads = new Thread[threadCount];
  for (int i = 0; i < threadCount; i++) {
    threads[i] = new Thread() {
      @Override
      public void run() {
        for (int i = 0; i < iteration; i++) {
          try {
            String user = getCurrentUser();
            checkFileContent(fs.getUri(), file1, dataToWrite, 0, user, conf,
                true);
          } catch (IOException e) {
            e.printStackTrace();
          } catch (InterruptedException e) {
            e.printStackTrace();
          }
        }
      }
    };
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].start();
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].join();
  }
  long end = Time.now();
  System.out.println("Iteration " + iteration + " took " + (end - start));
  fs.delete(file1, false);
}