Java Code Examples for java.util.concurrent.locks.ReadWriteLock#readLock()
The following examples show how to use java.util.concurrent.locks.ReadWriteLock#readLock().
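Almost every example below follows the same pattern: the constructor creates a single ReentrantReadWriteLock, splits it into its read and write locks, and stores the two locks in separate fields so readers and writers can synchronize independently. Below is a minimal, self-contained sketch of that pattern; the Counter class and its get/increment methods are hypothetical and not taken from any of the projects listed here.

// Hypothetical sketch of the common pattern: split a ReentrantReadWriteLock
// into read/write lock fields and guard reads with lock()/unlock() in try/finally.
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class Counter {
    private final Lock readLock;
    private final Lock writeLock;
    private long value;

    public Counter() {
        ReadWriteLock lock = new ReentrantReadWriteLock();
        this.readLock = lock.readLock();   // shared lock for readers
        this.writeLock = lock.writeLock(); // exclusive lock for writers
    }

    public long get() {
        readLock.lock();
        try {
            return value; // many threads may read concurrently
        } finally {
            readLock.unlock();
        }
    }

    public void increment() {
        writeLock.lock();
        try {
            value++; // writers get exclusive access
        } finally {
            writeLock.unlock();
        }
    }
}

The constructors in the examples below perform the same split; only the surrounding initialization differs.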
Example 1
Source File: ContainerImpl.java From hadoop with Apache License 2.0 | 6 votes |
public ContainerImpl(Configuration conf, Dispatcher dispatcher,
    NMStateStoreService stateStore, ContainerLaunchContext launchContext,
    Credentials creds, NodeManagerMetrics metrics,
    ContainerTokenIdentifier containerTokenIdentifier) {
  this.daemonConf = conf;
  this.dispatcher = dispatcher;
  this.stateStore = stateStore;
  this.launchContext = launchContext;
  this.containerTokenIdentifier = containerTokenIdentifier;
  this.containerId = containerTokenIdentifier.getContainerID();
  this.resource = containerTokenIdentifier.getResource();
  this.diagnostics = new StringBuilder();
  this.credentials = creds;
  this.metrics = metrics;
  user = containerTokenIdentifier.getApplicationSubmitter();

  ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
  this.readLock = readWriteLock.readLock();
  this.writeLock = readWriteLock.writeLock();

  stateMachine = stateMachineFactory.make(this);
}
Example 2
Source File: LuceneEngine.java From jstarcraft-core with Apache License 2.0 | 6 votes |
public LuceneEngine(IndexWriterConfig config, Path path) {
    try {
        this.config = config;
        Directory transienceDirectory = new ByteBuffersDirectory();
        this.transienceManager = new TransienceManager((IndexWriterConfig) BeanUtils.cloneBean(config), transienceDirectory);
        Directory persistenceDirectory = FSDirectory.open(path);
        this.persistenceManager = new PersistenceManager((IndexWriterConfig) BeanUtils.cloneBean(config), persistenceDirectory);
        this.searcher = new LuceneSearcher(this.transienceManager, this.persistenceManager);

        this.semaphore = new AtomicInteger();
        ReadWriteLock lock = new ReentrantReadWriteLock();
        this.readLock = lock.readLock();
        this.writeLock = lock.writeLock();
    } catch (Exception exception) {
        throw new StorageException(exception);
    }
}
Example 3
Source File: Translog.java From Elasticsearch with Apache License 2.0 | 6 votes |
public Translog(TranslogConfig config, String nodeId) {
    super(config.getShardId(), config.getIndexSettings());
    this.config = null;
    recoveredTranslogs = null;
    syncScheduler = null;
    bigArrays = null;
    ReadWriteLock rwl = new ReentrantReadWriteLock();
    readLock = new ReleasableLock(rwl.readLock());
    writeLock = new ReleasableLock(rwl.writeLock());
    location = null;
    current = null;
    currentCommittingTranslog = null;
    lastCommittedTranslogFileGeneration = -1;
    config = null;
    translogUUID = null;
}
Example 4
Source File: LocalTranslog.java From Elasticsearch with Apache License 2.0 | 6 votes |
public LocalTranslog(TranslogConfig config) throws IOException {
    super(config.getShardId(), config.getIndexSettings());
    ReadWriteLock rwl = new ReentrantReadWriteLock();
    readLock = new ReleasableLock(rwl.readLock());
    writeLock = new ReleasableLock(rwl.writeLock());
    this.translogPath = config.getTranslogPath();
    // clean all files
    Files.createDirectories(this.translogPath);
    Files.walkFileTree(this.translogPath, new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            Files.delete(file);
            return FileVisitResult.CONTINUE;
        }
    });

    // create a new directory
    writeChannel = FileChannel.open(this.translogPath.resolve(getFileNameFromId(tmpTranslogGeneration.get())),
            StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE);
    writtenOffset = 0;
}
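Note that Examples 3 and 4 do not store the result of readLock() directly; they wrap it in Elasticsearch's ReleasableLock so the lock can be acquired and released with try-with-resources instead of explicit try/finally blocks. The sketch below shows the general idea of such a wrapper; the AutoLock class and its acquire() method are hypothetical and the real Elasticsearch class differs in its details.

// Hypothetical minimal try-with-resources lock wrapper, in the spirit of
// Elasticsearch's ReleasableLock (not the actual Elasticsearch API).
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class AutoLock implements AutoCloseable {
    private final Lock lock;

    public AutoLock(Lock lock) {
        this.lock = lock;
    }

    // Acquire the lock and return this, so the call fits in a try-with-resources header.
    public AutoLock acquire() {
        lock.lock();
        return this;
    }

    @Override
    public void close() {
        lock.unlock(); // released automatically when the try block exits
    }

    public static void main(String[] args) {
        ReadWriteLock rwl = new ReentrantReadWriteLock();
        AutoLock readLock = new AutoLock(rwl.readLock());
        try (AutoLock ignored = readLock.acquire()) {
            // read-side critical section
        }
    }
}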
Example 5
Source File: BlobServerConnection.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Creates a new BLOB connection for a client request.
 *
 * @param clientSocket The socket to read/write data.
 * @param blobServer The BLOB server.
 */
BlobServerConnection(Socket clientSocket, BlobServer blobServer) {
    super("BLOB connection for " + clientSocket.getRemoteSocketAddress());
    setDaemon(true);

    this.clientSocket = clientSocket;
    this.blobServer = checkNotNull(blobServer);

    ReadWriteLock readWriteLock = blobServer.getReadWriteLock();
    this.readLock = readWriteLock.readLock();
}
Example 6
Source File: LocalizedResource.java From hadoop with Apache License 2.0 | 5 votes |
public LocalizedResource(LocalResourceRequest rsrc, Dispatcher dispatcher) {
  this.rsrc = rsrc;
  this.dispatcher = dispatcher;
  this.ref = new LinkedList<ContainerId>();

  ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
  this.readLock = readWriteLock.readLock();
  this.writeLock = readWriteLock.writeLock();

  this.stateMachine = stateMachineFactory.make(this);
}
Example 7
Source File: MemoryTokenStore.java From azeroth with Apache License 2.0 | 5 votes |
public MemoryTokenStore() {
    this.cache = CacheBuilder.newBuilder().softValues()
            .expireAfterAccess(120, TimeUnit.SECONDS).build();
    ReadWriteLock lock = new ReentrantReadWriteLock();
    this.r = lock.readLock();
    this.w = lock.writeLock();
}
Example 8
Source File: DeprecatedNaturalIdCacheStatisticsImpl.java From lams with GNU General Public License v2.0 | 5 votes |
DeprecatedNaturalIdCacheStatisticsImpl(String regionName, Set<NaturalIdDataAccess> accessStrategies) {
    this.regionName = regionName;
    this.accessStrategies = accessStrategies;
    final ReadWriteLock lock = new ReentrantReadWriteLock();
    this.readLock = lock.readLock();
    this.writeLock = lock.writeLock();
}
Example 9
Source File: NaturalIdStatisticsImpl.java From lams with GNU General Public License v2.0 | 5 votes |
NaturalIdStatisticsImpl(EntityPersister rootEntityDescriptor) {
    super(
            () -> rootEntityDescriptor.getNaturalIdCacheAccessStrategy() != null
                    ? rootEntityDescriptor.getNaturalIdCacheAccessStrategy().getRegion()
                    : null
    );
    this.rootEntityName = rootEntityDescriptor.getRootEntityName();
    final ReadWriteLock lock = new ReentrantReadWriteLock();
    this.readLock = lock.readLock();
    this.writeLock = lock.writeLock();
}
Example 10
Source File: ConfigurationFactoryImpl.java From neoscada with Eclipse Public License 1.0 | 5 votes |
public ConfigurationFactoryImpl () {
    final ReadWriteLock lock = new ReentrantReadWriteLock ();
    this.readLock = lock.readLock ();
    this.writeLock = lock.writeLock ();

    final BundleContext context = FrameworkUtil.getBundle ( DataContext.class ).getBundleContext ();

    this.executor = new ScheduledExportedExecutorService ( "org.eclipse.scada.da.server.exporter.rest", 1 );

    this.hiveSource = new ServiceListenerHiveSource ( context, this.executor );
    this.hiveSource.open ();
}
Example 11
Source File: SimpleLRUCache.java From dorado with Apache License 2.0 | 5 votes |
private SimpleLRUCache(int capacity) {
    this.cache = new LRUMap<K, V>(capacity);
    ReadWriteLock lock = new ReentrantReadWriteLock();
    this.r = lock.readLock();
    this.w = lock.writeLock();
}
Example 12
Source File: BlobServerConnection.java From flink with Apache License 2.0 | 5 votes |
/**
 * Creates a new BLOB connection for a client request.
 *
 * @param clientSocket The socket to read/write data.
 * @param blobServer The BLOB server.
 */
BlobServerConnection(Socket clientSocket, BlobServer blobServer) {
    super("BLOB connection for " + clientSocket.getRemoteSocketAddress());
    setDaemon(true);

    this.clientSocket = clientSocket;
    this.blobServer = checkNotNull(blobServer);

    ReadWriteLock readWriteLock = blobServer.getReadWriteLock();
    this.readLock = readWriteLock.readLock();
}
Example 13
Source File: AuthenticationServiceImpl.java From neoscada with Eclipse Public License 1.0 | 4 votes |
public AuthenticationServiceImpl () {
    final ReadWriteLock lock = new ReentrantReadWriteLock ();
    this.readLock = lock.readLock ();
    this.writeLock = lock.writeLock ();
}
Example 14
Source File: QueryStatisticsImpl.java From lams with GNU General Public License v2.0 | 4 votes |
QueryStatisticsImpl(String query) {
    this.query = query;
    ReadWriteLock lock = new ReentrantReadWriteLock();
    this.readLock = lock.readLock();
    this.writeLock = lock.writeLock();
}
Example 15
Source File: JobImpl.java From hadoop with Apache License 2.0 | 4 votes |
public JobImpl(JobId jobId, ApplicationAttemptId applicationAttemptId,
    Configuration conf, EventHandler eventHandler,
    TaskAttemptListener taskAttemptListener,
    JobTokenSecretManager jobTokenSecretManager,
    Credentials jobCredentials, Clock clock,
    Map<TaskId, TaskInfo> completedTasksFromPreviousRun, MRAppMetrics metrics,
    OutputCommitter committer, boolean newApiCommitter, String userName,
    long appSubmitTime, List<AMInfo> amInfos, AppContext appContext,
    JobStateInternal forcedState, String forcedDiagnostic) {
  this.applicationAttemptId = applicationAttemptId;
  this.jobId = jobId;
  this.jobName = conf.get(JobContext.JOB_NAME, "<missing job name>");
  this.conf = new JobConf(conf);
  this.metrics = metrics;
  this.clock = clock;
  this.completedTasksFromPreviousRun = completedTasksFromPreviousRun;
  this.amInfos = amInfos;
  this.appContext = appContext;
  this.userName = userName;
  this.queueName = conf.get(MRJobConfig.QUEUE_NAME, "default");
  this.appSubmitTime = appSubmitTime;
  this.oldJobId = TypeConverter.fromYarn(jobId);
  this.committer = committer;
  this.newApiCommitter = newApiCommitter;

  this.taskAttemptListener = taskAttemptListener;
  this.eventHandler = eventHandler;
  ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
  this.readLock = readWriteLock.readLock();
  this.writeLock = readWriteLock.writeLock();

  this.jobCredentials = jobCredentials;
  this.jobTokenSecretManager = jobTokenSecretManager;

  this.aclsManager = new JobACLsManager(conf);
  this.username = System.getProperty("user.name");
  this.jobACLs = aclsManager.constructJobACLs(conf);

  ThreadFactory threadFactory = new ThreadFactoryBuilder()
      .setNameFormat("Job Fail Wait Timeout Monitor #%d")
      .setDaemon(true)
      .build();
  this.executor = new ScheduledThreadPoolExecutor(1, threadFactory);

  // This "this leak" is okay because the retained pointer is in an
  // instance variable.
  stateMachine = stateMachineFactory.make(this);
  this.forcedState = forcedState;
  if (forcedDiagnostic != null) {
    this.diagnostics.add(forcedDiagnostic);
  }

  this.maxAllowedFetchFailuresFraction = conf.getFloat(
      MRJobConfig.MAX_ALLOWED_FETCH_FAILURES_FRACTION,
      MRJobConfig.DEFAULT_MAX_ALLOWED_FETCH_FAILURES_FRACTION);
  this.maxFetchFailuresNotifications = conf.getInt(
      MRJobConfig.MAX_FETCH_FAILURES_NOTIFICATIONS,
      MRJobConfig.DEFAULT_MAX_FETCH_FAILURES_NOTIFICATIONS);
}
Example 16
Source File: AggregatingContentStore.java From alfresco-repository with GNU Lesser General Public License v3.0 | 4 votes |
/**
 * Default constructor
 */
public AggregatingContentStore() {
    ReadWriteLock storeLock = new ReentrantReadWriteLock();
    readLock = storeLock.readLock();
}
Example 17
Source File: TaskAttemptImpl.java From hadoop with Apache License 2.0 | 4 votes |
public TaskAttemptImpl(TaskId taskId, int i,
    EventHandler eventHandler,
    TaskAttemptListener taskAttemptListener, Path jobFile, int partition,
    JobConf conf, String[] dataLocalHosts,
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    AppContext appContext) {
  oldJobId = TypeConverter.fromYarn(taskId.getJobId());
  this.conf = conf;
  this.clock = clock;
  attemptId = recordFactory.newRecordInstance(TaskAttemptId.class);
  attemptId.setTaskId(taskId);
  attemptId.setId(i);
  this.taskAttemptListener = taskAttemptListener;
  this.appContext = appContext;

  // Initialize reportedStatus
  reportedStatus = new TaskAttemptStatus();
  initTaskAttemptStatus(reportedStatus);

  ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
  readLock = readWriteLock.readLock();
  writeLock = readWriteLock.writeLock();

  this.credentials = credentials;
  this.jobToken = jobToken;
  this.eventHandler = eventHandler;
  this.jobFile = jobFile;
  this.partition = partition;

  //TODO:create the resource reqt for this Task attempt
  this.resourceCapability = recordFactory.newRecordInstance(Resource.class);
  this.resourceCapability.setMemory(
      getMemoryRequired(conf, taskId.getTaskType()));
  this.resourceCapability.setVirtualCores(
      getCpuRequired(conf, taskId.getTaskType()));
  this.resourceCapability.setGpuCores(
      getGpuRequired(conf, taskId.getTaskType()));

  this.dataLocalHosts = resolveHosts(dataLocalHosts);
  RackResolver.init(conf);
  this.dataLocalRacks = new HashSet<String>();
  for (String host : this.dataLocalHosts) {
    this.dataLocalRacks.add(RackResolver.resolve(host).getNetworkLocation());
  }

  locality = Locality.OFF_SWITCH;
  avataar = Avataar.VIRGIN;

  // This "this leak" is okay because the retained pointer is in an
  // instance variable.
  stateMachine = stateMachineFactory.make(this);
}
Example 18
Source File: Switcher.java From joyrpc with Apache License 2.0 | 4 votes |
public Switcher(final ReadWriteLock lock) {
    this(lock.readLock(), lock.writeLock(), new AtomicBoolean());
}
Example 19
Source File: TaskImpl.java From hadoop with Apache License 2.0 | 4 votes |
public TaskImpl(JobId jobId, TaskType taskType, int partition,
    EventHandler eventHandler, Path remoteJobConfFile, JobConf conf,
    TaskAttemptListener taskAttemptListener,
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    int appAttemptId, MRAppMetrics metrics, AppContext appContext) {
  this.conf = conf;
  this.clock = clock;
  this.jobFile = remoteJobConfFile;
  ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
  readLock = readWriteLock.readLock();
  writeLock = readWriteLock.writeLock();
  this.attempts = Collections.emptyMap();
  this.finishedAttempts = new HashSet<TaskAttemptId>(2);
  this.failedAttempts = new HashSet<TaskAttemptId>(2);
  this.inProgressAttempts = new HashSet<TaskAttemptId>(2);
  // This overridable method call is okay in a constructor because we
  // have a convention that none of the overrides depends on any
  // fields that need initialization.
  maxAttempts = getMaxAttempts();
  taskId = MRBuilderUtils.newTaskId(jobId, partition, taskType);
  this.partition = partition;
  this.taskAttemptListener = taskAttemptListener;
  this.eventHandler = eventHandler;
  this.credentials = credentials;
  this.jobToken = jobToken;
  this.metrics = metrics;
  this.appContext = appContext;
  this.encryptedShuffle = conf.getBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY,
      MRConfig.SHUFFLE_SSL_ENABLED_DEFAULT);

  // This "this leak" is okay because the retained pointer is in an
  // instance variable.
  stateMachine = stateMachineFactory.make(this);

  // All the new TaskAttemptIDs are generated based on MR
  // ApplicationAttemptID so that attempts from previous lives don't
  // over-step the current one. This assumes that a task won't have more
  // than 1000 attempts in its single generation, which is very reasonable.
  nextAttemptNumber = (appAttemptId - 1) * 1000;
}
Example 20
Source File: MemoryBlock.java From neoscada with Eclipse Public License 1.0 | 3 votes |
/**
 * Create a new memory block
 *
 * @param executor
 *            a single threaded executor for sending out events
 * @param hiveSource
 *            the source of the hive to export
 * @param properties
 *            properties to log on to the hive
 * @param logName
 *            an optional logging name
 */
public MemoryBlock ( final ScheduledExecutorService executor, final HiveSource hiveSource, final Properties properties, final String logName ) {
    final ReadWriteLock lock = new ReentrantReadWriteLock ();
    this.readLock = lock.readLock ();
    this.writeLock = lock.writeLock ();

    this.manager = new SingleSubscriptionManager ( executor, hiveSource, properties, logName );
    this.manager.start ();
}