Java Code Examples for com.datastax.driver.core.Row#getTimestamp()
The following examples show how to use com.datastax.driver.core.Row#getTimestamp().
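Before the project examples, here is a minimal, self-contained sketch of the call itself, assuming DataStax Java driver 3.x and a hypothetical demo_keyspace.events table with a created_at timestamp column; getTimestamp returns the column value as a java.util.Date, or null when the value is unset.

import java.util.Date;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;

public class GetTimestampSketch {
    public static void main(String[] args) {
        // Hypothetical contact point, keyspace, table and column names - adjust to your schema.
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
             Session session = cluster.connect("demo_keyspace")) {
            Row row = session.execute("SELECT created_at FROM events LIMIT 1").one();
            if (row != null) {
                // getTimestamp returns null when the timestamp column is unset.
                Date createdAt = row.getTimestamp("created_at");
                if (createdAt != null) {
                    System.out.println("created_at = " + createdAt.toInstant());
                }
            }
        }
    }
}

Several of the examples below follow the same pattern: read the Date, null-check it, and then convert it with toInstant() or wrap it in a DateTime.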
Example 1
Source File: CassandraStorage.java From cassandra-reaper with Apache License 2.0 | 6 votes |
private RepairRun buildRepairRunFromRow(Row repairRunResult, UUID id) {
    LOG.trace("buildRepairRunFromRow {} / {}", id, repairRunResult);
    Date startTime = repairRunResult.getTimestamp("start_time");
    Date pauseTime = repairRunResult.getTimestamp("pause_time");
    Date endTime = repairRunResult.getTimestamp("end_time");
    return RepairRun.builder(repairRunResult.getString("cluster_name"), repairRunResult.getUUID("repair_unit_id"))
        .creationTime(new DateTime(repairRunResult.getTimestamp("creation_time")))
        .intensity(repairRunResult.getDouble("intensity"))
        .segmentCount(repairRunResult.getInt("segment_count"))
        .repairParallelism(RepairParallelism.fromName(repairRunResult.getString("repair_parallelism")))
        .cause(repairRunResult.getString("cause"))
        .owner(repairRunResult.getString("owner"))
        .startTime(null != startTime ? new DateTime(startTime) : null)
        .pauseTime(null != pauseTime ? new DateTime(pauseTime) : null)
        .endTime(null != endTime ? new DateTime(endTime) : null)
        .lastEvent(repairRunResult.getString("last_event"))
        .runState(RunState.valueOf(repairRunResult.getString("state")))
        .tables(repairRunResult.getSet("tables", String.class))
        .build(id);
}
Example 2
Source File: CassandraStorage.java From copper-engine with Apache License 2.0 | 6 votes |
private WorkflowInstance row2WorkflowInstance(Row row) {
    final WorkflowInstance cw = new WorkflowInstance();
    cw.id = row.getString("ID");
    cw.ppoolId = row.getString("PPOOL_ID");
    cw.prio = row.getInt("PRIO");
    cw.creationTS = row.getTimestamp("CREATION_TS");
    cw.timeout = row.getTimestamp("TIMEOUT");
    cw.waitMode = toWaitMode(row.getString("WAIT_MODE"));
    cw.serializedWorkflow = new SerializedWorkflow();
    cw.serializedWorkflow.setData(row.getString("DATA"));
    cw.serializedWorkflow.setObjectState(row.getString("OBJECT_STATE"));
    cw.cid2ResponseMap = toResponseMap(row.getString("RESPONSE_MAP_JSON"));
    cw.state = ProcessingState.valueOf(row.getString("STATE"));
    cw.lastModTS = row.getTimestamp("LAST_MOD_TS");
    cw.classname = row.getString("CLASSNAME");
    return cw;
}
Example 3
Source File: CassandraScheduleDao.java From arcusplatform with Apache License 2.0 | 6 votes |
protected Optional<ScheduledCommand> rowToCommand(Row row) {
    try {
        ScheduledCommand command = new ScheduledCommand();
        command.setPlaceId(row.getUUID(ScheduledEventTable.Columns.PLACE_ID));
        command.setScheduledTime(row.getTimestamp(ScheduledEventTable.Columns.SCHEDULED_TIME));
        command.setSchedulerAddress(Address.fromString(row.getString(ScheduledEventTable.Columns.SCHEDULER)));
        command.setExpirationTime(row.getTimestamp(ScheduledEventTable.Columns.EXPIRES_AT));
        PlatformPartition partition = partitioner.getPartitionById(row.getInt(SchedulerOffsetTable.Columns.PARTITION_ID));
        PartitionOffset offset = new PartitionOffset(
            partition,
            row.getTimestamp(ScheduledEventTable.Columns.TIME_BUCKET),
            windowSizeMs
        );
        command.setOffset(offset);
        return Optional.of(command);
    } catch (Exception e) {
        logger.warn("Unable to load row [{}]: {}", row, e.getMessage(), e);
        return Optional.absent();
    }
}
Example 4
Source File: CassandraClusterServiceDao.java From arcusplatform with Apache License 2.0 | 5 votes |
private ClusterServiceRecord transform(Row row) {
    ClusterServiceRecord record = new ClusterServiceRecord();
    record.setHost(row.getString(ClusterServiceTable.Columns.HOST));
    record.setService(row.getString(ClusterServiceTable.Columns.SERVICE));
    record.setMemberId(row.getInt(ClusterServiceTable.Columns.CLUSTER_ID));
    Date registered = row.getTimestamp(ClusterServiceTable.Columns.REGISTERED);
    if (registered != null) {
        record.setRegistered(registered.toInstant());
    }
    Date heartbeat = row.getTimestamp(ClusterServiceTable.Columns.HEARTBEAT);
    if (heartbeat != null) {
        record.setLastHeartbeat(heartbeat.toInstant());
    }
    return record;
}
Example 5
Source File: PlacePurgeRecordingTable.java From arcusplatform with Apache License 2.0 | 5 votes |
public PlacePurgeRecord buildEntity(Row row) {
    PurgeMode mode = PurgeMode.ALL;
    if (!row.isNull(COL_MODE)) {
        mode = PurgeMode.valueOf(row.getString(COL_MODE));
    }
    return new PlacePurgeRecord(row.getUUID(COL_PLACEID), row.getTimestamp(COL_DELETE_TIME), mode);
}
Example 6
Source File: PurgeRecordingV2Table.java From arcusplatform with Apache License 2.0 | 5 votes |
@Override
protected PurgeRecord buildEntity(Row row) {
    String storageStr = row.getString(COL_STORAGE);
    boolean purgePreview = false;
    if (StringUtils.isNotBlank(storageStr) && storageStr.endsWith(PURGE_PREVIEW_SUFFIX)) {
        storageStr = storageStr.substring(0, storageStr.length() - 2);
        purgePreview = true;
    }
    return new PurgeRecord(row.getTimestamp(COL_DELETETIME), row.getInt(COL_PARTITIONID),
        row.getUUID(COL_RECORDINGID), row.getUUID(COL_PLACEID), storageStr, true, purgePreview);
}
Example 7
Source File: BaseCassandraViewQuery.java From eventapis with Apache License 2.0 | 5 votes |
static EntityEvent convertToEntityEvent(Row entityEventData) {
    EventKey eventKey = new EventKey(entityEventData.getString(CassandraEventRecorder.ENTITY_ID),
        entityEventData.getInt(CassandraEventRecorder.VERSION));
    String opId = entityEventData.getString(CassandraEventRecorder.OP_ID);
    String eventData = entityEventData.getString(CassandraEventRecorder.EVENT_DATA);
    return new EntityEvent(eventKey, opId,
        entityEventData.getTimestamp(CassandraEventRecorder.OP_DATE),
        entityEventData.getString(CassandraEventRecorder.EVENT_TYPE),
        EventState.valueOf(entityEventData.getString(CassandraEventRecorder.STATUS)),
        entityEventData.getString(CassandraEventRecorder.AUDIT_INFO),
        eventData);
}
Example 8
Source File: CassandraScheduleDao.java From arcusplatform with Apache License 2.0 | 5 votes |
protected PartitionOffset rowToOffset(Row row) {
    PlatformPartition partition = partitioner.getPartitionById(row.getInt(SchedulerOffsetTable.Columns.PARTITION_ID));
    PartitionOffset offset = new PartitionOffset(
        partition,
        row.getTimestamp(SchedulerOffsetTable.Columns.LAST_EXECUTED_BUCKET),
        windowSizeMs
    );
    return offset;
}
Example 9
Source File: CassandraMailRepositoryMailDAO.java From james-project with Apache License 2.0 | 5 votes |
private MailDTO toMail(Row row) {
    MaybeSender sender = Optional.ofNullable(row.getString(SENDER))
        .map(MaybeSender::getMailSender)
        .orElse(MaybeSender.nullSender());
    List<MailAddress> recipients = row.getList(RECIPIENTS, String.class)
        .stream()
        .map(Throwing.function(MailAddress::new))
        .collect(Guavate.toImmutableList());
    String state = row.getString(STATE);
    String remoteAddr = row.getString(REMOTE_ADDR);
    String remoteHost = row.getString(REMOTE_HOST);
    String errorMessage = row.getString(ERROR_MESSAGE);
    String name = row.getString(MAIL_KEY);
    Date lastUpdated = row.getTimestamp(LAST_UPDATED);
    Map<String, ByteBuffer> rawAttributes = row.getMap(ATTRIBUTES, String.class, ByteBuffer.class);
    PerRecipientHeaders perRecipientHeaders = fromHeaderMap(row.getMap(PER_RECIPIENT_SPECIFIC_HEADERS, String.class, UDTValue.class));

    MailImpl.Builder mailBuilder = MailImpl.builder()
        .name(name)
        .sender(sender)
        .addRecipients(recipients)
        .lastUpdated(lastUpdated)
        .errorMessage(errorMessage)
        .remoteHost(remoteHost)
        .remoteAddr(remoteAddr)
        .state(state)
        .addAllHeadersForRecipients(perRecipientHeaders)
        .addAttributes(toAttributes(rawAttributes));

    return new MailDTO(mailBuilder,
        blobIdFactory.from(row.getString(HEADER_BLOB_ID)),
        blobIdFactory.from(row.getString(BODY_BLOB_ID)));
}
Example 10
Source File: CassandraMailRepositoryMailDaoV2.java From james-project with Apache License 2.0 | 5 votes |
private MailDTO toMail(Row row) {
    MaybeSender sender = MaybeSender.getMailSender(row.getString(SENDER));
    List<MailAddress> recipients = row.getList(RECIPIENTS, String.class)
        .stream()
        .map(Throwing.function(MailAddress::new))
        .collect(Guavate.toImmutableList());
    String state = row.getString(STATE);
    String remoteAddr = row.getString(REMOTE_ADDR);
    String remoteHost = row.getString(REMOTE_HOST);
    String errorMessage = row.getString(ERROR_MESSAGE);
    String name = row.getString(MAIL_KEY);
    Date lastUpdated = row.getTimestamp(LAST_UPDATED);
    Map<String, String> rawAttributes = row.getMap(ATTRIBUTES, String.class, String.class);
    PerRecipientHeaders perRecipientHeaders = fromList(row.getList(PER_RECIPIENT_SPECIFIC_HEADERS, TupleValue.class));

    MailImpl.Builder mailBuilder = MailImpl.builder()
        .name(name)
        .sender(sender)
        .addRecipients(recipients)
        .lastUpdated(lastUpdated)
        .errorMessage(errorMessage)
        .remoteHost(remoteHost)
        .remoteAddr(remoteAddr)
        .state(state)
        .addAllHeadersForRecipients(perRecipientHeaders)
        .addAttributes(toAttributes(rawAttributes));

    return new MailDTO(mailBuilder,
        blobIdFactory.from(row.getString(HEADER_BLOB_ID)),
        blobIdFactory.from(row.getString(BODY_BLOB_ID)));
}
Example 11
Source File: AlertingDisabledDao.java From glowroot with Apache License 2.0 | 5 votes |
@Override
public @Nullable Long getAlertingDisabledUntilTime(String agentRollupId) throws Exception {
    BoundStatement boundStatement = readPS.bind();
    boundStatement.setString(0, agentRollupId);
    Row row = session.read(boundStatement).one();
    if (row == null) {
        return null;
    }
    Date timestamp = row.getTimestamp(0);
    return timestamp == null ? null : timestamp.getTime();
}
Example 12
Source File: GuicedCassandraSessionDAO.java From arcusplatform with Apache License 2.0 | 4 votes |
private Session hydrateSession(UUID id, Row row) {
    UUID rowId = row.getUUID(Columns.ID);
    if (id.equals(rowId)) {
        Date start = row.getTimestamp(Columns.START);
        // If this is null, then the row is a tombstone.
        if (start != null) {
            ByteBuffer buffer = row.getBytes(Columns.SERIALIZED);
            // If the buffer has anything, then it is an old style serialized session.
            if (buffer != null && buffer.remaining() > 0) {
                byte[] bytes = new byte[buffer.remaining()];
                buffer.get(bytes);
                return serializer.deserialize(bytes);
            } else {
                // New style session. Read the fields and create a session.
                Date stop = row.getTimestamp(Columns.STOP);
                Date lastAccess = row.getTimestamp(Columns.LAST_ACCESS);
                long timeout = row.getLong(Columns.TIMEOUT);
                boolean expired = row.getBool(Columns.EXPIRED);
                String host = row.getString(Columns.HOST);

                // Read the attributes
                Map<String, String> serialized_attrs = row.getMap(Columns.ATTRIBUTES, String.class, String.class);
                Map<Object, Object> attributes = new HashMap<>();
                for (Map.Entry<String, String> entry : serialized_attrs.entrySet()) {
                    String json = entry.getValue();
                    if (json != null && !json.isEmpty()) {
                        attributes.put(entry.getKey(), deserializeAttribute(entry.getKey(), json));
                    }
                }

                // Create and populate the session.
                SimpleSession session = new SimpleSession();
                session.setId(rowId);
                session.setStartTimestamp(start);
                session.setStopTimestamp(stop);
                session.setLastAccessTime(lastAccess);
                session.setTimeout(timeout);
                session.setExpired(expired);
                session.setHost(host);
                session.setAttributes(attributes);
                return session;
            }
        }
    }
    return null;
}
Example 13
Source File: AbstractCassandraProcessor.java From localization_nifi with Apache License 2.0 | 4 votes |
protected static Object getCassandraObject(Row row, int i, DataType dataType) {
    if (dataType.equals(DataType.blob())) {
        return row.getBytes(i);
    } else if (dataType.equals(DataType.varint()) || dataType.equals(DataType.decimal())) {
        // Avro can't handle BigDecimal and BigInteger as numbers - it will throw an
        // AvroRuntimeException such as: "Unknown datum type: java.math.BigDecimal: 38"
        return row.getObject(i).toString();
    } else if (dataType.equals(DataType.cboolean())) {
        return row.getBool(i);
    } else if (dataType.equals(DataType.cint())) {
        return row.getInt(i);
    } else if (dataType.equals(DataType.bigint()) || dataType.equals(DataType.counter())) {
        return row.getLong(i);
    } else if (dataType.equals(DataType.ascii()) || dataType.equals(DataType.text())
            || dataType.equals(DataType.varchar())) {
        return row.getString(i);
    } else if (dataType.equals(DataType.cfloat())) {
        return row.getFloat(i);
    } else if (dataType.equals(DataType.cdouble())) {
        return row.getDouble(i);
    } else if (dataType.equals(DataType.timestamp())) {
        return row.getTimestamp(i);
    } else if (dataType.equals(DataType.date())) {
        return row.getDate(i);
    } else if (dataType.equals(DataType.time())) {
        return row.getTime(i);
    } else if (dataType.isCollection()) {
        List<DataType> typeArguments = dataType.getTypeArguments();
        if (typeArguments == null || typeArguments.size() == 0) {
            throw new IllegalArgumentException("Column[" + i + "] " + dataType.getName()
                    + " is a collection but no type arguments were specified!");
        }
        // Get the first type argument, to be used for lists and sets (and the first in a map)
        DataType firstArg = typeArguments.get(0);
        TypeCodec firstCodec = codecRegistry.codecFor(firstArg);
        if (dataType.equals(DataType.set(firstArg))) {
            return row.getSet(i, firstCodec.getJavaType());
        } else if (dataType.equals(DataType.list(firstArg))) {
            return row.getList(i, firstCodec.getJavaType());
        } else {
            // Must be an n-arg collection like map
            DataType secondArg = typeArguments.get(1);
            TypeCodec secondCodec = codecRegistry.codecFor(secondArg);
            if (dataType.equals(DataType.map(firstArg, secondArg))) {
                return row.getMap(i, firstCodec.getJavaType(), secondCodec.getJavaType());
            }
        }
    } else {
        // The different types that we support are numbers (int, long, double, float),
        // as well as boolean values and Strings. Since Avro doesn't provide
        // timestamp types, we want to convert those to Strings. So we will cast anything other
        // than numbers or booleans to strings by using the toString() method.
        return row.getObject(i).toString();
    }
    return null;
}
Example 14
Source File: EnqueuedMailsDaoUtil.java From james-project with Apache License 2.0 | 4 votes |
static EnqueuedItemWithSlicingContext toEnqueuedMail(Row row, BlobId.Factory blobFactory) {
    MailQueueName queueName = MailQueueName.fromString(row.getString(QUEUE_NAME));
    EnqueueId enqueueId = EnqueueId.of(row.getUUID(ENQUEUE_ID));
    Instant timeRangeStart = row.getTimestamp(TIME_RANGE_START).toInstant();
    BucketedSlices.BucketId bucketId = BucketedSlices.BucketId.of(row.getInt(BUCKET_ID));
    Instant enqueuedTime = row.getTimestamp(ENQUEUED_TIME).toInstant();
    BlobId headerBlobId = blobFactory.from(row.getString(HEADER_BLOB_ID));
    BlobId bodyBlobId = blobFactory.from(row.getString(BODY_BLOB_ID));
    MimeMessagePartsId mimeMessagePartsId = MimeMessagePartsId
        .builder()
        .headerBlobId(headerBlobId)
        .bodyBlobId(bodyBlobId)
        .build();

    MailAddress sender = Optional.ofNullable(row.getString(SENDER))
        .map(Throwing.function(MailAddress::new))
        .orElse(null);
    List<MailAddress> recipients = row.getList(RECIPIENTS, String.class)
        .stream()
        .map(Throwing.function(MailAddress::new))
        .collect(ImmutableList.toImmutableList());
    String state = row.getString(STATE);
    String remoteAddr = row.getString(REMOTE_ADDR);
    String remoteHost = row.getString(REMOTE_HOST);
    String errorMessage = row.getString(ERROR_MESSAGE);
    String name = row.getString(NAME);
    Date lastUpdated = row.getTimestamp(LAST_UPDATED);
    Map<String, ByteBuffer> rawAttributes = row.getMap(ATTRIBUTES, String.class, ByteBuffer.class);
    PerRecipientHeaders perRecipientHeaders = fromList(row.getList(PER_RECIPIENT_SPECIFIC_HEADERS, TupleValue.class));

    MailImpl mail = MailImpl.builder()
        .name(name)
        .sender(sender)
        .addRecipients(recipients)
        .lastUpdated(lastUpdated)
        .errorMessage(errorMessage)
        .remoteHost(remoteHost)
        .remoteAddr(remoteAddr)
        .state(state)
        .addAllHeadersForRecipients(perRecipientHeaders)
        .addAttributes(toAttributes(rawAttributes))
        .build();
    EnqueuedItem enqueuedItem = EnqueuedItem.builder()
        .enqueueId(enqueueId)
        .mailQueueName(queueName)
        .mail(mail)
        .enqueuedTime(enqueuedTime)
        .mimeMessagePartsId(mimeMessagePartsId)
        .build();

    return EnqueuedItemWithSlicingContext.builder()
        .enqueuedItem(enqueuedItem)
        .slicingContext(EnqueuedItemWithSlicingContext.SlicingContext.of(bucketId, timeRangeStart))
        .build();
}
Example 15
Source File: TraceDaoImpl.java From glowroot with Apache License 2.0 | 4 votes |
private static List<TracePoint> processPoints(ResultSet results, TracePointFilter filter,
        boolean partial, boolean errorPoints) throws IOException {
    List<TracePoint> tracePoints = new ArrayList<>();
    for (Row row : results) {
        int i = 0;
        String agentId = checkNotNull(row.getString(i++));
        String traceId = checkNotNull(row.getString(i++));
        long captureTime = checkNotNull(row.getTimestamp(i++)).getTime();
        if (partial) {
            // real_capture_time is only present for data written starting with 0.13.1
            Date realCaptureTime = row.getTimestamp(i++);
            if (realCaptureTime != null) {
                captureTime = realCaptureTime.getTime();
            }
        }
        long durationNanos = row.getLong(i++);
        boolean error = errorPoints || row.getBool(i++);
        // error points are defined by having an error message, so safe to checkNotNull
        String errorMessage = errorPoints ? checkNotNull(row.getString(i++)) : "";
        // headline is null for data inserted prior to 0.9.7
        String headline = Strings.nullToEmpty(row.getString(i++));
        String user = Strings.nullToEmpty(row.getString(i++));
        ByteBuffer attributeBytes = row.getBytes(i++);
        List<Trace.Attribute> attrs =
                Messages.parseDelimitedFrom(attributeBytes, Trace.Attribute.parser());
        Map<String, List<String>> attributes = attrs.stream().collect(
                Collectors.toMap(Trace.Attribute::getName, Trace.Attribute::getValueList));
        if (filter.matchesDuration(durationNanos)
                && filter.matchesHeadline(headline)
                && filter.matchesError(errorMessage)
                && filter.matchesUser(user)
                && filter.matchesAttributes(attributes)) {
            tracePoints.add(ImmutableTracePoint.builder()
                    .agentId(agentId)
                    .traceId(traceId)
                    .captureTime(captureTime)
                    .durationNanos(durationNanos)
                    .partial(partial)
                    .error(error)
                    .checkLiveTraces(false)
                    .build());
        }
    }
    return tracePoints;
}
Example 16
Source File: AbstractCassandraProcessor.java From nifi with Apache License 2.0 | 4 votes |
protected static Object getCassandraObject(Row row, int i, DataType dataType) {
    if (dataType.equals(DataType.blob())) {
        return row.getBytes(i);
    } else if (dataType.equals(DataType.varint()) || dataType.equals(DataType.decimal())) {
        // Avro can't handle BigDecimal and BigInteger as numbers - it will throw an
        // AvroRuntimeException such as: "Unknown datum type: java.math.BigDecimal: 38"
        return row.getObject(i).toString();
    } else if (dataType.equals(DataType.cboolean())) {
        return row.getBool(i);
    } else if (dataType.equals(DataType.cint())) {
        return row.getInt(i);
    } else if (dataType.equals(DataType.bigint()) || dataType.equals(DataType.counter())) {
        return row.getLong(i);
    } else if (dataType.equals(DataType.ascii()) || dataType.equals(DataType.text())
            || dataType.equals(DataType.varchar())) {
        return row.getString(i);
    } else if (dataType.equals(DataType.cfloat())) {
        return row.getFloat(i);
    } else if (dataType.equals(DataType.cdouble())) {
        return row.getDouble(i);
    } else if (dataType.equals(DataType.timestamp())) {
        return row.getTimestamp(i);
    } else if (dataType.equals(DataType.date())) {
        return row.getDate(i);
    } else if (dataType.equals(DataType.time())) {
        return row.getTime(i);
    } else if (dataType.isCollection()) {
        List<DataType> typeArguments = dataType.getTypeArguments();
        if (typeArguments == null || typeArguments.size() == 0) {
            throw new IllegalArgumentException("Column[" + i + "] " + dataType.getName()
                    + " is a collection but no type arguments were specified!");
        }
        // Get the first type argument, to be used for lists and sets (and the first in a map)
        DataType firstArg = typeArguments.get(0);
        TypeCodec firstCodec = codecRegistry.codecFor(firstArg);
        if (dataType.equals(DataType.set(firstArg))) {
            return row.getSet(i, firstCodec.getJavaType());
        } else if (dataType.equals(DataType.list(firstArg))) {
            return row.getList(i, firstCodec.getJavaType());
        } else {
            // Must be an n-arg collection like map
            DataType secondArg = typeArguments.get(1);
            TypeCodec secondCodec = codecRegistry.codecFor(secondArg);
            if (dataType.equals(DataType.map(firstArg, secondArg))) {
                return row.getMap(i, firstCodec.getJavaType(), secondCodec.getJavaType());
            }
        }
    } else {
        // The different types that we support are numbers (int, long, double, float),
        // as well as boolean values and Strings. Since Avro doesn't provide
        // timestamp types, we want to convert those to Strings. So we will cast anything other
        // than numbers or booleans to strings by using the toString() method.
        return row.getObject(i).toString();
    }
    return null;
}
Example 17
Source File: CassandraStorage.java From copper-engine with Apache License 2.0 | 4 votes |
private void resume(final String wfId, final HybridDBStorageAccessor internalStorageAccessor) throws Exception {
    logger.trace("resume(wfId={})", wfId);
    final ResultSet rs = session.execute(preparedStatements.get(CQL_SEL_WORKFLOW_INSTANCE).bind(wfId));
    final Row row = rs.one();
    if (row == null) {
        logger.warn("No workflow instance {} found - deleting row in COP_WFI_ID", wfId);
        session.executeAsync(preparedStatements.get(CQL_DEL_WFI_ID).bind(wfId));
        return;
    }
    final String ppoolId = row.getString("PPOOL_ID");
    final int prio = row.getInt("PRIO");
    final WaitMode waitMode = toWaitMode(row.getString("WAIT_MODE"));
    final Map<String, String> responseMap = toResponseMap(row.getString("RESPONSE_MAP_JSON"));
    final ProcessingState state = ProcessingState.valueOf(row.getString("STATE"));
    final Date timeout = row.getTimestamp("TIMEOUT");
    final boolean timeoutOccured = timeout != null && timeout.getTime() <= System.currentTimeMillis();

    if (state == ProcessingState.ERROR || state == ProcessingState.INVALID) {
        return;
    }

    if (state == ProcessingState.ENQUEUED) {
        internalStorageAccessor.enqueue(wfId, ppoolId, prio);
        return;
    }

    if (responseMap != null) {
        final List<String> missingResponseCorrelationIds = new ArrayList<String>();
        int numberOfAvailableResponses = 0;
        for (Entry<String, String> e : responseMap.entrySet()) {
            final String correlationId = e.getKey();
            final String response = e.getValue();
            internalStorageAccessor.registerCorrelationId(correlationId, wfId);
            if (response != null) {
                numberOfAvailableResponses++;
            } else {
                missingResponseCorrelationIds.add(correlationId);
            }
        }
        boolean modified = false;
        if (!missingResponseCorrelationIds.isEmpty()) {
            // check for early responses
            for (String cid : missingResponseCorrelationIds) {
                String earlyResponse = readEarlyResponse(cid);
                if (earlyResponse != null) {
                    responseMap.put(cid, earlyResponse);
                    numberOfAvailableResponses++;
                    modified = true;
                }
            }
        }
        if (modified || timeoutOccured) {
            final ProcessingState newState = (timeoutOccured
                    || numberOfAvailableResponses == responseMap.size()
                    || (numberOfAvailableResponses == 1 && waitMode == WaitMode.FIRST))
                            ? ProcessingState.ENQUEUED
                            : ProcessingState.WAITING;
            final String responseMapJson = jsonMapper.toJSON(responseMap);
            session.execute(preparedStatements.get(CQL_UPD_WORKFLOW_INSTANCE_STATE_AND_RESPONSE_MAP)
                    .bind(newState.name(), responseMapJson, wfId));
            if (newState == ProcessingState.ENQUEUED) {
                internalStorageAccessor.enqueue(wfId, ppoolId, prio);
            }
        }
    }
}