Java Code Examples for com.netflix.astyanax.model.Row#getKey()
The following examples show how to use com.netflix.astyanax.model.Row#getKey(). You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.
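Before the individual examples, here is a minimal sketch of the pattern most of them share: query a column family, iterate the resulting rows, and read each row's key with Row#getKey(). The keyspace and column family names below are hypothetical placeholders (not taken from any of the projects that follow), and the usual Astyanax imports (com.netflix.astyanax.*) are assumed.

public List<String> listRowKeys(Keyspace keyspace, ColumnFamily<String, String> cfExample)
        throws ConnectionException {
    // Page through every row, fetching at most one column per row since
    // only the row keys are of interest here.
    Rows<String, String> rows = keyspace.prepareQuery(cfExample)
            .getAllRows()
            .setRowLimit(100)  // fetch rows in pages of 100
            .withColumnRange(new RangeBuilder().setLimit(1).build())
            .execute()
            .getResult();
    List<String> keys = new ArrayList<String>();
    for (Row<String, String> row : rows) {
        keys.add(row.getKey());  // Row#getKey() returns the row key, a String here
    }
    return keys;
}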
Example 1
Source File: AstyanaxQueueDAO.java From emodb with Apache License 2.0 | 6 votes
@Override
public Iterator<String> listQueues() {
    final Iterator<Row<String, UUID>> rowIter = execute(
            _keyspace.prepareQuery(CF_DEDUP_MD, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getAllRows()
                    .setRowLimit(100)
                    .withColumnRange(new RangeBuilder().setLimit(1).build()))
            .iterator();
    return new AbstractIterator<String>() {
        @Override
        protected String computeNext() {
            while (rowIter.hasNext()) {
                Row<String, UUID> row = rowIter.next();
                if (!row.getColumns().isEmpty()) {
                    return row.getKey();
                }
            }
            return endOfData();
        }
    };
}
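A note on the empty-row check above: a Cassandra range scan can return "range ghosts", i.e. keys for rows whose columns have all been deleted, so a row that comes back with no columns is skipped rather than reported as a live queue. The withColumnRange(new RangeBuilder().setLimit(1).build()) clause keeps the scan cheap by fetching at most one column per row, which is all the liveness check needs.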
Example 2
Source File: AstyanaxQueueDAO.java From emodb with Apache License 2.0 | 6 votes
@Override
public Map<UUID, ByteBuffer> findMaxRecords(Collection<UUID> dataIds) {
    // Finding the max using a reversed column range shouldn't have to worry about skipping tombstones since
    // we always delete smaller column values before deleting larger column values--scanning will hit the max
    // before needing to skip over tombstones.
    Map<UUID, ByteBuffer> resultMap = Maps.newHashMap();
    for (List<UUID> batch : Iterables.partition(dataIds, 10)) {
        Rows<UUID, ByteBuffer> rows = execute(
                _keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
                        .getKeySlice(batch)
                        .withColumnRange(new RangeBuilder()
                                .setReversed(true)
                                .setLimit(1)
                                .build()));
        for (Row<UUID, ByteBuffer> row : rows) {
            UUID dataId = row.getKey();
            for (Column<ByteBuffer> column : row.getColumns()) {
                resultMap.put(dataId, column.getName());
            }
        }
    }
    return resultMap;
}
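Because Cassandra stores the columns of a row in sorted order, the reversed range with setLimit(1) returns only the greatest column of each row. The inner loop therefore executes at most once per row, and the map ends up holding each row key (from Row#getKey()) mapped to that row's maximum column name.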
Example 3
Source File: AstyanaxEventReaderDAO.java From emodb with Apache License 2.0 | 6 votes
@Override
public Iterator<String> listChannels() {
    final Iterator<Row<String, ByteBuffer>> rowIter = execute(
            _keyspace.prepareQuery(ColumnFamilies.MANIFEST, ConsistencyLevel.CL_LOCAL_QUORUM)
                    .getAllRows()
                    .setRowLimit(1000)
                    .withColumnRange(new RangeBuilder().setLimit(1).build()))
            .iterator();
    return new AbstractIterator<String>() {
        @Override
        protected String computeNext() {
            while (rowIter.hasNext()) {
                Row<String, ByteBuffer> row = rowIter.next();
                if (!row.getColumns().isEmpty()) {
                    return row.getKey();
                }
            }
            return endOfData();
        }
    };
}
Example 4
Source File: CassandraArchiveRepository.java From Nicobar with Apache License 2.0 | 6 votes
/**
 * Get the last update times of all of the script archives managed by this Repository.
 * @return map of moduleId to last update time
 */
@Override
public Map<ModuleId, Long> getArchiveUpdateTimes() throws IOException {
    Iterable<Row<String, String>> rows;
    try {
        rows = getRows((EnumSet<?>) EnumSet.of(Columns.module_id, Columns.last_update));
    } catch (Exception e) {
        throw new IOException(e);
    }
    Map<ModuleId, Long> updateTimes = new LinkedHashMap<ModuleId, Long>();
    for (Row<String, String> row : rows) {
        String moduleId = row.getKey();
        Column<String> lastUpdateColumn = row.getColumns().getColumnByName(Columns.last_update.name());
        Long updateTime = lastUpdateColumn != null ? lastUpdateColumn.getLongValue() : null;
        if (StringUtils.isNotBlank(moduleId) && updateTime != null) {
            updateTimes.put(ModuleId.fromString(moduleId), updateTime);
        }
    }
    return updateTimes;
}
Example 5
Source File: CassandraArchiveRepository.java From Nicobar with Apache License 2.0 | 6 votes
/**
 * Get a summary of all archives in this Repository
 * @return List of summaries
 */
@Override
public List<ArchiveSummary> getArchiveSummaries() throws IOException {
    List<ArchiveSummary> summaries = new LinkedList<ArchiveSummary>();
    Iterable<Row<String, String>> rows;
    try {
        rows = getRows((EnumSet<?>) EnumSet.of(Columns.module_id, Columns.last_update, Columns.module_spec));
    } catch (Exception e) {
        throw new IOException(e);
    }
    for (Row<String, String> row : rows) {
        String moduleId = row.getKey();
        ColumnList<String> columns = row.getColumns();
        Column<String> lastUpdateColumn = columns.getColumnByName(Columns.last_update.name());
        long updateTime = lastUpdateColumn != null ? lastUpdateColumn.getLongValue() : 0;
        ScriptModuleSpec moduleSpec = getModuleSpec(columns);
        ArchiveSummary summary = new ArchiveSummary(ModuleId.fromString(moduleId), moduleSpec, updateTime, null);
        summaries.add(summary);
    }
    return summaries;
}
Example 6
Source File: AstyanaxStorageProvider.java From emodb with Apache License 2.0 | 5 votes
private static Iterator<Map.Entry<String, StorageSummary>> decodeMetadataRows(
        final Iterator<Row<ByteBuffer, Composite>> rowIter, final AstyanaxTable table) {
    return new AbstractIterator<Map.Entry<String, StorageSummary>>() {
        @Override
        protected Map.Entry<String, StorageSummary> computeNext() {
            while (rowIter.hasNext()) {
                Row<ByteBuffer, Composite> row = rowIter.next();
                ByteBuffer key = row.getKey();
                ColumnList<Composite> columns = row.getColumns();

                String blobId = AstyanaxStorage.getContentKey(key);
                StorageSummary summary = toStorageSummary(columns);
                if (summary == null) {
                    continue;  // Partial blob, parts may still be replicating.
                }

                // TODO should be removed for blob s3 migration
                // Cleanup older versions of the blob, if any (unlikely).
                deleteDataColumns(table, blobId, columns, ConsistencyLevel.CL_ANY, summary.getTimestamp());

                return Maps.immutableEntry(blobId, summary);
            }
            return endOfData();
        }
    };
}
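Unlike the String- and UUID-keyed examples above, this column family is keyed by raw ByteBuffers, so Row#getKey() returns the undecoded key and the surrounding code (here AstyanaxStorage.getContentKey) is responsible for translating it back into an application-level blob id.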
Example 7
Source File: CassandraArchiveRepository.java From Nicobar with Apache License 2.0 | 4 votes
/**
 * Get all of the {@link ScriptArchive}s for the given set of moduleIds. Will perform the operation in batches
 * as specified by {@link CassandraArchiveRepositoryConfig#getArchiveFetchBatchSize()} and outputs the jar files in
 * the path specified by {@link CassandraArchiveRepositoryConfig#getArchiveOutputDirectory()}.
 *
 * @param moduleIds keys to search for
 * @return set of ScriptArchives retrieved from the database
 */
@Override
public Set<ScriptArchive> getScriptArchives(Set<ModuleId> moduleIds) throws IOException {
    Set<ScriptArchive> archives = new LinkedHashSet<ScriptArchive>(moduleIds.size() * 2);
    Path archiveOuputDir = getConfig().getArchiveOutputDirectory();
    List<ModuleId> moduleIdList = new LinkedList<ModuleId>(moduleIds);
    int batchSize = getConfig().getArchiveFetchBatchSize();
    int start = 0;
    try {
        while (start < moduleIdList.size()) {
            int end = Math.min(moduleIdList.size(), start + batchSize);
            List<ModuleId> batchModuleIds = moduleIdList.subList(start, end);
            List<String> rowKeys = new ArrayList<String>(batchModuleIds.size());
            for (ModuleId batchModuleId : batchModuleIds) {
                rowKeys.add(batchModuleId.toString());
            }
            Rows<String, String> rows = cassandra.getRows(rowKeys.toArray(new String[0]));
            for (Row<String, String> row : rows) {
                String moduleId = row.getKey();
                ColumnList<String> columns = row.getColumns();
                Column<String> lastUpdateColumn = columns.getColumnByName(Columns.last_update.name());
                Column<String> hashColumn = columns.getColumnByName(Columns.archive_content_hash.name());
                Column<String> contentColumn = columns.getColumnByName(Columns.archive_content.name());
                if (lastUpdateColumn == null || hashColumn == null || contentColumn == null) {
                    continue;
                }
                ScriptModuleSpec moduleSpec = getModuleSpec(columns);
                long lastUpdateTime = lastUpdateColumn.getLongValue();
                byte[] hash = hashColumn.getByteArrayValue();
                byte[] content = contentColumn.getByteArrayValue();

                // verify the hash
                if (hash != null && hash.length > 0 && !verifyHash(hash, content)) {
                    logger.warn("Content hash validation failed for moduleId {}. size: {}", moduleId, content.length);
                    continue;
                }

                String fileName = new StringBuilder().append(moduleId).append("-").append(lastUpdateTime).append(".jar").toString();
                Path jarFile = archiveOuputDir.resolve(fileName);
                Files.write(jarFile, content);
                JarScriptArchive scriptArchive = new JarScriptArchive.Builder(jarFile)
                        .setModuleSpec(moduleSpec)
                        .setCreateTime(lastUpdateTime)
                        .build();
                archives.add(scriptArchive);
            }
            start = end;
        }
    } catch (Exception e) {
        throw new IOException(e);
    }
    return archives;
}