Example usage for java.util.concurrent ConcurrentSkipListMap ConcurrentSkipListMap()

Introduction

This page collects usage examples for the java.util.concurrent.ConcurrentSkipListMap no-argument constructor, ConcurrentSkipListMap().

Prototype

public ConcurrentSkipListMap() 

Document

Constructs a new, empty map, sorted according to the natural ordering of the keys.
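
Because no Comparator is supplied, the no-argument constructor orders entries by the keys' natural ordering. A minimal standalone sketch (class name and values are illustrative):

import java.util.concurrent.ConcurrentSkipListMap;

public class NaturalOrderingExample {
    public static void main(String[] args) {
        // Keys must implement Comparable; iteration follows ascending key order.
        ConcurrentSkipListMap<String, Integer> map = new ConcurrentSkipListMap<>();
        map.put("banana", 2);
        map.put("apple", 1);
        map.put("cherry", 3);

        map.forEach((k, v) -> System.out.println(k + " = " + v)); // apple, banana, cherry

        // Navigable operations are available as well.
        System.out.println(map.firstKey()); // apple
    }
}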

Usage

From source file:com.doctor.other.concurrent_hash_map_based_table.ConcurrentHashMapBasedTable.java

/**
 * Inserts a value into the cell addressed by (rowKey, columnKey, timesplice).
 *
 * @param rowKey
 *            the row key
 * @param columnKey
 *            the column key
 * @param timesplice
 *            the time-slice key, e.g. 201572701
 * @param value
 *            the value to add
 * @return true if the cell's value set did not already contain the value
 */
public boolean put(final String rowKey, final String columnKey, final String timesplice, final T value) {
    Preconditions.checkState(StringUtils.isNotBlank(rowKey), "rowKey is blank");
    Preconditions.checkState(StringUtils.isNotBlank(columnKey), "columnKey is blank");
    Preconditions.checkState(StringUtils.isNotBlank(timesplice), "timesplice is blank");
    Preconditions.checkNotNull(value, "value is null");

    ConcurrentHashMap<String, ConcurrentSkipListMap<String, ConcurrentSet<T>>> row = table.get(rowKey);
    if (row == null) {
        table.putIfAbsent(rowKey,
                new ConcurrentHashMap<String, ConcurrentSkipListMap<String, ConcurrentSet<T>>>());
    }

    row = table.get(rowKey);

    ConcurrentSkipListMap<String, ConcurrentSet<T>> column = row.get(columnKey);
    if (column == null) {
        row.putIfAbsent(columnKey, new ConcurrentSkipListMap<String, ConcurrentSet<T>>());
    }

    column = row.get(columnKey);

    ConcurrentSet<T> values = column.get(timesplice);
    if (values == null) {
        column.putIfAbsent(timesplice, new ConcurrentSet<>());
    }

    values = column.get(timesplice);
    return values.add(value);
}
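
On Java 8 and later, the check/putIfAbsent/re-get sequence above can be collapsed with ConcurrentMap.computeIfAbsent. A sketch of the same three-level insert, assuming the same table field, ConcurrentSet type, and type parameters as the class above (precondition checks omitted):

public boolean put(final String rowKey, final String columnKey, final String timesplice, final T value) {
    // computeIfAbsent atomically returns the existing nested structure or installs a new one.
    return table
            .computeIfAbsent(rowKey,
                    k -> new ConcurrentHashMap<String, ConcurrentSkipListMap<String, ConcurrentSet<T>>>())
            .computeIfAbsent(columnKey, k -> new ConcurrentSkipListMap<String, ConcurrentSet<T>>())
            .computeIfAbsent(timesplice, k -> new ConcurrentSet<T>())
            .add(value);
}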

From source file:org.jfrog.build.extractor.clientConfiguration.ArtifactoryClientConfiguration.java

public ArtifactoryClientConfiguration(Log log) {
    this.root = new PrefixPropertyHandler(log, new ConcurrentSkipListMap<String, String>());
    this.rootConfig = new PrefixPropertyHandler(root, BUILD_INFO_CONFIG_PREFIX);
    this.resolver = new ResolverHandler();
    this.publisher = new PublisherHandler();
    this.info = new BuildInfoHandler();
    this.proxy = new ProxyHandler();
}
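
Backing the property handler with a sorted map means all keys sharing a prefix occupy a contiguous range, so prefix-scoped views are cheap. The snippet below is an illustrative sketch of that idea, not part of the Artifactory API; the property keys are made up:

ConcurrentSkipListMap<String, String> props = new ConcurrentSkipListMap<>();
props.put("buildInfo.build.name", "demo");
props.put("buildInfo.build.number", "42");
props.put("publish.url", "http://repo.example.org");

// All keys beginning with "buildInfo." form one contiguous range in sorted order.
ConcurrentNavigableMap<String, String> buildInfo =
        props.subMap("buildInfo.", true, "buildInfo." + Character.MAX_VALUE, false);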

From source file:org.omnaest.utils.table.impl.TableIndexArbitraryImpl.java

/**
 * @see TableIndexArbitraryImpl
 * @param table
 * @param keyExtractor
 * @param comparator
 */
TableIndexArbitraryImpl(Table<E> table, KeyExtractor<K, RowDataReader<E>> keyExtractor,
        Comparator<K> comparator) {
    super();
    this.table = table;
    this.keyExtractor = keyExtractor;

    this.keyToRowSetMap = MapUtils.initializedSortedMap(new ConcurrentSkipListMap<K, Set<Row<E>>>(),
            new LinkedHashSetFactory<Row<E>>());

    this.rebuildIndexFully();

}

From source file:org.apache.synapse.commons.throttle.core.ThrottleContext.java

/**
 * Default constructor; expects a throttle configuration.
 *
 * @param throttleConfiguration - configuration data according to the policy
 */
public ThrottleContext(ThrottleConfiguration throttleConfiguration, ThrottleReplicator throttleReplicator) {
    if (throttleConfiguration == null) {
        throw new InstantiationError(
                "Couldn't create the throttle context from a null throttle configuration");
    }
    this.throttleReplicator = throttleReplicator;
    this.keyToTimeStampMap = new ConcurrentHashMap();
    this.callersMap = new ConcurrentSkipListMap();
    this.nextCleanTime = 0;
    this.throttleConfiguration = throttleConfiguration;
    this.debugOn = log.isDebugEnabled();
    this.throttleWindowReplicator = ThrottleContextFactory.getThrottleWindowReplicatorInstance();
    ThrottleContextFactory.getThrottleContextCleanupTaskInstance().addThrottleContext(this);
}
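
Note that the no-argument constructor used for callersMap relies on natural ordering: with no Comparator supplied, keys are compared via Comparable on insertion, and a non-Comparable key fails at runtime. A small illustration:

ConcurrentSkipListMap<Object, String> m = new ConcurrentSkipListMap<>();
m.put("a", "ok");            // fine: String implements Comparable
m.put(new Object(), "boom"); // throws ClassCastException when compared to "a"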

From source file:com.microsoft.wake.contrib.grouper.impl.CombiningSnowshovelGrouper.java

@Inject
public CombiningSnowshovelGrouper(Combiner<OutType, K, V> c, Partitioner<K> p, Extractor<InType, K, V> ext,
        @Parameter(StageConfiguration.StageObserver.class) Observer<Tuple<Integer, OutType>> o,
        @Parameter(StageConfiguration.NumberOfThreads.class) int outputThreads,
        @Parameter(StageConfiguration.StageName.class) String stageName,
        @Parameter(ContinuousStage.PeriodNS.class) long outputPeriod_ns) {
    super(stageName);

    this.c = c;
    this.p = p;
    this.ext = ext;
    this.o = o;
    this.outputHandler = new OutputImpl();
    // calling this.new on a @Unit's inner class without its own state is currently the same as Tang injecting it
    this.outputDriver = new ContinuousStage<Object>(outputHandler, outputThreads, stageName + "-output",
            outputPeriod_ns);
    this.doneHandler = ((ContinuousStage<Object>) outputDriver).getDoneHandler();
    register = new ConcurrentSkipListMap<>();
    inputDone = false;
    this.inputObserver = this.new InputImpl();

    this.sleeping = new AtomicInteger();

    // there is no dependence from input finish to output start
    // The alternative placement of this event is in the first call to onNext,
    // but Output onNext already provides blocking
    outputDriver.onNext(new GrouperEvent());

    startTime = prevAdjustedTime = System.nanoTime();
    prevCombinedCount = 0;
    combiningMeter = new Meter(stageName);
}
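
The register map above accumulates partial aggregates that the combiner folds together as tuples arrive. On Java 8 and later, ConcurrentMap.merge captures that pattern in a single per-key call; a generic sketch with placeholder key and value types, not Wake's actual types:

ConcurrentSkipListMap<Integer, Long> register = new ConcurrentSkipListMap<>();
int partition = 7;
long count = 1L;
// Combine the incoming value with any existing aggregate for this key.
register.merge(partition, count, Long::sum);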

From source file:opendap.aws.glacier.GlacierVaultManager.java

public GlacierVaultManager(String vaultName, File glacierRootDir) throws IOException {

    if (vaultName == null) {
        throw new IOException("Vault name was null!");
    }

    _name = vaultName;
    File vaultDir = mkDir(glacierRootDir, _name);

    _indexDirectory = mkDir(vaultDir, DefaultIndexDirectoryName);
    _archiveRecordsDirectory = mkDir(vaultDir, DefaultArchiveRecordsDirectoryName);
    _resourceCacheDirectory = mkDir(glacierRootDir, DefaultResourceCacheDirectoryName);

    _resourceIds = new ConcurrentHashMap<String, ResourceId>();
    _glacierRecords = new ConcurrentSkipListMap<ResourceId, GlacierArchive>();

    _indexObjects = new ConcurrentHashMap<String, Index>();

    _vaultInventory = new File(_resourceCacheDirectory, _name + "-INVENTORY.json");

    _max_records = DEFAULT_MAX_RECORDS_IN_MEMORY;
}

From source file:org.darkware.wpman.security.ChecksumDatabase.java

/**
 * Create a new {@code ChecksumDatabase} for files under the declared root and using the
 * given file for saved state.
 *
 * @param dbFile The file to load and store the database data into.
 * @param root  The highest level directory represented in the database.
 */
public ChecksumDatabase(final Path dbFile, final Path root) {
    super();

    this.root = root;
    this.dbFile = dbFile;
    this.hashes = new ConcurrentSkipListMap<>();
    this.suppressed = new ConcurrentSkipListSet<>();
    this.initialized = new AtomicBoolean(false);
    this.lock = new ReentrantReadWriteLock();

    this.initialize();
}

From source file:com.twitter.ambrose.hive.HiveDAGTransformer.java

/**
 * Constructs DAGNodes for each Hive MR task
 */
private void createNodeIdToDAGNode() {

    // creates DAGNodes: each node represents a MR job
    nodeIdToDAGNode = new ConcurrentSkipListMap<String, DAGNode<Job>>();
    for (Task<? extends Serializable> task : allTasks) {
        if (task.getWork() instanceof MapredWork) {
            DAGNode<Job> dagNode = asDAGNode(task);
            nodeIdToDAGNode.put(dagNode.getName(), dagNode);
        }
    }

    // get job dependencies
    Map<String, List<String>> nodeIdToDependencies = getNodeIdToDependencies();

    // wire DAGNodes
    for (Map.Entry<String, List<String>> entry : nodeIdToDependencies.entrySet()) {
        String nodeId = entry.getKey();
        List<String> successorIds = entry.getValue();
        DAGNode<Job> dagNode = nodeIdToDAGNode.get(nodeId);
        List<DAGNode<? extends Job>> dagSuccessors = new ArrayList<DAGNode<? extends Job>>(successorIds.size());

        for (String sId : successorIds) {
            DAGNode<Job> successor = nodeIdToDAGNode.get(sId);
            dagSuccessors.add(successor);
        }
        dagNode.setSuccessors(dagSuccessors);
    }
}

From source file:com.netflix.ice.basic.BasicReservationService.java

public BasicReservationService(ReservationPeriod term, ReservationUtilization defaultUtilization) {
    this.term = term;
    this.defaultUtilization = defaultUtilization;

    ec2InstanceReservationPrices = Maps.newHashMap();
    for (ReservationUtilization utilization : ReservationUtilization.values()) {
        ec2InstanceReservationPrices.put(utilization,
                new ConcurrentSkipListMap<Ec2InstanceReservationPrice.Key, Ec2InstanceReservationPrice>());
    }

    reservations = Maps.newHashMap();
    for (ReservationUtilization utilization : ReservationUtilization.values()) {
        reservations.put(utilization, Maps.<TagGroup, List<Reservation>>newHashMap());
    }
}

From source file:com.twitter.ambrose.hive.reporter.AmbroseHiveProgressReporter.java

public void reset() {
    init();
    nodeIdToDAGNode = new ConcurrentSkipListMap<String, DAGNode<Job>>();
    sendDagNodeNameMap(null, nodeIdToDAGNode);
    resetAdditionals(); //TODO order?
}