Example usage for com.google.common.collect Multimaps synchronizedMultimap

Introduction

On this page you can find example usage for com.google.common.collect Multimaps synchronizedMultimap.

Prototype

public static <K, V> Multimap<K, V> synchronizedMultimap(Multimap<K, V> multimap) 

Document

Returns a synchronized (thread-safe) multimap backed by the specified multimap.
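
The usage examples below all follow the same pattern: create a mutable multimap (usually a HashMultimap), wrap it with Multimaps.synchronizedMultimap, and share the wrapper between threads. A minimal, self-contained sketch of that pattern is shown here; the EventIndex class and its method names are illustrative only and are not taken from any of the projects below. Note that, per the Guava documentation, you must synchronize on the returned multimap yourself when iterating over any of its collection views.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;

public class EventIndex {
    // Individual calls (put, remove, get) are made thread-safe by the wrapper.
    private final Multimap<String, String> eventsByType =
            Multimaps.synchronizedMultimap(HashMultimap.<String, String>create());

    public void record(String type, String eventId) {
        eventsByType.put(type, eventId);
    }

    public void printEventsFor(String type) {
        // Iterating over a collection view is not atomic: hold the wrapper's
        // monitor for the whole traversal, as the Guava javadoc requires.
        synchronized (eventsByType) {
            for (String eventId : eventsByType.get(type)) {
                System.out.println(type + " -> " + eventId);
            }
        }
    }
}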

Usage

From source file:azkaban.app.Scheduler.java

public Scheduler(JobManager jobManager, FlowManager allKnownFlows, Mailman mailman, String jobSuccessEmail,
        String jobFailureEmail, ClassLoader classLoader, File scheduleFile, File backupScheduleFile,
        int numThreads) {
    this.allKnownFlows = allKnownFlows;
    Multimap<String, ScheduledJob> typedMultiMap = HashMultimap.create();

    _scheduleFile = scheduleFile;
    _scheduleBackupFile = backupScheduleFile;
    _jobManager = Utils.nonNull(jobManager);
    _mailman = mailman;
    _completed = Multimaps.synchronizedMultimap(typedMultiMap);
    _scheduled = new ConcurrentHashMap<String, ScheduledJob>();
    _executing = new ConcurrentHashMap<String, ScheduledJobAndInstance>();
    // _baseClassLoader = classLoader;
    _jobSuccessEmail = jobSuccessEmail;
    _jobFailureEmail = jobFailureEmail;
    _executor = new ScheduledThreadPoolExecutor(numThreads, new SchedulerThreadFactory());

    // Don't, by default, keep running scheduled tasks after shutdown.
    _executor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);

    loadSchedule();
}

From source file:me.lucko.luckperms.api.context.MutableContextSet.java

public MutableContextSet() {
    this.map = Multimaps.synchronizedMultimap(HashMultimap.create());
}

From source file:me.lucko.luckperms.api.context.MutableContextSet.java

private MutableContextSet(Multimap<String, String> contexts) {
    this.map = Multimaps.synchronizedMultimap(HashMultimap.create(contexts));
}

From source file:org.marketcetera.marketdata.SimulatedExchange.java

/**
 * Create a new <code>SimulatedExchange</code> instance.
 *
 * @param inName a <code>String</code> value containing the name to associate with the exchange
 * @param inCode a <code>String</code> value containing the exchange code of this exchange
 * @param inMaxBookDepth an <code>int</code> value containing the maximum depth to maintain for the order books.  This value
 *  must conform to the requirements established for {@link OrderBook#OrderBook(org.marketcetera.trade.Instrument,int)}.
 * @throws IllegalArgumentException if the given <code>inMaxBookDepth</code> does not correspond to a valid {@link OrderBook} maximum depth
 */
public SimulatedExchange(String inName, String inCode, int inMaxBookDepth) {
    if (inName == null || inCode == null) {
        throw new NullPointerException();
    }
    OrderBook.validateMaximumBookDepth(inMaxBookDepth);
    name = inName;
    code = inCode;
    maxDepth = inMaxBookDepth;
    Multimap<Instrument, FilteringSubscriber> unsynchronizedOptionChainSubscribers = HashMultimap.create();
    optionChainSubscribers = Multimaps.synchronizedMultimap(unsynchronizedOptionChainSubscribers);
    setStatus(Status.STOPPED);
}

From source file:org.artifactory.storage.db.fs.service.WatchesServiceImpl.java

private void lazyInitCacheIfNeeded() {
    if (!initialized) {
        synchronized (this) {
            if (!initialized) {
                if (watchersCache == null) {
                    watchersCache = HashMultimap.create();
                    watchersCache = Multimaps.synchronizedMultimap(watchersCache);
                }

                try {
                    //TODO: [by YS] consider using single query to get watch + repo path
                    List<Watch> nodeWatches = watchesDao.getWatches();
                    for (Watch nodeWatch : nodeWatches) {
                        RepoPath repoPath = fileService.loadItem(nodeWatch.getNodeId()).getRepoPath();
                        watchersCache.put(repoPath, nodeWatch);
                    }
                    initialized = true;
                } catch (SQLException e) {
                    throw new StorageException("Failed to load watches", e);
                }
            }
        }
    }
}

From source file:org.apache.hadoop.hbase.index.mapreduce.IndexLoadIncrementalHFile.java

/**
 * @return A Multimap<startkey, LoadQueueItem> that groups LQI by likely bulk load region targets.
 */
private Multimap<ByteBuffer, LoadQueueItem> groupOrSplitPhase(final HTable table, ExecutorService pool,
        Deque<LoadQueueItem> queue, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
    // <region start key, LQI> need synchronized only within this scope of this
    // phase because of the puts that happen in futures.
    Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
    final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);

    // drain LQIs and figure out bulk load groups
    Set<Future<List<LoadQueueItem>>> splittingFutures = new HashSet<Future<List<LoadQueueItem>>>();
    while (!queue.isEmpty()) {
        final LoadQueueItem item = queue.remove();

        final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
            public List<LoadQueueItem> call() throws Exception {
                List<LoadQueueItem> splits = groupOrSplit(regionGroups, item, table, startEndKeys);
                return splits;
            }
        };
        splittingFutures.add(pool.submit(call));
    }
    // get all the results. All grouping and splitting must finish before
    // we can attempt the atomic loads.
    for (Future<List<LoadQueueItem>> lqis : splittingFutures) {
        try {
            List<LoadQueueItem> splits = lqis.get();
            if (splits != null) {
                queue.addAll(splits);
            }
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                LOG.error("IOException during splitting", e1);
                throw (IOException) t; // would have been thrown if not parallelized,
            }
            LOG.error("Unexpected execution exception during splitting", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during splitting", e1);
            throw new IllegalStateException(e1);
        }
    }
    return regionGroups;
}

From source file:org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.java

/**
 * @return A Multimap<startkey, LoadQueueItem> that groups LQI by likely
 * bulk load region targets.
 */
private Multimap<ByteBuffer, LoadQueueItem> groupOrSplitPhase(final HTable table, ExecutorService pool,
        Deque<LoadQueueItem> queue, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
    // <region start key, LQI> need synchronized only within this scope of this
    // phase because of the puts that happen in futures.
    Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
    final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);

    // drain LQIs and figure out bulk load groups
    Set<Future<List<LoadQueueItem>>> splittingFutures = new HashSet<Future<List<LoadQueueItem>>>();
    while (!queue.isEmpty()) {
        final LoadQueueItem item = queue.remove();

        final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
            public List<LoadQueueItem> call() throws Exception {
                List<LoadQueueItem> splits = groupOrSplit(regionGroups, item, table, startEndKeys);
                return splits;
            }
        };
        splittingFutures.add(pool.submit(call));
    }
    // get all the results.  All grouping and splitting must finish before
    // we can attempt the atomic loads.
    for (Future<List<LoadQueueItem>> lqis : splittingFutures) {
        try {
            List<LoadQueueItem> splits = lqis.get();
            if (splits != null) {
                queue.addAll(splits);
            }
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                LOG.error("IOException during splitting", e1);
                throw (IOException) t; // would have been thrown if not parallelized,
            }
            LOG.error("Unexpected execution exception during splitting", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during splitting", e1);
            throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
        }
    }
    return regionGroups;
}