Example usage for com.google.common.collect Maps newConcurrentMap

Introduction

This page collects example usages of com.google.common.collect.Maps.newConcurrentMap() from open-source projects.

Prototype

public static <K, V> ConcurrentMap<K, V> newConcurrentMap() 

Document

Returns a general-purpose instance of ConcurrentMap, which supports all optional operations of the ConcurrentMap interface.

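A minimal, self-contained sketch of the typical pattern is shown below; the class and field names (RequestCounter, counts) are hypothetical and not taken from the projects listed under Usage. The map returned by Maps.newConcurrentMap() starts out empty, does not permit null keys or values, and in current Guava versions is backed by a ConcurrentHashMap, so it can be shared across threads without external locking.

import com.google.common.collect.Maps;

import java.util.concurrent.ConcurrentMap;

public class RequestCounter {

    // Thread-safe map; individual get/put/putIfAbsent/replace calls
    // need no external synchronization.
    private final ConcurrentMap<String, Integer> counts = Maps.newConcurrentMap();

    public void record(String key) {
        // Classic ConcurrentMap read-modify-write loop: retry until the
        // conditional update succeeds.
        while (true) {
            Integer current = counts.get(key);
            if (current == null) {
                if (counts.putIfAbsent(key, 1) == null) {
                    return;
                }
            } else if (counts.replace(key, current, current + 1)) {
                return;
            }
        }
    }

    public int count(String key) {
        Integer value = counts.get(key);
        return value == null ? 0 : value;
    }
}

In most of the examples that follow, the result of Maps.newConcurrentMap() is simply assigned to a Map or ConcurrentMap field, usually in a constructor, and then used as a thread-safe cache or registry.
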
Usage

From source file:interactivespaces.util.data.dynamic.InterfaceMap.java

/**
 * Create a new instance of the given class with an empty backing map.
 *
 * @param interfaceClass
 *          class of object to create
 * @param <T>
 *          type of object
 *
 * @return new dynamic object instance with empty backing map
 */
public static <T> T createInstance(Class<T> interfaceClass) {
    Map<String, Object> backingMap = Maps.newConcurrentMap();
    return createInstance(interfaceClass, backingMap);
}

From source file:org.glowroot.agent.plugin.servlet.ResponseHeaderComponent.java

synchronized void addHeader(String name, Object value) {
    if (responseHeaders == null) {
        responseHeaders = Maps.newConcurrentMap();
    }
    String nameUpper = name.toUpperCase(Locale.ENGLISH);
    ResponseHeader responseHeader = responseHeaders.get(nameUpper);
    if (responseHeader == null) {
        responseHeaders.put(nameUpper, new ResponseHeader(name, value));
    } else {
        responseHeader.addValue(value);
    }
}

From source file:org.onosproject.reactive.routing.ReactiveRoutingFib.java

/**
 * Class constructor.
 *
 * @param appId application ID to use to generate intents
 * @param hostService host service
 * @param configService routing configuration service
 * @param interfaceService interface service
 * @param intentSynchronizer intent synchronization service
 */
public ReactiveRoutingFib(ApplicationId appId, HostService hostService,
        RoutingConfigurationService configService, InterfaceService interfaceService,
        IntentSynchronizationService intentSynchronizer) {
    this.appId = appId;
    this.hostService = hostService;
    this.configService = configService;
    this.interfaceService = interfaceService;
    this.intentSynchronizer = intentSynchronizer;

    routeIntents = Maps.newConcurrentMap();
}

From source file:org.zenoss.zep.index.impl.MultiBackendEventIndexDao.java

public MultiBackendEventIndexDao(String name, EventSummaryBaseDao eventDao, WorkQueueBuilder queueBuilder,
        KeyValueStore store, Messages messages, TaskScheduler scheduler, UUIDGenerator uuidGenerator) {
    super(name, messages, scheduler, uuidGenerator);
    this.store = store;
    this.eventDao = eventDao;
    this.queueBuilder = queueBuilder;
    backends = Maps.newLinkedHashMap();
    workQueues = Maps.newConcurrentMap();
    workers = Maps.newConcurrentMap();
    rebuilders = Maps.newConcurrentMap();
    initialBackendConfigurations = Lists.newArrayList();
}

From source file:org.apache.giraph.partition.SimplePartition.java

@Override
public void readFields(DataInput input) throws IOException {
    super.readFields(input);
    if (USE_OUT_OF_CORE_MESSAGES.get(getConf())) {
        vertexMap = new ConcurrentSkipListMap<I, Vertex<I, V, E, M>>();
    } else {
        vertexMap = Maps.newConcurrentMap();
    }
    int vertices = input.readInt();
    for (int i = 0; i < vertices; ++i) {
        progress();
        Vertex<I, V, E, M> vertex = WritableUtils.readVertexFromDataInput(input, getConf());
        if (vertexMap.put(vertex.getId(), vertex) != null) {
            throw new IllegalStateException("readFields: " + this + " already has same id " + vertex);
        }
    }
}

From source file:org.eclipse.milo.opcua.stack.core.application.DirectoryCertificateValidator.java

public DirectoryCertificateValidator(File baseDir) throws IOException {
    this.baseDir = baseDir;
    ensureDirectoryExists(baseDir);

    issuerDir = baseDir.toPath().resolve("issuers").toFile();
    ensureDirectoryExists(issuerDir);

    issuerCertsDir = issuerDir.toPath().resolve("certs").toFile();
    ensureDirectoryExists(issuerCertsDir);

    issuerCrlsDir = issuerDir.toPath().resolve("crls").toFile();
    ensureDirectoryExists(issuerCrlsDir);

    trustedDir = baseDir.toPath().resolve("trusted").toFile();
    ensureDirectoryExists(trustedDir);

    trustedCertsDir = trustedDir.toPath().resolve("certs").toFile();
    ensureDirectoryExists(trustedCertsDir);

    rejectedDir = baseDir.toPath().resolve("rejected").toFile();
    ensureDirectoryExists(rejectedDir);

    watchService = FileSystems.getDefault().newWatchService();

    Map<WatchKey, Runnable> watchKeys = Maps.newConcurrentMap();

    watchKeys.put(
            issuerCertsDir.toPath().register(watchService, StandardWatchEventKinds.ENTRY_CREATE,
                    StandardWatchEventKinds.ENTRY_DELETE, StandardWatchEventKinds.ENTRY_MODIFY),
            this::synchronizeIssuerCerts);
    watchKeys.put(
            issuerCrlsDir.toPath().register(watchService, StandardWatchEventKinds.ENTRY_CREATE,
                    StandardWatchEventKinds.ENTRY_DELETE, StandardWatchEventKinds.ENTRY_MODIFY),
            this::synchronizeIssuerCrls);
    watchKeys.put(
            trustedCertsDir.toPath().register(watchService, StandardWatchEventKinds.ENTRY_CREATE,
                    StandardWatchEventKinds.ENTRY_DELETE, StandardWatchEventKinds.ENTRY_MODIFY),
            this::synchronizeTrustedCerts);

    watchThread = new Thread(new Watcher(watchService, watchKeys));
    watchThread.setName("certificate-store-watcher");
    watchThread.setDaemon(true);
    watchThread.start();

    synchronizeIssuerCerts();
    synchronizeIssuerCrls();
    synchronizeTrustedCerts();
}

From source file:com.dtstack.jlogstash.classloader.JarClassLoader.java

private Map<String, URL[]> getClassLoadJarUrls(File dir) throws MalformedURLException, IOException {
    String dirName = dir.getName();
    Map<String, URL[]> jurls = Maps.newConcurrentMap();
    File[] files = dir.listFiles();
    if (files != null && files.length > 0) {
        for (File f : files) {
            String jarName = f.getName();
            if (f.isFile() && jarName.endsWith(".jar")) {
                jarName = jarName.split("-")[0].toLowerCase();
                String[] jns = jarName.split("\\.");
                jurls.put(String.format("%s:%s", dirName, jns.length == 0 ? jarName : jns[jns.length - 1]),
                        new URL[] { f.toURI().toURL() });
            }
        }
    }
    return jurls;
}

From source file:org.apache.hive.spark.client.RemoteDriver.java

private RemoteDriver(String[] args) throws Exception {
    this.activeJobs = Maps.newConcurrentMap();
    this.jcLock = new Object();
    this.shutdownLock = new Object();
    localTmpDir = Files.createTempDir();

    SparkConf conf = new SparkConf();
    String serverAddress = null;
    int serverPort = -1;
    for (int idx = 0; idx < args.length; idx += 2) {
        String key = args[idx];
        if (key.equals("--remote-host")) {
            serverAddress = getArg(args, idx);
        } else if (key.equals("--remote-port")) {
            serverPort = Integer.parseInt(getArg(args, idx));
        } else if (key.equals("--client-id")) {
            conf.set(SparkClientFactory.CONF_CLIENT_ID, getArg(args, idx));
        } else if (key.equals("--secret")) {
            conf.set(SparkClientFactory.CONF_KEY_SECRET, getArg(args, idx));
        } else if (key.equals("--conf")) {
            String[] val = getArg(args, idx).split("[=]", 2);
            conf.set(val[0], val[1]);
        } else {
            throw new IllegalArgumentException("Invalid command line: " + Joiner.on(" ").join(args));
        }
    }

    executor = Executors.newCachedThreadPool();

    LOG.info("Connecting to: {}:{}", serverAddress, serverPort);

    Map<String, String> mapConf = Maps.newHashMap();
    for (Tuple2<String, String> e : conf.getAll()) {
        mapConf.put(e._1(), e._2());
        LOG.debug("Remote Driver configured with: " + e._1() + "=" + e._2());
    }

    String clientId = mapConf.get(SparkClientFactory.CONF_CLIENT_ID);
    Preconditions.checkArgument(clientId != null, "No client ID provided.");
    String secret = mapConf.get(SparkClientFactory.CONF_KEY_SECRET);
    Preconditions.checkArgument(secret != null, "No secret provided.");

    int threadCount = new RpcConfiguration(mapConf).getRpcThreadCount();
    this.egroup = new NioEventLoopGroup(threadCount,
            new ThreadFactoryBuilder().setNameFormat("Driver-RPC-Handler-%d").setDaemon(true).build());
    this.protocol = new DriverProtocol();

    // The RPC library takes care of timing out this.
    this.clientRpc = Rpc.createClient(mapConf, egroup, serverAddress, serverPort, clientId, secret, protocol)
            .get();
    this.running = true;

    this.clientRpc.addListener(new Rpc.Listener() {
        @Override
        public void rpcClosed(Rpc rpc) {
            LOG.warn("Shutting down driver because RPC channel was closed.");
            shutdown(null);
        }
    });

    try {
        JavaSparkContext sc = new JavaSparkContext(conf);
        sc.sc().addSparkListener(new ClientListener());
        synchronized (jcLock) {
            jc = new JobContextImpl(sc, localTmpDir);
            jcLock.notifyAll();
        }
    } catch (Exception e) {
        LOG.error("Failed to start SparkContext: " + e, e);
        shutdown(e);
        synchronized (jcLock) {
            jcLock.notifyAll();
        }
        throw e;
    }

    synchronized (jcLock) {
        for (Iterator<JobWrapper<?>> it = jobQueue.iterator(); it.hasNext();) {
            it.next().submit();
        }
    }
}

From source file:oculus.aperture.cms.couchdb.CouchDbCmsService.java

CouchDbCmsService() {
    dbClientsByDbName = Maps.newConcurrentMap();
}

From source file:com.google.gerrit.server.plugins.PluginLoader.java

@Inject
public PluginLoader(SitePaths sitePaths, PluginGuiceEnvironment pe, ServerInformationImpl sii,
        PluginUser.Factory puf, Provider<PluginCleanerTask> pct, @GerritServerConfig Config cfg) {
    pluginsDir = sitePaths.plugins_dir;
    dataDir = sitePaths.data_dir;
    tmpDir = sitePaths.tmp_dir;
    env = pe;
    srvInfoImpl = sii;
    pluginUserFactory = puf;
    running = Maps.newConcurrentMap();
    disabled = Maps.newConcurrentMap();
    broken = Maps.newHashMap();
    toCleanup = Queues.newArrayDeque();
    cleanupHandles = Maps.newConcurrentMap();
    cleaner = pct;

    long checkFrequency = ConfigUtil.getTimeUnit(cfg, "plugins", null, "checkFrequency",
            TimeUnit.MINUTES.toMillis(1), TimeUnit.MILLISECONDS);
    if (checkFrequency > 0) {
        scanner = new PluginScannerThread(this, checkFrequency);
    } else {
        scanner = null;
    }
}