Example usage for com.google.common.collect Maps newConcurrentMap

Introduction

On this page you can find example usages of com.google.common.collect.Maps#newConcurrentMap.

Prototype

public static <K, V> ConcurrentMap<K, V> newConcurrentMap() 

Document

Returns a general-purpose instance of ConcurrentMap, which supports all optional operations of the ConcurrentMap interface.
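
A minimal, self-contained sketch of typical usage follows (the class name NewConcurrentMapExample and the sample keys are illustrative only): the factory returns an empty, general-purpose ConcurrentMap, so the atomic operations of the ConcurrentMap interface can be used directly.

import java.util.concurrent.ConcurrentMap;

import com.google.common.collect.Maps;

public class NewConcurrentMapExample {
    public static void main(String[] args) {
        // Key and value types are inferred from the assignment target.
        ConcurrentMap<String, Integer> hits = Maps.newConcurrentMap();

        // Atomic ConcurrentMap operations are supported.
        hits.putIfAbsent("home", 0);
        hits.computeIfPresent("home", (page, count) -> count + 1);
        hits.merge("about", 1, Integer::sum);

        System.out.println(hits); // e.g. {about=1, home=1}
    }
}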

Usage

From source file: ratpack.sep.exec.InvokeWithRetry.java

public Promise<ActionResults<O>> apply(ExecControl execControl, Action<T, O> action, Integer retryCount)
        throws Exception {

    return execControl.<Map<String, ActionResult<O>>>promise(fulfiller -> {
        AtomicInteger repeatCounter = new AtomicInteger(retryCount + 1);
        Map<String, ActionResult<O>> results = Maps.newConcurrentMap();
        applyWithRetry(execControl, fulfiller, action, results, repeatCounter);
    }).map(ImmutableMap::copyOf).map(map -> new ActionResults<O>(map));
}

From source file: com.olacabs.fabric.compute.source.PipelineStreamSource.java

public void initialize(Properties globalProperties) throws Exception {
    final Integer count = PropertyReader.readInt(this.properties, globalProperties,
            "computation.eventset.in_flight_count", 5);
    jsonConversion = PropertyReader.readBoolean(this.properties, globalProperties,
            "computation.eventset.is_serialized", true);
    delivered = new LinkedBlockingQueue<>(count);
    messages = Maps.newConcurrentMap();
    source.initialize(instanceId, globalProperties, properties, processingContext, sourceMetadata);
    transactionIdGenerator.seed(seedTransactionId());
    this.notificationBus.source(this);
}

From source file: com.continuuity.weave.discovery.ZKDiscoveryService.java

/**
 * Constructs ZKDiscoveryService using the provided ZooKeeper client for storing the service registry under a namespace.
 * @param zkClient client of the ZooKeeper quorum
 * @param namespace under which the registered services will be stored in ZooKeeper.
 *                  If namespace is {@code null}, no namespace will be used.
 */
public ZKDiscoveryService(ZKClient zkClient, String namespace) {
    this.zkClient = namespace == null ? zkClient : ZKClients.namespace(zkClient, namespace);
    services = new AtomicReference<Multimap<String, Discoverable>>(HashMultimap.<String, Discoverable>create());
    serviceWatched = Maps.newConcurrentMap();
}

From source file: com.cloudera.livy.rsc.rpc.RpcServer.java

public RpcServer(RSCConf lconf) throws IOException, InterruptedException {
    this.config = lconf;
    this.group = new NioEventLoopGroup(this.config.getInt(RPC_MAX_THREADS),
            new ThreadFactoryBuilder().setNameFormat("RPC-Handler-%d").setDaemon(true).build());
    this.channel = new ServerBootstrap().group(group).channel(NioServerSocketChannel.class)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    SaslServerHandler saslHandler = new SaslServerHandler(config);
                    final Rpc newRpc = Rpc.createServer(saslHandler, config, ch, group);
                    saslHandler.rpc = newRpc;

                    Runnable cancelTask = new Runnable() {
                        @Override
                        public void run() {
                            LOG.warn("Timed out waiting for hello from client.");
                            newRpc.close();
                        }
                    };
                    saslHandler.cancelTask = group.schedule(cancelTask,
                            config.getTimeAsMs(RPC_CLIENT_HANDSHAKE_TIMEOUT), TimeUnit.MILLISECONDS);
                }
            }).option(ChannelOption.SO_BACKLOG, 1).option(ChannelOption.SO_REUSEADDR, true)
            .childOption(ChannelOption.SO_KEEPALIVE, true).bind(0).sync().channel();
    this.port = ((InetSocketAddress) channel.localAddress()).getPort();
    this.pendingClients = Maps.newConcurrentMap();

    String address = config.get(RPC_SERVER_ADDRESS);
    if (address == null) {
        address = config.findLocalAddress();
    }
    this.address = address;
}

From source file: org.apache.hive.spark.client.AbstractSparkClient.java

protected AbstractSparkClient(RpcServer rpcServer, Map<String, String> conf, HiveConf hiveConf,
        String sessionid) throws IOException {
    this.conf = conf;
    this.hiveConf = hiveConf;
    this.jobs = Maps.newConcurrentMap();

    String secret = rpcServer.createSecret();
    this.driverFuture = startDriver(rpcServer, sessionid, secret);
    this.protocol = new ClientProtocol();

    try {
        // The RPC server will take care of timeouts here.
        this.driverRpc = rpcServer.registerClient(sessionid, secret, protocol).get();
    } catch (Throwable e) {
        String errorMsg;
        if (e.getCause() instanceof TimeoutException) {
            errorMsg = "Timed out waiting for Remote Spark Driver to connect to HiveServer2.\nPossible reasons "
                    + "include network issues, errors in remote driver, cluster has no available resources, etc."
                    + "\nPlease check YARN or Spark driver's logs for further information.";
        } else if (e.getCause() instanceof InterruptedException) {
            errorMsg = "Interrupted while waiting for Remote Spark Driver to connect to HiveServer2.\nIt is possible "
                    + "that the query was cancelled which would cause the Spark Session to close.";
        } else {
            errorMsg = "Error while waiting for Remote Spark Driver to connect back to HiveServer2.";
        }
        if (driverFuture.isDone()) {
            try {
                driverFuture.get();
            } catch (InterruptedException ie) {
                // Give up.
                LOG.warn("Interrupted before driver thread was finished.", ie);
            } catch (ExecutionException ee) {
                LOG.error("Driver thread failed", ee);
            }
        } else {
            driverFuture.cancel(true);
        }
        throw new RuntimeException(errorMsg, e);
    }

    LOG.info("Successfully connected to Remote Spark Driver at: " + this.driverRpc.getRemoteAddress());

    driverRpc.addListener(new Rpc.Listener() {
        @Override
        public void rpcClosed(Rpc rpc) {
            if (isAlive) {
                LOG.warn("Connection to Remote Spark Driver {} closed unexpectedly",
                        driverRpc.getRemoteAddress());
                isAlive = false;
            }
        }

        @Override
        public String toString() {
            return "Connection to Remote Spark Driver Closed Unexpectedly";
        }
    });
    isAlive = true;
}

From source file: net.myrrix.web.servlets.AbstractMyrrixServlet.java

@Override
public void init(ServletConfig config) throws ServletException {
    super.init(config);

    ServletContext context = config.getServletContext();
    recommender = (MyrrixRecommender) context.getAttribute(RECOMMENDER_KEY);
    rescorerProvider = (RescorerProvider) context.getAttribute(RESCORER_PROVIDER_KEY);

    @SuppressWarnings("unchecked")
    ReloadingReference<List<List<HostAndPort>>> theAllPartitions = (ReloadingReference<List<List<HostAndPort>>>) context
            .getAttribute(ALL_PARTITIONS_REF_KEY);
    allPartitions = theAllPartitions;

    thisPartition = (Integer) context.getAttribute(PARTITION_KEY);
    responseTypeCache = Maps.newConcurrentMap();

    Map<String, ServletStats> timings;
    synchronized (context) {
        @SuppressWarnings("unchecked")
        Map<String, ServletStats> temp = (Map<String, ServletStats>) context.getAttribute(TIMINGS_KEY);
        timings = temp;
        if (timings == null) {
            timings = Maps.newTreeMap();
            context.setAttribute(TIMINGS_KEY, timings);
        }
    }

    String key = getClass().getSimpleName();
    ServletStats theTiming = timings.get(key);
    if (theTiming == null) {
        theTiming = new ServletStats();
        timings.put(key, theTiming);
    }
    timing = theTiming;
}

From source file: com.moz.fiji.mapreduce.kvstore.KeyValueStoreReaderFactory.java

/**
 * Creates a KeyValueStoreReaderFactory backed by store bindings specified in a Configuration.
 *
 * @param conf the Configuration from which a set of KeyValueStore bindings should
 *     be deserialized and initialized.
 * @throws IOException if there is an error deserializing or initializing a
 *     KeyValueStore instance.
 */
private KeyValueStoreReaderFactory(Configuration conf) throws IOException {
    Map<String, KeyValueStore<?, ?>> keyValueStores = new HashMap<String, KeyValueStore<?, ?>>();
    int numKvStores = conf.getInt(KeyValueStoreConfigSerializer.CONF_KEY_VALUE_STORE_COUNT,
            KeyValueStoreConfigSerializer.DEFAULT_KEY_VALUE_STORE_COUNT);
    for (int i = 0; i < numKvStores; i++) {
        KeyValueStoreConfiguration kvStoreConf = KeyValueStoreConfiguration.createInConfiguration(conf, i);

        Class<? extends KeyValueStore> kvStoreClass = kvStoreConf
                .<KeyValueStore>getClass(KeyValueStoreConfigSerializer.CONF_CLASS, null, KeyValueStore.class);

        String kvStoreName = kvStoreConf.get(KeyValueStoreConfigSerializer.CONF_NAME, "");

        if (null != kvStoreClass) {
            KeyValueStore<?, ?> kvStore = ReflectionUtils.newInstance(kvStoreClass, conf);
            if (null != kvStore) {
                kvStore.initFromConf(kvStoreConf);
                if (kvStoreName.isEmpty()) {
                    LOG.warn("Deserialized KeyValueStore not bound to a name; ignoring.");
                    continue;
                }
                keyValueStores.put(kvStoreName, kvStore);
            }
        }
    }

    mKeyValueStores = Collections.unmodifiableMap(keyValueStores);
    mKVStoreReaderCache = Maps.newConcurrentMap();
}

From source file: org.onosproject.mapping.impl.DistributedMappingStore.java

/**
 * Obtains map representation of mapping store.
 *
 * @param type mapping store type
 * @return map representation of mapping store
 */
private Map<MappingId, Mapping> getStoreMap(Type type) {
    switch (type) {
    case MAP_DATABASE:
        return databaseMap;
    case MAP_CACHE:
        return cacheMap;
    default:
        log.warn("Unrecognized map store type {}", type);
        return Maps.newConcurrentMap();
    }
}

From source file: com.cloudera.livy.client.local.rpc.RpcServer.java

public RpcServer(LocalConf lconf) throws IOException, InterruptedException {
    this.config = lconf;
    this.group = new NioEventLoopGroup(this.config.getInt(RPC_MAX_THREADS),
            new ThreadFactoryBuilder().setNameFormat("RPC-Handler-%d").setDaemon(true).build());
    this.channel = new ServerBootstrap().group(group).channel(NioServerSocketChannel.class)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    SaslServerHandler saslHandler = new SaslServerHandler(config);
                    final Rpc newRpc = Rpc.createServer(saslHandler, config, ch, group);
                    saslHandler.rpc = newRpc;

                    Runnable cancelTask = new Runnable() {
                        @Override
                        public void run() {
                            LOG.warn("Timed out waiting for hello from client.");
                            newRpc.close();
                        }
                    };
                    saslHandler.cancelTask = group.schedule(cancelTask,
                            config.getTimeAsMs(RPC_CLIENT_HANDSHAKE_TIMEOUT), TimeUnit.MILLISECONDS);
                }
            }).option(ChannelOption.SO_BACKLOG, 1).option(ChannelOption.SO_REUSEADDR, true)
            .childOption(ChannelOption.SO_KEEPALIVE, true).bind(0).sync().channel();
    this.port = ((InetSocketAddress) channel.localAddress()).getPort();
    this.pendingClients = Maps.newConcurrentMap();

    String address = config.get(RPC_SERVER_ADDRESS);
    if (address == null) {
        address = config.findLocalAddress();
    }
    this.address = address;
}

From source file: com.google.devtools.build.lib.skyframe.ActionExecutionFunction.java

public ActionExecutionFunction(SkyframeActionExecutor skyframeActionExecutor,
        AtomicReference<TimestampGranularityMonitor> tsgm) {
    this.skyframeActionExecutor = skyframeActionExecutor;
    this.tsgm = tsgm;
    stateMap = Maps.newConcurrentMap();
}