Example usage for com.google.common.collect Maps newConcurrentMap

Introduction

On this page you can find example usages of com.google.common.collect.Maps#newConcurrentMap.

Prototype

public static <K, V> ConcurrentMap<K, V> newConcurrentMap() 

Document

Returns a general-purpose instance of ConcurrentMap, which supports all optional operations of the ConcurrentMap interface.
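
Before the project examples below, a minimal self-contained sketch may help; the class and variable names here are illustrative, not taken from any of the projects. In recent Guava versions the method simply returns a new ConcurrentHashMap, so its value is brevity plus the atomic operations guaranteed by the ConcurrentMap interface:

import com.google.common.collect.Maps;

import java.util.concurrent.ConcurrentMap;

public class NewConcurrentMapExample {
    public static void main(String[] args) {
        // Equivalent to new ConcurrentHashMap<String, Integer>(); the factory
        // mainly saves repeating the type parameters (it predates the diamond operator).
        // Like ConcurrentHashMap, the returned map rejects null keys and values.
        ConcurrentMap<String, Integer> counters = Maps.newConcurrentMap();

        // putIfAbsent is atomic: only the first writer for a given key wins.
        counters.putIfAbsent("requests", 0);

        // compute is an atomic read-modify-write, so no external locking is needed.
        counters.compute("requests", (key, count) -> count == null ? 1 : count + 1);

        System.out.println(counters); // prints {requests=1}
    }
}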

Usage

From source file:co.cask.cdap.internal.app.services.HttpServiceTwillRunnable.java

/**
 * Initializes this runnable from the given context.
 *
 * @param context the context for initialization
 */
@Override
public void initialize(TwillContext context) {
    LOG.info("In initialize method in HTTP Service");
    // initialize the base class so that we can use this context later
    super.initialize(context);

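    // Concurrent map: handler references are accessed by multiple request threads
    // and cleaned up via the reference queue below.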
    handlerReferences = Maps.newConcurrentMap();
    handlerReferenceQueue = new ReferenceQueue<Supplier<HandlerContextPair>>();

    Map<String, String> runnableArgs = Maps.newHashMap(context.getSpecification().getConfigs());
    appName = runnableArgs.get(CONF_APP);
    serviceName = runnableArgs.get(CONF_RUNNABLE);
    handlers = Lists.newArrayList();
    List<String> handlerNames = GSON.fromJson(runnableArgs.get(CONF_HANDLER), HANDLER_NAMES_TYPE);
    List<HttpServiceSpecification> specs = GSON.fromJson(runnableArgs.get(CONF_SPEC), HANDLER_SPEC_TYPE);
    datasets = GSON.fromJson(runnableArgs.get("service.datasets"), new TypeToken<Set<String>>() {
    }.getType());
    // we will need the context based on the spec when we create NettyHttpService
    List<HandlerDelegatorContext> delegatorContexts = Lists.newArrayList();
    InstantiatorFactory instantiatorFactory = new InstantiatorFactory(false);

    for (int i = 0; i < handlerNames.size(); ++i) {
        try {
            Class<?> handlerClass = program.getClassLoader().loadClass(handlerNames.get(i));
            @SuppressWarnings("unchecked")
            TypeToken<HttpServiceHandler> type = TypeToken.of((Class<HttpServiceHandler>) handlerClass);
            delegatorContexts
                    .add(new HandlerDelegatorContext(type, instantiatorFactory, specs.get(i), context));
        } catch (Exception e) {
            LOG.error("Could not initialize HTTP Service");
            Throwables.propagate(e);
        }
    }
    String pathPrefix = String.format("%s/apps/%s/services/%s/methods", Constants.Gateway.GATEWAY_VERSION,
            appName, serviceName);
    service = createNettyHttpService(context.getHost().getCanonicalHostName(), delegatorContexts, pathPrefix);
}

From source file:org.apache.tajo.querymaster.Query.java

public Query(final QueryMasterTask.QueryMasterTaskContext context, final QueryId id, final long appSubmitTime,
        final String queryStr, final EventHandler eventHandler, final MasterPlan plan) {
    this.context = context;
    this.systemConf = context.getConf();
    this.id = id;
    this.clock = context.getClock();
    this.appSubmitTime = appSubmitTime;
    this.queryStr = queryStr;
    this.stages = Maps.newConcurrentMap();
    this.eventHandler = eventHandler;
    this.plan = plan;
    this.cursor = new ExecutionBlockCursor(plan, true);

    StringBuilder sb = new StringBuilder("\n=======================================================");
    sb.append("\nThe order of execution: \n");
    int order = 1;
    for (ExecutionBlock currentEB : cursor) {
        sb.append("\n").append(order).append(": ").append(currentEB.getId());
        order++;
    }
    sb.append("\n=======================================================");
    LOG.info(sb);

    ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
    this.readLock = readWriteLock.readLock();
    this.writeLock = readWriteLock.writeLock();

    stateMachine = stateMachineFactory.make(this);
    queryState = stateMachine.getCurrentState();
}

From source file:org.apache.giraph.comm.messages.with_source.SimpleMessageWithSourceStore.java

@Override
public void readFieldsForPartition(DataInput in, int partitionId) throws IOException {
    if (in.readBoolean()) {
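        // Outer concurrent map keys by destination vertex id; each value maps
        // a source vertex id to the messages received from that source.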
        ConcurrentMap<I, ConcurrentMap<I, T>> partitionMap = Maps.newConcurrentMap();
        int numVertices = in.readInt();
        for (int v = 0; v < numVertices; v++) {
            I dstId = config.createVertexId();
            dstId.readFields(in);
            ConcurrentMap<I, T> srcMap = Maps.newConcurrentMap();

            int numSrc = in.readInt();
            for (int s = 0; s < numSrc; s++) {
                I srcId = config.createVertexId();
                srcId.readFields(in);
                srcMap.put(srcId, readFieldsForMessages(in));
            }
            partitionMap.put(dstId, srcMap);
        }
        map.put(partitionId, partitionMap);
    }
}

From source file:org.onosproject.simplefabric.SimpleFabricManager.java

private boolean refresh() {
    log.debug("simple fabric refresh");
    boolean dirty = false;

    SimpleFabricConfig config = configService.getConfig(coreService.registerApplication(APP_ID),
            SimpleFabricConfig.class);
    if (config == null) {
        log.debug("No simple fabric config available!");
        return false;
    }

    // l2Networks
    Set<L2Network> newL2Networks = new HashSet<>();
    Set<Interface> newL2NetworkInterfaces = new HashSet<>();
    for (L2Network newL2NetworkConfig : config.getL2Networks()) {
        L2Network newL2Network = L2Network.of(newL2NetworkConfig);

        // fill up interfaces and Hosts with active port only
        for (String ifaceName : newL2NetworkConfig.interfaceNames()) {
            Interface iface = getInterfaceByName(ifaceName);
            if (iface != null && deviceService.isAvailable(iface.connectPoint().deviceId())) {
                newL2Network.addInterface(iface);
                newL2NetworkInterfaces.add(iface);
            }
        }
        for (Host host : hostService.getHosts()) {
            // consider host with ip only
            if (!host.ipAddresses().isEmpty()) {
                Interface iface = findAvailableDeviceHostInterface(host);
                if (iface != null && newL2Network.contains(iface)) {
                    newL2Network.addHost(host);
                }
            }
        }
        newL2Network.setDirty(true);

        // update newL2Network's dirty flags if same entry already exists
        for (L2Network prevL2Network : l2Networks) {
            if (prevL2Network.equals(newL2Network)) {
                newL2Network.setDirty(prevL2Network.dirty());
                break;
            }
        }
        newL2Networks.add(newL2Network);
    }
    if (!l2Networks.equals(newL2Networks)) {
        l2Networks = newL2Networks;
        dirty = true;
    }
    if (!l2NetworkInterfaces.equals(newL2NetworkInterfaces)) {
        l2NetworkInterfaces = newL2NetworkInterfaces;
        dirty = true;
    }

    // ipSubnets
    Set<IpSubnet> newIpSubnets = config.ipSubnets();
    InvertedRadixTree<IpSubnet> newIp4SubnetTable = new ConcurrentInvertedRadixTree<>(
            new DefaultByteArrayNodeFactory());
    InvertedRadixTree<IpSubnet> newIp6SubnetTable = new ConcurrentInvertedRadixTree<>(
            new DefaultByteArrayNodeFactory());
    Map<IpAddress, MacAddress> newVirtualGatewayIpMacMap = Maps.newConcurrentMap();
    for (IpSubnet subnet : newIpSubnets) {
        if (subnet.ipPrefix().isIp4()) {
            newIp4SubnetTable.put(createBinaryString(subnet.ipPrefix()), subnet);
        } else {
            newIp6SubnetTable.put(createBinaryString(subnet.ipPrefix()), subnet);
        }
        newVirtualGatewayIpMacMap.put(subnet.gatewayIp(), subnet.gatewayMac());
    }
    if (!ipSubnets.equals(newIpSubnets)) {
        ipSubnets = newIpSubnets;
        ip4SubnetTable = newIp4SubnetTable;
        ip6SubnetTable = newIp6SubnetTable;
        dirty = true;
    }
    if (!virtualGatewayIpMacMap.equals(newVirtualGatewayIpMacMap)) {
        virtualGatewayIpMacMap = newVirtualGatewayIpMacMap;
        dirty = true;
    }

    // borderRoutes config handling
    Set<Route> newBorderRoutes = config.borderRoutes();
    if (!borderRoutes.equals(newBorderRoutes)) {
        InvertedRadixTree<Route> newIp4BorderRouteTable = new ConcurrentInvertedRadixTree<>(
                new DefaultByteArrayNodeFactory());
        InvertedRadixTree<Route> newIp6BorderRouteTable = new ConcurrentInvertedRadixTree<>(
                new DefaultByteArrayNodeFactory());
        for (Route route : newBorderRoutes) {
            if (route.prefix().isIp4()) {
                newIp4BorderRouteTable.put(createBinaryString(route.prefix()), route);
            } else {
                newIp6BorderRouteTable.put(createBinaryString(route.prefix()), route);
            }
        }
        borderRoutes = newBorderRoutes;
        ip4BorderRouteTable = newIp4BorderRouteTable;
        ip6BorderRouteTable = newIp6BorderRouteTable;
        dirty = true;
    }

    // notify to SimpleFabric listeners
    if (dirty) {
        log.info("simple fabric refresh; notify events");
        process(new SimpleFabricEvent(SimpleFabricEvent.Type.SIMPLE_FABRIC_UPDATED, "updated"));
    }
    return dirty;
}

From source file:com.jivesoftware.os.amza.service.replication.http.HttpRowsTaker.java

private void flushQueues(RingHost ringHost, Ackable ackable, long currentVersion) throws Exception {
    Map<VersionedPartitionName, RowsTakenPayload> rowsTaken;
    PongPayload pong;
    ackable.semaphore.acquire(Short.MAX_VALUE);
    try {
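        // Atomically swap in an empty concurrent map so writers keep appending
        // while this batch is drained and flushed.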
        rowsTaken = ackable.rowsTakenPayloads.getAndSet(Maps.newConcurrentMap());
        pong = ackable.pongPayloads.getAndSet(null);
    } finally {
        ackable.semaphore.release(Short.MAX_VALUE);
    }
    if (rowsTaken != null && !rowsTaken.isEmpty()) {
        LOG.inc("flush>rowsTaken>pow>" + UIO.chunkPower(rowsTaken.size(), 0));
    }

    if (rowsTaken != null && !rowsTaken.isEmpty() || pong != null) {
        flushExecutor.submit(() -> {
            try {
                String endpoint = "/amza/ackBatch";
                ringClient.call("",
                        new ConnectionDescriptorSelectiveStrategy(
                                new HostPort[] { new HostPort(ringHost.getHost(), ringHost.getPort()) }),
                        "ackBatch", httpClient -> {

                            HttpResponse response = httpClient.postStreamableRequest(endpoint, out -> {
                                try {
                                    DataOutputStream dos = new DataOutputStream(out);
                                    if (rowsTaken.isEmpty()) {
                                        dos.write((byte) 0); // hasMore for rowsTaken stream
                                    } else {
                                        for (Entry<VersionedPartitionName, RowsTakenPayload> e : rowsTaken
                                                .entrySet()) {
                                            dos.write((byte) 1); // hasMore for rowsTaken stream
                                            VersionedPartitionName versionedPartitionName = e.getKey();

                                            byte[] bytes = versionedPartitionName.toBytes();
                                            dos.writeShort(bytes.length);
                                            dos.write(bytes);

                                            RowsTakenPayload rowsTakenPayload = e.getValue();
                                            bytes = rowsTakenPayload.ringMember.toBytes();
                                            dos.writeShort(bytes.length);
                                            dos.write(bytes);

                                            dos.writeLong(rowsTakenPayload.takeSessionId);
                                            dos.writeLong(rowsTakenPayload.takeSharedKey);
                                            dos.writeLong(rowsTakenPayload.txId);
                                            dos.writeLong(rowsTakenPayload.leadershipToken);
                                        }
                                        dos.write((byte) 0); // EOS for rowsTaken stream
                                    }

                                    if (pong == null) {
                                        dos.write((byte) 0); // has pong
                                    } else {
                                        dos.write((byte) 1); // has pong
                                        byte[] bytes = pong.ringMember.toBytes();
                                        dos.writeShort(bytes.length);
                                        dos.write(bytes);

                                        dos.writeLong(pong.takeSessionId);
                                        dos.writeLong(pong.takeSharedKey);

                                    }
                                } catch (Exception x) {
                                    throw new RuntimeException("Failed while streaming ackBatch.", x);
                                } finally {
                                    out.flush();
                                    out.close();
                                }

                            }, null);

                            if (response.getStatusCode() < 200 || response.getStatusCode() >= 300) {
                                throw new NonSuccessStatusCodeException(response.getStatusCode(),
                                        response.getStatusReasonPhrase());
                            }
                            Boolean result = (Boolean) conf.asObject(response.getResponseBody());
                            return new ClientResponse<>(result, true);
                        });

            } catch (Exception x) {
                LOG.warn("Failed to deliver acks for remote:{}", new Object[] { ringHost }, x);
            } finally {
                ackable.running.set(false);
                LOG.inc("flush>version>consume>" + name);
                synchronized (flushVersion) {
                    if (currentVersion != flushVersion.get()) {
                        flushVersion.notify();
                    }
                }
            }
        });
    } else {
        ackable.running.set(false);
        LOG.inc("flush>version>consume>" + name);
        synchronized (flushVersion) {
            if (currentVersion != flushVersion.get()) {
                flushVersion.notify();
            }
        }
    }
}

From source file:org.cinchapi.concourse.server.ConcourseServer.java

/**
 * Construct a ConcourseServer that listens on {@code port} and stores data
 * in {@code dbStore} and {@code bufferStore}.
 *
 * @param port
 * @param bufferStore
 * @param dbStore
 * @throws TTransportException
 */
public ConcourseServer(int port, String bufferStore, String dbStore) throws TTransportException {
    Preconditions.checkState(!bufferStore.equalsIgnoreCase(dbStore),
            "Cannot store buffer and database files in the same directory. " + "Please check concourse.prefs.");
    Preconditions.checkState(!Strings.isNullOrEmpty(Environments.sanitize(DEFAULT_ENVIRONMENT)),
            "Cannot initialize " + "Concourse Server with a default environment of "
                    + "'%s'. Please use a default environment name that "
                    + "contains only alphanumeric characters.",
            DEFAULT_ENVIRONMENT);
    FileSystem.mkdirs(bufferStore);
    FileSystem.mkdirs(dbStore);
    TServerSocket socket = new TServerSocket(port);
    ConcourseService.Processor<Iface> processor = new ConcourseService.Processor<Iface>(this);
    Args args = new TThreadPoolServer.Args(socket);
    args.processor(processor);
    args.maxWorkerThreads(NUM_WORKER_THREADS);
    args.executorService(
            Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("Server" + "-%d").build()));
    this.server = new TThreadPoolServer(args);
    this.bufferStore = bufferStore;
    this.dbStore = dbStore;
    this.engines = Maps.newConcurrentMap();
    this.manager = AccessManager.create(ACCESS_FILE);
    getEngine(); // load the default engine
}

From source file:com.netflix.metacat.main.services.impl.PartitionServiceImpl.java

@Override
public Map<String, List<QualifiedName>> getQualifiedNames(final List<String> uris, final boolean prefixSearch) {
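    // Concurrent map: the per-catalog futures below write their
    // uri -> partition-name results in parallel.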
    final Map<String, List<QualifiedName>> result = Maps.newConcurrentMap();
    final List<ListenableFuture<Void>> futures = Lists.newArrayList();
    catalogService.getCatalogNames().forEach(catalog -> {
        final Session session = sessionProvider.getSession(QualifiedName.ofCatalog(catalog.getCatalogName()));
        futures.add(threadServiceManager.getExecutor().submit(() -> {
            final Map<String, List<SchemaTablePartitionName>> schemaTablePartitionNames = splitManager
                    .getPartitionNames(session, uris, prefixSearch);
            schemaTablePartitionNames.forEach((uri, schemaTablePartitionNames1) -> {
                final List<QualifiedName> partitionNames = schemaTablePartitionNames1.stream()
                        .map(schemaTablePartitionName -> QualifiedName.ofPartition(catalog.getConnectorName(),
                                schemaTablePartitionName.getTableName().getSchemaName(),
                                schemaTablePartitionName.getTableName().getTableName(),
                                schemaTablePartitionName.getPartitionId()))
                        .collect(Collectors.toList());
                final List<QualifiedName> existingPartitionNames = result.get(uri);
                if (existingPartitionNames == null) {
                    result.put(uri, partitionNames);
                } else {
                    existingPartitionNames.addAll(partitionNames);
                }
            });
            return null;
        }));
    });
    try {
        Futures.allAsList(futures).get(1, TimeUnit.HOURS);
    } catch (Exception e) {
        Throwables.propagate(e);
    }
    return result;
}

From source file:org.onosproject.vtnweb.resources.SubnetWebResource.java

/**
 * Converts an allocationPools JsonNode into a collection of AllocationPool objects.
 *
 * @param allocationPools the allocationPools JsonNode
 * @return a collection of allocationPools
 */
public Iterable<AllocationPool> jsonNodeToAllocationPools(JsonNode allocationPools) {
    checkNotNull(allocationPools, JSON_NOT_NULL);
    ConcurrentMap<Integer, AllocationPool> alocplMaps = Maps.newConcurrentMap();
    Integer i = 0;
    for (JsonNode node : allocationPools) {
        IpAddress startIp = IpAddress.valueOf(node.get("start").asText());
        IpAddress endIp = IpAddress.valueOf(node.get("end").asText());
        AllocationPool alocPls = new DefaultAllocationPool(startIp, endIp);
        alocplMaps.putIfAbsent(i, alocPls);
        i++;
    }
    return Collections.unmodifiableCollection(alocplMaps.values());
}

From source file:org.onosproject.vtnweb.resources.TenantNetworkWebResource.java

/**
 * Returns a collection of tenantNetworks.
 *
 * @param nodes the network JsonNodes
 * @return a collection of tenantNetworks
 */
public Iterable<TenantNetwork> changeJson2objs(JsonNode nodes) {
    checkNotNull(nodes, JSON_NOT_NULL);
    TenantNetwork network = null;
    ConcurrentMap<TenantNetworkId, TenantNetwork> networksMap = Maps.newConcurrentMap();
    if (nodes != null) {
        for (JsonNode node : nodes) {
            String id = node.get("id").asText();
            String name = node.get("name").asText();
            boolean adminStateUp = node.get("admin_state_up").asBoolean();
            String state = node.get("status").asText();
            boolean shared = node.get("shared").asBoolean();
            String tenantId = node.get("tenant_id").asText();
            boolean routerExternal = node.get("router:external").asBoolean();
            String type = node.get("provider:network_type").asText();
            String physicalNetwork = node.get("provider:physical_network").asText();
            String segmentationId = node.get("provider:segmentation_id").asText();
            network = new DefaultTenantNetwork(TenantNetworkId.networkId(id), name, adminStateUp,
                    isState(state), shared, TenantId.tenantId(tenantId), routerExternal, isType(type),
                    PhysicalNetwork.physicalNetwork(physicalNetwork),
                    SegmentationId.segmentationId(segmentationId));
            networksMap.putIfAbsent(TenantNetworkId.networkId(id), network);
        }
    }
    return Collections.unmodifiableCollection(networksMap.values());
}

From source file:gobblin.data.management.conversion.hive.validation.ValidationJob.java

private void runCountValidation() throws InterruptedException {
    try {
        // Validation results
        this.successfulConversions = Maps.newConcurrentMap();
        this.failedConversions = Maps.newConcurrentMap();
        this.warnConversions = Maps.newConcurrentMap();
        this.dataValidationFailed = Maps.newConcurrentMap();
        this.dataValidationSuccessful = Maps.newConcurrentMap();

        // Find datasets to validate
        Iterator<HiveDataset> iterator = this.datasetFinder.getDatasetsIterator();
        EventSubmitter.submit(Optional.of(this.eventSubmitter),
                EventConstants.VALIDATION_FIND_HIVE_TABLES_EVENT);

        while (iterator.hasNext()) {
            ConvertibleHiveDataset hiveDataset = (ConvertibleHiveDataset) iterator.next();
            try (AutoReturnableObject<IMetaStoreClient> client = hiveDataset.getClientPool().getClient()) {

                // Validate dataset
                log.info(String.format("Validating dataset: %s", hiveDataset));
                if (HiveUtils.isPartitioned(hiveDataset.getTable())) {
                    processPartitionedTable(hiveDataset, client);
                } else {
                    processNonPartitionedTable(hiveDataset);
                }
            }
        }

        // Wait for all validation queries to finish
        log.info(String.format("Waiting for %d futures to complete", this.futures.size()));

        this.exec.shutdown();
        this.exec.awaitTermination(4, TimeUnit.HOURS);

        boolean oneFutureFailure = false;
        // Check if there were any exceptions
        for (Future<Void> future : this.futures) {
            try {
                future.get();
            } catch (Throwable t) {
                log.error("getValidationOutputFromHive failed", t);
                oneFutureFailure = true;
            }
        }

        // Log validation results:
        // Validation results are consolidated into the successfulConversions and failedConversions
        // These are then converted into log lines in the Azkaban logs as done below
        for (Map.Entry<String, String> successfulConversion : this.successfulConversions.entrySet()) {
            log.info(String.format("Successful conversion: %s [%s]", successfulConversion.getKey(),
                    successfulConversion.getValue()));
        }
        for (Map.Entry<String, String> successfulConversion : this.warnConversions.entrySet()) {
            log.warn(String.format("No conversion found for: %s [%s]", successfulConversion.getKey(),
                    successfulConversion.getValue()));
        }
        for (Map.Entry<String, String> failedConverion : this.failedConversions.entrySet()) {
            log.error(String.format("Failed conversion: %s [%s]", failedConverion.getKey(),
                    failedConverion.getValue()));
        }

        for (Map.Entry<String, String> success : this.dataValidationSuccessful.entrySet()) {
            log.info(
                    String.format("Data validation successful: %s [%s]", success.getKey(), success.getValue()));
        }

        for (Map.Entry<String, String> failed : this.dataValidationFailed.entrySet()) {
            log.error(String.format("Data validation failed: %s [%s]", failed.getKey(), failed.getValue()));
        }

        if (!this.failedConversions.isEmpty() || !this.dataValidationFailed.isEmpty()) {
            throw new RuntimeException(String.format(
                    "Validation failed for %s conversions. See previous logs for exact validation failures",
                    failedConversions.size()));
        }
        if (oneFutureFailure) {
            throw new RuntimeException("At least one hive ddl failed. Check previous logs");
        }

    } catch (IOException e) {
        Throwables.propagate(e);
    }
}