Example usage for java.util.concurrent ConcurrentMap put

List of usage examples for java.util.concurrent ConcurrentMap put

Introduction

On this page you can find example usage of java.util.concurrent ConcurrentMap put.

Prototype

V put(K key, V value);

Document

Associates the specified value with the specified key in this map (optional operation).
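
As a quick orientation before the examples, here is a minimal sketch of the put contract (the class name ConcurrentMapPutSketch is illustrative; the behaviour shown is that of ConcurrentHashMap, which rejects null keys and values): put returns the value previously mapped to the key, or null if the key was absent, and putIfAbsent is the atomic variant that several of the examples below rely on.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ConcurrentMapPutSketch {
    public static void main(String[] args) {
        ConcurrentMap<String, Integer> counts = new ConcurrentHashMap<>();

        // put returns the previous value for the key, or null if the key was absent
        Integer previous = counts.put("requests", 1);
        System.out.println(previous);               // null

        previous = counts.put("requests", 2);
        System.out.println(previous);               // 1

        // putIfAbsent inserts atomically only when the key is missing,
        // so the existing mapping is left untouched here
        counts.putIfAbsent("requests", 99);
        System.out.println(counts.get("requests")); // 2
    }
}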

Usage

From source file:com.github.podd.example.ExamplePoddClient.java

/**
 * @param trayUriMap
 * @param trayId
 * @param nextProjectID
 * @param nextExperimentUri
 * @return the existing or newly assigned URI for the given tray
 * @throws PoddClientException
 * @throws GraphUtilException
 */
private URI getTrayUri(final ConcurrentMap<String, ConcurrentMap<URI, URI>> trayUriMap, final String trayId,
        final InferredOWLOntologyID nextProjectID, final URI nextExperimentUri)
        throws PoddClientException, GraphUtilException {
    // Check whether trayId already has an assigned URI
    URI nextTrayURI;
    if (trayUriMap.containsKey(trayId)) {
        nextTrayURI = trayUriMap.get(trayId).keySet().iterator().next();
    } else {
        final Model trayIdSparqlResults = this.doSPARQL(
                String.format(ExampleSpreadsheetConstants.TEMPLATE_SPARQL_BY_TYPE_LABEL_STRSTARTS,
                        RenderUtils.escape(trayId), RenderUtils.getSPARQLQueryString(PODD.PODD_SCIENCE_TRAY)),
                Arrays.asList(nextProjectID));

        if (trayIdSparqlResults.isEmpty()) {
            this.log.debug(
                    "Could not find an existing container for tray barcode, assigning a temporary URI: {} {}",
                    trayId, nextProjectID);

            nextTrayURI = RestletPoddClientImpl.vf
                    .createURI(RestletPoddClientImpl.TEMP_UUID_PREFIX + "tray:" + UUID.randomUUID().toString());
        } else {
            nextTrayURI = GraphUtil.getUniqueSubjectURI(trayIdSparqlResults, RDF.TYPE, PODD.PODD_SCIENCE_TRAY);
        }

        ConcurrentMap<URI, URI> nextTrayUriMap = new ConcurrentHashMap<>();
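        // putIfAbsent installs the new map atomically; if another thread won the race, reuse its map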
        final ConcurrentMap<URI, URI> putIfAbsent2 = trayUriMap.putIfAbsent(trayId, nextTrayUriMap);
        if (putIfAbsent2 != null) {
            nextTrayUriMap = putIfAbsent2;
        }
        nextTrayUriMap.put(nextTrayURI, nextExperimentUri);
    }
    return nextTrayURI;
}

From source file:org.apache.bookkeeper.client.BookieInfoReader.java

Map<BookieSocketAddress, BookieInfo> getBookieInfo() throws BKException, InterruptedException {
    BookieClient bkc = bk.getBookieClient();
    final AtomicInteger totalSent = new AtomicInteger();
    final AtomicInteger totalCompleted = new AtomicInteger();
    final ConcurrentMap<BookieSocketAddress, BookieInfo> map = new ConcurrentHashMap<BookieSocketAddress, BookieInfo>();
    final CountDownLatch latch = new CountDownLatch(1);
    long requested = BookkeeperProtocol.GetBookieInfoRequest.Flags.TOTAL_DISK_CAPACITY_VALUE
            | BookkeeperProtocol.GetBookieInfoRequest.Flags.FREE_DISK_SPACE_VALUE;

    Collection<BookieSocketAddress> bookies;
    bookies = bk.bookieWatcher.getBookies();
    bookies.addAll(bk.bookieWatcher.getReadOnlyBookies());

    totalSent.set(bookies.size());
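    // Ask every bookie for its disk stats; each successful reply is recorded in the shared map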
    for (BookieSocketAddress b : bookies) {
        bkc.getBookieInfo(b, requested, new GetBookieInfoCallback() {
            @Override
            public void getBookieInfoComplete(int rc, BookieInfo bInfo, Object ctx) {
                BookieSocketAddress b = (BookieSocketAddress) ctx;
                if (rc != BKException.Code.OK) {
                    if (LOG.isErrorEnabled()) {
                        LOG.error("Reading bookie info from bookie {} failed due to {}", b,
                                BKException.codeLogger(rc));
                    }
                } else {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Free disk space on bookie {} is {}.", b, bInfo.getFreeDiskSpace());
                    }
                    map.put(b, bInfo);
                }
                if (totalCompleted.incrementAndGet() == totalSent.get()) {
                    latch.countDown();
                }
            }
        }, b);
    }
    try {
        latch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        LOG.error("Received InterruptedException ", e);
        throw e;
    }
    return map;
}

From source file:instance.os.OS.java

private void rescheduleAllTimers(long newBandwidth, long now) {
    logger.debug("Rescheduling all current downloads with bandwidth: " + newBandwidth + " B/s");
    addToBandwidthDiagram(newBandwidth);
    currentBandwidth = newBandwidth;
    ConcurrentMap<UUID, String> ct = new ConcurrentHashMap<UUID, String>();

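    // Recompute each in-flight transfer's remaining size at the old rate, then reschedule its completion timer at the new bandwidth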
    for (Entry<UUID, String> en : currentTransfers.entrySet()) {
        trigger(new MemoryCheckOperation(), cpu);
        Process p = pt.get(en.getValue());
        if (p == null)
            continue;
        p.setRemainingBlockSize(
                p.getRemainingBlockSize() - (long) (now - p.getSnapshot()) * p.getCurrentBandwidth() / 1000);
        if (p.getRemainingBlockSize() < 0)
            p.setRemainingBlockSize(0);
        p.setCurrentBandwidth(newBandwidth);
        p.setTimeout(p.getRemainingBlockSize() / p.getCurrentBandwidth());
        p.setSnapshot(now);
        pt.put(p.getPid(), p);
        ScheduleTimeout st = new ScheduleTimeout(1000 * p.getRemainingBlockSize() / p.getCurrentBandwidth());
        TransferringFinished tt = new TransferringFinished(st);
        tt.setPid(p.getPid());
        st.setTimeoutEvent(tt);
        ct.put(st.getTimeoutEvent().getTimeoutId(), p.getPid());
        trigger(st, timer);
    }

    currentTransfers.clear();
    currentTransfers.putAll(ct);
}

From source file:com.alibaba.dubbo.governance.sync.RegistryServerSync.java

public void notify(List<URL> urls) {
    if (urls == null || urls.isEmpty()) {
        return;
    }
    // Map<category, Map<servicename, Map<Long, URL>>>
    final Map<String, Map<String, Map<Long, URL>>> categories = new HashMap<String, Map<String, Map<Long, URL>>>();
    for (URL url : urls) {
        String category = url.getParameter(Constants.CATEGORY_KEY, Constants.PROVIDERS_CATEGORY);
        if (Constants.EMPTY_PROTOCOL.equalsIgnoreCase(url.getProtocol())) { // note: for the empty protocol, group and version are '*'
            ConcurrentMap<String, Map<Long, URL>> services = registryCache.get(category);
            if (services != null) {
                String group = url.getParameter(Constants.GROUP_KEY);
                String version = url.getParameter(Constants.VERSION_KEY);
                // note: for the empty protocol, group and version are '*'
                if (!Constants.ANY_VALUE.equals(group) && !Constants.ANY_VALUE.equals(version)) {
                    services.remove(url.getServiceKey());
                } else {
                    for (Map.Entry<String, Map<Long, URL>> serviceEntry : services.entrySet()) {
                        String service = serviceEntry.getKey();
                        if (Tool.getInterface(service).equals(url.getServiceInterface())
                                && (Constants.ANY_VALUE.equals(group)
                                        || StringUtils.isEquals(group, Tool.getGroup(service)))
                                && (Constants.ANY_VALUE.equals(version)
                                        || StringUtils.isEquals(version, Tool.getVersion(service)))) {
                            services.remove(service);
                        }
                    }
                }
            }
        } else {
            Map<String, Map<Long, URL>> services = categories.get(category);
            if (services == null) {
                services = new HashMap<String, Map<Long, URL>>();
                categories.put(category, services);
            }
            String service = url.getServiceKey();
            Map<Long, URL> ids = services.get(service);
            if (ids == null) {
                ids = new HashMap<Long, URL>();
                services.put(service, ids);
            }
            ids.put(ID.incrementAndGet(), url);
        }
    }
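    // Merge the collected URLs into the shared registry cache, one category at a time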
    for (Map.Entry<String, Map<String, Map<Long, URL>>> categoryEntry : categories.entrySet()) {
        String category = categoryEntry.getKey();
        ConcurrentMap<String, Map<Long, URL>> services = registryCache.get(category);
        if (services == null) {
            services = new ConcurrentHashMap<String, Map<Long, URL>>();
            registryCache.put(category, services);
        }
        services.putAll(categoryEntry.getValue());
    }
}

From source file:com.fhzz.dubbo.sync.RegistryServerSync.java

@PostConstruct
public void start() {
    registry.subscribe(SUBSCRIBE, new NotifyListener() {
        @Override
        // Note: notifications here include override, subscribe, route, and provider URLs
        public void notify(List<URL> urls) {
            if (urls == null || urls.isEmpty()) {
                return;
            }
            // Map<category, Map<servicename, Map<Long, URL>>>
            final Map<String, Map<String, Map<Long, URL>>> categories = new HashMap<String, Map<String, Map<Long, URL>>>();
            for (URL url : urls) {
                String category = url.getParameter(Constants.CATEGORY_KEY, Constants.PROVIDERS_CATEGORY);
                if (Constants.EMPTY_PROTOCOL.equalsIgnoreCase(url.getProtocol())) { // note: for the empty protocol, group and version are '*'
                    ConcurrentMap<String, Map<Long, URL>> services = registryCache.get(category);
                    if (services != null) {
                        String group = url.getParameter(Constants.GROUP_KEY);
                        String version = url.getParameter(Constants.VERSION_KEY);
                        // note: for the empty protocol, group and version are '*'
                        if (!Constants.ANY_VALUE.equals(group) && !Constants.ANY_VALUE.equals(version)) {
                            services.remove(url.getServiceKey());
                        } else {
                            for (Map.Entry<String, Map<Long, URL>> serviceEntry : services.entrySet()) {
                                String service = serviceEntry.getKey();
                                if (Tool.getInterface(service).equals(url.getServiceInterface())
                                        && (Constants.ANY_VALUE.equals(group)
                                                || StringUtils.isEquals(group, Tool.getGroup(service)))
                                        && (Constants.ANY_VALUE.equals(version)
                                                || StringUtils.isEquals(version, Tool.getVersion(service)))) {
                                    services.remove(service);
                                }
                            }
                        }
                    }
                } else {
                    Map<String, Map<Long, URL>> services = categories.get(category);
                    if (services == null) {
                        services = new HashMap<String, Map<Long, URL>>();
                        categories.put(category, services);
                    }
                    String service = url.getServiceKey();
                    Map<Long, URL> ids = services.get(service);
                    if (ids == null) {
                        ids = new HashMap<Long, URL>();
                        services.put(service, ids);
                    }
                    ids.put(ID.incrementAndGet(), url);
                }
            }
            for (Map.Entry<String, Map<String, Map<Long, URL>>> categoryEntry : categories.entrySet()) {
                String category = categoryEntry.getKey();
                ConcurrentMap<String, Map<Long, URL>> services = registryCache.get(category);
                if (services == null) {
                    services = new ConcurrentHashMap<String, Map<Long, URL>>();
                    registryCache.put(category, services);
                }
                services.putAll(categoryEntry.getValue());
            }
        }
    });
}

From source file:org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.TestLogAggregationService.java

@SuppressWarnings("resource")
@Test(timeout = 50000)
public void testLogAggregationServiceWithPatternsAndIntervals() throws Exception {
    LogAggregationContext logAggregationContext = Records.newRecord(LogAggregationContext.class);
    // set IncludePattern and RolledLogsIncludePattern.
    // When the app is running, we only aggregate the log with
    // the name stdout. After the app finishes, we only aggregate
    // the log with the name std_final.
    logAggregationContext.setRolledLogsIncludePattern("stdout");
    logAggregationContext.setIncludePattern("std_final");
    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    //configure YarnConfiguration.NM_REMOTE_APP_LOG_DIR to
    //have fully qualified path
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, this.remoteRootLogDir.toURI().toString());
    this.conf.setLong(YarnConfiguration.NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS, 3600);

    this.conf.setLong(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 3600);

    ApplicationId application = BuilderUtils.newApplicationId(System.currentTimeMillis(), 1);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(application, 1);
    ContainerId container = createContainer(appAttemptId, 1, ContainerType.APPLICATION_MASTER);

    ConcurrentMap<ApplicationId, Application> maps = this.context.getApplications();
    Application app = mock(Application.class);
    maps.put(application, app);
    when(app.getContainers()).thenReturn(this.context.getContainers());

    LogAggregationService logAggregationService = new LogAggregationService(dispatcher, context, this.delSrvc,
            super.dirsHandler);

    logAggregationService.init(this.conf);
    logAggregationService.start();

    // AppLogDir should be created
    File userFold = new File(localLogDir, userFolder);
    File appLogDir = new File(userFold, ConverterUtils.toString(application));
    appLogDir.mkdir();
    logAggregationService.handle(new LogHandlerAppStartedEvent(application, this.user, null, this.acls,
            logAggregationContext, this.userFolder));

    // Simulate log-file creation
    // create std_final in log directory which will not be aggregated
    // until the app finishes.
    String[] logFilesWithFinalLog = new String[] { "stdout", "std_final" };
    writeContainerLogs(appLogDir, container, logFilesWithFinalLog);

    // Do log aggregation
    AppLogAggregatorImpl aggregator = (AppLogAggregatorImpl) logAggregationService.getAppLogAggregators()
            .get(application);

    aggregator.doLogAggregationOutOfBand();

    Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application, 50, 1, false, null));

    String[] logFiles = new String[] { "stdout" };
    verifyContainerLogs(logAggregationService, application, new ContainerId[] { container }, logFiles, 1, true);

    logAggregationService.handle(new LogHandlerContainerFinishedEvent(container, 0));

    dispatcher.await();

    // Do the log aggregation after the ContainerFinishedEvent but before the
    // AppFinishedEvent. The std_final log is expected to be aggregated this time
    // because the container has finished, even though the app is still running.
    aggregator.doLogAggregationOutOfBand();

    Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application, 50, 2, false, null));

    // This container finishes.
    // The log "std_final" should be aggregated this time.
    String[] logFinalLog = new String[] { "std_final" };
    verifyContainerLogs(logAggregationService, application, new ContainerId[] { container }, logFinalLog, 1,
            true);

    logAggregationService.handle(new LogHandlerAppFinishedEvent(application));

    logAggregationService.stop();
}

From source file:org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.TestLogAggregationService.java

@SuppressWarnings("unchecked")
private void testLogAggregationService(boolean retentionSizeLimitation) throws Exception {
    LogAggregationContext logAggregationContextWithInterval = Records.newRecord(LogAggregationContext.class);
    // set rolledLogsIncludePattern/rolledLogsExcludePattern for rolling aggregation:
    // we expect all logs except std_final to be uploaded while the app is
    // running; std_final is uploaded only when the app finishes.
    logAggregationContextWithInterval.setRolledLogsIncludePattern(".*");
    logAggregationContextWithInterval.setRolledLogsExcludePattern("std_final");
    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    //configure YarnConfiguration.NM_REMOTE_APP_LOG_DIR to
    //have fully qualified path
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, this.remoteRootLogDir.toURI().toString());
    this.conf.setLong(YarnConfiguration.NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS, 3600);
    if (retentionSizeLimitation) {
        // set the retention size as 1. The number of logs for one application
        // in one NM should be 1.
        this.conf.setInt(YarnConfiguration.NM_PREFIX + "log-aggregation.num-log-files-per-app", 1);
    }

    // With this configuration, the log files are not deleted immediately after
    // they are aggregated to the remote directory. We use it to test whether
    // previously aggregated log files are aggregated again in the next cycle.
    this.conf.setLong(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 3600);

    ApplicationId application = BuilderUtils.newApplicationId(System.currentTimeMillis(), 1);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(application, 1);
    ContainerId container = createContainer(appAttemptId, 1, ContainerType.APPLICATION_MASTER);

    ConcurrentMap<ApplicationId, Application> maps = this.context.getApplications();
    Application app = mock(Application.class);
    maps.put(application, app);
    when(app.getContainers()).thenReturn(this.context.getContainers());

    LogAggregationService logAggregationService = new LogAggregationService(dispatcher, context, this.delSrvc,
            super.dirsHandler);

    logAggregationService.init(this.conf);
    logAggregationService.start();

    // AppLogDir should be created
    File userFold = new File(localLogDir, userFolder);
    File appLogDir = new File(userFold, application.toString());
    appLogDir.mkdir();
    logAggregationService.handle(new LogHandlerAppStartedEvent(application, this.user, null, this.acls,
            logAggregationContextWithInterval, this.userFolder));

    LogFileStatusInLastCycle logFileStatusInLastCycle = null;
    // Simulate log-file creation
    // create std_final in log directory which will not be aggregated
    // until the app finishes.
    String[] logFiles1WithFinalLog = new String[] { "stdout", "stderr", "syslog", "std_final" };
    String[] logFiles1 = new String[] { "stdout", "stderr", "syslog" };
    writeContainerLogs(appLogDir, container, logFiles1WithFinalLog);

    // Do log aggregation
    AppLogAggregatorImpl aggregator = (AppLogAggregatorImpl) logAggregationService.getAppLogAggregators()
            .get(application);
    aggregator.doLogAggregationOutOfBand();

    if (retentionSizeLimitation) {
        Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application, 50, 1, true, null));
    } else {
        Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application, 50, 1, false, null));
    }
    // Container logs should be uploaded
    logFileStatusInLastCycle = verifyContainerLogs(logAggregationService, application,
            new ContainerId[] { container }, logFiles1, 3, true);
    for (String logFile : logFiles1) {
        Assert.assertTrue(logFileStatusInLastCycle.getLogFileTypesInLastCycle().contains(logFile));
    }
    // Make sure the std_final is not uploaded.
    Assert.assertFalse(logFileStatusInLastCycle.getLogFileTypesInLastCycle().contains("std_final"));

    Thread.sleep(2000);

    // There is no log generated at this time. Do the log aggregation again.
    aggregator.doLogAggregationOutOfBand();

    // Same logs will not be aggregated again.
    // Only one aggregated log file in Remote file directory.
    Assert.assertTrue("Only one aggregated log file in Remote file directory expected",
            waitAndCheckLogNum(logAggregationService, application, 50, 1, true, null));

    Thread.sleep(2000);

    // Do log aggregation
    String[] logFiles2 = new String[] { "stdout_1", "stderr_1", "syslog_1" };
    writeContainerLogs(appLogDir, container, logFiles2);

    aggregator.doLogAggregationOutOfBand();

    if (retentionSizeLimitation) {
        Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application, 50, 1, true,
                logFileStatusInLastCycle.getLogFilePathInLastCycle()));
    } else {
        Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application, 50, 2, false, null));
    }
    // Container logs should be uploaded
    logFileStatusInLastCycle = verifyContainerLogs(logAggregationService, application,
            new ContainerId[] { container }, logFiles2, 3, true);

    for (String logFile : logFiles2) {
        Assert.assertTrue(logFileStatusInLastCycle.getLogFileTypesInLastCycle().contains(logFile));
    }
    // Make sure the std_final is not uploaded.
    Assert.assertFalse(logFileStatusInLastCycle.getLogFileTypesInLastCycle().contains("std_final"));

    Thread.sleep(2000);

    // create more logs
    String[] logFiles3 = new String[] { "stdout_2", "stderr_2", "syslog_2" };
    writeContainerLogs(appLogDir, container, logFiles3);

    logAggregationService.handle(new LogHandlerContainerFinishedEvent(container, 0));

    dispatcher.await();
    logAggregationService.handle(new LogHandlerAppFinishedEvent(application));
    if (retentionSizeLimitation) {
        Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application, 50, 1, true,
                logFileStatusInLastCycle.getLogFilePathInLastCycle()));
    } else {
        Assert.assertTrue(waitAndCheckLogNum(logAggregationService, application, 50, 3, false, null));
    }

    // the app is finished. The log "std_final" should be aggregated this time.
    String[] logFiles3WithFinalLog = new String[] { "stdout_2", "stderr_2", "syslog_2", "std_final" };
    verifyContainerLogs(logAggregationService, application, new ContainerId[] { container },
            logFiles3WithFinalLog, 4, true);
    logAggregationService.stop();
    assertEquals(0, logAggregationService.getNumAggregators());
}

From source file:com.networknt.light.rule.transform.AbstractTransformRule.java

public List<Map<String, Object>> getTransformRequest(String ruleClass) {
    String sql = "SELECT FROM TransformRequest WHERE ruleClass = '" + ruleClass + "' ORDER BY sequence";
    List<Map<String, Object>> transforms = null;

    Map<String, Object> ruleMap = ServiceLocator.getInstance().getMemoryImage("ruleMap");
    ConcurrentMap<Object, Object> cache = (ConcurrentMap<Object, Object>) ruleMap.get("cache");
    if (cache == null) {
        cache = new ConcurrentLinkedHashMap.Builder<Object, Object>().maximumWeightedCapacity(1000).build();
        ruleMap.put("cache", cache);
    } else {
        Map<String, Object> rule = (Map<String, Object>) cache.get(ruleClass);
        if (rule != null) {
            transforms = (List<Map<String, Object>>) rule.get("transformRequest");
        }
    }
    if (transforms == null) {
        OrientGraph graph = ServiceLocator.getInstance().getGraph();
        try {
            OSQLSynchQuery<ODocument> query = new OSQLSynchQuery<>(sql);
            List<ODocument> docs = graph.getRawGraph().command(query).execute();
            transforms = new ArrayList<Map<String, Object>>();
            if (docs != null) {
                for (ODocument doc : docs) {
                    Map<String, Object> map = new HashMap<String, Object>();
                    map.put("sequence", doc.field("sequence"));
                    map.put("transformRule", doc.field("transformRule"));
                    map.put("transformData", doc.field("transformData"));
                    map.put("createUserId", doc.field("createUserId"));
                    transforms.add(map);
                }
            }
            // Cache the list even if it is empty (no transform rules available) so the db is not queried on every call.
            Map<String, Object> rule = (Map<String, Object>) cache.get(ruleClass);
            if (rule != null) {
                rule.put("transformRequest", transforms);
            } else {
                rule = new HashMap<String, Object>();
                rule.put("transformRequest", transforms);
                cache.put(ruleClass, rule);
            }
        } catch (Exception e) {
            logger.error("Exception:", e);
            throw e;
        } finally {
            graph.shutdown();
        }
    }
    return transforms;
}

From source file:com.networknt.light.rule.transform.AbstractTransformRule.java

public List<Map<String, Object>> getTransformResponse(String ruleClass) {
    String sql = "SELECT FROM TransformResponse WHERE ruleClass = '" + ruleClass + "' ORDER BY sequence";
    List<Map<String, Object>> transforms = null;

    Map<String, Object> ruleMap = ServiceLocator.getInstance().getMemoryImage("ruleMap");
    ConcurrentMap<Object, Object> cache = (ConcurrentMap<Object, Object>) ruleMap.get("cache");
    if (cache == null) {
        cache = new ConcurrentLinkedHashMap.Builder<Object, Object>().maximumWeightedCapacity(1000).build();
        ruleMap.put("cache", cache);
    } else {
        Map<String, Object> rule = (Map<String, Object>) cache.get(ruleClass);
        if (rule != null) {
            transforms = (List<Map<String, Object>>) rule.get("transformResponse");
        }
    }
    if (transforms == null) {
        OrientGraph graph = ServiceLocator.getInstance().getGraph();
        try {
            OSQLSynchQuery<ODocument> query = new OSQLSynchQuery<>(sql);
            List<ODocument> docs = graph.getRawGraph().command(query).execute();
            transforms = new ArrayList<Map<String, Object>>();
            if (docs != null) {
                for (ODocument doc : docs) {
                    Map<String, Object> map = new HashMap<String, Object>();
                    map.put("sequence", doc.field("sequence"));
                    map.put("transformRule", doc.field("transformRule"));
                    map.put("transformData", doc.field("transformData"));
                    map.put("createUserId", doc.field("createUserId"));
                    transforms.add(map);
                }
            }
            // Cache the list even if it is empty (no transform rules available) so the db is not queried on every call.
            Map<String, Object> rule = (Map<String, Object>) cache.get(ruleClass);
            if (rule != null) {
                rule.put("transformResponse", transforms);
            } else {
                rule = new HashMap<String, Object>();
                rule.put("transformResponse", transforms);
                cache.put(ruleClass, rule);
            }
        } catch (Exception e) {
            logger.error("Exception:", e);
            throw e;
        } finally {
            graph.shutdown();
        }
    }
    return transforms;
}

From source file:org.apache.giraph.ooc.DiskBackedPartitionStore.java

/**
 * Spill message buffers of a particular type of message (current or incoming
 * buffer) for a partition to disk.
 *
 * @param partitionId Id of the partition to spill the messages for
 * @param pendingMessages The map to get the message buffers from
 * @param superstep Superstep of which we want to offload messages. This is
 *                  equal to current superstep number if we want to offload
 *                  buffers for currentMessageStore, and is equal to next
 *                  superstep number if we want to offload buffer for
 *                  incomingMessageStore
 * @throws IOException
 */
private void spillMessages(Integer partitionId,
        ConcurrentMap<Integer, Pair<Integer, List<VertexIdMessages<I, Writable>>>> pendingMessages,
        long superstep) throws IOException {
    Pair<Integer, List<VertexIdMessages<I, Writable>>> entry;
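    // Detach the partition's buffer while holding the write lock; put it back if it is still smaller than minBuffSize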
    messageBufferRWLock.writeLock().lock();
    entry = pendingMessages.remove(partitionId);
    if (entry != null && entry.getLeft() < minBuffSize) {
        pendingMessages.put(partitionId, entry);
        entry = null;
    }
    messageBufferRWLock.writeLock().unlock();

    if (entry == null) {
        return;
    }

    // Sanity check
    checkState(!entry.getRight().isEmpty(),
            "spillMessages: the message buffer that is supposed to be flushed to " + "disk does not exist.");

    File file = new File(getPendingMessagesBufferPath(partitionId, superstep));

    FileOutputStream fos = new FileOutputStream(file, true);
    BufferedOutputStream bos = new BufferedOutputStream(fos);
    DataOutputStream dos = new DataOutputStream(bos);
    for (VertexIdMessages<I, Writable> messages : entry.getRight()) {
        SerializedMessageClass messageClass;
        if (messages instanceof ByteArrayVertexIdMessages) {
            messageClass = SerializedMessageClass.BYTE_ARRAY_VERTEX_ID_MESSAGES;
        } else if (messages instanceof ByteArrayOneMessageToManyIds) {
            messageClass = SerializedMessageClass.BYTE_ARRAY_ONE_MESSAGE_TO_MANY_IDS;
        } else {
            throw new IllegalStateException("spillMessages: serialized message " + "type is not supported");
        }
        dos.writeInt(messageClass.ordinal());
        messages.write(dos);
    }
    dos.close();
}