List of usage examples for java.util.concurrent.ConcurrentMap.putIfAbsent
V putIfAbsent(K key, V value);
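The contract: the key is atomically mapped to the given value only if no mapping exists, and the previous value is returned (null if there was none). A minimal, self-contained sketch of that contract (class name and values are illustrative):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class PutIfAbsentDemo {
    public static void main(String[] args) {
        ConcurrentMap<String, String> map = new ConcurrentHashMap<>();

        // No existing mapping: the value is stored and null is returned.
        String first = map.putIfAbsent("key", "v1");   // first == null

        // Existing mapping: the map is unchanged and the current value is returned.
        String second = map.putIfAbsent("key", "v2");  // second == "v1"

        System.out.println(first + " / " + second + " / " + map.get("key"));
    }
}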
From source file:com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable.java
public Map<String, Long> registerMetrics(String topologyId, Set<String> metricNames) {
    TimeTicker ticker = new TimeTicker(TimeUnit.MILLISECONDS, true);
    ConcurrentMap<String, Long> memMeta = topologyMetricContexts.get(topologyId).getMemMeta();

    Map<String, Long> ret = new HashMap<>();
    for (String metricName : metricNames) {
        Long id = memMeta.get(metricName);
        if (id != null && MetricUtils.isValidId(id)) {
            ret.put(metricName, id);
        } else {
            id = metricIDGenerator.genMetricId(metricName);
            Long old = memMeta.putIfAbsent(metricName, id);
            if (old == null) {
                ret.put(metricName, id);
            } else {
                ret.put(metricName, old);
            }
        }
    }

    long cost = ticker.stop();
    LOG.info("register metrics, topology:{}, size:{}, cost:{}", topologyId, metricNames.size(), cost);
    return ret;
}
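The pattern above is worth isolating: generate a candidate ID speculatively, publish it with putIfAbsent, and adopt whichever value won the race. A minimal sketch under assumed names (IdRegistry and the AtomicLong generator are illustrative stand-ins, not JStorm's API):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class IdRegistry {
    private final ConcurrentMap<String, Long> ids = new ConcurrentHashMap<>();
    private final AtomicLong nextId = new AtomicLong();  // stand-in for metricIDGenerator

    /** Returns a stable ID for the name, generating one only if no thread beat us to it. */
    public long register(String name) {
        Long existing = ids.get(name);
        if (existing != null) {
            return existing;
        }
        long candidate = nextId.incrementAndGet();  // speculative: may be discarded
        Long old = ids.putIfAbsent(name, candidate);
        return old == null ? candidate : old;       // adopt the winner of the race
    }
}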
From source file:alluxio.job.move.MoveDefinition.java
/**
 * {@inheritDoc}
 *
 * Assigns each worker to move whichever files it has the most blocks for. If no worker has blocks
 * for a file, a random worker is chosen.
 */
@Override
public Map<WorkerInfo, ArrayList<MoveCommand>> selectExecutors(MoveConfig config,
        List<WorkerInfo> jobWorkerInfoList, JobMasterContext jobMasterContext) throws Exception {
    AlluxioURI source = new AlluxioURI(config.getSource());
    AlluxioURI destination = new AlluxioURI(config.getDestination());
    if (source.equals(destination)) {
        return new HashMap<WorkerInfo, ArrayList<MoveCommand>>();
    }
    checkMoveValid(config);

    List<BlockWorkerInfo> alluxioWorkerInfoList = AlluxioBlockStore.create(mFileSystemContext).getAllWorkers();
    Preconditions.checkState(!jobWorkerInfoList.isEmpty(), "No workers are available");

    List<URIStatus> allPathStatuses = getPathStatuses(source);
    ConcurrentMap<WorkerInfo, ArrayList<MoveCommand>> assignments = Maps.newConcurrentMap();
    ConcurrentMap<String, WorkerInfo> hostnameToWorker = Maps.newConcurrentMap();
    for (WorkerInfo workerInfo : jobWorkerInfoList) {
        hostnameToWorker.put(workerInfo.getAddress().getHost(), workerInfo);
    }
    List<String> keys = new ArrayList<>();
    keys.addAll(hostnameToWorker.keySet());

    // Assign each file to the worker with the most block locality.
    for (URIStatus status : allPathStatuses) {
        if (status.isFolder()) {
            moveDirectory(status.getPath(), source.getPath(), destination.getPath());
        } else {
            WorkerInfo bestJobWorker = getBestJobWorker(status, alluxioWorkerInfoList, jobWorkerInfoList,
                    hostnameToWorker);
            String destinationPath = computeTargetPath(status.getPath(), source.getPath(),
                    destination.getPath());
            assignments.putIfAbsent(bestJobWorker, Lists.<MoveCommand>newArrayList());
            assignments.get(bestJobWorker).add(new MoveCommand(status.getPath(), destinationPath));
        }
    }
    return assignments;
}
From source file:org.apache.hadoop.yarn.util.TestFSDownload.java
@Test(timeout = 60000)
public void testDownloadPublicWithStatCache()
        throws IOException, URISyntaxException, InterruptedException, ExecutionException {
    final Configuration conf = new Configuration();
    FileContext files = FileContext.getLocalFSFileContext(conf);
    Path basedir = files.makeQualified(new Path("target", TestFSDownload.class.getSimpleName()));

    // if test directory doesn't have ancestor permission, skip this test
    FileSystem f = basedir.getFileSystem(conf);
    assumeTrue(FSDownload.ancestorsHaveExecutePermissions(f, basedir, null));

    files.mkdir(basedir, null, true);
    conf.setStrings(TestFSDownload.class.getName(), basedir.toString());

    int size = 512;

    final ConcurrentMap<Path, AtomicInteger> counts = new ConcurrentHashMap<Path, AtomicInteger>();
    final CacheLoader<Path, Future<FileStatus>> loader = FSDownload.createStatusCacheLoader(conf);
    final LoadingCache<Path, Future<FileStatus>> statCache = CacheBuilder.newBuilder()
            .build(new CacheLoader<Path, Future<FileStatus>>() {
                public Future<FileStatus> load(Path path) throws Exception {
                    // increment the count
                    AtomicInteger count = counts.get(path);
                    if (count == null) {
                        count = new AtomicInteger(0);
                        AtomicInteger existing = counts.putIfAbsent(path, count);
                        if (existing != null) {
                            count = existing;
                        }
                    }
                    count.incrementAndGet();

                    // use the default loader
                    return loader.load(path);
                }
            });

    // test FSDownload.isPublic() concurrently
    final int fileCount = 3;
    List<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>();
    for (int i = 0; i < fileCount; i++) {
        Random rand = new Random();
        long sharedSeed = rand.nextLong();
        rand.setSeed(sharedSeed);
        System.out.println("SEED: " + sharedSeed);
        final Path path = new Path(basedir, "test-file-" + i);
        createFile(files, path, size, rand);
        final FileSystem fs = path.getFileSystem(conf);
        final FileStatus sStat = fs.getFileStatus(path);
        tasks.add(new Callable<Boolean>() {
            public Boolean call() throws IOException {
                return FSDownload.isPublic(fs, path, sStat, statCache);
            }
        });
    }

    ExecutorService exec = Executors.newFixedThreadPool(fileCount);
    try {
        List<Future<Boolean>> futures = exec.invokeAll(tasks);
        // files should be public
        for (Future<Boolean> future : futures) {
            assertTrue(future.get());
        }
        // for each path exactly one file status call should be made
        for (AtomicInteger count : counts.values()) {
            assertSame(count.get(), 1);
        }
    } finally {
        exec.shutdown();
    }
}
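The cache loader above uses the classic pre-Java-8 idiom for lazily installing a per-key counter: create a fresh AtomicInteger, try to publish it, and fall back to whichever instance won. On Java 8+ the same get-or-create collapses into computeIfAbsent. A hedged sketch of both forms (class and method names are illustrative):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class PerKeyCounter {
    private final ConcurrentMap<String, AtomicInteger> counts = new ConcurrentHashMap<>();

    /** Pre-Java-8 idiom, as in the test above: create, try to publish, keep the winner. */
    public void incrementClassic(String key) {
        AtomicInteger count = counts.get(key);
        if (count == null) {
            AtomicInteger fresh = new AtomicInteger(0);
            AtomicInteger existing = counts.putIfAbsent(key, fresh);
            count = (existing != null) ? existing : fresh;  // keep whichever mapping won
        }
        count.incrementAndGet();
    }

    /** Java 8+ equivalent: computeIfAbsent does the get-or-create in one atomic step. */
    public void incrementModern(String key) {
        counts.computeIfAbsent(key, k -> new AtomicInteger(0)).incrementAndGet();
    }
}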
From source file:com.alibaba.jstorm.daemon.nimbus.TopologyMetricsRunnable.java
private void syncMetaFromRemote(String topologyId, TopologyMetricContext context) {
    try {
        int memSize = context.getMemMeta().size();
        Integer zkSize = (Integer) stormClusterState.get_topology_metric(topologyId);
        if (zkSize != null && memSize != zkSize.intValue()) {
            ConcurrentMap<String, Long> memMeta = context.getMemMeta();
            for (MetaType metaType : MetaType.values()) {
                List<MetricMeta> metaList = metricQueryClient.getMetricMeta(clusterName, topologyId, metaType);
                if (metaList != null) {
                    LOG.info("get remote metric meta, topology:{}, metaType:{}, mem:{}, zk:{}, new size:{}",
                            topologyId, metaType, memSize, zkSize, metaList.size());
                    for (MetricMeta meta : metaList) {
                        // putIfAbsent keeps any meta registered locally while the remote query was in flight
                        memMeta.putIfAbsent(meta.getFQN(), meta.getId());
                    }
                }
            }
            metricCache.putMeta(topologyId, memMeta);
        }
    } catch (Exception ex) {
        LOG.error("failed to sync remote meta", ex);
    }
}
From source file:hr.diskobolos.persistence.impl.EvaluationAnswerPersistenceImpl.java
@Override
public ConcurrentMap<TermsOfConditionStatus, AtomicLong> fetchTermsOfCompetitionStatistic() {
    CriteriaBuilder cb = entityManager.getCriteriaBuilder();
    CriteriaQuery<EvaluationAnswer> cq = cb.createQuery(EvaluationAnswer.class);
    Root<EvaluationAnswer> evaluationAnswer = cq.from(EvaluationAnswer.class);
    Join<EvaluationAnswer, QuestionChoicesDef> choiceDef = evaluationAnswer.join(EvaluationAnswer_.answer);
    Join<QuestionChoicesDef, EvaluationQuestionDef> questionDef = choiceDef
            .join(QuestionChoicesDef_.evaluationQuestionDef);
    ParameterExpression<QuestionnaireType> questionnaireType = cb.parameter(QuestionnaireType.class,
            "questionnaireType");
    cq.select(evaluationAnswer);
    cq.where(cb.equal(questionDef.get(EvaluationQuestionDef_.questionnaireType), questionnaireType));
    TypedQuery<EvaluationAnswer> query = entityManager.createQuery(cq);
    query.setParameter("questionnaireType", QuestionnaireType.TERMS_OF_CONDITION);
    List<EvaluationAnswer> evaluationAnswers = query.getResultList();

    ConcurrentMap<TermsOfConditionStatus, AtomicLong> distributionByTermsOfCompetitionStatus =
            new ConcurrentHashMap<>();
    List<EvaluationQuestionnaireDefEnum> questionnaireDef = Arrays
            .asList(EvaluationQuestionnaireDefEnum.values());
    long numberOfQuestion = questionnaireDef.stream()
            .filter(q -> q.getQuestionnaireType().equals(QuestionnaireType.TERMS_OF_CONDITION))
            .collect(Collectors.counting());
    List<MemberRegister> memberRegisters = evaluationAnswers.stream()
            .filter(StreamUtil.distinctByKey((EvaluationAnswer e) -> e.getMemberRegister().getId()))
            .map(EvaluationAnswer::getMemberRegister).collect(Collectors.toList());
    memberRegisters.stream().forEach((memberRegister) -> {
        TermsOfConditionStatus termsOfConditionStatus = TermsOfConditionStatus.NONE;
        if (evaluationAnswers.stream().filter(m -> m.getMemberRegister().equals(memberRegister))
                .count() == numberOfQuestion) {
            boolean isValid = evaluationAnswers.stream()
                    .filter(m -> m.getMemberRegister().equals(memberRegister))
                    .allMatch(e -> e.getAnswer().getLabel()
                            .equals(messageSource.getMessage("QuestionChoicesDef.yes", null, Locale.ENGLISH)));
            termsOfConditionStatus = isValid ? TermsOfConditionStatus.VALID : TermsOfConditionStatus.INVALID;
        }
        distributionByTermsOfCompetitionStatus.putIfAbsent(termsOfConditionStatus, new AtomicLong(0));
        distributionByTermsOfCompetitionStatus.get(termsOfConditionStatus).incrementAndGet();
    });
    return distributionByTermsOfCompetitionStatus;
}
From source file:org.apereo.portal.portlets.search.SearchPortletController.java
/**
 * Performs a search of the explicitly configured {@link IPortalSearchService}s. This
 * is done as an event handler so that it can run concurrently with the other portlets
 * handling the search request.
 */
@SuppressWarnings("unchecked")
@EventMapping(SearchConstants.SEARCH_REQUEST_QNAME_STRING)
public void handleSearchRequest(EventRequest request, EventResponse response) {
    // UP-3887 Design flaw. Both the searchLauncher portlet instance and the search portlet instance receive
    // searchRequest and searchResult events because they are in the same portlet code base (to share
    // autosuggest_handler.jsp and because we have to calculate the search portlet url for the ajax call)
    // and share the portlet.xml which defines the event handling behavior.
    // If this instance is the searchLauncher, ignore the searchResult. The search was submitted to the search
    // portlet instance.
    final String searchLaunchFname = request.getPreferences().getValue(SEARCH_LAUNCH_FNAME, null);
    if (searchLaunchFname != null) {
        // Noisy in debug mode so commented out log statement
        // logger.debug("SearchLauncher does not participate in SearchRequest events so discarding message");
        return;
    }

    final Event event = request.getEvent();
    final SearchRequest searchQuery = (SearchRequest) event.getValue();

    // Map used to track searches that have been handled, used so that one search doesn't get duplicate results
    ConcurrentMap<String, Boolean> searchHandledCache;
    final PortletSession session = request.getPortletSession();
    synchronized (org.springframework.web.portlet.util.PortletUtils.getSessionMutex(session)) {
        searchHandledCache = (ConcurrentMap<String, Boolean>) session.getAttribute(SEARCH_HANDLED_CACHE_NAME,
                PortletSession.APPLICATION_SCOPE);
        if (searchHandledCache == null) {
            searchHandledCache = CacheBuilder.newBuilder().maximumSize(20)
                    .expireAfterAccess(5, TimeUnit.MINUTES).<String, Boolean>build().asMap();
            session.setAttribute(SEARCH_HANDLED_CACHE_NAME, searchHandledCache,
                    PortletSession.APPLICATION_SCOPE);
        }
    }

    final String queryId = searchQuery.getQueryId();
    if (searchHandledCache.putIfAbsent(queryId, Boolean.TRUE) != null) {
        // Already handled this search request
        return;
    }

    // Create the results
    final SearchResults results = new SearchResults();
    results.setQueryId(queryId);
    results.setWindowId(request.getWindowID());
    final List<SearchResult> searchResultList = results.getSearchResult();

    // Run the search for each service appending the results
    for (IPortalSearchService searchService : searchServices) {
        try {
            logger.debug("For queryId {}, query '{}', searching search service {}", queryId,
                    searchQuery.getSearchTerms(), searchService.getClass().toString());
            final SearchResults serviceResults = searchService.getSearchResults(request, searchQuery);
            logger.debug("For queryId {}, obtained {} results from search service {}", queryId,
                    serviceResults.getSearchResult().size(), searchService.getClass().toString());
            searchResultList.addAll(serviceResults.getSearchResult());
        } catch (Exception e) {
            logger.warn(searchService.getClass()
                    + " threw an exception when searching, it will be ignored. " + searchQuery, e);
        }
    }

    // Respond with a results event if results were found
    if (!searchResultList.isEmpty()) {
        response.setEvent(SearchConstants.SEARCH_RESULTS_QNAME, results);
    }
}
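Here putIfAbsent acts as an atomic claim check: the first caller to insert the queryId wins and proceeds, while every later caller sees a non-null return and bails out. A minimal sketch of that dedup gate (names are illustrative, not the portlet API):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class OnceOnlyHandler {
    private final ConcurrentMap<String, Boolean> handled = new ConcurrentHashMap<>();

    /** Returns true for exactly one caller per id, no matter how many threads race. */
    public boolean tryClaim(String id) {
        // Non-null return means another thread already claimed this id.
        return handled.putIfAbsent(id, Boolean.TRUE) == null;
    }

    public void handle(String queryId) {
        if (!tryClaim(queryId)) {
            return;  // already handled this request
        }
        // ... perform the one-time work for queryId ...
    }
}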
From source file:com.github.podd.example.ExamplePoddClient.java
private void populateGenotypeUriMap(
        final ConcurrentMap<String, ConcurrentMap<URI, InferredOWLOntologyID>> projectUriMap,
        final ConcurrentMap<URI, ConcurrentMap<URI, Model>> genotypeUriMap) throws PoddClientException {
    for (final String nextProjectName : projectUriMap.keySet()) {
        final ConcurrentMap<URI, InferredOWLOntologyID> nextProjectNameMapping = projectUriMap
                .get(nextProjectName);
        for (final URI projectUri : nextProjectNameMapping.keySet()) {
            final InferredOWLOntologyID artifactId = nextProjectNameMapping.get(projectUri);
            final Model nextSparqlResults = this.doSPARQL(
                    String.format(ExampleSpreadsheetConstants.TEMPLATE_SPARQL_BY_TYPE_ALL_PROPERTIES,
                            RenderUtils.getSPARQLQueryString(PODD.PODD_SCIENCE_GENOTYPE)),
                    Arrays.asList(artifactId));

            if (nextSparqlResults.isEmpty()) {
                this.log.debug("Could not find any existing genotypes for project: {} {}", nextProjectName,
                        projectUri);
            }

            for (final Resource nextGenotype : nextSparqlResults
                    .filter(null, RDF.TYPE, PODD.PODD_SCIENCE_GENOTYPE).subjects()) {
                if (!(nextGenotype instanceof URI)) {
                    this.log.error("Found genotype that was not assigned a URI: {} artifact={}", nextGenotype,
                            artifactId);
                } else {
                    ConcurrentMap<URI, Model> nextGenotypeMap = new ConcurrentHashMap<>();
                    // putIfAbsent so a map installed by a concurrent caller is kept rather than replaced
                    final ConcurrentMap<URI, Model> putIfAbsent = genotypeUriMap.putIfAbsent(projectUri,
                            nextGenotypeMap);
                    if (putIfAbsent != null) {
                        nextGenotypeMap = putIfAbsent;
                    }
                    final Model putIfAbsent2 = nextGenotypeMap.putIfAbsent((URI) nextGenotype,
                            nextSparqlResults);
                    if (putIfAbsent2 != null) {
                        this.log.info(
                                "Found existing description for genotype URI within the same project: {} {}",
                                projectUri, nextGenotype);
                    }
                }
            }
        }
    }
}
From source file:org.amplafi.hivemind.factory.servicessetter.ServicesSetterImpl.java
/**
 * @see com.sworddance.core.ServicesSetter#wire(java.lang.Object, java.lang.Iterable)
 */
@Override
@SuppressWarnings("unchecked")
public void wire(Object obj, Iterable<String> excludedProperties) {
    if (obj == null) {
        return;
    }
    List<String> props = getWriteableProperties(obj);
    Set<String> alwaysExcludedCollection = this.cachedAlwaysExcludedMap.get(obj.getClass());
    if (alwaysExcludedCollection != null) {
        props.removeAll(alwaysExcludedCollection);
        if (getLog().isDebugEnabled()) {
            getLog().debug(obj.getClass() + ": autowiring. Class has already been filtered down to "
                    + props.size() + " properties. props={" + join(props, ",") + "}");
        }
    } else {
        if (getLog().isDebugEnabled()) {
            getLog().debug(obj.getClass() + ": autowiring for the first time. Class has at most "
                    + props.size() + " properties. props={" + join(props, ",") + "}");
        }
        alwaysExcludedCollection = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
        this.cachedAlwaysExcludedMap.putIfAbsent(obj.getClass(), alwaysExcludedCollection);
        alwaysExcludedCollection = this.cachedAlwaysExcludedMap.get(obj.getClass());
    }
    for (String exclude : excludedProperties) {
        props.remove(exclude);
    }

    int wiredCount = 0;
    for (String prop : props) {
        PropertyAdaptor type = getPropertyAdaptor(obj, prop);
        Class propertyType = type.getPropertyType();
        if (!isWireableClass(propertyType)) {
            // if it is a standard java class then lets exclude it.
            alwaysExcludedCollection.add(prop);
            continue;
        }
        // if it is not readable or writable, then we can't verify that
        // we are not overwriting a non-null property.
        if (!type.isReadable() || !type.isWritable()) {
            alwaysExcludedCollection.add(prop);
            continue;
        }
        // check to see if we have a service to offer before bothering
        // to checking if the property can be set. This avoids triggering
        // actions caused by calling the get/setters.
        Object srv = null;
        if (type.getPropertyType() == Log.class) {
            // log is special.
            srv = LogFactory.getLog(obj.getClass());
        } else {
            ConcurrentMap<String, String> classServiceMap = this.serviceMap.get(obj.getClass());
            if (classServiceMap == null) {
                this.serviceMap.putIfAbsent(obj.getClass(), new ConcurrentHashMap<String, String>());
                classServiceMap = this.serviceMap.get(obj.getClass());
            }
            String serviceName = classServiceMap.get(prop);
            if (serviceName == null) {
                InjectService service;
                try {
                    service = findInjectService(obj, type);
                } catch (DontInjectException e) {
                    // do nothing
                    alwaysExcludedCollection.add(prop);
                    continue;
                }
                if (service != null) {
                    serviceName = service.value();
                    if (isNotBlank(serviceName)) {
                        for (String attempt : new String[] { serviceName,
                                serviceName + '.' + type.getPropertyName(),
                                serviceName + '.' + StringUtils.capitalize(type.getPropertyName()) }) {
                            srv = getService(attempt, propertyType);
                            if (srv != null) {
                                serviceName = attempt;
                                break;
                            }
                        }
                    }
                }
                if (srv != null) {
                    classServiceMap.putIfAbsent(prop, serviceName);
                } else {
                    // we looked but did not find... no need to look again.
                    classServiceMap.putIfAbsent(prop, "");
                }
            } else if (!serviceName.isEmpty()) {
                // we already found the service.
                srv = getService(serviceName, propertyType);
            }
            if (srv == null && !noServiceForType.containsKey(propertyType)) {
                try {
                    srv = this.module.getService(propertyType);
                } catch (Exception e) {
                    noServiceForType.put(propertyType, e);
                    getLog().debug("Look up of class " + propertyType
                            + " failed. The failure is caused if there is not exactly 1 service implementing"
                            + " the class. Further searches by this property class will be ignored.");
                }
            }
        }
        if (srv == null) {
            alwaysExcludedCollection.add(prop);
        } else if (type.read(obj) == null) {
            // Doing the read check last avoids
            // triggering problems caused by lazy initialization and read-only properties.
            if (type.getPropertyType().isAssignableFrom(srv.getClass())) {
                type.write(obj, srv);
                wiredCount++;
            } else {
                // this is probably an error so we do not just add to the exclude list.
                throw new ApplicationRuntimeException("Trying to set property " + obj.getClass() + "." + prop
                        + " however, the property type=" + type.getPropertyType()
                        + " is not a superclass or same class as " + srv.getClass() + ". srv=" + srv);
            }
        }
    }
    if (getLog().isDebugEnabled()) {
        getLog().debug(obj.getClass() + ": done autowiring. actual number of properties wired=" + wiredCount
                + " excluded properties=" + alwaysExcludedCollection);
    }
}
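Both putIfAbsent-then-get dances above (the excluded-property set and the per-class service-name map) re-read the map to pick up whichever value was actually installed. On Java 8+ computeIfAbsent does that get-or-create in one atomic step. A hedged sketch of the same caching shape (class and field names are illustrative):

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class PerClassCache {
    private final ConcurrentMap<Class<?>, Set<String>> excludedProps = new ConcurrentHashMap<>();
    private final ConcurrentMap<Class<?>, ConcurrentMap<String, String>> serviceNames =
            new ConcurrentHashMap<>();

    /** Pre-Java-8 shape, as in wire(): putIfAbsent then re-read to pick up the winner. */
    Set<String> excludedClassic(Class<?> clazz) {
        Set<String> set = excludedProps.get(clazz);
        if (set == null) {
            excludedProps.putIfAbsent(clazz,
                    Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>()));
            set = excludedProps.get(clazz);  // re-read: returns whichever set was installed
        }
        return set;
    }

    /** Java 8+ equivalent: one call, and the discarded-allocation window disappears. */
    ConcurrentMap<String, String> servicesModern(Class<?> clazz) {
        return serviceNames.computeIfAbsent(clazz, k -> new ConcurrentHashMap<>());
    }
}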
From source file:com.github.podd.resources.RestletPoddClientImpl.java
@Override
public Map<InferredOWLOntologyID, InferredOWLOntologyID> appendArtifacts(
        final Map<InferredOWLOntologyID, Model> uploadQueue) throws PoddException {
    final ConcurrentMap<InferredOWLOntologyID, InferredOWLOntologyID> resultMap = new ConcurrentHashMap<>();
    for (final Entry<InferredOWLOntologyID, Model> nextUpload : uploadQueue.entrySet()) {
        try {
            final StringWriter writer = new StringWriter(4096);
            Rio.write(nextUpload.getValue(), writer, RDFFormat.RDFJSON);

            final InferredOWLOntologyID newID = this.appendArtifact(nextUpload.getKey(),
                    new ByteArrayInputStream(writer.toString().getBytes(Charset.forName("UTF-8"))),
                    RDFFormat.RDFJSON);

            if (newID == null) {
                this.log.error("Did not find a valid result from append artifact: {}", nextUpload.getKey());
            } else if (nextUpload.getKey().equals(newID)) {
                this.log.error("Result from append artifact was not changed, as expected. {} {}",
                        nextUpload.getKey(), newID);
            } else {
                resultMap.putIfAbsent(nextUpload.getKey(), newID);
            }
        } catch (final RDFHandlerException e) {
            this.log.error("Found exception generating upload body: ", e);
        }
    }
    return resultMap;
}
From source file:com.github.podd.example.ExamplePoddClient.java
public ConcurrentMap<InferredOWLOntologyID, InferredOWLOntologyID> uploadArtifacts(
        final ConcurrentMap<InferredOWLOntologyID, Model> uploadQueue) throws PoddClientException {
    final ConcurrentMap<InferredOWLOntologyID, InferredOWLOntologyID> resultMap = new ConcurrentHashMap<>();
    for (final InferredOWLOntologyID nextUpload : uploadQueue.keySet()) {
        try {
            final StringWriter writer = new StringWriter(4096);
            Rio.write(uploadQueue.get(nextUpload), writer, RDFFormat.RDFJSON);

            final InferredOWLOntologyID newID = this.appendArtifact(nextUpload,
                    new ByteArrayInputStream(writer.toString().getBytes(Charset.forName("UTF-8"))),
                    RDFFormat.RDFJSON);

            if (newID == null) {
                this.log.error("Did not find a valid result from append artifact: {}", nextUpload);
            } else if (nextUpload.equals(newID)) {
                this.log.error("Result from append artifact was not changed, as expected. {} {}", nextUpload,
                        newID);
            } else {
                resultMap.putIfAbsent(nextUpload, newID);
            }
        } catch (final RDFHandlerException e) {
            this.log.error("Found exception generating upload body: ", e);
        }
    }
    return resultMap;
}