List of usage examples for java.util.concurrent ConcurrentMap isEmpty
boolean isEmpty();
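Before the project-specific usages below, a minimal self-contained sketch of the call itself (the class and variable names are made up for illustration). On a ConcurrentMap the result is only a point-in-time snapshot: another thread may add or remove entries the moment isEmpty() returns.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class IsEmptyDemo {
    public static void main(String[] args) {
        ConcurrentMap<String, Integer> counts = new ConcurrentHashMap<>();
        System.out.println(counts.isEmpty()); // true: no entries yet

        counts.put("requests", 1);
        System.out.println(counts.isEmpty()); // false: one entry present

        counts.remove("requests");
        System.out.println(counts.isEmpty()); // true again
    }
}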
From source file:org.jasig.portal.io.xml.JaxbPortalDataHandlerService.java
@Override
public void importData(File directory, String pattern, final BatchImportOptions options) {
    if (!directory.exists()) {
        throw new IllegalArgumentException("The specified directory '" + directory + "' does not exist");
    }

    //Create the file filter to use when searching for files to import
    final FileFilter fileFilter;
    if (pattern != null) {
        fileFilter = new AntPatternFileFilter(true, false, pattern, this.dataFileExcludes);
    } else {
        fileFilter = new AntPatternFileFilter(true, false, this.dataFileIncludes, this.dataFileExcludes);
    }

    //Determine the parent directory to log to
    final File logDirectory = determineLogDirectory(options, "import");

    //Setup reporting file
    final File importReport = new File(logDirectory, "data-import.txt");
    final PrintWriter reportWriter;
    try {
        reportWriter = new PrintWriter(new PeriodicFlushingBufferedWriter(500, new FileWriter(importReport)));
    } catch (IOException e) {
        throw new RuntimeException("Failed to create FileWriter for: " + importReport, e);
    }

    //Convert directory to URI String to provide better logging output
    final URI directoryUri = directory.toURI();
    final String directoryUriStr = directoryUri.toString();
    IMPORT_BASE_DIR.set(directoryUriStr);
    try {
        //Scan the specified directory for files to import
        logger.info("Scanning for files to Import from: {}", directory);
        final PortalDataKeyFileProcessor fileProcessor = new PortalDataKeyFileProcessor(this.dataKeyTypes, options);
        this.directoryScanner.scanDirectoryNoResults(directory, fileFilter, fileProcessor);
        final long resourceCount = fileProcessor.getResourceCount();
        logger.info("Found {} files to Import from: {}", resourceCount, directory);

        //See if the import should fail on error
        final boolean failOnError = options != null ? options.isFailOnError() : true;

        //Map of files to import, grouped by type
        final ConcurrentMap<PortalDataKey, Queue<Resource>> dataToImport = fileProcessor.getDataToImport();

        //Import the data files
        for (final PortalDataKey portalDataKey : this.dataKeyImportOrder) {
            final Queue<Resource> files = dataToImport.remove(portalDataKey);
            if (files == null) {
                continue;
            }

            final Queue<ImportFuture<?>> importFutures = new LinkedList<ImportFuture<?>>();
            final List<FutureHolder<?>> failedFutures = new LinkedList<FutureHolder<?>>();

            final int fileCount = files.size();
            logger.info("Importing {} files of type {}", fileCount, portalDataKey);
            reportWriter.println(portalDataKey + "," + fileCount);

            while (!files.isEmpty()) {
                final Resource file = files.poll();

                //Check for completed futures on every iteration, needed to fail as fast as possible on an import exception
                final List<FutureHolder<?>> newFailed = waitForFutures(importFutures, reportWriter, logDirectory, false);
                failedFutures.addAll(newFailed);

                final AtomicLong importTime = new AtomicLong(-1);

                //Create import task
                final Callable<Object> task = new CallableWithoutResult() {
                    @Override
                    protected void callWithoutResult() {
                        IMPORT_BASE_DIR.set(directoryUriStr);
                        importTime.set(System.nanoTime());
                        try {
                            importData(file, portalDataKey);
                        } finally {
                            importTime.set(System.nanoTime() - importTime.get());
                            IMPORT_BASE_DIR.remove();
                        }
                    }
                };

                //Submit the import task
                final Future<?> importFuture = this.importExportThreadPool.submit(task);

                //Add the future for tracking
                importFutures.offer(new ImportFuture(importFuture, file, portalDataKey, importTime));
            }

            //Wait for all of the imports of this type to complete
            final List<FutureHolder<?>> newFailed = waitForFutures(importFutures, reportWriter, logDirectory, true);
            failedFutures.addAll(newFailed);

            if (failOnError && !failedFutures.isEmpty()) {
                throw new RuntimeException(failedFutures.size() + " " + portalDataKey + " entities failed to import.\n\n"
                        + "\tPer entity exception logs and a full report can be found in " + logDirectory + "\n");
            }

            reportWriter.flush();
        }

        if (!dataToImport.isEmpty()) {
            throw new IllegalStateException(
                    "The following PortalDataKeys are not listed in the dataTypeImportOrder List: " + dataToImport.keySet());
        }

        logger.info("For a detailed report on the data import see " + importReport);
    } catch (InterruptedException e) {
        throw new RuntimeException("Interrupted while waiting for entities to import", e);
    } finally {
        IOUtils.closeQuietly(reportWriter);
        IMPORT_BASE_DIR.remove();
    }
}
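The idiom worth noting above: each known key is claimed atomically with remove(), and a final isEmpty() check exposes any keys that were never covered by the configured ordering. A condensed sketch of that idiom under invented names (pending, processOrder, and the file strings are all made up for illustration):

import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class DrainInOrderSketch {
    public static void main(String[] args) {
        ConcurrentMap<String, Queue<String>> pending = new ConcurrentHashMap<>();
        pending.put("users", new ArrayDeque<>(List.of("u1.xml")));
        pending.put("groups", new ArrayDeque<>(List.of("g1.xml")));
        pending.put("themes", new ArrayDeque<>(List.of("t1.xml"))); // deliberately not in the order list

        List<String> processOrder = List.of("users", "groups");
        for (String key : processOrder) {
            Queue<String> files = pending.remove(key); // atomically claim everything of this type
            if (files == null) {
                continue; // nothing of this type was discovered
            }
            files.forEach(f -> System.out.println("importing " + key + "/" + f));
        }

        // Whatever is left was never listed in processOrder, the same check
        // as the dataToImport.isEmpty() guard in the example above.
        if (!pending.isEmpty()) {
            throw new IllegalStateException("Keys missing from processOrder: " + pending.keySet());
        }
    }
}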
From source file:org.onosproject.yms.app.ysr.DefaultYangSchemaRegistry.java
/**
 * Returns schema node based on the revision.
 *
 * @param name name of the schema node
 * @return schema node based on the revision
 */
private YangSchemaNode getSchemaNodeUsingSchemaNameWithRev(String name) {
    ConcurrentMap<String, YangSchemaNode> revMap;
    YangSchemaNode schemaNode;
    if (name.contains(AT)) {
        String[] revArray = name.split(AT);
        revMap = yangSchemaStore.get(revArray[0]);
        schemaNode = revMap.get(name);
        if (schemaNode == null) {
            log.error("{} not found.", name);
        }
        return schemaNode;
    }
    if (yangSchemaStore.containsKey(name)) {
        revMap = yangSchemaStore.get(name);
        if (revMap != null && !revMap.isEmpty()) {
            YangSchemaNode node = revMap.get(name);
            if (node != null) {
                return node;
            }
            String revName = getLatestVersion(revMap);
            return revMap.get(revName);
        }
    }
    log.error("{} not found.", name);
    return null;
}
From source file:org.onosproject.yms.app.ysr.DefaultYangSchemaRegistry.java
/**
 * Removes schema node from schema map.
 *
 * @param removableNode schema node which needs to be removed
 */
private void removeSchemaNode(YangSchemaNode removableNode) {
    String name = removableNode.getName();
    String revName = name;
    String date = getDateInStringFormat(removableNode);
    if (date != null) {
        revName = name + AT + date;
    }
    ConcurrentMap<String, YangSchemaNode> revMap = yangSchemaStore.get(name);
    if (revMap != null && !revMap.isEmpty() && revMap.size() != 1) {
        revMap.remove(revName);
    } else {
        yangSchemaStore.remove(removableNode.getName());
    }
}
From source file:org.opendaylight.controller.protocol_plugin.openflow.internal.TopologyServiceShim.java
/**
 * Update local cache and return true if it needs to notify upper layer
 * Topology listeners.
 *
 * @param container
 *            The network container
 * @param edge
 *            The edge
 * @param type
 *            The update type
 * @param props
 *            The edge properties
 * @return true if it needs to notify upper layer Topology listeners
 */
private boolean updateLocalEdgeMap(String container, Edge edge, UpdateType type, Set<Property> props) {
    ConcurrentMap<NodeConnector, Pair<Edge, Set<Property>>> edgePropsMap = edgeMap.get(container);
    NodeConnector src = edge.getTailNodeConnector();
    Pair<Edge, Set<Property>> edgeProps = new ImmutablePair<Edge, Set<Property>>(edge, props);
    boolean rv = false;

    switch (type) {
    case ADDED:
    case CHANGED:
        if (edgePropsMap == null) {
            edgePropsMap = new ConcurrentHashMap<NodeConnector, Pair<Edge, Set<Property>>>();
            rv = true;
        } else {
            if (edgePropsMap.containsKey(src) && edgePropsMap.get(src).equals(edgeProps)) {
                // Entry already exists. No update.
                rv = false;
            } else {
                rv = true;
            }
        }
        if (rv) {
            edgePropsMap.put(src, edgeProps);
            edgeMap.put(container, edgePropsMap);
        }
        break;
    case REMOVED:
        if ((edgePropsMap != null) && edgePropsMap.containsKey(src)) {
            edgePropsMap.remove(src);
            if (edgePropsMap.isEmpty()) {
                edgeMap.remove(container);
            } else {
                edgeMap.put(container, edgePropsMap);
            }
            rv = true;
        }
        break;
    default:
        logger.debug("notifyLocalEdgeMap: invalid {} for Edge {} in container {}",
                new Object[] { type.getName(), edge, container });
    }

    if (rv) {
        logger.debug("notifyLocalEdgeMap: {} for Edge {} in container {}",
                new Object[] { type.getName(), edge, container });
    }
    return rv;
}
From source file:org.opendaylight.netvirt.elan.l2gw.utils.ElanL2GatewayUtils.java
/**
 * Delete elan l2 gateway devices ucast local macs from dpn.
 *
 * @param elanName
 *            the elan name
 * @param dpnId
 *            the dpn id
 */
public void deleteElanL2GwDevicesUcastLocalMacsFromDpn(final String elanName, final BigInteger dpnId) {
    ConcurrentMap<String, L2GatewayDevice> elanL2GwDevices = ElanL2GwCacheUtils.getInvolvedL2GwDevices(elanName);
    if (elanL2GwDevices == null || elanL2GwDevices.isEmpty()) {
        LOG.trace("No L2 gateway devices in Elan [{}] cache.", elanName);
        return;
    }

    final ElanInstance elan = ElanUtils.getElanInstanceByName(broker, elanName);
    if (elan == null) {
        LOG.error("Could not find Elan by name: {}", elanName);
        return;
    }

    LOG.info("Deleting Elan [{}] L2GatewayDevices UcastLocalMacs from Dpn [{}]", elanName, dpnId);
    final Long elanTag = elan.getElanTag();
    for (final L2GatewayDevice l2GwDevice : elanL2GwDevices.values()) {
        List<MacAddress> localMacs = getL2GwDeviceLocalMacs(l2GwDevice);
        if (localMacs != null && !localMacs.isEmpty()) {
            for (final MacAddress mac : localMacs) {
                String jobKey = elanName + ":" + mac.getValue();
                ElanClusterUtils.runOnlyInLeaderNode(entityOwnershipService, jobKey,
                        "delete l2gw macs from dmac table", () -> {
                            List<ListenableFuture<Void>> futures = new ArrayList<>();
                            futures.addAll(elanUtils.deleteDmacFlowsToExternalMac(elanTag, dpnId,
                                    l2GwDevice.getHwvtepNodeId(), mac.getValue()));
                            return futures;
                        });
            }
        }
    }
}
From source file:org.opendaylight.vpnservice.elan.l2gw.utils.ElanL2GatewayUtils.java
/**
 * Delete elan l2 gateway devices ucast local macs from dpn.
 *
 * @param elanName
 *            the elan name
 * @param dpnId
 *            the dpn id
 */
public static void deleteElanL2GwDevicesUcastLocalMacsFromDpn(final String elanName, final BigInteger dpnId) {
    ConcurrentMap<String, L2GatewayDevice> elanL2GwDevices = ElanL2GwCacheUtils.getInvolvedL2GwDevices(elanName);
    if (elanL2GwDevices == null || elanL2GwDevices.isEmpty()) {
        LOG.trace("No L2 gateway devices in Elan [{}] cache.", elanName);
        return;
    }

    final ElanInstance elan = ElanUtils.getElanInstanceByName(elanName);
    if (elan == null) {
        LOG.error("Could not find Elan by name: {}", elanName);
        return;
    }

    LOG.info("Deleting Elan [{}] L2GatewayDevices UcastLocalMacs from Dpn [{}]", elanName, dpnId);
    final Long elanTag = elan.getElanTag();
    for (final L2GatewayDevice l2GwDevice : elanL2GwDevices.values()) {
        List<MacAddress> localMacs = getL2GwDeviceLocalMacs(l2GwDevice);
        if (localMacs != null && !localMacs.isEmpty()) {
            for (final MacAddress mac : localMacs) {
                String jobKey = elanName + ":" + mac.getValue();
                ElanClusterUtils.runOnlyInLeaderNode(jobKey, "delete l2gw macs from dmac table",
                        new Callable<List<ListenableFuture<Void>>>() {
                            @Override
                            public List<ListenableFuture<Void>> call() {
                                List<ListenableFuture<Void>> futures = Lists.newArrayList();
                                futures.addAll(ElanUtils.deleteDmacFlowsToExternalMac(elanTag, dpnId,
                                        l2GwDevice.getHwvtepNodeId(), mac.getValue()));
                                return futures;
                            }
                        });
            }
        }
    }
}
From source file:org.sakaiproject.memory.impl.GenericMultiRefCacheImpl.java
private void cleanEntityReferences(Object key, Object value) {
    if (M_log.isDebugEnabled())
        M_log.debug("cleanEntityReferences(Object " + key + ", Object " + value + ")");

    if (value == null)
        return;

    final MultiRefCacheEntry cachedEntry = (MultiRefCacheEntry) value;

    // remove this key from any of the entity references in m_refs that are dependent on this entry
    for (Iterator iRefs = cachedEntry.getRefs().iterator(); iRefs.hasNext();) {
        String ref = (String) iRefs.next();
        ConcurrentMap<Object, Object> keys = m_refsStore.get(ref);
        if (keys != null && keys.remove(key) != null) {
            // remove the ref entry if it no longer has any cached keys in
            // its collection
            // TODO This isn't thread safe.
            if (keys.isEmpty()) {
                m_refsStore.remove(ref, keys);
            }
        }
    }
    if (mrcDebug)
        logCacheState("cleanEntityReferences(" + key + ")");
}
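The TODO in this example points at the classic isEmpty() pitfall on a ConcurrentMap: the emptiness check and the removal are two separate steps, so another thread can register a new dependent in between. A sketch of that window and of an atomic alternative, under invented names (refsStore, removeDependent; this is an illustration, not the Sakai implementation):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class EmptyInnerMapRace {
    // outer registry: entity reference -> keys of cache entries depending on it
    static final ConcurrentMap<String, ConcurrentMap<Object, Object>> refsStore = new ConcurrentHashMap<>();

    // Mirrors the shape of the example above: check-then-act on isEmpty().
    static void removeDependent(String ref, Object cacheKey) {
        ConcurrentMap<Object, Object> keys = refsStore.get(ref);
        if (keys != null && keys.remove(cacheKey) != null && keys.isEmpty()) {
            // Another thread may put into 'keys' right here. remove(ref, keys)
            // only unmaps if 'keys' is still the mapped instance, but an entry
            // added in the window is silently dropped along with it.
            refsStore.remove(ref, keys);
        }
    }

    // Atomic variant: the emptiness test and the unmapping happen inside one
    // computeIfPresent(), closing the window, provided every writer to the
    // inner map also goes through compute* on refsStore.
    static void removeDependentAtomically(String ref, Object cacheKey) {
        refsStore.computeIfPresent(ref, (r, keys) -> {
            keys.remove(cacheKey);
            return keys.isEmpty() ? null : keys; // returning null removes 'ref'
        });
    }
}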
From source file:org.tomitribe.tribestream.registryng.resources.ClientResource.java
@GET @Path("invoke/stream") @Produces("text/event-stream") // will be part of JAX-RS 2.1, for now just making it working public void invokeScenario(@Suspended final AsyncResponse asyncResponse, @Context final Providers providers, @Context final HttpServletRequest httpServletRequest, // base64 encoded json with the request and identify since EventSource doesnt handle it very well // TODO: use a ciphering with a POST endpoint to avoid to have it readable (or other) @QueryParam("request") final String requestBytes) { final SseRequest in = loadPayload(SseRequest.class, providers, requestBytes); final String auth = in.getIdentity(); security.check(auth, httpServletRequest, () -> { }, () -> {//from w w w. j a v a 2s . co m throw new WebApplicationException(Response.Status.FORBIDDEN); }); final GenericClientService.Request req = toRequest(in.getHttp()); final Scenario scenario = in.getHttp().getScenario(); final MultivaluedHashMap<String, Object> fakeHttpHeaders = new MultivaluedHashMap<>(); final ConcurrentMap<Future<?>, Boolean> computations = new ConcurrentHashMap<>(); final MessageBodyWriter<LightHttpResponse> writerResponse = providers.getMessageBodyWriter( LightHttpResponse.class, LightHttpResponse.class, annotations, APPLICATION_JSON_TYPE); final MessageBodyWriter<ScenarioEnd> writerEnd = providers.getMessageBodyWriter(ScenarioEnd.class, ScenarioEnd.class, annotations, APPLICATION_JSON_TYPE); // not jaxrs one cause cxf wraps this one and prevents the flush() to works final HttpServletResponse httpServletResponse = HttpServletResponse.class .cast(httpServletRequest.getAttribute("tribe.registry.response")); httpServletResponse.setHeader("Content-Type", "text/event-stream"); try { httpServletResponse.flushBuffer(); } catch (final IOException e) { throw new IllegalStateException(e); } final ServletOutputStream out; try { out = httpServletResponse.getOutputStream(); } catch (final IOException e) { throw new IllegalStateException(e); } mes.submit(() -> { final AtomicReference<Invoker.Handle> handleRef = new AtomicReference<>(); try { // we compute some easy stats asynchronously final Map<Integer, AtomicInteger> sumPerResponse = new HashMap<>(); final AtomicInteger total = new AtomicInteger(); final AtomicLong min = new AtomicLong(); final AtomicLong max = new AtomicLong(); final AtomicLong sum = new AtomicLong(); final AtomicInteger writeErrors = new AtomicInteger(0); final long start = System.currentTimeMillis(); handleRef.set(invoker.invoke(scenario.getThreads(), scenario.getInvocations(), scenario.getDuration(), timeout, () -> { if (handleRef.get().isCancelled()) { return; } LightHttpResponse resp; try { final GenericClientService.Response invoke = service.invoke(req); resp = new LightHttpResponse(invoke.getStatus(), null, invoke.getClientExecutionDurationMs()); } catch (final RuntimeException e) { resp = new LightHttpResponse(-1, e.getMessage(), -1); } // let's process it in an environment where synchronisation is fine final LightHttpResponse respRef = resp; computations.put(mes.submit(() -> { synchronized (out) { try { out.write(dataStart); writerResponse.writeTo(respRef, LightHttpResponse.class, LightHttpResponse.class, annotations, APPLICATION_JSON_TYPE, fakeHttpHeaders, out); out.write(dataEnd); out.flush(); } catch (final IOException e) { if (writeErrors.incrementAndGet() > toleratedWriteErrors) { handleRef.get().cancel(); } throw new IllegalStateException(e); } } if (handleRef.get().isCancelled()) { return; } final long clientExecutionDurationMs = 
respRef.getClientExecutionDurationMs(); total.incrementAndGet(); sumPerResponse.computeIfAbsent(respRef.getStatus(), k -> new AtomicInteger()) .incrementAndGet(); sum.addAndGet(clientExecutionDurationMs); { long m = min.get(); do { m = min.get(); if (min.compareAndSet(m, clientExecutionDurationMs)) { break; } } while (m > clientExecutionDurationMs); } { long m = max.get(); do { m = max.get(); if (max.compareAndSet(m, clientExecutionDurationMs)) { break; } } while (m < clientExecutionDurationMs); } }), true); })); handleRef.get().await(); final long end = System.currentTimeMillis(); do { // wait all threads finished to compute the stats final Iterator<Future<?>> iterator = computations.keySet().iterator(); while (iterator.hasNext()) { try { iterator.next().get(timeout, TimeUnit.MILLISECONDS); } catch (final InterruptedException e) { Thread.interrupted(); } catch (final ExecutionException | TimeoutException e) { throw new IllegalStateException(e.getCause()); } finally { iterator.remove(); } } } while (!computations.isEmpty()); if (handleRef.get().isCancelled()) { return; } try { out.write(dataStart); writerEnd.writeTo( new ScenarioEnd( sumPerResponse.entrySet().stream() .collect(toMap(Map.Entry::getKey, t -> t.getValue().get())), end - start, total.get(), min.get(), max.get(), sum.get() * 1. / total.get()), ScenarioEnd.class, ScenarioEnd.class, annotations, APPLICATION_JSON_TYPE, new MultivaluedHashMap<>(), out); out.write(dataEnd); out.flush(); } catch (final IOException e) { throw new IllegalStateException(e); } } finally { try { // cxf will skip it since we already write ourself asyncResponse.resume(""); } catch (final RuntimeException re) { // no-op: not that important } } }); }
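The drain loop near the end is another recurring isEmpty() idiom: each in-flight Future is registered in a ConcurrentMap, and the loop repeats until the map is empty, because producers may still be registering futures while the first pass waits. A stripped-down sketch (the pool size, timeout, and the inFlight name are made up for illustration):

import java.util.Iterator;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class DrainFuturesSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        ConcurrentMap<Future<?>, Boolean> inFlight = new ConcurrentHashMap<>();

        for (int i = 0; i < 10; i++) {
            inFlight.put(pool.submit(() -> { /* simulated work */ }), Boolean.TRUE);
        }

        // Drain until the map is empty; the outer loop matters because other
        // threads could still be registering new futures during the first pass.
        do {
            Iterator<Future<?>> it = inFlight.keySet().iterator();
            while (it.hasNext()) {
                try {
                    it.next().get(30, TimeUnit.SECONDS);
                } catch (TimeoutException | ExecutionException e) {
                    throw new IllegalStateException(e);
                } finally {
                    it.remove(); // iterator removal also removes the entry from the map
                }
            }
        } while (!inFlight.isEmpty());

        pool.shutdown();
    }
}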