List of usage examples for java.lang.Object.notify()
@HotSpotIntrinsicCandidate public final native void notify();
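All of the examples below follow the same guarded-block idiom: the thread that calls notify() must hold the same monitor the waiting thread used for wait(), and the waiter should re-check its condition after waking. A minimal sketch of that idiom (the class and field names here are illustrative, not taken from any of the sources below):

    public class NotifyExample {
        private final Object lock = new Object();
        private boolean done = false; // the condition guarded by 'lock'

        // Waiting side: the loop guards against spurious wakeups and missed signals.
        public void awaitDone() throws InterruptedException {
            synchronized (lock) {
                while (!done) {
                    lock.wait();
                }
            }
        }

        // Signalling side: update the condition, then notify, while holding the monitor.
        // Calling notify() without owning the monitor throws IllegalMonitorStateException.
        public void markDone() {
            synchronized (lock) {
                done = true;
                lock.notify();
            }
        }
    }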
From source file:se.jiderhamn.classloader.leak.prevention.ClassLoaderLeakPreventor.java
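Stops a java.util.Timer's background thread by reflection, mimicking Timer.cancel(): while synchronized on the timer's internal task queue it sets newTasksMayBeScheduled to false, clears the queue, and calls queue.notify() so a TimerThread blocked in wait() wakes up and terminates.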
protected void stopTimerThread(Thread thread) {
    // Seems it is not possible to access the Timer of a TimerThread, so we need to mimic Timer.cancel()
    /*
    try {
        Timer timer = (Timer) findField(thread.getClass(), "this$0").get(thread); // This does not work!
        warn("Cancelling Timer " + timer + " / TimerThread '" + thread + "'");
        timer.cancel();
    } catch (IllegalAccessException iaex) {
        error(iaex);
    }
    */
    try {
        final Field newTasksMayBeScheduled = findField(thread.getClass(), "newTasksMayBeScheduled");
        final Object queue = findField(thread.getClass(), "queue").get(thread); // java.util.TaskQueue
        final Method clear = queue.getClass().getDeclaredMethod("clear");
        clear.setAccessible(true);

        // Do what java.util.Timer.cancel() does
        //noinspection SynchronizationOnLocalVariableOrMethodParameter
        synchronized (queue) {
            newTasksMayBeScheduled.set(thread, false);
            clear.invoke(queue);
            queue.notify(); // "In case queue was already empty."
        }

        // We shouldn't need to join() here, thread will finish soon enough
    } catch (Exception ex) {
        error(ex);
    }
}
From source file:com.bt.aloha.sipp.SippEngineTestHelper.java
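Launches an external sipp process and parses its output on background threads; the stdout-reader thread calls event.notify() once it has extracted the port number, releasing the main thread from its timed event.wait(FIVE_THOUSAND).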
private void setUpSipp(String scenarioName, File directory, boolean respondToOriginatingAddress)
        throws IOException {
    // Give some time to settle between sipp calls.
    try {
        Thread.sleep(TWO_THOUSAND);
    } catch (InterruptedException e) {
    }
    Properties props = new Properties();
    props.load(getClass().getResourceAsStream("/sipp.properties"));
    final String sippPath = props.getProperty("sipp.home") + "/sipp";
    String localIpAddress = setIpAddress(props.getProperty("sip.stack.ip.address.pattern"));
    port = Integer.parseInt(props.getProperty("sipp.local.port"));
    // NOTE: if "sipp.local.port" were absent, the parseInt above would already have thrown,
    // so the null check below can never take the "" branch.
    String localPortOption = props.getProperty("sipp.local.port") == null ? ""
            : String.format("-p %s", port);
    String remoteAddressOption = respondToOriginatingAddress ? ""
            : String.format("-rsa %s:%s", localIpAddress, props.getProperty("sip.stack.port"));
    String runTimesOption = "-m 1";
    String remoteAddressPort = respondToOriginatingAddress ? localIpAddress
            : String.format("%s:%s", localIpAddress, props.getProperty("sip.stack.port"));
    String cmdLine = String.format("%s %s %s %s %s %s", sippPath, remoteAddressOption, runTimesOption,
            scenarioName, remoteAddressPort, localPortOption);
    log.debug(cmdLine);
    System.out.println("COMMAND LINE:");
    System.out.println("cd " + directory.getAbsolutePath());
    System.out.println(cmdLine);
    process = Runtime.getRuntime().exec(cmdLine, null, directory);
    final BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream()));
    final OutputStream out = process.getOutputStream();
    final BufferedReader err = new BufferedReader(new InputStreamReader(process.getErrorStream()));
    timer = new Timer(false);
    timer.schedule(new TimerTask() {
        @Override
        public void run() {
            process.destroy();
        }
    }, 30000);
    final Object event = new Object();
    new Thread() {
        public void run() {
            try {
                String line;
                while ((line = err.readLine()) != null) {
                    // while (err.ready() && (line = err.readLine()) != null) {
                    errSB.append(line);
                }
                err.close();
            } catch (IOException e) {
                log.debug("Unable to read the error stream from sipp", e);
            }
        }
    }.start();
    new Thread() {
        public void run() {
            try {
                String line;
                while ((line = in.readLine()) != null) {
                    // while (in.ready() && (line = in.readLine()) != null) {
                    if (line.contains("Terminated")) {
                        break;
                    }
                    if (port == -1 && line.contains("Scenario Screen")) {
                        line = in.readLine();
                        String pattern;
                        int group;
                        if (line.contains("Transport")) {
                            pattern = "(\\d+)";
                            group = 1;
                        } else if (line.contains("Remote-host")) {
                            pattern = "(.*?\\ds.*?)(\\d+)";
                            group = 2;
                        } else
                            continue;
                        line = in.readLine();
                        final Pattern pat = Pattern.compile(pattern);
                        Matcher matcher = pat.matcher(line);
                        matcher.find();
                        port = Integer.parseInt(matcher.group(group));
                        synchronized (event) {
                            event.notify();
                        }
                    }
                }
                in.close();
                out.close();
            } catch (IOException e) {
                log.debug("Unable to read the input stream from sipp", e);
            }
        }
    }.start();
    synchronized (event) {
        try {
            event.wait(FIVE_THOUSAND);
        } catch (InterruptedException e) {
        }
    }
    if (port == -1)
        throw new IOException("Error reading sipp port");
    System.out.println("Running sipp at " + getSippAddress());
}
From source file:org.apache.nifi.cluster.coordination.http.replication.ThreadPoolRequestReplicator.java
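Replicates an HTTP request across a NiFi cluster; an optional monitor object is notified (inside a synchronized block) both when the replicated request completes and when submission fails, so a caller blocked on the monitor is always released.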
/**
 * Replicates the request to all nodes in the given set of node identifiers
 *
 * @param nodeIds the NodeIdentifiers that identify which nodes to send the request to
 * @param method the HTTP method to use
 * @param uri the URI to send the request to
 * @param entity the entity to use
 * @param headers the HTTP Headers
 * @param performVerification whether or not to verify that all nodes in the cluster are connected
 *            and that all nodes can perform the request. Ignored if the request is not mutable.
 * @param response the response to update with the results
 * @param executionPhase <code>true</code> if this is the execution phase, <code>false</code> otherwise
 * @param monitor a monitor that will be notified when the request completes (successfully or otherwise)
 * @return an AsyncClusterResponse that can be used to obtain the result
 */
AsyncClusterResponse replicate(final Set<NodeIdentifier> nodeIds, final String method, final URI uri,
        final Object entity, final Map<String, String> headers, final boolean performVerification,
        StandardAsyncClusterResponse response, final boolean executionPhase, final boolean merge,
        final Object monitor) {
    try {
        // state validation
        Objects.requireNonNull(nodeIds);
        Objects.requireNonNull(method);
        Objects.requireNonNull(uri);
        Objects.requireNonNull(entity);
        Objects.requireNonNull(headers);

        if (nodeIds.isEmpty()) {
            throw new IllegalArgumentException("Cannot replicate request to 0 nodes");
        }

        // verify all of the nodes exist and are in the proper state
        for (final NodeIdentifier nodeId : nodeIds) {
            final NodeConnectionStatus status = clusterCoordinator.getConnectionStatus(nodeId);
            if (status == null) {
                throw new UnknownNodeException("Node " + nodeId + " does not exist in this cluster");
            }

            if (status.getState() != NodeConnectionState.CONNECTED) {
                throw new IllegalClusterStateException(
                        "Cannot replicate request to Node " + nodeId + " because the node is not connected");
            }
        }

        logger.debug("Replicating request {} {} with entity {} to {}; response is {}", method, uri, entity,
                nodeIds, response);

        // Update headers to indicate the current revision so that we can
        // prevent multiple users changing the flow at the same time
        final Map<String, String> updatedHeaders = new HashMap<>(headers);
        final String requestId = updatedHeaders.computeIfAbsent(REQUEST_TRANSACTION_ID_HEADER,
                key -> UUID.randomUUID().toString());

        long verifyClusterStateNanos = -1;
        if (performVerification) {
            final long start = System.nanoTime();
            verifyClusterState(method, uri.getPath());
            verifyClusterStateNanos = System.nanoTime() - start;
        }

        int numRequests = responseMap.size();
        if (numRequests >= maxConcurrentRequests) {
            numRequests = purgeExpiredRequests();
        }

        if (numRequests >= maxConcurrentRequests) {
            final Map<String, Long> countsByUri = responseMap.values().stream().collect(
                    Collectors.groupingBy(StandardAsyncClusterResponse::getURIPath, Collectors.counting()));
            logger.error(
                    "Cannot replicate request {} {} because there are {} outstanding HTTP Requests already. Request Counts Per URI = {}",
                    method, uri.getPath(), numRequests, countsByUri);
            throw new IllegalStateException("There are too many outstanding HTTP requests with a total "
                    + numRequests + " outstanding requests");
        }

        // create a response object if one was not already passed to us
        if (response == null) {
            // create the request objects and replicate to all nodes.
            // When the request has completed, we need to ensure that we notify the monitor, if there is one.
            final CompletionCallback completionCallback = clusterResponse -> {
                try {
                    onCompletedResponse(requestId);
                } finally {
                    if (monitor != null) {
                        synchronized (monitor) {
                            monitor.notify();
                        }

                        logger.debug("Notified monitor {} because request {} {} has completed", monitor,
                                method, uri);
                    }
                }
            };

            final Runnable responseConsumedCallback = () -> onResponseConsumed(requestId);

            response = new StandardAsyncClusterResponse(requestId, uri, method, nodeIds, responseMapper,
                    completionCallback, responseConsumedCallback, merge);
            responseMap.put(requestId, response);
        }

        if (verifyClusterStateNanos > -1) {
            response.addTiming("Verify Cluster State", "All Nodes", verifyClusterStateNanos);
        }

        logger.debug("For Request ID {}, response object is {}", requestId, response);

        // if mutable request, we have to do a two-phase commit where we ask each node to verify
        // that the request can take place and then, if all nodes agree that it can, we can actually
        // issue the request. This is all handled by calling performVerification, which will replicate
        // the 'vote' request to all nodes and then if successful will call back into this method to
        // replicate the actual request.
        final boolean mutableRequest = isMutableRequest(method, uri.getPath());
        if (mutableRequest && performVerification) {
            logger.debug("Performing verification (first phase of two-phase commit) for Request ID {}",
                    requestId);
            performVerification(nodeIds, method, uri, entity, updatedHeaders, response, merge, monitor);
            return response;
        } else if (mutableRequest) {
            response.setPhase(StandardAsyncClusterResponse.COMMIT_PHASE);
        }

        // Callback function for generating a NodeHttpRequestCallable that can be used to perform the work
        final StandardAsyncClusterResponse finalResponse = response;
        NodeRequestCompletionCallback nodeCompletionCallback = nodeResponse -> {
            logger.debug("Received response from {} for {} {}", nodeResponse.getNodeId(), method,
                    uri.getPath());
            finalResponse.add(nodeResponse);
        };

        // instruct the node to actually perform the underlying action
        if (mutableRequest && executionPhase) {
            updatedHeaders.put(REQUEST_EXECUTION_HTTP_HEADER, "true");
        }

        // replicate the request to all nodes
        final Function<NodeIdentifier, NodeHttpRequest> requestFactory = nodeId -> new NodeHttpRequest(nodeId,
                method, createURI(uri, nodeId), entity, updatedHeaders, nodeCompletionCallback, finalResponse);
        submitAsyncRequest(nodeIds, uri.getScheme(), uri.getPath(), requestFactory, updatedHeaders);

        return response;
    } catch (final Throwable t) {
        if (monitor != null) {
            synchronized (monitor) {
                monitor.notify();
            }
            logger.debug("Notified monitor {} because request {} {} has failed with Throwable {}", monitor,
                    method, uri, t);
        }

        if (response != null) {
            final RuntimeException failure = (t instanceof RuntimeException) ? (RuntimeException) t
                    : new RuntimeException("Failed to submit Replication Request to background thread", t);
            response.setFailure(failure, new NodeIdentifier());
        }

        throw t;
    }
}
From source file:org.ut.biolab.medsavant.client.view.genetics.variantinfo.GeneManiaSubInspector.java
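Runs a GeneMANIA query on a worker thread that calls lock.notify() when it finishes; a second thread waits on the same lock and then starts follow-up threads that fill in tooltips and variation frequencies.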
protected void updateRelatedGenesPanel(Set<Gene> g) {
    genes = g;
    kvpPanel.removeAll();
    kvpPanel.invalidate();
    kvpPanel.updateUI();
    kvp = new KeyValuePairPanel(5);
    kvp.setKeysVisible(false);
    kvpPanel.add(kvp);
    progressBar.setVisible(true);
    progressMessage.setVisible(true);
    progressBar.setIndeterminate(true);
    progressMessage.setText("Querying GeneMANIA for related genes");
    final Object lock = new Object();

    Runnable r = new Runnable() {
        @Override
        public void run() {
            boolean setMsgOff = true;
            boolean buildGraph = true;
            if (!Thread.interrupted()) {
                try {
                    List<String> geneNames = new ArrayList();
                    for (Gene gene : genes) {
                        geneNames.add(gene.getName());
                    }
                    List<String> notInGenemania = new ArrayList<String>(geneNames);
                    notInGenemania.removeAll(GenemaniaInfoRetriever.getValidGenes(geneNames));
                    geneNames = GenemaniaInfoRetriever.getValidGenes(geneNames);
                    genemania.setGenes(geneNames);
                    if (notInGenemania.size() > 0) {
                        String message = "<html><center>Following gene(s) not found in GeneMANIA: ";
                        for (String invalidGene : notInGenemania) {
                            message += "<br>" + invalidGene;
                        }
                        message += "</center></html>";
                        progressMessage.setText(message);
                        setMsgOff = false;
                        buildGraph = false;
                    }
                    GeneSetFetcher geneSetFetcher = GeneSetFetcher.getInstance();
                    if (genemania.getGenes().size() > 0) {
                        int i = 1;
                        String zero = Integer.toString(0);
                        Font HEADER_FONT = new Font("Arial", Font.BOLD, 10);
                        kvp.addKey(zero);
                        JLabel geneHeader = new JLabel("Gene".toUpperCase());
                        geneHeader.setFont(HEADER_FONT);
                        kvp.setValue(zero, geneHeader);
                        JLabel varFreqHeader = new JLabel("<html>VARIATION<br>FREQUENCY<br>(var/kb)</html>");
                        varFreqHeader.setFont(HEADER_FONT);
                        kvp.setAdditionalColumn(zero, 0, varFreqHeader);
                        JLabel genemaniaHeader = new JLabel("<html>GENEMANIA<br>SCORE</html>");
                        genemaniaHeader.setFont(HEADER_FONT);
                        if (Thread.interrupted()) {
                            throw new InterruptedException();
                        }

                        if (rankByVarFreq) {
                            Iterator<org.ut.biolab.medsavant.shared.model.Gene> itr = geneSetFetcher
                                    .getGenesByNumVariants(genemania.getRelatedGeneNamesByScore()).iterator();
                            // skip the first one (it's the name of the selected gene, already displayed)
                            itr.next();
                            while (itr.hasNext()) {
                                addGeneToKeyValuePanel(itr.next(), i++);
                            }
                            currSizeOfArray = i - 1;
                        } else {
                            Iterator<String> itr = genemania.getRelatedGeneNamesByScore().iterator();
                            // skip the first one (it's the name of the selected gene, already displayed)
                            itr.next();
                            List<String> tmp = new LinkedList<String>();
                            while (itr.hasNext()) {
                                tmp.add(itr.next());
                            }
                            System.out.println("start populating table" + System.currentTimeMillis());
                            /*
                            while (itr.hasNext()) {
                                // getNormalizedVariantCount(gene)
                                addGeneToKeyValuePanel(GeneSetFetcher.getInstance().getGene(itr.next()), i++);
                            }
                            */
                            for (String foo : tmp) {
                                addGeneToKeyValuePanel(GeneSetFetcher.getInstance().getGene(foo), i++);
                            }
                            System.out.println("done thread" + System.currentTimeMillis());
                            currSizeOfArray = i - 1;
                        }
                    }
                } catch (InterruptedException e) {
                    LOG.error(e);
                    buildGraph = false;
                } catch (NoRelatedGenesInfoException e) {
                    LOG.error(e);
                    progressMessage.setText(e.getMessage());
                    setMsgOff = false;
                    buildGraph = false;
                } catch (Exception ex) {
                    LOG.error(ex);
                    buildGraph = false;
                    ClientMiscUtils.reportError("Error retrieving data from GeneMANIA: %s", ex);
                } catch (Error e) {
                    LOG.error(e);
                } finally {
                    progressBar.setIndeterminate(false);
                    progressBar.setValue(0);
                    progressBar.setVisible(false);
                    if (setMsgOff) {
                        progressMessage.setVisible(false);
                    }
                }
            }
            synchronized (lock) {
                lock.notify();
            }
        }
    };

    if (genemaniaAlgorithmThread == null) {
        genemaniaAlgorithmThread = new Thread(r);
    } else {
        genemaniaAlgorithmThread.interrupt();
        genemaniaAlgorithmThread = new Thread(r);
    }

    final Runnable geneDescriptionFetcher = new Runnable() {
        @Override
        public void run() {
            for (int j = 1; j <= currSizeOfArray; j++) {
                try {
                    String geneName = kvp.getValue(Integer.toString(j));
                    Gene gene = GeneSetFetcher.getInstance().getGene(geneName);
                    String d = gene.getDescription();
                    kvp.setToolTipForValue(Integer.toString(j), d);
                } catch (Exception e) {
                    // do nothing (don't set tool tip to anything)
                }
            }
        }
    };

    genemaniaAlgorithmThread.start();

    Runnable r2 = new Runnable() {
        @Override
        public void run() {
            try {
                synchronized (lock) {
                    lock.wait();
                    Thread toolTipGenerator = new Thread(geneDescriptionFetcher);
                    Thread varFreqCalculator = new Thread(new Runnable() {
                        @Override
                        public void run() {
                            for (int i = 1; i <= currSizeOfArray; i++) {
                                try {
                                    String geneName = kvp.getValue(Integer.toString(i));
                                    Gene gene = GeneSetFetcher.getInstance().getGene(geneName);
                                    kvp.setAdditionalColumn(Integer.toString(i), 0,
                                            new JLabel(Double.toString(GeneSetFetcher.getInstance()
                                                    .getNormalizedVariantCount(gene))));
                                    kvp.invalidate();
                                    kvp.updateUI();
                                } catch (Exception ex) {
                                    // don't put in any variation frequency
                                }
                            }
                        }
                    });
                    toolTipGenerator.start();
                    varFreqCalculator.start();
                }
            } catch (Exception e) {
            }
        }
    };
    Thread t2 = new Thread(r2);
    t2.start();
}
From source file:com.gemstone.gemfire.internal.cache.OplogJUnitTest.java
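A disk-store test that parks the JUnit thread in lock.wait(9000); the CacheObserver's afterHavingCompacted() callback verifies the post-compaction oplog size and calls lock.notify() to let the test finish.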
/**
 * Tests reduction in size of disk stats when the oplog is rolled.
 */
@Test
public void testStatsSizeReductionOnRolling() throws Exception {
    final int MAX_OPLOG_SIZE = 500 * 2;
    diskProps.setMaxOplogSize(MAX_OPLOG_SIZE);
    diskProps.setPersistBackup(true);
    diskProps.setRolling(true);
    diskProps.setCompactionThreshold(100);
    diskProps.setSynchronous(true);
    diskProps.setOverflow(false);
    diskProps.setDiskDirsAndSizes(new File[] { dirs[0] }, new int[] { 4000 });
    final byte[] val = new byte[333];
    region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
    final DiskRegion dr = ((LocalRegion) region).getDiskRegion();
    final Object lock = new Object();
    final boolean[] exceptionOccured = new boolean[] { true };
    final boolean[] okToExit = new boolean[] { false };
    final boolean[] switchExpected = new boolean[] { false };

    // calculate sizes
    final int extra_byte_num_per_entry = InternalDataSerializer
            .calculateBytesForTSandDSID(getDSID((LocalRegion) region));
    final int key3_size = DiskOfflineCompactionJUnitTest.getSize4Create(extra_byte_num_per_entry, "key3", val);
    final int tombstone_key1 = DiskOfflineCompactionJUnitTest
            .getSize4TombstoneWithKey(extra_byte_num_per_entry, "key1");
    final int tombstone_key2 = DiskOfflineCompactionJUnitTest
            .getSize4TombstoneWithKey(extra_byte_num_per_entry, "key2");

    CacheObserver old = CacheObserverHolder.setInstance(new CacheObserverAdapter() {
        private long before = -1;
        private DirectoryHolder dh = null;
        private long oplogsSize = 0;

        @Override
        public void beforeSwitchingOplog() {
            cache.getLogger().info("beforeSwitchingOplog");
            if (!switchExpected[0]) {
                fail("unexpected oplog switch");
            }
            if (before == -1) {
                // only want to call this once; before the 1st oplog destroy
                this.dh = dr.getNextDir();
                this.before = this.dh.getDirStatsDiskSpaceUsage();
            }
        }

        @Override
        public void beforeDeletingCompactedOplog(Oplog oplog) {
            cache.getLogger().info("beforeDeletingCompactedOplog");
            oplogsSize += oplog.getOplogSize();
        }

        @Override
        public void afterHavingCompacted() {
            cache.getLogger().info("afterHavingCompacted");
            if (before > -1) {
                synchronized (lock) {
                    okToExit[0] = true;
                    long after = this.dh.getDirStatsDiskSpaceUsage();
                    // after compaction, in _2.crf, key3 is a create-entry,
                    // key1 and key2 are tombstones.
                    // _2.drf contained a rvvgc with drMap.size()==1
                    int expected_drf_size = Oplog.OPLOG_DISK_STORE_REC_SIZE + Oplog.OPLOG_MAGIC_SEQ_REC_SIZE
                            + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
                            + DiskOfflineCompactionJUnitTest.getRVVSize(1, new int[] { 0 }, true);
                    int expected_crf_size = Oplog.OPLOG_DISK_STORE_REC_SIZE + Oplog.OPLOG_MAGIC_SEQ_REC_SIZE
                            + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
                            + DiskOfflineCompactionJUnitTest.getRVVSize(1, new int[] { 1 }, false)
                            + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE + key3_size + tombstone_key1
                            + tombstone_key2;
                    int oplog_2_size = expected_drf_size + expected_crf_size;
                    if (after != oplog_2_size) {
                        cache.getLogger().info("test failed before=" + before + " after=" + after
                                + " oplogsSize=" + oplogsSize);
                        exceptionOccured[0] = true;
                    } else {
                        exceptionOccured[0] = false;
                    }
                    LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
                    lock.notify();
                }
            }
        }
    });

    try {
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
        cache.getLogger().info("putting key1");
        region.put("key1", val);
        // Disk space should have changed due to 1 put
        // assertTrue("stats did not increase after put 1 ",
        //     diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
        checkDiskStats();
        cache.getLogger().info("putting key2");
        region.put("key2", val);
        // assertTrue("stats did not increase after put 2",
        //     diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
        checkDiskStats();

        cache.getLogger().info("removing key1");
        region.remove("key1");
        cache.getLogger().info("removing key2");
        region.remove("key2");

        // This put will cause a switch as max-oplog size (900) will be exceeded (999)
        switchExpected[0] = true;
        cache.getLogger().info("putting key3");
        region.put("key3", val);
        cache.getLogger().info("waiting for compaction");
        synchronized (lock) {
            if (!okToExit[0]) {
                lock.wait(9000);
                assertTrue(okToExit[0]);
            }
            assertFalse(exceptionOccured[0]);
        }

        region.close();
    } finally {
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
        CacheObserverHolder.setInstance(old);
    }
}
From source file:org.apache.geode.internal.cache.OplogJUnitTest.java
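The Apache Geode port of the preceding GemFire test; the wait/notify handshake between the test thread and the compaction callback is unchanged.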
/**
 * Tests reduction in size of disk stats when the oplog is rolled.
 */
@Category(FlakyTest.class) // GEODE-527: jvm sizing sensitive, non-thread-safe test hooks, time sensitive
@Test
public void testStatsSizeReductionOnRolling() throws Exception {
    final int MAX_OPLOG_SIZE = 500 * 2;
    diskProps.setMaxOplogSize(MAX_OPLOG_SIZE);
    diskProps.setPersistBackup(true);
    diskProps.setRolling(true);
    diskProps.setCompactionThreshold(100);
    diskProps.setSynchronous(true);
    diskProps.setOverflow(false);
    diskProps.setDiskDirsAndSizes(new File[] { dirs[0] }, new int[] { 4000 });
    final byte[] val = new byte[333];
    region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
    final DiskRegion dr = ((LocalRegion) region).getDiskRegion();
    final Object lock = new Object();
    final boolean[] exceptionOccured = new boolean[] { true };
    final boolean[] okToExit = new boolean[] { false };
    final boolean[] switchExpected = new boolean[] { false };

    // calculate sizes
    final int extra_byte_num_per_entry = InternalDataSerializer
            .calculateBytesForTSandDSID(getDSID((LocalRegion) region));
    final int key3_size = DiskOfflineCompactionJUnitTest.getSize4Create(extra_byte_num_per_entry, "key3", val);
    final int tombstone_key1 = DiskOfflineCompactionJUnitTest
            .getSize4TombstoneWithKey(extra_byte_num_per_entry, "key1");
    final int tombstone_key2 = DiskOfflineCompactionJUnitTest
            .getSize4TombstoneWithKey(extra_byte_num_per_entry, "key2");

    // TODO: move static methods from DiskOfflineCompactionJUnitTest to shared util class
    CacheObserver old = CacheObserverHolder.setInstance(new CacheObserverAdapter() {
        private long before = -1;
        private DirectoryHolder dh = null;
        private long oplogsSize = 0;

        @Override
        public void beforeSwitchingOplog() {
            cache.getLogger().info("beforeSwitchingOplog");
            if (!switchExpected[0]) {
                fail("unexpected oplog switch");
            }
            if (before == -1) {
                // only want to call this once; before the 1st oplog destroy
                this.dh = dr.getNextDir();
                this.before = this.dh.getDirStatsDiskSpaceUsage();
            }
        }

        @Override
        public void beforeDeletingCompactedOplog(Oplog oplog) {
            cache.getLogger().info("beforeDeletingCompactedOplog");
            oplogsSize += oplog.getOplogSize();
        }

        @Override
        public void afterHavingCompacted() {
            cache.getLogger().info("afterHavingCompacted");
            if (before > -1) {
                synchronized (lock) {
                    okToExit[0] = true;
                    long after = this.dh.getDirStatsDiskSpaceUsage();
                    // after compaction, in _2.crf, key3 is a create-entry,
                    // key1 and key2 are tombstones.
                    // _2.drf contained a rvvgc with drMap.size()==1
                    int expected_drf_size = Oplog.OPLOG_DISK_STORE_REC_SIZE + Oplog.OPLOG_MAGIC_SEQ_REC_SIZE
                            + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
                            + DiskOfflineCompactionJUnitTest.getRVVSize(1, new int[] { 0 }, true);
                    int expected_crf_size = Oplog.OPLOG_DISK_STORE_REC_SIZE + Oplog.OPLOG_MAGIC_SEQ_REC_SIZE
                            + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
                            + DiskOfflineCompactionJUnitTest.getRVVSize(1, new int[] { 1 }, false)
                            + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE + key3_size + tombstone_key1
                            + tombstone_key2;
                    int oplog_2_size = expected_drf_size + expected_crf_size;
                    if (after != oplog_2_size) {
                        cache.getLogger().info("test failed before=" + before + " after=" + after
                                + " oplogsSize=" + oplogsSize);
                        exceptionOccured[0] = true;
                    } else {
                        exceptionOccured[0] = false;
                    }
                    LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
                    lock.notify();
                }
            }
        }
    });

    try {
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
        cache.getLogger().info("putting key1");
        region.put("key1", val);
        // Disk space should have changed due to 1 put
        // assertTrue("stats did not increase after put 1 ",
        //     diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
        checkDiskStats();
        cache.getLogger().info("putting key2");
        region.put("key2", val);
        // assertTrue("stats did not increase after put 2",
        //     diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
        checkDiskStats();

        cache.getLogger().info("removing key1");
        region.remove("key1");
        cache.getLogger().info("removing key2");
        region.remove("key2");

        // This put will cause a switch as max-oplog size (900) will be exceeded (999)
        switchExpected[0] = true;
        cache.getLogger().info("putting key3");
        region.put("key3", val);
        cache.getLogger().info("waiting for compaction");
        synchronized (lock) {
            if (!okToExit[0]) {
                lock.wait(9000);
                assertTrue(okToExit[0]);
            }
            assertFalse(exceptionOccured[0]);
        }

        region.close();
    } finally {
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
        CacheObserverHolder.setInstance(old);
    }
}
From source file:org.apache.nifi.cluster.coordination.http.replication.ThreadPoolRequestReplicator.java
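The verification phase of NiFi's two-phase commit. Note that the monitor is notified in a finally block when any node dissents, and is otherwise handed on to replicate(), which notifies it on completion.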
private void performVerification(final Set<NodeIdentifier> nodeIds, final String method, final URI uri,
        final Object entity, final Map<String, String> headers,
        final StandardAsyncClusterResponse clusterResponse, final boolean merge, final Object monitor) {
    logger.debug("Verifying that mutable request {} {} can be made", method, uri.getPath());

    final Map<String, String> validationHeaders = new HashMap<>(headers);
    validationHeaders.put(REQUEST_VALIDATION_HTTP_HEADER, NODE_CONTINUE);

    final long startNanos = System.nanoTime();
    final int numNodes = nodeIds.size();
    final NodeRequestCompletionCallback completionCallback = new NodeRequestCompletionCallback() {
        final Set<NodeResponse> nodeResponses = Collections.synchronizedSet(new HashSet<>());

        @Override
        public void onCompletion(final NodeResponse nodeResponse) {
            // Add the node response to our collection. We later need to know whether or
            // not this is the last node response, so we add the response and then check
            // the size within a synchronized block to ensure that those two things happen
            // atomically. Otherwise, we could have multiple threads checking the sizes of
            // the sets at the same time, which could result in multiple threads performing
            // the 'all nodes are complete' logic.
            final boolean allNodesResponded;
            synchronized (nodeResponses) {
                nodeResponses.add(nodeResponse);
                allNodesResponded = nodeResponses.size() == numNodes;
            }

            try {
                final long nanos = System.nanoTime() - startNanos;
                clusterResponse.addTiming("Completed Verification", nodeResponse.getNodeId().toString(),
                        nanos);

                // If we have all of the node responses, then we can verify the responses
                // and if good replicate the original request to all of the nodes.
                if (allNodesResponded) {
                    clusterResponse.addTiming("Verification Completed", "All Nodes", nanos);

                    // Check if we have any requests that do not have a 150-Continue status code.
                    final long dissentingCount = nodeResponses.stream()
                            .filter(p -> p.getStatus() != NODE_CONTINUE_STATUS_CODE).count();

                    // If all nodes responded with 150-Continue, then we can replicate the original request
                    // to all nodes and we are finished.
                    if (dissentingCount == 0) {
                        logger.debug(
                                "Received verification from all {} nodes that mutable request {} {} can be made",
                                numNodes, method, uri.getPath());
                        replicate(nodeIds, method, uri, entity, headers, false, clusterResponse, true, merge,
                                monitor);
                        return;
                    }

                    try {
                        final Map<String, String> cancelLockHeaders = new HashMap<>(headers);
                        cancelLockHeaders.put(REQUEST_TRANSACTION_CANCELATION_HTTP_HEADER, "true");
                        final Thread cancelLockThread = new Thread(new Runnable() {
                            @Override
                            public void run() {
                                logger.debug("Found {} dissenting nodes for {} {}; canceling claim request",
                                        dissentingCount, method, uri.getPath());

                                final Function<NodeIdentifier, NodeHttpRequest> requestFactory = nodeId -> new NodeHttpRequest(
                                        nodeId, method, createURI(uri, nodeId), entity, cancelLockHeaders,
                                        null, clusterResponse);
                                submitAsyncRequest(nodeIds, uri.getScheme(), uri.getPath(), requestFactory,
                                        cancelLockHeaders);
                            }
                        });
                        cancelLockThread.setName("Cancel Flow Locks");
                        cancelLockThread.start();

                        // Add a NodeResponse for each node to the Cluster Response.
                        // Check that all nodes responded successfully.
                        for (final NodeResponse response : nodeResponses) {
                            if (response.getStatus() != NODE_CONTINUE_STATUS_CODE) {
                                final Response clientResponse = response.getClientResponse();

                                final String message;
                                if (clientResponse == null) {
                                    message = "Node " + response.getNodeId()
                                            + " is unable to fulfill this request due to: Unexpected Response Code "
                                            + response.getStatus();

                                    logger.info(
                                            "Received a status of {} from {} for request {} {} when performing first stage of two-stage commit. The action will not occur",
                                            response.getStatus(), response.getNodeId(), method,
                                            uri.getPath());
                                } else {
                                    final String nodeExplanation = clientResponse.readEntity(String.class);
                                    message = "Node " + response.getNodeId()
                                            + " is unable to fulfill this request due to: " + nodeExplanation;

                                    logger.info(
                                            "Received a status of {} from {} for request {} {} when performing first stage of two-stage commit. "
                                                    + "The action will not occur. Node explanation: {}",
                                            response.getStatus(), response.getNodeId(), method,
                                            uri.getPath(), nodeExplanation);
                                }

                                // if a node reports forbidden, use that as the response failure
                                final RuntimeException failure;
                                if (response.getStatus() == Status.FORBIDDEN.getStatusCode()) {
                                    if (response.hasThrowable()) {
                                        failure = new AccessDeniedException(message, response.getThrowable());
                                    } else {
                                        failure = new AccessDeniedException(message);
                                    }
                                } else {
                                    if (response.hasThrowable()) {
                                        failure = new IllegalClusterStateException(message,
                                                response.getThrowable());
                                    } else {
                                        failure = new IllegalClusterStateException(message);
                                    }
                                }

                                clusterResponse.setFailure(failure, response.getNodeId());
                            }
                        }
                    } finally {
                        if (monitor != null) {
                            synchronized (monitor) {
                                monitor.notify();
                            }

                            logger.debug(
                                    "Notified monitor {} because request {} {} has failed due to at least 1 dissenting node",
                                    monitor, method, uri);
                        }
                    }
                }
            } catch (final Exception e) {
                clusterResponse.add(new NodeResponse(nodeResponse.getNodeId(), method, uri, e));

                // If there was a problem, we need to ensure that we add all of the other nodes' responses
                // to the Cluster Response so that the Cluster Response is complete.
                for (final NodeResponse otherResponse : nodeResponses) {
                    if (otherResponse.getNodeId().equals(nodeResponse.getNodeId())) {
                        continue;
                    }

                    clusterResponse.add(otherResponse);
                }
            }
        }
    };

    // Callback function for generating a NodeHttpRequestCallable that can be used to perform the work
    final Function<NodeIdentifier, NodeHttpRequest> requestFactory = nodeId -> new NodeHttpRequest(nodeId,
            method, createURI(uri, nodeId), entity, validationHeaders, completionCallback, clusterResponse);

    // replicate the 'verification request' to all nodes
    submitAsyncRequest(nodeIds, uri.getScheme(), uri.getPath(), requestFactory, validationHeaders);
}
From source file:com.codename1.impl.android.AndroidImplementation.java
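Blocks the calling thread until an asynchronous background-fetch callback finishes: both onSucess() and onError() set a completion flag and call lock.notify(), while the waiting side loops on a timed lock.wait(1000) with a 25-second overall timeout.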
/**
 * Calls the background fetch callback. If the app is in the background, this will
 * check to see if the lifecycle class implements the {@link com.codename1.background.BackgroundFetch}
 * interface. If it does, it will execute its
 * {@link com.codename1.background.BackgroundFetch#performBackgroundFetch(long, com.codename1.util.Callback) }
 * method.
 * @param blocking True if this should block until it is complete.
 */
public static void performBackgroundFetch(boolean blocking) {
    if (Display.getInstance().isMinimized()) {
        // By definition, background fetch should only occur if the app is minimized.
        // This keeps it consistent with the iOS implementation that doesn't have a choice.
        final boolean[] complete = new boolean[1];
        final Object lock = new Object();
        final BackgroundFetch bgFetchListener = instance.getBackgroundFetchListener();
        final long timeout = System.currentTimeMillis() + 25000;
        if (bgFetchListener != null) {
            Display.getInstance().callSerially(new Runnable() {
                public void run() {
                    bgFetchListener.performBackgroundFetch(timeout, new Callback<Boolean>() {
                        @Override
                        public void onSucess(Boolean value) { // (sic: Codename One's Callback API spells it "onSucess")
                            // On Android the OS doesn't care whether it worked or not,
                            // so we'll just consume this.
                            synchronized (lock) {
                                complete[0] = true;
                                lock.notify();
                            }
                        }

                        @Override
                        public void onError(Object sender, Throwable err, int errorCode,
                                String errorMessage) {
                            com.codename1.io.Log.e(err);
                            synchronized (lock) {
                                complete[0] = true;
                                lock.notify();
                            }
                        }
                    });
                }
            });
        }

        while (blocking && !complete[0]) {
            synchronized (lock) {
                try {
                    lock.wait(1000);
                } catch (Exception ex) {
                }
            }
            if (!complete[0]) {
                System.out.println("Waiting for background fetch to complete. "
                        + "Make sure your background fetch handler calls onSuccess() or onError() in the callback when complete");
            }
            if (System.currentTimeMillis() > timeout) {
                System.out.println("Background fetch exceeded time alotted. Not waiting for its completion");
                break;
            }
        }
    }
}
From source file:com.codename1.impl.android.AndroidImplementation.java
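Creates an Android MediaRecorder on the UI thread. The calling thread takes the lock first and then waits; the UI runnable notifies from a finally block whether prepare() succeeded or threw.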
@Override
public Media createMediaRecorder(final String path, final String mimeType) throws IOException {
    if (getActivity() == null) {
        return null;
    }
    if (!checkForPermission(Manifest.permission.RECORD_AUDIO, "This is required to record audio")) {
        return null;
    }
    final AndroidRecorder[] record = new AndroidRecorder[1];
    final IOException[] error = new IOException[1];
    final Object lock = new Object();
    synchronized (lock) {
        getActivity().runOnUiThread(new Runnable() {
            @Override
            public void run() {
                synchronized (lock) {
                    MediaRecorder recorder = new MediaRecorder();
                    recorder.setAudioSource(MediaRecorder.AudioSource.MIC);
                    if (mimeType.contains("amr")) {
                        recorder.setOutputFormat(MediaRecorder.OutputFormat.AMR_NB);
                        recorder.setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
                    } else {
                        recorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4);
                        recorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC);
                    }
                    recorder.setOutputFile(removeFilePrefix(path));
                    try {
                        recorder.prepare();
                        record[0] = new AndroidRecorder(recorder);
                    } catch (IllegalStateException ex) {
                        Logger.getLogger(AndroidImplementation.class.getName()).log(Level.SEVERE, null, ex);
                    } catch (IOException ex) {
                        error[0] = ex;
                    } finally {
                        lock.notify();
                    }
                }
            }
        });

        try {
            lock.wait();
        } catch (InterruptedException ex) {
            ex.printStackTrace();
        }

        if (error[0] != null) {
            throw error[0];
        }
        return record[0];
    }
}
From source file:com.codename1.impl.android.AndroidImplementation.java
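Builds a WebView on the UI thread and publishes it through a one-element array; here notify() is paired with a short timed lock.wait(20) inside invokeAndBlock, so the waiting loop re-checks the result rather than depending on a single wakeup.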
public PeerComponent createBrowserComponent(final Object parent) {
    if (getActivity() == null) {
        return null;
    }
    final AndroidImplementation.AndroidBrowserComponent[] bc = new AndroidImplementation.AndroidBrowserComponent[1];
    final Throwable[] error = new Throwable[1];
    final Object lock = new Object();
    getActivity().runOnUiThread(new Runnable() {
        @Override
        public void run() {
            synchronized (lock) {
                try {
                    WebView wv = new WebView(getActivity()) {
                        public boolean onKeyDown(int keyCode, KeyEvent event) {
                            switch (keyCode) {
                            case KeyEvent.KEYCODE_BACK:
                                Display.getInstance().keyPressed(AndroidImplementation.DROID_IMPL_KEY_BACK);
                                return true;
                            case KeyEvent.KEYCODE_MENU:
                                // if the native commands are used don't handle the keycode
                                if (Display.getInstance()
                                        .getCommandBehavior() != Display.COMMAND_BEHAVIOR_NATIVE) {
                                    Display.getInstance()
                                            .keyPressed(AndroidImplementation.DROID_IMPL_KEY_MENU);
                                    return true;
                                }
                            }
                            return super.onKeyDown(keyCode, event);
                        }

                        public boolean onKeyUp(int keyCode, KeyEvent event) {
                            switch (keyCode) {
                            case KeyEvent.KEYCODE_BACK:
                                Display.getInstance().keyReleased(AndroidImplementation.DROID_IMPL_KEY_BACK);
                                return true;
                            case KeyEvent.KEYCODE_MENU:
                                // if the native commands are used don't handle the keycode
                                if (Display.getInstance()
                                        .getCommandBehavior() != Display.COMMAND_BEHAVIOR_NATIVE) {
                                    Display.getInstance()
                                            .keyPressed(AndroidImplementation.DROID_IMPL_KEY_MENU);
                                    return true;
                                }
                            }
                            return super.onKeyUp(keyCode, event);
                        }
                    };
                    wv.setOnTouchListener(new View.OnTouchListener() {
                        @Override
                        public boolean onTouch(View v, MotionEvent event) {
                            switch (event.getAction()) {
                            case MotionEvent.ACTION_DOWN:
                            case MotionEvent.ACTION_UP:
                                if (!v.hasFocus()) {
                                    v.requestFocus();
                                }
                                break;
                            }
                            return false;
                        }
                    });
                    wv.getSettings().setDomStorageEnabled(true);
                    wv.requestFocus(View.FOCUS_DOWN);
                    wv.setFocusableInTouchMode(true);
                    bc[0] = new AndroidImplementation.AndroidBrowserComponent(wv, getActivity(), parent);
                    lock.notify();
                } catch (Throwable t) {
                    error[0] = t;
                    lock.notify();
                }
            }
        }
    });
    while (bc[0] == null && error[0] == null) {
        Display.getInstance().invokeAndBlock(new Runnable() {
            public void run() {
                synchronized (lock) {
                    if (bc[0] == null && error[0] == null) {
                        try {
                            lock.wait(20);
                        } catch (InterruptedException ex) {
                            ex.printStackTrace();
                        }
                    }
                }
            }
        });
    }
    if (error[0] != null) {
        throw new RuntimeException(error[0]);
    }
    return bc[0];
}