List of usage examples for java.lang.Object.wait()
public final void wait() throws InterruptedException
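Causes the current thread to wait until another thread invokes notify() or notifyAll() on the same object; the calling thread must own the object's monitor, and the Javadoc recommends always calling wait() inside a loop that re-checks the awaited condition. Before the project examples below, here is a minimal, self-contained sketch of that guarded wait/notify idiom (the class name and the done flag are illustrative, not taken from any example on this page):

// Minimal sketch of the guarded wait/notify idiom (illustrative names).
public class GuardedHandoff {
    private final Object lock = new Object();
    private boolean done = false; // the condition guarded by the lock

    // Called by the waiting thread: blocks until the condition holds.
    public void awaitDone() throws InterruptedException {
        synchronized (lock) {
            while (!done) {      // loop guards against spurious wakeups and missed state changes
                lock.wait();     // releases the monitor while waiting, reacquires it before returning
            }
        }
    }

    // Called by the producing thread: sets the condition and wakes waiters.
    public void markDone() {
        synchronized (lock) {
            done = true;
            lock.notifyAll();    // notifyAll() is the safer default when several threads may wait
        }
    }
}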
From source file:it.anyplace.sync.bep.BlockPuller.java
public FileDownloadObserver pullBlocks(FileBlocks fileBlocks) throws InterruptedException {
    logger.info("pulling file = {}", fileBlocks);
    checkArgument(connectionHandler.hasFolder(fileBlocks.getFolder()),
            "supplied connection handler %s will not share folder %s", connectionHandler, fileBlocks.getFolder());
    final Object lock = new Object();
    final AtomicReference<Exception> error = new AtomicReference<>();
    final Object listener = new Object() {
        @Subscribe
        public void handleResponseMessageReceivedEvent(ResponseMessageReceivedEvent event) {
            synchronized (lock) {
                try {
                    if (!requestIds.contains(event.getMessage().getId())) {
                        return;
                    }
                    checkArgument(equal(event.getMessage().getCode(), ErrorCode.NO_ERROR),
                            "received error response, code = %s", event.getMessage().getCode());
                    byte[] data = event.getMessage().getData().toByteArray();
                    String hash = BaseEncoding.base16().encode(Hashing.sha256().hashBytes(data).asBytes());
                    blockCache.pushBlock(data);
                    if (missingHashes.remove(hash)) {
                        blocksByHash.put(hash, data);
                        logger.debug("aquired block, hash = {}", hash);
                        lock.notify();
                    } else {
                        logger.warn("received not-needed block, hash = {}", hash);
                    }
                } catch (Exception ex) {
                    error.set(ex);
                    lock.notify();
                }
            }
        }
    };
    FileDownloadObserver fileDownloadObserver = new FileDownloadObserver() {
        private long getReceivedData() {
            return blocksByHash.size() * BLOCK_SIZE;
        }

        private long getTotalData() {
            return (blocksByHash.size() + missingHashes.size()) * BLOCK_SIZE;
        }

        @Override
        public double getProgress() {
            return isCompleted() ? 1d : getReceivedData() / ((double) getTotalData());
        }

        @Override
        public String getProgressMessage() {
            return (Math.round(getProgress() * 1000d) / 10d) + "% "
                    + FileUtils.byteCountToDisplaySize(getReceivedData()) + " / "
                    + FileUtils.byteCountToDisplaySize(getTotalData());
        }

        @Override
        public boolean isCompleted() {
            return missingHashes.isEmpty();
        }

        @Override
        public void checkError() {
            if (error.get() != null) {
                throw new RuntimeException(error.get());
            }
        }

        @Override
        public double waitForProgressUpdate() throws InterruptedException {
            if (!isCompleted()) {
                synchronized (lock) {
                    checkError();
                    lock.wait();
                    checkError();
                }
            }
            return getProgress();
        }

        @Override
        public InputStream getInputStream() {
            checkArgument(missingHashes.isEmpty(), "pull failed, some blocks are still missing");
            List<byte[]> blockList = Lists
                    .newArrayList(Lists.transform(hashList, Functions.forMap(blocksByHash)));
            return new SequenceInputStream(Collections
                    .enumeration(Lists.transform(blockList, new Function<byte[], ByteArrayInputStream>() {
                        @Override
                        public ByteArrayInputStream apply(byte[] data) {
                            return new ByteArrayInputStream(data);
                        }
                    })));
        }

        @Override
        public void close() {
            missingHashes.clear();
            hashList.clear();
            blocksByHash.clear();
            try {
                connectionHandler.getEventBus().unregister(listener);
            } catch (Exception ex) {
            }
            if (closeConnection) {
                connectionHandler.close();
            }
        }
    };
    try {
        synchronized (lock) {
            hashList.addAll(Lists.transform(fileBlocks.getBlocks(), new Function<BlockInfo, String>() {
                @Override
                public String apply(BlockInfo block) {
                    return block.getHash();
                }
            }));
            missingHashes.addAll(hashList);
            for (String hash : missingHashes) {
                byte[] block = blockCache.pullBlock(hash);
                if (block != null) {
                    blocksByHash.put(hash, block);
                    missingHashes.remove(hash);
                }
            }
            connectionHandler.getEventBus().register(listener);
            for (BlockInfo block : fileBlocks.getBlocks()) {
                if (missingHashes.contains(block.getHash())) {
                    int requestId = Math.abs(new Random().nextInt());
                    requestIds.add(requestId);
                    connectionHandler.sendMessage(Request.newBuilder().setId(requestId)
                            .setFolder(fileBlocks.getFolder()).setName(fileBlocks.getPath())
                            .setOffset(block.getOffset()).setSize(block.getSize())
                            .setHash(ByteString.copyFrom(BaseEncoding.base16().decode(block.getHash())))
                            .build());
                    logger.debug("sent request for block, hash = {}", block.getHash());
                }
            }
            return fileDownloadObserver;
        }
    } catch (Exception ex) {
        fileDownloadObserver.close();
        throw ex;
    }
}
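The observer returned above exposes waitForProgressUpdate(), which blocks on lock.wait() until the @Subscribe handler calls lock.notify(). A hypothetical caller (not code from the project; the blockPuller and fileBlocks variables and the exact observer type are assumed) might poll it like this:

// Illustrative caller: poll the observer until the pull completes, then read the reassembled stream.
void downloadFile(BlockPuller blockPuller, FileBlocks fileBlocks) throws InterruptedException, IOException {
    FileDownloadObserver observer = blockPuller.pullBlocks(fileBlocks);
    try {
        while (!observer.isCompleted()) {
            observer.waitForProgressUpdate(); // blocks on lock.wait() until a block arrives or an error is set
            System.out.println("progress: " + observer.getProgressMessage());
        }
        try (InputStream in = observer.getInputStream()) {
            // ... consume the reassembled file content ...
        }
    } finally {
        observer.close();
    }
}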
From source file:org.apache.stratos.integration.common.TopologyHandler.java
/**
 * Assert application Active status
 *
 * @param applicationId
 */
public void assertApplicationActiveStatus(final String applicationId) throws InterruptedException {
    log.info(String.format("Asserting application status ACTIVE for [application-id] %s...", applicationId));
    final long startTime = System.currentTimeMillis();
    final Object synObject = new Object();
    ApplicationInstanceActivatedEventListener activatedEventListener = new ApplicationInstanceActivatedEventListener() {
        @Override
        protected void onEvent(Event event) {
            ApplicationInstanceActivatedEvent activatedEvent = (ApplicationInstanceActivatedEvent) event;
            Application application = ApplicationManager.getApplications().getApplication(applicationId);
            if (application == null) {
                log.warn(String.format("Application is null: [application-id] %s, [instance-id] %s",
                        applicationId, activatedEvent.getInstanceId()));
            }
            if (application != null && application.getStatus() == ApplicationStatus.Active) {
                synchronized (synObject) {
                    synObject.notify();
                }
            }
        }
    };
    applicationsEventReceiver.addEventListener(activatedEventListener);

    Future future = executorService.submit(new Runnable() {
        @Override
        public void run() {
            Application application = ApplicationManager.getApplications().getApplication(applicationId);
            while (!((application != null) && (application.getStatus() == ApplicationStatus.Active))) {
                if ((System.currentTimeMillis() - startTime) > APPLICATION_ACTIVATION_TIMEOUT) {
                    log.error(String.format(
                            "Application [application-id] %s did not activate within [timeout] %d",
                            applicationId, APPLICATION_ACTIVATION_TIMEOUT));
                    break;
                }
                ApplicationStatus currentStatus = (application != null) ? application.getStatus() : null;
                log.info(String.format(
                        "Waiting for [application-id] %s [current-status] %s to become [status] %s...",
                        applicationId, currentStatus, ApplicationStatus.Active));
                sleep(10000);
                application = ApplicationManager.getApplications().getApplication(applicationId);
            }
            synchronized (synObject) {
                synObject.notify();
            }
        }
    });

    synchronized (synObject) {
        synObject.wait();
        future.cancel(true);
        applicationsEventReceiver.removeEventListener(activatedEventListener);
    }

    Application application = ApplicationManager.getApplications().getApplication(applicationId);
    ApplicationStatus currentStatus = (application != null) ? application.getStatus() : null;
    log.info(String.format(
            "Assert application active status for [application-id] %s [current-status] %s took %d ms",
            applicationId, currentStatus, System.currentTimeMillis() - startTime));
    assertNotNull(String.format("Application is not found: [application-id] %s", applicationId), application);
    assertEquals(
            String.format("Application status did not change to %s: [application-id] %s",
                    ApplicationStatus.Active, applicationId),
            ApplicationStatus.Active, application.getStatus());
}
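The helper above builds its own timeout by pairing synObject.wait() with a background poller that eventually calls notify(). Object.wait(long) can bound the wait directly; here is a minimal, self-contained sketch of a deadline loop (illustrative names, not Stratos code):

// Minimal sketch of a bounded wait using Object.wait(long) in a deadline loop.
public class TimedWait {
    private static final long TIMEOUT_MS = 30_000;
    private final Object monitor = new Object();
    private boolean ready = false;

    // Returns true if the condition was signalled, false if the deadline elapsed first.
    public boolean awaitReady() throws InterruptedException {
        final long deadline = System.currentTimeMillis() + TIMEOUT_MS;
        synchronized (monitor) {
            long remaining = deadline - System.currentTimeMillis();
            while (!ready && remaining > 0) {
                monitor.wait(remaining); // may return early (spurious wakeup), so recompute and re-check
                remaining = deadline - System.currentTimeMillis();
            }
            return ready;
        }
    }

    public void signalReady() {
        synchronized (monitor) {
            ready = true;
            monitor.notifyAll();
        }
    }
}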
From source file:io.druid.indexing.overlord.RemoteTaskRunner.java
@LifecycleStart
public void start() {
    try {
        if (started) {
            return;
        }
        final MutableInt waitingFor = new MutableInt(1);
        final Object waitingForMonitor = new Object();

        // Add listener for creation/deletion of workers
        workerPathCache.getListenable().addListener(new PathChildrenCacheListener() {
            @Override
            public void childEvent(CuratorFramework client, final PathChildrenCacheEvent event) throws Exception {
                final Worker worker;
                switch (event.getType()) {
                    case CHILD_ADDED:
                        worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                        synchronized (waitingForMonitor) {
                            waitingFor.increment();
                        }
                        Futures.addCallback(addWorker(worker), new FutureCallback<ZkWorker>() {
                            @Override
                            public void onSuccess(ZkWorker zkWorker) {
                                synchronized (waitingForMonitor) {
                                    waitingFor.decrement();
                                    waitingForMonitor.notifyAll();
                                }
                            }

                            @Override
                            public void onFailure(Throwable throwable) {
                                synchronized (waitingForMonitor) {
                                    waitingFor.decrement();
                                    waitingForMonitor.notifyAll();
                                }
                            }
                        });
                        break;
                    case CHILD_UPDATED:
                        worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                        updateWorker(worker);
                        break;
                    case CHILD_REMOVED:
                        worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                        removeWorker(worker);
                        break;
                    case INITIALIZED:
                        synchronized (waitingForMonitor) {
                            waitingFor.decrement();
                            waitingForMonitor.notifyAll();
                        }
                    default:
                        break;
                }
            }
        });
        workerPathCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);

        synchronized (waitingForMonitor) {
            while (waitingFor.intValue() > 0) {
                waitingForMonitor.wait();
            }
        }

        // Schedule cleanup for task status of the workers that might have disconnected while overlord was not running
        List<String> workers;
        try {
            workers = cf.getChildren().forPath(indexerZkConfig.getStatusPath());
        } catch (KeeperException.NoNodeException e) {
            // statusPath doesn't exist yet; can occur if no middleManagers have started.
            workers = ImmutableList.of();
        }
        for (String worker : workers) {
            if (!zkWorkers.containsKey(worker) && cf.checkExists()
                    .forPath(JOINER.join(indexerZkConfig.getAnnouncementsPath(), worker)) == null) {
                scheduleTasksCleanupForWorker(worker,
                        cf.getChildren().forPath(JOINER.join(indexerZkConfig.getStatusPath(), worker)));
            }
        }

        started = true;
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
From source file:org.apache.druid.indexing.overlord.RemoteTaskRunner.java
@Override
@LifecycleStart
public void start() {
    if (!lifecycleLock.canStart()) {
        return;
    }
    try {
        final MutableInt waitingFor = new MutableInt(1);
        final Object waitingForMonitor = new Object();

        // Add listener for creation/deletion of workers
        workerPathCache.getListenable().addListener(new PathChildrenCacheListener() {
            @Override
            public void childEvent(CuratorFramework client, final PathChildrenCacheEvent event) throws Exception {
                final Worker worker;
                switch (event.getType()) {
                    case CHILD_ADDED:
                        worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                        synchronized (waitingForMonitor) {
                            waitingFor.increment();
                        }
                        Futures.addCallback(addWorker(worker), new FutureCallback<ZkWorker>() {
                            @Override
                            public void onSuccess(ZkWorker zkWorker) {
                                synchronized (waitingForMonitor) {
                                    waitingFor.decrement();
                                    waitingForMonitor.notifyAll();
                                }
                            }

                            @Override
                            public void onFailure(Throwable throwable) {
                                synchronized (waitingForMonitor) {
                                    waitingFor.decrement();
                                    waitingForMonitor.notifyAll();
                                }
                            }
                        });
                        break;
                    case CHILD_UPDATED:
                        worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                        updateWorker(worker);
                        break;
                    case CHILD_REMOVED:
                        worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                        removeWorker(worker);
                        break;
                    case INITIALIZED:
                        // Schedule cleanup for task status of the workers that might have disconnected while overlord was not running
                        List<String> workers;
                        try {
                            workers = cf.getChildren().forPath(indexerZkConfig.getStatusPath());
                        } catch (KeeperException.NoNodeException e) {
                            // statusPath doesn't exist yet; can occur if no middleManagers have started.
                            workers = ImmutableList.of();
                        }
                        for (String workerId : workers) {
                            final String workerAnnouncePath = JOINER.join(indexerZkConfig.getAnnouncementsPath(), workerId);
                            final String workerStatusPath = JOINER.join(indexerZkConfig.getStatusPath(), workerId);
                            if (!zkWorkers.containsKey(workerId) && cf.checkExists().forPath(workerAnnouncePath) == null) {
                                try {
                                    scheduleTasksCleanupForWorker(workerId, cf.getChildren().forPath(workerStatusPath));
                                } catch (Exception e) {
                                    log.warn(e,
                                            "Could not schedule cleanup for worker[%s] during startup (maybe someone removed the status znode[%s]?). Skipping.",
                                            workerId, workerStatusPath);
                                }
                            }
                        }
                        synchronized (waitingForMonitor) {
                            waitingFor.decrement();
                            waitingForMonitor.notifyAll();
                        }
                        break;
                    case CONNECTION_SUSPENDED:
                    case CONNECTION_RECONNECTED:
                    case CONNECTION_LOST:
                        // do nothing
                }
            }
        });
        workerPathCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);

        synchronized (waitingForMonitor) {
            while (waitingFor.intValue() > 0) {
                waitingForMonitor.wait();
            }
        }

        ScheduledExecutors.scheduleAtFixedRate(cleanupExec, Period.ZERO.toStandardDuration(),
                config.getWorkerBlackListCleanupPeriod().toStandardDuration(), () -> checkBlackListedNodes());

        provisioningService = provisioningStrategy.makeProvisioningService(this);
        lifecycleLock.started();
    } catch (Exception e) {
        throw Throwables.propagate(e);
    } finally {
        lifecycleLock.exitStart();
    }
}
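Both RemoteTaskRunner versions wait on a hand-rolled counter (a MutableInt guarded by the monitor, decremented with notifyAll()) until the cache reports INITIALIZED. For the simpler case of a fixed number of asynchronous completions, java.util.concurrent.CountDownLatch expresses the same handshake without explicit monitor methods. A minimal sketch follows (illustrative only; note a latch cannot grow its count the way the CHILD_ADDED branch does):

import java.util.concurrent.CountDownLatch;

// Illustrative alternative to the counter + notifyAll pattern for a fixed number of completions.
public class LatchExample {
    public static void main(String[] args) throws InterruptedException {
        final int workers = 3;
        final CountDownLatch initialized = new CountDownLatch(workers);

        for (int i = 0; i < workers; i++) {
            final int id = i;
            new Thread(() -> {
                // ... per-worker setup would go here ...
                System.out.println("worker " + id + " ready");
                initialized.countDown();   // plays the role of decrement + notifyAll
            }).start();
        }

        initialized.await();               // plays the role of the while (waitingFor > 0) wait() loop
        System.out.println("all workers initialized");
    }
}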
From source file:org.texai.torrent.TrackerTest.java
/** Tests the HTTP request and response messages. */
@SuppressWarnings({ "ThrowableResultIgnored", "null" })
private void httpClient() {
    final ClientBootstrap clientBootstrap = new ClientBootstrap(new NioClientSocketChannelFactory(
            Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));
    // configure the client pipeline
    final Object clientResume_lock = new Object();
    final AbstractHTTPResponseHandler httpResponseHandler = new MockHTTPResponseHandler(clientResume_lock);
    final X509SecurityInfo x509SecurityInfo = KeyStoreTestUtils.getClientX509SecurityInfo();
    final ChannelPipeline channelPipeline = HTTPClientPipelineFactory.getPipeline(httpResponseHandler,
            x509SecurityInfo);
    clientBootstrap.setPipeline(channelPipeline);
    LOGGER.info("pipeline: " + channelPipeline.toString());

    // start the connection attempt
    ChannelFuture channelFuture = clientBootstrap.connect(new InetSocketAddress("localhost", SERVER_PORT));

    // wait until the connection attempt succeeds or fails
    final Channel channel = channelFuture.awaitUninterruptibly().getChannel();
    if (!channelFuture.isSuccess()) {
        channelFuture.getCause().printStackTrace();
        fail(channelFuture.getCause().getMessage());
    }
    LOGGER.info("HTTP client connected");

    URI uri = null;
    HttpRequest httpRequest;
    String host;

    // send the statistics request
    try {
        uri = new URI("https://localhost:" + SERVER_PORT + "/torrent-tracker/statistics");
    } catch (URISyntaxException ex) {
        fail(ex.getMessage());
    }
    httpRequest = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uri.toASCIIString());
    host = uri.getHost() == null ? "localhost" : uri.getHost();
    httpRequest.setHeader(HttpHeaders.Names.HOST, host);
    httpRequest.setHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
    LOGGER.info("httpRequest ...\n" + httpRequest);
    channel.write(httpRequest);

    // wait for the request message to be sent
    channelFuture.awaitUninterruptibly();
    if (!channelFuture.isSuccess()) {
        channelFuture.getCause().printStackTrace();
        fail(channelFuture.getCause().getMessage());
    }

    // send the scrape request
    try {
        uri = new URI("https://localhost:" + SERVER_PORT + "/torrent-tracker/scrape");
    } catch (URISyntaxException ex) {
        fail(ex.getMessage());
    }
    httpRequest = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uri.toASCIIString());
    host = uri.getHost() == null ? "localhost" : uri.getHost();
    httpRequest.setHeader(HttpHeaders.Names.HOST, host);
    httpRequest.setHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
    LOGGER.info("httpRequest ...\n" + httpRequest);
    channel.write(httpRequest);

    // wait for the request message to be sent
    channelFuture.awaitUninterruptibly();
    if (!channelFuture.isSuccess()) {
        channelFuture.getCause().printStackTrace();
        fail(channelFuture.getCause().getMessage());
    }

    // send the announce request
    final byte[] myPeerIdBytes = { 0x14, 0x13, 0x12, 0x11, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
            0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01 };
    final int nbrBytesUploaded = 0;
    final int nbrBytesDownloaded = 0;
    final int nbrBytesLeftToDownloaded = 1024;
    final String event = "started";
    final String myIPAddress = NetworkUtils.getLocalHostAddress().getHostAddress();
    final StringBuilder stringBuilder = new StringBuilder();
    stringBuilder.append("https://localhost:");
    stringBuilder.append(SERVER_PORT);
    stringBuilder.append("/torrent-tracker/announce");
    stringBuilder.append('?');
    stringBuilder.append("info_hash=");
    stringBuilder.append(new String((new URLCodec()).encode(INFO_HASH)));
    stringBuilder.append("&peer_id=");
    stringBuilder.append(new String((new URLCodec()).encode(myPeerIdBytes)));
    stringBuilder.append("&port=");
    stringBuilder.append(SERVER_PORT);
    stringBuilder.append("&uploaded=");
    stringBuilder.append(nbrBytesUploaded);
    stringBuilder.append("&downloaded=");
    stringBuilder.append(nbrBytesDownloaded);
    stringBuilder.append("&left=");
    stringBuilder.append(nbrBytesLeftToDownloaded);
    stringBuilder.append("&event=");
    stringBuilder.append(event);
    stringBuilder.append("&ip=");
    stringBuilder.append(myIPAddress);
    try {
        uri = new URI(stringBuilder.toString());
    } catch (URISyntaxException ex) {
        fail(ex.getMessage());
    }
    httpRequest = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uri.toASCIIString());
    host = uri.getHost() == null ? "localhost" : uri.getHost();
    httpRequest.setHeader(HttpHeaders.Names.HOST, host);
    httpRequest.setHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.CLOSE);
    LOGGER.info("httpRequest ...\n" + httpRequest);
    channel.write(httpRequest);

    // wait for the request message to be sent
    channelFuture.awaitUninterruptibly();
    if (!channelFuture.isSuccess()) {
        channelFuture.getCause().printStackTrace();
        fail(channelFuture.getCause().getMessage());
    }

    // the message response handler will signal this thread when the test exchanges are completed
    synchronized (clientResume_lock) {
        try {
            clientResume_lock.wait();
        } catch (InterruptedException ex) {
        }
    }
    LOGGER.info("releasing HTTP client resources");
    channel.close();
    clientBootstrap.releaseExternalResources();
}
From source file:org.commonjava.maven.galley.cache.infinispan.FastLocalCacheProvider.java
/**
 * For file reading, first will check if the local cache has the file there. If yes, will directly to read the local
 * cache. If no, then will check the NFS volume for the file, and will copy it to the local cache if found, then read
 * from the local cache again.
 *
 * @param resource - the resource will be read
 * @return - the input stream for further reading
 * @throws IOException
 */
@Override
public InputStream openInputStream(final ConcreteResource resource) throws IOException {
    final String pathKey = getKeyForResource(resource);

    // This lock is used to control the the local resource can be opened successfully finally when local resource missing
    // but NFS not, which means will do a NFS->local copy.
    final Object copyLock = new Object();

    // A flag to mark if the local resource can be open now or need to wait for the copy thread completes its work
    final AtomicBoolean canStreamOpen = new AtomicBoolean(false);

    // A second flag to indicate whether copyTask failed
    final AtomicBoolean copyExceOccurs = new AtomicBoolean(false);

    // This copy task is responsible for the NFS->local copy, and will be run in another thread,
    // which can use PartyLine concurrent read/write function on the local cache to boost
    // the i/o operation
    final Runnable copyNFSTask = () -> {
        InputStream nfsIn = null;
        OutputStream localOut = null;
        try {
            lockByISPN(nfsOwnerCache, resource, LockLevel.write);
            File nfsFile = getNFSDetachedFile(resource);
            if (!nfsFile.exists()) {
                logger.trace("NFS file does not exist too.");
                copyExceOccurs.set(true);
                return;
            }
            nfsIn = new FileInputStream(nfsFile);
            localOut = plCacheProvider.openOutputStream(resource);
            canStreamOpen.set(true); // set it ASAP so the readers can start reading before copy completes
            synchronized (copyLock) {
                copyLock.notifyAll();
            }
            IOUtils.copy(nfsIn, localOut);
            logger.trace("NFS copy to local cache done.");
        } catch (NotSupportedException | SystemException | IOException | InterruptedException e) {
            copyExceOccurs.set(true);
            if (e instanceof IOException) {
                final String errorMsg = String.format(
                        "[galley] got i/o error when doing the NFS->Local copy for resource %s",
                        resource.toString());
                logger.warn(errorMsg, e);
            } else if (e instanceof InterruptedException) {
                final String errorMsg = String.format(
                        "[galley] got thread interrupted error for partyline file locking when doing the NFS->Local copy for resource %s",
                        resource.toString());
                throw new IllegalStateException(errorMsg, e);
            } else {
                final String errorMsg = String.format(
                        "[galley] Cache TransactionManager got error, locking key is %s, resource is %s",
                        pathKey, resource.toString());
                logger.error(errorMsg, e);
                throw new IllegalStateException(errorMsg, e);
            }
        } finally {
            unlockByISPN(nfsOwnerCache, false, resource);
            IOUtils.closeQuietly(nfsIn);
            IOUtils.closeQuietly(localOut);
            cacheLocalFilePath(resource);
            synchronized (copyLock) {
                copyLock.notifyAll();
            }
        }
    };

    // This lock is used to control the concurrent operations on the resource, like concurrent delete and read/write.
    // Use "this" as lock is heavy, should think about use the transfer for the resource as the lock for each thread
    final AtomicReference<IOException> taskException = new AtomicReference<>();
    final InputStream stream = tryLockAnd(resource, DEFAULT_WAIT_FOR_TRANSFER_LOCK_SECONDS, TimeUnit.SECONDS, r -> {
        boolean localExisted = plCacheProvider.exists(r);
        if (localExisted) {
            logger.trace("local cache already exists, will directly get input stream from it.");
            try {
                return plCacheProvider.openInputStream(r);
            } catch (IOException e) {
                taskException.set(e);
                return null;
            }
        } else {
            logger.trace("local cache does not exist, will start to copy from NFS cache");
            executor.execute(copyNFSTask);
        }
        synchronized (copyLock) {
            while (!canStreamOpen.get()) {
                if (copyExceOccurs.get()) {
                    return null;
                }
                try {
                    copyLock.wait();
                } catch (InterruptedException e) {
                    logger.warn("[galley] NFS copy thread is interrupted by other threads", e);
                }
            }
            logger.trace("the NFS->local copy completed, will get the input stream from local cache");
            try {
                return plCacheProvider.openInputStream(r);
            } catch (IOException e) {
                taskException.set(e);
                return null;
            }
        }
    });
    propagateException(taskException.get());
    return stream;
}
From source file:it.anyplace.sync.bep.BlockPusher.java
public FileUploadObserver pushFile(final DataSource dataSource, @Nullable FileInfo fileInfo, final String folder,
        final String path) {
    checkArgument(connectionHandler.hasFolder(folder),
            "supplied connection handler %s will not share folder %s", connectionHandler, folder);
    checkArgument(fileInfo == null || equal(fileInfo.getFolder(), folder));
    checkArgument(fileInfo == null || equal(fileInfo.getPath(), path));
    try {
        final ExecutorService monitoringProcessExecutorService = Executors.newCachedThreadPool();
        final long fileSize = dataSource.getSize();
        final Set<String> sentBlocks = Sets.newConcurrentHashSet();
        final AtomicReference<Exception> uploadError = new AtomicReference<>();
        final AtomicBoolean isCompleted = new AtomicBoolean(false);
        final Object updateLock = new Object();
        final Object listener = new Object() {
            @Subscribe
            public void handleRequestMessageReceivedEvent(RequestMessageReceivedEvent event) {
                BlockExchageProtos.Request request = event.getMessage();
                if (equal(request.getFolder(), folder) && equal(request.getName(), path)) {
                    try {
                        final String hash = BaseEncoding.base16().encode(request.getHash().toByteArray());
                        logger.debug("handling block request = {}:{}-{} ({})", request.getName(),
                                request.getOffset(), request.getSize(), hash);
                        byte[] data = dataSource.getBlock(request.getOffset(), request.getSize(), hash);
                        checkNotNull(data, "data not found for hash = %s", hash);
                        final Future future = connectionHandler.sendMessage(
                                Response.newBuilder().setCode(BlockExchageProtos.ErrorCode.NO_ERROR)
                                        .setData(ByteString.copyFrom(data)).setId(request.getId()).build());
                        monitoringProcessExecutorService.submit(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    future.get();
                                    sentBlocks.add(hash);
                                    synchronized (updateLock) {
                                        updateLock.notifyAll();
                                    }
                                    //TODO retry on error, register error and throw on watcher
                                } catch (InterruptedException ex) {
                                    //return and do nothing
                                } catch (ExecutionException ex) {
                                    uploadError.set(ex);
                                    synchronized (updateLock) {
                                        updateLock.notifyAll();
                                    }
                                }
                            }
                        });
                    } catch (Exception ex) {
                        logger.error("error handling block request", ex);
                        connectionHandler.sendMessage(Response.newBuilder()
                                .setCode(BlockExchageProtos.ErrorCode.GENERIC).setId(request.getId()).build());
                        uploadError.set(ex);
                        synchronized (updateLock) {
                            updateLock.notifyAll();
                        }
                    }
                }
            }
        };
        connectionHandler.getEventBus().register(listener);
        logger.debug("send index update for file = {}", path);
        final Object indexListener = new Object() {
            @Subscribe
            public void handleIndexRecordAquiredEvent(IndexHandler.IndexRecordAquiredEvent event) {
                if (equal(event.getFolder(), folder)) {
                    for (FileInfo fileInfo : event.getNewRecords()) {
                        if (equal(fileInfo.getPath(), path) && equal(fileInfo.getHash(), dataSource.getHash())) { //TODO check not invalid
                            // sentBlocks.addAll(dataSource.getHashes());
                            isCompleted.set(true);
                            synchronized (updateLock) {
                                updateLock.notifyAll();
                            }
                        }
                    }
                }
            }
        };
        if (indexHandler != null) {
            indexHandler.getEventBus().register(indexListener);
        }
        final IndexUpdate indexUpdate = sendIndexUpdate(folder,
                BlockExchageProtos.FileInfo.newBuilder().setName(path).setSize(fileSize)
                        .setType(BlockExchageProtos.FileInfoType.FILE).addAllBlocks(dataSource.getBlocks()),
                fileInfo == null ? null : fileInfo.getVersionList()).getRight();
        final FileUploadObserver messageUploadObserver = new FileUploadObserver() {
            @Override
            public void close() {
                logger.debug("closing upload process");
                try {
                    connectionHandler.getEventBus().unregister(listener);
                    monitoringProcessExecutorService.shutdown();
                    if (indexHandler != null) {
                        indexHandler.getEventBus().unregister(indexListener);
                    }
                } catch (Exception ex) {
                }
                if (closeConnection && connectionHandler != null) {
                    connectionHandler.close();
                }
                if (indexHandler != null) {
                    FileInfo fileInfo = indexHandler.pushRecord(indexUpdate.getFolder(),
                            Iterables.getOnlyElement(indexUpdate.getFilesList()));
                    logger.info("sent file info record = {}", fileInfo);
                }
            }

            @Override
            public double getProgress() {
                return isCompleted() ? 1d : sentBlocks.size() / ((double) dataSource.getHashes().size());
            }

            @Override
            public String getProgressMessage() {
                return (Math.round(getProgress() * 1000d) / 10d) + "% " + sentBlocks.size() + "/"
                        + dataSource.getHashes().size();
            }

            @Override
            public boolean isCompleted() {
                // return sentBlocks.size() == dataSource.getHashes().size();
                return isCompleted.get();
            }

            @Override
            public double waitForProgressUpdate() throws InterruptedException {
                synchronized (updateLock) {
                    updateLock.wait();
                }
                if (uploadError.get() != null) {
                    throw new RuntimeException(uploadError.get());
                }
                return getProgress();
            }

            @Override
            public DataSource getDataSource() {
                return dataSource;
            }
        };
        return messageUploadObserver;
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
From source file:org.apache.hadoop.hbase.client.TestFromClientSide.java
@Ignore("Flakey: HBASE-8989") @Test/* www .j a v a 2 s . co m*/ public void testClientPoolThreadLocal() throws IOException { final byte[] tableName = Bytes.toBytes("testClientPoolThreadLocal"); int poolSize = Integer.MAX_VALUE; int numVersions = 3; Configuration conf = TEST_UTIL.getConfiguration(); conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "thread-local"); conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize); final HTable table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, conf, 3); final long ts = EnvironmentEdgeManager.currentTimeMillis(); final Get get = new Get(ROW); get.addColumn(FAMILY, QUALIFIER); get.setMaxVersions(); for (int versions = 1; versions <= numVersions; versions++) { Put put = new Put(ROW); put.add(FAMILY, QUALIFIER, ts + versions, VALUE); table.put(put); Result result = table.get(get); NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(QUALIFIER); assertEquals("The number of versions of '" + FAMILY + ":" + QUALIFIER + " did not match " + versions + "; " + put.toString() + ", " + get.toString(), versions, navigableMap.size()); for (Map.Entry<Long, byte[]> entry : navigableMap.entrySet()) { assertTrue("The value at time " + entry.getKey() + " did not match what was put", Bytes.equals(VALUE, entry.getValue())); } } final Object waitLock = new Object(); ExecutorService executorService = Executors.newFixedThreadPool(numVersions); final AtomicReference<AssertionError> error = new AtomicReference<AssertionError>(null); for (int versions = numVersions; versions < numVersions * 2; versions++) { final int versionsCopy = versions; executorService.submit(new Callable<Void>() { @Override public Void call() { try { Put put = new Put(ROW); put.add(FAMILY, QUALIFIER, ts + versionsCopy, VALUE); table.put(put); Result result = table.get(get); NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(QUALIFIER); assertEquals( "The number of versions of '" + Bytes.toString(FAMILY) + ":" + Bytes.toString(QUALIFIER) + " did not match " + versionsCopy, versionsCopy, navigableMap.size()); for (Map.Entry<Long, byte[]> entry : navigableMap.entrySet()) { assertTrue("The value at time " + entry.getKey() + " did not match what was put", Bytes.equals(VALUE, entry.getValue())); } synchronized (waitLock) { waitLock.wait(); } } catch (Exception e) { } catch (AssertionError e) { // the error happens in a thread, it won't fail the test, // need to pass it to the caller for proper handling. error.set(e); LOG.error(e); } return null; } }); } synchronized (waitLock) { waitLock.notifyAll(); } executorService.shutdownNow(); assertNull(error.get()); }
From source file:org.ut.biolab.medsavant.client.view.genetics.variantinfo.GeneManiaSubInspector.java
protected void updateRelatedGenesPanel(Set<Gene> g) { genes = g;/*from ww w . ja va2s.c om*/ kvpPanel.removeAll(); kvpPanel.invalidate(); kvpPanel.updateUI(); kvp = new KeyValuePairPanel(5); kvp.setKeysVisible(false); kvpPanel.add(kvp); progressBar.setVisible(true); progressMessage.setVisible(true); progressBar.setIndeterminate(true); progressMessage.setText("Querying GeneMANIA for related genes"); final Object lock = new Object(); Runnable r = new Runnable() { @Override public void run() { boolean setMsgOff = true; boolean buildGraph = true; if (!Thread.interrupted()) { try { List<String> geneNames = new ArrayList(); for (Gene gene : genes) { geneNames.add(gene.getName()); } List<String> notInGenemania = new ArrayList<String>(geneNames); notInGenemania.removeAll(GenemaniaInfoRetriever.getValidGenes(geneNames)); geneNames = GenemaniaInfoRetriever.getValidGenes(geneNames); genemania.setGenes(geneNames); if (notInGenemania.size() > 0) { String message = "<html><center>Following gene(s) not found in GeneMANIA: "; for (String invalidGene : notInGenemania) { message += "<br>" + invalidGene; } message += "</center></html>"; progressMessage.setText(message); setMsgOff = false; buildGraph = false; } GeneSetFetcher geneSetFetcher = GeneSetFetcher.getInstance(); if (genemania.getGenes().size() > 0) { int i = 1; String zero = Integer.toString(0); Font HEADER_FONT = new Font("Arial", Font.BOLD, 10); kvp.addKey(zero); JLabel geneHeader = new JLabel("Gene".toUpperCase()); geneHeader.setFont(HEADER_FONT); kvp.setValue(zero, geneHeader); JLabel varFreqHeader = new JLabel("<html>VARIATION<br>FREQUENCY<br>(var/kb)</html>"); varFreqHeader.setFont(HEADER_FONT); kvp.setAdditionalColumn(zero, 0, varFreqHeader); JLabel genemaniaHeader = new JLabel("<html>GENEMANIA<br>SCORE</html>"); genemaniaHeader.setFont(HEADER_FONT); if (Thread.interrupted()) { throw new InterruptedException(); } if (rankByVarFreq) { Iterator<org.ut.biolab.medsavant.shared.model.Gene> itr = geneSetFetcher .getGenesByNumVariants(genemania.getRelatedGeneNamesByScore()).iterator(); //skip the first one (it's the name of selected gene already displayed) itr.next(); while (itr.hasNext()) { addGeneToKeyValuePanel(itr.next(), i++); } currSizeOfArray = i - 1; } else { Iterator<String> itr = genemania.getRelatedGeneNamesByScore().iterator(); //skip the first one (it's the name of selected gene already displayed) itr.next(); List<String> tmp = new LinkedList<String>(); while (itr.hasNext()) { tmp.add(itr.next()); } System.out.println("start populating table" + System.currentTimeMillis()); /*while (itr.hasNext()) { //getNormalizedVariantCount(gene) addGeneToKeyValuePanel(GeneSetFetcher.getInstance().getGene(itr.next()), i++); }*/ for (String foo : tmp) { addGeneToKeyValuePanel(GeneSetFetcher.getInstance().getGene(foo), i++); } System.out.println("done thread" + System.currentTimeMillis()); currSizeOfArray = i - 1; } } } catch (InterruptedException e) { LOG.error(e); buildGraph = false; } catch (NoRelatedGenesInfoException e) { LOG.error(e); progressMessage.setText(e.getMessage()); setMsgOff = false; buildGraph = false; } catch (Exception ex) { LOG.error(ex); buildGraph = false; ClientMiscUtils.reportError("Error retrieving data from GeneMANIA: %s", ex); } catch (Error e) { LOG.error(e); } finally { progressBar.setIndeterminate(false); progressBar.setValue(0); progressBar.setVisible(false); if (setMsgOff) { progressMessage.setVisible(false); } } } synchronized (lock) { lock.notify(); } } }; if (genemaniaAlgorithmThread == null) { genemaniaAlgorithmThread 
= new Thread(r); } else { genemaniaAlgorithmThread.interrupt(); genemaniaAlgorithmThread = new Thread(r); } final Runnable geneDescriptionFetcher = new Runnable() { @Override public void run() { for (int j = 1; j <= currSizeOfArray; j++) { try { String geneName = kvp.getValue(Integer.toString(j)); Gene gene = GeneSetFetcher.getInstance().getGene(geneName); String d = gene.getDescription(); kvp.setToolTipForValue(Integer.toString(j), d); } catch (Exception e) { //do nothing (don't set tool tip to anything) } } } }; //} genemaniaAlgorithmThread.start(); Runnable r2 = new Runnable() { @Override public void run() { try { synchronized (lock) { lock.wait(); Thread toolTipGenerator = new Thread(geneDescriptionFetcher); Thread varFreqCalculator = new Thread(new Runnable() { @Override public void run() { for (int i = 1; i <= currSizeOfArray; i++) { try { String geneName = kvp.getValue(Integer.toString(i)); Gene gene = GeneSetFetcher.getInstance().getGene(geneName); kvp.setAdditionalColumn(Integer.toString(i), 0, new JLabel(Double.toString( GeneSetFetcher.getInstance().getNormalizedVariantCount(gene)))); kvp.invalidate(); kvp.updateUI(); } catch (Exception ex) { //don't put in any variation frequency } } } }); toolTipGenerator.start(); varFreqCalculator.start(); } } catch (Exception e) { } } }; Thread t2 = new Thread(r2); t2.start(); }
From source file:com.codename1.impl.android.AndroidImplementation.java
@Override
public Media createMediaRecorder(final String path, final String mimeType) throws IOException {
    if (getActivity() == null) {
        return null;
    }
    if (!checkForPermission(Manifest.permission.RECORD_AUDIO, "This is required to record audio")) {
        return null;
    }
    final AndroidRecorder[] record = new AndroidRecorder[1];
    final IOException[] error = new IOException[1];

    final Object lock = new Object();
    synchronized (lock) {
        getActivity().runOnUiThread(new Runnable() {
            @Override
            public void run() {
                synchronized (lock) {
                    MediaRecorder recorder = new MediaRecorder();
                    recorder.setAudioSource(MediaRecorder.AudioSource.MIC);
                    if (mimeType.contains("amr")) {
                        recorder.setOutputFormat(MediaRecorder.OutputFormat.AMR_NB);
                        recorder.setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
                    } else {
                        recorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4);
                        recorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC);
                    }
                    recorder.setOutputFile(removeFilePrefix(path));
                    try {
                        recorder.prepare();
                        record[0] = new AndroidRecorder(recorder);
                    } catch (IllegalStateException ex) {
                        Logger.getLogger(AndroidImplementation.class.getName()).log(Level.SEVERE, null, ex);
                    } catch (IOException ex) {
                        error[0] = ex;
                    } finally {
                        lock.notify();
                    }
                }
            }
        });

        try {
            lock.wait();
        } catch (InterruptedException ex) {
            ex.printStackTrace();
        }
        if (error[0] != null) {
            throw error[0];
        }
        return record[0];
    }
}
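The recorder setup above hands the result (or an IOException) back from the Android UI thread through single-element arrays and a bare lock.wait(). A minimal, self-contained sketch of the same cross-thread result handoff, with a completion flag guarding the wait loop so a spurious wakeup cannot return before the worker has actually run (illustrative class, not Codename One API):

// Illustrative cross-thread result handoff built on Object.wait()/notifyAll().
public class ResultHandoff<T> {
    private final Object lock = new Object();
    private T result;
    private Exception error;
    private boolean finished = false;

    // Called by the worker thread once, with either a value or a failure.
    public void complete(T value, Exception failure) {
        synchronized (lock) {
            result = value;
            error = failure;
            finished = true;
            lock.notifyAll();
        }
    }

    // Called by the waiting thread: blocks until complete() has run, then returns the value or rethrows.
    public T get() throws Exception {
        synchronized (lock) {
            while (!finished) {
                lock.wait();
            }
            if (error != null) {
                throw error;
            }
            return result;
        }
    }
}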