List of usage examples for java.lang.Object.notifyAll()
@HotSpotIntrinsicCandidate public final native void notifyAll();
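Before the project examples below, here is a minimal sketch of the standard idiom: a waiter blocks in a loop until a condition flag flips, and the notifier sets the flag and calls notifyAll() while holding the same monitor. The class and field names are illustrative only, not from any of the sources below.

public class NotifyAllExample {
    private final Object lock = new Object();
    private boolean ready = false; // condition guarded by 'lock'

    public void awaitReady() throws InterruptedException {
        synchronized (lock) {
            // Always wait in a loop: wait() can wake spuriously,
            // and the condition must be re-checked under the monitor.
            while (!ready) {
                lock.wait();
            }
        }
    }

    public void signalReady() {
        synchronized (lock) {
            ready = true;
            // Wake every thread blocked in lock.wait(); unlike notify(),
            // notifyAll() cannot wake only a single, irrelevant waiter.
            lock.notifyAll();
        }
    }
}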
From source file:org.sonatype.nexus.testsuite.obr.ObrITSupport.java
protected void deployUsingObrIntoFelix(final String repoId) throws Exception {
    final File felixHome = util.resolveFile("target/org.apache.felix.main.distribution-3.2.2");
    final File felixRepo = util.resolveFile("target/felix-local-repository");
    final File felixConfig = testData().resolveFile("felix.properties");

    // ensure we have an obr.xml
    final Content content = content();
    final Location obrLocation = new Location(repoId, ".meta/obr.xml");
    content.download(obrLocation, new File(testIndex().getDirectory("downloads"), repoId + "-obr.xml"));

    FileUtils.deleteDirectory(new File(felixHome, "felix-cache"));
    FileUtils.deleteDirectory(new File(felixRepo, ".meta"));

    final ProcessBuilder pb = new ProcessBuilder("java", "-Dfelix.felix.properties=" + felixConfig.toURI(),
            "-jar", "bin/felix.jar");
    pb.directory(felixHome);
    pb.redirectErrorStream(true);
    final Process p = pb.start();

    final Object lock = new Object();
    final Thread t = new Thread(new Runnable() {
        public void run() {
            // just a safeguard: if Felix gets stuck, kill everything
            try {
                synchronized (lock) {
                    lock.wait(5 * 1000 * 60);
                }
            } catch (final InterruptedException e) {
                // ignore
            }
            p.destroy();
        }
    });
    t.setDaemon(true);
    t.start();

    synchronized (lock) {
        final InputStream input = p.getInputStream();
        final OutputStream output = p.getOutputStream();
        waitFor(input, "g!");
        output.write(("obr:repos add " + nexus().getUrl() + "content/" + obrLocation.toContentPath() + "\r\n")
                .getBytes());
        output.flush();
        waitFor(input, "g!");
        output.write(("obr:repos remove http://felix.apache.org/obr/releases.xml\r\n").getBytes());
        output.flush();
        waitFor(input, "g!");
        output.write(("obr:repos list\r\n").getBytes());
        output.flush();
        waitFor(input, "g!");
        output.write("obr:deploy -s org.apache.felix.webconsole\r\n".getBytes());
        output.flush();
        waitFor(input, "done.");
        p.destroy();
        lock.notifyAll();
    }
}
From source file:edu.cmu.graphchi.engine.HypergraphChiEngine.java
private void loadBeforeUpdates(int interval, final ChiVertex<VertexDataType, EdgeDataType>[] vertices,
        final MemoryShard<EdgeDataType> memShard, final int startVertex, final int endVertex) throws IOException {
    final Object terminationLock = new Object();
    final TimerContext _timer = loadTimer.time();
    // TODO: make easier to read
    synchronized (terminationLock) {
        final AtomicInteger countDown = new AtomicInteger(disableOutEdges ? 1 : nShards);

        if (!disableInEdges) {
            try {
                logger.info("Memshard: " + startVertex + " -- " + endVertex);
                memShard.loadVertices(startVertex, endVertex, vertices, disableOutEdges, parallelExecutor);
                logger.info("Loading memory-shard finished." + Thread.currentThread().getName());

                if (countDown.decrementAndGet() == 0) {
                    synchronized (terminationLock) {
                        terminationLock.notifyAll();
                    }
                }
            } catch (IOException ioe) {
                ioe.printStackTrace();
                throw new RuntimeException(ioe);
            } catch (Exception err) {
                err.printStackTrace();
            }
        }

        /* Load in parallel */
        if (!disableOutEdges) {
            for (int p = 0; p < nShards; p++) {
                if (p != interval || disableInEdges) {
                    final int _p = p;
                    final SlidingShard<EdgeDataType> shard = slidingShards.get(p);
                    loadingExecutor.submit(new Runnable() {
                        public void run() {
                            try {
                                shard.readNextVertices(vertices, startVertex, false);
                                if (countDown.decrementAndGet() == 0) {
                                    synchronized (terminationLock) {
                                        terminationLock.notifyAll();
                                    }
                                }
                            } catch (IOException ioe) {
                                ioe.printStackTrace();
                                throw new RuntimeException(ioe);
                            } catch (Exception err) {
                                err.printStackTrace();
                            }
                        }
                    });
                }
            }
        }

        // barrier
        try {
            while (countDown.get() > 0) {
                terminationLock.wait(5000);
                if (countDown.get() > 0) {
                    logger.info("Still waiting for loading, counter is: " + countDown.get());
                }
            }
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
    _timer.stop();
}
From source file:io.druid.indexing.overlord.RemoteTaskRunner.java
@LifecycleStart
public void start() {
    try {
        if (started) {
            return;
        }
        final MutableInt waitingFor = new MutableInt(1);
        final Object waitingForMonitor = new Object();

        // Add listener for creation/deletion of workers
        workerPathCache.getListenable().addListener(new PathChildrenCacheListener() {
            @Override
            public void childEvent(CuratorFramework client, final PathChildrenCacheEvent event) throws Exception {
                final Worker worker;
                switch (event.getType()) {
                case CHILD_ADDED:
                    worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    synchronized (waitingForMonitor) {
                        waitingFor.increment();
                    }
                    Futures.addCallback(addWorker(worker), new FutureCallback<ZkWorker>() {
                        @Override
                        public void onSuccess(ZkWorker zkWorker) {
                            synchronized (waitingForMonitor) {
                                waitingFor.decrement();
                                waitingForMonitor.notifyAll();
                            }
                        }

                        @Override
                        public void onFailure(Throwable throwable) {
                            synchronized (waitingForMonitor) {
                                waitingFor.decrement();
                                waitingForMonitor.notifyAll();
                            }
                        }
                    });
                    break;
                case CHILD_UPDATED:
                    worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    updateWorker(worker);
                    break;
                case CHILD_REMOVED:
                    worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    removeWorker(worker);
                    break;
                case INITIALIZED:
                    synchronized (waitingForMonitor) {
                        waitingFor.decrement();
                        waitingForMonitor.notifyAll();
                    }
                default:
                    break;
                }
            }
        });
        workerPathCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);

        synchronized (waitingForMonitor) {
            while (waitingFor.intValue() > 0) {
                waitingForMonitor.wait();
            }
        }

        // Schedule cleanup for task status of the workers that might have disconnected while overlord was not running
        List<String> workers;
        try {
            workers = cf.getChildren().forPath(indexerZkConfig.getStatusPath());
        } catch (KeeperException.NoNodeException e) {
            // statusPath doesn't exist yet; can occur if no middleManagers have started.
            workers = ImmutableList.of();
        }
        for (String worker : workers) {
            if (!zkWorkers.containsKey(worker) && cf.checkExists()
                    .forPath(JOINER.join(indexerZkConfig.getAnnouncementsPath(), worker)) == null) {
                scheduleTasksCleanupForWorker(worker,
                        cf.getChildren().forPath(JOINER.join(indexerZkConfig.getStatusPath(), worker)));
            }
        }

        started = true;
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
From source file:org.paxle.crawler.LimitedRateCopierTest.java
public void testThreadedCopy() throws Exception {
    // FIXME if size is in the order of magnitude of limitKBps, this test works, but ...
    final int size = 1024 * 4; // ... try to increase this value ...
    final int threadNum = 4;
    final int limitKBps = 8; // ... and this one proportionally, and watch how it explodes :)
    final int expectedTime = size / 1024 * 1000 / limitKBps * threadNum;
    final ILimitedRateCopier lrc = null; // new LimitedRateCopier(limitKBps);
    // System.out.println("expected time: " + expectedTime + " ms");

    final ArrayList<Thread> threads = new ArrayList<Thread>();
    final Object sync = new Object();
    for (int i = 0; i < threadNum; i++) {
        final int num = i;
        final InputStream zis = new NullInputStream(size);
        final OutputStream nos = new NullOutputStream();
        threads.add(new Thread() {
            {
                this.setName("Test-thread " + num);
            }

            @Override
            public void run() {
                try {
                    // System.out.println("thread " + num + " syncing");
                    synchronized (sync) {
                        sync.wait();
                    }
                    // System.out.println("thread " + num + " starts copying");
                    final long start = System.currentTimeMillis();
                    // lrc.copy(zis, nos, size);
                    final long end = System.currentTimeMillis();
                    /* XXX this is wrong, because every new thread gets less bandwidth. One would have to pre-set
                     * the number of expected threads in the lrc for this to be correct:
                    assertTrue(
                            String.format("%d: Threaded copying took %d ms but should have taken %d ms.",
                                    num, end - start, expectedTime),
                            expectedTime <= end - start);
                    */
                    // System.out.println("thread " + num + " finished in " + (end - start) + " ms");
                } catch (Throwable e) {
                    e.printStackTrace();
                }
            }
        });
    }

    for (final Thread t : threads)
        t.start();
    Thread.sleep(10); // wait until all have started and are waiting on sync

    // System.out.println("notifying all");
    final long start = System.currentTimeMillis();
    synchronized (sync) {
        sync.notifyAll();
    }
    for (final Thread t : threads)
        t.join();
    final long end = System.currentTimeMillis();
    // System.out.println(String.format("Finished in %d ms", end - start));
    // assertTrue(String.format("All %d threads took %d ms but should have taken %d ms.", threadNum, end - start, expectedTime),
    //         expectedTime <= end - start);
}
From source file:it.anyplace.sync.bep.BlockPusher.java
public FileUploadObserver pushFile(final DataSource dataSource, @Nullable FileInfo fileInfo, final String folder,
        final String path) {
    checkArgument(connectionHandler.hasFolder(folder),
            "supplied connection handler %s will not share folder %s", connectionHandler, folder);
    checkArgument(fileInfo == null || equal(fileInfo.getFolder(), folder));
    checkArgument(fileInfo == null || equal(fileInfo.getPath(), path));
    try {
        final ExecutorService monitoringProcessExecutorService = Executors.newCachedThreadPool();
        final long fileSize = dataSource.getSize();
        final Set<String> sentBlocks = Sets.newConcurrentHashSet();
        final AtomicReference<Exception> uploadError = new AtomicReference<>();
        final AtomicBoolean isCompleted = new AtomicBoolean(false);
        final Object updateLock = new Object();
        final Object listener = new Object() {
            @Subscribe
            public void handleRequestMessageReceivedEvent(RequestMessageReceivedEvent event) {
                BlockExchageProtos.Request request = event.getMessage();
                if (equal(request.getFolder(), folder) && equal(request.getName(), path)) {
                    try {
                        final String hash = BaseEncoding.base16().encode(request.getHash().toByteArray());
                        logger.debug("handling block request = {}:{}-{} ({})", request.getName(),
                                request.getOffset(), request.getSize(), hash);
                        byte[] data = dataSource.getBlock(request.getOffset(), request.getSize(), hash);
                        checkNotNull(data, "data not found for hash = %s", hash);
                        final Future future = connectionHandler.sendMessage(
                                Response.newBuilder().setCode(BlockExchageProtos.ErrorCode.NO_ERROR)
                                        .setData(ByteString.copyFrom(data)).setId(request.getId()).build());
                        monitoringProcessExecutorService.submit(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    future.get();
                                    sentBlocks.add(hash);
                                    synchronized (updateLock) {
                                        updateLock.notifyAll();
                                    }
                                    // TODO retry on error, register error and throw on watcher
                                } catch (InterruptedException ex) {
                                    // return and do nothing
                                } catch (ExecutionException ex) {
                                    uploadError.set(ex);
                                    synchronized (updateLock) {
                                        updateLock.notifyAll();
                                    }
                                }
                            }
                        });
                    } catch (Exception ex) {
                        logger.error("error handling block request", ex);
                        connectionHandler.sendMessage(Response.newBuilder()
                                .setCode(BlockExchageProtos.ErrorCode.GENERIC).setId(request.getId()).build());
                        uploadError.set(ex);
                        synchronized (updateLock) {
                            updateLock.notifyAll();
                        }
                    }
                }
            }
        };
        connectionHandler.getEventBus().register(listener);
        logger.debug("send index update for file = {}", path);
        final Object indexListener = new Object() {
            @Subscribe
            public void handleIndexRecordAquiredEvent(IndexHandler.IndexRecordAquiredEvent event) {
                if (equal(event.getFolder(), folder)) {
                    for (FileInfo fileInfo : event.getNewRecords()) {
                        if (equal(fileInfo.getPath(), path) && equal(fileInfo.getHash(), dataSource.getHash())) {
                            // TODO check not invalid
                            // sentBlocks.addAll(dataSource.getHashes());
                            isCompleted.set(true);
                            synchronized (updateLock) {
                                updateLock.notifyAll();
                            }
                        }
                    }
                }
            }
        };
        if (indexHandler != null) {
            indexHandler.getEventBus().register(indexListener);
        }
        final IndexUpdate indexUpdate = sendIndexUpdate(folder,
                BlockExchageProtos.FileInfo.newBuilder().setName(path).setSize(fileSize)
                        .setType(BlockExchageProtos.FileInfoType.FILE).addAllBlocks(dataSource.getBlocks()),
                fileInfo == null ? null : fileInfo.getVersionList()).getRight();
        final FileUploadObserver messageUploadObserver = new FileUploadObserver() {
            @Override
            public void close() {
                logger.debug("closing upload process");
                try {
                    connectionHandler.getEventBus().unregister(listener);
                    monitoringProcessExecutorService.shutdown();
                    if (indexHandler != null) {
                        indexHandler.getEventBus().unregister(indexListener);
                    }
                } catch (Exception ex) {
                }
                if (closeConnection && connectionHandler != null) {
                    connectionHandler.close();
                }
                if (indexHandler != null) {
                    FileInfo fileInfo = indexHandler.pushRecord(indexUpdate.getFolder(),
                            Iterables.getOnlyElement(indexUpdate.getFilesList()));
                    logger.info("sent file info record = {}", fileInfo);
                }
            }

            @Override
            public double getProgress() {
                return isCompleted() ? 1d : sentBlocks.size() / ((double) dataSource.getHashes().size());
            }

            @Override
            public String getProgressMessage() {
                return (Math.round(getProgress() * 1000d) / 10d) + "% " + sentBlocks.size() + "/"
                        + dataSource.getHashes().size();
            }

            @Override
            public boolean isCompleted() {
                // return sentBlocks.size() == dataSource.getHashes().size();
                return isCompleted.get();
            }

            @Override
            public double waitForProgressUpdate() throws InterruptedException {
                synchronized (updateLock) {
                    updateLock.wait();
                }
                if (uploadError.get() != null) {
                    throw new RuntimeException(uploadError.get());
                }
                return getProgress();
            }

            @Override
            public DataSource getDataSource() {
                return dataSource;
            }
        };
        return messageUploadObserver;
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
From source file:org.cloudgraph.hbase.graph.ParallelSubgraphTask.java
/**
 * Assembles a given set of edges where the target is a different row, within
 * this table or another. Since we are assembling a graph, and each edge links
 * another row, each edge requires a new row reader.
 *
 * @param target
 *          the object source to which we link edges
 * @param prop
 *          the edge property
 * @param collection
 *          the edges
 * @param rowReader
 *          the row reader
 * @param childTableReader
 *          the table reader for the child objects
 * @param level
 *          the assembly level
 * @throws IOException
 */
protected void assembleExternalEdges(PlasmaDataObject target, long targetSequence, PlasmaProperty prop,
        EdgeReader collection, RowReader rowReader, TableReader childTableReader, int level) throws IOException {
    for (CellValues childValues : collection.getRowValues()) {
        // see if this row is locked during fetch, and wait for it
        Object rowLock = fetchLocks.get(Arrays.hashCode(childValues.getRowKey()));
        if (rowLock != null) {
            synchronized (rowLock) {
                try {
                    rowLock.wait();
                } catch (InterruptedException e) {
                    log.error(e.getMessage(), e);
                }
            }
        }
        RowReader existingChildRowReader = childTableReader.getRowReader(childValues.getRowKey());
        if (existingChildRowReader != null) {
            // If we assembled this row root before, just link it.
            // The data is already complete.
            PlasmaDataObject existingChild = (PlasmaDataObject) existingChildRowReader.getRootDataObject();
            synchronized (existingChild) {
                synchronized (target) {
                    link(existingChild, target, prop);
                }
            }
            continue;
        }
        // While fetching this node, another thread can fail to find an
        // existing row reader registered above and fall through to this
        // fetch, and therefore fetch the same row, in addition to
        // attempting to create the same row reader below, causing an
        // error or warning. The second thread may be arriving at this
        // node from another property/edge and therefore need to link
        // from another edge above.
        fetchLocks.put(Arrays.hashCode(childValues.getRowKey()), new Object());

        this.assembleExternalEdge(childValues, collection, childTableReader, target, targetSequence, prop, level);

        rowLock = fetchLocks.remove(Arrays.hashCode(childValues.getRowKey()));
        if (rowLock != null) {
            synchronized (rowLock) {
                rowLock.notifyAll();
            }
        } else {
            log.error("expected locked row key '" + Bytes.toString(childValues.getRowKey())
                    + "' for edgeReader, " + collection);
        }
    }
}
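The example above guards in-flight row fetches with a map of per-key lock objects: a reader that finds a lock waits on it, and the fetching thread removes the lock and calls notifyAll() once the row is ready. A minimal sketch of that idiom follows; the Fetcher class, fetchRow method, and cache fields are illustrative only, not part of the original source.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class Fetcher {
    private final Map<String, Object> inFlight = new ConcurrentHashMap<>();
    private final Map<String, byte[]> cache = new ConcurrentHashMap<>();

    public byte[] fetchRow(String key) throws InterruptedException {
        Object lock = inFlight.get(key);
        if (lock != null) {
            // Another thread is already fetching this row; wait until it finishes.
            synchronized (lock) {
                while (inFlight.get(key) == lock) {
                    lock.wait();
                }
            }
        }
        byte[] row = cache.get(key);
        if (row != null) {
            return row; // already fetched by the other thread
        }
        // Announce that this thread is fetching the row, then do the work.
        Object myLock = new Object();
        inFlight.put(key, myLock);
        try {
            row = loadFromStore(key); // the expensive fetch
            cache.put(key, row);
        } finally {
            inFlight.remove(key);
            synchronized (myLock) {
                myLock.notifyAll(); // release any threads waiting on this row
            }
        }
        return row;
    }

    private byte[] loadFromStore(String key) {
        return new byte[0]; // placeholder for the real fetch
    }
}

Like the original, this sketch tolerates the small window in which two threads can both miss the lock and fetch the same row; the cache makes the duplicate fetch harmless.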
From source file:com.safi.asterisk.handler.connection.AbstractConnectionManager.java
protected void handleLoopback(AgiRequest request, AgiChannel channel) {
    try {
        String uuid = channel.getVariable("SafiUUID");
        if (uuid == null)
            return;
        Object obj = null;
        KeepAlive al = null;
        synchronized (loopbackCallLock) {
            obj = loopbackCallLock.get(uuid);
            if (obj == null) {
                log.error("No lock found for channel " + channel.getName() + " with uuid " + uuid);
                if (SafletEngine.debuggerLog.isEnabledFor(Level.ERROR))
                    SafletEngine.debuggerLog
                            .error("No lock found for channel " + channel.getName() + " with uuid " + uuid);
                return;
            }
            al = new KeepAlive(channel);
            // keepAliveExecutor.execute(al);
            KeepAlive al2 = channelKeepaliveMap.put(channel.getUniqueId(), al);
            if (al2 != null)
                al2.stop();
            if (obj != null)
                loopbackCallLock.put(uuid, new Object[] { channel, request });
        }
        if (obj != null) {
            // loopbackCallLock.put(uuid, new Object[] { channel, request });
            synchronized (obj) {
                obj.notifyAll();
            }
            if (al != null)
                al.run();
        }
    } catch (Exception e) {
        log.error("Error caught during handleLoopback", e);
        if (SafletEngine.debuggerLog.isEnabledFor(Level.ERROR))
            SafletEngine.debuggerLog.error("Error caught during handleLoopback", e);
    }
}
From source file:org.apache.druid.indexing.overlord.RemoteTaskRunner.java
@Override
@LifecycleStart
public void start() {
    if (!lifecycleLock.canStart()) {
        return;
    }
    try {
        final MutableInt waitingFor = new MutableInt(1);
        final Object waitingForMonitor = new Object();

        // Add listener for creation/deletion of workers
        workerPathCache.getListenable().addListener(new PathChildrenCacheListener() {
            @Override
            public void childEvent(CuratorFramework client, final PathChildrenCacheEvent event) throws Exception {
                final Worker worker;
                switch (event.getType()) {
                case CHILD_ADDED:
                    worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    synchronized (waitingForMonitor) {
                        waitingFor.increment();
                    }
                    Futures.addCallback(addWorker(worker), new FutureCallback<ZkWorker>() {
                        @Override
                        public void onSuccess(ZkWorker zkWorker) {
                            synchronized (waitingForMonitor) {
                                waitingFor.decrement();
                                waitingForMonitor.notifyAll();
                            }
                        }

                        @Override
                        public void onFailure(Throwable throwable) {
                            synchronized (waitingForMonitor) {
                                waitingFor.decrement();
                                waitingForMonitor.notifyAll();
                            }
                        }
                    });
                    break;
                case CHILD_UPDATED:
                    worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    updateWorker(worker);
                    break;
                case CHILD_REMOVED:
                    worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    removeWorker(worker);
                    break;
                case INITIALIZED:
                    // Schedule cleanup for task status of the workers that might have disconnected while overlord was not running
                    List<String> workers;
                    try {
                        workers = cf.getChildren().forPath(indexerZkConfig.getStatusPath());
                    } catch (KeeperException.NoNodeException e) {
                        // statusPath doesn't exist yet; can occur if no middleManagers have started.
                        workers = ImmutableList.of();
                    }
                    for (String workerId : workers) {
                        final String workerAnnouncePath = JOINER.join(indexerZkConfig.getAnnouncementsPath(), workerId);
                        final String workerStatusPath = JOINER.join(indexerZkConfig.getStatusPath(), workerId);
                        if (!zkWorkers.containsKey(workerId) && cf.checkExists().forPath(workerAnnouncePath) == null) {
                            try {
                                scheduleTasksCleanupForWorker(workerId, cf.getChildren().forPath(workerStatusPath));
                            } catch (Exception e) {
                                log.warn(e,
                                        "Could not schedule cleanup for worker[%s] during startup (maybe someone removed the status znode[%s]?). Skipping.",
                                        workerId, workerStatusPath);
                            }
                        }
                    }
                    synchronized (waitingForMonitor) {
                        waitingFor.decrement();
                        waitingForMonitor.notifyAll();
                    }
                    break;
                case CONNECTION_SUSPENDED:
                case CONNECTION_RECONNECTED:
                case CONNECTION_LOST:
                    // do nothing
                }
            }
        });
        workerPathCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);

        synchronized (waitingForMonitor) {
            while (waitingFor.intValue() > 0) {
                waitingForMonitor.wait();
            }
        }

        ScheduledExecutors.scheduleAtFixedRate(cleanupExec, Period.ZERO.toStandardDuration(),
                config.getWorkerBlackListCleanupPeriod().toStandardDuration(), () -> checkBlackListedNodes());

        provisioningService = provisioningStrategy.makeProvisioningService(this);
        lifecycleLock.started();
    } catch (Exception e) {
        throw Throwables.propagate(e);
    } finally {
        lifecycleLock.exitStart();
    }
}
From source file:org.commonjava.maven.galley.cache.infinispan.FastLocalCacheProvider.java
/**
 * For file reading, first check whether the local cache already has the file. If yes, read directly from the
 * local cache. If no, check the NFS volume for the file, copy it to the local cache if found, then read from
 * the local cache again.
 *
 * @param resource - the resource to be read
 * @return - the input stream for further reading
 * @throws IOException
 */
@Override
public InputStream openInputStream(final ConcreteResource resource) throws IOException {
    final String pathKey = getKeyForResource(resource);

    // This lock ensures that the local resource can finally be opened successfully when the local copy is
    // missing but the NFS one is not, i.e. while an NFS->local copy is in progress.
    final Object copyLock = new Object();
    // A flag to mark whether the local resource can be opened now or must wait for the copy thread to complete
    final AtomicBoolean canStreamOpen = new AtomicBoolean(false);
    // A second flag to indicate whether the copy task failed
    final AtomicBoolean copyExceOccurs = new AtomicBoolean(false);

    // This copy task is responsible for the NFS->local copy and runs in another thread, which can use the
    // PartyLine concurrent read/write support on the local cache to speed up the I/O
    final Runnable copyNFSTask = () -> {
        InputStream nfsIn = null;
        OutputStream localOut = null;
        try {
            lockByISPN(nfsOwnerCache, resource, LockLevel.write);
            File nfsFile = getNFSDetachedFile(resource);
            if (!nfsFile.exists()) {
                logger.trace("NFS file does not exist too.");
                copyExceOccurs.set(true);
                return;
            }
            nfsIn = new FileInputStream(nfsFile);
            localOut = plCacheProvider.openOutputStream(resource);
            canStreamOpen.set(true); // set it ASAP so the readers can start reading before copy completes
            synchronized (copyLock) {
                copyLock.notifyAll();
            }
            IOUtils.copy(nfsIn, localOut);
            logger.trace("NFS copy to local cache done.");
        } catch (NotSupportedException | SystemException | IOException | InterruptedException e) {
            copyExceOccurs.set(true);
            if (e instanceof IOException) {
                final String errorMsg = String.format(
                        "[galley] got i/o error when doing the NFS->Local copy for resource %s",
                        resource.toString());
                logger.warn(errorMsg, e);
            } else if (e instanceof InterruptedException) {
                final String errorMsg = String.format(
                        "[galley] got thread interrupted error for partyline file locking when doing the NFS->Local copy for resource %s",
                        resource.toString());
                throw new IllegalStateException(errorMsg, e);
            } else {
                final String errorMsg = String.format(
                        "[galley] Cache TransactionManager got error, locking key is %s, resource is %s",
                        pathKey, resource.toString());
                logger.error(errorMsg, e);
                throw new IllegalStateException(errorMsg, e);
            }
        } finally {
            unlockByISPN(nfsOwnerCache, false, resource);
            IOUtils.closeQuietly(nfsIn);
            IOUtils.closeQuietly(localOut);
            cacheLocalFilePath(resource);
            synchronized (copyLock) {
                copyLock.notifyAll();
            }
        }
    };

    // This lock controls concurrent operations on the resource, like concurrent delete and read/write.
    // Using "this" as the lock is heavyweight; consider using the transfer for the resource as the lock
    // for each thread instead.
    final AtomicReference<IOException> taskException = new AtomicReference<>();
    final InputStream stream = tryLockAnd(resource, DEFAULT_WAIT_FOR_TRANSFER_LOCK_SECONDS, TimeUnit.SECONDS, r -> {
        boolean localExisted = plCacheProvider.exists(r);
        if (localExisted) {
            logger.trace("local cache already exists, will directly get input stream from it.");
            try {
                return plCacheProvider.openInputStream(r);
            } catch (IOException e) {
                taskException.set(e);
                return null;
            }
        } else {
            logger.trace("local cache does not exist, will start to copy from NFS cache");
            executor.execute(copyNFSTask);
        }
        synchronized (copyLock) {
            while (!canStreamOpen.get()) {
                if (copyExceOccurs.get()) {
                    return null;
                }
                try {
                    copyLock.wait();
                } catch (InterruptedException e) {
                    logger.warn("[galley] NFS copy thread is interrupted by other threads", e);
                }
            }
            logger.trace("the NFS->local copy completed, will get the input stream from local cache");
            try {
                return plCacheProvider.openInputStream(r);
            } catch (IOException e) {
                taskException.set(e);
                return null;
            }
        }
    });
    propagateException(taskException.get());

    return stream;
}
From source file:edu.cmu.graphchi.engine.HypergraphChiEngine.java
private void execUpdates(final HypergraphChiProgram<VertexDataType, EdgeDataType> program,
        final ChiVertex<VertexDataType, EdgeDataType>[] vertices) {
    if (vertices == null || vertices.length == 0)
        return;
    TimerContext _timer = executionTimer.time();
    if (Runtime.getRuntime().availableProcessors() == 1) {
        /* Sequential updates */
        for (ChiVertex<VertexDataType, EdgeDataType> vertex : vertices) {
            if (vertex != null) {
                nupdates++;
                hypergraphUpdate(program, vertex, chiContext);
                // program.update(vertex, chiContext);
            }
        }
    } else {
        final Object termlock = new Object();
        final int chunkSize = 1 + vertices.length / 64;
        final int nWorkers = vertices.length / chunkSize + 1;
        final AtomicInteger countDown = new AtomicInteger(1 + nWorkers);

        if (!enableDeterministicExecution) {
            for (ChiVertex<VertexDataType, EdgeDataType> vertex : vertices) {
                if (vertex != null)
                    vertex.parallelSafe = true;
            }
        }

        /* Parallel updates. One thread for non-parallel-safe updates, others updated in parallel.
           This guarantees deterministic execution. */

        /* Non-safe updates */
        parallelExecutor.submit(new Runnable() {
            public void run() {
                int thrupdates = 0;
                GraphChiContext threadContext = chiContext.clone(0);
                try {
                    for (ChiVertex<VertexDataType, EdgeDataType> vertex : vertices) {
                        if (vertex != null && !vertex.parallelSafe) {
                            thrupdates++;
                            hypergraphUpdate(program, vertex, threadContext);
                            // program.update(vertex, threadContext);
                        }
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    int pending = countDown.decrementAndGet();
                    synchronized (termlock) {
                        nupdates += thrupdates;
                        if (pending == 0) {
                            termlock.notifyAll();
                        }
                    }
                }
            }
        });

        /* Parallel updates */
        for (int thrId = 0; thrId < nWorkers; thrId++) {
            final int myId = thrId;
            final int chunkStart = myId * chunkSize;
            final int chunkEnd = chunkStart + chunkSize;

            parallelExecutor.submit(new Runnable() {
                public void run() {
                    int thrupdates = 0;
                    GraphChiContext threadContext = chiContext.clone(1 + myId);
                    try {
                        int end = chunkEnd;
                        if (end > vertices.length)
                            end = vertices.length;
                        for (int i = chunkStart; i < end; i++) {
                            ChiVertex<VertexDataType, EdgeDataType> vertex = vertices[i];
                            if (vertex != null && vertex.parallelSafe) {
                                thrupdates++;
                                hypergraphUpdate(program, vertex, threadContext);
                                // program.update(vertex, threadContext);
                            }
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    } finally {
                        int pending = countDown.decrementAndGet();
                        synchronized (termlock) {
                            nupdates += thrupdates;
                            if (pending == 0) {
                                termlock.notifyAll();
                            }
                        }
                    }
                }
            });
        }

        synchronized (termlock) {
            while (countDown.get() > 0) {
                try {
                    termlock.wait(1500);
                } catch (InterruptedException e) {
                    // What to do?
                    e.printStackTrace();
                }
                if (countDown.get() > 0)
                    logger.info("Waiting for execution to finish: countDown:" + countDown.get());
            }
        }
    }
    _timer.stop();
}
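Several of the examples above (the GraphChi load and update barriers, and Druid's start()) share the same completion-barrier idiom: worker threads decrement a shared counter and call notifyAll() on a monitor when it reaches zero, while the coordinating thread waits in a loop until the count drains. A minimal sketch of that idiom follows; the names (CompletionBarrier, runAll) are illustrative and not taken from any of the sources.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

public class CompletionBarrier {
    public static void runAll(List<Runnable> tasks) throws InterruptedException {
        final Object monitor = new Object();
        final AtomicInteger pending = new AtomicInteger(tasks.size());
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (final Runnable task : tasks) {
            pool.submit(() -> {
                try {
                    task.run();
                } finally {
                    // Decrement the shared counter; the last worker to finish
                    // notifies under the monitor so the waiter is woken.
                    if (pending.decrementAndGet() == 0) {
                        synchronized (monitor) {
                            monitor.notifyAll();
                        }
                    }
                }
            });
        }
        synchronized (monitor) {
            // Wait with a timeout and re-check the counter, as the GraphChi
            // examples do, so a missed notification cannot hang the
            // coordinator forever.
            while (pending.get() > 0) {
                monitor.wait(1500);
            }
        }
        pool.shutdown();
    }
}

In new code, java.util.concurrent.CountDownLatch expresses this more directly; the wait/notifyAll form is shown here because it is what the examples above use.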