Example usage for java.util.concurrent Semaphore release

List of usage examples for java.util.concurrent Semaphore release

Introduction

On this page you can find example usage of java.util.concurrent Semaphore release.

Prototype

public void release() 

Source Link

Document

Releases a permit, returning it to the semaphore.

Usage

From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java

@Test
public void testRespectsBufferMax() throws InterruptedException {
    // State shared between the test thread, the HTTP stub and the sink's event handler.
    final AtomicInteger discarded = new AtomicInteger(0);
    final AtomicInteger received = new AtomicInteger(0);
    final Semaphore firstEventGate = new Semaphore(0);
    final Semaphore resumeGate = new Semaphore(0);
    // Starts negative so multiple releases are required before acquire() can succeed.
    final Semaphore completionGate = new Semaphore(-2);

    _wireMockRule.stubFor(WireMock.requestMatching(new RequestValueMatcher(r -> {
        received.incrementAndGet();

        // No annotations or dimensions are expected on any record.
        Assert.assertEquals(0, r.getAnnotationsCount());
        Assert.assertEquals(0, r.getDimensionsCount());

        // Every record carries exactly the sample values recorded below.
        assertSample(r.getTimersList(), "timer", 7d);
        assertSample(r.getCountersList(), "counter", 8d);
        assertSample(r.getGaugesList(), "gauge", 9d);
    })).willReturn(WireMock.aResponse().withStatus(200)));

    final Sink sink = new ApacheHttpSink.Builder()
            .setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH))
            .setMaxBatchSize(2)
            .setParallelism(1)
            .setBufferSize(5)
            .setEmptyQueueInterval(Duration.ofMillis(1000))
            .setEventHandler(
                    new RespectsMaxBufferEventHandler(firstEventGate, resumeGate, completionGate, discarded))
            .build();

    final TsdEvent event = new TsdEvent(Collections.emptyMap(),
            createQuantityMap("timer", TsdQuantity.newInstance(7d, null)),
            createQuantityMap("counter", TsdQuantity.newInstance(8d, null)),
            createQuantityMap("gauge", TsdQuantity.newInstance(9d, null)));

    // Record a single event and block until the handler has seen it; this
    // synchronizes the test with the sink's worker before the buffer is filled.
    sink.record(event);
    firstEventGate.acquire();

    // Queue the events under test.
    int queued = 0;
    while (queued < 10) {
        sink.record(event);
        ++queued;
    }
    resumeGate.release();
    completionGate.acquire();

    // With a buffer size of 5, five of the ten queued events must have been dropped.
    Assert.assertEquals(5, discarded.get());

    // The synchronization event plus the five buffered events were delivered.
    Assert.assertEquals(6, received.get());

    // Matcher for the posts the sink should have made.
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert the exact number of HTTP posts and that nothing unexpected arrived.
    _wireMockRule.verify(4, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());
}

From source file:org.knime.al.util.noveltydetection.kernel.KernelCalculator.java

/**
 * Calculates the kernel matrix between all training and test rows in parallel on the
 * global KNIME thread pool.
 *
 * <p>One runnable is enqueued per test column. A semaphore sized to roughly two thirds
 * of the available processors bounds how many runnables are in flight at once.
 * Cancellation is checked before each enqueue and before each join; on cancellation all
 * already-submitted futures are cancelled before the exception propagates.</p>
 *
 * @param training the training rows (one row per instance)
 * @param test the test rows (one row per instance)
 * @param progMon monitor used for progress reporting and cancellation checks
 * @return the kernel matrix with dimensions {@code training.length x test.length}
 * @throws Exception if the computation is cancelled or a worker fails
 */
public RealMatrix calculateKernelMatrix(final double[][] training, final double[][] test,
        final ExecutionMonitor progMon) throws Exception {

    final ThreadPool pool = KNIMEConstants.GLOBAL_THREAD_POOL;
    // Use roughly two thirds of the cores, leaving headroom for other KNIME work.
    final int procCount = (int) (Runtime.getRuntime().availableProcessors() * (2.0 / 3));
    final Semaphore semaphore = new Semaphore(procCount);
    // runInvisible lets this pool thread block on sub-jobs without counting against
    // the pool's thread budget. The former catch-and-rethrow wrapper around this call
    // was a no-op and has been removed.
    return pool.runInvisible(new Callable<RealMatrix>() {

        @Override
        public RealMatrix call() throws Exception {
            final double[][] resultArrayMatrix = new double[training.length][test.length];
            final CalculateKernelValuesRunnable[] kct = new CalculateKernelValuesRunnable[test.length];
            final int numberOfRunnables = kct.length;
            // One runnable per test column, each computing a full column of the matrix.
            // NOTE(review): assumes each runnable releases its semaphore permit on
            // completion — confirm in CalculateKernelValuesRunnable.
            for (int i = 0; i < numberOfRunnables; i++) {
                kct[i] = new CalculateKernelValuesRunnable(0, training.length, i, i + 1, training, test,
                        resultArrayMatrix, m_kernelFunction, semaphore);
            }
            final Future<?>[] threads = new Future<?>[numberOfRunnables];
            double progCounter = 0;
            for (int i = 0; i < numberOfRunnables; i++) {
                try {
                    progMon.checkCanceled();
                } catch (final Exception e) {
                    // Cancelled: stop everything submitted so far before propagating.
                    for (int j = 0; j < i; j++) {
                        if (threads[j] != null) {
                            threads[j].cancel(true);
                        }
                    }
                    throw e;
                }
                // Throttle submissions to at most procCount concurrently running jobs.
                semaphore.acquire();
                threads[i] = pool.enqueue(kct[i]);
                progMon.setProgress(progCounter / (2 * numberOfRunnables),
                        "Kernel calculation started (" + i + "/" + numberOfRunnables + ")");
                progCounter += 1;
            }
            for (int i = 0; i < numberOfRunnables; i++) {
                try {
                    progMon.checkCanceled();
                } catch (final Exception e) {
                    // Cancelled during the join phase: cancel all futures.
                    for (int j = 0; j < numberOfRunnables; j++) {
                        if (threads[j] != null) {
                            threads[j].cancel(true);
                        }
                    }
                    throw e;
                }
                // Borrow a permit while joining, then hand it straight back.
                semaphore.acquire();
                threads[i].get();
                semaphore.release();
                progMon.setProgress(progCounter / (2 * numberOfRunnables),
                        "Kernel calculation finished (" + i + "/" + numberOfRunnables + ")");
                progCounter += 1;
            }
            return MatrixUtils.createRealMatrix(resultArrayMatrix);
        }

    });
}

From source file:org.commoncrawl.service.crawler.CrawlSegmentLog.java

/**
 * Syncs the incoming segment against the local crawl log and then sends it up to the
 * history server.
 *
 * <p>Reconciliation order: (1) promote any checkpoint log to the active log, (2)
 * reconcile the active log locally, (3) short-circuit if the HDFS completion marker
 * exists, otherwise (4) run a blocking bulk history query against the history server.
 * Finally the remaining-URL counters are updated and the segment may be marked
 * complete.</p>
 *
 * @param segmentDetail fingerprint map for the segment; updated in place
 * @return the number of items processed, or -1 if the segment was already complete
 * @throws IOException on local or HDFS file system errors
 */
public int syncToLog(CrawlSegmentFPMap segmentDetail) throws IOException {
    if (Environment.detailLogEnabled())
        LOG.info("### SYNC: List:" + _listId + " Segment:" + _segmentId + " Syncing Progress Log");

    int itemsProcessed = 0;

    // and construct a path to the local crawl segment directory ... 
    File activeLogPath = buildActivePath(_rootDataDir, _listId, _segmentId);
    File checkpointLogPath = buildCheckpointPath(_rootDataDir, _listId, _segmentId);

    // check if it exists ... 
    if (checkpointLogPath.exists()) {
        // log it ... 
        if (Environment.detailLogEnabled())
            LOG.info("### SYNC: List:" + _listId + " Segment:" + _segmentId + " Checkpoint Log Found");
        // rename it as the active log ... a checkpoint supersedes any stale active log
        checkpointLogPath.renameTo(activeLogPath);
    }

    if (activeLogPath.exists()) {
        // reconcile against active log (if it exists) ...
        _localLogItemCount = reconcileLogFile(FileSystem.getLocal(CrawlEnvironment.getHadoopConfig()),
                new Path(activeLogPath.getAbsolutePath()), _listId, _segmentId, segmentDetail, null);
        if (Environment.detailLogEnabled())
            LOG.info("### SYNC: List:" + _listId + " Segment:" + _segmentId
                    + " Reconciled Local Log File with ProcessedItemCount:" + _localLogItemCount);
        itemsProcessed += _localLogItemCount;
    }

    FileSystem hdfs = CrawlEnvironment.getDefaultFileSystem();

    // first things first ... check to see if special completion log file exists in hdfs 
    Path hdfsSegmentCompletionLogPath = new Path(
            CrawlEnvironment.getCrawlSegmentDataDirectory() + "/" + getListId() + "/" + getSegmentId() + "/"
                    + CrawlEnvironment.buildCrawlSegmentCompletionLogFileName(getNodeName()));

    if (hdfs.exists(hdfsSegmentCompletionLogPath)) {
        if (Environment.detailLogEnabled())
            LOG.info("### SYNC: List:" + _listId + " Segment:" + _segmentId
                    + " Completion File Found. Marking Segment Complete");
        // if the file exists then this segment has been crawled and uploaded already ... 
        // if active log file exists ... delete it ... 
        if (activeLogPath.exists())
            activeLogPath.delete();
        //reset local log item count ... 
        _localLogItemCount = 0;
        // -1 signals to the caller that the segment was already complete
        itemsProcessed = -1;

        // remove all hosts from segment
        segmentDetail._urlsComplete = segmentDetail._urlCount;
    } else {

        if (segmentDetail != null) {
            if (Environment.detailLogEnabled())
                LOG.info("### SYNC: Building BulkItem History Query for List:" + _listId + " Segment:"
                        + _segmentId);
            BulkItemHistoryQuery query = buildHistoryQueryBufferFromMap(segmentDetail);

            if (query != null) {
                // create blocking semaphore ... one permit, immediately taken, so the
                // second acquire below blocks until the async callback releases it
                final Semaphore semaphore = new Semaphore(1);
                semaphore.acquireUninterruptibly();
                if (Environment.detailLogEnabled())
                    LOG.info("### SYNC: Dispatching query to history server");
                //create an outer response object we can pass aysnc response to ... 
                final BulkItemHistoryQueryResponse outerResponse = new BulkItemHistoryQueryResponse();

                CrawlerServer.getServer().getHistoryServiceStub().bulkItemQuery(query,
                        new Callback<BulkItemHistoryQuery, BulkItemHistoryQueryResponse>() {

                            @Override
                            public void requestComplete(
                                    final AsyncRequest<BulkItemHistoryQuery, BulkItemHistoryQueryResponse> request) {
                                // response returns in async thread context ... 
                                if (request.getStatus() == Status.Success) {
                                    if (Environment.detailLogEnabled())
                                        LOG.info(
                                                "###SYNC: bulk Query to history server succeeded. setting out resposne");
                                    ImmutableBuffer buffer = request.getOutput().getResponseList();
                                    // copy the response bytes into the outer object before unblocking
                                    outerResponse.setResponseList(
                                            new Buffer(buffer.getReadOnlyBytes(), 0, buffer.getCount()));
                                } else {
                                    LOG.error("###SYNC: bulk Query to history server failed.");

                                }
                                // release semaphore — unblocks the loader thread below
                                // (also on failure, so the loader never hangs)
                                semaphore.release();
                            }
                        });
                LOG.info("###SYNC: Loader thread blocked waiting for bulk query response");
                semaphore.acquireUninterruptibly();
                LOG.info("###SYNC: Loader thread received response from history server");

                if (outerResponse.getResponseList().getCount() == 0) {
                    LOG.error("###SYNC: History Server Bulk Query Returned NULL!!! for List:" + _listId
                            + " Segment:" + _segmentId);
                } else {
                    // ok time to process the response and integrate the results into the fp list 
                    updateFPMapFromBulkQueryResponse(segmentDetail, outerResponse);
                }
            } else {
                if (Environment.detailLogEnabled())
                    LOG.warn("### SYNC: No fingerprints found when processing segment detail for List:"
                            + _listId + " Segment:" + _segmentId);
                segmentDetail._urlsComplete = segmentDetail._urlCount;
            }
        }
        /*
        // and now walk hdfs looking for any checkpointed logs ...
        // scan based on checkpoint filename ... 
        FileStatus[] remoteCheckpointFiles = hdfs.globStatus(new Path(CrawlEnvironment.getCrawlSegmentDataDirectory() + "/" + getListId() + "/"
            + getSegmentId() + "/" + CrawlEnvironment.buildCrawlSegmentLogCheckpointWildcardString(getNodeName())));
                
        if (remoteCheckpointFiles != null) {
                
          LOG.info("### SYNC: List:"+ _listId + " Segment:" + _segmentId +" Found Remote Checkpoint Files");
                  
          // create a temp file to hold the reconciled log ... 
          File consolidatedLogFile = null;
                  
          if (remoteCheckpointFiles.length > 1) { 
            // create temp log file ... 
            consolidatedLogFile = File.createTempFile("SegmentLog", Long.toString(System.currentTimeMillis()));
            // write out header ... 
            CrawlSegmentLog.writeHeader(consolidatedLogFile,0);
          }
          // walk the files 
          for(FileStatus checkpointFilePath : remoteCheckpointFiles) {
            // and reconcile them against segment ... 
            itemsProcessed += reconcileLogFile(hdfs,checkpointFilePath.getPath(),getListId(),getSegmentId(),segmentDetail,consolidatedLogFile);
            LOG.info("### SYNC: List:"+ _listId + " Segment:" + _segmentId +" Processed Checkpoint File:" + checkpointFilePath.getPath() + " Items Processed:" + itemsProcessed);          
          }
                  
          // finally ... if consolidatedLogFile is not null 
          if (consolidatedLogFile != null) { 
            // build a new hdfs file name ... 
            Path consolidatedHDFSPath = new Path(CrawlEnvironment.getCrawlSegmentDataDirectory() + "/" + getListId() + "/" + getSegmentId() + "/" + CrawlEnvironment.buildCrawlSegmentLogCheckpointFileName(getNodeName(), System.currentTimeMillis()));
            LOG.info("### SYNC: List:"+ _listId + " Segment:" + _segmentId +" Writing Consolidated Log File:" + consolidatedHDFSPath + " to HDFS");         
            // and copy local file to log ... 
            hdfs.copyFromLocalFile(new Path(consolidatedLogFile.getAbsolutePath()),consolidatedHDFSPath);
            // and delete all previous log file entries ... 
            for (FileStatus oldCheckPointFile : remoteCheckpointFiles) { 
              hdfs.delete(oldCheckPointFile.getPath());
            }
            consolidatedLogFile.delete();
          }
        }
        */
    }

    if (segmentDetail != null) {
        _remainingURLS += (segmentDetail._urlCount - segmentDetail._urlsComplete);
        // mark url count as valid now ...
        _urlCountValid = true;

        // now if remaining url count is zero ... then mark the segment as complete ... 
        if (_remainingURLS == 0 && _localLogItemCount == 0) {
            _segmentComplete = true;
        }
    }
    // NOTE(review): segmentDetail is null-checked above but dereferenced
    // unconditionally in this log line — possible NPE if callers can pass null.
    if (Environment.detailLogEnabled())
        LOG.info("### SYNC: List:" + _listId + " Segment:" + _segmentId
                + " Done Syncing Progress Log TotalURLS:" + segmentDetail._urlCount + " RemainingURLS:"
                + _remainingURLS + " LocalLogItemCount:" + _localLogItemCount);

    return itemsProcessed;
}

From source file:org.commoncrawl.service.listcrawler.CacheManager.java

/**
 * Flushes the head of the local transaction log: rewrites the active log without the
 * first {@code bytesToRemove} bytes / {@code itemsToRemove} items, registers the newly
 * built HDFS index files, and asynchronously purges the flushed fingerprints from the
 * in-memory map.
 *
 * <p>Runs under exclusive access: all {@code LOG_ACCESS_SEMAPHORE_COUNT} permits of the
 * log-access semaphore are held for the duration and released in the {@code finally}.</p>
 *
 * @param bytesToRemove number of payload bytes to drop from the head of the log
 * @param itemsToRemove number of items those bytes correspond to
 * @param flushedTupleList fingerprint/offset tuples that were flushed to HDFS
 * @param tempFileTriples local index / data file pairs to register
 */
private final void flushLocalLog(final long bytesToRemove, final int itemsToRemove,
        final List<FingerprintAndOffsetTuple> flushedTupleList,
        final ArrayList<IndexDataFileTriple> tempFileTriples) {

    LOG.info("Acquiring Log Access Semaphores");
    // first boost this thread's priority ... 
    int originalThreadPriority = Thread.currentThread().getPriority();
    Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
    // next acquire all permits to the local access log ... block until we get there ... 
    getLocalLogAccessSemaphore().acquireUninterruptibly(LOG_ACCESS_SEMAPHORE_COUNT);
    // now that we have all the semaphores we need, reduce the thread's priority to normal
    Thread.currentThread().setPriority(originalThreadPriority);
    LOG.info("Acquired ALL Log Access Semaphores");

    long timeStart = System.currentTimeMillis();

    // now we have exclusive access to the local transaction log ... 
    File activeLogFilePath = getActiveLogFilePath();
    File checkpointLogFilePath = getCheckpointLogFilePath();
    try {
        // delete checkpoint file if it existed ... 
        checkpointLogFilePath.delete();
        // now rename activelog to checkpoint path 
        activeLogFilePath.renameTo(checkpointLogFilePath);

        long logFileConsolidationStartTime = System.currentTimeMillis();
        // now trap for exceptions in case something fails 
        try {
            // fix up the header ... 
            _header._fileSize -= bytesToRemove;
            _header._itemCount -= itemsToRemove;

            // open a old file and new file 
            RandomAccessFile newFile = new RandomAccessFile(activeLogFilePath, "rw");
            RandomAccessFile oldFile = new RandomAccessFile(checkpointLogFilePath, "r");

            LOG.info("Opened new and old files. New Header FileSize is:" + _header._fileSize + " ItemCount:"
                    + _header._itemCount);
            try {
                // write out header ...
                long bytesRemainingInLogFile = _header._fileSize;

                LOG.info("Writing Header to New File. Bytes Remaining for Data are:" + bytesRemainingInLogFile);
                // write header to new file ... 
                _header.writeHeader(newFile);
                // decrement bytes available ... 
                bytesRemainingInLogFile -= LocalLogFileHeader.SIZE;

                if (bytesRemainingInLogFile != 0) {
                    byte transferBuffer[] = new byte[(1 << 20) * 16];
                    // BUGFIX: parenthesize the sum — the original logged the two
                    // numbers concatenated instead of the actual seek position.
                    LOG.info("Seeking old file past flushed data (pos:"
                            + (LocalLogFileHeader.SIZE + bytesToRemove) + ")");
                    // seek past old data ... 
                    oldFile.seek(LocalLogFileHeader.SIZE + bytesToRemove);
                    // and copy across remaining data 
                    while (bytesRemainingInLogFile != 0) {
                        int bytesToReadWriteThisIteration = Math.min((int) bytesRemainingInLogFile,
                                transferBuffer.length);
                        // BUGFIX: readFully instead of read — read() may return fewer
                        // bytes than requested, which would have written stale buffer
                        // content into the new log. readFully throws EOFException
                        // (an IOException) on truncation, triggering the rollback below.
                        oldFile.readFully(transferBuffer, 0, bytesToReadWriteThisIteration);
                        newFile.write(transferBuffer, 0, bytesToReadWriteThisIteration);
                        LOG.info("Copied " + bytesToReadWriteThisIteration + " from Old to New");
                        bytesRemainingInLogFile -= bytesToReadWriteThisIteration;
                    }
                }
            } finally {
                if (newFile != null) {
                    newFile.close();
                }
                if (oldFile != null) {
                    oldFile.close();
                }
            }
            // if we reached here then checkpoint was successfull ... 
            LOG.info("Checkpoint - Log Consolidation Successfull! TOOK:"
                    + (System.currentTimeMillis() - logFileConsolidationStartTime));

            LOG.info("Loading Index Files");
            for (IndexDataFileTriple triple : tempFileTriples) {
                LOG.info("Loading Index File:" + triple._localIndexFilePath);
                final HDFSFileIndex fileIndex = new HDFSFileIndex(_remoteFileSystem, triple._localIndexFilePath,
                        triple._dataFilePath);
                LOG.info("Loaded Index File");
                // update hdfs index list ... 
                synchronized (CacheManager.this) {
                    LOG.info("Adding HDFS Index to list");
                    _hdfsIndexList.addElement(fileIndex);
                }
            }

            // create a semaphore to wait on 
            final Semaphore semaphore = new Semaphore(0);

            LOG.info("Scheduling Async Event");
            // now we need to schedule an async call to main thread to update data structures safely ... 
            _eventLoop.setTimer(new Timer(0, false, new Timer.Callback() {

                @Override
                public void timerFired(Timer timer) {
                    LOG.info("Cleaning Map");

                    synchronized (CacheManager.this) {
                        // walk tuples 
                        for (FingerprintAndOffsetTuple tuple : flushedTupleList) {
                            //TODO: HACK!
                            // remove from collection ... 
                            _fingerprintToLocalLogPos.removeAll(tuple._fingerprint);
                        }
                    }
                    LOG.info("Increment Offset Info");
                    // finally increment locallog offset by bytes removed ... 
                    _localLogStartOffset += bytesToRemove;

                    LOG.info("Releasing Wait Semaphore");
                    // release wait semaphore so the flushing thread can proceed
                    semaphore.release();
                }
            }));

            LOG.info("Waiting for Async Event to Complete");
            //wait for async operation to complete ...
            semaphore.acquireUninterruptibly();

            LOG.info("Async Event to Completed");
        } catch (IOException e) {
            LOG.error("Checkpoint Failed with Exception:" + CCStringUtils.stringifyException(e));
            // roll back: delete the partially written new file ... 
            activeLogFilePath.delete();
            // and rename checkpoint file back to active file ... 
            checkpointLogFilePath.renameTo(activeLogFilePath);
        }
    } finally {
        LOG.info("Releasing ALL Log Access Semaphores. HELD FOR:" + (System.currentTimeMillis() - timeStart));
        getLocalLogAccessSemaphore().release(LOG_ACCESS_SEMAPHORE_COUNT);
    }
}

From source file:org.apache.solr.request.SimpleFacets.java

License: (not specified)

/**
 * Returns a list of value constraints and the associated facet counts 
 * for each facet field specified in the params.
 *
 * @see FacetParams#FACET_FIELD/*from   w w w  .  j  a v  a 2  s.  co  m*/
 * @see #getFieldMissingCount
 * @see #getFacetTermEnumCounts
 */
@SuppressWarnings("unchecked")
public NamedList<Object> getFacetFieldCounts() throws IOException, SyntaxError {

    NamedList<Object> res = new SimpleOrderedMap<>();
    String[] facetFs = global.getParams(FacetParams.FACET_FIELD);
    if (null == facetFs) {
        return res;
    }

    // Passing a negative number for FACET_THREADS implies an unlimited number of threads is acceptable.
    // Also, a subtlety of directExecutor is that no matter how many times you "submit" a job, it's really
    // just a method call in that it's run by the calling thread.
    int maxThreads = req.getParams().getInt(FacetParams.FACET_THREADS, 0);
    Executor executor = maxThreads == 0 ? directExecutor : facetExecutor;
    final Semaphore semaphore = new Semaphore((maxThreads <= 0) ? Integer.MAX_VALUE : maxThreads);
    List<Future<NamedList>> futures = new ArrayList<>(facetFs.length);

    if (fdebugParent != null) {
        fdebugParent.putInfoItem("maxThreads", maxThreads);
    }

    try {
        //Loop over fields; submit to executor, keeping the future
        for (String f : facetFs) {
            if (fdebugParent != null) {
                fdebug = new FacetDebugInfo();
                fdebugParent.addChild(fdebug);
            }
            final ParsedParams parsed = parseParams(FacetParams.FACET_FIELD, f);
            final SolrParams localParams = parsed.localParams;
            final String termList = localParams == null ? null : localParams.get(CommonParams.TERMS);
            final String key = parsed.key;
            final String facetValue = parsed.facetValue;
            Callable<NamedList> callable = () -> {
                try {
                    NamedList<Object> result = new SimpleOrderedMap<>();
                    if (termList != null) {
                        List<String> terms = StrUtils.splitSmart(termList, ",", true);
                        result.add(key, getListedTermCounts(facetValue, parsed, terms));
                    } else {
                        result.add(key, getTermCounts(facetValue, parsed));
                    }
                    return result;
                } catch (SolrException se) {
                    throw se;
                } catch (Exception e) {
                    throw new SolrException(ErrorCode.SERVER_ERROR,
                            "Exception during facet.field: " + facetValue, e);
                } finally {
                    semaphore.release();
                }
            };

            RunnableFuture<NamedList> runnableFuture = new FutureTask<>(callable);
            semaphore.acquire();//may block and/or interrupt
            executor.execute(runnableFuture);//releases semaphore when done
            futures.add(runnableFuture);
        } //facetFs loop

        //Loop over futures to get the values. The order is the same as facetFs but shouldn't matter.
        for (Future<NamedList> future : futures) {
            res.addAll(future.get());
        }
        assert semaphore.availablePermits() >= maxThreads;
    } catch (InterruptedException e) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                "Error while processing facet fields: InterruptedException", e);
    } catch (ExecutionException ee) {
        Throwable e = ee.getCause();//unwrap
        if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        }
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                "Error while processing facet fields: " + e.toString(), e);
    }

    return res;
}

From source file:fur.shadowdrake.minecraft.InstallPanel.java

/**
 * Fetches the update-instruction listing for the given pack over the FTP data
 * channel and returns it split into individual lines, or {@code null} on failure.
 * Retries the whole exchange once per reconnect after a timeout.
 */
private List<String> fetchUpdateInstructions(Pack pack) throws NetworkException {
    // Released by the UINS completion callback once the data channel is closed.
    final Semaphore done = new Semaphore(0);
    final StringBuffer response = new StringBuffer();
    while (true) {
        result = ftpClient.openDataChannel((ActionEvent e) -> {
            if (e.getID() == FtpClient.FTP_OK) {
                try {
                    // Drain the data channel into the shared buffer.
                    final InputStreamReader reader = new InputStreamReader(
                            ((Socket) e.getSource()).getInputStream());
                    final char[] chunk = new char[4096];
                    for (int count = reader.read(chunk); count >= 0; count = reader.read(chunk)) {
                        response.append(chunk, 0, count);
                    }
                } catch (IOException ex) {
                    Logger.getLogger(InstallPanel.class.getName()).log(Level.SEVERE, "Download", ex);
                    log.println("Faild to save file.");
                    ftpClient.closeDataChannel();
                }
            }
        });
        if (result == FtpClient.FTP_OK) {
            final int status = ftpClient.uins(pack, (ActionEvent e) -> {
                ftpClient.closeDataChannel();
                done.release();
            });
            if (status == FtpClient.FTP_OK) {
                try {
                    // Block until the UINS callback signals completion.
                    done.acquire();
                } catch (InterruptedException ex) {
                    return null;
                }
            } else if (status == FtpClient.FTP_NODATA) {
                log.println("Oops! Server's complaining about missing data channel, although I've opened it.");
                ftpClient.abandonDataChannel();
                return null;
            } else {
                ftpClient.abandonDataChannel();
                return null;
            }
        } else if (result == FtpClient.FTP_TIMEOUT) {
            // Try to re-establish the connection and start over.
            if (reconnect()) {
                continue;
            }
            return null;
        } else {
            return null;
        }
        break;
    }
    return Arrays.asList(response.toString().split("\n"));
}

From source file:fur.shadowdrake.minecraft.InstallPanel.java

/**
 * Downloads the named archive over the FTP data channel and untars it into the
 * working directory. Returns {@code true} when the archive was received and
 * unpacked, {@code false} on any failure. Retries the whole exchange once per
 * reconnect after a timeout.
 */
private boolean downloadArchive(String filename) throws NetworkException {
    // semaphore1 gates the reader thread until the download size is known and
    // the progress bar is configured; semaphore2 signals RETR completion.
    final Semaphore semaphore1 = new Semaphore(0);
    final Semaphore semaphore2 = new Semaphore(0);
    success = false;
    log.setIndeterminate();
    while (true) {
        result = ftpClient.openDataChannel((ActionEvent e) -> {
            if (e.getID() == FtpClient.FTP_OK) {
                try {
                    // Wait for the main thread to finish progress-bar setup below.
                    semaphore1.acquire();
                    InputStream is;
                    is = ((Socket) e.getSource()).getInputStream();
                    downloadedFiles = unTar(is, new File(workingDir));
                    success = true;
                } catch (IOException ex) {
                    Logger.getLogger(InstallPanel.class.getName()).log(Level.SEVERE, "Download", ex);
                    log.println("Faild to save file.");
                    ftpClient.closeDataChannel();
                    success = false;
                } catch (ArchiveException | InterruptedException ex) {
                    Logger.getLogger(InstallPanel.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        });

        switch (result) {
        case FtpClient.FTP_OK:
            // Issue the RETR; its callback fires when the transfer is complete.
            downloadSize = ftpClient.retr(filename, (ActionEvent e) -> {
                ftpClient.closeDataChannel();
                semaphore2.release();
            });
            if (downloadSize >= 0) {
                // Report an approximate size in MB or kB.
                if (downloadSize > 1048576) {
                    log.println("~" + Integer.toString(downloadSize / 1048576) + " MB");
                } else if (downloadSize > 1024) {
                    log.println("~" + Integer.toString(downloadSize / 1024) + " kB");
                }
                log.reset();
                log.showPercentage(true);
                log.setMaximum(downloadSize);
                // Let the data-channel thread start consuming the stream.
                semaphore1.release();
                try {
                    // Block until the RETR callback signals completion.
                    semaphore2.acquire();
                } catch (InterruptedException ex) {
                    return false;
                }
            } else {
                // Negative downloadSize is an error code from retr().
                switch (downloadSize) {
                case FtpClient.FTP_NODATA:
                    log.println(
                            "Oops! Server's complaining about missing data channel, although I've opened it.");
                    ftpClient.abandonDataChannel();
                    return false;
                default:
                    ftpClient.abandonDataChannel();
                    return false;
                }
            }
            break;
        case FtpClient.FTP_TIMEOUT:
            // Try to re-establish the connection and start over.
            if (reconnect()) {
                continue;
            } else {
                return false;
            }
        default:
            return false;
        }

        break;
    }
    return success;
}

From source file:org.apache.hadoop.hdfs.server.blockmanagement.TestBlockReportRateLimiting.java

/**
 * Verifies that full block report (FBR) rate limiting is enforced while
 * datanodes start up: with the max-lease config set to 1, at most one lease
 * may be outstanding at any moment, and every incoming FBR RPC must carry a
 * non-zero lease ID.
 *
 * The fault injector blocks every incoming FBR on {@code fbrSem}, so the
 * test admits reports one at a time by releasing a single permit per loop
 * iteration and then waiting for exactly one more report to land.
 */
@Test(timeout = 180000)
public void testRateLimitingDuringDataNodeStartup() throws Exception {
    Configuration conf = new Configuration();
    // Only one FBR lease may be outstanding at a time.
    conf.setInt(DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES, 1);
    // 20-minute lease: long enough that no lease can expire mid-test.
    conf.setLong(DFS_NAMENODE_FULL_BLOCK_REPORT_LEASE_LENGTH_MS, 20L * 60L * 1000L);

    // Gate: each incoming FBR RPC blocks here until the main loop releases a permit.
    final Semaphore fbrSem = new Semaphore(0);
    // DNs that were granted a lease but whose report has not yet been processed.
    final HashSet<DatanodeID> expectedFbrDns = new HashSet<>();
    // DNs whose FBR has been fully processed.
    final HashSet<DatanodeID> fbrDns = new HashSet<>();
    // First failure message observed; the empty string means success.
    final AtomicReference<String> failure = new AtomicReference<String>("");

    final BlockManagerFaultInjector injector = new BlockManagerFaultInjector() {
        // Count of currently outstanding leases; guarded by "this".
        private int numLeases = 0;

        @Override
        public void incomingBlockReportRpc(DatanodeID nodeID, BlockReportContext context) throws IOException {
            LOG.info("Incoming full block report from " + nodeID + ".  Lease ID = 0x"
                    + Long.toHexString(context.getLeaseId()));
            // Lease ID 0 means the DN bypassed rate limiting entirely.
            if (context.getLeaseId() == 0) {
                setFailure(failure,
                        "Got unexpected rate-limiting-" + "bypassing full block report RPC from " + nodeID);
            }
            // Block until the test loop explicitly admits this report.
            fbrSem.acquireUninterruptibly();
            synchronized (this) {
                fbrDns.add(nodeID);
                // A report from a DN we did not grant a lease to is a failure.
                if (!expectedFbrDns.remove(nodeID)) {
                    setFailure(failure, "Got unexpected full block report " + "RPC from " + nodeID
                            + ".  expectedFbrDns = " + Joiner.on(", ").join(expectedFbrDns));
                }
                LOG.info("Proceeding with full block report from " + nodeID + ".  Lease ID = 0x"
                        + Long.toHexString(context.getLeaseId()));
            }
        }

        @Override
        public void requestBlockReportLease(DatanodeDescriptor node, long leaseId) {
            // leaseId 0 means no lease was actually granted; nothing to track.
            if (leaseId == 0) {
                return;
            }
            synchronized (this) {
                numLeases++;
                expectedFbrDns.add(node);
                LOG.info("requestBlockReportLease(node=" + node + ", leaseId=0x" + Long.toHexString(leaseId)
                        + ").  " + "expectedFbrDns = " + Joiner.on(", ").join(expectedFbrDns));
                // The core assertion: the configured max of 1 lease is respected.
                if (numLeases > 1) {
                    setFailure(failure, "More than 1 lease was issued at once.");
                }
            }
        }

        @Override
        public void removeBlockReportLease(DatanodeDescriptor node, long leaseId) {
            LOG.info("removeBlockReportLease(node=" + node + ", leaseId=0x" + Long.toHexString(leaseId) + ")");
            synchronized (this) {
                numLeases--;
            }
        }
    };
    BlockManagerFaultInjector.instance = injector;

    final int NUM_DATANODES = 5;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
    cluster.waitActive();
    for (int n = 1; n <= NUM_DATANODES; n++) {
        LOG.info("Waiting for " + n + " datanode(s) to report in.");
        // Admit exactly one more full block report.
        fbrSem.release();
        // Brief pause to give a blocked DN a chance to proceed before polling.
        Uninterruptibles.sleepUninterruptibly(20, TimeUnit.MILLISECONDS);
        final int currentN = n;
        GenericTestUtils.waitFor(new Supplier<Boolean>() {
            @Override
            public Boolean get() {
                synchronized (injector) {
                    // More reports than permits released means the gate leaked.
                    if (fbrDns.size() > currentN) {
                        setFailure(failure,
                                "Expected at most " + currentN
                                        + " datanodes to have sent a block report, but actually "
                                        + fbrDns.size() + " have.");
                    }
                    return (fbrDns.size() >= currentN);
                }
            }
        }, 25, 50000);
    }
    cluster.shutdown();
    // An empty failure string means every check above passed.
    Assert.assertEquals("", failure.get());
}

From source file:org.telegram.ui.ProfileActivity.java

/**
 * Initializes the profile screen for either a single user or a chat,
 * registering the notification observers and kicking off the data loads the
 * screen needs.
 *
 * Exactly one of {@code user_id} / {@code chat_id} must be non-zero; when
 * neither is set (or required data cannot be loaded) the fragment refuses to
 * be created.
 *
 * @return {@code true} if the fragment may be shown, {@code false} otherwise.
 */
@Override
public boolean onFragmentCreate() {
    user_id = arguments.getInt("user_id", 0);
    chat_id = getArguments().getInt("chat_id", 0);
    if (user_id != 0) {
        dialog_id = arguments.getLong("dialog_id", 0);
        if (dialog_id != 0) {
            // The high 32 bits of an encrypted dialog id hold the encrypted chat id.
            currentEncryptedChat = MessagesController.getInstance().getEncryptedChat((int) (dialog_id >> 32));
        }
        TLRPC.User user = MessagesController.getInstance().getUser(user_id);
        if (user == null) {
            // User not cached; nothing to show.
            return false;
        }
        // NOTE(review): updateInterfaces is also registered unconditionally near
        // the bottom of this method — presumably NotificationCenter tolerates
        // duplicate registration; verify before deduplicating.
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.updateInterfaces);
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.contactsDidLoaded);
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.encryptedChatCreated);
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.encryptedChatUpdated);
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.blockedUsersDidLoaded);
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.botInfoDidLoaded);
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.userInfoDidLoaded);
        if (currentEncryptedChat != null) {
            NotificationCenter.getInstance().addObserver(this, NotificationCenter.didReceivedNewMessages);
        }
        userBlocked = MessagesController.getInstance().blockedUsers.contains(user_id);
        if (user.bot) {
            BotQuery.loadBotInfo(user.id, true, classGuid);
        }
        MessagesController.getInstance().loadFullUser(MessagesController.getInstance().getUser(user_id),
                classGuid, true);
        participantsMap = null;
    } else if (chat_id != 0) {
        currentChat = MessagesController.getInstance().getChat(chat_id);
        if (currentChat == null) {
            // Chat not in the memory cache: load it synchronously from storage.
            // The storage queue runs on its own thread, so block on a semaphore
            // until the lookup completes.
            final Semaphore semaphore = new Semaphore(0);
            MessagesStorage.getInstance().getStorageQueue().postRunnable(new Runnable() {
                @Override
                public void run() {
                    currentChat = MessagesStorage.getInstance().getChat(chat_id);
                    semaphore.release();
                }
            });
            try {
                semaphore.acquire();
            } catch (InterruptedException e) {
                // Restore the interrupt status instead of swallowing it so code
                // further up the stack can still observe the interruption.
                // (Previously this caught Exception broadly and dropped the flag.)
                Thread.currentThread().interrupt();
                FileLog.e("tmessages", e);
            }
            if (currentChat != null) {
                MessagesController.getInstance().putChat(currentChat, true);
            } else {
                // Chat unknown even to storage; cannot show the profile.
                return false;
            }
        }

        if (currentChat.megagroup) {
            getChannelParticipants(true);
        } else {
            participantsMap = null;
        }
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.chatInfoDidLoaded);

        sortedUsers = new ArrayList<>();
        updateOnlineCount();

        avatarUpdater = new AvatarUpdater();
        avatarUpdater.delegate = new AvatarUpdater.AvatarUpdaterDelegate() {
            @Override
            public void didUploadedPhoto(TLRPC.InputFile file, TLRPC.PhotoSize small, TLRPC.PhotoSize big) {
                if (chat_id != 0) {
                    MessagesController.getInstance().changeChatAvatar(chat_id, file);
                }
            }
        };
        avatarUpdater.parentFragment = this;

        if (ChatObject.isChannel(currentChat)) {
            MessagesController.getInstance().loadFullChat(chat_id, classGuid, true);
        }
    } else {
        // Neither a user nor a chat id was supplied.
        return false;
    }

    // Preload shared-media counts for whichever dialog this profile represents.
    if (dialog_id != 0) {
        SharedMediaQuery.getMediaCount(dialog_id, SharedMediaQuery.MEDIA_PHOTOVIDEO, classGuid, true);
    } else if (user_id != 0) {
        SharedMediaQuery.getMediaCount(user_id, SharedMediaQuery.MEDIA_PHOTOVIDEO, classGuid, true);
    } else if (chat_id > 0) {
        // Group dialogs use the negated chat id as their dialog id.
        SharedMediaQuery.getMediaCount(-chat_id, SharedMediaQuery.MEDIA_PHOTOVIDEO, classGuid, true);
        if (mergeDialogId != 0) {
            SharedMediaQuery.getMediaCount(mergeDialogId, SharedMediaQuery.MEDIA_PHOTOVIDEO, classGuid, true);
        }
    }

    NotificationCenter.getInstance().addObserver(this, NotificationCenter.mediaCountDidLoaded);
    NotificationCenter.getInstance().addObserver(this, NotificationCenter.updateInterfaces);
    NotificationCenter.getInstance().addObserver(this, NotificationCenter.closeChats);
    updateRowsIds();

    return true;
}

From source file:org.commoncrawl.util.MapReduceJobStatsWriter.java

/**
 * Closes and flushes the stats log: closes the writer and its underlying
 * stream, copies the finished local log file to HDFS (only when at least one
 * entry was written and no write error occurred), always deletes the local
 * temp file, and finally stops the event loop.
 *
 * All of the work runs on the event-loop thread via a zero-delay timer so it
 * is serialized with any in-flight writes.
 *
 * @param optionalAsyncCallback if non-null, invoked on the event-loop thread
 *        once shutdown completes and this method returns without blocking;
 *        if null, this method blocks until shutdown completes.
 */
public void close(final Callback optionalAsyncCallback) {

    if (_eventLoop != null) {
        // allocate a blocking semaphore in case async callback was not specified 
        final Semaphore blockingCallSemaphore = new Semaphore(0);

        // perform shutdown in worker thread ... 
        _eventLoop.setTimer(new Timer(0, false, new Timer.Callback() {

            @Override
            public void timerFired(Timer timer) {

                try {
                    try {
                        if (_writer != null) {
                            _writer.close();
                        }
                    } catch (IOException e) {
                        // Remember the failure; it suppresses the HDFS copy below.
                        LOG.error(CCStringUtils.stringifyException(e));
                        _lastLogWriteException = e;
                    } finally {
                        _writer = null;

                        // Flush/close the underlying stream even if the writer
                        // close failed, so no buffered data is silently lost.
                        try {
                            if (_outputStream != null) {
                                _outputStream.flush();
                                _outputStream.close();
                            }
                        } catch (IOException e) {
                            LOG.error(CCStringUtils.stringifyException(e));
                            _lastLogWriteException = e;
                        } finally {
                            _outputStream = null;
                        }
                    }

                    // now figure out if everything went smoothly or not 
                    if (_entryCount != 0 && _lastLogWriteException == null) {
                        // ok so far so good... time to copy the local log file to hdfs ... 
                        Path hdfsPath = new Path(Environment.HDFS_LOGCOLLECTOR_BASEDIR,
                                _logFamily + "/" + _groupingKey + "/" + Long.toString(_uniqueKey));

                        try {

                            // delete the remote file if it exists
                            _remoteFileSystem.delete(hdfsPath, false);
                            // ensure parent path 
                            _remoteFileSystem.mkdirs(hdfsPath.getParent());
                            // now if the local file exists and has data 
                            if (_tempFileName.exists() && _tempFileName.length() != 0) {
                                // copy the file to hdfs 
                                _remoteFileSystem.copyFromLocalFile(new Path(_tempFileName.getAbsolutePath()),
                                        hdfsPath);
                            }
                        } catch (IOException e) {
                            LOG.error(CCStringUtils.stringifyException(e));
                            _lastLogWriteException = e;
                        }
                    }
                } finally {
                    // always delete the temp file ... 
                    _tempFileName.delete();

                    // release semaphore 
                    blockingCallSemaphore.release();

                    // if callback was specified , call it now 
                    if (optionalAsyncCallback != null) {
                        optionalAsyncCallback.execute();
                    }

                    // stop the event loop ... 
                    _eventLoop.stop();
                    _eventLoop = null;
                }
            }
        }));

        // now if callback was not specified... wait for blocking semaphore to signal ... 
        if (optionalAsyncCallback == null) {
            blockingCallSemaphore.acquireUninterruptibly();
        }
    }
}