Example usage for java.util.concurrent Semaphore acquire

Introduction

This page collects example usages of java.util.concurrent.Semaphore.acquire(), drawn from open-source projects.

Prototype

public void acquire() throws InterruptedException 

Document

Acquires a permit from this semaphore, blocking until one is available or the current thread is interrupted.
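
A minimal sketch of that blocking behavior (the class and variable names below are illustrative, not taken from the examples that follow): a semaphore created with zero permits makes acquire() block until another thread calls release().

import java.util.concurrent.Semaphore;

public class AcquireExample {
    public static void main(String[] args) throws InterruptedException {
        // Zero permits: the first acquire() will block.
        final Semaphore done = new Semaphore(0);

        new Thread(() -> {
            // ... perform some work, then signal completion ...
            done.release();
        }).start();

        done.acquire(); // blocks until release(), or throws InterruptedException if interrupted
        System.out.println("Worker finished.");
    }
}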

Usage

From source file:fur.shadowdrake.minecraft.InstallPanel.java
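
In this example, two zero-permit semaphores sequence an asynchronous FTP download: the data-channel callback blocks in semaphore1.acquire() until the main thread has set up progress reporting, and the main thread then blocks in semaphore2.acquire() until the completion callback closes the data channel.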

private boolean downloadArchive(String filename) throws NetworkException {
    final Semaphore semaphore1 = new Semaphore(0);
    final Semaphore semaphore2 = new Semaphore(0);
    success = false;
    log.setIndeterminate();
    while (true) {
        result = ftpClient.openDataChannel((ActionEvent e) -> {
            if (e.getID() == FtpClient.FTP_OK) {
                try {
                    semaphore1.acquire();
                    InputStream is;
                    is = ((Socket) e.getSource()).getInputStream();
                    downloadedFiles = unTar(is, new File(workingDir));
                    success = true;
                } catch (IOException ex) {
                    Logger.getLogger(InstallPanel.class.getName()).log(Level.SEVERE, "Download", ex);
                    log.println("Faild to save file.");
                    ftpClient.closeDataChannel();
                    success = false;
                } catch (ArchiveException | InterruptedException ex) {
                    Logger.getLogger(InstallPanel.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        });

        switch (result) {
        case FtpClient.FTP_OK:
            downloadSize = ftpClient.retr(filename, (ActionEvent e) -> {
                ftpClient.closeDataChannel();
                semaphore2.release();
            });
            if (downloadSize >= 0) {
                if (downloadSize > 1048576) {
                    log.println("~" + Integer.toString(downloadSize / 1048576) + " MB");
                } else if (downloadSize > 1024) {
                    log.println("~" + Integer.toString(downloadSize / 1024) + " kB");
                }
                log.reset();
                log.showPercentage(true);
                log.setMaximum(downloadSize);
                semaphore1.release();
                try {
                    semaphore2.acquire();
                } catch (InterruptedException ex) {
                    return false;
                }
            } else {
                switch (downloadSize) {
                case FtpClient.FTP_NODATA:
                    log.println(
                            "Oops! Server's complaining about missing data channel, although I've opened it.");
                    ftpClient.abandonDataChannel();
                    return false;
                default:
                    ftpClient.abandonDataChannel();
                    return false;
                }
            }
            break;
        case FtpClient.FTP_TIMEOUT:
            if (reconnect()) {
                continue;
            } else {
                return false;
            }
        default:
            return false;
        }

        break;
    }
    return success;
}

From source file:io.pravega.client.stream.impl.ControllerImplTest.java
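
Here a semaphore initialized to -19 permits acts as a countdown: the main thread's acquire() succeeds only after the ten worker threads have released twenty permits in total (two per thread), or after a failing worker releases twenty at once to unblock it early.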

@Test
public void testParallelGetCurrentSegments() throws Exception {
    final ExecutorService executorService = Executors.newFixedThreadPool(10);
    Semaphore createCount = new Semaphore(-19); // 20 releases are needed before acquire() succeeds
    AtomicBoolean success = new AtomicBoolean(true);
    for (int i = 0; i < 10; i++) {
        executorService.submit(() -> {
            for (int j = 0; j < 2; j++) {
                try {
                    CompletableFuture<StreamSegments> streamSegments;
                    streamSegments = controllerClient.getCurrentSegments("scope1", "streamparallel");
                    assertTrue(streamSegments.get().getSegments().size() == 2);
                    assertEquals(new Segment("scope1", "streamparallel", 0),
                            streamSegments.get().getSegmentForKey(0.2));
                    assertEquals(new Segment("scope1", "streamparallel", 1),
                            streamSegments.get().getSegmentForKey(0.6));
                    createCount.release();
                } catch (Exception e) {
                    log.error("Exception when getting segments: {}", e);

                    // Don't wait for other threads to complete.
                    success.set(false);
                    createCount.release(20);
                }
            }
        });
    }
    createCount.acquire();
    executorService.shutdownNow();
    assertTrue(success.get());
}

From source file:com.impetus.ankush2.framework.monitor.AbstractMonitor.java
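
This example throttles remote log collection with a semaphore sized to the node count: the main thread acquires a permit before dispatching each worker, each worker releases its permit in a finally block, and the closing acquire(nodes.size()) returns only once every worker has finished.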

public void downloadlogs() {
    final String component = (String) parameterMap.get(com.impetus.ankush2.constant.Constant.Keys.COMPONENT);
    if (component == null || component.isEmpty()) {
        this.addAndLogError("Invalid Log request: Please specify a component.");
        return;
    }
    try {
        ArrayList<String> nodes = (ArrayList) parameterMap.get(Constant.JsonKeys.Logs.NODES);
        if (nodes == null || nodes.isEmpty()) {
            nodes = new ArrayList<String>(this.clusterConf.getComponents().get(component).getNodes().keySet());
        }

        ArrayList<String> roles = (ArrayList) parameterMap.get(Constant.JsonKeys.Logs.ROLES);

        Serviceable serviceableObj = ObjectFactory.getServiceObject(component);

        if (roles == null || roles.isEmpty()) {
            roles = new ArrayList<String>(serviceableObj.getServiceList(this.clusterConf));
        }

        String clusterResourcesLogsDir = AppStoreWrapper.getClusterResourcesPath() + "logs/";

        String clusterLogsDirName = "Logs_" + this.clusterConf.getName() + "_" + System.currentTimeMillis();

        String clusterLogsArchiveName = clusterLogsDirName + ".zip";

        final String cmpLogsDirPathOnServer = clusterResourcesLogsDir + clusterLogsDirName + "/" + component
                + "/";

        if (!FileUtils.ensureFolder(cmpLogsDirPathOnServer)) {
            this.addAndLogError("Could not create log directory for " + component + " on server.");
            return;
        }

        final Semaphore semaphore = new Semaphore(nodes.size());
        final ArrayList<String> rolesObj = new ArrayList<String>(roles);
        try {
            for (final String host : nodes) {
                semaphore.acquire();
                AppStoreWrapper.getExecutor().execute(new Runnable() {
                    @Override
                    public void run() {
                        NodeConfig nodeConfig = clusterConf.getNodes().get(host);

                        SSHExec connection = SSHUtils.connectToNode(host, clusterConf.getAuthConf());
                        if (connection == null) {
                            // TODO: handle Error
                            logger.error("Could not fetch log files - Connection not initialized", component,
                                    host);
                        }
                        Serviceable serviceableObj = null;
                        try {
                            serviceableObj = ObjectFactory.getServiceObject(component);

                            for (String role : rolesObj) {
                                if (nodeConfig.getRoles().get(component).contains(role)) {

                                    String tmpLogsDirOnServer = cmpLogsDirPathOnServer + "/" + role + "/" + host
                                            + "/";
                                    if (!FileUtils.ensureFolder(tmpLogsDirOnServer)) {
                                        // TODO: handle Error
                                        // Log error in operation table and
                                        // skip
                                        // this role
                                        continue;
                                    }

                                    String nodeLogsDirPath = FileUtils.getSeparatorTerminatedPathEntry(
                                            serviceableObj.getLogDirPath(clusterConf, host, role));
                                    String logFilesRegex = serviceableObj.getLogFilesRegex(clusterConf, host,
                                            role, null);
                                    String outputTarArchiveName = role + "_" + System.currentTimeMillis()
                                            + ".tar.gz";
                                    try {
                                        List<String> logsFilesList = AnkushUtils.listFilesInDir(connection,
                                                host, nodeLogsDirPath, logFilesRegex);
                                        AnkushTask ankushTask = new CreateTarArchive(nodeLogsDirPath,
                                                nodeLogsDirPath + outputTarArchiveName, logsFilesList);
                                        if (connection.exec(ankushTask).rc != 0) {
                                            // TODO: handle Error
                                            // Log error in operation table
                                            // and
                                            // skip this
                                            // role
                                            continue;
                                        }
                                        connection.downloadFile(nodeLogsDirPath + outputTarArchiveName,
                                                tmpLogsDirOnServer + outputTarArchiveName);
                                        ankushTask = new Remove(nodeLogsDirPath + outputTarArchiveName);
                                        connection.exec(ankushTask);

                                        ankushTask = new UnTarArchive(tmpLogsDirOnServer + outputTarArchiveName,
                                                tmpLogsDirOnServer);

                                        Runtime.getRuntime().exec(ankushTask.getCommand()).waitFor();
                                        ankushTask = new Remove(tmpLogsDirOnServer + outputTarArchiveName);
                                        Runtime.getRuntime().exec(ankushTask.getCommand()).waitFor();
                                    } catch (Exception e) {
                                        e.printStackTrace();
                                        // TODO: handle exception
                                        // Log error in operation table and
                                        // skip
                                        // this role
                                        continue;
                                    }
                                }
                            }
                        } catch (Exception e) {
                            // TODO: handle exception
                            return;
                        } finally {
                            if (semaphore != null) {
                                semaphore.release();
                            }
                            if (connection != null) {
                                connection.disconnect();
                            }
                        }
                    }
                });
            }
            semaphore.acquire(nodes.size());
        } catch (Exception e) {

        }

        ZipUtil.pack(new File(clusterResourcesLogsDir + clusterLogsDirName),
                new File(clusterResourcesLogsDir + clusterLogsArchiveName), true);

        org.apache.commons.io.FileUtils.deleteDirectory(new File(clusterResourcesLogsDir + clusterLogsDirName));

        result.put(com.impetus.ankush2.constant.Constant.Keys.DOWNLOADPATH,
                clusterResourcesLogsDir + clusterLogsArchiveName);
    } catch (Exception e) {
        this.addAndLogError("Could not download logs for " + component + ".");
        logger.error(e.getMessage(), component, e);
    }
}

From source file:org.jumpmind.symmetric.service.impl.DataExtractorService.java
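
Here a per-batch semaphore, kept in a shared locks map, limits how many threads may extract the same batch concurrently: acquire() is called under synchronization, InterruptedException is wrapped and rethrown, and the semaphore is removed from the map once all of its permits have been returned.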

protected OutgoingBatch extractOutgoingBatch(ProcessInfo processInfo, Node targetNode, IDataWriter dataWriter,
        OutgoingBatch currentBatch, boolean useStagingDataWriter, boolean updateBatchStatistics,
        ExtractMode mode) {
    if (currentBatch.getStatus() != Status.OK || ExtractMode.EXTRACT_ONLY == mode) {

        Node sourceNode = nodeService.findIdentity();

        TransformWriter transformExtractWriter = null;
        if (useStagingDataWriter) {
            long memoryThresholdInBytes = parameterService.getLong(ParameterConstants.STREAM_TO_FILE_THRESHOLD);
            transformExtractWriter = createTransformDataWriter(sourceNode, targetNode,
                    new ProcessInfoDataWriter(
                            new StagingDataWriter(memoryThresholdInBytes, nodeService.findIdentityNodeId(),
                                    Constants.STAGING_CATEGORY_OUTGOING, stagingManager),
                            processInfo));
        } else {
            transformExtractWriter = createTransformDataWriter(sourceNode, targetNode,
                    new ProcessInfoDataWriter(dataWriter, processInfo));
        }

        long ts = System.currentTimeMillis();
        long extractTimeInMs = 0L;
        long byteCount = 0L;
        long transformTimeInMs = 0L;

        if (currentBatch.getStatus() == Status.IG) {
            Batch batch = new Batch(BatchType.EXTRACT, currentBatch.getBatchId(), currentBatch.getChannelId(),
                    symmetricDialect.getBinaryEncoding(), sourceNode.getNodeId(), currentBatch.getNodeId(),
                    currentBatch.isCommonFlag());
            batch.setIgnored(true);
            try {
                IStagedResource resource = getStagedResource(currentBatch);
                if (resource != null) {
                    resource.delete();
                }
                DataContext ctx = new DataContext(batch);
                ctx.put("targetNode", targetNode);
                ctx.put("sourceNode", sourceNode);
                transformExtractWriter.open(ctx);
                transformExtractWriter.start(batch);
                transformExtractWriter.end(batch, false);
            } finally {
                transformExtractWriter.close();
            }
        } else if (!isPreviouslyExtracted(currentBatch)) {
            int maxPermits = parameterService.getInt(ParameterConstants.CONCURRENT_WORKERS);
            String semaphoreKey = useStagingDataWriter ? Long.toString(currentBatch.getBatchId())
                    : currentBatch.getNodeBatchId();
            Semaphore lock = null;
            try {
                synchronized (locks) {
                    lock = locks.get(semaphoreKey);
                    if (lock == null) {
                        lock = new Semaphore(maxPermits);
                        locks.put(semaphoreKey, lock);
                    }
                    try {
                        lock.acquire();
                    } catch (InterruptedException e) {
                        throw new org.jumpmind.exception.InterruptedException(e);
                    }
                }

                synchronized (lock) {
                    if (!isPreviouslyExtracted(currentBatch)) {
                        currentBatch.setExtractCount(currentBatch.getExtractCount() + 1);
                        if (updateBatchStatistics) {
                            changeBatchStatus(Status.QY, currentBatch, mode);
                        }
                        currentBatch.resetStats();
                        IDataReader dataReader = new ExtractDataReader(symmetricDialect.getPlatform(),
                                new SelectFromSymDataSource(currentBatch, sourceNode, targetNode, processInfo));
                        DataContext ctx = new DataContext();
                        ctx.put(Constants.DATA_CONTEXT_TARGET_NODE, targetNode);
                        ctx.put(Constants.DATA_CONTEXT_TARGET_NODE_ID, targetNode.getNodeId());
                        ctx.put(Constants.DATA_CONTEXT_TARGET_NODE_EXTERNAL_ID, targetNode.getExternalId());
                        ctx.put(Constants.DATA_CONTEXT_TARGET_NODE_GROUP_ID, targetNode.getNodeGroupId());
                        ctx.put(Constants.DATA_CONTEXT_SOURCE_NODE, sourceNode);
                        ctx.put(Constants.DATA_CONTEXT_SOURCE_NODE_ID, sourceNode.getNodeId());
                        ctx.put(Constants.DATA_CONTEXT_SOURCE_NODE_EXTERNAL_ID, sourceNode.getExternalId());
                        ctx.put(Constants.DATA_CONTEXT_SOURCE_NODE_GROUP_ID, sourceNode.getNodeGroupId());

                        new DataProcessor(dataReader, transformExtractWriter, "extract").process(ctx);
                        extractTimeInMs = System.currentTimeMillis() - ts;
                        Statistics stats = transformExtractWriter.getNestedWriter().getStatistics().values()
                                .iterator().next();
                        transformTimeInMs = stats.get(DataWriterStatisticConstants.TRANSFORMMILLIS);
                        extractTimeInMs = extractTimeInMs - transformTimeInMs;
                        byteCount = stats.get(DataWriterStatisticConstants.BYTECOUNT);
                    }
                }
            } catch (RuntimeException ex) {
                IStagedResource resource = getStagedResource(currentBatch);
                if (resource != null) {
                    resource.close();
                    resource.delete();
                }
                throw ex;
            } finally {
                lock.release();
                synchronized (locks) {
                    if (lock.availablePermits() == maxPermits) {
                        locks.remove(semaphoreKey);
                    }
                }
            }
        }

        if (updateBatchStatistics) {
            long dataEventCount = currentBatch.getDataEventCount();
            long insertEventCount = currentBatch.getInsertEventCount();
            currentBatch = requeryIfEnoughTimeHasPassed(ts, currentBatch);

            // preserve in the case of a reload event
            if (dataEventCount > currentBatch.getDataEventCount()) {
                currentBatch.setDataEventCount(dataEventCount);
            }

            // preserve in the case of a reload event
            if (insertEventCount > currentBatch.getInsertEventCount()) {
                currentBatch.setInsertEventCount(insertEventCount);
            }

            // only update the current batch after we have possibly
            // "re-queried"
            if (extractTimeInMs > 0) {
                currentBatch.setExtractMillis(extractTimeInMs);
            }

            if (byteCount > 0) {
                currentBatch.setByteCount(byteCount);
                statisticManager.incrementDataBytesExtracted(currentBatch.getChannelId(), byteCount);
                statisticManager.incrementDataExtracted(currentBatch.getChannelId(),
                        currentBatch.getExtractCount());
            }
        }

    }

    return currentBatch;
}

From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java
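
This test uses a zero-permit semaphore as a completion latch: the sink's event handler releases it once the HTTP post finishes, and the test thread blocks in acquire() before running its verifications.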

@Test
public void testPostSuccess() throws InterruptedException {
    final String start = Instant.now().minusMillis(812).atZone(ZoneId.of("UTC"))
            .format(DateTimeFormatter.ISO_INSTANT);
    final String end = Instant.now().atZone(ZoneId.of("UTC")).format(DateTimeFormatter.ISO_INSTANT);
    final String id = UUID.randomUUID().toString();

    _wireMockRule.stubFor(WireMock.requestMatching(new RequestValueMatcher(r -> {
        // Annotations
        Assert.assertEquals(7, r.getAnnotationsCount());
        assertAnnotation(r.getAnnotationsList(), "foo", "bar");
        assertAnnotation(r.getAnnotationsList(), "_service", "myservice");
        assertAnnotation(r.getAnnotationsList(), "_cluster", "mycluster");
        assertAnnotation(r.getAnnotationsList(), "_start", start);
        assertAnnotation(r.getAnnotationsList(), "_end", end);
        assertAnnotation(r.getAnnotationsList(), "_id", id);

        // Dimensions
        Assert.assertEquals(3, r.getDimensionsCount());
        assertDimension(r.getDimensionsList(), "host", "some.host.com");
        assertDimension(r.getDimensionsList(), "service", "myservice");
        assertDimension(r.getDimensionsList(), "cluster", "mycluster");

        // Samples
        assertSample(r.getTimersList(), "timerLong", 123L, ClientV2.Unit.Type.Value.SECOND,
                ClientV2.Unit.Scale.Value.NANO);
        assertSample(r.getTimersList(), "timerInt", 123, ClientV2.Unit.Type.Value.SECOND,
                ClientV2.Unit.Scale.Value.NANO);
        assertSample(r.getTimersList(), "timerShort", (short) 123, ClientV2.Unit.Type.Value.SECOND,
                ClientV2.Unit.Scale.Value.NANO);
        assertSample(r.getTimersList(), "timerByte", (byte) 123, ClientV2.Unit.Type.Value.SECOND,
                ClientV2.Unit.Scale.Value.NANO);
        assertSample(r.getCountersList(), "counter", 8d);
        assertSample(r.getGaugesList(), "gauge", 10d, ClientV2.Unit.Type.Value.BYTE,
                ClientV2.Unit.Scale.Value.UNIT);
    })).willReturn(WireMock.aResponse().withStatus(200)));

    final AtomicBoolean assertionResult = new AtomicBoolean(false);
    final Semaphore semaphore = new Semaphore(0);
    final Sink sink = new ApacheHttpSink.Builder()
            .setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH))
            .setEventHandler(new AttemptCompletedAssertionHandler(assertionResult, 1, 451, true,
                    new CompletionHandler(semaphore)))
            .build();

    final Map<String, String> annotations = new LinkedHashMap<>();
    annotations.put("foo", "bar");
    annotations.put("_start", start);
    annotations.put("_end", end);
    annotations.put("_host", "some.host.com");
    annotations.put("_service", "myservice");
    annotations.put("_cluster", "mycluster");
    annotations.put("_id", id);

    final TsdEvent event = new TsdEvent(annotations,
            createQuantityMap("timerLong", TsdQuantity.newInstance(123L, Units.NANOSECOND), "timerInt",
                    TsdQuantity.newInstance(123, Units.NANOSECOND), "timerShort",
                    TsdQuantity.newInstance((short) 123, Units.NANOSECOND), "timerByte",
                    TsdQuantity.newInstance((byte) 123, Units.NANOSECOND)),
            createQuantityMap("counter", TsdQuantity.newInstance(8d, null)),
            createQuantityMap("gauge", TsdQuantity.newInstance(10d, Units.BYTE)));

    sink.record(event);
    semaphore.acquire();

    // Ensure expected handler was invoked
    Assert.assertTrue(assertionResult.get());

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that data was sent
    _wireMockRule.verify(1, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());
}

From source file:org.apache.pulsar.compaction.TwoPhaseCompactor.java
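
In this compaction loop, the outstanding semaphore bounds the number of in-flight ledger writes: acquire() blocks before each asynchronous add, and the write's completion callback releases the permit.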

private void phaseTwoLoop(RawReader reader, MessageId to, Map<String, MessageId> latestForKey, LedgerHandle lh,
        Semaphore outstanding, CompletableFuture<Void> promise) {
    reader.readNextAsync().whenCompleteAsync((m, exception) -> {
        if (exception != null) {
            promise.completeExceptionally(exception);
            return;
        } else if (promise.isDone()) {
            return;
        }
        MessageId id = m.getMessageId();
        Optional<RawMessage> messageToAdd = Optional.empty();
        if (RawBatchConverter.isReadableBatch(m)) {
            try {
                messageToAdd = RawBatchConverter.rebatchMessage(m,
                        (key, subid) -> latestForKey.get(key).equals(subid));
            } catch (IOException ioe) {
                log.info("Error decoding batch for message {}. Whole batch will be included in output", id,
                        ioe);
                messageToAdd = Optional.of(m);
            }
        } else {
            Pair<String, Integer> keyAndSize = extractKeyAndSize(m);
            MessageId msg;
            if (keyAndSize == null) { // pass through messages without a key
                messageToAdd = Optional.of(m);
            } else if ((msg = latestForKey.get(keyAndSize.getLeft())) != null && msg.equals(id)) { // consider message only if present into latestForKey map
                if (keyAndSize.getRight() <= 0) {
                    promise.completeExceptionally(new IllegalArgumentException(
                            "Compaction phase found empty record from sorted key-map"));
                }
                messageToAdd = Optional.of(m);
            } else {
                m.close();
                // Reached to last-id and phase-one found it deleted-message while iterating on ledger so, not
                // present under latestForKey. Complete the compaction.
                if (to.equals(id)) {
                    promise.complete(null);
                }
            }
        }

        messageToAdd.ifPresent((toAdd) -> {
            try {
                outstanding.acquire();
                CompletableFuture<Void> addFuture = addToCompactedLedger(lh, toAdd)
                        .whenComplete((res, exception2) -> {
                            outstanding.release();
                            if (exception2 != null) {
                                promise.completeExceptionally(exception2);
                            }
                        });
                if (to.equals(id)) {
                    addFuture.whenComplete((res, exception2) -> {
                        if (exception2 == null) {
                            promise.complete(null);
                        }
                    });
                }
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                promise.completeExceptionally(ie);
            }
        });
        phaseTwoLoop(reader, to, latestForKey, lh, outstanding, promise);
    }, scheduler);
}

From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java
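
This test coordinates three semaphores: semaphoreA and semaphoreB serve as handshake points with the event handler, while semaphoreC starts at -2 so its acquire() cannot succeed until three releases have occurred.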

@Test
public void testRespectsBufferMax() throws InterruptedException {
    final AtomicInteger droppedEvents = new AtomicInteger(0);
    final Semaphore semaphoreA = new Semaphore(0);
    final Semaphore semaphoreB = new Semaphore(0);
    final Semaphore semaphoreC = new Semaphore(-2); // three releases are needed before acquire() succeeds
    final AtomicInteger recordsReceived = new AtomicInteger(0);

    _wireMockRule.stubFor(WireMock.requestMatching(new RequestValueMatcher(r -> {
        recordsReceived.incrementAndGet();

        // Annotations
        Assert.assertEquals(0, r.getAnnotationsCount());

        // Dimensions
        Assert.assertEquals(0, r.getDimensionsCount());

        // Samples
        assertSample(r.getTimersList(), "timer", 7d);
        assertSample(r.getCountersList(), "counter", 8d);
        assertSample(r.getGaugesList(), "gauge", 9d);
    })).willReturn(WireMock.aResponse().withStatus(200)));

    final Sink sink = new ApacheHttpSink.Builder()
            .setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH)).setMaxBatchSize(2)
            .setParallelism(1).setBufferSize(5).setEmptyQueueInterval(Duration.ofMillis(1000))
            .setEventHandler(
                    new RespectsMaxBufferEventHandler(semaphoreA, semaphoreB, semaphoreC, droppedEvents))
            .build();

    final TsdEvent event = new TsdEvent(Collections.emptyMap(),
            createQuantityMap("timer", TsdQuantity.newInstance(7d, null)),
            createQuantityMap("counter", TsdQuantity.newInstance(8d, null)),
            createQuantityMap("gauge", TsdQuantity.newInstance(9d, null)));

    // Add one event to be used as a synchronization point
    sink.record(event);
    semaphoreA.acquire();

    // Add the actual events to analyze
    for (int x = 0; x < 10; x++) {
        sink.record(event);
    }
    semaphoreB.release();
    semaphoreC.acquire();

    // Ensure expected handler was invoked
    Assert.assertEquals(5, droppedEvents.get());

    // Assert number of records received
    Assert.assertEquals(6, recordsReceived.get());

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that data was sent
    _wireMockRule.verify(4, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());
}

From source file:com.solace.samples.BasicRequestor.java
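
Here a zero-permit semaphore (named latch) blocks the main thread twice: once until the MQTT callback delivers the Reply-To topic, and again until the response to the published request arrives; the callback releases a permit in both cases.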

public void run(String... args) {
    System.out.println("BasicRequestor initializing...");

    try {
        // Create an MQTT client
        final MqttClient mqttClient = new MqttClient("tcp://" + args[0], "HelloWorldBasicRequestor");
        MqttConnectOptions connOpts = new MqttConnectOptions();
        connOpts.setCleanSession(true);

        // Connect the client
        System.out.println("Connecting to Solace broker: tcp://" + args[0]);
        mqttClient.connect(connOpts);
        System.out.println("Connected");

        // Semaphore used for synchronizing b/w threads
        final Semaphore latch = new Semaphore(0);

        // Topic the client will use to send request messages
        final String requestTopic = "T/GettingStarted/request";

        // Callback - Anonymous inner-class for receiving the Reply-To topic from the Solace broker
        mqttClient.setCallback(new MqttCallback() {
            public void messageArrived(String topic, MqttMessage message) throws Exception {
                // If the topic is "$SYS/client/reply-to" then set our replyToTopic
                // to the contents of the message payload received
                if (topic != null && topic.equals("$SYS/client/reply-to")) {
                    replyToTopic = new String(message.getPayload());
                    System.out.println("\nReceived Reply-to topic from Solace for the MQTT client:"
                            + "\n\tReply-To: " + replyToTopic + "\n");
                } else {
                    // Received a response to our request
                    try {
                        // Parse the response payload and convert to a JSONObject
                        Object obj = parser.parse(new String(message.getPayload()));
                        JSONObject jsonPayload = (JSONObject) obj;

                        System.out.println("\nReceived a response!" + "\n\tCorrel. Id: "
                                + (String) jsonPayload.get("correlationId") + "\n\tMessage:    "
                                + (String) jsonPayload.get("message") + "\n");
                    } catch (ParseException ex) {
                        System.out.println("Exception parsing response message!");
                        ex.printStackTrace();
                    }
                }

                latch.release(); // unblock main thread
            }

            public void connectionLost(Throwable cause) {
                System.out.println("Connection to Solace broker lost!" + cause.getMessage());
                latch.release();
            }

            public void deliveryComplete(IMqttDeliveryToken token) {
            }
        });

        // Subscribe client to the special Solace topic for requesting a unique
        // Reply-to destination for the MQTT client
        System.out.println("Requesting Reply-To topic from Solace...");
        mqttClient.subscribe("$SYS/client/reply-to", 0);

        // Wait until we have received the Reply-To topic
        try {
            latch.acquire();
        } catch (InterruptedException e) {
            System.out.println("I was awoken while waiting");
        }

        // Check if we have a Reply-To topic
        if (replyToTopic == null || replyToTopic.isEmpty()) {
            System.out.println("Unable to request Reply-To from Solace. Exiting");
            System.exit(0);
        }

        // Subscribe client to the Solace-provided Reply-To topic with a QoS level of 0
        System.out.println("Subscribing client to Solace-provided Reply-To topic");
        mqttClient.subscribe(replyToTopic, 0);

        // Create the request payload in JSON format
        JSONObject obj = new JSONObject();
        obj.put("correlationId", UUID.randomUUID().toString());
        obj.put("replyTo", replyToTopic);
        obj.put("message", "Sample Request");
        String reqPayload = obj.toJSONString();

        // Create a request message and set the request payload
        MqttMessage reqMessage = new MqttMessage(reqPayload.getBytes());
        reqMessage.setQos(0);

        System.out.println("Sending request to: " + requestTopic);

        // Publish the request message
        mqttClient.publish(requestTopic, reqMessage);

        // Wait until we have received a response
        try {
            latch.acquire(); // block here until message received
        } catch (InterruptedException e) {
            System.out.println("I was awoken while waiting");
        }

        // Disconnect the client
        mqttClient.disconnect();
        System.out.println("Exiting");

        System.exit(0);
    } catch (MqttException me) {
        System.out.println("reason " + me.getReasonCode());
        System.out.println("msg " + me.getMessage());
        System.out.println("loc " + me.getLocalizedMessage());
        System.out.println("cause " + me.getCause());
        System.out.println("excep " + me);
        me.printStackTrace();
    }
}

From source file:org.apache.solr.request.SimpleFacets.java
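
This method caps concurrent facet counting with a semaphore sized to FACET_THREADS (effectively unbounded when the limit is non-positive): the submitting thread acquires a permit before each executor.execute(), and every callable releases its permit in a finally block.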

/**
 * Returns a list of value constraints and the associated facet counts 
 * for each facet field specified in the params.
 *
 * @see FacetParams#FACET_FIELD
 * @see #getFieldMissingCount
 * @see #getFacetTermEnumCounts
 */
@SuppressWarnings("unchecked")
public NamedList<Object> getFacetFieldCounts() throws IOException, SyntaxError {

    NamedList<Object> res = new SimpleOrderedMap<>();
    String[] facetFs = global.getParams(FacetParams.FACET_FIELD);
    if (null == facetFs) {
        return res;
    }

    // Passing a negative number for FACET_THREADS implies an unlimited number of threads is acceptable.
    // Also, a subtlety of directExecutor is that no matter how many times you "submit" a job, it's really
    // just a method call in that it's run by the calling thread.
    int maxThreads = req.getParams().getInt(FacetParams.FACET_THREADS, 0);
    Executor executor = maxThreads == 0 ? directExecutor : facetExecutor;
    final Semaphore semaphore = new Semaphore((maxThreads <= 0) ? Integer.MAX_VALUE : maxThreads);
    List<Future<NamedList>> futures = new ArrayList<>(facetFs.length);

    if (fdebugParent != null) {
        fdebugParent.putInfoItem("maxThreads", maxThreads);
    }

    try {
        //Loop over fields; submit to executor, keeping the future
        for (String f : facetFs) {
            if (fdebugParent != null) {
                fdebug = new FacetDebugInfo();
                fdebugParent.addChild(fdebug);
            }
            final ParsedParams parsed = parseParams(FacetParams.FACET_FIELD, f);
            final SolrParams localParams = parsed.localParams;
            final String termList = localParams == null ? null : localParams.get(CommonParams.TERMS);
            final String key = parsed.key;
            final String facetValue = parsed.facetValue;
            Callable<NamedList> callable = () -> {
                try {
                    NamedList<Object> result = new SimpleOrderedMap<>();
                    if (termList != null) {
                        List<String> terms = StrUtils.splitSmart(termList, ",", true);
                        result.add(key, getListedTermCounts(facetValue, parsed, terms));
                    } else {
                        result.add(key, getTermCounts(facetValue, parsed));
                    }
                    return result;
                } catch (SolrException se) {
                    throw se;
                } catch (Exception e) {
                    throw new SolrException(ErrorCode.SERVER_ERROR,
                            "Exception during facet.field: " + facetValue, e);
                } finally {
                    semaphore.release();
                }
            };

            RunnableFuture<NamedList> runnableFuture = new FutureTask<>(callable);
            semaphore.acquire();//may block and/or interrupt
            executor.execute(runnableFuture);//releases semaphore when done
            futures.add(runnableFuture);
        } //facetFs loop

        //Loop over futures to get the values. The order is the same as facetFs but shouldn't matter.
        for (Future<NamedList> future : futures) {
            res.addAll(future.get());
        }
        assert semaphore.availablePermits() >= maxThreads;
    } catch (InterruptedException e) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                "Error while processing facet fields: InterruptedException", e);
    } catch (ExecutionException ee) {
        Throwable e = ee.getCause();//unwrap
        if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        }
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                "Error while processing facet fields: " + e.toString(), e);
    }

    return res;
}

From source file:fur.shadowdrake.minecraft.InstallPanel.java
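
As in downloadArchive above, a zero-permit semaphore makes the calling thread wait in acquire() until the server's completion callback closes the data channel and releases the permit, by which point the data-channel callback has read the full instruction stream.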

private List<String> fetchUpdateInstructions(Pack pack) throws NetworkException {
    final Semaphore semaphore = new Semaphore(0);
    final StringBuffer sb = new StringBuffer();
    while (true) {
        result = ftpClient.openDataChannel((ActionEvent e) -> {
            if (e.getID() == FtpClient.FTP_OK) {
                try {
                    InputStreamReader isr;
                    int n;
                    char[] buffer = new char[4096];
                    isr = new InputStreamReader(((Socket) e.getSource()).getInputStream());
                    while (true) {
                        n = isr.read(buffer);
                        if (n < 0) {
                            break;
                        }
                        sb.append(buffer, 0, n);
                    }
                } catch (IOException ex) {
                    Logger.getLogger(InstallPanel.class.getName()).log(Level.SEVERE, "Download", ex);
                    log.println("Faild to save file.");
                    ftpClient.closeDataChannel();
                }
            }
        });
        switch (result) {
        case FtpClient.FTP_OK:
            int status = ftpClient.uins(pack, (ActionEvent e) -> {
                ftpClient.closeDataChannel();
                semaphore.release();
            });
            switch (status) {
            case FtpClient.FTP_OK:
                try {
                    semaphore.acquire();
                } catch (InterruptedException ex) {
                    return null;
                }
                break;
            case FtpClient.FTP_NODATA:
                log.println("Oops! Server's complaining about missing data channel, although I've opened it.");
                ftpClient.abandonDataChannel();
                return null;
            default:
                ftpClient.abandonDataChannel();
                return null;
            }
            break;
        case FtpClient.FTP_TIMEOUT:
            if (reconnect()) {
                continue;
            } else {
                return null;
            }
        default:
            return null;
        }
        break;
    }
    return Arrays.asList(sb.toString().split("\n"));
}