Example usage for the java.util.concurrent Semaphore(int permits) constructor

Introduction

This page collects example usages of the java.util.concurrent Semaphore(int permits) constructor from open-source projects.

Prototype

public Semaphore(int permits) 

Document

Creates a Semaphore with the given number of permits and nonfair fairness setting.
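
A minimal sketch of the constructor in use (the permit count and the guarded work here are illustrative, not taken from the examples below): new Semaphore(n) admits at most n threads into a guarded section at a time, and this single-argument constructor uses nonfair ordering, so waiting threads are not guaranteed first-in, first-out access.

import java.util.concurrent.Semaphore;

public class SemaphoreSketch {
    // At most 3 threads may hold a permit at once (nonfair ordering).
    private static final Semaphore PERMITS = new Semaphore(3);

    static void guarded() throws InterruptedException {
        PERMITS.acquire();      // blocks until a permit is available
        try {
            // ... critical section: at most 3 threads run here concurrently ...
        } finally {
            PERMITS.release();  // always return the permit
        }
    }
}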

Usage

From source file:com.impetus.ankush2.cassandra.monitor.CassandraClusterMonitor.java

private void addConfigFileParam(final Parameter parameter, final String fileName, final String loggedUser,
        final String fileType, boolean editNodeParam) {

    final String propertyName = parameter.getName();
    final String propertyValue = parameter.getValue();
    // build the config file path under the cluster conf directory
    final String propertyFilePath = advanceConf.get(CassandraConstants.ClusterProperties.CONF_DIR) + fileName;
    if (editNodeParam) {
        String host = (String) parameterMap.get(Constant.Keys.HOST);
        addParameter(host, propertyName, propertyValue, propertyFilePath, fileType, loggedUser, fileName);
    } else {
        final Semaphore semaphore = new Semaphore(componentConfig.getNodes().size());
        try {
            // iterate over all the nodes.
            for (final String host : componentConfig.getNodes().keySet()) {
                semaphore.acquire();
                AppStoreWrapper.getExecutor().execute(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            addParameter(host, propertyName, propertyValue, propertyFilePath, fileType,
                                    loggedUser, fileName);
                        } finally {
                            // semaphore is a final, non-null local; releasing in finally ensures a
                            // failure in addParameter cannot strand the permit and hang acquire() below
                            semaphore.release();
                        }
                    }
                });
            }
            semaphore.acquire(componentConfig.getNodes().size());
        } catch (Exception e) {
            addAndLogError("Error in updating config file params...");
        }
    }
}
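
This example uses the semaphore as a completion barrier: it starts with one permit per node, the submitting thread takes a permit before dispatching each task, each task returns its permit when it finishes, and the final acquire(nodes.size()) can therefore succeed only after every task has completed. A self-contained sketch of the same idea, with illustrative names and a stand-in task body:

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class CompletionBarrierSketch {
    public static void runOnAll(List<String> hosts) throws InterruptedException {
        ExecutorService pool = Executors.newCachedThreadPool();
        Semaphore barrier = new Semaphore(hosts.size());
        for (String host : hosts) {
            barrier.acquire();             // take one permit per dispatched task
            pool.execute(() -> {
                try {
                    System.out.println("working on " + host); // stand-in for real work
                } finally {
                    barrier.release();     // return the permit when the task ends
                }
            });
        }
        // Succeeds only once all permits are back, i.e. every task has completed.
        barrier.acquire(hosts.size());
        pool.shutdown();
    }
}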

From source file:com.clustercontrol.monitor.run.factory.RunMonitor.java

/**
 * Returns the Semaphore that serializes aggregate monitoring for a single node,
 * creating it on first use.
 * @param monitorType the monitor type
 * @param facilityId the FacilityId of the target node
 * @return the Semaphore for the given monitor type and node
 */
private static Semaphore getSemaphoreForNodeAggregateMonitor(String monitorType, String facilityId) {
    if (nodeSemaphore.containsKey(monitorType) == false) {
        nodeSemaphore.putIfAbsent(monitorType, new ConcurrentHashMap<String, Semaphore>());
    }
    final Semaphore semaphore = nodeSemaphore.get(monitorType).get(facilityId);
    if (semaphore != null) {
        return semaphore;
    }

    final Semaphore newSemaphore = new Semaphore(1);
    final Semaphore oldSemaphore = nodeSemaphore.get(monitorType).putIfAbsent(facilityId, newSemaphore);
    if (oldSemaphore != null) {
        return oldSemaphore;
    } else {
        return newSemaphore;
    }
}
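
The example lazily creates one binary semaphore (in effect a per-node mutex) for each (monitorType, facilityId) pair and resolves creation races with putIfAbsent: whichever thread installs a semaphore first wins, and everyone else uses that instance. On Java 8+ the same map-of-mutexes idiom can be written more compactly with computeIfAbsent; a sketch under that assumption, with illustrative names:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Semaphore;

public class PerKeyMutexSketch {
    private final ConcurrentMap<String, ConcurrentMap<String, Semaphore>> nodeSemaphore =
            new ConcurrentHashMap<>();

    // One permit per (monitorType, facilityId): at most one aggregate run per node at a time.
    Semaphore semaphoreFor(String monitorType, String facilityId) {
        return nodeSemaphore
                .computeIfAbsent(monitorType, t -> new ConcurrentHashMap<>())
                .computeIfAbsent(facilityId, f -> new Semaphore(1));
    }
}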

From source file:org.jumpmind.symmetric.service.impl.DataExtractorService.java

protected OutgoingBatch extractOutgoingBatch(ProcessInfo processInfo, Node targetNode, IDataWriter dataWriter,
        OutgoingBatch currentBatch, boolean useStagingDataWriter, boolean updateBatchStatistics,
        ExtractMode mode) {
    if (currentBatch.getStatus() != Status.OK || ExtractMode.EXTRACT_ONLY == mode) {

        Node sourceNode = nodeService.findIdentity();

        TransformWriter transformExtractWriter = null;
        if (useStagingDataWriter) {
            long memoryThresholdInBytes = parameterService.getLong(ParameterConstants.STREAM_TO_FILE_THRESHOLD);
            transformExtractWriter = createTransformDataWriter(sourceNode, targetNode,
                    new ProcessInfoDataWriter(
                            new StagingDataWriter(memoryThresholdInBytes, nodeService.findIdentityNodeId(),
                                    Constants.STAGING_CATEGORY_OUTGOING, stagingManager),
                            processInfo));
        } else {
            transformExtractWriter = createTransformDataWriter(sourceNode, targetNode,
                    new ProcessInfoDataWriter(dataWriter, processInfo));
        }

        long ts = System.currentTimeMillis();
        long extractTimeInMs = 0L;
        long byteCount = 0L;
        long transformTimeInMs = 0L;

        if (currentBatch.getStatus() == Status.IG) {
            Batch batch = new Batch(BatchType.EXTRACT, currentBatch.getBatchId(), currentBatch.getChannelId(),
                    symmetricDialect.getBinaryEncoding(), sourceNode.getNodeId(), currentBatch.getNodeId(),
                    currentBatch.isCommonFlag());
            batch.setIgnored(true);
            try {
                IStagedResource resource = getStagedResource(currentBatch);
                if (resource != null) {
                    resource.delete();
                }
                DataContext ctx = new DataContext(batch);
                ctx.put("targetNode", targetNode);
                ctx.put("sourceNode", sourceNode);
                transformExtractWriter.open(ctx);
                transformExtractWriter.start(batch);
                transformExtractWriter.end(batch, false);
            } finally {
                transformExtractWriter.close();
            }
        } else if (!isPreviouslyExtracted(currentBatch)) {
            int maxPermits = parameterService.getInt(ParameterConstants.CONCURRENT_WORKERS);
            String semaphoreKey = useStagingDataWriter ? Long.toString(currentBatch.getBatchId())
                    : currentBatch.getNodeBatchId();
            Semaphore lock = null;
            try {
                synchronized (locks) {
                    lock = locks.get(semaphoreKey);
                    if (lock == null) {
                        lock = new Semaphore(maxPermits);
                        locks.put(semaphoreKey, lock);
                    }
                    try {
                        lock.acquire();
                    } catch (InterruptedException e) {
                        throw new org.jumpmind.exception.InterruptedException(e);
                    }
                }

                synchronized (lock) {
                    if (!isPreviouslyExtracted(currentBatch)) {
                        currentBatch.setExtractCount(currentBatch.getExtractCount() + 1);
                        if (updateBatchStatistics) {
                            changeBatchStatus(Status.QY, currentBatch, mode);
                        }
                        currentBatch.resetStats();
                        IDataReader dataReader = new ExtractDataReader(symmetricDialect.getPlatform(),
                                new SelectFromSymDataSource(currentBatch, sourceNode, targetNode, processInfo));
                        DataContext ctx = new DataContext();
                        ctx.put(Constants.DATA_CONTEXT_TARGET_NODE, targetNode);
                        ctx.put(Constants.DATA_CONTEXT_TARGET_NODE_ID, targetNode.getNodeId());
                        ctx.put(Constants.DATA_CONTEXT_TARGET_NODE_EXTERNAL_ID, targetNode.getExternalId());
                        ctx.put(Constants.DATA_CONTEXT_TARGET_NODE_GROUP_ID, targetNode.getNodeGroupId());
                        ctx.put(Constants.DATA_CONTEXT_SOURCE_NODE, sourceNode);
                        ctx.put(Constants.DATA_CONTEXT_SOURCE_NODE_ID, sourceNode.getNodeId());
                        ctx.put(Constants.DATA_CONTEXT_SOURCE_NODE_EXTERNAL_ID, sourceNode.getExternalId());
                        ctx.put(Constants.DATA_CONTEXT_SOURCE_NODE_GROUP_ID, sourceNode.getNodeGroupId());

                        new DataProcessor(dataReader, transformExtractWriter, "extract").process(ctx);
                        extractTimeInMs = System.currentTimeMillis() - ts;
                        Statistics stats = transformExtractWriter.getNestedWriter().getStatistics().values()
                                .iterator().next();
                        transformTimeInMs = stats.get(DataWriterStatisticConstants.TRANSFORMMILLIS);
                        extractTimeInMs = extractTimeInMs - transformTimeInMs;
                        byteCount = stats.get(DataWriterStatisticConstants.BYTECOUNT);
                    }
                }
            } catch (RuntimeException ex) {
                IStagedResource resource = getStagedResource(currentBatch);
                if (resource != null) {
                    resource.close();
                    resource.delete();
                }
                throw ex;
            } finally {
                lock.release();
                synchronized (locks) {
                    if (lock.availablePermits() == maxPermits) {
                        locks.remove(semaphoreKey);
                    }
                }
            }
        }

        if (updateBatchStatistics) {
            long dataEventCount = currentBatch.getDataEventCount();
            long insertEventCount = currentBatch.getInsertEventCount();
            currentBatch = requeryIfEnoughTimeHasPassed(ts, currentBatch);

            // preserve in the case of a reload event
            if (dataEventCount > currentBatch.getDataEventCount()) {
                currentBatch.setDataEventCount(dataEventCount);
            }

            // preserve in the case of a reload event
            if (insertEventCount > currentBatch.getInsertEventCount()) {
                currentBatch.setInsertEventCount(insertEventCount);
            }

            // only update the current batch after we have possibly
            // "re-queried"
            if (extractTimeInMs > 0) {
                currentBatch.setExtractMillis(extractTimeInMs);
            }

            if (byteCount > 0) {
                currentBatch.setByteCount(byteCount);
                statisticManager.incrementDataBytesExtracted(currentBatch.getChannelId(), byteCount);
                statisticManager.incrementDataExtracted(currentBatch.getChannelId(),
                        currentBatch.getExtractCount());
            }
        }

    }

    return currentBatch;
}
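
Here the semaphore bounds how many threads may extract the same batch at once: a per-key semaphore sized from the CONCURRENT_WORKERS parameter is created on demand under a shared lock map, and the entry is removed once all permits are back so idle keys do not accumulate. A stripped-down sketch of that acquire/release-with-cleanup skeleton (names and the permit count are illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Semaphore;

public class KeyedThrottleSketch {
    private final Map<String, Semaphore> locks = new HashMap<>();
    private final int maxPermits = 4; // stand-in for a configured worker count

    void withPermit(String key, Runnable work) throws InterruptedException {
        Semaphore lock;
        synchronized (locks) {
            lock = locks.computeIfAbsent(key, k -> new Semaphore(maxPermits));
            // Acquire while holding the map monitor, as the source does, so the cleanup
            // below cannot remove a semaphore another thread is about to acquire.
            lock.acquire();
        }
        try {
            work.run();
        } finally {
            lock.release();
            synchronized (locks) {
                if (lock.availablePermits() == maxPermits) {
                    locks.remove(key); // no holders left: drop the idle entry
                }
            }
        }
    }
}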

From source file:edu.brown.hstore.PartitionExecutor.java

/**
 * Primary run method that is invoked a single time when the thread is
 * started. Has the opportunity to do startup config.
 */
@Override
public void run() {
    assert (this.hstore_site != null);
    assert (this.hstore_coordinator != null);
    assert (this.self == null);
    this.self = Thread.currentThread();
    this.self.setName(HStoreThreadManager.getThreadName(this.hstore_site, this.partitionId));

    if (hstore_conf.site.cpu_affinity) {
        this.hstore_site.getThreadManager().registerEEThread(partition);
    }

    // *********************************** DEBUG ***********************************
    if (hstore_conf.site.exec_validate_work) {
        LOG.warn("Enabled Distributed Transaction Checking");
    }
    // *********************************** DEBUG ***********************************

    // Things that we will need in the loop below
    AbstractTransaction current_txn = null;
    VoltMessage work = null;
    boolean stop = false;

    try {
        // Setup shutdown lock
        this.shutdown_latch = new Semaphore(0);

        if (d)
            LOG.debug("Starting PartitionExecutor run loop...");
        while (stop == false && this.isShuttingDown() == false) {
            this.currentTxnId = null;
            work = null;

            // -------------------------------
            // Poll Work Queue
            // -------------------------------
            try {
                work = this.work_queue.poll();
                if (work == null) {
                    // See if there is anything that we can do while we wait
                    // XXX this.utilityWork(null);

                    if (t)
                        LOG.trace("Partition " + this.partitionId + " queue is empty. Waiting...");
                    if (hstore_conf.site.exec_profiling)
                        this.work_idle_time.start();
                    work = this.work_queue.take();
                    if (hstore_conf.site.exec_profiling)
                        this.work_idle_time.stop();
                }
            } catch (InterruptedException ex) {
                if (d && this.isShuttingDown() == false)
                    LOG.debug("Unexpected interuption while polling work queue. Halting PartitionExecutor...",
                            ex);
                stop = true;
                break;
            }

            // -------------------------------
            // Transactional Work
            // -------------------------------
            if (work instanceof TransactionInfoBaseMessage) {
                this.currentTxnId = ((TransactionInfoBaseMessage) work).getTxnId();
                current_txn = hstore_site.getTransaction(this.currentTxnId);
                if (current_txn == null) {
                    String msg = String.format("No transaction state for txn #%d [%s]", this.currentTxnId,
                            work.getClass().getSimpleName());
                    LOG.error(msg + "\n" + work.toString());
                    throw new ServerFaultException(msg, this.currentTxnId);
                }
                // If this transaction has already been aborted and they are
                // trying to give us
                // something that isn't a FinishTaskMessage, then we won't
                // bother processing it
                else if (current_txn.isAborted() && (work instanceof FinishTaskMessage) == false) {
                    if (d)
                        LOG.debug(
                                String.format("%s - Was marked as aborted. Will not process %s on partition %d",
                                        current_txn, work.getClass().getSimpleName(), this.partitionId));
                    continue;
                }

                // -------------------------------
                // Execute Query Plan Fragments
                // -------------------------------
                if (work instanceof FragmentTaskMessage) {
                    FragmentTaskMessage ftask = (FragmentTaskMessage) work;
                    WorkFragment fragment = ftask.getWorkFragment();
                    assert (fragment != null);

                    // Get the ParameterSet array for this WorkFragment
                    // It can either be attached to the AbstractTransaction
                    // handle if it came
                    // over the wire directly from the txn's base partition,
                    // or it can be attached
                    // as for prefetch WorkFragments
                    ParameterSet parameters[] = null;
                    if (fragment.getPrefetch()) {
                        parameters = current_txn.getPrefetchParameterSets();
                        current_txn.markExecPrefetchQuery(this.partitionId);
                    } else {
                        parameters = current_txn.getAttachedParameterSets();
                    }
                    parameters = this.getFragmentParameters(current_txn, fragment, parameters);
                    assert (parameters != null);

                    // At this point we know that we are either the current
                    // dtxn or the current dtxn is null
                    // We will allow any read-only transaction to commit if
                    // (1) The WorkFragment for the remote txn is read-only
                    // (2) This txn has always been read-only up to this
                    // point at this partition
                    ExecutionMode newMode = null;
                    if (hstore_conf.site.exec_speculative_execution) {
                        newMode = (fragment.getReadOnly() && current_txn.isExecReadOnly(this.partitionId)
                                ? ExecutionMode.COMMIT_READONLY
                                : ExecutionMode.COMMIT_NONE);
                    } else {
                        newMode = ExecutionMode.DISABLED;
                    }
                    exec_lock.lock();
                    try {
                        // There is no current DTXN, so that means it's us!
                        if (this.currentDtxn == null) {
                            this.setCurrentDtxn(current_txn);
                            if (d)
                                LOG.debug(String.format(
                                        "Marking %s as current DTXN on partition %d [nextMode=%s]", current_txn,
                                        this.partitionId, newMode));
                        }
                        // There is a current DTXN but it's not us!
                        // That means we need to block ourselves until it
                        // finishes
                        else if (this.currentDtxn != current_txn) {
                            if (d)
                                LOG.warn(String.format(
                                        "%s - Blocking on partition %d until current Dtxn %s finishes",
                                        current_txn, this.partitionId, this.currentDtxn));
                            this.currentBlockedTxns.add(ftask);
                            continue;
                        }
                        assert (this.currentDtxn == current_txn) : String.format(
                                "Trying to execute a second Dtxn %s before the current one has finished [current=%s]",
                                current_txn, this.currentDtxn);
                        this.setExecutionMode(current_txn, newMode);
                    } finally {
                        exec_lock.unlock();
                    } // SYNCH

                    this.processWorkFragment(current_txn, fragment, parameters);

                    // -------------------------------
                    // Invoke Stored Procedure
                    // -------------------------------
                } else if (work instanceof InitiateTaskMessage) {
                    if (hstore_conf.site.exec_profiling)
                        this.work_exec_time.start();
                    InitiateTaskMessage itask = (InitiateTaskMessage) work;

                    // If this is a MapReduceTransaction handle, we actually
                    // want to get the
                    // inner LocalTransaction handle for this partition. The
                    // MapReduceTransaction
                    // is just a placeholder
                    if (current_txn instanceof MapReduceTransaction) {
                        MapReduceTransaction orig_ts = (MapReduceTransaction) current_txn;
                        current_txn = orig_ts.getLocalTransaction(this.partitionId);
                        assert (current_txn != null) : "Unexpected null LocalTransaction handle from "
                                + orig_ts;
                    }

                    try {
                        this.processInitiateTaskMessage((LocalTransaction) current_txn, itask);
                    } catch (Throwable ex) {
                        LOG.error(String.format("Unexpected error when executing %s\n%s", current_txn,
                                current_txn.debug()));
                        throw ex;
                    } finally {
                        if (hstore_conf.site.exec_profiling)
                            this.work_exec_time.stop();
                    }

                    // -------------------------------
                    // Finish Transaction
                    // -------------------------------
                } else if (work instanceof FinishTaskMessage) {
                    FinishTaskMessage ftask = (FinishTaskMessage) work;
                    this.finishTransaction(current_txn, (ftask.getStatus() == Status.OK));
                }

                // -------------------------------
                // PotentialSnapshotWorkMessage
                // -------------------------------
            } else if (work instanceof PotentialSnapshotWorkMessage) {
                m_snapshotter.doSnapshotWork(ee);

                // -------------------------------
                // BAD MOJO!
                // -------------------------------
            } else if (work != null) {
                throw new ServerFaultException("Unexpected work message in queue: " + work, this.currentTxnId);
            }

            // Is there a better way to do this?
            this.work_throttler.checkThrottling(false);

            if (hstore_conf.site.exec_profiling && this.currentTxnId != null) {
                this.lastExecutedTxnId = this.currentTxnId;
                this.currentTxnId = null;
            }
        } // WHILE
    } catch (final Throwable ex) {
        if (this.isShuttingDown() == false) {
            ex.printStackTrace();
            LOG.fatal(String.format("Unexpected error for PartitionExecutor partition #%d [%s]%s",
                    this.partitionId, (current_txn != null ? " - " + current_txn : ""), ex), ex);
            if (current_txn != null)
                LOG.fatal("TransactionState Dump:\n" + current_txn.debug());

        }
        this.hstore_coordinator.shutdownCluster(ex);
    } finally {
        if (d) {
            String txnDebug = "";
            if (current_txn != null && current_txn.getBasePartition() == this.partitionId) {
                txnDebug = "\n" + current_txn.debug();
            }
            LOG.warn(String.format("PartitionExecutor %d is stopping.%s%s", this.partitionId,
                    (this.currentTxnId != null ? " In-Flight Txn: #" + this.currentTxnId : ""), txnDebug));
        }

        // Release the shutdown latch in case anybody waiting for us
        this.shutdown_latch.release();

        // Stop HStoreMessenger (because we're nice)
        if (this.isShuttingDown() == false) {
            if (this.hstore_coordinator != null)
                this.hstore_coordinator.shutdown();
        }
    }
}
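
In this run loop, new Semaphore(0) serves as a one-shot shutdown latch: the executor creates it with zero permits at startup, so any thread that acquires it blocks until the finally block's single release() signals that the loop has exited. A micro-sketch of the same latch idiom, with illustrative names:

import java.util.concurrent.Semaphore;

public class ShutdownLatchSketch {
    private final Semaphore shutdownLatch = new Semaphore(0); // no permits until we finish

    void runLoop() {
        try {
            // ... main work loop ...
        } finally {
            shutdownLatch.release(); // wake anyone blocked in awaitShutdown()
        }
    }

    void awaitShutdown() throws InterruptedException {
        shutdownLatch.acquire(); // blocks until runLoop() has exited
    }
}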

From source file:com.impetus.ankush2.cassandra.monitor.CassandraClusterMonitor.java

private void editConfigFileParam(final Parameter parameter, final String fileName, final String loggedUser,
        final String fileType, boolean editNodeParam) {
    final String propertyName = parameter.getName();
    final String newValue = parameter.getValue();
    final String propertyFilePath = advanceConf.get(CassandraConstants.ClusterProperties.CONF_DIR) + fileName;
    if (editNodeParam) {
        String host = (String) parameterMap.get(Constant.Keys.HOST);
        editParameter(host, propertyName, newValue, propertyFilePath, fileType, loggedUser, fileName);
    } else {
        final Semaphore semaphore = new Semaphore(componentConfig.getNodes().size());
        try {
            for (final String host : componentConfig.getNodes().keySet()) {
                semaphore.acquire();
                AppStoreWrapper.getExecutor().execute(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            editParameter(host, propertyName, newValue, propertyFilePath, fileType,
                                    loggedUser, fileName);
                        } finally {
                            // semaphore is final and non-null; release even if editParameter throws
                            semaphore.release();
                        }
                    }
                });
            }
            semaphore.acquire(componentConfig.getNodes().size());
        } catch (Exception e) {
            addAndLogError("Error in updating config file params...");
        }
    }
}

From source file:com.impetus.ankush2.cassandra.monitor.CassandraClusterMonitor.java

private void deleteConfigFileParam(final Parameter parameter, final String fileName, final String fileType,
        boolean editNodeParam) {
    final String propertyName = parameter.getName();
    final String propertyFilePath = advanceConf.get(CassandraConstants.ClusterProperties.CONF_DIR) + fileName;

    if (editNodeParam) {
        String host = (String) parameterMap.get(Constant.Keys.HOST);
        deleteParameter(host, propertyName, propertyFilePath, fileType, fileName);
    } else {
        final Semaphore semaphore = new Semaphore(componentConfig.getNodes().size());
        try {
            for (final String host : componentConfig.getNodes().keySet()) {
                semaphore.acquire();
                AppStoreWrapper.getExecutor().execute(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            deleteParameter(host, propertyName, propertyFilePath, fileType, fileName);
                        } finally {
                            // semaphore is final and non-null; release even if deleteParameter throws
                            semaphore.release();
                        }
                    }
                });
            }
            semaphore.acquire(componentConfig.getNodes().size());
        } catch (Exception e) {
            addAndLogError("Error in updating config file params...");
        }
    }
}

From source file:fur.shadowdrake.minecraft.InstallPanel.java

@SuppressWarnings({ "Convert2Lambda" })
private boolean downloadFile(String filename, String local) throws NetworkException {
    final Semaphore semaphore = new Semaphore(0);
    success = false;
    while (true) {
        result = ftpClient.openDataChannel(new ActionListener() {
            @Override
            public void actionPerformed(ActionEvent e) {
                if (e.getID() == FtpClient.FTP_OK) {
                    try {
                        InputStream is;
                        FileOutputStream fos;

                        is = ((Socket) e.getSource()).getInputStream();
                        fos = new FileOutputStream(new File(workingDir, local));
                        byte[] buffer = new byte[4096];
                        for (int n = is.read(buffer); n > 0; n = is.read(buffer)) {
                            fos.write(buffer, 0, n);
                            log.advance(n);
                        }
                        fos.close();
                        success = true;
                    } catch (IOException ex) {
                        Logger.getLogger(InstallPanel.class.getName()).log(Level.SEVERE, "Download", ex);
                        log.println("Faild to save file.");
                        success = false;
                    }
                }
            }
        });
        switch (result) {
        case FtpClient.FTP_OK:
            int size = ftpClient.retr(filename, (ActionEvent e) -> {
                ftpClient.closeDataChannel();
                semaphore.release();
            });
            if (size < 0) {
                ftpClient.abandonDataChannel();
            } else {
                log.reset();
                log.setMaximum(size);
            }
            try {
                semaphore.acquire();
            } catch (InterruptedException ex) {
                return false;
            }
            break;
        case FtpClient.FTP_TIMEOUT:
            if (reconnect()) {
                continue;
            } else {
                ftpClient.abandonDataChannel();
                return false;
            }
        default:
            ftpClient.abandonDataChannel();
            return false;
        }
        break;
    }
    return success;
}
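
Here the zero-permit semaphore bridges a callback and its caller: the data-channel completion callback calls release(), and the downloading thread parks in acquire() until that callback fires, returning false if it is interrupted first. A sketch of the callback-to-caller handoff (the listener interface and worker thread are illustrative):

import java.util.concurrent.Semaphore;

public class AsyncHandoffSketch {
    interface Completion { void onDone(); }

    static void startAsync(Completion cb) {
        new Thread(() -> {
            // ... asynchronous work ...
            cb.onDone();
        }).start();
    }

    static boolean awaitAsync() {
        Semaphore done = new Semaphore(0);  // no permits until the callback fires
        startAsync(done::release);          // the callback hands a permit to the waiter
        try {
            done.acquire();                 // park until the async work completes
            return true;
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt();
            return false;
        }
    }
}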

From source file:org.codice.ddf.catalog.content.monitor.AsyncFileAlterationObserverTest.java

private void initSemaphore(int latchNo) {
    doTestWrapper = this::delayFunc;
    artificialDelay = new Semaphore(0);
    delayLatch = new CountDownLatch(latchNo);
}

From source file:io.pravega.client.stream.impl.ControllerImplTest.java

@Test
public void testParallelCreateStream() throws Exception {
    final ExecutorService executorService = Executors.newFixedThreadPool(10);
    Semaphore createCount = new Semaphore(-19);
    AtomicBoolean success = new AtomicBoolean(true);
    for (int i = 0; i < 10; i++) {
        executorService.submit(() -> {
            for (int j = 0; j < 2; j++) {
                try {
                    CompletableFuture<Boolean> createStreamStatus;
                    createStreamStatus = controllerClient
                            .createStream(StreamConfiguration.builder().streamName("streamparallel")
                                    .scope("scope1").scalingPolicy(ScalingPolicy.fixed(1)).build());
                    log.info("{}", createStreamStatus.get());
                    assertTrue(createStreamStatus.get());
                    createCount.release();
                } catch (Exception e) {
                    log.error("Exception when creating stream: {}", e);

                    // Don't wait for other threads to complete.
                    success.set(false);
                    createCount.release(20);
                }
            }
        });
    }
    createCount.acquire();
    executorService.shutdownNow();
    assertTrue(success.get());
}
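
Note the negative initial count: new Semaphore(-19) is legal, and acquire() cannot succeed until releases push the count above zero, which takes twenty release() calls here (10 threads x 2 creations each). The main thread's acquire() therefore doubles as a wait-for-all-20 barrier, and a failing task calls release(20) so the waiter unblocks immediately instead of hanging. A runnable micro-sketch of the negative-permit barrier, with illustrative counts:

import java.util.concurrent.Semaphore;

public class NegativePermitBarrierSketch {
    public static void main(String[] args) throws InterruptedException {
        final int tasks = 20;
        // acquire() needs the count to reach 1, so all 20 releases must happen first.
        Semaphore allDone = new Semaphore(1 - tasks); // i.e. new Semaphore(-19)
        for (int i = 0; i < tasks; i++) {
            new Thread(allDone::release).start(); // each task reports completion
        }
        allDone.acquire(); // returns only after all 20 tasks have released
        System.out.println("all tasks finished");
    }
}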

From source file:org.commoncrawl.service.listcrawler.ProxyServlet.java

@Override
public void doGet(final HttpServletRequest req, final HttpServletResponse response)
        throws ServletException, IOException {

    // allocate a response data object ... which will be used by async thread to pass data to calling thread...
    final AsyncResponse responseData = new AsyncResponse();

    String queryString = req.getQueryString();
    final String originalPath = req.getParameter("url");
    final String format = (req.getParameter("renderAs") != null) ? req.getParameter("renderAs")
            : PROXY_RENDER_TYPE_NONE;
    final String timeoutStr = req.getParameter("timeout");
    final String skipHTTPGET = req.getParameter("nocachenodice");

    final long desiredTimeOutInMS = (timeoutStr != null) ? Long.parseLong(timeoutStr) : 30000;
    final boolean skipHTTPGet = (skipHTTPGET != null && skipHTTPGET.equals("1"));
    final Semaphore semaphore = new Semaphore(0);

    //LOG.info("Got Request:" + originalPath);

    final long requestStartTime = System.currentTimeMillis();

    //LOG.info("Processing Request:" + originalPath);

    String hostName = (originalPath != null) ? URLUtils.fastGetHostFromURL(originalPath) : "";
    String fullPath = null;
    if (originalPath == null || !originalPath.startsWith("http:") || hostName.length() == 0
            || queryString == null) {
        LOG.info("URL From Proxy Request:" + originalPath + " is Invalid. Sending 400 Result Code");
        responseData.setHttpErrorResponse(400, "URL From Proxy Request:" + originalPath + " is Invalid");
    } else {

        // build url path from query string 
        int pathIndex = queryString.indexOf("url=");
        // grab the whole path ... 
        fullPath = queryString.substring(pathIndex + "url=".length());
        // unescape it 
        fullPath = URLDecoder.decode(fullPath, "UTF-8");

        //LOG.info("Doing Cache Lookup for URL:" + fullPath);
        boolean isAsyncOperation = checkCacheForURLV2(fullPath, responseData, semaphore, desiredTimeOutInMS,
                skipHTTPGet);
        if (isAsyncOperation) {
            //LOG.info("Waiting on Async Completion for URL:" + fullPath);
            semaphore.acquireUninterruptibly();
            //LOG.info("Done Waiting for Async Completion for URL:" + fullPath);
        }
    }

    // upon return we need to check the response object ... 
    if (responseData.getResponseType() == AsyncResponse.ResponseType.CacheItemResponse) {
        // send cache item response ... 
        sendCacheItemResponse(req, response, responseData.getCacheItem(), false, format, responseData,
                requestStartTime);
    } else if (responseData.getResponseType() == AsyncResponse.ResponseType.CrawlURLResponse) {
        sendCrawlURLResponse(req, response, responseData.getCrawlURL(), format, responseData, requestStartTime);
    } else if (responseData.getResponseType() == AsyncResponse.ResponseType.S3Response) {
        sendS3ItemResponse(req, response, responseData.getArcFileItem(), format, responseData,
                requestStartTime);
    } else {
        response.sendError(responseData.getHttpErrorCode(), responseData.getHttpErrorDesc());
        ProxyServer.getSingleton().logProxyFailure(responseData.getHttpErrorCode(),
                responseData.getHttpErrorDesc(), fullPath, "", responseData.getStartTime());
    }
}
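
As in the FTP example above, the servlet's zero-permit semaphore exists purely for asynchronous handoff: when the cache lookup goes asynchronous, the request thread blocks in acquireUninterruptibly() until the completion path releases the permit, and only then inspects the response object the async worker populated. Using acquireUninterruptibly rather than acquire means the wait cannot be cut short by interruption, so the thread never proceeds with a half-populated response.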