Example usage for java.util.concurrent Executors newSingleThreadExecutor

List of usage examples for java.util.concurrent Executors newSingleThreadExecutor

Introduction

This page lists example usages of java.util.concurrent Executors.newSingleThreadExecutor() taken from real-world projects.

Prototype

public static ExecutorService newSingleThreadExecutor() 

Document

Creates an Executor that uses a single worker thread operating off an unbounded queue.
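
As a quick orientation before the project examples below, here is a minimal, self-contained sketch of the basic pattern, using only JDK classes and a hypothetical task: submit a Callable, block on the Future for the result, and always shut the executor down.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SingleThreadExecutorBasics {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        try {
            // Tasks submitted to this executor run sequentially on one worker thread.
            Future<String> greeting = executor.submit(new Callable<String>() {
                @Override
                public String call() {
                    return "Hello from " + Thread.currentThread().getName();
                }
            });
            System.out.println(greeting.get()); // blocks until the task completes
        } finally {
            executor.shutdown(); // no new tasks; already submitted tasks still finish
        }
    }
}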

Usage

From source file:eu.esdihumboldt.hale.io.wfs.AbstractWFSWriter.java

@Override
public IOReport execute(ProgressIndicator progress) throws IOProviderConfigurationException, IOException {
    progress.begin("WFS Transaction", ProgressIndicator.UNKNOWN);

    // configure internal provider
    internalProvider.setDocumentWrapper(createTransaction());

    final PipedInputStream pIn = new PipedInputStream();
    PipedOutputStream pOut = new PipedOutputStream(pIn);
    currentExecuteStream = pOut;

    Future<Response> futureResponse = null;
    IOReporter reporter = createReporter();
    ExecutorService executor = Executors.newSingleThreadExecutor();
    try {
        // read the stream (in another thread)
        futureResponse = executor.submit(new Callable<Response>() {

            @Override
            public Response call() throws Exception {

                Proxy proxy = ProxyUtil.findProxy(targetWfs.getLocation());
                Request request = Request.Post(targetWfs.getLocation()).bodyStream(pIn,
                        ContentType.APPLICATION_XML);
                Executor executor = FluentProxyUtil.setProxy(request, proxy);

                // authentication
                String user = getParameter(PARAM_USER).as(String.class);
                String password = getParameter(PARAM_PASSWORD).as(String.class);

                if (user != null) {
                    // target host
                    int port = targetWfs.getLocation().getPort();
                    String hostName = targetWfs.getLocation().getHost();
                    String scheme = targetWfs.getLocation().getScheme();
                    HttpHost host = new HttpHost(hostName, port, scheme);

                    // add credentials
                    Credentials cred = ClientProxyUtil.createCredentials(user, password);
                    executor.auth(new AuthScope(host), cred);
                    executor.authPreemptive(host);
                }

                try {
                    return executor.execute(request);
                } finally {
                    pIn.close();
                }
            }
        });

        // write the stream
        SubtaskProgressIndicator subprogress = new SubtaskProgressIndicator(progress);
        reporter = (IOReporter) super.execute(subprogress);
    } finally {
        executor.shutdown();
    }

    try {
        Response response = futureResponse.get();
        HttpResponse res = response.returnResponse();
        int statusCode = res.getStatusLine().getStatusCode();
        XPathFactory xPathfactory = XPathFactory.newInstance();
        XPath xpath = xPathfactory.newXPath();
        if (statusCode >= 200 && statusCode < 300) {
            // success
            reporter.setSuccess(reporter.isSuccess());

            // construct summary from response
            try {
                Document responseDoc = parseResponse(res.getEntity());

                // totalInserted
                String inserted = xpath.compile("//TransactionSummary/totalInserted").evaluate(responseDoc);
                // XXX totalUpdated
                // XXX totalReplaced
                // XXX totalDeleted
                reporter.setSummary("Inserted " + inserted + " features.");
            } catch (XPathExpressionException e) {
                log.error("Error in XPath used to evaluate service response");
            } catch (ParserConfigurationException | SAXException e) {
                reporter.error(new IOMessageImpl(MessageFormat.format(
                        "Server returned status code {0}, but could not parse server response", statusCode),
                        e));
                reporter.setSuccess(false);
            }
        } else {
            // failure
            reporter.error(
                    new IOMessageImpl("Server reported failure with code " + res.getStatusLine().getStatusCode()
                            + ": " + res.getStatusLine().getReasonPhrase(), null));
            reporter.setSuccess(false);

            try {
                Document responseDoc = parseResponse(res.getEntity());
                String errorText = xpath.compile("//ExceptionText/text()").evaluate(responseDoc);
                reporter.setSummary("Request failed: " + errorText);
            } catch (XPathExpressionException e) {
                log.error("Error in XPath used to evaluate service response");
            } catch (ParserConfigurationException | SAXException e) {
                reporter.error(new IOMessageImpl("Could not parse server response", e));
                reporter.setSuccess(false);
            }
        }
    } catch (ExecutionException | InterruptedException e) {
        reporter.error(new IOMessageImpl("Failed to execute WFS-T request", e));
        reporter.setSuccess(false);
    }

    progress.end();

    return reporter;
}
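
The writer above pumps the request body through a PipedOutputStream/PipedInputStream pair while the single worker thread performs the HTTP POST and the calling thread produces the data. Below is a stripped-down, JDK-only sketch of that producer/consumer shape; the HTTP client is replaced by a plain byte counter and all names are hypothetical.

import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class PipedRequestSketch {
    public static void main(String[] args) throws Exception {
        PipedInputStream pIn = new PipedInputStream();
        PipedOutputStream pOut = new PipedOutputStream(pIn);

        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<Integer> consumed;
        try {
            // Consumer: reads the piped stream on the single worker thread
            // (stands in for the HTTP POST in the WFS example above).
            consumed = executor.submit(new Callable<Integer>() {
                @Override
                public Integer call() throws Exception {
                    try {
                        int total = 0;
                        byte[] buf = new byte[1024];
                        int n;
                        while ((n = pIn.read(buf)) != -1) {
                            total += n;
                        }
                        return total;
                    } finally {
                        pIn.close();
                    }
                }
            });

            // Producer: the calling thread writes the "request body" and closes the pipe.
            try {
                pOut.write("pretend WFS transaction payload".getBytes(StandardCharsets.UTF_8));
            } finally {
                pOut.close();
            }
        } finally {
            executor.shutdown();
        }

        System.out.println("Worker consumed " + consumed.get() + " bytes");
    }
}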

From source file:io.druid.server.initialization.JettyTest.java

@Test
public void testThreadNotStuckOnException() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    Executors.newSingleThreadExecutor().execute(new Runnable() {
        @Override
        public void run() {
            try {
                ListenableFuture<InputStream> go = client.go(
                        new Request(HttpMethod.GET,
                                new URL("http://localhost:" + port + "/exception/exception")),
                        new InputStreamResponseHandler());
                StringWriter writer = new StringWriter();
                IOUtils.copy(go.get(), writer, "utf-8");
            } catch (IOException e) {
                // Expected.
            } catch (Throwable t) {
                Throwables.propagate(t);
            }
            latch.countDown();
        }
    });

    latch.await(5, TimeUnit.SECONDS);
}
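
The Jetty test fires its request from a throwaway single-thread executor and synchronizes with a CountDownLatch, but never shuts the executor down. A minimal sketch of the same latch pattern that also releases the worker thread afterwards (hypothetical task):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class LatchedBackgroundTask {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        ExecutorService executor = Executors.newSingleThreadExecutor();
        try {
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        // do the work that may throw (the HTTP call in the test above)
                        System.out.println("background work on " + Thread.currentThread().getName());
                    } finally {
                        latch.countDown(); // always release the waiting thread
                    }
                }
            });
            boolean finished = latch.await(5, TimeUnit.SECONDS);
            System.out.println("finished in time: " + finished);
        } finally {
            executor.shutdown();
        }
    }
}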

From source file:oz.hadoop.yarn.api.core.LocalApplicationLaunchTests.java

@Test(timeout = 30000)
public void validateLongLivedJavaContainerLaunchWithTermination() throws Exception {
    ExecutorService executor = Executors.newSingleThreadExecutor();
    YarnApplication<DataProcessor> yarnApplication = YarnAssembly
            .forApplicationContainer(SimpleRandomDelayContainer.class).containerCount(4).withApplicationMaster()
            .build("sample-yarn-application");

    final DataProcessor dataProcessor = yarnApplication.launch();
    assertEquals(4, dataProcessor.containers());
    executor.execute(new Runnable() {
        @Override
        public void run() {
            for (int i = 0; i < 1000000; i++) {
                for (int j = 0; j < dataProcessor.containers(); j++) {
                    try {
                        dataProcessor.process(ByteBuffer.wrap(("Hello Yarn!-" + i).getBytes()));
                    } catch (Exception e) {
                        e.printStackTrace();
                        throw new IllegalStateException("Failed to submit data for processing", e);
                    }
                }
            }
        }
    });

    assertTrue(yarnApplication.isRunning());
    Thread.sleep(new Random().nextInt(5000));
    yarnApplication.terminate();
    assertFalse(yarnApplication.isRunning());
}

From source file:de.tum.in.socket.client.SocketClient.java

public SocketClient() {
    super(APP_ID);
    this.m_worker = Executors.newSingleThreadExecutor();
}

From source file:com.yahoo.omid.tso.TSOTestBase.java

@Before
public void setupTSO() throws Exception {
    if (!LocalBookKeeper.waitForServerUp("localhost:2181", 10000)) {
        throw new Exception("Error starting zookeeper/bookkeeper");
    }

    /*
     * TODO: Fix LocalBookKeeper to wait until the bookies are up
     * instead of waiting only until the zookeeper server is up.
     */
    Thread.sleep(500);

    LOG.info("Starting TSO");
    tso = new TSOServer(
            TSOServerConfig.configFactory(1234, 0, recoveryEnabled(), 4, 2, new String("localhost:2181")));
    tsoExecutor = Executors.newSingleThreadExecutor();
    tsoExecutor.execute(tso);
    TestUtils.waitForSocketListening("localhost", 1234, 100);
    LOG.info("Finished loading TSO");

    state = tso.getState();

    Thread.currentThread().setName("JUnit Thread");

    setupClient();
}
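
Here the single worker simply hosts a long-lived server Runnable. A compact, JDK-only sketch of running and then stopping such a background loop, assuming the Runnable honors interruption (all names hypothetical):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class BackgroundServerSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService serverExecutor = Executors.newSingleThreadExecutor();

        // A long-lived, interruptible loop standing in for the server Runnable.
        serverExecutor.execute(new Runnable() {
            @Override
            public void run() {
                while (!Thread.currentThread().isInterrupted()) {
                    try {
                        Thread.sleep(100); // "serve" until asked to stop
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt(); // restore the flag and exit
                    }
                }
            }
        });

        Thread.sleep(500); // let it run briefly

        serverExecutor.shutdownNow(); // interrupts the worker thread
        boolean stopped = serverExecutor.awaitTermination(5, TimeUnit.SECONDS);
        System.out.println("server stopped: " + stopped);
    }
}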

From source file:fiskinfoo.no.sintef.fiskinfoo.Http.BarentswatchApiRetrofit.BarentswatchApi.java

public BarentswatchApi() {
    String directoryPath = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_DOWNLOADS)
            .toString();
    String fileName = directoryPath + "/FiskInfo/api_setting.json";
    File file = new File(fileName);
    String environment = null;

    if (file.exists()) {
        InputStream inputStream;
        InputStreamReader streamReader;
        JsonReader jsonReader;

        try {
            inputStream = new BufferedInputStream(new FileInputStream(file));
            streamReader = new InputStreamReader(inputStream, "UTF-8");
            jsonReader = new JsonReader(streamReader);

            jsonReader.beginObject();
            while (jsonReader.hasNext()) {
                String name = jsonReader.nextName();
                if (name.equals("environment")) {
                    environment = jsonReader.nextString();
                } else {
                    jsonReader.skipValue();
                }
            }
            jsonReader.endObject();
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        } catch (IllegalStateException e) {
            e.printStackTrace();
        }
    }

    targetProd = !"pilot".equals(environment);
    currentPath = targetProd ? barentsWatchProdAddress : barentsWatchPilotAddress;
    BARENTSWATCH_API_ENDPOINT = currentPath + "/api/v1/geodata";

    Executor httpExecutor = Executors.newSingleThreadExecutor();
    MainThreadExecutor callbackExecutor = new MainThreadExecutor();
    barentswatchApi = initializeBarentswatchAPI(httpExecutor, callbackExecutor);
}
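
In this constructor the ExecutorService is stored as a plain java.util.concurrent.Executor, which is all the HTTP client needs for dispatching requests. A small sketch of that narrowing, with CompletableFuture standing in as the consumer of the Executor (hypothetical work):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ExecutorNarrowingSketch {
    public static void main(String[] args) {
        ExecutorService service = Executors.newSingleThreadExecutor();
        Executor httpExecutor = service; // callers only need the execute(Runnable) contract

        CompletableFuture<String> response = CompletableFuture.supplyAsync(
                () -> "fake HTTP response", httpExecutor);
        System.out.println(response.join());

        service.shutdown(); // shutting down still requires the ExecutorService reference
    }
}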

From source file:ddf.security.samlp.MetadataConfigurationParser.java

private void buildEntityDescriptor(String entityDescription) throws IOException {
    EntityDescriptor entityDescriptor = null;
    entityDescription = entityDescription.trim();
    if (entityDescription.startsWith(HTTPS) || entityDescription.startsWith(HTTP)) {
        if (entityDescription.startsWith(HTTP)) {
            LOGGER.warn(
                    "Retrieving metadata via HTTP instead of HTTPS. The metadata configuration is unsafe!!!");
        }
        HttpTransport httpTransport = new ApacheHttpTransport();
        HttpRequest httpRequest = httpTransport.createRequestFactory()
                .buildGetRequest(new GenericUrl(entityDescription));
        httpRequest.setUnsuccessfulResponseHandler(
                new HttpBackOffUnsuccessfulResponseHandler(new ExponentialBackOff())
                        .setBackOffRequired(HttpBackOffUnsuccessfulResponseHandler.BackOffRequired.ALWAYS));
        ListeningExecutorService service = MoreExecutors
                .listeningDecorator(Executors.newSingleThreadExecutor());
        ListenableFuture<HttpResponse> httpResponseFuture = service.submit(httpRequest::execute);

        Futures.addCallback(httpResponseFuture, new FutureCallback<HttpResponse>() {
            @Override
            public void onSuccess(HttpResponse httpResponse) {
                if (httpResponse != null) {
                    try {
                        String parsedResponse = httpResponse.parseAsString();
                        buildEntityDescriptor(parsedResponse);
                    } catch (IOException e) {
                        LOGGER.error("Unable to parse metadata from: {}",
                                httpResponse.getRequest().getUrl().toString(), e);
                    }
                }
            }

            @Override
            public void onFailure(Throwable throwable) {
                LOGGER.error("Unable to retrieve metadata.", throwable);
            }
        });
        service.shutdown();
    } else if (entityDescription.startsWith(FILE + System.getProperty("ddf.home"))) {
        String pathStr = StringUtils.substringAfter(entityDescription, FILE);
        Path path = Paths.get(pathStr);
        if (Files.isReadable(path)) {
            try (InputStream fileInputStream = Files.newInputStream(path)) {
                entityDescriptor = readEntityDescriptor(new InputStreamReader(fileInputStream, "UTF-8"));
            }
        }
    } else if (entityDescription.startsWith("<") && entityDescription.endsWith(">")) {
        entityDescriptor = readEntityDescriptor(new StringReader(entityDescription));
    } else {
        LOGGER.warn("Skipping unknown metadata configuration value: " + entityDescription);
    }

    if (entityDescriptor != null) {
        entityDescriptorMap.put(entityDescriptor.getEntityID(), entityDescriptor);
        if (updateCallback != null) {
            updateCallback.accept(entityDescriptor);
        }
    }
}
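
The parser decorates the single-thread executor with Guava's MoreExecutors.listeningDecorator so it can attach success/failure callbacks to the fetch. A JDK-only sketch of the same fire-and-forget callback shape, using CompletableFuture instead of Guava (names hypothetical):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AsyncFetchWithCallback {
    public static void main(String[] args) {
        ExecutorService service = Executors.newSingleThreadExecutor();

        CompletableFuture
                .supplyAsync(AsyncFetchWithCallback::fetchMetadata, service) // runs on the single worker
                .whenComplete((body, throwable) -> {
                    if (throwable != null) {
                        System.err.println("Unable to retrieve metadata: " + throwable);
                    } else {
                        System.out.println("Retrieved " + body.length() + " characters of metadata");
                    }
                });

        service.shutdown(); // already submitted work still completes
    }

    private static String fetchMetadata() {
        // stands in for httpRequest.execute() in the example above
        return "<EntityDescriptor/>";
    }
}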

From source file:com.uwsoft.editor.controlles.ResolutionManager.java

public void createNewResolution(String name, int width, int height, final String resolutionBase,
        final ProgressHandler handler) {
    this.handler = handler;
    final ResolutionEntryVO newResolution = new ResolutionEntryVO();
    newResolution.name = name;
    newResolution.width = width;
    newResolution.height = height;
    newResolution.base = resolutionBase.equals("width") ? 0 : 1;
    dataManager.getCurrentProjectInfoVO().resolutions.add(newResolution);
    ExecutorService executor = Executors.newSingleThreadExecutor();
    executor.execute(new Runnable() {
        @Override
        public void run() {
            // create new folder structure
            String projPath = dataManager.getCurrentWorkingPath() + "/"
                    + dataManager.getCurrentProjectVO().projectName;
            String sourcePath = projPath + "/" + "assets/orig/images";
            String targetPath = projPath + "/" + "assets/" + newResolution.name + "/images";
            createIfNotExist(sourcePath);
            createIfNotExist(projPath + "/" + "assets/" + newResolution.name + "/pack");
            copyTexturesFromTo(sourcePath, targetPath);
            int resizeWarnings = resizeTextures(targetPath, newResolution);
            rePackProjectImages(newResolution);
            createResizedAnimations(newResolution);
            changePercentBy(5);
            DialogUtils.showOKDialog(Sandbox.getInstance().getUIStage(), "Warning", resizeWarnings
                    + " images were not resized for smaller resolutions due to already small size ( < 3px )");
        }
    });
    executor.execute(new Runnable() {
        @Override
        public void run() {
            try {
                Thread.sleep(500);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            DataManager.getInstance().saveCurrentProject();
            handler.progressComplete();
        }
    });
    executor.shutdown();
}
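
This method leans on two guarantees of a single-thread executor: tasks run one at a time in submission order, and shutdown() lets already-queued tasks finish before the worker exits. A small sketch of both guarantees (hypothetical tasks):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class OrderedTasksSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();

        executor.execute(() -> System.out.println("step 1: heavy work (e.g. resizing textures)"));
        executor.execute(() -> System.out.println("step 2: runs only after step 1 has completed"));

        executor.shutdown(); // stop accepting new tasks; queued tasks still run
        boolean done = executor.awaitTermination(30, TimeUnit.SECONDS);
        System.out.println("all steps finished: " + done);
    }
}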

From source file:com.oneops.ops.dao.PerfDataAccessor.java

/**
 * Initializes the DAOs/connections.
 */
public void init() {
    logger.info("PerfDataAccessor: " + ":" + clusterName + ":" + keyspaceName);
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<String> future = executor.submit(this::connectToCluster);

    try {
        logger.info("Started connecting.. with timeOut " + TIMEOUT_IN_SECONDS);
        logger.info(future.get(TIMEOUT_IN_SECONDS, TimeUnit.SECONDS));
        logger.info("Finished connecting!");

    } catch (TimeoutException e) {
        logger.error("no cassandra hosts available - shutting down");
        throw new HectorException("TimeOut occured in getting the cassandra connection");
    } catch (InterruptedException e) {
        e.printStackTrace();
    } catch (ExecutionException e) {
        e.printStackTrace();
    }

    executor.shutdownNow();
    initCluster();
}
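
init() uses the single worker purely to put a time bound around a blocking connect: future.get(timeout, unit) either returns the result or throws TimeoutException, and shutdownNow() interrupts a stuck worker. A minimal sketch of that timeout pattern, with the slow connect simulated and all names hypothetical:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class BoundedBlockingCall {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<String> future = executor.submit(BoundedBlockingCall::slowConnect);
        try {
            String result = future.get(2, TimeUnit.SECONDS); // bound the blocking call
            System.out.println(result);
        } catch (TimeoutException e) {
            System.err.println("connection attempt timed out, giving up");
            future.cancel(true); // interrupt the worker if it is still blocked
        } finally {
            executor.shutdownNow();
        }
    }

    private static String slowConnect() throws InterruptedException {
        Thread.sleep(5_000); // stands in for connecting to the cluster
        return "connected";
    }
}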

From source file:dbseer.comp.process.transaction.TransactionLogWriter.java

public void writeLog(long timestamp, Collection<Transaction> transactions) throws Exception {
    if (!this.isInitialized) {
        throw new Exception("TransactionLogWriter not initialized.");
    }

    double totalCount = 0;
    double[][] count = new double[numServer][DBSeerConstants.MAX_NUM_TABLE];
    double[][] latencySum = new double[numServer][DBSeerConstants.MAX_NUM_TABLE];
    String gap = "   ";

    if (!dbscan.isInitialized() && !dbscan.isInitializing()) {
        initialTransactions.addAll(transactions);

        if (initialTransactions.size() > dbscan.getInitPts() && !dbscan.isInitializing()) {
            dbscanInitializer = Executors.newSingleThreadExecutor();
            dbscanInitializer.submit(new Runnable() {
                @Override
                public void run() {
                    dbscan.initialDBSCAN(initialTransactions);
                }
            });
        }
    }

    for (Transaction t : transactions) {
        if (dbscan != null && dbscan.isInitialized()) {
            if (liveLogProcessor.getTxStartTime() == 0) {
                liveLogProcessor.setTxStartTime(timestamp);
            }
            dbscan.train(t);
        }

        int type;
        if (t.getCluster() == null) {
            type = 0;
        } else {
            type = t.getCluster().getId();
        }

        if (type > maxType) {
            maxType = type;
        }

        // if not outlier;
        if (type >= 0) {
            String server = t.getServerName();
            int index = serverIndex.get(server);
            latencySum[index][type] += t.getLatency();
            count[index][type]++;
            totalCount++;

            ArrayList<Double> latencyList = writers.get(server).getLatencyMap().get(type);
            if (latencyList == null) {
                latencyList = new ArrayList<Double>();
                writers.get(server).getLatencyMap().put(type, latencyList);
            }
            latencyList.add((double) t.getLatency());

            // write sample
            HashMap<Integer, Integer> countMap = writers.get(server).getTransactionSampleCountMap();
            Integer sampleCount = countMap.get(type);
            if (sampleCount == null) {
                countMap.put(type, 1);
            } else {
                int countVal = sampleCount.intValue();
                if (countVal < DBSeerConstants.MAX_TRANSACTION_SAMPLE) {
                    HashMap<Integer, PrintWriter> sampleWriters = writers.get(server)
                            .getTransactionSampleWriter();
                    PrintWriter sampleWriter = sampleWriters.get(type);
                    if (sampleWriter == null) {
                        sampleWriter = new PrintWriter(new FileOutputStream(String.format("%s%d",
                                this.dir + File.separator + server + File.separator + "tx_sample_", type),
                                false));
                        sampleWriters.put(type, sampleWriter);
                    }
                    sampleWriter.print(t.getEntireStatement());
                    sampleWriter.println("---");
                    sampleWriter.flush();
                    countVal++;
                    countMap.put(type, countVal);
                }
            }
        }
    }

    // update live monitor
    if (monitor != null) {
        monitor.setCurrentTimestamp(timestamp);
        monitor.setNumTransactionTypes(maxType + 1);
        monitor.setGlobalTransactionCount(totalCount);

        for (int i = 0; i <= maxType; ++i) {
            double countSum = 0;
            double latencySumSum = 0;
            for (int j = 0; j < numServer; ++j) {
                countSum += count[j][i];
                latencySumSum += latencySum[j][i];
            }
            monitor.setCurrentTPS(i, countSum);
            if (countSum == 0) {
                monitor.setCurrentAverageLatency(i, 0.0);
            } else {
                monitor.setCurrentAverageLatency(i, latencySumSum / countSum);
            }
        }
    }

    if (timestamp < liveLogProcessor.getSysStartTime() || liveLogProcessor.getSysStartTime() == 0) {
        return;
    }

    for (String server : servers) {
        TransactionWriter writer = writers.get(server);
        PrintWriter tpsWriter = writer.getTpsWriter();
        PrintWriter latencyWriter = writer.getLatencyWriter();

        HashMap<Integer, PrintWriter> prctileLatencyWriter = writer.getPrctileLatencyWriter();
        HashMap<Integer, ArrayList<Double>> latencyMap = writer.getLatencyMap();

        tpsWriter.print(gap);
        latencyWriter.print(gap);

        tpsWriter.printf("%.16e", (double) timestamp);
        latencyWriter.printf("%.16e", (double) timestamp);

        int index = serverIndex.get(server);

        for (int i = 0; i <= maxType; ++i) {
            tpsWriter.print(gap);
            tpsWriter.printf("%.16e", count[index][i]);

            latencyWriter.print(gap);
            if (count[index][i] == 0.0) {
                latencyWriter.printf("%.16e", 0.0);
            } else {
                latencyWriter.printf("%.16e", (latencySum[index][i] / count[index][i]) / 1000.0);
            }

            // write percentile
            PrintWriter prctileWriter = prctileLatencyWriter.get(i);
            ArrayList<Double> latencyList = latencyMap.get(i);
            if (latencyList == null) {
                latencyList = new ArrayList<Double>();
                latencyMap.put(i, latencyList);
            }
            if (prctileWriter == null) {
                prctileWriter = new PrintWriter(new FileOutputStream(
                        String.format("%s%03d",
                                this.dir + File.separator + server + File.separator + "prctile_latency_", i),
                        false));
                prctileLatencyWriter.put(i, prctileWriter);
            }
            double[] latencies = Doubles.toArray(latencyList);
            prctileWriter.printf("%d,", timestamp);
            for (double p : percentiles) {
                Percentile percentile = new Percentile(p);
                percentile.setData(latencies);
                double val = percentile.evaluate();
                if (Double.isNaN(val))
                    val = 0.0;
                prctileWriter.printf("%f,", val / 1000.0);
            }
            prctileWriter.println();
            prctileWriter.flush();
            latencyList.clear();
        }

        tpsWriter.println();
        latencyWriter.println();
        tpsWriter.flush();
        latencyWriter.flush();
        isWritingStarted = true;
    }
}
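
writeLog() lazily creates its single-thread executor the first time enough transactions are buffered, so the expensive DBSCAN initialization runs once in the background while logging continues. A compact sketch of that lazy, one-shot background initialization (hypothetical initializer):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

public class LazyBackgroundInit {
    private final AtomicBoolean initStarted = new AtomicBoolean(false);
    private ExecutorService initializer;

    /** Kicks off the expensive initialization exactly once, off the calling thread. */
    public void maybeStartInit() {
        if (initStarted.compareAndSet(false, true)) {
            initializer = Executors.newSingleThreadExecutor();
            initializer.submit(this::expensiveInit);
            initializer.shutdown(); // the queued task still runs to completion
        }
    }

    private void expensiveInit() {
        // stands in for dbscan.initialDBSCAN(initialTransactions) in the example above
        System.out.println("initializing in the background on " + Thread.currentThread().getName());
    }

    public static void main(String[] args) {
        LazyBackgroundInit writer = new LazyBackgroundInit();
        writer.maybeStartInit(); // starts the background init
        writer.maybeStartInit(); // no-op: already started
    }
}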