List of usage examples for java.util.concurrent.Executors.newSingleThreadExecutor()
public static ExecutorService newSingleThreadExecutor()
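Before the project examples below, a minimal self-contained sketch (not drawn from any of the listed sources) of what this factory provides: a single worker thread that executes submitted tasks sequentially, in submission order.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SingleThreadExecutorSketch {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // Tasks run one at a time, in submission order, on a single worker thread.
        executor.submit(() -> System.out.println("first"));
        executor.submit(() -> System.out.println("second"));
        executor.shutdown(); // previously submitted tasks still run to completion
    }
}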
From source file:de.mendelson.comm.as2.client.AS2Gui.java
private void displayPreferences() {
    final String uniqueId = this.getClass().getName() + ".displayPreferences." + System.currentTimeMillis();
    Runnable test = new Runnable() {
        @Override
        public void run() {
            JDialogPreferences dialog = null;
            try {
                List<PreferencesPanel> panelList = new ArrayList<PreferencesPanel>();
                panelList.add(new PreferencesPanelMDN(AS2Gui.this.getBaseClient()));
                panelList.add(new PreferencesPanelProxy(AS2Gui.this.getBaseClient()));
                panelList.add(new PreferencesPanelSecurity(AS2Gui.this.getBaseClient()));
                panelList.add(new PreferencesPanelDirectories(AS2Gui.this.getBaseClient()));
                panelList.add(new PreferencesPanelSystemMaintenance(AS2Gui.this.getBaseClient()));
                panelList.add(new PreferencesPanelNotification(AS2Gui.this.getBaseClient()));
                // display wait indicator
                AS2Gui.this.as2StatusBar.startProgressIndeterminate(
                        AS2Gui.this.rb.getResourceString("menu.file.preferences"), uniqueId);
                dialog = new JDialogPreferences(AS2Gui.this, AS2Gui.this.configConnection,
                        AS2Gui.this.runtimeConnection, panelList);
            } finally {
                AS2Gui.this.as2StatusBar.stopProgressIfExists(uniqueId);
                if (dialog != null) {
                    dialog.setVisible(true);
                }
            }
        }
    };
    Executors.newSingleThreadExecutor().submit(test);
}
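Two things stand out in this snippet: the throwaway executor is never shut down, so its worker thread lingers after the task completes, and the Swing dialog is shown from that worker thread rather than the Event Dispatch Thread. A hypothetical variation (not the mendelson code) that addresses both:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import javax.swing.JDialog;
import javax.swing.SwingUtilities;

public class BackgroundThenEdtSketch {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.submit(() -> {
            // ... slow, non-UI preparation work runs here, off the UI thread ...
            SwingUtilities.invokeLater(() -> {
                // Swing components are created and shown on the Event Dispatch Thread.
                JDialog dialog = new JDialog();
                dialog.setSize(300, 200);
                dialog.setVisible(true);
            });
        });
        executor.shutdown(); // the queued task still runs; the worker thread then exits
    }
}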
From source file:com.uwsoft.editor.proxy.ProjectManager.java
public void importShaderIntoProject(Array<FileHandle> files, ProgressHandler progressHandler) {
    if (files == null) {
        return;
    }
    handler = progressHandler;
    currentPercent = 0;
    ExecutorService executor = Executors.newSingleThreadExecutor();
    executor.execute(() -> {
        for (FileHandle handle : files) {
            // check if shaders folder exists
            String shadersPath = currentProjectPath + "/assets/shaders";
            File destination = new File(currentProjectPath + "/assets/shaders/" + handle.name());
            try {
                FileUtils.forceMkdir(new File(shadersPath));
                FileUtils.copyFile(handle.file(), destination);
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    });
    executor.execute(() -> {
        changePercentBy(100 - currentPercent);
        try {
            Thread.sleep(500);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        handler.progressComplete();
    });
    executor.shutdown();
}
From source file:com.o2d.pkayjava.editor.proxy.ProjectManager.java
public void importShaderIntoProject(Array<FileHandle> files, ProgressHandler progressHandler) {
    if (files == null) {
        return;
    }
    handler = progressHandler;
    currentPercent = 0;
    ExecutorService executor = Executors.newSingleThreadExecutor();
    executor.execute(() -> {
        for (FileHandle handle : files) {
            // check if shaders folder exists
            String shadersPath = currentWorkingPath + "/" + currentProjectVO.projectName + "/assets/shaders";
            File destination = new File(currentWorkingPath + "/" + currentProjectVO.projectName
                    + "/assets/shaders/" + handle.name());
            try {
                FileUtils.forceMkdir(new File(shadersPath));
                FileUtils.copyFile(handle.file(), destination);
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    });
    executor.execute(() -> {
        changePercentBy(100 - currentPercent);
        try {
            Thread.sleep(500);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        handler.progressComplete();
    });
    executor.shutdown();
}
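Both variants of importShaderIntoProject lean on the same property: a single-thread executor drains its queue strictly in FIFO order, so the completion task cannot start until every file copy has finished. A minimal hypothetical sketch of that two-phase idiom, not taken from either project:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class OrderedPhasesSketch {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // Phase 1: the actual work. A single-thread executor guarantees this
        // task completes before anything submitted after it begins.
        executor.execute(() -> System.out.println("copying files..."));
        // Phase 2: completion callback, guaranteed to run after phase 1.
        executor.execute(() -> System.out.println("done, notify listeners"));
        executor.shutdown(); // both queued tasks still run before the thread exits
    }
}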
From source file:com.streamsets.pipeline.stage.origin.kafka.TestKafkaSource.java
@Test
public void testProduceBinaryRecords() throws StageException, InterruptedException {
    CountDownLatch startLatch = new CountDownLatch(1);
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    executorService.submit(
            new ProducerRunnable(TOPIC14, SINGLE_PARTITION, producer, startLatch, DataType.TEXT, null, -1, null));

    KafkaConfigBean conf = new KafkaConfigBean();
    conf.metadataBrokerList = sdcKafkaTestUtil.getMetadataBrokerURI();
    conf.topic = TOPIC14;
    conf.consumerGroup = CONSUMER_GROUP;
    conf.zookeeperConnect = zkConnect;
    conf.maxBatchSize = 9;
    conf.maxWaitTime = 5000;
    conf.kafkaConsumerConfigs = null;
    conf.produceSingleRecordPerMessage = false;
    conf.dataFormat = DataFormat.BINARY;
    conf.dataFormatConfig.charset = "UTF-8";
    conf.dataFormatConfig.removeCtrlChars = false;
    conf.dataFormatConfig.binaryMaxObjectLen = 1000;

    SourceRunner sourceRunner = new SourceRunner.Builder(StandaloneKafkaSource.class, createSource(conf))
            .addOutputLane("lane").build();
    sourceRunner.runInit();
    startLatch.countDown();

    List<Record> records = new ArrayList<>();
    StageRunner.Output output = getOutputAndRecords(sourceRunner, 10, "lane", records);
    shutDownExecutorService(executorService);

    String newOffset = output.getNewOffset();
    Assert.assertNull(newOffset);
    Assert.assertEquals(10, records.size());
    for (int i = 0; i < records.size(); i++) {
        Assert.assertNotNull(records.get(i).get("/"));
        Assert.assertNotNull(records.get(i).get().getValueAsByteArray());
        Assert.assertTrue(Arrays.equals(sdcKafkaTestUtil.generateTestData(DataType.TEXT, null).getBytes(),
                records.get(i).get("/").getValueAsByteArray()));
    }
    sourceRunner.runDestroy();
}
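shutDownExecutorService(...) is the test class's own helper and is not shown here. Purely as an assumption (the actual StreamSets helper may differ), a typical implementation interrupts the producer thread and waits briefly for it to die:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch of what a helper like shutDownExecutorService might do.
final class ExecutorShutdown {
    static void shutDownExecutorService(ExecutorService executor) throws InterruptedException {
        executor.shutdownNow(); // interrupt the producer thread so the test can end
        if (!executor.awaitTermination(5, TimeUnit.SECONDS)) {
            System.err.println("executor did not terminate in time");
        }
    }
}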
From source file:ch.iterate.openstack.swift.Client.java
/**
 * @param container          The name of the container
 * @param name               The name of the object
 * @param entity             The name of the request entity (make sure to set the Content-Type)
 * @param metadata           The metadata for the object
 * @param md5sum             The 32 character hex encoded MD5 sum of the data
 * @param objectSize         The total size in bytes of the object to be stored
 * @param segmentSize        Optional size in bytes of the object segments to be stored (forces large object support); default 4G
 * @param dynamicLargeObject Optional setting to use dynamic large objects; False/null will use static large objects if required
 * @param segmentContainer   Optional name of the container to store file segments; defaults to storing chunks in the same container as the file will appear in
 * @param segmentFolder      Optional name of the folder for storing file segments; defaults to ".chunks/"
 * @param leaveSegments      Optional setting to leave segments of large objects in place when the manifest is overwritten/changed
 * @return The ETAG if the save was successful, null otherwise
 * @throws GenericException There was a protocol level error talking to CloudFiles
 */
public String storeObject(Region region, String container, String name, HttpEntity entity,
        Map<String, String> metadata, String md5sum, Long objectSize, Long segmentSize,
        Boolean dynamicLargeObject, String segmentContainer, String segmentFolder, Boolean leaveSegments)
        throws IOException, InterruptedException {
    /*
     * Default values for large object support. We also use the defaults combined with the inputs
     * to determine whether to store as a large object.
     */

    /*
     * The maximum size of a single object (5GiB).
     */
    long singleObjectSizeLimit = (long) (5 * Math.pow(1024, 3));

    /*
     * The default minimum segment size (1MiB).
     */
    long minSegmentSize = 1024L * 1024L;

    /*
     * Set the segment size.
     *
     * Defaults to 4GiB segments, and will not permit smaller than 1MiB segments.
     */
    long actualSegmentSize = (segmentSize == null) ? (long) (4 * Math.pow(1024, 3))
            : Math.max(segmentSize, minSegmentSize);

    /*
     * Determines if we will store using large objects - we may do this for 3 reasons:
     *
     * - A segmentSize has been specified and the object size is greater than the minimum segment size
     * - If an objectSize is provided and is larger than the single object size limit of 5GiB
     * - A segmentSize has been specified, but no objectSize given (we take this as a request for segmentation)
     *
     * The last case may fail if the user does not provide at least as much data as the minimum segment
     * size configured on the server, and will always produce a large object structure (even if only one
     * small segment is required).
     */
    objectSize = (objectSize == null) ? -1 : objectSize;
    boolean useLargeObject = ((segmentSize != null) && (objectSize > actualSegmentSize))
            || (objectSize > singleObjectSizeLimit) || ((segmentSize != null) && (objectSize == -1));

    if (!useLargeObject) {
        return storeObject(region, container, name, entity, metadata, md5sum);
    } else {
        /*
         * We need to upload a large object as defined by the method
         * parameters. For now this is done sequentially, but a parallel
         * version using appropriate random access to the underlying data
         * may be desirable.
         *
         * We make the assumption that the given file size will not be
         * greater than int.MAX_VALUE * segmentSize
         */
        leaveSegments = (leaveSegments == null) ? Boolean.FALSE : leaveSegments;
        dynamicLargeObject = (dynamicLargeObject == null) ? Boolean.FALSE : dynamicLargeObject;
        segmentFolder = (segmentFolder == null) ? ".file-segments" : segmentFolder;
        segmentContainer = (segmentContainer == null) ? container : segmentContainer;

        Map<String, List<StorageObject>> oldSegmentsToRemove = null;

        /*
         * If we have chosen not to leave existing large object segments in place (default)
         * then we need to collect information about any existing file segments so that we can
         * deal with them after we complete the upload of the new manifest.
         *
         * We should only delete existing segments after a successful upload of a new manifest file
         * because this constitutes an object update and the older file should remain available
         * until the new file can be downloaded.
         */
        if (!leaveSegments) {
            ObjectMetadata existingMetadata;
            String manifestDLO = null;
            Boolean manifestSLO = Boolean.FALSE;

            try {
                existingMetadata = getObjectMetaData(region, container, name);
                if (existingMetadata.getMetaData().containsKey(Constants.MANIFEST_HEADER)) {
                    manifestDLO = existingMetadata.getMetaData().get(Constants.MANIFEST_HEADER);
                } else if (existingMetadata.getMetaData().containsKey(Constants.X_STATIC_LARGE_OBJECT)) {
                    JSONParser parser = new JSONParser();
                    String manifestSLOValue = existingMetadata.getMetaData()
                            .get(Constants.X_STATIC_LARGE_OBJECT);
                    manifestSLO = (Boolean) parser.parse(manifestSLOValue);
                }
            } catch (NotFoundException e) {
                /*
                 * Just means no object exists already, so continue
                 */
            } catch (ParseException e) {
                /*
                 * X_STATIC_LARGE_OBJECT header existed but failed to parse.
                 * If a static large object already exists this must be set to "true".
                 * If we got here then the X_STATIC_LARGE_OBJECT header existed, but failed
                 * to parse as a boolean, so fail the upload as a precaution.
                 */
                return null;
            }

            if (manifestDLO != null) {
                /*
                 * We have found an existing dynamic large object, so use the prefix to get a list of
                 * existing objects. If we're putting up a new dlo, make sure the segment prefixes are
                 * different, then we can delete anything that's not in the new list if necessary.
                 */
                String oldContainer = manifestDLO.substring(0, manifestDLO.indexOf('/', 1));
                String oldPath = manifestDLO.substring(manifestDLO.indexOf('/', 1), manifestDLO.length());
                oldSegmentsToRemove = new HashMap<String, List<StorageObject>>();
                oldSegmentsToRemove.put(oldContainer, listObjects(region, oldContainer, oldPath));
            } else if (manifestSLO) {
                /*
                 * We have found an existing static large object, so grab the manifest data that
                 * details the existing segments - delete any later that we don't need any more
                 */
            }
        }

        int segmentNumber = 1;
        long timeStamp = System.currentTimeMillis() / 1000L;
        String segmentBase = String.format("%s/%d/%d", segmentFolder, timeStamp, objectSize);

        /*
         * Create a SubInputStream from the piped input stream whose matching
         * output stream we will pass to the HttpEntity for writing content.
         */
        final PipedInputStream contentInStream = new PipedInputStream(64 * 1024);
        final PipedOutputStream contentOutStream = new PipedOutputStream(contentInStream);
        SubInputStream segmentStream = new SubInputStream(contentInStream, actualSegmentSize, false);

        /*
         * Fork the call to entity.writeTo() so that we can grab any exceptions it raises
         */
        final HttpEntity e = entity;
        final Callable<Boolean> writer = new Callable<Boolean>() {
            public Boolean call() throws Exception {
                e.writeTo(contentOutStream);
                return Boolean.TRUE;
            }
        };

        ExecutorService writeExecutor = Executors.newSingleThreadExecutor();
        final Future<Boolean> future = writeExecutor.submit(writer);

        /*
         * Check the future for exceptions after we've finished uploading segments
         */
        Map<String, List<StorageObject>> newSegmentsAdded = new HashMap<String, List<StorageObject>>();
        List<StorageObject> newSegments = new LinkedList<StorageObject>();
        JSONArray manifestSLO = new JSONArray();
        boolean finished = false;

        /*
         * Upload each segment of the file by reading sections of the content input stream
         * until the entire underlying stream is complete
         */
        while (!finished) {
            String segmentName = String.format("%s/%08d", segmentBase, segmentNumber);
            String etag;
            boolean error = false;
            try {
                etag = storeObject(region, segmentContainer, segmentStream, "application/octet-stream",
                        segmentName, new HashMap<String, String>());
            } catch (IOException ex) {
                // Finished storing the object
                System.out.println("Caught IO Exception: " + ex.getMessage());
                ex.printStackTrace();
                throw ex;
            }
            String segmentPath = segmentContainer + "/" + segmentName;
            long bytesUploaded = segmentStream.getBytesProduced();

            /*
             * Create the appropriate manifest structure if we're making a static large
             * object.
             *
             * ETAG returned by the simple upload
             * total size of segment uploaded
             * path of segment
             */
            if (!dynamicLargeObject) {
                JSONObject segmentJSON = new JSONObject();
                segmentJSON.put("path", segmentPath);
                segmentJSON.put("etag", etag);
                segmentJSON.put("size_bytes", bytesUploaded);
                manifestSLO.add(segmentJSON);
                newSegments.add(new StorageObject(segmentName));
            }

            segmentNumber++;
            if (!finished) {
                finished = segmentStream.endSourceReached();
            }
            newSegmentsAdded.put(segmentContainer, newSegments);
            System.out.println("JSON: " + manifestSLO.toString());
            if (error) {
                return "";
            }
            segmentStream.readMoreBytes(actualSegmentSize);
        }

        /*
         * Attempt to retrieve the return value from the write operation.
         * Any exceptions raised can then be handled appropriately.
         */
        try {
            future.get();
        } catch (InterruptedException ex) {
            /*
             * The write was interrupted... delete the segments?
             */
        } catch (ExecutionException ex) {
            /*
             * This should always be an IOException or a RuntimeException
             * because the call to entity.writeTo() only throws IOException
             */
            Throwable t = ex.getCause();
            if (t instanceof IOException) {
                throw (IOException) t;
            } else {
                throw (RuntimeException) t;
            }
        }

        /*
         * Create an appropriate manifest depending on our DLO/SLO choice
         */
        String manifestEtag = null;
        if (dynamicLargeObject) {
            /*
             * Empty manifest with a header detailing the shared prefix of the object segments
             */
            long manifestTimeStamp = System.currentTimeMillis() / 1000L;
            metadata.put("X-Object-Manifest", segmentBase);
            metadata.put("x-object-meta-mtime", String.format("%s", manifestTimeStamp));
            manifestEtag = storeObject(region, container, new ByteArrayInputStream(new byte[0]),
                    entity.getContentType().getValue(), name, metadata);
        } else {
            /*
             * Manifest containing a JSON list specifying the details of the object segments.
             */
            URIBuilder urlBuild = new URIBuilder(region.getStorageUrl(container, name));
            urlBuild.setParameter("multipart-manifest", "put");
            URI url;
            try {
                url = urlBuild.build();
                String manifestContent = manifestSLO.toString();
                InputStreamEntity manifestEntity = new InputStreamEntity(
                        new ByteArrayInputStream(manifestContent.getBytes()), -1);
                manifestEntity.setChunked(true);
                manifestEntity.setContentType(entity.getContentType());
                HttpPut method = new HttpPut(url);
                method.setEntity(manifestEntity);
                method.setHeader("x-static-large-object", "true");
                Response response = this.execute(method, new DefaultResponseHandler());
                if (response.getStatusCode() == HttpStatus.SC_CREATED) {
                    manifestEtag = response.getResponseHeader(HttpHeaders.ETAG).getValue();
                } else {
                    throw new GenericException(response);
                }
            } catch (URISyntaxException ex) {
                ex.printStackTrace();
            }
        }

        /*
         * Delete stale segments of the overwritten large object if requested.
         */
        if (!leaveSegments) {
            /*
             * Before deleting old segments, remove any objects from the delete list
             * that are also part of a new static large object that were updated during the upload.
             */
            if (!(oldSegmentsToRemove == null)) {
                for (String c : oldSegmentsToRemove.keySet()) {
                    List<StorageObject> rmv = oldSegmentsToRemove.get(c);
                    if (newSegmentsAdded.containsKey(c)) {
                        rmv.removeAll(newSegmentsAdded.get(c));
                    }
                    List<String> rmvNames = new LinkedList<String>();
                    for (StorageObject s : rmv) {
                        rmvNames.add(s.getName());
                    }
                    deleteObjects(region, c, rmvNames);
                }
            }
        }
        return manifestEtag;
    }
}
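The concurrency core of storeObject — forking entity.writeTo() onto a single worker thread so the calling thread can consume the piped bytes in fixed-size segments — reduces to the following self-contained sketch (hypothetical names; this is not the Client code itself):

import java.io.IOException;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class PipedWriterSketch {
    public static void main(String[] args) throws Exception {
        final PipedInputStream in = new PipedInputStream(64 * 1024);
        final PipedOutputStream out = new PipedOutputStream(in);

        // Fork the producer onto a single worker thread, keeping its Future so
        // exceptions can be re-examined after the consumer is done.
        ExecutorService writeExecutor = Executors.newSingleThreadExecutor();
        Future<Boolean> producer = writeExecutor.submit(() -> {
            try (PipedOutputStream o = out) {
                o.write("pretend this is a large request entity".getBytes(StandardCharsets.UTF_8));
            }
            return Boolean.TRUE;
        });

        // Consume on the calling thread, e.g. in fixed-size segments.
        byte[] buffer = new byte[16];
        int n;
        while ((n = in.read(buffer)) != -1) {
            System.out.println("segment of " + n + " bytes");
        }

        try {
            producer.get(); // surfaces any IOException thrown by the writer
        } catch (ExecutionException e) {
            // The writer failed; unwrap and rethrow its cause.
            Throwable t = e.getCause();
            if (t instanceof IOException) throw (IOException) t;
            throw new RuntimeException(t);
        } finally {
            writeExecutor.shutdown();
        }
    }
}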
From source file:com.esri.squadleader.view.SquadLeaderActivity.java
private OnSingleTapListener createDefaultOnSingleTapListener() {
    return new OnSingleTapListener() {

        private static final long serialVersionUID = 3247725674465463146L;

        @Override
        public void onSingleTap(float x, float y) {
            Callout callout = mapController.getCallout();
            // Identify a chem light
            if (null != mil2525cController && null != (poppedUpChemLight = mil2525cController
                    .identifyOneGraphic("chemlights", x, y, 5))) {
                View calloutView = getLayoutInflater().inflate(R.layout.chem_light_callout, null);
                callout.setStyle(R.xml.chem_light_callout_style);
                callout.refresh();
                callout.animatedShow((Point) poppedUpChemLight.getGeometry(), calloutView);
            } else {
                callout.animatedHide();
                // Identify a feature from a layer
                findViewById(R.id.button_saveAttributes).setVisibility(View.GONE);
                findViewById(R.id.button_cancelEditAttributes).setVisibility(View.GONE);
                findViewById(R.id.button_editAttributes).setVisibility(View.VISIBLE);
                final FutureTask<List<Popup>> identifyFuture = mapController.identifyFeatureLayers(x, y);
                Executors.newSingleThreadExecutor().submit(identifyFuture);
                try {
                    final List<Popup> popups = identifyFuture.get();
                    if (0 < popups.size()) {
                        loadPopupContainer(popups, BottomSheetBehavior.STATE_COLLAPSED);
                    } else {
                        bottomSheetBehavior_featurePopups.setState(BottomSheetBehavior.STATE_HIDDEN);
                        findViewById(R.id.button_saveAttributes).setVisibility(View.GONE);
                        findViewById(R.id.button_cancelEditAttributes).setVisibility(View.GONE);
                        findViewById(R.id.button_editAttributes).setVisibility(View.VISIBLE);
                    }
                } catch (InterruptedException | ExecutionException e) {
                    Log.e(TAG, "Exception while identifying feature layers", e);
                }
            }
        }
    };
}
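Note that each tap submits its FutureTask to a brand-new executor that is never shut down, leaving an idle worker thread behind per invocation, and identifyFuture.get() then blocks the calling thread until the task finishes. A minimal hypothetical sketch (not the Squad Leader code) of the same submit-then-get pattern with the executor released afterwards:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;

public class FutureTaskSketch {
    public static void main(String[] args) throws Exception {
        FutureTask<String> task = new FutureTask<>(() -> "identified!");
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.submit(task);
        // shutdown() can be called immediately: the already-submitted task still
        // runs, and the worker thread exits afterwards instead of lingering.
        executor.shutdown();
        System.out.println(task.get()); // blocks until the task completes
    }
}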
From source file:com.alibaba.wasp.master.FMaster.java
/**
 * We do the following in a different thread. If it is not completed in time,
 * we will time it out and assume it is not easy to recover.
 *
 * 1. Create a new ZK session. (since our current one is expired)
 * 2. Try to become a primary master again
 * 3. Initialize all ZK based system trackers.
 * 4. Assign root and meta. (they are already assigned, but we need to update
 *    our internal memory state to reflect it)
 * 5. Process any RIT if any during the process of our recovery.
 *
 * @return True if we could successfully recover from ZK session expiry.
 * @throws InterruptedException
 * @throws java.io.IOException
 * @throws org.apache.zookeeper.KeeperException
 * @throws java.util.concurrent.ExecutionException
 */
private boolean tryRecoveringExpiredZKSession()
        throws InterruptedException, IOException, KeeperException, ExecutionException {
    this.zooKeeper.reconnectAfterExpiration();

    Callable<Boolean> callable = new Callable<Boolean>() {
        public Boolean call() throws InterruptedException, IOException, KeeperException {
            MonitoredTask status = TaskMonitor.get().createStatus("Recovering expired ZK session");
            try {
                if (!becomeActiveMaster(status)) {
                    return Boolean.FALSE;
                }
                initialized = false;
                finishInitialization(status, true);
                return Boolean.TRUE;
            } finally {
                status.cleanup();
            }
        }
    };

    long timeout = conf.getLong("wasp.master.zksession.recover.timeout", 300000);
    java.util.concurrent.ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<Boolean> result = executor.submit(callable);
    executor.shutdown();
    if (executor.awaitTermination(timeout, TimeUnit.MILLISECONDS) && result.isDone()) {
        Boolean recovered = result.get();
        if (recovered != null) {
            return recovered.booleanValue();
        }
    }
    executor.shutdownNow();
    return false;
}
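The shutdown() / awaitTermination() / shutdownNow() sequence above is a reusable run-with-timeout idiom: since only one task is ever queued, awaitTermination doubles as a timeout on that task. Extracted into a self-contained sketch (hypothetical helper, not wasp code):

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class TimedCallSketch {
    static <T> T callWithTimeout(Callable<T> work, long timeoutMillis) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<T> result = executor.submit(work);
        executor.shutdown(); // no new tasks; the submitted one keeps running
        // awaitTermination serves as the timeout on the single queued task.
        if (executor.awaitTermination(timeoutMillis, TimeUnit.MILLISECONDS) && result.isDone()) {
            return result.get();
        }
        executor.shutdownNow(); // interrupt the straggler
        return null; // the caller treats null as "timed out"
    }

    public static void main(String[] args) throws Exception {
        System.out.println(callWithTimeout(() -> "recovered", 1000));
    }
}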
From source file:com.spotify.docker.client.DefaultDockerClientTest.java
@Test
public void interruptTest() throws Exception {
    // Pull image
    sut.pull(BUSYBOX_LATEST);

    // Create container
    final ContainerConfig config = ContainerConfig.builder().image(BUSYBOX_LATEST)
            .cmd("sh", "-c", "while :; do sleep 1; done").build();
    final String name = randomName();
    final ContainerCreation creation = sut.createContainer(config, name);
    final String id = creation.id();

    // Start container
    sut.startContainer(id);

    // Wait for container on a thread
    final ExecutorService executorService = Executors.newSingleThreadExecutor();
    final SettableFuture<Boolean> started = SettableFuture.create();
    final SettableFuture<Boolean> interrupted = SettableFuture.create();

    final Future<ContainerExit> exitFuture = executorService.submit(new Callable<ContainerExit>() {
        @Override
        public ContainerExit call() throws Exception {
            try {
                started.set(true);
                return sut.waitContainer(id);
            } catch (InterruptedException e) {
                interrupted.set(true);
                throw e;
            }
        }
    });

    // Interrupt waiting thread
    started.get();
    executorService.shutdownNow();
    try {
        exitFuture.get();
        fail();
    } catch (ExecutionException e) {
        assertThat(e.getCause(), instanceOf(InterruptedException.class));
    }

    // Verify that the thread was interrupted
    assertThat(interrupted.get(), is(true));
}
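This test exercises two facts about single-thread executors: shutdownNow() interrupts the worker thread, and an InterruptedException thrown inside the task surfaces from Future.get() wrapped in an ExecutionException. A stripped-down hypothetical sketch of just that mechanism:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ShutdownNowSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<?> blocked = executor.submit(() -> {
            Thread.sleep(60_000); // stands in for a long blocking call
            return null;
        });
        Thread.sleep(100); // give the task a moment to start
        executor.shutdownNow(); // interrupts the worker thread
        try {
            blocked.get();
        } catch (ExecutionException e) {
            // The InterruptedException thrown inside the task surfaces here as the cause.
            System.out.println("cause: " + e.getCause());
        }
    }
}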
From source file:com.spotify.docker.client.DefaultDockerClientTest.java
@Test
public void testWaitContainer() throws Exception {
    sut.pull(BUSYBOX_LATEST);

    // Create container
    final ContainerConfig config = ContainerConfig.builder().image(BUSYBOX_LATEST)
            .cmd("sh", "-c", "while :; do sleep 1; done").build();
    final String name = randomName();
    final ContainerCreation creation = sut.createContainer(config, name);
    final String id = creation.id();

    // Start the container
    sut.startContainer(id);

    // Wait for container on a thread
    final ExecutorService executorService = Executors.newSingleThreadExecutor();
    final Future<ContainerExit> exitFuture = executorService.submit(new Callable<ContainerExit>() {
        @Override
        public ContainerExit call() throws Exception {
            return sut.waitContainer(id);
        }
    });

    // Wait for 40 seconds, then kill the container
    Thread.sleep(40000);
    sut.killContainer(id);

    // Ensure that waiting on the container worked without exception
    exitFuture.get();
}
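One caveat with the final exitFuture.get(): it blocks indefinitely, so if killContainer failed the test would hang rather than fail. A hypothetical hardening (not the Spotify test code) using the timed Future.get(long, TimeUnit) overload:

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

// Bound the wait so a container that never exits fails the test instead of hanging it.
final class BoundedWait {
    static <T> T getOrFail(Future<T> future, long seconds) throws Exception {
        try {
            return future.get(seconds, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            future.cancel(true); // interrupt the waiting thread before giving up
            throw new AssertionError("container did not exit within " + seconds + "s", e);
        }
    }
}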