List of usage examples for java.lang.InterruptedException.getCause()
public synchronized Throwable getCause()
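InterruptedException inherits getCause() from java.lang.Throwable, and an InterruptedException rarely carries a cause of its own. In the examples below, getCause() is almost always called on the ExecutionException thrown by Future.get(), to recover the exception the task itself raised. A minimal, self-contained sketch of that pattern (class and message names are illustrative, not taken from the projects below):

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class GetCauseExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Callable<String> failingTask = () -> {
            throw new IllegalStateException("failure inside the task");
        };
        Future<String> future = pool.submit(failingTask);
        try {
            future.get(); // wraps any exception thrown by the task in an ExecutionException
        } catch (ExecutionException e) {
            Throwable cause = e.getCause(); // the IllegalStateException thrown above
            System.out.println("Task failed with: " + cause);
        } finally {
            pool.shutdown();
        }
    }
}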
From source file:com.mobiperf.MeasurementScheduler.java
@SuppressWarnings("unchecked")
private void uploadResults() {
    MeasurementResult result;
    Future<MeasurementResult> future;
    JSONArray results = readResultsFromFile();
    synchronized (this.pendingTasks) {
        try {
            for (MeasurementTask task : this.pendingTasks.keySet()) {
                future = this.pendingTasks.get(task);
                if (future != null) {
                    sendStringMsg("Finished:\n" + task);
                    if (future.isDone()) {
                        try {
                            this.pendingTasks.remove(task);
                            if (!future.isCancelled()) {
                                result = future.get();
                            } else {
                                Logger.e("Task execution was canceled");
                                JSONObject cancelledResult = MeasurementJsonConvertor.encodeToJson(
                                        this.getFailureResult(task, new CancellationException("Task cancelled")));
                                results.put(cancelledResult);
                            }
                        } catch (InterruptedException e) {
                            Logger.e("Task execution interrupted", e);
                        } catch (ExecutionException e) {
                            if (e.getCause() instanceof MeasurementSkippedException) {
                                // Don't do anything with this - no need to report skipped measurements
                                sendStringMsg("Task skipped - " + e.getCause().toString() + "\n" + task);
                                Logger.i("Task skipped", e.getCause());
                            } else {
                                // Log the error
                                sendStringMsg("Task failed - " + e.getCause().toString() + "\n" + task);
                                Logger.e("Task execution failed", e.getCause());
                                // Was already sent
                                // finishedTasks.add(this.getFailureResult(task, e.getCause()));
                            }
                        } catch (CancellationException e) {
                            Logger.e("Task cancelled", e);
                        }
                    } else if (task.isPassedDeadline()) {
                        /*
                         * If a task has reached its deadline but has not been run, remove it and report
                         * failure
                         */
                        this.pendingTasks.remove(task);
                        future.cancel(true);
                        JSONObject cancelledResult = MeasurementJsonConvertor.encodeToJson(
                                this.getFailureResult(task, new RuntimeException("Deadline passed before execution")));
                        results.put(cancelledResult);
                    }
                }
                if (future == null) {
                    /*
                     * Tasks that are scheduled after deadline are put into pendingTasks with a null future.
                     */
                    this.pendingTasks.remove(task);
                    JSONObject cancelledResult = MeasurementJsonConvertor.encodeToJson(
                            this.getFailureResult(task, new RuntimeException("Task scheduled after deadline")));
                    results.put(cancelledResult);
                }
            }
        } catch (ConcurrentModificationException e) {
            /*
             * keySet is a synchronized view of the keys. However, changes during iteration will throw
             * ConcurrentModificationException. Since we have synchronized all changes to pendingTasks
             * this should not happen.
             */
            Logger.e("Pending tasks is changed during measurement upload");
        } catch (JSONException e) {
            e.printStackTrace();
        }
    }
    if (results.length() > 0) {
        try {
            this.checkin.uploadMeasurementResult(results, resourceCapManager);
        } catch (IOException e) {
            Logger.e("Error when uploading message");
        }
    }
    Logger.i("A total of " + results.length() + " uploaded");
    Logger.i("A total of " + results.length() + " is in the results list");
}
From source file:de.hybris.platform.servicelayer.tx.ItemModelTransactionTest.java
private <V> V runInOtherThread(final Callable<V> callable, final int timeoutSeconds) {
    final ExecutorService pool = Executors.newFixedThreadPool(1, new ThreadFactory() {
        final Tenant tenant = Registry.getCurrentTenantNoFallback();

        @Override
        public Thread newThread(final Runnable r) {
            return new Thread(r) {
                @Override
                public void run() {
                    try {
                        Registry.setCurrentTenant(tenant);
                        super.run();
                    } finally {
                        JaloSession.deactivate();
                        Registry.unsetCurrentTenant();
                    }
                }
            };
        }
    });
    try {
        final Future<V> future = pool.submit(callable);
        return future.get(timeoutSeconds, TimeUnit.SECONDS);
    } catch (final InterruptedException e) {
        Thread.currentThread().interrupt();
        fail("interrupted while waiting");
    } catch (final ExecutionException e) {
        fail("unexpected execution exception " + e.getCause());
    } catch (final TimeoutException e) {
        fail("callable " + callable + " did not finish within maximum " + timeoutSeconds + " seconds to wait");
    } finally {
        pool.shutdownNow();
    }
    return null;
}
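The helper above shows a common courtesy when catching InterruptedException: call Thread.currentThread().interrupt() to restore the interrupt flag before reporting the failure, since catching the exception clears it. A stripped-down sketch of that idiom (the helper name is illustrative):

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public final class FutureWaits {
    // Waits for a result; returns null on interruption, failure, or timeout.
    public static <V> V awaitOrNull(Future<V> future, int timeoutSeconds) {
        try {
            return future.get(timeoutSeconds, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // Catching InterruptedException clears the flag; set it again
            // so that callers further up the stack can observe it.
            Thread.currentThread().interrupt();
        } catch (ExecutionException e) {
            System.err.println("task failed: " + e.getCause());
        } catch (TimeoutException e) {
            System.err.println("timed out after " + timeoutSeconds + " seconds");
        }
        return null;
    }
}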
From source file:com.hortonworks.registries.storage.filestorage.DbFileStorageTest.java
@Test(expected = StorageException.class)
public void testConcurrentUpload() throws Throwable {
    try {
        transactionManager.beginTransaction(TransactionIsolation.SERIALIZABLE);
        String input = IOUtils.toString(this.getClass().getClassLoader().getResourceAsStream(FILE_NAME), "UTF-8");
        String updated = input + " new text";
        dbFileStorage.upload(IOUtils.toInputStream(input, "UTF-8"), FILE_NAME);
        InputStream slowStream = new InputStream() {
            byte[] bytes = updated.getBytes("UTF-8");
            int i = 0;

            @Override
            public int read() throws IOException {
                try {
                    Thread.sleep(10);
                } catch (InterruptedException ex) {
                    // ignored: keep feeding bytes slowly
                }
                return (i < bytes.length) ? (bytes[i++] & 0xff) : -1;
            }
        };
        FutureTask<String> ft1 = new FutureTask<>(() -> {
            try {
                transactionManager.beginTransaction(TransactionIsolation.SERIALIZABLE);
                String name = dbFileStorage.upload(slowStream, FILE_NAME);
                transactionManager.commitTransaction();
                return name;
            } catch (Exception e) {
                transactionManager.rollbackTransaction();
                throw e;
            }
        });
        FutureTask<String> ft2 = new FutureTask<>(() -> {
            try {
                transactionManager.beginTransaction(TransactionIsolation.SERIALIZABLE);
                String name = dbFileStorage.upload(IOUtils.toInputStream(updated, "UTF-8"), FILE_NAME);
                transactionManager.commitTransaction();
                return name;
            } catch (Exception e) {
                transactionManager.rollbackTransaction();
                throw e;
            }
        });
        Thread t1 = new Thread(ft1);
        Thread t2 = new Thread(ft2);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        try {
            ft1.get();
        } catch (ExecutionException ex) {
            // rethrow the real cause so @Test(expected = ...) can match it
            throw ex.getCause();
        }
        transactionManager.commitTransaction();
    } catch (Exception e) {
        transactionManager.rollbackTransaction();
        throw e;
    }
}
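The test above ends with a telling idiom: the worker's StorageException arrives wrapped in an ExecutionException, so the test rethrows ex.getCause() to let @Test(expected = StorageException.class) match the original type. A minimal sketch of the same unwrap-and-rethrow move (names are illustrative):

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask;

public class RethrowCauseExample {
    public static void main(String[] args) throws Throwable {
        Callable<String> failing = () -> {
            throw new IllegalArgumentException("original failure");
        };
        FutureTask<String> task = new FutureTask<>(failing);
        new Thread(task).start();
        try {
            task.get();
        } catch (ExecutionException ex) {
            // Rethrow the task's own exception so callers (or a JUnit
            // expected-exception clause) see IllegalArgumentException,
            // not the ExecutionException wrapper.
            throw ex.getCause();
        }
    }
}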
From source file:net.myrrix.online.ServerRecommender.java
private List<RecommendedItem> multithreadedTopN(final float[][] userFeatures, final FastIDSet userKnownItemIDs,
        final FastIDSet userTagIDs, final IDRescorer rescorer, final int howMany, CandidateFilter candidateFilter) {
    Collection<Iterator<FastByIDMap.MapEntry<float[]>>> candidateIterators =
            candidateFilter.getCandidateIterator(userFeatures);
    int numIterators = candidateIterators.size();
    int parallelism = FastMath.min(numCores, numIterators);
    final Queue<MutableRecommendedItem> topN = TopN.initialQueue(howMany);
    if (parallelism > 1) {
        ExecutorService executorService = executor.get();
        final Iterator<Iterator<FastByIDMap.MapEntry<float[]>>> candidateIteratorsIterator =
                candidateIterators.iterator();
        Collection<Future<?>> futures = Lists.newArrayList();
        for (int i = 0; i < numCores; i++) {
            futures.add(executorService.submit(new Callable<Void>() {
                @Override
                public Void call() {
                    float[] queueLeastValue = { Float.NEGATIVE_INFINITY };
                    while (true) {
                        Iterator<FastByIDMap.MapEntry<float[]>> candidateIterator;
                        synchronized (candidateIteratorsIterator) {
                            if (!candidateIteratorsIterator.hasNext()) {
                                break;
                            }
                            candidateIterator = candidateIteratorsIterator.next();
                        }
                        Iterator<RecommendedItem> partialIterator = new RecommendIterator(userFeatures,
                                candidateIterator, userKnownItemIDs, userTagIDs, rescorer);
                        TopN.selectTopNIntoQueueMultithreaded(topN, queueLeastValue, partialIterator, howMany);
                    }
                    return null;
                }
            }));
        }
        for (Future<?> future : futures) {
            try {
                future.get();
            } catch (InterruptedException e) {
                throw new IllegalStateException(e);
            } catch (ExecutionException e) {
                throw new IllegalStateException(e.getCause());
            }
        }
    } else {
        for (Iterator<FastByIDMap.MapEntry<float[]>> candidateIterator : candidateIterators) {
            Iterator<RecommendedItem> partialIterator = new RecommendIterator(userFeatures, candidateIterator,
                    userKnownItemIDs, userTagIDs, rescorer);
            TopN.selectTopNIntoQueue(topN, partialIterator, howMany);
        }
    }
    return TopN.selectTopNFromQueue(topN, howMany);
}
From source file:com.datatorrent.contrib.hdht.HDHTWriterTest.java
@Test
public void testWriteError() throws Exception {
    File file = new File(testInfo.getDir());
    FileUtils.deleteDirectory(file);
    final RuntimeException writeError = new RuntimeException("failure simulation");
    final CountDownLatch endWindowComplete = new CountDownLatch(1);
    final CountDownLatch writerActive = new CountDownLatch(1);
    FileAccessFSImpl fa = new MockFileAccess() {
        @Override
        public FileWriter getWriter(long bucketKey, String fileName) throws IOException {
            writerActive.countDown();
            try {
                if (endWindowComplete.await(10, TimeUnit.SECONDS)) {
                    throw writeError;
                }
            } catch (InterruptedException e) {
                // Do nothing
            }
            return super.getWriter(bucketKey, fileName);
        }
    };
    fa.setBasePath(file.getAbsolutePath());
    HDHTWriter hds = new HDHTWriter();
    hds.setFileStore(fa);
    hds.setFlushIntervalCount(0); // flush after every window
    long BUCKETKEY = 1;
    hds.setup(new OperatorContextTestHelper.TestIdOperatorContext(0, new DefaultAttributeMap()));
    //hds.writeExecutor = new ScheduledThreadPoolExecutor(1);
    hds.beginWindow(1);
    long[] seqArray = { 5L, 1L, 3L, 4L, 2L };
    for (long seq : seqArray) {
        Slice key = newKey(BUCKETKEY, seq);
        hds.put(BUCKETKEY, key, ("data" + seq).getBytes());
    }
    hds.endWindow();
    hds.checkpointed(1);
    hds.committed(1);
    endWindowComplete.countDown();
    try {
        Assert.assertTrue(writerActive.await(10, TimeUnit.SECONDS));
        hds.writeExecutor.shutdown();
        hds.writeExecutor.awaitTermination(10, TimeUnit.SECONDS);
        hds.beginWindow(2);
        hds.endWindow();
        Assert.fail("exception not raised");
    } catch (Exception e) {
        Assert.assertSame(writeError, e.getCause());
    }
    hds.teardown();
}
From source file:ch.cyberduck.core.s3.S3Path.java
/**
 * @param throttle Bandwidth throttle
 * @param listener Callback for bytes sent
 * @param status   Transfer status
 * @param object   File location
 * @throws IOException      I/O error
 * @throws ServiceException Service error
 */
private void uploadMultipart(final BandwidthThrottle throttle, final StreamListener listener,
        final TransferStatus status, final StorageObject object) throws IOException, ServiceException {
    final ThreadFactory threadFactory = new NamedThreadFactory("multipart");
    MultipartUpload multipart = null;
    if (status.isResume()) {
        // This operation lists in-progress multipart uploads. An in-progress multipart upload is a
        // multipart upload that has been initiated, using the Initiate Multipart Upload request, but has
        // not yet been completed or aborted.
        final List<MultipartUpload> uploads = this.getSession().getClient()
                .multipartListUploads(this.getContainerName());
        for (MultipartUpload upload : uploads) {
            if (!upload.getBucketName().equals(this.getContainerName())) {
                continue;
            }
            if (!upload.getObjectKey().equals(this.getKey())) {
                continue;
            }
            if (log.isInfoEnabled()) {
                log.info(String.format("Resume multipart upload %s", upload.getUploadId()));
            }
            multipart = upload;
            break;
        }
    }
    if (null == multipart) {
        log.info("No pending multipart upload found");
        // Initiate multipart upload with metadata
        Map<String, Object> metadata = object.getModifiableMetadata();
        if (StringUtils.isNotBlank(Preferences.instance().getProperty("s3.storage.class"))) {
            metadata.put(this.getSession().getClient().getRestHeaderPrefix() + "storage-class",
                    Preferences.instance().getProperty("s3.storage.class"));
        }
        if (StringUtils.isNotBlank(Preferences.instance().getProperty("s3.encryption.algorithm"))) {
            metadata.put(this.getSession().getClient().getRestHeaderPrefix() + "server-side-encryption",
                    Preferences.instance().getProperty("s3.encryption.algorithm"));
        }
        multipart = this.getSession().getClient().multipartStartUpload(this.getContainerName(), this.getKey(),
                metadata);
    }
    final List<MultipartPart> completed;
    if (status.isResume()) {
        log.info(String.format("List completed parts of %s", multipart.getUploadId()));
        // This operation lists the parts that have been uploaded for a specific multipart upload.
        completed = this.getSession().getClient().multipartListParts(multipart);
    } else {
        completed = new ArrayList<MultipartPart>();
    }
    // At any point, at most <tt>nThreads</tt> threads will be active processing tasks.
    final ExecutorService pool = Executors.newFixedThreadPool(
            Preferences.instance().getInteger("s3.upload.multipart.concurency"), threadFactory);
    try {
        final List<Future<MultipartPart>> parts = new ArrayList<Future<MultipartPart>>();
        final long defaultPartSize = Math.max((status.getLength() / MAXIMUM_UPLOAD_PARTS),
                DEFAULT_MINIMUM_UPLOAD_PART_SIZE);
        long remaining = status.getLength();
        long marker = 0;
        for (int partNumber = 1; remaining > 0; partNumber++) {
            boolean skip = false;
            if (status.isResume()) {
                log.info(String.format("Determine if part %d can be skipped", partNumber));
                for (MultipartPart c : completed) {
                    if (c.getPartNumber().equals(partNumber)) {
                        log.info("Skip completed part number " + partNumber);
                        listener.bytesSent(c.getSize());
                        skip = true;
                        break;
                    }
                }
            }
            // Last part can be less than 5 MB. Adjust part size.
            final long length = Math.min(defaultPartSize, remaining);
            if (!skip) {
                // Submit to queue
                parts.add(this.submitPart(throttle, listener, status, multipart, pool, partNumber, marker, length));
            }
            remaining -= length;
            marker += length;
        }
        for (Future<MultipartPart> future : parts) {
            try {
                completed.add(future.get());
            } catch (InterruptedException e) {
                log.error("Part upload failed:" + e.getMessage());
                throw new ConnectionCanceledException(e.getMessage(), e);
            } catch (ExecutionException e) {
                log.warn("Part upload failed:" + e.getMessage());
                if (e.getCause() instanceof ServiceException) {
                    throw (ServiceException) e.getCause();
                }
                if (e.getCause() instanceof IOException) {
                    throw (IOException) e.getCause();
                }
                throw new ConnectionCanceledException(e.getMessage(), e);
            }
        }
        if (status.isComplete()) {
            this.getSession().getClient().multipartCompleteUpload(multipart, completed);
        }
    } finally {
        if (!status.isComplete()) {
            // Cancel all previous parts
            log.info(String.format("Cancel multipart upload %s", multipart.getUploadId()));
            this.getSession().getClient().multipartAbortUpload(multipart);
        }
        // Cancel future tasks
        pool.shutdown();
    }
}
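Cyberduck's part-collection loop demonstrates cause dispatch: inspect getCause() with instanceof, rethrow known checked types with their original type, and fall back to a wrapper for anything else. A compact sketch of that idiom (the IOException-producing task is illustrative):

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CauseDispatchExample {
    public static void main(String[] args) throws IOException, InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Callable<byte[]> part = () -> {
            throw new IOException("simulated part upload failure");
        };
        Future<byte[]> future = pool.submit(part);
        try {
            future.get();
        } catch (ExecutionException e) {
            // Rethrow known checked causes with their original type...
            if (e.getCause() instanceof IOException) {
                throw (IOException) e.getCause();
            }
            // ...and wrap everything else.
            throw new IllegalStateException(e.getCause());
        } finally {
            pool.shutdown();
        }
    }
}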
From source file:org.eclipse.buckminster.jnlp.p2.wizard.install.InstallWizard.java
void retrieveStackInfo() throws InterruptedException {
    if (m_distroP2Properties == null) {
        try {
            ((AdvancedWizardDialog) getContainer()).disableNavigation();
            getContainer().run(true, true, new IRunnableWithProgress() {
                public void run(IProgressMonitor monitor) throws InvocationTargetException, InterruptedException {
                    try {
                        m_distroP2Properties = m_distroProvider.getDistroP2Properties(m_draft, m_cspecId, monitor);
                        m_opml = null;
                        String opmlString = m_distroP2Properties.getProperty(PROP_OPML);
                        if (opmlString != null) {
                            IParser<OPML> opmlParser = CorePlugin.getDefault().getParserFactory().getOPMLParser(true);
                            m_opml = opmlParser.parse("byte image",
                                    new ByteArrayInputStream(TransferUtils.decompress(opmlString.getBytes("UTF-8"))));
                        }
                    } catch (InterruptedException e) {
                        throw e;
                    } catch (Exception e) {
                        throw new InvocationTargetException(e);
                    }
                    m_installLocation = Path.fromOSString(MaterializationUtils.getDefaultDestination(m_artifactName));
                }
            });
        } catch (InterruptedException e) {
            throw e;
        } catch (Exception e) {
            if (e instanceof JNLPException)
                throw (JNLPException) e;
            Throwable originalException = e;
            if (e instanceof InvocationTargetException && e.getCause() != null)
                originalException = e.getCause();
            throw new JNLPException("Cannot read distro specification", ERROR_CODE_REMOTE_IO_EXCEPTION,
                    originalException);
        } finally {
            getContainer().updateButtons();
        }
    }
    if (m_distroP2Properties.size() == 0)
        throw new JNLPException("Materialization properties were not retrieved",
                ERROR_CODE_MISSING_PROPERTY_EXCEPTION);
}
From source file:com.amazonaws.mobileconnectors.s3.transferutility.UploadTask.java
private Boolean uploadMultipartAndWaitForCompletion() throws ExecutionException {
    /*
     * For a new multipart upload, upload.mMultipartId should be null. If
     * it's a resumed upload, upload.mMultipartId would not be null.
     */
    long bytesAlreadyTransferred = 0;
    if (upload.multipartId == null || upload.multipartId.isEmpty()) {
        final PutObjectRequest putObjectRequest = createPutObjectRequest(upload);
        TransferUtility.appendMultipartTransferServiceUserAgentString(putObjectRequest);
        try {
            upload.multipartId = initiateMultipartUpload(putObjectRequest);
        } catch (final AmazonClientException ace) {
            LOGGER.error("Error initiating multipart upload: " + upload.id + " due to " + ace.getMessage(), ace);
            updater.throwError(upload.id, ace);
            updater.updateState(upload.id, TransferState.FAILED);
            return false;
        }
        dbUtil.updateMultipartId(upload.id, upload.multipartId);
    } else {
        /*
         * For a resumed upload, we should calculate the bytes already
         * transferred.
         */
        bytesAlreadyTransferred = dbUtil.queryBytesTransferredByMainUploadId(upload.id);
        if (bytesAlreadyTransferred > 0) {
            LOGGER.debug(String.format("Resume transfer %d from %d bytes", upload.id, bytesAlreadyTransferred));
        }
    }
    updater.updateProgress(upload.id, bytesAlreadyTransferred, upload.bytesTotal);
    final List<UploadPartRequest> requestList = dbUtil.getNonCompletedPartRequestsFromDB(upload.id,
            upload.multipartId);
    LOGGER.debug("multipart upload " + upload.id + " in " + requestList.size() + " parts.");
    final ArrayList<Future<Boolean>> futures = new ArrayList<Future<Boolean>>();
    for (final UploadPartRequest request : requestList) {
        TransferUtility.appendMultipartTransferServiceUserAgentString(request);
        request.setGeneralProgressListener(updater.newProgressListener(upload.id));
        futures.add(TransferThreadPool.submitTask(new UploadPartTask(request, s3, dbUtil, networkInfo)));
    }
    try {
        boolean isSuccess = true;
        /*
         * Future.get() will block the current thread until the method
         * returns.
         */
        for (final Future<Boolean> f : futures) {
            // UploadPartTask returns false when it's interrupted by user
            // and the state is set by caller
            final boolean b = f.get();
            isSuccess &= b;
        }
        if (!isSuccess) {
            return false;
        }
    } catch (final InterruptedException e) {
        /*
         * Future.get() will catch InterruptedException, but it's not a
         * failure, it may be caused by a pause operation from applications.
         */
        for (final Future<?> f : futures) {
            f.cancel(true);
        }
        // abort by user
        LOGGER.debug("Transfer " + upload.id + " is interrupted by user");
        return false;
    } catch (final ExecutionException ee) {
        // handle pause, cancel, etc
        boolean isNetworkInterrupted = false;
        if (ee.getCause() != null && ee.getCause() instanceof Exception) {
            // check for network interruption and pause the transfer instead of failing them
            isNetworkInterrupted = dbUtil.checkWaitingForNetworkPartRequestsFromDB(upload.id);
            if (isNetworkInterrupted) {
                LOGGER.debug("Network Connection Interrupted: Transfer " + upload.id + " waits for network");
                updater.updateState(upload.id, TransferState.WAITING_FOR_NETWORK);
                return false;
            }
            final Exception e = (Exception) ee.getCause();
            if (RetryUtils.isInterrupted(e)) {
                /*
                 * thread is interrupted by user. don't update the state as
                 * it's set by caller who interrupted
                 */
                LOGGER.debug("Transfer " + upload.id + " is interrupted by user");
                return false;
            } else if (e.getCause() != null && e.getCause() instanceof IOException
                    && !networkInfo.isNetworkConnected()) {
                LOGGER.debug("Transfer " + upload.id + " waits for network");
                updater.updateState(upload.id, TransferState.WAITING_FOR_NETWORK);
            }
            updater.throwError(upload.id, e);
        }
        updater.updateState(upload.id, TransferState.FAILED);
        return false;
    }
    try {
        completeMultiPartUpload(upload.id, upload.bucketName, upload.key, upload.multipartId);
        updater.updateProgress(upload.id, upload.bytesTotal, upload.bytesTotal);
        updater.updateState(upload.id, TransferState.COMPLETED);
        return true;
    } catch (final AmazonClientException ace) {
        LOGGER.error("Failed to complete multipart: " + upload.id + " due to " + ace.getMessage(), ace);
        updater.throwError(upload.id, ace);
        updater.updateState(upload.id, TransferState.FAILED);
        return false;
    }
}
From source file:org.apache.solr.client.solrj.impl.CloudSolrClient.java
private NamedList<Object> directUpdate(AbstractUpdateRequest request, String collection,
        ClusterState clusterState) throws SolrServerException {
    UpdateRequest updateRequest = (UpdateRequest) request;
    ModifiableSolrParams params = (ModifiableSolrParams) request.getParams();
    ModifiableSolrParams routableParams = new ModifiableSolrParams();
    ModifiableSolrParams nonRoutableParams = new ModifiableSolrParams();
    if (params != null) {
        nonRoutableParams.add(params);
        routableParams.add(params);
        for (String param : NON_ROUTABLE_PARAMS) {
            routableParams.remove(param);
        }
    }
    if (collection == null) {
        throw new SolrServerException(
                "No collection param specified on request and no default collection has been set.");
    }
    // Check to see if the collection is an alias.
    Aliases aliases = zkStateReader.getAliases();
    if (aliases != null) {
        Map<String, String> collectionAliases = aliases.getCollectionAliasMap();
        if (collectionAliases != null && collectionAliases.containsKey(collection)) {
            collection = collectionAliases.get(collection);
        }
    }
    DocCollection col = getDocCollection(clusterState, collection, null);
    DocRouter router = col.getRouter();
    if (router instanceof ImplicitDocRouter) {
        // short circuit as optimization
        return null;
    }
    // Create the URL map, which is keyed on slice name.
    // The value is a list of URLs for each replica in the slice.
    // The first value in the list is the leader for the slice.
    final Map<String, List<String>> urlMap = buildUrlMap(col);
    final Map<String, LBHttpSolrClient.Req> routes = (urlMap == null ? null
            : updateRequest.getRoutes(router, col, urlMap, routableParams, this.idField));
    if (routes == null) {
        if (directUpdatesToLeadersOnly && hasInfoToFindLeaders(updateRequest, idField)) {
            // we have info (documents with ids and/or ids to delete) with
            // which to find the leaders but we could not find (all of) them
            throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
                    "directUpdatesToLeadersOnly==true but could not find leader(s)");
        } else {
            // we could not find a leader or routes yet - use unoptimized general path
            return null;
        }
    }
    final NamedList<Throwable> exceptions = new NamedList<>();
    final NamedList<NamedList> shardResponses = new NamedList<>(routes.size() + 1); // +1 for deleteQuery
    long start = System.nanoTime();
    if (parallelUpdates) {
        final Map<String, Future<NamedList<?>>> responseFutures = new HashMap<>(routes.size());
        for (final Map.Entry<String, LBHttpSolrClient.Req> entry : routes.entrySet()) {
            final String url = entry.getKey();
            final LBHttpSolrClient.Req lbRequest = entry.getValue();
            try {
                MDC.put("CloudSolrClient.url", url);
                responseFutures.put(url, threadPool.submit(() -> lbClient.request(lbRequest).getResponse()));
            } finally {
                MDC.remove("CloudSolrClient.url");
            }
        }
        for (final Map.Entry<String, Future<NamedList<?>>> entry : responseFutures.entrySet()) {
            final String url = entry.getKey();
            final Future<NamedList<?>> responseFuture = entry.getValue();
            try {
                shardResponses.add(url, responseFuture.get());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            } catch (ExecutionException e) {
                exceptions.add(url, e.getCause());
            }
        }
        if (exceptions.size() > 0) {
            Throwable firstException = exceptions.getVal(0);
            if (firstException instanceof SolrException) {
                SolrException e = (SolrException) firstException;
                throw new RouteException(ErrorCode.getErrorCode(e.code()), exceptions, routes);
            } else {
                throw new RouteException(ErrorCode.SERVER_ERROR, exceptions, routes);
            }
        }
    } else {
        for (Map.Entry<String, LBHttpSolrClient.Req> entry : routes.entrySet()) {
            String url = entry.getKey();
            LBHttpSolrClient.Req lbRequest = entry.getValue();
            try {
                NamedList<Object> rsp = lbClient.request(lbRequest).getResponse();
                shardResponses.add(url, rsp);
            } catch (Exception e) {
                if (e instanceof SolrException) {
                    throw (SolrException) e;
                } else {
                    throw new SolrServerException(e);
                }
            }
        }
    }
    UpdateRequest nonRoutableRequest = null;
    List<String> deleteQuery = updateRequest.getDeleteQuery();
    if (deleteQuery != null && deleteQuery.size() > 0) {
        UpdateRequest deleteQueryRequest = new UpdateRequest();
        deleteQueryRequest.setDeleteQuery(deleteQuery);
        nonRoutableRequest = deleteQueryRequest;
    }
    Set<String> paramNames = nonRoutableParams.getParameterNames();
    Set<String> intersection = new HashSet<>(paramNames);
    intersection.retainAll(NON_ROUTABLE_PARAMS);
    if (nonRoutableRequest != null || intersection.size() > 0) {
        if (nonRoutableRequest == null) {
            nonRoutableRequest = new UpdateRequest();
        }
        nonRoutableRequest.setParams(nonRoutableParams);
        List<String> urlList = new ArrayList<>();
        urlList.addAll(routes.keySet());
        Collections.shuffle(urlList, rand);
        LBHttpSolrClient.Req req = new LBHttpSolrClient.Req(nonRoutableRequest, urlList);
        try {
            LBHttpSolrClient.Rsp rsp = lbClient.request(req);
            shardResponses.add(urlList.get(0), rsp.getResponse());
        } catch (Exception e) {
            throw new SolrException(ErrorCode.SERVER_ERROR, urlList.get(0), e);
        }
    }
    long end = System.nanoTime();
    RouteResponse rr = condenseResponse(shardResponses,
            (int) TimeUnit.MILLISECONDS.convert(end - start, TimeUnit.NANOSECONDS));
    rr.setRouteResponses(shardResponses);
    rr.setRoutes(routes);
    return rr;
}
From source file:org.dllearner.scripts.evaluation.EnrichmentEvaluationMultithreaded.java
private void applyLearningAlgorithm(AxiomLearningAlgorithm algorithm, Entity entity) {
    int attempt = 0;
    long startTime = 0;
    boolean timeout = true;
    String algName = AnnComponentManager.getName(algorithm);
    while (((AbstractAxiomLearningAlgorithm) algorithm).isTimeout() && attempt++ < maxAttempts) {
        if (attempt > 1) {
            try {
                logger.warn("Got timeout in " + algName + " for entity " + entity.getName() + ". Waiting "
                        + delayInMilliseconds + " ms ...");
                Thread.sleep(delayInMilliseconds);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
        logger.info("Applying " + algName + " on " + entity.toString() + " ... (Attempt " + attempt + ")");
        startTime = System.currentTimeMillis();
        try {
            ((AbstractAxiomLearningAlgorithm) algorithm)
                    .setForceSPARQL_1_0_Mode(attempt > nrOfAttemptsBeforeForceToSPARQL1_0_Mode);
            algorithm.start();
            timeout = ((AbstractAxiomLearningAlgorithm) algorithm).isTimeout();
        } catch (Exception e) {
            if (e.getCause() instanceof SocketTimeoutException) {
                // endpoint timeouts are expected here; retry on the next attempt
            } else {
                e.printStackTrace();
            }
        }
    }
    long runTime = System.currentTimeMillis() - startTime;
    List<EvaluatedAxiom> learnedAxioms = algorithm.getCurrentlyBestEvaluatedAxioms(nrOfAxiomsToLearn);
    if (timeout && learnedAxioms.isEmpty()) {
        writeToDB(entity.toManchesterSyntaxString(baseURI, prefixes), algName, "TIMEOUT", 0, runTime, false);
    } else if (learnedAxioms == null || learnedAxioms.isEmpty()) {
        writeToDB(entity.toManchesterSyntaxString(baseURI, prefixes), algName, "NULL", 0, runTime, false);
    } else {
        for (EvaluatedAxiom learnedAxiom : learnedAxioms) {
            double score = learnedAxiom.getScore().getAccuracy();
            if (Double.isNaN(score)) {
                score = -1;
            }
            writeToDB(entity.toManchesterSyntaxString(baseURI, prefixes).toString(), algName,
                    learnedAxiom.getAxiom().toManchesterSyntaxString(baseURI, prefixes), score, runTime,
                    isEntailed(learnedAxiom));
            if (score >= threshold) {
                algorithm2Ontology.get(algorithm.getClass())
                        .add(OWLAPIAxiomConvertVisitor.convertAxiom(learnedAxiom.getAxiom()));
            }
        }
    }
}