List of usage examples for java.util.concurrent.ExecutorService.invokeAll
<T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException;
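Before the examples from real projects, a minimal sketch of the call pattern may help. invokeAll blocks until every task has completed (or the calling thread is interrupted), so each returned Future is already done; get() then either yields the task's result or throws an ExecutionException wrapping its failure. The class name, pool size, and trivial tasks below are illustrative placeholders, not taken from any of the projects quoted; requires Java 9+ for List.of.

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class InvokeAllSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4); // pool size is arbitrary here
        // Three trivial tasks; in real code these would do actual work.
        List<Callable<Integer>> tasks = List.of(
                () -> 1,
                () -> 2,
                () -> { throw new IllegalStateException("task failed"); });
        try {
            // Blocks until all three tasks have completed; the returned futures are all done.
            List<Future<Integer>> futures = pool.invokeAll(tasks);
            for (Future<Integer> f : futures) {
                try {
                    System.out.println("result: " + f.get()); // get() no longer blocks here
                } catch (ExecutionException e) {
                    System.out.println("task threw: " + e.getCause()); // per-task failure
                }
            }
        } finally {
            pool.shutdown(); // invokeAll does not shut the pool down
        }
    }
}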
From source file: org.springframework.integration.jdbc.lock.JdbcLockRegistryDifferentClientTests.java

@Test
public void testOnlyOneLock() throws Exception {
    for (int i = 0; i < 100; i++) {
        final List<String> locked = new ArrayList<String>();
        final CountDownLatch latch = new CountDownLatch(20);
        ExecutorService pool = Executors.newFixedThreadPool(6);
        ArrayList<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>();
        for (int j = 0; j < 20; j++) {
            final DefaultLockRepository client = new DefaultLockRepository(this.dataSource);
            client.afterPropertiesSet();
            this.context.getAutowireCapableBeanFactory().autowireBean(client);
            Callable<Boolean> task = () -> {
                Lock lock = new JdbcLockRegistry(client).obtain("foo");
                try {
                    if (locked.isEmpty() && lock.tryLock()) {
                        if (locked.isEmpty()) {
                            locked.add("done");
                            return true;
                        }
                    }
                } finally {
                    try {
                        lock.unlock();
                    } catch (Exception e) {
                        // ignore
                    }
                    latch.countDown();
                }
                return false;
            };
            tasks.add(task);
        }
        logger.info("Starting: " + i);
        pool.invokeAll(tasks);
        assertTrue(latch.await(10, TimeUnit.SECONDS));
        // eventually they both get the lock and release it
        assertEquals(1, locked.size());
        assertTrue(locked.contains("done"));
    }
}
From source file: com.rovemonteux.silvertunnel.netlib.layer.tor.directory.Directory.java

/**
 * Parse multiple router descriptors from one String.
 *
 * @param routerDescriptors
 * @return the result; if multiple entries with the same fingerprint are in
 *         routerDescriptors, the last will be considered
 */
protected Map<Fingerprint, Router> parseRouterDescriptors(final String routerDescriptors) {
    final long timeStart = System.currentTimeMillis();
    final Map<Fingerprint, Router> result = new HashMap<Fingerprint, Router>();
    final Matcher m = ROUTER_DESCRIPTORS_PATTERN.matcher(routerDescriptors);
    final ExecutorService executor = Executors.newFixedThreadPool(5); // TODO : make threadpool configurable
    final Collection<RouterParserCallable> allTasks = new ArrayList<RouterParserCallable>();
    while (m.find()) {
        allTasks.add(new RouterParserCallable(m.group(1)));
    }
    List<Future<Router>> results = null;
    try {
        results = executor.invokeAll(allTasks);
    } catch (InterruptedException exception) {
        LOG.warn("error while parsing the router descriptors in parallel", exception);
    }
    if (results != null && !results.isEmpty()) {
        for (Future<Router> item : results) {
            Router router = null;
            try {
                router = item.get();
            } catch (InterruptedException exception) {
                LOG.warn("error while parsing the router descriptors in parallel", exception);
            } catch (ExecutionException exception) {
                LOG.warn("error while parsing the router descriptors in parallel", exception);
            }
            if (router != null) {
                result.put(router.getFingerprint(), router);
            }
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("parseRouterDescriptors took " + (System.currentTimeMillis() - timeStart) + " ms");
    }
    return result;
}
From source file: jp.aegif.nemaki.cmis.service.impl.ObjectServiceImpl.java

@Override
public List<BulkUpdateObjectIdAndChangeToken> bulkUpdateProperties(CallContext callContext,
        String repositoryId, List<BulkUpdateObjectIdAndChangeToken> objectIdAndChangeTokenList,
        Properties properties, List<String> addSecondaryTypeIds, List<String> removeSecondaryTypeIds,
        ExtensionsData extension) {
    // //////////////////
    // General Exception
    // //////////////////
    // Each permission is checked at each execution
    exceptionService.invalidArgumentRequiredCollection("objectIdAndChangeToken", objectIdAndChangeTokenList);
    exceptionService.invalidArgumentSecondaryTypeIds(repositoryId, properties);

    // //////////////////
    // Body of the method
    // //////////////////
    List<BulkUpdateObjectIdAndChangeToken> results = new ArrayList<BulkUpdateObjectIdAndChangeToken>();
    ExecutorService executor = Executors.newCachedThreadPool();
    List<BulkUpdateTask> tasks = new ArrayList<>();
    for (BulkUpdateObjectIdAndChangeToken objectIdAndChangeToken : objectIdAndChangeTokenList) {
        tasks.add(new BulkUpdateTask(callContext, repositoryId, objectIdAndChangeToken, properties,
                addSecondaryTypeIds, removeSecondaryTypeIds, extension));
    }
    try {
        List<Future<BulkUpdateObjectIdAndChangeToken>> _results = executor.invokeAll(tasks);
        for (Future<BulkUpdateObjectIdAndChangeToken> _result : _results) {
            try {
                BulkUpdateObjectIdAndChangeToken result = _result.get();
                results.add(result);
            } catch (Exception e) {
                // TODO log
                // do nothing
            }
        }
    } catch (InterruptedException e1) {
        // TODO log
        e1.printStackTrace();
    }
    return results;
}
From source file: io.hops.security.TestUsersGroups.java

public void testConcurrentSetSameOwner(int cacheTime, int cacheSize) throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(CommonConfigurationKeys.HOPS_UG_CACHE_SECS, Integer.toString(cacheTime));
    conf.set(CommonConfigurationKeys.HOPS_UG_CACHE_SIZE, Integer.toString(cacheSize));
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path base = new Path("/base");
    dfs.mkdirs(base);

    final String userName = "user";
    final String groupName = "group";
    final int CONCURRENT_USERS = 100;

    ExecutorService executorService = Executors.newFixedThreadPool(CONCURRENT_USERS);
    List<Callable<Boolean>> callables = new ArrayList<>();
    for (int i = 0; i < CONCURRENT_USERS; i++) {
        Path file = new Path(base, "file" + i);
        dfs.create(file).close();
        callables.add(new SetOwner(dfs, file, userName, groupName));
    }
    List<Future<Boolean>> futures = executorService.invokeAll(callables);
    executorService.shutdown();
    executorService.awaitTermination(1, TimeUnit.SECONDS);
    for (Future<Boolean> f : futures) {
        assertTrue(f.get());
    }
    cluster.shutdown();
}
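A note on the ordering above: because invokeAll only returns once every SetOwner task has finished, the futures are already complete when shutdown() is called; awaitTermination merely gives the worker threads a moment to exit, and the f.get() calls at the end cannot block on unfinished work.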
From source file: org.drftpd.protocol.speedtest.net.slave.SpeedTestHandler.java

private float getDownloadSpeed(String url) {
    long totalTime = 0L;
    long totalBytes = 0L;
    long startTime = System.currentTimeMillis();
    RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(60000).setConnectTimeout(5000)
            .setConnectionRequestTimeout(5000).build();
    HttpGet httpGet = new HttpGet();
    httpGet.setConfig(requestConfig);
    SpeedTestCallable[] speedTestCallables = new SpeedTestCallable[_downThreads];
    for (int i = 0; i < _downThreads; i++) {
        speedTestCallables[i] = new SpeedTestCallable();
    }
    ExecutorService executor = Executors.newFixedThreadPool(_downThreads);
    List<Future<Long>> threadList;
    Set<Callable<Long>> callables = new HashSet<Callable<Long>>();
    url = url.substring(0, url.lastIndexOf('/') + 1) + "random";
    StopWatch watch = new StopWatch();
    for (int size : _sizes) { // Measure dl speed for each size in _sizes
        if ((System.currentTimeMillis() - startTime) > _downTime) {
            break;
        }
        String tmpURL = url + size + "x" + size + ".jpg";
        try {
            httpGet.setURI(new URI(tmpURL));
        } catch (URISyntaxException e) {
            logger.error("URI syntax error for " + tmpURL + " :: " + e.getMessage());
            close(executor, callables);
            return 0;
        }
        callables.clear();
        for (int k = 0; k < _downThreads; k++) {
            speedTestCallables[k].setHttpGet(httpGet);
            callables.add(speedTestCallables[k]);
        }
        for (int j = 0; j < _sizeLoop; j++) {
            try {
                watch.reset();
                Thread.sleep(_sleep);
                watch.start();
                threadList = executor.invokeAll(callables);
                for (Future<Long> fut : threadList) {
                    Long bytes = fut.get();
                    totalBytes += bytes;
                }
                watch.stop();
                totalTime += watch.getTime();
            } catch (InterruptedException e) {
                logger.error(e.getMessage());
                close(executor, callables);
                return 0;
            } catch (ExecutionException e) {
                logger.error(e.getMessage());
                close(executor, callables);
                return 0;
            }
            if ((System.currentTimeMillis() - startTime) > _downTime) {
                break;
            }
        }
    }
    if (totalBytes == 0L || totalTime == 0L) {
        close(executor, callables);
        return 0;
    }
    close(executor, callables);
    return (float) (((totalBytes * 8) / totalTime) * 1000) / 1000000;
}
From source file: org.drftpd.protocol.speedtest.net.slave.SpeedTestHandler.java

private float getUploadSpeed(String url) {
    long totalTime = 0L;
    long totalBytes = 0L;
    long startTime = System.currentTimeMillis();
    RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(60000).setConnectTimeout(5000)
            .setConnectionRequestTimeout(5000).build();
    HttpPost httpPost = new HttpPost(url);
    httpPost.setHeader("content-type", "application/x-www-form-urlencoded");
    httpPost.setConfig(requestConfig);
    String payload = _payload; // Initial payload
    StopWatch watch = new StopWatch();
    SpeedTestCallable[] speedTestCallables = new SpeedTestCallable[_upThreads];
    for (int i = 0; i < _upThreads; i++) {
        speedTestCallables[i] = new SpeedTestCallable();
    }
    ExecutorService executor = Executors.newFixedThreadPool(_upThreads);
    List<Future<Long>> threadList;
    Set<Callable<Long>> callables = new HashSet<Callable<Long>>();
    boolean limitReached = false;
    int i = 2;
    while (true) {
        if ((System.currentTimeMillis() - startTime) > _upTime) {
            break;
        }
        List<NameValuePair> nameValuePairs = new ArrayList<NameValuePair>();
        nameValuePairs.add(new BasicNameValuePair("content1", payload));
        try {
            httpPost.setEntity(new UrlEncodedFormEntity(nameValuePairs));
        } catch (UnsupportedEncodingException e) {
            logger.error("Unsupported encoding of payload for speedtest upload: " + e.getMessage());
            close(executor, callables);
            return 0;
        }
        callables.clear();
        for (int k = 0; k < _upThreads; k++) {
            speedTestCallables[k].setHttpPost(httpPost);
            callables.add(speedTestCallables[k]);
        }
        for (int j = 0; j < _payloadLoop; j++) {
            try {
                watch.reset();
                Thread.sleep(_sleep);
                watch.start();
                threadList = executor.invokeAll(callables);
                for (Future<Long> fut : threadList) {
                    Long bytes = fut.get();
                    totalBytes += bytes;
                }
                watch.stop();
                totalTime += watch.getTime();
            } catch (InterruptedException e) {
                logger.error(e.getMessage());
                close(executor, callables);
                return 0;
            } catch (ExecutionException e) {
                if (e.getMessage().contains("Error code 413")) {
                    limitReached = true;
                    payload = StringUtils.repeat(_payload, i - 2);
                } else {
                    logger.error(e.getMessage());
                    close(executor, callables);
                    return 0;
                }
            }
            if ((System.currentTimeMillis() - startTime) > _upTime) {
                break;
            }
        }
        if (!limitReached) { // Increase payload size if not too big
            payload = StringUtils.repeat(_payload, i);
            i++;
        }
    }
    if (totalBytes == 0L || totalTime == 0L) {
        close(executor, callables);
        return 0;
    }
    close(executor, callables);
    return (float) (((totalBytes * 8) / totalTime) * 1000) / 1000000;
}
From source file: com.opendoorlogistics.core.geometry.rog.builder.ROGBuilder.java

@SuppressWarnings("resource")
public void build() {
    try {
        // Load the shapefile
        ODLDatastoreAlterable<ODLTableAlterable> ds = ODLDatastoreImpl.alterableFactory.create();
        ImportShapefile.importShapefile(shapefile, false, ds, false);
        ODLTableReadOnly table = ds.getTableAt(0);
        if (table.getRowCount() == 0 || processingApi.isCancelled()) {
            return;
        }
        int geomCol = TableUtils.findColumnIndx(table, ODLColumnType.GEOM);
        int nrows = table.getRowCount();

        // Create map to store positions in the output file
        LargeList<ShapeIndex> indices = new LargeList<>();
        for (int row = 0; row < nrows; row++) {
            indices.add(new ShapeIndex(row, tileFactoryInfo.getMaximumZoomLevel(),
                    (ODLGeom) table.getValueAt(row, geomCol)));
        }

        // Get full WGS84 geometries into collection of PendingWrites
        WKBWriter geomWriter = new WKBWriter();
        LargeList<PendingWrite> pws = new LargeList<>();
        for (int row = 0; row < nrows; row++) {
            Geometry g = ((ODLGeomImpl) table.getValueAt(row, geomCol)).getJTSGeometry();
            byte[] bytes = geomWriter.write(g);
            pws.add(new PendingWrite(indices.get(row), bytes));
        }
        if (processingApi.isCancelled()) {
            return;
        }

        // write full geometries
        QuadWriter quadWriter = new QuadWriter(tmpFile);
        quadWriter.add(pws, null, -1);
        if (processingApi.isCancelled()) {
            return;
        }

        // Create executor service
        ExecutorService executorService = Executors.newFixedThreadPool(nbThreads);

        // Loop over zoom levels
        for (int zoom = tileFactoryInfo.getMinimumZoomLevel(); zoom <= tileFactoryInfo
                .getMaximumZoomLevel(); zoom++) {
            postStatusMessage("ODLRG builder - processing zoom level " + zoom + " with "
                    + ((long) tileFactoryInfo.getLongitudeDegreeWidthInPixels(zoom)) + " pixels/degree");

            // Create converter for this zoom level
            TransformGeomToWorldBitmap mathTransform = createTransform(zoom);

            // Get a list and size it to store the results
            LargeList<PendingWrite> pendingWrites = new LargeList<>();
            for (int row = 0; row < nrows; row++) {
                pendingWrites.add(null);
            }

            // create a per-thread processor and then invoke all
            RowAllocator allocator = new RowAllocator(nrows);
            ArrayList<RowProcessor> processors = new ArrayList<>();
            for (int i = 0; i < nbThreads; i++) {
                processors.add(new RowProcessor(table, geomCol, indices, zoom, mathTransform, allocator,
                        "ODLRG builder - processing zoom level " + zoom, pendingWrites));
            }
            List<Future<Void>> futures = executorService.invokeAll(processors);
            for (Future<Void> future : futures) {
                future.get();
            }

            // Process all pending writes
            LargeList<PendingWrite> nonNulls = new LargeList<>();
            for (PendingWrite pw : pendingWrites) {
                if (pw != null) {
                    nonNulls.add(pw);
                }
            }
            quadWriter.add(nonNulls, tileFactoryInfo, zoom);
            if (processingApi.isCancelled()) {
                return;
            }
        }

        // shutdown executor service
        executorService.shutdown();

        // create final file
        quadWriter.finish(isNOLPL, indices, outfile);

        // try loading it
        validateFinalFile();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
From source file: org.apache.nutch.storage.TestGoraStorage.java

/**
 * Tests multiple processes reading and writing to the same store backend;
 * this is to simulate a multi process Nutch environment (i.e. MapReduce).
 *
 * @throws Exception
 */
@Test
public void testMultiProcess() throws Exception {
    // create and start a hsql server, a stand-alone (memory backed) db
    // (important: a stand-alone server should be used because simple
    // file based access i.e. jdbc:hsqldb:file is NOT process-safe.)
    Server server = new Server();
    server.setDaemon(true);
    server.setSilent(true); // disables LOTS of trace
    final String className = getClass().getName();
    String dbName = "test";
    server.setDatabasePath(0, "mem:" + dbName);
    server.setDatabaseName(0, dbName);
    server.start();

    // create the store so that the tests can start right away
    StorageUtils.createWebStore(conf, String.class, WebPage.class);

    // create a fixed thread pool
    int numThreads = 4;
    ExecutorService pool = Executors.newFixedThreadPool(numThreads);

    // spawn multiple processes, each thread spawns own process
    Collection<Callable<Integer>> tasks = new ArrayList<Callable<Integer>>();
    for (int i = 0; i < numThreads; i++) {
        tasks.add(new Callable<Integer>() {
            @Override
            public Integer call() {
                try {
                    String separator = System.getProperty("file.separator");
                    String classpath = System.getProperty("java.class.path");
                    String pathSeparator = System.getProperty("path.separator");
                    // connect local sql service
                    classpath = "./src/testprocess" + pathSeparator + classpath;
                    String path = System.getProperty("java.home") + separator + "bin" + separator + "java";
                    ProcessBuilder processBuilder = new ProcessBuilder(path, "-cp", classpath, className);
                    processBuilder.redirectErrorStream(true);
                    Process process = processBuilder.start();
                    InputStream in = process.getInputStream();
                    int exit = process.waitFor();
                    // print the output of the process
                    System.out.println("===Process stream for " + Thread.currentThread() + "\n"
                            + IOUtils.toString(in) + "===End of process stream.");
                    in.close();
                    // process should exit with zero code
                    return exit;
                } catch (Exception e) {
                    e.printStackTrace();
                    // this will fail the test
                    return 1;
                }
            }
        });
    }

    // submit them at once
    List<Future<Integer>> results = pool.invokeAll(tasks);

    // check results
    for (Future<Integer> result : results) {
        assertEquals(0, (int) result.get());
    }

    // stop db
    server.stop();
}
From source file: org.extensiblecatalog.ncip.v2.voyager.VoyagerLookupItemSetService.java

/**
 * Handles a NCIP LookupItem service by returning data from voyager.
 *
 * @param initData the LookupItemSetInitiationData
 * @param serviceManager provides access to remote services
 * @return LookupItemSetResponseData
 */
@Override
public LookupItemSetResponseData performService(LookupItemSetInitiationData initData,
        ServiceContext serviceContext, RemoteServiceManager serviceManager) throws ServiceException {

    voyagerSvcMgr = (VoyagerRemoteServiceManager) serviceManager;
    LookupItemSetResponseData luisResponseData = new LookupItemSetResponseData();
    List<Problem> problems = new ArrayList<Problem>();
    Date sService = new Date();
    log.info("Performing LUIS service.");

    List<BibliographicId> bibIds = initData.getBibliographicIds();
    if (bibIds == null) {
        problems.addAll(ServiceHelper.generateProblems(Version1GeneralProcessingError.NEEDED_DATA_MISSING,
                null, null, "Missing Bib IDs"));
        luisResponseData.setProblems(problems);
        return luisResponseData;
    }

    List<HoldingsSet> holdingSets = new ArrayList<HoldingsSet>();
    List<String> holdingIds = null;
    List<String> itemIds = null;
    Document holdingsDocFromRestful = new Document();
    Document holdingsDocFromXml = new Document();
    int itemCount = 0;
    boolean reachedMaxItemCount = false;

    String token = initData.getNextItemToken();
    ItemToken nextItemToken = null;
    if (token != null) {
        nextItemToken = tokens.get(token);
        if (nextItemToken != null) {
            int index = getIndexOfBibId(bibIds, nextItemToken.getBibliographicId());
            if (index != -1) {
                bibIds.subList(0, index).clear();
            }
            // Remove token from memory hashmap
            tokens.remove(token);
        } else {
            problems.addAll(ServiceHelper.generateProblems(
                    Version1GeneralProcessingError.TEMPORARY_PROCESSING_FAILURE, null, token,
                    "Invalid nextItemToken"));
            luisResponseData.setProblems(problems);
            return luisResponseData;
        }
        log.debug("after removing already processed Bib ids = " + bibIds);
    }

    // Retrieve XML from vxws web services
    List<Document> holdingsDocsFromRestful = new ArrayList<Document>(bibIds.size());
    List<Document> holdingsDocsFromXml = new ArrayList<Document>(bibIds.size());
    int i;
    int numBibs = bibIds.size();
    // In a non-consortial environment, most LUIS calls will be on a single bib
    if (numBibs < 2) {
        for (i = 0; i < numBibs; i++) {
            BibliographicId bibId = bibIds.get(i);
            holdingsDocsFromRestful.add(i, getHoldingRecordsFromRestful(bibId));
            holdingsDocsFromXml.add(i, getHoldingRecordsFromXml(bibId));
        }
    // But in a consortial environment, there can be many bibs in a LUIS call.
    // For now, let's look up each bib in a separate thread; perhaps later, we may need
    // to get more sophisticated/smart w/r/t resources?
    } else {
        ExecutorService exec = Executors.newFixedThreadPool(numBibs);
        List<Future<HoldingInfoFromWeb>> futureList = new ArrayList<Future<HoldingInfoFromWeb>>(numBibs);
        List<Callable<HoldingInfoFromWeb>> callList = new ArrayList<Callable<HoldingInfoFromWeb>>(numBibs);
        for (i = 0; i < numBibs; i++) {
            BibliographicId bibId = bibIds.get(i);
            callList.add(new HoldingInfoFromWeb(i, bibId));
        }
        try {
            futureList = exec.invokeAll(callList);
            for (Future<HoldingInfoFromWeb> future : futureList) {
                HoldingInfoFromWeb result = future.get();
                holdingsDocsFromRestful.add(result.getIndex(), result.getRestful());
                holdingsDocsFromXml.add(result.getIndex(), result.getXml());
            }
        } catch (InterruptedException ex) {
            log.error("Error calling vxws services via ExecutorService: " + ex);
        } catch (ExecutionException ex) {
            log.error("Error calling vxws services via ExecutorService: " + ex);
        } finally {
            exec.shutdownNow();
        }
    }

    List<BibInformation> bibInformations = new ArrayList<BibInformation>();
    for (i = 0; i < numBibs; i++) {
        BibliographicId bibId = bibIds.get(i);
        String id = null;
        String itemAgencyId = null;
        id = bibId.getBibliographicRecordId().getBibliographicRecordIdentifier();
        itemAgencyId = bibId.getBibliographicRecordId().getAgencyId().getValue();
        try {
            BibInformation bibInformation = new BibInformation();
            bibInformation.setBibliographicId(bibId);
            if (!checkValidAgencyId(itemAgencyId)) {
                log.error("Unrecognized Bibliographic Record Agency Id: " + itemAgencyId);
                problems.addAll(ServiceHelper.generateProblems(
                        Version1GeneralProcessingError.NEEDED_DATA_MISSING, null, null,
                        "Unrecognized Bibliographic Record Agency Id"));
                bibInformation.setProblems(problems);
                bibInformations.add(bibInformation);
                continue;
            }
            // Is the bib field empty?
            if (id.equals("") || itemAgencyId.equals("")) {
                log.error("Missing Bib Id or Agency Id");
                problems.addAll(ServiceHelper.generateProblems(
                        Version1GeneralProcessingError.NEEDED_DATA_MISSING, null, null,
                        "Missing Bib ID or item Agency Id"));
                bibInformation.setProblems(problems);
                bibInformations.add(bibInformation);
                continue;
            }
            // We had already called these services in the beginning
            holdingsDocFromRestful = holdingsDocsFromRestful.get(i);
            holdingsDocFromXml = holdingsDocsFromXml.get(i);
            if (holdingsDocFromXml == null) {
                problems.addAll(ServiceHelper.generateProblems(
                        Version1GeneralProcessingError.TEMPORARY_PROCESSING_FAILURE, null, id,
                        "Problem contacting the vxws service"));
                luisResponseData.setProblems(problems);
                return luisResponseData;
            }
            if (!doesRecordExist(holdingsDocFromXml)) {
                log.error("Record does not exist");
                problems.addAll(ServiceHelper.generateProblems(Version1LookupItemProcessingError.UNKNOWN_ITEM,
                        null, id, "Record does not exist"));
                bibInformation.setProblems(problems);
                bibInformations.add(bibInformation);
                continue;
            }
            // Get holding Ids belonging to this bib
            holdingIds = getHoldingIdsFromHoldingDoc(holdingsDocFromXml);
            if (nextItemToken != null) {
                int index = holdingIds.indexOf(nextItemToken.getHoldingsId());
                if (index != -1) {
                    holdingIds.subList(0, index).clear();
                }
            }
            if (holdingIds == null) {
                log.error("Bib does not have a holding record associated with it");
                problems.addAll(ServiceHelper.generateProblems(Version1LookupItemProcessingError.UNKNOWN_ITEM,
                        null, id, "Record does not have a holding record associated with it"));
                bibInformation.setProblems(problems);
                bibInformations.add(bibInformation);
                continue; // Bib record exists but has no Holding records
            }
            // Set bib desc
            BibliographicDescription bDesc = null;
            if (initData.getBibliographicDescriptionDesired()) {
                bDesc = getBibliographicDescriptionForBibId(holdingsDocFromXml);
                bibInformation.setBibliographicDescription(bDesc);
            }
            // title hold queue length
            // Ignoring in vxws release
            /*BigDecimal titleHoldQueue = voyagerSvcMgr.getTitleLevelHoldQueueLength(id);
            if (titleHoldQueue != null) {
                bibInformation.setTitleHoldQueueLength(titleHoldQueue);
            }*/

            holdingSets = new ArrayList<HoldingsSet>();
            // Build HoldingSet with items in it
            for (String holdingId : holdingIds) {
                log.debug("Processing Holding id = " + holdingId);
                itemIds = getItemIdsFromHoldingDoc(holdingId, holdingsDocFromRestful);
                log.debug("All itemIds: " + itemIds);
                // We need to distinguish between Holdings with actual Item Records vs. ones without.
                // This is so we may parse Item Record-less Holdings records differently, e.g.,
                // add item-like info to them later (sans ItemID, of course, since they don't refer
                // to actual Item Records). There are quite a few instances of Holdings without
                // Item Records, but we still want to supply useful information about them.
                // The major (only?) difference will be the lack of ItemID.
                boolean hasItems = false;
                // Get Bib desc, holding set info only if items exist for that holdings
                if (itemIds != null && itemIds.size() > 0) {
                    hasItems = true;
                }
                if (nextItemToken != null) {
                    int index = itemIds.indexOf(nextItemToken.getItemId());
                    log.debug("Index of nextitem: " + index);
                    if (index != -1) {
                        itemIds.subList(0, index + 1).clear();
                    }
                    log.debug("after removing already processed item ids = " + itemIds);
                    if (itemIds.size() < 1) {
                        continue;
                    }
                }
                HoldingsSet holdingSet = new HoldingsSet();
                // Set Bib Id and holdings set id
                holdingSet.setHoldingsSetId(holdingId);
                // if (initData.getElectronicResourceDesired()) {
                ElectronicResource eResource = getElectronicResourceForHoldingId(holdingId, holdingsDocFromXml);
                if (eResource != null) {
                    holdingSet.setElectronicResource(eResource);
                }
                // }
                String callNumber = null;
                if (hasItems) {
                    callNumber = getCallNumberForHoldingDoc(holdingId, holdingsDocFromRestful);
                } else {
                    callNumber = getCallNumberForHoldingDocFromXml(holdingId, holdingsDocFromXml);
                }
                if (callNumber != null) {
                    holdingSet.setCallNumber(callNumber);
                }
                if (hasItems) {
                    int newItemCount = itemCount + itemIds.size();
                    if (newItemCount > MAX_ITEMS_TO_RETURN) {
                        itemIds = getItemIdSubset(itemIds, itemCount);
                        log.debug("Subset itemIds: " + itemIds);
                    }
                    Map<String, ItemInformation> itemInformations = new HashMap<String, ItemInformation>();
                    for (String itemId : itemIds) {
                        ItemInformation itemInformation = new ItemInformation();
                        ItemId item = new ItemId();
                        item.setItemIdentifierValue(itemId);
                        item.setAgencyId(new AgencyId(itemAgencyId));
                        itemInformation.setItemId(item);
                        itemInformations.put(itemId, itemInformation);
                    }
                    Map<String, String> statuses = null;
                    if (initData.getCirculationStatusDesired()) {
                        statuses = getCirculationStatusForItemIds(itemIds, holdingsDocFromRestful);
                    }
                    // TODO: Double check that this really isn't available through GetHoldings
                    /* Ignoring for vxws release
                    Map<String, BigDecimal> lengths = null;
                    if (initData.getHoldQueueLengthDesired()) {
                        lengths = voyagerSvcMgr.getHoldQueueLengthForItemIds(itemIds);
                    }
                    */
                    Map<String, ItemDescription> itemDescriptions = null;
                    //if (initData.getItemDescriptionDesired()) {
                    itemDescriptions = getItemDescriptionForItemIds(itemIds, holdingsDocFromRestful);
                    //}
                    Map<String, Location> locations = null;
                    if (initData.getLocationDesired()) {
                        locations = getLocationForItemIds(itemIds, holdingsDocFromRestful);
                    }
                    Map<String, String> copyNumbers = new HashMap<String, String>();
                    Iterator<String> itrId = itemDescriptions.keySet().iterator();
                    while (itrId.hasNext()) {
                        String key = itrId.next();
                        copyNumbers.put(itemDescriptions.get(key).getCopyNumber(), key);
                    }
                    Map<String, GregorianCalendar> dueDates = null;
                    dueDates = getDueDateForItemIds(itemIds, holdingsDocFromXml, copyNumbers, holdingId);
                    Iterator<String> itr = itemInformations.keySet().iterator();
                    while (itr.hasNext()) {
                        ItemOptionalFields iof = new ItemOptionalFields();
                        String key = itr.next();
                        if (statuses != null) {
                            String status = statuses.get(key);
                            log.debug("Status for key " + status);
                            try {
                                if (statuses.get(key) != null) {
                                    iof.setCirculationStatus(XcCirculationStatus.find(
                                            XcCirculationStatus.XC_CIRCULATION_STATUS, status));
                                }
                            } catch (ServiceException se) {
                                log.error("Unrecognized item status");
                            }
                        }
                        /* Ignoring for vxws release
                        if (lengths != null) {
                            iof.setHoldQueueLength(lengths.get(key));
                        }
                        */
                        if (itemDescriptions != null) {
                            iof.setItemDescription(itemDescriptions.get(key));
                        }
                        if (locations != null) {
                            //List<Location> tempLocations = locations.get(key);
                            List<Location> tempLocations = new ArrayList<Location>();
                            tempLocations.add(locations.get(key));
                            if (tempLocations != null) {
                                LocationNameInstance lni = tempLocations.get(0).getLocationName()
                                        .getLocationNameInstances().get(0);
                                iof.setLocations(tempLocations);
                            }
                        }
                        ItemInformation itemInformation = itemInformations.get(key);
                        itemInformation.setItemOptionalFields(iof);
                        if (dueDates != null) {
                            itemInformation.setDateDue(dueDates.get(key));
                        }
                        itemInformations.put(key, itemInformation);
                    }
                    holdingSet.setItemInformations(new ArrayList<ItemInformation>(itemInformations.values()));
                    itemCount = itemCount + itemIds.size();
                    log.debug("Item count: " + itemCount);
                    if (itemCount == MAX_ITEMS_TO_RETURN) {
                        // Set next item token
                        ItemToken itemToken = new ItemToken();
                        itemToken.setBibliographicId(itemAgencyId + "_" + id);
                        itemToken.setHoldingsId(holdingId);
                        itemToken.setItemId(itemIds.get(itemIds.size() - 1));
                        int newToken = random.nextInt();
                        itemToken.setNextToken(Integer.toString(newToken));
                        tokens.put(Integer.toString(newToken), itemToken);
                        luisResponseData.setNextItemToken(Integer.toString(newToken));
                        reachedMaxItemCount = true;
                        log.info("Adding new holding set");
                        holdingSets.add(holdingSet);
                        break;
                    }
                // No Item Records, but we still want to return some Holdings info.
                } else {
                    itemCount = itemCount + 1;
                    log.debug("Item count: " + itemCount);
                    Map<String, ItemInformation> itemInformations = new HashMap<String, ItemInformation>();
                    ItemInformation itemInformation = new ItemInformation();
                    ItemId item = new ItemId();
                    item.setItemIdentifierValue("");
                    item.setAgencyId(new AgencyId(itemAgencyId));
                    itemInformation.setItemId(item);
                    /*** plug in item-like information for the non-item ***/
                    ItemOptionalFields iof2 = new ItemOptionalFields();
                    // Set location
                    Location location = null;
                    if (initData.getLocationDesired()) {
                        location = getLocationForHoldingDocFromXml(holdingId, holdingsDocFromXml);
                        if (location != null) {
                            List<Location> tempLocations2 = new ArrayList<Location>();
                            tempLocations2.add(location);
                            iof2.setLocations(tempLocations2);
                        }
                    }
                    itemInformation.setItemOptionalFields(iof2);
                    itemInformations.put("N/A", itemInformation);
                    holdingSet.setItemInformations(new ArrayList<ItemInformation>(itemInformations.values()));
                    if (itemCount == MAX_ITEMS_TO_RETURN) {
                        // Set next item token
                        ItemToken itemToken = new ItemToken();
                        itemToken.setBibliographicId(itemAgencyId + "_" + id);
                        itemToken.setHoldingsId(holdingId);
                        itemToken.setItemId("");
                        int newToken = random.nextInt();
                        itemToken.setNextToken(Integer.toString(newToken));
                        tokens.put(Integer.toString(newToken), itemToken);
                        luisResponseData.setNextItemToken(Integer.toString(newToken));
                        reachedMaxItemCount = true;
                        log.info("Adding new holding set");
                        holdingSets.add(holdingSet);
                        break;
                    }
                }
                log.info("Adding new holding set");
                holdingSets.add(holdingSet);
            }
            if (holdingIds.size() != 0) {
                bibInformation.setHoldingsSets(holdingSets);
            }
            bibInformations.add(bibInformation);
            if (reachedMaxItemCount) {
                break;
            }
        } catch (ILSException e) {
            Problem p = new Problem();
            p.setProblemType(new ProblemType("Processing error"));
            p.setProblemDetail(e.getMessage());
            problems.add(p);
            luisResponseData.setProblems(problems);
        }
    }
    Date eService = new Date();
    log.debug("LUIS Service time log : " + (eService.getTime() - sService.getTime()) + " "
            + ((eService.getTime() - sService.getTime()) / 1000) + " sec");
    luisResponseData.setBibInformations(bibInformations);
    return luisResponseData;
}
From source file: org.apache.sysml.runtime.compress.CompressedMatrixBlock.java

@Override
public MatrixBlock transposeSelfMatrixMultOperations(MatrixBlock out, MMTSJType tstype, int k)
        throws DMLRuntimeException {
    // call uncompressed matrix mult if necessary
    if (!isCompressed()) {
        return super.transposeSelfMatrixMultOperations(out, tstype, k);
    }

    // multi-threaded tsmm of single uncompressed colgroup
    if (isSingleUncompressedGroup()) {
        return ((ColGroupUncompressed) _colGroups.get(0)).getData().transposeSelfMatrixMultOperations(out,
                tstype, k);
    }

    Timing time = LOG.isDebugEnabled() ? new Timing(true) : null;

    // check for transpose type
    if (tstype != MMTSJType.LEFT) // right not supported yet
        throw new DMLRuntimeException("Invalid MMTSJ type '" + tstype.toString() + "'.");

    // create output matrix block
    if (out == null)
        out = new MatrixBlock(clen, clen, false);
    else
        out.reset(clen, clen, false);
    out.allocateDenseBlock();

    if (!isEmptyBlock(false)) {
        // compute matrix mult
        try {
            ExecutorService pool = Executors.newFixedThreadPool(k);
            ArrayList<MatrixMultTransposeTask> tasks = new ArrayList<MatrixMultTransposeTask>();
            int numgrp = _colGroups.size();
            int blklen = (int) (Math.ceil((double) numgrp / (2 * k)));
            for (int i = 0; i < 2 * k & i * blklen < clen; i++)
                tasks.add(new MatrixMultTransposeTask(_colGroups, out, i * blklen,
                        Math.min((i + 1) * blklen, numgrp)));
            List<Future<Object>> ret = pool.invokeAll(tasks);
            for (Future<Object> tret : ret)
                tret.get(); // check for errors
            pool.shutdown();
        } catch (Exception ex) {
            throw new DMLRuntimeException(ex);
        }
        // post-processing
        out.recomputeNonZeros();
    }

    if (LOG.isDebugEnabled())
        LOG.debug("Compressed TSMM k=" + k + " in " + time.stop());
    return out;
}
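All of the examples above use the untimed overload shown at the top. java.util.concurrent.ExecutorService also defines a timed variant, invokeAll(tasks, timeout, unit), which returns once the timeout elapses and cancels any task that has not completed by then; get() on a cancelled future throws CancellationException. A minimal sketch follows; the class name, pool size, and task durations are illustrative, not from any of the projects quoted (requires Java 9+ for List.of):

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class TimedInvokeAllSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        List<Callable<String>> tasks = List.of(
                () -> "fast", // finishes well within the timeout
                () -> { TimeUnit.SECONDS.sleep(10); return "slow"; }); // will be cancelled
        try {
            // Returns after roughly 1 second at the latest; unfinished tasks come back cancelled.
            List<Future<String>> futures = pool.invokeAll(tasks, 1, TimeUnit.SECONDS);
            for (Future<String> f : futures) {
                try {
                    System.out.println(f.get());
                } catch (CancellationException e) {
                    System.out.println("timed out and was cancelled");
                } catch (ExecutionException e) {
                    System.out.println("failed: " + e.getCause());
                }
            }
        } finally {
            pool.shutdown();
        }
    }
}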