List of usage examples for the java.util.concurrent.Semaphore constructor

public Semaphore(int permits)

Creates a Semaphore with the given number of permits and a non-fair fairness setting.
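Before the full examples below, a minimal standalone sketch of the constructor itself: a semaphore created with N permits allows at most N concurrent acquirers, and a semaphore created with 0 permits blocks acquirers until some other thread releases. Class and variable names here are illustrative only.

import java.util.concurrent.Semaphore;

public class SemaphoreConstructorSketch {
    public static void main(String[] args) throws InterruptedException {
        // Two permits: at most two threads may hold the semaphore at once.
        Semaphore twoPermits = new Semaphore(2);
        twoPermits.acquire(); // takes one permit, one remains
        twoPermits.release(); // gives it back

        // Zero permits: acquire() blocks until another thread releases.
        // Several of the examples below use this to signal completion.
        Semaphore signal = new Semaphore(0);
        new Thread(signal::release).start();
        signal.acquire(); // returns once the helper thread has released
    }
}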
From source file:org.commoncrawl.service.crawler.CrawlerEngine.java
/** internal loadWorkUnit routine **/
private CrawlSegmentStatus loadCrawlSegment(final CrawlSegment crawlSegment) {
    _activeLoadCount++;

    // mark the segment as crawling ...
    crawlSegment.setIsCrawling(true);

    final CrawlSegmentStatus status = new CrawlSegmentStatus();
    status.setListId(crawlSegment.getListId());
    status.setSegmentId(crawlSegment.getSegmentId());
    status.setLoadStatus(CrawlSegmentStatus.LoadStatus.LOADING);
    status.setCrawlStatus(CrawlSegmentStatus.CrawlStatus.UNKNOWN);
    status.setUrlCount(0);
    status.setUrlsComplete(0);
    status.setIsDirty(true);

    _statusMap.put(CrawlLog.makeSegmentLogId(crawlSegment.getListId(), crawlSegment.getSegmentId()), status);

    if (Environment.detailLogEnabled())
        LOG.info("loading crawl segment:" + crawlSegment.getSegmentId());

    if (!getServer().externallyManageCrawlSegments()) {

        // remove crawl segment log from crawl log data structure
        // (we need to do this to protect the data structure from corruption, since the underlying
        // worker thread walks the log and reconciles it against the segment data)
        final CrawlSegmentLog segmentLogObj = (getServer().enableCrawlLog())
                ? _crawlLog.removeSegmentLog(crawlSegment.getListId(), crawlSegment.getSegmentId())
                : null;

        if (segmentLogObj == null && getServer().enableCrawlLog()) {
            _activeLoadCount--;
            throw new RuntimeException(
                    "Expected Non-NULL CrawlSegmentLog for Segment:" + crawlSegment.getSegmentId());
        }

        getServer().getDefaultThreadPool()
                .execute(new ConcurrentTask<CrawlSegmentStatus>(getServer().getEventLoop(),
                        new Callable<CrawlSegmentStatus>() {

                            public CrawlSegmentStatus call() throws Exception {
                                try {
                                    LOG.info("### SYNC:Loading SegmentFPInfo for List:" + crawlSegment.getListId()
                                            + " Segment:" + crawlSegment.getSegmentId());
                                    // load work unit fingerprint detail ...
                                    final CrawlSegmentFPMap urlFPMap = SegmentLoader.loadCrawlSegmentFPInfo(
                                            crawlSegment.getListId(), crawlSegment.getSegmentId(),
                                            CrawlerEngine.this.getServer().getHostName(),
                                            new SegmentLoader.CancelOperationCallback() {
                                                @Override
                                                public boolean cancelOperation() {
                                                    return _shutdownFlag;
                                                }
                                            });

                                    if (_shutdownFlag) {
                                        LOG.info("### SYNC:EXITING LOAD OF List:" + crawlSegment.getListId()
                                                + " Segment:" + crawlSegment.getSegmentId());
                                        return new CrawlSegmentStatus();
                                    }

                                    if (getServer().enableCrawlLog()) {
                                        LOG.info("### SYNC: Syncing Log to SegmentFPInfo for List:"
                                                + crawlSegment.getListId() + " Segment:"
                                                + crawlSegment.getSegmentId());
                                        // re-sync log to segment ...
                                        segmentLogObj.syncToLog(urlFPMap);
                                    }

                                    LOG.info("### SYNC: Sync for List:" + crawlSegment.getListId() + " Segment:"
                                            + crawlSegment.getSegmentId() + " Returned:" + urlFPMap._urlCount
                                            + " Total URLS and " + urlFPMap._urlsComplete + " CompleteURLS");

                                    if (!_shutdownFlag) {
                                        // now activate the segment log ...
                                        final Semaphore segActiveSemaphore = new Semaphore(0);

                                        // check for completion here ...
                                        if (urlFPMap._urlCount == urlFPMap._urlsComplete && !_shutdownFlag) {
                                            LOG.info("### SYNC: For List:" + crawlSegment.getListId()
                                                    + " Segment:" + crawlSegment.getSegmentId()
                                                    + " indicates Completed Segment.");
                                            _server.getEventLoop()
                                                    .setTimer(new Timer(1, false, new Timer.Callback() {

                                                        public void timerFired(Timer timer) {
                                                            LOG.info("### SYNC: For List:"
                                                                    + crawlSegment.getListId() + " Segment:"
                                                                    + crawlSegment.getSegmentId()
                                                                    + " setting Status to Completed Segment.");
                                                            if (!_shutdownFlag) {
                                                                // update segment status ...
                                                                status.setUrlCount(urlFPMap._urlCount);
                                                                status.setUrlsComplete(urlFPMap._urlCount);
                                                                // update crawl status
                                                                status.setCrawlStatus(
                                                                        CrawlSegmentStatus.CrawlStatus.CRAWL_COMPLETE);
                                                                status.setIsComplete(true);
                                                                // set dirty flag for segment
                                                                status.setIsDirty(true);
                                                            }
                                                            // and release semaphore ...
                                                            segActiveSemaphore.release();
                                                        }
                                                    }));
                                        } else {
                                            _server.getEventLoop()
                                                    .setTimer(new Timer(1, false, new Timer.Callback() {

                                                        public void timerFired(Timer timer) {
                                                            if (!_shutdownFlag) {
                                                                if (getServer().enableCrawlLog()) {
                                                                    // back in primary thread context, so go ahead
                                                                    // and SAFELY re-activate the segment log ...
                                                                    activateSegmentLog(segmentLogObj);
                                                                }
                                                            }
                                                            // and release semaphore ...
                                                            segActiveSemaphore.release();
                                                        }
                                                    }));
                                        }
                                        // wait for segment activation ...
                                        segActiveSemaphore.acquireUninterruptibly();
                                    }

                                    // now if complete return immediately
                                    if (urlFPMap._urlCount != urlFPMap._urlsComplete && !_shutdownFlag) {
                                        LOG.info("### LOADER Loading CrawlSegment Detail for Segment:"
                                                + crawlSegment.getSegmentId());
                                        SegmentLoader.loadCrawlSegment(crawlSegment.getListId(),
                                                crawlSegment.getSegmentId(),
                                                CrawlerEngine.this.getServer().getHostName(), urlFPMap, null,
                                                createLoadProgressCallback(status),
                                                new SegmentLoader.CancelOperationCallback() {
                                                    @Override
                                                    public boolean cancelOperation() {
                                                        return _shutdownFlag;
                                                    }
                                                });
                                    }
                                } catch (Exception e) {
                                    LOG.error(StringUtils.stringifyException(e));
                                    throw e;
                                }
                                return status;
                            }
                        }, createCompletionCallback(crawlSegment, status)));
    } else {
        getServer().loadExternalCrawlSegment(crawlSegment, createLoadProgressCallback(status),
                createCompletionCallback(crawlSegment, status), status);
    }

    return status;
}
From source file:com.impetus.ankush2.cassandra.deployer.CassandraDeployer.java
@Override
public boolean start(final ClusterConfig conf, Collection<String> nodes) {
    try {
        // In the add-node case, sleep for two minutes after starting each new
        // node so that ring topology details can update.
        if (this.clusterConfig.getState().equals(Constant.Cluster.State.ADD_NODE)) {
            // starting service on all newly added nodes
            for (final String host : nodes) {
                // setting cluster conf nodes status
                conf.getNodes().get(host).setStatus(startNode(host));
                // Wait for two minutes
                try {
                    logger.info("Waiting for two minutes...", getComponentName(), host);
                    logger.debug("Wait for two minutes.", host);
                    Thread.sleep(120000);
                } catch (InterruptedException e) {
                    logger.debug(e.getMessage());
                }
            }
        } else {
            final Semaphore semaphore = new Semaphore(nodes.size());
            // starting service on each node in cluster
            for (final String host : nodes) {
                semaphore.acquire();
                AppStoreWrapper.getExecutor().execute(new Runnable() {
                    @Override
                    public void run() {
                        // setting cluster conf nodes status
                        conf.getNodes().get(host).setStatus(startNode(host));
                        if (semaphore != null) {
                            semaphore.release();
                        }
                    }
                });
            }
            semaphore.acquire(nodes.size());
        }
        // Return false if any of the nodes is not deployed.
        return AnkushUtils.getStatus(conf.getNodes());
    } catch (Exception e) {
        return addClusterError("Could not start " + getComponentName() + " services.", e);
    }
}
From source file:com.impetus.ankush2.cassandra.deployer.CassandraDeployer.java
@Override
public boolean stop(final ClusterConfig conf, Collection<String> nodes) {
    // Stop services only if the cluster is not in the REMOVE_NODE state
    if (!conf.getState().equals(Constant.Cluster.State.REMOVE_NODE)) {
        final Semaphore semaphore = new Semaphore(nodes.size());
        try {
            // stopping service on each of the cluster nodes
            for (final String host : nodes) {
                semaphore.acquire();
                AppStoreWrapper.getExecutor().execute(new Runnable() {
                    @Override
                    public void run() {
                        conf.getNodes().get(host).setStatus(stopNode(host));
                        if (semaphore != null) {
                            semaphore.release();
                        }
                    }
                });
            }
            semaphore.acquire(nodes.size());
        } catch (Exception e) {
            logger.error(e.getMessage());
        }
    }
    return true;
}
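The CassandraDeployer methods in this listing all use the same idiom: a semaphore sized to the node count, one acquire() before each submitted task, a release() inside the task, and a final acquire(nodes.size()) that can only succeed once every task has released — in effect an ad-hoc completion barrier. A minimal self-contained sketch of that idiom follows; the class name, host list, and task body are illustrative stand-ins, not part of the Ankush API.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class SemaphoreBarrierSketch {
    public static void main(String[] args) throws InterruptedException {
        List<String> hosts = List.of("node1", "node2", "node3");
        ExecutorService executor = Executors.newFixedThreadPool(hosts.size());
        final Semaphore semaphore = new Semaphore(hosts.size());

        for (final String host : hosts) {
            semaphore.acquire(); // take one permit per submitted task
            executor.execute(() -> {
                System.out.println("stopping " + host); // stand-in for stopNode(host)
                semaphore.release(); // return the permit when the task is done
            });
        }
        // Succeeds only when all permits are back, i.e. every task has released.
        semaphore.acquire(hosts.size());
        semaphore.release(hosts.size());
        executor.shutdown();
    }
}

A CountDownLatch initialized to hosts.size() would express the same "wait for all tasks" step more directly; the semaphore version shown in these examples works because the final bulk acquire cannot complete until every worker has handed its permit back.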
From source file:com.impetus.ankush2.framework.monitor.AbstractMonitor.java
public void downloadlogs() {
    final String component = (String) parameterMap.get(com.impetus.ankush2.constant.Constant.Keys.COMPONENT);
    if (component == null || component.isEmpty()) {
        this.addAndLogError("Invalid Log request: Please specify a component.");
        return;
    }
    try {
        ArrayList<String> nodes = (ArrayList) parameterMap.get(Constant.JsonKeys.Logs.NODES);
        if (nodes == null || nodes.isEmpty()) {
            nodes = new ArrayList<String>(this.clusterConf.getComponents().get(component).getNodes().keySet());
        }
        ArrayList<String> roles = (ArrayList) parameterMap.get(Constant.JsonKeys.Logs.ROLES);
        Serviceable serviceableObj = ObjectFactory.getServiceObject(component);
        if (roles == null || roles.isEmpty()) {
            roles = new ArrayList<String>(serviceableObj.getServiceList(this.clusterConf));
        }

        String clusterResourcesLogsDir = AppStoreWrapper.getClusterResourcesPath() + "logs/";
        String clusterLogsDirName = "Logs_" + this.clusterConf.getName() + "_" + System.currentTimeMillis();
        String clusterLogsArchiveName = clusterLogsDirName + ".zip";
        final String cmpLogsDirPathOnServer = clusterResourcesLogsDir + clusterLogsDirName + "/" + component + "/";

        if (!FileUtils.ensureFolder(cmpLogsDirPathOnServer)) {
            this.addAndLogError("Could not create log directory for " + component + " on server.");
            return;
        }

        final Semaphore semaphore = new Semaphore(nodes.size());
        final ArrayList<String> rolesObj = new ArrayList<String>(roles);
        try {
            for (final String host : nodes) {
                semaphore.acquire();
                AppStoreWrapper.getExecutor().execute(new Runnable() {
                    @Override
                    public void run() {
                        NodeConfig nodeConfig = clusterConf.getNodes().get(host);
                        SSHExec connection = SSHUtils.connectToNode(host, clusterConf.getAuthConf());
                        if (connection == null) {
                            // TODO: handle error
                            logger.error("Could not fetch log files - Connection not initialized", component,
                                    host);
                        }
                        Serviceable serviceableObj = null;
                        try {
                            serviceableObj = ObjectFactory.getServiceObject(component);
                            for (String role : rolesObj) {
                                if (nodeConfig.getRoles().get(component).contains(role)) {
                                    String tmpLogsDirOnServer = cmpLogsDirPathOnServer + "/" + role + "/" + host
                                            + "/";
                                    if (!FileUtils.ensureFolder(tmpLogsDirOnServer)) {
                                        // TODO: handle error - log it in the operation table and skip this role
                                        continue;
                                    }
                                    String nodeLogsDirPath = FileUtils.getSeparatorTerminatedPathEntry(
                                            serviceableObj.getLogDirPath(clusterConf, host, role));
                                    String logFilesRegex = serviceableObj.getLogFilesRegex(clusterConf, host,
                                            role, null);
                                    String outputTarArchiveName = role + "_" + System.currentTimeMillis()
                                            + ".tar.gz";
                                    try {
                                        List<String> logsFilesList = AnkushUtils.listFilesInDir(connection, host,
                                                nodeLogsDirPath, logFilesRegex);
                                        AnkushTask ankushTask = new CreateTarArchive(nodeLogsDirPath,
                                                nodeLogsDirPath + outputTarArchiveName, logsFilesList);
                                        if (connection.exec(ankushTask).rc != 0) {
                                            // TODO: handle error - log it in the operation table and skip this role
                                            continue;
                                        }
                                        connection.downloadFile(nodeLogsDirPath + outputTarArchiveName,
                                                tmpLogsDirOnServer + outputTarArchiveName);
                                        ankushTask = new Remove(nodeLogsDirPath + outputTarArchiveName);
                                        connection.exec(ankushTask);
                                        ankushTask = new UnTarArchive(tmpLogsDirOnServer + outputTarArchiveName,
                                                tmpLogsDirOnServer);
                                        Runtime.getRuntime().exec(ankushTask.getCommand()).waitFor();
                                        ankushTask = new Remove(tmpLogsDirOnServer + outputTarArchiveName);
                                        Runtime.getRuntime().exec(ankushTask.getCommand()).waitFor();
                                    } catch (Exception e) {
                                        e.printStackTrace();
                                        // TODO: handle exception - log it in the operation table and skip this role
                                        continue;
                                    }
                                }
                            }
                        } catch (Exception e) {
                            // TODO: handle exception
                            return;
                        } finally {
                            if (semaphore != null) {
                                semaphore.release();
                            }
                            if (connection != null) {
                                connection.disconnect();
                            }
                        }
                    }
                });
            }
            semaphore.acquire(nodes.size());
        } catch (Exception e) {
        }
        ZipUtil.pack(new File(clusterResourcesLogsDir + clusterLogsDirName),
                new File(clusterResourcesLogsDir + clusterLogsArchiveName), true);
        org.apache.commons.io.FileUtils.deleteDirectory(new File(clusterResourcesLogsDir + clusterLogsDirName));
        result.put(com.impetus.ankush2.constant.Constant.Keys.DOWNLOADPATH,
                clusterResourcesLogsDir + clusterLogsArchiveName);
    } catch (Exception e) {
        this.addAndLogError("Could not download logs for " + component + ".");
        logger.error(e.getMessage(), component, e);
    }
}
From source file:com.impetus.ankush2.cassandra.deployer.CassandraDeployer.java
@Override
public boolean removeNode(final ClusterConfig conf, Collection<String> nodes) {
    logger.info("Deleting Cassandra packages...", getComponentName());
    try {
        if (newClusterConf == null) {
            // setting clusterconf, componentconf and logger
            if (!setClassVariables(conf)) {
                return false;
            }
        }
        final Semaphore semaphore = new Semaphore(nodes.size());
        // undeploying package from each node
        for (final String host : nodes) {
            semaphore.acquire();
            AppStoreWrapper.getExecutor().execute(new Runnable() {
                @Override
                public void run() {
                    // setting nodestatus default value to false
                    boolean nodestatus = false;
                    // if service stopped successfully, then removing
                    // component from node
                    if (stopNode(host)) {
                        nodestatus = removeNode(host);
                    }
                    conf.getNodes().get(host).setStatus(nodestatus);
                    if (semaphore != null) {
                        semaphore.release();
                    }
                }
            });
        }
        semaphore.acquire(nodes.size());
        return AnkushUtils.getStatus(conf.getNodes());
    } catch (Exception e) {
        return addClusterError("Could not remove " + getComponentName(), e);
    }
}
From source file:org.commoncrawl.service.listcrawler.CrawlHistoryManager.java
private static void launchInTestMode() {
    File baseTestDir = new File("/tmp/logManagerTest");
    FileUtils.recursivelyDeleteFile(baseTestDir);
    baseTestDir.mkdir();
    File remoteDir = new File(baseTestDir, "remote");
    File localDir = new File(baseTestDir, "local");
    remoteDir.mkdir();
    localDir.mkdir();

    final TreeMap<String, URLFP> urlToFPMap = new TreeMap<String, URLFP>();
    final TreeMap<URLFP, String> urlFPToString = new TreeMap<URLFP, String>();

    Set<String> list1 = Sets.newHashSet(urlList1);
    Set<String> list2 = Sets.newHashSet(urlList2);
    final Set<String> combined = Sets.union(list1, list2);
    Set<String> difference = Sets.difference(list1, list2);
    final Set<String> completedURLS = new HashSet<String>();

    for (String url : combined) {
        URLFP fingerprint = URLUtils.getURLFPFromURL(url, true);
        urlToFPMap.put(url, fingerprint);
        urlFPToString.put(fingerprint, url);
    }

    File testInputFile1 = new File(localDir, "INPUT_LIST-" + System.currentTimeMillis());
    File testInputFile2 = new File(localDir, "INPUT_LIST-" + (System.currentTimeMillis() + 1));

    try {
        generateTestURLFile(testInputFile1, urlList1);
        generateTestURLFile(testInputFile2, urlList2);

        FileSystem localFileSystem = FileSystem.getLocal(CrawlEnvironment.getHadoopConfig());
        EventLoop eventLoop = new EventLoop();
        eventLoop.start();

        final CrawlHistoryManager logManager = new CrawlHistoryManager(localFileSystem,
                new Path(remoteDir.getAbsolutePath()), localDir, eventLoop, 0);

        final LinkedBlockingQueue<ProxyCrawlHistoryItem> queue = new LinkedBlockingQueue<ProxyCrawlHistoryItem>();
        final Semaphore initialListComplete = new Semaphore(0);

        logManager.startQueueLoaderThread(new CrawlQueueLoader() {
            @Override
            public void queueURL(URLFP urlfp, String url) {
                ProxyCrawlHistoryItem item = new ProxyCrawlHistoryItem();
                item.setOriginalURL(url);
                queue.add(item);
            }

            @Override
            public void flush() {
                // TODO Auto-generated method stub
            }
        });

        Thread queueTestThread = new Thread(new Runnable() {
            @Override
            public void run() {
                while (true) {
                    try {
                        ProxyCrawlHistoryItem item = queue.take();
                        if (item.getOriginalURL().length() == 0) {
                            break;
                        } else {
                            System.out.println("Got:" + item.getOriginalURL());
                            CrawlURL urlObject = new CrawlURL();
                            Assert.assertTrue(!completedURLS.contains(item.getOriginalURL()));
                            completedURLS.add(item.getOriginalURL());
                            urlObject.setLastAttemptResult((byte) CrawlURL.CrawlResult.SUCCESS);
                            urlObject.setUrl(item.getOriginalURL());
                            urlObject.setResultCode(200);
                            logManager.crawlComplete(urlObject);
                            if (completedURLS.equals(combined)) {
                                System.out.println("Hit Trigger URL. Releasing InitialListComplete Semaphore");
                                initialListComplete.release(1);
                            }
                        }
                    } catch (InterruptedException e) {
                    }
                }
            }
        });

        queueTestThread.start();

        logManager.loadList(testInputFile1, 0);
        logManager.loadList(testInputFile2, 0);

        System.out.println("Waiting for Initial List to Complete");
        initialListComplete.acquireUninterruptibly();
        System.out.println("Woke Up");

        try {
            eventLoop.getEventThread().join();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}
From source file:net.spfbl.core.Analise.java
public static void load() {
    long time = System.currentTimeMillis();
    File file = new File("./data/analise.set");
    if (file.exists()) {
        try {
            TreeSet<Analise> set;
            FileInputStream fileInputStream = new FileInputStream(file);
            try {
                set = SerializationUtils.deserialize(fileInputStream);
            } finally {
                fileInputStream.close();
            }
            for (Analise analise : set) {
                try {
                    if (analise.semaphoreSet == null) {
                        analise.semaphoreSet = new Semaphore(1);
                    }
                    analise.ipSet.addAll(analise.processSet);
                    analise.processSet.clear();
                    add(analise);
                } catch (Exception ex) {
                    Server.logError(ex);
                }
            }
            Server.logLoad(time, file);
        } catch (Exception ex) {
            Server.logError(ex);
        }
    }
    time = System.currentTimeMillis();
    file = new File("./data/cluster.map");
    if (file.exists()) {
        try {
            TreeMap<String, Short[]> map;
            FileInputStream fileInputStream = new FileInputStream(file);
            try {
                map = SerializationUtils.deserialize(fileInputStream);
            } finally {
                fileInputStream.close();
            }
            for (String token : map.keySet()) {
                Short[] value = map.get(token);
                if (token.contains("#") || token.contains(".H.")) {
                    String hostname = token.replace("#", "0");
                    hostname = hostname.replace(".H.", ".0a.");
                    if (Domain.isHostname(hostname)) {
                        clusterMap.put(token, value);
                    }
                } else if (Owner.isOwnerCPF(token.substring(1))) {
                    String ownerID = Owner.normalizeID(token.substring(1));
                    clusterMap.put(ownerID, value);
                } else if (Owner.isOwnerID(token)) {
                    String ownerID = Owner.normalizeID(token);
                    clusterMap.put(ownerID, value);
                } else if (Subnet.isValidCIDR(token)) {
                    clusterMap.put(token, value);
                } else if (Domain.isHostname(token)) {
                    String hostname = Domain.normalizeHostname(token, true);
                    if (Domain.isOfficialTLD(hostname) && !hostname.endsWith(".br")) {
                        clusterMap.put(hostname, value);
                    }
                }
            }
            Server.logLoad(time, file);
        } catch (Exception ex) {
            Server.logError(ex);
        }
    }
}
From source file:org.ballerinalang.bre.bvm.BLangVM.java
private boolean invokeJoinWorkers(Map<String, BLangVMWorkers.WorkerExecutor> workers,
        Set<String> joinWorkerNames, int joinCount, long timeout) {
    ExecutorService exec = ThreadPoolFactory.getInstance().getWorkerExecutor();
    Semaphore resultCounter = new Semaphore(-joinCount + 1);
    workers.forEach((k, v) -> {
        if (joinWorkerNames.contains(k)) {
            v.setResultCounterSemaphore(resultCounter);
        }
        exec.submit(v);
    });
    try {
        return resultCounter.tryAcquire(timeout, TimeUnit.SECONDS);
    } catch (InterruptedException ignore) {
        return false;
    }
}
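This example relies on a detail that is easy to miss: Semaphore permits may be initialized to a negative value, in which case releases must bring the count above zero before any acquire can succeed. With -joinCount + 1 initial permits, the tryAcquire succeeds only after joinCount workers have each released once. A minimal standalone sketch of that countdown idiom follows; the class name, worker bodies, and counts are illustrative, not taken from the Ballerina runtime.

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public class NegativePermitsSketch {
    public static void main(String[] args) throws InterruptedException {
        int joinCount = 3;
        // Starts at -2: only after three release() calls does one permit exist.
        Semaphore resultCounter = new Semaphore(-joinCount + 1);

        for (int i = 0; i < joinCount; i++) {
            new Thread(resultCounter::release).start(); // each worker signals once
        }
        // Succeeds once all joinCount workers have released; times out otherwise.
        boolean joined = resultCounter.tryAcquire(5, TimeUnit.SECONDS);
        System.out.println("all workers joined: " + joined);
    }
}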
From source file:it.infn.ct.ParallelSemanticSearch_portlet.java
public void doGet(final ActionRequest request, final ActionResponse response, final App_Input appInput,
        final int numberRecords, final PortletPreferences portletPreferences) {
    testLookup();
    int numThread = countTab(portletPreferences);
    System.out.println("About to submit tasks to " + tp);
    // PortletPreferences portletPreferences = request.getPreferences();
    final Semaphore s = new Semaphore(0);

    Thread thread_openAgris = null;
    Thread thread_culturaItalia = null;
    Thread thread_engage = null;
    Thread thread_europeana = null;
    Thread thread_isidore = null;
    Thread thread_pubmed = null;

    Thread thread_chain = new Thread("CHAIN_THREAD") {
        @Override
        public void run() {
            System.out.println("Executing task in " + Thread.currentThread());
            System.out.println("################### init_thread chain");
            try {
                handlerTabCHAIN(request, response, appInput, numberRecords);
            } catch (RepositoryException ex) {
                Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
            } catch (MalformedQueryException ex) {
                Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
            } catch (QueryEvaluationException ex) {
                Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
            } catch (UnsupportedEncodingException ex) {
                Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
            } catch (MalformedURLException ex) {
                Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
            }
            s.release();
            System.out.println("thread_chain isAlive: " + Thread.currentThread().getName() + "---"
                    + Thread.currentThread().isAlive());
            System.out.println("###################### finish thread chain" + Thread.currentThread().isAlive());
        }
    };

    if (portletPreferences.getValue("OpenAgris", "").equals("true")) {
        // if (appPreferences.OpenAgris.equals("true")) {
        thread_openAgris = new Thread("OPENAGRIS_THREAD") {
            @Override
            public void run() {
                System.out.println("Executing task in " + Thread.currentThread());
                System.out.println("################### init_thread OpenAgris");
                try {
                    handlerTabOpenAgris(request, response, appInput, numberRecords, portletPreferences);
                } catch (RepositoryException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (MalformedQueryException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (QueryEvaluationException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (UnsupportedEncodingException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (MalformedURLException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                }
                s.release();
                System.out.println("###################### finish thread OpenAgris");
            }
        };
    }

    if (portletPreferences.getValue("CulturaItalia", "").equals("true")) {
        // if (appPreferences.CulturaItalia.equals("true")) {
        thread_culturaItalia = new Thread("CULTURAITALIA_THREAD") {
            @Override
            public void run() {
                System.out.println("Executing task in " + Thread.currentThread());
                System.out.println("################### init_thread CulturaItalia");
                try {
                    handlerTabCulturaItalia(request, response, appInput, numberRecords, portletPreferences);
                } catch (MalformedQueryException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (QueryEvaluationException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (UnsupportedEncodingException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (MalformedURLException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (RepositoryException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                }
                s.release();
                System.out.println("###################### finish thread CulturaItalia");
            }
        };
    }

    if (portletPreferences.getValue("Engage", "").equals("true")) {
        // if (appPreferences.Engage.equals("true")) {
        thread_engage = new Thread("ENGAGE_THREAD") {
            @Override
            public void run() {
                System.out.println("Executing task in " + Thread.currentThread());
                System.out.println("################### init_thread Engage");
                try {
                    handlerTabEngage(request, response, appInput, numberRecords, portletPreferences);
                } catch (MalformedQueryException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (QueryEvaluationException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (UnsupportedEncodingException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (MalformedURLException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (RepositoryException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (IOException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                }
                s.release();
                System.out.println("###################### finish thread Engage");
            }
        };
    }

    String EuropeanaSet = portletPreferences.getValue("Europeana", "");
    System.out.println(
            "EuropeanaSet--" + EuropeanaSet + " appPreferences.Europeana" + appPreferences.Europeana);
    if (EuropeanaSet.equals("true")) {
        thread_europeana = new Thread("EUROPEANA_THREAD") {
            @Override
            public void run() {
                System.out.println("Executing task in " + Thread.currentThread());
                System.out.println("################### init_thread Europeana");
                try {
                    handlerTabEuropeana(request, response, appInput, numberRecords, portletPreferences);
                } catch (MalformedQueryException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (QueryEvaluationException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (UnsupportedEncodingException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (MalformedURLException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (RepositoryException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (IOException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                }
                s.release();
                System.out.println("###################### finish thread europeana");
            }
        };
    }

    if (portletPreferences.getValue("Isidore", "").equals("true")) {
        // if (appPreferences.Isidore.equals("true")) {
        thread_isidore = new Thread("ISIDORE_THREAD") {
            @Override
            public void run() {
                System.out.println("Executing task in " + Thread.currentThread());
                System.out.println("################### init_thread Isidore");
                try {
                    handlerTabIsidore(request, response, appInput, numberRecords, portletPreferences);
                } catch (MalformedQueryException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (QueryEvaluationException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (UnsupportedEncodingException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (MalformedURLException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (RepositoryException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (IOException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                }
                s.release();
                System.out.println("###################### finish thread Isidore");
            }
        };
    }

    if (portletPreferences.getValue("Pubmed", "").equals("true")) {
        // if (appPreferences.Pubmed.equals("true")) {
        thread_pubmed = new Thread("PUBMED_THREAD") {
            @Override
            public void run() {
                System.out.println("Executing task in " + Thread.currentThread());
                System.out.println("################### init_thread Pubmed");
                try {
                    handlerTabPubmed(request, response, appInput, numberRecords, portletPreferences);
                } catch (MalformedQueryException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (QueryEvaluationException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (UnsupportedEncodingException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (MalformedURLException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (RepositoryException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                } catch (IOException ex) {
                    Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
                }
                s.release();
                System.out.println("###################### finish thread Pubmed");
            }
        };
    }

    if (tp != null) {
        tp.execute(thread_chain);
        if (thread_openAgris != null) {
            tp.execute(thread_openAgris);
        }
        if (thread_culturaItalia != null) {
            tp.execute(thread_culturaItalia);
        }
        if (thread_engage != null) {
            tp.execute(thread_engage);
        }
        if (thread_europeana != null) {
            tp.execute(thread_europeana);
        }
        if (thread_isidore != null) {
            tp.execute(thread_isidore);
        }
        if (thread_pubmed != null) {
            tp.execute(thread_pubmed);
        }
        try {
            s.acquire(numThread);
        } catch (InterruptedException ex) {
            Logger.getLogger(ParallelSemanticSearch_portlet.class.getName()).log(Level.SEVERE, null, ex);
        }
    } else {
        thread_chain.start();
    }
    // tp.shutdown();
    // while (!tp.isTerminated()) {}
    System.out.println("###################### finish threadPoolMio");
}
From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java
@Override
public LockHandle acquireLock(String key) throws MetaException {
    /**
     * The implementation here is a bit kludgey but done so that code exercised by unit tests
     * (which run against Derby which has no support for select for update) is as similar to
     * production code as possible.
     * In particular, with Derby we always run in a single process with a single metastore and
     * the absence of For Update is handled via a Semaphore. The latter would strictly speaking
     * make the SQL statements below unnecessary (for Derby), but then they would not be tested.
     */
    Connection dbConn = null;
    Statement stmt = null;
    ResultSet rs = null;
    try {
        try {
            String sqlStmt = sqlGenerator.addForUpdateClause(
                    "select MT_COMMENT from AUX_TABLE where MT_KEY1=" + quoteString(key) + " and MT_KEY2=0");
            lockInternal();
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED, connPoolMutex);
            stmt = dbConn.createStatement();
            if (LOG.isDebugEnabled()) {
                LOG.debug("About to execute SQL: " + sqlStmt);
            }
            rs = stmt.executeQuery(sqlStmt);
            if (!rs.next()) {
                close(rs);
                try {
                    stmt.executeUpdate(
                            "insert into AUX_TABLE(MT_KEY1,MT_KEY2) values(" + quoteString(key) + ", 0)");
                    dbConn.commit();
                } catch (SQLException ex) {
                    if (!isDuplicateKeyError(ex)) {
                        throw new RuntimeException(
                                "Unable to lock " + quoteString(key) + " due to: " + getMessage(ex), ex);
                    }
                    // if here, it means a concurrent acquireLock() inserted the 'key'
                    // rollback is done for the benefit of Postgres which throws (SQLState=25P02, ErrorCode=0) if
                    // you attempt any stmt in a txn which had an error.
                    dbConn.rollback();
                }
                rs = stmt.executeQuery(sqlStmt);
                if (!rs.next()) {
                    throw new IllegalStateException(
                            "Unable to lock " + quoteString(key) + ". Expected row in AUX_TABLE is missing.");
                }
            }
            Semaphore derbySemaphore = null;
            if (dbProduct == DatabaseProduct.DERBY) {
                derbyKey2Lock.putIfAbsent(key, new Semaphore(1));
                derbySemaphore = derbyKey2Lock.get(key);
                derbySemaphore.acquire();
            }
            LOG.debug(quoteString(key) + " locked by " + quoteString(TxnHandler.hostname));
            // OK, so now we have a lock
            return new LockHandleImpl(dbConn, stmt, rs, key, derbySemaphore);
        } catch (SQLException ex) {
            rollbackDBConn(dbConn);
            close(rs, stmt, dbConn);
            checkRetryable(dbConn, ex, "acquireLock(" + key + ")");
            throw new MetaException("Unable to lock " + quoteString(key) + " due to: " + getMessage(ex) + "; "
                    + StringUtils.stringifyException(ex));
        } catch (InterruptedException ex) {
            rollbackDBConn(dbConn);
            close(rs, stmt, dbConn);
            throw new MetaException("Unable to lock " + quoteString(key) + " due to: " + ex.getMessage()
                    + StringUtils.stringifyException(ex));
        } finally {
            unlockInternal();
        }
    } catch (RetryException ex) {
        return acquireLock(key);
    }
}
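The Derby branch above uses new Semaphore(1) as a per-key mutex: putIfAbsent guarantees a single shared semaphore per key, and the one permit serializes lock holders. A condensed sketch of that per-key locking idiom follows, with hypothetical names (this is not Hive's actual class, just the pattern under the same assumptions):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Semaphore;

public class KeyedMutexSketch {
    // One binary semaphore per key; putIfAbsent makes creation race-free.
    private final ConcurrentMap<String, Semaphore> key2Lock = new ConcurrentHashMap<>();

    public void withLock(String key, Runnable critical) throws InterruptedException {
        key2Lock.putIfAbsent(key, new Semaphore(1));
        Semaphore mutex = key2Lock.get(key);
        mutex.acquire(); // blocks while another thread holds this key
        try {
            critical.run(); // at most one thread per key runs here
        } finally {
            mutex.release();
        }
    }
}

Unlike a ReentrantLock, a Semaphore has no owning thread, which is what lets the original code hand the acquired semaphore to a LockHandleImpl and release it later from a different call path.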