Usage examples for java.util.concurrent.ExecutorCompletionService.take()
public Future<V> take() throws InterruptedException
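take() retrieves and removes the Future representing the next completed task, blocking if none has completed yet. Before the real-world excerpts below, here is a minimal self-contained sketch of the usual submit-then-take loop; the pool size and task bodies are illustrative and not taken from any of the projects cited here.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;

public class TakeExample {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        ExecutorCompletionService<Integer> ecs = new ExecutorCompletionService<>(pool);
        int tasks = 8;
        for (int i = 0; i < tasks; i++) {
            final int n = i;
            // each task sleeps a random time, so completion order differs from submission order
            ecs.submit(() -> {
                Thread.sleep(ThreadLocalRandom.current().nextInt(100));
                return n * n;
            });
        }
        for (int i = 0; i < tasks; i++) {
            // take() blocks until some task has finished, then hands back its Future
            Future<Integer> done = ecs.take();
            System.out.println("completed: " + done.get());
        }
        pool.shutdown();
    }
}

Because take() yields futures in completion order, the printed results generally do not match submission order.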
From source file:com.alibaba.otter.node.etl.extract.extractor.DatabaseExtractor.java
@Override
public void extract(DbBatch dbBatch) throws ExtractException {
    Assert.notNull(dbBatch);
    Assert.notNull(dbBatch.getRowBatch());
    // look up the pipeline for this batch
    Pipeline pipeline = getPipeline(dbBatch.getRowBatch().getIdentity().getPipelineId());
    boolean mustDb = pipeline.getParameters().getSyncConsistency().isMedia();
    boolean isRow = pipeline.getParameters().getSyncMode().isRow(); // row sync mode?
    // adjust the extract worker pool size
    adjustPoolSize(pipeline.getParameters().getExtractPoolSize());
    ExecutorCompletionService completionService = new ExecutorCompletionService(executor);

    ExtractException exception = null;
    List<DataItem> items = new ArrayList<DataItem>();
    List<Future> futures = new ArrayList<Future>();
    List<EventData> eventDatas = dbBatch.getRowBatch().getDatas();
    for (EventData eventData : eventDatas) {
        if (eventData.getEventType().isDdl()) {
            continue;
        }

        DataItem item = new DataItem(eventData);
        // does this row have to be re-read from the source database?
        boolean flag = mustDb
                || (eventData.getSyncConsistency() != null && eventData.getSyncConsistency().isMedia());

        // special case: Oracle events parsed by erosa may carry no updated columns
        if (!flag && CollectionUtils.isEmpty(eventData.getUpdatedColumns())) {
            DataMedia dataMedia = ConfigHelper.findDataMedia(pipeline, eventData.getTableId());
            if (dataMedia.getSource().getType().isOracle()) {
                flag |= true;
                eventData.setRemedy(true); // mark as remedy data (erosa/Oracle case)
            }
        }

        if (isRow && !flag) {
            // in row mode, check whether a db read is still needed (e.g. for views)
            flag = checkNeedDbForRowMode(pipeline, eventData);
        }

        if (flag && (eventData.getEventType().isInsert() || eventData.getEventType().isUpdate())) {
            Future future = completionService.submit(new DatabaseExtractWorker(pipeline, item), null);
            if (future.isDone()) { // executed inline by a CallerRuns policy
                try {
                    future.get();
                } catch (InterruptedException e) {
                    cancel(futures); // cancel the remaining tasks
                    throw new ExtractException(e);
                } catch (ExecutionException e) {
                    cancel(futures); // cancel the remaining tasks
                    throw new ExtractException(e);
                }
            }
            futures.add(future);
        }

        items.add(item);
    }

    // wait for the submitted workers in completion order
    int index = 0;
    while (index < futures.size()) {
        try {
            Future future = completionService.take();
            future.get();
        } catch (InterruptedException e) {
            exception = new ExtractException(e);
            break; // leave the loop; the remaining futures get cancelled below
        } catch (ExecutionException e) {
            exception = new ExtractException(e);
            break; // leave the loop; the remaining futures get cancelled below
        }
        index++;
    }

    if (index < futures.size()) {
        // a worker failed: cancel everything still running and rethrow
        cancel(futures);
        throw exception;
    } else {
        // all workers finished: drop the rows that were filtered out
        for (int i = 0; i < items.size(); i++) {
            DataItem item = items.get(i);
            if (item.filter) {
                eventDatas.remove(item.getEventData());
            }
        }
    }
}
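The extractor above drains the completion service in a fail-fast way: one take() per submitted future, and as soon as any get() throws, everything still pending is cancelled. A condensed, hedged sketch of just that pattern; the generic class and method names here are illustrative, not from otter:

import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.Future;

final class DrainSupport {
    // Waits for every submitted future in completion order; on the first failure,
    // cancels whatever is still running and rethrows.
    static <T> void drainOrCancel(ExecutorCompletionService<T> ecs, List<Future<T>> futures)
            throws InterruptedException, ExecutionException {
        int done = 0;
        try {
            while (done < futures.size()) {
                ecs.take().get(); // surfaces the first task failure
                done++;
            }
        } catch (InterruptedException | ExecutionException e) {
            for (Future<T> f : futures) {
                f.cancel(true); // best-effort cancellation of the stragglers
            }
            throw e;
        }
    }
}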
From source file:com.baidu.rigel.biplatform.tesseract.isservice.search.service.impl.SearchIndexServiceImpl.java
@Override
public SearchIndexResultSet query(QueryRequest query) throws IndexAndSearchException {
    ExecutorCompletionService<SearchIndexResultSet> completionService =
            new ExecutorCompletionService<>(taskExecutor);
    LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_BEGIN, "query",
            "[query:" + query + "]"));
    // 1. Do the existing indexes cover this query?
    // 2. Get the index meta and index shards
    // 3. Translate the request into a query usable for searching
    // 4. Dispatch the search query
    // 5. Do the search
    // 6. Merge the results
    // 7. Return
    if (query == null || StringUtils.isEmpty(query.getCubeId())) {
        LOGGER.error(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_EXCEPTION, "query",
                "[query:" + query + "]"));
        throw new IndexAndSearchException(
                TesseractExceptionUtils.getExceptionMessage(IndexAndSearchException.QUERYEXCEPTION_MESSAGE,
                        IndexAndSearchExceptionType.ILLEGALARGUMENT_EXCEPTION),
                IndexAndSearchExceptionType.ILLEGALARGUMENT_EXCEPTION);
    }
    IndexMeta idxMeta = this.idxMetaService.getIndexMetaByCubeId(query.getCubeId(),
            query.getDataSourceInfo().getDataSourceKey());
    SearchIndexResultSet result = null;
    long current = System.currentTimeMillis();
    if (idxMeta == null || idxMeta.getIdxState().equals(IndexState.INDEX_UNAVAILABLE)
            || idxMeta.getIdxState().equals(IndexState.INDEX_UNINIT) || !query.isUseIndex()
            || (query.getFrom() != null && query.getFrom().getFrom() != null
                    && !idxMeta.getDataDescInfo().getTableNameList().contains(query.getFrom().getFrom()))
            || !indexMetaContains(idxMeta, query)) {
        LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "query",
                "use database"));
        // the index does not exist or is unavailable; query the database instead
        SqlQuery sqlQuery = QueryRequestUtil.transQueryRequest2SqlQuery(query);
        SqlDataSourceWrap dataSourceWrape = null;
        try {
            dataSourceWrape = (SqlDataSourceWrap) this.dataSourcePoolService
                    .getDataSourceByKey(query.getDataSourceInfo());
        } catch (DataSourceException e) {
            LOGGER.error(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_EXCEPTION, "query",
                    "[query:" + query + "]", e));
            throw new IndexAndSearchException(
                    TesseractExceptionUtils.getExceptionMessage(IndexAndSearchException.QUERYEXCEPTION_MESSAGE,
                            IndexAndSearchExceptionType.SQL_EXCEPTION),
                    e, IndexAndSearchExceptionType.SQL_EXCEPTION);
        }
        if (dataSourceWrape == null) {
            throw new IllegalArgumentException();
        }
        long limitStart = 0;
        long limitSize = 0;
        if (query.getLimit() != null) {
            limitStart = query.getLimit().getStart();
            if (query.getLimit().getSize() > 0) {
                limitSize = query.getLimit().getSize();
            }
        }
        SearchIndexResultSet currResult = this.dataQueryService.queryForListWithSQLQueryAndGroupBy(sqlQuery,
                dataSourceWrape, limitStart, limitSize, query);
        LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "query",
                "db return " + currResult.size() + " records"));
        result = currResult;
    } else {
        LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "query",
                "use index"));
        LOGGER.info("cost :" + (System.currentTimeMillis() - current) + " before prepare get record.");
        current = System.currentTimeMillis();
        List<SearchIndexResultSet> idxShardResultSetList = new ArrayList<SearchIndexResultSet>();
        for (IndexShard idxShard : idxMeta.getIdxShardList()) {
            if (idxShard.getIdxState().equals(IndexState.INDEX_UNINIT)) {
                continue;
            }
            completionService.submit(new Callable<SearchIndexResultSet>() {

                @Override
                public SearchIndexResultSet call() throws Exception {
                    try {
                        long current = System.currentTimeMillis();
                        Node searchNode = isNodeService.getFreeSearchNodeByIndexShard(idxShard,
                                idxMeta.getClusterName());
                        searchNode.searchRequestCountAdd();
                        isNodeService.saveOrUpdateNodeInfo(searchNode);
                        LOGGER.info("begin search in shard:{}", idxShard);
                        SearchIndexResultSet result = (SearchIndexResultSet) isClient
                                .search(query, idxShard, searchNode).getMessageBody();
                        searchNode.searchrequestCountSub();
                        isNodeService.saveOrUpdateNodeInfo(searchNode);
                        LOGGER.info("complete search in shard:{},take:{} ms", idxShard,
                                System.currentTimeMillis() - current);
                        return result;
                    } catch (Exception e) {
                        throw new IndexAndSearchException(
                                TesseractExceptionUtils.getExceptionMessage(
                                        IndexAndSearchException.QUERYEXCEPTION_MESSAGE,
                                        IndexAndSearchExceptionType.NETWORK_EXCEPTION),
                                e, IndexAndSearchExceptionType.NETWORK_EXCEPTION);
                    }
                }
            });
        }
        for (int i = 0; i < idxMeta.getIdxShardList().size(); i++) {
            try {
                idxShardResultSetList.add(completionService.take().get());
            } catch (InterruptedException | ExecutionException e) {
                throw new IndexAndSearchException(
                        TesseractExceptionUtils.getExceptionMessage(
                                IndexAndSearchException.QUERYEXCEPTION_MESSAGE,
                                IndexAndSearchExceptionType.NETWORK_EXCEPTION),
                        e, IndexAndSearchExceptionType.NETWORK_EXCEPTION);
            }
        }
        LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "query",
                "merging result from multiple index"));
        result = mergeResultSet(idxShardResultSetList, query);
        StringBuilder sb = new StringBuilder();
        sb.append("cost :").append(System.currentTimeMillis() - current)
                .append(" in get result record,result size:").append(result.size()).append(" shard size:")
                .append(idxShardResultSetList.size());
        LOGGER.info(sb.toString());
        current = System.currentTimeMillis();
    }
    LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "query",
            "merging final result"));
    LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_END, "query", "[query:" + query + "]"));
    return result;
}
From source file:com.clustercontrol.http.factory.RunMonitorHttpScenario.java
/**
 * Runs the HTTP scenario monitor.
 *
 * Executes the check for every facility ID belonging to the single monitor ID,
 * overriding the generic runMonitorInfo() of the base class.
 * (The original Japanese javadoc was garbled; this summary is reconstructed from the code.)
 */
@Override
protected boolean runMonitorInfo()
        throws FacilityNotFound, MonitorNotFound, EntityExistsException, InvalidRole, HinemosUnknown {
    m_log.debug("runMonitorInfo()");

    m_now = new Date(HinemosTime.currentTimeMillis());

    m_priorityMap = new HashMap<Integer, ArrayList<String>>();
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_INFO), new ArrayList<String>());
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_WARNING), new ArrayList<String>());
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_CRITICAL), new ArrayList<String>());
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_UNKNOWN), new ArrayList<String>());

    try {
        // load the monitor settings
        boolean run = this.setMonitorInfo(m_monitorTypeId, m_monitorId);
        if (!run) {
            // nothing to run
            return true;
        }

        // load the judgement settings
        setJudgementInfo();
        // load the check settings
        setCheckInfo();

        ArrayList<String> facilityList = null;
        ExecutorCompletionService<ArrayList<MonitorRunResultInfo>> ecs =
                new ExecutorCompletionService<ArrayList<MonitorRunResultInfo>>(
                        ParallelExecution.instance().getExecutorService());
        int taskCount = 0;

        if (!m_isMonitorJob) {
            // regular monitoring:
            // resolve the target facility IDs under the configured scope
            facilityList = new RepositoryControllerBean().getExecTargetFacilityIdList(m_facilityId,
                    m_monitor.getOwnerRoleId());
            if (facilityList.size() == 0) {
                return true;
            }

            m_isNode = new RepositoryControllerBean().isNode(m_facilityId);

            // cache node info for each target facility
            nodeInfo = new HashMap<String, NodeInfo>();
            for (String facilityId : facilityList) {
                try {
                    synchronized (this) {
                        nodeInfo.put(facilityId, new RepositoryControllerBean().getNode(facilityId));
                    }
                } catch (FacilityNotFound e) {
                    // ignore facilities that no longer exist
                }
            }

            m_log.debug("monitor start : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

            String facilityId = null;

            // submit one task per facility ID
            Iterator<String> itr = facilityList.iterator();
            while (itr.hasNext()) {
                facilityId = itr.next();
                if (facilityId != null && !"".equals(facilityId)) {
                    // each task gets its own RunMonitor instance,
                    // since the instances are not shared safely across threads
                    RunMonitorHttpScenario runMonitor = new RunMonitorHttpScenario();

                    // copy the settings into the new instance
                    runMonitor.m_monitorTypeId = this.m_monitorTypeId;
                    runMonitor.m_monitorId = this.m_monitorId;
                    runMonitor.m_now = this.m_now;
                    runMonitor.m_priorityMap = this.m_priorityMap;
                    runMonitor.setMonitorInfo(runMonitor.m_monitorTypeId, runMonitor.m_monitorId);
                    runMonitor.setJudgementInfo();
                    runMonitor.setCheckInfo();
                    runMonitor.nodeInfo = this.nodeInfo;

                    ecs.submit(new CallableTaskHttpScenario(runMonitor, facilityId));
                    taskCount++;
                } else {
                    itr.remove();
                }
            }

        } else {
            // running as a monitor job:
            // resolve the target facility IDs under the configured scope
            facilityList = new RepositoryControllerBean().getExecTargetFacilityIdList(m_facilityId,
                    m_monitor.getOwnerRoleId());
            if (facilityList.size() != 1 || !facilityList.get(0).equals(m_facilityId)) {
                return true;
            }

            m_isNode = new RepositoryControllerBean().isNode(m_facilityId);

            // cache node info for each target facility
            nodeInfo = new HashMap<String, NodeInfo>();
            for (String facilityId : facilityList) {
                try {
                    synchronized (this) {
                        nodeInfo.put(facilityId, new RepositoryControllerBean().getNode(m_facilityId));
                    }
                } catch (FacilityNotFound e) {
                    // ignore facilities that no longer exist
                }
            }

            m_log.debug("monitor start : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

            // the task gets its own RunMonitor instance,
            // since the instances are not shared safely across threads
            RunMonitorHttpScenario runMonitor = new RunMonitorHttpScenario();

            // copy the settings into the new instance
            runMonitor.m_isMonitorJob = this.m_isMonitorJob;
            runMonitor.m_monitorTypeId = this.m_monitorTypeId;
            runMonitor.m_monitorId = this.m_monitorId;
            runMonitor.m_now = this.m_now;
            runMonitor.m_priorityMap = this.m_priorityMap;
            runMonitor.setMonitorInfo(runMonitor.m_monitorTypeId, runMonitor.m_monitorId);
            runMonitor.setJudgementInfo();
            runMonitor.setCheckInfo();
            runMonitor.nodeInfo = this.nodeInfo;

            ecs.submit(new CallableTaskHttpScenario(runMonitor, m_facilityId));
            taskCount++;
        }

        // collect the results
        ArrayList<MonitorRunResultInfo> resultList = null;
        m_log.debug("total start : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

        // prepare the collected-data sample
        List<Sample> sampleList = new ArrayList<Sample>();
        Sample sample = null;
        if (m_monitor.getCollectorFlg()) {
            sample = new Sample(HinemosTime.getDateInstance(), m_monitor.getMonitorId());
        }

        for (int i = 0; i < taskCount; i++) {
            Future<ArrayList<MonitorRunResultInfo>> future = ecs.take();
            resultList = future.get();

            for (MonitorRunResultInfo result : resultList) {
                m_nodeDate = result.getNodeDate();
                String facilityId = result.getFacilityId();

                // send notifications
                if (!m_isMonitorJob) {
                    if (result.getMonitorFlg()) {
                        notify(true, facilityId, result.getCheckResult(), new Date(m_nodeDate), result);
                    }
                } else {
                    m_monitorRunResultInfo = new MonitorRunResultInfo();
                    m_monitorRunResultInfo.setPriority(result.getPriority());
                    m_monitorRunResultInfo.setCheckResult(result.getCheckResult());
                    m_monitorRunResultInfo.setNodeDate(m_nodeDate);
                    m_monitorRunResultInfo
                            .setMessageOrg(makeJobOrgMessage(result.getMessageOrg(), result.getMessage()));
                }

                // store the collected value
                if (sample != null && result.getCollectorFlg()) {
                    int errorCode = -1;
                    if (result.isCollectorResult()) {
                        errorCode = CollectedDataErrorTypeConstant.NOT_ERROR;
                    } else {
                        errorCode = CollectedDataErrorTypeConstant.UNKNOWN;
                    }
                    sample.set(facilityId, m_monitor.getItemName(), result.getValue(), errorCode,
                            result.getDisplayName());
                }
            }
        }

        // persist the collected samples
        if (sample != null) {
            sampleList.add(sample);
        }
        if (!sampleList.isEmpty()) {
            CollectDataUtil.put(sampleList);
        }

        m_log.debug("monitor end : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);
        return true;

    } catch (EntityExistsException e) {
        throw e;
    } catch (FacilityNotFound e) {
        throw e;
    } catch (InvalidRole e) {
        throw e;
    } catch (InterruptedException e) {
        m_log.info("runMonitorInfo() monitorTypeId = " + m_monitorTypeId + ", monitorId = " + m_monitorId
                + " : " + e.getClass().getSimpleName() + ", " + e.getMessage());
        return false;
    } catch (ExecutionException e) {
        m_log.info("runMonitorInfo() monitorTypeId = " + m_monitorTypeId + ", monitorId = " + m_monitorId
                + " : " + e.getClass().getSimpleName() + ", " + e.getMessage());
        return false;
    }
}
From source file:com.clustercontrol.monitor.run.factory.RunMonitor.java
/**
 * Runs the monitor.
 * (The original Japanese javadoc was garbled; the step descriptions below are
 * reconstructed from the surviving {@code @link} targets.)
 * <p>
 * <ol>
 * <li>Loads the monitor settings ({@link #setMonitorInfo(String, String)})</li>
 * <li>Loads the judgement settings ({@link #setJudgementInfo()})</li>
 * <li>Loads the check settings ({@link #setCheckInfo()})</li>
 * <li>Collects a value for each target facility ({@link #collect(String)})</li>
 * <li>Gets the check result ({@link #getCheckResult(boolean)})</li>
 * <li>Determines the priority ({@link #getPriority(int)})</li>
 * <li>Sends notifications ({@link #notify(boolean, String, int, Date)})</li>
 * </ol>
 *
 * @return <code>true</code> if the monitor ran (or was legitimately skipped)
 * @throws FacilityNotFound
 * @throws MonitorNotFound
 * @throws InvalidRole
 * @throws EntityExistsException
 * @throws HinemosUnknown
 *
 * @see #setMonitorInfo(String, String)
 * @see #setJudgementInfo()
 * @see #setCheckInfo()
 * @see #collect(String)
 * @see #getCheckResult(boolean)
 * @see #getPriority(int)
 * @see #notify(boolean, String, int, Date)
 */
protected boolean runMonitorInfo()
        throws FacilityNotFound, MonitorNotFound, InvalidRole, EntityExistsException, HinemosUnknown {

    m_now = HinemosTime.getDateInstance();

    m_priorityMap = new HashMap<Integer, ArrayList<String>>();
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_INFO), new ArrayList<String>());
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_WARNING), new ArrayList<String>());
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_CRITICAL), new ArrayList<String>());
    m_priorityMap.put(Integer.valueOf(PriorityConstant.TYPE_UNKNOWN), new ArrayList<String>());
    List<Sample> sampleList = new ArrayList<Sample>();
    List<StringSample> collectedSamples = new ArrayList<>();

    try {
        // load the monitor settings
        boolean run = this.setMonitorInfo(m_monitorTypeId, m_monitorId);
        if (!run) {
            // nothing to run
            return true;
        }

        // load the judgement settings
        setJudgementInfo();
        // load the check settings
        setCheckInfo();

        ArrayList<String> facilityList = null;
        ExecutorCompletionService<MonitorRunResultInfo> ecs =
                new ExecutorCompletionService<MonitorRunResultInfo>(
                        ParallelExecution.instance().getExecutorService());
        int taskCount = 0;

        if (!m_isMonitorJob) {
            // regular monitoring:
            // resolve the target facility IDs under the configured scope
            facilityList = new RepositoryControllerBean().getExecTargetFacilityIdList(m_facilityId,
                    m_monitor.getOwnerRoleId());
            if (facilityList.size() == 0) {
                return true;
            }

            m_isNode = new RepositoryControllerBean().isNode(m_facilityId);

            // cache node info for each target facility
            nodeInfo = new HashMap<String, NodeInfo>();
            for (String facilityId : facilityList) {
                try {
                    synchronized (this) {
                        nodeInfo.put(facilityId, new RepositoryControllerBean().getNode(facilityId));
                    }
                } catch (FacilityNotFound e) {
                    // ignore facilities that no longer exist
                }
            }

            m_log.debug("monitor start : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

            // submit one task per facility ID
            Iterator<String> itr = facilityList.iterator();
            while (itr.hasNext()) {
                String facilityId = itr.next();
                if (facilityId != null && !"".equals(facilityId)) {
                    // each task gets its own RunMonitor instance,
                    // since the instances are not shared safely across threads
                    RunMonitor runMonitor = this.createMonitorInstance();

                    // copy the settings into the new instance
                    runMonitor.m_monitorTypeId = this.m_monitorTypeId;
                    runMonitor.m_monitorId = this.m_monitorId;
                    runMonitor.m_now = this.m_now;
                    runMonitor.m_priorityMap = this.m_priorityMap;
                    runMonitor.setMonitorInfo(runMonitor.m_monitorTypeId, runMonitor.m_monitorId);
                    runMonitor.setJudgementInfo();
                    runMonitor.setCheckInfo();
                    runMonitor.nodeInfo = this.nodeInfo;

                    ecs.submit(new MonitorExecuteTask(runMonitor, facilityId));
                    taskCount++;

                    if (m_log.isDebugEnabled()) {
                        m_log.debug("starting monitor result : monitorId = " + m_monitorId + ", facilityId = "
                                + facilityId);
                    }
                } else {
                    facilityList.remove(facilityId);
                }
            }

        } else {
            // running as a monitor job:
            // resolve the target facility IDs under the configured scope
            facilityList = new RepositoryControllerBean().getExecTargetFacilityIdList(m_facilityId,
                    m_monitor.getOwnerRoleId());
            if (facilityList.size() != 1 || !facilityList.get(0).equals(m_facilityId)) {
                return true;
            }

            m_isNode = true;

            // cache node info for the target facility
            nodeInfo = new HashMap<String, NodeInfo>();
            try {
                synchronized (this) {
                    nodeInfo.put(m_facilityId, new RepositoryControllerBean().getNode(m_facilityId));
                }
            } catch (FacilityNotFound e) {
                // ignore facilities that no longer exist
            }

            m_log.debug("monitor start : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

            // the task gets its own RunMonitor instance,
            // since the instances are not shared safely across threads
            RunMonitor runMonitor = this.createMonitorInstance();

            // copy the settings into the new instance
            runMonitor.m_isMonitorJob = this.m_isMonitorJob;
            runMonitor.m_monitorTypeId = this.m_monitorTypeId;
            runMonitor.m_monitorId = this.m_monitorId;
            runMonitor.m_now = this.m_now;
            runMonitor.m_priorityMap = this.m_priorityMap;
            runMonitor.setMonitorInfo(runMonitor.m_monitorTypeId, runMonitor.m_monitorId);
            runMonitor.setJudgementInfo();
            runMonitor.setCheckInfo();
            runMonitor.nodeInfo = this.nodeInfo;
            runMonitor.m_prvData = this.m_prvData;

            ecs.submit(new MonitorExecuteTask(runMonitor, m_facilityId));
            taskCount++;

            if (m_log.isDebugEnabled()) {
                m_log.debug("starting monitor result : monitorId = " + m_monitorId + ", facilityId = "
                        + m_facilityId);
            }
        }

        // collect the results
        MonitorRunResultInfo result = new MonitorRunResultInfo();

        m_log.debug("total start : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);

        // prepare the collected-data samples
        StringSample strSample = null;
        Sample sample = null;
        if (m_monitor.getCollectorFlg()) {
            // string-type monitors collect strings
            if (m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_STRING
                    || m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_TRAP) {
                strSample = new StringSample(HinemosTime.getDateInstance(), m_monitor.getMonitorId());
            }
            // numeric-type monitors collect values
            else {
                sample = new Sample(HinemosTime.getDateInstance(), m_monitor.getMonitorId());
            }
        }

        for (int i = 0; i < taskCount; i++) {
            Future<MonitorRunResultInfo> future = ecs.take();
            result = future.get();

            String facilityId = result.getFacilityId();
            m_nodeDate = result.getNodeDate();

            if (m_log.isDebugEnabled()) {
                m_log.debug("finished monitor : monitorId = " + m_monitorId + ", facilityId = " + facilityId);
            }

            // string-type monitors store the original message
            if (m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_STRING
                    || m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_TRAP) {
                if (strSample != null) {
                    strSample.set(facilityId, m_monitor.getMonitorTypeId(), result.getMessageOrg());
                }
            }

            if (!m_isMonitorJob) {
                // notify only when the result should be processed
                if (result.getProcessType().booleanValue()) {
                    // send notifications
                    notify(true, facilityId, result.getCheckResult(), new Date(m_nodeDate), result);

                    // store the collected value
                    if (sample != null) {
                        int errorType = -1;
                        if (result.isCollectorResult()) {
                            errorType = CollectedDataErrorTypeConstant.NOT_ERROR;
                        } else {
                            errorType = CollectedDataErrorTypeConstant.UNKNOWN;
                        }
                        sample.set(facilityId, m_monitor.getItemName(), result.getValue(), errorType);
                    }
                }
            } else {
                m_monitorRunResultInfo = new MonitorRunResultInfo();
                m_monitorRunResultInfo.setPriority(result.getPriority());
                m_monitorRunResultInfo.setCheckResult(result.getCheckResult());
                m_monitorRunResultInfo.setNodeDate(m_nodeDate);
                m_monitorRunResultInfo
                        .setMessageOrg(makeJobOrgMessage(result.getMessageOrg(), result.getMessage()));
                m_monitorRunResultInfo.setCurData(result.getCurData());
            }
        }

        // persist the collected samples
        if (m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_STRING
                || m_monitor.getMonitorType() == MonitorTypeConstant.TYPE_TRAP) {
            // string type
            if (strSample != null) {
                collectedSamples.add(strSample);
            }
            if (!collectedSamples.isEmpty()) {
                CollectStringDataUtil.store(collectedSamples);
            }
        } else {
            if (sample != null) {
                sampleList.add(sample);
            }
            if (!sampleList.isEmpty()) {
                CollectDataUtil.put(sampleList);
            }
        }

        m_log.debug("monitor end : monitorTypeId : " + m_monitorTypeId + ", monitorId : " + m_monitorId);
        return true;

    } catch (FacilityNotFound e) {
        throw e;
    } catch (InterruptedException e) {
        m_log.info("runMonitorInfo() monitorTypeId = " + m_monitorTypeId + ", monitorId = " + m_monitorId
                + " : " + e.getClass().getSimpleName() + ", " + e.getMessage());
        return false;
    } catch (ExecutionException e) {
        m_log.info("runMonitorInfo() monitorTypeId = " + m_monitorTypeId + ", monitorId = " + m_monitorId
                + " : " + e.getClass().getSimpleName() + ", " + e.getMessage());
        return false;
    }
}
From source file:org.apache.flume.channel.kafka.TestKafkaChannel.java
private void wait(ExecutorCompletionService<Void> submitterSvc, int max) throws Exception {
    int completed = 0;
    while (completed < max) {
        submitterSvc.take();
        completed++;
    }
}
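Note that this helper only counts completions: it never calls get() on the Future returned by take(), so an exception thrown inside a submitted task passes unnoticed. A variant that drains the same way but surfaces task failures might look like this (a sketch, not part of the Flume test):

import java.util.concurrent.ExecutorCompletionService;

final class DrainWithCheck {
    // Same drain loop, but get() rethrows (wrapped in ExecutionException)
    // anything a submitted task threw, so failures are not silently lost.
    static void waitAndCheck(ExecutorCompletionService<Void> svc, int max) throws Exception {
        for (int completed = 0; completed < max; completed++) {
            svc.take().get();
        }
    }
}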
From source file:org.apache.hadoop.hbase.client.SpeculativeMutater.java
public Boolean mutate(final long waitToSendFailover, final long waitToSendFailoverWithException,
        final HBaseTableFunction<Void> function, final HTableInterface primaryTable,
        final Collection<HTableInterface> failoverTables, final AtomicLong lastPrimaryFail,
        final int waitTimeFromLastPrimaryFail) {
    ExecutorCompletionService<Boolean> exeS = new ExecutorCompletionService<Boolean>(exe);
    ArrayList<Callable<Boolean>> callables = new ArrayList<Callable<Boolean>>();

    final AtomicBoolean isPrimarySuccess = new AtomicBoolean(false);
    final long startTime = System.currentTimeMillis();
    final long lastPrimaryFinalFail = lastPrimaryFail.get();

    if (System.currentTimeMillis() - lastPrimaryFinalFail > 5000) {
        callables.add(new Callable<Boolean>() {
            public Boolean call() throws Exception {
                try {
                    LOG.info(" --- CallingPrimary.1:" + isPrimarySuccess.get() + ", "
                            + (System.currentTimeMillis() - startTime));
                    function.call(primaryTable);
                    LOG.info(" --- CallingPrimary.2:" + isPrimarySuccess.get() + ", "
                            + (System.currentTimeMillis() - startTime));
                    isPrimarySuccess.set(true);
                    return true;
                } catch (java.io.InterruptedIOException e) {
                    Thread.currentThread().interrupt();
                } catch (Exception e) {
                    lastPrimaryFail.set(System.currentTimeMillis());
                    Thread.currentThread().interrupt();
                }
                return null;
            }
        });
    }

    for (final HTableInterface failoverTable : failoverTables) {
        callables.add(new Callable<Boolean>() {
            public Boolean call() throws Exception {
                long waitToRequest = (System.currentTimeMillis() - lastPrimaryFinalFail > 5000)
                        ? waitToSendFailover - (System.currentTimeMillis() - startTime)
                        : waitToSendFailoverWithException - (System.currentTimeMillis() - startTime);

                LOG.info(" --- waitToRequest:" + waitToRequest + ","
                        + (System.currentTimeMillis() - lastPrimaryFinalFail) + ","
                        + (waitToSendFailover - (System.currentTimeMillis() - startTime)) + ","
                        + (waitToSendFailoverWithException - (System.currentTimeMillis() - startTime)));

                if (waitToRequest > 0) {
                    Thread.sleep(waitToRequest);
                }
                LOG.info(" --- isPrimarySuccess.get():" + isPrimarySuccess.get());
                if (isPrimarySuccess.get() == false) {
                    LOG.info(" --- CallingFailOver.1:" + isPrimarySuccess.get() + ", "
                            + (System.currentTimeMillis() - startTime));
                    function.call(failoverTable);
                    LOG.info(" --- CallingFailOver.2:" + isPrimarySuccess.get() + ", "
                            + (System.currentTimeMillis() - startTime));
                }
                return false;
            }
        });
    }
    try {
        for (Callable<Boolean> call : callables) {
            exeS.submit(call);
        }
        Boolean result = exeS.take().get();
        return result;
    } catch (InterruptedException e) {
        e.printStackTrace();
        LOG.error(e);
    } catch (ExecutionException e) {
        e.printStackTrace();
        LOG.error(e);
    }
    return null;
}
From source file:org.apache.hadoop.hbase.client.SpeculativeRequester.java
public ResultWrapper<T> request(final HBaseTableFunction<T> function, final HTableInterface primaryTable,
        final Collection<HTableInterface> failoverTables) {
    ExecutorCompletionService<ResultWrapper<T>> exeS = new ExecutorCompletionService<ResultWrapper<T>>(exe);

    final AtomicBoolean isPrimarySuccess = new AtomicBoolean(false);
    final long startTime = System.currentTimeMillis();

    ArrayList<Callable<ResultWrapper<T>>> callables = new ArrayList<Callable<ResultWrapper<T>>>();

    if (System.currentTimeMillis() - lastPrimaryFail.get() > waitTimeFromLastPrimaryFail) {
        callables.add(new Callable<ResultWrapper<T>>() {
            public ResultWrapper<T> call() throws Exception {
                try {
                    T t = function.call(primaryTable);
                    isPrimarySuccess.set(true);
                    return new ResultWrapper(true, t);
                } catch (java.io.InterruptedIOException e) {
                    Thread.currentThread().interrupt();
                } catch (Exception e) {
                    lastPrimaryFail.set(System.currentTimeMillis());
                    Thread.currentThread().interrupt();
                }
                return null;
            }
        });
    }

    for (final HTableInterface failoverTable : failoverTables) {
        callables.add(new Callable<ResultWrapper<T>>() {
            public ResultWrapper<T> call() throws Exception {
                long waitToRequest =
                        (System.currentTimeMillis() - lastPrimaryFail.get() > waitTimeFromLastPrimaryFail)
                                ? waitTimeBeforeRequestingFailover - (System.currentTimeMillis() - startTime)
                                : 0;

                if (waitToRequest > 0) {
                    Thread.sleep(waitToRequest);
                }
                if (isPrimarySuccess.get() == false) {
                    T t = function.call(failoverTable);

                    long waitToAccept =
                            (System.currentTimeMillis() - lastPrimaryFail.get() > waitTimeFromLastPrimaryFail)
                                    ? waitTimeBeforeAcceptingResults - (System.currentTimeMillis() - startTime)
                                    : 0;
                    if (isPrimarySuccess.get() == false) {
                        if (waitToAccept > 0) {
                            Thread.sleep(waitToAccept);
                        }
                    }

                    return new ResultWrapper(false, t);
                } else {
                    throw new RuntimeException("Not needed");
                }
            }
        });
    }
    try {
        //ResultWrapper<T> t = exe.invokeAny(callables);
        for (Callable<ResultWrapper<T>> call : callables) {
            exeS.submit(call);
        }
        ResultWrapper<T> result = exeS.take().get();
        //exe.shutdownNow();
        return result;
    } catch (InterruptedException e) {
        e.printStackTrace();
        LOG.error(e);
    } catch (ExecutionException e) {
        e.printStackTrace();
        LOG.error(e);
    }
    return null;
}
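Both HBase speculative classes above lean on the same property of take(): the first call returns whichever submitted callable finishes first, so the primary request and the delayed failovers effectively race and the winner's result is used. A stripped-down sketch of that first-result-wins race, with the back-off logic omitted (class and method names here are illustrative, not HBase API):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

final class FirstResultWins {
    // Submits every candidate and returns the first completed result; the slower
    // candidates are cancelled. Illustrative only: the classes above additionally
    // delay the failover calls so the primary usually wins the race.
    static <T> T race(ExecutorService pool, List<Callable<T>> candidates)
            throws InterruptedException, ExecutionException {
        ExecutorCompletionService<T> ecs = new ExecutorCompletionService<>(pool);
        List<Future<T>> futures = new ArrayList<>();
        for (Callable<T> candidate : candidates) {
            futures.add(ecs.submit(candidate));
        }
        try {
            return ecs.take().get(); // the first completion wins
        } finally {
            for (Future<T> f : futures) {
                f.cancel(true); // best-effort cancellation of the losers
            }
        }
    }
}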
From source file:org.apache.hadoop.hbase.io.hfile.TestHFileBlock.java
protected void testConcurrentReadingInternals()
        throws IOException, InterruptedException, ExecutionException {
    for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) {
        Path path = new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
        Random rand = defaultRandom();
        List<Long> offsets = new ArrayList<Long>();
        List<BlockType> types = new ArrayList<BlockType>();
        writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
        FSDataInputStream is = fs.open(path);
        long fileSize = fs.getFileStatus(path).getLen();
        HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(true)
                .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag)
                .withCompression(compressAlgo).build();
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, fileSize, meta);

        Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
        ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<Boolean>(exec);

        for (int i = 0; i < NUM_READER_THREADS; ++i) {
            ecs.submit(new BlockReaderThread("reader_" + (char) ('A' + i), hbr, offsets, types, fileSize));
        }

        for (int i = 0; i < NUM_READER_THREADS; ++i) {
            Future<Boolean> result = ecs.take();
            assertTrue(result.get());
            if (detailedLogging) {
                LOG.info(String.valueOf(i + 1) + " reader threads finished successfully (algo=" + compressAlgo
                        + ")");
            }
        }

        is.close();
    }
}
From source file:org.apache.hadoop.hbase.snapshot.SnapshotManifestV1.java
static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf, final Executor executor,
        final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc) throws IOException {
    FileStatus[] regions = FSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs));
    if (regions == null) {
        LOG.info("No regions under directory:" + snapshotDir);
        return null;
    }

    final ExecutorCompletionService<SnapshotRegionManifest> completionService =
            new ExecutorCompletionService<SnapshotRegionManifest>(executor);
    for (final FileStatus region : regions) {
        completionService.submit(new Callable<SnapshotRegionManifest>() {
            @Override
            public SnapshotRegionManifest call() throws IOException {
                HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, region.getPath());
                return buildManifestFromDisk(conf, fs, snapshotDir, hri);
            }
        });
    }

    ArrayList<SnapshotRegionManifest> regionsManifest =
            new ArrayList<SnapshotRegionManifest>(regions.length);
    try {
        for (int i = 0; i < regions.length; ++i) {
            regionsManifest.add(completionService.take().get());
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        IOException ex = new IOException();
        ex.initCause(e.getCause());
        throw ex;
    }
    return regionsManifest;
}
From source file:org.apache.hadoop.hbase.snapshot.SnapshotManifestV2.java
static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf, final Executor executor,
        final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc) throws IOException {
    FileStatus[] manifestFiles = FSUtils.listStatus(fs, snapshotDir, new PathFilter() {
        @Override
        public boolean accept(Path path) {
            return path.getName().startsWith(SNAPSHOT_MANIFEST_PREFIX);
        }
    });

    if (manifestFiles == null || manifestFiles.length == 0)
        return null;

    final ExecutorCompletionService<SnapshotRegionManifest> completionService =
            new ExecutorCompletionService<SnapshotRegionManifest>(executor);
    for (final FileStatus st : manifestFiles) {
        completionService.submit(new Callable<SnapshotRegionManifest>() {
            @Override
            public SnapshotRegionManifest call() throws IOException {
                FSDataInputStream stream = fs.open(st.getPath());
                try {
                    return SnapshotRegionManifest.parseFrom(stream);
                } finally {
                    stream.close();
                }
            }
        });
    }

    ArrayList<SnapshotRegionManifest> regionsManifest =
            new ArrayList<SnapshotRegionManifest>(manifestFiles.length);
    try {
        for (int i = 0; i < manifestFiles.length; ++i) {
            regionsManifest.add(completionService.take().get());
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        IOException ex = new IOException();
        ex.initCause(e.getCause());
        throw ex;
    }
    return regionsManifest;
}
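The two manifest loaders (V1 and V2) share one recipe: submit a callable per region or manifest file, call take().get() exactly as many times as there were submissions, and translate InterruptedException and ExecutionException into IOExceptions. A generic sketch of that recipe under the same assumptions; the class and method names here are illustrative, not HBase API:

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorCompletionService;

final class GatherAll {
    // Runs every task and returns all results, converting the concurrency
    // exceptions to IOExceptions the way the manifest loaders above do.
    static <T> List<T> gather(Executor executor, List<Callable<T>> tasks) throws IOException {
        ExecutorCompletionService<T> ecs = new ExecutorCompletionService<>(executor);
        for (Callable<T> task : tasks) {
            ecs.submit(task);
        }
        List<T> results = new ArrayList<>(tasks.size());
        try {
            // one take() per submission; results arrive in completion order
            for (int i = 0; i < tasks.size(); i++) {
                results.add(ecs.take().get());
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new InterruptedIOException(e.getMessage());
        } catch (ExecutionException e) {
            throw new IOException(e.getCause());
        }
        return results;
    }
}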