List of usage examples for java.util.Set.toString()
public String toString()
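Before the project examples below, here is a minimal, self-contained sketch (the class name and the set contents are made up for illustration) of what Set.toString() actually produces: the AbstractCollection implementation inherited by HashSet, LinkedHashSet and TreeSet renders the elements comma-separated inside square brackets, in the set's iteration order.

import java.util.LinkedHashSet;
import java.util.Set;

public class SetToStringDemo {
    public static void main(String[] args) {
        // LinkedHashSet keeps insertion order, so the printed order is predictable
        Set<String> namespaces = new LinkedHashSet<>();
        namespaces.add("http://rs.tdwg.org/dwc/dwcore/");
        namespaces.add("http://example.org/schema/2.0");

        // toString() is usually invoked implicitly through string concatenation or logging,
        // exactly as in the examples that follow
        System.out.println("Supported namespaces: " + namespaces);
        // prints: Supported namespaces: [http://rs.tdwg.org/dwc/dwcore/, http://example.org/schema/2.0]
    }
}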
From source file:org.gbif.harvest.tapir.TapirMetadataHandler.java
/**
 * Get the most prioritized content namespace.
 * In the event the capabilities response cannot be parsed,
 * the default content namespace is used.
 *
 * @param inputStream capabilities response as ByteArrayInputStream
 * @param directory as String
 *
 * @return most prioritized content namespace
 *
 * @throws HarvesterException thrown if method fails
 */
private String getNamespace(ByteArrayInputStream inputStream, String directory) throws HarvesterException {
    log.info("tapirmetadatahandler.start.getNamespace");
    // Initially, set the namespace to the default
    String newestNamespace = DEFAULT_CONTENT_NAMESPACE;
    // reset stream as we're reading it a second time
    if (inputStream != null) {
        inputStream.reset();
    }
    // retrieve the list of supported namespaces
    try {
        // namespaces = returnNamespace(fis, NAMESPACE_RESPONSE_XPATH_ELEMENT);
        Set<String> namespaces = digesterUtils.xmlToListOfAttributeValuesForSingleElement(inputStream,
                TapirMetadataHandler.namespaceResponseXPathElement,
                TapirMetadataHandler.supportedNamespaceAttributeName);
        // Iterate through the ordered list of available namespaces and determine
        // which is the newest one from amongst the set of supported namespaces retrieved
        for (String supportedNamespace : supported_namespaces) {
            if (namespaces.contains(supportedNamespace)) {
                newestNamespace = supportedNamespace;
                log.debug("tapirmetadatahandler.getNamespace.chooseNamespace", newestNamespace);
                log.info("tapirmetadatahandler.end.getNamespace");
                return newestNamespace;
            }
        }
        // if not found, alert operator
        log.error("tapirmetadatahandler.default.conceptualMappingNotFound", namespaces.toString());
        // and write GBIF Log Message
        gbifLogger.openAndWriteToGbifLogMessageFile(directory,
                CommonGBIFLogEvent.COMMON_MESSAGES_UNKNOWN_SCHEMA_LOCATION.getName(),
                CommonGBIFLogEvent.COMMON_MESSAGES_UNKNOWN_SCHEMA_LOCATION.getValue(), Level.ERROR_INT,
                "None of the namespace(s) " + namespaces.toString()
                        + " was found in the TAPIR conceptualMapping.properties file. Please update this file with valid namespace(s) and try again. Defaulting to namespace http://rs.tdwg.org/dwc/dwcore/",
                1, false);
    } catch (IOException e) {
        log.error("tapirmetadatahandler.error.getNamespace.parsing", e.getMessage(), e);
        log.debug("tapirmetadatahandler.default.getNamespace.chooseNamespace", newestNamespace);
        // throw new HarvesterException(e.getMessage(), e);
    } catch (SAXException e) {
        log.error("tapirmetadatahandler.error.getNamespace.parsing", e.getMessage(), e);
        log.debug("tapirmetadatahandler.default.getNamespace.chooseNamespace", newestNamespace);
        // throw new HarvesterException(e.getMessage(), e);
    }
    // close inputStream
    try {
        if (inputStream != null) {
            inputStream.close();
        }
    } catch (Exception e) {
        // do nothing
    }
    log.info("tapirmetadatahandler.end.getNamespace");
    return newestNamespace;
}
From source file:org.apache.hadoop.hive.ql.parse.ExtractTmpSemanticAnalyzer.java
/** * 1. change ASTtrees according joinTree and ctx_1; * 2. after joinReorder, ASTtree's incJoin's right child include the incTable; * 3. return the leftAliases of incJoin// w ww . ja v a 2s . c om * because left child of incJoin need to surrounded by TOK_SUBQUERY; * @param incPoses * @return */ private void joinReorderAST(List<QBJoinTree> QBJoinTrees, List<ASTNode> ASTtrees, Set<String> incAliases, List<Integer> incPoses) { if (QBJoinTrees.size() == 1) { return; } assert QBJoinTrees.size() == ASTtrees.size(); LOG.info("incAliases are " + incAliases.toString()); int lastIncPos = incPoses.get(0); /*traverse the trees list to move the incTable outer*/ for (int inner = lastIncPos, outer = inner - 1; outer >= 0; inner--, outer--) { QBJoinTree innerQBJoinTree = QBJoinTrees.get(inner); QBJoinTree outerQBJoinTree = QBJoinTrees.get(outer); String[] leftAliases = innerQBJoinTree.getLeftAliases(); String[] rightAliases = innerQBJoinTree.getRightAliases(); List<String> leftsrc = outerQBJoinTree.getLeftSrc(); String[] outerRightAliases = outerQBJoinTree.getRightAliases(); boolean isinnerLeftInc = false, isinnerRightInc = false; boolean isFromLeft = false, isFromRight = false; boolean isouterRightInc = false; for (String leftAlias : leftAliases) { if (incAliases.contains(leftAlias)) { isinnerLeftInc = true; } if (leftsrc.contains(leftAlias)) { isFromLeft = true; } } for (String rightAlias : rightAliases) { if (incAliases.contains(rightAlias)) { isinnerRightInc = true; } if (leftsrc.contains(rightAlias)) { isFromRight = true; } } for (String outerRightAlias : outerRightAliases) { if (incAliases.contains(outerRightAlias)) { isouterRightInc = true; } } if (isouterRightInc || (isinnerLeftInc && isinnerRightInc) || (isFromLeft && isFromRight)) { LOG.info("Can't move " + Arrays.asList(outerRightAliases).toString() + " inner."); continue; } else if (isinnerLeftInc && isFromRight) { exchangeInc(ASTtrees, innerQBJoinTree, outerQBJoinTree, 0, outer, lastIncPos, incPoses); lastIncPos = outer; } else if (isinnerRightInc && isFromLeft) { exchangeInc(ASTtrees, innerQBJoinTree, outerQBJoinTree, 1, outer, lastIncPos, incPoses); lastIncPos = outer; } else { LOG.info("Can't move " + Arrays.asList(outerRightAliases).toString() + " inner."); } // boolean isExchange = false; // if(canExchange(incAliases,outerQBJoinTree.getLeftSrc(),ASTtrees,outer)){ // /*exchange the incTable and currAST's right table in AST tree*/ // if(java.util.Arrays.asList(leftAliases).containsAll(incAliases)){//incTable in left // exchangeInc(ASTtrees, innerQBJoinTree, outerQBJoinTree, 0,outer,lastIncPos,incPoses); // isExchange = true; // lastIncPos = outer; // }else if(java.util.Arrays.asList(rightAliases).containsAll(incAliases)){// incTable in right // exchangeInc(ASTtrees, innerQBJoinTree, outerQBJoinTree, 1,outer,lastIncPos,incPoses); // isExchange =true; // lastIncPos = outer; // }else{ // isExchange = false; // } // } // if(!isExchange){ // // add all incQBJoinTree's aliases to incAliases // CollectionUtils.addAll(incAliases,leftAliases); // CollectionUtils.addAll(incAliases,rightAliases); // } } }
From source file:org.etudes.component.app.jforum.JforumDataServiceImpl.java
/**
 * process topic
 *
 * @param connection connection
 * @param jforumUserId jforum user id
 * @param createdForumId forum id
 * @param topicTitle topic title
 * @param topicType topic type
 * @param topicFirstPostId topic's first post id
 * @throws SQLException
 */
private void processTopic(Connection connection, String fromContext, String toContextId, int jforumUserId,
        int fromForumId, int fromTopicId, int createdForumId, String topicTitle, int topicType, int topicGrade,
        int topicFirstPostId, Date startDate, Date endDate, int lockEndDate, Date gradebookEndDate)
        throws SQLException {
    int createdTopicId, createdPostId;
    /* create new topic then add new post. After adding post assign the post id to topic and update the topic */
    createdTopicId = createTopic(connection, toContextId, fromForumId, fromTopicId, createdForumId, topicTitle,
            jforumUserId, topicType, topicGrade, topicFirstPostId, startDate, endDate, lockEndDate,
            gradebookEndDate);
    // create post and post text
    createdPostId = createPost(connection, topicFirstPostId, createdTopicId, createdForumId, jforumUserId);
    String postTextSql = "select post_id, post_text, post_subject from jforum_posts_text where post_id = ?";
    PreparedStatement postTextStmnt = connection.prepareStatement(postTextSql);
    postTextStmnt.setInt(1, topicFirstPostId);
    ResultSet rsPostText = postTextStmnt.executeQuery();
    if (rsPostText.next()) {
        String postText = rsPostText.getString("post_text");
        String postSubject = rsPostText.getString("post_subject");
        // harvest post text embedded references
        Set<String> refs = XrefHelper.harvestEmbeddedReferences(postText, null);
        if (logger.isDebugEnabled())
            logger.debug("processTopic(): embed references found:" + refs.toString());
        if (!refs.isEmpty()) {
            List<Translation> translations = XrefHelper.importTranslateResources(refs, toContextId, "Jforum");
            postText = XrefHelper.translateEmbeddedReferences(postText, translations, toContextId);
        }
        // clean html
        String cleanedPostText = HtmlHelper.clean(postText, true);
        addPostText(connection, createdPostId, cleanedPostText, postSubject);
    }
    rsPostText.close();
    postTextStmnt.close();
    // update topic for first post id
    updateTopic(connection, createdTopicId, createdPostId);
    // process attachments
    processAttachments(connection, topicFirstPostId, createdPostId, jforumUserId);
    // increment user posts
    String userIncrementPosts = "UPDATE jforum_users SET user_posts = user_posts + 1 WHERE user_id = ?";
    PreparedStatement usrIncPostsStmnt = connection.prepareStatement(userIncrementPosts);
    usrIncPostsStmnt.setInt(1, jforumUserId);
    usrIncPostsStmnt.executeUpdate();
    usrIncPostsStmnt.close();
    // updateBoardStatus
    String forumUpdateLastPostSql = "UPDATE jforum_forums SET forum_last_post_id = ? WHERE forum_id = ?";
    PreparedStatement forumLastPostStmnt = connection.prepareStatement(forumUpdateLastPostSql);
    forumLastPostStmnt.setInt(1, createdPostId);
    forumLastPostStmnt.setInt(2, createdForumId);
    forumLastPostStmnt.executeUpdate();
    forumLastPostStmnt.close();
    // increase topic's count
    String forumIncrementTotalTopics = "UPDATE jforum_forums SET forum_topics = forum_topics + 1 WHERE forum_id = ?";
    PreparedStatement p = connection.prepareStatement(forumIncrementTotalTopics);
    p.setInt(1, createdForumId);
    p.executeUpdate();
    p.close();
}
From source file:com.dell.asm.asmcore.asmmanager.util.DeploymentValidator.java
private void checkDuplicateOSHostNames(DeploymentValid deploymentValid,
        Map<String, ServiceTemplateComponent> componentMap,
        Map<ServiceTemplateComponentType, Set<String>> componentTypeMap, Deployment deployment) {
    Set<String> duplicates = new HashSet<>();
    Set<String> currentHostNames = new HashSet<>();
    Set<String> componentIds = componentTypeMap.get(ServiceTemplateComponentType.SERVER);
    if (componentIds != null && componentIds.size() > 0) {
        for (String componentId : componentIds) {
            ServiceTemplateComponent component = componentMap.get(componentId);
            if (component != null) {
                ServiceTemplateSetting osHostNameSetting = component.getParameter(
                        ServiceTemplateSettingIDs.SERVICE_TEMPLATE_SERVER_OS_RESOURCE,
                        ServiceTemplateSettingIDs.SERVICE_TEMPLATE_SERVER_OS_HOSTNAME_ID);
                if (osHostNameSetting != null && StringUtils.isNotBlank(osHostNameSetting.getValue())
                        && !BrownfieldUtil.NOT_FOUND.equals(osHostNameSetting.getValue())) {
                    if (!currentHostNames.add(osHostNameSetting.getValue())) {
                        // keep track of duplicates
                        duplicates.add(osHostNameSetting.getValue());
                    }
                }
            }
        }
    }
    List<DeploymentNamesRefEntity> entities = getDeploymentNamesRefDAO()
            .getAllDeploymentNamesRefsByType(DeploymentNamesType.OS_HOST_NAME);
    if (entities != null && entities.size() > 0) {
        String thisDeploymentId = deployment.getId();
        for (DeploymentNamesRefEntity entity : entities) {
            if (entity.getDeploymentId() != null && !entity.getDeploymentId().equals(thisDeploymentId)) {
                if (currentHostNames.contains(entity.getName())) {
                    duplicates.add(entity.getName());
                }
            }
        }
    }
    if (!duplicates.isEmpty()) {
        logger.error("Duplicate hostnames found for Deployment " + deployment.getDeploymentName() + ". Found "
                + duplicates.size() + " number of duplicate names.");
        deploymentValid.setValid(false);
        deploymentValid.addMessage(AsmManagerMessages.duplicateHostname(duplicates.toString()));
    }
}
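The example above collects duplicate hostnames into a HashSet and passes duplicates.toString() into the validation message. A stripped-down sketch of the same detect-and-report pattern (the class name and hostnames are hypothetical, not taken from the project above):

import java.util.HashSet;
import java.util.Set;

public class DuplicateHostnameCheck {
    public static void main(String[] args) {
        String[] requested = { "node-a", "node-b", "node-a", "node-c", "node-b" };

        Set<String> seen = new HashSet<>();
        Set<String> duplicates = new HashSet<>();
        for (String name : requested) {
            // Set.add returns false when the element is already present,
            // which is how the validator above spots repeats
            if (!seen.add(name)) {
                duplicates.add(name);
            }
        }

        if (!duplicates.isEmpty()) {
            // toString() gives a bracketed, comma-separated listing that can go straight into a message
            System.out.println("Duplicate hostnames found: " + duplicates.toString());
            // prints: Duplicate hostnames found: [node-a, node-b]  (element order may vary with HashSet)
        }
    }
}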
From source file:org.apache.hadoop.hbase.master.TestMasterFailover.java
/** * Complex test of master failover that tests as many permutations of the * different possible states that regions in transition could be in within ZK * pointing to an RS that has died while no master is around to process it. * <p>//from ww w . j a v a2 s . co m * This tests the proper handling of these states by the failed-over master * and includes a thorough testing of the timeout code as well. * <p> * Starts with a single master and two regionservers. * <p> * Creates two tables, enabledTable and disabledTable, each containing 5 * regions. The disabledTable is then disabled. * <p> * After reaching steady-state, the master is killed. We then mock several * states in ZK. And one of the RS will be killed. * <p> * After mocking them and killing an RS, we will startup a new master which * should become the active master and also detect that it is a failover. The * primary test passing condition will be that all regions of the enabled * table are assigned and all the regions of the disabled table are not * assigned. * <p> * The different scenarios to be tested are below: * <p> * <b>ZK State: CLOSING</b> * <p>A node can get into CLOSING state if</p> * <ul> * <li>An RS has begun to close a region * </ul> * <p>We will mock the scenarios</p> * <ul> * <li>Region was being closed but the RS died before finishing the close * </ul> * <b>ZK State: OPENED</b> * <p>A node can get into OPENED state if</p> * <ul> * <li>An RS has finished opening a region but not acknowledged by master yet * </ul> * <p>We will mock the scenarios</p> * <ul> * <li>Region of a table that should be enabled was opened by a now-dead RS * <li>Region of a table that should be disabled was opened by a now-dead RS * </ul> * <p> * <b>ZK State: NONE</b> * <p>A region could not have a transition node if</p> * <ul> * <li>The server hosting the region died and no master processed it * </ul> * <p>We will mock the scenarios</p> * <ul> * <li>Region of enabled table was on a dead RS that was not yet processed * <li>Region of disabled table was on a dead RS that was not yet processed * </ul> * @throws Exception */ @Test(timeout = 180000) public void testMasterFailoverWithMockedRITOnDeadRS() throws Exception { final int NUM_MASTERS = 1; final int NUM_RS = 2; // Create and start the cluster HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); Configuration conf = TEST_UTIL.getConfiguration(); conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1); conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, 2); TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS); MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); log("Cluster started"); // Create a ZKW to use in the test ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), "unittest", new Abortable() { @Override public void abort(String why, Throwable e) { LOG.error("Fatal ZK Error: " + why, e); org.junit.Assert.assertFalse("Fatal ZK error", true); } @Override public boolean isAborted() { return false; } }); // get all the master threads List<MasterThread> masterThreads = cluster.getMasterThreads(); assertEquals(1, masterThreads.size()); // only one master thread, let's wait for it to be initialized assertTrue(cluster.waitForActiveAndReadyMaster()); HMaster master = masterThreads.get(0).getMaster(); assertTrue(master.isActiveMaster()); assertTrue(master.isInitialized()); // disable load balancing on this master master.balanceSwitch(false); // create two tables in META, each with 30 regions byte[] FAMILY = Bytes.toBytes("family"); byte[][] SPLIT_KEYS = 
TEST_UTIL.getRegionSplitStartKeys(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 30); byte[] enabledTable = Bytes.toBytes("enabledTable"); HTableDescriptor htdEnabled = new HTableDescriptor(TableName.valueOf(enabledTable)); htdEnabled.addFamily(new HColumnDescriptor(FAMILY)); FileSystem filesystem = FileSystem.get(conf); Path rootdir = FSUtils.getRootDir(conf); FSTableDescriptors fstd = new FSTableDescriptors(filesystem, rootdir); // Write the .tableinfo fstd.createTableDescriptor(htdEnabled); HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getTableName(), null, null); createRegion(hriEnabled, rootdir, conf, htdEnabled); List<HRegionInfo> enabledRegions = TEST_UTIL.createMultiRegionsInMeta(TEST_UTIL.getConfiguration(), htdEnabled, SPLIT_KEYS); TableName disabledTable = TableName.valueOf("disabledTable"); HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable); htdDisabled.addFamily(new HColumnDescriptor(FAMILY)); // Write the .tableinfo fstd.createTableDescriptor(htdDisabled); HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getTableName(), null, null); createRegion(hriDisabled, rootdir, conf, htdDisabled); List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS); log("Regions in hbase:meta and Namespace have been created"); // at this point we only expect 2 regions to be assigned out (catalogs and namespace ) assertEquals(2, cluster.countServedRegions()); // The first RS will stay online List<RegionServerThread> regionservers = cluster.getRegionServerThreads(); HRegionServer hrs = regionservers.get(0).getRegionServer(); // The second RS is going to be hard-killed RegionServerThread hrsDeadThread = regionservers.get(1); HRegionServer hrsDead = hrsDeadThread.getRegionServer(); ServerName deadServerName = hrsDead.getServerName(); // we'll need some regions to already be assigned out properly on live RS List<HRegionInfo> enabledAndAssignedRegions = new ArrayList<HRegionInfo>(); enabledAndAssignedRegions.addAll(enabledRegions.subList(0, 6)); enabledRegions.removeAll(enabledAndAssignedRegions); List<HRegionInfo> disabledAndAssignedRegions = new ArrayList<HRegionInfo>(); disabledAndAssignedRegions.addAll(disabledRegions.subList(0, 6)); disabledRegions.removeAll(disabledAndAssignedRegions); // now actually assign them for (HRegionInfo hri : enabledAndAssignedRegions) { master.assignmentManager.regionPlans.put(hri.getEncodedName(), new RegionPlan(hri, null, hrs.getServerName())); master.assignRegion(hri); } for (HRegionInfo hri : disabledAndAssignedRegions) { master.assignmentManager.regionPlans.put(hri.getEncodedName(), new RegionPlan(hri, null, hrs.getServerName())); master.assignRegion(hri); } log("Waiting for assignment to finish"); ZKAssign.blockUntilNoRIT(zkw); master.assignmentManager.waitUntilNoRegionsInTransition(60000); log("Assignment completed"); assertTrue(" Table must be enabled.", master.getAssignmentManager().getTableStateManager() .isTableState(TableName.valueOf("enabledTable"), ZooKeeperProtos.Table.State.ENABLED)); // we also need regions assigned out on the dead server List<HRegionInfo> enabledAndOnDeadRegions = new ArrayList<HRegionInfo>(); enabledAndOnDeadRegions.addAll(enabledRegions.subList(0, 6)); enabledRegions.removeAll(enabledAndOnDeadRegions); List<HRegionInfo> disabledAndOnDeadRegions = new ArrayList<HRegionInfo>(); disabledAndOnDeadRegions.addAll(disabledRegions.subList(0, 6)); disabledRegions.removeAll(disabledAndOnDeadRegions); // set region plan to server to be killed and trigger 
assign for (HRegionInfo hri : enabledAndOnDeadRegions) { master.assignmentManager.regionPlans.put(hri.getEncodedName(), new RegionPlan(hri, null, deadServerName)); master.assignRegion(hri); } for (HRegionInfo hri : disabledAndOnDeadRegions) { master.assignmentManager.regionPlans.put(hri.getEncodedName(), new RegionPlan(hri, null, deadServerName)); master.assignRegion(hri); } // wait for no more RIT log("Waiting for assignment to finish"); ZKAssign.blockUntilNoRIT(zkw); master.assignmentManager.waitUntilNoRegionsInTransition(60000); log("Assignment completed"); // Due to master.assignRegion(hri) could fail to assign a region to a specified RS // therefore, we need make sure that regions are in the expected RS verifyRegionLocation(hrs, enabledAndAssignedRegions); verifyRegionLocation(hrs, disabledAndAssignedRegions); verifyRegionLocation(hrsDead, enabledAndOnDeadRegions); verifyRegionLocation(hrsDead, disabledAndOnDeadRegions); assertTrue(" Didn't get enough regions of enabledTalbe on live rs.", enabledAndAssignedRegions.size() >= 2); assertTrue(" Didn't get enough regions of disalbedTable on live rs.", disabledAndAssignedRegions.size() >= 2); assertTrue(" Didn't get enough regions of enabledTalbe on dead rs.", enabledAndOnDeadRegions.size() >= 2); assertTrue(" Didn't get enough regions of disalbedTable on dead rs.", disabledAndOnDeadRegions.size() >= 2); // Stop the master log("Aborting master"); cluster.abortMaster(0); cluster.waitOnMaster(0); log("Master has aborted"); /* * Now, let's start mocking up some weird states as described in the method * javadoc. */ // Master is down, so is the meta. We need to assign it somewhere // so that regions can be assigned during the mocking phase. ZKAssign.createNodeOffline(zkw, HRegionInfo.FIRST_META_REGIONINFO, hrs.getServerName()); ProtobufUtil.openRegion(hrs.getRSRpcServices(), hrs.getServerName(), HRegionInfo.FIRST_META_REGIONINFO); while (true) { ServerName sn = MetaRegionTracker.getMetaRegionLocation(zkw); if (sn != null && sn.equals(hrs.getServerName())) { break; } Thread.sleep(100); } List<HRegionInfo> regionsThatShouldBeOnline = new ArrayList<HRegionInfo>(); List<HRegionInfo> regionsThatShouldBeOffline = new ArrayList<HRegionInfo>(); log("Beginning to mock scenarios"); // Disable the disabledTable in ZK TableStateManager zktable = new ZKTableStateManager(zkw); zktable.setTableState(disabledTable, ZooKeeperProtos.Table.State.DISABLED); assertTrue(" The enabled table should be identified on master fail over.", zktable.isTableState(TableName.valueOf("enabledTable"), ZooKeeperProtos.Table.State.ENABLED)); /* * ZK = CLOSING */ // Region of enabled table being closed on dead RS but not finished HRegionInfo region = enabledAndOnDeadRegions.remove(0); regionsThatShouldBeOnline.add(region); ZKAssign.createNodeClosing(zkw, region, deadServerName); LOG.debug("\n\nRegion of enabled table was CLOSING on dead RS\n" + region + "\n\n"); // Region of disabled table being closed on dead RS but not finished region = disabledAndOnDeadRegions.remove(0); regionsThatShouldBeOffline.add(region); ZKAssign.createNodeClosing(zkw, region, deadServerName); LOG.debug("\n\nRegion of disabled table was CLOSING on dead RS\n" + region + "\n\n"); /* * ZK = CLOSED */ // Region of enabled on dead server gets closed but not ack'd by master region = enabledAndOnDeadRegions.remove(0); regionsThatShouldBeOnline.add(region); int version = ZKAssign.createNodeClosing(zkw, region, deadServerName); ZKAssign.transitionNodeClosed(zkw, region, deadServerName, version); LOG.debug("\n\nRegion 
of enabled table was CLOSED on dead RS\n" + region + "\n\n"); // Region of disabled on dead server gets closed but not ack'd by master region = disabledAndOnDeadRegions.remove(0); regionsThatShouldBeOffline.add(region); version = ZKAssign.createNodeClosing(zkw, region, deadServerName); ZKAssign.transitionNodeClosed(zkw, region, deadServerName, version); LOG.debug("\n\nRegion of disabled table was CLOSED on dead RS\n" + region + "\n\n"); /* * ZK = OPENING */ // RS was opening a region of enabled table then died region = enabledRegions.remove(0); regionsThatShouldBeOnline.add(region); ZKAssign.createNodeOffline(zkw, region, deadServerName); ZKAssign.transitionNodeOpening(zkw, region, deadServerName); LOG.debug("\n\nRegion of enabled table was OPENING on dead RS\n" + region + "\n\n"); // RS was opening a region of disabled table then died region = disabledRegions.remove(0); regionsThatShouldBeOffline.add(region); ZKAssign.createNodeOffline(zkw, region, deadServerName); ZKAssign.transitionNodeOpening(zkw, region, deadServerName); LOG.debug("\n\nRegion of disabled table was OPENING on dead RS\n" + region + "\n\n"); /* * ZK = OPENED */ // Region of enabled table was opened on dead RS region = enabledRegions.remove(0); regionsThatShouldBeOnline.add(region); ZKAssign.createNodeOffline(zkw, region, deadServerName); ProtobufUtil.openRegion(hrsDead.getRSRpcServices(), hrsDead.getServerName(), region); while (true) { byte[] bytes = ZKAssign.getData(zkw, region.getEncodedName()); RegionTransition rt = RegionTransition.parseFrom(bytes); if (rt != null && rt.getEventType().equals(EventType.RS_ZK_REGION_OPENED)) { break; } Thread.sleep(100); } LOG.debug("\n\nRegion of enabled table was OPENED on dead RS\n" + region + "\n\n"); // Region of disabled table was opened on dead RS region = disabledRegions.remove(0); regionsThatShouldBeOffline.add(region); ZKAssign.createNodeOffline(zkw, region, deadServerName); ProtobufUtil.openRegion(hrsDead.getRSRpcServices(), hrsDead.getServerName(), region); while (true) { byte[] bytes = ZKAssign.getData(zkw, region.getEncodedName()); RegionTransition rt = RegionTransition.parseFrom(bytes); if (rt != null && rt.getEventType().equals(EventType.RS_ZK_REGION_OPENED)) { break; } Thread.sleep(100); } LOG.debug("\n\nRegion of disabled table was OPENED on dead RS\n" + region + "\n\n"); /* * ZK = NONE */ // Region of enabled table was open at steady-state on dead RS region = enabledRegions.remove(0); regionsThatShouldBeOnline.add(region); ZKAssign.createNodeOffline(zkw, region, deadServerName); ProtobufUtil.openRegion(hrsDead.getRSRpcServices(), hrsDead.getServerName(), region); while (true) { byte[] bytes = ZKAssign.getData(zkw, region.getEncodedName()); RegionTransition rt = RegionTransition.parseFrom(bytes); if (rt != null && rt.getEventType().equals(EventType.RS_ZK_REGION_OPENED)) { ZKAssign.deleteOpenedNode(zkw, region.getEncodedName(), rt.getServerName()); LOG.debug("DELETED " + rt); break; } Thread.sleep(100); } LOG.debug("\n\nRegion of enabled table was open at steady-state on dead RS" + "\n" + region + "\n\n"); // Region of disabled table was open at steady-state on dead RS region = disabledRegions.remove(0); regionsThatShouldBeOffline.add(region); ZKAssign.createNodeOffline(zkw, region, deadServerName); ProtobufUtil.openRegion(hrsDead.getRSRpcServices(), hrsDead.getServerName(), region); while (true) { byte[] bytes = ZKAssign.getData(zkw, region.getEncodedName()); RegionTransition rt = RegionTransition.parseFrom(bytes); if (rt != null && 
rt.getEventType().equals(EventType.RS_ZK_REGION_OPENED)) { ZKAssign.deleteOpenedNode(zkw, region.getEncodedName(), rt.getServerName()); break; } Thread.sleep(100); } LOG.debug("\n\nRegion of disabled table was open at steady-state on dead RS" + "\n" + region + "\n\n"); /* * DONE MOCKING */ log("Done mocking data up in ZK"); // Kill the RS that had a hard death log("Killing RS " + deadServerName); hrsDead.abort("Killing for unit test"); log("RS " + deadServerName + " killed"); // Start up a new master. Wait until regionserver is completely down // before starting new master because of hbase-4511. while (hrsDeadThread.isAlive()) { Threads.sleep(10); } log("Starting up a new master"); master = cluster.startMaster().getMaster(); log("Waiting for master to be ready"); assertTrue(cluster.waitForActiveAndReadyMaster()); log("Master is ready"); // Wait until SSH processing completed for dead server. while (master.getServerManager().areDeadServersInProgress()) { Thread.sleep(10); } // Failover should be completed, now wait for no RIT log("Waiting for no more RIT"); ZKAssign.blockUntilNoRIT(zkw); log("No more RIT in ZK"); long now = System.currentTimeMillis(); long maxTime = 120000; boolean done = master.assignmentManager.waitUntilNoRegionsInTransition(maxTime); if (!done) { RegionStates regionStates = master.getAssignmentManager().getRegionStates(); LOG.info("rit=" + regionStates.getRegionsInTransition()); } long elapsed = System.currentTimeMillis() - now; assertTrue("Elapsed=" + elapsed + ", maxTime=" + maxTime + ", done=" + done, elapsed < maxTime); log("No more RIT in RIT map, doing final test verification"); // Grab all the regions that are online across RSs Set<HRegionInfo> onlineRegions = new TreeSet<HRegionInfo>(); now = System.currentTimeMillis(); maxTime = 30000; for (JVMClusterUtil.RegionServerThread rst : cluster.getRegionServerThreads()) { try { HRegionServer rs = rst.getRegionServer(); while (!rs.getRegionsInTransitionInRS().isEmpty()) { elapsed = System.currentTimeMillis() - now; assertTrue("Test timed out in getting online regions", elapsed < maxTime); if (rs.isAborted() || rs.isStopped()) { // This region server is stopped, skip it. break; } Thread.sleep(100); } onlineRegions.addAll(ProtobufUtil.getOnlineRegions(rs.getRSRpcServices())); } catch (RegionServerStoppedException e) { LOG.info("Got RegionServerStoppedException", e); } } // Now, everything that should be online should be online for (HRegionInfo hri : regionsThatShouldBeOnline) { assertTrue("region=" + hri.getRegionNameAsString() + ", " + onlineRegions.toString(), onlineRegions.contains(hri)); } // Everything that should be offline should not be online for (HRegionInfo hri : regionsThatShouldBeOffline) { assertFalse(onlineRegions.contains(hri)); } log("Done with verification, all passed, shutting down cluster"); // Done, shutdown the cluster TEST_UTIL.shutdownMiniCluster(); }
From source file:org.dspace.harvest.OAIHarvester.java
/** * Performs a harvest cycle on this collection. This will query the remote OAI-PMH provider, check for updates since last * harvest, and ingest the returned items. * * @throws IOException// w ww .j av a2 s . c o m * A general class of exceptions produced by failed or interrupted I/O operations. * @throws SQLException * An exception that provides information on a database access error or other errors. * @throws AuthorizeException * Exception indicating the current user of the context does not have permission * to perform a particular action. */ public void runHarvest() throws SQLException, IOException, AuthorizeException { Context.Mode originalMode = ourContext.getCurrentMode(); ourContext.setMode(Context.Mode.BATCH_EDIT); // figure out the relevant parameters String oaiSource = harvestRow.getOaiSource(); String oaiSetId = harvestRow.getOaiSetId(); //If we have all selected then make sure that we do not include a set filter if ("all".equals(oaiSetId)) { oaiSetId = null; } Date lastHarvestDate = harvestRow.getHarvestDate(); String fromDate = null; if (lastHarvestDate != null) { fromDate = processDate(harvestRow.getHarvestDate()); } long totalListSize = 0; long currentRecord = 0; Date startTime = new Date(); String toDate = processDate(startTime, 0); String dateGranularity; try { // obtain the desired descriptive metadata format and verify that the OAI server actually provides it // do the same thing for ORE, which should be encoded in Atom and carry its namespace String descMDPrefix = null; String OREPrefix; try { dateGranularity = oaiGetDateGranularity(oaiSource); if (fromDate != null) { fromDate = fromDate.substring(0, dateGranularity.length()); } toDate = toDate.substring(0, dateGranularity.length()); descMDPrefix = oaiResolveNamespaceToPrefix(oaiSource, metadataNS.getURI()); OREPrefix = oaiResolveNamespaceToPrefix(oaiSource, ORESerialNS.getURI()); } catch (FileNotFoundException fe) { log.error("The OAI server did not respond."); throw new HarvestingException("The OAI server did not respond.", fe); } catch (ConnectException fe) { log.error("The OAI server did not respond."); throw new HarvestingException("The OAI server did not respond.", fe); } if (descMDPrefix == null) { log.error("The OAI server does not support this metadata format"); throw new HarvestingException( "The OAI server does not support this metadata format: " + metadataNS.getURI()); } if (OREPrefix == null && harvestRow.getHarvestType() != HarvestedCollection.TYPE_DMD) { throw new HarvestingException( "The OAI server does not support ORE dissemination in the configured serialization format: " + ORESerialNS.getURI()); } Document oaiResponse = null; Element root = null; String resumptionToken; // set the status indicating the collection is currently being processed harvestRow.setHarvestStatus(HarvestedCollection.STATUS_BUSY); harvestRow.setHarvestMessage("Collection harvesting is initializing..."); harvestRow.setHarvestStartTime(startTime); harvestedCollection.update(ourContext, harvestRow); intermediateCommit(); // expiration timer starts int expirationInterval = configurationService.getIntProperty("oai.harvester.threadTimeout"); if (expirationInterval == 0) { expirationInterval = 24; } Calendar calendar = Calendar.getInstance(); calendar.setTime(startTime); calendar.add(Calendar.HOUR, expirationInterval); Date expirationTime = calendar.getTime(); // main loop to keep requesting more objects until we're done List<Element> records; Set<String> errorSet = new HashSet<String>(); ListRecords listRecords = new 
ListRecords(oaiSource, fromDate, toDate, oaiSetId, descMDPrefix); log.debug("Harvesting request parameters: listRecords " + oaiSource + " " + fromDate + " " + toDate + " " + oaiSetId + " " + descMDPrefix); if (listRecords != null) { log.info("HTTP Request: " + listRecords.getRequestURL()); } while (listRecords != null) { records = new ArrayList<Element>(); oaiResponse = db.build(listRecords.getDocument()); if (listRecords.getErrors() != null && listRecords.getErrors().getLength() > 0) { for (int i = 0; i < listRecords.getErrors().getLength(); i++) { String errorCode = listRecords.getErrors().item(i).getAttributes().getNamedItem("code") .getTextContent(); errorSet.add(errorCode); } if (errorSet.contains("noRecordsMatch")) { log.info("noRecordsMatch: OAI server did not contain any updates"); harvestRow.setHarvestStartTime(new Date()); harvestRow.setHarvestMessage("OAI server did not contain any updates"); harvestRow.setHarvestStatus(HarvestedCollection.STATUS_READY); harvestedCollection.update(ourContext, harvestRow); return; } else { throw new HarvestingException(errorSet.toString()); } } else { root = oaiResponse.getRootElement(); records.addAll(root.getChild("ListRecords", OAI_NS).getChildren("record", OAI_NS)); Element resumptionElement = root.getChild("ListRecords", OAI_NS).getChild("resumptionToken", OAI_NS); if (resumptionElement != null && resumptionElement.getAttribute("completeListSize") != null) { String value = resumptionElement.getAttribute("completeListSize").getValue(); if (StringUtils.isNotBlank(value)) { totalListSize = Long.parseLong(value); } } } // Process the obtained records if (records != null && records.size() > 0) { log.info("Found " + records.size() + " records to process"); for (Element record : records) { // check for STOP interrupt from the scheduler if (HarvestScheduler.getInterrupt() == HarvestScheduler.HARVESTER_INTERRUPT_STOP) { throw new HarvestingException("Harvest process for " + targetCollection.getID() + " interrupted by stopping the scheduler."); } // check for timeout if (expirationTime.before(new Date())) { throw new HarvestingException( "runHarvest method timed out for collection " + targetCollection.getID()); } currentRecord++; processRecord(record, OREPrefix, currentRecord, totalListSize); ourContext.dispatchEvents(); intermediateCommit(); } } // keep going if there are more records to process resumptionToken = listRecords.getResumptionToken(); if (resumptionToken == null || resumptionToken.length() == 0) { listRecords = null; } else { listRecords = new ListRecords(oaiSource, resumptionToken); } ourContext.turnOffAuthorisationSystem(); try { collectionService.update(ourContext, targetCollection); harvestRow.setHarvestMessage( String.format("Collection is currently being harvested (item %d of %d)", currentRecord, totalListSize)); harvestedCollection.update(ourContext, harvestRow); } finally { //In case of an exception, make sure to restore our authentication state to the previous state ourContext.restoreAuthSystemState(); } ourContext.dispatchEvents(); intermediateCommit(); } } catch (HarvestingException hex) { log.error("Harvesting error occurred while processing an OAI record: " + hex.getMessage(), hex); harvestRow.setHarvestMessage("Error occurred while processing an OAI record"); // if the last status is also an error, alert the admin if (harvestRow.getHarvestMessage().contains("Error")) { alertAdmin(HarvestedCollection.STATUS_OAI_ERROR, hex); } harvestRow.setHarvestStatus(HarvestedCollection.STATUS_OAI_ERROR); 
harvestedCollection.update(ourContext, harvestRow); ourContext.complete(); return; } catch (Exception ex) { harvestRow.setHarvestMessage("Unknown error occurred while generating an OAI response"); harvestRow.setHarvestStatus(HarvestedCollection.STATUS_UNKNOWN_ERROR); harvestedCollection.update(ourContext, harvestRow); alertAdmin(HarvestedCollection.STATUS_UNKNOWN_ERROR, ex); log.error("Error occurred while generating an OAI response: " + ex.getMessage() + " " + ex.getCause(), ex); ourContext.complete(); return; } finally { harvestedCollection.update(ourContext, harvestRow); ourContext.turnOffAuthorisationSystem(); collectionService.update(ourContext, targetCollection); ourContext.restoreAuthSystemState(); } // If we got to this point, it means the harvest was completely successful Date finishTime = new Date(); long timeTaken = finishTime.getTime() - startTime.getTime(); harvestRow.setHarvestStartTime(startTime); harvestRow.setHarvestMessage("Harvest from " + oaiSource + " successful"); harvestRow.setHarvestStatus(HarvestedCollection.STATUS_READY); log.info("Harvest from " + oaiSource + " successful. The process took " + timeTaken + " milliseconds. Harvested " + currentRecord + " items."); harvestedCollection.update(ourContext, harvestRow); ourContext.setMode(originalMode); }
From source file:org.etudes.component.app.jforum.JforumDataServiceImpl.java
/** * creates topic//from w ww. j a va2 s . co m * * @param toForum topic forum * * @param fromTopic Existing topic * * @param toContextId To context of site * * @return The newly created topic */ protected Topic createTopic(Forum toForum, Topic fromTopic, String toContextId) { if (toForum == null || fromTopic == null) { return null; } org.etudes.api.app.jforum.Topic toTopic = jforumPostService.newTopic(); toTopic.setType(fromTopic.getType()); toTopic.setForumId(toForum.getId()); toTopic.setTitle(fromTopic.getTitle()); toTopic.setExportTopic(Boolean.TRUE); toTopic.setStatus(fromTopic.getStatus()); toTopic.getAccessDates().setOpenDate(fromTopic.getAccessDates().getOpenDate()); // hide until open if (fromTopic.getAccessDates().getOpenDate() != null) { toTopic.getAccessDates().setHideUntilOpen(fromTopic.getAccessDates().isHideUntilOpen()); } toTopic.getAccessDates().setDueDate(fromTopic.getAccessDates().getDueDate()); /*if (toTopic.getAccessDates().getDueDate() != null) { toTopic.getAccessDates().setLocked(fromTopic.getAccessDates().isLocked()); }*/ // allow until date toTopic.getAccessDates().setAllowUntilDate(fromTopic.getAccessDates().getAllowUntilDate()); if (toForum.getGradeType() == Grade.GradeType.TOPIC.getType()) { addGradeToTopic(fromTopic, toTopic); } else { /*if topic is gradable and forum is not grade by topic or not gradable make the topic grade by topic*/ if (fromTopic.isGradeTopic()) { Category category = toForum.getCategory(); if (toForum.getGradeType() == Grade.GradeType.FORUM.getType()) { toTopic.setGradeTopic(false); } else if (!category.isGradable()) { // modify forum to be grade by topic forum try { toForum.setGradeType(Grade.GradeType.TOPIC.getType()); toForum.setModifiedBySakaiUserId(UserDirectoryService.getCurrentUser().getId()); jforumForumService.modifyForum(toForum); addGradeToTopic(fromTopic, toTopic); } catch (JForumAccessException e) { toTopic.setGradeTopic(false); } catch (JForumGradesModificationException e) { toTopic.setGradeTopic(false); } } else { toTopic.setGradeTopic(false); } } else { toTopic.setGradeTopic(false); } } User postedBy = jforumUserService.getBySakaiUserId(UserDirectoryService.getCurrentUser().getId()); toTopic.setPostedBy(postedBy); Post post = jforumPostService.newPost(); Post fromTopicFirstPost = jforumPostService.getPost(fromTopic.getFirstPostId()); post.setSubject(fromTopicFirstPost.getSubject()); post.setBbCodeEnabled(fromTopicFirstPost.isBbCodeEnabled()); post.setSmiliesEnabled(fromTopicFirstPost.isSmiliesEnabled()); post.setSignatureEnabled(fromTopicFirstPost.isSignatureEnabled()); post.setPostedBy(postedBy); //post.setHtmlEnabled(fromTopicFirstPost.isHtmlEnabled()); String postText = fromTopicFirstPost.getText(); // harvest post text embedded references Set<String> refs = XrefHelper.harvestEmbeddedReferences(postText, null); if (logger.isDebugEnabled()) logger.debug("processTopic(): embed references found:" + refs.toString()); if (!refs.isEmpty()) { List<Translation> translations = XrefHelper.importTranslateResources(refs, toContextId, "Jforum"); postText = XrefHelper.translateEmbeddedReferences(postText, translations, toContextId); } // clean html String cleanedPostText = HtmlHelper.clean(postText, true); post.setText(cleanedPostText); // post attachments if (fromTopicFirstPost.hasAttachments()) { List<Attachment> fromTopicFirstPostAttachments = fromTopicFirstPost.getAttachments(); for (Attachment postAttachment : fromTopicFirstPostAttachments) { String fileName = null; String contentType = null; String comments = null; byte[] 
fileContent = null; fileName = postAttachment.getInfo().getRealFilename(); contentType = postAttachment.getInfo().getMimetype(); comments = postAttachment.getInfo().getComment(); String attachmentStoreDir = ServerConfigurationService.getString(ATTACHMENTS_STORE_DIR); if (attachmentStoreDir == null || attachmentStoreDir.trim().length() == 0) { if (logger.isWarnEnabled()) { logger.warn("JForum attachments directory (" + ATTACHMENTS_STORE_DIR + ") property is not set in sakai.properties "); } } else { String path = attachmentStoreDir + "/" + postAttachment.getInfo().getPhysicalFilename(); File attachmentFile = new File(path); try { fileContent = getBytesFromFile(attachmentFile); } catch (IOException e) { if (logger.isWarnEnabled()) { logger.warn(e.toString(), e); } continue; } Attachment attachment = jforumPostService.newAttachment(fileName, contentType, comments, fileContent); if (attachment != null) { post.getAttachments().add(attachment); post.setHasAttachments(Boolean.TRUE); } } } } toTopic.getPosts().clear(); toTopic.getPosts().add(post); try { jforumPostService.createTopic(toTopic); } catch (JForumAccessException e) { if (logger.isWarnEnabled()) { logger.warn(e.toString(), e); } } return toTopic; }
From source file:io.hops.hopsworks.api.zeppelin.socket.NotebookServerImpl.java
void permissionError(Session conn, String op, String userName, Set<String> userAndRoles, Set<String> allowed,
        Users user) throws IOException {
    LOG.log(Level.INFO, "Cannot {0}. Connection readers {1}. Allowed readers {2}",
            new Object[] { op, userAndRoles, allowed });
    sendMsg(conn,
            serializeMessage(new Message(Message.OP.AUTH_INFO).put("info",
                    "Insufficient privileges to " + op + "note.\n\n" + "Allowed users or roles: "
                            + allowed.toString() + "\n\n" + "But the user " + user.getLname() + " belongs to: "
                            + userAndRoles.toString())));
}
From source file:org.kuali.rice.krad.lookup.LookupableImpl.java
/**
 * Invoked to perform validation on the search criteria before the search is performed.
 *
 * <li>Check required criteria have a value</li>
 * <li>Check that criteria data type supports wildcards/operators</li>
 * <li>Check that wildcards/operators are not used on a secure criteria</li>
 * <li>Display info message when wildcards/operators are disabled</li>
 * <li>Throw exception when invalid criteria are specified</li>
 *
 * @param form lookup form instance containing the lookup data
 * @param searchCriteria map of criteria where key is search property name and value is
 * search value (which can include wildcards)
 * @return boolean true if validation was successful, false if there were errors and the search
 * should not be performed
 */
protected boolean validateSearchParameters(LookupForm form, Map<String, String> searchCriteria) {
    boolean valid = true;
    if (searchCriteria == null) {
        return valid;
    }
    // The form view can't be relied upon since the complete lifecycle hasn't run against it. Instead
    // the viewPostMetadata is being used for the validation.
    // If the view was not previously posted then it's impossible to validate the search parameters because
    // of the missing viewPostMetadata. When this happens we assume the search parameters are correct.
    // (Calling the search controller method directly without displaying the lookup first can cause
    // this situation.)
    if (form.getViewPostMetadata() == null) {
        return valid;
    }
    Set<String> unprocessedSearchCriteria = new HashSet<String>(searchCriteria.keySet());
    for (Map.Entry<String, Map<String, Object>> lookupCriteria : form.getViewPostMetadata().getLookupCriteria()
            .entrySet()) {
        String propertyName = lookupCriteria.getKey();
        Map<String, Object> lookupCriteriaAttributes = lookupCriteria.getValue();
        unprocessedSearchCriteria.remove(propertyName);
        if (isCriteriaRequired(lookupCriteriaAttributes)
                && StringUtils.isBlank(searchCriteria.get(propertyName))) {
            GlobalVariables.getMessageMap().putError(propertyName, RiceKeyConstants.ERROR_REQUIRED,
                    getCriteriaLabel(form, (String) lookupCriteriaAttributes
                            .get(UifConstants.LookupCriteriaPostMetadata.COMPONENT_ID)));
        }
        ValidCharactersConstraint constraint = getSearchCriteriaConstraint(lookupCriteriaAttributes);
        if (constraint != null) {
            validateSearchParameterConstraint(form, propertyName, lookupCriteriaAttributes,
                    searchCriteria.get(propertyName), constraint);
        }
        if (searchCriteria.containsKey(propertyName)) {
            validateSearchParameterWildcardAndOperators(form, propertyName, lookupCriteriaAttributes,
                    searchCriteria.get(propertyName));
        }
    }
    if (!unprocessedSearchCriteria.isEmpty()) {
        throw new RuntimeException(
                "Invalid search value sent for property name(s): " + unprocessedSearchCriteria.toString());
    }
    if (GlobalVariables.getMessageMap().hasErrors()) {
        valid = false;
    }
    return valid;
}
From source file:org.codehaus.mojo.jsimport.AbstractImportMojo.java
/** * Build up the dependency graph and global symbol table by parsing the project's dependencies. * /*from w ww . j a v a2s.co m*/ * @param scope compile or test. * @param fileDependencyGraphModificationTime the time that the dependency graph was updated. Used for file time * comparisons to check the age of them. * @param processedFiles an insert-ordered set of files that have been processed. * @param targetFolder Where the target files live. * @param workFolder Where we can create some long lived information that may be useful to subsequent builds. * @param compileWorkFolder Ditto but in the case of testing it points to where the compile working folder is. * @return true if the dependency graph has been updated. * @throws MojoExecutionException if something bad happens. */ private boolean buildDependencyGraphForDependencies(Scope scope, long fileDependencyGraphModificationTime, LinkedHashSet<File> processedFiles, File targetFolder, File workFolder, File compileWorkFolder) throws MojoExecutionException { File targetJsFolder = new File(targetFolder, "js"); boolean fileDependencyGraphUpdated = false; // Determine how we need to filter things both for direct filtering and transitive filtering. String scopeStr = (scope == Scope.COMPILE ? Artifact.SCOPE_COMPILE : Artifact.SCOPE_TEST); AndArtifactFilter jsArtifactFilter = new AndArtifactFilter(); jsArtifactFilter.add(new ScopeArtifactFilter(scopeStr)); jsArtifactFilter.add(new TypeArtifactFilter("js")); AndArtifactFilter wwwZipArtifactFilter = new AndArtifactFilter(); wwwZipArtifactFilter.add(new ScopeArtifactFilter(scopeStr)); wwwZipArtifactFilter.add(new TypeArtifactFilter("zip")); wwwZipArtifactFilter.add(new ArtifactFilter() { public boolean include(Artifact artifact) { return artifact.hasClassifier() && artifact.getClassifier().equals("www"); } }); // Determine the artifacts to resolve and associate their transitive dependencies. Map<Artifact, LinkedHashSet<Artifact>> directArtifactWithTransitives = new HashMap<Artifact, LinkedHashSet<Artifact>>( dependencies.size()); Set<Artifact> directArtifacts = new HashSet<Artifact>(dependencies.size()); LinkedHashSet<Artifact> transitiveArtifacts = new LinkedHashSet<Artifact>(); for (Dependency dependency : dependencies) { // Process imports and symbols of this dependencies' transitives // first. Artifact directArtifact = artifactFactory.createDependencyArtifact(dependency.getGroupId(), dependency.getArtifactId(), VersionRange.createFromVersion(dependency.getVersion()), dependency.getType(), dependency.getClassifier(), dependency.getScope()); if (!jsArtifactFilter.include(directArtifact) && !wwwZipArtifactFilter.include(directArtifact)) { continue; } Set<Artifact> artifactsToResolve = new HashSet<Artifact>(1); artifactsToResolve.add(directArtifact); ArtifactResolutionResult result; try { result = resolver.resolveTransitively(artifactsToResolve, project.getArtifact(), remoteRepositories, localRepository, artifactMetadataSource); } catch (ArtifactResolutionException e) { throw new MojoExecutionException("Problem resolving dependencies", e); } catch (ArtifactNotFoundException e) { throw new MojoExecutionException("Problem resolving dependencies", e); } // Associate the transitive dependencies with the direct dependency and aggregate all transitives for // collection later. 
LinkedHashSet<Artifact> directTransitiveArtifacts = new LinkedHashSet<Artifact>( result.getArtifacts().size()); for (Object o : result.getArtifacts()) { Artifact resolvedArtifact = (Artifact) o; if (jsArtifactFilter.include(resolvedArtifact) && // !resolvedArtifact.equals(directArtifact)) { directTransitiveArtifacts.add(resolvedArtifact); } } directArtifacts.add(directArtifact); transitiveArtifacts.addAll(directTransitiveArtifacts); directArtifactWithTransitives.put(directArtifact, directTransitiveArtifacts); } // Resolve the best versions of the transitives to use by asking Maven to collect them. Set<Artifact> collectedArtifacts = new HashSet<Artifact>( directArtifacts.size() + transitiveArtifacts.size()); Map<ArtifactId, Artifact> indexedCollectedDependencies = new HashMap<ArtifactId, Artifact>( collectedArtifacts.size()); try { // Note that we must pass an insert-order set into the collector. The collector appears to assume that order // is significant, even though it is undocumented. LinkedHashSet<Artifact> collectableArtifacts = new LinkedHashSet<Artifact>(directArtifacts); collectableArtifacts.addAll(transitiveArtifacts); ArtifactResolutionResult resolutionResult = artifactCollector.collect(collectableArtifacts, project.getArtifact(), localRepository, remoteRepositories, artifactMetadataSource, null, // Collections.EMPTY_LIST); for (Object o : resolutionResult.getArtifacts()) { Artifact collectedArtifact = (Artifact) o; collectedArtifacts.add(collectedArtifact); // Build up an index of of collected transitive dependencies so that we can we refer back to them as we // process the direct dependencies. ArtifactId collectedArtifactId = new ArtifactId(collectedArtifact.getGroupId(), collectedArtifact.getArtifactId()); indexedCollectedDependencies.put(collectedArtifactId, collectedArtifact); } if (getLog().isDebugEnabled()) { getLog().debug("Dependencies collected: " + collectedArtifacts.toString()); } } catch (ArtifactResolutionException e) { throw new MojoExecutionException("Cannot collect dependencies", e); } // Now go through direct artifacts and process their transitives. LocalRepositoryCollector localRepositoryCollector = new LocalRepositoryCollector(project, localRepository, new File[] {}); for (Entry<Artifact, LinkedHashSet<Artifact>> entry : directArtifactWithTransitives.entrySet()) { Artifact directArtifact = entry.getKey(); LinkedHashSet<Artifact> directArtifactTransitives = entry.getValue(); LinkedHashSet<String> transitivesAsImports = new LinkedHashSet<String>( directArtifactTransitives.size()); for (Object o : directArtifactTransitives) { Artifact directTransitiveArtifact = (Artifact) o; // Get the transitive artifact that Maven decided was the best to use. ArtifactId directTransitiveArtifactId = new ArtifactId(directTransitiveArtifact.getGroupId(), directTransitiveArtifact.getArtifactId()); Artifact transitiveArtifact = indexedCollectedDependencies.get(directTransitiveArtifactId); List<File> transitiveArtifactFiles = getArtifactFiles(transitiveArtifact, targetFolder, workFolder, compileWorkFolder, localRepositoryCollector); // Only process this dependency if we've not done so // already. 
for (File transitiveArtifactFile : transitiveArtifactFiles) { if (!processedFiles.contains(transitiveArtifactFile)) { String localRepository = localRepositoryCollector .findLocalRepository(transitiveArtifactFile.getAbsolutePath()); if (localRepository != null) { if (processFileForImportsAndSymbols(new File(localRepository), targetJsFolder, transitiveArtifactFile, fileDependencyGraphModificationTime, directArtifactTransitives)) { processedFiles.add(transitiveArtifactFile); fileDependencyGraphUpdated = true; } } else { throw new MojoExecutionException( "Problem determining local repository for transitive file: " + transitiveArtifactFile); } } // Add transitives to the artifacts set of dependencies - // as if they were @import statements themselves. transitivesAsImports.add(transitiveArtifactFile.getPath()); } } // Now deal with the pom specified dependency. List<File> artifactFiles = getArtifactFiles(directArtifact, targetFolder, workFolder, compileWorkFolder, localRepositoryCollector); for (File artifactFile : artifactFiles) { String artifactPath = artifactFile.getAbsolutePath(); // Process imports and symbols of this dependency if we've not // already done so. if (!processedFiles.contains(artifactFile)) { String localRepository = localRepositoryCollector .findLocalRepository(artifactFile.getAbsolutePath()); if (localRepository != null) { if (processFileForImportsAndSymbols(new File(localRepository), targetJsFolder, artifactFile, fileDependencyGraphModificationTime, null)) { processedFiles.add(artifactFile); fileDependencyGraphUpdated = true; } } else { throw new MojoExecutionException( "Problem determining local repository for file: " + artifactFile); } } // Add in our transitives to the dependency graph if they're not // already there. LinkedHashSet<String> existingImports = fileDependencies.get(artifactPath); if (existingImports.addAll(transitivesAsImports)) { if (getLog().isDebugEnabled()) { getLog().debug("Using transitives as import: " + transitivesAsImports + " for file: " + artifactPath); } fileDependencyGraphUpdated = true; } } } return fileDependencyGraphUpdated; }