List of usage examples for java.util.HashMap.keySet()
public Set<K> keySet()
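keySet() returns a Set view of the keys in the map: the set is backed by the map, so changes to the map show up in the set, and removing a key from the set removes the mapping. A minimal self-contained sketch (class name and sample data are illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class KeySetDemo {
    public static void main(String[] args) {
        Map<String, Integer> ages = new HashMap<>();
        ages.put("alice", 30);
        ages.put("bob", 25);

        // The returned set is a live view of the map's keys.
        Set<String> keys = ages.keySet();
        for (String key : keys) {
            System.out.println(key + " -> " + ages.get(key));
        }

        // Removing a key through the view also removes the mapping.
        keys.remove("bob");
        System.out.println(ages.containsKey("bob")); // prints: false
    }
}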
From source file:at.treedb.util.SevenZip.java
public static void main(String args[]) throws IOException {
    System.out.println("Extract");
    HashMap<String, byte[]> map = exctact(new File("c:/TreeDBdata/domains/ZooDB.7z"), "info.xml", "classes/");
    for (String s : map.keySet()) {
        if (s.startsWith("classes/")) {
            String className = s.substring("classes/".length(), s.lastIndexOf(".class")).replace("/", ".");
            System.out.println(className);
        }
    }
}
From source file:TwitterClustering.java
public static void main(String[] args) throws FileNotFoundException, IOException {
    File outFile = new File(args[3]);
    Scanner s = new Scanner(new File(args[1])).useDelimiter(",");
    JSONParser parser = new JSONParser();
    Set<Cluster> clusterSet = new HashSet<Cluster>();
    HashMap<String, Tweet> tweets = new HashMap<>();
    FileWriter fw = new FileWriter(outFile.getAbsoluteFile());
    BufferedWriter bw = new BufferedWriter(fw);
    // init
    try {
        Object obj = parser.parse(new FileReader(args[2]));
        JSONArray jsonArray = (JSONArray) obj;
        for (int i = 0; i < jsonArray.size(); i++) {
            Tweet twt = new Tweet();
            JSONObject jObj = (JSONObject) jsonArray.get(i);
            String text = jObj.get("text").toString();
            long sum = 0;
            for (int y = 0; y < text.toCharArray().length; y++) {
                sum += (int) text.toCharArray()[y];
            }
            String[] token = text.split(" ");
            String tID = jObj.get("id").toString();
            Set<String> mySet = new HashSet<String>(Arrays.asList(token));
            twt.setAttributeValue(sum);
            twt.setText(mySet);
            twt.setTweetID(tID);
            tweets.put(tID, twt);
        }
        // preparing initial clusters
        int i = 0;
        while (s.hasNext()) {
            String id = s.next(); // id
            Tweet t = tweets.get(id.trim());
            clusterSet.add(new Cluster(i + 1, t, new LinkedList()));
            i++;
        }
        for (int l = 0; l < 2; l++) { // two clustering passes
            // a fresh iterator per pass, so every tweet is reassigned each time
            Iterator<Map.Entry<String, Tweet>> it = tweets.entrySet().iterator();
            while (it.hasNext()) {
                Map.Entry<String, Tweet> me = it.next();
                // calculate distance to each centroid
                Tweet p = me.getValue();
                HashMap<Cluster, Float> distMap = new HashMap<>();
                for (Cluster clust : clusterSet) {
                    distMap.put(clust, jaccardDistance(p.getText(), clust.getCentroid().getText()));
                }
                HashMap<Cluster, Float> sorted = (HashMap<Cluster, Float>) sortByValue(distMap);
                // assign the tweet to the first (nearest) cluster after sorting by distance
                sorted.keySet().iterator().next().getMembers().add(p);
            }
            // calculate new centroid and update clusterSet
            for (Cluster clust : clusterSet) {
                TreeMap<String, Long> tDistMap = new TreeMap<>();
                Tweet newCentroid = null;
                Long avgSumDist = 0L;
                for (int j = 0; j < clust.getMembers().size(); j++) {
                    avgSumDist += clust.getMembers().get(j).getAttributeValue();
                    tDistMap.put(clust.getMembers().get(j).getTweetID(),
                            clust.getMembers().get(j).getAttributeValue());
                }
                if (clust.getMembers().size() != 0) {
                    avgSumDist /= (clust.getMembers().size());
                }
                ArrayList<Long> listValues = new ArrayList<Long>(tDistMap.values());
                if (tDistMap.containsValue(findClosestNumber(listValues, avgSumDist))) {
                    // found closest
                    newCentroid = tweets.get(getKeyByValue(tDistMap, findClosestNumber(listValues, avgSumDist)));
                    clust.setCentroid(newCentroid);
                }
            }
        }
        // write each cluster and its members
        Iterator iterator = clusterSet.iterator();
        while (iterator.hasNext()) {
            Cluster c = (Cluster) iterator.next();
            bw.write(c.getId() + "\t");
            System.out.print(c.getId() + "\t");
            for (Tweet t : c.getMembers()) {
                bw.write(t.getTweetID() + ", ");
                System.out.print(t.getTweetID() + ",");
            }
            bw.write("\n");
            System.out.println("");
        }
        System.out.println("");
        System.out.println("SSE " + sumSquaredErrror(clusterSet));
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        bw.close();
        fw.close();
    }
}
From source file:cooccurrence.emf.java
public static void main(String args[]) {
    String path = "";
    String writePath = "";
    BufferedReader br = null;
    ArrayList<String> files = new ArrayList<>();
    listFilesForFolder(new File(path), files);
    for (String filePath : files) {
        System.out.println(filePath);
        String fileName = new File(filePath).getName();
        // data structure to store the PPMI matrix in the file
        HashMap<String, HashMap<String, Double>> cooccur = new HashMap<>();
        // reading the file and storing the content in the hashmap
        readFileContents(filePath, cooccur);
        // Because matrices are identified by row and col id, the following
        // lists map id to the corresponding string. Note that the matrix is symmetric.
        ArrayList<String> rowStrings = new ArrayList<>(cooccur.keySet());
        ArrayList<String> colStrings = new ArrayList<>(cooccur.keySet());
        // creating a matrix with the given dimensions, initialized to 0
        RealMatrix matrixR = MatrixUtils.createRealMatrix(rowStrings.size(), colStrings.size());
        // populating the matrix from the co-occurrence hashmap
        populateMatrixR(matrixR, cooccur, rowStrings, colStrings);
    }
}
From source file:de.tudarmstadt.ukp.experiments.dip.wp1.documents.Step8GoldDataAggregator.java
public static void main(String[] args) throws Exception {
    String inputDir = args[0] + "/";
    // output dir
    File outputDir = new File(args[1]);
    File turkersConfidence = new File(args[2]);
    if (outputDir.exists()) {
        outputDir.delete();
    }
    outputDir.mkdir();

    List<String> annotatorsIDs = new ArrayList<>();
    //        for (File f : FileUtils.listFiles(new File(inputDir), new String[] { "xml" }, false)) {
    //            QueryResultContainer queryResultContainer = QueryResultContainer
    //                    .fromXML(FileUtils.readFileToString(f, "utf-8"));
    //            for (QueryResultContainer.SingleRankedResult rankedResults : queryResultContainer.rankedResults) {
    //                for (QueryResultContainer.MTurkRelevanceVote relevanceVote : rankedResults.mTurkRelevanceVotes) {
    //                    if (!annotatorsIDs.contains(relevanceVote.turkID))
    //                        annotatorsIDs.add(relevanceVote.turkID);
    //                }
    //            }
    //        }
    HashMap<String, Integer> countVotesForATurker = new HashMap<>();
    // creates a temporary file in the format expected by MACE.
    // HashMap annotations: key is the id of a document and a sentence;
    // value is an array votes[] of turkers' decisions: true or false (relevant or not).
    // The length of this array equals the number of annotators in List<String> annotatorsIDs.
    // If an annotator worked on the task, his decision is written in the array; otherwise the value is NULL.
    // key: queryID + clueWebID + sentenceID
    // value: true and false annotations
    TreeMap<String, Annotations> annotations = new TreeMap<>();
    for (File f : FileUtils.listFiles(new File(inputDir), new String[] { "xml" }, false)) {
        QueryResultContainer queryResultContainer = QueryResultContainer
                .fromXML(FileUtils.readFileToString(f, "utf-8"));
        System.out.println("Reading " + f.getName());
        for (QueryResultContainer.SingleRankedResult rankedResults : queryResultContainer.rankedResults) {
            String documentID = rankedResults.clueWebID;
            for (QueryResultContainer.MTurkRelevanceVote relevanceVote : rankedResults.mTurkRelevanceVotes) {
                Integer turkerID;
                if (!annotatorsIDs.contains(relevanceVote.turkID)) {
                    annotatorsIDs.add(relevanceVote.turkID);
                    turkerID = annotatorsIDs.size() - 1;
                } else {
                    turkerID = annotatorsIDs.indexOf(relevanceVote.turkID);
                }
                Integer count = countVotesForATurker.get(relevanceVote.turkID);
                if (count == null) {
                    count = 0;
                }
                count++;
                countVotesForATurker.put(relevanceVote.turkID, count);

                String id;
                List<Integer> trueVotes;
                List<Integer> falseVotes;
                for (QueryResultContainer.SingleSentenceRelevanceVote singleSentenceRelevanceVote : relevanceVote.singleSentenceRelevanceVotes)
                    if (!"".equals(singleSentenceRelevanceVote.sentenceID)) {
                        id = f.getName() + "_" + documentID + "_" + singleSentenceRelevanceVote.sentenceID;
                        Annotations turkerVotes = annotations.get(id);
                        if (turkerVotes == null) {
                            trueVotes = new ArrayList<>();
                            falseVotes = new ArrayList<>();
                            turkerVotes = new Annotations(trueVotes, falseVotes);
                        }
                        trueVotes = turkerVotes.trueAnnotations;
                        falseVotes = turkerVotes.falseAnnotations;
                        if ("true".equals(singleSentenceRelevanceVote.relevant)) {
                            // votes[turkerID] = true;
                            trueVotes.add(turkerID);
                        } else if ("false".equals(singleSentenceRelevanceVote.relevant)) {
                            // votes[turkerID] = false;
                            falseVotes.add(turkerID);
                        } else {
                            throw new IllegalStateException("Annotation value of sentence "
                                    + singleSentenceRelevanceVote.sentenceID + " in " + rankedResults.clueWebID
                                    + " equals " + singleSentenceRelevanceVote.relevant);
                        }
                        try {
                            int allVotesCount = trueVotes.size() + falseVotes.size();
                            if (allVotesCount > 5) {
                                System.err.println(id + " doesn't have 5 annotators: true: " + trueVotes.size()
                                        + " false: " + falseVotes.size());
                                // nasty hack, we're gonna strip some data; true votes first
                                /* we can't do that, it breaks something down the line
                                int toRemove = allVotesCount - 5;
                                if (trueVotes.size() >= toRemove) {
                                    trueVotes = trueVotes.subList(0, trueVotes.size() - toRemove);
                                } else if (falseVotes.size() >= toRemove) {
                                    falseVotes = falseVotes.subList(0, trueVotes.size() - toRemove);
                                }
                                */
                                System.err.println("Adjusted: " + id + " doesn't have 5 annotators: true: "
                                        + trueVotes.size() + " false: " + falseVotes.size());
                            }
                        } catch (IllegalStateException e) {
                            e.printStackTrace();
                        }
                        turkerVotes.trueAnnotations = trueVotes;
                        turkerVotes.falseAnnotations = falseVotes;
                        annotations.put(id, turkerVotes);
                    } else {
                        throw new IllegalStateException(
                                "Empty Sentence ID in " + f.getName() + " for turker " + turkerID);
                    }
            }
        }
    }
    File tmp = printHashMap(annotations, annotatorsIDs.size());

    String file = TEMP_DIR + "/" + tmp.getName();
    MACE.main(new String[] { "--prefix", file });

    // gets the keys of the documents and sentences
    ArrayList<String> lines = (ArrayList<String>) FileUtils.readLines(new File(file + ".prediction"));
    int i = 0;
    TreeMap<String, TreeMap<String, ArrayList<HashMap<String, String>>>> ids = new TreeMap<>();
    ArrayList<HashMap<String, String>> sentences;
    if (lines.size() != annotations.size()) {
        throw new IllegalStateException(
                "The size of the prediction file is " + lines.size() + " but expected " + annotations.size());
    }
    for (Map.Entry entry : annotations.entrySet()) {
        // e.g. 1001.xml_clueweb12-1905wb-13-07360_8783
        String key = (String) entry.getKey();
        String[] IDs = key.split("_");
        if (IDs.length > 2) {
            String queryID = IDs[0];
            String clueWebID = IDs[1];
            String sentenceID = IDs[2];
            TreeMap<String, ArrayList<HashMap<String, String>>> clueWebIDs = ids.get(queryID);
            if (clueWebIDs == null) {
                clueWebIDs = new TreeMap<>();
            }
            sentences = clueWebIDs.get(clueWebID);
            if (sentences == null) {
                sentences = new ArrayList<>();
            }
            HashMap<String, String> sentence = new HashMap<>();
            sentence.put(sentenceID, lines.get(i));
            sentences.add(sentence);
            clueWebIDs.put(clueWebID, sentences);
            ids.put(queryID, clueWebIDs);
        } else {
            throw new IllegalStateException("Wrong ID " + key);
        }
        i++;
    }
    for (Map.Entry entry : ids.entrySet()) {
        TreeMap<String, ArrayList<HashMap<String, String>>> value = (TreeMap<String, ArrayList<HashMap<String, String>>>) entry
                .getValue();
        String queryID = (String) entry.getKey();
        QueryResultContainer queryResultContainer = QueryResultContainer
                .fromXML(FileUtils.readFileToString(new File(inputDir, queryID), "utf-8"));
        for (QueryResultContainer.SingleRankedResult rankedResults : queryResultContainer.rankedResults) {
            for (Map.Entry val : value.entrySet()) {
                String clueWebID = (String) val.getKey();
                if (clueWebID.equals(rankedResults.clueWebID)) {
                    List<QueryResultContainer.SingleSentenceRelevanceVote> goldEstimatedLabels = new ArrayList<>();
                    List<QueryResultContainer.SingleSentenceRelevanceVote> turkersVotes = new ArrayList<>();
                    int size = 0;
                    int hitSize = 0;
                    String hitID = "";
                    for (QueryResultContainer.MTurkRelevanceVote vote : rankedResults.mTurkRelevanceVotes) {
                        if (!hitID.equals(vote.hitID)) {
                            hitID = vote.hitID;
                            hitSize = vote.singleSentenceRelevanceVotes.size();
                            size = size + hitSize;
                            turkersVotes.addAll(vote.singleSentenceRelevanceVotes);
                        } else {
                            if (vote.singleSentenceRelevanceVotes.size() != hitSize) {
                                hitSize = vote.singleSentenceRelevanceVotes.size();
                                size = size + hitSize;
                                turkersVotes.addAll(vote.singleSentenceRelevanceVotes);
                            }
                        }
                    }
                    ArrayList<HashMap<String, String>> sentenceList = (ArrayList<HashMap<String, String>>) val
                            .getValue();
                    if (sentenceList.size() != turkersVotes.size()) {
                        try {
                            throw new IllegalStateException("Expected size of annotations is "
                                    + turkersVotes.size() + " but found " + sentenceList.size()
                                    + " for document " + rankedResults.clueWebID + " in " + queryID);
                        } catch (IllegalStateException ex) {
                            ex.printStackTrace();
                        }
                    }
                    for (QueryResultContainer.SingleSentenceRelevanceVote s : turkersVotes) {
                        String valSentence = null;
                        for (HashMap<String, String> anno : sentenceList) {
                            if (anno.keySet().contains(s.sentenceID)) {
                                valSentence = anno.get(s.sentenceID);
                            }
                        }
                        QueryResultContainer.SingleSentenceRelevanceVote singleSentenceVote = new QueryResultContainer.SingleSentenceRelevanceVote();
                        singleSentenceVote.sentenceID = s.sentenceID;
                        if (("false").equals(valSentence)) {
                            singleSentenceVote.relevant = "false";
                        } else if (("true").equals(valSentence)) {
                            singleSentenceVote.relevant = "true";
                        } else {
                            throw new IllegalStateException("Annotation value of sentence "
                                    + singleSentenceVote.sentenceID + " equals " + val.getValue());
                        }
                        goldEstimatedLabels.add(singleSentenceVote);
                    }
                    rankedResults.goldEstimatedLabels = goldEstimatedLabels;
                }
            }
        }
        File outputFile = new File(outputDir, queryID);
        FileUtils.writeStringToFile(outputFile, queryResultContainer.toXML(), "utf-8");
        System.out.println("Finished " + outputFile);
    }

    ArrayList<String> annotators = (ArrayList<String>) FileUtils.readLines(new File(file + ".competence"));
    FileWriter fileWriter;
    StringBuilder sb = new StringBuilder();
    for (int j = 0; j < annotatorsIDs.size(); j++) {
        String[] s = annotators.get(0).split("\t");
        Float score = Float.parseFloat(s[j]);
        String turkerID = annotatorsIDs.get(j);
        System.out.println(turkerID + " " + score + " " + countVotesForATurker.get(turkerID));
        sb.append(turkerID).append(" ").append(score).append(" ").append(countVotesForATurker.get(turkerID))
                .append("\n");
    }
    fileWriter = new FileWriter(turkersConfidence);
    fileWriter.append(sb.toString());
    fileWriter.close();
}
From source file:cooccurrence.Omer_Levy.java
public static void main(String args[]) {
    String path = "";
    String writePath = "";
    BufferedReader br = null;
    ArrayList<String> files = new ArrayList<>();
    // reading all the files in the directory; each file is a PPMI matrix for a year
    listFilesForFolder(new File(path), files);
    for (String filePath : files) {
        System.out.println(filePath);
        String fileName = new File(filePath).getName();
        // data structure to store the PPMI matrix in the file
        HashMap<String, HashMap<String, Double>> cooccur = new HashMap<>();
        // reading the file and storing the content in the hashmap
        readFileContents(filePath, cooccur);
        // Because matrices are identified by row and col id, the following
        // lists map id to the corresponding string. Note that the matrix is symmetric.
        ArrayList<String> rowStrings = new ArrayList<>(cooccur.keySet());
        ArrayList<String> colStrings = new ArrayList<>(cooccur.keySet());
        // creating a matrix with the given dimensions, initialized to 0
        RealMatrix matrixR = MatrixUtils.createRealMatrix(rowStrings.size(), colStrings.size());
        // creating the matrices for storing the top rank-d matrices of the SVD
        RealMatrix matrixUd = MatrixUtils.createRealMatrix(D, D);
        RealMatrix matrixVd = MatrixUtils.createRealMatrix(D, D);
        RealMatrix coVarD = MatrixUtils.createRealMatrix(D, D);
        // populating the matrix from the co-occurrence hashmap
        populateMatrixR(matrixR, cooccur, rowStrings, colStrings);
        // computing the SVD
        SingularValueDecomposition svd = new SingularValueDecomposition(matrixR);
        // extracting the components of the SVD factorization
        RealMatrix U = svd.getU();
        RealMatrix V = svd.getV();
        RealMatrix coVariance = svd.getCovariance(-1);
        // list to store indices of the top-D singular values of coVariance;
        // use this with rowStrings (colStrings) to get the corresponding word and context
        ArrayList<Integer> indicesD = new ArrayList<>();
        // extract the top-D singular values from the covariance into coVarD and
        // extract the corresponding columns of U and V into Ud and Vd
        getTopD(U, V, coVariance, matrixUd, matrixVd, coVarD, indicesD);
        // calculate the square root of coVarD
        RealMatrix squareRootCoVarD = squareRoot(coVarD);
        RealMatrix W_svd = matrixUd.multiply(squareRootCoVarD);
        RealMatrix C_svd = matrixVd.multiply(squareRootCoVarD);
    }
}
From source file:com.impetus.kundera.ycsb.benchmark.CouchDBNativeClient.java
public static void main(String[] args) {
    CouchDBNativeClient cli = new CouchDBNativeClient();

    Properties props = new Properties();
    props.setProperty("hosts", "localhost");
    props.setProperty("port", "5984");
    cli.setProperties(props);

    try {
        cli.init();
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(0);
    }

    HashMap<String, ByteIterator> vals = new HashMap<String, ByteIterator>();
    vals.put("age", new StringByteIterator("57"));
    vals.put("middlename", new StringByteIterator("bradley"));
    vals.put("favoritecolor", new StringByteIterator("blue"));
    int res = cli.insert("usertable", "BrianFrankCooper", vals);
    System.out.println("Result of insert: " + res);

    HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
    HashSet<String> fields = new HashSet<String>();
    fields.add("middlename");
    fields.add("age");
    fields.add("favoritecolor");
    res = cli.read("usertable", "BrianFrankCooper", null, result);
    System.out.println("Result of read: " + res);
    for (String s : result.keySet()) {
        System.out.println("[" + s + "]=[" + result.get(s) + "]");
    }

    res = cli.delete("usertable", "BrianFrankCooper");
    System.out.println("Result of delete: " + res);
}
From source file:akori.AKORI.java
public static void main(String[] args) throws IOException, InterruptedException {
    System.out.println("esto es AKORI");
    URL = "http://www.mbauchile.cl";
    PATH = "E:\\NetBeansProjects\\AKORI\\";
    NAME = "mbauchile.png";
    // Extract the DOM tree
    Document doc = Jsoup.connect(URL).timeout(0).get();
    // The Firefox driver supports JavaScript
    WebDriver driver = new FirefoxDriver();
    driver.manage().window().maximize();
    System.out.println(driver.manage().window().getSize().toString());
    System.out.println(driver.manage().window().getPosition().toString());
    int xmax = driver.manage().window().getSize().width;
    int ymax = driver.manage().window().getSize().height;
    // Go to the URL page and take a screenshot
    driver.get(URL);
    File screen = ((TakesScreenshot) driver).getScreenshotAs(OutputType.FILE);
    FileUtils.copyFile(screen, new File(PATH + NAME));
    BufferedImage img = ImageIO.read(new File(PATH + NAME));
    //Graphics2D graph = img.createGraphics();
    BufferedImage img1 = new BufferedImage(xmax, ymax, BufferedImage.TYPE_INT_ARGB);
    Graphics2D graph1 = img.createGraphics();
    double[][] matrix = new double[ymax][xmax];
    BufferedReader in = new BufferedReader(new FileReader("et.txt"));
    String linea;
    double max = 0;
    graph1.drawImage(img, 0, 0, null);
    // count gaze samples per (x, y) coordinate within the time window
    HashMap<String, Integer> lista = new HashMap<String, Integer>();
    int count = 0;
    for (int i = 0; (linea = in.readLine()) != null && i < 10000; ++i) {
        String[] datos = linea.split(",");
        int x = (int) Double.parseDouble(datos[0]);
        int y = (int) Double.parseDouble(datos[2]);
        long time = Double.valueOf(datos[4]).longValue();
        if (x >= xmax || y >= ymax)
            continue;
        if (time < 691215)
            continue;
        if (time > 705648)
            break;
        if (lista.containsKey(x + "," + y))
            lista.put(x + "," + y, lista.get(x + "," + y) + 1);
        else
            lista.put(x + "," + y, 1);
        ++count;
    }
    System.out.println(count);
    in.close();
    Iterator iter = lista.entrySet().iterator();
    Map.Entry e;
    for (String key : lista.keySet()) {
        Integer i = lista.get(key);
        if (max < i)
            max = i;
    }
    System.out.println(max);
    max = 0;
    while (iter.hasNext()) {
        e = (Map.Entry) iter.next();
        String xy = (String) e.getKey();
        String[] datos = xy.split(",");
        int x = Integer.parseInt(datos[0]);
        int y = Integer.parseInt(datos[1]);
        matrix[y][x] += (int) e.getValue();
        double aux;
        if ((aux = normalMatrix(matrix, y, x, ((int) e.getValue()) * 4)) > max) {
            max = aux;
        }
        //normalMatrix(matrix, x, y, 20);
        if (matrix[y][x] > max)
            max = matrix[y][x];
    }
    // paint a heat map over the screenshot
    int A, R, G, B, n;
    for (int i = 0; i < xmax; ++i) {
        for (int j = 0; j < ymax; ++j) {
            if (matrix[j][i] != 0) {
                n = (int) Math.round(matrix[j][i] * 100 / max);
                R = Math.round((255 * n) / 100);
                G = Math.round((255 * (100 - n)) / 100);
                B = 0;
                A = Math.round((255 * n) / 100);
                if (R > 255)
                    R = 255;
                if (R < 0)
                    R = 0;
                if (G > 255)
                    G = 255;
                if (G < 0)
                    G = 0;
                if (R < 50)
                    A = 0;
                graph1.setColor(new Color(R, G, B, A));
                graph1.fillOval(i, j, 1, 1);
            }
        }
    }
    //graph1.dispose();
    ImageIO.write(img, "png", new File("example.png"));
    System.out.println(max);
    graph1.setColor(Color.RED);
    // Extract elements and outline each visible one
    Elements e1 = doc.body().getAllElements();
    int i = 1;
    ArrayList<String> tags = new ArrayList<String>();
    for (Element temp : e1) {
        if (tags.indexOf(temp.tagName()) == -1) {
            tags.add(temp.tagName());
            List<WebElement> query = driver.findElements(By.tagName(temp.tagName()));
            for (WebElement temp1 : query) {
                Point po = temp1.getLocation();
                Dimension d = temp1.getSize();
                if (d.width <= 0 || d.height <= 0 || po.x < 0 || po.y < 0)
                    continue;
                System.out.println(i + " " + temp.nodeName());
                System.out.println("  x: " + po.x + " y: " + po.y);
                System.out.println("  width: " + d.width + " height: " + d.height);
                graph1.draw(new Rectangle(po.x, po.y, d.width, d.height));
                ++i;
            }
        }
    }
    graph1.dispose();
    ImageIO.write(img, "png", new File(PATH + NAME));
    driver.quit();
}
From source file:enrichment.Disambiguate.java
/** prerequisites:
 * cd silk_2.5.3/*_links/
 * cat *.nt|sort -t' ' -k3 > $filename
 *
 * @param args $filename
 * @throws IOException
 * @throws URISyntaxException
 */
public static void main(String[] args) {
    File file = new File(args[0]);
    if (file.isDirectory()) {
        args = file.list(new OnlyExtFilenameFilter("nt"));
    }
    BufferedReader in;
    for (int q = 0; q < args.length; q++) {
        String filename = null;
        if (file.isDirectory()) {
            filename = file.getPath() + File.separator + args[q];
        } else {
            filename = args[q];
        }
        try {
            FileWriter output = new FileWriter(filename + "_disambiguated.nt");
            String prefix = "@prefix rdrel: <http://rdvocab.info/RDARelationshipsWEMI/> .\n"
                    + "@prefix dbpedia: <http://de.dbpedia.org/resource/> .\n"
                    + "@prefix frbr: <http://purl.org/vocab/frbr/core#> .\n"
                    + "@prefix lobid: <http://lobid.org/resource/> .\n"
                    + "@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n"
                    + "@prefix foaf: <http://xmlns.com/foaf/0.1/> .\n"
                    + "@prefix mo: <http://purl.org/ontology/mo/> .\n"
                    + "@prefix wikipedia: <https://de.wikipedia.org/wiki/> .";
            output.append(prefix + "\n\n");
            in = new BufferedReader(new InputStreamReader(new FileInputStream(filename)));
            HashMap<String, HashMap<String, ArrayList<String>>> hm = new HashMap<String, HashMap<String, ArrayList<String>>>();
            String s;
            HashMap<String, ArrayList<String>> hmLobid = new HashMap<String, ArrayList<String>>();
            Stack<String> old_object = new Stack<String>();
            while ((s = in.readLine()) != null) {
                String[] triples = s.split(" ");
                String object = triples[2].substring(1, triples[2].length() - 1);
                if (old_object.size() > 0 && !old_object.firstElement().equals(object)) {
                    hmLobid = new HashMap<String, ArrayList<String>>();
                    old_object = new Stack<String>();
                }
                old_object.push(object);
                String subject = triples[0].substring(1, triples[0].length() - 1);
                System.out.print("\nSubject=" + object);
                System.out.print("\ntriples[2]=" + triples[2]);
                hmLobid.put(subject, getAllCreators(new URI(subject)));
                hm.put(object, hmLobid);
            }
            // get all dbpedia resources
            for (String key_one : hm.keySet()) {
                System.out.print("\n==============\n==== " + key_one + "\n===============");
                int resources_cnt = hm.get(key_one).keySet().size();
                ArrayList<String>[] creators = new ArrayList[resources_cnt];
                HashMap<String, Integer> creators_backed = new HashMap<String, Integer>();
                int x = 0;
                // get all lobid_resources subsumed under the dbpedia resource
                for (String subject_uri : hm.get(key_one).keySet()) {
                    creators[x] = new ArrayList<String>();
                    System.out.print("\n  subject_uri=" + subject_uri);
                    Iterator<String> ite = hm.get(key_one).get(subject_uri).iterator();
                    int y = 0;
                    // get all creators of the lobid resource
                    while (ite.hasNext()) {
                        String creator = ite.next();
                        System.out.print("\n  " + creator);
                        if (creators_backed.containsKey(creator)) {
                            y = creators_backed.get(creator);
                        } else {
                            y = creators_backed.size();
                            creators_backed.put(creator, y);
                        }
                        while (creators[x].size() <= y) {
                            creators[x].add("-");
                        }
                        creators[x].set(y, creator);
                        y++;
                    }
                    x++;
                }
                if (creators_backed.size() == 1) {
                    System.out.println("\n" + "Every resource pointing to " + key_one + " has the same creator!");
                    for (String key_two : hm.get(key_one).keySet()) {
                        output.append("<" + key_two + "> rdrel:workManifested <" + key_one + "> .\n");
                        output.append("<" + key_two + "> mo:wikipedia <"
                                + key_one.replaceAll("dbpedia\\.org/resource", "wikipedia\\.org/wiki") + "> .\n");
                    }
                }
                /*else {
                    for (int a = 0; a < creators.length; a++) {
                        System.out.print(creators[a].toString() + ",");
                    }
                }*/
            }
            output.flush();
            if (output != null) {
                output.close();
            }
        } catch (Exception e) {
            System.out.print("Exception while working on " + filename + ": \n");
            e.printStackTrace(System.out);
        }
    }
}
From source file:com.thed.zapi.cloud.sample.CycleExecutionReportByVersion.java
public static void main(String[] args) throws JSONException, URISyntaxException, ParseException, IOException {
    String API_GET_EXECUTIONS = "{SERVER}/public/rest/api/1.0/executions/search/cycle/";
    String API_GET_CYCLES = "{SERVER}/public/rest/api/1.0/cycles/search?";
    // Delimiter used in CSV file
    final String NEW_LINE_SEPARATOR = "\n";
    final String fileName = "F:\\cycleExecutionReport.csv";

    /** Declare JIRA, Zephyr URL, access and secret keys */
    // JIRA Cloud URL of the instance
    String jiraBaseURL = "https://demo.atlassian.net";
    // Replace zephyr baseurl <ZAPI_Cloud_URL> shared with the user for ZAPI Cloud installation
    String zephyrBaseUrl = "<ZAPI_Cloud_URL>";
    // zephyr accessKey, we can get from Addons >> zapi section
    String accessKey = "YjE2MjdjMGEtNzExNy0zYjY1LWFkMzQtNjcwMDM3OTljFkbWluIGFkbWlu";
    // zephyr secretKey, we can get from Addons >> zapi section
    String secretKey = "qufnbimi96Ob2hq3ISF08yZ8Qw4c1eHGeGlk";

    /** Declare parameter values here */
    String userName = "admin";
    String versionId = "-1";
    String projectId = "10100";
    String projectName = "Support";
    String versionName = "Unscheduled";

    ZFJCloudRestClient client = ZFJCloudRestClient.restBuilder(zephyrBaseUrl, accessKey, secretKey, userName)
            .build();
    /**
     * Get List of Cycles by Project and Version
     */
    final String getCyclesUri = API_GET_CYCLES.replace("{SERVER}", zephyrBaseUrl) + "projectId=" + projectId
            + "&versionId=" + versionId;
    Map<String, String> cycles = getCyclesByProjectVersion(getCyclesUri, client, accessKey);
    // System.out.println("cycles :" + cycles.toString());

    /**
     * Iterating over the Cycles and writing the report to CSV
     */
    FileWriter fileWriter = null;
    System.out.println("Writing CSV file.....");
    try {
        fileWriter = new FileWriter(fileName);

        // Write the CSV file header
        fileWriter.append("Cycle Execution Report By Version and Project");
        fileWriter.append(NEW_LINE_SEPARATOR);
        fileWriter.append("PROJECT:" + "," + projectName);
        fileWriter.append(NEW_LINE_SEPARATOR);
        fileWriter.append("VERSION:" + "," + versionName);
        fileWriter.append(NEW_LINE_SEPARATOR);

        JSONArray executions;
        int totalUnexecutedCount = 0;
        int totalExecutionCount = 0;
        for (String key : cycles.keySet()) {
            int executionCount = 0;
            int unexecutedCount = 0;
            final String getExecutionsUri = API_GET_EXECUTIONS.replace("{SERVER}", zephyrBaseUrl) + key
                    + "?projectId=" + projectId + "&versionId=" + versionId;
            fileWriter.append("Cycle:" + "," + cycles.get(key));
            fileWriter.append(NEW_LINE_SEPARATOR);
            executions = getExecutionsByCycleId(getExecutionsUri, client, accessKey);
            // System.out.println("executions :" + executions.toString());
            HashMap<String, Integer> counter = new HashMap<String, Integer>();

            String[] statusName = new String[executions.length()];
            for (int i = 0; i < executions.length(); i++) {
                JSONObject executionObj = executions.getJSONObject(i).getJSONObject("execution");
                // System.out.println("executionObj " + executionObj.toString());
                JSONObject statusObj = executionObj.getJSONObject("status");
                // System.out.println("statusObj :" + statusObj.toString());
                statusName[i] = statusObj.getString("name");
            }
            if (statusName.length != 0) {
                // tally executions per status name
                for (String a : statusName) {
                    if (counter.containsKey(a)) {
                        int oldValue = counter.get(a);
                        counter.put(a, oldValue + 1);
                    } else {
                        counter.put(a, 1);
                    }
                }
                for (String status : counter.keySet()) {
                    fileWriter.append(" " + "," + " " + "," + status + "," + counter.get(status));
                    fileWriter.append(NEW_LINE_SEPARATOR);
                    if (status.equalsIgnoreCase("UNEXECUTED")) {
                        unexecutedCount += counter.get(status);
                    } else {
                        executionCount += counter.get(status);
                    }
                }
            }
            totalExecutionCount += executionCount;
            totalUnexecutedCount += unexecutedCount;
            fileWriter.append(NEW_LINE_SEPARATOR);
        }
        fileWriter.append(NEW_LINE_SEPARATOR);
        fileWriter.append("TOTAL CYCLES:" + "," + cycles.size());
        fileWriter.append(NEW_LINE_SEPARATOR);
        fileWriter.append("TOTAL EXECUTIONS:" + "," + totalExecutionCount);
        fileWriter.append(NEW_LINE_SEPARATOR);
        fileWriter.append("TOTAL ASSIGNED:" + "," + (totalUnexecutedCount + totalExecutionCount));
        System.out.println("CSV file was created successfully !!!");
    } catch (Exception e) {
        System.out.println("Error in CsvFileWriter !!!");
        e.printStackTrace();
    } finally {
        try {
            fileWriter.flush();
            fileWriter.close();
        } catch (IOException e) {
            System.out.println("Error while flushing/closing fileWriter !!!");
            e.printStackTrace();
        }
    }
}
From source file:eu.fbk.dkm.sectionextractor.PageClassMerger.java
public static void main(String args[]) throws IOException {
    CommandLineWithLogger commandLineWithLogger = new CommandLineWithLogger();
    commandLineWithLogger.addOption(OptionBuilder.withArgName("file").hasArg()
            .withDescription("WikiData ID file").isRequired().withLongOpt("wikidata-id").create("i"));
    commandLineWithLogger.addOption(OptionBuilder.withArgName("file").hasArg()
            .withDescription("Airpedia Person file").isRequired().withLongOpt("airpedia").create("a"));
    commandLineWithLogger.addOption(OptionBuilder.withArgName("file").hasArg().withDescription("Output file")
            .isRequired().withLongOpt("output").create("o"));

    CommandLine commandLine = null;
    try {
        commandLine = commandLineWithLogger.getCommandLine(args);
        PropertyConfigurator.configure(commandLineWithLogger.getLoggerProps());
    } catch (Exception e) {
        System.exit(1);
    }

    String wikiIDFileName = commandLine.getOptionValue("wikidata-id");
    String airpediaFileName = commandLine.getOptionValue("airpedia");
    String outputFileName = commandLine.getOptionValue("output");

    HashMap<Integer, String> wikiIDs = new HashMap<>();
    HashSet<Integer> airpediaClasses = new HashSet<>();

    List<String> strings;

    logger.info("Loading file " + wikiIDFileName);
    strings = Files.readLines(new File(wikiIDFileName), Charsets.UTF_8);
    for (String line : strings) {
        line = line.trim();
        if (line.length() == 0) {
            continue;
        }
        if (line.startsWith("#")) {
            continue;
        }
        String[] parts = line.split("\t");
        if (parts.length < 2) {
            continue;
        }
        int id;
        try {
            id = Integer.parseInt(parts[0]);
        } catch (Exception e) {
            continue;
        }
        wikiIDs.put(id, parts[1]);
    }

    logger.info("Loading file " + airpediaFileName);
    strings = Files.readLines(new File(airpediaFileName), Charsets.UTF_8);
    for (String line : strings) {
        line = line.trim();
        if (line.length() == 0) {
            continue;
        }
        if (line.startsWith("#")) {
            continue;
        }
        String[] parts = line.split("\t");
        if (parts.length < 2) {
            continue;
        }
        int id;
        try {
            id = Integer.parseInt(parts[0]);
        } catch (Exception e) {
            continue;
        }
        airpediaClasses.add(id);
    }

    logger.info("Saving information");
    BufferedWriter writer = new BufferedWriter(new FileWriter(outputFileName));
    for (int i : wikiIDs.keySet()) {
        if (!airpediaClasses.contains(i)) {
            continue;
        }
        writer.append(wikiIDs.get(i)).append("\n");
    }
    writer.close();
}