List of usage examples for java.util.HashMap containsKey
public boolean containsKey(Object key)
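Before the longer examples, a minimal self-contained sketch of the call itself (names are illustrative): containsKey reports key presence in expected constant time, and it is the only reliable presence test when a map may hold null values, because get returns null both for a missing key and for a key mapped to null.

import java.util.HashMap;

public class ContainsKeyDemo {
    public static void main(String[] args) {
        HashMap<String, Integer> ages = new HashMap<>();
        ages.put("alice", 34);
        ages.put("bob", null); // HashMap permits null values

        System.out.println(ages.containsKey("alice")); // true
        System.out.println(ages.containsKey("carol")); // false

        // containsKey distinguishes "mapped to null" from "absent";
        // a get() != null test cannot.
        System.out.println(ages.containsKey("bob")); // true
        System.out.println(ages.get("bob") != null); // false
    }
}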
From source file:org.openbaton.nfvo.core.core.VNFLifecycleOperationGranting.java
@Override
public Map<String, VimInstance> grantLifecycleOperation(
        VirtualNetworkFunctionRecord virtualNetworkFunctionRecord) throws VimException, PluginException {
    Map<String, VimInstance> result = new HashMap<>();
    // HashMap holds how many VNFCInstances are needed to deploy on a specific VimInstance
    HashMap<VimInstance, Integer> countVDUsOnVimInstances = new HashMap<>();
    // Find how many VNFCInstances are needed to deploy on a specific VimInstance
    log.info("Granting Lifecycle Operation for vnfr: " + virtualNetworkFunctionRecord.getName());
    for (VirtualDeploymentUnit vdu : virtualNetworkFunctionRecord.getVdu()) {
        for (String vimName : vdu.getVimInstanceName()) {
            VimInstance vimInstance = null;
            for (VimInstance vi : vimInstanceRepository.findByProjectId(vdu.getProjectId())) {
                if (vimName.equals(vi.getName()))
                    vimInstance = vi;
            }
            if (countVDUsOnVimInstances.containsKey(vimInstance)) {
                countVDUsOnVimInstances.put(vimInstance,
                        countVDUsOnVimInstances.get(vimInstance)
                                + vdu.getVnfc().size() - vdu.getVnfc_instance().size());
            } else {
                log.debug("VimInstance: " + vdu.getVimInstanceName() + "\n VNFC: " + vdu.getVnfc()
                        + "\nVNFCINST: " + vdu.getVnfc_instance());
                countVDUsOnVimInstances.put(vimInstance,
                        vdu.getVnfc().size() - vdu.getVnfc_instance().size());
            }
        }
    }
    // Check if enough resources are available for the deployment
    log.debug("Checking if enough resources are available on the defined VimInstance.");
    for (VirtualDeploymentUnit virtualDeploymentUnit : virtualNetworkFunctionRecord.getVdu()) {
        VimInstance vimInstanceChosen = pickVimInstance(virtualDeploymentUnit.getVimInstanceName(),
                countVDUsOnVimInstances, virtualNetworkFunctionRecord);
        if (vimInstanceChosen != null) {
            log.info("Enough resources are available for deploying VDU in vimInstance: "
                    + vimInstanceChosen.getName());
            result.put(virtualDeploymentUnit.getId(), vimInstanceChosen);
        }
    }
    return result;
}
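The containsKey/get/put sequence above is a hand-rolled counter update. Since Java 8 the same accumulation can be written in one call with Map.merge; a minimal sketch with hypothetical names and values:

import java.util.HashMap;
import java.util.Map;

public class MergeCounterDemo {
    public static void main(String[] args) {
        Map<String, Integer> counts = new HashMap<>();
        int[] deltas = {3, 2, 5};

        // merge(key, delta, Integer::sum) inserts delta when the key is
        // absent and adds it to the existing value otherwise -- exactly
        // the containsKey-then-put branching above, in a single call.
        for (int delta : deltas) {
            counts.merge("vim-1", delta, Integer::sum);
        }
        System.out.println(counts.get("vim-1")); // 10
    }
}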
From source file:com.gisgraphy.domain.geoloc.service.fulltextsearch.FulltextQueryTest.java
@Test
public void testToQueryStringShouldreturnCorrectParamsForSpellChecking() {
    boolean savedSpellCheckingValue = SpellCheckerConfig.activeByDefault;
    try {
        SpellCheckerConfig.activeByDefault = true;
        SpellCheckerConfig.enabled = false;
        Pagination pagination = paginate().from(3).to(10);
        Output output = Output.withFormat(OutputFormat.ATOM).withLanguageCode("FR")
                .withStyle(OutputStyle.SHORT).withIndentation();
        FulltextQuery fulltextQuery = new FulltextQuery("Saint-André", pagination, output,
                ONLY_ADM_PLACETYPE, "fr").withSpellChecking();
        // split parameters
        HashMap<String, String> parameters = GeolocTestHelper.splitURLParams(fulltextQuery.toQueryString(), "&");
        // check parameters
        assertTrue("the fulltextquery should have spellchecking enabled even if spellchecker is disabled",
                fulltextQuery.hasSpellChecking());
        assertTrue("spellchecker should not be listed if spellchecker is disabled",
                !parameters.containsKey(Constants.SPELLCHECKER_ENABLED_PARAMETER));
        // activate spellchecker and re-test
        SpellCheckerConfig.enabled = true;
        fulltextQuery = new FulltextQuery("Saint-André", pagination, output, ONLY_ADM_PLACETYPE, "fr")
                .withSpellChecking();
        parameters = GeolocTestHelper.splitURLParams(fulltextQuery.toQueryString(), "&");
        assertTrue("the fulltextquery should have spellchecking enabled when spellchecker is enabled",
                fulltextQuery.hasSpellChecking());
        assertEquals("spellchecker should be enabled", "true",
                parameters.get(Constants.SPELLCHECKER_ENABLED_PARAMETER));
        assertEquals("spellchecker should be enabled", String.valueOf(SpellCheckerConfig.collateResults),
                parameters.get(Constants.SPELLCHECKER_COLLATE_RESULTS_PARAMETER));
        assertEquals("spellchecker should be enabled", String.valueOf(SpellCheckerConfig.numberOfSuggestion),
                parameters.get(Constants.SPELLCHECKER_NUMBER_OF_SUGGESTION_PARAMETER));
        assertEquals("spellchecker should be enabled", SpellCheckerConfig.spellcheckerDictionaryName.toString(),
                parameters.get(Constants.SPELLCHECKER_DICTIONARY_NAME_PARAMETER));
    } catch (RuntimeException e) {
        fail(e.getMessage());
    } finally {
        SpellCheckerConfig.activeByDefault = savedSpellCheckingValue;
    }
}
From source file:chatbot.Chatbot.java
/** ***********************************************************************************************
 * Sets the values in tf (term frequency per document) and docfreq (count of
 * documents in which a term appears).
 * @param intlineCount is -1 for query
 */
private void processDoc(String doc, Integer intlineCount) {
    if (isNullOrEmpty(doc))
        return;
    String line = removePunctuation(doc);
    line = removeStopWords(line);
    if (isNullOrEmpty(line.trim()))
        return;
    ArrayList<String> tokens = splitToArrayList(line.trim());
    HashSet<String> tokensNoDup = new HashSet<String>();
    HashMap<String, Integer> tdocfreq = new HashMap<String, Integer>();
    for (int i = 0; i < tokens.size(); i++) {
        String token = tokens.get(i);
        int tcount = 0;
        if (tdocfreq.containsKey(token))
            tcount = tdocfreq.get(token);
        tdocfreq.put(token, tcount + 1);
        if (!docfreq.containsKey(token)) {
            docfreq.put(token, 1);
            // mark the token as counted for this document, so a repeated
            // occurrence in the same document is not counted twice
            tokensNoDup.add(token);
        } else {
            if (!tokensNoDup.contains(token)) {
                docfreq.put(token, docfreq.get(token) + 1);
                tokensNoDup.add(token);
            }
        }
    }
    tf.put(intlineCount, tdocfreq);
}
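The tcount bookkeeping above is the classic containsKey-guarded counter. With Map.getOrDefault (Java 8) the lookup and the default collapse into a single expression; a minimal sketch over an illustrative token list:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TermFrequencyDemo {
    public static void main(String[] args) {
        List<String> tokens = Arrays.asList("to", "be", "or", "not", "to", "be");
        Map<String, Integer> tf = new HashMap<>();

        // getOrDefault supplies 0 for unseen tokens, so no containsKey
        // branch is needed before the increment.
        for (String token : tokens) {
            tf.put(token, tf.getOrDefault(token, 0) + 1);
        }
        System.out.println(tf); // e.g. {not=1, to=2, or=1, be=2} (iteration order is unspecified)
    }
}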
From source file:com.acentera.utils.ProjectsHelpers.java
public static JSONObject getProjectImagesAsJson(List<DropletImage> lstImages) throws JsonProcessingException {
    ObjectMapper mapper = new ObjectMapper();
    mapper.configure(SerializationFeature.WRAP_ROOT_VALUE, false);
    mapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false);
    ObjectWriter ow = mapper.writer();
    JSONObject jso = new JSONObject();
    HashMap<String, Distro> hmDistro = new HashMap<String, Distro>();
    Iterator<DropletImage> itrImages = lstImages.iterator();
    long distroId = 1;
    List<DropletImage> realListImages = new ArrayList<DropletImage>();
    while (itrImages.hasNext()) {
        DropletImage img = itrImages.next();
        if (img.getSlug() == null) {
            continue;
        }
        if (!(hmDistro.containsKey(img.getDistribution()))) {
            if (img.getDistribution().compareTo("Arch Linux") == 0) {
                continue;
            }
            Distro d = new Distro();
            d.setId(distroId);
            d.setName(img.getDistribution());
            distroId++;
            hmDistro.put(img.getDistribution(), d);
        }
        if (!(img.getSlug().toLowerCase().startsWith(img.getDistribution().toLowerCase()))) {
            continue;
        }
        realListImages.add(img);
    }
    jso.put("images", mapper.writeValueAsString(realListImages));
    jso.put("distros", mapper.writeValueAsString(hmDistro.values()));
    return jso;
}
From source file:com.github.wellcomer.query3.core.Autocomplete.java
/**
 * Autolearn: scans the query list and, for every query field, rebuilds a TreeSet
 * of distinct values used for autocompletion, then writes each set to its own file.
 * (Original comments were mis-encoded; descriptions reconstructed from the code.)
 * @param queryList the queries to learn from.
 * @param scanModifiedOnly scan only queries modified since the previous run.
 * @param mergePrevious merge previously saved values into the new sets.
 */
public void autolearn(QueryList queryList, boolean scanModifiedOnly, boolean mergePrevious) throws IOException {
    FileTime timestamp;
    long modifiedSince = 0;
    Path timestampFilePath = Paths.get(filePath, ".timestamp");

    if (scanModifiedOnly) {
        try {
            // read the time of the previous scan from the timestamp file
            timestamp = Files.getLastModifiedTime(timestampFilePath);
            modifiedSince = timestamp.toMillis();
        } catch (IOException e) {
            // the timestamp file does not exist yet; create it
            Files.createFile(timestampFilePath);
        }
    }

    // key: field name, value: the distinct values seen for that field
    HashMap<String, TreeSet<String>> fields = new HashMap<>();
    Iterator<Query> queryIterator = queryList.iterator(modifiedSince);

    // collect the distinct values of every field across all queries
    String k, v;
    while (queryIterator.hasNext()) {
        Query query = queryIterator.next();
        for (Map.Entry<String, String> entry : query.entrySet()) {
            k = entry.getKey().toLowerCase();
            v = entry.getValue().trim();
            if (v.length() < 2)
                continue;
            if (!fields.containsKey(k)) {
                TreeSet<String> treeSet = new TreeSet<>();
                try {
                    if (mergePrevious) {
                        // seed the set with the values saved on a previous run
                        List<String> lines = Files.readAllLines(Paths.get(filePath, k), charset);
                        treeSet.addAll(lines);
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                }
                fields.put(k, treeSet);
            }
            TreeSet<String> treeSet = fields.get(k);
            treeSet.add(v);
        }
    }

    // write one file per field containing its sorted distinct values
    for (Map.Entry<String, TreeSet<String>> entry : fields.entrySet()) {
        k = entry.getKey();
        ArrayList<String> lines = new ArrayList<>(fields.get(k));
        FileWriter fileWriter = new FileWriter(Paths.get(filePath, k).toString());
        fileWriter.write(StringUtils.join(lines, System.getProperty("line.separator")));
        fileWriter.flush();
        fileWriter.close();
    }

    try {
        Files.setLastModifiedTime(timestampFilePath, FileTime.fromMillis(System.currentTimeMillis()));
    } catch (IOException e) {
        if (e.getClass().getSimpleName().equals("NoSuchFileException"))
            Files.createFile(timestampFilePath);
        e.printStackTrace();
    }
}
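The containsKey check above exists only to lazily create the per-field TreeSet before adding a value. Map.computeIfAbsent (Java 8) performs that initialization inline; a minimal sketch of the same grouping idea over hypothetical field/value pairs:

import java.util.HashMap;
import java.util.Map;
import java.util.TreeSet;

public class ComputeIfAbsentDemo {
    public static void main(String[] args) {
        String[][] pairs = {{"city", "Berlin"}, {"city", "Ankara"}, {"name", "Ada"}};
        Map<String, TreeSet<String>> fields = new HashMap<>();

        // computeIfAbsent creates and stores the TreeSet on first access,
        // so the containsKey branch disappears entirely.
        for (String[] pair : pairs) {
            fields.computeIfAbsent(pair[0], key -> new TreeSet<>()).add(pair[1]);
        }
        System.out.println(fields); // e.g. {city=[Ankara, Berlin], name=[Ada]}
    }
}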
From source file:eu.sisob.uma.extractors.adhoc.email.EmailExtractor.java
/**
 *
 * @param input_file
 * @param data_dir
 * @param output_file
 * @param norepeat_output_file
 * @param notfound_output_file
 * @param notfound_norepeat_output_file
 * @param filters
 * @param error_sw
 */
public static void extract_emails(File input_file, File data_dir, File output_file, File norepeat_output_file,
        File notfound_output_file, File notfound_norepeat_output_file, List<String> filters,
        StringWriter error_sw) {
    CSVReader reader = null;
    try {
        reader = new CSVReader(new FileReader(input_file), CSV_SEPARATOR);
    } catch (FileNotFoundException ex) {
        Logger.getRootLogger().error("Error reading " + input_file.getName() + " - " + ex.toString());
    }
    int idStaffIdentifier = -1;
    int idName = -1;
    int idFirstName = -1;
    int idLastName = -1;
    int idInitials = -1;
    int idUnitOfAssessment_Description = -1;
    int idInstitutionName = -1;
    int idWebAddress = -1;
    int idResearchGroupDescription = -1;
    int idResearcherWebAddress = -1;
    int idResearcherWebAddressType = -1;
    int idResearcherWebAddressExt = -1;
    int idScoreUrl = -1;
    String filter_literal = "(";
    for (String filter : filters) {
        filter_literal += filter + ",";
    }
    filter_literal += ")";
    String[] nextLine;
    try {
        if ((nextLine = reader.readNext()) != null) {
            // Locate indexes
            for (int i = 0; i < nextLine.length; i++) {
                String column_name = nextLine[i];
                if (column_name.equals(FileFormatConversor.CSV_COL_ID))
                    idStaffIdentifier = i;
                else if (column_name.equals(FileFormatConversor.CSV_COL_NAME))
                    idName = i;
                else if (column_name.equals(FileFormatConversor.CSV_COL_FIRSTNAME))
                    idFirstName = i;
                else if (column_name.equals(FileFormatConversor.CSV_COL_LASTNAME))
                    idLastName = i;
                else if (column_name.equals(FileFormatConversor.CSV_COL_INITIALS))
                    idInitials = i;
                else if (column_name.equals(FileFormatConversor.CSV_COL_SUBJECT))
                    idUnitOfAssessment_Description = i;
                else if (column_name.equals(FileFormatConversor.CSV_COL_INSTITUTION_NAME))
                    idInstitutionName = i;
                else if (column_name.equals(FileFormatConversor.CSV_COL_INSTITUTION_URL))
                    idWebAddress = i;
                else if (column_name.equals(FileFormatConversor.CSV_COL_RESEARCHER_PAGE_URL))
                    idResearcherWebAddress = i;
                else if (column_name.equals(FileFormatConversor.CSV_COL_RESEARCHER_PAGE_TYPE))
                    idResearcherWebAddressType = i;
                else if (column_name.equals(FileFormatConversor.CSV_COL_RESEARCHER_PAGE_EXT))
                    idResearcherWebAddressExt = i;
                else if (column_name.equals(FileFormatConversor.CSV_COL_SCORE_URL))
                    idScoreUrl = i;
            }
        }
    } catch (Exception ex) {
        String error_msg = "Error reading headers of " + input_file.getName();
        Logger.getRootLogger().error(error_msg + " - " + ex.toString());
        if (error_sw != null)
            error_sw.append(error_msg + "\r\n");
        return;
    }
    if (idResearcherWebAddress != -1 && idStaffIdentifier != -1 && idLastName != -1 && idInitials != -1) {
        //if(!test_only_output)
        {
            try {
                String header = "";
                header += "\"" + FileFormatConversor.CSV_COL_ID + "\"" + CSV_SEPARATOR;
                header += "\"" + FileFormatConversor.CSV_COL_LASTNAME + "\"" + CSV_SEPARATOR;
                header += "\"" + FileFormatConversor.CSV_COL_INITIALS + "\"" + CSV_SEPARATOR;
                if (idFirstName != -1)
                    header += "\"" + FileFormatConversor.CSV_COL_INITIALS + "\"" + CSV_SEPARATOR;
                if (idName != -1)
                    header += "\"" + FileFormatConversor.CSV_COL_NAME + "\"" + CSV_SEPARATOR;
                header += "\"" + FileFormatConversor.CSV_COL_EMAIL + "\"" + CSV_SEPARATOR;
                if (idInstitutionName != -1)
                    header += "\"" + FileFormatConversor.CSV_COL_INSTITUTION_NAME + "\"" + CSV_SEPARATOR;
                if (idWebAddress != -1)
                    header += "\"" + FileFormatConversor.CSV_COL_INSTITUTION_URL + "\"" + CSV_SEPARATOR;
                header += "\"" + FileFormatConversor.CSV_COL_RESEARCHER_PAGE_URL + "\"" + CSV_SEPARATOR;
                if (idResearcherWebAddressExt != -1)
                    header += "\"" + FileFormatConversor.CSV_COL_RESEARCHER_PAGE_EXT + "\"" + CSV_SEPARATOR;
                if (idResearcherWebAddressType != -1)
                    header += "\"" + FileFormatConversor.CSV_COL_RESEARCHER_PAGE_TYPE + "\"" + CSV_SEPARATOR;
                if (idScoreUrl != -1)
                    header += "\"" + FileFormatConversor.CSV_COL_SCORE_URL + "\"" + CSV_SEPARATOR;
                header += "\"" + FileFormatConversor.CSV_COL_SCORE_EMAIL + "\"";
                header += "\r\n";
                FileUtils.write(output_file, header, "UTF-8", false);
                header = "";
                header += "\"" + FileFormatConversor.CSV_COL_ID + "\"" + CSV_SEPARATOR;
                header += "\"" + FileFormatConversor.CSV_COL_LASTNAME + "\"" + CSV_SEPARATOR;
                header += "\"" + FileFormatConversor.CSV_COL_INITIALS + "\"" + CSV_SEPARATOR;
                if (idFirstName != -1)
                    header += "\"" + FileFormatConversor.CSV_COL_INITIALS + "\"" + CSV_SEPARATOR;
                if (idName != -1)
                    header += "\"" + FileFormatConversor.CSV_COL_NAME + "\"" + CSV_SEPARATOR;
                if (idInstitutionName != -1)
                    header += "\"" + FileFormatConversor.CSV_COL_INSTITUTION_NAME + "\"" + CSV_SEPARATOR;
                if (idWebAddress != -1)
                    header += "\"" + FileFormatConversor.CSV_COL_INSTITUTION_URL + "\"" + CSV_SEPARATOR;
                header += "\"" + FileFormatConversor.CSV_COL_RESEARCHER_PAGE_URL + "\"" + CSV_SEPARATOR;
                if (idResearcherWebAddressExt != -1)
                    header += "\"" + FileFormatConversor.CSV_COL_RESEARCHER_PAGE_EXT + "\"" + CSV_SEPARATOR;
                if (idResearcherWebAddressType != -1)
                    header += "\"" + FileFormatConversor.CSV_COL_RESEARCHER_PAGE_TYPE + "\"" + CSV_SEPARATOR;
                if (idScoreUrl != -1)
                    header += "\"" + FileFormatConversor.CSV_COL_SCORE_URL + "\"";
                header += "\r\n";
                FileUtils.write(notfound_output_file, header, "UTF-8", false);
            } catch (IOException ex) {
                Logger.getLogger("root").error(ex.toString());
                error_sw.append("Error creating output files\r\n");
            }
        }
        try {
            //if(!test_only_output)
            {
                Pattern p1 = Pattern.compile("([a-zA-Z0-9#._-]+)+");
                while ((nextLine = reader.readNext()) != null) {
                    nextLine[idLastName] = nextLine[idLastName].replaceAll("[^a-zA-Z]", " ").toLowerCase();
                    nextLine[idInitials] = nextLine[idInitials].replaceAll("[^a-zA-Z]", " ").toLowerCase();
                    if (idFirstName != -1)
                        nextLine[idFirstName] = nextLine[idFirstName].replaceAll("[^a-zA-Z]", " ").toLowerCase();
                    if (idName != -1)
                        nextLine[idName] = nextLine[idName].replaceAll("[^a-zA-Z]", " ").toLowerCase();
                    String content = "";
                    String researcher_page_url = nextLine[idResearcherWebAddress];
                    Logger.getLogger("root").info("Go with " + researcher_page_url);
                    if (p1.matcher(researcher_page_url).matches()) {
                        File f = new File(data_dir, researcher_page_url);
                        if (researcher_page_url.endsWith(".doc") || researcher_page_url.endsWith(".docx")) {
                            Logger.getLogger("root").error("The document " + researcher_page_url + " could not be loaded");
                            error_sw.append("The document " + researcher_page_url + " could not be loaded");
                        } else if (researcher_page_url.endsWith(".pdf")) {
                            PDFParser parser = null;
                            PDFTextStripper pdfStripper = null;
                            PDDocument pdDoc = null;
                            COSDocument cosDoc = null;
                            try {
                                parser = new PDFParser(new FileInputStream(f));
                            } catch (IOException e) {
                                Logger.getLogger("root").error(e.toString());
                                error_sw.append("Unable to open PDF called " + researcher_page_url);
                            }
                            if (parser != null) {
                                try {
                                    parser.parse();
                                    cosDoc = parser.getDocument();
                                    pdfStripper = new PDFTextStripper();
                                    pdDoc = new PDDocument(cosDoc);
                                    pdfStripper.setStartPage(1);
                                    pdfStripper.setEndPage(2);
                                    content = pdfStripper.getText(pdDoc);
                                } catch (Exception e) {
                                    Logger.getLogger("root").error(e.toString());
                                    error_sw.append("An exception occurred in parsing the PDF Document.");
                                } finally {
                                    try {
                                        if (cosDoc != null)
                                            cosDoc.close();
                                        if (pdDoc != null)
                                            pdDoc.close();
                                    } catch (Exception e) {
                                        Logger.getLogger("root").error(e.toString());
                                    }
                                }
                            }
                        }
                    } else {
                        try {
                            Logger.getRootLogger().info("Reading " + researcher_page_url);
                            File temp;
                            temp = File.createTempFile("temp-file-name", ".tmp");
                            URL fetched_url = Downloader.fetchURL(researcher_page_url);
                            FileUtils.copyURLToFile(fetched_url, temp);
                            long sizeInBytes = temp.length();
                            long sizeInMb = sizeInBytes / (1024 * 1024);
                            if (sizeInMb > 100) {
                                content = "";
                            } else {
                                content = FileUtils.readFileToString(temp);
                                temp.delete();
                            }
                        } catch (Exception ex) {
                            Logger.getLogger("root").error("" + researcher_page_url + " could not be loaded", ex);
                            error_sw.append("" + researcher_page_url + " could not be loaded");
                            content = "";
                        } catch (java.lang.OutOfMemoryError ex2) {
                            Logger.getLogger("root").error(researcher_page_url + " could not be loaded (Jsoup OutOfMemoryError)", ex2);
                            error_sw.append("" + researcher_page_url + " could not be loaded");
                            content = "";
                        }
                    }
                    if (!content.equals("")) {
                        //final String RE_MAIL = "([\\w\\-]([\\.\\w])+[\\w]+@([\\w\\-]+\\.)+[A-Za-z]{2,4})";
                        final String RE_MAIL = "([\\w\\-]([\\.\\w]){1,16}[\\w]{1,16}@([\\w\\-]{1,16}\\.){1,16}[A-Za-z]{2,4})";
                        Pattern p = Pattern.compile(RE_MAIL);
                        Matcher m = p.matcher(content);
                        List<String> emails = new ArrayList<String>();
                        while (m.find()) {
                            String email = m.group(1);
                            if (!emails.contains(email)) {
                                // Apply filter
                                boolean pass = true;
                                if (filters.size() > 0) {
                                    pass = false;
                                    for (String filter : filters) {
                                        String filter2 = filter.replace("*", ".*?");
                                        Pattern pattern = Pattern.compile(filter2);
                                        if (pattern.matcher(email).matches()) {
                                            pass = true;
                                            break;
                                        }
                                    }
                                }
                                if (pass) {
                                    Logger.getRootLogger().info(researcher_page_url + " => " + email + " PASS FILTER! " + filter_literal);
                                    emails.add(email);
                                } else {
                                    Logger.getRootLogger().info(researcher_page_url + " => " + email + " REFUSE BY FILTER! " + filter_literal);
                                }
                            }
                        }
                        if (emails.size() < MAX_MAIL_PER_PAGE) {
                            for (String email : emails) {
                                String score_email = "";
                                String lastname = nextLine[idLastName];
                                if (lastname.length() > 5)
                                    lastname = lastname.substring(0, 6);
                                if (email.toLowerCase().contains(lastname)) {
                                    score_email = "A";
                                } else {
                                    int temp_id = idFirstName;
                                    if (temp_id == -1)
                                        temp_id = idInitials;
                                    if (!nextLine[idInitials].trim().equals("")) {
                                        String firstname = nextLine[temp_id].split(" ")[0];
                                        if (firstname.length() > 5)
                                            firstname = firstname.substring(0, 5);
                                        if (firstname.length() > 1) {
                                            if (email.toLowerCase().contains(firstname)) {
                                                score_email = "A";
                                            }
                                        }
                                    }
                                    if (score_email.equals("")) {
                                        String initials = "";
                                        String[] arr = nextLine[temp_id].split(" ");
                                        for (int i = 0; i < arr.length; i++) {
                                            if (arr[i].length() > 0)
                                                initials += arr[i].charAt(0);
                                        }
                                        initials += nextLine[idLastName].charAt(0);
                                        if (email.toLowerCase().contains(initials)) {
                                            score_email = "B";
                                        } else {
                                            score_email = "Z";
                                        }
                                    }
                                }
                                String result = "";
                                result += "\"" + nextLine[idStaffIdentifier] + "\"" + CSV_SEPARATOR;
                                result += "\"" + nextLine[idLastName] + "\"" + CSV_SEPARATOR;
                                result += "\"" + nextLine[idInitials] + "\"" + CSV_SEPARATOR;
                                if (idFirstName != -1)
                                    result += "\"" + nextLine[idFirstName] + "\"" + CSV_SEPARATOR;
                                if (idName != -1)
                                    result += "\"" + nextLine[idName] + "\"" + CSV_SEPARATOR;
                                result += "\"" + email + "\"" + CSV_SEPARATOR;
                                if (idInstitutionName != -1)
                                    result += "\"" + nextLine[idInstitutionName] + "\"" + CSV_SEPARATOR;
                                if (idWebAddress != -1)
                                    result += "\"" + nextLine[idWebAddress] + "\"" + CSV_SEPARATOR;
                                result += "\"" + nextLine[idResearcherWebAddress] + "\"" + CSV_SEPARATOR;
                                if (idResearcherWebAddressExt != -1)
                                    result += "\"" + nextLine[idResearcherWebAddressExt] + "\"" + CSV_SEPARATOR;
                                if (idResearcherWebAddressType != -1)
                                    result += "\"" + nextLine[idResearcherWebAddressType] + "\"" + CSV_SEPARATOR;
                                if (idScoreUrl != -1)
                                    result += "\"" + nextLine[idScoreUrl] + "\"" + CSV_SEPARATOR;
                                result += "\"" + score_email + "\"";
                                result += "\r\n";
                                try {
                                    FileUtils.write(output_file, result, "UTF-8", true);
                                } catch (IOException ex) {
                                    Logger.getLogger("root").error(ex.toString());
                                }
                            }
                        } else {
                            content = "";
                        }
                        if (emails.size() == 0)
                            content = "";
                    }
                    if (content.isEmpty()) { // was: content == "" (reference comparison, not value comparison)
                        String result = "";
                        result += "\"" + nextLine[idStaffIdentifier] + "\"" + CSV_SEPARATOR;
                        result += "\"" + nextLine[idLastName] + "\"" + CSV_SEPARATOR;
                        result += "\"" + nextLine[idInitials] + "\"" + CSV_SEPARATOR;
                        if (idFirstName != -1)
                            result += "\"" + nextLine[idFirstName] + "\"" + CSV_SEPARATOR;
                        if (idName != -1)
                            result += "\"" + nextLine[idName] + "\"" + CSV_SEPARATOR;
                        if (idInstitutionName != -1)
                            result += "\"" + nextLine[idInstitutionName] + "\"" + CSV_SEPARATOR;
                        if (idWebAddress != -1)
                            result += "\"" + nextLine[idWebAddress] + "\"" + CSV_SEPARATOR;
                        result += "\"" + nextLine[idResearcherWebAddress] + "\"" + CSV_SEPARATOR;
                        if (idResearcherWebAddressExt != -1)
                            result += "\"" + nextLine[idResearcherWebAddressExt] + "\"" + CSV_SEPARATOR;
                        if (idResearcherWebAddressType != -1)
                            result += "\"" + nextLine[idResearcherWebAddressType] + "\"" + CSV_SEPARATOR;
                        if (idScoreUrl != -1)
                            result += "\"" + nextLine[idScoreUrl] + "\"";
                        result += "\r\n";
                        try {
                            FileUtils.write(notfound_output_file, result, "UTF-8", true);
                        } catch (IOException ex) {
                            Logger.getLogger("root").error(ex.toString());
                        }
                    }
                }
                reader.close();
            }
            Logger.getLogger("root").info("Applying deduplication algorithm - Counting duplications");
            boolean finish = false;
            String alternate_filename_1 = "file1";
            String alternate_filename_2 = "file2";
            File alternate_file_s = new File(output_file.getParentFile(), alternate_filename_1);
            File alternate_file_d = new File(output_file.getParentFile(), alternate_filename_2);
            FileUtils.copyFile(output_file, alternate_file_s);
            //FileUtils.write(output_file_wor_notfound, "", "UTF-8", false);
            FileUtils.write(norepeat_output_file, "", "UTF-8", false);
            while (!finish) {
                reader = null;
                try {
                    reader = new CSVReader(new FileReader(alternate_file_s), CSV_SEPARATOR);
                } catch (FileNotFoundException ex) {
                    Logger.getRootLogger().error("Error reading " + input_file.getName() + " - " + ex.toString());
                }
                HashMap<String, Integer> count_dictionary = new HashMap<String, Integer>();
                int idEmail = 3;
                if (idFirstName != -1)
                    idEmail++;
                if (idName != -1)
                    idEmail++;
                try {
                    FileUtils.write(alternate_file_d, "", "UTF-8", false);
                } catch (IOException ex) {
                    Logger.getLogger("root").error(ex.toString());
                }
                finish = true;
                while ((nextLine = reader.readNext()) != null) {
                    Integer count = 1;
                    if (count_dictionary.containsKey(nextLine[idEmail].toString()))
                        count = count_dictionary.get(nextLine[idEmail].toString());
                    else {
                        if (count_dictionary.size() < max_in_mem) {
                            count_dictionary.put(nextLine[idEmail].toString(), count + 1);
                        } else {
                            try {
                                for (int i = 0; i < nextLine.length; i++)
                                    nextLine[i] = "\"" + nextLine[i] + "\"";
                                FileUtils.write(alternate_file_d,
                                        StringUtil.join(Arrays.asList(nextLine), String.valueOf(CSV_SEPARATOR)) + "\r\n",
                                        "UTF-8", true);
                                finish = false;
                            } catch (IOException ex) {
                                Logger.getLogger("root").error(ex.toString());
                            }
                        }
                    }
                }
                reader.close();
                Logger.getLogger("root").info("Applying deduplication algorithm - Removing duplications");
                reader = null;
                try {
                    reader = new CSVReader(new FileReader(alternate_file_s), CSV_SEPARATOR);
                } catch (FileNotFoundException ex) {
                    Logger.getRootLogger().error("Error reading " + input_file.getName() + " - " + ex.toString());
                }
                String previous_id = "%previous%";
                String previous_email = "%previous_email%";
                List<String[]> cache = new ArrayList<String[]>();
                while ((nextLine = reader.readNext()) != null) {
                    String id = nextLine[idStaffIdentifier].toString();
                    if (previous_id.equals(id)) {
                        cache.add(nextLine);
                        previous_id = id;
                    } else {
                        // Process
                        String[] winner_line = null;
                        String max_score = "Z";
                        for (String[] act_line : cache) {
                            String act_score = "Z";
                            try {
                                act_score = act_line[act_line.length - 1];
                            } catch (Exception ex) {
                            }
                            String email = act_line[idEmail].toString();
                            if (count_dictionary.containsKey(email) && count_dictionary.get(email) > 0) {
                                if (max_score.compareTo(act_score) > 0 && !act_score.equals("")) {
                                    winner_line = act_line;
                                    max_score = act_score;
                                }
                                count_dictionary.put(email, 0);
                            }
                        }
                        if (winner_line != null) {
                            try {
                                for (int i = 0; i < winner_line.length; i++)
                                    winner_line[i] = "\"" + winner_line[i] + "\"";
                                FileUtils.write(norepeat_output_file,
                                        StringUtil.join(Arrays.asList(winner_line), String.valueOf(CSV_SEPARATOR)) + "\r\n",
                                        "UTF-8", true);
                            } catch (IOException ex) {
                                Logger.getLogger("root").error(ex.toString());
                            }
                        } else {
                            // try {
                            //     FileUtils.write(output_file_wor_notfound, StringUtil.join(Arrays.asList(winner_line), String.valueOf(CSV_SEPARATOR)) + "\r\n", "UTF-8", true);
                            // } catch (IOException ex) {
                            //     Logger.getLogger("root").error(ex.toString());
                            // }
                        }
                        cache.clear();
                        cache.add(nextLine);
                        previous_id = id;
                    }
                }
                // Process
                if (cache.size() > 0) {
                    String[] winner_line = null;
                    String max_score = "Z";
                    for (String[] act_line : cache) {
                        String act_score = "Z";
                        try {
                            act_score = (act_line[act_line.length - 1]);
                        } catch (Exception ex) {
                        }
                        String email = act_line[idEmail];
                        if (count_dictionary.containsKey(email) && count_dictionary.get(email) > 0) {
                            if (max_score.compareTo(act_score) > 0 && !act_score.equals("")) {
                                winner_line = act_line;
                                max_score = act_score;
                            }
                            count_dictionary.put(email, 0);
                        }
                    }
                    if (winner_line != null) {
                        try {
                            for (int i = 0; i < winner_line.length; i++)
                                winner_line[i] = "\"" + winner_line[i] + "\"";
                            FileUtils.write(norepeat_output_file,
                                    StringUtil.join(Arrays.asList(winner_line), String.valueOf(CSV_SEPARATOR)) + "\r\n",
                                    "UTF-8", true);
                        } catch (IOException ex) {
                            Logger.getLogger("root").error(ex.toString());
                        }
                    } else {
                        // try {
                        //     FileUtils.write(output_file_wor_notfound, StringUtil.join(Arrays.asList(winner_line), String.valueOf(CSV_SEPARATOR)) + "\r\n", "UTF-8", true);
                        // } catch (IOException ex) {
                        //     Logger.getLogger("root").error(ex.toString());
                        // }
                    }
                }
                reader.close();
                // if (!finish)
                {
                    FileUtils.copyFile(alternate_file_d, alternate_file_s);
                    alternate_file_s = new File(output_file.getParentFile(), alternate_filename_1);
                    alternate_file_d = new File(output_file.getParentFile(), alternate_filename_2);
                }
            }
            FileUtils.forceDelete(alternate_file_s);
            FileUtils.forceDelete(alternate_file_d);
            Logger.getLogger("root").info("Applying deduplication algorithm - Finish");
        } catch (Exception ex) {
            String error_msg = "Error extracting emails from extractor " + input_file.getName();
            Logger.getRootLogger().error(error_msg + " - " + ex.toString());
            if (error_sw != null)
                error_sw.append(error_msg + "\r\n");
            return;
        }
    }
}
From source file:GeneticAlgorithm.SystemToSolve.java
public void solve_ODE_model(double[] parameters, String mem_address)
        throws ModelOverdeterminedException, InstantiationException, IllegalAccessException,
        IllegalArgumentException, NoSuchMethodException, XMLStreamException, IOException {
    if (SteadyState_OR_TimeCourse_data == true) {
        // solve model for steady state values
        for (int i = 0; i < conditions_list.size(); i++) {
            HashMap Enz_Conc_to_Update = conditions_list.get(i).get_proteins_info();
            HashMap BC_true_Met_to_Update = conditions_list.get(i).get_BCTrue_metabolites_info();
            if (BC_true_Met_to_Update.size() > 0) {
                modelreactions.modExtMet(BC_true_Met_to_Update);
                for (Compound compound : Compounds) {
                    if (compound.getBoundaryCondition() == true) {
                        if (BC_true_Met_to_Update.containsKey(compound.getID()) == true) {
                            compound.setConcentration((double) BC_true_Met_to_Update.get(compound.getID()));
                        }
                    }
                }
            }
            if (Enz_Conc_to_Update.size() > 0) {
                modelreactions.modEnzymes(Enz_Conc_to_Update);
                for (ModelReaction rxn : Reactions) {
                    if (Enz_Conc_to_Update.containsKey(rxn.getEnzyme().getID()) == true) {
                        rxn.getEnzyme().setConcentration((double) Enz_Conc_to_Update.get(rxn.getEnzyme().getID()));
                    }
                }
            }
            String tmpname = mem_address;
            tmpname += "_" + i + ".xml";
            File tmpxml = new File(tmpname);
            if (tmpxml.exists()) {
                String rando = Double.toHexString(RN(0.01, 10) * RN(0.01, 10)).replaceAll("\\.", "_");
                tmpname += rando;
                tmpxml = new File(tmpname);
            }
            if (BC_true_Met_to_Update.size() > 0 || Enz_Conc_to_Update.size() > 0) {
                modelreactions.exportsbml(tmpxml);
                link = tmpname;
            } else {
                link = originallink;
            }
            sosLIBssSolver steadystateresults = new sosLIBssSolver(Compounds, Reactions, parameters, link);
            if (steadystateresults.solveSS() == true) {
                conditions_list.get(i).store_Solved_metmap(steadystateresults.getSolvedMMap());
                conditions_list.get(i).store_Solved_fluxmap(steadystateresults.getSolvedFMap());
                good_OR_bad_solution_forModel = true;
            } else {
                good_OR_bad_solution_forModel = false;
            }
            tmpxml.delete();
        }
    } else {
        // solve model for time series output/values
        double[] time = data.returnTime();
        double[] parameter = parameters;
        int activeCompNum = 0;
        for (Compound comp : Compounds) {
            if (comp.getBoundaryCondition() == false) {
                activeCompNum++;
            }
        }
        String[] paranames = modelreactions.getParameterNames();
        String[] reactionID = modelreactions.getReactionID();
        String[] variables = new String[activeCompNum];
        String[] reactionID2 = modelreactions.getReactionID2();
        int count = 0;
        for (Compound comp : Compounds) {
            if (comp.getBoundaryCondition() == false) {
                variables[count] = comp.getID();
                count++;
            }
        }
        TCodesolve ode = new TCodesolve(link, variables, getParametersCount(), paranames, reactionID, reactionID2);
        double output[][] = ode.runsolver(parameter, time);
        for (int j = 1; j < variables.length + 1 + reactionID2.length; j++) {
            double[] temparray = new double[time.length];
            for (int k = 0; k < time.length; k++) {
                temparray[k] = output[k][j];
            }
            if ((j - 1) < activeCompNum) {
                TCestMetMap.put(variables[j - 1], temparray);
            } else {
                TCestFluxMap.put(reactionID2[j - activeCompNum - 1], temparray);
            }
        }
    }
}
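Each containsKey above is immediately followed by a get on the same key, so every hit costs two hash lookups. When the map is known never to store null values, a single get with a null check does the same work once; a minimal sketch with hypothetical names:

import java.util.HashMap;
import java.util.Map;

public class SingleLookupDemo {
    public static void main(String[] args) {
        Map<String, Double> concentrations = new HashMap<>();
        concentrations.put("glucose", 5.5);

        // One lookup instead of containsKey(...) followed by get(...).
        // Safe only because this map never stores null values.
        Double c = concentrations.get("glucose");
        if (c != null) {
            System.out.println("updated concentration: " + c);
        }
    }
}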
From source file:edu.cornell.mannlib.vitro.webapp.utils.dataGetter.IndividualsForClassesDataGetter.java
/**
 * Get the classes and classes to restrict by - if any
 */
protected Map<String, Object> getClassIntersectionsMap(Model displayModel) {
    QuerySolutionMap initBindings = new QuerySolutionMap();
    initBindings.add("dataGetterUri", ResourceFactory.createResource(this.dataGetterURI));
    try {
        QueryExecution qexec = QueryExecutionFactory.create(dataGetterQuery, displayModel, initBindings);
        Map<String, Object> classesAndRestrictions = new HashMap<String, Object>();
        List<String> classes = new ArrayList<String>();
        displayModel.enterCriticalSection(Lock.READ);
        try {
            List<String> restrictClasses = new ArrayList<String>();
            HashMap<String, String> restrictClassesPresentMap = new HashMap<String, String>();
            ResultSet resultSet = qexec.execSelect();
            while (resultSet.hasNext()) {
                QuerySolution soln = resultSet.next();
                classes.add(DataGetterUtils.nodeToString(soln.get("class")));
                String restrictClass = DataGetterUtils.nodeToString(soln.get("restrictClass"));
                if (!restrictClass.isEmpty() && !restrictClassesPresentMap.containsKey(restrictClass)) {
                    restrictClasses.add(restrictClass);
                    restrictClassesPresentMap.put(restrictClass, "true");
                }
            }
            if (classes.size() == 0) {
                log.debug("No classes defined in display model for " + this.dataGetterURI);
                this.classIntersectionsMap = null;
            }
            classesAndRestrictions.put("classes", classes);
            classesAndRestrictions.put("restrictClasses", restrictClasses);
            return classesAndRestrictions;
        } finally {
            qexec.close();
        }
    } finally {
        displayModel.leaveCriticalSection();
    }
}
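restrictClassesPresentMap above is a HashMap used purely as a membership marker (every key maps to "true"). A HashSet expresses that intent directly, since Set.add reports whether the element was new; a minimal sketch with hypothetical URIs:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class SeenSetDemo {
    public static void main(String[] args) {
        String[] incoming = {"ex:Book", "ex:Article", "ex:Book"};
        List<String> restrictClasses = new ArrayList<>();
        Set<String> seen = new HashSet<>();

        // Set.add returns false when the element is already present, which
        // replaces the containsKey check plus the dummy put of "true".
        for (String uri : incoming) {
            if (!uri.isEmpty() && seen.add(uri)) {
                restrictClasses.add(uri);
            }
        }
        System.out.println(restrictClasses); // [ex:Book, ex:Article]
    }
}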
From source file:com.vizury.videocache.product.ProductDetail.java
private ProductDetail[] getProductDataFromList(CacheConnect cache, String productList,
        HashMap<String, ProductDetail> recommendedProductDetail, int numberOfRecommendedProducts) {
    String[] productIdArray = productList.replace("\"", "").split(",");
    List<ProductDetail> productDetailList = new ArrayList<>();
    List<ProductDetail> requestProductDetailList = new ArrayList<>();
    for (String pid : productIdArray) {
        if (!pid.equals(productId)) {
            if (!recommendedProductDetail.containsKey(namespace + "_1_" + pid)) {
                requestProductDetailList.add(new ProductDetail(pid, namespace));
            }
            productDetailList.add(new ProductDetail(pid, namespace));
        }
    }
    Map<String, Object> productDetailMap = cache.getBulk(requestProductDetailList, "_1_");
    if (productDetailMap != null) {
        ListIterator iterator = productDetailList.listIterator();
        while (iterator.hasNext()) {
            ProductDetail productDetail = (ProductDetail) iterator.next();
            if (productDetailMap.containsKey(namespace + "_1_" + productDetail.getProductId())) {
                productDetail.jsonToProductDetail(
                        (String) productDetailMap.get(namespace + "_1_" + productDetail.getProductId()));
                recommendedProductDetail.put(namespace + "_1_" + productDetail.getProductId(), productDetail);
            } else {
                iterator.set(recommendedProductDetail.get(namespace + "_1_" + productDetail.getProductId()));
            }
        }
    } else {
        return null;
    }
    if (productDetailList.size() <= numberOfRecommendedProducts) {
        return productDetailList.toArray(new ProductDetail[productDetailList.size()]);
    } else {
        Random rand = new Random();
        int randomIndex;
        int index;
        ProductDetail[] productDetail = new ProductDetail[numberOfRecommendedProducts];
        for (index = 0; index < numberOfRecommendedProducts; index++) {
            randomIndex = rand.nextInt(productDetailList.size());
            productDetail[index] = productDetailList.get(randomIndex);
            productDetailList.remove(randomIndex);
        }
        return productDetail;
    }
}
From source file:edu.cornell.mannlib.vitro.webapp.controller.freemarker.ManagePublicationsForIndividualController.java
HashMap<String, List<Map<String, String>>> getPublications(String subjectUri, VitroRequest vreq) {
    VClassDao vcDao = vreq.getUnfilteredAssertionsWebappDaoFactory().getVClassDao();
    String queryStr = QueryUtils.subUriForQueryVar(PUBLICATION_QUERY, "subject", subjectUri);
    String subclass = "";
    log.debug("queryStr = " + queryStr);
    HashMap<String, List<Map<String, String>>> subclassToPublications = new HashMap<String, List<Map<String, String>>>();
    try {
        ResultSet results = QueryUtils.getQueryResults(queryStr, vreq);
        while (results.hasNext()) {
            QuerySolution soln = results.nextSolution();
            RDFNode subclassUri = soln.get("subclass");
            if (subclassUri != null) {
                String subclassUriStr = soln.get("subclass").toString();
                VClass vClass = vcDao.getVClassByURI(subclassUriStr);
                subclass = ((vClass.getName() == null) ? subclassUriStr : vClass.getName());
            } else {
                subclass = "Unclassified Publication";
            }
            if (!subclassToPublications.containsKey(subclass)) {
                subclassToPublications.put(subclass, new ArrayList<Map<String, String>>()); // list of publication information
            }
            List<Map<String, String>> publicationsList = subclassToPublications.get(subclass);
            publicationsList.add(QueryUtils.querySolutionToStringValueMap(soln));
        }
    } catch (Exception e) {
        log.error(e, e);
    }
    return subclassToPublications;
}