List of usage examples for com.google.common.collect Multimap get
Collection<V> get(@Nullable K key);
From source file:org.crypto.sse.EMM2Lev.java
public static List<String> query(byte[][] keys, Multimap<String, byte[]> dictionary, byte[][] array) throws InvalidKeyException, InvalidAlgorithmParameterException, NoSuchAlgorithmException, NoSuchProviderException, NoSuchPaddingException, IOException { byte[] l = CryptoPrimitives.generateCmac(keys[0], Integer.toString(0)); List<byte[]> tempList = new ArrayList<byte[]>(dictionary.get(new String(l))); if (!(tempList.size() == 0)) { String temp = (new String(CryptoPrimitives.decryptAES_CTR_String(tempList.get(0), keys[1]))) .split("\t\t\t")[0]; String[] result = temp.split(separator); List<String> resultFinal = new ArrayList<String>(Arrays.asList(result)); // We remove the flag that identifies the size of the dataset if (result[0].equals("1")) { resultFinal.remove(0);//w w w . j a v a2 s . co m return resultFinal; } else if (result[0].equals("2")) { resultFinal.remove(0); List<String> resultFinal2 = new ArrayList<String>(); for (String key : resultFinal) { boolean flag = true; int counter = 0; while (flag) { if (counter < key.length() && Character.isDigit(key.charAt(counter))) { counter++; } else { flag = false; } } String temp2 = (new String(CryptoPrimitives.decryptAES_CTR_String( array[Integer.parseInt((String) key.subSequence(0, counter))], keys[1]))) .split("\t\t\t")[0]; String[] result3 = temp2.split(separator); List<String> tmp = new ArrayList<String>(Arrays.asList(result3)); resultFinal2.addAll(tmp); } return resultFinal2; } else if (result[0].equals("3")) { resultFinal.remove(0); List<String> resultFinal2 = new ArrayList<String>(); for (String key : resultFinal) { boolean flag = true; int counter = 0; while (flag) { if (counter < key.length() && Character.isDigit(key.charAt(counter))) { counter++; } else { flag = false; } } String temp2 = (new String(CryptoPrimitives.decryptAES_CTR_String( array[Integer.parseInt((String) key.subSequence(0, counter))], keys[1]))) .split("\t\t\t")[0]; String[] result3 = temp2.split(separator); List<String> tmp = new 
ArrayList<String>(Arrays.asList(result3)); resultFinal2.addAll(tmp); } List<String> resultFinal3 = new ArrayList<String>(); for (String key : resultFinal2) { boolean flag = true; int counter = 0; while (flag) { if (counter < key.length() && Character.isDigit(key.charAt(counter))) { counter++; } else { flag = false; } } if (counter == 0) { break; } String temp2 = (new String(CryptoPrimitives.decryptAES_CTR_String( array[Integer.parseInt((String) key.subSequence(0, counter))], keys[1]))) .split("\t\t\t")[0]; String[] result3 = temp2.split(separator); List<String> tmp = new ArrayList<String>(Arrays.asList(result3)); resultFinal3.addAll(tmp); } return resultFinal3; } } return new ArrayList<String>(); }
From source file:org.gradle.plugins.ide.internal.IdeDependenciesExtractor.java
/**
 * Resolves the requested auxiliary artifacts (sources and javadoc jars) for
 * the given already-resolved components and attaches the downloaded files to
 * the corresponding {@link IdeExtendedRepoFileDependency} entries in place.
 *
 * @param dependencyHandler used to create the artifact resolution query
 * @param dependencies resolved IDE dependencies, keyed by component identifier
 * @param artifactTypes auxiliary artifact classes to fetch; no-op when empty
 */
private static void downloadAuxiliaryArtifacts(DependencyHandler dependencyHandler,
        Multimap<ComponentIdentifier, IdeExtendedRepoFileDependency> dependencies,
        List<Class<? extends Artifact>> artifactTypes) {
    if (artifactTypes.isEmpty()) {
        return; // nothing requested; avoid issuing an empty resolution query
    }
    ArtifactResolutionQuery query = dependencyHandler.createArtifactResolutionQuery();
    query.forComponents(dependencies.keySet());

    // Java forbids generic array creation, so build a raw Class<?>[] and cast.
    @SuppressWarnings("unchecked")
    Class<? extends Artifact>[] artifactTypesArray = (Class<? extends Artifact>[]) new Class<?>[artifactTypes
            .size()];
    query.withArtifacts(JvmLibrary.class, artifactTypes.toArray(artifactTypesArray));

    Set<ComponentArtifactsResult> componentResults = query.execute().getResolvedComponents();
    for (ComponentArtifactsResult componentResult : componentResults) {
        // A single component may back several IDE dependencies; attach files to each.
        for (IdeExtendedRepoFileDependency dependency : dependencies.get(componentResult.getId())) {
            for (ArtifactResult sourcesResult : componentResult.getArtifacts(SourcesArtifact.class)) {
                // Only successfully resolved artifacts carry a file.
                if (sourcesResult instanceof ResolvedArtifactResult) {
                    dependency.addSourceFile(((ResolvedArtifactResult) sourcesResult).getFile());
                }
            }
            for (ArtifactResult javadocResult : componentResult.getArtifacts(JavadocArtifact.class)) {
                if (javadocResult instanceof ResolvedArtifactResult) {
                    dependency.addJavadocFile(((ResolvedArtifactResult) javadocResult).getFile());
                }
            }
        }
    }
}
From source file:org.prebake.service.bake.WorkingDir.java
/**
 * Compute the list of files under the working directory that match a
 * product's output globs.
 *
 * @param workingDir root under which matching files are searched
 * @param exclusions working-dir-relative paths to omit from the result
 * @param outputMatcher the product's output globs
 * @return working-dir-relative paths of regular files that match
 */
static ImmutableList<Path> matching(final Path workingDir, final Set<Path> exclusions,
        ImmutableGlobSet outputMatcher) throws IOException {
    // Get the prefix map so we only walk subtrees that are important.
    // E.g. for output globs
    //   [foo/lib/bar/*.lib, foo/lib/**.o, foo/lib/**.so, foo/bin/*.a]
    // this should yield the map
    //   "foo/lib"     => [foo/lib/**.o, foo/lib/**.so]
    //   "foo/lib/bar" => [foo/lib/bar/*.lib]
    //   "foo/bin"     => [foo/bin/*.a]
    // Note that the keys are sorted so that foo/lib always occurs before
    // foo/lib/bar so that the walker below does not do any unnecessary stating.
    final Map<String, List<Glob>> groupedByDir;
    {
        Multimap<String, Glob> byPrefix = outputMatcher.getGlobsGroupedByPrefix();
        groupedByDir = new TreeMap<String, List<Glob>>(new Comparator<String>() {
            // Sort so that shorter paths occur first. That way we can start
            // walking the prefixes, and pick up the extra globs just in time when
            // we start walking those paths.
            public int compare(String a, String b) {
                long delta = ((long) a.length()) - b.length();
                return delta < 0 ? -1 : delta != 0 ? 1 : a.compareTo(b);
            }
        });
        Path root = workingDir.getRoot();
        for (String prefix : byPrefix.keySet()) {
            prefix = FsUtil.denormalizePath(root, prefix);
            String pathPrefix = workingDir.resolve(prefix).toString();
            groupedByDir.put(pathPrefix, ImmutableList.copyOf(byPrefix.get(prefix)));
        }
    }
    // Depth-first walker that accumulates matching paths relative to workingDir.
    class Walker {
        final ImmutableList.Builder<Path> out = ImmutableList.builder();
        // Prefix directories already covered by a walk rooted at a shorter prefix.
        final Set<String> walked = Sets.newHashSet();
        void walk(Path p, GlobSet globs) throws IOException {
            // TODO: handle symbolic links
            String pStr = p.toString();
            List<Glob> extras = groupedByDir.get(pStr);
            if (extras != null) {
                // Entering a prefix directory: widen the active glob set with
                // the globs registered for this prefix.
                globs = new MutableGlobSet(globs).addAll(extras);
                walked.add(pStr);
            }
            BasicFileAttributes attrs = Attributes.readBasicFileAttributes(p);
            if (attrs.isRegularFile()) {
                Path relPath = workingDir.relativize(p);
                if (globs.matches(relPath) && !exclusions.contains(relPath)) {
                    out.add(relPath);
                }
            } else {
                // Directory: recurse into every child with the current glob set.
                for (Path child : p.newDirectoryStream()) {
                    walk(child, globs);
                }
            }
        }
    }
    Walker w = new Walker();
    for (Map.Entry<String, List<Glob>> e : groupedByDir.entrySet()) {
        String prefix = e.getKey();
        if (w.walked.contains(prefix)) {
            continue;
        } // already walked
        Path p = workingDir.resolve(prefix);
        // NOTE(review): double negative — walks when p is NOT known to be
        // absent; confirm notExists() semantics for unknown-existence paths.
        if (!p.notExists()) {
            w.walk(p, ImmutableGlobSet.empty());
        }
    }
    return w.out.build();
}
From source file:org.kiji.hive.utils.DataRequestOptimizer.java
/** * This method propogates the configuration of a family in a KijiDataRequest by replacing * it with a page of fully qualified columns with the same configuration. * * @param kijiDataRequest to use as a base. * @param qualifiersPage a page of fully qualified columns to replace families in the original * data request with. * @return A new data request with the appropriate families replaced with the page of fully * qualified columns./*ww w .j a v a 2s. c o m*/ */ public static KijiDataRequest expandFamilyWithPagedQualifiers(KijiDataRequest kijiDataRequest, Collection<KijiColumnName> qualifiersPage) { // Organize the page of column names by family. Multimap<String, KijiColumnName> familyToQualifiersMap = ArrayListMultimap.create(); for (KijiColumnName kijiColumnName : qualifiersPage) { familyToQualifiersMap.put(kijiColumnName.getFamily(), kijiColumnName); } // Build a new data request KijiDataRequestBuilder qualifierRequestBuilder = KijiDataRequest.builder(); for (Column column : kijiDataRequest.getColumns()) { KijiColumnName kijiColumnName = column.getColumnName(); if (kijiColumnName.isFullyQualified() || !familyToQualifiersMap.containsKey(kijiColumnName.getFamily())) { // If the column is fully qualified or it's not in qualifiersPage add this column as is. qualifierRequestBuilder.newColumnsDef(column); } else { // Iterate through the paged qualifiers and add for (KijiColumnName columnName : familyToQualifiersMap.get(kijiColumnName.getFamily())) { qualifierRequestBuilder.newColumnsDef().withFilter(column.getFilter()) .withPageSize(column.getPageSize()).withMaxVersions(column.getMaxVersions()) .add(columnName.getFamily(), columnName.getQualifier()); } } } return qualifierRequestBuilder.build(); }
From source file:com.moz.fiji.hive.utils.DataRequestOptimizer.java
/** * This method propogates the configuration of a family in a FijiDataRequest by replacing * it with a page of fully qualified columns with the same configuration. * * @param fijiDataRequest to use as a base. * @param qualifiersPage a page of fully qualified columns to replace families in the original * data request with. * @return A new data request with the appropriate families replaced with the page of fully * qualified columns./*from www .j a v a 2s. c o m*/ */ public static FijiDataRequest expandFamilyWithPagedQualifiers(FijiDataRequest fijiDataRequest, Collection<FijiColumnName> qualifiersPage) { // Organize the page of column names by family. Multimap<String, FijiColumnName> familyToQualifiersMap = ArrayListMultimap.create(); for (FijiColumnName fijiColumnName : qualifiersPage) { familyToQualifiersMap.put(fijiColumnName.getFamily(), fijiColumnName); } // Build a new data request FijiDataRequestBuilder qualifierRequestBuilder = FijiDataRequest.builder(); for (Column column : fijiDataRequest.getColumns()) { FijiColumnName fijiColumnName = column.getColumnName(); if (fijiColumnName.isFullyQualified() || !familyToQualifiersMap.containsKey(fijiColumnName.getFamily())) { // If the column is fully qualified or it's not in qualifiersPage add this column as is. qualifierRequestBuilder.newColumnsDef(column); } else { // Iterate through the paged qualifiers and add for (FijiColumnName columnName : familyToQualifiersMap.get(fijiColumnName.getFamily())) { qualifierRequestBuilder.newColumnsDef().withFilter(column.getFilter()) .withPageSize(column.getPageSize()).withMaxVersions(column.getMaxVersions()) .add(columnName.getFamily(), columnName.getQualifier()); } } } return qualifierRequestBuilder.build(); }
From source file:eu.interedition.collatex.util.ParallelSegmentationApparatus.java
/**
 * Emits a parallel-segmentation view of a ranked variant graph: for each rank
 * (and each transposition-rank group within it) the callback receives one
 * segment mapping every witness to its tokens at that position.
 *
 * @param ranking  rank-indexed variant graph vertices
 * @param callback receives start(), one segment(...) per cell row, then end()
 */
public static void generate(VariantGraphRanking ranking, GeneratorCallback callback) {
    callback.start();
    final Set<Witness> allWitnesses = ranking.witnesses();
    for (Iterator<Map.Entry<Integer, Collection<VariantGraph.Vertex>>> rowIt = ranking.getByRank().asMap()
            .entrySet().iterator(); rowIt.hasNext();) {
        final Map.Entry<Integer, Collection<VariantGraph.Vertex>> row = rowIt.next();
        final int rank = row.getKey();
        final Collection<VariantGraph.Vertex> vertices = row.getValue();

        if (vertices.size() == 1 && Iterables.getOnlyElement(vertices).tokens().isEmpty()) {
            // skip start and end vertex
            continue;
        }

        // spreading vertices with same rank according to their registered transpositions;
        // the group key is the summed rank offset of each vertex's transposed partners
        final Multimap<Integer, VariantGraph.Vertex> verticesByTranspositionRank = HashMultimap.create();
        for (VariantGraph.Vertex v : vertices) {
            int transpositionRank = 0;
            for (VariantGraph.Transposition transposition : v.transpositions()) {
                for (VariantGraph.Vertex tv : transposition) {
                    transpositionRank += (ranking.apply(tv).intValue() - rank);
                }
            }
            verticesByTranspositionRank.put(transpositionRank, v);
        }

        // render segments, iterating transposition-rank groups in ascending order
        for (Iterator<Integer> transpositionRankIt = Ordering.natural()
                .immutableSortedCopy(verticesByTranspositionRank.keySet()).iterator(); transpositionRankIt
                        .hasNext();) {
            // Collect the tokens of this group's vertices, bucketed per witness.
            final Multimap<Witness, Token> tokensByWitness = HashMultimap.create();
            for (VariantGraph.Vertex v : verticesByTranspositionRank.get(transpositionRankIt.next())) {
                for (Token token : v.tokens()) {
                    tokensByWitness.put(token.getWitness(), token);
                }
            }
            // One cell per witness; witnesses absent at this position get an empty set.
            final SortedMap<Witness, Iterable<Token>> cellContents = Maps.newTreeMap(Witness.SIGIL_COMPARATOR);
            for (Witness witness : allWitnesses) {
                cellContents.put(witness,
                        tokensByWitness.containsKey(witness)
                                ? Iterables.unmodifiableIterable(tokensByWitness.get(witness))
                                : Collections.<Token>emptySet());
            }
            callback.segment(cellContents);
        }
    }
    callback.end();
}
From source file:t9.T9algo.java
private static String nums_to_text(String num_string) throws FileNotFoundException, IOException { // throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates. String li[] = num_string.split("\\*"); TreeMap<String, String[]> z = new TreeMap<>(); for (String num_word : li) { String tmp[] = textonyms(num_word); z.put(num_word, tmp);// w w w .j av a 2 s. c o m } String bnc_str; StringBuilder sb = new StringBuilder(); BufferedReader br = new BufferedReader( new FileReader("/home/rb/NetBeansProjects/Test/src/test/all.num.o5.txt")); try { String line = br.readLine(); while (line != null) { sb.append(line); sb.append(System.lineSeparator()); line = br.readLine(); } bnc_str = sb.toString();//.split("\\n"); // System.out.println(everything[1]); } finally { br.close(); } Multimap<String, String> d3 = ArrayListMultimap.create(); StringBuilder s_words = new StringBuilder(); StringBuilder all_words = new StringBuilder(); // TreeMap<String,String> d3= new TreeMap<>(); for (Map.Entry<String, String[]> entry : z.entrySet()) { String key = entry.getKey(); String[] value = entry.getValue(); for (String item : value) { String pattern = "(\\S*)" + item; Pattern r = Pattern.compile(pattern); Matcher m = r.matcher(bnc_str); int i; for (i = m.start() - 2; i >= 0; --i) if (bnc_str.charAt(i) == 32) break; d3.put(item, bnc_str.substring(i + 1, m.start() - 1)); } // all_words.append(bnc_str.substring(i+1,m.start()-1)+" "); all_words.append(d3.get(key) + " "); } return all_words.toString(); }
From source file:org.crypto.sse.DynRH_Disk.java
/**
 * Builds an encrypted update token for a batch of (keyword, identifier)
 * pairs: each pair becomes a (label, ciphertext) entry to be merged into the
 * server-side dictionary.
 *
 * @param key    master secret from which per-purpose CMAC keys are derived
 * @param lookup plaintext keyword -> identifier pairs to insert
 * @return the update entries, sorted lexicographically to hide insertion order
 */
public static TreeMultimap<String, byte[]> tokenUpdate(byte[] key, Multimap<String, String> lookup)
        throws InvalidKeyException, InvalidAlgorithmParameterException, NoSuchAlgorithmException,
        NoSuchProviderException, NoSuchPaddingException, IOException {
    // A lexicographic sorted tree to hide order of insertion
    TreeMultimap<String, byte[]> tokenUp = TreeMultimap.create(Ordering.natural(), Ordering.usingToString());
    // Key generation
    SecureRandom random = new SecureRandom();
    random.setSeed(CryptoPrimitives.randomSeed(16));
    byte[] iv = new byte[16];
    for (String word : lookup.keySet()) {
        // NOTE(review): 1 + new String() is just the constant "1", so key1 is
        // identical for every word while key2 is per-word — confirm this
        // matches the scheme's key-derivation intent.
        byte[] key1 = CryptoPrimitives.generateCmac(key, 1 + new String());
        byte[] key2 = CryptoPrimitives.generateCmac(key, 2 + word);
        for (String id : lookup.get(word)) {
            // Fresh IV per encrypted identifier.
            random.nextBytes(iv);
            // Per-keyword insertion counter kept in the shared state map; the
            // label l is derived from it so repeated inserts get distinct labels.
            int counter = 0;
            if (state.get(word) != null) {
                counter = state.get(word);
            }
            state.put(word, counter + 1);
            byte[] l = CryptoPrimitives.generateCmac(key2, "" + counter);
            byte[] value = CryptoPrimitives.encryptAES_CTR_String(key1, iv, id, sizeOfFileIdentifer);
            tokenUp.put(new String(l), value);
        }
    }
    return tokenUp;
}
From source file:org.sonar.plugins.groovy.codenarc.Converter.java
/**
 * Renders every CodeNarc rule, grouped by rule set, into the plugin's XML
 * rule-repository format.
 *
 * @return the generated XML document as a string, prefixed with a comment
 *         recording the CodeNarc version the rules were generated from
 * @throws Exception if the version resource or rule metadata cannot be read
 */
public static String convert() throws Exception {
    Multimap<RuleSet, Rule> rulesBySet = loadRules();
    StringBuilder xmlStringBuilder = new StringBuilder();
    // Record which CodeNarc version the output was generated from.
    String version = IOUtils.toString(Converter.class.getResourceAsStream("/codenarc-version.txt"));
    xmlStringBuilder.append("<!-- Generated using CodeNarc " + version + " -->");
    xmlStringBuilder.append(LINE_SEPARATOR);
    Converter converter = new Converter();
    start(xmlStringBuilder);
    for (RuleSet ruleSet : RuleSet.values()) {
        // NOTE(review): startSet is called per set with no matching close call
        // visible here — confirm startSet/end emit the closing tags correctly.
        startSet(xmlStringBuilder, ruleSet.getLabel());
        ArrayList<Rule> rules = Lists.newArrayList(rulesBySet.get(ruleSet));
        for (Rule rule : rules) {
            converter.rule(xmlStringBuilder, rule);
        }
    }
    end(xmlStringBuilder);
    return xmlStringBuilder.toString();
}
From source file:com.qcadoo.mes.workPlans.pdf.document.operation.grouping.container.util.OrderIdOperationNumberOperationComponentIdMap.java
/**
 * Looks up the operation-component ids recorded for the given order and
 * operation number.
 *
 * @param orderId         outer key of the two-level map
 * @param operationNumber inner key within that order's multimap
 * @return the matching component ids, or {@code null} when nothing was
 *         recorded for the order
 */
public Collection<Long> get(Long orderId, String operationNumber) {
    // Two-level lookup: order id -> (operation number -> component ids).
    Multimap<String, Long> byOperation = map.get(orderId);
    if (isEmpty(byOperation)) {
        return null;
    }
    return byOperation.get(operationNumber);
}