Example usage for com.google.common.collect.Sets.newHashSet

Introduction

On this page you can find example usage for com.google.common.collect.Sets.newHashSet.

Prototype

public static <E> HashSet<E> newHashSet() 

Document

Creates a mutable, initially empty HashSet instance.
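
Before the project examples below, here is a minimal, self-contained sketch of the factory and its common overloads. It is illustrative only; the class name and variables are invented for this page.

    import com.google.common.collect.Sets;

    import java.util.HashSet;
    import java.util.Set;

    public class NewHashSetSketch {
        public static void main(String[] args) {
            // Empty, mutable set; before Java 7's diamond operator this avoided
            // repeating the type parameter on the right-hand side.
            Set<String> empty = Sets.newHashSet();

            // Varargs overload: a pre-populated mutable set.
            Set<String> colors = Sets.newHashSet("red", "green", "blue");

            // Iterable overload: copies an existing collection.
            HashSet<String> copy = Sets.newHashSet(colors);

            empty.add("now non-empty");
            System.out.println(empty + " " + colors + " " + copy);
        }
    }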

Usage

From source file: org.auraframework.tools.definition.AuraCompiler.java

public static void main(String[] args) throws Throwable {
    CommandLineLogger cll = new CommandLineLogger();
    File componentsDir = new File(args[0]);
    File outputDir = new File(args[1]);
    int i;
    Set<String> ns = Sets.newHashSet();
    for (i = 2; i < args.length; i++) {
        ns.add(args[i]);
    }
    try {
        new RegistrySerializer(componentsDir, outputDir, ns.toArray(new String[ns.size()]), cll).execute();
    } catch (RegistrySerializerException rse) {
        cll.error(rse.getMessage(), rse.getCause());
        System.exit(1);
    }
}
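
As a hedged aside, the copy loop above can be collapsed into a single call using the Iterable overload. The class name below is invented for illustration, and the subList call assumes at least two leading positional arguments, as in the example.

    import com.google.common.collect.Sets;

    import java.util.Arrays;
    import java.util.Set;

    public class CollectArgs {
        public static void main(String[] args) {
            // Same effect as the loop in the example above: the trailing
            // arguments become a mutable, duplicate-free set.
            Set<String> ns = Sets.newHashSet(Arrays.asList(args).subList(2, args.length));
            System.out.println(ns);
        }
    }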

From source file: org.gradoop.examples.dimspan.TLFDataCopier.java

/**
 * Main program to run the example. Arguments are the available options.
 *
 * @param args program arguments
 * @throws Exception
 */
@SuppressWarnings("unchecked")
public static void main(String[] args) throws Exception {
    CommandLine cmd = parseArguments(args, TLFDataCopier.class.getName());
    if (cmd == null) {
        return;
    }
    performSanityCheck(cmd);

    // read arguments from command line
    final String inputPath = cmd.getOptionValue(OPTION_INPUT_PATH);
    final String outputPath = cmd.getOptionValue(OPTION_OUTPUT_PATH);
    int copyCount = Integer.parseInt(cmd.getOptionValue(COPY_COUNT));

    // Create data source and sink
    DataSource dataSource = new TLFDataSource(inputPath, GRADOOP_CONFIG);
    DataSink dataSink = new TLFDataSink(outputPath, GRADOOP_CONFIG);

    DataSet<GraphTransaction> input = dataSource.getGraphCollection().getGraphTransactions();

    DataSet<GraphTransaction> output = input
            .flatMap((FlatMapFunction<GraphTransaction, GraphTransaction>) (graphTransaction, collector) -> {
                for (int i = 1; i <= copyCount; i++) {
                    collector.collect(graphTransaction);
                }
            }).returns(TypeExtractor
                    .getForObject(new GraphTransaction(new GraphHead(), Sets.newHashSet(), Sets.newHashSet())));

    // execute and write to disk
    dataSink.write(GRADOOP_CONFIG.getGraphCollectionFactory().fromTransactions(output), true);
    getExecutionEnvironment().execute();
}
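
In this example the two Sets.newHashSet() calls fill the vertex and edge slots of a throwaway GraphTransaction whose only purpose is to give Flink's TypeExtractor a sample object. A minimal sketch of that placeholder pattern, using a hypothetical value class in place of GraphTransaction:

    import com.google.common.collect.Sets;

    import java.util.Set;

    public class TransactionSketch {
        // Hypothetical value class standing in for GraphTransaction: the
        // constructor requires non-null element sets, so two empty mutable
        // sets make a cheap but valid sample instance.
        final Set<String> vertices;
        final Set<String> edges;

        TransactionSketch(Set<String> vertices, Set<String> edges) {
            this.vertices = vertices;
            this.edges = edges;
        }

        public static void main(String[] args) {
            TransactionSketch sample = new TransactionSketch(Sets.newHashSet(), Sets.newHashSet());
            System.out.println(sample.vertices.isEmpty() && sample.edges.isEmpty());
        }
    }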

From source file: org.apache.ctakes.temporal.data.analysis.PrintInconsistentAnnotations.java

public static void main(String[] args) throws Exception {
    Options options = CliFactory.parseArguments(Options.class, args);
    int windowSize = 50;

    List<Integer> patientSets = options.getPatients().getList();
    List<Integer> trainItems = THYMEData.getPatientSets(patientSets, THYMEData.TRAIN_REMAINDERS);
    List<File> files = THYMEData.getFilesFor(trainItems, options.getRawTextDirectory());

    CollectionReader reader = UriCollectionReader.getCollectionReaderFromFiles(files);
    AggregateBuilder aggregateBuilder = new AggregateBuilder();
    aggregateBuilder.add(UriToDocumentTextAnnotator.getDescription());
    aggregateBuilder.add(AnalysisEngineFactory.createEngineDescription(XMIReader.class,
            XMIReader.PARAM_XMI_DIRECTORY, options.getXMIDirectory()));

    int totalDocTimeRels = 0;
    int totalInconsistentDocTimeRels = 0;
    for (Iterator<JCas> casIter = new JCasIterator(reader, aggregateBuilder.createAggregate()); casIter
            .hasNext();) {
        JCas jCas = casIter.next();
        String text = jCas.getDocumentText();
        JCas goldView = jCas.getView("GoldView");

        // group events by their narrative container
        Multimap<Annotation, EventMention> containers = HashMultimap.create();
        for (TemporalTextRelation relation : JCasUtil.select(goldView, TemporalTextRelation.class)) {
            if (relation.getCategory().equals("CONTAINS")) {
                Annotation arg1 = relation.getArg1().getArgument();
                Annotation arg2 = relation.getArg2().getArgument();
                if (arg2 instanceof EventMention) {
                    EventMention event = (EventMention) arg2;
                    containers.put(arg1, event);
                }
            }
        }

        // check each container for inconsistent DocTimeRels
        for (Annotation container : containers.keySet()) {
            Set<String> docTimeRels = Sets.newHashSet();
            for (EventMention event : containers.get(container)) {
                docTimeRels.add(event.getEvent().getProperties().getDocTimeRel());
            }
            totalDocTimeRels += docTimeRels.size();

            boolean inconsistentDocTimeRels;
            if (container instanceof EventMention) {
                EventMention mention = ((EventMention) container);
                String containerDocTimeRel = mention.getEvent().getProperties().getDocTimeRel();
                inconsistentDocTimeRels = false;
                for (String docTimeRel : docTimeRels) {
                    if (docTimeRel.equals(containerDocTimeRel)) {
                        continue;
                    }
                    if (containerDocTimeRel.equals("BEFORE/OVERLAP")
                            && (docTimeRel.equals("BEFORE") || docTimeRel.equals("OVERLAP"))) {
                        continue;
                    }
                    inconsistentDocTimeRels = true;
                    break;
                }
            } else {
                if (docTimeRels.size() == 1) {
                    inconsistentDocTimeRels = false;
                } else if (docTimeRels.contains("BEFORE/OVERLAP")) {
                    // BEFORE/OVERLAP subsumes BEFORE and OVERLAP, so only other values are
                    // inconsistent (the original size() == 1 test could never be true here,
                    // since the singleton case was already handled above)
                    inconsistentDocTimeRels = !Sets.newHashSet("BEFORE/OVERLAP", "BEFORE", "OVERLAP")
                            .containsAll(docTimeRels);
                } else {
                    inconsistentDocTimeRels = true;
                }
            }

            // if inconsistent: print events, DocTimeRels and surrounding context
            if (inconsistentDocTimeRels) {
                totalInconsistentDocTimeRels += docTimeRels.size();

                List<Integer> offsets = Lists.newArrayList();
                offsets.add(container.getBegin());
                offsets.add(container.getEnd());
                for (EventMention event : containers.get(container)) {
                    offsets.add(event.getBegin());
                    offsets.add(event.getEnd());
                }
                Collections.sort(offsets);
                int begin = Math.max(offsets.get(0) - windowSize, 0);
                int end = Math.min(offsets.get(offsets.size() - 1) + windowSize, text.length());
                System.err.printf("Inconsistent DocTimeRels in %s, ...%s...\n",
                        new File(ViewUriUtil.getURI(jCas)).getName(),
                        text.substring(begin, end).replaceAll("([\r\n])[\r\n]+", "$1"));
                if (container instanceof EventMention) {
                    System.err.printf("Container: \"%s\" (docTimeRel=%s)\n", container.getCoveredText(),
                            ((EventMention) container).getEvent().getProperties().getDocTimeRel());
                } else {
                    System.err.printf("Container: \"%s\"\n", container.getCoveredText());
                }
                Ordering<EventMention> byBegin = Ordering.natural()
                        .onResultOf(new Function<EventMention, Integer>() {
                            @Override
                            public Integer apply(@Nullable EventMention event) {
                                return event.getBegin();
                            }
                        });
                for (EventMention event : byBegin.sortedCopy(containers.get(container))) {
                    System.err.printf("* \"%s\" (docTimeRel=%s)\n", event.getCoveredText(),
                            event.getEvent().getProperties().getDocTimeRel());
                }
                System.err.println();
            }
        }
    }

    System.err.printf("Inconsistent DocTimeRels: %.1f%% (%d/%d)\n",
            100.0 * totalInconsistentDocTimeRels / totalDocTimeRels, totalInconsistentDocTimeRels,
            totalDocTimeRels);
}
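
The core trick above is collecting each container's DocTimeRel values into a set so that duplicates collapse and a singleton set signals consistency. A stripped-down sketch of that grouping-and-dedup pattern, with invented data and class names:

    import com.google.common.collect.ArrayListMultimap;
    import com.google.common.collect.Multimap;
    import com.google.common.collect.Sets;

    import java.util.Set;

    public class ConsistencySketch {
        public static void main(String[] args) {
            // Group raw values under a key; duplicates are kept by the list multimap.
            Multimap<String, String> byContainer = ArrayListMultimap.create();
            byContainer.put("c1", "BEFORE");
            byContainer.put("c1", "BEFORE");
            byContainer.put("c2", "BEFORE");
            byContainer.put("c2", "OVERLAP");

            // Deduping each group into a set makes the consistency check a size test.
            for (String container : byContainer.keySet()) {
                Set<String> distinct = Sets.newHashSet(byContainer.get(container));
                System.out.printf("%s consistent=%b%n", container, distinct.size() == 1);
            }
        }
    }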

From source file: edu.byu.nlp.data.app.AnnotationStream2Csv.java

public static void main(String[] args) throws IOException {
    // parse CLI arguments
    new ArgumentParser(AnnotationStream2Csv.class).parseArgs(args);
    Preconditions.checkNotNull(jsonStream, "You must provide a valid --json-stream!");

    Dataset data = readData(jsonStream);

    // optionally aggregate by instance
    String header = "annotator,start,end,annotation,label,source,num_correct_annotations,num_annotations,cum_num_annotations,num_annotators,cum_num_annotators\n";

    // iterate over instances and (optionally) annotations
    final StringBuilder bld = new StringBuilder();

    switch (row) {
    case ANNOTATION:

        // sort all annotations by end time
        Map<FlatInstance<SparseFeatureVector, Integer>, DatasetInstance> ann2InstMap = Maps
                .newIdentityHashMap();
        List<FlatInstance<SparseFeatureVector, Integer>> annotationList = Lists.newArrayList();
        for (DatasetInstance inst : data) {
            for (FlatInstance<SparseFeatureVector, Integer> ann : inst.getAnnotations().getRawAnnotations()) {
                ann2InstMap.put(ann, inst); // record the instance of each annotation
                annotationList.add(ann);
            }
        }
        Collections.sort(annotationList, new Comparator<FlatInstance<SparseFeatureVector, Integer>>() {
            @Override
            public int compare(FlatInstance<SparseFeatureVector, Integer> o1,
                    FlatInstance<SparseFeatureVector, Integer> o2) {
                // no null checking since we want to fail if annotation time is not set. 
                return Long.compare(o1.getEndTimestamp(), o2.getEndTimestamp());
            }
        });

        Set<Integer> annotators = Sets.newHashSet();
        for (Enumeration<FlatInstance<SparseFeatureVector, Integer>> item : Iterables2
                .enumerate(annotationList)) {
            FlatInstance<SparseFeatureVector, Integer> ann = item.getElement();
            DatasetInstance inst = ann2InstMap.get(ann);
            annotators.add(ann.getAnnotator());

            bld.append(ann.getAnnotator() + ",");
            bld.append(ann.getStartTimestamp() + ",");
            bld.append(ann.getEndTimestamp() + ",");
            bld.append(ann.getAnnotation() + ",");
            bld.append(inst.getLabel() + ",");
            bld.append(
                    data.getInfo().getIndexers().getInstanceIdIndexer().get(inst.getInfo().getSource()) + ",");
            bld.append((!inst.hasLabel() ? "NA" : ann.getAnnotation().equals(inst.getLabel()) ? 1 : 0) + ","); // num correct (equals, not ==: boxed Integers)
            bld.append(1 + ","); // num annotations
            bld.append((item.getIndex() + 1) + ","); // cumulative num annotations
            bld.append(1 + ","); // num annotators
            bld.append(annotators.size() + ""); // cumulative num annotators
            bld.append("\n");
        }
        break;
    case INSTANCE:
        int cumNumAnnotations = 0;
        for (DatasetInstance inst : data) {
            cumNumAnnotations += inst.getInfo().getNumAnnotations();

            int numCorrectAnnotations = 0;
            // sum over all the annotators who put the correct answer (if available)
            if (inst.hasLabel()) {
                Integer correctLabel = inst.getLabel();
                for (int j = 0; j < data.getInfo().getNumAnnotators(); j++) {
                    numCorrectAnnotations += inst.getAnnotations().getLabelAnnotations()
                            .getRow(j)[correctLabel];
                }
            }

            bld.append("NA,");
            bld.append("NA,");
            bld.append("NA,");
            bld.append("NA,");
            bld.append(inst.getLabel() + ",");
            bld.append(inst.getInfo().getSource() + ",");
            bld.append(numCorrectAnnotations + ",");
            bld.append(inst.getInfo().getNumAnnotations() + ",");
            bld.append(cumNumAnnotations + ",");
            bld.append(inst.getInfo().getNumAnnotators() + ",");
            bld.append("NA"); // cumulative num annotators
            bld.append("\n");
        }
        break;

    case ANNOTATOR:
        Multiset<Integer> perAnnotatorAnnotationCounts = HashMultiset.create();
        Multiset<Integer> perAnnotatorCorrectAnnotationCounts = HashMultiset.create();
        for (DatasetInstance inst : data) {
            for (FlatInstance<SparseFeatureVector, Integer> ann : inst.getAnnotations().getRawAnnotations()) {
                int annotatorId = ann.getAnnotator();

                perAnnotatorAnnotationCounts.add(annotatorId);

                if (java.util.Objects.equals(inst.getLabel(), ann.getAnnotation())) { // equals, not ==: boxed Integers
                    perAnnotatorCorrectAnnotationCounts.add(annotatorId);
                }

            }
        }

        for (String annotatorId : data.getInfo().getAnnotatorIdIndexer()) {

            bld.append(annotatorId + ",");
            bld.append("NA,");
            bld.append("NA,");
            bld.append("NA,");
            bld.append("NA,");
            bld.append("NA,");
            bld.append(perAnnotatorCorrectAnnotationCounts.count(annotatorId) + ",");
            bld.append(perAnnotatorAnnotationCounts.count(annotatorId) + ",");
            bld.append("NA,");
            bld.append("1,"); // num annotators
            bld.append("NA"); // cumulative num annotators
            bld.append("\n");
        }

        break;

    default:
        Preconditions.checkArgument(false, "unknown row type: " + row);
        break;
    }

    // output to console
    if (out == null) {
        System.out.println(header);
        System.out.println(bld.toString());
    } else {
        File outfile = new File(out);
        Files.write(header, outfile, Charsets.UTF_8);
        Files.append(bld, outfile, Charsets.UTF_8);
    }

}
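
In the ANNOTATION branch, a single growing set yields the cumulative distinct-annotator count: after each annotation, annotators.size() is the number of distinct annotators seen so far. A minimal sketch of that running-set pattern (all names and data invented):

    import com.google.common.collect.Sets;

    import java.util.Arrays;
    import java.util.List;
    import java.util.Set;

    public class CumulativeDistinct {
        public static void main(String[] args) {
            List<Integer> annotators = Arrays.asList(3, 1, 3, 2, 1);
            Set<Integer> seen = Sets.newHashSet(); // only ever grows
            for (int a : annotators) {
                seen.add(a); // duplicates are ignored
                System.out.println("annotator=" + a + " cumDistinct=" + seen.size());
            }
        }
    }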

From source file: org.commoncrawl.mapred.pipelineV3.crawllistgen.PartitionUtils.java

public static void main(String[] args) {

    TextBytes partitionKeyOut = new TextBytes();
    Set<Long> emptySet = Sets.newHashSet();
    FlexBuffer scratchBuffer = new FlexBuffer();
    TextBytes urlOut = new TextBytes();
    TextBytes domainBytes = new TextBytes();

    generatePartitionKeyGivenURL(emptySet, new TextBytes("http://www.google.com/someurl"), 0, partitionKeyOut);
    System.out.println("ParitiionKey:" + partitionKeyOut.toString());
    System.out.println("Parition:" + getPartitionGivenPartitionKey(partitionKeyOut, scratchBuffer, 10));
    System.out.println("Domain:" + getDomainGivenPartitionKey(partitionKeyOut, domainBytes));
    System.out.println("Type:" + getTypeGivenPartitionKey(partitionKeyOut));
    getURLGivenPartitionKey(partitionKeyOut, urlOut);
    System.out.println("URL:" + urlOut.toString());

    generatePartitionKeyGivenDomain(emptySet, "google.com", 0, partitionKeyOut);
    System.out.println("ParitiionKey:" + partitionKeyOut.toString());
    System.out.println("Parition:" + getPartitionGivenPartitionKey(partitionKeyOut, scratchBuffer, 10));
    System.out.println("Domain:" + getDomainGivenPartitionKey(partitionKeyOut, domainBytes));
    System.out.println("Type:" + getTypeGivenPartitionKey(partitionKeyOut));
    getURLGivenPartitionKey(partitionKeyOut, urlOut);
    System.out.println("URL:" + urlOut.toString());

}
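
This test passes Sets.newHashSet() as an empty exclusion set. Worth noting as a hedged aside: when the callee never mutates the set, an immutable shared empty set avoids the allocation, and the mutable factory is only needed when mutation is possible.

    import com.google.common.collect.ImmutableSet;
    import com.google.common.collect.Sets;

    import java.util.Collections;
    import java.util.Set;

    public class EmptySetChoices {
        public static void main(String[] args) {
            Set<Long> mutableEmpty = Sets.newHashSet();     // callee may add elements
            Set<Long> sharedEmpty = Collections.emptySet(); // immutable, no allocation
            Set<Long> guavaEmpty = ImmutableSet.of();       // immutable, Guava-flavored
            System.out.println(mutableEmpty.equals(sharedEmpty) && sharedEmpty.equals(guavaEmpty));
        }
    }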

From source file: org.apache.ctakes.temporal.data.analysis.CompareFeatureStructures.java

public static void main(String[] args) throws Exception {
    Options options = CliFactory.parseArguments(Options.class, args);
    List<Class<?>> annotationClasses = Lists.newArrayList();
    for (String annotationClassName : options.getAnnotationClassNames()) {
        annotationClasses.add(Class.forName(annotationClassName));
    }

    MyersDiff<String> stringDiff = new MyersDiff<String>();
    MyersDiff<FeatureStructure> fsDiff = new MyersDiff<FeatureStructure>(new FeatureStructureEqualizer());

    File originalDir = options.getDirectory1();
    File revisedDir = options.getDirectory2();
    Patch<String> dirPatch = stringDiff.diff(originalDir.list(), revisedDir.list());
    if (!dirPatch.getDeltas().isEmpty()) {
        log("--- %s files\n", originalDir);
        log("+++ %s files\n", revisedDir);
        log(dirPatch);
    } else {
        for (String fileName : originalDir.list()) {
            File originalFile = new File(originalDir, fileName);
            File revisedFile = new File(revisedDir, fileName);
            JCas originalJCas = readXMI(originalFile);
            JCas revisedJCas = readXMI(revisedFile);
            List<String> originalViews = getViewNames(originalJCas);
            List<String> revisedViews = getViewNames(revisedJCas);
            Patch<String> viewsPatch = stringDiff.diff(originalViews, revisedViews);
            if (!viewsPatch.getDeltas().isEmpty()) {
                log("--- %s views\n", originalFile);
                log("+++ %s views\n", revisedFile);
                log(viewsPatch);
            } else {
                for (String viewName : originalViews) {
                    JCas originalView = originalJCas.getView(viewName);
                    JCas revisedView = revisedJCas.getView(viewName);
                    List<FeatureStructure> originalFSes = toFeatureStructures(originalView, annotationClasses);
                    List<FeatureStructure> revisedFSes = toFeatureStructures(revisedView, annotationClasses);
                    Patch<FeatureStructure> fsPatch = fsDiff.diff(originalFSes, revisedFSes);
                    if (!fsPatch.getDeltas().isEmpty()) {
                        log("--- %s view %s\n", originalFile, viewName);
                        log("+++ %s view %s\n", revisedFile, viewName);
                        for (Delta<FeatureStructure> fsDelta : fsPatch.getDeltas()) {
                            logHeader(fsDelta);
                            switch (fsDelta.getType()) {
                            case DELETE:
                            case INSERT:
                                log(fsDelta);
                                break;
                            case CHANGE:
                                List<String> originalLines = toLines(fsDelta.getOriginal().getLines());
                                List<String> revisedLines = toLines(fsDelta.getRevised().getLines());
                                Patch<String> linesPatch = stringDiff.diff(originalLines, revisedLines);
                                ListMultimap<Integer, String> deletes = ArrayListMultimap.create();
                                ListMultimap<Integer, String> inserts = ArrayListMultimap.create();
                                Set<Integer> skips = Sets.newHashSet();
                                for (Delta<String> linesDelta : linesPatch.getDeltas()) {
                                    Chunk<String> originalChunk = linesDelta.getOriginal();
                                    Chunk<String> revisedChunk = linesDelta.getRevised();
                                    int start = originalChunk.getPosition();
                                    deletes.putAll(start, originalChunk.getLines());
                                    inserts.putAll(start, revisedChunk.getLines());
                                    for (int i = start; i < start + originalChunk.size(); ++i) {
                                        skips.add(i);
                                    }
                                }
                                for (int i = 0; i < originalLines.size(); ++i) {
                                    if (!skips.contains(i)) {
                                        log(" %s\n", originalLines.get(i));
                                    }
                                    for (String line : deletes.get(i)) {
                                        log("-%s\n", line);
                                    }
                                    for (String line : inserts.get(i)) {
                                        log("+%s\n", line);
                                    }
                                }
                                break;
                            }
                        }
                    }
                }
            }
        }
    }

}
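
The CHANGE branch above uses a set of integer indices (skips) to suppress the original lines that a diff chunk replaced, printing everything else as unchanged context. A compact sketch of that index-set merge pattern, with invented data:

    import com.google.common.collect.Sets;

    import java.util.Arrays;
    import java.util.List;
    import java.util.Set;

    public class SkipIndexSketch {
        public static void main(String[] args) {
            List<String> original = Arrays.asList("a", "b", "c", "d");
            // Indices covered by a change chunk; everything else prints as context.
            Set<Integer> skips = Sets.newHashSet(1, 2);
            for (int i = 0; i < original.size(); i++) {
                System.out.println((skips.contains(i) ? "-" : " ") + original.get(i));
            }
        }
    }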

From source file: org.pshdl.model.types.builtIn.busses.memorymodel.v4.MemoryModelAST.java

public static void main(String[] args) throws FileNotFoundException, RecognitionException, IOException {
    final Set<Problem> problems = Sets.newHashSet();
    final String string = Files.toString(new File(args[0]), Charsets.UTF_8);
    final Unit parseUnit = parseUnit(string, problems, 0);
    for (final Problem problem : problems) {
        System.err.println("MemoryModelAST.main()" + problem);
    }
    System.out.println("MemoryModelAST.main()" + parseUnit);
}

From source file: com.complexible.stardog.examples.api.VersioningExample.java

public static void main(String[] args) throws Exception {
    // As always, we need to create and start a Stardog server for our example
    Server aServer = Stardog.buildServer().bind(SNARLProtocolConstants.EMBEDDED_ADDRESS).start();

    try {
        String aDB = "versionedDB";

        // Create an `AdminConnection` to Stardog to set up the database for the example
        AdminConnection dbms = AdminConnectionConfiguration.toEmbeddedServer().credentials("admin", "admin")
                .connect();

        // If the database exists, drop it and create it fresh
        ConnectionConfiguration aConfig;
        try {
            if (dbms.list().contains(aDB)) {
                dbms.drop(aDB);
            }

            aConfig = dbms.disk(aDB).set(VersioningOptions.ENABLED, true)
                    .set(DatabaseOptions.NAMESPACES, Lists.newArrayList(namespace("", NS),
                            namespace("foaf", FOAF.NAMESPACE), namespace("dc", DC.NAMESPACE)))
                    .create();
        } finally {
            dbms.close();
        }

        // Obtain a `Connection` to the database and request a view of the connection as a
        // [VersioningConnection](http://docs.stardog.com/java/snarl/com/complexible/stardog/api/versioning/VersioningConnection.html)
        VersioningConnection aConn = aConfig.connect().as(VersioningConnection.class);

        try {
            // Now, let's make some changes to the databases
            aConn.begin();
            aConn.add().statement(Alice, DC.PUBLISHER, literal("Alice"))
                    .statement(Bob, DC.PUBLISHER, literal("Bob")).statement(Alice, RDF.TYPE, FOAF.PERSON, Alice)
                    .statement(Alice, FOAF.MBOX, literal("mailto:alice@example.org"), Alice)
                    .statement(Bob, RDF.TYPE, FOAF.PERSON, Bob)
                    .statement(Bob, FOAF.MBOX, literal("mailto:bob@example.org"), Bob);

            // And we'll commit our changes with a commit message
            aConn.commit("Adding Alice and Bob");

            // Let's change Alice's email
            aConn.begin();
            aConn.remove().statements(Alice, FOAF.MBOX, literal("mailto:alice@example.org"), Alice);
            aConn.add().statement(Alice, FOAF.MBOX, literal("mailto:alice@another.example.org"), Alice);
            aConn.commit("Changing Alice's email");

            // Print the contents of the database and verify they are correct
            RDFWriters.write(aConn.get().context(Contexts.ALL).iterator(), RDFFormat.TRIG, aConn.namespaces(),
                    System.out);

            // We can still use the regular commit function from the `Connection` interface. This will also create a new
            // version along with its metadata but it will not have a commit message
            aConn.begin();
            aConn.add().statement(Charlie, DC.PUBLISHER, literal("Charlie"))
                    .statement(Charlie, RDF.TYPE, FOAF.PERSON, Charlie)
                    .statement(Charlie, FOAF.MBOX, literal("mailto:charlie@example.org"), Charlie);
            aConn.commit();

            RDFWriters.write(aConn.get().context(Contexts.ALL).iterator(), RDFFormat.TRIG, aConn.namespaces(),
                    System.out);

            // Let's try an example with the basic versioning API to list all versions
            Iteration<Version, StardogException> resultIt = aConn.versions().find().oldestFirst().iterator();

            try {
                System.out.println("\nVersions: ");
                while (resultIt.hasNext()) {
                    Version aVersion = resultIt.next();

                    System.out.println(aVersion);
                }
            } finally {
                // don't forget to close your iteration!
                resultIt.close();
            }

            // We're at a good point with our data; we think it's a 1.0 version, so let's tag it
            // so we can come back to this state if need be
            String aTag = "Release 1.0";

            // Get the head (current) revision, that's what we're going to tag.
            Version aHeadVersion = aConn.versions().getHead();
            aConn.tags().create(aHeadVersion, aTag);

            // Now you can see the effects of having created the tag
            System.out.println(aHeadVersion.getTags());

            System.out.println("Tagged " + aHeadVersion.getURI() + " " + aTag);

            // Let's show a quick example of how to print out the diffs between two versions as SPARQL Update queries
            // These are the versions we want to calculate the diff between
            Version aTo = aHeadVersion;
            Version aFrom = aHeadVersion.getRelativeVersion(-2);

            System.out.println();
            System.out.println("Print the diffs between HEAD-2 and HEAD:");

            try {
                // We'll write the diffs as SPARQL update queries
                UpdateHandler aWriter = new UpdateSPARQLWriter(System.out);
                aWriter.startRDF();

                Iterable<Namespace> aNamespaces = aConn.namespaces();
                for (Namespace aNamespace : aNamespaces) {
                    aWriter.handleNamespace(aNamespace.getPrefix(), aNamespace.getName());
                }

                UpdateSequence aDiff;

                final Set<Statement> aAdditions = Sets.newHashSet();
                final Set<Statement> aRemovals = Sets.newHashSet();

                Version aVersion = aFrom;
                // Iterate over the versions, grabbing the diff, and coalescing the changes
                while (aVersion != null) {
                    aDiff = aVersion.getDiff();
                    for (UpdateOperation aOp : aDiff) {
                        Set<Statement> aAddTarget = aOp.getType() == UpdateOperation.Type.ADD ? aAdditions
                                : aRemovals;
                        Set<Statement> aRemoveTarget = aOp.getType() == UpdateOperation.Type.REMOVE ? aAdditions
                                : aRemovals;
                        for (Statement aStmt : toStatements(aOp)) {
                            aAddTarget.add(aStmt);
                            aRemoveTarget.remove(aStmt);
                        }
                    }
                    aVersion = aVersion.equals(aTo) ? null : aVersion.getNext();
                }

                // now that we have all the changes, create the diff and write it out
                aDiff = Updates.newSequence(Updates.add(aAdditions), Updates.remove(aRemovals));

                Updates.handle(aDiff, aWriter);

                aWriter.endRDF();
            } catch (RDFHandlerException e) {
                throw new StardogException(e);
            }

            System.out.println();

            // Finally, we can revert. Let's undo our last two commits and print the current data
            // in the database so we can see that we're back to where we started.
            aConn.revert(aHeadVersion.getRelativeVersion(-2), aHeadVersion, "Undo last two commits");

            RDFWriters.write(aConn.get().context(Contexts.ALL).iterator(), RDFFormat.TRIG, aConn.namespaces(),
                    System.out);
        } finally {
            // Always close your connections when you're done
            aConn.close();
        }
    } finally {
        // You MUST stop the server if you've started it!
        aServer.stop();
    }
}
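
The diff-replay loop above keeps two mutable sets and, for every statement, adds to one while removing from the other, so an add that is later removed (or vice versa) cancels out and only the net change survives. A self-contained sketch of that coalescing pattern, with invented names and data:

    import com.google.common.collect.Sets;

    import java.util.Set;

    public class CoalesceChanges {
        static void apply(Set<String> target, Set<String> other, String stmt) {
            target.add(stmt);
            other.remove(stmt); // cancel any earlier opposite change
        }

        public static void main(String[] args) {
            Set<String> additions = Sets.newHashSet();
            Set<String> removals = Sets.newHashSet();

            apply(additions, removals, "s1"); // add s1
            apply(removals, additions, "s1"); // remove s1, cancelling the add
            apply(additions, removals, "s2"); // add s2

            System.out.println("net additions=" + additions + " net removals=" + removals);
        }
    }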

From source file: com.enonic.cms.core.search.builder.ContentIndexDataFieldValueSetFactory.java

public static Set<ContentIndexDataFieldAndValue> create(ContentIndexDataElement element) {
    final Set<ContentIndexDataFieldAndValue> contentIndexDataFieldAndValues = Sets.newHashSet();

    addStringFieldValue(element, contentIndexDataFieldAndValues);
    addNumericFieldValue(element, contentIndexDataFieldAndValues);
    addDateFieldValue(element, contentIndexDataFieldAndValues);

    if (!contentIndexDataFieldAndValues.isEmpty()) {
        addOrderbyValue(element, contentIndexDataFieldAndValues);
    }

    return contentIndexDataFieldAndValues;
}
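
The factory above creates the result set first and threads the same mutable instance through several helper methods that fill it in. A minimal sketch of that accumulator-parameter style, with invented helpers:

    import com.google.common.collect.Sets;

    import java.util.Set;

    public class AccumulatorFactory {
        public static Set<String> create(String element) {
            // Build the result up front, then let helpers populate it.
            Set<String> result = Sets.newHashSet();
            addLowerCase(element, result);
            addUpperCase(element, result);
            return result;
        }

        static void addLowerCase(String e, Set<String> out) {
            out.add(e.toLowerCase());
        }

        static void addUpperCase(String e, Set<String> out) {
            out.add(e.toUpperCase());
        }

        public static void main(String[] args) {
            System.out.println(create("Guava")); // prints guava and GUAVA in some order
        }
    }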

From source file: nxminetilities.modules.AdditionalFacades.java

public static void LoadFacades(String facadeIdList) {
    Set<Integer> facadeBlockIds = Sets.newHashSet();
    for (String id : facadeIdList.trim().split("\\s*,\\s*")) {
        try {
            facadeBlockIds.add(Integer.parseInt(id));
        } catch (Exception e) {
            continue;
        }
    }

    // Add nxMinetilities blocks
    if (Minetilities.bColouredStone.blockID != 0) {
        facadeBlockIds.add(Minetilities.bColouredStone.blockID);
    }
    if (Minetilities.bColouredBrick.blockID != 0) {
        facadeBlockIds.add(Minetilities.bColouredBrick.blockID);
    }
    if (Minetilities.bColouredChiselledStone.blockID != 0) {
        facadeBlockIds.add(Minetilities.bColouredChiselledStone.blockID);
    }

    for (int blockId : facadeBlockIds) {
        ItemStack is = new ItemStack(blockId, 1, -1);

        try {
            if (is.getHasSubtypes()) {
                Set<String> names = Sets.newHashSet();
                for (int meta = 0; meta <= 15; meta++) { // block metadata values run 0-15
                    ItemStack metaIs = new ItemStack(blockId, 1, meta);
                    if (!Strings.isNullOrEmpty(metaIs.getItemName()) && names.add(metaIs.getItemName())) {
                        FacadeHelper.addBuildcraftFacade(metaIs);
                        facadeCount++;
                    }
                }
            } else {
                FacadeHelper.addBuildcraftFacade(is);
                facadeCount++;
            }
        } catch (Exception e) {
            Minetilities.nxLog.severe("[AdditionalFacades] ERROR! Exception thrown adding block: " + blockId);
        }
    }

    Minetilities.nxLog.info("[AdditionalFacades] Successfully added " + facadeCount + " facades.");
}
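
The names.add(metaIs.getItemName()) test above leans on Set.add returning false for duplicates, which lets the set double as a first-seen guard. A tiny sketch of that idiom, with invented data:

    import com.google.common.collect.Sets;

    import java.util.Set;

    public class FirstSeenGuard {
        public static void main(String[] args) {
            Set<String> names = Sets.newHashSet();
            for (String name : new String[] { "stone", "stone", "brick" }) {
                // add() returns false for duplicates, so the body runs once per name.
                if (names.add(name)) {
                    System.out.println("registering " + name);
                }
            }
        }
    }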