Example usage for com.google.common.collect Maps newHashMap

List of usage examples for com.google.common.collect Maps newHashMap

Introduction

On this page you can find example usage for com.google.common.collect Maps newHashMap.

Prototype

public static <K, V> HashMap<K, V> newHashMap() 

Document

Creates a mutable, empty HashMap instance.
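
A minimal, self-contained sketch of typical usage (the class name NewHashMapDemo is illustrative, not taken from any of the files below). The type parameters K and V are inferred from the assignment target, so on Java 7 and later the call is equivalent to new HashMap<>():

import java.util.Map;

import com.google.common.collect.Maps;

public class NewHashMapDemo {
    public static void main(String[] args) {
        // K and V are inferred from the declared type of the variable.
        Map<String, Integer> counts = Maps.newHashMap();

        // The returned map is mutable, so entries can be added and removed freely.
        counts.put("apples", 3);
        counts.put("oranges", 5);
        counts.remove("apples");

        System.out.println(counts); // prints {oranges=5}
    }
}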

Usage

From source file:org.sbs.util.MapUtils.java

public static void main(String[] args) {
    System.out.println((null instanceof Object));
    HashMap<String, Object> m1 = Maps.newHashMap();
    m1.put("a", "1");
    m1.put("b", "x");
    m1.put("d", Lists.newArrayList(1, 2, 34));
    HashMap<String, Object> m2 = Maps.newHashMap();
    m2.put("a", "x");
    m2.put("b", "y");
    m2.put("c", "3");
    m1.put("d", Lists.newArrayList(1, 2, 34, 5));
    m1.put("e", Maps.newHashMap());
    System.out.println(mager(m1, m2));
    System.out.println(m1);
}

From source file:org.lisapark.octopus.util.xml.ConstraintUtils.java

public static void main(String[] args) {

    List<Map<String, Object>> data = getTestData();

    Map<String, List<String>> prodVarMap = createProdMap();
    Map<String, List<String>> machineVarMap = createMachineMap();

    // Make a map to collect profit and unit values
    Map<String, Integer> profitMap = Maps.newHashMap();
    Map<String, Integer> unitValueMap = Maps.newHashMap();
    Map<String, IntVar> intVarMap = Maps.newHashMap();

    // Create constrainer
    Constrainer model = new Constrainer("SIMPP");

    // Collect variables for products and machines
    //        for(Map<String, Object> map : data){
    //            // Use only data that is present in the Product and Machine lists
    //            if (prodVarMap.containsKey((String)map.get(PROD))
    //                    && machineVarMap.containsKey((String)map.get(MACHINE))) {
    //                String name = getName(map);
    //                profitMap.put(name, (Integer) map.get(PROFIT));
    //                unitValueMap.put(name, (Integer) map.get(UNIT_VALUE));
    //                
    //                // Create map of all Solution IntegerVariables
    //                IntVar intvar = (IntVar) model.addIntVar(LOW, HIGH, name);
    //                intVarMap.put(name, intvar);
    //                prodVarMap.get((String) map.get(PROD)).add(name);
    //                machineVarMap.get((String) map.get(MACHINE)).add(name);
    //            }
    //        }

    // Converting collections to arrays and IntegerVariables
    //======================================================================

    // Creating Cost criteria variable
    IntVar cost = (IntVar) model.addIntVar(1, HIGH_COST, COST);

    //        // Machines
    //        Map<String, Pair<int[], IntVar[]>> machineVarMapArray = 
    //                createVarMapArray(machineVarMap, unitValueMap, intVarMap);
    //        
    //        // Products
    //        Map<String, Pair<int[], IntVar[]>> prodVarMapArray = 
    //                createVarMapArray(prodVarMap, unitValueMap, intVarMap);
    //        
    //        // create profits and unitValues  arrays
    //        IntExpArray varMapArray =
    //                mergeVarMapArray(profitMap, intVarMap);

    // Now we are ready to make a model
    //======================================================================

    // Add products constraints to the model
    //        for(Entry entry : prodVarMapArray.entrySet()){
    //            String name     = (String) entry.getKey();
    //            int[] values    = (int[]) ((Pair)entry.getValue()).getFirst();
    //            IntVar[] intVars = (IntVar[]) ((Pair)entry.getValue()).getSecond();
    //            IntExpArray intexps = new IntExpArray(model, intVars);
    //            int lowB = (int) ((int)getProdMap().get(name) * .8);
    //            int uppB = (int) ((int)getProdMap().get(name) * 1.2);
    //            IntVar intVar = (IntVar) model.addIntVar(lowB, uppB, name);
    ////            model.addConstraint(Choco.eq(Choco.scalar(values, intVars), intVar));
    ////            int upRange = (int)getProdMap().get(name);
    ////            model.postConstraint(intVar.eq(intVars., values));
    //        }
    //        
    //        // Add machines constraints to the model
    //        for(Entry entry : machineVarMapArray.entrySet()){
    //            String name     = (String) entry.getKey();
    //            int[] values    = (int[]) ((Pair)entry.getValue()).getFirst();
    //            IntVar[] intVars = (IntVar[]) ((Pair)entry.getValue()).getSecond();
    //            model.addConstraint(Choco.leq(Choco.scalar(values, intVars), (int)getMachineMap().get(name)));
    //        }
    //        
    // Add fixed values constraints to the model
    // To Be Implemented (TBI)

    // Add cost criteria constraints
    // Plan should maximize profit
    //        int[] values =  varMapArray.getFirst();
    //        IntVar[] intVars = varMapArray.getSecond();
    //        model.addConstraint(Choco.geq(Choco.scalar(values, intVars), cost));
    //         
    //        Solver solver = new CPSolver();
    //        solver.read(model);
    ////        solver.setValIntIterator(new DecreasingDomain());
    //        solver.maximize(solver.getVar(cost), false);

}

From source file:ShuffleBlamedForParser.java

public static void main(String[] args) throws Exception {
    File inputFile = new File(args[0]);

    Preconditions.checkArgument(inputFile.exists(), "Please provide valid file; currentFile = " + inputFile);

    Pattern BLAMED_FOR = Pattern.compile("TaskAttemptImpl:(.*) blamed for read error from (.*) at");

    //HDP 2.3.4 (as the log file format changed)
    //BLAMED_FOR = Pattern.compile("TaskAttemptImpl\\|:(.*) blamed for read error from (.*) at");

    Pattern HOST_PATTERN = Pattern.compile("task=(.*), containerHost=(.*), localityMatchType");

    Map<String, String> hostMap = Maps.newHashMap();
    Map<String, Integer> fetcherFailure = Maps.newHashMap();

    try (BufferedReader reader = new BufferedReader(new FileReader(inputFile))) {
        while (reader.ready()) {
            String line = reader.readLine();
            if (line.contains("task") && line.contains("containerHost")) {
                Matcher matcher = HOST_PATTERN.matcher(line);
                while (matcher.find()) {
                    String attempt = matcher.group(1).trim();
                    String host = matcher.group(2).trim();
                    fetcherFailure.put(attempt, 0); //Just initializing
                    hostMap.put(attempt, host);
                }
            }
        }
    }

    Set<String> hosts = new HashSet<>(hostMap.values());
    System.out.println("Unique hosts : " + hosts.size());

    Set<String> srcMachines = new HashSet<String>();
    Set<String> fetcherMachines = new HashSet<String>();

    try (BufferedReader reader = new BufferedReader(new FileReader(inputFile))) {
        try (FileWriter writer = new FileWriter(new File(".", "output.txt"))) {
            while (reader.ready()) {
                String line = reader.readLine();
                if (line.contains("blamed for read error")) {
                    Matcher matcher = BLAMED_FOR.matcher(line);
                    while (matcher.find()) {
                        String srcAttempt = matcher.group(1).trim();
                        String fetcherAttempt = matcher.group(2).trim();
                        fetcherFailure.put(fetcherAttempt, fetcherFailure.get(fetcherAttempt) + 1);
                        if (hostMap.get(srcAttempt) == null) {
                            System.out.println("ISSUE");
                        }
                        String s = "src=" + srcAttempt + ", srcMachine=" + hostMap.get(srcAttempt.trim())
                                + ", fetcher=" + fetcherAttempt + ", fetcherMachine="
                                + hostMap.get(fetcherAttempt.trim())
                                //+ ", size=" + hostMap.size()
                                + ", failure=" + fetcherFailure.get(fetcherAttempt);
                        srcMachines.add(hostMap.get(srcAttempt.trim()));
                        fetcherMachines.add(hostMap.get(fetcherAttempt.trim()));
                        System.out.println(s);
                        writer.write(s + "\n");
                    }
                }
            }
        }
    }

    //Summary
    System.out.println();
    System.out.println();
    System.out.println("Source Machines being blamed for ");
    for (String src : srcMachines) {
        System.out.println("\t" + src);
    }
    System.out.println();
    System.out.println();

    System.out.println("Fetcher Machines");
    for (String fetcher : fetcherMachines) {
        System.out.println("\t" + fetcher);
    }
}

From source file:org.apache.curator.example.DiscoveryExample.java

public static void main(String[] args) throws Exception {
    // This method is scaffolding to get the example up and running

    TestingServer server = new TestingServer();
    CuratorFramework client = null;
    ServiceDiscovery<InstanceDetails> serviceDiscovery = null;
    Map<String, ServiceProvider<InstanceDetails>> providers = Maps.newHashMap();
    try {
        client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                new ExponentialBackoffRetry(1000, 3));
        client.start();

        JsonInstanceSerializer<InstanceDetails> serializer = new JsonInstanceSerializer<InstanceDetails>(
                InstanceDetails.class);
        serviceDiscovery = ServiceDiscoveryBuilder.builder(InstanceDetails.class).client(client).basePath(PATH)
                .serializer(serializer).build();
        serviceDiscovery.start();

        processCommands(serviceDiscovery, providers, client);
    } finally {
        for (ServiceProvider<InstanceDetails> cache : providers.values()) {
            CloseableUtils.closeQuietly(cache);
        }

        CloseableUtils.closeQuietly(serviceDiscovery);
        CloseableUtils.closeQuietly(client);
        CloseableUtils.closeQuietly(server);
    }
}

From source file:org.apache.streams.sysomos.example.SysomosMongo.java

public static void main(String[] args) {
    LOGGER.info(StreamsConfigurator.config.toString());

    Config sysomos = StreamsConfigurator.config.getConfig("sysomos");
    Config mongo = StreamsConfigurator.config.getConfig("mongo");

    SysomosConfiguration config = new SysomosConfiguration();
    config.setHeartbeatIds(sysomos.getStringList("heartbeatIds"));
    config.setApiBatchSize(sysomos.getLong("apiBatchSize"));
    config.setApiKey(sysomos.getString("apiKey"));
    config.setMinDelayMs(sysomos.getLong("minDelayMs"));
    config.setScheduledDelayMs(sysomos.getLong("scheduledDelayMs"));
    config.setMaxBatchSize(sysomos.getLong("maxBatchSize"));

    SysomosProvider provider = new SysomosProvider(config);
    MongoPersistWriter writer = new MongoPersistWriter();

    Map<String, Object> streamConfig = Maps.newHashMap();
    streamConfig.put(LocalStreamBuilder.TIMEOUT_KEY, 20 * 60 * 1000);
    StreamBuilder builder = new LocalStreamBuilder(1000, streamConfig);

    builder.newPerpetualStream("SysomosProvider", provider);
    builder.addStreamsProcessor("SysomosActivityConverter", new SysomosTypeConverter(), 10, "SysomosProvider");
    builder.addStreamsPersistWriter("mongo", writer, 1, "SysomosActivityConverter");
    builder.start();
}

From source file:org.carrot2.examples.core.LoadingAttributeValuesFromXml.java

public static void main(String[] args) throws Exception {
    InputStream xmlStream = null;
    try {
        xmlStream = LoadingAttributeValuesFromXml.class.getResourceAsStream("algorithm-lingo-attributes.xml");

        // Load attribute value sets from the XML stream
        final AttributeValueSets attributeValueSets = AttributeValueSets.deserialize(xmlStream);

        // Get the desired set of attribute values for use with further processing
        final Map<String, Object> defaultAttributes = attributeValueSets.getDefaultAttributeValueSet()
                .getAttributeValues();

        final Map<String, Object> fasterClusteringAttributes = attributeValueSets
                .getAttributeValueSet("faster-clustering").getAttributeValues();

        // Perform processing using the attribute values
        final Controller controller = ControllerFactory.createSimple();

        // Initialize the controller with one attribute set
        controller.init(fasterClusteringAttributes);

        // Perform clustering using the attribute set provided at initialization time
        Map<String, Object> requestAttributes = Maps.newHashMap();
        CommonAttributesDescriptor.attributeBuilder(requestAttributes)
                .documents(Lists.newArrayList(SampleDocumentData.DOCUMENTS_DATA_MINING)).query("data mining");
        ProcessingResult results = controller.process(requestAttributes, LingoClusteringAlgorithm.class);
        ConsoleFormatter.displayClusters(results.getClusters());

        // Perform clustering using some other attribute set, in this case the
        // one that is the default in the XML file.
        requestAttributes = CommonAttributesDescriptor.attributeBuilder(Maps.newHashMap(defaultAttributes))
                .documents(Lists.newArrayList(SampleDocumentData.DOCUMENTS_DATA_MINING))
                .query("data mining").map;

        results = controller.process(requestAttributes, LingoClusteringAlgorithm.class);
        ConsoleFormatter.displayClusters(results.getClusters());
    } finally {
        CloseableUtils.close(xmlStream);
    }
}

From source file:org.apache.streams.datasift.example.DatasiftInstagramElasticsearch.java

public static void main(String[] args) {
    LOGGER.info(StreamsConfigurator.config.toString());

    Config datasift = StreamsConfigurator.config.getConfig("datasift");
    DatasiftConfiguration datasiftConfiguration = DatasiftStreamConfigurator.detectConfiguration(datasift);

    Config elasticsearch = StreamsConfigurator.config.getConfig("elasticsearch");
    ElasticsearchWriterConfiguration elasticsearchWriterConfiguration = ElasticsearchConfigurator
            .detectWriterConfiguration(elasticsearch);

    Map<String, Object> streamConfig = Maps.newHashMap();
    streamConfig.put(LocalStreamBuilder.TIMEOUT_KEY, 20 * 60 * 1000 * 1000);

    StreamBuilder builder = new LocalStreamBuilder(100, streamConfig);

    DatasiftStreamProvider stream = new DatasiftStreamProvider(new DatasiftStreamProvider.DeleteHandler(),
            datasiftConfiguration);
    DatasiftTypeConverterProcessor datasiftTypeConverter = new DatasiftTypeConverterProcessor(Activity.class);
    RegexMentionsExtractor regexMentionsExtractor = new RegexMentionsExtractor();
    ElasticsearchPersistWriter writer = new ElasticsearchPersistWriter(elasticsearchWriterConfiguration);

    builder.newPerpetualStream("stream", stream);
    builder.addStreamsProcessor("converter", datasiftTypeConverter, 2, "stream");
    builder.addStreamsProcessor("RegexMentionsExtractor", regexMentionsExtractor, 2,
            "CleanAdditionalProperties");
    builder.addStreamsPersistWriter(ElasticsearchPersistWriter.STREAMS_ID, writer, 1, "RegexMentionsExtractor");
    builder.start();

}

From source file:com.sfxie.extension.zookeeper.curator.discover.DiscoveryExample.java

public static void main(String[] args) throws Exception {
    // This method is scaffolding to get the example up and running

    TestingServer server = new TestingServer("192.168.23.4", 2181);
    CuratorFramework client = null;
    ServiceDiscovery<InstanceDetails> serviceDiscovery = null;
    Map<String, ServiceProvider<InstanceDetails>> providers = Maps.newHashMap();
    try {
        client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                new ExponentialBackoffRetry(1000, 3));
        client.start();

        JsonInstanceSerializer<InstanceDetails> serializer = new JsonInstanceSerializer<InstanceDetails>(
                InstanceDetails.class);
        serviceDiscovery = ServiceDiscoveryBuilder.builder(InstanceDetails.class).client(client).basePath(PATH)
                .serializer(serializer).build();
        serviceDiscovery.start();

        processCommands(serviceDiscovery, providers, client);
    } finally {
        for (ServiceProvider<InstanceDetails> cache : providers.values()) {
            CloseableUtils.closeQuietly(cache);
        }

        CloseableUtils.closeQuietly(serviceDiscovery);
        CloseableUtils.closeQuietly(client);
        CloseableUtils.closeQuietly(server);
    }
}

From source file:org.apache.flink.streaming.connectors.elasticsearch.examples.ElasticsearchExample.java

public static void main(String[] args) throws Exception {

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<String> source = env.addSource(new SourceFunction<String>() {
        private static final long serialVersionUID = 1L;

        private volatile boolean running = true;

        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            for (int i = 0; i < 20 && running; i++) {
                ctx.collect("message #" + i);
            }
        }

        @Override
        public void cancel() {
            running = false;
        }
    });

    Map<String, String> config = Maps.newHashMap();
    // This instructs the sink to emit after every element, otherwise they would be buffered
    config.put(ElasticsearchSink.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");

    source.addSink(new ElasticsearchSink<>(config, new IndexRequestBuilder<String>() {
        @Override
        public IndexRequest createIndexRequest(String element, RuntimeContext ctx) {
            Map<String, Object> json = new HashMap<>();
            json.put("data", element);

            return Requests.indexRequest().index("my-index").type("my-type").source(json);
        }
    }));

    env.execute("Elasticsearch Example");
}

From source file:org.carrot2.examples.clustering.MoreConfigurationsOfOneAlgorithmInCachingController.java

@SuppressWarnings({ "unchecked" })
public static void main(String[] args) {
    /*
     * Create a controller that caches all documents.
     */
    final Controller controller = ControllerFactory.createCachingPooling(IDocumentSource.class);

    /*
     * You can define global values for some attributes. These will apply to all
     * configurations we will define below, unless the specific configuration
     * overrides the global attributes.
     */
    final Map<String, Object> globalAttributes = new HashMap<String, Object>();

    CompletePreprocessingPipelineDescriptor.attributeBuilder(globalAttributes).documentAssigner()
            .exactPhraseAssignment(false);

    /*
     * Now we will define two different configurations of the Lingo algorithm. One
     * will be optimized for speed of clustering, while the other will optimize the
     * quality of clusters.
     */
    final Map<String, Object> fastAttributes = Maps.newHashMap();
    LingoClusteringAlgorithmDescriptor.attributeBuilder(fastAttributes).desiredClusterCountBase(20)
            .matrixReducer().factorizationQuality(FactorizationQuality.LOW);

    CompletePreprocessingPipelineDescriptor.attributeBuilder(fastAttributes).caseNormalizer().dfThreshold(2);

    final Map<String, Object> accurateAttributes = Maps.newHashMap();
    LingoClusteringAlgorithmDescriptor.attributeBuilder(accurateAttributes).desiredClusterCountBase(40)
            .matrixReducer().factorizationQuality(FactorizationQuality.HIGH);

    CompletePreprocessingPipelineDescriptor.attributeBuilder(accurateAttributes).documentAssigner()
            .exactPhraseAssignment(true);

    CompletePreprocessingPipelineDescriptor.attributeBuilder(fastAttributes).caseNormalizer().dfThreshold(1);

    /*
     * We initialize the controller passing the global attributes and the two 
     * configurations. Notice that a configuration consists of the component
     * class (can be a document source as well as a clustering algorithm), its 
     * string identifier and attributes.
     */
    controller.init(globalAttributes,
            new ProcessingComponentConfiguration(LingoClusteringAlgorithm.class, "lingo-fast", fastAttributes),
            new ProcessingComponentConfiguration(LingoClusteringAlgorithm.class, "lingo-accurate",
                    accurateAttributes));

    /*
     * Now we can call the two different clustering configurations. Notice that 
     * because we now use string identifiers instead of classes, we pass the document
     * source class name rather than the class itself.
     */
    final Map<String, Object> attributes = new HashMap<String, Object>();
    CommonAttributesDescriptor.attributeBuilder(attributes).query("data mining");

    final ProcessingResult fastResult = controller.process(attributes, Bing3WebDocumentSource.class.getName(),
            "lingo-fast");
    ConsoleFormatter.displayClusters(fastResult.getClusters());

    final ProcessingResult accurateResult = controller.process(attributes,
            Bing3WebDocumentSource.class.getName(), "lingo-accurate");
    ConsoleFormatter.displayClusters(accurateResult.getClusters());
}