Example usage for java.util HashMap values

List of usage examples for java.util HashMap values

Introduction

On this page you can find example usage for java.util HashMap values().

Prototype

public Collection<V> values() 

Source Link

Document

Returns a Collection view of the values contained in this map.
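
A minimal, self-contained sketch (class and variable names are illustrative, not taken from any of the projects below) showing how the view returned by values() behaves:

import java.util.Collection;
import java.util.HashMap;

public class ValuesDemo {
    public static void main(String[] args) {
        HashMap<String, Integer> ages = new HashMap<>();
        ages.put("alice", 30);
        ages.put("bob", 25);

        // values() returns a live view backed by the map, not a copy.
        Collection<Integer> view = ages.values();
        System.out.println(view.size()); // 2

        // Changes to the map are visible through the view...
        ages.put("carol", 41);
        System.out.println(view.size()); // 3

        // ...and removing through the view removes the corresponding mapping.
        view.remove(25);
        System.out.println(ages.containsKey("bob")); // false
    }
}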

Usage

From source file:com.uber.hoodie.TestHoodieClient.java

public void testCommitWritesRelativePaths() throws Exception {

    HoodieWriteConfig cfg = getConfigBuilder().withAutoCommit(false).build();
    HoodieWriteClient client = new HoodieWriteClient(jsc, cfg);
    FileSystem fs = FSUtils.getFs();
    HoodieTableMetaClient metaClient = new HoodieTableMetaClient(fs, basePath);
    HoodieTable table = HoodieTable.getHoodieTable(metaClient, cfg);

    String commitTime = "000";
    List<HoodieRecord> records = dataGen.generateInserts(commitTime, 200);
    JavaRDD<HoodieRecord> writeRecords = jsc.parallelize(records, 1);

    JavaRDD<WriteStatus> result = client.bulkInsert(writeRecords, commitTime);

    assertTrue("Commit should succeed", client.commit(commitTime, result));
    assertTrue("After explicit commit, commit file should be created",
            HoodieTestUtils.doesCommitExist(basePath, commitTime));

    // Get parquet file paths from commit metadata
    String actionType = table.getCompactedCommitActionType();
    HoodieInstant commitInstant = new HoodieInstant(false, actionType, commitTime);
    HoodieTimeline commitTimeline = table.getCompletedCompactionCommitTimeline();
    HoodieCommitMetadata commitMetadata = HoodieCommitMetadata
            .fromBytes(commitTimeline.getInstantDetails(commitInstant).get());
    String basePath = table.getMetaClient().getBasePath();
    Collection<String> commitPathNames = commitMetadata.getFileIdAndFullPaths(basePath).values();

    // Read from commit file
    String filename = HoodieTestUtils.getCommitFilePath(basePath, commitTime);
    FileInputStream inputStream = new FileInputStream(filename);
    String everything = IOUtils.toString(inputStream);
    HoodieCommitMetadata metadata = HoodieCommitMetadata.fromJsonString(everything);
    HashMap<String, String> paths = metadata.getFileIdAndFullPaths(basePath);
    inputStream.close();

    // Compare values in both to make sure they are equal.
    for (String pathName : paths.values()) {
        assertTrue(commitPathNames.contains(pathName));
    }
}
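
The loop above checks that every path read back from the commit file appears in the timeline metadata. A minimal sketch of the same values()-to-values() comparison using only java.util types (the maps and paths are hypothetical stand-ins for the two getFileIdAndFullPaths results):

import java.util.Collection;
import java.util.HashMap;

public class CompareValuesSketch {
    public static void main(String[] args) {
        // Hypothetical fileId -> fullPath maps standing in for the commit metadata above.
        HashMap<String, String> fromTimeline = new HashMap<>();
        fromTimeline.put("f1", "/base/2016/03/15/f1.parquet");
        fromTimeline.put("f2", "/base/2016/03/15/f2.parquet");
        HashMap<String, String> fromCommitFile = new HashMap<>(fromTimeline);

        Collection<String> commitPathNames = fromTimeline.values();
        Collection<String> paths = fromCommitFile.values();

        // Equivalent to the per-element assertTrue loop in the test above.
        System.out.println(commitPathNames.containsAll(paths)); // true
    }
}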

From source file:org.apache.flink.contrib.streaming.state.RocksDBKeyedStateBackend.java

/**
 * For backwards compatibility, remove again later!
 */
@Deprecated
private void restoreOldSavepointKeyedState(Collection<KeyGroupsStateHandle> restoreState) throws Exception {

    if (restoreState.isEmpty()) {
        return;
    }

    Preconditions.checkState(1 == restoreState.size(), "Only one element expected here.");
    HashMap<String, RocksDBStateBackend.FinalFullyAsyncSnapshot> namedStates;
    try (FSDataInputStream inputStream = restoreState.iterator().next().openInputStream()) {
        namedStates = InstantiationUtil.deserializeObject(inputStream, userCodeClassLoader);
    }

    Preconditions.checkState(1 == namedStates.size(), "Only one element expected here.");
    DataInputView inputView = namedStates.values().iterator().next().stateHandle.getState(userCodeClassLoader);

    // clear k/v state information before filling it
    kvStateInformation.clear();

    // first get the column family mapping
    int numColumns = inputView.readInt();
    Map<Byte, StateDescriptor<?, ?>> columnFamilyMapping = new HashMap<>(numColumns);
    for (int i = 0; i < numColumns; i++) {
        byte mappingByte = inputView.readByte();

        ObjectInputStream ooIn = new InstantiationUtil.ClassLoaderObjectInputStream(
                new DataInputViewStream(inputView), userCodeClassLoader);

        StateDescriptor stateDescriptor = (StateDescriptor) ooIn.readObject();

        columnFamilyMapping.put(mappingByte, stateDescriptor);

        // this will fill in the k/v state information
        getColumnFamily(stateDescriptor, null);
    }

    // try and read until EOF
    try {
        // the EOFException will get us out of this...
        while (true) {
            byte mappingByte = inputView.readByte();
            ColumnFamilyHandle handle = getColumnFamily(columnFamilyMapping.get(mappingByte), null);
            byte[] keyAndNamespace = BytePrimitiveArraySerializer.INSTANCE.deserialize(inputView);

            ByteArrayInputStreamWithPos bis = new ByteArrayInputStreamWithPos(keyAndNamespace);

            K reconstructedKey = keySerializer.deserialize(new DataInputViewStreamWrapper(bis));
            int len = bis.getPosition();

            int keyGroup = (byte) KeyGroupRangeAssignment.assignToKeyGroup(reconstructedKey, numberOfKeyGroups);

            if (keyGroupPrefixBytes == 1) {
                // copy and override one byte (42) between key and namespace
                System.arraycopy(keyAndNamespace, 0, keyAndNamespace, 1, len);
                keyAndNamespace[0] = (byte) keyGroup;
            } else {
                byte[] largerKey = new byte[1 + keyAndNamespace.length];

                // write key-group
                largerKey[0] = (byte) ((keyGroup >> 8) & 0xFF);
                largerKey[1] = (byte) (keyGroup & 0xFF);

                // write key
                System.arraycopy(keyAndNamespace, 0, largerKey, 2, len);

                //skip one byte (42), write namespace
                System.arraycopy(keyAndNamespace, 1 + len, largerKey, 2 + len,
                        keyAndNamespace.length - len - 1);
                keyAndNamespace = largerKey;
            }

            byte[] value = BytePrimitiveArraySerializer.INSTANCE.deserialize(inputView);
            db.put(handle, keyAndNamespace, value);
        }
    } catch (EOFException e) {
        // expected
    }
}
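
The restore method relies on namedStates containing exactly one entry and pulls out that single value with values().iterator().next(). A minimal sketch of that idiom with plain java.util types (names and contents are illustrative):

import java.util.HashMap;

public class SingleValueSketch {
    public static void main(String[] args) {
        // Hypothetical single-entry map, mirroring namedStates above.
        HashMap<String, byte[]> namedStates = new HashMap<>();
        namedStates.put("only-state", new byte[] { 1, 2, 3 });

        if (namedStates.size() != 1) {
            throw new IllegalStateException("Only one element expected here.");
        }

        // With exactly one entry, values().iterator().next() yields that
        // entry's value without needing to know its key.
        byte[] state = namedStates.values().iterator().next();
        System.out.println(state.length); // 3
    }
}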

From source file:ece356.UserDBAO.java

public static ArrayList<DoctorData> queryDoctor(HashMap<String, String> doctorParam, String user)
        throws ClassNotFoundException, SQLException {
    Connection con = null;
    PreparedStatement pstmt = null;
    ArrayList<DoctorData> ret;
    try {
        con = getConnection();
        String query;
        boolean reviewByFriends = false;
        if (doctorParam.containsKey("reviewByFriends")) {

            if (doctorParam.get("reviewByFriends").equals("yes")) {

                query = "select * from doctorSearchView where username in (select username from doctorSearchView left join review on doctorSearchView.doc_spec_username = review.doc_username where doctorSearchView.patient_username in "
                        + "(select friend.sent_username as friend "
                        + "from friend where friend.isAccepted = 1 AND friend.recieved_username like '%" + user
                        + "%'" + "union " + "select friend.recieved_username as friend "
                        + "from friend where friend.isAccepted = 1 AND friend.sent_username like '%" + user
                        + "%'))";
                reviewByFriends = true;
            } else {
                query = "SELECT * FROM doctorSearchView ";
            }
            doctorParam.remove("reviewByFriends");
        } else {
            query = "SELECT * FROM doctorSearchView ";
            //pstmt = con.prepareStatement(query);

        }
        // Query for general doctor information
        ArrayList<String> keys = new ArrayList<String>(doctorParam.keySet());
        ArrayList<String> values = new ArrayList<String>(doctorParam.values());

        HashMap<Integer, Integer> h1 = new HashMap<>();
        int counter = 0;
        if (!keys.isEmpty()) {
            counter++;
            if (!reviewByFriends)
                query = query + " where";
            else
                query = query + " AND";

            for (String key : keys) {
                if (key.equals("averageRating") || key.equals("yearsLicensed")) {
                    query = query + " " + key + " >= ?";
                    query += " AND";
                    h1.put(counter, counter);
                } else if (key.equals("gender")) {
                    query = query + " " + key + " = ?";
                    query += " AND";
                    h1.put(counter, counter);
                } else if (key.equals("reviewByFriends")) {

                } else {
                    query = query + " " + key + " LIKE ?";
                    query += " AND";
                }
                counter++;
            }
            query = query.substring(0, query.length() - 4);
            System.out.println(query);
        }

        query += " group by first_name, last_name, gender, averageRating, numberOfReviews";

        pstmt = con.prepareStatement(query);

        if (!values.isEmpty()) {
            counter = 1;
            for (String value : values) {
                if (h1.containsKey(counter)) {
                    pstmt.setString(counter, value);
                } else {
                    pstmt.setString(counter, "%" + value + "%");
                }
                counter++;
            }
        }
        System.out.println(pstmt);
        ResultSet resultSet;
        resultSet = pstmt.executeQuery();

        ret = new ArrayList<>();

        while (resultSet.next()) {
            DoctorData doctor = new DoctorData();
            doctor.userName = resultSet.getString("username");
            doctor.firstName = resultSet.getString("first_name");
            doctor.middleInitial = resultSet.getString("middle_initial");
            doctor.lastName = resultSet.getString("last_name");
            doctor.gender = resultSet.getString("gender");
            doctor.averageRating = resultSet.getDouble("averageRating");
            doctor.numberOfReviews = resultSet.getInt("numberOfReviews");
            ret.add(doctor);
        }
        return ret;
    } catch (Exception e) {
        System.out.println("EXCEPTION:%% " + e);
    } finally {
        if (pstmt != null) {
            pstmt.close();
        }
        if (con != null) {
            con.close();
        }
    }
    return null;
}
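
The query builder above snapshots keySet() and values() into parallel lists and relies on their iteration orders lining up. For a HashMap that is not modified in between, the two views do traverse the same underlying table, but entrySet() makes the key/value pairing explicit. A minimal sketch of that alternative (parameter names are hypothetical):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;

public class PairedKeysValuesSketch {
    public static void main(String[] args) {
        // Hypothetical search parameters, like doctorParam above.
        HashMap<String, String> params = new HashMap<>();
        params.put("gender", "F");
        params.put("last_name", "Smith");

        // Build the parallel lists from entrySet() so the i-th key is
        // guaranteed to correspond to the i-th value.
        ArrayList<String> keys = new ArrayList<>();
        ArrayList<String> values = new ArrayList<>();
        for (Map.Entry<String, String> e : params.entrySet()) {
            keys.add(e.getKey());
            values.add(e.getValue());
        }
        System.out.println(keys);
        System.out.println(values);
    }
}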

From source file:com.adobe.acs.commons.exporters.impl.users.UsersExportServlet.java

/**
 * Generates a CSV file representing the User Data.
 *
 * @param request  the Sling HTTP Request object
 * @param response the Sling HTTP Response object
 * @throws IOException
 * @throws ServletException
 */
public void doGet(SlingHttpServletRequest request, SlingHttpServletResponse response)
        throws IOException, ServletException {
    response.setContentType("text/csv");
    response.setCharacterEncoding("UTF-8");

    final Parameters parameters = new Parameters(request);

    log.debug("Users to CSV Export Parameters: {}", parameters.toString());

    final Csv csv = new Csv();
    final Writer writer = response.getWriter();
    csv.writeInit(writer);

    final Iterator<Resource> resources = request.getResourceResolver().findResources(QUERY, Query.JCR_SQL2);

    // Use a map keyed by path to work around duplicate query results in AEM 6.1 GA
    HashMap<String, CsvUser> csvUsers = new LinkedHashMap<String, CsvUser>();

    while (resources.hasNext()) {
        try {
            Resource resource = resources.next();
            CsvUser csvUser = new CsvUser(resource);

            if (!csvUsers.containsKey(csvUser.getPath())
                    && checkGroups(parameters.getGroups(), parameters.getGroupFilter(), csvUser)) {
                csvUsers.put(csvUser.getPath(), csvUser);
            }

        } catch (RepositoryException e) {
            log.error("Unable to extract a user from resource.", e);
        }
    }

    List<String> columns = new ArrayList<String>();
    columns.add("Path");
    columns.add("User ID");
    columns.add("First Name");
    columns.add("Last Name");
    columns.add("E-mail Address");
    columns.add("Created Date");
    columns.add("Last Modified Date");

    for (String customProperty : parameters.getCustomProperties()) {
        columns.add(customProperty);
    }

    columns.add("All Groups");
    columns.add("Direct Groups");
    columns.add("Indirect Groups");

    csv.writeRow(columns.toArray(new String[columns.size()]));

    for (final CsvUser csvUser : csvUsers.values()) {
        List<String> values = new ArrayList<String>();
        try {
            values.add(csvUser.getPath());
            values.add(csvUser.getID());
            values.add(csvUser.getFirstName());
            values.add(csvUser.getLastName());
            values.add(csvUser.getEmail());
            values.add(csvUser.getCreatedDate());
            values.add(csvUser.getLastModifiedDate());

            for (String customProperty : parameters.getCustomProperties()) {
                values.add(csvUser.getCustomProperty(customProperty));
            }

            values.add(StringUtils.join(csvUser.getAllGroups(), GROUP_DELIMITER));
            values.add(StringUtils.join(csvUser.getDeclaredGroups(), GROUP_DELIMITER));
            values.add(StringUtils.join(csvUser.getTransitiveGroups(), GROUP_DELIMITER));

            csv.writeRow(values.toArray(new String[values.size()]));
        } catch (RepositoryException e) {
            log.error("Unable to export user to CSV report", e);
        }
    }

    csv.close();
}
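
The servlet deduplicates query results by keying them on path in a LinkedHashMap (assigned to a HashMap reference) and then writes one CSV row per entry in values(). A minimal sketch of that dedupe-then-iterate pattern without the Sling and ACS Commons types (paths and row values are made up):

import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedHashMap;

public class DedupeByKeySketch {
    public static void main(String[] args) {
        // Hypothetical duplicate query results, keyed by path as in the servlet above.
        String[] paths = { "/home/users/a", "/home/users/b", "/home/users/a" };

        // LinkedHashMap drops duplicates by key while preserving first-seen
        // order, so the rows come out in a stable order.
        HashMap<String, String> byPath = new LinkedHashMap<>();
        for (String path : paths) {
            if (!byPath.containsKey(path)) {
                byPath.put(path, "user at " + path);
            }
        }

        Collection<String> rows = byPath.values();
        System.out.println(rows.size()); // 2
        for (String row : rows) {
            System.out.println(row);
        }
    }
}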

From source file:org.apache.hadoop.raid.TestRaidPurge.java

/**
 * Test that higher-priority codecs can purge lower-priority codecs.
 */
public void testPurgePreference() throws Exception {
    LOG.info("Test testPurgePreference started");
    createClusters(true, 3);
    Utils.loadTestCodecs(conf, new Builder[] { Utils.getXORBuilder(), // priority 100
            Utils.getRSBuilder(), // priority 300
            Utils.getDirXORBuilder(), // priority 400
            Utils.getDirRSBuilder(), // priority 600
            Utils.getRSBuilder().setParityDir("/test-raidrs").setCodeId("testrs").simulatedBlockFixed(true) });
    mySetup(1, 1);
    Path dir = new Path("/user/test/raidtest");
    Path file1 = new Path(dir + "/file1");
    HashMap<String, PolicyInfo> infos = new HashMap<String, PolicyInfo>();
    for (Codec code : Codec.getCodecs()) {
        PolicyInfo pi = new PolicyInfo("testPurgePreference", conf);
        pi.setSrcPath("/user/test/raidtest");
        pi.setCodecId(code.id);
        pi.setDescription("test policy");
        pi.setProperty("targetReplication", "1");
        pi.setProperty("metaReplication", "1");
        infos.put(code.id, pi);
    }

    try {
        LOG.info("Create a old file");
        TestRaidNode.createOldFile(fileSys, file1, 1, 9, 8192L);
        FileStatus stat = fileSys.getFileStatus(file1);
        FileStatus dirStat = fileSys.getFileStatus(dir);
        HashMap<String, Path> parityFiles = new HashMap<String, Path>();
        // Create the parity files.
        LOG.info("Start Raiding");
        for (PolicyInfo pi : infos.values()) {
            Codec code = Codec.getCodec(pi.getCodecId());
            FileStatus fsStat = (code.isDirRaid) ? dirStat : stat;
            RaidNode.doRaid(conf, pi, fsStat, new RaidNode.Statistics(), Reporter.NULL);
            Path parity = RaidNode.getOriginalParityFile(new Path(code.parityDirectory), fsStat.getPath());
            assertTrue(fileSys.exists(parity));
            parityFiles.put(pi.getCodecId(), parity);
        }
        LOG.info("Finished Raiding");
        // Check purge of a single parity file.
        PurgeMonitor purgeMonitor = new PurgeMonitor(conf, null, null);
        LOG.info("Purge dir-rs");
        purgeMonitor.purgeCode(Codec.getCodec("dir-rs"));
        // Calling purge under the Dir-RS path has no effect.
        assertTrue(fileSys.exists(parityFiles.get("testrs")));
        assertTrue(fileSys.exists(parityFiles.get("dir-rs")));
        assertTrue(fileSys.exists(parityFiles.get("rs")));
        assertTrue(fileSys.exists(parityFiles.get("dir-xor")));
        assertTrue(fileSys.exists(parityFiles.get("xor")));

        LOG.info("Purge rs");
        purgeMonitor.purgeCode(Codec.getCodec("rs"));
        // Calling purge under the rs path will delete rs
        assertTrue(fileSys.exists(parityFiles.get("testrs")));
        assertTrue(fileSys.exists(parityFiles.get("dir-rs")));
        assertFalse(fileSys.exists(parityFiles.get("rs")));
        assertTrue(fileSys.exists(parityFiles.get("dir-xor")));
        assertTrue(fileSys.exists(parityFiles.get("xor")));

        LOG.info("Purge dir-xor");
        purgeMonitor.purgeCode(Codec.getCodec("dir-xor"));
        // Calling purge under the Dir-xor path will delete dir-xor
        assertTrue(fileSys.exists(parityFiles.get("testrs")));
        assertTrue(fileSys.exists(parityFiles.get("dir-rs")));
        assertFalse(fileSys.exists(parityFiles.get("dir-xor")));
        assertTrue(fileSys.exists(parityFiles.get("xor")));

        LOG.info("Purge xor");
        purgeMonitor.purgeCode(Codec.getCodec("xor"));
        assertFalse(fileSys.exists(parityFiles.get("xor")));
        assertTrue(fileSys.exists(parityFiles.get("testrs")));
        assertTrue(fileSys.exists(parityFiles.get("dir-rs")));

        LOG.info("delete dir-rs parity file");
        fileSys.delete(parityFiles.get("dir-rs"), true);
        assertFalse(fileSys.exists(parityFiles.get("dir-rs")));

        //Recreate RS and Dir-XOR
        LOG.info("Raid rs");
        RaidNode.doRaid(conf, infos.get("rs"), stat, new RaidNode.Statistics(), Reporter.NULL);
        assertTrue(fileSys.exists(parityFiles.get("rs")));
        LOG.info("Raid dir-xor");
        RaidNode.doRaid(conf, infos.get("dir-xor"), dirStat, new RaidNode.Statistics(), Reporter.NULL);
        assertTrue(fileSys.exists(parityFiles.get("dir-xor")));
        LOG.info("Raid xor");
        RaidNode.doRaid(conf, infos.get("xor"), stat, new RaidNode.Statistics(), Reporter.NULL);
        assertTrue(fileSys.exists(parityFiles.get("xor")));

        LOG.info("Purge dir-xor");
        purgeMonitor.purgeCode(Codec.getCodec("dir-xor"));
        // Calling purge under the Dir-XOR path succeeds
        assertTrue(fileSys.exists(parityFiles.get("testrs")));
        assertTrue(fileSys.exists(parityFiles.get("rs")));
        assertTrue(fileSys.exists(parityFiles.get("dir-xor")));
        assertTrue(fileSys.exists(parityFiles.get("xor")));

        LOG.info("Purge rs");
        purgeMonitor.purgeCode(Codec.getCodec("rs"));
        // Calling purge under the rs path succeeds
        assertTrue(fileSys.exists(parityFiles.get("testrs")));
        assertFalse(fileSys.exists(parityFiles.get("rs")));
        assertTrue(fileSys.exists(parityFiles.get("dir-xor")));
        assertTrue(fileSys.exists(parityFiles.get("xor")));

        LOG.info("Purge testrs");
        purgeMonitor.purgeCode(Codec.getCodec("testrs"));
        // Calling purge under the testrs path succeeds
        assertFalse(fileSys.exists(parityFiles.get("testrs")));
        assertTrue(fileSys.exists(parityFiles.get("dir-xor")));
        assertTrue(fileSys.exists(parityFiles.get("xor")));

        LOG.info("delete dir-xor parity file");
        fileSys.delete(parityFiles.get("dir-xor"), true);
        assertFalse(fileSys.exists(parityFiles.get("dir-xor")));

        LOG.info("Raid rs");
        RaidNode.doRaid(conf, infos.get("rs"), stat, new RaidNode.Statistics(), Reporter.NULL);
        assertTrue(fileSys.exists(parityFiles.get("rs")));

        LOG.info("Purge xor");
        purgeMonitor.purgeCode(Codec.getCodec("xor"));
        assertTrue(fileSys.exists(parityFiles.get("rs")));
        assertFalse(fileSys.exists(parityFiles.get("xor")));

        LOG.info("delete rs");
        fileSys.delete(parityFiles.get("rs"), true);
        assertFalse(fileSys.exists(parityFiles.get("testrs")));
        LOG.info("Raid testrs");
        RaidNode.doRaid(conf, infos.get("testrs"), stat, new RaidNode.Statistics(), Reporter.NULL);
        assertTrue(fileSys.exists(parityFiles.get("testrs")));
        LOG.info("Raid xor");
        RaidNode.doRaid(conf, infos.get("xor"), stat, new RaidNode.Statistics(), Reporter.NULL);
        assertTrue(fileSys.exists(parityFiles.get("xor")));
        LOG.info("Purge xor");
        purgeMonitor.purgeCode(Codec.getCodec("xor"));
        assertTrue(fileSys.exists(parityFiles.get("testrs")));
        assertFalse(fileSys.exists(parityFiles.get("xor")));
        LOG.info("delete testrs");
        fileSys.delete(parityFiles.get("testrs"), true);

        // The following is har related stuff

        Path rsParity = parityFiles.get("rs");
        Path xorParity = parityFiles.get("xor");
        PolicyInfo infoXor = infos.get("xor");
        PolicyInfo infoRs = infos.get("rs");
        // Now check the purge of a parity har.
        // Delete the RS parity for now.
        fileSys.delete(rsParity, true);
        // Recreate the XOR parity.
        Path xorHar = new Path("/raid", "user/test/raidtest/raidtest" + RaidNode.HAR_SUFFIX);
        RaidNode.doRaid(conf, infoXor, stat, new RaidNode.Statistics(), Reporter.NULL);
        assertTrue(fileSys.exists(xorParity));
        assertFalse(fileSys.exists(xorHar));

        // Create the har.
        long cutoff = System.currentTimeMillis();
        // create an instance of the RaidNode
        Configuration localConf = new Configuration(conf);
        RaidNode cnode = RaidNode.createRaidNode(localConf);
        FileStatus raidStat = fileSys.getFileStatus(new Path("/raid"));
        cnode.recurseHar(Codec.getCodec("xor"), fileSys, raidStat, "/raid", fileSys, cutoff,
                Codec.getCodec(infoXor.getCodecId()).tmpHarDirectory);

        // Call purge to get rid of the parity file. The har should remain.
        purgeMonitor.purgeCode(Codec.getCodec("xor"));
        // XOR har should exist but xor parity file should have been purged.
        assertFalse(fileSys.exists(xorParity));
        assertTrue(fileSys.exists(xorHar));

        // Now create the RS parity.
        RaidNode.doRaid(conf, infoRs, stat, new RaidNode.Statistics(), Reporter.NULL);
        purgeMonitor.purgeCode(Codec.getCodec("xor"));
        // XOR har should get deleted.
        assertTrue(fileSys.exists(rsParity));
        assertFalse(fileSys.exists(xorParity));
        assertFalse(fileSys.exists(xorHar));
        LOG.info("Test testPurgePreference completed");
    } finally {
        stopClusters();
    }
}

From source file:org.apache.axis.wsdl.toJava.JavaGeneratorFactory.java

/**
 * setFaultContext:
 * Processes the symbol table and sets the COMPLEX_TYPE_FAULT
 * on each TypeEntry that is a complexType and is referenced in
 * a fault message.  TypeEntries that are the base or derived
 * from such a TypeEntry are also marked with COMPLEX_TYPE_FAULT.
 * The containing MessageEntry is marked with COMPLEX_TYPE_FAULT, and
 * all MessageEntries for faults are tagged with the
 * EXCEPTION_CLASS_NAME variable, which indicates the java exception
 * class name.
 *
 * @param symbolTable SymbolTable
 */
private void setFaultContext(SymbolTable symbolTable) {

    Iterator it = symbolTable.getHashMap().values().iterator();

    while (it.hasNext()) {
        Vector v = (Vector) it.next();

        for (int i = 0; i < v.size(); ++i) {
            SymTabEntry entry = (SymTabEntry) v.elementAt(i);

            // Inspect each BindingEntry in the Symbol Table
            if (entry instanceof BindingEntry) {
                BindingEntry bEntry = (BindingEntry) entry;
                HashMap allOpFaults = bEntry.getFaults();
                Iterator ops = allOpFaults.values().iterator();

                // set the context for all faults for this binding.
                while (ops.hasNext()) {
                    ArrayList faults = (ArrayList) ops.next();

                    for (int j = 0; j < faults.size(); ++j) {
                        FaultInfo info = (FaultInfo) faults.get(j);

                        setFaultContext(info, symbolTable);
                    }
                }
            }
        }
    }
}

From source file:com.laxser.blitz.lama.provider.jdbc.JdbcDataAccess.java

private int[] batchUpdate2(String sql, Modifier modifier, List<Map<String, Object>> parametersList) {
    if (parametersList.size() == 0) {
        return new int[0];
    }
    // sql --> args[]
    HashMap<String, List<Object[]>> batches = new HashMap<String, List<Object[]>>();
    // sql --> named args
    HashMap<String, List<Map<String, Object>>> batches2 = new HashMap<String, List<Map<String, Object>>>();
    // sql --> [2,3,6,9] positions of parametersList
    Map<String, List<Integer>> positions = new HashMap<String, List<Integer>>();

    for (int i = 0; i < parametersList.size(); i++) {
        SQLInterpreterResult ir = interpret(sql, modifier, parametersList.get(i));
        List<Object[]> args = batches.get(ir.getSQL());
        List<Integer> position = positions.get(ir.getSQL());
        List<Map<String, Object>> maplist = batches2.get(ir.getSQL());
        if (args == null) {
            args = new LinkedList<Object[]>();
            batches.put(ir.getSQL(), args);
            position = new LinkedList<Integer>();
            positions.put(ir.getSQL(), position);
            maplist = new LinkedList<Map<String, Object>>();
            batches2.put(ir.getSQL(), maplist);
        }
        position.add(i);
        args.add(ir.getParameters());
        maplist.add(parametersList.get(i));
    }
    if (batches.size() == 1) {
        SQLThreadLocal.set(SQLType.WRITE, sql, modifier, parametersList);
        int[] updated = jdbc.batchUpdate(modifier, batches.keySet().iterator().next(),
                batches.values().iterator().next());
        SQLThreadLocal.remove();
        return updated;
    }
    int[] batchUpdated = new int[parametersList.size()];
    for (Map.Entry<String, List<Object[]>> batch : batches.entrySet()) {
        String batchSQL = batch.getKey();
        List<Object[]> values = batch.getValue();
        List<Map<String, Object>> map = batches2.get(batchSQL);
        SQLThreadLocal.set(SQLType.WRITE, sql, modifier, map);
        int[] updated = jdbc.batchUpdate(modifier, batchSQL, values);
        SQLThreadLocal.remove();
        List<Integer> position = positions.get(batchSQL);
        int i = 0;
        for (Integer p : position) {
            batchUpdated[p] = updated[i++];
        }
    }
    return batchUpdated;

}
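
The batch update groups parameter sets by the SQL string they interpret to, then runs one JDBC batch per entry via the map's values and entries. A minimal sketch of that group-by-key pattern using only java.util (the SQL strings and arguments are invented):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

public class GroupBySqlSketch {
    public static void main(String[] args) {
        // Hypothetical (sql, argument) pairs, mirroring the per-statement batches above.
        String[][] work = {
                { "UPDATE t SET a = ?", "1" },
                { "UPDATE t SET b = ?", "2" },
                { "UPDATE t SET a = ?", "3" },
        };

        // Group arguments under the SQL they belong to; computeIfAbsent replaces
        // the explicit "if (args == null)" bookkeeping in the method above.
        HashMap<String, List<String>> batches = new HashMap<>();
        for (String[] w : work) {
            batches.computeIfAbsent(w[0], k -> new ArrayList<>()).add(w[1]);
        }

        // One batch per distinct SQL string.
        for (List<String> args : batches.values()) {
            System.out.println(args);
        }
    }
}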

From source file:com.fluidops.iwb.provider.XMLProvider.java

@Override
public void gather(final List<Statement> res) throws Exception {
    HashMap<String, MappingRule> mappingRules = initializeGather();

    Document doc = null;
    if (config.dataSource != null) {

        TreeDataSource ds = config.lookupAndRefreshDataSource(TreeDataSource.class);
        doc = ds.getDocument();

    } else {
        // legacy support

        // load XML in DOM
        InputStream in = null;

        try {
            in = getInputStream();
            doc = getDocument(in);
        } finally {
            IOUtils.closeQuietly(in);
        }
    }

    // execute mapping rules in specification one by one
    for (MappingRule mr : mappingRules.values())
        processMappingRule(res, mappingRules, doc, mr);
}

From source file:org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.java

/**
 * Remove stale storages from storageMap. We must not remove any storages
 * as long as they have associated block replicas.
 */
private void pruneStorageMap(final StorageReport[] reports) {
    synchronized (storageMap) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Number of storages reported in heartbeat=" + reports.length
                    + "; Number of storages in storageMap=" + storageMap.size());
        }

        HashMap<String, DatanodeStorageInfo> excessStorages;

        // Init excessStorages with all known storages.
        excessStorages = new HashMap<String, DatanodeStorageInfo>(storageMap);

        // Remove storages that the DN reported in the heartbeat.
        for (final StorageReport report : reports) {
            excessStorages.remove(report.getStorage().getStorageID());
        }

        // For each remaining storage, remove it if there are no associated
        // blocks.
        for (final DatanodeStorageInfo storageInfo : excessStorages.values()) {
            try {
                if (storageInfo.numBlocks() == 0) {
                    storageMap.remove(storageInfo.getStorageID());
                    LOG.info("Removed storage " + storageInfo + " from DataNode" + this);
                } else if (LOG.isDebugEnabled()) {
                    // This can occur until all block reports are received.
                    LOG.debug("Deferring removal of stale storage " + storageInfo + " with "
                            + storageInfo.numBlocks() + " blocks");
                }
            } catch (IOException e) {
                // Skip for a bit
                LOG.warn(e, e);
            }
        }
    }
}
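
pruneStorageMap copy-constructs a HashMap from the live storage map, removes everything the heartbeat reported, and then iterates the leftover values() to decide what to drop. A minimal sketch of that copy, subtract, iterate pattern with plain java.util types (storage ids and block counts are made up):

import java.util.HashMap;
import java.util.Set;

public class PruneSketch {
    public static void main(String[] args) {
        // Hypothetical storageId -> block count map, standing in for storageMap above.
        HashMap<String, Integer> storageMap = new HashMap<>();
        storageMap.put("s1", 0);   // stale, no blocks
        storageMap.put("s2", 12);  // stale, still has blocks
        storageMap.put("s3", 7);   // reported in the heartbeat

        Set<String> reportedInHeartbeat = Set.of("s3");

        // Copy-construct the candidate set, subtract what was reported, then
        // walk the remaining values to decide remove-now versus defer.
        HashMap<String, Integer> excess = new HashMap<>(storageMap);
        excess.keySet().removeAll(reportedInHeartbeat);

        for (Integer blockCount : excess.values()) {
            System.out.println(blockCount == 0 ? "remove now" : "defer removal");
        }
    }
}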

From source file:com.knowbout.epg.processor.ScheduleParser.java

private void processChannels() {
    log.debug("Processing channels for schedule");
    HashMap<String, StationLineup> stationMap = new HashMap<String, StationLineup>();
    List<Station> stations = Station.selectAll();
    //Process all stations
    for (Station station : stations) {
        //HACK(CE): This is a workaround for a CW bug.
        if (!station.getCallSign().equals("KTLA")) {
            StationLineup sl = new StationLineup(station);
            StationLineup existing = stationMap.get(sl.getCallSign());
            //This will happen for affiliations because we are changing their callsigns
            //so that the east and west coast lineups are distinct.
            if (existing == null) {
                stationMap.put(sl.getCallSign(), sl);
            } else {
                existing.merge(sl);
                existing.setDualChannels(true);
            }
        }
    }

    Set<StationLineup> processed = new HashSet<StationLineup>();
    //Now check each station to see if it is on both coasts, or whether there are separate east and pacific stations
    for (StationLineup station : stationMap.values().toArray(new StationLineup[0])) {
        //If it might be a pacific station, but only on the pacific headend, then let's check
        if (!processed.contains(station)) {
            if (station.possiblePacificChannel()) {
                String eastCallSign = station.strippedPacificCallSign();
                StationLineup eastLineup = stationMap.get(eastCallSign);
                if (eastLineup != null) {
                    //Check to see if both are on both headends
                    if (eastLineup.isOnMultipleHeadends() && station.isOnMultipleHeadends()) {
                        //Add them both to the list since everyone has access to both channels
                        coveredStations.put(station.getCallSign(), station);
                        coveredStations.put(eastLineup.getCallSign(), eastLineup);
                        processed.add(station);
                        processed.add(eastLineup);
                    } else {
                        //Combine them into a single station.
                        //Now check to see if it is on both headends
                        eastLineup.merge(station);
                        if (eastLineup.isOnMultipleHeadends()) {
                            eastLineup.setDualChannels(true);
                            coveredStations.put(eastLineup.getCallSign(), eastLineup);

                        }
                        processed.add(station);
                        processed.add(eastLineup);
                    }
                } else {
                    //It might not really have been a pacific station; there are about 10 like that.
                    if (station.isOnMultipleHeadends()) {
                        coveredStations.put(station.getCallSign(), station);
                    }
                }
            }
        }
    }
    for (StationLineup station : stationMap.values().toArray(new StationLineup[0])) {
        if (!processed.contains(station)) {
            if (station.isOnMultipleHeadends()) {
                coveredStations.put(station.getCallSign(), station);
            }
            processed.add(station);
        }
    }
    log.debug("Finished processing channels for schedule");

}
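
Both loops above iterate over stationMap.values().toArray(new StationLineup[0]) rather than over the live view. A minimal sketch of what that snapshot buys you, using only java.util (the call signs are illustrative):

import java.util.HashMap;

public class SnapshotValuesSketch {
    public static void main(String[] args) {
        // Hypothetical callSign -> display name map, standing in for stationMap above.
        HashMap<String, String> stationMap = new HashMap<>();
        stationMap.put("KABC", "ABC Los Angeles");
        stationMap.put("WNBC", "NBC New York");

        // toArray(new String[0]) copies the current values out of the live view,
        // so the loop below is unaffected by any later changes to the map.
        String[] snapshot = stationMap.values().toArray(new String[0]);

        stationMap.put("KTLA", "The CW Los Angeles"); // not visible in the snapshot

        for (String name : snapshot) {
            System.out.println(name);
        }
        System.out.println(snapshot.length); // 2
    }
}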