Example usage for java.util SortedMap clear

Introduction

This page collects usage examples for the java.util.SortedMap.clear() method, taken from real open-source projects.

Prototype

void clear();

Document

Removes all of the mappings from this map (optional operation).
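
As a quick orientation before the project examples, here is a minimal, self-contained sketch (assuming TreeMap, the standard SortedMap implementation): clear() empties the map in place, and because subMap, headMap, and tailMap return live views, calling clear() on a view removes only that key range from the backing map. Several of the examples below rely on exactly this view behavior.

import java.util.SortedMap;
import java.util.TreeMap;

public class SortedMapClearDemo {
    public static void main(String[] args) {
        SortedMap<String, Integer> map = new TreeMap<>();
        map.put("a", 1);
        map.put("b", 2);
        map.put("c", 3);

        // Clearing a range view removes only that range from the backing map.
        map.headMap("c").clear();
        System.out.println(map); // prints {c=3}

        // clear() on the map itself removes every mapping.
        map.clear();
        System.out.println(map.isEmpty()); // prints true
    }
}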

Usage

From source file: com.palantir.paxos.PaxosConsensusFastTest.java

@Test
public void learnerRecovery() {
    for (int i = 0; i < NUM_POTENTIAL_LEADERS * 3; i++) {
        state.gainLeadership(i % NUM_POTENTIAL_LEADERS);
    }
    PaxosLearnerImpl learner = (PaxosLearnerImpl) ((DelegatingInvocationHandler) Proxy
            .getInvocationHandler(state.learner(0))).getDelegate();
    PaxosStateLog<PaxosValue> log = learner.log;
    SortedMap<Long, PaxosValue> cache = learner.state;
    log.truncate(log.getGreatestLogEntry());
    cache.clear();
    state.gainLeadership(0);
}

From source file: com.splicemachine.derby.utils.SpliceAdmin.java

public static void SYSCS_GET_REGION_SERVER_CONFIG_INFO(final String configRoot, final int showDisagreementsOnly,
        final ResultSet[] resultSet) throws StandardException, SQLException {
    Map<String, DatabaseVersion> dbVersions = EngineDriver.driver().dbAdministrator()
            .getClusterDatabaseVersions();
    boolean matchName = (configRoot != null && !configRoot.equals(""));
    int hostIdx = 0;
    String hostName;
    ResultSetBuilder rsBuilder;
    RowBuilder rowBuilder;

    try {
        rsBuilder = new ResultSetBuilder();
        rsBuilder.getColumnBuilder().addColumn("HOST_NAME", Types.VARCHAR, 32)
                .addColumn("CONFIG_NAME", Types.VARCHAR, 128).addColumn("CONFIG_VALUE", Types.VARCHAR, 128);

        rowBuilder = rsBuilder.getRowBuilder();
        // We arbitrarily pick the DatabaseVersion MBean even though
        // we do not fetch anything from it; we just use it as the
        // mechanism for establishing the region server context.
        SortedMap<String, String> configMap = new TreeMap<>();

        SConfiguration config = EngineDriver.driver().getConfiguration();
        Map<String, Object> configRootMap = config.getConfigMap();

        for (Map.Entry<String, DatabaseVersion> databaseVersion : dbVersions.entrySet()) {
            hostName = databaseVersion.getKey();
            configMap.clear();
            for (Map.Entry<String, Object> conf : configRootMap.entrySet()) {
                configMap.put(conf.getKey(), conf.getValue().toString());
            }

            // Iterate through sorted configs and add to result set
            Set<Entry<String, String>> configSet = configMap.entrySet();
            for (Entry<String, String> configEntry : configSet) {
                rowBuilder.getDvd(0).setValue(hostName);
                rowBuilder.getDvd(1).setValue(configEntry.getKey());
                rowBuilder.getDvd(2).setValue(configEntry.getValue());
                rowBuilder.addRow();
            }
            hostIdx++;
        }

        resultSet[0] = rsBuilder.buildResultSet((EmbedConnection) getDefaultConn());

        configMap.clear();

    } catch (StandardException se) {
        throw PublicAPI.wrapStandardException(se);
    }
}

From source file: edu.brown.statistics.AbstractStatistics.java

/**
 * Reads a stored map from the JSON object and populates the data
 * structure.
 * 
 * @param <U>
 * @param <V>
 * @param map
 * @param name
 * @param key_map
 * @param object
 * @throws JSONException
 */
@SuppressWarnings("unchecked")
protected <U, V> void readMap(SortedMap<U, V> map, String name, Map<String, U> key_map, Class<?> value_class,
        JSONObject object) throws JSONException {
    map.clear();
    JSONObject jsonObject = object.getJSONObject(name);
    Iterator<String> keys = jsonObject.keys();
    boolean first = true;
    while (keys.hasNext()) {
        String key_name = keys.next();
        U key_object = null;
        V value = null;

        if (value_class.equals(Long.class)) {
            value = (V) Long.valueOf(jsonObject.getLong(key_name));
        } else {
            value = (V) jsonObject.get(key_name);
        }
        key_object = key_map.get(key_name);
        if (key_object == null) {
            LOG.warn("Failed to retrieve key object '" + key_name + "' for " + name);
            if (LOG.isDebugEnabled() && first) {
                LOG.warn(jsonObject.toString(2));
            }
            first = false;
            continue;
        }
        map.put(key_object, value);
    } // FOR
    LOG.debug("Added " + map.size() + " values to " + name);
    return;
}

From source file: edu.umd.cfar.lamp.viper.util.Range.java

/**
 * Subsumes the Instants in the Span into this Range. 
 * @param start the first instant to add
 * @param stop the stop instant, exclusive
 * @return <code>true</code> iff the operation modified this Range
 */
public boolean add(Comparable start, Comparable stop) {
    Comparable old = (Comparable) spans.get(start);
    if (old != null && old.compareTo(stop) >= 0) {
        return false;
    }
    SortedMap head = spans.headMap(start);
    if (!head.isEmpty()) {
        Comparable oldStart = (Comparable) head.lastKey();
        Comparable oldEnd = (Comparable) head.get(oldStart);
        if (oldEnd.compareTo(stop) >= 0) {
            return false;
        } else {
            if (oldEnd.compareTo(start) >= 0) {
                start = oldStart;
                spans.remove(oldStart);
            }
        }
    }
    SortedMap sub = spans.subMap(start, stop);
    if (!sub.isEmpty()) {
        Comparable oldStart = (Comparable) sub.lastKey();
        Comparable oldEnd = (Comparable) sub.get(oldStart);
        if (oldStart.compareTo(start) == 0 && oldEnd.compareTo(stop) >= 0) {
            return false;
        } else if (oldEnd.compareTo(stop) > 0) {
            stop = oldEnd;
        }
        sub.clear();
    }
    if (spans.containsKey(stop)) {
        stop = (Comparable) spans.remove(stop);
    }
    spans.put(start, stop);
    return true;
}
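
The subtle point in add is that sub.clear() is invoked on the live subMap(start, stop) view, which deletes every overlapped span from the backing spans map in a single call. Below is a hedged, standalone sketch of the same coalescing idea, using int endpoints for brevity (Range itself works with arbitrary Comparables); SpanSet is an illustrative name, not part of the original class:

import java.util.SortedMap;
import java.util.TreeMap;

// Spans are stored as start -> end (end exclusive). Overlapping spans are
// deleted in bulk by clearing a subMap view of the backing TreeMap.
class SpanSet {
    private final TreeMap<Integer, Integer> spans = new TreeMap<>();

    void add(int start, int stop) {
        // Extend backwards if an earlier span reaches into [start, stop).
        SortedMap<Integer, Integer> head = spans.headMap(start);
        if (!head.isEmpty()) {
            int prevStart = head.lastKey();
            int prevEnd = head.get(prevStart);
            if (prevEnd >= start) {
                start = prevStart;
                stop = Math.max(stop, prevEnd);
            }
        }
        // Absorb every span starting inside [start, stop); one clear() on
        // the view removes them all from the backing map.
        SortedMap<Integer, Integer> sub = spans.subMap(start, stop);
        if (!sub.isEmpty()) {
            stop = Math.max(stop, sub.get(sub.lastKey()));
            sub.clear();
        }
        // Merge with a span that starts exactly where this one stops.
        if (spans.containsKey(stop)) {
            stop = spans.remove(stop);
        }
        spans.put(start, stop);
    }

    @Override
    public String toString() {
        return spans.toString();
    }
}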

From source file: edu.umd.cfar.lamp.viper.util.Range.java

/**
 * @see edu.umd.cfar.lamp.viper.util.IntervalIndexList#remove(java.lang.Comparable, java.lang.Comparable)
 */
public boolean remove(Comparable start, Comparable end) {
    boolean someFound = false;
    SortedMap head = spans.headMap(start);
    if (!head.isEmpty()) {
        Comparable oldStart = (Comparable) head.lastKey();
        Comparable oldEnd = (Comparable) head.get(oldStart);
        if (oldEnd.compareTo(start) > 0) {
            // if there is a span that goes into the span to
            // be removed, replace it.
            head.put(oldStart, start);
            someFound = true;
            int toCheck = oldEnd.compareTo(end);
            if (toCheck > 0) {
                // if the span to be removed is a strict subset 
                // of some existing span, you also have
                // to add back the end.
                spans.put(end, oldEnd);
                return true;
            } else if (toCheck == 0) {
                return true;
            }
        }
    }
    SortedMap sub = spans.subMap(start, end);
    if (!sub.isEmpty()) {
        someFound = true;
        Comparable oldStart = (Comparable) sub.lastKey();
        Comparable oldEnd = (Comparable) sub.get(oldStart);
        if (oldEnd.compareTo(end) > 0) {
            // if there is a span that starts during the
            // span to be removed and goes past the end,
            // we have to add back the difference.
            spans.put(end, oldEnd);
        }
        sub.clear();
    }
    return someFound;
}

From source file: org.alfresco.repo.domain.node.NodePropertyHelper.java

public Map<QName, Serializable> convertToPublicProperties(
        Map<NodePropertyKey, NodePropertyValue> propertyValues) {
    Map<QName, Serializable> propertyMap = new HashMap<QName, Serializable>(propertyValues.size(), 1.0F);
    // Shortcut
    if (propertyValues.size() == 0) {
        return propertyMap;
    }
    // We need to process the properties in order
    SortedMap<NodePropertyKey, NodePropertyValue> sortedPropertyValues = new TreeMap<NodePropertyKey, NodePropertyValue>(
            propertyValues);
    // A working map. Ordering is important.
    SortedMap<NodePropertyKey, NodePropertyValue> scratch = new TreeMap<NodePropertyKey, NodePropertyValue>();
    // Iterate (sorted) over the map entries and extract values with the same qname
    Long currentQNameId = Long.MIN_VALUE;
    Iterator<Map.Entry<NodePropertyKey, NodePropertyValue>> iterator = sortedPropertyValues.entrySet()
            .iterator();
    while (true) {
        Long nextQNameId = null;
        NodePropertyKey nextPropertyKey = null;
        NodePropertyValue nextPropertyValue = null;
        // Record the next entry's values
        if (iterator.hasNext()) {
            Map.Entry<NodePropertyKey, NodePropertyValue> entry = iterator.next();
            nextPropertyKey = entry.getKey();
            nextPropertyValue = entry.getValue();
            nextQNameId = nextPropertyKey.getQnameId();
        }
        // If the QName is going to change, and we have some entries to process, then process them.
        if (scratch.size() > 0 && (nextQNameId == null || !nextQNameId.equals(currentQNameId))) {
            QName currentQName = qnameDAO.getQName(currentQNameId).getSecond();
            PropertyDefinition currentPropertyDef = dictionaryService.getProperty(currentQName);
            // We have added something to the scratch properties but the qname has just changed
            Serializable collapsedValue = null;
            // We can shortcut if there is only one value
            if (scratch.size() == 1) {
                // There is no need to collapse list indexes
                collapsedValue = collapsePropertiesWithSameQNameAndListIndex(currentPropertyDef, scratch);
            } else {
                // There is more than one value so the list indexes need to be collapsed
                collapsedValue = collapsePropertiesWithSameQName(currentPropertyDef, scratch);
            }
            boolean forceCollection = false;
            // If the property is multi-valued then the output property must be a collection
            if (currentPropertyDef != null && currentPropertyDef.isMultiValued()) {
                forceCollection = true;
            } else if (scratch.size() == 1 && scratch.firstKey().getListIndex().intValue() > -1) {
                // This is to handle cases of collections where the property is d:any but not
                // declared as multiple.
                forceCollection = true;
            }
            if (forceCollection && collapsedValue != null && !(collapsedValue instanceof Collection<?>)) {
                // Can't use Collections.singletonList: ETHREEOH-1172
                ArrayList<Serializable> collection = new ArrayList<Serializable>(1);
                collection.add(collapsedValue);
                collapsedValue = collection;
            }

            // Store the value
            propertyMap.put(currentQName, collapsedValue);
            // Reset
            scratch.clear();
        }
        if (nextQNameId != null) {
            // Add to the current entries
            scratch.put(nextPropertyKey, nextPropertyValue);
            currentQNameId = nextQNameId;
        } else {
            // There is no next value to process
            break;
        }
    }
    // Done
    return propertyMap;
}
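
The scratch map is what keeps this loop single-pass: consecutive entries sharing a qname id are buffered, flushed into propertyMap when the id changes, and the buffer is reset with clear() rather than reallocated. The following is a minimal sketch of that group-by-consecutive-key pattern; all names are illustrative, not Alfresco's:

import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

public class GroupByDemo {
    public static void main(String[] args) {
        SortedMap<String, Integer> input = new TreeMap<>(Map.of(
                "a.1", 10, "a.2", 11, "b.1", 20));
        // Reusable buffer for the entries of the current group.
        SortedMap<String, Integer> scratch = new TreeMap<>();
        String currentGroup = null;
        for (Map.Entry<String, Integer> e : input.entrySet()) {
            String group = e.getKey().substring(0, 1);
            if (currentGroup != null && !group.equals(currentGroup)) {
                // Group key changed: flush the buffered entries...
                System.out.println(currentGroup + " -> " + scratch.values());
                scratch.clear(); // ...and reset the buffer for the next group.
            }
            scratch.put(e.getKey(), e.getValue());
            currentGroup = group;
        }
        if (!scratch.isEmpty()) {
            System.out.println(currentGroup + " -> " + scratch.values());
        }
    }
}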

From source file: org.apache.accumulo.server.tabletserver.ScanRunState.java

public static Pair<Text, KeyExtent> verifyTabletInformation(KeyExtent extent, TServerInstance instance,
        SortedMap<Key, Value> tabletsKeyValues, String clientAddress, ZooLock lock)
        throws AccumuloSecurityException, DistributedStoreException, AccumuloException {

    log.debug("verifying extent " + extent);
    if (extent.isRootTablet()) {
        return verifyRootTablet(extent, instance);
    }
    String tableToVerify = MetadataTable.ID;
    if (extent.isMeta())
        tableToVerify = RootTable.ID;

    List<ColumnFQ> columnsToFetch = Arrays
            .asList(new ColumnFQ[] { TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN,
                    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN,
                    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN,
                    TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN,
                    TabletsSection.ServerColumnFamily.TIME_COLUMN });

    ScannerImpl scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SystemCredentials.get(), tableToVerify,
            Authorizations.EMPTY);
    scanner.setRange(extent.toMetadataRange());

    TreeMap<Key, Value> tkv = new TreeMap<Key, Value>();
    for (Entry<Key, Value> entry : scanner)
        tkv.put(entry.getKey(), entry.getValue());

    // only populate map after success
    if (tabletsKeyValues == null) {
        tabletsKeyValues = tkv;
    } else {
        tabletsKeyValues.clear();
        tabletsKeyValues.putAll(tkv);
    }

    Text metadataEntry = extent.getMetadataEntry();

    Value dir = checkTabletMetadata(extent, instance, tabletsKeyValues, metadataEntry);
    if (dir == null)
        return null;

    Value oldPrevEndRow = null;
    for (Entry<Key, Value> entry : tabletsKeyValues.entrySet()) {
        if (TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
            oldPrevEndRow = entry.getValue();
        }
    }

    if (oldPrevEndRow != null) {
        SortedMap<Text, SortedMap<ColumnFQ, Value>> tabletEntries;
        tabletEntries = MetadataTableUtil.getTabletEntries(tabletsKeyValues, columnsToFetch);

        KeyExtent fke;
        try {
            fke = MetadataTableUtil.fixSplit(metadataEntry, tabletEntries.get(metadataEntry), instance,
                    SystemCredentials.get(), lock);
        } catch (IOException e) {
            log.error("Error fixing split " + metadataEntry);
            throw new AccumuloException(e.toString());
        }

        if (!fke.equals(extent)) {
            return new Pair<Text, KeyExtent>(null, fke);
        }

        // reread and reverify metadata entries now that metadata entries were fixed
        tabletsKeyValues.clear();
        return verifyTabletInformation(fke, instance, tabletsKeyValues, clientAddress, lock);
    }

    return new Pair<Text, KeyExtent>(new Text(dir.get()), null);
}

From source file: org.apache.accumulo.tserver.TabletServer.java

public static Pair<Text, KeyExtent> verifyTabletInformation(AccumuloServerContext context, KeyExtent extent,
        TServerInstance instance, SortedMap<Key, Value> tabletsKeyValues, String clientAddress, ZooLock lock)
        throws AccumuloSecurityException, DistributedStoreException, AccumuloException {

    log.debug("verifying extent " + extent);
    if (extent.isRootTablet()) {
        return verifyRootTablet(extent, instance);
    }
    String tableToVerify = MetadataTable.ID;
    if (extent.isMeta())
        tableToVerify = RootTable.ID;

    List<ColumnFQ> columnsToFetch = Arrays
            .asList(new ColumnFQ[] { TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN,
                    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN,
                    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN,
                    TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN,
                    TabletsSection.ServerColumnFamily.TIME_COLUMN });

    ScannerImpl scanner = new ScannerImpl(context, tableToVerify, Authorizations.EMPTY);
    scanner.setRange(extent.toMetadataRange());

    TreeMap<Key, Value> tkv = new TreeMap<Key, Value>();
    for (Entry<Key, Value> entry : scanner)
        tkv.put(entry.getKey(), entry.getValue());

    // only populate map after success
    if (tabletsKeyValues == null) {
        tabletsKeyValues = tkv;
    } else {
        tabletsKeyValues.clear();
        tabletsKeyValues.putAll(tkv);
    }

    Text metadataEntry = extent.getMetadataEntry();

    Value dir = checkTabletMetadata(extent, instance, tabletsKeyValues, metadataEntry);
    if (dir == null)
        return null;

    Value oldPrevEndRow = null;
    for (Entry<Key, Value> entry : tabletsKeyValues.entrySet()) {
        if (TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
            oldPrevEndRow = entry.getValue();
        }
    }

    if (oldPrevEndRow != null) {
        SortedMap<Text, SortedMap<ColumnFQ, Value>> tabletEntries;
        tabletEntries = MetadataTableUtil.getTabletEntries(tabletsKeyValues, columnsToFetch);

        KeyExtent fke;
        try {
            fke = MasterMetadataUtil.fixSplit(context, metadataEntry, tabletEntries.get(metadataEntry),
                    instance, lock);
        } catch (IOException e) {
            log.error("Error fixing split " + metadataEntry);
            throw new AccumuloException(e.toString());
        }

        if (!fke.equals(extent)) {
            return new Pair<Text, KeyExtent>(null, fke);
        }

        // reread and reverify metadata entries now that metadata entries were fixed
        tabletsKeyValues.clear();
        return verifyTabletInformation(context, fke, instance, tabletsKeyValues, clientAddress, lock);
    }

    return new Pair<Text, KeyExtent>(new Text(dir.get()), null);
}

From source file: org.apache.cassandra.hadoop.ColumnFamilyRecordReader.java

@Override
public boolean next(ByteBuffer key, SortedMap<ByteBuffer, IColumn> value) throws IOException {
    if (this.nextKeyValue()) {
        key.clear();
        key.put(this.getCurrentKey());
        key.rewind();

        value.clear();
        value.putAll(this.getCurrentValue());

        return true;
    }
    return false;
}
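
Here clear() followed by putAll() refreshes a caller-owned map in place, matching the contract of the old Hadoop mapred API in which key and value objects are reused across calls to next(). A minimal sketch of the same refresh-in-place idiom follows; the Source class and fetchNext method are hypothetical stand-ins:

import java.util.SortedMap;
import java.util.TreeMap;

class Source {
    private int batch = 0;

    /** Returns false when exhausted; otherwise overwrites 'out' in place. */
    boolean fetchNext(SortedMap<String, Integer> out) {
        if (batch >= 2) {
            return false;
        }
        TreeMap<String, Integer> next = new TreeMap<>();
        next.put("batch", batch++);
        out.clear();      // drop the previous batch's entries
        out.putAll(next); // install the new batch
        return true;
    }

    public static void main(String[] args) {
        Source s = new Source();
        SortedMap<String, Integer> row = new TreeMap<>();
        while (s.fetchNext(row)) {
            System.out.println(row);
        }
    }
}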

From source file: org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl.java

/**
 * It "atomically" copies all the hlogs queues from another region server and returns them all
 * sorted per peer cluster (appended with the dead server's znode).
 * @param znode pertaining to the region server to copy the queues from
 * @return HLog queues sorted per peer cluster
 */
private SortedMap<String, SortedSet<String>> copyQueuesFromRSUsingMulti(String znode) {
    SortedMap<String, SortedSet<String>> queues = new TreeMap<String, SortedSet<String>>();
    // hbase/replication/rs/deadrs
    String deadRSZnodePath = ZKUtil.joinZNode(this.queuesZNode, znode);
    List<String> peerIdsToProcess = null;
    List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>();
    try {
        peerIdsToProcess = ZKUtil.listChildrenNoWatch(this.zookeeper, deadRSZnodePath);
        if (peerIdsToProcess == null)
            return queues; // node already processed
        for (String peerId : peerIdsToProcess) {
            ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(peerId);
            if (!peerExists(replicationQueueInfo.getPeerId())) {
                LOG.warn("Peer " + peerId + " didn't exist, skipping the replay");
                // Protection against moving orphaned queues
                continue;
            }
            String newPeerId = peerId + "-" + znode;
            String newPeerZnode = ZKUtil.joinZNode(this.myQueuesZnode, newPeerId);
            // check the logs queue for the old peer cluster
            String oldClusterZnode = ZKUtil.joinZNode(deadRSZnodePath, peerId);
            List<String> hlogs = ZKUtil.listChildrenNoWatch(this.zookeeper, oldClusterZnode);
            if (hlogs == null || hlogs.size() == 0) {
                listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldClusterZnode));
                continue; // empty log queue.
            }
            // create the new cluster znode
            SortedSet<String> logQueue = new TreeSet<String>();
            queues.put(newPeerId, logQueue);
            ZKUtilOp op = ZKUtilOp.createAndFailSilent(newPeerZnode, HConstants.EMPTY_BYTE_ARRAY);
            listOfOps.add(op);
            // get the offset of the logs and set it to new znodes
            for (String hlog : hlogs) {
                String oldHlogZnode = ZKUtil.joinZNode(oldClusterZnode, hlog);
                byte[] logOffset = ZKUtil.getData(this.zookeeper, oldHlogZnode);
                LOG.debug("Creating " + hlog + " with data " + Bytes.toString(logOffset));
                String newLogZnode = ZKUtil.joinZNode(newPeerZnode, hlog);
                listOfOps.add(ZKUtilOp.createAndFailSilent(newLogZnode, logOffset));
                // add ops for deleting
                listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldHlogZnode));
                logQueue.add(hlog);
            }
            // add delete op for peer
            listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldClusterZnode));
        }
        // add delete op for dead rs
        listOfOps.add(ZKUtilOp.deleteNodeFailSilent(deadRSZnodePath));
        LOG.debug(" The multi list size is: " + listOfOps.size());
        ZKUtil.multiOrSequential(this.zookeeper, listOfOps, false);
        LOG.info("Atomically moved the dead regionserver logs. ");
    } catch (KeeperException e) {
        // Multi call failed; it looks like some other regionserver took away the logs.
        LOG.warn("Got exception in copyQueuesFromRSUsingMulti: ", e);
        queues.clear();
    } catch (InterruptedException e) {
        LOG.warn("Got exception in copyQueuesFromRSUsingMulti: ", e);
        queues.clear();
        Thread.currentThread().interrupt();
    }
    return queues;
}
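
Worth noting is the failure handling: if the ZooKeeper multi fails partway through, the partially built result is discarded with clear() so the caller sees an empty map rather than a half-moved queue set. Below is a tiny sketch of that all-or-nothing result pattern; copyAll and FakeStoreException are hypothetical stand-ins:

import java.util.SortedMap;
import java.util.TreeMap;

class AtomicCopy {
    static class FakeStoreException extends Exception {
    }

    static SortedMap<String, String> copyAll(String[] keys, boolean fail) {
        SortedMap<String, String> result = new TreeMap<>();
        try {
            for (String key : keys) {
                result.put(key, "copied-" + key);
            }
            if (fail) {
                throw new FakeStoreException();
            }
        } catch (FakeStoreException e) {
            result.clear(); // discard partial results; return an empty map
        }
        return result;
    }

    public static void main(String[] args) {
        String[] keys = { "q1", "q2" };
        System.out.println(copyAll(keys, false)); // {q1=copied-q1, q2=copied-q2}
        System.out.println(copyAll(keys, true));  // {}
    }
}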