Example usage for java.util NavigableMap entrySet

List of usage examples for java.util NavigableMap entrySet

Introduction

On this page you can find usage examples for java.util NavigableMap.entrySet().

Prototype

Set<Map.Entry<K, V>> entrySet();

Document

Returns a Set view of the mappings contained in this map.
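
A minimal, self-contained sketch of the prototype above (the class name and map contents are illustrative): entrySet() returns a Set view backed by the map, and for a NavigableMap the iteration follows ascending key order.

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class NavigableMapEntrySetExample {
    public static void main(String[] args) {
        NavigableMap<String, Integer> map = new TreeMap<>();
        map.put("banana", 2);
        map.put("apple", 1);
        map.put("cherry", 3);

        // The Set view reflects the map: iteration is in ascending key order.
        for (Map.Entry<String, Integer> entry : map.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
        // Prints: apple -> 1, banana -> 2, cherry -> 3
    }
}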

Usage

From source file:org.apache.hadoop.hbase.client.coprocessor.AggregationClient.java

/**
 * This is the client side interface/handler for calling the median method for a
 * given cf-cq combination. This method collects the necessary parameters
 * to compute the median and returns the median.
 * @param table
 * @param ci
 * @param scan
 * @return R the median
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> R median(final HTable table,
        ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) throws Throwable {
    Pair<NavigableMap<byte[], List<S>>, List<S>> p = getMedianArgs(table, ci, scan);
    byte[] startRow = null;
    byte[] colFamily = scan.getFamilies()[0];
    NavigableSet<byte[]> quals = scan.getFamilyMap().get(colFamily);
    NavigableMap<byte[], List<S>> map = p.getFirst();
    S sumVal = p.getSecond().get(0);
    S sumWeights = p.getSecond().get(1);
    double halfSumVal = ci.divideForAvg(sumVal, 2L);
    double movingSumVal = 0;
    boolean weighted = false;
    if (quals.size() > 1) {
        weighted = true;
        halfSumVal = ci.divideForAvg(sumWeights, 2L);
    }

    for (Map.Entry<byte[], List<S>> entry : map.entrySet()) {
        S s = weighted ? entry.getValue().get(1) : entry.getValue().get(0);
        double newSumVal = movingSumVal + ci.divideForAvg(s, 1L);
        if (newSumVal > halfSumVal)
            break; // we found the region with the median
        movingSumVal = newSumVal;
        startRow = entry.getKey();
    }
    // scan the region with median and find it
    Scan scan2 = new Scan(scan);
    // inherit stop row from method parameter
    if (startRow != null)
        scan2.setStartRow(startRow);
    ResultScanner scanner = null;
    try {
        int cacheSize = scan2.getCaching();
        if (!scan2.getCacheBlocks() || scan2.getCaching() < 2) {
            scan2.setCacheBlocks(true);
            cacheSize = 5;
            scan2.setCaching(cacheSize);
        }
        scanner = table.getScanner(scan2);
        Result[] results = null;
        byte[] qualifier = quals.pollFirst();
        // qualifier for the weight column
        byte[] weightQualifier = weighted ? quals.pollLast() : qualifier;
        R value = null;
        do {
            results = scanner.next(cacheSize);
            if (results != null && results.length > 0) {
                for (int i = 0; i < results.length; i++) {
                    Result r = results[i];
                    // retrieve weight
                    Cell kv = r.getColumnLatest(colFamily, weightQualifier);
                    R newValue = ci.getValue(colFamily, weightQualifier, kv);
                    S s = ci.castToReturnType(newValue);
                    double newSumVal = movingSumVal + ci.divideForAvg(s, 1L);
                    // see if we have moved past the median
                    if (newSumVal > halfSumVal) {
                        return value;
                    }
                    movingSumVal = newSumVal;
                    kv = r.getColumnLatest(colFamily, qualifier);
                    value = ci.getValue(colFamily, qualifier, kv);
                }
            }
        } while (results != null && results.length > 0);
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
    return null;
}
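
The entrySet() loop above walks region start keys in ascending order, accumulating per-region sums until the running total crosses half of the overall sum; the key of the last region before the crossing tells the client where to start scanning for the median. A stripped-down sketch of that pattern under simplified assumptions (plain long weights in place of the generic ColumnInterpreter machinery):

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class MedianBucketSketch {
    // Returns the key of the last bucket whose cumulative weight stays at or
    // below half of the total, i.e. the bucket just before the median bucket.
    static String lastBucketBeforeMedian(NavigableMap<String, Long> weightPerBucket) {
        long total = weightPerBucket.values().stream().mapToLong(Long::longValue).sum();
        double half = total / 2.0;
        double runningSum = 0;
        String result = null;
        for (Map.Entry<String, Long> entry : weightPerBucket.entrySet()) {
            double next = runningSum + entry.getValue();
            if (next > half) {
                break; // the median falls inside the current bucket
            }
            runningSum = next;
            result = entry.getKey();
        }
        return result;
    }

    public static void main(String[] args) {
        NavigableMap<String, Long> buckets = new TreeMap<>();
        buckets.put("regionA", 10L);
        buckets.put("regionB", 30L);
        buckets.put("regionC", 5L);
        System.out.println(lastBucketBeforeMedian(buckets)); // regionA
    }
}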

From source file:com.palantir.atlasdb.keyvalue.cassandra.CQLKeyValueService.java

@Override
public void multiPut(Map<String, ? extends Map<Cell, byte[]>> valuesByTable, final long timestamp)
        throws KeyAlreadyExistsException {
    Map<ResultSetFuture, String> resultSetFutures = Maps.newHashMap();
    for (Entry<String, ? extends Map<Cell, byte[]>> e : valuesByTable.entrySet()) {
        final String table = e.getKey();
        // We sort here because some key value stores are more efficient if you store adjacent keys together.
        NavigableMap<Cell, byte[]> sortedMap = ImmutableSortedMap.copyOf(e.getValue());

        Iterable<List<Entry<Cell, byte[]>>> partitions = partitionByCountAndBytes(sortedMap.entrySet(),
                getMultiPutBatchCount(), getMultiPutBatchSizeBytes(), table,
                CQLKeyValueServices.MULTIPUT_ENTRY_SIZING_FUNCTION);

        for (final List<Entry<Cell, byte[]>> p : partitions) {
            List<Entry<Cell, Value>> partition = Lists.transform(p,
                    new Function<Entry<Cell, byte[]>, Entry<Cell, Value>>() {
                        @Override
                        public Entry<Cell, Value> apply(Entry<Cell, byte[]> input) {
                            return Maps.immutableEntry(input.getKey(),
                                    Value.create(input.getValue(), timestamp));
                        }
                    });
            resultSetFutures.put(getPutPartitionResultSetFuture(table, partition, TransactionType.NONE), table);
        }
    }

    for (Entry<ResultSetFuture, String> result : resultSetFutures.entrySet()) {
        ResultSet resultSet;
        try {
            resultSet = result.getKey().getUninterruptibly();
            resultSet.all();
        } catch (Throwable t) {
            throw Throwables.throwUncheckedException(t);
        }
        CQLKeyValueServices.logTracedQuery(getPutQuery(result.getValue(), CassandraConstants.NO_TTL), resultSet,
                session, cqlStatementCache.NORMAL_QUERY);
    }
}
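
The NavigableMap-specific step above is ImmutableSortedMap.copyOf, which re-keys each batch so that sortedMap.entrySet() hands out entries in ascending key order before they are partitioned, matching the comment about adjacent keys. A plain-JDK sketch of the same idea (TreeMap stands in for Guava's ImmutableSortedMap, and the Cell/byte[] types are simplified to strings):

import java.util.HashMap;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class SortedBatchSketch {
    public static void main(String[] args) {
        Map<String, String> unordered = new HashMap<>();
        unordered.put("row3", "v3");
        unordered.put("row1", "v1");
        unordered.put("row2", "v2");

        // Sort once up front; entrySet() then yields adjacent keys together,
        // which some key-value stores can write more efficiently.
        NavigableMap<String, String> sorted = new TreeMap<>(unordered);
        for (Map.Entry<String, String> e : sorted.entrySet()) {
            System.out.println(e.getKey() + " = " + e.getValue()); // row1, row2, row3
        }
    }
}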

From source file:org.apache.hadoop.hbase.client.transactional.TransactionalAggregationClient.java

/**
 * This is the client side interface/handler for calling the median method for a
 * given cf-cq combination. This method collects the necessary parameters
 * to compute the median and returns the median.
 * @param table
 * @param ci
 * @param scan
 * @return R the median
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> R median(final long transactionId,
        final TransactionalTable table, ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) throws Throwable {
    Pair<NavigableMap<byte[], List<S>>, List<S>> p = getMedianArgs(transactionId, table, ci, scan);
    byte[] startRow = null;
    byte[] colFamily = scan.getFamilies()[0];
    NavigableSet<byte[]> quals = scan.getFamilyMap().get(colFamily);
    NavigableMap<byte[], List<S>> map = p.getFirst();
    S sumVal = p.getSecond().get(0);
    S sumWeights = p.getSecond().get(1);
    double halfSumVal = ci.divideForAvg(sumVal, 2L);
    double movingSumVal = 0;
    boolean weighted = false;
    if (quals.size() > 1) {
        weighted = true;
        halfSumVal = ci.divideForAvg(sumWeights, 2L);
    }

    for (Map.Entry<byte[], List<S>> entry : map.entrySet()) {
        S s = weighted ? entry.getValue().get(1) : entry.getValue().get(0);
        double newSumVal = movingSumVal + ci.divideForAvg(s, 1L);
        if (newSumVal > halfSumVal)
            break; // we found the region with the median
        movingSumVal = newSumVal;
        startRow = entry.getKey();
    }
    // scan the region with median and find it
    Scan scan2 = new Scan(scan);
    // inherit stop row from method parameter
    if (startRow != null)
        scan2.setStartRow(startRow);
    ResultScanner scanner = null;
    try {
        int cacheSize = scan2.getCaching();
        if (!scan2.getCacheBlocks() || scan2.getCaching() < 2) {
            scan2.setCacheBlocks(true);
            cacheSize = 5;
            scan2.setCaching(cacheSize);
        }
        scanner = table.getScanner(scan2);
        Result[] results = null;
        byte[] qualifier = quals.pollFirst();
        // qualifier for the weight column
        byte[] weightQualifier = weighted ? quals.pollLast() : qualifier;
        R value = null;
        do {
            results = scanner.next(cacheSize);
            if (results != null && results.length > 0) {
                for (int i = 0; i < results.length; i++) {
                    Result r = results[i];
                    // retrieve weight
                    Cell kv = r.getColumnLatest(colFamily, weightQualifier);
                    R newValue = ci.getValue(colFamily, weightQualifier, kv);
                    S s = ci.castToReturnType(newValue);
                    double newSumVal = movingSumVal + ci.divideForAvg(s, 1L);
                    // see if we have moved past the median
                    if (newSumVal > halfSumVal) {
                        return value;
                    }
                    movingSumVal = newSumVal;
                    kv = r.getColumnLatest(colFamily, qualifier);
                    value = ci.getValue(colFamily, qualifier, kv);
                }
            }
        } while (results != null && results.length > 0);
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
    return null;
}

From source file:org.lilyproject.repository.impl.HBaseTypeManager.java

private void extractSupertypes(Result result, Long version, RecordType recordType) {
    if (version != null) {
        NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> allVersionsMap = result.getMap();
        NavigableMap<byte[], NavigableMap<Long, byte[]>> supertypeVersionsMap = allVersionsMap
                .get(TypeCf.SUPERTYPE.bytes);
        if (supertypeVersionsMap != null) {
            for (Entry<byte[], NavigableMap<Long, byte[]>> entry : supertypeVersionsMap.entrySet()) {
                SchemaId supertypeId = new SchemaIdImpl(entry.getKey());
                Entry<Long, byte[]> ceilingEntry = entry.getValue().ceilingEntry(version);
                if (ceilingEntry != null) {
                    if (!isDeletedField(ceilingEntry.getValue())) {
                        recordType.addSupertype(supertypeId, Bytes.toLong(ceilingEntry.getValue()));
                    }
                }
            }
        }
    } else {
        NavigableMap<byte[], byte[]> supertypeMap = result.getFamilyMap(TypeCf.SUPERTYPE.bytes);
        if (supertypeMap != null) {
            for (Entry<byte[], byte[]> entry : supertypeMap.entrySet()) {
                if (!isDeletedField(entry.getValue())) {
                    recordType.addSupertype(new SchemaIdImpl(entry.getKey()), Bytes.toLong(entry.getValue()));
                }
            }
        }
    }
}
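
The per-qualifier maps returned by HBase's Result.getMap() keep timestamps in descending (newest-first) order, so ceilingEntry(version) in the loop above resolves to the newest cell at or below the requested version. A self-contained sketch of that lookup with a reverse-ordered TreeMap (keys and values are illustrative):

import java.util.Collections;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class VersionedLookupSketch {
    public static void main(String[] args) {
        // Timestamps sorted newest-first, as in Result.getMap().
        NavigableMap<Long, String> versions = new TreeMap<>(Collections.reverseOrder());
        versions.put(1L, "v1");
        versions.put(3L, "v3");
        versions.put(7L, "v7");

        // Under the reversed ordering, ceilingEntry(n) is the entry with the
        // largest timestamp <= n: the newest version visible at version n.
        Map.Entry<Long, String> e = versions.ceilingEntry(5L);
        System.out.println(e.getKey() + " -> " + e.getValue()); // 3 -> v3
    }
}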

From source file:org.lilyproject.repository.impl.HBaseTypeManager.java

private void extractFieldTypeEntries(Result result, Long version, RecordType recordType) {
    if (version != null) {
        NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> allVersionsMap = result.getMap();
        NavigableMap<byte[], NavigableMap<Long, byte[]>> fieldTypeEntriesVersionsMap = allVersionsMap
                .get(TypeCf.FIELDTYPE_ENTRY.bytes);
        if (fieldTypeEntriesVersionsMap != null) {
            for (Entry<byte[], NavigableMap<Long, byte[]>> entry : fieldTypeEntriesVersionsMap.entrySet()) {
                SchemaId fieldTypeId = new SchemaIdImpl(entry.getKey());
                Entry<Long, byte[]> ceilingEntry = entry.getValue().ceilingEntry(version);
                if (ceilingEntry != null) {
                    FieldTypeEntry fieldTypeEntry = decodeFieldTypeEntry(ceilingEntry.getValue(), fieldTypeId);
                    if (fieldTypeEntry != null) {
                        recordType.addFieldTypeEntry(fieldTypeEntry);
                    }
                }
            }
        }
    } else {
        NavigableMap<byte[], byte[]> versionableMap = result.getFamilyMap(TypeCf.FIELDTYPE_ENTRY.bytes);
        if (versionableMap != null) {
            for (Entry<byte[], byte[]> entry : versionableMap.entrySet()) {
                SchemaId fieldTypeId = new SchemaIdImpl(entry.getKey());
                FieldTypeEntry fieldTypeEntry = decodeFieldTypeEntry(entry.getValue(), fieldTypeId);
                if (fieldTypeEntry != null) {
                    recordType.addFieldTypeEntry(fieldTypeEntry);
                }
            }
        }
    }
}

From source file:com.eucalyptus.tests.awssdk.S3ListMpuTests.java

@Test
public void keyMarker() throws Exception {
    testInfo(this.getClass().getSimpleName() + " - keyMarker");

    try {
        int numKeys = 3 + random.nextInt(3); // 3-5 keys
        int numUploads = 3 + random.nextInt(3); // 3-5 uploads

        print("Number of keys: " + numKeys);
        print("Number of uploads per key: " + numUploads);

        // Generate some mpus
        TreeMap<String, List<String>> keyUploadIdMap = initiateMpusForMultipleKeys(s3ClientA, accountA, numKeys,
                numUploads, new String());

    // Starting with every key in ascending order, list the mpus using that key as the key marker and verify the results.
        for (String keyMarker : keyUploadIdMap.keySet()) {

            // Compute what the sorted mpus should look like
            NavigableMap<String, List<String>> tailMap = keyUploadIdMap.tailMap(keyMarker, false);

            // List mpus using the key marker and verify
            MultipartUploadListing listing = listMpu(s3ClientA, accountA, bucketName, keyMarker, null, null,
                    null, null, false);
            assertTrue(
                    "Expected " + (tailMap.size() * numUploads) + " mpu listings, but got "
                            + listing.getMultipartUploads().size(),
                    (tailMap.size() * numUploads) == listing.getMultipartUploads().size());

            Iterator<MultipartUpload> mpuIterator = listing.getMultipartUploads().iterator();

            for (Entry<String, List<String>> tailMapEntry : tailMap.entrySet()) {
                for (String uploadId : tailMapEntry.getValue()) {
                    MultipartUpload mpu = mpuIterator.next();
                    assertTrue("Expected key to be " + tailMapEntry.getKey() + ", but got " + mpu.getKey(),
                            mpu.getKey().equals(tailMapEntry.getKey()));
                    assertTrue("Expected upload ID to be " + uploadId + ", but got " + mpu.getUploadId(),
                            mpu.getUploadId().equals(uploadId));
                    verifyCommonElements(mpu);
                }
            }

            assertTrue("Expected mpu iterator to be empty", !mpuIterator.hasNext());
        }

    } catch (AmazonServiceException ase) {
        printException(ase);
        assertThat(false, "Failed to run keyMarker");
    }
}
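
keyUploadIdMap.tailMap(keyMarker, false) produces a NavigableMap view of everything strictly after the marker, and iterating its entrySet() visits those keys in ascending order, which is exactly the shape of a marker-based listing. A minimal sketch of the view (keys and upload IDs are illustrative):

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class KeyMarkerSketch {
    public static void main(String[] args) {
        TreeMap<String, String> uploadsByKey = new TreeMap<>();
        uploadsByKey.put("alpha", "upload-1");
        uploadsByKey.put("beta", "upload-2");
        uploadsByKey.put("gamma", "upload-3");

        // Exclusive tail view: keys strictly greater than the marker.
        NavigableMap<String, String> afterMarker = uploadsByKey.tailMap("alpha", false);
        for (Map.Entry<String, String> e : afterMarker.entrySet()) {
            System.out.println(e.getKey() + " -> " + e.getValue()); // beta, gamma
        }
    }
}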

From source file:org.trafodion.dtm.TmAuditTlog.java

public void getTransactionState(TransactionState ts) throws IOException {
    if (LOG.isTraceEnabled())
        LOG.trace("getTransactionState start; transid: " + ts.getTransactionId());

    // This request might be for a transaction not originating on this node, so we need to open
    // the appropriate Tlog
    HTableInterface unknownTransactionTable;
    long lvTransid = ts.getTransactionId();
    int lv_ownerNid = (int) (lvTransid >> 32);
    int lv_lockIndex = (int) (lvTransid & tLogHashKey);
    String lv_tLogName = new String(
            "TRAFODION._DTM_.TLOG" + String.valueOf(lv_ownerNid) + "_LOG_" + Integer.toHexString(lv_lockIndex));
    HConnection unknownTableConnection = HConnectionManager.createConnection(this.config);
    unknownTransactionTable = unknownTableConnection.getTable(TableName.valueOf(lv_tLogName));

    try {
        String transidString = new String(String.valueOf(lvTransid));
        Get g;
        long key = (((lvTransid & tLogHashKey) << tLogHashShiftFactor) + (lvTransid & 0xFFFFFFFF));
        if (LOG.isTraceEnabled())
            LOG.trace("key: " + key + ", hexkey: " + Long.toHexString(key) + ", transid: " + lvTransid);
        g = new Get(Bytes.toBytes(key));
        TransState lvTxState = TransState.STATE_NOTX;
        String stateString = "";
        String transidToken = "";
        try {
            Result r = unknownTransactionTable.get(g);
            if (r == null) {
                // No Tlog record: report NOTX rather than dereferencing a null result.
                ts.setStatus(TransState.STATE_NOTX);
                if (LOG.isTraceEnabled())
                    LOG.trace("getTransactionState: tLog result is null: " + transidString);
                return;
            }
            if (r.isEmpty()) {
                if (LOG.isTraceEnabled())
                    LOG.trace("getTransactionState: tLog empty result: " + transidString);
            }
            byte[] value = r.getValue(TLOG_FAMILY, ASN_STATE);
            if (value == null) {
                ts.setStatus(TransState.STATE_NOTX);
                if (LOG.isTraceEnabled())
                    LOG.trace("getTransactionState: tLog value is null: " + transidString);
                return;
            }
            if (value.length == 0) {
                ts.setStatus(TransState.STATE_NOTX);
                if (LOG.isTraceEnabled())
                    LOG.trace("getTransactionState: tLog transaction not found: " + transidString);
                return;
            }
            ts.clearParticipatingRegions();
            String recordString = new String(Bytes.toString(value));
            StringTokenizer st = new StringTokenizer(recordString, ",");
            if (st.hasMoreElements()) {
                String asnToken = st.nextElement().toString();
                transidToken = st.nextElement().toString();
                stateString = st.nextElement().toString();
                if (LOG.isTraceEnabled())
                    LOG.trace("getTransactionState: transaction: " + transidToken + " stateString is: "
                            + stateString);
            }
            if (stateString.compareTo("COMMITTED") == 0) {
                lvTxState = TransState.STATE_COMMITTED;
            } else if (stateString.compareTo("ABORTED") == 0) {
                lvTxState = TransState.STATE_ABORTED;
            } else if (stateString.compareTo("ACTIVE") == 0) {
                lvTxState = TransState.STATE_ACTIVE;
            } else if (stateString.compareTo("PREPARED") == 0) {
                lvTxState = TransState.STATE_PREPARED;
            } else if (stateString.compareTo("NOTX") == 0) {
                lvTxState = TransState.STATE_NOTX;
            } else if (stateString.compareTo("FORGOTTEN") == 0) {
                // Need to get the previous state record so we know how to drive the regions
                String keyS = new String(r.getRow());
                Get get = new Get(r.getRow());
                get.setMaxVersions(versions); // will return last n versions of row
                Result lvResult = unknownTransactionTable.get(get);
                // byte[] b = lvResult.getValue(TLOG_FAMILY, ASN_STATE);  // returns current version of value
                List<Cell> list = lvResult.getColumnCells(TLOG_FAMILY, ASN_STATE); // returns all versions of this column
                for (Cell element : list) {
                    String stringValue = new String(CellUtil.cloneValue(element));
                    st = new StringTokenizer(stringValue, ",");
                    if (st.hasMoreElements()) {
                        if (LOG.isTraceEnabled())
                            LOG.trace("Performing secondary search on (" + transidToken + ")");
                        String asnToken = st.nextElement().toString();
                        transidToken = st.nextElement().toString();
                        String stateToken = st.nextElement().toString();
                        if ((stateToken.compareTo("COMMITTED") == 0)
                                || (stateToken.compareTo("ABORTED") == 0)) {
                            String rowKey = new String(r.getRow());
                            if (LOG.isTraceEnabled())
                                LOG.trace("Secondary search found record for (" + transidToken
                                        + ") with state: " + stateToken);
                            lvTxState = (stateToken.compareTo("COMMITTED") == 0) ? TransState.STATE_COMMITTED
                                    : TransState.STATE_ABORTED;
                            break;
                        } else {
                            if (LOG.isTraceEnabled())
                                LOG.trace("Secondary search skipping entry for (" + transidToken
                                        + ") with state: " + stateToken);
                        }
                    }
                }
            } else if (stateString.compareTo("ABORTING") == 0) {
                lvTxState = TransState.STATE_ABORTING;
            } else if (stateString.compareTo("COMMITTING") == 0) {
                lvTxState = TransState.STATE_COMMITTING;
            } else if (stateString.compareTo("PREPARING") == 0) {
                lvTxState = TransState.STATE_PREPARING;
            } else if (stateString.compareTo("FORGETTING") == 0) {
                lvTxState = TransState.STATE_FORGETTING;
            } else if (stateString.compareTo("FORGETTING_HEUR") == 0) {
                lvTxState = TransState.STATE_FORGETTING_HEUR;
            } else if (stateString.compareTo("BEGINNING") == 0) {
                lvTxState = TransState.STATE_BEGINNING;
            } else if (stateString.compareTo("HUNGCOMMITTED") == 0) {
                lvTxState = TransState.STATE_HUNGCOMMITTED;
            } else if (stateString.compareTo("HUNGABORTED") == 0) {
                lvTxState = TransState.STATE_HUNGABORTED;
            } else if (stateString.compareTo("IDLE") == 0) {
                lvTxState = TransState.STATE_IDLE;
            } else if (stateString.compareTo("FORGOTTEN_HEUR") == 0) {
                lvTxState = TransState.STATE_FORGOTTEN_HEUR;
            } else if (stateString.compareTo("ABORTING_PART2") == 0) {
                lvTxState = TransState.STATE_ABORTING_PART2;
            } else if (stateString.compareTo("TERMINATING") == 0) {
                lvTxState = TransState.STATE_TERMINATING;
            } else {
                lvTxState = TransState.STATE_BAD;
            }

            // get past the filler
            st.nextElement();

            // Load the TransactionState object up with regions
            while (st.hasMoreElements()) {
                String tableNameToken = st.nextToken();
                HTable table = new HTable(config, tableNameToken);
                NavigableMap<HRegionInfo, ServerName> regions = table.getRegionLocations();
                Iterator<Map.Entry<HRegionInfo, ServerName>> it = regions.entrySet().iterator();
                while (it.hasNext()) { // iterate entries.
                    NavigableMap.Entry<HRegionInfo, ServerName> pairs = it.next();
                    HRegionInfo regionKey = pairs.getKey();
                    if (LOG.isTraceEnabled())
                        LOG.trace("getTransactionState: transaction: " + transidToken + " adding region: "
                                + regionKey.getRegionNameAsString());
                    ServerName serverValue = regions.get(regionKey);
                    String hostAndPort = new String(serverValue.getHostAndPort());
                    StringTokenizer tok = new StringTokenizer(hostAndPort, ":");
                    String hostName = new String(tok.nextElement().toString());
                    int portNumber = Integer.parseInt(tok.nextElement().toString());
                    TransactionRegionLocation loc = new TransactionRegionLocation(regionKey, serverValue);
                    ts.addRegion(loc);
                }
            }
            ts.setStatus(lvTxState);

            if (LOG.isTraceEnabled())
                LOG.trace("getTransactionState: returning transid: " + ts.getTransactionId() + " state: "
                        + lvTxState);
        } catch (Exception e) {
            LOG.error("getTransactionState Exception " + Arrays.toString(e.getStackTrace()));
            throw e;
        }
    } catch (Exception e2) {
        LOG.error("getTransactionState Exception2 " + e2);
        e2.printStackTrace();
    }
    if (LOG.isTraceEnabled())
        LOG.trace("getTransactionState end transid: " + ts.getTransactionId());
    return;
}
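
One detail worth noting in the region loop above: the code calls regions.get(regionKey) even though the current Map.Entry already carries that value, so entry.getValue() would avoid a second lookup. A condensed sketch of the same iteration without the extra lookup (region names and server addresses are illustrative stand-ins for HRegionInfo and ServerName):

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class EntryValueSketch {
    public static void main(String[] args) {
        NavigableMap<String, String> regions = new TreeMap<>();
        regions.put("region-1", "host-a:60020");
        regions.put("region-2", "host-b:60020");

        for (Map.Entry<String, String> entry : regions.entrySet()) {
            // getValue() reuses the mapping already in hand instead of
            // re-querying the map by key.
            String hostAndPort = entry.getValue();
            String hostName = hostAndPort.split(":")[0];
            System.out.println(entry.getKey() + " served by " + hostName);
        }
    }
}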

From source file:org.apache.hadoop.hbase.coprocessor.TestMasterObserver.java

@Test
public void testRegionTransitionOperations() throws Exception {
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();

    HMaster master = cluster.getMaster();
    MasterCoprocessorHost host = master.getMasterCoprocessorHost();
    CPMasterObserver cp = (CPMasterObserver) host.findCoprocessor(CPMasterObserver.class.getName());
    cp.enableBypass(false);
    cp.resetStates();

    HTable table = UTIL.createTable(TEST_TABLE, TEST_FAMILY);

    try {
        UTIL.createMultiRegions(table, TEST_FAMILY);
        UTIL.waitUntilAllRegionsAssigned(TEST_TABLE);

        NavigableMap<HRegionInfo, ServerName> regions = table.getRegionLocations();
        Map.Entry<HRegionInfo, ServerName> firstGoodPair = null;
        for (Map.Entry<HRegionInfo, ServerName> e : regions.entrySet()) {
            if (e.getValue() != null) {
                firstGoodPair = e;
                break;
            }
        }
        assertNotNull("Expected to find an entry with a non-null ServerName", firstGoodPair);
        LOG.info("Found " + firstGoodPair.toString());
        // Try to force a move
        Collection<ServerName> servers = master.getClusterStatus().getServers();
        String destName = null;
        String serverNameForFirstRegion = firstGoodPair.getValue().toString();
        LOG.info("serverNameForFirstRegion=" + serverNameForFirstRegion);
        boolean found = false;
        // Find server that is NOT carrying the first region
        for (ServerName info : servers) {
            LOG.info("ServerName=" + info);
            if (!serverNameForFirstRegion.equals(info.getServerName())) {
                destName = info.toString();
                found = true;
                break;
            }
        }
        assertTrue("Expected to find a server not carrying the first region", found);
        LOG.info("Found " + destName);
        master.getMasterRpcServices().moveRegion(null, RequestConverter.buildMoveRegionRequest(
                firstGoodPair.getKey().getEncodedNameAsBytes(), Bytes.toBytes(destName)));
        assertTrue("Coprocessor should have been called on region move", cp.wasMoveCalled());

        // make sure balancer is on
        master.balanceSwitch(true);
        assertTrue("Coprocessor should have been called on balance switch", cp.wasBalanceSwitchCalled());

        // turn balancer off
        master.balanceSwitch(false);

        // wait for assignments to finish, if any
        AssignmentManager mgr = master.getAssignmentManager();
        Collection<RegionState> transRegions = mgr.getRegionStates().getRegionsInTransition().values();
        for (RegionState state : transRegions) {
            mgr.getRegionStates().waitOnRegionToClearRegionsInTransition(state.getRegion());
        }

        // move half the open regions from RS 0 to RS 1
        HRegionServer rs = cluster.getRegionServer(0);
        byte[] destRS = Bytes.toBytes(cluster.getRegionServer(1).getServerName().toString());
        //Make sure no regions are in transition now
        waitForRITtoBeZero(master);
        List<HRegionInfo> openRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices());
        int moveCnt = openRegions.size() / 2;
        for (int i = 0; i < moveCnt; i++) {
            HRegionInfo info = openRegions.get(i);
            if (!info.isMetaTable()) {
                master.getMasterRpcServices().moveRegion(null, RequestConverter
                        .buildMoveRegionRequest(openRegions.get(i).getEncodedNameAsBytes(), destRS));
            }
        }
        //Make sure no regions are in transition now
        waitForRITtoBeZero(master);
        // now trigger a balance
        master.balanceSwitch(true);
        boolean balanceRun = master.balance();
        assertTrue("Coprocessor should be called on region rebalancing", cp.wasBalanceCalled());
    } finally {
        UTIL.deleteTable(TEST_TABLE);
    }
}
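
The search for firstGoodPair above is a linear scan of entrySet(): because the map is navigable, the scan visits regions in a deterministic key order and stops at the first mapping with a non-null server. A reduced sketch of that idiom (keys and servers are illustrative):

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class FirstNonNullSketch {
    public static void main(String[] args) {
        NavigableMap<String, String> assignments = new TreeMap<>();
        assignments.put("r1", null);
        assignments.put("r2", "server-b");
        assignments.put("r3", "server-c");

        Map.Entry<String, String> firstGood = null;
        for (Map.Entry<String, String> e : assignments.entrySet()) {
            if (e.getValue() != null) {
                firstGood = e; // first assigned region in key order
                break;
            }
        }
        System.out.println(firstGood); // r2=server-b
    }
}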

From source file:org.apache.tez.dag.app.rm.LlapTaskSchedulerService.java

private void preemptTasks(int forPriority, int numTasksToPreempt, String[] potentialHosts) {
    Set<String> preemptHosts;
    if (potentialHosts == null) {
        preemptHosts = null;
    } else {
        preemptHosts = Sets.newHashSet(potentialHosts);
    }
    writeLock.lock();
    List<TaskInfo> preemptedTaskList = null;
    try {
        NavigableMap<Integer, TreeSet<TaskInfo>> orderedMap = runningTasks.descendingMap();
        Iterator<Entry<Integer, TreeSet<TaskInfo>>> iterator = orderedMap.entrySet().iterator();
        int preemptedCount = 0;
        while (iterator.hasNext() && preemptedCount < numTasksToPreempt) {
            Entry<Integer, TreeSet<TaskInfo>> entryAtPriority = iterator.next();
            if (entryAtPriority.getKey() > forPriority) {
                Iterator<TaskInfo> taskInfoIterator = entryAtPriority.getValue().iterator();
                while (taskInfoIterator.hasNext() && preemptedCount < numTasksToPreempt) {
                    TaskInfo taskInfo = taskInfoIterator.next();
                    if (preemptHosts == null || preemptHosts.contains(taskInfo.assignedInstance.getHost())) {
                        // Candidate for preemption.
                        preemptedCount++;
                        LOG.info("preempting {} for task at priority {} with potentialHosts={}", taskInfo,
                                forPriority, potentialHosts == null ? "" : Arrays.toString(potentialHosts));
                        taskInfo.setPreemptedInfo(clock.getTime());
                        if (preemptedTaskList == null) {
                            preemptedTaskList = new LinkedList<>();
                        }
                        dagStats.registerTaskPreempted(taskInfo.assignedInstance.getHost());
                        preemptedTaskList.add(taskInfo);
                        registerPendingPreemption(taskInfo.assignedInstance.getHost());
                        // Remove from the runningTaskList
                        taskInfoIterator.remove();
                    }
                }

                // Remove entire priority level if it's been emptied.
                if (entryAtPriority.getValue().isEmpty()) {
                    iterator.remove();
                }
            } else {
                // No tasks qualify as preemptable
                LOG.info("DBG: No tasks qualify as killable to schedule tasks at priority {}", forPriority);
                break;
            }
        }
    } finally {
        writeLock.unlock();
    }
    // Send out the preempted request outside of the lock.
    if (preemptedTaskList != null) {
        for (TaskInfo taskInfo : preemptedTaskList) {
            LOG.info("DBG: Preempting task {}", taskInfo);
            getContext().preemptContainer(taskInfo.containerId);
            // Preemption will finally be registered as a deallocateTask as a result of preemptContainer
            // That resets preemption info and allows additional tasks to be pre-empted if required.
        }
    }
    // The schedule loop will be triggered again when the deallocateTask request comes in for the
    // preempted task.
}
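
descendingMap() reverses the priority order so the entrySet() iterator above visits the numerically largest (lowest-priority) levels first, and because the descending view is backed by the original map, iterator.remove() deletes an emptied level from runningTasks itself. A compact sketch of the traversal (priorities and task names are illustrative):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class PreemptionOrderSketch {
    public static void main(String[] args) {
        TreeMap<Integer, List<String>> running = new TreeMap<>();
        running.put(1, new ArrayList<>(List.of("high-1")));
        running.put(5, new ArrayList<>(List.of("low-1", "low-2")));

        int forPriority = 3; // only levels above this value may be preempted
        NavigableMap<Integer, List<String>> ordered = running.descendingMap();
        Iterator<Map.Entry<Integer, List<String>>> it = ordered.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<Integer, List<String>> level = it.next();
            if (level.getKey() > forPriority) {
                System.out.println("preempting " + level.getValue());
                level.getValue().clear();
                it.remove(); // removal through the view mutates the backing map
            } else {
                break; // remaining levels are at or above the requested priority
            }
        }
        System.out.println(running); // {1=[high-1]}
    }
}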

From source file:org.lilyproject.repository.impl.HBaseRepository.java

private void clearData(RecordId recordId, Record originalRecord, Long upToVersion)
        throws IOException, RepositoryException, InterruptedException {
    Get get = new Get(recordId.toBytes());
    get.addFamily(RecordCf.DATA.bytes);
    get.setFilter(new ColumnPrefixFilter(new byte[] { RecordColumn.DATA_PREFIX }));
    // Only read versions that exist(ed) at the time the record was deleted, since this code could
    // run concurrently with the re-creation of the same record.
    if (upToVersion != null) {
        get.setTimeRange(1 /* inclusive */, upToVersion + 1 /* exclusive */);
    } else {
        get.setTimeRange(1, 2);
    }
    get.setMaxVersions();
    Result result = recordTable.get(get);

    if (result != null && !result.isEmpty()) {
        boolean dataToDelete = false;
        Delete delete = new Delete(recordId.toBytes());
        Set<BlobReference> blobsToDelete = new HashSet<BlobReference>();

        NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = result.getMap();
        Set<Entry<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>>> familiesSet = map.entrySet();
        for (Entry<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> family : familiesSet) {
            if (Arrays.equals(RecordCf.DATA.bytes, family.getKey())) {
                NavigableMap<byte[], NavigableMap<Long, byte[]>> columnsSet = family.getValue();
                for (Entry<byte[], NavigableMap<Long, byte[]>> column : columnsSet.entrySet()) {
                    try {
                        byte[] columnQualifier = column.getKey();
                        SchemaId schemaId = new SchemaIdImpl(
                                Bytes.tail(columnQualifier, columnQualifier.length - 1));
                        FieldType fieldType = typeManager.getFieldTypeById(schemaId);
                        ValueType valueType = fieldType.getValueType();
                        NavigableMap<Long, byte[]> cells = column.getValue();
                        Set<Entry<Long, byte[]>> cellsSet = cells.entrySet();
                        for (Entry<Long, byte[]> cell : cellsSet) {
                            // Get blobs to delete
                            if (valueType.getDeepestValueType() instanceof BlobValueType) {
                                Object blobValue = null;
                                if (fieldType.getScope() == Scope.NON_VERSIONED) {
                                    // Read the blob value from the original record,
                                    // since the delete marker has already been put in the field by the delete call
                                    if (originalRecord != null) {
                                        blobValue = originalRecord.getField(fieldType.getName());
                                    }
                                } else {
                                    byte[] value = cell.getValue();
                                    if (!isDeleteMarker(value)) {
                                        blobValue = valueType.read(EncodingUtil.stripPrefix(value));
                                    }
                                }
                                try {
                                    if (blobValue != null) {
                                        blobsToDelete.addAll(
                                                getReferencedBlobs((FieldTypeImpl) fieldType, blobValue));
                                    }
                                } catch (BlobException e) {
                                    log.warn("Failure occurred while clearing blob data", e);
                                    // We do a best effort here
                                }
                            }
                            // Get cells to delete
                            // Only delete if in NON_VERSIONED scope
                            // The NON_VERSIONED fields will get filled in with a delete marker
                            // This is needed to avoid non-versioned fields being lost due to the HBase delete tombstone
                            // See trac ticket http://dev.outerthought.org/trac/outerthought_lilyproject/ticket/297
                            if (fieldType.getScope() != Scope.NON_VERSIONED) {
                                delete.deleteColumn(RecordCf.DATA.bytes, columnQualifier, cell.getKey());
                            }
                            dataToDelete = true;
                        }
                    } catch (FieldTypeNotFoundException e) {
                        log.warn("Failure occurred while clearing blob data", e);
                        // We do a best effort here
                    } catch (TypeException e) {
                        log.warn("Failure occurred while clearing blob data", e);
                        // We do a best effort here
                    }
                }
            } else {
                //skip
            }
        }
        // Delete the blobs
        blobManager.handleBlobReferences(recordId, null, blobsToDelete);

        // Delete data
        if (dataToDelete && upToVersion != null) { // Avoid a delete action when no data was found to delete
            // Do not delete the NON-VERSIONED record type column.
            // If the tombstone has not been processed yet (major compaction),
            // a re-creation of the record would then lose its record type, since the NON-VERSIONED
            // field is always stored at timestamp 1L.
            // Re-creating the record will always overwrite the (NON-VERSIONED) record type.
            // So, there is no risk of old record type information ending up in the new record.
            delete.deleteColumns(RecordCf.DATA.bytes, RecordColumn.VERSIONED_RT_ID.bytes, upToVersion);
            delete.deleteColumns(RecordCf.DATA.bytes, RecordColumn.VERSIONED_RT_VERSION.bytes, upToVersion);
            delete.deleteColumns(RecordCf.DATA.bytes, RecordColumn.VERSIONED_MUTABLE_RT_ID.bytes, upToVersion);
            delete.deleteColumns(RecordCf.DATA.bytes, RecordColumn.VERSIONED_MUTABLE_RT_VERSION.bytes,
                    upToVersion);
            recordTable.delete(delete);
        }
    }
}
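
result.getMap() hands back a three-level structure, family -> qualifier -> timestamp -> value, and the code above walks one entrySet() per level. A bare-bones sketch of that traversal with String keys standing in for byte[] (byte[] keys require an explicit comparator such as HBase's Bytes.BYTES_COMPARATOR, since arrays are not Comparable):

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class NestedResultMapSketch {
    public static void main(String[] args) {
        NavigableMap<Long, String> cells = new TreeMap<>();
        cells.put(1L, "value@1");
        cells.put(2L, "value@2");

        NavigableMap<String, NavigableMap<Long, String>> columns = new TreeMap<>();
        columns.put("qualifier", cells);

        NavigableMap<String, NavigableMap<String, NavigableMap<Long, String>>> families = new TreeMap<>();
        families.put("data", columns);

        // family -> qualifier -> timestamp -> value, one entrySet() per level
        for (Map.Entry<String, NavigableMap<String, NavigableMap<Long, String>>> family : families.entrySet()) {
            for (Map.Entry<String, NavigableMap<Long, String>> column : family.getValue().entrySet()) {
                for (Map.Entry<Long, String> cell : column.getValue().entrySet()) {
                    System.out.println(family.getKey() + ":" + column.getKey()
                            + " @" + cell.getKey() + " = " + cell.getValue());
                }
            }
        }
    }
}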