Example usage for java.util NavigableSet size

List of usage examples for java.util NavigableSet size

Introduction

On this page you can find example usages of the java.util.NavigableSet.size() method.

Prototype

int size();

Document

Returns the number of elements in this set (its cardinality).
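
As a quick, self-contained illustration before the real-world examples (a minimal sketch, not taken from any of the projects below; the class and variable names are purely illustrative), size() reports the cardinality of the whole set, and views returned by headSet/tailSet/subSet report the size of the view:

import java.util.NavigableSet;
import java.util.TreeSet;

public class NavigableSetSizeExample {
    public static void main(String[] args) {
        NavigableSet<Integer> set = new TreeSet<>();
        set.add(10);
        set.add(20);
        set.add(30);

        // size() returns the cardinality of the whole set
        System.out.println(set.size()); // 3

        // views such as headSet report the size of the view, not of the backing set
        System.out.println(set.headSet(30, false).size()); // 2

        // a common emptiness guard, as used by several of the examples below
        if (set.size() > 0) {
            System.out.println("first element: " + set.first());
        }
    }
}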

Usage

From source file: org.apache.hadoop.hbase.util.FSVisitor.java

/**
 * Iterate over recovered.edits of the specified region
 *
 * @param fs {@link FileSystem}
 * @param regionDir {@link Path} to the Region directory
 * @param visitor callback object to get the recovered.edits files
 * @throws IOException if an error occurred while scanning the directory
 */
public static void visitRegionRecoveredEdits(final FileSystem fs, final Path regionDir,
        final FSVisitor.RecoveredEditsVisitor visitor) throws IOException {
    NavigableSet<Path> files = HLogUtil.getSplitEditFilesSorted(fs, regionDir);
    if (files == null || files.size() == 0)
        return;

    for (Path source : files) {
        // check to see if the file is zero length, in which case we can skip it
        FileStatus stat = fs.getFileStatus(source);
        if (stat.getLen() <= 0)
            continue;

        visitor.recoveredEdits(regionDir.getName(), source.getName());
    }
}

From source file: c5db.client.ProtobufUtil.java

/**
 * Convert a client Scan to a protocol buffer Scan
 *
 * @param scan the client Scan to convert
 * @return the converted protocol buffer Scan
 * @throws IOException
 */
@NotNull
public static c5db.client.generated.Scan toScan(final Scan scan) throws IOException {

    boolean cacheBlocks = scan.getCacheBlocks();
    int batchSize = scan.getBatch();
    long maxResultSize = scan.getMaxResultSize();

    Boolean loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
    if (loadColumnFamiliesOnDemand == null) {
        loadColumnFamiliesOnDemand = false;
    }
    int maxVersions = scan.getMaxVersions();
    c5db.client.generated.TimeRange timeRange;
    if (scan.getTimeRange().isAllTime()) {
        timeRange = new c5db.client.generated.TimeRange(0, Long.MAX_VALUE);
    } else {
        timeRange = new c5db.client.generated.TimeRange(scan.getTimeRange().getMin(),
                scan.getTimeRange().getMax());
    }

    List<NameBytesPair> attributes = new ArrayList<>();
    Map<String, byte[]> attributesMap = scan.getAttributesMap();
    if (!attributesMap.isEmpty()) {
        for (Map.Entry<String, byte[]> attribute : attributesMap.entrySet()) {
            attributes.add(new NameBytesPair(attribute.getKey(), ByteBuffer.wrap(attribute.getValue())));
        }
    }

    ByteBuffer startRow = ByteBuffer.wrap(scan.getStartRow());
    ByteBuffer stopRow = ByteBuffer.wrap(scan.getStopRow());
    c5db.client.generated.Filter filter = ProtobufUtil.toFilter(scan.getFilter());

    List<Column> columns = new ArrayList<>();
    if (scan.hasFamilies()) {
        for (Map.Entry<byte[], NavigableSet<byte[]>> family : scan.getFamilyMap().entrySet()) {
            NavigableSet<byte[]> qualifierSet = family.getValue();
            List<ByteBuffer> qualifiers = new ArrayList<>();
            if (qualifierSet != null && qualifierSet.size() > 0) {
                for (byte[] qualifier : qualifierSet) {
                    qualifiers.add(ByteBuffer.wrap(qualifier));
                }
            }
            Column column = new Column(ByteBuffer.wrap(family.getKey()), qualifiers);
            columns.add(column);
        }
    }

    int storeLimit = scan.getMaxResultsPerColumnFamily();
    int storeOffset = scan.getRowOffsetPerColumnFamily();

    return new c5db.client.generated.Scan(columns, attributes, startRow, stopRow, filter, timeRange,
            maxVersions, cacheBlocks, batchSize, maxResultSize, storeLimit, storeOffset,
            loadColumnFamiliesOnDemand, false);
}

From source file: nz.co.fortytwo.signalk.util.Util.java

public static void populateTree(SignalKModel signalkModel, SignalKModel temp, String p) {
    NavigableSet<String> node = signalkModel.getTree(p);
    if (logger.isDebugEnabled())
        logger.debug("Found node:" + p + " = " + node);
    if (node != null && node.size() > 0) {
        addNodeToTemp(signalkModel, temp, node);
    } else {
        temp.put(p, signalkModel.get(p));
    }

}

From source file: org.apache.hadoop.hbase.snapshot.CopyRecoveredEditsTask.java

@Override
public Void call() throws IOException {
    NavigableSet<Path> files = HLog.getSplitEditFilesSorted(this.fs, regiondir);
    if (files == null || files.size() == 0)
        return null;

    // copy over each file.
    // this is really inefficient (could be trivially parallelized), but is
    // really simple to reason about.
    for (Path source : files) {
        // check to see if the file is zero length, in which case we can skip it
        FileStatus stat = fs.getFileStatus(source);
        if (stat.getLen() <= 0)
            continue;

        // it's not zero length, so copy over the file
        Path out = new Path(outputDir, source.getName());
        LOG.debug("Copying " + source + " to " + out);
        FileUtil.copy(fs, source, fs, out, true, fs.getConf());

        // check the running operation for errors after each file
        this.rethrowException();
    }
    return null;
}

From source file: org.apache.kylin.metadata.badquery.BadQueryHistoryManager.java

public BadQueryHistory addEntryToProject(BadQueryEntry badQueryEntry, String project) throws IOException {
    if (StringUtils.isEmpty(project) || badQueryEntry.getAdj() == null || badQueryEntry.getSql() == null)
        throw new IllegalArgumentException();

    BadQueryHistory badQueryHistory = getBadQueriesForProject(project);
    NavigableSet<BadQueryEntry> entries = badQueryHistory.getEntries();
    if (entries.size() >= kylinConfig.getBadQueryHistoryNum()) {
        entries.pollFirst();
    }
    entries.add(badQueryEntry);

    getStore().putResource(badQueryHistory.getResourcePath(), badQueryHistory, BAD_QUERY_INSTANCE_SERIALIZER);
    return badQueryHistory;
}

From source file: org.apache.kylin.common.persistence.FileResourceStore.java

@Override
synchronized protected List<RawResource> getAllResourcesImpl(String folderPath, long timeStart,
        long timeEndExclusive) throws IOException {
    NavigableSet<String> resources = listResources(folderPath);
    if (resources == null)
        return Collections.emptyList();

    List<RawResource> result = Lists.newArrayListWithCapacity(resources.size());
    try {
        for (String res : resources) {
            long ts = getResourceTimestampImpl(res);
            if (timeStart <= ts && ts < timeEndExclusive) {
                RawResource resource = getResourceImpl(res);
                if (resource != null) // can be null if it is a sub-folder
                    result.add(resource);
            }
        }
    } catch (IOException ex) {
        for (RawResource rawResource : result) {
            IOUtils.closeQuietly(rawResource.inputStream);
        }
        throw ex;
    }
    return result;
}

From source file: org.apache.hadoop.hbase.master.TestMasterRestartAfterDisablingTable.java

@Test
public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch() throws Exception {
    final int NUM_MASTERS = 2;
    final int NUM_RS = 1;
    final int NUM_REGIONS_TO_CREATE = 4;

    // Start the cluster
    log("Starting cluster");
    Configuration conf = HBaseConfiguration.create();
    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    log("Waiting for active/ready master");
    cluster.waitForActiveAndReadyMaster();
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testmasterRestart", null);
    HMaster master = cluster.getMaster();

    // Create a table with regions
    byte[] table = Bytes.toBytes("tableRestart");
    byte[] family = Bytes.toBytes("family");
    log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
    HTable ht = TEST_UTIL.createTable(table, family);
    int numRegions = TEST_UTIL.createMultiRegions(conf, ht, family, NUM_REGIONS_TO_CREATE);
    numRegions += 1; // catalogs
    log("Waiting for no more RIT\n");
    blockUntilNoRIT(zkw, master);
    log("Disabling table\n");
    TEST_UTIL.getHBaseAdmin().disableTable(table);

    NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
    assertEquals("The number of regions for the table tableRestart should be 0 and only"
            + "the catalog and namespace tables should be present.", 2, regions.size());

    List<MasterThread> masterThreads = cluster.getMasterThreads();
    MasterThread activeMaster = null;
    if (masterThreads.get(0).getMaster().isActiveMaster()) {
        activeMaster = masterThreads.get(0);
    } else {
        activeMaster = masterThreads.get(1);
    }
    activeMaster.getMaster().stop("stopping the active master so that the backup can become active");
    cluster.hbaseCluster.waitOnMaster(activeMaster);
    cluster.waitForActiveAndReadyMaster();

    assertTrue("The table should not be in enabled state",
            cluster.getMaster().getAssignmentManager().getTableStateManager().isTableState(
                    TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.DISABLED,
                    ZooKeeperProtos.Table.State.DISABLING));
    log("Enabling table\n");
    // Need a new Admin, the previous one is on the old master
    HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
    admin.enableTable(table);
    admin.close();
    log("Waiting for no more RIT\n");
    blockUntilNoRIT(zkw, master);
    log("Verifying there are " + numRegions + " assigned on cluster\n");
    regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
    assertEquals("The assigned regions were not onlined after master"
            + " switch except for the catalog and namespace tables.", 6, regions.size());
    assertTrue("The table should be in enabled state",
            cluster.getMaster().getAssignmentManager().getTableStateManager()
                    .isTableState(TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.ENABLED));
    ht.close();
    TEST_UTIL.shutdownMiniCluster();
}

From source file: org.apache.kylin.common.persistence.HDFSResourceStore.java

@Override
protected List<RawResource> getAllResourcesImpl(String folderPath, long timeStart, long timeEndExclusive)
        throws IOException {
    NavigableSet<String> resources = listResources(folderPath);
    if (resources == null)
        return Collections.emptyList();
    List<RawResource> result = Lists.newArrayListWithCapacity(resources.size());
    try {
        for (String res : resources) {
            long ts = getResourceTimestampImpl(res);
            if (timeStart <= ts && ts < timeEndExclusive) {
                RawResource resource = getResourceImpl(res);
                if (resource != null) // can be null if it is a sub-folder
                    result.add(resource);
            }
        }
    } catch (IOException ex) {
        for (RawResource rawResource : result) {
            IOUtils.closeQuietly(rawResource.inputStream);
        }
        throw ex;
    }
    return result;
}

From source file: org.structnetalign.merge.BronKerboschMergeJob.java

@Override
public List<NavigableSet<Integer>> call() throws Exception {

    logger.info("Searching for cliques on job " + index + " containing " + graph.getVertexCount()
            + " vertices and " + graph.getHomologyCount() + " homology edges");

    // find the cliques
    BronKerboschCliqueFinder<Integer, HomologyEdge> finder = new BronKerboschCliqueFinder<>();

    // these cliques are ordered from largest to smallest
    Collection<Set<Integer>> cliques = finder.transform(graph.getHomology());

    // just report the cliques we're using
    logger.info("Job " + index + ": " + "Found " + cliques.size() + " maximal cliques");
    int i = 1;
    for (Set<Integer> clique : cliques) {
        logger.debug("Job " + index + ": " + "Clique " + i + ": " + clique);
        i++;
    }

    // partition the cliques by sets of interactions
    // we call these (maximal) degenerate sets
    List<NavigableSet<Integer>> simpleDegenerateSets = new ArrayList<NavigableSet<Integer>>();
    for (Set<Integer> clique : cliques) {
        NavigableMap<String, NavigableSet<Integer>> degenerateSetMap = new TreeMap<>();
        for (int v : clique) {
            Collection<Integer> neighbors = graph.getInteractionNeighbors(v);
            String hash = hashVertexInteractions(neighbors);
            NavigableSet<Integer> degenerateSet = degenerateSetMap.get(hash);
            if (degenerateSet == null) {
                degenerateSet = new TreeSet<>();
                degenerateSetMap.put(hash, degenerateSet);
            }
            degenerateSet.add(v);
            logger.trace("Job " + index + ": " + "Found " + hash + " --> " + degenerateSetMap.get(hash));
        }
        for (NavigableSet<Integer> set : degenerateSetMap.values()) {
            simpleDegenerateSets.add(set);
        }
    }

    /*
     * Now sort the degenerate sets from largest to smallest.
     * Take into account the edge case where the sizes are the same.
     */
    Comparator<NavigableSet<Integer>> comparator = new Comparator<NavigableSet<Integer>>() {
        @Override
        public int compare(NavigableSet<Integer> clique1, NavigableSet<Integer> clique2) {
            if (CollectionUtils.isEqualCollection(clique1, clique2))
                return 0;
            if (clique1.size() < clique2.size()) {
                return 1;
            } else if (clique1.size() > clique2.size()) {
                return -1;
            } else {
                Iterator<Integer> iter1 = clique1.iterator();
                Iterator<Integer> iter2 = clique2.iterator();
                while (iter1.hasNext()) { // we know they're the same size
                    int v1 = iter1.next();
                    int v2 = iter2.next();
                    if (v1 < v2) {
                        return 1;
                    } else if (v1 > v2) {
                        return -1;
                    }
                }
            }
            // they're the same throughout, so they're equal
            return 0;
        }
    };
    List<NavigableSet<Integer>> sortedDegenerateSets = new ArrayList<>(simpleDegenerateSets.size());
    sortedDegenerateSets.addAll(simpleDegenerateSets);
    Collections.sort(sortedDegenerateSets, comparator);

    /*
     * Now we want to return only the maximal maximal degenerate sets.
     */

    TreeSet<String> verticesAlreadyUsed = new TreeSet<String>();

    List<NavigableSet<Integer>> finalDegenerateSets = new ArrayList<>(sortedDegenerateSets.size());

    int nTrivial = 0;
    int nWeak = 0; // a degenerate set is weak if it contains a vertex already claimed by a previously accepted set

    forcliques: for (NavigableSet<Integer> set : sortedDegenerateSets) {

        // discard trivial degenerate sets
        if (set.size() < 2) {
            nTrivial++;
            continue;
        }

        // verify that we haven't already used any vertex in this degenerate set
        for (int v : set) {
            String hash = NetworkUtils.hash(v); // use MD5 for safety
            if (verticesAlreadyUsed.contains(hash)) {
                // discard this degenerate set and do NOT say we've used any of these vertices
                nWeak++;
                continue forcliques;
            }
        }

        // we haven't used any vertex in this degenerate set
        // now add all of these vertices
        // do NOT add them before the check above, or we would mark vertices from discarded sets as used
        for (int v : set) {
            String hash = NetworkUtils.hash(v);
            verticesAlreadyUsed.add(hash);
        }
        finalDegenerateSets.add(set); // keep this degenerate set
    }

    logger.info("Job " + index + ": " + "Found " + finalDegenerateSets.size()
            + " strong nontrivial maximal degenerate sets found (" + nTrivial + " trivial and " + nWeak
            + " weak)");

    return finalDegenerateSets;
}

From source file: org.apache.hadoop.hbase.master.TestRollingRestart.java

@Test(timeout = 500000)
public void testBasicRollingRestart() throws Exception {

    // Start a cluster with 2 masters and 4 regionservers
    final int NUM_MASTERS = 2;
    final int NUM_RS = 3;
    final int NUM_REGIONS_TO_CREATE = 20;

    int expectedNumRS = 3;

    // Start the cluster
    log("Starting cluster");
    Configuration conf = HBaseConfiguration.create();
    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    log("Waiting for active/ready master");
    cluster.waitForActiveAndReadyMaster();
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testRollingRestart", null);
    HMaster master = cluster.getMaster();

    // Create a table with regions
    byte[] table = Bytes.toBytes("tableRestart");
    byte[] family = Bytes.toBytes("family");
    log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
    HTable ht = TEST_UTIL.createTable(table, family);
    int numRegions = TEST_UTIL.createMultiRegions(conf, ht, family, NUM_REGIONS_TO_CREATE);
    numRegions += 1; // catalogs
    log("Waiting for no more RIT\n");
    blockUntilNoRIT(zkw, master);
    log("Disabling table\n");
    TEST_UTIL.getHBaseAdmin().disableTable(table);
    log("Waiting for no more RIT\n");
    blockUntilNoRIT(zkw, master);
    NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
    log("Verifying only catalog and namespace regions are assigned\n");
    if (regions.size() != 2) {
        for (String oregion : regions)
            log("Region still online: " + oregion);
    }
    assertEquals(2, regions.size());
    log("Enabling table\n");
    TEST_UTIL.getHBaseAdmin().enableTable(table);
    log("Waiting for no more RIT\n");
    blockUntilNoRIT(zkw, master);
    log("Verifying there are " + numRegions + " assigned on cluster\n");
    regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
    assertRegionsAssigned(cluster, regions);
    assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());

    // Add a new regionserver
    log("Adding a fourth RS");
    RegionServerThread restarted = cluster.startRegionServer();
    expectedNumRS++;
    restarted.waitForServerOnline();
    log("Additional RS is online");
    log("Waiting for no more RIT");
    blockUntilNoRIT(zkw, master);
    log("Verifying there are " + numRegions + " assigned on cluster");
    assertRegionsAssigned(cluster, regions);
    assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());

    // Master Restarts
    List<MasterThread> masterThreads = cluster.getMasterThreads();
    MasterThread activeMaster = null;
    MasterThread backupMaster = null;
    assertEquals(2, masterThreads.size());
    if (masterThreads.get(0).getMaster().isActiveMaster()) {
        activeMaster = masterThreads.get(0);
        backupMaster = masterThreads.get(1);
    } else {
        activeMaster = masterThreads.get(1);
        backupMaster = masterThreads.get(0);
    }

    // Bring down the backup master
    log("Stopping backup master\n\n");
    backupMaster.getMaster().stop("Stop of backup during rolling restart");
    cluster.hbaseCluster.waitOnMaster(backupMaster);

    // Bring down the primary master
    log("Stopping primary master\n\n");
    activeMaster.getMaster().stop("Stop of active during rolling restart");
    cluster.hbaseCluster.waitOnMaster(activeMaster);

    // Start primary master
    log("Restarting primary master\n\n");
    activeMaster = cluster.startMaster();
    cluster.waitForActiveAndReadyMaster();
    master = activeMaster.getMaster();

    // Start backup master
    log("Restarting backup master\n\n");
    backupMaster = cluster.startMaster();

    assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());

    // RegionServer Restarts

    // Bring them down, one at a time, waiting between each to complete
    List<RegionServerThread> regionServers = cluster.getLiveRegionServerThreads();
    int num = 1;
    int total = regionServers.size();
    for (RegionServerThread rst : regionServers) {
        ServerName serverName = rst.getRegionServer().getServerName();
        log("Stopping region server " + num + " of " + total + " [ " + serverName + "]");
        rst.getRegionServer().stop("Stopping RS during rolling restart");
        cluster.hbaseCluster.waitOnRegionServer(rst);
        log("Waiting for RS shutdown to be handled by master");
        waitForRSShutdownToStartAndFinish(activeMaster, serverName);
        log("RS shutdown done, waiting for no more RIT");
        blockUntilNoRIT(zkw, master);
        log("Verifying there are " + numRegions + " assigned on cluster");
        assertRegionsAssigned(cluster, regions);
        expectedNumRS--;
        assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
        log("Restarting region server " + num + " of " + total);
        restarted = cluster.startRegionServer();
        restarted.waitForServerOnline();
        expectedNumRS++;
        log("Region server " + num + " is back online");
        log("Waiting for no more RIT");
        blockUntilNoRIT(zkw, master);
        log("Verifying there are " + numRegions + " assigned on cluster");
        assertRegionsAssigned(cluster, regions);
        assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
        num++;
    }
    Thread.sleep(1000);
    assertRegionsAssigned(cluster, regions);

    // TODO: Bring random 3 of 4 RS down at the same time

    ht.close();
    // Stop the cluster
    TEST_UTIL.shutdownMiniCluster();
}