Example usage for java.util TreeSet first

Introduction

On this page you can find usage examples for the java.util.TreeSet.first() method, drawn from the source files listed below.

Prototype

public E first() 
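
TreeSet.first() returns the lowest element currently in the set, according to the set's ordering, and throws NoSuchElementException when the set is empty. A minimal sketch of both behaviors, using only the standard library:

import java.util.NoSuchElementException;
import java.util.TreeSet;

public class TreeSetFirstDemo {
    public static void main(String[] args) {
        TreeSet<Integer> set = new TreeSet<Integer>();
        set.add(42);
        set.add(7);
        set.add(19);
        System.out.println(set.first()); // 7 -- the lowest element

        set.clear();
        try {
            set.first(); // first() on an empty set throws
        } catch (NoSuchElementException e) {
            System.out.println("empty set: " + e);
        }
    }
}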

Usage

From source file:com.citic.zxyjs.zwlscx.mapreduce.lib.input.HFileOutputFormatBase.java

/**
 * Write out a {@link SequenceFile} that can be read by
 * {@link TotalOrderPartitioner} that contains the split points in
 * startKeys.
 */
private static void writePartitions(Configuration conf, Path partitionsPath,
        List<ImmutableBytesWritable> startKeys) throws IOException {
    LOG.info("Writing partition information to " + partitionsPath);
    if (startKeys.isEmpty()) {
        throw new IllegalArgumentException("No regions passed");
    }

    // We're generating a list of split points, and we don't ever
    // have keys < the first region (which has an empty start key)
    // so we need to remove it. Otherwise we would end up with an
    // empty reducer with index 0
    TreeSet<ImmutableBytesWritable> sorted = new TreeSet<ImmutableBytesWritable>(startKeys);

    ImmutableBytesWritable first = sorted.first();
    if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) {
        throw new IllegalArgumentException("First region of table should have empty start key. Instead has: "
                + Bytes.toStringBinary(first.get()));
    }
    sorted.remove(first);

    // Write the actual file
    FileSystem fs = partitionsPath.getFileSystem(conf);
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath,
            ImmutableBytesWritable.class, NullWritable.class);

    try {
        for (ImmutableBytesWritable startKey : sorted) {
            writer.append(startKey, NullWritable.get());
        }
    } finally {
        writer.close();
    }
}

From source file:com.neusoft.hbase.test.hadoop.dataload.HFileOutputFormatBase.java

/**
 * Write out a {@link org.apache.hadoop.io.SequenceFile} that can be read by
 * {@link org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner} that contains the split points in
 * startKeys.
 */
@SuppressWarnings("deprecation")
private static void writePartitions(Configuration conf, Path partitionsPath,
        List<ImmutableBytesWritable> startKeys) throws IOException {
    LOG.info("Writing partition information to " + partitionsPath);
    if (startKeys.isEmpty()) {
        throw new IllegalArgumentException("No regions passed");
    }

    // We're generating a list of split points, and we don't ever
    // have keys < the first region (which has an empty start key)
    // so we need to remove it. Otherwise we would end up with an
    // empty reducer with index 0
    TreeSet<ImmutableBytesWritable> sorted = new TreeSet<ImmutableBytesWritable>(startKeys);

    ImmutableBytesWritable first = sorted.first();
    if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) {
        throw new IllegalArgumentException("First region of table should have empty start key. Instead has: "
                + Bytes.toStringBinary(first.get()));
    }
    sorted.remove(first);

    // Write the actual file
    FileSystem fs = partitionsPath.getFileSystem(conf);
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath,
            ImmutableBytesWritable.class, NullWritable.class);

    try {
        for (ImmutableBytesWritable startKey : sorted) {
            writer.append(startKey, NullWritable.get());
        }
    } finally {
        writer.close();
    }
}

From source file:org.apache.phoenix.mapreduce.MultiHfileOutputFormat.java

private static void writePartitions(Configuration conf, Path partitionsPath,
        Set<CsvTableRowkeyPair> tablesStartKeys) throws IOException {

    LOG.info("Writing partition information to " + partitionsPath);
    if (tablesStartKeys.isEmpty()) {
        throw new IllegalArgumentException("No regions passed");
    }

    // We're generating a list of split points, and we don't ever
    // have keys < the first region (which has an empty start key)
    // so we need to remove it. Otherwise we would end up with an
    // empty reducer with index 0
    TreeSet<CsvTableRowkeyPair> sorted = new TreeSet<CsvTableRowkeyPair>(tablesStartKeys);

    CsvTableRowkeyPair first = sorted.first();
    if (!first.getRowkey().equals(HConstants.EMPTY_BYTE_ARRAY)) {
        throw new IllegalArgumentException("First region of table should have empty start key. Instead has: "
                + Bytes.toStringBinary(first.getRowkey().get()));
    }
    sorted.remove(first);

    // Write the actual file
    FileSystem fs = partitionsPath.getFileSystem(conf);
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath, CsvTableRowkeyPair.class,
            NullWritable.class);

    try {
        for (CsvTableRowkeyPair startKey : sorted) {
            writer.append(startKey, NullWritable.get());
        }
    } finally {
        writer.close();
    }

}

From source file:com.ailk.oci.ocnosql.tools.load.single.SingleColumnImportTsv.java

/**
 * Write out a SequenceFile that can be read by TotalOrderPartitioner
 * that contains the split points in startKeys.
 * @param partitionsPath output path for SequenceFile
 * @param startKeys the region start keys
 */
private static void writePartitions(Configuration conf, Path partitionsPath,
        List<ImmutableBytesWritable> startKeys) throws IOException {
    if (startKeys.isEmpty()) {
        throw new IllegalArgumentException("No regions passed");
    }

    // We're generating a list of split points, and we don't ever
    // have keys < the first region (which has an empty start key)
    // so we need to remove it. Otherwise we would end up with an
    // empty reducer with index 0
    TreeSet<ImmutableBytesWritable> sorted = new TreeSet<ImmutableBytesWritable>(startKeys);

    ImmutableBytesWritable first = sorted.first();
    if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) {
        throw new IllegalArgumentException("First region of table should have empty start key. Instead has: "
                + Bytes.toStringBinary(first.get()));
    }
    sorted.remove(first);

    // Write the actual file
    FileSystem fs = partitionsPath.getFileSystem(conf);
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath,
            ImmutableBytesWritable.class, NullWritable.class);

    try {
        for (ImmutableBytesWritable startKey : sorted) {
            writer.append(startKey, NullWritable.get());
        }
    } finally {
        writer.close();
    }
}
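
The writePartitions variants above all follow the same idiom: build a TreeSet from the region start keys, use first() to check that the lowest key is the empty start key of the first region, and remove it so that the number of split points is one less than the number of reducers. A minimal sketch of that idiom, with plain Strings standing in for the byte-array key types used above:

import java.util.Arrays;
import java.util.TreeSet;

public class SplitPointSketch {
    public static void main(String[] args) {
        // Four regions; the first region's start key is empty.
        TreeSet<String> startKeys = new TreeSet<String>(
                Arrays.asList("", "row-b", "row-m", "row-t"));

        String first = startKeys.first(); // "" sorts before every other key
        if (!first.isEmpty()) {
            throw new IllegalArgumentException("First region should have an empty start key");
        }
        startKeys.remove(first);

        // Three split points remain for four reducers.
        System.out.println(startKeys); // [row-b, row-m, row-t]
    }
}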

From source file:org.apache.htrace.core.TracerId.java

/**
 * <p>Get the best IP address that represents this node.</p>
 *
 * This is complicated since nodes can have multiple network interfaces,
 * and each network interface can have multiple IP addresses.  What we're
 * looking for here is an IP address that will serve to identify this node
 * to HTrace.  So we prefer site-local addresses (i.e. private ones on the
 * LAN) to publicly routable interfaces.  If there are multiple addresses
 * to choose from, we select the one which comes first in textual sort
 * order.  This should ensure that we at least consistently call each node
 * by a single name.
 */
static String getBestIpString() {
    Enumeration<NetworkInterface> ifaces;
    try {
        ifaces = NetworkInterface.getNetworkInterfaces();
    } catch (SocketException e) {
        LOG.error("Error getting network interfaces", e);
        return "127.0.0.1";
    }
    TreeSet<String> siteLocalCandidates = new TreeSet<String>();
    TreeSet<String> candidates = new TreeSet<String>();
    while (ifaces.hasMoreElements()) {
        NetworkInterface iface = ifaces.nextElement();
        for (Enumeration<InetAddress> addrs = iface.getInetAddresses(); addrs.hasMoreElements();) {
            InetAddress addr = addrs.nextElement();
            if (!addr.isLoopbackAddress()) {
                if (addr.isSiteLocalAddress()) {
                    siteLocalCandidates.add(addr.getHostAddress());
                } else {
                    candidates.add(addr.getHostAddress());
                }
            }
        }
    }
    if (!siteLocalCandidates.isEmpty()) {
        return siteLocalCandidates.first();
    }
    if (!candidates.isEmpty()) {
        return candidates.first();
    }
    return "127.0.0.1";
}
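
As the javadoc above notes, first() picks the address that comes first in textual sort order. That choice is stable across runs, but it is String ordering, not numeric ordering of the address octets. For example:

import java.util.TreeSet;

public class TextualOrderDemo {
    public static void main(String[] args) {
        TreeSet<String> candidates = new TreeSet<String>();
        candidates.add("10.0.0.2");
        candidates.add("10.0.0.11");
        // "10.0.0.11" < "10.0.0.2" as Strings, because '1' < '2'.
        System.out.println(candidates.first()); // 10.0.0.11
    }
}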

From source file:org.rhwlab.ace3d.SegmentationLinePlot.java

public void setTree(BHCTree tree) {
    XYSeriesCollection collect = new XYSeriesCollection();
    XYSeries series = new XYSeries("");
    collect.addSeries(series);

    TreeMap<Integer, TreeSet<NucleusLogNode>> map = tree.allTreeCuts(500);

    for (Integer i : map.keySet()) {
        TreeSet<NucleusLogNode> nodes = map.get(i);
        double lnP = nodes.first().getLogPosterior();
        series.add((double) i, Math.exp(lnP));

    }

    JFreeChart chart = ChartFactory.createXYLineChart(
            String.format("Time=%d,nu=%d,alpha=%e", tree.getTime(), tree.getNu(), tree.getAlpha()), "Index",
            "Probability", collect, PlotOrientation.VERTICAL, false, true, true);

    ChartPanel panel = new ChartPanel(chart);
    this.add(panel);
}

From source file:cz.zcu.kiv.eegdatabase.logic.schemagen.ScenarioSchemaGenerator.java

private String getElement() {

    InputStream xsd = new ByteArrayInputStream(content);

    XSOMParser parser = new XSOMParser();

    try {
        parser.parse(xsd);
    } catch (SAXException e) {
        if (e.getException() != null)
            e.getException().printStackTrace();
        else
            e.printStackTrace();
    }

    Set<SchemaDocument> docSet = parser.getDocuments();
    Map<String, XSElementDecl> elementDeclMap = null;
    for (SchemaDocument doc : docSet) {
        elementDeclMap = doc.getSchema().getElementDecls();
        if (!elementDeclMap.isEmpty())
            break;
    }

    TreeSet<String> elementNameSet = new TreeSet<String>(elementDeclMap.keySet());
    String elementName = elementNameSet.first();

    System.out.println(elementName);

    return elementName;
}

From source file:gov.nih.nci.caarray.application.translation.magetab.TermSourceTranslator.java

/**
 * @param matches the candidate term sources
 * @return the best match: the first element under TermSourceVersionComparator ordering
 */
private TermSource getBestMatch(Set<TermSource> matches) {
    TreeSet<TermSource> sorted = new TreeSet<TermSource>(new TermSourceVersionComparator());
    sorted.addAll(matches);
    return sorted.first();
}
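
Because this TreeSet is built with an explicit comparator, first() returns the minimum under TermSourceVersionComparator (not shown here) rather than under natural ordering. A hypothetical sketch with Comparator.reverseOrder() standing in for it, so that the "lowest" element is the highest version string:

import java.util.Arrays;
import java.util.Comparator;
import java.util.TreeSet;

public class ComparatorFirstSketch {
    public static void main(String[] args) {
        TreeSet<String> versions = new TreeSet<String>(Comparator.<String>reverseOrder());
        versions.addAll(Arrays.asList("1.0", "2.0", "3.0"));
        // Under the reversed ordering, first() is the textually greatest element.
        System.out.println(versions.first()); // 3.0
    }
}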

From source file:org.apache.hadoop.hbase.zookeeper.lock.ZKInterProcessWriteLock.java

/**
 * {@inheritDoc}
 */
@Override
protected String getLockPath(String createdZNode, List<String> children) throws IOException {
    TreeSet<String> sortedChildren = new TreeSet<String>(ZNodeComparator.COMPARATOR);
    sortedChildren.addAll(children);
    String pathToWatch = sortedChildren.lower(createdZNode);
    if (pathToWatch != null) {
        String nodeHoldingLock = sortedChildren.first();
        String znode = ZKUtil.joinZNode(parentLockNode, nodeHoldingLock);
        handleLockMetadata(znode);
    }
    return pathToWatch;
}

From source file:org.apache.hadoop.hbase.zookeeper.lock.HWriteLockImpl.java

/**
 * {@inheritDoc}
 */
@Override
protected String getLockPath(String createdZNode, List<String> children)
        throws IOException, InterruptedException {
    TreeSet<String> sortedChildren = new TreeSet<String>(new ZNodeComparator(zkWrapper.getIdentifier()));
    sortedChildren.addAll(children);
    String pathToWatch = sortedChildren.lower(createdZNode);
    if (pathToWatch != null) {
        String nodeHoldingLock = sortedChildren.first();
        try {
            handleLockMetadata(nodeHoldingLock);
        } catch (IOException e) {
            LOG.warn("Error processing lock metadata in " + nodeHoldingLock, e);
        }
    }
    return pathToWatch;
}
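
Both getLockPath implementations pair first() with lower(): lower(createdZNode) yields the znode immediately ahead of ours in the queue (the one to watch), while first() yields the head of the queue, i.e. the node currently holding the lock. A sketch with hypothetical znode names, using natural ordering in place of ZNodeComparator:

import java.util.Arrays;
import java.util.TreeSet;

public class LockQueueSketch {
    public static void main(String[] args) {
        TreeSet<String> sortedChildren = new TreeSet<String>(Arrays.asList(
                "write-0000000001", "write-0000000002", "write-0000000003"));

        String createdZNode = "write-0000000003";
        String pathToWatch = sortedChildren.lower(createdZNode); // write-0000000002
        String nodeHoldingLock = sortedChildren.first();         // write-0000000001

        System.out.println(pathToWatch + " / " + nodeHoldingLock);
    }
}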