Example usage for java.util SortedSet toArray

List of usage examples for java.util SortedSet toArray

Introduction

This page collects example usages of java.util.SortedSet.toArray.

Prototype

<T> T[] toArray(T[] a);

Document

Returns an array containing all of the elements in this set; the runtime type of the returned array is that of the specified array.
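Before the project listings below, here is a minimal, self-contained sketch of that contract; the class name and set contents are illustrative only and do not come from any of the projects shown.

import java.util.SortedSet;
import java.util.TreeSet;

public class SortedSetToArrayExample {
    public static void main(String[] args) {
        final SortedSet<String> names = new TreeSet<String>();
        names.add("bravo");
        names.add("alpha");
        names.add("charlie");

        // The argument fixes the runtime component type of the result; a
        // zero-length array lets toArray allocate one of the correct size.
        final String[] sorted = names.toArray(new String[0]);

        for (final String name : sorted) {
            System.out.println(name); // prints alpha, bravo, charlie in sorted order
        }
    }
}

Several of the projects below size the destination array up front (for example new HServerInfo[sorted.size()]), which lets toArray fill the supplied array instead of allocating a new one; both forms return the elements in the set's iteration order.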

Usage

From source file:org.sventon.service.javahl.JavaHLRepositoryServiceTest.java

@Test
public void testGetLogEntries() throws Exception {
    final Map<String, String> propMap = new HashMap<String, String>();
    final ChangePath cp1 = mock(ChangePath.class);
    final ChangePath cp2 = mock(ChangePath.class);
    final ChangePath[] changePaths = { cp1, cp2 };
    final int rev = 4711;
    final Date date = new Date();
    final String dateString = DateUtil.formatISO8601(date);

    MapUtils.putAll(propMap, new String[][] { { "svn:author", "daAuthor" }, { "svn:date", dateString },
            { "svn:log", "Added new text in my finest file" } });

    when(cp1.getPath()).thenReturn("/trunk/src/main/da/path/myfile.txt");
    when(cp1.getAction()).thenReturn('M');
    when(cp1.getCopySrcPath()).thenReturn(null);
    when(cp1.getCopySrcRevision()).thenReturn(-1L);

    when(cp2.getPath()).thenReturn("/branches/lemontree/src/main/da/path/myfile.txt");
    when(cp2.getAction()).thenReturn('A');
    when(cp2.getCopySrcPath()).thenReturn(null);
    when(cp2.getCopySrcRevision()).thenReturn(-1L);

    when(connection.getRepositoryRootUrl()).thenReturn(new SVNURL("svn://myhost/repro"));

    // Yiks! We probably need to refactor this later...
    // Matching for SVNClient.logMessages() is also a little bit too loose.
    doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            Object[] args = invocation.getArguments();
            final LogMessageCallback cb = (LogMessageCallback) args[8];
            cb.singleMessage(changePaths, rev, propMap, false);

            return null;
        }
    }).when(client).logMessages(eq("svn://myhost/repro/da/path"), (Revision) any(), (RevisionRange[]) any(),
            eq(false), eq(false), eq(false), (String[]) any(), anyInt(), (LogMessageCallback) any());

    final List<LogEntry> logEntries = service.getLogEntries(connection, null, 1, 100, "da/path", 100, false,
            false);

    // Verify number of LogEntries
    assertEquals(1, logEntries.size());

    // Verify ChangePath
    final LogEntry logEntry = logEntries.get(0);
    final SortedSet<ChangedPath> changedPaths = logEntry.getChangedPaths();
    assertEquals(2, changedPaths.size());

    ChangedPath[] paths = new ChangedPath[2];
    changedPaths.toArray(paths);

    assertEquals("/branches/lemontree/src/main/da/path/myfile.txt", paths[0].getPath());
    assertEquals(ChangeType.ADDED, paths[0].getType());
    assertEquals("/trunk/src/main/da/path/myfile.txt", paths[1].getPath());
    assertEquals(ChangeType.MODIFIED, paths[1].getType());

    //Verify Properties
    assertEquals("daAuthor", logEntry.getAuthor());
    // TODO: check this! We fail because we're GMT+1&DLS and date is in UTC...
    //assertEquals(date, logEntry.getDate());
    assertEquals("Added new text in my finest file", logEntry.getMessage());
    assertEquals(4711, logEntry.getRevision());
}

From source file:com.tamingtext.classifier.mlt.MoreLikeThisCategorizer.java

public CategoryHits[] categorize(Reader reader) throws IOException {
    Query query = moreLikeThis.like(reader);

    HashMap<String, CategoryHits> categoryHash = new HashMap<String, CategoryHits>(25);

    for (ScoreDoc sd : indexSearcher.search(query, maxResults).scoreDocs) {
        String cat = getDocClass(sd.doc);
        if (cat == null)
            continue;
        CategoryHits ch = categoryHash.get(cat);
        if (ch == null) {
            ch = new CategoryHits();
            ch.setLabel(cat);
            categoryHash.put(cat, ch);
        }

        ch.incrementScore(sd.score);
    }

    SortedSet<CategoryHits> sortedCats = new TreeSet<CategoryHits>(CategoryHits.byScoreComparator());
    sortedCats.addAll(categoryHash.values());
    return sortedCats.toArray(new CategoryHits[0]);
}

From source file:nz.ac.otago.psyanlab.common.designer.util.LongSparseArrayAdapter.java

private Long[] sortKeys(Context context, LongSparseArray<T> items) {
    Locale locale = context.getResources().getConfiguration().locale;
    final Collator collator = Collator.getInstance(locale);
    collator.setStrength(Collator.SECONDARY);

    SortedSet<Long> sortedKeys = new TreeSet<Long>(new Comparator<Long>() {
        @Override
        public int compare(Long lhs, Long rhs) {
            return collator.compare(mItems.get(lhs).toString(), mItems.get(rhs).toString());
        }
    });

    for (int i = 0; i < items.size(); i++) {
        sortedKeys.add(items.keyAt(i));
    }

    return sortedKeys.toArray(new Long[sortedKeys.size()]);
}

From source file:org.apache.hadoop.hbase.master.TestLoadBalancer.java

private String printMock(List<HServerInfo> balancedCluster) {
    SortedSet<HServerInfo> sorted = new TreeSet<HServerInfo>(balancedCluster);
    HServerInfo[] arr = sorted.toArray(new HServerInfo[sorted.size()]);
    StringBuilder sb = new StringBuilder(sorted.size() * 4 + 4);
    sb.append("{ ");
    for (int i = 0; i < arr.length; i++) {
        if (i != 0) {
            sb.append(" , ");
        }
        sb.append(arr[i].getLoad().getNumberOfRegions());
    }
    sb.append(" }");
    return sb.toString();
}

From source file:org.apache.hadoop.hbase.master.balancer.BalancerTestBase.java

protected String printMock(List<ServerAndLoad> balancedCluster) {
    SortedSet<ServerAndLoad> sorted = new TreeSet<ServerAndLoad>(balancedCluster);
    ServerAndLoad[] arr = sorted.toArray(new ServerAndLoad[sorted.size()]);
    StringBuilder sb = new StringBuilder(sorted.size() * 4 + 4);
    sb.append("{ ");
    for (int i = 0; i < arr.length; i++) {
        if (i != 0) {
            sb.append(" , ");
        }
        sb.append(arr[i].getServerName().getHostname());
        sb.append(":");
        sb.append(arr[i].getLoad());
    }
    sb.append(" }");
    return sb.toString();
}

From source file:org.apache.hadoop.hbase.master.TestDefaultLoadBalancer.java

private String printMock(List<ServerAndLoad> balancedCluster) {
    SortedSet<ServerAndLoad> sorted = new TreeSet<ServerAndLoad>(balancedCluster);
    ServerAndLoad[] arr = sorted.toArray(new ServerAndLoad[sorted.size()]);
    StringBuilder sb = new StringBuilder(sorted.size() * 4 + 4);
    sb.append("{ ");
    for (int i = 0; i < arr.length; i++) {
        if (i != 0) {
            sb.append(" , ");
        }
        sb.append(arr[i].getLoad());
    }
    sb.append(" }");
    return sb.toString();
}

From source file:org.apache.ctakes.ytex.R.RGramMatrixExporterImpl.java

private void outputGramMatrix(double[][] gramMatrix, SortedSet<Long> instanceIds, String dataFilePrefix)
        throws IOException {
    BufferedWriter w = null;
    BufferedWriter wId = null;
    try {
        w = new BufferedWriter(new FileWriter(dataFilePrefix + "data.txt"));
        wId = new BufferedWriter(new FileWriter(dataFilePrefix + "instance_id.txt"));
        Long[] instanceIdArray = instanceIds.toArray(new Long[] {});
        // write instance id corresponding to row
        for (int h = 0; h < instanceIdArray.length; h++) {
            wId.write(Long.toString(instanceIdArray[h]));
            wId.write("\n");
        }
        for (int i = 0; i < instanceIdArray.length; i++) {
            // write line from gram matrix
            for (int j = 0; j < instanceIdArray.length; j++) {
                w.write(Double.toString(gramMatrix[i][j]));
                if (j < instanceIdArray.length - 1)
                    w.write(" ");
            }
            w.write("\n");
        }
    } finally {
        if (w != null) {
            w.close();
        }
        if (wId != null) {
            wId.close();
        }
    }
}

From source file:org.calrissian.accumulorecipes.commons.hadoop.GroupedKeyRangePartitioner.java

private synchronized Text[] getCutPoints() throws IOException {
    if (cutPointArray == null) {

        Path[] cf = DistributedCache.getLocalCacheFiles(conf);
        if (cf != null) {
            Map<String, String> curFilesAndGroups = getCurFilesAndGroups();
            SortedMap<String, SortedSet<String>> cutPointMap = new TreeMap<String, SortedSet<String>>();
            for (Path path : cf) {
                String group = null;
                for (Map.Entry<String, String> groupSplits : curFilesAndGroups.entrySet()) {
                    if (path.toString().endsWith(groupSplits.getKey()))
                        group = groupSplits.getValue();
                }

                if (group != null) {
                    Scanner in = new Scanner(new BufferedReader(new FileReader(path.toString())));

                    try {
                        while (in.hasNextLine()) {
                            String split = new String(Base64.decodeBase64(in.nextLine().getBytes()));

                            SortedSet<String> splits = cutPointMap.get(group);
                            if (splits == null) {
                                splits = new TreeSet<String>();
                                cutPointMap.put(group, splits);
                            }
                            // Add the decoded split point to this group's set;
                            // without this the cut-point sets stay empty.
                            splits.add(split);
                        }

                        SortedSet<Text> treeSet = new TreeSet<Text>();
                        for (Map.Entry<String, SortedSet<String>> entry : cutPointMap.entrySet()) {
                            treeSet.add(new Text(entry.getKey() + NULL_BYTE + NULL_BYTE));

                            for (String string : entry.getValue())
                                treeSet.add(new Text(entry.getKey() + NULL_BYTE + string));

                            treeSet.add(new Text(entry.getKey() + NULL_BYTE + END_BYTE));
                        }

                        cutPointArray = treeSet.toArray(new Text[] {});
                    } finally {
                        in.close();
                    }

                    break;
                } else {
                    throw new FileNotFoundException(
                            "A file was not found in distribution cache files: " + path.toString());
                }
            }
        }
    }
    return cutPointArray;
}

From source file:net.zypr.api.Protocol.java

public String[] getAllVerbs() {
    SortedSet<String> sortedSet = new TreeSet<String>();
    for (Enumeration enumeration = _services.elements(); enumeration.hasMoreElements();) {
        String[] verbs = ((ServiceVO) (enumeration.nextElement())).getVerbs();
        for (int index = 0; index < verbs.length; index++)
            sortedSet.add(verbs[index]);
    }
    return (sortedSet.toArray(new String[0]));
}

From source file:de.ailis.xadrian.data.Sector.java

/**
 * Returns the ore asteroids of this sector.
 *
 * @return The ore asteroids
 */
public Asteroid[] getOreAsteroids() {
    final SortedSet<Asteroid> asteroids = new TreeSet<Asteroid>();
    for (final Asteroid asteroid : getAsteroids()) {
        if (asteroid.getWare().isOre()) {
            asteroids.add(asteroid);
        }
    }
    return asteroids.toArray(new Asteroid[asteroids.size()]);
}