Example usage for java.util LinkedHashSet size

List of usage examples for java.util LinkedHashSet size

Introduction

On this page you can find example usage for java.util.LinkedHashSet.size().

Prototype

int size();

Document

Returns the number of elements in this set (its cardinality).
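
A minimal, self-contained sketch (class and variable names are illustrative, not taken from any of the projects below) showing what size() reports: it counts distinct elements, since a LinkedHashSet ignores duplicate insertions.

import java.util.LinkedHashSet;

public class LinkedHashSetSizeExample {
    public static void main(String[] args) {
        LinkedHashSet<String> names = new LinkedHashSet<>();
        names.add("alice");
        names.add("bob");
        names.add("alice"); // duplicate insertion is silently ignored

        // size() returns the set's cardinality: 2, not 3
        System.out.println(names.size());
    }
}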

Usage

From source file:com.geewhiz.pacify.TestArchive.java
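
In this test, size() on the returned LinkedHashSet&lt;Defect&gt; is asserted to be 0, verifying that a large zip archive is processed without defects.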

@Test
public void checkBigZip() {
    String testFolder = "testArchive/correct/bigZip";

    LinkedHashSet<Defect> defects = createPrepareValidateAndReplace(testFolder,
            createPropertyResolveManager(propertiesToUseWhileResolving));

    Assert.assertEquals("We shouldnt get any defects.", 0, defects.size());

    checkIfResultIsAsExpected(testFolder);
}

From source file:com.geewhiz.pacify.TestArchive.java
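
Here size() again asserts an empty defect set before the manifests of the expected and resulting jar files are compared.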

@Test
public void checkJar() throws ArchiveException, IOException {
    String testFolder = "testArchive/correct/jar";

    File targetResourceFolder = new File("target/test-resources/", testFolder);

    LinkedHashSet<Defect> defects = createPrepareValidateAndReplace(testFolder,
            createPropertyResolveManager(propertiesToUseWhileResolving));

    Assert.assertEquals("We shouldnt get any defects.", 0, defects.size());

    File expectedArchive = new File(targetResourceFolder, "expectedResult/archive.jar");
    File resultArchive = new File(targetResourceFolder, "package/archive.jar");

    JarInputStream expected = new JarInputStream(new FileInputStream(expectedArchive));
    JarInputStream result = new JarInputStream(new FileInputStream(resultArchive));

    Assert.assertNotNull("SRC jar should contain the manifest as first entry", expected.getManifest());
    Assert.assertNotNull("RESULT jar should contain the manifest as first entry", result.getManifest());

    expected.close();
    result.close();

    checkIfResultIsAsExpected(testFolder);
}

From source file:com.geewhiz.pacify.TestArchive.java
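
In this variant, size() confirms that no defects were produced even though the source archive is a zip-packed jar whose first entry is not the manifest.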

@Test
public void checkJarWhereTheSourceIsntAJarPerDefinition() throws ArchiveException, IOException {
    LoggingUtils.setLogLevel(logger, Level.ERROR);

    String testFolder = "testArchive/correct/jarWhereSourceIsntAJarPerDefinition";

    File testResourceFolder = new File("src/test/resources/", testFolder);
    File targetResourceFolder = new File("target/test-resources/", testFolder);

    LinkedHashSet<Defect> defects = createPrepareValidateAndReplace(testFolder,
            createPropertyResolveManager(propertiesToUseWhileResolving));

    Assert.assertEquals("We shouldnt get any defects.", 0, defects.size());

    JarInputStream in = new JarInputStream(
            new FileInputStream(new File(testResourceFolder, "package/archive.jar")));
    JarInputStream out = new JarInputStream(
            new FileInputStream(new File(targetResourceFolder, "package/archive.jar")));

    Assert.assertNull("SRC jar should be a jar which is packed via zip, so the first entry isn't the manifest.",
            in.getManifest());
    Assert.assertNotNull("RESULT jar should contain the manifest as first entry", out.getManifest());

    in.close();
    out.close();

    checkIfResultIsAsExpected(testFolder);
}

From source file:edu.emory.cci.aiw.cvrg.eureka.etl.ksb.PropositionDefinitionFinder.java
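
This method uses size() to stop collecting search results once the set of nodes to load exceeds the configured search limit.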

public List<String> searchPropositions(String inSearchKey) throws PropositionFinderException {
    LinkedHashSet<String> nodesToLoad = new LinkedHashSet<>();
    try {
        List<PropositionDefinition> searchResults = knowledgeSource
                .getMatchingPropositionDefinitions(inSearchKey);
        for (PropositionDefinition pf : searchResults) {
            if (nodesToLoad.size() > etlProperties.getSearchLimit()) {
                break;
            } else {
                if (pf != null) {
                    readParentsForSearchResult(pf, nodesToLoad);
                }
            }
        }
    } catch (KnowledgeSourceReadException e) {
        throw new PropositionFinderException(e);
    }
    return new ArrayList<>(nodesToLoad);
}

From source file:net.sf.jasperreports.engine.JRPropertiesMap.java
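
Here size() supplies the length of the target array when the LinkedHashSet of collected property names is converted with toArray().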

/**
 * Returns the names of the properties.
 *  
 * @return the names of the properties
 */
public String[] getPropertyNames() {
    String[] names;
    if (hasOwnProperties()) {
        if (base == null) {
            names = propertiesList.toArray(new String[propertiesList.size()]);
        } else {
            LinkedHashSet<String> namesSet = new LinkedHashSet<String>();
            collectPropertyNames(namesSet);
            names = namesSet.toArray(new String[namesSet.size()]);
        }
    } else if (base != null) {
        names = base.getPropertyNames();
    } else {
        names = new String[0];
    }
    return names;
}

From source file:com.alibaba.wasp.plan.parser.druid.DruidDMLParser.java
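
This parser compares the size() of the column set with the number of value expressions to validate that an insert clause is well formed.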

public Pair<List<Pair<String, byte[]>>, List<ColumnStruct>> buildFieldsPair(
        MetaEventOperation metaEventOperation, FTable table, LinkedHashSet<String> columns, ValuesClause values)
        throws IOException {
    // Convert a row's each field into byte[] value
    List<SQLExpr> exprValues = values.getValues();
    if (exprValues.size() != columns.size()) {
        throw new IOException("Insert clause " + columns.size() + " columns " + " not match "
                + exprValues.size() + " values ");
    }
    Pair<String, byte[]>[] array = new Pair[table.getPrimaryKeys().size()];
    // Construct all ColumnAction
    List<ColumnStruct> cols = new ArrayList<ColumnStruct>(columns.size());
    assert (columns.size() == exprValues.size());
    Iterator<String> iter = columns.iterator();
    int i = 0;
    while (iter.hasNext()) {
        String columnName = iter.next();
        // Get the column's info
        Field column = metaEventOperation.getColumnInfo(table, columnName);
        byte[] value = convert(column, exprValues.get(i));
        Iterator<Entry<String, Field>> pkIter = table.getPrimaryKeys().entrySet().iterator();
        int j = 0;
        while (pkIter.hasNext()) {
            if (pkIter.next().getKey().equalsIgnoreCase(columnName)) {
                array[j] = new Pair<String, byte[]>(columnName, value);
                break;
            }
            j++;
        }
        // Check the input is the same as DataType
        checkType(column, exprValues.get(i));
        ColumnStruct columnAction = new ColumnStruct(table.getTableName(), column.getFamily(), columnName,
                column.getType(), value);
        cols.add(columnAction);
        i++;
    }

    return new Pair<List<Pair<String, byte[]>>, List<ColumnStruct>>(Arrays.asList(array), cols);
}

From source file:therian.operator.copy.ContainerCopierTest.java
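
This test asserts via size() that copying a singleton into an empty LinkedHashSet yields exactly one element.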

@Test
public void testSingletonToEmptySet() {
    final LinkedHashSet<Book> targetValue = new LinkedHashSet<>();
    final Position.Readable<Set<Book>> target = Positions.readOnly(LocalTypes.SET_OF_BOOK, targetValue);
    therianContext.eval(Copy.to(target, Positions.readOnly(books[0])));
    assertSame(targetValue, target.getValue());
    assertEquals(1, targetValue.size());
    assertEquals(books[0], targetValue.iterator().next());
}

From source file:com.janrain.backplane2.server.Token.java
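
Here size() is called on the bus and channel scope sets to ensure that an anonymous token's scope names exactly one bus and one channel.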

@Override
public void validate() throws SimpleDBException {
    super.validate();
    if (getType().isPrivileged()) {
        AbstractMessage.validateNotBlank(TokenField.ISSUED_TO_CLIENT_ID.getFieldName(),
                get(TokenField.ISSUED_TO_CLIENT_ID));
        AbstractMessage.validateNotBlank(TokenField.CLIENT_SOURCE_URL.getFieldName(),
                get(TokenField.CLIENT_SOURCE_URL));
        AbstractMessage.validateNotBlank(TokenField.BACKING_GRANTS.getFieldName(),
                get(Token.TokenField.BACKING_GRANTS));
    } else {
        Scope anonScope = getScope();
        LinkedHashSet<String> buses = anonScope.getScopeMap().get(BackplaneMessage.Field.BUS);
        LinkedHashSet<String> channels = anonScope.getScopeMap().get(BackplaneMessage.Field.CHANNEL);
        if (buses == null || buses.size() > 1 || channels == null || channels.size() > 1) {
            throw new SimpleDBException(
                    "invalid scope for anonymous token, must have exactly one bus and one channel specified: "
                            + anonScope);
        }
    }
}

From source file:org.alfresco.util.cache.AbstractAsynchronouslyRefreshedCache.java
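
This method uses size() to return early when the set of tenant IDs to refresh is empty.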

private void queueRefreshAndSubmit(LinkedHashSet<String> tenantIds) {
    if ((tenantIds == null) || (tenantIds.size() == 0)) {
        return;
    }
    refreshLock.writeLock().lock();
    try {
        for (String tenantId : tenantIds) {
            if (logger.isDebugEnabled()) {
                logger.debug("Async cache adding refresh to queue for tenant " + tenantId + " on " + this);
            }
            refreshQueue.add(new Refresh(tenantId));
        }
    } finally {
        refreshLock.writeLock().unlock();
    }
    submit();
}

From source file:com.datatorrent.lib.io.fs.AbstractFSDirectoryInputOperatorTest.java
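
This test asserts via size() that each new partition's scanner sees exactly two of the four created files.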

@Test
public void testPartitioning() throws Exception {
    TestFSDirectoryInputOperator oper = new TestFSDirectoryInputOperator();
    oper.getScanner().setFilePatternRegexp(".*partition([\\d]*)");
    oper.setDirectory(new File(testMeta.dir).getAbsolutePath());

    Path path = new Path(new File(testMeta.dir).getAbsolutePath());
    FileContext.getLocalFSFileContext().delete(path, true);
    for (int file = 0; file < 4; file++) {
        FileUtils.write(new File(testMeta.dir, "partition00" + file), "");
    }

    List<Partition<AbstractFSDirectoryInputOperator<String>>> partitions = Lists.newArrayList();
    partitions.add(new DefaultPartition<AbstractFSDirectoryInputOperator<String>>(oper));
    Collection<Partition<AbstractFSDirectoryInputOperator<String>>> newPartitions = oper
            .definePartitions(partitions, 1);
    Assert.assertEquals(2, newPartitions.size());
    Assert.assertEquals(2, oper.getCurrentPartitions());

    for (Partition<AbstractFSDirectoryInputOperator<String>> p : newPartitions) {
        Assert.assertNotSame(oper, p.getPartitionedInstance());
        Assert.assertNotSame(oper.getScanner(), p.getPartitionedInstance().getScanner());
        Set<String> consumed = Sets.newHashSet();
        LinkedHashSet<Path> files = p.getPartitionedInstance().getScanner()
                .scan(FileSystem.getLocal(new Configuration(false)), path, consumed);
        Assert.assertEquals("partition " + files, 2, files.size());
    }
}