Example usage for java.util List containsAll

Introduction

This page collects real-world usage examples for the java.util.List.containsAll method, drawn from open-source projects.

Prototype

boolean containsAll(Collection<?> c);

Document

Returns true if this list contains all of the elements of the specified collection.
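
As a quick illustration of that contract, here is a minimal, self-contained sketch (the class name and values are illustrative only, not taken from the projects below):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class ContainsAllDemo {
    public static void main(String[] args) {
        List<String> base = Arrays.asList("a", "b", "c");
        // true: every element of the argument collection is in the list,
        // regardless of order
        System.out.println(base.containsAll(Arrays.asList("c", "a")));
        // false: "d" is not an element of the list
        System.out.println(base.containsAll(Arrays.asList("a", "d")));
        // true: the empty collection is trivially contained
        System.out.println(base.containsAll(Collections.emptyList()));
    }
}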

Usage

From source file:org.openecomp.sdc.be.components.impl.GroupBusinessLogic.java
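
In this method, containsAll verifies that every artifact requested for dissociation is currently associated with the group; if any is missing, the operation fails with GROUP_ARTIFACT_ALREADY_DISSOCIATED.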

/**
 * Dissociates artifacts from the given groups.
 *
 * @param componentId    id of the component that owns the groups
 * @param userId         id of the user performing the operation
 * @param componentType  type of the component being updated
 * @param groups         groups whose artifacts should be dissociated
 * @param shouldLockComp whether the component should be locked during the update
 * @param inTransaction  whether the call already runs inside a transaction
 * @return the updated groups on success, or a ResponseFormat describing the failure
 */
public Either<List<GroupDefinition>, ResponseFormat> dissociateArtifactsFromGroup(String componentId,
        String userId, ComponentTypeEnum componentType, List<GroupDefinition> groups, boolean shouldLockComp,
        boolean inTransaction) {

    Either<List<GroupDefinition>, ResponseFormat> result = null;

    if (shouldLockComp && inTransaction) {
        BeEcompErrorManager.getInstance().logInternalFlowError("dissociateArtifactsFromGroup",
                "Cannot lock component since we are inside a transaction", ErrorSeverity.ERROR);
        // Cannot lock component since we are in a middle of another
        // transaction.
        ActionStatus actionStatus = ActionStatus.INVALID_CONTENT;
        result = Either.right(componentsUtils.getResponseFormat(actionStatus));
        return result;
    }

    Component component = null;

    try {

        if (groups == null || groups.isEmpty()) {
            return Either.right(componentsUtils.getResponseFormat(ActionStatus.OK));
        }

        Either<Component, ResponseFormat> validateGroupsBeforeUpdate = validateGroupsBeforeUpdate(componentId,
                userId, componentType, groups, inTransaction);
        if (validateGroupsBeforeUpdate.isRight()) {
            result = Either.right(validateGroupsBeforeUpdate.right().value());
            return result;
        }

        component = validateGroupsBeforeUpdate.left().value();

        if (shouldLockComp) {
            Either<Boolean, ResponseFormat> lockComponent = lockComponent(component,
                    "Group - Dissociate Artifacts");
            if (lockComponent.isRight()) {
                return Either.right(lockComponent.right().value());
            }
        }

        List<GroupDefinition> updatedGroups = new ArrayList<>();

        List<GroupDefinition> componentGroups = component.getGroups();
        // for each group, dissociate the requested artifacts from it
        for (GroupDefinition groupDefinition : groups) {

            GroupDefinition componentGroup = componentGroups.stream()
                    .filter(p -> p.getUniqueId().equals(groupDefinition.getUniqueId())).findFirst()
                    .orElse(null);
            if (componentGroup != null) {
                List<String> componentArtifacts = componentGroup.getArtifacts();
                int artifactsSizeInGroup = componentArtifacts == null ? 0 : componentArtifacts.size();
                List<String> artifactsToDissociate = groupDefinition.getArtifacts();

                // if no artifacts were sent
                if (artifactsToDissociate == null || artifactsToDissociate.isEmpty()) {
                    continue;
                }

                if (artifactsSizeInGroup > 0) {

                    boolean containsAll = componentArtifacts.containsAll(artifactsToDissociate);
                    if (!containsAll) {
                        // At least one artifact is not associated to the group
                        log.debug("Some of the artifacts are already dissociated from group {}",
                                groupDefinition.getUniqueId());
                        return Either.right(componentsUtils.getResponseFormat(
                                ActionStatus.GROUP_ARTIFACT_ALREADY_DISSOCIATED, componentGroup.getName()));
                    }
                } else {
                    // the group has no artifacts, yet artifacts were requested for dissociation
                    if (artifactsToDissociate != null && !artifactsToDissociate.isEmpty()) {
                        log.debug("No artifact is found under the group {}", groupDefinition.getUniqueId());
                        return Either.right(componentsUtils.getResponseFormat(
                                ActionStatus.GROUP_ARTIFACT_ALREADY_DISSOCIATED, componentGroup.getName()));
                    }
                }
            }

            Either<GroupDefinition, StorageOperationStatus> dissociateResult = groupOperation
                    .dissociateArtifactsFromGroup(groupDefinition.getUniqueId(), groupDefinition.getArtifacts(),
                            true);

            if (dissociateResult.isRight()) {
                ActionStatus actionStatus = componentsUtils
                        .convertFromStorageResponse(dissociateResult.right().value());
                result = Either.right(componentsUtils.getResponseFormat(actionStatus));
                log.debug("Failed to update group {} under component {}, error: {}", groupDefinition.getName(),
                        component.getNormalizedName(), actionStatus.name());
                return result;
            }
            updatedGroups.add(dissociateResult.left().value());

        }

        result = Either.left(updatedGroups);
        return result;

    } finally {

        if (!inTransaction) {

            if (result == null || result.isRight()) {
                log.debug("Going to execute rollback on create group.");
                titanGenericDao.rollback();
            } else {
                log.debug("Going to execute commit on create group.");
                titanGenericDao.commit();
            }

        }
        // unlock resource
        if (shouldLockComp && component != null) {
            graphLockOperation.unlockComponent(componentId, componentType.getNodeType());
        }

    }

}

From source file:org.wso2.carbon.apimgt.core.dao.impl.ApiDAOImplIT.java
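
This test helper compares an API search result against the expected API names, using containsAll in both directions so that the comparison ignores ordering.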

/**
 * Compare the results of attribute search in store
 *
 * @param userRoles        List of the roles of the user.
 * @param attributeMap     Map containing the attributes to be searched
 * @param expectedAPINames List of expected APIs.
 * @return true if returned API list has all expected APIs, false otherwise
 * @throws APIMgtDAOException if an error occurs while accessing the data layer
 */
private boolean compareResults(List<String> userRoles, Map<String, String> attributeMap,
        String[] expectedAPINames) throws APIMgtDAOException {

    ApiDAO apiDAO = DAOFactory.getApiDAO();
    List<API> apiList = apiDAO.searchAPIsByAttributeInStore(userRoles, attributeMap, 10, 0);
    List<String> resultAPINameList = new ArrayList<>();
    for (API api : apiList) {
        resultAPINameList.add(api.getName());
    }
    List<String> expectedAPINameList = Arrays.asList(expectedAPINames);
    //check if returned API list has all expected APIs
    return resultAPINameList.containsAll(expectedAPINameList)
            && expectedAPINameList.containsAll(resultAPINameList);
}
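
The two-directional containsAll check above is a common idiom for asserting that two lists hold the same distinct elements, ignoring order (and duplicate counts). A compact restatement, as a hypothetical helper:

import java.util.List;

final class ListComparison {
    // True when both lists contain the same distinct elements,
    // regardless of order and duplicate counts.
    static <T> boolean sameElements(List<T> a, List<T> b) {
        return a.containsAll(b) && b.containsAll(a);
    }
}

Note that List.containsAll performs a linear scan per element, so this check is quadratic in list size; for long lists, copying one side into a HashSet first makes each lookup effectively constant-time.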

From source file:org.apache.hadoop.hive.metastore.TestHiveMetaStore.java
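
This Hive metastore test exercises partition creation and listing; containsAll asserts that the listings returned for a partial partition spec include every expected partition and partition name.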

public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf) throws Exception {
    try {
        String dbName = "compdb";
        String tblName = "comptbl";
        String typeName = "Person";
        List<String> vals = makeVals("2008-07-01 14:13:12", "14");
        List<String> vals2 = makeVals("2008-07-01 14:13:12", "15");
        List<String> vals3 = makeVals("2008-07-02 14:13:12", "15");
        List<String> vals4 = makeVals("2008-07-03 14:13:12", "151");

        client.dropTable(dbName, tblName);
        silentDropDatabase(dbName);
        Database db = new Database();
        db.setName(dbName);
        client.createDatabase(db);
        db = client.getDatabase(dbName);
        Path dbPath = new Path(db.getLocationUri());
        FileSystem fs = FileSystem.get(dbPath.toUri(), hiveConf);
        boolean inheritPerms = hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
        FsPermission dbPermission = fs.getFileStatus(dbPath).getPermission();
        if (inheritPerms) {
            //Set different perms for the database dir for further tests
            dbPermission = new FsPermission((short) 488);
            fs.setPermission(dbPath, dbPermission);
        }

        client.dropType(typeName);
        Type typ1 = new Type();
        typ1.setName(typeName);
        typ1.setFields(new ArrayList<FieldSchema>(2));
        typ1.getFields().add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
        typ1.getFields().add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
        client.createType(typ1);

        Table tbl = new Table();
        tbl.setDbName(dbName);
        tbl.setTableName(tblName);
        StorageDescriptor sd = new StorageDescriptor();
        tbl.setSd(sd);
        sd.setCols(typ1.getFields());
        sd.setCompressed(false);
        sd.setNumBuckets(1);
        sd.setParameters(new HashMap<String, String>());
        sd.getParameters().put("test_param_1", "Use this for comments etc");
        sd.setBucketCols(new ArrayList<String>(2));
        sd.getBucketCols().add("name");
        sd.setSerdeInfo(new SerDeInfo());
        sd.getSerdeInfo().setName(tbl.getTableName());
        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
        sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
        sd.setSortCols(new ArrayList<Order>());
        sd.setStoredAsSubDirectories(false);
        sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
        sd.setInputFormat(HiveInputFormat.class.getName());
        sd.setOutputFormat(HiveOutputFormat.class.getName());

        //skewed information
        SkewedInfo skewInfor = new SkewedInfo();
        skewInfor.setSkewedColNames(Arrays.asList("name"));
        List<String> skv = Arrays.asList("1");
        skewInfor.setSkewedColValues(Arrays.asList(skv));
        Map<List<String>, String> scvlm = new HashMap<List<String>, String>();
        scvlm.put(skv, "location1");
        skewInfor.setSkewedColValueLocationMaps(scvlm);
        sd.setSkewedInfo(skewInfor);

        tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
        tbl.getPartitionKeys().add(new FieldSchema("ds", serdeConstants.STRING_TYPE_NAME, ""));
        tbl.getPartitionKeys().add(new FieldSchema("hr", serdeConstants.STRING_TYPE_NAME, ""));

        client.createTable(tbl);

        if (isThriftClient) {
            // the createTable() above does not update the location in the 'tbl'
            // object when the client is a thrift client and the code below relies
            // on the location being present in the 'tbl' object - so get the table
            // from the metastore
            tbl = client.getTable(dbName, tblName);
        }

        assertEquals(dbPermission, fs.getFileStatus(new Path(tbl.getSd().getLocation())).getPermission());

        Partition part = makePartitionObject(dbName, tblName, vals, tbl, "/part1");
        Partition part2 = makePartitionObject(dbName, tblName, vals2, tbl, "/part2");
        Partition part3 = makePartitionObject(dbName, tblName, vals3, tbl, "/part3");
        Partition part4 = makePartitionObject(dbName, tblName, vals4, tbl, "/part4");

        // check if the partition exists (it shouldn't)
        boolean exceptionThrown = false;
        try {
            Partition p = client.getPartition(dbName, tblName, vals);
        } catch (Exception e) {
            assertEquals("partition should not have existed", NoSuchObjectException.class, e.getClass());
            exceptionThrown = true;
        }
        assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
        Partition retp = client.add_partition(part);
        assertNotNull("Unable to create partition " + part, retp);
        assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation())).getPermission());
        Partition retp2 = client.add_partition(part2);
        assertNotNull("Unable to create partition " + part2, retp2);
        assertEquals(dbPermission, fs.getFileStatus(new Path(retp2.getSd().getLocation())).getPermission());
        Partition retp3 = client.add_partition(part3);
        assertNotNull("Unable to create partition " + part3, retp3);
        assertEquals(dbPermission, fs.getFileStatus(new Path(retp3.getSd().getLocation())).getPermission());
        Partition retp4 = client.add_partition(part4);
        assertNotNull("Unable to create partition " + part4, retp4);
        assertEquals(dbPermission, fs.getFileStatus(new Path(retp4.getSd().getLocation())).getPermission());

        Partition part_get = client.getPartition(dbName, tblName, part.getValues());
        if (isThriftClient) {
            // since we are using thrift, 'part' will not have the create time and
            // last DDL time set since it does not get updated in the add_partition()
            // call - likewise part2 and part3 - set it correctly so that equals check
            // doesn't fail
            adjust(client, part, dbName, tblName);
            adjust(client, part2, dbName, tblName);
            adjust(client, part3, dbName, tblName);
        }
        assertTrue("Partitions are not same", part.equals(part_get));

        String partName = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=14";
        String part2Name = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=15";
        String part3Name = "ds=" + FileUtils.escapePathName("2008-07-02 14:13:12") + "/hr=15";
        String part4Name = "ds=" + FileUtils.escapePathName("2008-07-03 14:13:12") + "/hr=151";

        part_get = client.getPartition(dbName, tblName, partName);
        assertTrue("Partitions are not the same", part.equals(part_get));

        // Test partition listing with a partial spec - ds is specified but hr is not
        List<String> partialVals = new ArrayList<String>();
        partialVals.add(vals.get(0));
        Set<Partition> parts = new HashSet<Partition>();
        parts.add(part);
        parts.add(part2);

        List<Partition> partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
        assertTrue("Should have returned 2 partitions", partial.size() == 2);
        assertTrue("Not all parts returned", partial.containsAll(parts));

        Set<String> partNames = new HashSet<String>();
        partNames.add(partName);
        partNames.add(part2Name);
        List<String> partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
        assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
        assertTrue("Not all part names returned", partialNames.containsAll(partNames));

        partNames.add(part3Name);
        partNames.add(part4Name);
        partialVals.clear();
        partialVals.add("");
        partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
        assertTrue("Should have returned 4 partition names", partialNames.size() == 4);
        assertTrue("Not all part names returned", partialNames.containsAll(partNames));

        // Test partition listing with a partial spec - hr is specified but ds is not
        parts.clear();
        parts.add(part2);
        parts.add(part3);

        partialVals.clear();
        partialVals.add("");
        partialVals.add(vals2.get(1));

        partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
        assertEquals("Should have returned 2 partitions", 2, partial.size());
        assertTrue("Not all parts returned", partial.containsAll(parts));

        partNames.clear();
        partNames.add(part2Name);
        partNames.add(part3Name);
        partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
        assertEquals("Should have returned 2 partition names", 2, partialNames.size());
        assertTrue("Not all part names returned", partialNames.containsAll(partNames));

        // Verify escaped partition names don't return partitions
        exceptionThrown = false;
        try {
            String badPartName = "ds=2008-07-01 14%3A13%3A12/hrs=14";
            client.getPartition(dbName, tblName, badPartName);
        } catch (NoSuchObjectException e) {
            exceptionThrown = true;
        }
        assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);

        Path partPath = new Path(part.getSd().getLocation());

        assertTrue(fs.exists(partPath));
        client.dropPartition(dbName, tblName, part.getValues(), true);
        assertFalse(fs.exists(partPath));

        // Test append_partition_by_name
        client.appendPartition(dbName, tblName, partName);
        Partition part5 = client.getPartition(dbName, tblName, part.getValues());
        assertTrue("Append partition by name failed", part5.getValues().equals(vals));
        Path part5Path = new Path(part5.getSd().getLocation());
        assertTrue(fs.exists(part5Path));

        // Test drop_partition_by_name
        assertTrue("Drop partition by name failed", client.dropPartition(dbName, tblName, partName, true));
        assertFalse(fs.exists(part5Path));

        // add the partition again so that drop table with a partition can be
        // tested
        retp = client.add_partition(part);
        assertNotNull("Unable to create partition " + part, retp);
        assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation())).getPermission());

        // test add_partitions

        List<String> mvals1 = makeVals("2008-07-04 14:13:12", "14641");
        List<String> mvals2 = makeVals("2008-07-04 14:13:12", "14642");
        List<String> mvals3 = makeVals("2008-07-04 14:13:12", "14643");
        List<String> mvals4 = makeVals("2008-07-04 14:13:12", "14643"); // equal to 3
        List<String> mvals5 = makeVals("2008-07-04 14:13:12", "14645");

        Exception savedException;

        // add_partitions(empty list) : ok, normal operation
        client.add_partitions(new ArrayList<Partition>());

        // add_partitions(1,2,3) : ok, normal operation
        Partition mpart1 = makePartitionObject(dbName, tblName, mvals1, tbl, "/mpart1");
        Partition mpart2 = makePartitionObject(dbName, tblName, mvals2, tbl, "/mpart2");
        Partition mpart3 = makePartitionObject(dbName, tblName, mvals3, tbl, "/mpart3");
        client.add_partitions(Arrays.asList(mpart1, mpart2, mpart3));

        if (isThriftClient) {
            // do DDL time munging if thrift mode
            adjust(client, mpart1, dbName, tblName);
            adjust(client, mpart2, dbName, tblName);
            adjust(client, mpart3, dbName, tblName);
        }
        verifyPartitionsPublished(client, dbName, tblName, Arrays.asList(mvals1.get(0)),
                Arrays.asList(mpart1, mpart2, mpart3));

        Partition mpart4 = makePartitionObject(dbName, tblName, mvals4, tbl, "/mpart4");
        Partition mpart5 = makePartitionObject(dbName, tblName, mvals5, tbl, "/mpart5");

        // create dir for /mpart5
        Path mp5Path = new Path(mpart5.getSd().getLocation());
        warehouse.mkdirs(mp5Path, true);
        assertTrue(fs.exists(mp5Path));
        assertEquals(dbPermission, fs.getFileStatus(mp5Path).getPermission());

        // add_partitions(5,4) : err = duplicate keyvals on mpart4
        savedException = null;
        try {
            client.add_partitions(Arrays.asList(mpart5, mpart4));
        } catch (Exception e) {
            savedException = e;
        } finally {
            assertNotNull(savedException);
        }

        // check that /mpart4 does not exist, but /mpart5 still does.
        assertTrue(fs.exists(mp5Path));
        assertFalse(fs.exists(new Path(mpart4.getSd().getLocation())));

        // add_partitions(5) : ok
        client.add_partitions(Arrays.asList(mpart5));

        if (isThriftClient) {
            // do DDL time munging if thrift mode
            adjust(client, mpart5, dbName, tblName);
        }

        verifyPartitionsPublished(client, dbName, tblName, Arrays.asList(mvals1.get(0)),
                Arrays.asList(mpart1, mpart2, mpart3, mpart5));

        //// end add_partitions tests

        client.dropTable(dbName, tblName);

        client.dropType(typeName);

        // recreate table as external, drop partition and it should
        // still exist
        tbl.setParameters(new HashMap<String, String>());
        tbl.getParameters().put("EXTERNAL", "TRUE");
        client.createTable(tbl);
        retp = client.add_partition(part);
        assertTrue(fs.exists(partPath));
        client.dropPartition(dbName, tblName, part.getValues(), true);
        assertTrue(fs.exists(partPath));

        for (String tableName : client.getTables(dbName, "*")) {
            client.dropTable(dbName, tableName);
        }

        client.dropDatabase(dbName);

    } catch (Exception e) {
        System.err.println(StringUtils.stringifyException(e));
        System.err.println("testPartition() failed.");
        throw e;
    }
}

From source file:org.wso2.carbon.event.formatter.admin.internal.EventFormatterAdminService.java
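
Here, containsAll validates that every requested output attribute exists among the attributes (meta, correlation, and payload) of the incoming stream definition.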

private boolean checkStreamAttributeValidity(List<String> outputEventAttributes,
        StreamDefinition streamDefinition) {

    if (streamDefinition != null) {
        List<String> inComingStreamAttributes = new ArrayList<String>();
        final String PROPERTY_META_PREFIX = "meta_";
        final String PROPERTY_CORRELATION_PREFIX = "correlation_";

        List<Attribute> metaAttributeList = streamDefinition.getMetaData();
        List<Attribute> correlationAttributeList = streamDefinition.getCorrelationData();
        List<Attribute> payloadAttributeList = streamDefinition.getPayloadData();

        if (metaAttributeList != null) {
            for (Attribute attribute : metaAttributeList) {
                inComingStreamAttributes.add(PROPERTY_META_PREFIX + attribute.getName());
            }
        }
        if (correlationAttributeList != null) {
            for (Attribute attribute : correlationAttributeList) {
                inComingStreamAttributes.add(PROPERTY_CORRELATION_PREFIX + attribute.getName());
            }
        }
        if (payloadAttributeList != null) {
            for (Attribute attribute : payloadAttributeList) {
                inComingStreamAttributes.add(attribute.getName());
            }
        }

        if (!outputEventAttributes.isEmpty()) {
            return inComingStreamAttributes.containsAll(outputEventAttributes);
        }

        return true;
    } else {
        return false;
    }

}

From source file:org.auraframework.impl.root.component.BaseComponentDefTest.java
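
This test collects the qualified names of the returned model descriptors and compares them against an expected set with containsAll in both directions, failing with a descriptive message on any mismatch.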

/**
 * Test method for {@link BaseComponentDef#getModelDefDescriptors()}.
 */
@Test
public void testGetModelDefDescriptors() throws QuickFixException {
    DefDescriptor<T> grandParentDesc = addSourceAutoCleanup(getDefClass(),
            String.format(baseTag, "extensible='true'", ""));
    DefDescriptor<ModelDef> grandParentModelDesc = DefDescriptorImpl.getAssociateDescriptor(grandParentDesc,
            ModelDef.class, DefDescriptor.JAVASCRIPT_PREFIX);
    addSourceAutoCleanup(grandParentModelDesc, "{obj:{}}");

    DefDescriptor<T> parentDesc = addSourceAutoCleanup(getDefClass(),
            String.format(baseTag, String.format("extends='%s' extensible='true' model='js://test.jsModel'",
                    grandParentDesc.getDescriptorName()), ""));

    DefDescriptor<T> compDesc = addSourceAutoCleanup(getDefClass(),
            String.format(baseTag, String.format(
                    "extends='%s' model='java://org.auraframework.components.test.java.model.TestModel'",
                    parentDesc.getDescriptorName()), ""));

    List<DefDescriptor<ModelDef>> dds = definitionService.getDefinition(compDesc).getModelDefDescriptors();
    assertNotNull(dds);

    assertEquals(3, dds.size());
    List<String> names = Lists.transform(dds, new Function<DefDescriptor<?>, String>() {
        @Override
        public String apply(DefDescriptor<?> input) {
            return input.getQualifiedName();
        }
    });
    Set<String> expected = ImmutableSet.of("java://org.auraframework.components.test.java.model.TestModel",
            "js://test.jsModel", grandParentModelDesc.getQualifiedName());
    if (!names.containsAll(expected)) {
        fail("Missing expected models. Expected: " + expected + ", Actual: " + names);
    }
    if (!expected.containsAll(names)) {
        fail("Unexpected models. Expected: " + expected + ", Actual: " + names);
    }
}

From source file:org.apache.giraph.zk.ZooKeeperManager.java
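
In the non-server branch of this method, containsAll is evaluated in a polling loop: the task waits until a ready stamp has been found for every host in zkServerPortMap before proceeding.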

/**
 * If this task has been selected, online a ZooKeeper server.  Otherwise,
 * wait until this task knows that the ZooKeeper servers have been onlined.
 */
public void onlineZooKeeperServers() {
    Integer taskId = zkServerPortMap.get(myHostname);
    if ((taskId != null) && (taskId.intValue() == taskPartition)) {
        File zkDirFile = new File(this.zkDir);
        try {
            if (LOG.isInfoEnabled()) {
                LOG.info("onlineZooKeeperServers: Trying to delete old " + "directory " + this.zkDir);
            }
            FileUtils.deleteDirectory(zkDirFile);
        } catch (IOException e) {
            LOG.warn("onlineZooKeeperServers: Failed to delete " + "directory " + this.zkDir, e);
        }
        generateZooKeeperConfigFile(new ArrayList<String>(zkServerPortMap.keySet()));
        ProcessBuilder processBuilder = new ProcessBuilder();
        List<String> commandList = Lists.newArrayList();
        String javaHome = System.getProperty("java.home");
        if (javaHome == null) {
            throw new IllegalArgumentException("onlineZooKeeperServers: java.home is not set!");
        }
        commandList.add(javaHome + "/bin/java");
        String zkJavaOptsString = GiraphConstants.ZOOKEEPER_JAVA_OPTS.get(conf);
        String[] zkJavaOptsArray = zkJavaOptsString.split(" ");
        if (zkJavaOptsArray != null) {
            commandList.addAll(Arrays.asList(zkJavaOptsArray));
        }
        commandList.add("-cp");
        Path fullJarPath = new Path(conf.get(GiraphConstants.ZOOKEEPER_JAR));
        commandList.add(fullJarPath.toString());
        commandList.add(QuorumPeerMain.class.getName());
        commandList.add(configFilePath);
        processBuilder.command(commandList);
        File execDirectory = new File(zkDir);
        processBuilder.directory(execDirectory);
        processBuilder.redirectErrorStream(true);
        if (LOG.isInfoEnabled()) {
            LOG.info("onlineZooKeeperServers: Attempting to " + "start ZooKeeper server with command "
                    + commandList + " in directory " + execDirectory.toString());
        }
        try {
            synchronized (this) {
                zkProcess = processBuilder.start();
                zkProcessCollector = new StreamCollector(zkProcess.getInputStream());
                zkProcessCollector.start();
            }
            Runnable runnable = new Runnable() {
                public void run() {
                    LOG.info("run: Shutdown hook started.");
                    synchronized (this) {
                        if (zkProcess != null) {
                            LOG.warn("onlineZooKeeperServers: " + "Forced a shutdown hook kill of the "
                                    + "ZooKeeper process.");
                            zkProcess.destroy();
                            int exitCode = -1;
                            try {
                                exitCode = zkProcess.waitFor();
                            } catch (InterruptedException e) {
                                LOG.warn("run: Couldn't get exit code.");
                            }
                            LOG.info("onlineZooKeeperServers: ZooKeeper process exited " + "with " + exitCode
                                    + " (note that 143 " + "typically means killed).");
                        }
                    }
                }
            };
            Runtime.getRuntime().addShutdownHook(new Thread(runnable));
            LOG.info("onlineZooKeeperServers: Shutdown hook added.");
        } catch (IOException e) {
            LOG.error("onlineZooKeeperServers: Failed to start " + "ZooKeeper process", e);
            throw new RuntimeException(e);
        }

        // Once the server is up and running, notify that this server is up
        // and running by dropping a ready stamp.
        int connectAttempts = 0;
        final int maxConnectAttempts = conf.getZookeeperConnectionAttempts();
        while (connectAttempts < maxConnectAttempts) {
            try {
                if (LOG.isInfoEnabled()) {
                    LOG.info("onlineZooKeeperServers: Connect attempt " + connectAttempts + " of "
                            + maxConnectAttempts + " max trying to connect to " + myHostname + ":" + zkBasePort
                            + " with poll msecs = " + pollMsecs);
                }
                InetSocketAddress zkServerAddress = new InetSocketAddress(myHostname, zkBasePort);
                Socket testServerSock = new Socket();
                testServerSock.connect(zkServerAddress, 5000);
                if (LOG.isInfoEnabled()) {
                    LOG.info("onlineZooKeeperServers: Connected to " + zkServerAddress + "!");
                }
                break;
            } catch (SocketTimeoutException e) {
                LOG.warn("onlineZooKeeperServers: Got " + "SocketTimeoutException", e);
            } catch (ConnectException e) {
                LOG.warn("onlineZooKeeperServers: Got " + "ConnectException", e);
            } catch (IOException e) {
                LOG.warn("onlineZooKeeperServers: Got " + "IOException", e);
            }

            ++connectAttempts;
            try {
                Thread.sleep(pollMsecs);
            } catch (InterruptedException e) {
                LOG.warn("onlineZooKeeperServers: Sleep of " + pollMsecs + " interrupted - " + e.getMessage());
            }
        }
        if (connectAttempts == maxConnectAttempts) {
            throw new IllegalStateException(
                    "onlineZooKeeperServers: Failed to connect in " + connectAttempts + " tries!");
        }
        Path myReadyPath = new Path(serverDirectory, myHostname + HOSTNAME_TASK_SEPARATOR + taskPartition);
        try {
            if (LOG.isInfoEnabled()) {
                LOG.info("onlineZooKeeperServers: Creating my filestamp " + myReadyPath);
            }
            fs.createNewFile(myReadyPath);
        } catch (IOException e) {
            LOG.error("onlineZooKeeperServers: Failed (maybe previous " + "task failed) to create filestamp "
                    + myReadyPath, e);
        }
    } else {
        List<String> foundList = new ArrayList<String>();
        int readyRetrievalAttempt = 0;
        while (true) {
            try {
                FileStatus[] fileStatusArray = fs.listStatus(serverDirectory);
                foundList.clear();
                if ((fileStatusArray != null) && (fileStatusArray.length > 0)) {
                    for (int i = 0; i < fileStatusArray.length; ++i) {
                        String[] hostnameTaskArray = fileStatusArray[i].getPath().getName()
                                .split(HOSTNAME_TASK_SEPARATOR);
                        if (hostnameTaskArray.length != 2) {
                            throw new RuntimeException("getZooKeeperServerList: Task 0 failed " + "to parse "
                                    + fileStatusArray[i].getPath().getName());
                        }
                        foundList.add(hostnameTaskArray[0]);
                    }
                    if (LOG.isInfoEnabled()) {
                        LOG.info("onlineZooKeeperServers: Got " + foundList + " " + foundList.size()
                                + " hosts from " + fileStatusArray.length + " ready servers when " + serverCount
                                + " required (polling period is " + pollMsecs + ") on attempt "
                                + readyRetrievalAttempt);
                    }
                    if (foundList.containsAll(zkServerPortMap.keySet())) {
                        break;
                    }
                } else {
                    if (LOG.isInfoEnabled()) {
                        LOG.info("onlineZooKeeperSErvers: Empty " + "directory " + serverDirectory
                                + ", waiting " + pollMsecs + " msecs.");
                    }
                }
                Thread.sleep(pollMsecs);
                ++readyRetrievalAttempt;
            } catch (IOException e) {
                throw new RuntimeException(e);
            } catch (InterruptedException e) {
                LOG.warn("onlineZooKeeperServers: Strange interrupt from " + e.getMessage(), e);
            }
        }
    }
}

From source file:org.infinispan.server.test.configs.ExampleConfigsTest.java
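
This clustering test uses containsAll to assert that the keys observed on one server are all present on another, verifying topology-aware data placement.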

private void verifyTopologyHinting(String container1, String container2, String container3, String manager,
        String cache) {
    RemoteInfinispanMBeans s1 = createRemotes(container1, manager, cache);
    RemoteInfinispanMBeans s2 = createRemotes(container2, manager, cache);
    RemoteInfinispanMBeans s3 = createRemotes(container3, manager, cache);

    RemoteCache<Object, Object> s1Cache = createCache(s1);
    RemoteCache<Object, Object> s2Cache = createCache(s2);
    RemoteCache<Object, Object> s3Cache = createCache(s3);

    assertEquals(3, s1.manager.getClusterSize());
    assertEquals(3, s2.manager.getClusterSize());
    assertEquals(3, s3.manager.getClusterSize());
    int total_elements = 0;
    s1Cache.clear();
    s2Cache.clear();
    s3Cache.clear();

    long s0Entries = 0;
    long s1Entries = 0;
    long s2Entries = 0;
    List<String> s1Bulk = new ArrayList<String>();
    List<String> s2Bulk = new ArrayList<String>();

    // By using topology information we divide our 3 nodes into 2 groups and generate enough elements so there
    // is at least 1 element in each group and at least 5 elements total,
    // and keep track of elements that went to server 2 and 3
    while (s0Entries == 0 || s1Entries == 0 || s2Entries == 0 || total_elements < 5) {
        s1Cache.put("machine" + total_elements, "machine");

        if (s1Entries + 1 == s2.cache.getNumberOfEntries()) {
            s1Bulk.add("machine" + total_elements);
        }
        if (s2Entries + 1 == s3.cache.getNumberOfEntries()) {
            s2Bulk.add("machine" + total_elements);
        }

        total_elements++;
        s1Entries = s2.cache.getNumberOfEntries();
        s2Entries = s3.cache.getNumberOfEntries();
        s0Entries = s1.cache.getNumberOfEntries();
        if (total_elements > 10)
            break; // in case something goes wrong - do not cycle forever
    }

    assertTrue("Unexpected number of entries in server1: " + s0Entries, s0Entries > 0);
    assertTrue("Unexpected number of entries in server2: " + s1Entries, s1Entries > 0);
    assertTrue(
            "Instead of " + total_elements * 2 + " total elements there were "
                    + (s0Entries + s1Entries + s2Entries),
            s0Entries + s1Entries + s2Entries == total_elements * 2);
    assertTrue("Server 1 elements are not contained in server 2", s2Bulk.containsAll(s1Bulk));

    // Now we remove the keys from server 2 therefore they should be removed from server 3 and that should imply
    // that server 3 and server 1 have the same elements
    for (String key : s1Bulk) {
        s2Cache.remove(key);
    }
    s0Entries = s1.cache.getNumberOfEntries();
    s1Entries = s2.cache.getNumberOfEntries();
    s2Entries = s3.cache.getNumberOfEntries();

    assertEquals("There were " + s1Entries + " left in the 2nd server", 0, s1Entries);
    assertEquals(s0Entries, s2Entries);
    assertNotEquals(s0Entries, s1Entries);
    assertEquals(s1Cache.getBulk(), s3Cache.getBulk());
}

From source file:org.opencb.opencga.storage.core.manager.variant.operations.VariantStatsStorageOperation.java
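
Here, a size comparison combined with containsAll confirms that the user-supplied cohorts are exactly the cohorts derived from the aggregation mapping file.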

/**
 * Must provide a list of cohorts or an aggregation_mapping_properties file.
 * @param studyId   StudyId
 * @param aggregation Aggregation type for this study. {@link #getAggregation}
 * @param cohorts   List of cohorts
 * @param options   Options, where the aggregation mapping properties file will be
 * @param sessionId User's sessionId
 * @return          Checked list of cohorts
 * @throws CatalogException if an error occurs in Catalog
 * @throws IOException if an IO error occurs while reading the aggregation map file (if any)
 */
protected List<Long> checkCohorts(long studyId, Aggregation aggregation, List<String> cohorts,
        QueryOptions options, String sessionId) throws CatalogException, IOException {
    List<Long> cohortIds;
    String userId = catalogManager.getUserManager().getId(sessionId);

    // Check aggregation mapping properties
    String tagMap = options.getString(Options.AGGREGATION_MAPPING_PROPERTIES.key());
    List<Long> cohortsByAggregationMapFile = Collections.emptyList();
    if (!isBlank(tagMap)) {
        if (!Aggregation.isAggregated(aggregation)) {
            throw nonAggregatedWithMappingFile();
        }
        cohortsByAggregationMapFile = createCohortsByAggregationMapFile(studyId, tagMap, sessionId);
    } else if (Aggregation.isAggregated(aggregation)) {
        if (aggregation.equals(Aggregation.BASIC)) {
            cohortsByAggregationMapFile = createCohortsIfNeeded(studyId,
                    Collections.singleton(StudyEntry.DEFAULT_COHORT), sessionId);
        } else {
            throw missingAggregationMappingFile(aggregation);
        }
    }

    if (cohorts == null || cohorts.isEmpty()) {
        // If no aggregation map file provided
        if (cohortsByAggregationMapFile.isEmpty()) {
            throw missingCohorts();
        } else {
            cohortIds = cohortsByAggregationMapFile;
        }
    } else {
        cohortIds = new ArrayList<>(cohorts.size());
        for (String cohort : cohorts) {
            if (!cohort.contains(":")) {
                cohort = studyId + ":" + cohort;
            }
            long cohortId = catalogManager.getCohortManager().getId(userId, cohort);
            if (cohortId < 0) {
                throw new CatalogException("Cohort '" + cohort + "' not found");
            }
            cohortIds.add(cohortId);
        }
        if (!cohortsByAggregationMapFile.isEmpty()) {
            if (cohortIds.size() != cohortsByAggregationMapFile.size()
                    || !cohortIds.containsAll(cohortsByAggregationMapFile)) {
                throw differentCohortsThanMappingFile();
            }
        }
    }
    return cohortIds;
}

From source file:org.kuali.rice.kew.server.WorkflowUtilityTest.java
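
This test combines containsAll with positive and negative assertions: members of groups on the current route node must appear in the route log, while members tied to future nodes must not, until future nodes are explicitly requested.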

@Test
public void testGetPrincipalIdsInRouteLog() throws Exception {
    Set<String> NonSITMembers = new HashSet<String>(Arrays.asList(new String[] { getPrincipalIdForName("user1"),
            getPrincipalIdForName("user2"), getPrincipalIdForName("user3"), getPrincipalIdForName("dewey") }));

    Set<String> WorkflowAdminMembers = new HashSet<String>(
            Arrays.asList(new String[] { getPrincipalIdForName("ewestfal"), getPrincipalIdForName("rkirkend"),
                    getPrincipalIdForName("jhopf"), getPrincipalIdForName("bmcgough"),
                    getPrincipalIdForName("shenl"), getPrincipalIdForName("quickstart") }));

    WorkflowDocument document = WorkflowDocumentFactory.createDocument(getPrincipalIdForName("rkirkend"),
            RouteLogTestSetup.DOCUMENT_TYPE_NAME);
    document.route("");

    WorkflowDocumentActionsService wdas = KewApiServiceLocator.getWorkflowDocumentActionsService();
    // just look at the current node
    List<String> principalIds = wdas.getPrincipalIdsInRouteLog(document.getDocumentId(), false);
    // should contain ewestfal and NonSIT group members
    assertTrue(principalIds.contains(getPrincipalIdForName("ewestfal")));
    assertTrue(principalIds.containsAll(NonSITMembers));

    // should NOT contain jitrue and WorkflowAdmin group members as they are in the rule for the future node
    assertFalse(principalIds.contains(getPrincipalIdForName("jitrue")));
    assertFalse(principalIds.containsAll(WorkflowAdminMembers));

    // this time look at future nodes too
    principalIds = wdas.getPrincipalIdsInRouteLog(document.getDocumentId(), true);

    // should contain ewestfal and NonSIT group members
    assertTrue(principalIds.contains(getPrincipalIdForName("ewestfal")));
    assertTrue(principalIds.containsAll(NonSITMembers));

    // should also contain jitrue and WorkflowAdmin group members
    assertTrue(principalIds.contains(getPrincipalIdForName("jitrue")));
    assertTrue(principalIds.containsAll(WorkflowAdminMembers));
}

From source file:com.tasktop.c2c.server.profile.tests.service.BaseProfileServiceTest.java
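
This test verifies round-tripping through the profile service: containsAll in both directions confirms that the returned projects are exactly the ones persisted.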

@Test
public void testGetProfileProjects() throws EntityNotFoundException {
    Profile profile = createMockProfile(entityManager);
    List<Project> projects = MockProjectFactory.create(entityManager, 5);
    for (Project project : projects) {
        entityManager.persist(project.addProfile(profile));
    }

    List<Project> foundProjects = profileService.getProfileProjects(profile.getId());
    assertNotNull(foundProjects);
    assertTrue(projects.containsAll(foundProjects));
    assertTrue(foundProjects.containsAll(projects));
    for (Project project : foundProjects) {
        assertNotNull(project.getId());
    }
}