Example usage for java.util Map forEach

Introduction

This page collects usage examples for the java.util Map forEach method.

Prototype

default void forEach(BiConsumer<? super K, ? super V> action) 

Document

Performs the given action for each entry in this map until all entries have been processed or the action throws an exception.
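
Before the full-size examples below, here is a minimal, self-contained sketch of the method (the class name and map contents are illustrative):

import java.util.HashMap;
import java.util.Map;

public class MapForEachDemo {
    public static void main(String[] args) {
        Map<String, Integer> wordCounts = new HashMap<>();
        wordCounts.put("alpha", 3);
        wordCounts.put("beta", 5);

        // The BiConsumer receives each key/value pair in turn.
        wordCounts.forEach((word, count) ->
                System.out.println(word + " occurs " + count + " time(s)"));
    }
}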

Usage

From source file:org.nanoframework.orm.jedis.sharded.RedisClientImpl.java
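
Here forEach converts the buffered pipeline responses into the result map, marking each key with whether its SET reply was OK.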

@Override
public Map<String, Boolean> push(final String key, final Map<String, Object> scanMap, final Mark push,
        final Mark policy) {
    Assert.hasText(key);
    Assert.notEmpty(scanMap);
    Assert.notNull(push);
    Assert.notNull(policy);

    ShardedJedis jedis = null;
    try {
        jedis = POOL.getJedis(config.getRedisType());
        final ShardedJedisPipeline pipeline = jedis.pipelined();
        final Map<String, Response<String>> okResponses = Maps.newHashMap();
        for (Entry<String, Object> entry : scanMap.entrySet()) {
            switch (push) {
            case LPUSH:
                switch (policy) {
                case KEY:
                    pipeline.lpush(key, entry.getKey());
                    break;
                case VALUE:
                    pipeline.lpush(key, toJSONString(entry.getValue()));
                    break;
                default:
                    throw new RedisClientException("Unknown policy type (policy)");
                }

                break;
            case RPUSH:
                switch (policy) {
                case KEY:
                    pipeline.rpush(key, entry.getKey());
                    break;
                case VALUE:
                    pipeline.rpush(key, toJSONString(entry.getValue()));
                    break;
                default:
                    throw new RedisClientException("Unknown policy type (policy)");
                }

                break;
            default:
                throw new RedisClientException("Unknown push type (PUSH)");
            }

            okResponses.put(entry.getKey(), pipeline.set(entry.getKey(), toJSONString(entry.getValue())));
        }

        pipeline.sync();
        final Map<String, Boolean> values = Maps.newHashMap();
        okResponses.forEach((scanKey, okResponse) -> values.put(scanKey, isOK(okResponse.get())));
        return values;
    } catch (final Throwable e) {
        throw new RedisClientException(e.getMessage(), e);
    } finally {
        POOL.close(jedis);
    }
}

From source file:org.apache.samza.execution.JobNode.java
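
This method uses forEach repeatedly: over the input and output edges, the store descriptors, the table specs, the set of unique serde instances, and each collected serde map when the config entries are written.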

/**
 * Serializes the {@link Serde} instances for operators, adds them to the provided config, and
 * sets the serde configuration for the input/output/intermediate streams appropriately.
 *
 * We try to preserve the number of Serde instances before and after serialization. However, we don't
 * guarantee that references shared between these serde instances (e.g. a Jackson ObjectMapper shared
 * between two JSON serdes) are still shared after deserialization.
 *
 * Ideally all the user defined objects in the application should be serialized and de-serialized in one pass
 * from the same output/input stream so that we can maintain reference sharing relationships.
 *
 * @param configs the configs to add serialized serde instances and stream serde configs to
 */
void addSerdeConfigs(Map<String, String> configs) {
    // collect all key and msg serde instances for streams
    Map<String, Serde> streamKeySerdes = new HashMap<>();
    Map<String, Serde> streamMsgSerdes = new HashMap<>();
    Map<StreamSpec, InputOperatorSpec> inputOperators = streamGraph.getInputOperators();
    inEdges.forEach(edge -> {
        String streamId = edge.getStreamSpec().getId();
        InputOperatorSpec inputOperatorSpec = inputOperators.get(edge.getStreamSpec());
        streamKeySerdes.put(streamId, inputOperatorSpec.getKeySerde());
        streamMsgSerdes.put(streamId, inputOperatorSpec.getValueSerde());
    });
    Map<StreamSpec, OutputStreamImpl> outputStreams = streamGraph.getOutputStreams();
    outEdges.forEach(edge -> {
        String streamId = edge.getStreamSpec().getId();
        OutputStreamImpl outputStream = outputStreams.get(edge.getStreamSpec());
        streamKeySerdes.put(streamId, outputStream.getKeySerde());
        streamMsgSerdes.put(streamId, outputStream.getValueSerde());
    });

    // collect all key and msg serde instances for stores
    Map<String, Serde> storeKeySerdes = new HashMap<>();
    Map<String, Serde> storeMsgSerdes = new HashMap<>();
    streamGraph.getAllOperatorSpecs().forEach(opSpec -> {
        if (opSpec instanceof StatefulOperatorSpec) {
            ((StatefulOperatorSpec) opSpec).getStoreDescriptors().forEach(storeDescriptor -> {
                storeKeySerdes.put(storeDescriptor.getStoreName(), storeDescriptor.getKeySerde());
                storeMsgSerdes.put(storeDescriptor.getStoreName(), storeDescriptor.getMsgSerde());
            });
        }
    });

    // collect all key and msg serde instances for tables
    Map<String, Serde> tableKeySerdes = new HashMap<>();
    Map<String, Serde> tableValueSerdes = new HashMap<>();
    tables.forEach(tableSpec -> {
        tableKeySerdes.put(tableSpec.getId(), tableSpec.getSerde().getKeySerde());
        tableValueSerdes.put(tableSpec.getId(), tableSpec.getSerde().getValueSerde());
    });

    // for each unique stream or store serde instance, generate a unique name and serialize to config
    HashSet<Serde> serdes = new HashSet<>(streamKeySerdes.values());
    serdes.addAll(streamMsgSerdes.values());
    serdes.addAll(storeKeySerdes.values());
    serdes.addAll(storeMsgSerdes.values());
    serdes.addAll(tableKeySerdes.values());
    serdes.addAll(tableValueSerdes.values());
    SerializableSerde<Serde> serializableSerde = new SerializableSerde<>();
    Base64.Encoder base64Encoder = Base64.getEncoder();
    Map<Serde, String> serdeUUIDs = new HashMap<>();
    serdes.forEach(serde -> {
        String serdeName = serdeUUIDs.computeIfAbsent(serde,
                s -> serde.getClass().getSimpleName() + "-" + UUID.randomUUID().toString());
        configs.putIfAbsent(String.format(SerializerConfig.SERDE_SERIALIZED_INSTANCE(), serdeName),
                base64Encoder.encodeToString(serializableSerde.toBytes(serde)));
    });

    // set key and msg serdes for streams to the serde names generated above
    streamKeySerdes.forEach((streamId, serde) -> {
        String streamIdPrefix = String.format(StreamConfig.STREAM_ID_PREFIX(), streamId);
        String keySerdeConfigKey = streamIdPrefix + StreamConfig.KEY_SERDE();
        configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
    });

    streamMsgSerdes.forEach((streamId, serde) -> {
        String streamIdPrefix = String.format(StreamConfig.STREAM_ID_PREFIX(), streamId);
        String valueSerdeConfigKey = streamIdPrefix + StreamConfig.MSG_SERDE();
        configs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
    });

    // set key and msg serdes for stores to the serde names generated above
    storeKeySerdes.forEach((storeName, serde) -> {
        String keySerdeConfigKey = String.format(StorageConfig.KEY_SERDE(), storeName);
        configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
    });

    storeMsgSerdes.forEach((storeName, serde) -> {
        String msgSerdeConfigKey = String.format(StorageConfig.MSG_SERDE(), storeName);
        configs.put(msgSerdeConfigKey, serdeUUIDs.get(serde));
    });

    // set key and msg serdes for tables to the serde names generated above
    tableKeySerdes.forEach((tableId, serde) -> {
        String keySerdeConfigKey = String.format(JavaTableConfig.TABLE_KEY_SERDE, tableId);
        configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
    });

    tableValueSerdes.forEach((tableId, serde) -> {
        String valueSerdeConfigKey = String.format(JavaTableConfig.TABLE_VALUE_SERDE, tableId);
        configs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
    });
}

From source file:org.wso2.carbon.identity.oauth.endpoint.token.OAuth2TokenEndpointTest.java
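
Near the end of the test, forEach asserts that every expected custom response parameter key and value appears in the token response body.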

@Test(dataProvider = "testIssueAccessTokenDataProvider", groups = "testWithConnection")
public void testIssueAccessToken(String clientId, String authzHeader, Object paramMapObj, String grantType,
        String idToken, Object headerObj, Object customResponseParamObj, Exception e, int expectedStatus,
        String expectedErrorCode) throws Exception {
    MultivaluedMap<String, String> paramMap = (MultivaluedMap<String, String>) paramMapObj;
    ResponseHeader[] responseHeaders = (ResponseHeader[]) headerObj;
    Map<String, String> customResponseParameters = (Map<String, String>) customResponseParamObj;

    Map<String, String[]> requestParams = new HashMap<>();

    if (clientId != null) {
        requestParams.put(OAuth.OAUTH_CLIENT_ID, clientId.split(","));
    }
    requestParams.put(OAuth.OAUTH_GRANT_TYPE, new String[] { grantType });
    requestParams.put(OAuth.OAUTH_SCOPE, new String[] { "scope1" });
    requestParams.put(OAuth.OAUTH_REDIRECT_URI, new String[] { APP_REDIRECT_URL });
    requestParams.put(OAuth.OAUTH_USERNAME, new String[] { USERNAME });
    requestParams.put(OAuth.OAUTH_PASSWORD, new String[] { "password" });

    HttpServletRequest request = mockHttpRequest(requestParams, new HashMap<String, Object>());
    when(request.getHeader(OAuthConstants.HTTP_REQ_HEADER_AUTHZ)).thenReturn(authzHeader);
    when(request.getHeaderNames()).thenReturn(Collections.enumeration(new ArrayList<String>() {
        {
            add(OAuthConstants.HTTP_REQ_HEADER_AUTHZ);
        }
    }));

    spy(EndpointUtil.class);
    doReturn(REALM).when(EndpointUtil.class, "getRealmInfo");
    doReturn(oAuth2Service).when(EndpointUtil.class, "getOAuth2Service");

    when(oAuth2Service.issueAccessToken(any(OAuth2AccessTokenReqDTO.class)))
            .thenReturn(oAuth2AccessTokenRespDTO);
    when(oAuth2AccessTokenRespDTO.getAccessToken()).thenReturn(ACCESS_TOKEN);
    when(oAuth2AccessTokenRespDTO.getRefreshToken()).thenReturn(REFRESH_TOKEN);
    when(oAuth2AccessTokenRespDTO.getExpiresIn()).thenReturn(3600L);
    when(oAuth2AccessTokenRespDTO.getAuthorizedScopes()).thenReturn("scope1");
    when(oAuth2AccessTokenRespDTO.getIDToken()).thenReturn(idToken);
    when(oAuth2AccessTokenRespDTO.getResponseHeaders()).thenReturn(responseHeaders);
    when(oAuth2AccessTokenRespDTO.getParameters()).thenReturn(customResponseParameters);

    mockOAuthServerConfiguration();
    mockStatic(IdentityDatabaseUtil.class);
    when(IdentityDatabaseUtil.getDBConnection()).thenReturn(connection);

    Map<String, Class<? extends OAuthValidator<HttpServletRequest>>> grantTypeValidators = new Hashtable<>();
    grantTypeValidators.put(GrantType.PASSWORD.toString(), PasswordValidator.class);

    when(oAuthServerConfiguration.getSupportedGrantTypeValidators()).thenReturn(grantTypeValidators);
    when(oAuth2Service.getOauthApplicationState(CLIENT_ID_VALUE)).thenReturn("ACTIVE");

    Response response;
    try {
        response = oAuth2TokenEndpoint.issueAccessToken(request, paramMap);
    } catch (InvalidRequestParentException ire) {
        InvalidRequestExceptionMapper invalidRequestExceptionMapper = new InvalidRequestExceptionMapper();
        response = invalidRequestExceptionMapper.toResponse(ire);
    }

    assertNotNull(response, "Token response is null");
    assertEquals(response.getStatus(), expectedStatus, "Unexpected HTTP response status");

    assertNotNull(response.getEntity(), "Response entity is null");

    final String responseBody = response.getEntity().toString();
    if (customResponseParameters != null) {
        customResponseParameters
                .forEach((key, value) -> assertTrue(responseBody.contains(key) && responseBody.contains(value),
                        "Expected custom response parameter: " + key + " not found in token response."));
    }

    if (expectedErrorCode != null) {
        assertTrue(responseBody.contains(expectedErrorCode), "Expected error code not found");
    } else if (HttpServletResponse.SC_OK == expectedStatus) {
        assertTrue(responseBody.contains(ACCESS_TOKEN), "Successful response should contain access token");
    }
}

From source file:com.baidu.rigel.biplatform.ma.report.utils.QueryUtils.java
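
forEach re-parents each level of a deep-copied dimension onto the copy, and later merges time dimensions contributed by filter areas defined on other cubes.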

/**
 * Builds the cube used to query the given extend area of the report model.
 *
 * @param reportModel
 *            the report design model
 * @param area
 *            the extend area to build the cube for
 * @return the cube assembled for the extend area
 * @throws QueryModelBuildException
 *             if the area's logic model cannot be resolved
 */
public static Cube getCubeWithExtendArea(ReportDesignModel reportModel, ExtendArea area)
        throws QueryModelBuildException {
    Cube oriCube = getCubeFromReportModel(reportModel, area);
    Map<String, List<Dimension>> filterDims = collectFilterDim(reportModel);
    MiniCube cube = new MiniCube(area.getCubeId());
    String areaId = area.getId();
    LogicModel logicModel = area.getLogicModel();
    if (area.getType() == ExtendAreaType.SELECTION_AREA || area.getType() == ExtendAreaType.LITEOLAP_CHART
            || area.getType() == ExtendAreaType.LITEOLAP_TABLE) {
        LiteOlapExtendArea liteOlapArea = (LiteOlapExtendArea) reportModel
                .getExtendById(area.getReferenceAreaId());
        logicModel = liteOlapArea.getLogicModel();
        areaId = area.getReferenceAreaId();
    }
    if (logicModel == null) {
        throw new QueryModelBuildException("logic model is empty");
    }
    Item[] items = logicModel.getItems(area.getType() != ExtendAreaType.TABLE);
    Map<String, Dimension> dimensions = new HashMap<String, Dimension>();
    Map<String, Measure> measures = new HashMap<String, Measure>();

    for (Item item : items) {
        OlapElement olapElement = oriCube.getDimensions().get(item.getOlapElementId());
        if (olapElement == null) { // not a dimension, so try the measures
            olapElement = oriCube.getMeasures().get(item.getOlapElementId());
            if (olapElement != null) {
                Measure measure = (Measure) olapElement;
                measures.put(measure.getName(), measure);
            }
        } else {
            MiniCubeDimension dim = (MiniCubeDimension) DeepcopyUtils.deepCopy(olapElement);
            dim.setLevels(Maps.newLinkedHashMap());
            ((Dimension) olapElement).getLevels().values().forEach(level -> {
                level.setDimension(dim);
                dim.getLevels().put(level.getName(), level);
            });
            dimensions.put(dim.getName(), dim);
        }
    }
    if (area.getType() == ExtendAreaType.LITEOLAP) {
        /**
         * TODO: handle the LiteOlap area's candidate items?
         */
        Map<String, Item> candDims = ((LiteOlapExtendArea) area).getCandDims();
        for (String elementId : candDims.keySet()) {
            OlapElement element = ReportDesignModelUtils.getDimOrIndDefineWithId(reportModel.getSchema(),
                    area.getCubeId(), elementId);
            MiniCubeDimension dim = (MiniCubeDimension) DeepcopyUtils.deepCopy(element);
            dim.setLevels(Maps.newLinkedHashMap());
            ((Dimension) element).getLevels().values().forEach(level -> {
                level.setDimension(dim);
                dim.getLevels().put(level.getName(), level);
            });
            dimensions.put(element.getName(), (Dimension) element);
        }
        Map<String, Item> candInds = ((LiteOlapExtendArea) area).getCandInds();
        for (String elementId : candInds.keySet()) {
            OlapElement element = ReportDesignModelUtils.getDimOrIndDefineWithId(reportModel.getSchema(),
                    area.getCubeId(), elementId);
            if (element instanceof CallbackMeasure) {
                CallbackMeasure m = DeepcopyUtils.deepCopy((CallbackMeasure) element);
                String url = ((CallbackMeasure) element).getCallbackUrl();
                m.setCallbackUrl(HttpUrlUtils.getBaseUrl(url));
                m.setCallbackParams(HttpUrlUtils.getParams(url));
                measures.put(m.getName(), m);
            } else {
                measures.put(element.getName(), (Measure) element);
            }
        }
    }
    if (filterDims != null) { // && filterDims.get(area.getCubeId()) != null) {
        List<Dimension> dims = filterDims.get(area.getCubeId());
        if (dims != null) {
            for (Dimension dim : dims) {
                if (dim != null) {
                    dimensions.put(dim.getName(), dim);
                }
            }
        }

        // TODO: copy time dimensions defined on other cubes into this cube
        filterDims.forEach((key, dimArray) -> {
            if (key != null && !key.equals(area.getCubeId())) {
                dimArray.stream().filter(dim -> {
                    return dim instanceof TimeDimension;
                }).forEach(dim -> {
                    for (Dimension tmp : oriCube.getDimensions().values()) {
                        if (dim.getName().equals(tmp.getName())) {
                            MiniCubeDimension tmpDim = (MiniCubeDimension) DeepcopyUtils.deepCopy(dim);
                            tmpDim.setLevels((LinkedHashMap<String, Level>) tmp.getLevels());
                            tmpDim.setFacttableColumn(tmp.getFacttableColumn());
                            tmpDim.setFacttableCaption(tmp.getFacttableCaption());
                            dimensions.put(tmpDim.getName(), tmpDim);
                        }
                    }
                });
            }
        });
    }
    cube.setDimensions(dimensions);
    modifyMeasures(measures, oriCube);
    cube.setMeasures(measures);
    cube.setSource(((MiniCube) oriCube).getSource());
    cube.setPrimaryKey(((MiniCube) oriCube).getPrimaryKey());
    cube.setId(oriCube.getId() + "_" + areaId);
    return cube;
}

From source file:org.ballerinalang.bre.bvm.BLangVM.java
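
forEach submits every worker to the executor, first attaching the shared result semaphore to the workers that participate in the join.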

private boolean invokeJoinWorkers(Map<String, BLangVMWorkers.WorkerExecutor> workers,
        Set<String> joinWorkerNames, int joinCount, long timeout) {
    ExecutorService exec = ThreadPoolFactory.getInstance().getWorkerExecutor();
    Semaphore resultCounter = new Semaphore(-joinCount + 1);
    workers.forEach((k, v) -> {
        if (joinWorkerNames.contains(k)) {
            v.setResultCounterSemaphore(resultCounter);
        }
        exec.submit(v);
    });
    try {
        return resultCounter.tryAcquire(timeout, TimeUnit.SECONDS);
    } catch (InterruptedException ignore) {
        return false;
    }
}

From source file:org.apache.hadoop.hive.ql.parse.TestReplicationScenarios.java
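
forEach applies every entry of the additionalProperties map to the HiveConf before the metastore is started.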

static void internalBeforeClassSetup(Map<String, String> additionalProperties, boolean forMigration)
        throws Exception {
    hconf = new HiveConf(TestReplicationScenarios.class);
    String metastoreUri = System.getProperty("test." + MetastoreConf.ConfVars.THRIFT_URIS.getHiveName());
    if (metastoreUri != null) {
        hconf.set(MetastoreConf.ConfVars.THRIFT_URIS.getHiveName(), metastoreUri);
        return;
    }
    isMigrationTest = forMigration;

    hconf.set(MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS.getHiveName(), DBNOTIF_LISTENER_CLASSNAME); // turn on db notification listener on metastore
    hconf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true);
    hconf.setBoolVar(HiveConf.ConfVars.FIRE_EVENTS_FOR_DML, true);
    hconf.setVar(HiveConf.ConfVars.REPLCMDIR, TEST_PATH + "/cmroot/");
    proxySettingName = "hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts";
    hconf.set(proxySettingName, "*");
    hconf.setVar(HiveConf.ConfVars.REPLDIR, TEST_PATH + "/hrepl/");
    hconf.set(MetastoreConf.ConfVars.THRIFT_CONNECTION_RETRIES.getHiveName(), "3");
    hconf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hconf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hconf.set(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
    hconf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true);
    hconf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    hconf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname, "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager");
    hconf.set(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL.varname,
            "org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore");
    hconf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, true);
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");

    additionalProperties.forEach((key, value) -> {
        hconf.set(key, value);
    });

    MetaStoreTestUtils.startMetaStoreWithRetry(hconf);

    Path testPath = new Path(TEST_PATH);
    FileSystem fs = FileSystem.get(testPath.toUri(), hconf);
    fs.mkdirs(testPath);
    driver = DriverFactory.newDriver(hconf);
    SessionState.start(new CliSessionState(hconf));
    metaStoreClient = new HiveMetaStoreClient(hconf);

    FileUtils.deleteDirectory(new File("metastore_db2"));
    HiveConf hconfMirrorServer = new HiveConf();
    hconfMirrorServer.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname,
            "jdbc:derby:;databaseName=metastore_db2;create=true");
    MetaStoreTestUtils.startMetaStoreWithRetry(hconfMirrorServer);
    hconfMirror = new HiveConf(hconf);
    String thriftUri = MetastoreConf.getVar(hconfMirrorServer, MetastoreConf.ConfVars.THRIFT_URIS);
    MetastoreConf.setVar(hconfMirror, MetastoreConf.ConfVars.THRIFT_URIS, thriftUri);

    if (forMigration) {
        hconfMirror.setBoolVar(HiveConf.ConfVars.HIVE_STRICT_MANAGED_TABLES, true);
        hconfMirror.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
        hconfMirror.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname,
                "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
    }
    driverMirror = DriverFactory.newDriver(hconfMirror);
    metaStoreClientMirror = new HiveMetaStoreClient(hconfMirror);

    PersistenceManagerProvider.setTwoMetastoreTesting(true);
}

From source file:com.google.pubsub.flic.controllers.GCEController.java
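
Nested forEach calls fan out asynchronous work per zone and client type: first to create the managed instance groups, then to resize them so the instances start.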

/**
 * Instantiates the load test on Google Compute Engine.
 */
private GCEController(String projectName, Map<String, Map<ClientParams, Integer>> types,
        ScheduledExecutorService executor, Storage storage, Compute compute, Pubsub pubsub) throws Throwable {
    super(executor);
    this.projectName = projectName;
    this.types = types;
    this.storage = storage;
    this.compute = compute;

    // For each unique type of CPS Publisher, create a Topic if it does not already exist, and then
    // delete and recreate any subscriptions attached to it so that we do not have backlog from
    // previous runs.
    List<SettableFuture<Void>> pubsubFutures = new ArrayList<>();
    types.values().forEach(paramsMap -> {
        paramsMap.keySet().stream().map(p -> p.getClientType()).distinct().filter(ClientType::isCpsPublisher)
                .forEach(clientType -> {
                    SettableFuture<Void> pubsubFuture = SettableFuture.create();
                    pubsubFutures.add(pubsubFuture);
                    executor.execute(() -> {
                        String topic = Client.TOPIC_PREFIX + Client.getTopicSuffix(clientType);
                        try {
                            pubsub.projects().topics()
                                    .create("projects/" + projectName + "/topics/" + topic, new Topic())
                                    .execute();
                        } catch (GoogleJsonResponseException e) {
                            if (e.getStatusCode() != ALREADY_EXISTS) {
                                pubsubFuture.setException(e);
                                return;
                            }
                            log.info("Topic already exists, reusing.");
                        } catch (IOException e) {
                            pubsubFuture.setException(e);
                            return;
                        }
                        // Recreate each subscription attached to the topic.
                        paramsMap.keySet().stream()
                                .filter(p -> p.getClientType() == clientType.getSubscriberType())
                                .map(p -> p.subscription).forEach(subscription -> {
                                    try {
                                        pubsub.projects().subscriptions().delete(
                                                "projects/" + projectName + "/subscriptions/" + subscription)
                                                .execute();
                                    } catch (IOException e) {
                                        log.debug(
                                                "Error deleting subscription, assuming it has not yet been created.",
                                                e);
                                    }
                                    try {
                                        pubsub.projects().subscriptions().create(
                                                "projects/" + projectName + "/subscriptions/" + subscription,
                                                new Subscription()
                                                        .setTopic(
                                                                "projects/" + projectName + "/topics/" + topic)
                                                        .setAckDeadlineSeconds(10))
                                                .execute();
                                    } catch (IOException e) {
                                        pubsubFuture.setException(e);
                                    }
                                });
                        pubsubFuture.set(null);
                    });
                });
    });
    try {
        createStorageBucket();
        createFirewall();

        List<SettableFuture<Void>> filesRemaining = new ArrayList<>();
        Files.walk(Paths.get(resourceDirectory)).filter(Files::isRegularFile).forEach(filePath -> {
            SettableFuture<Void> fileRemaining = SettableFuture.create();
            filesRemaining.add(fileRemaining);
            executor.execute(() -> {
                try {
                    uploadFile(filePath);
                    fileRemaining.set(null);
                } catch (Exception e) {
                    fileRemaining.setException(e);
                }
            });
        });
        List<SettableFuture<Void>> createGroupFutures = new ArrayList<>();
        types.forEach((zone, paramsMap) -> paramsMap.forEach((param, n) -> {
            SettableFuture<Void> createGroupFuture = SettableFuture.create();
            createGroupFutures.add(createGroupFuture);
            executor.execute(() -> {
                try {
                    createManagedInstanceGroup(zone, param.getClientType());
                    createGroupFuture.set(null);
                } catch (Exception e) {
                    createGroupFuture.setException(e);
                }
            });
        }));

        // Wait for files and instance groups to be created.
        Futures.allAsList(pubsubFutures).get();
        log.info("Pub/Sub actions completed.");
        Futures.allAsList(filesRemaining).get();
        log.info("File uploads completed.");
        Futures.allAsList(createGroupFutures).get();
        log.info("Instance group creation completed.");

        // Everything is set up, let's start our instances
        log.info("Starting instances.");
        List<SettableFuture<Void>> resizingFutures = new ArrayList<>();
        types.forEach((zone, paramsMap) -> paramsMap.forEach((type, n) -> {
            SettableFuture<Void> resizingFuture = SettableFuture.create();
            resizingFutures.add(resizingFuture);
            executor.execute(() -> {
                try {
                    startInstances(zone, type.getClientType(), n);
                    resizingFuture.set(null);
                } catch (Exception e) {
                    resizingFuture.setException(e);
                }
            });
        }));
        Futures.allAsList(resizingFutures).get();

        // We wait for all instances to finish starting, and get the external network address of each
        // newly created instance.
        List<SettableFuture<Void>> startFutures = new ArrayList<>();
        for (String zone : types.keySet()) {
            Map<ClientParams, Integer> paramsMap = types.get(zone);
            for (ClientParams type : paramsMap.keySet()) {
                SettableFuture<Void> startFuture = SettableFuture.create();
                startFutures.add(startFuture);
                executor.execute(() -> {
                    int numErrors = 0;
                    while (true) {
                        try {
                            addInstanceGroupInfo(zone, type);
                            startFuture.set(null);
                            return;
                        } catch (IOException e) {
                            numErrors++;
                            if (numErrors > 3) {
                                startFuture.setException(new Exception("Failed to get instance information."));
                                return;
                            }
                            log.error("Transient error getting status for instance group, continuing", e);
                        }
                    }
                });
            }
        }

        Futures.allAsList(startFutures).get();
        log.info("Successfully started all instances.");
    } catch (ExecutionException e) {
        shutdown(e.getCause());
        throw e.getCause();
    } catch (Exception e) {
        shutdown(e);
        throw e;
    }
}

From source file:com.streamsets.pipeline.stage.origin.jdbc.cdc.oracle.OracleCDCSource.java
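
forEach copies every accumulated attribute onto the record header, in both the parsed-SQL and raw-SQL branches.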

private Record generateRecord(String sql, Map<String, String> attributes, int operationCode)
        throws UnparseableSQLException, StageException {
    String operation;
    SchemaAndTable table = new SchemaAndTable(attributes.get(SCHEMA), attributes.get(TABLE));
    int sdcOperationType = ParseUtil.getOperation(operationCode);
    operation = OperationType.getLabelFromIntCode(sdcOperationType);
    attributes.put(OperationType.SDC_OPERATION_TYPE, String.valueOf(sdcOperationType));
    attributes.put(OPERATION, operation);
    String id = useLocalBuffering ? attributes.get(RS_ID) + OFFSET_DELIM + attributes.get(SSN)
            : attributes.get(SCN) + OFFSET_DELIM + attributes.get(SEQ);
    Record record = getContext().createRecord(id);
    if (configBean.parseQuery) {
        Map<String, String> columns;
        if (configBean.useNewParser) {
            Set<String> columnsExpected = null;
            if (configBean.allowNulls && table.isNotEmpty()) {
                columnsExpected = tableSchemas.get(table).keySet();
            }
            columns = SQLParserUtils.process(sqlParser.get(), sql, operationCode, configBean.allowNulls,
                    configBean.baseConfigBean.caseSensitive, columnsExpected);
        } else {
            // Walk it and attach our sqlListener
            sqlListener.get().reset();
            if (configBean.baseConfigBean.caseSensitive) {
                sqlListener.get().setCaseSensitive();
            }

            if (configBean.allowNulls) {
                sqlListener.get().allowNulls();
            }
            if (configBean.allowNulls && table.isNotEmpty()) {
                sqlListener.get().setColumns(tableSchemas.get(table).keySet());
            }

            parseTreeWalker.get().walk(sqlListener.get(), ParseUtil.getParserRuleContext(sql, operationCode));
            columns = sqlListener.get().getColumns();
        }

        String rowId = columns.get(ROWID);
        columns.remove(ROWID);
        if (rowId != null) {
            attributes.put(ROWID_KEY, rowId);
        }
        Map<String, Field> fields = new HashMap<>();

        List<UnsupportedFieldTypeException> fieldTypeExceptions = new ArrayList<>();
        for (Map.Entry<String, String> column : columns.entrySet()) {
            String columnName = column.getKey();
            Field createdField = null;
            try {
                createdField = objectToField(table, columnName, column.getValue());
            } catch (UnsupportedFieldTypeException ex) {
                LOG.error("Unsupported field type exception", ex);
                if (configBean.sendUnsupportedFields) {
                    createdField = Field.create(column.getValue());
                }
                fieldTypeExceptions.add(ex);
            }
            if (createdField != null) {
                if (decimalColumns.containsKey(table) && decimalColumns.get(table).containsKey(columnName)) {
                    String precision = String.valueOf(decimalColumns.get(table).get(columnName).precision);
                    String scale = String.valueOf(decimalColumns.get(table).get(columnName).scale);
                    attributes.put("jdbc." + columnName + ".precision", precision);
                    attributes.put("jdbc." + columnName + ".scale", scale);
                    createdField.setAttribute(HeaderAttributeConstants.ATTR_PRECISION, precision);
                    createdField.setAttribute(HeaderAttributeConstants.ATTR_SCALE, scale);
                }
                fields.put(columnName, createdField);
            }
        }
        record.set(Field.create(fields));
        attributes.forEach((k, v) -> record.getHeader().setAttribute(k, v));

        Joiner errorStringJoiner = Joiner.on(", ");
        List<String> errorColumns = Collections.emptyList();
        if (!fieldTypeExceptions.isEmpty()) {
            errorColumns = fieldTypeExceptions.stream().map(ex -> {
                String fieldTypeName = JDBCTypeNames.getOrDefault(ex.fieldType, "unknown");
                return "[Column = '" + ex.column + "', Type = '" + fieldTypeName + "', Value = '" + ex.columnVal
                        + "']";
            }).collect(Collectors.toList());
        }
        if (!fieldTypeExceptions.isEmpty()) {
            boolean add = handleUnsupportedFieldTypes(record, errorStringJoiner.join(errorColumns));
            if (add) {
                return record;
            } else {
                return null;
            }
        } else {
            return record;
        }
    } else {
        attributes.forEach((k, v) -> record.getHeader().setAttribute(k, v));
        Map<String, Field> fields = new HashMap<>();
        fields.put("sql", Field.create(sql));
        record.set(Field.create(fields));
        return record;
    }

}

From source file:sg.ncl.MainController.java
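
forEach flattens the per-category node status lists into a single list covering all servers.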

@RequestMapping("/admin/nodesStatus")
public String adminNodesStatus(Model model, HttpSession session) throws IOException {

    if (!validateIfAdmin(session)) {
        return NO_PERMISSION_PAGE;
    }

    // get number of active users and running experiments
    Map<String, String> testbedStatsMap = getTestbedStats();
    testbedStatsMap.put(USER_DASHBOARD_FREE_NODES, "0");
    testbedStatsMap.put(USER_DASHBOARD_TOTAL_NODES, "0");

    Map<String, List<Map<String, String>>> nodesStatus = getNodesStatus();
    Map<String, Map<String, Long>> nodesStatusCount = new HashMap<>();

    List<Map<String, String>> allNodes = new ArrayList<>();
    nodesStatus.forEach((key, value) -> allNodes.addAll(value));
    Map<String, List<Map<String, String>>> allNodesStatus = new HashMap<>();
    allNodesStatus.put("All Servers", allNodes);
    countNodeStatus(testbedStatsMap, allNodesStatus, nodesStatusCount);

    model.addAttribute("nodesStatus", allNodesStatus);
    model.addAttribute("nodesStatusCount", nodesStatusCount);

    model.addAttribute(USER_DASHBOARD_LOGGED_IN_USERS_COUNT,
            testbedStatsMap.get(USER_DASHBOARD_LOGGED_IN_USERS_COUNT));
    model.addAttribute(USER_DASHBOARD_RUNNING_EXPERIMENTS_COUNT,
            testbedStatsMap.get(USER_DASHBOARD_RUNNING_EXPERIMENTS_COUNT));
    model.addAttribute(USER_DASHBOARD_FREE_NODES, testbedStatsMap.get(USER_DASHBOARD_FREE_NODES));
    model.addAttribute(USER_DASHBOARD_TOTAL_NODES, testbedStatsMap.get(USER_DASHBOARD_TOTAL_NODES));
    return "node_status";
}

From source file:org.openecomp.sdc.be.model.operations.impl.ComponentInstanceOperation.java
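
Nested forEach calls traverse the capability map and, for each capability of an atomic resource, stamp the owning component instance's id, name, and capability sources.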

private StorageOperationStatus setCompInstCapabilitiesFromGraph(
        Map<String, Map<String, CapabilityDefinition>> resourcesCapabilities, Component component,
        NodeTypeEnum compInstType, ComponentInstance resourceInstance, List<String> respourceDerivedList) {

    StorageOperationStatus status;
    ComponentOperation componentOperation = getComponentOperation(compInstType);
    Either<Map<String, List<CapabilityDefinition>>, TitanOperationStatus> eitherCapabilities = componentOperation
            .getCapabilities(component, compInstType, true);
    if (eitherCapabilities.isLeft()) {
        status = StorageOperationStatus.OK;
        Map<String, List<CapabilityDefinition>> capabilities = eitherCapabilities.left().value();
        if (capabilities != null && !capabilities.isEmpty()) {
            capabilities.forEach((type, list) -> {
                if (list != null && !list.isEmpty()) {
                    list.forEach((capability) -> {
                        // We want to set ownerId only for instances coming
                        // from atomic resources, otherwise we don't want
                        // to overwrite the existing ownerId of underlying
                        // component instances
                        if (isAtomicResource(component)) {
                            capability.setOwnerId(resourceInstance.getUniqueId());
                            capability.setOwnerName(resourceInstance.getName());
                            capability.setCapabilitySources(respourceDerivedList);
                        }
                    });
                }
            });
            resourceInstance.setCapabilities(capabilities);
        }
    } else {
        status = StorageOperationStatus.GENERAL_ERROR;
    }
    return status;

}