Example usage for java.util Properties containsKey

Introduction

On this page you can find example usages of java.util.Properties.containsKey, drawn from open-source projects.

Prototype

@Override
public boolean containsKey(Object key)

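Before the project examples, here is a minimal, self-contained sketch (not taken from any of the sources below) illustrating a subtlety worth knowing: containsKey inspects only the Properties object's own entries, never the defaults table that getProperty falls back to.

import java.util.Properties;

public class ContainsKeyDemo {
    public static void main(String[] args) {
        Properties defaults = new Properties();
        defaults.setProperty("timeout", "30");

        // props consults defaults for getProperty lookups only
        Properties props = new Properties(defaults);
        props.setProperty("host", "localhost");

        System.out.println(props.containsKey("host"));    // true  (own entry)
        System.out.println(props.containsKey("timeout")); // false (defaults not consulted)
        System.out.println(props.getProperty("timeout")); // 30    (resolved via defaults)
    }
}
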
Usage

From source file: org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.java

/**
 * Creates a FlinkKafkaProducer for a given topic. The sink produces its input to
 * the topic. It accepts a keyed {@link KeyedSerializationSchema} and possibly a custom {@link FlinkKafkaPartitioner}.
 *
 * <p>If a partitioner is not provided, written records will be partitioned by the attached key of each
 * record (as determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If written records do not
 * have a key (i.e., {@link KeyedSerializationSchema#serializeKey(Object)} returns {@code null}), they
 * will be distributed to Kafka partitions in a round-robin fashion.
 *
 * @param defaultTopicId The default topic to write data to
 * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers' is the only required argument.
 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
 *                          If a partitioner is not provided, records will be partitioned by the key of each record
 *                          (determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If the keys
 *                          are {@code null}, then records will be distributed to Kafka partitions in a
 *                          round-robin fashion.
 * @param semantic Defines semantic that will be used by this producer (see {@link FlinkKafkaProducer.Semantic}).
 * @param kafkaProducersPoolSize Overwrite default KafkaProducers pool size (see {@link FlinkKafkaProducer.Semantic#EXACTLY_ONCE}).
 */
public FlinkKafkaProducer(String defaultTopicId, KeyedSerializationSchema<IN> serializationSchema,
        Properties producerConfig, Optional<FlinkKafkaPartitioner<IN>> customPartitioner,
        FlinkKafkaProducer.Semantic semantic, int kafkaProducersPoolSize) {
    super(new FlinkKafkaProducer.TransactionStateSerializer(), new FlinkKafkaProducer.ContextStateSerializer());

    this.defaultTopicId = checkNotNull(defaultTopicId, "defaultTopicId is null");
    this.schema = checkNotNull(serializationSchema, "serializationSchema is null");
    this.producerConfig = checkNotNull(producerConfig, "producerConfig is null");
    this.flinkKafkaPartitioner = checkNotNull(customPartitioner, "customPartitioner is null").orElse(null);
    this.semantic = checkNotNull(semantic, "semantic is null");
    this.kafkaProducersPoolSize = kafkaProducersPoolSize;
    checkState(kafkaProducersPoolSize > 0, "kafkaProducersPoolSize must be greater than 0");

    ClosureCleaner.clean(this.flinkKafkaPartitioner, true);
    ClosureCleaner.ensureSerializable(serializationSchema);

    // set the producer configuration properties for kafka record key value serializers.
    if (!producerConfig.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) {
        this.producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                ByteArraySerializer.class.getName());
    } else {
        LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG);
    }

    if (!producerConfig.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
        this.producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                ByteArraySerializer.class.getName());
    } else {
        LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG);
    }

    // eagerly ensure that bootstrap servers are set.
    if (!this.producerConfig.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
        throw new IllegalArgumentException(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG
                + " must be supplied in the producer config properties.");
    }

    if (!producerConfig.containsKey(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG)) {
        long timeout = DEFAULT_KAFKA_TRANSACTION_TIMEOUT.toMilliseconds();
        checkState(timeout < Integer.MAX_VALUE && timeout > 0, "timeout does not fit into 32 bit integer");
        this.producerConfig.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, (int) timeout);
        LOG.warn("Property [{}] not specified. Setting it to {}", ProducerConfig.TRANSACTION_TIMEOUT_CONFIG,
                DEFAULT_KAFKA_TRANSACTION_TIMEOUT);
    }

    // Enable transactionTimeoutWarnings to avoid silent data loss
    // See KAFKA-6119 (affects versions 0.11.0.0 and 0.11.0.1):
    // The KafkaProducer may not throw an exception if the transaction failed to commit
    if (semantic == FlinkKafkaProducer.Semantic.EXACTLY_ONCE) {
        final Object object = this.producerConfig.get(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG);
        final long transactionTimeout;
        if (object instanceof String && StringUtils.isNumeric((String) object)) {
            transactionTimeout = Long.parseLong((String) object);
        } else if (object instanceof Number) {
            transactionTimeout = ((Number) object).longValue();
        } else {
            throw new IllegalArgumentException(
                    ProducerConfig.TRANSACTION_TIMEOUT_CONFIG + " must be numeric, was " + object);
        }
        super.setTransactionTimeout(transactionTimeout);
        super.enableTransactionTimeoutWarnings(0.8);
    }

    this.topicPartitionsMap = new HashMap<>();
}
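A side note on the serializer-defaulting pattern above: since Java 8, Map.putIfAbsent (which Properties inherits via Hashtable) collapses a containsKey check followed by put into a single call. A minimal sketch, assuming the same Kafka ProducerConfig constants as above; unlike the original code, it cannot log a warning in the already-present case.

// Equivalent to "if (!containsKey(k)) put(k, v)" in one call.
producerConfig.putIfAbsent(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
producerConfig.putIfAbsent(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());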

From source file: org.apache.hive.beeline.BeeLine.java

private String getDefaultConnectionUrl() throws BeelineHS2ConnectionFileParseException {
    HS2ConnectionFileParser userHS2ConnFileParser = getUserHS2ConnFileParser();
    if (!userHS2ConnFileParser.configExists()) {
        // nothing to do if there is no user HS2 connection configuration file
        return null;
    }
    // get the connection properties from user specific config file
    Properties userConnectionProperties = userHS2ConnFileParser.getConnectionProperties();
    // load the HS2 connection url properties from hive-site.xml if it is present in the classpath
    HS2ConnectionFileParser hiveSiteParser = getHiveSiteHS2ConnectionFileParser();
    Properties hiveSiteConnectionProperties = hiveSiteParser.getConnectionProperties();
    // add/override properties found from hive-site with user-specific properties
    for (String key : userConnectionProperties.stringPropertyNames()) {
        if (hiveSiteConnectionProperties.containsKey(key)) {
            debug("Overriding connection url property " + key + " from user connection configuration file");
        }
        hiveSiteConnectionProperties.setProperty(key, userConnectionProperties.getProperty(key));
    }
    // return the url based on the aggregated connection properties
    return HS2ConnectionFileUtils.getUrl(hiveSiteConnectionProperties);
}

From source file: org.dasein.persist.riak.RiakCache.java

@Override
protected void init(Class<T> cls, Key... keys) {
    Properties props = new Properties();

    try {
        InputStream is = DaseinSequencer.class.getResourceAsStream(DaseinSequencer.PROPERTIES);

        if (is != null) {
            props.load(is);
        }
    } catch (Exception e) {
        std.error("Problem reading " + DaseinSequencer.PROPERTIES + ": " + e.getMessage(), e);
    }
    useSsl = false;
    proxyPort = 0;
    riakPort = Integer.parseInt(props.getProperty("dasein.persist.riak.port", "8089").trim());
    if (props.containsKey("dasein.persist.riak.useSsl")) {
        useSsl = props.getProperty("dasein.persist.riak.useSsl").trim().equalsIgnoreCase("true");
    }
    String cname = cls.getName();

    while (cname != null) {
        String prop = props.getProperty("dasein.persist.riak.host." + cname);

        if (prop != null) {
            prop = prop.trim();
            if (prop.length() > 0) {
                riakHost = prop;
                prop = props.getProperty("dasein.persist.riak.port." + cname);
                if (prop != null) {
                    prop = prop.trim();
                    if (prop.length() > 0) {
                        riakPort = Integer.parseInt(prop);
                    }
                }
                if (props.containsKey("dasein.persist.riak.useSsl." + cname)) {
                    useSsl = props.getProperty("dasein.persist.riak.useSsl." + cname).trim()
                            .equalsIgnoreCase("true");
                }
                break;
            }
        }
        int idx = cname.lastIndexOf(".");

        if (idx < 1) {
            cname = null;
        } else {
            cname = cname.substring(0, idx);
        }
    }
    if (riakHost == null) {
        riakHost = props.getProperty("dasein.persist.riak.host", "localhost").trim();
    }
    if (props.containsKey("dasein.persist.riak.proxyHost")) {
        proxyHost = props.getProperty("dasein.persist.riak.proxyHost");
        if (props.containsKey("dasein.persist.riak.proxyPort")) {
            proxyPort = Integer.parseInt(props.getProperty("dasein.persist.riak.proxyPort"));
        }
    }
}
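The containsKey guards in this example exist because a plain getProperty would return null for absent keys. Where a sensible fallback exists, the two-step check can usually be collapsed into getProperty's two-argument form; a sketch of the idea, reusing the property name from the code above:

// Same effect as the "dasein.persist.riak.useSsl" guard above: absent keys
// fall back to "false" instead of leaving useSsl untouched.
boolean useSsl = Boolean.parseBoolean(props.getProperty("dasein.persist.riak.useSsl", "false").trim());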

From source file: org.alfresco.reporting.script.AlfrescoReporting.java

/**
 * Validate if the unique sum of properties exists in the table definition.
 * Update the table definition if columns are not yet defined
 * @param props unique set of columns and their type
 * @throws SQLException
 */
private void setTableDefinition(Properties props, String tableName) throws SQLException {
    logger.debug("Enter setTableDefinition tableName=" + tableName);
    // get the existing table definition
    Connection conn = dbhb.getConnection();
    conn.setAutoCommit(true);
    Statement stmt = conn.createStatement();

    Properties tableDesc = dbhb.getTableDescription(stmt, tableName);

    // check if our properties are defined or not
    Enumeration keys = props.keys();
    while (keys.hasMoreElements()) {
        String key = (String) keys.nextElement();
        String type = props.getProperty(key, "-");

        if ((!"-".equals(type)) && ((!"".equals(type))) && (!tableDesc.containsKey(key))) {
            if (logger.isDebugEnabled())
                logger.debug("Adding column: " + key + "=" + type);
            dbhb.extendTable(stmt, tableName, key, type);
        } else {
            if (logger.isDebugEnabled())
                logger.debug("DEFINITION Column " + key + " already exists.");
        } // end if else
    } // end while
    logger.debug("Exit setTableDefinition");
}
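The raw Enumeration loop above predates the typed accessors added to Properties. Since Java 6, stringPropertyNames() offers a String-typed alternative, with the caveat that it also includes keys from the defaults table, which containsKey would not report. A sketch of the same loop, reusing the names from the example above and omitting the debug logging:

// Typed iteration; note stringPropertyNames() also walks the defaults chain.
for (String key : props.stringPropertyNames()) {
    String type = props.getProperty(key, "-");
    if (!"-".equals(type) && !"".equals(type) && !tableDesc.containsKey(key)) {
        dbhb.extendTable(stmt, tableName, key, type);
    }
}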

From source file: org.apache.accumulo.proxy.ProxyServer.java

public ProxyServer(Properties props) {

    String useMock = props.getProperty("useMockInstance");
    if (useMock != null && Boolean.parseBoolean(useMock))
        instance = DeprecationUtil.makeMockInstance(this.getClass().getName());
    else {
        ClientConfiguration clientConf;
        if (props.containsKey("clientConfigurationFile")) {
            String clientConfFile = props.getProperty("clientConfigurationFile");
            try {
                clientConf = new ClientConfiguration(clientConfFile);
            } catch (ConfigurationException e) {
                throw new RuntimeException(e);
            }
        } else {
            clientConf = ClientConfiguration.loadDefault();
        }
        instance = new ZooKeeperInstance(clientConf.withInstance(props.getProperty("instance"))
                .withZkHosts(props.getProperty("zookeepers")));
    }

    try {
        String tokenProp = props.getProperty("tokenClass", PasswordToken.class.getName());
        tokenClass = Class.forName(tokenProp).asSubclass(AuthenticationToken.class);
    } catch (ClassNotFoundException e) {
        throw new RuntimeException(e);
    }

    final String serverTypeStr = props.getProperty(Proxy.THRIFT_SERVER_TYPE, Proxy.THRIFT_SERVER_TYPE_DEFAULT);
    ThriftServerType tempServerType = Proxy.DEFAULT_SERVER_TYPE;
    if (!Proxy.THRIFT_SERVER_TYPE_DEFAULT.equals(serverTypeStr)) {
        tempServerType = ThriftServerType.get(serverTypeStr);
    }

    serverType = tempServerType;

    scannerCache = CacheBuilder.newBuilder().expireAfterAccess(10, TimeUnit.MINUTES).maximumSize(1000)
            .removalListener(new CloseScanner()).build();

    writerCache = CacheBuilder.newBuilder().expireAfterAccess(10, TimeUnit.MINUTES).maximumSize(1000)
            .removalListener(new CloseWriter()).build();

    conditionalWriterCache = CacheBuilder.newBuilder().expireAfterAccess(10, TimeUnit.MINUTES).maximumSize(1000)
            .removalListener(new CloseConditionalWriter()).build();
}

From source file: com.googlecode.fascinator.redbox.plugins.curation.mint.CurationManager.java

/**
 * This method encapsulates the logic for curation in Mint
 *
 * @param oid The object ID being curated
 * @return JsonSimple The response object to send back to the
 * queue consumer
 */
private JsonSimple curation(JsonSimple message, String task, String oid) {
    JsonSimple response = new JsonSimple();

    //*******************
    // Collect object data

    // Transformer config
    JsonSimple itemConfig = getConfigFromStorage(oid);
    if (itemConfig == null) {
        log.error("Error accessing item configuration!");
        return new JsonSimple();
    }
    // Object properties
    Properties metadata = getObjectMetadata(oid);
    if (metadata == null) {
        log.error("Error accessing item metadata!");
        return new JsonSimple();
    }
    // Object metadata
    JsonSimple data = getDataFromStorage(oid);
    if (data == null) {
        log.error("Error accessing item data!");
        return new JsonSimple();
    }

    //*******************
    // Validate what we can see

    // Check object state
    boolean curated = false;
    boolean alreadyCurated = itemConfig.getBoolean(false, "curation", "alreadyCurated");
    boolean errors = false;

    // Can we already see this PID?
    String thisPid = null;
    if (metadata.containsKey(pidProperty)) {
        curated = true;
        thisPid = metadata.getProperty(pidProperty);

        // Or does it claim to have one from pre-ingest curation?
    } else {
        if (alreadyCurated) {
            // Make sure we can actually see an ID
            String id = data.getString(null, "metadata", "dc.identifier");
            if (id == null) {
                log.error("Item claims to be curated, but has no" + " 'dc.identifier': '{}'", oid);
                errors = true;

                // Let's fix this so it doesn't show up again
            } else {
                try {
                    log.info("Update object properties with ingested" + " ID: '{}'", oid);
                    // Metadata writes can be awkward... thankfully this is
                    //  code that should only ever execute once per object.
                    DigitalObject object = storage.getObject(oid);
                    metadata = object.getMetadata();
                    metadata.setProperty(pidProperty, id);
                    object.close();
                    metadata = getObjectMetadata(oid);
                    curated = true;
                    audit(response, oid, "Persistent ID set in properties");

                } catch (StorageException ex) {
                    log.error("Error accessing object '{}' in storage: ", oid, ex);
                    errors = true;
                }

            }
        }
    }

    //*******************
    // Decision making

    // Errors have occurred, email someone and do nothing
    if (errors) {
        emailObjectLink(response, oid, "An error occurred curating this object, some"
                + " manual intervention may be required; please see" + " the system logs.");
        audit(response, oid, "Errors during curation; aborted.");
        return response;
    }

    //***
    // What should happen per task if we have already been curated?
    if (curated) {

        // Happy ending
        if (task.equals("curation-response")) {
            log.info("Confirmation of curated object '{}'.", oid);

            // Send out upstream responses to objects waiting
            JSONArray responses = data.writeArray("responses");
            for (Object thisResponse : responses) {
                JsonSimple json = new JsonSimple((JsonObject) thisResponse);
                String broker = json.getString(brokerUrl, "broker");
                String responseOid = json.getString(null, "oid");
                String responseTask = json.getString(null, "task");
                JsonObject responseObj = createTask(response, broker, responseOid, responseTask);
                // Don't forget to tell them where it came from
                String id = json.getString(null, "quoteId");
                if (id != null) {
                    responseObj.put("originId", id);
                }
                responseObj.put("originOid", oid);
                // If NLA Integration is enabled, use the NLA ID instead
                if (nlaIntegrationEnabled && metadata.containsKey(nlaIdProperty) && useNlaIdForRelationships) {
                    responseObj.put("curatedPid", metadata.getProperty(nlaIdProperty));
                } else {
                    responseObj.put("curatedPid", thisPid);
                }
            }

            // Set a flag to let publish events that may come in later
            //  that this is ready to publish (if not already set)
            if (!metadata.containsKey(READY_PROPERTY)) {
                try {
                    DigitalObject object = storage.getObject(oid);
                    metadata = object.getMetadata();
                    metadata.setProperty(READY_PROPERTY, "ready");
                    object.close();
                    metadata = getObjectMetadata(oid);
                    audit(response, oid, "This object is ready for publication");

                } catch (StorageException ex) {
                    log.error("Error accessing object '{}' in storage: ", oid, ex);
                    emailObjectLink(response, oid, "This object is ready for publication, but an"
                            + " error occured writing to storage. Please" + " see the system log");
                }

                // Since the flag hasn't been set we also know this is the
                //   first time through, so generate some notifications
                emailObjectLink(response, oid,
                        "This email is confirming that the object linked" + " below has completed curation.");
                audit(response, oid, "Curation completed.");
            }

            // Schedule a followup to re-index and transform
            createTask(response, oid, "reharvest");
            return response;
        }

        // A response has come back from downstream
        if (task.equals("curation-pending")) {
            String childOid = message.getString(null, "originOid");
            String childId = message.getString(null, "originId");
            String curatedPid = message.getString(null, "curatedPid");

            boolean isReady = false;
            try {
                // False here will make sure we aren't sending out a bunch
                //  of requests again.
                isReady = checkChildren(response, data, oid, thisPid, false, childOid, childId, curatedPid);
            } catch (TransactionException ex) {
                log.error("Error updating related objects '{}': ", oid, ex);
                emailObjectLink(response, oid, "An error occurred curating this object, some"
                        + " manual intervention may be required; please see" + " the system logs.");
                audit(response, oid, "Errors curating relations; aborted.");
                return response;
            }

            // If it is ready
            if (isReady) {
                createTask(response, oid, "curation-response");
            }
            return response;
        }

        // The object has finished in-house curation
        if (task.equals("curation-confirm")) {
            // If NLA Integration is required and not completed yet
            if (nlaIntegrationEnabled && !metadata.containsKey(nlaIdProperty)) {
                // Make sure we only run for required datasources (Parties People at this stage)
                boolean sendToNla = false;
                for (String key : nlaIncludeTest.keySet()) {
                    String value = metadata.getProperty(key);
                    String testValue = nlaIncludeTest.get(key);
                    if (value != null && value.equals(testValue)) {
                        sendToNla = true;
                    }
                }

                if (sendToNla) {
                    // Check if we've released the party into the NLA feed yet
                    if (!metadata.containsKey(NLA_READY_PROPERTY) || !metadata.containsKey(NLA_DATE_PROPERTY)) {
                        try {
                            DigitalObject object = storage.getObject(oid);
                            metadata = object.getMetadata();
                            // Set Date
                            metadata.setProperty(NLA_DATE_PROPERTY, nlaDate.format(new Date()));
                            // Set Flag
                            metadata.setProperty(NLA_READY_PROPERTY, "ready");
                            // Cleanup
                            object.close();
                            metadata = getObjectMetadata(oid);
                            audit(response, oid, "This object is ready to go to the NLA");
                            // The EAC-CPF Template needs to be updated
                            createTask(response, oid, "reharvest");

                        } catch (StorageException ex) {
                            log.error("Error accessing object '{}' in storage: ", oid, ex);
                            emailObjectLink(response, oid, "This object is ready for the NLA, but an"
                                    + " error occured writing to storage. Please" + " see the system log");
                            return response;
                        }

                        // Since the flag hasn't been set we also know this is the
                        //   first time through, so generate some notifications
                        emailObjectLink(response, oid,
                                "This email is confirming that the object linked"
                                        + " below has completed curation and is ready to"
                                        + " be harvested by the National Library. NOTE:"
                                        + " This object is not ready for publication until"
                                        + " after the NLA has harvested it.");
                    } else {
                        audit(response, oid, "Curation attempt: This object is still waiting on the NLA");
                    }
                    return response;
                }
            } // Finish NLA

            // The object has finished, work on downstream 'children'
            boolean isReady = false;
            try {
                isReady = checkChildren(response, data, oid, thisPid, true);
            } catch (TransactionException ex) {
                log.error("Error processing related objects '{}': ", oid, ex);
                emailObjectLink(response, oid, "An error occurred curating this object, some"
                        + " manual intervention may be required; please see" + " the system logs.");
                audit(response, oid, "Errors curating relations; aborted.");
                return response;
            }

            // If it is ready on the first pass...
            if (isReady) {
                createTask(response, oid, "curation-response");
            } else {
                // Otherwise we are going to have to wait for children
                audit(response, oid, "Curation complete, but still waiting" + " on relations.");
            }

            return response;
        }

        // Since it is already curated, we are just storing any new
        //  relationships / responses and passing things along
        if (task.equals("curation-request") || task.equals("curation-query")) {
            alreadyCurated = message.getBoolean(false, "alreadyCurated");
            try {
                storeRequestData(message, oid);
            } catch (TransactionException ex) {
                log.error("Error storing request data '{}': ", oid, ex);
                emailObjectLink(response, oid, "An error occurred curating this object, some"
                        + " manual intervention may be required; please see" + " the system logs.");
                audit(response, oid, "Errors during curation; aborted.");
                return response;
            }
            // Requests
            if (task.equals("curation-request")) {
                JsonObject taskObj = createTask(response, oid, "curation");
                taskObj.put("alreadyCurated", true);
                return response;

                // Queries
            } else {
                // Rather than push to 'curation-response' we are just
                // sending a single response to the querying object
                JsonSimple respond = new JsonSimple(message.getObject("respond"));
                String broker = respond.getString(brokerUrl, "broker");
                String responseOid = respond.getString(null, "oid");
                String responseTask = respond.getString(null, "task");
                JsonObject responseObj = createTask(response, broker, responseOid, responseTask);
                // Don't forget to tell them where it came from
                responseObj.put("originOid", oid);
                responseObj.put("curatedPid", thisPid);
            }
        }

        // Same as above, but this is a second stage request, let's be a
        //   little sterner in case log filtering is occurring
        if (task.equals("curation")) {
            alreadyCurated = message.getBoolean(false, "alreadyCurated");
            log.info("Request to curate ignored. This object '{}' has" + " already been curated.", oid);
            JsonObject taskObj = createTask(response, oid, "curation-confirm");
            taskObj.put("alreadyCurated", true);
            return response;
        }

        //***
        // What should happen per task if we have *NOT* already been curated?
    } else {
        // Whoops! We shouldn't be confirming or responding to a non-curated item!!!
        if (task.equals("curation-confirm") || task.equals("curation-pending")) {
            emailObjectLink(response, oid,
                    "ERROR: Something has gone wrong with curation of this"
                            + " object. The system has received a '" + task + "'"
                            + " event, but the record does not appear to be"
                            + " curated. Please check the system logs for any" + " errors.");
            return response;
        }

        // Standard stuff - a request to curate non-curated data
        if (task.equals("curation-request")) {
            try {
                storeRequestData(message, oid);
            } catch (TransactionException ex) {
                log.error("Error storing request data '{}': ", oid, ex);
                emailObjectLink(response, oid, "An error occurred curating this object, some"
                        + " manual intervention may be required; please see" + " the system logs.");
                audit(response, oid, "Errors during curation; aborted.");
                return response;
            }

            if (manualConfirmation) {
                emailObjectLink(response, oid, "A curation request has been recieved for this"
                        + " object. You can find a link below to approve" + " the request.");
                audit(response, oid, "Curation request received. Pending");
            } else {
                createTask(response, oid, "curation");
            }
            return response;
        }

        // We can't do much here, just store the response address
        if (task.equals("curation-query")) {
            try {
                storeRequestData(message, oid);
            } catch (TransactionException ex) {
                log.error("Error storing request data '{}': ", oid, ex);
                emailObjectLink(response, oid, "An error occurred curating this object, some"
                        + " manual intervention may be required; please see" + " the system logs.");
                audit(response, oid, "Errors during curation; aborted.");
                return response;
            }
            return response;
        }

        // The actual curation event
        if (task.equals("curation")) {
            audit(response, oid, "Object curation requested.");
            List<String> list = itemConfig.getStringList("transformer", "curation");

            // Pass through whichever curation transformers are configured
            if (list != null && !list.isEmpty()) {
                for (String id : list) {
                    JsonObject order = newTransform(response, id, oid);
                    JsonObject config = (JsonObject) order.get("config");
                    // Make sure it even has an override...
                    JsonObject override = itemConfig.getObject("transformerOverrides", id);
                    if (override != null) {
                        config.putAll(override);
                    }
                }

            } else {
                log.warn("This object has no configured transformers!");
            }

            // Force an index update after the ID has been created,
            //   but before "curation-confirm"
            JsonObject order = newIndex(response, oid);
            order.put("forceCommit", true);

            // Don't forget to come back
            createTask(response, oid, "curation-confirm");
            return response;
        }
    }

    log.error("Invalid message received. Unknown task:\n{}", message.toString(true));
    emailObjectLink(response, oid, "The curation manager has received an invalid curation message"
            + " for this object. Please see the system logs.");
    return response;
}

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

@Override
public boolean checkAccess(String username, String password, Properties props) throws Exception {
    BasicAWSCredentials _cred = new BasicAWSCredentials(username, password);
    if (props.containsKey("default-bucket-location")) {
        bucketLocation = RegionUtils.getRegion(props.getProperty("default-bucket-location"));
    }

    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setMaxConnections(Main.dseIOThreads * 2);
    clientConfig.setConnectionTimeout(10000);
    clientConfig.setSocketTimeout(10000);
    String s3Target = null;

    if (props.containsKey("s3-target")) {
        s3Target = props.getProperty("s3-target");
    }
    if (props.containsKey("proxy-host")) {
        clientConfig.setProxyHost(props.getProperty("proxy-host"));
    }
    if (props.containsKey("proxy-domain")) {
        clientConfig.setProxyDomain(props.getProperty("proxy-domain"));
    }
    if (props.containsKey("proxy-password")) {
        clientConfig.setProxyPassword(props.getProperty("proxy-password"));
    }
    if (props.containsKey("proxy-port")) {
        clientConfig.setProxyPort(Integer.parseInt(props.getProperty("proxy-port")));
    }
    if (props.containsKey("proxy-username")) {
        clientConfig.setProxyUsername(props.getProperty("proxy-username"));
    }
    s3Service = new AmazonS3Client(_cred, clientConfig);
    if (s3Target != null) {
        TrustStrategy acceptingTrustStrategy = new TrustStrategy() {
            @Override
            public boolean isTrusted(X509Certificate[] certificate, String authType) {
                return true;
            }
        };
        SSLSocketFactory sf = new SSLSocketFactory(acceptingTrustStrategy,
                SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
        clientConfig.getApacheHttpClientConfig().withSslSocketFactory(sf);
        s3Service.setEndpoint(s3Target);
    }
    s3Service.listBuckets();
    return true;
}
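The repeated containsKey/getProperty pairs above are the usual shape for optional settings: each guard simply skips configuration that was not supplied. A small sketch of a helper that factors out the pattern (the helper itself is illustrative, not part of the source above):

import java.util.Properties;
import java.util.function.Consumer;

final class OptionalProps {
    // Apply a string-valued setting only when the key is present.
    static void ifPresent(Properties props, String key, Consumer<String> setter) {
        if (props.containsKey(key)) {
            setter.accept(props.getProperty(key));
        }
    }
}

// usage, mirroring the proxy settings above:
//   OptionalProps.ifPresent(props, "proxy-host", clientConfig::setProxyHost);
//   OptionalProps.ifPresent(props, "proxy-domain", clientConfig::setProxyDomain);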

From source file: com.dtolabs.rundeck.plugin.resources.ec2.InstanceToNodeMapper.java

/**
 * Convert an AWS EC2 Instance to a RunDeck INodeEntry based on the mapping input
 */
@SuppressWarnings("unchecked")
static INodeEntry instanceToNode(final Instance inst, final Properties mapping) throws GeneratorException {
    final NodeEntryImpl node = new NodeEntryImpl();

    //evaluate single settings.selector=tags/* mapping
    if ("tags/*".equals(mapping.getProperty("attributes.selector"))) {
        //iterate through instance tags and generate settings
        for (final Tag tag : inst.getTags()) {
            if (null == node.getAttributes()) {
                node.setAttributes(new HashMap<String, String>());
            }
            node.getAttributes().put(tag.getKey(), tag.getValue());
        }
    }
    if (null != mapping.getProperty("tags.selector")) {
        final String selector = mapping.getProperty("tags.selector");
        final String value = applySelector(inst, selector, mapping.getProperty("tags.default"), true);
        if (null != value) {
            final String[] values = value.split(",");
            final HashSet<String> tagset = new HashSet<String>();
            for (final String s : values) {
                tagset.add(s.trim());
            }
            if (null == node.getTags()) {
                node.setTags(tagset);
            } else {
                final HashSet orig = new HashSet(node.getTags());
                orig.addAll(tagset);
                node.setTags(orig);
            }
        }
    }
    if (null == node.getTags()) {
        node.setTags(new HashSet());
    }
    final HashSet orig = new HashSet(node.getTags());
    //apply specific tag selectors
    final Pattern tagPat = Pattern.compile("^tag\\.(.+?)\\.selector$");
    //evaluate tag selectors
    for (final Object o : mapping.keySet()) {
        final String key = (String) o;
        final String selector = mapping.getProperty(key);
        //split selector by = if present
        final String[] selparts = selector.split("=");
        final Matcher m = tagPat.matcher(key);
        if (m.matches()) {
            final String tagName = m.group(1);
            if (null == node.getAttributes()) {
                node.setAttributes(new HashMap<String, String>());
            }
            final String value = applySelector(inst, selparts[0], null);
            if (null != value) {
                if (selparts.length > 1 && !value.equals(selparts[1])) {
                    continue;
                }
                //add the tag if the value is not null
                orig.add(tagName);
            }
        }
    }
    node.setTags(orig);

    //apply default values which do not have corresponding selector
    final Pattern attribDefPat = Pattern.compile("^([^.]+?)\\.default$");
    //evaluate selectors
    for (final Object o : mapping.keySet()) {
        final String key = (String) o;
        final String value = mapping.getProperty(key);
        final Matcher m = attribDefPat.matcher(key);
        if (m.matches() && (!mapping.containsKey(key + ".selector")
                || "".equals(mapping.getProperty(key + ".selector")))) {
            final String attrName = m.group(1);
            if (null == node.getAttributes()) {
                node.setAttributes(new HashMap<String, String>());
            }
            if (null != value) {
                node.getAttributes().put(attrName, value);
            }
        }
    }

    final Pattern attribPat = Pattern.compile("^([^.]+?)\\.selector$");
    //evaluate selectors
    for (final Object o : mapping.keySet()) {
        final String key = (String) o;
        final String selector = mapping.getProperty(key);
        final Matcher m = attribPat.matcher(key);
        if (m.matches()) {
            final String attrName = m.group(1);
            if (attrName.equals("tags")) {
                //already handled
                continue;
            }
            if (null == node.getAttributes()) {
                node.setAttributes(new HashMap<String, String>());
            }
            final String value = applySelector(inst, selector, mapping.getProperty(attrName + ".default"));
            if (null != value) {
                //use nodename-settingname to make the setting unique to the node
                node.getAttributes().put(attrName, value);
            }
        }
    }
    //        String hostSel = mapping.getProperty("hostname.selector");
    //        String host = applySelector(inst, hostSel, mapping.getProperty("hostname.default"));
    //        if (null == node.getHostname()) {
    //            System.err.println("Unable to determine hostname for instance: " + inst.getInstanceId());
    //            return null;
    //        }
    String name = node.getNodename();
    if (null == name || "".equals(name)) {
        name = node.getHostname();
    }
    if (null == name || "".equals(name)) {
        name = inst.getInstanceId();
    }
    node.setNodename(name);

    // Set ssh port on hostname if not 22
    String sshport = node.getAttributes().get("sshport");
    if (sshport != null && !sshport.equals("") && !sshport.equals("22")) {
        node.setHostname(node.getHostname() + ":" + sshport);
    }

    return node;
}

From source file: com.dtolabs.rundeck.plugin.resources.gcp.InstanceToNodeMapper.java

/**
 * Convert a GCP GCE Instance to a RunDeck INodeEntry based on the mapping input
 */
@SuppressWarnings("unchecked")
static INodeEntry instanceToNode(final Instance inst, final Properties mapping) throws GeneratorException {
    final NodeEntryImpl node = new NodeEntryImpl();
    logger.error("instancetoNode call");
    //evaluate single settings.selector=tags/* mapping
    if ("tags/*".equals(mapping.getProperty("attributes.selector"))) {
        //iterate through instance tags and generate settings
        /*for (final String tag : inst.getTags().getItems()) {
        if (null == node.getAttributes()) {
            node.setAttribute(new HashMap<String, String>());
        }
        node.getAttribute().put(tag.getKey(), tag.getValue());
        }*/
    }
    if (null != mapping.getProperty("tags.selector")) {
        final String selector = mapping.getProperty("tags.selector");
        final String value = applySelector(inst, selector, mapping.getProperty("tags.default"), true);
        if (null != value) {
            final String[] values = value.split(",");
            final HashSet<String> tagset = new HashSet<String>();
            for (final String s : values) {
                tagset.add(s.trim());
            }
            if (null == node.getTags()) {
                node.setTags(tagset);
            } else {
                final HashSet orig = new HashSet(node.getTags());
                orig.addAll(tagset);
                node.setTags(orig);
            }
        }
    }
    if (null == node.getTags()) {
        node.setTags(new HashSet());
    }
    final HashSet orig = new HashSet(node.getTags());
    //apply specific tag selectors
    final Pattern tagPat = Pattern.compile("^tag\\.(.+?)\\.selector$");
    //evaluate tag selectors
    for (final Object o : mapping.keySet()) {
        final String key = (String) o;
        final String selector = mapping.getProperty(key);
        //split selector by = if present
        final String[] selparts = selector.split("=");
        final Matcher m = tagPat.matcher(key);
        if (m.matches()) {
            final String tagName = m.group(1);
            if (null == node.getAttributes()) {
                node.setAttributes(new HashMap<String, String>());
            }
            final String value = applySelector(inst, selparts[0], null);
            if (null != value) {
                if (selparts.length > 1 && !value.equals(selparts[1])) {
                    continue;
                }
                //add the tag if the value is not null
                orig.add(tagName);
            }
        }
    }
    node.setTags(orig);

    //apply default values which do not have corresponding selector
    final Pattern attribDefPat = Pattern.compile("^([^.]+?)\\.default$");
    //evaluate selectors
    for (final Object o : mapping.keySet()) {
        final String key = (String) o;
        final String value = mapping.getProperty(key);
        final Matcher m = attribDefPat.matcher(key);
        if (m.matches() && (!mapping.containsKey(key + ".selector")
                || "".equals(mapping.getProperty(key + ".selector")))) {
            final String attrName = m.group(1);
            if (null == node.getAttributes()) {
                node.setAttributes(new HashMap<String, String>());
            }
            if (null != value) {
                node.getAttributes().put(attrName, value);
            }
        }
    }

    final Pattern attribPat = Pattern.compile("^([^.]+?)\\.selector$");
    //evaluate selectors
    for (final Object o : mapping.keySet()) {
        final String key = (String) o;
        final String selector = mapping.getProperty(key);
        final Matcher m = attribPat.matcher(key);
        if (m.matches()) {
            final String attrName = m.group(1);
            if (attrName.equals("tags")) {
                //already handled
                continue;
            }
            if (null == node.getAttributes()) {
                node.setAttributes(new HashMap<String, String>());
            }
            final String value = applySelector(inst, selector, mapping.getProperty(attrName + ".default"));
            if (null != value) {
                //use nodename-settingname to make the setting unique to the node
                node.getAttributes().put(attrName, value);
            }
        }
    }
    String hostSel = mapping.getProperty("hostname.selector");
    //logger.error("This is the hostSel variable " + hostSel);
    String host = applySelector(inst, hostSel, mapping.getProperty("hostname.default"));
    //logger.error("This is the host variable " + host);
    //logger.error("This is the hostname.default mapping param " + mapping.getProperty("hostname.default"));
    if (null == host) {
        System.err.println("Unable to determine hostname for instance: " + inst.getId());
        return null;
    }
    node.setHostname(host);
    String name = node.getNodename();
    if (null == name || "".equals(name)) {
        name = node.getHostname();
    }
    if (null == name || "".equals(name)) {
        name = inst.getId().toString();
    }
    node.setNodename(name);

    // Set ssh port on hostname if not 22
    String sshport = node.getAttributes().get("sshport");
    if (sshport != null && !sshport.equals("") && !sshport.equals("22")) {
        node.setHostname(node.getHostname() + ":" + sshport);
    }

    return node;
}

From source file: gobblin.data.management.conversion.hive.validation.ValidationJob.java

public ValidationJob(String jobId, Properties props) throws IOException {
    super(jobId, log);

    // Set the conversion config prefix for Avro to ORC
    props.setProperty(HiveDatasetFinder.HIVE_DATASET_CONFIG_PREFIX_KEY, HIVE_DATASET_CONFIG_AVRO_PREFIX);

    Config config = ConfigFactory.parseProperties(props);
    this.props = props;
    this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(config), ValidationJob.class);
    this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, EventConstants.CONVERSION_NAMESPACE)
            .build();
    this.updateProvider = UpdateProviderFactory.create(props);
    this.datasetFinder = new ConvertibleHiveDatasetFinder(getSourceFs(), props, this.eventSubmitter);
    this.fs = FileSystem.get(new Configuration());

    int maxLookBackDays = Integer.parseInt(props.getProperty(HiveSource.HIVE_SOURCE_MAXIMUM_LOOKBACK_DAYS_KEY,
            DEFAULT_HIVE_SOURCE_MAXIMUM_LOOKBACK_DAYS));
    int skipRecentThanDays = Integer.parseInt(props.getProperty(HIVE_SOURCE_SKIP_RECENT_THAN_DAYS_KEY,
            DEFAULT_HIVE_SOURCE_SKIP_RECENT_THAN_DAYS));
    this.maxLookBackTime = new DateTime().minusDays(maxLookBackDays).getMillis();
    this.skipRecentThanTime = new DateTime().minusDays(skipRecentThanDays).getMillis();

    int maxThreadCount = Integer.parseInt(props.getProperty(MAX_THREAD_COUNT, DEFAULT_MAX_THREAD_COUNT));
    this.exec = Executors.newFixedThreadPool(maxThreadCount,
            ExecutorsUtils.newThreadFactory(Optional.of(LoggerFactory.getLogger(ValidationJob.class)),
                    Optional.of("getValidationOutputFromHive")));
    this.futures = Lists.newArrayList();
    EventSubmitter.submit(Optional.of(this.eventSubmitter), EventConstants.VALIDATION_SETUP_EVENT);

    this.pool = HiveMetastoreClientPool.get(props,
            Optional.fromNullable(props.getProperty(HiveDatasetFinder.HIVE_METASTORE_URI_KEY)));
    Preconditions.checkArgument(props.containsKey(VALIDATION_TYPE_KEY),
            "Missing property " + VALIDATION_TYPE_KEY);
    this.validationType = ValidationType.valueOf(props.getProperty(VALIDATION_TYPE_KEY));
    this.ignoreDataPathIdentifierList = COMMA_BASED_SPLITTER
            .splitToList(props.getProperty(HIVE_VALIDATION_IGNORE_DATA_PATH_IDENTIFIER_KEY,
                    DEFAULT_HIVE_VALIDATION_IGNORE_DATA_PATH_IDENTIFIER));
    this.throwables = new ArrayList<>();
    this.isNestedORC = Boolean.parseBoolean(props.getProperty(IS_NESTED_ORC, DEFAULT_IS_NESTED_ORC));
    this.hiveSettings = Splitter.on(";").trimResults().omitEmptyStrings()
            .splitToList(props.getProperty(HIVE_SETTINGS, StringUtils.EMPTY));
}
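
The Preconditions.checkArgument(props.containsKey(...)) call in this last example is containsKey in its fail-fast validation role. A dependency-free sketch of the same pattern (the helper name is hypothetical):

final class RequiredProps {
    // Hypothetical helper: fail fast when required configuration is missing.
    static void requireKeys(java.util.Properties props, String... keys) {
        for (String key : keys) {
            if (!props.containsKey(key)) {
                throw new IllegalArgumentException("Missing property " + key);
            }
        }
    }
}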