Example usage for com.amazonaws AmazonServiceException getMessage

List of usage examples for com.amazonaws AmazonServiceException getMessage

Introduction

This page lists example usages of com.amazonaws.AmazonServiceException.getMessage(). In the AWS SDK for Java (v1), AmazonServiceException overrides getMessage() to return a human-readable description of the service error, typically combining the error message with details such as the HTTP status code, AWS error code, and request ID.

Prototype

@Override
public String getMessage()
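
A minimal, self-contained sketch of the typical call pattern, assuming the AWS SDK for Java v1 is on the classpath and default credentials/region are configured; the S3 client, bucket, and key below are illustrative placeholders, not taken from the project examples that follow.

import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class GetMessageExample {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        try {
            // hypothetical bucket/key; any request the service rejects raises AmazonServiceException
            s3.getObjectMetadata("example-bucket", "example-key");
        } catch (AmazonServiceException ase) {
            // getMessage() returns a human-readable summary of the service error
            System.err.println("Request rejected: " + ase.getMessage());
            System.err.println("HTTP status: " + ase.getStatusCode());
        }
    }
}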

Usage

From source file: nl.nekoconeko.glaciercmd.GlacierClient.java

License: Open Source License

private GetJobOutputResult waitForJobCompletion(String vault, String jobId) throws InterruptedException {
    GetJobOutputRequest job = new GetJobOutputRequest();
    job.setVaultName(vault);
    job.setJobId(jobId);

    GetJobOutputResult res = null;
    boolean stop = false;
    while (!stop) {
        int code = 0;
        String error = "";
        try {
            res = this.client.getJobOutput(job);
            code = res.getStatus();
        } catch (AmazonServiceException e) {
            error = e.getMessage();
            code = e.getStatusCode();
        }

        switch (code) {
        case 200:
            stop = true;
            break;
        case 400:
            if (error.contains("The job is not currently available for download")) {
                Formatter.printInfoLine("Job is not finished yet, waiting...");
            } else {
                Formatter.printErrorLine("AWS returned code 400. Body Malformed.");
                System.exit(1);
            }
            break;
        case 404:
            Formatter.printErrorLine("AWS returned code 404. Vault or Job could not be found.");
            System.exit(1);
        default:
            Formatter.printInfoLine(String.format("Job returned code %d, waiting 30 seconds", code));
            break;
        }
        if (!stop) {
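            // Glacier retrieval jobs can take hours; poll once per minute until the output is ready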
            Thread.sleep(1000 * 60);
        }
    }

    return res;
}

From source file: org.akvo.flow.deploy.Deploy.java

License: Open Source License

public static void main(String[] args) throws IOException {
    if (args.length != 7) {
        System.err.println("Missing argument, please provide S3 access key, S3 secret key, "
                + "instanceId , apkPath, version, GAE username and GAE password");
        return;
    }

    File file = new File(args[APK_PATH]);
    if (!file.exists()) {
        System.err.println("Can't find apk at " + args[APK_PATH]);
        return;
    }

    final String accessKey = args[S3_ACCESS_KEY];
    final String secretKey = args[S3_SECRET_KEY];
    final String instance = args[INSTANCE_ID];
    final String accountId = args[ACCOUNT_ID];
    final String accountSecret = args[ACCOUNT_SECRET];
    final String version = args[VERSION];

    final String s3Path = "apk/" + instance + "/" + file.getName();
    final String s3Url = "http://akvoflow.s3.amazonaws.com/apk/" + instance + '/' + file.getName();
    final String host = instance + ".appspot.com";

    try {
        uploadS3(accessKey, secretKey, s3Path, file);
        updateVersion(host, accountId, accountSecret, s3Url, version, getMD5Checksum(file));
    } catch (AmazonServiceException ase) {
        System.err.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.err.println("Error Message:    " + ase.getMessage());
        System.err.println("HTTP Status Code: " + ase.getStatusCode());
        System.err.println("AWS Error Code:   " + ase.getErrorCode());
        System.err.println("Error Type:       " + ase.getErrorType());
        System.err.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.err.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.err.println("Error Message: " + ace.getMessage());
    } catch (IOException e) {
        System.err.println("Error updating APK version in GAE");
        e.printStackTrace();
    }

}

From source file: org.apache.airavata.gfac.ec2.AmazonInstanceScheduler.java

License: Apache License

/**
 * Monitors the CPU Utilization using Amazon Cloud Watch. In order to monitor the instance, Cloud Watch Monitoring
 * should be enabled for the running instance.
 *
 * @param credential EC2 credentials
 * @param instanceId instance id
 * @return average CPU utilization of the instance
 */
public static double monitorInstance(AWSCredentials credential, String instanceId) {
    try {
        AmazonCloudWatchClient cw = new AmazonCloudWatchClient(credential);

        long offsetInMilliseconds = 1000 * 60 * 60 * 24; // look back over the last 24 hours
        GetMetricStatisticsRequest request = new GetMetricStatisticsRequest()
                .withStartTime(new Date(new Date().getTime() - offsetInMilliseconds)).withNamespace("AWS/EC2")
                .withPeriod(60 * 60)
                .withDimensions(new Dimension().withName("InstanceId").withValue(instanceId))
                .withMetricName("CPUUtilization").withStatistics("Average", "Maximum").withEndTime(new Date());
        GetMetricStatisticsResult getMetricStatisticsResult = cw.getMetricStatistics(request);

        // Note: this ends up holding the average of the last datapoint returned,
        // not a mean across all datapoints.
        double avgCPUUtilization = 0;
        List<Datapoint> datapoints = getMetricStatisticsResult.getDatapoints();
        for (Datapoint dp : datapoints) {
            avgCPUUtilization = dp.getAverage();
            log.info(instanceId + " instance's average CPU utilization : " + dp.getAverage());
        }

        return avgCPUUtilization;

    } catch (AmazonServiceException ase) {
        log.error("Caught an AmazonServiceException, which means the request was made  "
                + "to Amazon EC2, but was rejected with an error response for some reason.");
        log.error("Error Message:    " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code:   " + ase.getErrorCode());
        log.error("Error Type:       " + ase.getErrorType());
        log.error("Request ID:       " + ase.getRequestId());

    }
    return 0;
}

From source file: org.apache.druid.storage.s3.S3DataSegmentPuller.java

License: Apache License

FileUtils.FileCopyResult getSegmentFiles(final S3Coords s3Coords, final File outDir)
        throws SegmentLoadingException {

    log.info("Pulling index at path[%s] to outDir[%s]", s3Coords, outDir);

    if (!isObjectInBucket(s3Coords)) {
        throw new SegmentLoadingException("IndexFile[%s] does not exist.", s3Coords);
    }

    try {
        org.apache.commons.io.FileUtils.forceMkdir(outDir);

        final URI uri = URI.create(StringUtils.format("s3://%s/%s", s3Coords.bucket, s3Coords.path));
        final ByteSource byteSource = new ByteSource() {
            @Override
            public InputStream openStream() throws IOException {
                try {
                    return buildFileObject(uri).openInputStream();
                } catch (AmazonServiceException e) {
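                    // S3Utils.S3RETRY classifies transient S3 failures; wrapping one in an
                    // IOException lets the retrying copy logic (CompressionUtils below) try again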
                    if (e.getCause() != null) {
                        if (S3Utils.S3RETRY.apply(e)) {
                            throw new IOException("Recoverable exception", e);
                        }
                    }
                    throw new RuntimeException(e);
                }
            }
        };
        if (CompressionUtils.isZip(s3Coords.path)) {
            final FileUtils.FileCopyResult result = CompressionUtils.unzip(byteSource, outDir, S3Utils.S3RETRY,
                    false);
            log.info("Loaded %d bytes from [%s] to [%s]", result.size(), s3Coords.toString(),
                    outDir.getAbsolutePath());
            return result;
        }
        if (CompressionUtils.isGz(s3Coords.path)) {
            final String fname = Files.getNameWithoutExtension(uri.getPath());
            final File outFile = new File(outDir, fname);

            final FileUtils.FileCopyResult result = CompressionUtils.gunzip(byteSource, outFile,
                    S3Utils.S3RETRY);
            log.info("Loaded %d bytes from [%s] to [%s]", result.size(), s3Coords.toString(),
                    outFile.getAbsolutePath());
            return result;
        }
        throw new IAE("Do not know how to load file type at [%s]", uri.toString());
    } catch (Exception e) {
        try {
            org.apache.commons.io.FileUtils.deleteDirectory(outDir);
        } catch (IOException ioe) {
            log.warn(ioe, "Failed to remove output directory [%s] for segment pulled from [%s]",
                    outDir.getAbsolutePath(), s3Coords.toString());
        }
        throw new SegmentLoadingException(e, e.getMessage());
    }
}

From source file: org.apache.hadoop.fs.s3a.S3AFileSystem.java

License: Apache License

private void printAmazonServiceException(AmazonServiceException ase) {
    LOG.info("Caught an AmazonServiceException, which means your request made it "
            + "to Amazon S3, but was rejected with an error response for some reason.");
    LOG.info("Error Message: " + ase.getMessage());
    LOG.info("HTTP Status Code: " + ase.getStatusCode());
    LOG.info("AWS Error Code: " + ase.getErrorCode());
    LOG.info("Error Type: " + ase.getErrorType());
    LOG.info("Request ID: " + ase.getRequestId());
    LOG.info("Class Name: " + ase.getClass().getName());
}
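
Note the accessor family used across these examples (getMessage, getStatusCode, getErrorCode, getErrorType, getRequestId): in SDK v1, getMessage() typically already folds most of these details into a single string, so logging them separately mainly serves structured output.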

From source file: org.apache.nifi.processors.aws.dynamodb.AbstractDynamoDBProcessor.java

License: Apache License

protected List<FlowFile> processServiceException(final ProcessSession session, List<FlowFile> flowFiles,
        AmazonServiceException exception) {
    List<FlowFile> failedFlowFiles = new ArrayList<>();
    for (FlowFile flowFile : flowFiles) {
        Map<String, String> attributes = new HashMap<>();
        attributes.put(DYNAMODB_ERROR_EXCEPTION_MESSAGE, exception.getMessage());
        attributes.put(DYNAMODB_ERROR_CODE, exception.getErrorCode());
        attributes.put(DYNAMODB_ERROR_MESSAGE, exception.getErrorMessage());
        attributes.put(DYNAMODB_ERROR_TYPE, exception.getErrorType().name());
        attributes.put(DYNAMODB_ERROR_SERVICE, exception.getServiceName());
        attributes.put(DYNAMODB_ERROR_RETRYABLE, Boolean.toString(exception.isRetryable()));
        attributes.put(DYNAMODB_ERROR_REQUEST_ID, exception.getRequestId());
        attributes.put(DYNAMODB_ERROR_STATUS_CODE, Integer.toString(exception.getStatusCode()));
        flowFile = session.putAllAttributes(flowFile, attributes);
        failedFlowFiles.add(flowFile);
    }
    return failedFlowFiles;
}

From source file: org.apache.nifi.processors.aws.dynamodb.DeleteDynamoDB.java

License: Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    List<FlowFile> flowFiles = session
            .get(context.getProperty(BATCH_SIZE).evaluateAttributeExpressions().asInteger());
    if (flowFiles == null || flowFiles.size() == 0) {
        return;
    }

    Map<ItemKeys, FlowFile> keysToFlowFileMap = new HashMap<>();

    final String table = context.getProperty(TABLE).evaluateAttributeExpressions().getValue();

    final String hashKeyName = context.getProperty(HASH_KEY_NAME).evaluateAttributeExpressions().getValue();
    final String hashKeyValueType = context.getProperty(HASH_KEY_VALUE_TYPE).getValue();
    final String rangeKeyName = context.getProperty(RANGE_KEY_NAME).evaluateAttributeExpressions().getValue();
    final String rangeKeyValueType = context.getProperty(RANGE_KEY_VALUE_TYPE).getValue();

    TableWriteItems tableWriteItems = new TableWriteItems(table);

    for (FlowFile flowFile : flowFiles) {
        final Object hashKeyValue = getValue(context, HASH_KEY_VALUE_TYPE, HASH_KEY_VALUE, flowFile);
        final Object rangeKeyValue = getValue(context, RANGE_KEY_VALUE_TYPE, RANGE_KEY_VALUE, flowFile);

        if (!isHashKeyValueConsistent(hashKeyName, hashKeyValue, session, flowFile)) {
            continue;
        }

        if (!isRangeKeyValueConsistent(rangeKeyName, rangeKeyValue, session, flowFile)) {
            continue;
        }

        if (rangeKeyValue == null || StringUtils.isBlank(rangeKeyValue.toString())) {
            tableWriteItems.addHashOnlyPrimaryKeysToDelete(hashKeyName, hashKeyValue);
        } else {
            tableWriteItems.addHashAndRangePrimaryKeyToDelete(hashKeyName, hashKeyValue, rangeKeyName,
                    rangeKeyValue);
        }
        keysToFlowFileMap.put(new ItemKeys(hashKeyValue, rangeKeyValue), flowFile);
    }

    if (keysToFlowFileMap.isEmpty()) {
        return;
    }

    final DynamoDB dynamoDB = getDynamoDB();

    try {
        BatchWriteItemOutcome outcome = dynamoDB.batchWriteItem(tableWriteItems);

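        // DynamoDB batch writes can return unprocessed items (e.g. under throttling); route those separately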
        handleUnprocessedItems(session, keysToFlowFileMap, table, hashKeyName, hashKeyValueType, rangeKeyName,
                rangeKeyValueType, outcome);

        // All non unprocessed items are successful
        for (FlowFile flowFile : keysToFlowFileMap.values()) {
            getLogger().debug("Successfully deleted item from dynamodb : " + table);
            session.transfer(flowFile, REL_SUCCESS);
        }
    } catch (AmazonServiceException exception) {
        getLogger().error("Could not process flowFiles due to service exception : " + exception.getMessage());
        List<FlowFile> failedFlowFiles = processServiceException(session, flowFiles, exception);
        session.transfer(failedFlowFiles, REL_FAILURE);
    } catch (AmazonClientException exception) {
        getLogger().error("Could not process flowFiles due to client exception : " + exception.getMessage());
        List<FlowFile> failedFlowFiles = processClientException(session, flowFiles, exception);
        session.transfer(failedFlowFiles, REL_FAILURE);
    } catch (Exception exception) {
        getLogger().error("Could not process flowFiles due to exception : " + exception.getMessage());
        List<FlowFile> failedFlowFiles = processException(session, flowFiles, exception);
        session.transfer(failedFlowFiles, REL_FAILURE);
    }
}

From source file: org.apache.nifi.processors.aws.dynamodb.GetDynamoDB.java

License: Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    List<FlowFile> flowFiles = session
            .get(context.getProperty(BATCH_SIZE).evaluateAttributeExpressions().asInteger());
    if (flowFiles == null || flowFiles.size() == 0) {
        return;
    }

    Map<ItemKeys, FlowFile> keysToFlowFileMap = new HashMap<>();

    final String table = context.getProperty(TABLE).evaluateAttributeExpressions().getValue();
    TableKeysAndAttributes tableKeysAndAttributes = new TableKeysAndAttributes(table);

    final String hashKeyName = context.getProperty(HASH_KEY_NAME).evaluateAttributeExpressions().getValue();
    final String rangeKeyName = context.getProperty(RANGE_KEY_NAME).evaluateAttributeExpressions().getValue();
    final String jsonDocument = context.getProperty(JSON_DOCUMENT).evaluateAttributeExpressions().getValue();

    for (FlowFile flowFile : flowFiles) {
        final Object hashKeyValue = getValue(context, HASH_KEY_VALUE_TYPE, HASH_KEY_VALUE, flowFile);
        final Object rangeKeyValue = getValue(context, RANGE_KEY_VALUE_TYPE, RANGE_KEY_VALUE, flowFile);

        if (!isHashKeyValueConsistent(hashKeyName, hashKeyValue, session, flowFile)) {
            continue;
        }

        if (!isRangeKeyValueConsistent(rangeKeyName, rangeKeyValue, session, flowFile)) {
            continue;
        }

        keysToFlowFileMap.put(new ItemKeys(hashKeyValue, rangeKeyValue), flowFile);

        if (rangeKeyValue == null || StringUtils.isBlank(rangeKeyValue.toString())) {
            tableKeysAndAttributes.addHashOnlyPrimaryKey(hashKeyName, hashKeyValue);
        } else {
            tableKeysAndAttributes.addHashAndRangePrimaryKey(hashKeyName, hashKeyValue, rangeKeyName,
                    rangeKeyValue);
        }
    }

    if (keysToFlowFileMap.isEmpty()) {
        return;
    }

    final DynamoDB dynamoDB = getDynamoDB();

    try {
        BatchGetItemOutcome result = dynamoDB.batchGetItem(tableKeysAndAttributes);

        // Handle processed items and get the json document
        List<Item> items = result.getTableItems().get(table);
        for (Item item : items) {
            ItemKeys itemKeys = new ItemKeys(item.get(hashKeyName), item.get(rangeKeyName));
            FlowFile flowFile = keysToFlowFileMap.get(itemKeys);

            if (item.get(jsonDocument) != null) {
                ByteArrayInputStream bais = new ByteArrayInputStream(item.getJSON(jsonDocument).getBytes());
                flowFile = session.importFrom(bais, flowFile);
            }

            session.transfer(flowFile, REL_SUCCESS);
            keysToFlowFileMap.remove(itemKeys);
        }

        // Handle unprocessed keys
        Map<String, KeysAndAttributes> unprocessedKeys = result.getUnprocessedKeys();
        if (unprocessedKeys != null && unprocessedKeys.size() > 0) {
            KeysAndAttributes keysAndAttributes = unprocessedKeys.get(table);
            List<Map<String, AttributeValue>> keys = keysAndAttributes.getKeys();

            for (Map<String, AttributeValue> unprocessedKey : keys) {
                Object hashKeyValue = getAttributeValue(context, HASH_KEY_VALUE_TYPE,
                        unprocessedKey.get(hashKeyName));
                Object rangeKeyValue = getAttributeValue(context, RANGE_KEY_VALUE_TYPE,
                        unprocessedKey.get(rangeKeyName));
                sendUnprocessedToUnprocessedRelationship(session, keysToFlowFileMap, hashKeyValue,
                        rangeKeyValue);
            }
        }

        // Handle any remaining items: nothing came back for them, so mark them not found.
        // Iterating the entry set and clearing afterwards avoids a
        // ConcurrentModificationException from removing while iterating the key set.
        for (Map.Entry<ItemKeys, FlowFile> entry : keysToFlowFileMap.entrySet()) {
            FlowFile flowFile = session.putAttribute(entry.getValue(), DYNAMODB_KEY_ERROR_NOT_FOUND,
                    DYNAMODB_KEY_ERROR_NOT_FOUND_MESSAGE + entry.getKey().toString());
            session.transfer(flowFile, REL_NOT_FOUND);
        }
        keysToFlowFileMap.clear();

    } catch (AmazonServiceException exception) {
        getLogger().error("Could not process flowFiles due to service exception : " + exception.getMessage());
        List<FlowFile> failedFlowFiles = processServiceException(session, flowFiles, exception);
        session.transfer(failedFlowFiles, REL_FAILURE);
    } catch (AmazonClientException exception) {
        getLogger().error("Could not process flowFiles due to client exception : " + exception.getMessage());
        List<FlowFile> failedFlowFiles = processClientException(session, flowFiles, exception);
        session.transfer(failedFlowFiles, REL_FAILURE);
    } catch (Exception exception) {
        getLogger().error("Could not process flowFiles due to exception : " + exception.getMessage());
        List<FlowFile> failedFlowFiles = processException(session, flowFiles, exception);
        session.transfer(failedFlowFiles, REL_FAILURE);
    }
}

From source file: org.apache.nifi.processors.aws.dynamodb.PutDynamoDB.java

License: Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    List<FlowFile> flowFiles = session
            .get(context.getProperty(BATCH_SIZE).evaluateAttributeExpressions().asInteger());
    if (flowFiles == null || flowFiles.size() == 0) {
        return;
    }

    Map<ItemKeys, FlowFile> keysToFlowFileMap = new HashMap<>();

    final String table = context.getProperty(TABLE).evaluateAttributeExpressions().getValue();

    final String hashKeyName = context.getProperty(HASH_KEY_NAME).evaluateAttributeExpressions().getValue();
    final String hashKeyValueType = context.getProperty(HASH_KEY_VALUE_TYPE).getValue();
    final String rangeKeyName = context.getProperty(RANGE_KEY_NAME).evaluateAttributeExpressions().getValue();
    final String rangeKeyValueType = context.getProperty(RANGE_KEY_VALUE_TYPE).getValue();
    final String jsonDocument = context.getProperty(JSON_DOCUMENT).evaluateAttributeExpressions().getValue();
    final String charset = context.getProperty(DOCUMENT_CHARSET).evaluateAttributeExpressions().getValue();

    TableWriteItems tableWriteItems = new TableWriteItems(table);

    for (FlowFile flowFile : flowFiles) {
        final Object hashKeyValue = getValue(context, HASH_KEY_VALUE_TYPE, HASH_KEY_VALUE, flowFile);
        final Object rangeKeyValue = getValue(context, RANGE_KEY_VALUE_TYPE, RANGE_KEY_VALUE, flowFile);

        if (!isHashKeyValueConsistent(hashKeyName, hashKeyValue, session, flowFile)) {
            continue;
        }

        if (!isRangeKeyValueConsistent(rangeKeyName, rangeKeyValue, session, flowFile)) {
            continue;
        }

        if (!isDataValid(flowFile, jsonDocument)) {
            flowFile = session.putAttribute(flowFile, AWS_DYNAMO_DB_ITEM_SIZE_ERROR,
                    "Max size of item + attribute should be 400kb but was "
                            + (flowFile.getSize() + jsonDocument.length()));
            session.transfer(flowFile, REL_FAILURE);
            continue;
        }

        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        session.exportTo(flowFile, baos);

        try {
            if (rangeKeyValue == null || StringUtils.isBlank(rangeKeyValue.toString())) {
                tableWriteItems.addItemToPut(new Item().withKeyComponent(hashKeyName, hashKeyValue)
                        .withJSON(jsonDocument, IOUtils.toString(baos.toByteArray(), charset)));
            } else {
                tableWriteItems.addItemToPut(new Item().withKeyComponent(hashKeyName, hashKeyValue)
                        .withKeyComponent(rangeKeyName, rangeKeyValue)
                        .withJSON(jsonDocument, IOUtils.toString(baos.toByteArray(), charset)));
            }
        } catch (IOException ioe) {
            getLogger().error("IOException while creating put item : " + ioe.getMessage());
            flowFile = session.putAttribute(flowFile, DYNAMODB_ITEM_IO_ERROR, ioe.getMessage());
            session.transfer(flowFile, REL_FAILURE);
            continue; // do not also track this FlowFile as a pending put
        }
        keysToFlowFileMap.put(new ItemKeys(hashKeyValue, rangeKeyValue), flowFile);
    }

    if (keysToFlowFileMap.isEmpty()) {
        return;
    }

    final DynamoDB dynamoDB = getDynamoDB();

    try {
        BatchWriteItemOutcome outcome = dynamoDB.batchWriteItem(tableWriteItems);

        handleUnprocessedItems(session, keysToFlowFileMap, table, hashKeyName, hashKeyValueType, rangeKeyName,
                rangeKeyValueType, outcome);

        // Handle any remaining flowfiles
        for (FlowFile flowFile : keysToFlowFileMap.values()) {
            getLogger().debug("Successful posted items to dynamodb : " + table);
            session.transfer(flowFile, REL_SUCCESS);
        }
    } catch (AmazonServiceException exception) {
        getLogger().error("Could not process flowFiles due to service exception : " + exception.getMessage());
        List<FlowFile> failedFlowFiles = processServiceException(session, flowFiles, exception);
        session.transfer(failedFlowFiles, REL_FAILURE);
    } catch (AmazonClientException exception) {
        getLogger().error("Could not process flowFiles due to client exception : " + exception.getMessage());
        List<FlowFile> failedFlowFiles = processClientException(session, flowFiles, exception);
        session.transfer(failedFlowFiles, REL_FAILURE);
    } catch (Exception exception) {
        getLogger().error("Could not process flowFiles due to exception : " + exception.getMessage());
        List<FlowFile> failedFlowFiles = processException(session, flowFiles, exception);
        session.transfer(failedFlowFiles, REL_FAILURE);
    }
}

From source file: org.apache.s4.serializer.dynamodb.EventCountAndReportPE.java

License: Apache License

public void onEvent(TopicEvent event) {
    if (firstEvent) {
        logger.info("Handling new Event [{}]", getId());
        firstEvent = false;
        firstInsert = true;
    }
    count += event.getCount();
    //        countUsedEvents++; // SB
    //        logger.info("Used Data Events counter [{}]", countUsedEvents); // SB

    if (false) { // BEGINNING OF THE BLOCK (disabled: this branch never executes)

        if (firstInsert) {

            firstInsert = false;

            try {

                // Data fusion config file:
                try {
                    //              File fusionPropsFile = new File(System.getProperty("user.home") + "/DataFusion.properties");
                    File fusionPropsFile = new File("/home/ec2-user/DataFusion.properties");
                    if (!fusionPropsFile.exists()) {

                        fusionPropsFile = new File(System.getProperty("user.home") + "/DataFusion.properties");
                        if (!fusionPropsFile.exists()) {
                            logger.error(
                                    "Cannot find Data fusion properties file in this location :[{}]. Make sure it is available at this place and includes AWS credentials (accessKey, secretKey)",
                                    fusionPropsFile.getAbsolutePath());
                        }
                    }
                    fusionProperties.load(new FileInputStream(fusionPropsFile));
                    accuracy = Double.parseDouble(fusionProperties.getProperty("accuracy"));
                    confidence = Double.parseDouble(fusionProperties.getProperty("confidence"));

                } catch (Exception e) {
                    logger.error("Cannot find Data fusion config file", e);
                }

                // Create and configure DynamoDB client
                AWSCredentials credentials = new BasicAWSCredentials(awsProperties.getProperty("accessKey"),
                        awsProperties.getProperty("secretKey"));

                AmazonDynamoDBClient dynamoDBClient = new AmazonDynamoDBClient(credentials);
                logger.info("Create DynamoDB client");
                dynamoDBClient.setEndpoint("dynamodb.eu-west-1.amazonaws.com");
                logger.info("DynamoDB client credentials are accepted and endpoint selected");

                //                try {

                // Extracted context, e.g query, activity
                String searchQueryAPI = "Test KnowledgeDiscovery API Query";
                String object = "Object detected";

                Map<String, AttributeValue> itemRT = new HashMap<String, AttributeValue>();
                Map<String, AttributeValue> itemDQ = new HashMap<String, AttributeValue>();

                Iterable<String> dataSplit = Splitter.on(' ').omitEmptyStrings().trimResults().split(getId());
                // List<String> dataList = Lists.newArrayList(Elements.getElements(dataSplit));
                // String receivedMsgs = dataList.get(dataList.size()-1);
                // countReceivedMsgs = Integer.parseInt(receivedMsgs);;

                int i = 0;
                for (String token : dataSplit) {
                    i++;
                    receivedMsgs = token;
                }
                int k = 0;
                for (String token : dataSplit) {
                    k++;
                    if (k == (i - 2)) {
                        receivedAppID = token;
                    } else if (k == (i - 1)) {
                        receivedUserID = token;
                    }
                }

                appID = Double.parseDouble(receivedAppID);
                userID = Double.parseDouble(receivedUserID);

                // STUPID HARDCODE but fast for prototype, should change to class later :)
                if (appID == 0 && userID > 0) {
                    // CV app and serialization table
                    rtEventsTableName = "TableEventVector_CV";
                    tableDataQuality = "EventVectorQuality_CV";
                    db_orig = db_base_dir + "/cv.db";
                    countReceivedMsgs_CV = Integer.parseInt(receivedMsgs) - countReceivedMsgsPrev_CV;
                    countReceivedMsgsPrev_CV = Integer.parseInt(receivedMsgs);
                    countUsedMsgs_CV++;
                    countReceivedMsgs = countReceivedMsgs_CV;
                    countUsedMsgs = countUsedMsgs_CV;
                } else if (appID == 1 && userID > 0) {
                    // NLP
                    rtEventsTableName = "TableEventVector_NLP";
                    tableDataQuality = "EventVectorSetQuality_NLP";
                    db_orig = db_base_dir + "/nlp.db";
                    countReceivedMsgs_NLP = Integer.parseInt(receivedMsgs) - countReceivedMsgsPrev_NLP;
                    countReceivedMsgsPrev_NLP = Integer.parseInt(receivedMsgs);
                    countUsedMsgs_NLP++;
                    countReceivedMsgs = countReceivedMsgs_NLP;
                    countUsedMsgs = countUsedMsgs_NLP;
                } else if (appID == 2 && userID > 0) {
                    // Audio
                    rtEventsTableName = "TableEventVector_Audio";
                    tableDataQuality = "EventVectorQuality_Audio";
                    db_orig = db_base_dir + "/audio.db";
                    countReceivedMsgs_Audio = Integer.parseInt(receivedMsgs) - countReceivedMsgsPrev_Audio;
                    countReceivedMsgsPrev_Audio = Integer.parseInt(receivedMsgs);
                    countUsedMsgs_Audio++;
                    countReceivedMsgs = countReceivedMsgs_Audio;
                    countUsedMsgs = countUsedMsgs_Audio;
                } else {
                    // all others Events available in DB
                    rtEventsTableName = "TableEventVector";
                    tableDataQuality = "EventVectorQuality";
                    countReceivedMsgs = Integer.parseInt(receivedMsgs) - countReceivedMsgsPrev;
                    countReceivedMsgsPrev = Integer.parseInt(receivedMsgs);
                    countUsedMsgs++;
                }

                try {
                    // Users database connection
                    db_conn = DriverManager.getConnection("jdbc:sqlite:" + db_orig);

                    //Actual invocation of Users DB without "rating" field
                    db_stmt = db_conn.prepareStatement(
                            "SELECT id, title, country, name, surname FROM user WHERE appID = ? AND userID = ?");
                    db_stmt.setDouble(1, appID); // bind in the SQL's order: appID first, then userID
                    db_stmt.setDouble(2, userID);
                    rs = db_stmt.executeQuery();

                    // Index updates/inserts (advance the cursor to the first row before reading)
                    rs.next();
                    String ID = rs.getString(1);
                    String location = rs.getString(2);
                    String country = rs.getString(3);
                    String name = rs.getString(4);
                    String surname = rs.getString(5);

                    // resultSet adjustment according to the Accuracy and Confidence levels
                    // (1 / number of results, multiplied by 100%); 1.0 avoids integer division
                    accuracyRT = (1.0 / rs.getFetchSize()) * 100;
                    confidence = sqrt(accuracyRT * accuracyRT + accuracy * accuracy);

                    // Collect to DynamoDB items (CandidateSet and CandidateSetQuality)

                    itemRT.put("id", new AttributeValue().withS(placesID));
                    itemRT.put("country", new AttributeValue().withS(country));
                    itemRT.put("name", new AttributeValue().withS(String.valueOf(lat)));
                    itemRT.put("surname", new AttributeValue().withS(String.valueOf(lon)));
                    itemRT.put("query", new AttributeValue().withS(searchQueryAPI));
                    itemRT.put("rating", new AttributeValue().withN(String.valueOf(count)));
                    itemRT.put("title", new AttributeValue().withS(location));
                    itemRT.put("topic", new AttributeValue().withS(getId()));
                    itemRT.put("event", new AttributeValue().withS(activity));
                    itemRT.put("ts", new AttributeValue().withS(dateFormatter.format(new Date())));

                    itemDQ.put("TimeStamp", new AttributeValue().withS(dateFormatter.format(new Date())));
                    itemDQ.put("ReceivedMsgs", new AttributeValue().withN(String.valueOf(countReceivedMsgs)));
                    itemDQ.put("UsedMsgs", new AttributeValue().withN(String.valueOf(countUsedMsgs)));
                    itemDQ.put("Accuracy", new AttributeValue().withN(String.valueOf(count)));
                    itemDQ.put("Timeliness", new AttributeValue().withS(dateFormatter.format(new Date())));
                    itemDQ.put("Completeness", new AttributeValue().withN(String.valueOf(count)));
                    itemDQ.put("Consistency", new AttributeValue().withN(String.valueOf(count)));
                    itemDQ.put("Confidence", new AttributeValue().withN(String.valueOf(count)));
                    itemDQ.put("Privacy", new AttributeValue().withS("anonymised"));

                    PutItemRequest itemRequestRT = new PutItemRequest().withTableName(rtEventsTableName)
                            .withItem(itemRT);
                    PutItemRequest itemRequestDQ = new PutItemRequest().withTableName(tableDataQuality)
                            .withItem(itemDQ);
                    dynamoDBClient.putItem(itemRequestRT);
                    dynamoDBClient.putItem(itemRequestDQ);
                    itemRT.clear();
                    itemDQ.clear();

                    logger.info("TableEvent set size [{}], last known size [{}] ", countReceivedMsgs,
                            countReceivedMsgsPrev);
                    logger.info("Wrote EventVector to DynamoDB [{}] ", rtEventsTableName);
                    logger.info("Wrote EventVector Quality measurements to DynamoDB [{}] ", tableDataQuality);

                    // Closing second "try"
                } catch (Exception e) {
                    logger.error("Error while querying the users DB or writing to DynamoDB", e);
                } finally {
                    try {
                        rs.close();
                    } catch (SQLException e) {
                        logger.error("Cannot close ResultSet", e);
                    }
                    try {
                        db_stmt.close();
                    } catch (SQLException e) {
                        logger.error("Cannot close Statement", e);
                    }
                    try {
                        db_conn.close();
                    } catch (SQLException e) {
                        logger.error("Cannot close DB file", e);
                    }
                }
                // Closing first "try"
            } catch (AmazonServiceException ase) {
                logger.error(
                        "Caught an AmazonServiceException, which means your request made it to AWS, but was rejected with an error response for some reason.");
                logger.error("Error Message: " + ase.getMessage());
                logger.error("HTTP Status Code: " + ase.getStatusCode());
                logger.error("AWS Error Code: " + ase.getErrorCode());
                logger.error("Error Type: " + ase.getErrorType());
                logger.error("Request ID: " + ase.getRequestId());

            }

        } // end of if (count == 1)

    } // END OF THE BLOCK (disabled)

}