Example usage for the com.amazonaws.auth.profile.ProfileCredentialsProvider() default constructor

Introduction

This page collects example usages of the com.amazonaws.auth.profile.ProfileCredentialsProvider() default constructor from several open source projects.

Prototype

public ProfileCredentialsProvider() 

Document

Creates a new profile credentials provider that returns the AWS security credentials configured for the default profile.
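
A minimal, self-contained sketch of this pattern (not taken from the projects listed below; it assumes the AWS SDK for Java v1 is on the classpath, a [default] profile exists in ~/.aws/credentials, and "example-bucket" is a hypothetical bucket name used only for illustration):

import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class ProfileCredentialsProviderExample {
    public static void main(String[] args) {
        // Reads the [default] profile from the credentials file at ~/.aws/credentials.
        ProfileCredentialsProvider provider = new ProfileCredentialsProvider();

        // The provider can be handed directly to any SDK v1 client constructor.
        AmazonS3 s3 = new AmazonS3Client(provider);

        // List the keys in a (hypothetical) bucket using the profile credentials.
        for (S3ObjectSummary summary : s3.listObjects("example-bucket").getObjectSummaries()) {
            System.out.println(summary.getKey());
        }
    }
}

Most of the examples below follow the same shape: construct the provider with no arguments, then hand it to a service client (AmazonS3Client, AmazonSQSClient) or place it inside a credentials provider chain.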

Usage

From source file: com.climate.oada.dao.impl.S3ResourceDAO.java

License: Open Source License

/**
 * Upload file to S3.
 *
 * @param local
 *            - local file to upload.
 * @return boolean
 */
boolean uploadS3(File local) {
    boolean retval = false;
    AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider());
    try {
        LOG.debug("Uploading a new object to S3 from local, file name " + local.getName());
        s3client.putObject(new PutObjectRequest(bucketName, keyName, local));
        retval = true;
    } catch (AmazonServiceException ase) {
        logAWSServiceException(ase);
    } catch (AmazonClientException ace) {
        logAWSClientException(ace);
    }
    return retval;
}

From source file: com.climate.oada.dao.impl.S3ResourceDAO.java

License: Open Source License

@Override
public List<FileResource> getFileUrls(Long userId, String type) {
    List<FileResource> retval = new ArrayList<FileResource>();
    long validfor = new Long(validHours).longValue() * HOURS_TO_MILLISECONDS;
    try {
        AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider());
        String prefix = userId.toString() + S3_SEPARATOR + type;

        LOG.debug("Listing objects from bucket " + bucketName + " with prefix " + prefix);

        ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName)
                .withPrefix(prefix);
        ObjectListing objectListing;
        do {
            objectListing = s3client.listObjects(listObjectsRequest);
            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                LOG.debug(" - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");

                Date expiration = new Date();
                long milliSeconds = expiration.getTime();
                milliSeconds += validfor;
                expiration.setTime(milliSeconds);

                GeneratePresignedUrlRequest generatePresignedUrlRequest = new GeneratePresignedUrlRequest(
                        bucketName, objectSummary.getKey());
                generatePresignedUrlRequest.setMethod(HttpMethod.GET);
                generatePresignedUrlRequest.setExpiration(expiration);

                FileResource res = new FileResource();
                res.setFileURL(s3client.generatePresignedUrl(generatePresignedUrlRequest));
                retval.add(res);
            }
            listObjectsRequest.setMarker(objectListing.getNextMarker());
        } while (objectListing.isTruncated());
    } catch (AmazonServiceException ase) {
        logAWSServiceException(ase);
    } catch (AmazonClientException ace) {
        logAWSClientException(ace);
    } catch (Exception e) {
        LOG.error("Unable to retrieve S3 file URLs " + e.getMessage());
    }
    return retval;
}

From source file: com.cloudhub.aws.extractor.AWSCSVExtractor.java

License: Apache License

public AWSCSVExtractor(final String bucketName, final String dataFolder) {
    this.bucketName = bucketName;
    this.dataFolder = dataFolder;
    this.s3client = new AmazonS3Client(new ProfileCredentialsProvider());
}

From source file: com.dssmp.agent.AgentAWSCredentialsProviderChain.java

License: Apache License

public AgentAWSCredentialsProviderChain(AgentConfiguration config) {
    super(new AgentAWSCredentialsProvider(config), new EnvironmentVariableCredentialsProvider(),
            new SystemPropertiesCredentialsProvider(), new ProfileCredentialsProvider(),
            new InstanceProfileCredentialsProvider());
}

From source file: com.dxc.temp.SimpleQueueServiceSample.java

License: Open Source License

public static void main(String[] args) throws Exception {

    BasicConfigurator.configure();

    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (~/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }
    System.out.println(String.format("Found AWSAccessKeyId: %s", credentials.getAWSAccessKeyId()));
    System.out.println(String.format("Found AWSAccessSecretKey: %s", credentials.getAWSSecretKey()));

    AmazonSQS sqs = new AmazonSQSClient(credentials);
    Region usEast1 = Region.getRegion(Regions.US_EAST_1);
    sqs.setRegion(usEast1);

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon SQS");
    System.out.println("===========================================\n");

    try {
        // Create a queue
        System.out.println("Creating a new SQS queue called MyQueue.\n");
        CreateQueueRequest createQueueRequest = new CreateQueueRequest("MyQueue");
        String myQueueUrl = sqs.createQueue(createQueueRequest).getQueueUrl();

        // List queues
        System.out.println("Listing all queues in your account.\n");
        for (String queueUrl : sqs.listQueues().getQueueUrls()) {
            System.out.println("  QueueUrl: " + queueUrl);
        }
        System.out.println();

        // Send a message
        System.out.println("Sending a message to MyQueue.\n");
        sqs.sendMessage(new SendMessageRequest(myQueueUrl, "Message body text"));

        // Receive messages
        System.out.println("Receiving messages from MyQueue.\n");
        ReceiveMessageRequest receiveMessageRequest = new ReceiveMessageRequest(myQueueUrl);
        List<Message> messages = sqs.receiveMessage(receiveMessageRequest).getMessages();
        for (Message message : messages) {
            System.out.println("  Message");
            System.out.println("    MessageId:     " + message.getMessageId());
            System.out.println("    ReceiptHandle: " + message.getReceiptHandle());
            System.out.println("    MD5OfBody:     " + message.getMD5OfBody());
            System.out.println("    Body:          " + message.getBody());
            for (Entry<String, String> entry : message.getAttributes().entrySet()) {
                System.out.println("  Attribute");
                System.out.println("    Name:  " + entry.getKey());
                System.out.println("    Value: " + entry.getValue());
            }
        }
        System.out.println();

        // Delete a message
        //            System.out.println("Deleting a message.\n");
        //            String messageReceiptHandle = messages.get(0).getReceiptHandle();
        //            sqs.deleteMessage(new DeleteMessageRequest(myQueueUrl, messageReceiptHandle));

        // Delete a queue
        // You must wait 60 seconds after deleting a queue before you can create another with the same name
        //            System.out.println("Deleting the test queue.\n");
        //            sqs.deleteQueue(new DeleteQueueRequest(myQueueUrl));
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon SQS, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with SQS, such as not "
                + "being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file: com.example.S3Sample02.java

License: Open Source License

public static void main(String[] args) throws IOException {

    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (~/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);

    //        AP_SOUTHEAST_2

    //       Region usWest2 = Region.getRegion(Regions.AP_SOUTHEAST_2 );
    //       s3.setRegion(usWest2);

    //        String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();

    String bucketName = "imos-test-data-1";

    String key = "MyObjectKey" + UUID.randomUUID();

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        //            System.out.println("Creating bucket " + bucketName + "\n");
        //            s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        /*
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();
        */

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        System.out.println("done\n");

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possibly since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        System.out.println("done\n");

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();
        System.out.println("done\n");

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */

        //            System.out.println("Deleting an object\n");
        //            s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        /*
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
        */

    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file: com.forerunnergames.peril.client.assets.S3AssetUpdater.java

License: Open Source License

S3AssetUpdater(final String bucketPath) {
    Arguments.checkIsNotNull(bucketPath, "bucketPath");
    Preconditions.checkIsTrue(AssetSettings.isValidS3BucketPath(bucketPath),
            AssetSettings.VALID_S3_BUCKET_PATH_DESCRIPTION);

    bucketName = AssetSettings.getS3BucketName(bucketPath);
    executorService = Executors.newSingleThreadExecutor();
    final ClientConfiguration clientConfig = new ClientConfiguration().withMaxErrorRetry(10)
            .withConnectionTimeout(10_000).withSocketTimeout(10_000).withTcpKeepAlive(true);
    final AmazonS3 s3 = new AmazonS3Client(new ProfileCredentialsProvider(), clientConfig);
    transferManager = new TransferManager(s3);
}

From source file: com.gemmystar.api.contents.S3UploadScheduledTask.java

License: Open Source License

public void uploadToS3(File uploadedFile) {

    try {
        AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider());
        LOGGER.debug("Uploading a new object to S3 from a {}", uploadedFile.getName());
        String keyName = GemmyConstant.S3_KEY_PREFIX_VIDEO + uploadedFile.getName();
        s3client.putObject(new PutObjectRequest(s3BucketName, keyName, uploadedFile));

        contentsService.saveS3Key(getContentsId(uploadedFile.getName()), keyName);

        LOGGER.debug("upload success.");

        uploadedFile.delete();

    } catch (Exception e) {
        LOGGER.error(e.toString(), e);
    }
}

From source file: com.getcake.sparkjdbc.SparkJDBCServer.java

License: Apache License

public void start(String[] args) throws Exception {
    int sparkWebMinThreads, sparkWebMaxThreads, sparkWebIdleTimeoutMillis, sparkWebPort;

    try {
        if (args == null || args.length != 1) {
            log("=== SparkJDBCServer " + CODE_VERSION + " - usage: <spark config file>");
            System.exit(-1);
        }
        sparkConfigFileName = args[0];
        log("=== SparkJDBCServer " + CODE_VERSION + " - sparkConfigFileName: " + sparkConfigFileName);

        geoController = GeoController.getInstance();
        // properties = AwsUtil.loadProperties(sparkConfigFileName);       
        properties = geoController.initWithMsSqlDao(sparkConfigFileName);
        sparkMaster = properties.getProperty("spark.master");
        appName = properties.getProperty("spark.app.name");

        sparkConf = new SparkConf();
        if ("local".equalsIgnoreCase(sparkMaster)) {
            sparkConf.setMaster(sparkMaster);
            sparkConf.setAppName(appName);
            sparkConf.set("spark.executor.memory", properties.getProperty("spark.executor.memory"));
            sparkConf.set("spark.driver.memory", properties.getProperty("spark.driver.memory"));
        }

        log("sparkMaster: " + sparkMaster);
        log("spark.executor.memory: " + sparkConf.get("spark.executor.memory"));
        log("spark.driver.memory: " + sparkConf.get("spark.driver.memory"));

        javaSparkContext = new JavaSparkContext(sparkConf);

        if ("ProfileCredentialsProvider".equalsIgnoreCase(properties.getProperty("credentialsProvider"))) {
            log("credentialsProvider: ProfileCredentialsProvider");
            credentialsProvider = new ProfileCredentialsProvider();
        } else {
            log("credentialsProvider: InstanceProfileCredentialsProvider");
            credentialsProvider = new InstanceProfileCredentialsProvider();
        }
        hadoopConf = javaSparkContext.sc().hadoopConfiguration();

        hadoopConf.set("fs.s3.awsAccessKeyId", credentialsProvider.getCredentials().getAWSAccessKeyId());
        hadoopConf.set("fs.s3.awsSecretAccessKey", credentialsProvider.getCredentials().getAWSSecretKey());
        hadoopConf.set("fs.s3.impl", "org.apache.hadoop.fs.s3native.NativeS3FileSystem");

        hadoopConf.set("fs.s3n.awsAccessKeyId", credentialsProvider.getCredentials().getAWSAccessKeyId());
        hadoopConf.set("fs.s3n.awsSecretAccessKey", credentialsProvider.getCredentials().getAWSSecretKey());
        hadoopConf.set("fs.s3n.impl", "org.apache.hadoop.fs.s3native.NativeS3FileSystem");

        hadoopConf.set("fs.s3a.awsxxAccessKeyId", credentialsProvider.getCredentials().getAWSAccessKeyId());
        hadoopConf.set("fs.s3a.awsSecretAccessKey", credentialsProvider.getCredentials().getAWSSecretKey());
        hadoopConf.set("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem");
        hadoopConf.set("fs.s3a.connection.ssl.enabled", "false");
        hadoopConf.set("fs.s3a.connection.maximum", "false");

        hiveContext = new HiveContext(javaSparkContext.sc());

        // hiveContext.sparkContext().addSparkListener(listener);
        // javaSparkContext.add

        // DataFrame citiesDF = hiveContext.read().load("C:/Projects/GeoServices/geodata/cities.csv");
        // citiesDF.registerTempTable("cities");

        // hiveContext.sql("CREATE TABLE IF NOT EXISTS cities (locationId INT, country STRING, region String, city String, latitude float, longitude float, metroCode String )");
        // hiveContext.sql("LOAD DATA LOCAL INPATH 'C:/Projects/GeoServices/geodata/cities.csv' INTO TABLE cities");
        // hiveContext.sql("CREATE TABLE IF NOT EXISTS cities (country STRING)");
        // hiveContext.sql("LOAD DATA LOCAL INPATH 'C:/Projects/GeoServices/geodata/cities-sample.csv' INTO TABLE cities");
        // log ("HiveThriftServer2.startWithContext loaded table cities");
        // HiveThriftServer2.listener_$eq(arg0);

        topLevelDataPath = properties.getProperty("topLevelDataPath");
        region = properties.getProperty("region");
        if (region != null) {
            topLevelDataPath += "-" + region;
        }

        initGeoData(topLevelDataPath);

        HiveThriftServer2.startWithContext(hiveContext);
        log("=== Spark JDBC Server started");

        sparkWebMinThreads = Integer.parseInt(properties.getProperty("sparkWebMinThreads"));
        sparkWebMaxThreads = Integer.parseInt(properties.getProperty("sparkWebMaxThreads"));
        sparkWebIdleTimeoutMillis = Integer.parseInt(properties.getProperty("sparkWebIdleTimeoutMillis"));
        sparkWebPort = Integer.parseInt(properties.getProperty("sparkWebPort"));
        Spark.port(sparkWebPort);
        Spark.threadPool(sparkWebMaxThreads, sparkWebMinThreads, sparkWebIdleTimeoutMillis);

        get("/geoservices/status", (request, response) -> {
            return "Spark JDBC Server Working";
        });

        post("/sparksql/geosourcefilepath", (request, response) -> {
            geoSourceFilePath = request.queryParams("geosourcefilepath");
            return "geoSourceFilePath set to " + geoSourceFilePath;
        });

        get("/sparksql/geosourcefilepath", (request, response) -> {
            return geoSourceFilePath;
        });

        post("/sparksql/geodataversion", (request, response) -> {
            geoDataVersion = request.queryParams("geodataversion");
            return geoDataVersion;
        });

        get("/sparksql/geodataversion", (request, response) -> {
            return geoDataVersion;
        });

        post("/sparksql/geodata", (request, response) -> {
            try {
                geoDataVersion = request.queryParams("geodataversion");
                return loadGeoDataByVersion(geoDataVersion);
            } catch (Throwable exc) {
                response.status(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
                exc.printStackTrace();
                log("/sparksql/geodata", exc);
                return exc.getLocalizedMessage();
            }
        });

        delete("/sparksql/geodata", (request, response) -> {
            try {
                geoDataVersion = request.queryParams("geodataversion");
                return unloadGeoDataByVersion(geoDataVersion);
            } catch (Throwable exc) {
                response.status(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
                exc.printStackTrace();
                log("/sparksql/geodata", exc);
                return exc.getLocalizedMessage();
            }
        });

        get("/sparksql/geodata", (request, response) -> {
            return geoDataVersion;
        });

        post("/sparksql/geotable", (request, response) -> {
            String tableName = null, fullPathTableName = null, respMsg;

            try {
                tableName = request.queryParams("tablename");
                fullPathTableName = geoSourceFilePath + geoDataVersion + "/" + tableName + geoDataVersion
                        + ".csv";
                return loadTable(tableName + geoDataVersion, fullPathTableName);
            } catch (Throwable exc) {
                response.status(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
                respMsg = "error loading table: " + fullPathTableName + " - err:" + exc.getLocalizedMessage();
                exc.printStackTrace();
                log("/sparksql/loadtable", exc);
                return respMsg;
            }
        });

        // new 
        post("/sparksql/table", (request, response) -> {
            String tableName = null, fullPathTableName = null, respMsg, fileName, metaFileName, fileListName;
            Boolean tmpheaderInCSVFileFlag = headerInCSVFileFlag;

            try {
                tableName = request.queryParams("tablename");
                metaFileName = request.queryParams("metafilename");
                fileListName = request.queryParams("filelistname");

                fileName = request.queryParams("filename");
                /* headerInCSVFileStr = request.queryParams("headerincsvfile");
                if (headerInCSVFileStr != null) {
                   headerInCSVFileFlag = Boolean.parseBoolean(headerInCSVFileStr);
                } */
                fullPathTableName = geoSourceFilePath + "/" + fileName;
                metaFileName = geoSourceFilePath + "/" + metaFileName;
                return loadFilesWithMeta(tableName, fullPathTableName, metaFileName, fileListName);
            } catch (Throwable exc) {
                response.status(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
                respMsg = "error loading table: " + fullPathTableName + " - err:" + exc.getLocalizedMessage();
                exc.printStackTrace();
                log("/sparksql/loadtable", exc);
                return respMsg;
            } finally {
                // headerInCSVFileFlag = tmpheaderInCSVFileFlag;
            }
        });

        delete("/sparksql/table", (request, response) -> {
            String tableName = null, fullPathTableName = "N/A", respMsg, fileName;

            try {
                tableName = request.queryParams("tablename");
                return unloadTable(tableName, fullPathTableName);
            } catch (Throwable exc) {
                response.status(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
                respMsg = "error loading table: " + fullPathTableName + " - err:" + exc.getLocalizedMessage();
                exc.printStackTrace();
                log("/sparksql/loadtable", exc);
                return respMsg;
            }
        });

        post("/sparksql/mssqldata", (request, response) -> {
            StringBuilder respMsg;

            try {
                respMsg = new StringBuilder();
                geoDataVersion = request.queryParams("geodataversion");
                respMsg.append(geoController.exportMsSqlGeoData(geoDataVersion));
                respMsg.append(System.getProperty("line.separator"));
                return respMsg.append(loadGeoDataByVersion(geoDataVersion));
            } catch (Throwable exc) {
                response.status(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
                exc.printStackTrace();
                log("/sparksql/geodata", exc);
                return exc.getLocalizedMessage();
            }
        });

        post("/sparksql/mssqlversioncheck", (request, response) -> {
            StringBuilder respMsg;
            MsSqlExportCheckResp msSqlExportCheckResp;
            ObjectMapper jsonMapper;

            try {
                jsonMapper = new ObjectMapper();
                respMsg = new StringBuilder();
                msSqlExportCheckResp = geoController.exportMsSqlGeoData();
                if (msSqlExportCheckResp.newIpVersion == null
                        || msSqlExportCheckResp.newIpVersion.trim().length() == 0) {
                    return jsonMapper.writeValueAsString(msSqlExportCheckResp);
                }
                respMsg.append(msSqlExportCheckResp.detailMsg);
                respMsg.append(System.getProperty("line.separator"));
                respMsg.append(loadGeoDataByVersion(msSqlExportCheckResp.newIpVersion));
                msSqlExportCheckResp.detailMsg = respMsg.toString();
                return jsonMapper.writeValueAsString(msSqlExportCheckResp);
            } catch (Throwable exc) {
                response.status(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
                log("", exc);
                return exc.getLocalizedMessage();
            }
        });

        log("=== Spark JDBC Web Services started");
    } catch (Throwable exc) {
        log("main", exc);
    }
}

From source file: com.github.scizeron.logback.appender.SqsAppender.java

License: Apache License

/**
 * @return the credentials provider chain used by this appender
 */
private AWSCredentialsProvider getCredentials() {
    return new AWSCredentialsProviderChain(new StaticCredentialsProvider(new AppenderCredentials()),
            new SystemPropertiesCredentialsProvider(), new EnvironmentVariableCredentialsProvider(),
            new ProfileCredentialsProvider(), new InstanceProfileCredentialsProvider());
}