Example usage for com.amazonaws AmazonServiceException getMessage

List of usage examples for com.amazonaws AmazonServiceException getMessage

Introduction

On this page you can find example usage for com.amazonaws AmazonServiceException getMessage.

Prototype

@Override
public String getMessage()
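
Before the sourced examples below, here is a minimal sketch of the typical pattern: wrap an AWS SDK call in a try/catch and log getMessage() along with the other error details. The S3 client and bucket name are placeholders chosen for illustration, not taken from any of the source files on this page.

import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;

public class GetMessageSketch {
    public static void main(String[] args) {
        // Placeholder client and bucket; configure credentials and region as needed.
        AmazonS3 s3 = new AmazonS3Client();
        try {
            s3.listObjects("example-bucket");
        } catch (AmazonServiceException ase) {
            // The request reached the service but was rejected;
            // getMessage() returns the human-readable error text.
            System.out.println("Error Message:    " + ase.getMessage());
            System.out.println("HTTP Status Code: " + ase.getStatusCode());
            System.out.println("AWS Error Code:   " + ase.getErrorCode());
        } catch (AmazonClientException ace) {
            // The client could not communicate with the service at all.
            System.out.println("Error Message: " + ace.getMessage());
        }
    }
}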

Usage

From source file:com.transcend.rds.worker.CreateDBInstanceReadReplicaActionWorker.java

/**
 * createDBInstanceReadReplica *******************************************
 * Creates a read replica for a named master database instance. All of the
 * characteristics of the read replica default to the characteristics of
 * the master instance, with the exception of InstanceClass,
 * AvailabilityZone, Port and AutoMinorVersionUpgrade, which can be provided
 * by the user.
 * Request: SourceDBInstanceIdentifier (R), DBInstanceIdentifier for the
 * read replica (R), DBInstanceClass, AvailabilityZone, Port,
 * AutoMinorVersionUpgrade
 * Response: Full details of the new DBInstance created
 * Exceptions: DBInstanceAlreadyExists, DBInstanceNotFound,
 * DBParameterGroupNotFound, DBSecurityGroupNotFound, InstanceQuotaExceeded,
 * InsufficientDBInstanceCapacity, InvalidDBInstanceState,
 * StorageQuotaExceeded
 * Processing:
 * 1. Confirm that the source DBInstance exists
 * 2. Determine that the requested DBInstance replica doesn't already exist
 *    for that user
 * 3. Confirm quotas have not been exceeded (instance, availabilityZone,
 *    storage)
 * 4. Validate and insert the DBInstance and associated records
 * 5. Call the instance manager to provision the read replica
 * 6. Return a response giving details of the newly created replica
 *    instance, including its endpoint.
 */
@Override
protected CreateDBInstanceReadReplicaActionResultMessage doWork0(
        CreateDBInstanceReadReplicaActionRequestMessage req, ServiceRequestContext context) throws Exception {
    final Session sess = HibernateUtil.newSession();
    DBInstance dbInst = null;

    try {
        sess.beginTransaction();
        final AccountBean ac = context.getAccountBean();
        final long userId = ac.getId();
        final boolean autoUpgrade = req.getAutoMinorVersionUpgrade();
        String avZone = req.getAvailabilityZone();
        String DBInstanceClass = req.getDbInstanceClass();
        final String DBInstanceId = req.getDbInstanceIdentifier();
        int port = req.getPort();
        final String sourceDBInstanceId = req.getSourceDBInstanceIdentifier();

        if (sourceDBInstanceId == null || "".equals(sourceDBInstanceId)) {
            throw QueryFaults.MissingParameter(
                    "SourceDBInstanceIdentifier must be supplied for CreateDBInstanceReadReplica request.");
        }
        if (DBInstanceId == null || "".equals(DBInstanceId)) {
            throw QueryFaults.MissingParameter(
                    "DBInstanceIdentifier must be supplied for CreateDBInstanceReadReplica request.");
        }

        final RdsDbinstance source = InstanceEntity.selectDBInstance(sess, sourceDBInstanceId, userId);
        if (source == null) {
            throw RDSQueryFaults.DBInstanceNotFound(sourceDBInstanceId + " does not exist.");
        }
        if (!source.getDbinstanceStatus().equals(RDSUtilities.STATUS_AVAILABLE)) {
            throw RDSQueryFaults.InvalidDBInstanceState();
        }
        if (port == -1) {
            logger.debug("request did not include port; port value is set with " + source.getPort()
                    + " from the source DBInstance.");
            port = source.getPort();
        }
        if (DBInstanceClass == null || "".equals(DBInstanceClass)) {
            logger.debug("request did not include DBInstanceClass; DBInstanceClass value is set with "
                    + source.getDbinstanceClass() + " from the source DBInstance.");
            DBInstanceClass = source.getDbinstanceClass();
        }
        if (avZone == null || "".equals(avZone)) {
            logger.debug(
                    "AvailabilityZone is not included in the request; it is set to the default zone of the account: "
                            + ac.getDefZone());
            avZone = ac.getDefZone();
        }

        logger.debug("Preparing the request for CreateDBInstance");
        final CreateDBInstanceActionRequestMessage.Builder createDBInstanceReq = CreateDBInstanceActionRequestMessage
                .newBuilder();
        createDBInstanceReq.setAllocatedStorage(source.getAllocatedStorage());
        createDBInstanceReq.setAutoMinorVersionUpgrade(autoUpgrade);
        createDBInstanceReq.setAvailabilityZone(avZone);
        createDBInstanceReq.setBackupRetentionPeriod(source.getBackupRetentionPeriod());
        createDBInstanceReq.setDbInstanceClass(DBInstanceClass);
        createDBInstanceReq.setDbInstanceIdentifier(DBInstanceId);
        createDBInstanceReq.setDbParameterGroupName(source.getDbParameterGroup());
        final List<RdsDbsecurityGroup> dbSecGrps = source.getSecurityGroups();
        final LinkedList<String> dbSecGrpNames = new LinkedList<String>();
        for (final RdsDbsecurityGroup secGrp : dbSecGrps) {
            dbSecGrpNames.add(secGrp.getDbsecurityGroupName());
        }
        createDBInstanceReq.addAllDbSecurityGroups(dbSecGrpNames);
        createDBInstanceReq.setEngine(source.getEngine());
        createDBInstanceReq.setEngineVersion(source.getEngineVersion());
        createDBInstanceReq.setLicenseModel(source.getLicenseModel());
        createDBInstanceReq.setMasterUsername(source.getMasterUsername());
        createDBInstanceReq.setMasterUserPassword(source.getMasterUserPassword());
        createDBInstanceReq.setMultiAZ(false);
        createDBInstanceReq.setPort(port);
        createDBInstanceReq.setPreferredBackupWindow(source.getPreferredBackupWindow());
        createDBInstanceReq.setPreferredMaintenanceWindow(source.getPreferredMaintenanceWindow());
        logger.debug("Request: " + createDBInstanceReq.toString());

        logger.debug("Calling CreateDBInstance...");
        final CreateDBInstanceActionWorker createAction = new CreateDBInstanceActionWorker();
        dbInst = createAction.createDBInstance(createDBInstanceReq.buildPartial(), context, true);

        logger.debug("Adding another authorization to the underlying ec2 security group");
        final String internal = "rds-" + ac.getId() + "-" + source.getDbinstanceId() + "-" + source.getPort();
        final List<RdsDbsecurityGroup> secGrps = SecurityGroupEntity.selectAllSecurityGroups(sess, internal,
                ac.getId(), null, 0);
        if (secGrps.size() != 1) {
            throw RDSQueryFaults.InternalFailure();
        }

        final String rds_host = Appctx.getBean("internalServiceIp");

        final String RdsServerCidrip = rds_host + "/32";
        final RdsDbsecurityGroup masterSecGrp = secGrps.get(0);
        final List<RdsIPRangeBean> ips = masterSecGrp.getIPRange(sess);
        boolean authorized = false;
        for (final RdsIPRangeBean ip : ips) {
            if (ip.getCidrip().equals(RdsServerCidrip)) {
                authorized = true;
                logger.debug("Authorization already exists for " + RdsServerCidrip);
            }
        }
        final int port0 = source.getPort();
        if (!authorized) {
            logger.debug("Authorizing ingress for " + RdsServerCidrip + " to access the source DBInstance.");
            final CallStruct callEc2SecGrp = new CallStruct();
            callEc2SecGrp.setAc(AccountUtil.toAccount(ac));
            callEc2SecGrp.setCtx(new TemplateContext(null));
            callEc2SecGrp.setName(internal);
            callEc2SecGrp.setStackId("rds." + ac.getId() + "." + sourceDBInstanceId);
            final Map<String, Object> props = new HashMap<String, Object>();
            props.put(Constants.AVAILABILITYZONE, ac.getDefZone());
            props.put(Constants.GROUPNAME, internal);
            props.put(Constants.CIDRIP, RdsServerCidrip);
            props.put(Constants.SOURCESECURITYGROUPNAME, null);
            // SourceSecurityGroupOwnerId is not required
            props.put(Constants.SOURCESECURITYGROUPOWNERID, null);

            // hardcoded values below
            props.put(Constants.FROMPORT, port0);
            props.put(Constants.TOPORT, port0);
            props.put(Constants.IPPROTOCOL, Constants.DEFAULT_RDS_PROTOCOL);

            callEc2SecGrp.setProperties(props);
            callEc2SecGrp.setType(SecurityGroupIngress.TYPE);
            final SecurityGroupIngress provider0 = new SecurityGroupIngress();
            try {
                provider0.create(callEc2SecGrp);
            } catch (final AmazonServiceException e) {
                logger.debug(e.getMessage());
            } catch (final AmazonClientException e) {
                logger.debug(e.getMessage());
            }

            final RdsIPRangeBean newAuth = new RdsIPRangeBean(masterSecGrp.getId(), RdsServerCidrip);
            ips.add(newAuth);
            sess.save(newAuth);
        }

        // modify the source DBInstance's status and commit the transaction
        source.setDbinstanceStatus(RDSUtilities.STATUS_MODIFYING);
        source.getReplicas().add(DBInstanceId);
        sess.save(source);
        sess.getTransaction().commit();

        final Connection master = getConnection("root", source.getMasterUserPassword(),
                source.getEngine().toLowerCase(), source.getAddress(), source.getPort());
        logger.debug("Checking to see if the source DBInstance has RDS replication user already...");
        final String checkPermission = "SELECT User from mysql.user";
        final Statement check = master.createStatement();
        final ResultSet existingGrant = check.executeQuery(checkPermission);
        boolean exist = false;
        while (existingGrant.next()) {
            final String user = existingGrant.getString("User");
            logger.debug("User: " + user);
            if (user.equals(RDS_Constants.RDS_REPLICATION_USER)) {
                exist = true;
            }
        }

        // create a new user and grant replication privilege
        if (!exist) {
            logger.debug("Replicaion user for RDS does not exist; creating a replication user...");
            final String grantPermission = "GRANT REPLICATION SLAVE ON *.* TO \'"
                    + RDS_Constants.RDS_REPLICATION_USER + "\'@\'%\' IDENTIFIED BY \'"
                    + RDS_Constants.RDS_REPLICATION_PASSWORD + "\'";
            final PreparedStatement grant = master.prepareStatement(grantPermission);
            grant.execute();
        }

        logger.debug("Flushing tables with read lock on the source DBInstance...");
        final String flushTables = "FLUSH TABLES WITH READ LOCK";
        final Statement flushTablesAndLock = master.createStatement();
        flushTablesAndLock.execute(flushTables);

        logger.debug("Getting the master status");
        final String getMasterStatus = "SHOW MASTER STATUS";
        final Statement queryMasterStatus = master.createStatement();
        final ResultSet masterStatus = queryMasterStatus.executeQuery(getMasterStatus);
        String masterFile = null;
        int position = -1;
        while (masterStatus.next()) {
            masterFile = masterStatus.getString("File");
            position = masterStatus.getInt("Position");
            // ignore Binlog_Do_DB and Binlog_Ignore_DB for now
        }
        logger.debug("Master file is " + masterFile + " and the position is set at " + position);
        if (masterFile == null || position == -1) {
            throw RDSQueryFaults.InternalFailure("Master status could not be retrieved from the source DBInstance.");
        }

        logger.debug("Unlocking the tables...");
        final String unlockTables = "UNLOCK TABLES";
        final Statement unlock = master.createStatement();
        unlock.execute(unlockTables);
        logger.debug("Successfully unlocked the tables.");

        logger.debug("Close the connection to the source DBInstance.");
        master.close();

        logger.debug("Updating the databag to run the replication_server.rb recipe");
        final String task = "mysqldump";
        final String target = "?";
        final String databagName = "rds-" + ac.getId() + "-" + source.getDbinstanceId();
        final String replication_item = "{\"Task\":\"" + task + "\", " + "\"TargetHostname\":\"" + target
                + "\"}";
        ChefUtil.createDatabagItem(databagName, "Replication");
        ChefUtil.putDatabagItem(databagName, "Replication", replication_item);

        logger.debug(
                "Starting a new thread to wait for read replica to spin up while returning the response message.");
        final String DBInstanceId0 = DBInstanceId;
        final String sourceDBInstanceId0 = sourceDBInstanceId;
        final String avZone0 = avZone;
        final String masterFile0 = masterFile;
        final int position0 = position;
        final int port1 = port;
        final Executable r = new ExecutorHelper.Executable() {
            @Override
            public void run() {
                HibernateUtil.withNewSession(new HibernateUtil.Operation<Object>() {
                    @Override
                    public Object ex(final Session s, final Object... as) throws Exception {
                        replicationHelper(s, ac, DBInstanceId0, sourceDBInstanceId0, avZone0, port0, port1,
                                masterFile0, position0);
                        return null;
                    }
                });
            }
        };
        ExecutorHelper.execute(r);

    } catch (final ErrorResponse rde) {
        sess.getTransaction().rollback();
        throw rde;
    } catch (final Exception e) {
        e.printStackTrace();
        sess.getTransaction().rollback();
        final String msg = "CreateDBInstanceReadReplica: Class: " + e.getClass() + "Msg:" + e.getMessage();
        logger.error(msg);
        throw RDSQueryFaults.InternalFailure();
    } finally {
        sess.close();
    }
    CreateDBInstanceReadReplicaActionResultMessage.Builder resp = CreateDBInstanceReadReplicaActionResultMessage
            .newBuilder();
    resp.setDbInstance(dbInst);
    return resp.buildPartial();
}

From source file:com.transcend.rds.worker.CreateDBInstanceReadReplicaActionWorker.java

private void replicationHelper(Session sess, final AccountBean ac, final String dbInstanceId,
        final String sourceDBInstanceId, final String avZone, final int masterPort, final int replicaPort,
        final String masterFile, final int position) throws Exception {
    logger.debug("Locking before accessing the critical database table...");
    String address = null;
    boolean wait = true;
    int count = 0;
    while (wait) {
        logger.debug("Waiting for Resource(s) to be created... " + count + "th trial!");
        Thread.sleep(30000);
        logger.debug("Renewing the session...");
        sess = HibernateUtil.newSession();
        sess.beginTransaction();
        final RdsDbinstance inst = InstanceEntity.selectDBInstance(sess, dbInstanceId, ac.getId());
        String stat = null;
        if (inst != null) {
            stat = inst.getDbinstanceStatus();
        }
        logger.debug("DBInstance existence: " + inst + "; DBInstanceStatus: " + stat);
        if (inst != null && stat.equals("restoring")) {
            inst.setDbinstanceStatus(RDSUtilities.STATUS_MODIFYING);
            inst.setRead_only(true);
            inst.setSourceDbinstanceId(sourceDBInstanceId);
            address = inst.getAddress();
            sess.save(inst);
            sess.getTransaction().commit();
            sess.close();
            wait = false;
            break;
        }
        sess.close();
        ++count;
    }
    logger.debug("Unlocking after accessing the critical database table...");

    sess = HibernateUtil.newSession();
    sess.beginTransaction();
    if (address == null) {
        throw RDSQueryFaults.InternalFailure();
    }

    // modify the ec2 security group of master DBInstance to grant access to
    // CIDRIP of slave DBInstance's IP/32
    logger.debug("Adding another authorization to the underlying ec2 security group");
    final String internal = "rds-" + ac.getId() + "-" + sourceDBInstanceId + "-" + masterPort;
    List<RdsDbsecurityGroup> secGrps = SecurityGroupEntity.selectAllSecurityGroups(sess, internal, ac.getId(),
            null, 0);
    if (secGrps.size() != 1) {
        throw RDSQueryFaults.InternalFailure();
    }
    final RdsDbsecurityGroup masterSecGrp = secGrps.get(0);
    List<RdsIPRangeBean> ips = masterSecGrp.getIPRange(sess);
    final String slaveCidrip = address + "/32";
    final boolean authorized = false;
    if (!authorized) {
        logger.debug("Authorizing ingress for " + slaveCidrip + " to access the source DBInstance.");
        final CallStruct callEc2SecGrp = new CallStruct();
        callEc2SecGrp.setAc(AccountUtil.toAccount(ac));
        callEc2SecGrp.setCtx(new TemplateContext(null));
        callEc2SecGrp.setName(internal);
        callEc2SecGrp.setStackId("rds." + ac.getId() + "." + sourceDBInstanceId);
        final Map<String, Object> props = new HashMap<String, Object>();
        props.put(Constants.AVAILABILITYZONE, ac.getDefZone());
        props.put(Constants.GROUPNAME, internal);
        props.put(Constants.CIDRIP, slaveCidrip);
        props.put(Constants.SOURCESECURITYGROUPNAME, null);
        // SourceSecurityGroupOwnerId is not required
        props.put(Constants.SOURCESECURITYGROUPOWNERID, null);
        // hardcoded values below
        props.put(Constants.FROMPORT, masterPort);
        props.put(Constants.TOPORT, masterPort);
        props.put(Constants.IPPROTOCOL, Constants.DEFAULT_RDS_PROTOCOL);

        callEc2SecGrp.setProperties(props);
        callEc2SecGrp.setType(SecurityGroupIngress.TYPE);
        final SecurityGroupIngress provider0 = new SecurityGroupIngress();
        try {
            provider0.create(callEc2SecGrp);
        } catch (final AmazonServiceException e) {
            logger.debug(e.getMessage());
        } catch (final AmazonClientException e) {
            logger.debug(e.getMessage());
        }

        final RdsIPRangeBean newAuth = new RdsIPRangeBean(masterSecGrp.getId(), slaveCidrip);
        ips.add(newAuth);
        sess.save(newAuth);
    }

    final String replicaInternal = "rds-" + ac.getId() + "-" + dbInstanceId + "-" + replicaPort;
    secGrps = SecurityGroupEntity.selectAllSecurityGroups(sess, replicaInternal, ac.getId(), null, 0);
    if (secGrps.size() != 1) {
        throw RDSQueryFaults.InternalFailure();
    }
    final RdsDbsecurityGroup slaveSecGrp = secGrps.get(0);
    final String rds_host = Appctx.getBean("internalServiceIp");
    final String RdsServerCidrip = rds_host + "/32";
    logger.debug("Authorizing ingress for " + RdsServerCidrip + " to access the read replica DBInstance.");
    final CallStruct callEc2SecGrp = new CallStruct();
    callEc2SecGrp.setAc(AccountUtil.toAccount(ac));
    callEc2SecGrp.setCtx(new TemplateContext(null));
    callEc2SecGrp.setName(replicaInternal);
    callEc2SecGrp.setStackId("rds." + ac.getId() + "." + dbInstanceId);
    final Map<String, Object> props = new HashMap<String, Object>();
    props.put(Constants.AVAILABILITYZONE, ac.getDefZone());
    props.put(Constants.GROUPNAME, replicaInternal);
    props.put(Constants.CIDRIP, RdsServerCidrip);
    props.put(Constants.SOURCESECURITYGROUPNAME, null);
    // SourceSecurityGroupOwnerId is not required
    props.put(Constants.SOURCESECURITYGROUPOWNERID, null);

    // hardcoded values below
    props.put(Constants.FROMPORT, replicaPort);
    props.put(Constants.TOPORT, replicaPort);
    props.put(Constants.IPPROTOCOL, Constants.DEFAULT_RDS_PROTOCOL);

    callEc2SecGrp.setProperties(props);
    callEc2SecGrp.setType(SecurityGroupIngress.TYPE);
    final SecurityGroupIngress provider0 = new SecurityGroupIngress();
    try {
        provider0.create(callEc2SecGrp);
    } catch (final AmazonServiceException e) {
        logger.debug(e.getMessage());
    } catch (final AmazonClientException e) {
        logger.debug(e.getMessage());
    }

    final RdsIPRangeBean newAuth = new RdsIPRangeBean(slaveSecGrp.getId(), RdsServerCidrip);
    ips = slaveSecGrp.getIPRange(sess);
    ips.add(newAuth);
    sess.save(newAuth);

    logger.debug("Copy the mysqldump file from the source DBInstance to the DBInstanceReadReplica");
    String key_dir = (String) ConfigurationUtil.getConfiguration(Arrays.asList(new String[] { "KEYS_DIR" }));
    final String key = ac.getDefKeyName() + ".pem";
    if (key_dir.charAt(key_dir.length() - 1) != '/') {
        key_dir += '/';
    }
    final String key_path = key_dir + key;
    logger.debug("Account's ec2 key is at: " + key_path);

    final RdsDbinstance source = InstanceEntity.selectDBInstance(sess, sourceDBInstanceId, ac.getId());
    final String masterHostname = source.getAddress();
    final String slaveHostname = address;

    logger.debug("check if mysqldump is completed or not before trying to scp the dump;"
            + " databagitem Replication should be modified to have Task = mysqldump_complete");
    String databagName = "rds-" + ac.getId() + "-" + source.getDbinstanceId();
    String databagItem = ChefUtil.getDatabagItem(databagName, "Replication");
    JsonNode replicationItem = JsonUtil.load(databagItem);
    JsonNode task = replicationItem.get("Task");
    String taskValue = task.getTextValue();

    boolean dumpReady = false;
    while (!dumpReady) {
        databagItem = ChefUtil.getDatabagItem(databagName, "Replication");
        replicationItem = JsonUtil.load(databagItem);
        task = replicationItem.get("Task");
        taskValue = task.getTextValue();
        if (taskValue.equals("mysqldump_complete")) {
            dumpReady = true;
        }
    }
    logger.debug("Replication databag item: " + replicationItem.toString() + " and Task = " + taskValue);

    // scp the key file into source DBInstance
    final String scpCommand = "scp -o StrictHostKeyChecking=no -i " + key_path + " " + key_path + " root@"
            + masterHostname + ":/root";
    logger.debug("SCP command is: " + scpCommand);
    Runtime.getRuntime().exec(scpCommand);
    logger.debug("Scp'ed the client key to the source DBInstance.");

    // set the flag for scp and wait till this process is completed by chef
    String replication_item = "{\"Task\":\"" + "scp" + "\", " + "\"TargetHostname\":\"" + slaveHostname + "\", "
            + "\"Key\":\"" + key + "\"" + "}";
    ChefUtil.createDatabagItem(databagName, "Replication", replication_item);

    boolean scpDone = false;
    while (!scpDone) {
        databagItem = ChefUtil.getDatabagItem(databagName, "Replication");
        replicationItem = JsonUtil.load(databagItem);
        task = replicationItem.get("Task");
        taskValue = task.getTextValue();
        if (taskValue.equals("scp_complete")) {
            scpDone = true;
        }
    }
    logger.debug("Replication databag item: " + replicationItem.toString() + " and Task = " + taskValue);
    ChefUtil.deleteDatabagItem(databagName, "Replication");

    // apply the mysqldump to the read replica DBInstance, then change the
    // master; restart mysql server on read replica
    databagName = "rds-" + ac.getId() + "-" + dbInstanceId;
    replication_item = "{\"Task\":\"slave\"}";
    ChefUtil.createDatabagItem(databagName, "Replication", replication_item);

    boolean restored = false;
    while (!restored) {
        databagItem = ChefUtil.getDatabagItem(databagName, "Replication");
        replicationItem = JsonUtil.load(databagItem);
        task = replicationItem.get("Task");
        taskValue = task.getTextValue();
        if (taskValue.equals("slave_complete")) {
            restored = true;
        }
    }
    logger.debug("Replication databag item: " + replicationItem.toString() + " and Task = " + taskValue);

    final Connection slave = getConnection("root", source.getMasterUserPassword(),
            source.getEngine().toLowerCase(), slaveHostname, replicaPort);
    final Statement changeMaster = slave.createStatement();
    final String change = "CHANGE MASTER TO MASTER_HOST=\'" + masterHostname + "\', MASTER_USER=\'"
            + RDS_Constants.RDS_REPLICATION_USER + "\', MASTER_PASSWORD=\'"
            + RDS_Constants.RDS_REPLICATION_PASSWORD + "\', MASTER_LOG_FILE=\'" + masterFile
            + "\', MASTER_LOG_POS=" + position;
    changeMaster.execute(change);
    logger.debug("Modified the master of the read replica to the source DBInstance.");

    final Statement startSlave = slave.createStatement();
    startSlave.execute("START SLAVE");
    final Statement flushTableRL = slave.createStatement();
    flushTableRL.execute("FLUSH TABLES WITH READ LOCK");
    final Statement setReadOnly = slave.createStatement();
    setReadOnly.execute("SET GLOBAL read_only = ON");
    logger.debug("Slave service started and configured for read_only.");

    logger.debug("Closing the connection to the replica DBInstance.");
    slave.close();

    ChefUtil.deleteDatabagItem(databagName, "Replication");

    logger.debug("Modifying the DBInstanceStatus for both DBInstances");
    final RdsDbinstance inst = InstanceEntity.selectDBInstance(sess, sourceDBInstanceId, ac.getId());
    inst.setDbinstanceStatus(RDSUtilities.STATUS_AVAILABLE);
    final RdsDbinstance inst2 = InstanceEntity.selectDBInstance(sess, dbInstanceId, ac.getId());
    inst2.setDbinstanceStatus(RDSUtilities.STATUS_AVAILABLE);
    sess.save(inst);
    sess.save(inst2);

    sess.getTransaction().commit();
    sess.close();
    logger.debug("CreateDBInstanceReadReplica completed successfully.");

}

From source file:com.tweettrends.pravar.FilterStreamExample.java

License:Apache License

public static void main(String[] args) {
    final SimpleQueueService simpleQueueService = new SimpleQueueService();
    final List<Future<String>> results = new ArrayList<Future<String>>();
    final HashMap<Future<String>, Message> map = new HashMap<Future<String>, Message>(100);
    SimpleNotificationService simpleNotificationService = new SimpleNotificationService();
    simpleNotificationService.subscribeToTopic();

    try {
        //simpleQueueService.createQueue();
        simpleQueueService.listQueues();
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon SQS, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with SQS, such as not "
                + "being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }

    new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                FilterStreamExample.run("9sB7Y7zyxFTgEpk87ZwuZMFZR",
                        "TPvVJJ09FhQeduDR10xJw8t5LJ4i75uu6GYQefVtHt7ebUTgZi",
                        "840399362987560960-MTKPBj2U67boTVP4ug6LWiUdvksF0gO",
                        "adanfdOhMgPmil1TsWpD1vKvfdY6ErRVX2xCqPS6NgaEF", simpleQueueService);
            } catch (InterruptedException e) {
                System.out.println(e);
            }
        }
    }).start();

    ExecutorService pool = Executors.newCachedThreadPool();

    Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            System.out.println("Entered runnable");
            while (true) {
                Iterator<Future<String>> iterator = results.iterator();

                try {
                    sharedSemaphore.acquire();
                    while (iterator.hasNext()) {
                        Future<String> result = iterator.next();
                        String sentiment = result.get();
                        Message message = map.get(result);
                        // send sentiment and message to SNS
                        System.out.println("Notifying SNS");
                        //simpleNotificationService.publishToTopic(message.getBody());
                        iterator.remove();
                    }
                } catch (InterruptedException e) {
                    e.printStackTrace();
                } catch (ExecutionException e) {
                    e.printStackTrace();
                } finally {
                    sharedSemaphore.release();
                }

            }
        }
    });
    thread.start();

    while (true) {
        List<Message> messages = simpleQueueService.receiveMessages();
        if (messages != null) {
            System.out.println("Received Messages!");

            for (Message msg : messages) {
                System.out.println("Message body : " + msg.getBody());
            }
            simpleQueueService.deleteMessages(messages);

            for (Message message : messages) {
                Worker worker = new Worker(message);
                Future<String> result = pool.submit(worker);

                try {
                    sharedSemaphore.acquire();
                    results.add(result);
                    map.put(result, message);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                } finally {
                    sharedSemaphore.release();
                }

            }
        }
    }

}

From source file:com.ub.ml.S3Sample.java

License:Open Source License

public static void main(String[] args) throws IOException {
    /*
     * Create your credentials file at ~/.aws/credentials (C:\Users\USER_NAME\.aws\credentials for Windows users) 
     * and save the following lines after replacing the underlined values with your own.
     *
     * [default]
     * aws_access_key_id = YOUR_ACCESS_KEY_ID
     * aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
     */

    AmazonS3 s3 = new AmazonS3Client();
    Region usWest2 = Region.getRegion(Regions.US_EAST_1);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possibly since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        //System.out.println("Deleting an object\n");
        //s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        //System.out.println("Deleting bucket " + bucketName + "\n");
        //s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:com.uiintl.backup.agent.AwsBackupAgent.java

License:Open Source License

void handleAwsException(AmazonClientException ace) {

    if (ace instanceof AmazonServiceException) {
        AmazonServiceException ase = (AmazonServiceException) ace;

        logger.error(
                "Caught an AmazonServiceException, which means your request made it to Amazon S3, but was rejected with an error response for some reason.");
        logger.error("Error Message: {}", ase.getMessage());
        logger.error("HTTP Status Code: {}", ase.getStatusCode());
        logger.error("AWS Error Code: {}", ase.getErrorCode());
        logger.error("Error Type: {}", ase.getErrorType());
        logger.error("Request ID: {}", ase.getRequestId());
        logger.error("Stacktrace: ", ase);
    } else {
        logger.error(
                "Caught an AmazonClientException, which means the client encountered a serious internal problem while trying to communicate with S3, such as not being able to access the network.");
        logger.error("Error Message: {}", ace.getMessage());
    }
}
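
Because AmazonServiceException extends AmazonClientException, a single catch clause is enough to funnel both failure modes into the handler above, which then separates them with instanceof. A minimal sketch of a hypothetical caller follows; the bucket name, key prefix, and file are illustrative placeholders, not part of the original agent.

import java.io.File;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;

// Hypothetical caller on the same agent class, delegating to handleAwsException shown above.
void uploadBackup(AmazonS3 s3, File backupFile) {
    try {
        s3.putObject("example-backup-bucket", "backups/" + backupFile.getName(), backupFile);
    } catch (AmazonClientException ace) {
        // Service-side rejections (AmazonServiceException) and client-side
        // failures both land here; the handler logs the appropriate details.
        handleAwsException(ace);
    }
}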

From source file:com.uiintl.backup.agent.samples.S3Sample.java

License:Open Source License

public static void main2(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     *            AwsCredentials.properties file before you try to run this
     *            sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.AP_SOUTHEAST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:com.vb.services.database.rds.MySQLDBUtils.java

private void updateMySQLDBInstanceSpecificationsConfig() {

    MySQLDBInstanceSpecificationsConfig mysqlDBInstanceSpecificationsConfig = createDefaultDBInstanceSpecificationsConfig();
    this.mysqlDB.setDbInstanceSpecificationsConfig(mysqlDBInstanceSpecificationsConfig);

    try {
        this.createDBInstanceRequest.setEngine(this.mysqlDB.getDbInstanceSpecificationsConfig().getDbEngine());
        this.createDBInstanceRequest
                .setLicenseModel(this.mysqlDB.getDbInstanceSpecificationsConfig().getLicenceModel());
        this.createDBInstanceRequest
                .setEngineVersion(this.mysqlDB.getDbInstanceSpecificationsConfig().getDbVersion());
        this.createDBInstanceRequest
                .setDBInstanceClass(this.mysqlDB.getDbInstanceSpecificationsConfig().getDbInstanceClass());
        this.createDBInstanceRequest
                .setMultiAZ(this.mysqlDB.getDbInstanceSpecificationsConfig().getMultiAZDeployment());
        this.createDBInstanceRequest
                .setStorageType(this.mysqlDB.getDbInstanceSpecificationsConfig().getStorageType());
        this.createDBInstanceRequest
                .setAllocatedStorage(this.mysqlDB.getDbInstanceSpecificationsConfig().getAllocatedStorage());
    } catch (AmazonServiceException ase) {
        System.out.println("ERROR : error during MySQLDBInstanceSpecificationsConfig setup.");
        System.out.println("Caught an AmazonServiceException");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
        ase.printStackTrace();
    } catch (AmazonClientException ace) {
        System.out.println("ERROR : error during MySQLDBInstanceSpecificationsConfig setup.");
        System.out.println("Caught an AmazonClientException");
        System.out.println("Error Message: " + ace.getMessage());
        ace.printStackTrace();
    }
}

From source file:com.vb.services.database.rds.MySQLDBUtils.java

private void updateMySQLDBSettingsConfig(String dbInstanceIdentifier, String masterUserName,
        String masterPassword) {
    MySQLDBSettingsConfig mysqlDBSettingsConfig = createDBSettingsConfig(dbInstanceIdentifier, masterUserName,
            masterPassword);
    this.mysqlDB.setDbSettingsConfig(mysqlDBSettingsConfig);

    try {
        this.createDBInstanceRequest
                .setDBInstanceIdentifier(this.mysqlDB.getDbSettingsConfig().getDbInstanceIdentifier());
        this.createDBInstanceRequest.setMasterUsername(this.mysqlDB.getDbSettingsConfig().getMasterUserName());
        this.createDBInstanceRequest
                .setMasterUserPassword(this.mysqlDB.getDbSettingsConfig().getMasterPassword());
    } catch (AmazonServiceException ase) {
        System.out.println("ERROR : error during MySQLDBSettingsConfig setup.");
        System.out.println("Caught an AmazonServiceException");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
        ase.printStackTrace();
    } catch (AmazonClientException ace) {
        System.out.println("ERROR : error during MySQLDBSettingsConfig setup.");
        System.out.println("Caught an AmazonClientException");
        System.out.println("Error Message: " + ace.getMessage());
        ace.printStackTrace();
    }

}

From source file:com.vb.services.database.rds.MySQLDBUtils.java

private void updateMySQLDBNetworkSecurityConfig() {

    MySQLDBNetworkSecurityConfig mysqlDBNetworkSecurityConfig = new MySQLDBNetworkSecurityConfig();
    this.mysqlDB.setDbNetworkSecurityConfig(mysqlDBNetworkSecurityConfig);

    try {

    } catch (AmazonServiceException ase) {
        System.out.println("ERROR : error during MySQLDBNetworkSecurityConfig setup.");
        System.out.println("Caught an AmazonServiceException");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
        ase.printStackTrace();
    } catch (AmazonClientException ace) {
        System.out.println("ERROR : error during MySQLDBNetworkSecurityConfig setup.");
        System.out.println("Caught an AmazonClientException");
        System.out.println("Error Message: " + ace.getMessage());
        ace.printStackTrace();
    }
}

From source file:com.venu.springmvc.dao.AmazonDynamoDBDAO.java

License:Open Source License

public static void main(String[] args) throws Exception {
    init();

    try {
        String tableName = "my-favorite-movies-table";

        // Create a table with a primary hash key named 'name', which holds a string
        CreateTableRequest createTableRequest = new CreateTableRequest().withTableName(tableName)
                .withKeySchema(new KeySchemaElement().withAttributeName("name").withKeyType(KeyType.HASH))
                .withAttributeDefinitions(new AttributeDefinition().withAttributeName("name")
                        .withAttributeType(ScalarAttributeType.S))
                .withProvisionedThroughput(
                        new ProvisionedThroughput().withReadCapacityUnits(1L).withWriteCapacityUnits(1L));

        // Create table if it does not exist yet
        TableUtils.createTableIfNotExists(dynamoDB, createTableRequest);
        // wait for the table to move into ACTIVE state
        TableUtils.waitUntilActive(dynamoDB, tableName);

        // Describe our new table
        DescribeTableRequest describeTableRequest = new DescribeTableRequest().withTableName(tableName);
        TableDescription tableDescription = dynamoDB.describeTable(describeTableRequest).getTable();
        System.out.println("Table Description: " + tableDescription);

        // Add an item
        Map<String, AttributeValue> item = newItem("Gundamma katha", 1989, "****", "James", "Sara", "Venu");
        PutItemRequest putItemRequest = new PutItemRequest(tableName, item);
        PutItemResult putItemResult = dynamoDB.putItem(putItemRequest);
        System.out.println("Result: " + putItemResult);

        // Add another item
        item = newItem("Manadesam", 1980, "*****", "James", "Billy Bob", "Abburi");
        putItemRequest = new PutItemRequest(tableName, item);
        putItemResult = dynamoDB.putItem(putItemRequest);
        System.out.println("Result: " + putItemResult);

        // Scan items for movies with a year attribute greater than 1985
        HashMap<String, Condition> scanFilter = new HashMap<String, Condition>();
        Condition condition = new Condition().withComparisonOperator(ComparisonOperator.GT.toString())
                .withAttributeValueList(new AttributeValue().withN("1985"));
        scanFilter.put("year", condition);
        ScanRequest scanRequest = new ScanRequest(tableName).withScanFilter(scanFilter);
        ScanResult scanResult = dynamoDB.scan(scanRequest);
        System.out.println("Result: " + scanResult);

    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to AWS, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with AWS, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}