Example usage for java.util.concurrent ExecutionException getMessage

Introduction

On this page you can find example usages of java.util.concurrent.ExecutionException.getMessage().

Prototype

public String getMessage() 

Document

Returns the detail message string of this throwable.
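
Before the real-world examples below, here is a minimal, self-contained sketch of where this method typically comes into play (the class name and message strings are illustrative, not taken from the examples): a task submitted to an ExecutorService fails, Future.get() wraps the failure in an ExecutionException, and the caller inspects getMessage() and getCause().

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ExecutionExceptionDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // The exception thrown inside the task becomes the cause of the
        // ExecutionException raised later by Future.get().
        Callable<String> task = () -> {
            throw new IllegalStateException("task failed");
        };
        Future<String> future = executor.submit(task);
        try {
            future.get();
        } catch (ExecutionException e) {
            // When constructed from a cause only, getMessage() defaults to the
            // cause's toString(), e.g. "java.lang.IllegalStateException: task failed"
            System.out.println("message: " + e.getMessage());
            // The original failure is available via getCause(), which is why the
            // examples below routinely unwrap it before handling.
            System.out.println("cause  : " + e.getCause());
        } finally {
            executor.shutdown();
        }
    }
}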

Usage

From source file: io.dropwizard.primer.auth.PrimerAuthenticatorRequestFilter.java

@Override
@Metered(name = "primer")
public void filter(ContainerRequestContext requestContext) throws IOException {
    if (!configuration.isEnabled()) {
        return;
    }
    //Short-circuit for all whitelisted URLs
    if (PrimerAuthorizationRegistry.isWhilisted(requestContext.getUriInfo().getPath())) {
        return;
    }
    Optional<String> token = getToken(requestContext);
    if (!token.isPresent()) {
        requestContext
                .abortWith(Response.status(Response.Status.BAD_REQUEST)
                        .entity(objectMapper.writeValueAsBytes(
                                PrimerError.builder().errorCode("PR000").message("Bad request").build()))
                        .build());
    } else {
        try {
            JsonWebToken webToken = authorize(requestContext, token.get());
            //Stamp authorization headers for downstream services which can
            // use this to stop token forgery & misuse
            stampHeaders(requestContext, webToken);
        } catch (ExecutionException e) {
            if (e.getCause() instanceof PrimerException) {
                handleException(e.getCause(), requestContext, token.get());
            } else {
                handleException(e, requestContext, token.get());
            }
        } catch (UncheckedExecutionException e) {
            if (e.getCause() instanceof CompletionException) {
                handleException(e.getCause().getCause(), requestContext, token.get());
            } else {
                handleException(e.getCause(), requestContext, token.get());
            }
        } catch (Exception e) {
            log.error("Execution error: {}", e.getMessage());
            handleError(Response.Status.INTERNAL_SERVER_ERROR, "PR000", "Error", token.get(), requestContext);
        }
    }
}

From source file: ca.zadrox.dota2esportticker.service.UpdateMatchService.java

private void updateMatches(boolean doResults) {

    if (!checkForConnectivity()) {
        LocalBroadcastManager.getInstance(this).sendBroadcast(new Intent(UPDATE_NO_CONNECTIVITY));
        return;
    }

    LocalBroadcastManager.getInstance(this).sendBroadcast(new Intent(UPDATE_STARTED));

    final String BASE_URL = "http://www.gosugamers.net/dota2/gosubet";
    final String MATCH_LINK_URL_BASE = "http://www.gosugamers.net";

    try {

        String rawHtml = new OkHttpClient().newCall(new Request.Builder().url(BASE_URL).build()).execute()
                .body().string();

        rawHtml = rawHtml.substring(rawHtml.indexOf("<div id=\"col1\" class=\"rows\">"),
                rawHtml.indexOf("<div id=\"col2\" class=\"rows\">"));
        Document doc = Jsoup.parse(rawHtml);

        Elements tables = doc.getElementsByClass("matches");

        ArrayList<ArrayList<String>> matchLinks = new ArrayList<ArrayList<String>>(tables.size());

        int numSeries = 0;
        for (Element table : tables) {
            Elements links = table.getElementsByClass("match");
            if (links.size() != 0) {
                ArrayList<String> innerMatchLink = new ArrayList<String>(links.size());
                for (Element link : links) {
                    String linkHref = link.attr("href");
                    innerMatchLink.add(MATCH_LINK_URL_BASE + linkHref);
                    numSeries++;
                }
                matchLinks.add(innerMatchLink);
            }
        }

        // needed if there are massive reschedules to update content properly.
        Uri resultsUri = MatchContract.SeriesEntry.buildSeriesUriWithAfterTime(TimeUtils.getUTCTime());

        Cursor c = getContentResolver().query(resultsUri,
                new String[] { MatchContract.SeriesEntry.COLUMN_GG_MATCH_PAGE }, null, null, null);

        while (c.moveToNext()) {
            if (!matchLinks.get(0).contains(c.getString(0))) {
                matchLinks.get(0).add(c.getString(0));
            }
        }
        // Close the cursor once its rows have been consumed to avoid leaking it.
        c.close();

        Iterator<ArrayList<String>> iterator = matchLinks.iterator();
        int numResults = 0;
        ExecutorService executorService = Executors.newFixedThreadPool(10);
        ArrayList<Future<BundledMatchItem>> seriesItemFutures = new ArrayList<Future<BundledMatchItem>>(
                numSeries);

        LogUtils.LOGD(TAG, "Starting Retrieval, num elements gathered: " + numSeries);
        int i = 0;
        while (iterator.hasNext()) {

            ArrayList<String> matchList = iterator.next();
            for (String matchUrl : matchList) {
                boolean hasResult = !iterator.hasNext();
                if (!doResults && hasResult) {
                    continue;
                } else if (hasResult) {
                    numResults++;
                }
                seriesItemFutures.add(executorService.submit(new MatchGetter(matchUrl, hasResult)));
                i++;
            }
        }
        executorService.shutdown();
        executorService.awaitTermination(20L, TimeUnit.SECONDS);
        LogUtils.LOGD(TAG, "Stopping Retrieval, elements submitted for fetching: " + i);

        ContentValues[] seriesEntries = new ContentValues[i];
        ContentValues[] resultEntries = new ContentValues[numResults];
        int seriesEntryWriteIndex = 0;
        int resultEntryWriteIndex = 0;

        for (Future<BundledMatchItem> seriesItemFuture : seriesItemFutures) {
            try {
                BundledMatchItem seriesItem = seriesItemFuture.get();
                if (seriesItem != null) {
                    seriesEntries[seriesEntryWriteIndex] = seriesItem.mMatch;
                    seriesEntryWriteIndex++;
                    if (seriesItem.hasResult) {
                        resultEntries[resultEntryWriteIndex] = seriesItem.mResult;
                        resultEntryWriteIndex++;
                    }
                }
            } catch (ExecutionException e) {
                Log.e(TAG, "Should never get here", e);
            }
        }

        this.getContentResolver().bulkInsert(MatchContract.SeriesEntry.CONTENT_URI, seriesEntries);

        if (doResults)
            this.getContentResolver().bulkInsert(MatchContract.ResultEntry.CONTENT_URI, resultEntries);

        PrefUtils.setLastUpdateTime(this, TimeUtils.getUTCTime());

    } catch (IOException e) {
        Log.e(TAG, e.getMessage(), e);
    } catch (InterruptedException e) {
        Log.e(TAG, "Interrupted while waiting for match retrieval", e);
        // Restore the interrupt flag so callers can observe the interruption.
        Thread.currentThread().interrupt();
    }

    LocalBroadcastManager.getInstance(this).sendBroadcast(new Intent(UPDATE_COMPLETE));

    PrefUtils.setLastResultsUpdateTime(this, TimeUtils.getUTCTime());
}

From source file: org.n52.wps.server.handler.RequestHandler.java

/**
 * Handle a request after its type is determined. The request is scheduled
 * for execution. If the server has enough free resources, the client will
 * be served immediately. If time runs out, the client will be asked to come
 * back later with a reference to the result. The request itself is taken
 * from the req field of this handler.
 *
 * @throws ExceptionReport if the request is missing or its execution fails
 */
public void handle() throws ExceptionReport {
    Response resp = null;
    if (req == null) {
        throw new ExceptionReport("Internal Error", "");
    }
    if (req instanceof ExecuteRequest) {
        // cast the request to an ExecuteRequest
        ExecuteRequest execReq = (ExecuteRequest) req;

        execReq.updateStatusAccepted();

        ExceptionReport exceptionReport = null;
        try {
            if (execReq.isStoreResponse()) {
                resp = new ExecuteResponse(execReq);
                InputStream is = resp.getAsStream();
                IOUtils.copy(is, os);
                is.close();
                pool.submit(execReq);
                return;
            }
            try {
                // retrieve status with timeout enabled
                try {
                    resp = pool.submit(execReq).get();
                } catch (ExecutionException ee) {
                    LOGGER.warn("exception while handling ExecuteRequest.");
                    // the computation threw an error
                    // probably the client input is not valid
                    if (ee.getCause() instanceof ExceptionReport) {
                        exceptionReport = (ExceptionReport) ee.getCause();
                    } else {
                        exceptionReport = new ExceptionReport(
                                "An error occurred in the computation: " + ee.getMessage(),
                                ExceptionReport.NO_APPLICABLE_CODE);
                    }
                } catch (InterruptedException ie) {
                    LOGGER.warn("interrupted while handling ExecuteRequest.");
                    // interrupted while waiting in the queue
                    exceptionReport = new ExceptionReport("The computation in the process was interrupted.",
                            ExceptionReport.NO_APPLICABLE_CODE);
                }
            } finally {
                if (exceptionReport != null) {
                    LOGGER.debug("ExceptionReport not null: " + exceptionReport.getMessage());
                    // NOT SURE, if this exceptionReport is also written to the DB, if required... test please!
                    throw exceptionReport;
                }
                // send the result to the output stream of the client.
                /*   if(((ExecuteRequest) req).isQuickStatus()) {
                        resp = new ExecuteResponse(execReq);
                     }*/
                if (resp == null) {
                    LOGGER.warn("null response handling ExecuteRequest.");
                    throw new ExceptionReport("Problem with handling threads in RequestHandler",
                            ExceptionReport.NO_APPLICABLE_CODE);
                }
                if (!execReq.isStoreResponse()) {
                    InputStream is = resp.getAsStream();
                    IOUtils.copy(is, os);
                    is.close();
                    LOGGER.info("Served ExecuteRequest.");
                }
            }
        } catch (RejectedExecutionException ree) {
            LOGGER.warn("exception handling ExecuteRequest.", ree);
            // server too busy?
            throw new ExceptionReport(
                    "The requested process was rejected. Maybe the server is flooded with requests.",
                    ExceptionReport.SERVER_BUSY);
        } catch (Exception e) {
            LOGGER.error("exception handling ExecuteRequest.", e);
            if (e instanceof ExceptionReport) {
                throw (ExceptionReport) e;
            }
            throw new ExceptionReport("Could not read from response stream.",
                    ExceptionReport.NO_APPLICABLE_CODE);
        }
    } else {
        // for GetCapabilities and DescribeProcess:
        resp = req.call();
        try {
            InputStream is = resp.getAsStream();
            IOUtils.copy(is, os);
            is.close();
        } catch (IOException e) {
            throw new ExceptionReport("Could not read from response stream.",
                    ExceptionReport.NO_APPLICABLE_CODE);
        }

    }
}

From source file: org.openhab.binding.gardena.internal.GardenaSmartImpl.java

/**
 * Communicates with Gardena Smart Home and parses the result.
 */
private synchronized <T> T executeRequest(HttpMethod method, String url, Object contentObject, Class<T> result)
        throws GardenaException {
    try {
        if (logger.isTraceEnabled()) {
            logger.trace("{} request:  {}", method, url);
            if (contentObject != null) {
                logger.trace("{} data   :  {}", method, gson.toJson(contentObject));
            }
        }

        Request request = httpClient.newRequest(url).method(method)
                .timeout(config.getConnectionTimeout(), TimeUnit.SECONDS)
                .header(HttpHeader.CONTENT_TYPE, "application/json")
                .header(HttpHeader.ACCEPT, "application/json").header(HttpHeader.ACCEPT_ENCODING, "gzip");

        if (contentObject != null) {
            StringContentProvider content = new StringContentProvider(gson.toJson(contentObject));
            request.content(content);
        }

        if (!result.equals(SessionWrapper.class)) {
            verifySession();
            request.header("X-Session", session.getToken());
        }

        ContentResponse contentResponse = request.send();
        int status = contentResponse.getStatus();
        if (logger.isTraceEnabled()) {
            logger.trace("Status  : {}", status);
            logger.trace("Response: {}", contentResponse.getContentAsString());
        }

        if (status == 500) {
            throw new GardenaException(
                    gson.fromJson(contentResponse.getContentAsString(), Errors.class).toString());
        } else if (status != 200 && status != 204) {
            throw new GardenaException(String.format("Error %s %s", status, contentResponse.getReason()));
        }

        if (result == NoResult.class) {
            return null;
        }

        return gson.fromJson(contentResponse.getContentAsString(), result);
    } catch (ExecutionException ex) {
        Throwable cause = ex.getCause();
        if (cause instanceof HttpResponseException) {
            HttpResponseException responseException = (HttpResponseException) cause;
            int status = responseException.getResponse().getStatus();
            if (status == 401) {
                throw new GardenaUnauthorizedException(cause);
            }
        }
        throw new GardenaException(ex.getMessage(), ex);
    } catch (Exception ex) {
        throw new GardenaException(ex.getMessage(), ex);
    }
}

From source file: com.streamsets.pipeline.stage.processor.hive.HiveMetadataProcessor.java

@Override
protected void process(Record record, BatchMaker batchMaker) throws StageException {
    ELVars variables = getContext().createELVars();
    RecordEL.setRecordInContext(variables, record);
    TimeEL.setCalendarInContext(variables, Calendar.getInstance());
    TimeNowEL.setTimeNowInContext(variables, new Date());

    // Calculate record time for this particular record and persist it in the variables
    Date timeBasis = elEvals.timeDriverElEval.eval(variables, timeDriver, Date.class);
    Calendar calendar = Calendar.getInstance(timeZone);
    calendar.setTime(timeBasis);
    TimeEL.setCalendarInContext(variables, calendar);

    String dbName = HiveMetastoreUtil.resolveEL(elEvals.dbNameELEval, variables, databaseEL);
    String tableName = HiveMetastoreUtil.resolveEL(elEvals.tableNameELEval, variables, tableEL);
    String targetPath;
    String avroSchema;
    String partitionStr = "";
    LinkedHashMap<String, String> partitionValMap;

    if (dbName.isEmpty()) {
        dbName = DEFAULT_DB;
    }
    try {
        // Validate Database and Table names
        if (!HiveMetastoreUtil.validateObjectName(dbName)) {
            throw new HiveStageCheckedException(Errors.HIVE_METADATA_03, "database name", dbName);
        }
        if (!HiveMetastoreUtil.validateObjectName(tableName)) {
            throw new HiveStageCheckedException(Errors.HIVE_METADATA_03, "table name", tableName);
        }

        partitionValMap = getPartitionValuesFromRecord(variables);

        if (partitioned) {
            partitionStr = externalTable
                    ? HiveMetastoreUtil.resolveEL(elEvals.partitionPathTemplateELEval, variables,
                            partitionPathTemplate)
                    : HiveMetastoreUtil.generatePartitionPath(partitionValMap);
            if (!partitionStr.startsWith("/"))
                partitionStr = "/" + partitionStr;
        }
        // First, find out if this record has all necessary data to process
        validateNames(dbName, tableName);
        String qualifiedName = HiveMetastoreUtil.getQualifiedTableName(dbName, tableName);
        LOG.trace("Generated table {} for record {}", qualifiedName, record.getHeader().getSourceId());

        if (externalTable) {
            // External tables have their location in the resolved EL
            targetPath = HiveMetastoreUtil.resolveEL(elEvals.tablePathTemplateELEval, variables,
                    tablePathTemplate);
        } else {
            // Internal tables use the database location + the table name
            String databaseLocation;
            try {
                databaseLocation = databaseCache.get(dbName);
            } catch (ExecutionException e) {
                throw new HiveStageCheckedException(com.streamsets.pipeline.stage.lib.hive.Errors.HIVE_23,
                        e.getMessage());
            }
            targetPath = String.format("%s/%s", databaseLocation, tableName);
        }

        if (targetPath.isEmpty()) {
            throw new HiveStageCheckedException(Errors.HIVE_METADATA_02, targetPath);
        }

        // Obtain the record structure from current record
        LinkedHashMap<String, HiveTypeInfo> recordStructure = HiveMetastoreUtil.convertRecordToHMSType(record,
                elEvals.scaleEL, elEvals.precisionEL, elEvals.commentEL, decimalDefaultsConfig.scaleExpression,
                decimalDefaultsConfig.precisionExpression, commentExpression, variables);

        if (recordStructure.isEmpty()) { // If the record has no data to process, this is a no-op
            return;
        }

        TBLPropertiesInfoCacheSupport.TBLPropertiesInfo tblPropertiesInfo = HiveMetastoreUtil
                .getCacheInfo(cache, HMSCacheType.TBLPROPERTIES_INFO, qualifiedName, queryExecutor);

        if (tblPropertiesInfo != null) {
            HiveMetastoreUtil.validateTblPropertiesInfo(dataFormat, tblPropertiesInfo, tableName);

            if (tblPropertiesInfo.isExternal() != externalTable) {
                throw new HiveStageCheckedException(com.streamsets.pipeline.stage.lib.hive.Errors.HIVE_23,
                        "EXTERNAL", externalTable, tblPropertiesInfo.isExternal());
            }
        }

        TypeInfoCacheSupport.TypeInfo tableCache = HiveMetastoreUtil.getCacheInfo(cache, HMSCacheType.TYPE_INFO,
                qualifiedName, queryExecutor);

        if (tableCache != null) {
            //Checks number and name of partitions.
            HiveMetastoreUtil.validatePartitionInformation(tableCache, partitionValMap, qualifiedName);
            //Checks the type of partitions.
            Map<String, HiveTypeInfo> cachedPartitionTypeInfoMap = tableCache.getPartitionTypeInfo();
            for (Map.Entry<String, HiveTypeInfo> cachedPartitionTypeInfo : cachedPartitionTypeInfoMap
                    .entrySet()) {
                String partitionName = cachedPartitionTypeInfo.getKey();
                HiveTypeInfo expectedTypeInfo = cachedPartitionTypeInfo.getValue();
                HiveTypeInfo actualTypeInfo = partitionTypeInfo.get(partitionName);
                if (!expectedTypeInfo.equals(actualTypeInfo)) {
                    throw new HiveStageCheckedException(com.streamsets.pipeline.stage.lib.hive.Errors.HIVE_28,
                            partitionName, qualifiedName, expectedTypeInfo.toString(),
                            actualTypeInfo.toString());
                }
            }
            // Validate that the columns from record itself does not clash with partition columns
            for (String columnName : recordStructure.keySet()) {
                if (cachedPartitionTypeInfoMap.containsKey(columnName)) {
                    throw new HiveStageCheckedException(com.streamsets.pipeline.stage.lib.hive.Errors.HIVE_40,
                            columnName);
                }
            }
        }

        AvroSchemaInfoCacheSupport.AvroSchemaInfo schemaCache = HiveMetastoreUtil.getCacheInfo(cache,
                HMSCacheType.AVRO_SCHEMA_INFO, qualifiedName, queryExecutor);

        // True if there was a schema drift (including detection of new table)
        boolean schemaDrift = false;

        // Build final structure of how the table should look like
        LinkedHashMap<String, HiveTypeInfo> finalStructure;
        if (tableCache != null) {
            // Table already exists in Hive - so its columns will be preserved in their original order
            finalStructure = new LinkedHashMap<>();
            finalStructure.putAll(tableCache.getColumnTypeInfo());

            // If there is any diff (any new columns), we will append them at the end of the table
            LinkedHashMap<String, HiveTypeInfo> columnDiff = tableCache.getDiff(recordStructure);
            if (!columnDiff.isEmpty()) {
                LOG.trace("Detected drift for table {} - new columns: {}", qualifiedName,
                        StringUtils.join(columnDiff.keySet(), ","));
                schemaDrift = true;
                finalStructure.putAll(columnDiff);
            }
        } else {
            LOG.trace("{} is a new table", qualifiedName);
            // This table doesn't exist yet, so we'll use the record's own structure as the final table structure
            schemaDrift = true;
            finalStructure = recordStructure;
        }

        // Generate the schema only if the table does not exist or its schema has changed.
        if (schemaDrift) {
            avroSchema = HiveMetastoreUtil.generateAvroSchema(finalStructure, qualifiedName);
            LOG.trace("Schema Drift. Generated new Avro schema for table {}: {}", qualifiedName, avroSchema);

            // Add custom metadata attributes if they are specified
            Map<String, String> metadataHeaderAttributeMap = new LinkedHashMap<>();
            if (metadataHeadersToAddExist) {
                metadataHeaderAttributeMap = generateResolvedHeaderAttributeMap(metadataHeaderAttributeConfigs,
                        variables);
            }

            handleSchemaChange(dbName, tableName, recordStructure, targetPath, avroSchema, batchMaker,
                    qualifiedName, tableCache, schemaCache, metadataHeaderAttributeMap);
        } else {
            if (schemaCache == null) { // Table exists in Hive, but this is cold start so the cache is null
                avroSchema = HiveMetastoreUtil.generateAvroSchema(finalStructure, qualifiedName);
                LOG.trace("Cold Start. Generated new Avro schema for table {}: {}", qualifiedName, avroSchema);
                updateAvroCache(schemaCache, avroSchema, qualifiedName);
            } else // No schema change, table already exists in Hive, and we have avro schema in cache.
                avroSchema = schemaCache.getSchema();
        }

        if (partitioned) {
            PartitionInfoCacheSupport.PartitionInfo pCache = HiveMetastoreUtil.getCacheInfo(cache,
                    HMSCacheType.PARTITION_VALUE_INFO, qualifiedName, queryExecutor);

            PartitionInfoCacheSupport.PartitionValues partitionValues = new PartitionInfoCacheSupport.PartitionValues(
                    partitionValMap);

            // If the partition information exists (thus this is not a cold start)
            if (pCache != null) {
                // If we detected drift, we need to persist that information and "roll" all partitions next time
                // we will see them.
                if (schemaDrift) {
                    pCache.setAllPartitionsToBeRolled();
                }

                // If we performed drift for the table and this is the first time we see this partition, we
                // need to set the roll flag anyway.
                if (pCache.shouldRoll(partitionValues)) {
                    schemaDrift = true;
                }
            }

            // Append partition path to target path as all paths from now should be with the partition info
            targetPath += partitionStr;

            Map<PartitionInfoCacheSupport.PartitionValues, String> diff = detectNewPartition(partitionValues,
                    pCache, targetPath);

            // Send new partition metadata if new partition is detected.
            if (diff != null) {
                // Add custom metadata attributes if they are specified
                Map<String, String> partitionMetadataHeaderAttributeMap = new LinkedHashMap<>();
                if (metadataHeadersToAddExist) {
                    partitionMetadataHeaderAttributeMap = generateResolvedHeaderAttributeMap(
                            metadataHeaderAttributeConfigs, variables);
                }
                handleNewPartition(partitionValMap, pCache, dbName, tableName, targetPath, batchMaker,
                        qualifiedName, diff, partitionMetadataHeaderAttributeMap);
            }
        }

        // Send record to HDFS target.
        if (dataFormat == HMPDataFormat.PARQUET) {
            targetPath = targetPath + TEMP_AVRO_DIR_NAME;
        }

        changeRecordFieldToLowerCase(record);
        updateRecordForHDFS(record, schemaDrift, avroSchema, targetPath);
        batchMaker.addRecord(record, hdfsLane);
    } catch (HiveStageCheckedException error) {
        LOG.error("Error happened when processing record", error);
        LOG.trace("Record that caused the error: {}", record.toString());
        errorRecordHandler.onError(new OnRecordErrorException(record, error.getErrorCode(), error.getParams()));
    }
}

From source file: voldemort.restclient.R2Store.java

public String getSerializerInfoXml() throws VoldemortException {
    RestRequestBuilder rb = null;
    try {
        String base64Key = new String(Base64.encodeBase64(getName().getBytes("UTF-8")));
        rb = new RestRequestBuilder(
                new URI(this.restBootstrapURL + "/" + SCHEMATA_STORE_NAME + "/" + base64Key));
        rb.setHeader("Accept", "binary");
        rb.setHeader(RestMessageHeaders.X_VOLD_REQUEST_ORIGIN_TIME_MS,
                String.valueOf(System.currentTimeMillis()));
        if (this.routingTypeCode != null) {
            rb.setHeader(RestMessageHeaders.X_VOLD_ROUTING_TYPE_CODE, this.routingTypeCode);
        }
        if (this.zoneId != INVALID_ZONE_ID) {
            rb.setHeader(RestMessageHeaders.X_VOLD_ZONE_ID, String.valueOf(this.zoneId));
        }

        RestResponse response = fetchGetResponse(rb, FETCH_SCHEMA_TIMEOUT_MS);
        return response.getEntity().asString("UTF-8");
    } catch (ExecutionException e) {
        if (e.getCause() instanceof RestException) {
            RestException exception = (RestException) e.getCause();
            if (logger.isDebugEnabled()) {
                logger.debug("REST EXCEPTION STATUS : " + exception.getResponse().getStatus());
            }
        } else {
            throw new VoldemortException("Unknown HTTP request execution exception: " + e.getMessage(), e);
        }
    } catch (InterruptedException e) {
        if (logger.isDebugEnabled()) {
            logger.debug("Operation interrupted : " + e.getMessage(), e);
        }
        throw new VoldemortException("Operation interrupted exception: " + e.getMessage(), e);
    } catch (URISyntaxException e) {
        throw new VoldemortException("Illegal HTTP URL: " + e.getMessage(), e);
    } catch (UnsupportedEncodingException e) {
        throw new VoldemortException("Unsupported Encoding exception while encoding the key: " + e.getMessage(),
                e);
    }
    return null;
}

From source file: voldemort.restclient.R2Store.java

@Override
public List<Versioned<byte[]>> get(ByteArray key, byte[] transforms) throws VoldemortException {
    List<Versioned<byte[]>> resultList = new ArrayList<Versioned<byte[]>>();
    String base64Key = RestUtils.encodeVoldemortKey(key.get());
    RestRequestBuilder rb = null;
    try {
        rb = new RestRequestBuilder(new URI(this.restBootstrapURL + "/" + getName() + "/" + base64Key));
        String timeoutStr = Long
                .toString(this.config.getTimeoutConfig().getOperationTimeout(VoldemortOpCode.GET_OP_CODE));
        rb.setHeader("Accept", MULTIPART_CONTENT_TYPE);

        RestResponse response = fetchGetResponse(rb, timeoutStr);
        final ByteString entity = response.getEntity();
        if (entity != null) {
            resultList = parseGetResponse(entity);
        } else {
            if (logger.isDebugEnabled()) {
                logger.debug("Did not get any response!");
            }
        }
    } catch (ExecutionException e) {
        if (e.getCause() instanceof RestException) {
            RestException exception = (RestException) e.getCause();
            if (logger.isDebugEnabled()) {
                logger.debug("REST EXCEPTION STATUS : " + exception.getResponse().getStatus());
            }

        } else {
            throw new VoldemortException("Unknown HTTP request execution exception: " + e.getMessage(), e);
        }
    } catch (InterruptedException e) {
        if (logger.isDebugEnabled()) {
            logger.debug("Operation interrupted : " + e.getMessage(), e);
        }
        throw new VoldemortException("Operation interrupted exception: " + e.getMessage(), e);
    } catch (URISyntaxException e) {
        throw new VoldemortException("Illegal HTTP URL" + e.getMessage(), e);
    }

    return resultList;
}

From source file: voldemort.restclient.R2Store.java

@Override
public List<Version> getVersions(ByteArray key) {
    List<Version> resultList = new ArrayList<Version>();
    String base64Key = RestUtils.encodeVoldemortKey(key.get());
    RestRequestBuilder rb = null;
    try {
        rb = new RestRequestBuilder(new URI(this.restBootstrapURL + "/" + getName() + "/" + base64Key));
        String timeoutStr = Long.toString(
                this.config.getTimeoutConfig().getOperationTimeout(VoldemortOpCode.GET_VERSION_OP_CODE));

        rb.setHeader(RestMessageHeaders.X_VOLD_GET_VERSION, "true");

        RestResponse response = fetchGetResponse(rb, timeoutStr);
        final ByteString entity = response.getEntity();
        if (entity != null) {
            resultList = parseGetVersionResponse(entity);
        } else {
            if (logger.isDebugEnabled()) {
                logger.debug("Did not get any response!");
            }
        }
    } catch (ExecutionException e) {
        if (e.getCause() instanceof RestException) {
            RestException exception = (RestException) e.getCause();
            if (logger.isDebugEnabled()) {
                logger.debug("REST EXCEPTION STATUS : " + exception.getResponse().getStatus());
            }

        } else {
            throw new VoldemortException("Unknown HTTP request execution exception: " + e.getMessage(), e);
        }
    } catch (InterruptedException e) {
        if (logger.isDebugEnabled()) {
            logger.debug("Operation interrupted : " + e.getMessage(), e);
        }
        throw new VoldemortException("Operation interrupted exception: " + e.getMessage(), e);
    } catch (URISyntaxException e) {
        throw new VoldemortException("Illegal HTTP URL" + e.getMessage(), e);
    }

    return resultList;
}

From source file: ezbake.services.graph.TitanGraphStore.java

/**
 * Adds all vertices and their properties from the specified Thrift graph to the Titan graph. All properties of
 * vertices are grouped by visibility to minimize the number of created transactions. The total number of
 * transactions should be approximately equal to the number of visibility variants.
 */
private void processVertices(String graphName, Graph thriftGraph,
        final SecureTitanGraph<AccumuloSecurityToken> titanGraph, Map<String, Object> vertexIdMapping)
        throws InvalidRequestException {
    final List<ezbake.services.graph.thrift.types.Vertex> thriftVertices = thriftGraph.getVertices();

    // stores first by visibility string, then by titan vertex id
    // the second parameter needs to be a map so that we don't end up creating
    // duplicate VertexRecords with the same titan id since properties aren't sorted by visibility
    final Map<String, Map<Object, VertexRecord>> vertexRecords = Maps.newHashMap();

    // first loop creates a vertex and adds the titan id to local id mapping, and
    // organizes vertices->properties by visibility
    for (final ezbake.services.graph.thrift.types.Vertex thriftVertex : thriftVertices) {
        Object titanVertexId = null;
        final Map<String, List<Property>> props = thriftVertex.getProperties();

        // selector vertices are created using a different method
        if (thriftVertex.isSetSelectorProperty()) {
            final String selectorKey = thriftVertex.getSelectorProperty();
            final Property selProp = props.get(selectorKey).get(0);

            // create the unique selector key for the cache
            final String selectorVal = GraphConverter.getJavaPropValue(selProp.getValue()).toString();
            try {
                final Cache<String, Long> selectorCache = graphSelectorCache.get(graphName);
                titanVertexId = selectorCache.get(selectorKey + SEL_KEY_SEP + selectorVal,
                        new Callable<Long>() {
                            @Override
                            public Long call() throws Exception {
                                final SelectorVertexFactory vertexFactory = new SelectorVertexFactory(
                                        titanGraph);
                                final Long id = vertexFactory.getSelectorId(selectorKey, selectorVal);
                                return id;
                            }
                        });
            } catch (final ExecutionException e) {
                log("Error loading selector vertex into cache.", e);
                throw new InvalidRequestException(
                        "Error loading selector vertex into cache: " + e.getMessage());
            }
        } else {
            final SecureTitanTx<AccumuloSecurityToken> tx = titanGraph.newTransaction();
            final TitanVertex titanVertex = tx.addVertex(null);
            tx.commit();
            titanVertexId = titanVertex.getId();
        }

        // add the vertex id mapping so edges can be properly added later
        // no need to roll back these entries since their scope is not persistent
        // and they are only used if adding all vertices was successful
        vertexIdMapping.put(thriftVertex.getId().getLocalId(), titanVertexId);

        // capture and categorize by visibility the properties of this vertex
        if (props != null) {
            for (final Map.Entry<String, List<Property>> entry : props.entrySet()) {
                final String name = entry.getKey();
                for (final Property prop : entry.getValue()) {
                    // first, get the visibility of each property
                    final String visi = getVisibility(prop.getVisibility());

                    // second, get the map of vertex id to record by visibility
                    Map<Object, VertexRecord> rec = vertexRecords.get(visi);
                    if (rec == null) {
                        rec = Maps.newHashMap();
                    }

                    // third, get the actual vertex record by vertex id
                    // then add the property and update maps
                    VertexRecord vr = rec.get(titanVertexId);
                    if (vr == null) {
                        vr = new VertexRecord(visi);
                    }
                    vr.addProperty(name, prop);

                    rec.put(titanVertexId, vr);
                    vertexRecords.put(visi, rec);
                }
            }
        }
    }

    // now create the transactions to save off the properties at the respective visibilities,
    // the number of transactions will equal the number of variant visibilities for all vertex properties
    // in the thrift graph
    for (final Map.Entry<String, Map<Object, VertexRecord>> stringMapEntry : vertexRecords.entrySet()) {
        final SecureTitanTx<AccumuloSecurityToken> tx = startWriteTransaction(graphName,
                stringMapEntry.getKey());
        final Map<Object, VertexRecord> recs = stringMapEntry.getValue();
        for (final Map.Entry<Object, VertexRecord> objectVertexRecordEntry : recs.entrySet()) {
            final VertexRecord rec = objectVertexRecordEntry.getValue();
            final TitanVertex tv = tx.getVertex(objectVertexRecordEntry.getKey());
            for (final Map.Entry<String, List<Property>> entry : rec.getProperties().entrySet()) {
                final String name = entry.getKey();
                for (final Property prop : entry.getValue()) {
                    tv.addProperty(name, GraphConverter.getJavaPropValue(prop.getValue()));
                }
            }
        }
        tx.commit();
    }
}

From source file: voldemort.restclient.R2Store.java

@Override
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys,
        Map<ByteArray, byte[]> transforms) throws VoldemortException {

    Map<ByteArray, List<Versioned<byte[]>>> resultMap = new HashMap<ByteArray, List<Versioned<byte[]>>>();
    int numberOfKeys = 0;

    try {
        Iterator<ByteArray> it = keys.iterator();
        StringBuilder keyArgs = null;

        while (it.hasNext()) {
            ByteArray key = it.next();
            String base64Key = RestUtils.encodeVoldemortKey(key.get());
            if (keyArgs == null) {
                keyArgs = new StringBuilder();
                keyArgs.append(base64Key);
            } else {
                keyArgs.append("," + base64Key);
            }
            numberOfKeys++;
        }

        // Rerouting getall() requests with single key to get(). This is a
        // temporary fix to handle the NPE when getAll requests are made
        // with single key.
        // TODO a common way to handle getAll with any number of keys
        if (numberOfKeys == 1) {
            List<Versioned<byte[]>> resultList = new ArrayList<Versioned<byte[]>>();
            it = keys.iterator();
            ByteArray key = it.next();
            byte[] singleKeyTransforms = null;
            if (transforms != null) {
                singleKeyTransforms = transforms.get(key);
            }
            resultList = this.get(key, singleKeyTransforms);
            resultMap.put(key, resultList);
        } else {

            RestRequestBuilder rb = new RestRequestBuilder(
                    new URI(this.restBootstrapURL + "/" + getName() + "/" + keyArgs.toString()));

            rb.setMethod(GET);
            rb.setHeader("Accept", MULTIPART_CONTENT_TYPE);
            String timeoutStr = Long.toString(
                    this.config.getTimeoutConfig().getOperationTimeout(VoldemortOpCode.GET_ALL_OP_CODE));
            rb.setHeader(RestMessageHeaders.X_VOLD_REQUEST_TIMEOUT_MS, timeoutStr);
            rb.setHeader(RestMessageHeaders.X_VOLD_REQUEST_ORIGIN_TIME_MS,
                    String.valueOf(System.currentTimeMillis()));
            if (this.routingTypeCode != null) {
                rb.setHeader(RestMessageHeaders.X_VOLD_ROUTING_TYPE_CODE, this.routingTypeCode);
            }
            if (this.zoneId != INVALID_ZONE_ID) {
                rb.setHeader(RestMessageHeaders.X_VOLD_ZONE_ID, String.valueOf(this.zoneId));
            }

            RestRequest request = rb.build();
            Future<RestResponse> f = client.restRequest(request);

            // This will block
            RestResponse response = f.get();

            // Parse the response
            final ByteString entity = response.getEntity();

            String contentType = response.getHeader(CONTENT_TYPE);
            if (entity != null) {
                if (contentType.equalsIgnoreCase(MULTIPART_CONTENT_TYPE)) {

                    resultMap = parseGetAllResults(entity);
                } else {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Did not receive a multipart response");
                    }
                }

            } else {
                if (logger.isDebugEnabled()) {
                    logger.debug("Did not get any response!");
                }
            }
        }
    } catch (ExecutionException e) {
        if (e.getCause() instanceof RestException) {
            RestException exception = (RestException) e.getCause();
            if (logger.isDebugEnabled()) {
                logger.debug("REST EXCEPTION STATUS : " + exception.getResponse().getStatus());
            }
        } else {
            throw new VoldemortException("Unknown HTTP request execution exception: " + e.getMessage(), e);
        }
    } catch (InterruptedException e) {
        if (logger.isDebugEnabled()) {
            logger.debug("Operation interrupted : " + e.getMessage(), e);
        }
        throw new VoldemortException("Operation interrupted exception: " + e.getMessage(), e);
    } catch (URISyntaxException e) {
        throw new VoldemortException("Illegal HTTP URL" + e.getMessage(), e);
    }

    return resultMap;
}