List of usage examples for java.io.IOException getCause()
public synchronized Throwable getCause()
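getCause() returns the throwable that was recorded as the cause of this IOException (typically via the IOException(String, Throwable) constructor or initCause()), or null if no cause was set. A minimal, self-contained sketch of the wrap-and-unwrap pattern the examples below rely on (class and message names here are illustrative only, not taken from any of the sources):

    import java.io.IOException;

    public class GetCauseDemo {
        public static void main(String[] args) {
            try {
                try {
                    throw new IllegalStateException("low-level failure");
                } catch (IllegalStateException e) {
                    // the two-argument constructor records the cause that getCause() returns
                    throw new IOException("wrapper", e);
                }
            } catch (IOException e) {
                Throwable cause = e.getCause();          // the IllegalStateException above
                System.out.println(cause.getMessage());  // prints "low-level failure"
            }
        }
    }

As in several of the examples below, the cause is usually inspected with instanceof, rethrown, or folded into a log message.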
From source file:org.springframework.integration.ip.tcp.TcpOutboundGatewayTests.java
/**
 * Sends 2 concurrent messages on a shared connection. The GW single threads
 * these requests. The first will time out; the second should receive its
 * own response, not the one for the first.
 * @throws InterruptedException
 */
private void testGoodNetGWTimeoutGuts(final int port, AbstractConnectionFactory ccf) throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicBoolean done = new AtomicBoolean();
    /*
     * The payload of the last message received by the remote side;
     * used to verify the correct response is received.
     */
    final AtomicReference<String> lastReceived = new AtomicReference<String>();
    final CountDownLatch serverLatch = new CountDownLatch(2);
    Executors.newSingleThreadExecutor().execute(new Runnable() {
        public void run() {
            try {
                ServerSocket server = ServerSocketFactory.getDefault().createServerSocket(port);
                latch.countDown();
                int i = 0;
                while (!done.get()) {
                    Socket socket = server.accept();
                    i++;
                    while (!socket.isClosed()) {
                        try {
                            ObjectInputStream ois = new ObjectInputStream(socket.getInputStream());
                            String request = (String) ois.readObject();
                            logger.debug("Read " + request);
                            ObjectOutputStream oos = new ObjectOutputStream(socket.getOutputStream());
                            if (i < 2) {
                                Thread.sleep(1000);
                            }
                            oos.writeObject(request.replace("Test", "Reply"));
                            logger.debug("Replied to " + request);
                            lastReceived.set(request);
                            serverLatch.countDown();
                        } catch (IOException e) {
                            logger.debug("error on write " + e.getClass().getSimpleName());
                            socket.close();
                        }
                    }
                }
            } catch (Exception e) {
                if (!done.get()) {
                    e.printStackTrace();
                }
            }
        }
    });
    assertTrue(latch.await(10000, TimeUnit.MILLISECONDS));
    final TcpOutboundGateway gateway = new TcpOutboundGateway();
    gateway.setConnectionFactory(ccf);
    gateway.setRequestTimeout(Integer.MAX_VALUE);
    QueueChannel replyChannel = new QueueChannel();
    gateway.setRequiresReply(true);
    gateway.setOutputChannel(replyChannel);
    gateway.setRemoteTimeout(500);
    @SuppressWarnings("unchecked")
    Future<Integer>[] results = new Future[2];
    for (int i = 0; i < 2; i++) {
        final int j = i;
        results[j] = (Executors.newSingleThreadExecutor().submit(new Callable<Integer>() {
            public Integer call() throws Exception {
                // increase the timeout after the first send
                if (j > 0) {
                    gateway.setRemoteTimeout(5000);
                }
                gateway.handleMessage(MessageBuilder.withPayload("Test" + j).build());
                return j;
            }
        }));
        Thread.sleep(50);
    }
    // wait until the server side has processed both requests
    assertTrue(serverLatch.await(10, TimeUnit.SECONDS));
    List<String> replies = new ArrayList<String>();
    int timeouts = 0;
    for (int i = 0; i < 2; i++) {
        try {
            int result = results[i].get();
            String reply = (String) replyChannel.receive(1000).getPayload();
            logger.debug(i + " got " + result + " " + reply);
            replies.add(reply);
        } catch (ExecutionException e) {
            if (timeouts >= 2) {
                fail("Unexpected " + e.getMessage());
            } else {
                assertNotNull(e.getCause());
                assertTrue(e.getCause() instanceof MessageTimeoutException);
            }
            timeouts++;
            continue;
        }
    }
    assertEquals("Expected exactly one ExecutionException", 1, timeouts);
    assertEquals(1, replies.size());
    assertEquals(lastReceived.get().replace("Test", "Reply"), replies.get(0));
    done.set(true);
    assertEquals(0, TestUtils.getPropertyValue(gateway, "pendingReplies", Map.class).size());
    gateway.stop();
}
From source file:com.ut.healthelink.service.impl.transactionInManagerImpl.java
@Override
public String copyUplaodedPath(configurationTransport transportDetails, MultipartFile fileUpload) {
    // save the file as-is to the input folder
    MultipartFile file = fileUpload;
    String fileName = file.getOriginalFilename();
    InputStream inputStream;
    OutputStream outputStream;
    try {
        inputStream = file.getInputStream();
        File newFile = null;
        // Set the directory to save the brochures to
        fileSystem dir = new fileSystem();
        String filelocation = transportDetails.getfileLocation();
        filelocation = filelocation.replace("/bowlink/", "");
        dir.setDirByName(filelocation);
        newFile = new File(dir.getDir() + fileName);
        if (newFile.exists()) {
            int i = 1;
            while (newFile.exists()) {
                int iDot = fileName.lastIndexOf(".");
                newFile = new File(dir.getDir() + fileName.substring(0, iDot) + "_(" + ++i + ")" + fileName.substring(iDot));
            }
            fileName = newFile.getName();
            newFile.createNewFile();
        } else {
            newFile.createNewFile();
        }
        // Save the attachment
        outputStream = new FileOutputStream(newFile);
        int read = 0;
        byte[] bytes = new byte[1024];
        while ((read = inputStream.read(bytes)) != -1) {
            outputStream.write(bytes, 0, read);
        }
        outputStream.close();
        return fileName;
    } catch (IOException e) {
        System.err.println("copyUplaodedPath " + e.getCause());
        e.printStackTrace();
        return null;
    }
}
From source file:org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestCapacityScheduler.java
@Test
public void testRefreshQueuesMaxAllocationPerQueueLarge() throws Exception {
    // verify we can't set the per-queue allocation larger than the cluster setting
    CapacityScheduler cs = new CapacityScheduler();
    cs.setConf(new YarnConfiguration());
    cs.setRMContext(resourceManager.getRMContext());
    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
    setupQueueConfiguration(conf);
    cs.init(conf);
    cs.start();
    // change max allocation for the B3 queue to be larger than the cluster max
    setMaxAllocMb(conf, B3, YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB + 2048);
    try {
        cs.reinitialize(conf, mockContext);
        fail("should have thrown exception");
    } catch (IOException e) {
        assertTrue("maximum allocation exception", e.getCause().getMessage().contains("maximum allocation"));
    }
    setMaxAllocMb(conf, B3, YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
    cs.reinitialize(conf, mockContext);
    setMaxAllocVcores(conf, B3, YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES + 1);
    try {
        cs.reinitialize(conf, mockContext);
        fail("should have thrown exception");
    } catch (IOException e) {
        assertTrue("maximum allocation exception", e.getCause().getMessage().contains("maximum allocation"));
    }
}
From source file:org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestCapacityScheduler.java
@Test
public void testRefreshQueuesMaxAllocationCSError() throws Exception {
    // Try to refresh the cluster-level max allocation size to be smaller;
    // it should error out
    CapacityScheduler cs = new CapacityScheduler();
    cs.setConf(new YarnConfiguration());
    cs.setRMContext(resourceManager.getRMContext());
    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
    setupQueueConfiguration(conf);
    setMaxAllocMb(conf, 10240);
    setMaxAllocVcores(conf, 10);
    setMaxAllocMb(conf, A1, 4096);
    setMaxAllocVcores(conf, A1, 4);
    cs.init(conf);
    cs.start();
    cs.reinitialize(conf, mockContext);
    checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY);
    assertEquals("max allocation MB in CS", 10240, cs.getMaximumResourceCapability().getMemorySize());
    assertEquals("max allocation vcores in CS", 10, cs.getMaximumResourceCapability().getVirtualCores());
    setMaxAllocMb(conf, 6144);
    try {
        cs.reinitialize(conf, mockContext);
        fail("should have thrown exception");
    } catch (IOException e) {
        assertTrue("max allocation exception", e.getCause().toString().contains("not be decreased"));
    }
    setMaxAllocMb(conf, 10240);
    cs.reinitialize(conf, mockContext);
    setMaxAllocVcores(conf, 8);
    try {
        cs.reinitialize(conf, mockContext);
        fail("should have thrown exception");
    } catch (IOException e) {
        assertTrue("max allocation exception", e.getCause().toString().contains("not be decreased"));
    }
}
From source file:org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestCapacityScheduler.java
@Test
public void testRefreshQueuesMaxAllocationRefresh() throws Exception {
    // queue refresh should not allow the per-queue maximum allocation setting
    // to become smaller than the previous setting
    CapacityScheduler cs = new CapacityScheduler();
    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
    setupQueueConfiguration(conf);
    cs.setConf(new YarnConfiguration());
    cs.setRMContext(resourceManager.getRMContext());
    cs.init(conf);
    cs.start();
    cs.reinitialize(conf, mockContext);
    checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY);
    assertEquals("max allocation in CS", YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
            cs.getMaximumResourceCapability().getMemorySize());
    assertEquals("max gpu allocation in CS", YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_GPUS,
            cs.getMaximumResourceCapability().getGPUs());
    assertEquals("max allocation for A1", YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
            conf.getMaximumAllocationPerQueue(A1).getMemorySize());
    assertEquals("max allocation", YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
            conf.getMaximumAllocation().getMemorySize());
    CSQueue rootQueue = cs.getRootQueue();
    CSQueue queueA = findQueue(rootQueue, A);
    CSQueue queueA1 = findQueue(queueA, A1);
    assertEquals("queue max allocation", ((LeafQueue) queueA1).getMaximumAllocation().getMemorySize(), 8192);
    setMaxAllocMb(conf, A1, 4096);
    try {
        cs.reinitialize(conf, mockContext);
        fail("should have thrown exception");
    } catch (IOException e) {
        assertTrue("max allocation exception", e.getCause().toString().contains("not be decreased"));
    }
    setMaxAllocMb(conf, A1, 8192);
    cs.reinitialize(conf, mockContext);
    setMaxAllocVcores(conf, A1, YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES - 1);
    try {
        cs.reinitialize(conf, mockContext);
        fail("should have thrown exception");
    } catch (IOException e) {
        assertTrue("max allocation exception", e.getCause().toString().contains("not be decreased"));
    }
}
From source file:com.bouncestorage.swiftproxy.v1.ObjectResource.java
@PUT
public Response putObject(@NotNull @PathParam("container") String container,
        @NotNull @Encoded @PathParam("object") String objectName,
        @NotNull @PathParam("account") String account,
        @QueryParam("multipart-manifest") String multiPartManifest,
        @QueryParam("signature") String signature,
        @QueryParam("expires") String expires,
        @HeaderParam(DYNAMIC_OBJECT_MANIFEST) String objectManifest,
        @HeaderParam("X-Auth-Token") String authToken,
        @HeaderParam(HttpHeaders.CONTENT_LENGTH) String contentLengthParam,
        @HeaderParam("Transfer-Encoding") String transferEncoding,
        @HeaderParam(HttpHeaders.CONTENT_TYPE) MediaType contentType,
        @HeaderParam("X-Detect-Content-Type") boolean detectContentType,
        @HeaderParam("X-Copy-From") String copyFrom,
        @HeaderParam("X-Copy-From-Account") String copyFromAccount,
        @HeaderParam(HttpHeaders.ETAG) String eTag,
        @HeaderParam(HttpHeaders.CONTENT_DISPOSITION) String contentDisposition,
        @HeaderParam(HttpHeaders.CONTENT_ENCODING) String contentEncoding,
        @HeaderParam("X-Delete-At") long deleteAt,
        @HeaderParam("X-Delete-After") long deleteAfter,
        @HeaderParam(HttpHeaders.IF_MATCH) String ifMatch,
        @HeaderParam(HttpHeaders.IF_NONE_MATCH) String ifNoneMatch,
        @HeaderParam(HttpHeaders.IF_MODIFIED_SINCE) Date ifModifiedSince,
        @HeaderParam(HttpHeaders.IF_UNMODIFIED_SINCE) Date ifUnmodifiedSince,
        @HeaderParam(SwiftHeaders.OBJECT_COPY_FRESH_METADATA) boolean freshMetadata,
        @Context Request request) {
    //objectName = normalizePath(objectName);
    if (objectName.length() > InfoResource.CONFIG.swift.max_object_name_length) {
        return badRequest();
    }
    if (transferEncoding != null && !"chunked".equals(transferEncoding)) {
        return Response.status(Response.Status.NOT_IMPLEMENTED).build();
    }
    if (contentLengthParam == null && !"chunked".equals(transferEncoding)) {
        return Response.status(Response.Status.LENGTH_REQUIRED).build();
    }
    long contentLength = contentLengthParam == null ? 0 : Long.parseLong(contentLengthParam);
    logger.info("PUT {}", objectName);
    if (copyFromAccount == null) {
        copyFromAccount = account;
    }
    if (copyFrom != null) {
        Pair<String, String> copy = validateCopyParam(copyFrom);
        return copyObject(copy.getFirst(), copy.getSecond(), copyFromAccount, authToken,
                container + "/" + objectName, account, null, contentType.toString(), contentEncoding,
                contentDisposition, ifMatch, ifModifiedSince, ifUnmodifiedSince, freshMetadata, request);
    }
    Map<String, String> metadata = getUserMetadata(request);
    validateUserMetadata(metadata);
    InputStream copiedStream = null;
    BlobStore blobStore = getBlobStore(authToken).get(container, objectName);
    if ("put".equals(multiPartManifest)) {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try (TeeInputStream tee = new TeeInputStream(request.getInputStream(), buffer, true)) {
            ManifestEntry[] manifest = readSLOManifest(tee);
            validateManifest(manifest, blobStore, authToken);
            Pair<Long, String> sizeAndEtag = getManifestTotalSizeAndETag(Arrays.asList(manifest));
            metadata.put(STATIC_OBJECT_MANIFEST, sizeAndEtag.getFirst() + " " + sizeAndEtag.getSecond());
            copiedStream = new ByteArrayInputStream(buffer.toByteArray());
        } catch (IOException e) {
            throw propagate(e);
        }
    } else if (objectManifest != null) {
        metadata.put(DYNAMIC_OBJECT_MANIFEST, objectManifest);
    }
    if (!blobStore.containerExists(container)) {
        return notFound();
    }
    HashCode contentMD5 = null;
    if (eTag != null) {
        try {
            contentMD5 = HashCode.fromBytes(BaseEncoding.base16().lowerCase().decode(eTag));
        } catch (IllegalArgumentException iae) {
            throw new ClientErrorException(422, iae); // Unprocessable Entity
        }
        if (contentMD5.bits() != Hashing.md5().bits()) {
            // Unprocessable Entity
            throw new ClientErrorException(contentMD5.bits() + " != " + Hashing.md5().bits(), 422);
        }
    }
    try (InputStream is = copiedStream != null ? copiedStream : request.getInputStream()) {
        BlobBuilder.PayloadBlobBuilder builder = blobStore.blobBuilder(objectName)
                .userMetadata(metadata).payload(is);
        if (contentDisposition != null) {
            builder.contentDisposition(contentDisposition);
        }
        if (contentEncoding != null) {
            builder.contentEncoding(contentEncoding);
        }
        if (contentType != null) {
            builder.contentType(contentType.toString());
        }
        if (contentLengthParam != null) {
            builder.contentLength(contentLength);
        }
        if (contentMD5 != null) {
            builder.contentMD5(contentMD5);
        }
        try {
            String remoteETag;
            try {
                remoteETag = blobStore.putBlob(container, builder.build());
            } catch (HttpResponseException e) {
                HttpResponse response = e.getResponse();
                if (response == null) {
                    throw e;
                }
                int code = response.getStatusCode();
                if (code == 400 && !"openstack-swift".equals(blobStore.getContext().unwrap().getId())) {
                    // swift expects 422 for an md5 mismatch
                    throw new ClientErrorException(response.getStatusLine(), 422, e.getCause());
                } else {
                    throw new ClientErrorException(response.getStatusLine(), code, e.getCause());
                }
            }
            BlobMetadata meta = blobStore.blobMetadata(container, objectName);
            return Response.status(Response.Status.CREATED)
                    .header(HttpHeaders.ETAG, remoteETag)
                    .header(HttpHeaders.LAST_MODIFIED, meta.getLastModified())
                    .header(HttpHeaders.CONTENT_LENGTH, 0)
                    .header(HttpHeaders.CONTENT_TYPE, contentType)
                    .header(HttpHeaders.DATE, new Date()).build();
        } catch (ContainerNotFoundException e) {
            return notFound();
        }
    } catch (IOException e) {
        return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
    }
}
From source file:ch.entwine.weblounge.common.impl.request.Http11ProtocolHandler.java
/**
 * Method generateResponse.
 *
 * @param resp
 * @param type
 * @param is
 * @return boolean
 * @throws IOException
 *           if generating the response fails
 */
public static boolean generateResponse(HttpServletResponse resp, Http11ResponseType type, InputStream is)
        throws IOException {

    /* first generate the response headers */
    generateHeaders(resp, type);

    /* adjust the statistics */
    ++stats[STATS_BODY_GENERATED];
    incResponseStats(type.type, bodyStats);

    /* generate the response body */
    try {
        if (resp.isCommitted())
            log.warn("Response is already committed!");

        switch (type.type) {
        case RESPONSE_OK:
            if (!type.isHeaderOnly() && is != null) {
                resp.setBufferSize(BUFFER_SIZE);
                OutputStream os = null;
                try {
                    os = resp.getOutputStream();
                    IOUtils.copy(is, os);
                } catch (IOException e) {
                    if (RequestUtils.isCausedByClient(e))
                        return true;
                } finally {
                    IOUtils.closeQuietly(os);
                }
            }
            break;
        case RESPONSE_PARTIAL_CONTENT:
            if (type.from < 0 || type.to < 0 || type.from > type.to || type.to > type.size) {
                resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Invalid partial content parameters");
                log.warn("Invalid partial content parameters");
            } else if (!type.isHeaderOnly() && is != null) {
                resp.setBufferSize(BUFFER_SIZE);
                OutputStream os = resp.getOutputStream();
                if (is.skip(type.from) != type.from) {
                    resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Premature end of input stream");
                    log.warn("Premature end of input stream");
                    break;
                }
                try {
                    /* get the temporary buffer for this thread */
                    byte[] tmp = buffer.get();
                    if (tmp == null) {
                        tmp = new byte[BUFFER_SIZE];
                        buffer.set(tmp);
                    }
                    int read = type.to - type.from;
                    int copy = read;
                    int write = 0;
                    read = is.read(tmp);
                    while (copy > 0 && read >= 0) {
                        copy -= read;
                        write = copy > 0 ? read : read + copy;
                        os.write(tmp, 0, write);
                        stats[STATS_BYTES_WRITTEN] += write;
                        read = is.read(tmp);
                    }
                    if (copy > 0) {
                        resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Premature end of input stream");
                        log.warn("Premature end of input stream");
                        break;
                    }
                    os.flush();
                    os.close();
                } catch (SocketException e) {
                    log.debug("Request cancelled by client");
                }
            }
            break;
        case RESPONSE_NOT_MODIFIED:
            /* NOTE: we MUST NOT return any content (RFC 2616)!!! */
            break;
        case RESPONSE_PRECONDITION_FAILED:
            if (type.err == null)
                resp.sendError(HttpServletResponse.SC_PRECONDITION_FAILED);
            else
                resp.sendError(HttpServletResponse.SC_PRECONDITION_FAILED, type.err);
            break;
        case RESPONSE_REQUESTED_RANGE_NOT_SATISFIABLE:
            if (type.err == null)
                resp.sendError(HttpServletResponse.SC_REQUESTED_RANGE_NOT_SATISFIABLE);
            else
                resp.sendError(HttpServletResponse.SC_REQUESTED_RANGE_NOT_SATISFIABLE, type.err);
            break;
        case RESPONSE_METHOD_NOT_ALLOWED:
            if (type.err == null)
                resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED);
            else
                resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, type.err);
            break;
        case RESPONSE_INTERNAL_SERVER_ERROR:
        default:
            if (type.err == null)
                resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
            else
                resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, type.err);
        }
    } catch (IOException e) {
        if (e instanceof EOFException) {
            log.debug("Request canceled by client");
            return true;
        }
        ++stats[STATS_ERRORS];
        String message = e.getCause() != null ? e.getCause().getMessage() : e.getMessage();
        Throwable cause = e.getCause() != null ? e.getCause() : e;
        log.warn("I/O exception while sending response: {}", message, cause);
        throw e;
    }
    return true;
}
From source file:ch.iterate.openstack.swift.Client.java
/**
 * @param container          The name of the container
 * @param name               The name of the object
 * @param entity             The name of the request entity (make sure to set the Content-Type)
 * @param metadata           The metadata for the object
 * @param md5sum             The 32 character hex encoded MD5 sum of the data
 * @param objectSize         The total size in bytes of the object to be stored
 * @param segmentSize        Optional size in bytes of the object segments to be stored (forces large object support); default 4G
 * @param dynamicLargeObject Optional setting to use dynamic large objects; False/null will use static large objects if required
 * @param segmentContainer   Optional name of the container to store file segments; defaults to storing chunks in the same container in which the file will appear
 * @param segmentFolder      Optional name of the folder for storing file segments, defaults to ".chunks/"
 * @param leaveSegments      Optional setting to leave segments of large objects in place when the manifest is overwritten/changed
 * @return The ETAG if the save was successful, null otherwise
 * @throws GenericException There was a protocol level error talking to CloudFiles
 */
public String storeObject(Region region, String container, String name, HttpEntity entity,
        Map<String, String> metadata, String md5sum, Long objectSize, Long segmentSize,
        Boolean dynamicLargeObject, String segmentContainer, String segmentFolder, Boolean leaveSegments)
        throws IOException, InterruptedException {
    /*
     * Default values for large object support. We also use the defaults combined with the inputs
     * to determine whether to store as a large object.
     */

    /*
     * The maximum size of a single object (5GiB).
     */
    long singleObjectSizeLimit = (long) (5 * Math.pow(1024, 3));

    /*
     * The default minimum segment size (1MiB).
     */
    long minSegmentSize = 1024L * 1024L;

    /*
     * Set the segment size.
     *
     * Defaults to 4GiB segments, and will not permit smaller than 1MiB segments.
     */
    long actualSegmentSize = (segmentSize == null) ? (long) (4 * Math.pow(1024, 3))
            : Math.max(segmentSize, minSegmentSize);

    /*
     * Determines if we will store using large objects - we may do this for 3 reasons:
     *
     * - A segmentSize has been specified and the object size is greater than the minimum segment size
     * - If an objectSize is provided and is larger than the single object size limit of 5GiB
     * - A segmentSize has been specified, but no objectSize given (we take this as a request for segmentation)
     *
     * The last case may fail if the user does not provide at least as much data as the minimum segment
     * size configured on the server, and will always produce a large object structure (even if only one
     * small segment is required).
     */
    objectSize = (objectSize == null) ? -1 : objectSize;
    boolean useLargeObject = ((segmentSize != null) && (objectSize > actualSegmentSize))
            || (objectSize > singleObjectSizeLimit)
            || ((segmentSize != null) && (objectSize == -1));
    if (!useLargeObject) {
        return storeObject(region, container, name, entity, metadata, md5sum);
    } else {
        /*
         * We need to upload a large object as defined by the method
         * parameters. For now this is done sequentially, but a parallel
         * version using appropriate random access to the underlying data
         * may be desirable.
         *
         * We make the assumption that the given file size will not be
         * greater than int.MAX_VALUE * segmentSize
         */
        leaveSegments = (leaveSegments == null) ? Boolean.FALSE : leaveSegments;
        dynamicLargeObject = (dynamicLargeObject == null) ? Boolean.FALSE : dynamicLargeObject;
        segmentFolder = (segmentFolder == null) ? ".file-segments" : segmentFolder;
        segmentContainer = (segmentContainer == null) ? container : segmentContainer;

        Map<String, List<StorageObject>> oldSegmentsToRemove = null;

        /*
         * If we have chosen not to leave existing large object segments in place (default)
         * then we need to collect information about any existing file segments so that we can
         * deal with them after we complete the upload of the new manifest.
         *
         * We should only delete existing segments after a successful upload of a new manifest file
         * because this constitutes an object update and the older file should remain available
         * until the new file can be downloaded.
         */
        if (!leaveSegments) {
            ObjectMetadata existingMetadata;
            String manifestDLO = null;
            Boolean manifestSLO = Boolean.FALSE;
            try {
                existingMetadata = getObjectMetaData(region, container, name);
                if (existingMetadata.getMetaData().containsKey(Constants.MANIFEST_HEADER)) {
                    manifestDLO = existingMetadata.getMetaData().get(Constants.MANIFEST_HEADER);
                } else if (existingMetadata.getMetaData().containsKey(Constants.X_STATIC_LARGE_OBJECT)) {
                    JSONParser parser = new JSONParser();
                    String manifestSLOValue = existingMetadata.getMetaData().get(Constants.X_STATIC_LARGE_OBJECT);
                    manifestSLO = (Boolean) parser.parse(manifestSLOValue);
                }
            } catch (NotFoundException e) {
                /*
                 * Just means no object exists already, so continue
                 */
            } catch (ParseException e) {
                /*
                 * The X_STATIC_LARGE_OBJECT header existed but failed to parse.
                 * If a static large object already exists this must be set to "true".
                 * If we got here then the X_STATIC_LARGE_OBJECT header existed, but failed
                 * to parse as a boolean, so fail the upload as a precaution.
                 */
                return null;
            }
            if (manifestDLO != null) {
                /*
                 * We have found an existing dynamic large object, so use the prefix to get a list of
                 * existing objects. If we're putting up a new DLO, make sure the segment prefixes are
                 * different, then we can delete anything that's not in the new list if necessary.
                 */
                String oldContainer = manifestDLO.substring(0, manifestDLO.indexOf('/', 1));
                String oldPath = manifestDLO.substring(manifestDLO.indexOf('/', 1), manifestDLO.length());
                oldSegmentsToRemove = new HashMap<String, List<StorageObject>>();
                oldSegmentsToRemove.put(oldContainer, listObjects(region, oldContainer, oldPath));
            } else if (manifestSLO) {
                /*
                 * We have found an existing static large object, so grab the manifest data that
                 * details the existing segments - delete any later that we don't need any more
                 */
            }
        }

        int segmentNumber = 1;
        long timeStamp = System.currentTimeMillis() / 1000L;
        String segmentBase = String.format("%s/%d/%d", segmentFolder, timeStamp, objectSize);

        /*
         * Create a SubInputStream from the OutputStream we will pass to the
         * HttpEntity for writing content.
         */
        final PipedInputStream contentInStream = new PipedInputStream(64 * 1024);
        final PipedOutputStream contentOutStream = new PipedOutputStream(contentInStream);
        SubInputStream segmentStream = new SubInputStream(contentInStream, actualSegmentSize, false);

        /*
         * Fork the call to entity.writeTo() so that we can grab any exceptions raised
         */
        final HttpEntity e = entity;
        final Callable<Boolean> writer = new Callable<Boolean>() {
            public Boolean call() throws Exception {
                e.writeTo(contentOutStream);
                return Boolean.TRUE;
            }
        };
        ExecutorService writeExecutor = Executors.newSingleThreadExecutor();
        final Future<Boolean> future = writeExecutor.submit(writer);

        /*
         * Check the future for exceptions after we've finished uploading segments
         */
        Map<String, List<StorageObject>> newSegmentsAdded = new HashMap<String, List<StorageObject>>();
        List<StorageObject> newSegments = new LinkedList<StorageObject>();
        JSONArray manifestSLO = new JSONArray();
        boolean finished = false;

        /*
         * Upload each segment of the file by reading sections of the content input stream
         * until the entire underlying stream is complete
         */
        while (!finished) {
            String segmentName = String.format("%s/%08d", segmentBase, segmentNumber);
            String etag;
            boolean error = false;
            try {
                etag = storeObject(region, segmentContainer, segmentStream, "application/octet-stream",
                        segmentName, new HashMap<String, String>());
            } catch (IOException ex) {
                // Finished storing the object
                System.out.println("Caught IO Exception: " + ex.getMessage());
                ex.printStackTrace();
                throw ex;
            }
            String segmentPath = segmentContainer + "/" + segmentName;
            long bytesUploaded = segmentStream.getBytesProduced();

            /*
             * Create the appropriate manifest structure if we're making a static large object:
             *
             * - the ETAG returned by the simple upload
             * - the total size of the segment uploaded
             * - the path of the segment
             */
            if (!dynamicLargeObject) {
                JSONObject segmentJSON = new JSONObject();
                segmentJSON.put("path", segmentPath);
                segmentJSON.put("etag", etag);
                segmentJSON.put("size_bytes", bytesUploaded);
                manifestSLO.add(segmentJSON);
                newSegments.add(new StorageObject(segmentName));
            }

            segmentNumber++;
            if (!finished) {
                finished = segmentStream.endSourceReached();
            }
            newSegmentsAdded.put(segmentContainer, newSegments);
            System.out.println("JSON: " + manifestSLO.toString());
            if (error)
                return "";
            segmentStream.readMoreBytes(actualSegmentSize);
        }

        /*
         * Attempt to retrieve the return value from the write operation so that
         * any exceptions raised can be handled appropriately
         */
        try {
            future.get();
        } catch (InterruptedException ex) {
            /*
             * The write was interrupted... delete the segments?
             */
        } catch (ExecutionException ex) {
            /*
             * This should always be an IOException or a RuntimeException
             * because the call to entity.writeTo() only throws IOException
             */
            Throwable t = ex.getCause();
            if (t instanceof IOException) {
                throw (IOException) t;
            } else {
                throw (RuntimeException) t;
            }
        }

        /*
         * Create an appropriate manifest depending on our DLO/SLO choice
         */
        String manifestEtag = null;
        if (dynamicLargeObject) {
            /*
             * Empty manifest with a header detailing the shared prefix of the object segments
             */
            long manifestTimeStamp = System.currentTimeMillis() / 1000L;
            metadata.put("X-Object-Manifest", segmentBase);
            metadata.put("x-object-meta-mtime", String.format("%s", manifestTimeStamp));
            manifestEtag = storeObject(region, container, new ByteArrayInputStream(new byte[0]),
                    entity.getContentType().getValue(), name, metadata);
        } else {
            /*
             * Manifest containing a JSON list specifying the details of the object segments.
             */
            URIBuilder urlBuild = new URIBuilder(region.getStorageUrl(container, name));
            urlBuild.setParameter("multipart-manifest", "put");
            URI url;
            try {
                url = urlBuild.build();
                String manifestContent = manifestSLO.toString();
                InputStreamEntity manifestEntity = new InputStreamEntity(
                        new ByteArrayInputStream(manifestContent.getBytes()), -1);
                manifestEntity.setChunked(true);
                manifestEntity.setContentType(entity.getContentType());
                HttpPut method = new HttpPut(url);
                method.setEntity(manifestEntity);
                method.setHeader("x-static-large-object", "true");
                Response response = this.execute(method, new DefaultResponseHandler());
                if (response.getStatusCode() == HttpStatus.SC_CREATED) {
                    manifestEtag = response.getResponseHeader(HttpHeaders.ETAG).getValue();
                } else {
                    throw new GenericException(response);
                }
            } catch (URISyntaxException ex) {
                ex.printStackTrace();
            }
        }

        /*
         * Delete stale segments of the overwritten large object if requested.
         */
        if (!leaveSegments) {
            /*
             * Before deleting old segments, remove any objects from the delete list
             * that are also part of a new static large object that were updated during the upload.
             */
            if (!(oldSegmentsToRemove == null)) {
                for (String c : oldSegmentsToRemove.keySet()) {
                    List<StorageObject> rmv = oldSegmentsToRemove.get(c);
                    if (newSegmentsAdded.containsKey(c)) {
                        rmv.removeAll(newSegmentsAdded.get(c));
                    }
                    List<String> rmvNames = new LinkedList<String>();
                    for (StorageObject s : rmv) {
                        rmvNames.add(s.getName());
                    }
                    deleteObjects(region, c, rmvNames);
                }
            }
        }
        return manifestEtag;
    }
}
From source file:es.pode.publicacion.negocio.servicios.SrvPublicacionServiceImpl.java
/**
 * This method retrieves the properties.
 *
 * @param sKey
 *            the property key
 * @return the property value as a String
 */
public static String getPropertyValue(String sKey) {
    String sReturn = new String();
    try {
        if (props == null) {
            InputStream fIsSpringProperties = SrvPublicacionServiceBase.class
                    .getResourceAsStream(FILE_NAME_PROPERTIES);
            props = new java.util.Properties();
            props.load(fIsSpringProperties);
        }
        sReturn = props.getProperty(sKey);
        if (logger.isDebugEnabled())
            logger.debug("propiedad obtenida: " + sReturn.toString());
    } catch (IOException e) {
        logger.warn("Excepcion intentando obtener propiedad [" + sKey
                + "] del fichero de propiedades del publicador[" + e.getCause() + "]");
    }
    // return the property
    return sReturn;
}
From source file:com.mario22gmail.license.nfc_project.NavigationDrawerActivity.java
public void EnterButtonClick(View view)
        throws ReaderException, GeneralSecurityException, IOException, SmartCardException {
    if (card != null) {
        try {
            mDBApi.getSession().startOAuth2Authentication(NavigationDrawerActivity.this);
            Log.i(nfcDebugTag, "Card Detected din buton : " + card.getCardDetails().cardName);
            card.authenticate(DESFireEV1.AuthType.Native, 2, (byte) 0, 0, (byte) 0, null);
            Log.i(nfcDebugTag, "Applicatie authentificata");
            card.selectApplication(11);
            card.authenticate(DESFireEV1.AuthType.Native, 2, (byte) 0, 0, (byte) 0, null);
            Log.i(nfcDebugTag, "Applicatie authentificata");
            card.getReader().setTimeout(5000);

            ArrayList<WebsitesCredentials> credentials = null;
            try {
                credentials = GetWebsitesFromDesfire();
            } catch (IOException e) {
                Log.i(nfcDebugTag, "io exception" + e.getMessage());
                e.printStackTrace();
            } catch (SmartCardException e) {
                Log.i(nfcDebugTag, "smart card exception" + e.getMessage());
                e.printStackTrace();
            } catch (GeneralSecurityException e) {
                Log.i(nfcDebugTag, "general security exception" + e.getMessage());
                e.printStackTrace();
            }

            FragmentCardContent fragmentCardContent = new FragmentCardContent();
            fragmentCardContent.InitializeCredentials(credentials);
            ChangeFragment(fragmentCardContent);

            // DesFireCreateApplication(card, 4);
            // DesFireCreateApplication(card, 3);
            // String textCard = "Mario e tare";
            // byte[] textBytes = textCard.getBytes();
            // int[] appIds = card.getApplicationIDs();
            //
            // for (int i = 0; i < appIds.length; i++) {
            //     Log.i(nfcDebugTag, appIds[i] + " ");
            // }

            // create file nr 1
            // card.createFile(1, new DESFireFile.StdDataFileSettings(
            //         DESFireEV1.CommunicationType.Plain, 0, 0, 0, 0, textBytes.length));
            // Log.i(nfcDebugTag, "fisier creat");

            // write data to file nr 1
            // card.authenticate(DESFireEV1.AuthType.Native, 2, (byte) 0, 0, (byte) 0, null);
            // Log.i(nfcDebugTag, "Applicatie authentificata");
            // card.writeData(1, 0, textBytes);
            // Log.i(nfcDebugTag, "fisier scris");
            //
            // card.selectApplication(0x00);
            // card.getReader().close();
            // Log.i(nfcDebugTag, "aplicatie 0 selectata din buton");

            // FragmentWebCredentialsOnCard chooseFragment = new FragmentWebCredentialsOnCard();
            // android.support.v4.app.FragmentTransaction fragmentTransaction = getSupportFragmentManager().beginTransaction();
            // fragmentTransaction.replace(R.id.FragmentContainer, chooseFragment);
            // fragmentTransaction.commit();
        } catch (TagLostException e) {
            Log.i(nfcDebugTag, "Tag Lost" + e.getMessage());
            TextView errorLabel = (TextView) findViewById(R.id.textViewTagLostError);
            errorLabel.setTextColor(Color.RED);
            errorLabel.setText("Aproprie cardul nfc");
        } catch (DESFireException e) {
            Log.i(nfcDebugTag, "Desfire Exception" + e.getMessage());
        } catch (Exception e) {
            Log.i(nfcDebugTag, "Nu e card" + e + " ");
            Log.i(nfcDebugTag, "Nu e card " + e.getCause());
            TextView errorLabel = (TextView) findViewById(R.id.textViewTagLostError);
            errorLabel.setTextColor(Color.RED);
            errorLabel.setText("Aproprie cardul nfc");
        }
    }
}