List of usage examples for org.apache.commons.lang3.StringUtils.substringBeforeLast
public static String substringBeforeLast(final String str, final String separator)
Gets the substring before the last occurrence of a separator. The separator itself is not returned. A null input returns null; if the separator is null, empty, or not found, the input string is returned unchanged.
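A quick, self-contained sketch of those edge cases, based on the method's javadoc (the class name is just for illustration):

import org.apache.commons.lang3.StringUtils;

public class SubstringBeforeLastDemo {
    public static void main(String[] args) {
        System.out.println(StringUtils.substringBeforeLast("abcba", "b")); // "abc"
        System.out.println(StringUtils.substringBeforeLast("a.b.c", ".")); // "a.b"
        System.out.println(StringUtils.substringBeforeLast("a", "z"));     // "a"  (separator not found)
        System.out.println(StringUtils.substringBeforeLast("a", ""));      // "a"  (empty separator)
        System.out.println(StringUtils.substringBeforeLast(null, "/"));    // null (null-safe)
    }
}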
From source file:org.apache.cxf.jaxrs.openapi.OpenApiCustomizer.java
public OpenAPIConfiguration customize(final OpenAPIConfiguration configuration) {
    if (configuration == null) {
        return configuration;
    }

    if (dynamicBasePath) {
        final MessageContext ctx = createMessageContext();

        // If the JAX-RS application with custom path is defined, it might be present twice, in the
        // request URI as well as in each resource operation URI. To properly represent server URL,
        // the application path should be removed from it.
        final String url = StringUtils.removeEnd(
                StringUtils.substringBeforeLast(ctx.getUriInfo().getRequestUri().toString(), "/"),
                applicationPath);

        final Collection<Server> servers = configuration.getOpenAPI().getServers();
        if (servers == null || servers.stream().noneMatch(s -> s.getUrl().equalsIgnoreCase(url))) {
            configuration.getOpenAPI().setServers(Collections.singletonList(new Server().url(url)));
        }
    }

    return configuration;
}
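Distilling the server-URL derivation above into a standalone sketch; the request URI and application path values here are hypothetical:

import org.apache.commons.lang3.StringUtils;

public class ServerUrlSketch {
    public static void main(String[] args) {
        // Hypothetical request URI and JAX-RS application path
        String requestUri = "https://host:8443/app/api/openapi.json";
        String applicationPath = "/app/api";

        // Drop the resource name, then strip the application path from the tail
        String url = StringUtils.removeEnd(
                StringUtils.substringBeforeLast(requestUri, "/"), applicationPath);
        System.out.println(url); // https://host:8443
    }
}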
From source file:org.apache.cxf.jaxrs.swagger.DefaultSwagger2Serializers.java
@Override
public void writeTo(final Swagger data, final Class<?> type, final Type genericType,
        final Annotation[] annotations, final MediaType mediaType,
        final MultivaluedMap<String, Object> headers, final OutputStream out) throws IOException {

    if (dynamicBasePath) {
        MessageContext ctx = JAXRSUtils.createContextValue(JAXRSUtils.getCurrentMessage(), null,
                MessageContext.class);
        String currentBasePath = StringUtils.substringBeforeLast(ctx.getHttpServletRequest().getRequestURI(), "/");
        if (!currentBasePath.equals(beanConfig.getBasePath())) {
            data.setBasePath(currentBasePath);
            data.setHost(beanConfig.getHost());
            data.setInfo(beanConfig.getInfo());
        }
        if (beanConfig.getSwagger() != null && beanConfig.getSwagger().getSecurityDefinitions() != null
                && data.getSecurityDefinitions() == null) {
            data.setSecurityDefinitions(beanConfig.getSwagger().getSecurityDefinitions());
        }
    }

    if (replaceTags || javadocProvider != null) {
        Map<String, ClassResourceInfo> operations = new HashMap<>();
        Map<Pair<String, String>, OperationResourceInfo> methods = new HashMap<>();
        for (ClassResourceInfo cri : cris) {
            for (OperationResourceInfo ori : cri.getMethodDispatcher().getOperationResourceInfos()) {
                String normalizedPath = getNormalizedPath(cri.getURITemplate().getValue(),
                        ori.getURITemplate().getValue());
                operations.put(normalizedPath, cri);
                methods.put(ImmutablePair.of(ori.getHttpMethod(), normalizedPath), ori);
            }
        }

        if (replaceTags && data.getTags() != null) {
            data.getTags().clear();
        }

        for (final Map.Entry<String, Path> entry : data.getPaths().entrySet()) {
            Tag tag = null;
            if (replaceTags && operations.containsKey(entry.getKey())) {
                ClassResourceInfo cri = operations.get(entry.getKey());
                tag = new Tag();
                tag.setName(cri.getURITemplate().getValue().replaceAll("/", "_"));
                if (javadocProvider != null) {
                    tag.setDescription(javadocProvider.getClassDoc(cri));
                }
                data.addTag(tag);
            }

            for (Map.Entry<HttpMethod, Operation> subentry : entry.getValue().getOperationMap().entrySet()) {
                if (replaceTags && tag != null) {
                    subentry.getValue().setTags(Collections.singletonList(tag.getName()));
                }

                Pair<String, String> key = ImmutablePair.of(subentry.getKey().name(), entry.getKey());
                if (methods.containsKey(key) && javadocProvider != null) {
                    OperationResourceInfo ori = methods.get(key);
                    subentry.getValue().setSummary(javadocProvider.getMethodDoc(ori));
                    for (int i = 0; i < subentry.getValue().getParameters().size(); i++) {
                        subentry.getValue().getParameters().get(i)
                                .setDescription(javadocProvider.getMethodParameterDoc(ori, i));
                    }
                    addParameters(subentry.getValue().getParameters());

                    if (subentry.getValue().getResponses() != null
                            && !subentry.getValue().getResponses().isEmpty()) {
                        subentry.getValue().getResponses().entrySet().iterator().next().getValue()
                                .setDescription(javadocProvider.getMethodResponseDoc(ori));
                    }
                }
            }
        }
    }

    if (replaceTags && data.getTags() != null) {
        Collections.sort(data.getTags(), new Comparator<Tag>() {
            @Override
            public int compare(final Tag tag1, final Tag tag2) {
                return tag1.getName().compareTo(tag2.getName());
            }
        });
    }

    super.writeTo(data, type, genericType, annotations, mediaType, headers, out);
}
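The base-path derivation here (and in the two Swagger2 examples that follow) reduces to a single substringBeforeLast call; a minimal sketch with a hypothetical request URI:

import org.apache.commons.lang3.StringUtils;

public class BasePathSketch {
    public static void main(String[] args) {
        // Hypothetical servlet request URI for a swagger.json resource
        String requestUri = "/services/rest/swagger.json";

        // Everything before the last "/" is the base path of the resource
        String currentBasePath = StringUtils.substringBeforeLast(requestUri, "/");
        System.out.println(currentBasePath); // /services/rest
    }
}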
From source file:org.apache.cxf.jaxrs.swagger.Swagger2Customizer.java
public Swagger customize(Swagger data) {
    if (dynamicBasePath) {
        MessageContext ctx = createMessageContext();
        String currentBasePath = StringUtils.substringBeforeLast(ctx.getHttpServletRequest().getRequestURI(), "/");
        if (!currentBasePath.equals(beanConfig.getBasePath())) {
            data.setBasePath(currentBasePath);
            data.setHost(beanConfig.getHost());
            data.setInfo(beanConfig.getInfo());
        }
        if (beanConfig.getSwagger() != null && beanConfig.getSwagger().getSecurityDefinitions() != null
                && data.getSecurityDefinitions() == null) {
            data.setSecurityDefinitions(beanConfig.getSwagger().getSecurityDefinitions());
        }
    }

    if (replaceTags || javadocProvider != null) {
        Map<String, ClassResourceInfo> operations = new HashMap<>();
        Map<Pair<String, String>, OperationResourceInfo> methods = new HashMap<>();
        for (ClassResourceInfo cri : cris) {
            for (OperationResourceInfo ori : cri.getMethodDispatcher().getOperationResourceInfos()) {
                String normalizedPath = getNormalizedPath(cri.getURITemplate().getValue(),
                        ori.getURITemplate().getValue());
                operations.put(normalizedPath, cri);
                methods.put(ImmutablePair.of(ori.getHttpMethod(), normalizedPath), ori);
            }
        }

        if (replaceTags && data.getTags() != null) {
            data.getTags().clear();
        }

        for (final Map.Entry<String, Path> entry : data.getPaths().entrySet()) {
            Tag tag = null;
            if (replaceTags && operations.containsKey(entry.getKey())) {
                ClassResourceInfo cri = operations.get(entry.getKey());
                tag = new Tag();
                tag.setName(cri.getURITemplate().getValue().replaceAll("/", "_"));
                if (javadocProvider != null) {
                    tag.setDescription(javadocProvider.getClassDoc(cri));
                }
                data.addTag(tag);
            }

            for (Map.Entry<HttpMethod, Operation> subentry : entry.getValue().getOperationMap().entrySet()) {
                if (replaceTags && tag != null) {
                    subentry.getValue().setTags(Collections.singletonList(tag.getName()));
                }

                Pair<String, String> key = ImmutablePair.of(subentry.getKey().name(), entry.getKey());
                if (methods.containsKey(key) && javadocProvider != null) {
                    OperationResourceInfo ori = methods.get(key);
                    subentry.getValue().setSummary(javadocProvider.getMethodDoc(ori));
                    for (int i = 0; i < subentry.getValue().getParameters().size(); i++) {
                        subentry.getValue().getParameters().get(i)
                                .setDescription(javadocProvider.getMethodParameterDoc(ori, i));
                    }
                    addParameters(subentry.getValue().getParameters());

                    if (subentry.getValue().getResponses() != null
                            && !subentry.getValue().getResponses().isEmpty()) {
                        subentry.getValue().getResponses().entrySet().iterator().next().getValue()
                                .setDescription(javadocProvider.getMethodResponseDoc(ori));
                    }
                }
            }
        }
    }

    if (replaceTags && data.getTags() != null) {
        Collections.sort(data.getTags(), new Comparator<Tag>() {
            @Override
            public int compare(final Tag tag1, final Tag tag2) {
                return tag1.getName().compareTo(tag2.getName());
            }
        });
    }

    applyDefaultVersion(data);
    return data;
}
From source file:org.apache.cxf.jaxrs.swagger.Swagger2Serializers.java
@Override
public void writeTo(final Swagger data, final Class<?> type, final Type genericType,
        final Annotation[] annotations, final MediaType mediaType,
        final MultivaluedMap<String, Object> headers, final OutputStream out) throws IOException {

    if (dynamicBasePath) {
        MessageContext ctx = JAXRSUtils.createContextValue(JAXRSUtils.getCurrentMessage(), null,
                MessageContext.class);
        data.setBasePath(StringUtils.substringBeforeLast(ctx.getHttpServletRequest().getRequestURI(), "/"));
    }

    if (replaceTags || javadocProvider != null) {
        Map<String, ClassResourceInfo> operations = new HashMap<>();
        Map<Pair<String, String>, OperationResourceInfo> methods = new HashMap<>();
        for (ClassResourceInfo cri : cris) {
            for (OperationResourceInfo ori : cri.getMethodDispatcher().getOperationResourceInfos()) {
                String normalizedPath = getNormalizedPath(cri.getURITemplate().getValue(),
                        ori.getURITemplate().getValue());
                operations.put(normalizedPath, cri);
                methods.put(ImmutablePair.of(ori.getHttpMethod(), normalizedPath), ori);
            }
        }

        if (replaceTags && data.getTags() != null) {
            data.getTags().clear();
        }

        for (final Map.Entry<String, Path> entry : data.getPaths().entrySet()) {
            Tag tag = null;
            if (replaceTags && operations.containsKey(entry.getKey())) {
                ClassResourceInfo cri = operations.get(entry.getKey());
                tag = new Tag();
                tag.setName(cri.getURITemplate().getValue());
                if (javadocProvider != null) {
                    tag.setDescription(javadocProvider.getClassDoc(cri));
                }
                data.addTag(tag);
            }

            for (Map.Entry<HttpMethod, Operation> subentry : entry.getValue().getOperationMap().entrySet()) {
                if (replaceTags && tag != null) {
                    subentry.getValue().setTags(Collections.singletonList(tag.getName()));
                }

                Pair<String, String> key = ImmutablePair.of(subentry.getKey().name(), entry.getKey());
                if (methods.containsKey(key) && javadocProvider != null) {
                    OperationResourceInfo ori = methods.get(key);
                    subentry.getValue().setSummary(javadocProvider.getMethodDoc(ori));
                    for (int i = 0; i < subentry.getValue().getParameters().size(); i++) {
                        subentry.getValue().getParameters().get(i)
                                .setDescription(javadocProvider.getMethodParameterDoc(ori, i));
                    }

                    if (subentry.getValue().getResponses() != null
                            && !subentry.getValue().getResponses().isEmpty()) {
                        subentry.getValue().getResponses().entrySet().iterator().next().getValue()
                                .setDescription(javadocProvider.getMethodResponseDoc(ori));
                    }
                }
            }
        }
    }

    super.writeTo(data, type, genericType, annotations, mediaType, headers, out);
}
From source file:org.apache.nifi.controller.state.providers.zookeeper.ZooKeeperStateProvider.java
private void createNode(final String path, final byte[] data, final String componentId,
        final Map<String, String> stateValues, final List<ACL> acls) throws IOException, KeeperException {
    try {
        if (data != null && data.length > ONE_MB) {
            throw new StateTooLargeException("Failed to set cluster-wide state in ZooKeeper for component with ID "
                    + componentId + " because the state had " + stateValues.size()
                    + " values, which serialized to " + data.length
                    + " bytes, and the maximum allowed by ZooKeeper is 1 MB (" + ONE_MB + " bytes)");
        }

        getZooKeeper().create(path, data, acls, CreateMode.PERSISTENT);
    } catch (final InterruptedException ie) {
        throw new IOException("Failed to update cluster-wide state due to interruption", ie);
    } catch (final KeeperException ke) {
        final Code exceptionCode = ke.code();
        if (Code.NONODE == exceptionCode) {
            final String parentPath = StringUtils.substringBeforeLast(path, "/");
            createNode(parentPath, null, componentId, stateValues, Ids.OPEN_ACL_UNSAFE);
            createNode(path, data, componentId, stateValues, acls);
            return;
        }
        if (Code.SESSIONEXPIRED == exceptionCode) {
            invalidateClient();
            createNode(path, data, componentId, stateValues, acls);
            return;
        }

        // Node already exists. Node must have been created by "someone else". Just set the data.
        if (Code.NODEEXISTS == exceptionCode) {
            try {
                getZooKeeper().setData(path, data, -1);
                return;
            } catch (final KeeperException ke1) {
                // Node no longer exists -- it was removed by someone else. Go recreate the node.
                if (ke1.code() == Code.NONODE) {
                    createNode(path, data, componentId, stateValues, acls);
                    return;
                }
            } catch (final InterruptedException ie) {
                throw new IOException("Failed to update cluster-wide state due to interruption", ie);
            }
        }

        throw ke;
    }
}
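The NONODE recovery branch walks up the ZNode tree one segment at a time; a minimal sketch of the parent-path derivation, with a hypothetical ZNode path:

import org.apache.commons.lang3.StringUtils;

public class ParentPathSketch {
    public static void main(String[] args) {
        String path = "/nifi/components/1234"; // hypothetical ZNode path

        // Dropping everything after the last "/" yields the parent node's path
        String parentPath = StringUtils.substringBeforeLast(path, "/");
        System.out.println(parentPath); // /nifi/components
    }
}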
From source file:org.apache.nifi.processors.standard.FetchFile.java
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final StopWatch stopWatch = new StopWatch(true);
    final String filename = context.getProperty(FILENAME).evaluateAttributeExpressions(flowFile).getValue();
    final LogLevel levelFileNotFound = LogLevel.valueOf(context.getProperty(FILE_NOT_FOUND_LOG_LEVEL).getValue());
    final LogLevel levelPermDenied = LogLevel.valueOf(context.getProperty(PERM_DENIED_LOG_LEVEL).getValue());
    final File file = new File(filename);

    // Verify that file system is reachable and file exists
    Path filePath = file.toPath();
    if (!Files.exists(filePath) && !Files.notExists(filePath)) {
        // see https://docs.oracle.com/javase/tutorial/essential/io/check.html for more details
        getLogger().log(levelFileNotFound,
                "Could not fetch file {} from file system for {} because the existence of the file cannot be verified; routing to failure",
                new Object[] { file, flowFile });
        session.transfer(session.penalize(flowFile), REL_FAILURE);
        return;
    } else if (!Files.exists(filePath)) {
        getLogger().log(levelFileNotFound,
                "Could not fetch file {} from file system for {} because the file does not exist; routing to not.found",
                new Object[] { file, flowFile });
        session.getProvenanceReporter().route(flowFile, REL_NOT_FOUND);
        session.transfer(session.penalize(flowFile), REL_NOT_FOUND);
        return;
    }

    // Verify read permission on file
    final String user = System.getProperty("user.name");
    if (!isReadable(file)) {
        getLogger().log(levelPermDenied,
                "Could not fetch file {} from file system for {} due to user {} not having sufficient permissions to read the file; routing to permission.denied",
                new Object[] { file, flowFile, user });
        session.getProvenanceReporter().route(flowFile, REL_PERMISSION_DENIED);
        session.transfer(session.penalize(flowFile), REL_PERMISSION_DENIED);
        return;
    }

    // If configured to move the file and fail if unable to do so, check that the existing file does not exist
    // and that we have write permissions for the parent file.
    final String completionStrategy = context.getProperty(COMPLETION_STRATEGY).getValue();
    final String targetDirectoryName = context.getProperty(MOVE_DESTINATION_DIR)
            .evaluateAttributeExpressions(flowFile).getValue();
    if (targetDirectoryName != null) {
        final File targetDir = new File(targetDirectoryName);
        if (COMPLETION_MOVE.getValue().equalsIgnoreCase(completionStrategy)) {
            if (targetDir.exists() && (!isWritable(targetDir) || !isDirectory(targetDir))) {
                getLogger().error(
                        "Could not fetch file {} from file system for {} because Completion Strategy is configured to move the original file to {}, "
                                + "but that is not a directory or user {} does not have permissions to write to that directory",
                        new Object[] { file, flowFile, targetDir, user });
                session.transfer(flowFile, REL_FAILURE);
                return;
            }

            final String conflictStrategy = context.getProperty(CONFLICT_STRATEGY).getValue();
            if (CONFLICT_FAIL.getValue().equalsIgnoreCase(conflictStrategy)) {
                final File targetFile = new File(targetDir, file.getName());
                if (targetFile.exists()) {
                    getLogger().error(
                            "Could not fetch file {} from file system for {} because Completion Strategy is configured to move the original file to {}, "
                                    + "but a file with name {} already exists in that directory and the Move Conflict Strategy is configured for failure",
                            new Object[] { file, flowFile, targetDir, file.getName() });
                    session.transfer(flowFile, REL_FAILURE);
                    return;
                }
            }
        }
    }

    // import content from file system
    try (final FileInputStream fis = new FileInputStream(file)) {
        flowFile = session.importFrom(fis, flowFile);
    } catch (final IOException ioe) {
        getLogger().error("Could not fetch file {} from file system for {} due to {}; routing to failure",
                new Object[] { file, flowFile, ioe.toString() }, ioe);
        session.transfer(session.penalize(flowFile), REL_FAILURE);
        return;
    }

    session.getProvenanceReporter().modifyContent(flowFile,
            "Replaced content of FlowFile with contents of " + file.toURI(),
            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
    session.transfer(flowFile, REL_SUCCESS);

    // It is critical that we commit the session before we perform the Completion Strategy. Otherwise, we could have a case where we
    // ingest the file, delete/move the file, and then NiFi is restarted before the session is committed. That would result in data loss.
    // As long as we commit the session right here, before we perform the Completion Strategy, we are safe.
    session.commit();

    // Attempt to perform the Completion Strategy action
    Exception completionFailureException = null;
    if (COMPLETION_DELETE.getValue().equalsIgnoreCase(completionStrategy)) {
        // convert to path and use Files.delete instead of file.delete so that if we fail, we know why
        try {
            delete(file);
        } catch (final IOException ioe) {
            completionFailureException = ioe;
        }
    } else if (COMPLETION_MOVE.getValue().equalsIgnoreCase(completionStrategy)) {
        final File targetDirectory = new File(targetDirectoryName);
        final File targetFile = new File(targetDirectory, file.getName());
        try {
            if (targetFile.exists()) {
                final String conflictStrategy = context.getProperty(CONFLICT_STRATEGY).getValue();
                if (CONFLICT_KEEP_INTACT.getValue().equalsIgnoreCase(conflictStrategy)) {
                    // don't move, just delete the original
                    Files.delete(file.toPath());
                } else if (CONFLICT_RENAME.getValue().equalsIgnoreCase(conflictStrategy)) {
                    // rename to add a random UUID but keep the file extension if it has one.
                    final String simpleFilename = targetFile.getName();
                    final String newName;
                    if (simpleFilename.contains(".")) {
                        newName = StringUtils.substringBeforeLast(simpleFilename, ".") + "-"
                                + UUID.randomUUID().toString() + "."
                                + StringUtils.substringAfterLast(simpleFilename, ".");
                    } else {
                        newName = simpleFilename + "-" + UUID.randomUUID().toString();
                    }

                    move(file, new File(targetDirectory, newName), false);
                } else if (CONFLICT_REPLACE.getValue().equalsIgnoreCase(conflictStrategy)) {
                    move(file, targetFile, true);
                }
            } else {
                move(file, targetFile, false);
            }
        } catch (final IOException ioe) {
            completionFailureException = ioe;
        }
    }

    // Handle completion failures
    if (completionFailureException != null) {
        getLogger().warn(
                "Successfully fetched the content from {} for {} but failed to perform Completion Action due to {}; routing to success",
                new Object[] { file, flowFile, completionFailureException }, completionFailureException);
    }
}
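The CONFLICT_RENAME branch splits the filename around its last dot so the UUID lands before the extension; a standalone sketch (the filename is hypothetical):

import java.util.UUID;
import org.apache.commons.lang3.StringUtils;

public class RenameSketch {
    public static void main(String[] args) {
        String simpleFilename = "report.final.csv"; // hypothetical conflicting file
        String newName;
        if (simpleFilename.contains(".")) {
            // Keep everything before the LAST dot as the base name, so "report.final" stays intact
            newName = StringUtils.substringBeforeLast(simpleFilename, ".") + "-" + UUID.randomUUID()
                    + "." + StringUtils.substringAfterLast(simpleFilename, ".");
        } else {
            newName = simpleFilename + "-" + UUID.randomUUID();
        }
        System.out.println(newName); // e.g. report.final-<uuid>.csv
    }
}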
From source file:org.apache.nifi.processors.standard.FetchFileTransfer.java
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final StopWatch stopWatch = new StopWatch(true);
    final String host = context.getProperty(HOSTNAME).evaluateAttributeExpressions(flowFile).getValue();
    final int port = context.getProperty(UNDEFAULTED_PORT).evaluateAttributeExpressions(flowFile).asInteger();
    final String filename = context.getProperty(REMOTE_FILENAME).evaluateAttributeExpressions(flowFile).getValue();

    // Try to get a FileTransfer object from our cache.
    BlockingQueue<FileTransferIdleWrapper> transferQueue;
    synchronized (fileTransferMap) {
        final Tuple<String, Integer> tuple = new Tuple<>(host, port);
        transferQueue = fileTransferMap.get(tuple);
        if (transferQueue == null) {
            transferQueue = new LinkedBlockingQueue<>();
            fileTransferMap.put(tuple, transferQueue);
        }

        // periodically close idle connections
        if (System.currentTimeMillis() - lastClearTime > IDLE_CONNECTION_MILLIS) {
            closeConnections(false);
            lastClearTime = System.currentTimeMillis();
        }
    }

    // we have a queue of FileTransfer Objects. Get one from the queue or create a new one.
    FileTransfer transfer;
    FileTransferIdleWrapper transferWrapper = transferQueue.poll();
    if (transferWrapper == null) {
        transfer = createFileTransfer(context);
    } else {
        transfer = transferWrapper.getFileTransfer();
    }

    // Pull data from remote system.
    final InputStream in;
    try {
        in = transfer.getInputStream(filename, flowFile);

        flowFile = session.write(flowFile, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                StreamUtils.copy(in, out);
                transfer.flush();
            }
        });
        transferQueue.offer(new FileTransferIdleWrapper(transfer, System.nanoTime()));
    } catch (final FileNotFoundException e) {
        getLogger().error(
                "Failed to fetch content for {} from filename {} on remote host {} because the file could not be found on the remote system; routing to {}",
                new Object[] { flowFile, filename, host, REL_NOT_FOUND.getName() });
        session.transfer(session.penalize(flowFile), REL_NOT_FOUND);
        session.getProvenanceReporter().route(flowFile, REL_NOT_FOUND);
        return;
    } catch (final PermissionDeniedException e) {
        getLogger().error(
                "Failed to fetch content for {} from filename {} on remote host {} due to insufficient permissions; routing to {}",
                new Object[] { flowFile, filename, host, REL_PERMISSION_DENIED.getName() });
        session.transfer(session.penalize(flowFile), REL_PERMISSION_DENIED);
        session.getProvenanceReporter().route(flowFile, REL_PERMISSION_DENIED);
        return;
    } catch (final ProcessException | IOException e) {
        try {
            transfer.close();
        } catch (final IOException e1) {
            getLogger().warn("Failed to close connection to {}:{} due to {}",
                    new Object[] { host, port, e.toString() }, e);
        }

        getLogger().error(
                "Failed to fetch content for {} from filename {} on remote host {}:{} due to {}; routing to comms.failure",
                new Object[] { flowFile, filename, host, port, e.toString() }, e);
        session.transfer(session.penalize(flowFile), REL_COMMS_FAILURE);
        return;
    }

    // Add FlowFile attributes
    final String protocolName = transfer.getProtocolName();
    final Map<String, String> attributes = new HashMap<>();
    attributes.put(protocolName + ".remote.host", host);
    attributes.put(protocolName + ".remote.port", String.valueOf(port));
    attributes.put(protocolName + ".remote.filename", filename);

    if (filename.contains("/")) {
        final String path = StringUtils.substringBeforeLast(filename, "/");
        final String filenameOnly = StringUtils.substringAfterLast(filename, "/");
        attributes.put(CoreAttributes.PATH.key(), path);
        attributes.put(CoreAttributes.FILENAME.key(), filenameOnly);
    } else {
        attributes.put(CoreAttributes.FILENAME.key(), filename);
    }
    flowFile = session.putAllAttributes(flowFile, attributes);

    // emit provenance event and transfer FlowFile
    session.getProvenanceReporter().fetch(flowFile, protocolName + "://" + host + ":" + port + "/" + filename,
            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
    session.transfer(flowFile, REL_SUCCESS);

    // it is critical that we commit the session before moving/deleting the remote file. Otherwise, we could have a situation where
    // we ingest the data, delete/move the remote file, and then NiFi dies/is shut down before the session is committed. This would
    // result in data loss! If we commit the session first, we are safe.
    session.commit();

    final String completionStrategy = context.getProperty(COMPLETION_STRATEGY).getValue();
    if (COMPLETION_DELETE.getValue().equalsIgnoreCase(completionStrategy)) {
        try {
            transfer.deleteFile(null, filename);
        } catch (final FileNotFoundException e) {
            // file doesn't exist -- effectively the same as removing it. Move on.
        } catch (final IOException ioe) {
            getLogger().warn(
                    "Successfully fetched the content for {} from {}:{}{} but failed to remove the remote file due to {}",
                    new Object[] { flowFile, host, port, filename, ioe }, ioe);
        }
    } else if (COMPLETION_MOVE.getValue().equalsIgnoreCase(completionStrategy)) {
        String targetDir = context.getProperty(MOVE_DESTINATION_DIR).evaluateAttributeExpressions(flowFile).getValue();
        if (!targetDir.endsWith("/")) {
            targetDir = targetDir + "/";
        }
        final String simpleFilename = StringUtils.substringAfterLast(filename, "/");
        final String target = targetDir + simpleFilename;

        try {
            transfer.rename(filename, target);
        } catch (final IOException ioe) {
            getLogger().warn(
                    "Successfully fetched the content for {} from {}:{}{} but failed to rename the remote file due to {}",
                    new Object[] { flowFile, host, port, filename, ioe }, ioe);
        }
    }
}
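The path/filename split pairs substringBeforeLast and substringAfterLast on the same separator; a standalone sketch with a hypothetical remote filename:

import org.apache.commons.lang3.StringUtils;

public class PathSplitSketch {
    public static void main(String[] args) {
        String filename = "/data/incoming/archive.tar.gz"; // hypothetical remote path
        if (filename.contains("/")) {
            // Everything before the last "/" is the directory, everything after is the file name
            String path = StringUtils.substringBeforeLast(filename, "/");        // /data/incoming
            String filenameOnly = StringUtils.substringAfterLast(filename, "/"); // archive.tar.gz
            System.out.println(path + " | " + filenameOnly);
        }
    }
}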
From source file:org.apache.nifi.processors.standard.TailFile.java
private void processTailFile(final ProcessContext context, final ProcessSession session, final String tailFile) {
    // If user changes the file that is being tailed, we need to consume the already-rolled-over data according
    // to the Initial Start Position property
    boolean rolloverOccurred;
    TailFileObject tfo = states.get(tailFile);

    if (tfo.isTailFileChanged()) {
        rolloverOccurred = false;
        final String recoverPosition = context.getProperty(START_POSITION).getValue();

        if (START_BEGINNING_OF_TIME.getValue().equals(recoverPosition)) {
            recoverRolledFiles(context, session, tailFile, tfo.getExpectedRecoveryChecksum(),
                    tfo.getState().getTimestamp(), tfo.getState().getPosition());
        } else if (START_CURRENT_FILE.getValue().equals(recoverPosition)) {
            cleanup();
            tfo.setState(new TailFileState(tailFile, null, null, 0L, 0L, 0L, null, tfo.getState().getBuffer()));
        } else {
            final String filename = tailFile;
            final File file = new File(filename);

            try {
                final FileChannel fileChannel = FileChannel.open(file.toPath(), StandardOpenOption.READ);
                getLogger().debug("Created FileChannel {} for {}", new Object[] { fileChannel, file });

                final Checksum checksum = new CRC32();
                final long position = file.length();
                final long timestamp = file.lastModified();

                try (final InputStream fis = new FileInputStream(file);
                        final CheckedInputStream in = new CheckedInputStream(fis, checksum)) {
                    StreamUtils.copy(in, new NullOutputStream(), position);
                }

                fileChannel.position(position);
                cleanup();
                tfo.setState(new TailFileState(filename, file, fileChannel, position, timestamp, file.length(),
                        checksum, tfo.getState().getBuffer()));
            } catch (final IOException ioe) {
                getLogger().error(
                        "Attempted to position Reader at current position in file {} but failed to do so due to {}",
                        new Object[] { file, ioe.toString() }, ioe);
                context.yield();
                return;
            }
        }

        tfo.setTailFileChanged(false);
    } else {
        // Recover any data that may have rolled over since the last time that this processor ran.
        // If expectedRecoveryChecksum != null, that indicates that this is the first iteration since processor was started, so use whatever checksum value
        // was present when the state was last persisted. In this case, we must then null out the value so that the next iteration won't keep using the "recovered"
        // value. If the value is null, then we know that either the processor has already recovered that data, or there was no state persisted. In either case,
        // use whatever checksum value is currently in the state.
        Long expectedChecksumValue = tfo.getExpectedRecoveryChecksum();
        if (expectedChecksumValue == null) {
            expectedChecksumValue = tfo.getState().getChecksum() == null ? null
                    : tfo.getState().getChecksum().getValue();
        }

        rolloverOccurred = recoverRolledFiles(context, session, tailFile, expectedChecksumValue,
                tfo.getState().getTimestamp(), tfo.getState().getPosition());
        tfo.setExpectedRecoveryChecksum(null);
    }

    // initialize local variables from state object; this is done so that we can easily change the values throughout
    // the onTrigger method and then create a new state object after we finish processing the files.
    TailFileState state = tfo.getState();
    File file = state.getFile();
    FileChannel reader = state.getReader();
    Checksum checksum = state.getChecksum();
    if (checksum == null) {
        checksum = new CRC32();
    }
    long position = state.getPosition();
    long timestamp = state.getTimestamp();
    long length = state.getLength();

    // Create a reader if necessary.
    if (file == null || reader == null) {
        file = new File(tailFile);
        reader = createReader(file, position);
        if (reader == null) {
            context.yield();
            return;
        }
    }

    final long startNanos = System.nanoTime();

    // Check if file has rotated
    if (rolloverOccurred || (timestamp <= file.lastModified() && length > file.length())
            || (timestamp < file.lastModified() && length >= file.length())) {

        // Since file has rotated, we close the reader, create a new one, and then reset our state.
        try {
            reader.close();
            getLogger().debug("Closed FileChannel {}", new Object[] { reader });
        } catch (final IOException ioe) {
            getLogger().warn("Failed to close reader for {} due to {}", new Object[] { file, ioe });
        }

        reader = createReader(file, 0L);
        position = 0L;
        checksum.reset();
    }

    if (file.length() == position || !file.exists()) {
        // no data to consume so rather than continually running, yield to allow other processors to use the thread.
        getLogger().debug("No data to consume; created no FlowFiles");
        tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum,
                state.getBuffer()));
        persistState(tfo, context);
        context.yield();
        return;
    }

    // If there is data to consume, read as much as we can.
    final TailFileState currentState = state;
    final Checksum chksum = checksum;

    // data has been written to file. Stream it to a new FlowFile.
    FlowFile flowFile = session.create();

    final FileChannel fileReader = reader;
    final AtomicLong positionHolder = new AtomicLong(position);
    flowFile = session.write(flowFile, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream rawOut) throws IOException {
            try (final OutputStream out = new BufferedOutputStream(rawOut)) {
                positionHolder.set(readLines(fileReader, currentState.getBuffer(), out, chksum));
            }
        }
    });

    // If there ended up being no data, just remove the FlowFile
    if (flowFile.getSize() == 0) {
        session.remove(flowFile);
        getLogger().debug("No data to consume; removed created FlowFile");
    } else {
        // determine filename for FlowFile by using <base filename of log file>.<initial offset>-<final offset>.<extension>
        final String tailFilename = file.getName();
        final String baseName = StringUtils.substringBeforeLast(tailFilename, ".");
        final String flowFileName;
        if (baseName.length() < tailFilename.length()) {
            flowFileName = baseName + "." + position + "-" + positionHolder.get() + "."
                    + StringUtils.substringAfterLast(tailFilename, ".");
        } else {
            flowFileName = baseName + "." + position + "-" + positionHolder.get();
        }

        final Map<String, String> attributes = new HashMap<>(3);
        attributes.put(CoreAttributes.FILENAME.key(), flowFileName);
        attributes.put(CoreAttributes.MIME_TYPE.key(), "text/plain");
        attributes.put("tailfile.original.path", tailFile);
        flowFile = session.putAllAttributes(flowFile, attributes);

        session.getProvenanceReporter().receive(flowFile, file.toURI().toString(),
                "FlowFile contains bytes " + position + " through " + positionHolder.get() + " of source file",
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
        session.transfer(flowFile, REL_SUCCESS);
        position = positionHolder.get();

        // Set timestamp to the latest of when the file was modified and the current timestamp stored in the state.
        // We do this because when we read a file that has been rolled over, we set the state to 1 millisecond later than the last mod date
        // in order to avoid ingesting that file again. If we then read from this file during the same second (or millisecond, depending on the
        // operating system file last mod precision), then we could set the timestamp to a smaller value, which could result in reading in the
        // rotated file a second time.
        timestamp = Math.max(state.getTimestamp(), file.lastModified());
        length = file.length();
        getLogger().debug("Created {} and routed to success", new Object[] { flowFile });
    }

    // Create a new state object to represent our current position, timestamp, etc.
    tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum, state.getBuffer()));

    // We must commit session before persisting state in order to avoid data loss on restart
    session.commit();
    persistState(tfo, context);
}
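The FlowFile naming scheme wedges the byte range between the base name and the extension; a standalone sketch with hypothetical offsets (when the filename has no dot, substringBeforeLast returns it unchanged, which is what the length comparison detects):

import org.apache.commons.lang3.StringUtils;

public class FlowFileNameSketch {
    public static void main(String[] args) {
        String tailFilename = "app.log"; // hypothetical tailed file
        long position = 1024L;           // hypothetical start offset
        long endPosition = 4096L;        // hypothetical end offset

        String baseName = StringUtils.substringBeforeLast(tailFilename, ".");
        String flowFileName;
        if (baseName.length() < tailFilename.length()) {
            // The file had an extension, so the name becomes app.1024-4096.log
            flowFileName = baseName + "." + position + "-" + endPosition + "."
                    + StringUtils.substringAfterLast(tailFilename, ".");
        } else {
            // No extension: just append the byte range
            flowFileName = baseName + "." + position + "-" + endPosition;
        }
        System.out.println(flowFileName);
    }
}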
From source file:org.apache.nifi.processors.standard.TailFile.java
/**
 * Returns a list of all Files that match the following criteria:
 *
 * <ul>
 * <li>Filename matches the Rolling Filename Pattern</li>
 * <li>Filename does not match the actual file being tailed</li>
 * <li>The Last Modified Time on the file is equal to or later than the given minimum timestamp</li>
 * </ul>
 *
 * <p>
 * The List that is returned will be ordered by file timestamp, providing the oldest file first.
 * </p>
 *
 * @param context the ProcessContext to use in order to determine Processor configuration
 * @param minTimestamp any file with a Last Modified Time before this timestamp will not be returned
 * @return a list of all Files that have rolled over
 * @throws IOException if unable to perform the listing of files
 */
private List<File> getRolledOffFiles(final ProcessContext context, final long minTimestamp,
        final String tailFilePath) throws IOException {
    final File tailFile = new File(tailFilePath);
    File directory = tailFile.getParentFile();
    if (directory == null) {
        directory = new File(".");
    }

    String rollingPattern = context.getProperty(ROLLING_FILENAME_PATTERN).getValue();
    if (rollingPattern == null) {
        return Collections.emptyList();
    } else {
        rollingPattern = rollingPattern.replace("${filename}",
                StringUtils.substringBeforeLast(tailFile.getName(), "."));
    }

    final List<File> rolledOffFiles = new ArrayList<>();
    try (final DirectoryStream<Path> dirStream = Files.newDirectoryStream(directory.toPath(), rollingPattern)) {
        for (final Path path : dirStream) {
            final File file = path.toFile();
            final long lastMod = file.lastModified();

            if (file.lastModified() < minTimestamp) {
                getLogger().debug(
                        "Found rolled off file {} but its last modified timestamp is before the cutoff (Last Mod = {}, Cutoff = {}) so will not consume it",
                        new Object[] { file, lastMod, minTimestamp });
                continue;
            } else if (file.equals(tailFile)) {
                continue;
            }

            rolledOffFiles.add(file);
        }
    }

    // Sort files based on last modified timestamp. If same timestamp, use filename as a secondary sort, as often
    // files that are rolled over are given a naming scheme that sorts lexicographically in the same order as the
    // timestamp, such as yyyy-MM-dd-HH-mm-ss
    Collections.sort(rolledOffFiles, new Comparator<File>() {
        @Override
        public int compare(final File o1, final File o2) {
            final int lastModifiedComp = Long.compare(o1.lastModified(), o2.lastModified());
            if (lastModifiedComp != 0) {
                return lastModifiedComp;
            }
            return o1.getName().compareTo(o2.getName());
        }
    });

    return rolledOffFiles;
}
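The ${filename} placeholder in the rolling pattern resolves to the tailed file's base name; a minimal sketch with hypothetical property values:

import org.apache.commons.lang3.StringUtils;

public class RollingPatternSketch {
    public static void main(String[] args) {
        String rollingPattern = "${filename}.*.gz"; // hypothetical Rolling Filename Pattern
        String tailFileName = "app.log";            // hypothetical tailed file name

        // Substitute the base name (everything before the last dot) into the glob
        String resolved = rollingPattern.replace("${filename}",
                StringUtils.substringBeforeLast(tailFileName, "."));
        System.out.println(resolved); // app.*.gz
    }
}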
From source file:org.apache.nifi.registry.web.api.ApplicationResource.java
protected URI getBaseUri() {
    final UriBuilder uriBuilder = uriInfo.getBaseUriBuilder();
    URI uri = uriBuilder.build();
    try {
        // check for proxy settings
        final String scheme = getFirstHeaderValue(PROXY_SCHEME_HTTP_HEADER, FORWARDED_PROTO_HTTP_HEADER);
        final String host = getFirstHeaderValue(PROXY_HOST_HTTP_HEADER, FORWARDED_HOST_HTTP_HEADER);
        final String port = getFirstHeaderValue(PROXY_PORT_HTTP_HEADER, FORWARDED_PORT_HTTP_HEADER);
        String baseContextPath = getFirstHeaderValue(PROXY_CONTEXT_PATH_HTTP_HEADER, FORWARDED_CONTEXT_HTTP_HEADER);

        // if necessary, prepend the context path
        String resourcePath = uri.getPath();
        if (baseContextPath != null) {
            // normalize context path
            if (!baseContextPath.startsWith("/")) {
                baseContextPath = "/" + baseContextPath;
            }
            if (baseContextPath.endsWith("/")) {
                baseContextPath = StringUtils.substringBeforeLast(baseContextPath, "/");
            }

            // determine the complete resource path
            resourcePath = baseContextPath + resourcePath;
        }

        // determine the port uri
        int uriPort = uri.getPort();
        if (port != null) {
            if (StringUtils.isWhitespace(port)) {
                uriPort = -1;
            } else {
                try {
                    uriPort = Integer.parseInt(port);
                } catch (final NumberFormatException nfe) {
                    logger.warn(String.format(
                            "Unable to parse proxy port HTTP header '%s'. Using port from request URI '%s'.",
                            port, uriPort));
                }
            }
        }

        // construct the URI
        uri = new URI((StringUtils.isBlank(scheme)) ? uri.getScheme() : scheme, uri.getUserInfo(),
                (StringUtils.isBlank(host)) ? uri.getHost() : host, uriPort, resourcePath, uri.getQuery(),
                uri.getFragment());
    } catch (final URISyntaxException use) {
        throw new UriBuilderException(use);
    }

    return uri;
}
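Because the context path has already been normalized to start with "/", substringBeforeLast here simply trims a single trailing slash; a standalone sketch with a hypothetical header value:

import org.apache.commons.lang3.StringUtils;

public class ContextPathSketch {
    public static void main(String[] args) {
        String baseContextPath = "registry/"; // hypothetical proxy context path header
        if (!baseContextPath.startsWith("/")) {
            baseContextPath = "/" + baseContextPath;
        }
        if (baseContextPath.endsWith("/")) {
            // Drop the trailing slash: "/registry/" -> "/registry"
            baseContextPath = StringUtils.substringBeforeLast(baseContextPath, "/");
        }
        System.out.println(baseContextPath); // /registry
    }
}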