List of usage examples for java.net.URI.getFragment()
public String getFragment()
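getFragment() returns the decoded fragment component of the URI (the part after '#'), or null if the URI has no fragment. A minimal, self-contained sketch (class name and URLs are illustrative):

import java.net.URI;

public class GetFragmentDemo {
    public static void main(String[] args) {
        URI withFragment = URI.create("https://example.com/docs/page.html#section-2");
        URI withoutFragment = URI.create("https://example.com/docs/page.html");

        // The decoded fragment component: everything after the '#'
        System.out.println(withFragment.getFragment());    // section-2

        // No fragment present, so getFragment() returns null
        System.out.println(withoutFragment.getFragment()); // null
    }
}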
From source file: org.apache.nifi.web.api.VersionsResource.java
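In this NiFi method, getFragment() appears roughly halfway through: when the update request must be replicated across the cluster, a new request URI is built component-by-component from the original URI, and the original fragment is carried over via the seven-argument URI constructor. A standalone sketch of that pattern follows the example.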
private VersionControlInformationEntity updateFlowVersion(final String groupId,
        final ComponentLifecycle componentLifecycle, final URI exampleUri,
        final Set<AffectedComponentEntity> affectedComponents, final boolean replicateRequest,
        final Revision revision, final VersionControlInformationEntity requestEntity,
        final VersionedFlowSnapshot flowSnapshot,
        final AsynchronousWebRequest<VersionControlInformationEntity> asyncRequest,
        final String idGenerationSeed, final boolean verifyNotModified,
        final boolean updateDescendantVersionedFlows) throws LifecycleManagementException, ResumeFlowException {

    // Steps 6-7: Determine which components must be stopped and stop them.
    final Set<String> stoppableReferenceTypes = new HashSet<>();
    stoppableReferenceTypes.add(AffectedComponentDTO.COMPONENT_TYPE_PROCESSOR);
    stoppableReferenceTypes.add(AffectedComponentDTO.COMPONENT_TYPE_REMOTE_INPUT_PORT);
    stoppableReferenceTypes.add(AffectedComponentDTO.COMPONENT_TYPE_REMOTE_OUTPUT_PORT);
    stoppableReferenceTypes.add(AffectedComponentDTO.COMPONENT_TYPE_INPUT_PORT);
    stoppableReferenceTypes.add(AffectedComponentDTO.COMPONENT_TYPE_OUTPUT_PORT);

    final Set<AffectedComponentEntity> runningComponents = affectedComponents.stream()
            .filter(dto -> stoppableReferenceTypes.contains(dto.getComponent().getReferenceType()))
            .filter(dto -> "Running".equalsIgnoreCase(dto.getComponent().getState()))
            .collect(Collectors.toSet());

    logger.info("Stopping {} Processors", runningComponents.size());
    final CancellableTimedPause stopComponentsPause = new CancellableTimedPause(250, Long.MAX_VALUE, TimeUnit.MILLISECONDS);
    asyncRequest.setCancelCallback(stopComponentsPause::cancel);
    componentLifecycle.scheduleComponents(exampleUri, groupId, runningComponents, ScheduledState.STOPPED, stopComponentsPause);

    if (asyncRequest.isCancelled()) {
        return null;
    }
    asyncRequest.update(new Date(), "Disabling Affected Controller Services", 20);

    // Steps 8-9. Disable enabled controller services that are affected
    final Set<AffectedComponentEntity> enabledServices = affectedComponents.stream()
            .filter(dto -> AffectedComponentDTO.COMPONENT_TYPE_CONTROLLER_SERVICE
                    .equals(dto.getComponent().getReferenceType()))
            .filter(dto -> "Enabled".equalsIgnoreCase(dto.getComponent().getState()))
            .collect(Collectors.toSet());

    logger.info("Disabling {} Controller Services", enabledServices.size());
    final CancellableTimedPause disableServicesPause = new CancellableTimedPause(250, Long.MAX_VALUE, TimeUnit.MILLISECONDS);
    asyncRequest.setCancelCallback(disableServicesPause::cancel);
    componentLifecycle.activateControllerServices(exampleUri, groupId, enabledServices,
            ControllerServiceState.DISABLED, disableServicesPause);

    if (asyncRequest.isCancelled()) {
        return null;
    }
    asyncRequest.update(new Date(), "Updating Flow", 40);

    logger.info("Updating Process Group with ID {} to version {} of the Versioned Flow",
            groupId, flowSnapshot.getSnapshotMetadata().getVersion());

    // If replicating request, steps 10-12 are performed on each node individually, and this is accomplished
    // by replicating a PUT to /nifi-api/versions/process-groups/{groupId}
    try {
        if (replicateRequest) {
            final NiFiUser user = NiFiUserUtils.getNiFiUser();

            final URI updateUri;
            try {
                updateUri = new URI(exampleUri.getScheme(), exampleUri.getUserInfo(), exampleUri.getHost(),
                        exampleUri.getPort(), "/nifi-api/versions/process-groups/" + groupId, null,
                        exampleUri.getFragment());
            } catch (URISyntaxException e) {
                throw new RuntimeException(e);
            }

            final Map<String, String> headers = new HashMap<>();
            headers.put("content-type", MediaType.APPLICATION_JSON);

            final VersionedFlowSnapshotEntity snapshotEntity = new VersionedFlowSnapshotEntity();
            snapshotEntity.setProcessGroupRevision(dtoFactory.createRevisionDTO(revision));
            snapshotEntity.setRegistryId(requestEntity.getVersionControlInformation().getRegistryId());
            snapshotEntity.setVersionedFlow(flowSnapshot);
            snapshotEntity.setUpdateDescendantVersionedFlows(updateDescendantVersionedFlows);

            final NodeResponse clusterResponse;
            try {
                logger.debug("Replicating PUT request to {} for user {}", updateUri, user);

                if (getReplicationTarget() == ReplicationTarget.CLUSTER_NODES) {
                    clusterResponse = getRequestReplicator()
                            .replicate(user, HttpMethod.PUT, updateUri, snapshotEntity, headers)
                            .awaitMergedResponse();
                } else {
                    clusterResponse = getRequestReplicator().forwardToCoordinator(getClusterCoordinatorNode(),
                            user, HttpMethod.PUT, updateUri, snapshotEntity, headers).awaitMergedResponse();
                }
            } catch (final InterruptedException ie) {
                logger.warn("Interrupted while replicating PUT request to {} for user {}", updateUri, user);
                Thread.currentThread().interrupt();
                throw new LifecycleManagementException("Interrupted while updating flows across cluster", ie);
            }

            final int updateFlowStatus = clusterResponse.getStatus();
            if (updateFlowStatus != Status.OK.getStatusCode()) {
                final String explanation = getResponseEntity(clusterResponse, String.class);
                logger.error(
                        "Failed to update flow across cluster when replicating PUT request to {} for user {}. Received {} response with explanation: {}",
                        updateUri, user, updateFlowStatus, explanation);
                throw new LifecycleManagementException(
                        "Failed to update Flow on all nodes in cluster due to " + explanation);
            }
        } else {
            // Step 10: Ensure that if any connection exists in the flow and does not exist in the proposed snapshot,
            // that it has no data in it. Ensure that no Input Port was removed, unless it currently has no incoming connections.
            // Ensure that no Output Port was removed, unless it currently has no outgoing connections.
            serviceFacade.verifyCanUpdate(groupId, flowSnapshot, true, verifyNotModified);

            // Step 11-12. Update Process Group to the new flow and update variable registry with any Variables that were added or removed
            final VersionControlInformationDTO requestVci = requestEntity.getVersionControlInformation();

            final Bucket bucket = flowSnapshot.getBucket();
            final VersionedFlow flow = flowSnapshot.getFlow();
            final VersionedFlowSnapshotMetadata metadata = flowSnapshot.getSnapshotMetadata();

            final VersionControlInformationDTO vci = new VersionControlInformationDTO();
            vci.setBucketId(metadata.getBucketIdentifier());
            vci.setBucketName(bucket.getName());
            vci.setFlowDescription(flow.getDescription());
            vci.setFlowId(flow.getIdentifier());
            vci.setFlowName(flow.getName());
            vci.setGroupId(groupId);
            vci.setRegistryId(requestVci.getRegistryId());
            vci.setRegistryName(serviceFacade.getFlowRegistryName(requestVci.getRegistryId()));
            vci.setVersion(metadata.getVersion());
            vci.setState(flowSnapshot.isLatest() ? VersionedFlowState.UP_TO_DATE.name()
                    : VersionedFlowState.STALE.name());

            serviceFacade.updateProcessGroupContents(revision, groupId, vci, flowSnapshot, idGenerationSeed,
                    verifyNotModified, false, updateDescendantVersionedFlows);
        }
    } finally {
        if (!asyncRequest.isCancelled()) {
            if (logger.isDebugEnabled()) {
                logger.debug("Re-Enabling {} Controller Services: {}", enabledServices.size(), enabledServices);
            }

            asyncRequest.update(new Date(), "Re-Enabling Controller Services", 60);

            // Step 13. Re-enable all disabled controller services
            final CancellableTimedPause enableServicesPause = new CancellableTimedPause(250, Long.MAX_VALUE, TimeUnit.MILLISECONDS);
            asyncRequest.setCancelCallback(enableServicesPause::cancel);
            final Set<AffectedComponentEntity> servicesToEnable = getUpdatedEntities(enabledServices);
            logger.info("Successfully updated flow; re-enabling {} Controller Services", servicesToEnable.size());

            try {
                componentLifecycle.activateControllerServices(exampleUri, groupId, servicesToEnable,
                        ControllerServiceState.ENABLED, enableServicesPause);
            } catch (final IllegalStateException ise) {
                // Component Lifecycle will re-enable the Controller Services only if they are valid. If IllegalStateException gets thrown, we need to provide
                // a more intelligent error message as to exactly what happened, rather than indicate that the flow could not be updated.
                throw new ResumeFlowException(
                        "Failed to re-enable Controller Services because " + ise.getMessage(), ise);
            }
        }

        if (!asyncRequest.isCancelled()) {
            if (logger.isDebugEnabled()) {
                logger.debug("Restart {} Processors: {}", runningComponents.size(), runningComponents);
            }

            asyncRequest.update(new Date(), "Restarting Processors", 80);

            // Step 14. Restart all components
            final Set<AffectedComponentEntity> componentsToStart = getUpdatedEntities(runningComponents);

            // If there are any Remote Group Ports that are supposed to be started and have no connections, we want to remove those from our Set.
            // This will happen if the Remote Group Port is transmitting when the version change happens but the new flow version does not have
            // a connection to the port. In such a case, the Port still is included in the Updated Entities because we do not remove them
            // when updating the flow (they are removed in the background).
            final Set<AffectedComponentEntity> avoidStarting = new HashSet<>();
            for (final AffectedComponentEntity componentEntity : componentsToStart) {
                final AffectedComponentDTO componentDto = componentEntity.getComponent();
                final String referenceType = componentDto.getReferenceType();
                if (!AffectedComponentDTO.COMPONENT_TYPE_REMOTE_INPUT_PORT.equals(referenceType)
                        && !AffectedComponentDTO.COMPONENT_TYPE_REMOTE_OUTPUT_PORT.equals(referenceType)) {
                    continue;
                }

                boolean startComponent;
                try {
                    startComponent = serviceFacade.isRemoteGroupPortConnected(componentDto.getProcessGroupId(),
                            componentDto.getId());
                } catch (final ResourceNotFoundException rnfe) {
                    // Could occur if RPG is refreshed at just the right time.
                    startComponent = false;
                }

                // We must add the components to avoid starting to a separate Set and then remove them below,
                // rather than removing the component here, because doing so would result in a ConcurrentModificationException.
                if (!startComponent) {
                    avoidStarting.add(componentEntity);
                }
            }
            componentsToStart.removeAll(avoidStarting);

            final CancellableTimedPause startComponentsPause = new CancellableTimedPause(250, Long.MAX_VALUE, TimeUnit.MILLISECONDS);
            asyncRequest.setCancelCallback(startComponentsPause::cancel);
            logger.info("Restarting {} Processors", componentsToStart.size());

            try {
                componentLifecycle.scheduleComponents(exampleUri, groupId, componentsToStart,
                        ScheduledState.RUNNING, startComponentsPause);
            } catch (final IllegalStateException ise) {
                // Component Lifecycle will restart the Processors only if they are valid. If IllegalStateException gets thrown, we need to provide
                // a more intelligent error message as to exactly what happened, rather than indicate that the flow could not be updated.
                throw new ResumeFlowException("Failed to restart components because " + ise.getMessage(), ise);
            }
        }
    }

    asyncRequest.setCancelCallback(null);
    if (asyncRequest.isCancelled()) {
        return null;
    }

    asyncRequest.update(new Date(), "Complete", 100);
    return serviceFacade.getVersionControlInformation(groupId);
}
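The pattern above (rebuilding a URI with a new path while carrying the original fragment through the seven-argument URI constructor) works because that constructor simply omits any component passed as null. A standalone sketch, with hypothetical host, port, and group ID:

import java.net.URI;
import java.net.URISyntaxException;

public class RewritePathKeepFragment {

    // Rebuild a URI with a new path, preserving scheme, authority, and fragment.
    // The query is deliberately dropped by passing null, as in the NiFi code above.
    static URI withPath(URI original, String newPath) throws URISyntaxException {
        return new URI(original.getScheme(), original.getUserInfo(), original.getHost(),
                original.getPort(), newPath, null, original.getFragment());
    }

    public static void main(String[] args) throws URISyntaxException {
        URI requestUri = URI.create("https://node1:8443/nifi-api/versions/update-requests/abc#frag");
        System.out.println(withPath(requestUri, "/nifi-api/versions/process-groups/1234"));
        // prints: https://node1:8443/nifi-api/versions/process-groups/1234#frag
    }
}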
From source file: org.apache.oozie.action.hadoop.TestJavaActionExecutor.java
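This Oozie test exercises JavaActionExecutor.addToCache() with and without '#' fragments on cache paths; getFragment() appears where the test rebuilds the application URI for a different cluster while preserving any fragment. A short sketch of the fragment-as-symlink convention follows the example.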
public void testAddToCache() throws Exception {
    JavaActionExecutor ae = new JavaActionExecutor();
    Configuration conf = new XConfiguration();
    Path appPath = new Path(getFsTestCaseDir(), "wf");
    URI appUri = appPath.toUri();

    // test archive without fragment
    Path archivePath = new Path("test.jar");
    Path archiveFullPath = new Path(appPath, archivePath);
    ae.addToCache(conf, appPath, archiveFullPath.toString(), true);
    assertTrue(conf.get("mapred.cache.archives").contains(archiveFullPath.toString()));
    assertTrue(DistributedCache.getSymlink(conf));

    // test archive with fragment
    Path archiveFragmentPath = new Path("test.jar#a.jar");
    Path archiveFragmentFullPath = new Path(appPath, archiveFragmentPath);
    conf.clear();
    ae.addToCache(conf, appPath, archiveFragmentFullPath.toString(), true);
    assertTrue(conf.get("mapred.cache.archives").contains(archiveFragmentFullPath.toString()));
    assertTrue(DistributedCache.getSymlink(conf));

    // test .so without fragment
    Path appSoPath = new Path("lib/a.so");
    Path appSoFullPath = new Path(appPath, appSoPath);
    conf.clear();
    ae.addToCache(conf, appPath, appSoFullPath.toString(), false);
    assertTrue(conf.get("mapred.cache.files").contains(appSoFullPath.toString()));
    assertTrue(DistributedCache.getSymlink(conf));

    // test .so with fragment
    Path appSoFragmentPath = new Path("lib/a.so#a.so");
    Path appSoFragmentFullPath = new Path(appPath, appSoFragmentPath);
    conf.clear();
    ae.addToCache(conf, appPath, appSoFragmentFullPath.toString(), false);
    assertTrue(conf.get("mapred.cache.files").contains(appSoFragmentFullPath.toString()));
    assertTrue(DistributedCache.getSymlink(conf));

    // test .jar without fragment where app path is on same cluster as jar path
    Path appJarPath = new Path("lib/a.jar");
    Path appJarFullPath = new Path(appPath, appJarPath);
    conf = new Configuration();
    conf.set(WorkflowAppService.HADOOP_USER, getTestUser());
    ae.addToCache(conf, appPath, appJarFullPath.toString(), false);
    // assert that mapred.cache.files contains jar URI path (full on Hadoop-2)
    Path jarPath = HadoopShims.isYARN() ? new Path(appJarFullPath.toUri())
            : new Path(appJarFullPath.toUri().getPath());
    assertTrue(conf.get("mapred.cache.files").contains(jarPath.toString()));
    // assert that dist cache classpath contains jar URI path
    Path[] paths = DistributedCache.getFileClassPaths(conf);
    boolean pathFound = false;
    for (Path path : paths) {
        if (path.equals(jarPath)) {
            pathFound = true;
            break;
        }
    }
    assertTrue(pathFound);
    assertTrue(DistributedCache.getSymlink(conf));

    // test .jar without fragment where app path is on a different cluster than jar path
    appJarPath = new Path("lib/a.jar");
    appJarFullPath = new Path(appPath, appJarPath);
    Path appDifferentClusterPath = new Path(new URI(appUri.getScheme(), null, appUri.getHost() + "x",
            appUri.getPort(), appUri.getPath(), appUri.getQuery(), appUri.getFragment()));
    conf.clear();
    conf.set(WorkflowAppService.HADOOP_USER, getTestUser());
    ae.addToCache(conf, appDifferentClusterPath, appJarFullPath.toString(), false);
    // assert that mapred.cache.files contains absolute jar URI
    assertTrue(conf.get("mapred.cache.files").contains(appJarFullPath.toString()));
    assertTrue(DistributedCache.getSymlink(conf));

    // test .jar with fragment
    Path appJarFragmentPath = new Path("lib/a.jar#a.jar");
    Path appJarFragmentFullPath = new Path(appPath, appJarFragmentPath);
    conf.clear();
    conf.set(WorkflowAppService.HADOOP_USER, getTestUser());
    ae.addToCache(conf, appPath, appJarFragmentFullPath.toString(), false);
    assertTrue(conf.get("mapred.cache.files").contains(appJarFragmentFullPath.toString()));
    assertTrue(DistributedCache.getSymlink(conf));

    // test regular file without fragment
    Path appFilePath = new Path("lib/a.txt");
    Path appFileFullPath = new Path(appPath, appFilePath);
    conf.clear();
    ae.addToCache(conf, appPath, appFileFullPath.toString(), false);
    assertTrue(conf.get("mapred.cache.files").contains(appFileFullPath.toString()));
    assertTrue(DistributedCache.getSymlink(conf));

    // test regular file with fragment
    Path appFileFragmentPath = new Path("lib/a.txt#a.txt");
    Path appFileFragmentFullPath = new Path(appPath, appFileFragmentPath);
    conf.clear();
    ae.addToCache(conf, appPath, appFileFragmentFullPath.toString(), false);
    assertTrue(conf.get("mapred.cache.files").contains(appFileFragmentFullPath.toString()));
    assertTrue(DistributedCache.getSymlink(conf));

    // test path starting with "/" for archive
    Path testPath = new Path("/tmp/testpath/a.jar#a.jar");
    conf.clear();
    ae.addToCache(conf, appPath, testPath.toString(), true);
    assertTrue(conf.get("mapred.cache.archives").contains(testPath.toString()));
    assertTrue(DistributedCache.getSymlink(conf));

    // test path starting with "/" for cache.file
    conf.clear();
    ae.addToCache(conf, appPath, testPath.toString(), false);
    assertTrue(conf.get("mapred.cache.files").contains(testPath.toString()));
    assertTrue(DistributedCache.getSymlink(conf));

    // test absolute path for archive
    Path testAbsolutePath = new Path("hftp://namenode.test.com:8020/tmp/testpath/a.jar#a.jar");
    conf.clear();
    ae.addToCache(conf, appPath, testAbsolutePath.toString(), true);
    assertTrue(conf.get("mapred.cache.archives").contains(testAbsolutePath.toString()));
    assertTrue(DistributedCache.getSymlink(conf));

    // test absolute path for cache files
    conf.clear();
    ae.addToCache(conf, appPath, testAbsolutePath.toString(), false);
    assertTrue(conf.get("mapred.cache.files").contains(testAbsolutePath.toString()));
    assertTrue(DistributedCache.getSymlink(conf));

    // test relative path for archive
    conf.clear();
    ae.addToCache(conf, appPath, "lib/a.jar#a.jar", true);
    assertTrue(conf.get("mapred.cache.archives").contains(appUri.getPath() + "/lib/a.jar#a.jar"));
    assertTrue(DistributedCache.getSymlink(conf));

    // test relative path for cache files
    conf.clear();
    ae.addToCache(conf, appPath, "lib/a.jar#a.jar", false);
    assertTrue(conf.get("mapred.cache.files").contains(appUri.getPath() + "/lib/a.jar#a.jar"));
    assertTrue(DistributedCache.getSymlink(conf));
}
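In Hadoop's distributed cache, the '#name' suffix seen throughout this test is a plain URI fragment that names the symlink under which the cached file is exposed to the task. A minimal sketch of pulling that fragment out with java.net.URI (host and paths are illustrative):

import java.net.URI;

public class CacheFragmentDemo {
    public static void main(String[] args) {
        // The fragment after '#' is the symlink name for the cached file.
        URI cacheEntry = URI.create("hdfs://namenode:8020/user/test/wf/lib/a.jar#a.jar");
        System.out.println(cacheEntry.getPath());     // /user/test/wf/lib/a.jar
        System.out.println(cacheEntry.getFragment()); // a.jar

        // No '#' suffix means no fragment, so getFragment() returns null.
        URI plainEntry = URI.create("hdfs://namenode:8020/user/test/wf/lib/a.jar");
        System.out.println(plainEntry.getFragment()); // null
    }
}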