List of usage examples for java.lang Thread setName
public final synchronized void setName(String name)
From source file:com.twinsoft.convertigo.eclipse.wizards.setup.SetupWizard.java
public void postRegisterState(final String page) { if (!page.equals(previousPageName)) { previousPageName = page;/*from ww w .j ava 2s . c om*/ Thread th = new Thread(new Runnable() { public void run() { synchronized (SetupWizard.this) { try { String[] url = { "http://www.google-analytics.com/collect" }; HttpClient client = prepareHttpClient(url); PostMethod method = new PostMethod(url[0]); HeaderName.ContentType.setRequestHeader(method, MimeType.WwwForm.value()); // set parameters for POST method method.setParameter("v", "1"); method.setParameter("tid", "UA-660091-6"); method.setParameter("cid", getUniqueID()); method.setParameter("t", "pageview"); method.setParameter("dh", "http://www.convertigo.com"); method.setParameter("dp", "/StudioRegistrationWizard_" + page + ".html"); method.setParameter("dt", page + "_" + ProductVersion.productVersion); // execute HTTP post with parameters if (client != null) { client.executeMethod(method); } } catch (Exception e) { // ConvertigoPlugin.logWarning(e, // "Error while trying to send registration"); } } } }); th.setDaemon(true); th.setName("SetupWizard.register_steps"); th.start(); } else { previousPageName = page; } }
From source file:org.apache.nifi.cluster.coordination.http.replication.ThreadPoolRequestReplicator.java
/**
 * First stage of the two-stage commit for a mutable cluster request: replicates a
 * "validation" copy of the request (with the {@code REQUEST_VALIDATION_HTTP_HEADER}
 * header set) to every node in {@code nodeIds}. Once ALL nodes have answered, either
 * replicates the real request (all nodes returned 150-Continue) or records a failure
 * per dissenting node and asynchronously cancels the claimed locks.
 *
 * @param nodeIds         nodes the request must be verified against
 * @param method          HTTP method of the original request
 * @param uri             URI of the original request
 * @param entity          request entity to replicate
 * @param headers         original request headers (copied; not mutated)
 * @param clusterResponse aggregate response that collects per-node results/timings
 * @param merge           whether node responses should be merged on final replication
 * @param monitor         optional monitor to notify when verification fails; may be null
 */
private void performVerification(final Set<NodeIdentifier> nodeIds, final String method, final URI uri,
        final Object entity, final Map<String, String> headers,
        final StandardAsyncClusterResponse clusterResponse, final boolean merge, final Object monitor) {
    logger.debug("Verifying that mutable request {} {} can be made", method, uri.getPath());
    final Map<String, String> validationHeaders = new HashMap<>(headers);
    validationHeaders.put(REQUEST_VALIDATION_HTTP_HEADER, NODE_CONTINUE);
    final long startNanos = System.nanoTime();
    final int numNodes = nodeIds.size();
    final NodeRequestCompletionCallback completionCallback = new NodeRequestCompletionCallback() {
        final Set<NodeResponse> nodeResponses = Collections.synchronizedSet(new HashSet<>());

        @Override
        public void onCompletion(final NodeResponse nodeResponse) {
            // Add the node response to our collection. We later need to know whether or
            // not this is the last node response, so we add the response and then check
            // the size within a synchronized block to ensure that those two things happen
            // atomically. Otherwise, we could have multiple threads checking the sizes of
            // the sets at the same time, which could result in multiple threads performing
            // the 'all nodes are complete' logic.
            final boolean allNodesResponded;
            synchronized (nodeResponses) {
                nodeResponses.add(nodeResponse);
                allNodesResponded = nodeResponses.size() == numNodes;
            }
            try {
                final long nanos = System.nanoTime() - startNanos;
                clusterResponse.addTiming("Completed Verification", nodeResponse.getNodeId().toString(), nanos);
                // Only the thread that delivered the final response runs the verification logic.
                if (allNodesResponded) {
                    clusterResponse.addTiming("Verification Completed", "All Nodes", nanos);
                    // Count responses that did NOT report the 150-Continue status code.
                    final long dissentingCount = nodeResponses.stream()
                            .filter(p -> p.getStatus() != NODE_CONTINUE_STATUS_CODE).count();
                    // If all nodes responded with 150-Continue, replicate the original
                    // request to all nodes and we are finished.
                    if (dissentingCount == 0) {
                        logger.debug("Received verification from all {} nodes that mutable request {} {} can be made",
                                numNodes, method, uri.getPath());
                        replicate(nodeIds, method, uri, entity, headers, false, clusterResponse, true, merge, monitor);
                        return;
                    }
                    try {
                        // At least one node dissented: asynchronously tell every node to
                        // cancel the claim it took during verification.
                        final Map<String, String> cancelLockHeaders = new HashMap<>(headers);
                        cancelLockHeaders.put(REQUEST_TRANSACTION_CANCELATION_HTTP_HEADER, "true");
                        final Thread cancelLockThread = new Thread(new Runnable() {
                            @Override
                            public void run() {
                                logger.debug("Found {} dissenting nodes for {} {}; canceling claim request",
                                        dissentingCount, method, uri.getPath());
                                final Function<NodeIdentifier, NodeHttpRequest> requestFactory = nodeId -> new NodeHttpRequest(
                                        nodeId, method, createURI(uri, nodeId), entity, cancelLockHeaders, null,
                                        clusterResponse);
                                submitAsyncRequest(nodeIds, uri.getScheme(), uri.getPath(), requestFactory,
                                        cancelLockHeaders);
                            }
                        });
                        cancelLockThread.setName("Cancel Flow Locks");
                        cancelLockThread.start();
                        // Record a failure on the cluster response for each dissenting node.
                        // NOTE(review): iteration without holding the set's lock is safe here
                        // only because all responses have already arrived.
                        for (final NodeResponse response : nodeResponses) {
                            if (response.getStatus() != NODE_CONTINUE_STATUS_CODE) {
                                final Response clientResponse = response.getClientResponse();
                                final String message;
                                if (clientResponse == null) {
                                    message = "Node " + response.getNodeId()
                                            + " is unable to fulfill this request due to: Unexpected Response Code "
                                            + response.getStatus();
                                    logger.info(
                                            "Received a status of {} from {} for request {} {} when performing first stage of two-stage commit. The action will not occur",
                                            response.getStatus(), response.getNodeId(), method, uri.getPath());
                                } else {
                                    final String nodeExplanation = clientResponse.readEntity(String.class);
                                    message = "Node " + response.getNodeId()
                                            + " is unable to fulfill this request due to: " + nodeExplanation;
                                    logger.info(
                                            "Received a status of {} from {} for request {} {} when performing first stage of two-stage commit. "
                                                    + "The action will not occur. Node explanation: {}",
                                            response.getStatus(), response.getNodeId(), method, uri.getPath(),
                                            nodeExplanation);
                                }
                                // if a node reports forbidden, use that as the response failure
                                final RuntimeException failure;
                                if (response.getStatus() == Status.FORBIDDEN.getStatusCode()) {
                                    if (response.hasThrowable()) {
                                        failure = new AccessDeniedException(message, response.getThrowable());
                                    } else {
                                        failure = new AccessDeniedException(message);
                                    }
                                } else {
                                    if (response.hasThrowable()) {
                                        failure = new IllegalClusterStateException(message, response.getThrowable());
                                    } else {
                                        failure = new IllegalClusterStateException(message);
                                    }
                                }
                                clusterResponse.setFailure(failure, response.getNodeId());
                            }
                        }
                    } finally {
                        // Always wake any waiter, even if recording the failures threw.
                        if (monitor != null) {
                            synchronized (monitor) {
                                monitor.notify();
                            }
                            logger.debug(
                                    "Notified monitor {} because request {} {} has failed due to at least 1 dissenting node",
                                    monitor, method, uri);
                        }
                    }
                }
            } catch (final Exception e) {
                clusterResponse.add(new NodeResponse(nodeResponse.getNodeId(), method, uri, e));
                // If there was a problem, we need to ensure that we add all of the other nodes'
                // responses to the Cluster Response so that the Cluster Response is complete.
                for (final NodeResponse otherResponse : nodeResponses) {
                    if (otherResponse.getNodeId().equals(nodeResponse.getNodeId())) {
                        continue;
                    }
                    clusterResponse.add(otherResponse);
                }
            }
        }
    };
    // Callback function for generating a NodeHttpRequestCallable that can be used to perform the work
    final Function<NodeIdentifier, NodeHttpRequest> requestFactory = nodeId -> new NodeHttpRequest(nodeId, method,
            createURI(uri, nodeId), entity, validationHeaders, completionCallback, clusterResponse);
    // replicate the 'verification request' to all nodes
    submitAsyncRequest(nodeIds, uri.getScheme(), uri.getPath(), requestFactory, validationHeaders);
}
From source file:bammerbom.ultimatecore.bukkit.commands.CmdMinecraftservers.java
/**
 * Handles the /minecraftservers command: checks permission and the "Metrics"
 * config flag, then reports the status of the Mojang services on a background
 * thread (the status probe may block on network I/O).
 */
@Override
public void run(final CommandSender cs, String label, String[] args) {
    if (!r.perm(cs, "uc.minecraftservers", true, true)) {
        return;
    }
    if (!r.getCnfg().getBoolean("Metrics")) {
        r.sendMes(cs, "minecraftserversDisabled");
        return;
    }
    Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            if (!on) {
                runcheck();
            }
            // Build one comma-separated, color-coded list covering every status group.
            // (Replaces four copy-pasted loops and O(n^2) String concatenation.)
            StringBuilder os = new StringBuilder();
            appendServerGroup(os, online, ChatColor.GREEN);
            appendServerGroup(os, problems, ChatColor.GOLD);
            appendServerGroup(os, offline, ChatColor.DARK_RED);
            appendServerGroup(os, unknown, ChatColor.GRAY);
            r.sendMes(cs, "minecraftserversMessage", "%Servers", ChatColor.RESET + os.toString());
        }
    });
    thread.setName("UltimateCore: Server Check Thread");
    thread.start();
}

/**
 * Appends each server in {@code servers} to {@code os} as
 * {@code <color><lowercase name><r.positive>}, comma-separating entries.
 */
private static void appendServerGroup(StringBuilder os, Iterable<MinecraftServer> servers, Object color) {
    for (MinecraftServer str : servers) {
        if (os.length() > 0) {
            os.append(", ");
        }
        os.append(color).append(str.toString().toLowerCase()).append(r.positive);
    }
}
From source file:bammerbom.ultimatecore.spongeapi.commands.CmdMinecraftservers.java
@Override public void run(final CommandSender cs, String label, String[] args) { if (!r.perm(cs, "uc.minecraftservers", true, true)) { return;// w w w . ja v a 2 s .c o m } if (!r.getCnfg().getBoolean("Metrics")) { r.sendMes(cs, "minecraftserversDisabled"); return; } Thread thread = new Thread(new Runnable() { @Override public void run() { if (!on) { runcheck(); } String os = ""; for (MinecraftServer str : online) { if (!os.equals("")) { os = os + ", " + TextColors.GREEN + str.toString().toLowerCase() + r.positive + ""; } else { os = os + TextColors.GREEN + str.toString().toLowerCase() + r.positive + ""; } } for (MinecraftServer str : problems) { if (!os.equals("")) { os = os + ", " + TextColors.GOLD + str.toString().toLowerCase() + r.positive + ""; } else { os = os + TextColors.GOLD + str.toString().toLowerCase() + r.positive + ""; } } for (MinecraftServer str : offline) { if (!os.equals("")) { os = os + ", " + TextColors.DARK_RED + str.toString().toLowerCase() + r.positive + ""; } else { os = os + TextColors.DARK_RED + str.toString().toLowerCase() + r.positive + ""; } } for (MinecraftServer str : unknown) { if (!os.equals("")) { os = os + ", " + TextColors.GRAY + str.toString().toLowerCase() + r.positive + ""; } else { os = os + TextColors.GRAY + str.toString().toLowerCase() + r.positive + ""; } } r.sendMes(cs, "minecraftserversMessage", "%Servers", TextColors.RESET + os); } }); thread.setName("UltimateCore: Server Check Thread"); thread.start(); }
From source file:com._17od.upm.gui.DatabaseActions.java
/**
 * Synchronizes the open password database with its configured remote copy.
 * Downloads the remote database, decrypts it (prompting the user for a password
 * if the local master password no longer matches), then reconciles by revision:
 * local newer => upload local; remote newer => replace local; equal => no-op.
 * On success the local database is marked clean and a watchdog thread re-marks
 * it dirty after ~5 minutes so it will be synced again.
 *
 * @return true if the databases were successfully reconciled
 */
public boolean syncWithRemoteDatabase() throws TransportException, ProblemReadingDatabaseFile, IOException,
        CryptoException, PasswordDatabaseException {
    boolean syncSuccessful = false;
    try {
        // Stop watching the local file and show a busy cursor for the duration.
        fileMonitor.pause();
        mainWindow.getContentPane().setCursor(new Cursor(Cursor.WAIT_CURSOR));

        // Get the remote database options
        String remoteLocation = database.getDbOptions().getRemoteLocation();
        String authDBEntry = database.getDbOptions().getAuthDBEntry();
        String httpUsername = null;
        String httpPassword = null;
        if (!authDBEntry.equals("")) {
            // HTTP credentials are themselves stored as an account in the database.
            httpUsername = database.getAccount(authDBEntry).getUserId();
            httpPassword = database.getAccount(authDBEntry).getPassword();
        }

        // Download the database that's already at the remote location
        Transport transport = Transport.getTransportForURL(new URL(remoteLocation));
        File remoteDatabaseFile = transport.getRemoteFile(remoteLocation, database.getDatabaseFile().getName(),
                httpUsername, httpPassword);

        // Attempt to decrypt the database using the password the user entered
        PasswordDatabase remoteDatabase = null;
        char[] password = null;
        boolean successfullyDecryptedDb = false;
        try {
            remoteDatabase = dbPers.load(remoteDatabaseFile);
            successfullyDecryptedDb = true;
        } catch (InvalidPasswordException e) {
            // The password for the downloaded database is different to that of the open
            // database (most likely the user changed the local database's master password).
            // Keep prompting until decryption succeeds or the user cancels.
            boolean okClicked = false;
            do {
                password = askUserForPassword(Translator.translate("enterPaswordForRemoteDB"));
                if (password == null) {
                    okClicked = false;
                } else {
                    okClicked = true;
                    try {
                        remoteDatabase = dbPers.load(remoteDatabaseFile, password);
                        successfullyDecryptedDb = true;
                    } catch (InvalidPasswordException invalidPassword) {
                        JOptionPane.showMessageDialog(mainWindow, Translator.translate("incorrectPassword"));
                    }
                }
            } while (okClicked && !successfullyDecryptedDb);
        }

        /* Reconcile by revision number:
         *   local > remote  => upload local database (replacing the remote copy)
         *   local < remote  => replace local database with the remote one
         *   local == remote => nothing to do */
        if (successfullyDecryptedDb) {
            if (database.getRevision() > remoteDatabase.getRevision()) {
                transport.delete(remoteLocation, database.getDatabaseFile().getName(), httpUsername, httpPassword);
                transport.put(remoteLocation, database.getDatabaseFile(), httpUsername, httpPassword);
                syncSuccessful = true;
            } else if (database.getRevision() < remoteDatabase.getRevision()) {
                Util.copyFile(remoteDatabaseFile, database.getDatabaseFile());
                database = new PasswordDatabase(remoteDatabase.getRevisionObj(), remoteDatabase.getDbOptions(),
                        remoteDatabase.getAccountsHash(), database.getDatabaseFile());
                doOpenDatabaseActions();
                syncSuccessful = true;
            } else {
                syncSuccessful = true;
            }

            if (syncSuccessful) {
                setLocalDatabaseDirty(false);

                // Create a thread that will mark the database dirty after
                // a short period. Without this the database would remain
                // in a synced state until the user makes a change. The
                // longer we wait before syncing up the greater chance there
                // is that we'll miss changes made elsewhere and end up
                // with a conflicting version of the database.
                final long dirtyThreadStartTime = System.currentTimeMillis();
                runSetDBDirtyThread = true;
                Thread setDBDirtyThread = new Thread(new Runnable() {
                    public void run() {
                        while (runSetDBDirtyThread) {
                            try {
                                Thread.sleep(1000);
                            } catch (InterruptedException e1) {
                                // deliberate best-effort: keep polling until the deadline
                            }
                            long currentTime = System.currentTimeMillis();
                            if (currentTime - dirtyThreadStartTime > 5 * 60 * 1000) {
                                LOG.info("SetDBDirtyThread setting database dirty");
                                setLocalDatabaseDirty(true);
                                runSetDBDirtyThread = false;
                            }
                        }
                    }
                });
                setDBDirtyThread.setName("SetDBDirty");
                setDBDirtyThread.start();
                LOG.info("Started SetDBDirtyThread thread");
            }
        }
    } finally {
        // Always restore the cursor and resume file monitoring, even on failure.
        mainWindow.getContentPane().setCursor(new Cursor(Cursor.DEFAULT_CURSOR));
        fileMonitor.start();
    }
    return syncSuccessful;
}
From source file:com.dell.asm.asmcore.asmmanager.app.AsmManagerApp.java
/**
 * Bootstraps the ASM manager application: synchronously reads configuration and
 * registers jobs, then runs the long, failure-tolerant initialization sequence
 * (inventory, templates, firmware, cleanup, scheduled jobs) on a daemon thread
 * so server startup is not blocked. Each init step catches and logs its own
 * failures so one broken step does not abort the rest.
 */
private void initAsmManagerApp() {
    try {
        Properties props = ConfigurationUtils.resolveAndReadPropertiesFile(ASM_MANAGER_CONFIG_FILE,
                this.getClass().getClassLoader());
        ASM_REPO_LOCATION = props.getProperty("asm_repo_location");
        razorApiUrl = props.getProperty(RAZOR_API_URL_PROPERTY);
        razorRepoStoreLocation = props.getProperty(RAZOR_REPO_STORE_PROPERTY);
        _logger.info("razorApiUrl = " + razorApiUrl);
        razorCron = props.getProperty(RAZOR_JOB_CRON_PROPERTY);
        _logger.info("razorCron = " + razorCron);
        asmDeployerApiUrl = props.getProperty(ASM_DEPLOYER_URL_PROPERTY);
        _logger.info("asmDeployerApiUrl = " + asmDeployerApiUrl);
        scheduledInventoryCron = props.getProperty(SCHEDULEDINVENTORY_JOB_CRON_PROPERTY);
        _logger.info("Scheduled Inventory cron expression =" + scheduledInventoryCron);
        fileSystemMaintenanceCron = props.getProperty(FILE_SYSTEM_MAINTENANCE_JOB_CRON_PROPERTY);
        _logger.info("Scheduled File SystemMaintenance cron expression =" + fileSystemMaintenanceCron);
        // must call as soon as we read URLs from property file
        ProxyUtil.initAsmDeployerProxy();

        // Persist a default "ports to ping" setting on first run.
        final GenericDAO genericDAO = GenericDAO.getInstance();
        SettingEntity portsToPingSetting = genericDAO.getByName(ASM_PORTS_TO_PING, SettingEntity.class);
        if (portsToPingSetting == null) {
            String value = props.getProperty(PORTS_TO_PING_PROPERTY);
            if (value == null) {
                value = "22,80,135";
            }
            portsToPingSetting = new SettingEntity();
            portsToPingSetting.setName(ASM_PORTS_TO_PING);
            portsToPingSetting.setValue(value);
            genericDAO.create(portsToPingSetting);
        }
        List<String> items = Arrays.asList(portsToPingSetting.getValue().split("\\s*,\\s*"));
        _logger.info("Ports to ping configured to " + Arrays.toString(items.toArray()) + " ("
                + portsToPingSetting.getValue() + ")");

        setAsmManagerAppConfig(new AsmManagerAppConfig());

        // Connect timeout is configured in seconds; stored in milliseconds.
        String sConnectTimeout = props.getProperty(DISCOVERY_THREAD_CONNECT_TIMEOUT_PROPERTY);
        _logger.info("sConnectTimeout = " + sConnectTimeout);
        try {
            CONNECT_TIMEOUT = Integer.parseInt(sConnectTimeout);
            CONNECT_TIMEOUT = CONNECT_TIMEOUT * 1000;
        } catch (Exception e) {
            _logger.error("Unable to parse CONNECT_TIMEOUT", e);
        }

        String staggerDeploymentsSecs = props.getProperty(MULTI_DEPLOYMENTS_STAGGER_SECS_KEY);
        try {
            MULTI_DEPLOYMENTS_STAGGER_SECS = Integer.parseInt(staggerDeploymentsSecs);
        } catch (NumberFormatException nfe) {
            _logger.error("Unable to parse MULTI_DEPLOYMENTS_STAGGER_SECS_KEY: " + staggerDeploymentsSecs
                    + ", defaulting to 30 minutes");
            MULTI_DEPLOYMENTS_STAGGER_SECS = 30 * 60;
        }

        String sPuppetModulestoFilter = props.getProperty(PUPPET_MODULE_FILTER_PROPERTY);
        _logger.info("sPuppetModulestoFilter = " + sPuppetModulestoFilter);
        String[] parts = sPuppetModulestoFilter.split(",");
        if (parts != null) {
            for (String sModule : parts) {
                if (sModule != null) {
                    puppetModulesToFilter.add(sModule.trim());
                    _logger.info("Puppet module to filter:" + sModule.trim());
                }
            }
        }

        // this prevents exceptions on copyProperties with null Date
        ConvertUtilsBean convertUtilsBean = BeanUtilsBean.getInstance().getConvertUtils();
        convertUtilsBean.register(false, true, -1);
    } catch (IOException e) {
        _logger.info("Exception while parsing asmmanager properties file", e);
        throw new AsmManagerRuntimeException(e);
    }

    try {
        registerJobClasses();
        createDefaultRazorSyncJob();
    } catch (Exception t) {
        _logger.error("Unable to initialize AsmManagerApp. Register Jobs Failed.", t);
    }

    // Perform all initialization on a new thread to avoid holding up server initialization.
    Runnable runnable = new Runnable() {
        @Override
        public void run() {
            _logger.info("Starting AsmManagerApp initialization thread.");
            AsmManagerInitLifecycleListener.setStatus(AsmManagerInitLifecycleListener.UPDATING_INVENTORY);

            // Install Quartz Job Listeners
            try {
                FirmwareUpdateScheduleListener scheduleListener = new FirmwareUpdateScheduleListener();
                JobManager.getInstance().getScheduler().getListenerManager()
                        .addSchedulerListener(scheduleListener);
                GroupMatcher<JobKey> groupMatcher = GroupMatcher
                        .groupEquals(FirmwareUpdateJob.class.getSimpleName());
                JobManager.getInstance().getScheduler().getListenerManager().addJobListener(scheduleListener,
                        groupMatcher);
            } catch (Exception t) {
                _logger.error("Failed to install FirmwareUpdateScheduleListener", t);
            }
            try {
                setServiceContextUser(DBInit.DEFAULT_USER);
            } catch (Exception t) {
                _logger.error(
                        "Unable to initialize AsmManagerApp. Set service context default user to Admin failed.",
                        t);
            }
            try {
                // On Startup - Where we check to see if it exists (reverse) - If file does
                // NOT exist - Run Inventory and create the file
                if (isRestore() || isRestartAfterApplianceUpdate()) {
                    // Run inventory on all Devices
                    runInventory();
                }
            } catch (Exception t) {
                _logger.error("Unable to initialize AsmManagerApp. Running Inventory Failed.", t);
            }
            AsmManagerInitLifecycleListener.setStatus(AsmManagerInitLifecycleListener.UPDATING_TEMPLATES);
            try {
                createDefaultIOMCredential();
            } catch (Exception t) {
                _logger.error("Unable to initialize AsmManagerApp. Creation of Default IOM Credential Failed.", t);
            }
            try {
                updateExistingAddOnModules();
                updateAddOnModules();
            } catch (Exception t) {
                _logger.error("Unable to initialize AsmManagerApp. Updating AddOnModules Failed.", t);
            }
            try {
                loadDefaultTemplates();
            } catch (Exception t) {
                _logger.error("Unable to initialize AsmManagerApp. Loading of Default Templates Failed.", t);
            }
            try {
                updateFirmwareRepositoryBundleComponents();
            } catch (Exception t) {
                _logger.error("Unable to initialize AsmManagerApp. Updating Firmware Bundle Components Failed.", t);
            }
            try {
                loadEmbeddedFirmware();
            } catch (Exception t) {
                _logger.error("Unable to initialize AsmManagerApp. Load embedded Firmware Failed.", t);
            }
            try {
                failCopyingAndPendingFirmwareRepositories();
            } catch (Exception t) {
                _logger.error("Unable to initialize AsmManagerApp. Cleanup of rogue Firmware states Failed.", t);
            }
            try {
                cleanUpDevices();
                cleanUpJobs();
            } catch (Exception t) {
                _logger.error("Unable to initialize AsmManagerApp. Cleanup of Devices and Jobs Failed.", t);
            }
            try {
                ensureRazorReposExist();
            } catch (Exception t) {
                _logger.error("Unable to initialize AsmManagerApp. Ensuring Razor Repos Exist Failed.", t);
            }
            try {
                syncAppStateVars();
            } catch (Exception t) {
                _logger.error("Unable to initialize AsmManagerApp. Sync Application State Variables Failed.", t);
            }
            /*
             * Important to run this prior to revalidating ServiceTemplates since a broken OS
             * repository would then invalidate the template, which is the desired behavior.
             */
            try {
                cleanUpOsRepositories();
            } catch (Exception t) {
                _logger.error("Problem initializing AsmManagerApp. Cleaning up OSRepositories failed.", t);
            }
            try {
                final ServiceTemplate defaultTemplate = getServiceTemplateService().getDefaultTemplate();
                // build a map of add on module components for future use.
                final List<AddOnModuleComponentEntity> addOnModuleComponentEntities = getAddOnModuleComponentsDAO()
                        .getAll(true);
                Map<String, ServiceTemplateComponent> addOnModuleComponentsMap = new HashMap<>();
                for (AddOnModuleComponentEntity entity : addOnModuleComponentEntities) {
                    ServiceTemplateComponent component = MarshalUtil.unmarshal(ServiceTemplateComponent.class,
                            entity.getMarshalledData());
                    addOnModuleComponentsMap.put(component.getId(), component);
                }
                // Correct differences between entity object values and entity marshaledTemplateData values
                reconcileServiceTemplateEntityData();
                revalidateStoredTemplates(defaultTemplate, addOnModuleComponentsMap);
                revalidateDeployedTemplates(defaultTemplate, addOnModuleComponentEntities,
                        addOnModuleComponentsMap);
            } catch (Exception t) {
                _logger.error("Unable to initialize AsmManagerApp. Revalidating Templates Failed.", t);
            }
            try {
                createDefaultScheduledInventoryJob();
                createDefaultFileSystemMaintenanceJob();
                // please make sure there that all update deployment DAO calls made _before_ this line!!!
                createScheduledDeploymentStatusSyncJob(JOB_DELAY_SECS);
            } catch (Exception t) {
                _logger.error("Unable to initialize AsmManagerApp. Creation of default jobs failed.", t);
            }
            AsmManagerInitLifecycleListener.setStatus(AsmManagerInitLifecycleListener.READY);
            _logger.info("Finished AsmManagerApp initialization.");
        }
    };
    // Run the initialization code in our thread group.
    Thread initThread = new Thread(new ThreadGroup(AsmManagerApp_THREADGROUP_NAME), runnable);
    initThread.setName(AsmManagerApp_INIT_THREAD_NAME);
    initThread.setDaemon(true);
    initThread.start();
}
From source file:org.apache.hadoop.hdfs.server.namenode.AvatarNode.java
/** * Shuts down the avatar node//from w w w . j ava2s. co m * @param synchronous - should the function wait for the shutdown to complete * @throws IOException */ public synchronized void shutdown(boolean synchronous) throws IOException { LOG.info("Failover: Asynchronous shutdown for: " + currentAvatar); // check permissions before any other actions super.namesystem.checkSuperuserPrivilege(); if (runInfo.shutdown) { LOG.info("Failover: Node already shut down"); return; } // check edit streams // if this fails, we still have a chance to fix it // and shutdown again verifyEditStreams(); runInfo.shutdown = true; Thread shutdownThread = new ShutdownAvatarThread(this); shutdownThread.setName("ShutDown thread for : " + serverAddress); shutdownThread.setDaemon(false); shutdownThread.start(); if (synchronous) { LOG.info("Failover: Waiting for shutdown to complete"); try { shutdownThread.join(); } catch (InterruptedException ie) { throw new IOException(ie); } } }
From source file:org.apache.jackrabbit.core.RepositoryImpl.java
/**
 * Protected constructor: starts the repository. Acquires the home-directory
 * lock, sets up the meta file system, loads the root node id and repository
 * properties, creates the namespace/node-type registries, wires optional
 * clustering, initializes versioning, workspaces and search, and finally
 * starts the cluster node. If startup fails at any point, {@link #shutdown()}
 * is invoked from the finally block to release partially-acquired resources.
 *
 * @param repConfig repository configuration
 * @throws RepositoryException if the repository cannot be started
 */
protected RepositoryImpl(RepositoryConfig repConfig) throws RepositoryException {
    log.info("Starting repository...");
    boolean succeeded = false;
    try {
        this.repConfig = repConfig;
        // Acquire a lock on the repository home
        repLock = new RepositoryLock(repConfig.getHomeDir());
        repLock.acquire();
        // setup file systems
        repStore = repConfig.getFileSystemConfig().createFileSystem();
        String fsRootPath = "/meta";
        try {
            if (!repStore.exists(fsRootPath) || !repStore.isFolder(fsRootPath)) {
                repStore.createFolder(fsRootPath);
            }
        } catch (FileSystemException fse) {
            String msg = "failed to create folder for repository meta data";
            log.error(msg, fse);
            throw new RepositoryException(msg, fse);
        }
        metaDataStore = new BasedFileSystem(repStore, fsRootPath);
        // init root node uuid
        rootNodeId = loadRootNodeId(metaDataStore);
        // load repository properties
        repProps = loadRepProps();
        nodesCount = Long.parseLong(repProps.getProperty(STATS_NODE_COUNT_PROPERTY, "0"));
        propsCount = Long.parseLong(repProps.getProperty(STATS_PROP_COUNT_PROPERTY, "0"));
        // create registries
        nsReg = createNamespaceRegistry(new BasedFileSystem(repStore, "/namespaces"));
        ntReg = createNodeTypeRegistry(nsReg, new BasedFileSystem(repStore, "/nodetypes"));
        if (repConfig.getDataStoreConfig() != null) {
            assert InternalValue.USE_DATA_STORE;
            dataStore = createDataStore();
        } else {
            dataStore = null;
        }
        // init workspace configs
        Iterator iter = repConfig.getWorkspaceConfigs().iterator();
        while (iter.hasNext()) {
            WorkspaceConfig config = (WorkspaceConfig) iter.next();
            WorkspaceInfo info = createWorkspaceInfo(config);
            wspInfos.put(config.getName(), info);
        }
        // initialize optional clustering
        // put here before setting up any other external event source that a cluster node
        // will be interested in
        if (repConfig.getClusterConfig() != null) {
            clusterNode = createClusterNode();
            nsReg.setEventChannel(clusterNode);
            ntReg.setEventChannel(clusterNode);
        }
        // init version manager
        vMgr = createVersionManager(repConfig.getVersioningConfig(), delegatingDispatcher);
        if (clusterNode != null) {
            vMgr.setEventChannel(clusterNode.createUpdateChannel(null));
        }
        // init virtual node type manager
        virtNTMgr = new VirtualNodeTypeStateManager(getNodeTypeRegistry(), delegatingDispatcher, NODETYPES_NODE_ID,
                SYSTEM_ROOT_NODE_ID);
        // initialize startup workspaces
        initStartupWorkspaces();
        // initialize system search manager
        getSystemSearchManager(repConfig.getDefaultWorkspaceName());
        // after the workspace is initialized we pass a system session to
        // the virtual node type manager
        // todo FIXME the *global* virtual node type manager is using a session that is
        // bound to a single specific workspace...
        virtNTMgr.setSession(getSystemSession(repConfig.getDefaultWorkspaceName()));
        // now start cluster node as last step
        if (clusterNode != null) {
            try {
                clusterNode.start();
            } catch (ClusterException e) {
                String msg = "Unable to start clustered node, forcing shutdown...";
                log.error(msg, e);
                shutdown();
                throw new RepositoryException(msg, e);
            }
        }
        // amount of time in seconds before an idle workspace is automatically
        // shut down
        int maxIdleTime = repConfig.getWorkspaceMaxIdleTime();
        if (maxIdleTime != 0) {
            // start workspace janitor thread (low priority daemon)
            Thread wspJanitor = new Thread(new WorkspaceJanitor(maxIdleTime * 1000));
            wspJanitor.setName("WorkspaceJanitor");
            wspJanitor.setPriority(Thread.MIN_PRIORITY);
            wspJanitor.setDaemon(true);
            wspJanitor.start();
        }
        succeeded = true;
        log.info("Repository started");
    } catch (RepositoryException e) {
        log.error("failed to start Repository: " + e.getMessage(), e);
        throw e;
    } finally {
        if (!succeeded) {
            // repository startup failed, clean up...
            shutdown();
        }
    }
}
From source file:org.jboss.confluence.plugin.docbook_tools.docbookimport.DocbookImporter.java
/**
 * Process XSLT transformation.
 *
 * The transform runs on a dedicated thread whose context class loader is set to
 * this plugin's class loader (so parser/transformer factories resolve correctly),
 * but the calling thread joins it immediately — the method is effectively
 * synchronous. SAX and XSLT errors are captured by the handler/listener and
 * rethrown here after the worker finishes.
 *
 * @param xsltTemplate input stream with XSLT template file used to transform (closed inside this method)
 * @param xmlToTransform input stream with XML file to transform (closed inside this method)
 * @param xmlToTransformURL URL of <code>xmlToTransform</code> file (may be <code>file://</code> too).
 *        We need it to correctly evaluate relative paths.
 * @param output stream to write transformed output to
 * @throws javax.xml.transform.TransformerException on SAX or transformation errors
 */
protected void processXslt(final InputStream xsltTemplate, final InputStream xmlToTransform,
        final String xmlToTransformURL, final OutputStream output) throws Exception {
    final XSLTErrorListener errorListener = new XSLTErrorListener();
    final SAXErrorHandler eh = new SAXErrorHandler();
    Thread th = new Thread(new Runnable() {
        public void run() {
            try {
                org.xml.sax.InputSource xmlSource = new org.xml.sax.InputSource(xmlToTransform);
                xmlSource.setSystemId(xmlToTransformURL);
                javax.xml.transform.Source xsltSource = new javax.xml.transform.stream.StreamSource(xsltTemplate);
                javax.xml.transform.Result result = new javax.xml.transform.stream.StreamResult(output);
                // prepare XInclude aware parser which resolves necessary entities correctly
                XMLReader reader = new ParserAdapter(saxParserFactory.newSAXParser().getParser());
                reader.setEntityResolver(new JDGEntityResolver(reader.getEntityResolver()));
                reader.setErrorHandler(eh);
                SAXSource xmlSAXSource = new SAXSource(reader, xmlSource);
                javax.xml.transform.Transformer trans = transformerFact.newTransformer(xsltSource);
                trans.setErrorListener(errorListener);
                trans.transform(xmlSAXSource, result);
            } catch (Exception e) {
                // Funnel every failure through the error listener so the caller
                // rethrows it on its own thread after join().
                if (e instanceof TransformerException) {
                    errorListener.setException((TransformerException) e);
                } else {
                    errorListener.setException(new TransformerException(e));
                }
            } finally {
                FileUtils.closeInputStream(xmlToTransform);
                FileUtils.closeInputStream(xsltTemplate);
            }
        }
    });
    th.setName("DocbookImporter XSLT transformation thread");
    th.setDaemon(true);
    // context class loader must be set before start() so the worker sees it
    th.setContextClassLoader(DocbookImporter.class.getClassLoader());
    th.start();
    th.join();
    if (eh.getException() != null) {
        throw eh.getException();
    }
    if (errorListener.getException() != null) {
        throw errorListener.getException();
    }
}