List of usage examples for java.util.logging Level FINER
Level FINER
To view the source code for a java.util.logging Level.FINER usage example, click its Source Link below.
From source file:com.granule.json.utils.internal.JSONObject.java
/** * Internal method for doing a simple indention write. * @param writer The writer to use while writing the JSON text. * @param indentDepth How deep to indent the text. * @throws IOException Trhown if an error occurs on write. *///from w ww . j a va 2 s.c o m private void writeIndention(Writer writer, int indentDepth) throws IOException { if (logger.isLoggable(Level.FINER)) logger.entering(className, "writeIndention(Writer, int)"); try { for (int i = 0; i < indentDepth; i++) { writer.write(indent); } } catch (Exception ex) { IOException iox = new IOException("Error occurred on serialization of JSON text."); iox.initCause(ex); throw iox; } if (logger.isLoggable(Level.FINER)) logger.exiting(className, "writeIndention(Writer, int)"); }
From source file:org.csanchez.jenkins.plugins.kubernetes.KubernetesLauncher.java
@Override public void launch(SlaveComputer computer, TaskListener listener) { PrintStream logger = listener.getLogger(); if (!(computer instanceof KubernetesComputer)) { throw new IllegalArgumentException("This Launcher can be used only with KubernetesComputer"); }/* w ww .ja v a 2 s. c om*/ KubernetesComputer kubernetesComputer = (KubernetesComputer) computer; computer.setAcceptingTasks(false); KubernetesSlave slave = kubernetesComputer.getNode(); if (slave == null) { throw new IllegalStateException("Node has been removed, cannot launch " + computer.getName()); } if (launched) { LOGGER.log(INFO, "Agent has already been launched, activating: {}", slave.getNodeName()); computer.setAcceptingTasks(true); return; } KubernetesCloud cloud = slave.getKubernetesCloud(); final PodTemplate unwrappedTemplate = slave.getTemplate(); try { KubernetesClient client = cloud.connect(); Pod pod = getPodTemplate(client, slave, unwrappedTemplate); String podId = pod.getMetadata().getName(); String namespace = StringUtils.defaultIfBlank(slave.getNamespace(), client.getNamespace()); LOGGER.log(Level.FINE, "Creating Pod: {0} in namespace {1}", new Object[] { podId, namespace }); pod = client.pods().inNamespace(namespace).create(pod); LOGGER.log(INFO, "Created Pod: {0} in namespace {1}", new Object[] { podId, namespace }); logger.printf("Created Pod: %s in namespace %s%n", podId, namespace); // We need the pod to be running and connected before returning // otherwise this method keeps being called multiple times List<String> validStates = ImmutableList.of("Running"); int i = 0; int j = 100; // wait 600 seconds List<ContainerStatus> containerStatuses = null; // wait for Pod to be running for (; i < j; i++) { LOGGER.log(INFO, "Waiting for Pod to be scheduled ({1}/{2}): {0}", new Object[] { podId, i, j }); logger.printf("Waiting for Pod to be scheduled (%2$s/%3$s): %1$s%n", podId, i, j); Thread.sleep(6000); pod = client.pods().inNamespace(namespace).withName(podId).get(); if (pod == null) { 
throw new IllegalStateException("Pod no longer exists: " + podId); } containerStatuses = pod.getStatus().getContainerStatuses(); List<ContainerStatus> terminatedContainers = new ArrayList<>(); Boolean allContainersAreReady = true; for (ContainerStatus info : containerStatuses) { if (info != null) { if (info.getState().getWaiting() != null) { // Pod is waiting for some reason LOGGER.log(INFO, "Container is waiting {0} [{2}]: {1}", new Object[] { podId, info.getState().getWaiting(), info.getName() }); logger.printf("Container is waiting %1$s [%3$s]: %2$s%n", podId, info.getState().getWaiting(), info.getName()); // break; } if (info.getState().getTerminated() != null) { terminatedContainers.add(info); } else if (!info.getReady()) { allContainersAreReady = false; } } } if (!terminatedContainers.isEmpty()) { Map<String, Integer> errors = terminatedContainers.stream().collect(Collectors.toMap( ContainerStatus::getName, (info) -> info.getState().getTerminated().getExitCode())); // Print the last lines of failed containers logLastLines(terminatedContainers, podId, namespace, slave, errors, client); throw new IllegalStateException("Containers are terminated with exit codes: " + errors); } if (!allContainersAreReady) { continue; } if (validStates.contains(pod.getStatus().getPhase())) { break; } } String status = pod.getStatus().getPhase(); if (!validStates.contains(status)) { throw new IllegalStateException( "Container is not running after " + j + " attempts, status: " + status); } j = unwrappedTemplate.getSlaveConnectTimeout(); // now wait for agent to be online for (; i < j; i++) { if (slave.getComputer() == null) { throw new IllegalStateException("Node was deleted, computer is null"); } if (slave.getComputer().isOnline()) { break; } LOGGER.log(INFO, "Waiting for agent to connect ({1}/{2}): {0}", new Object[] { podId, i, j }); logger.printf("Waiting for agent to connect (%2$s/%3$s): %1$s%n", podId, i, j); Thread.sleep(1000); } if (!slave.getComputer().isOnline()) { if 
(containerStatuses != null) { logLastLines(containerStatuses, podId, namespace, slave, null, client); } throw new IllegalStateException( "Agent is not connected after " + j + " attempts, status: " + status); } computer.setAcceptingTasks(true); } catch (Throwable ex) { LOGGER.log(Level.WARNING, String.format("Error in provisioning; agent=%s, template=%s", slave, unwrappedTemplate), ex); LOGGER.log(Level.FINER, "Removing Jenkins node: {0}", slave.getNodeName()); try { slave.terminate(); } catch (IOException | InterruptedException e) { LOGGER.log(Level.WARNING, "Unable to remove Jenkins node", e); } throw Throwables.propagate(ex); } launched = true; try { // We need to persist the "launched" setting... slave.save(); } catch (IOException e) { LOGGER.log(Level.WARNING, "Could not save() agent: " + e.getMessage(), e); } }
From source file:com.sampas.socbs.core.data.arcsde.impl.ArcSDEConnectionPool.java
/** * DOCUMENT ME!// www . java2 s. c o m * * @return DOCUMENT ME! * * @throws DataSourceException * DOCUMENT ME! * @throws UnavailableArcSDEConnectionException * @throws IllegalStateException * DOCUMENT ME! */ public ArcSDEPooledConnection getConnection() throws DataSourceException, UnavailableArcSDEConnectionException { if (pool == null) { throw new IllegalStateException("The ConnectionPool has been closed."); } try { // String caller = null; // if (LOGGER.isLoggable(Level.FINER)) { // StackTraceElement[] stackTrace = // Thread.currentThread().getStackTrace(); // caller = stackTrace[3].getClassName() + "." + // stackTrace[3].getMethodName(); // } ArcSDEPooledConnection connection = (ArcSDEPooledConnection) this.pool.borrowObject(); if (LOGGER.isLoggable(Level.FINER)) { // System.err.println("-> " + caller + " got " + connection); LOGGER.finer(connection + " out of connection pool"); } connection.markActive(); return connection; } catch (NoSuchElementException e) { LOGGER.log(Level.WARNING, "Out of connections: " + e.getMessage(), e); throw new UnavailableArcSDEConnectionException(this.pool.getNumActive(), this.config); } catch (SeException se) { LOGGER.log(Level.WARNING, "ArcSDE error getting connection: " + se.getSeError().getErrDesc(), se); throw new DataSourceException("ArcSDE Error Message: " + se.getSeError().getErrDesc(), se); } catch (Exception e) { LOGGER.log(Level.WARNING, "Unknown problem getting connection: " + e.getMessage(), e); throw new DataSourceException("Unknown problem fetching connection from connection pool", e); } }
From source file:net.sourceforge.czt.gnast.Gnast.java
/** * Parses the arguments from the command line. * * @return a configured GnAST builder if parsing was successful; * {@code null} otherwise.//from ww w . j a va 2s . c o m * @throws NullPointerException if {@code args} is {@code null} */ @SuppressWarnings("static-access") private static GnastBuilder parseArguments(String[] args) { Options argOptions = new Options(); OptionGroup verboseOptions = new OptionGroup(); verboseOptions.addOption(OptionBuilder.withLongOpt("verbose") .withDescription("Verbose; display verbose debugging messages").create("v")); verboseOptions.addOption(OptionBuilder.withLongOpt("vverbose") .withDescription("Very verbose; more verbose debugging messages").create("vv")); verboseOptions.addOption(OptionBuilder.withLongOpt("vvverbose") .withDescription("Very very verbose; even more verbose debugging messages").create("vvv")); argOptions.addOptionGroup(verboseOptions); argOptions.addOption(OptionBuilder.withLongOpt("finalizers") .withDescription("Add AST finalisers. WARNING: ASTs will consume more memory!").create("f")); argOptions.addOption(OptionBuilder.withArgName("dir").hasArg().withLongOpt("destination") .withDescription("Generated files go into this directory").create("d")); argOptions.addOption(OptionBuilder.withArgName("dir1 dir2").hasArgs().withValueSeparator(',') .withLongOpt("templates").withDescription("Additional template directories").create("t")); argOptions.addOption(OptionBuilder.withArgName("file").hasArg().withLongOpt("mapping") .withDescription("XML type mapping properties file").create("m")); argOptions.addOption(OptionBuilder.withArgName("dir").hasArg().withLongOpt("source").withDescription( "The directory with all ZML schema files. 
The requested project namespace must be present, as well as all its parents.") .create("s")); argOptions.addOption(OptionBuilder.withArgName("url").hasArg().withLongOpt("namespace") .withDescription("The namespace of the project to be generated.").create("n")); // use GNU parser that allows longer option name (e.g. `-vvv`) CommandLineParser parser = new GnuParser(); CommandLine line; try { // parse the command line arguments line = parser.parse(argOptions, args); } catch (ParseException exp) { // oops, something went wrong System.err.println(exp.getMessage()); // automatically generate the help statement HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("gnast", argOptions, true); return null; } Level verbosity = line.hasOption("v") ? Level.INFO : (line.hasOption("vv") ? Level.FINE : (line.hasOption("vvv") ? Level.FINER : Level.OFF)); String[] templates = line.getOptionValues("t"); List<URL> templateDirs = new ArrayList<URL>(); for (String path : templates) { templateDirs.add(toURL(path)); } return new GnastBuilder().verbosity(verbosity).finalizers(line.hasOption("f")) .destination(toFile(line.getOptionValue("d"))).templates(templateDirs) .mapping(toURL(line.getOptionValue("m"))).sourceSchemas(schemaDirToURL(line.getOptionValue("s"))) .namespace(line.getOptionValue("n")); }
From source file:org.jafer.zclient.AbstractClient.java
/** * sets/checks default properties of the ZClient object * * @throws JaferException Description of Exception *//*from w ww. j a va 2 s . co m*/ private void setDefaults() { if (getRemoteAddress() == null) userIP = ""; else userIP = "<" + getRemoteAddress() + ">"; if (getDocument() == null) setDocument(DOMFactory.newDocument()); if (getDatabases() == null) setDatabases(DEFAULT_DATABASE_NAME); // moved check for null to // setDatabases(); if (getDataCacheSize() < 1 || getDataCacheSize() > MAX_DATACACHE_SIZE) setDataCacheSize(DEFAULT_DATACACHE_SIZE); if (getFetchSize() < 1 || getFetchSize() > getDataCacheSize()) setFetchSize(getDataCacheSize() < DEFAULT_FETCH_SIZE ? getDataCacheSize() : DEFAULT_FETCH_SIZE); if (getFetchView() < 0.0 || getFetchView() > 1.0) setFetchView(DEFAULT_FETCH_VIEW); if (getResultSetName() == null) setResultSetName(DEFAULT_RESULTSET_NAME); if (getElementSpec() == null) setElementSpec(DEFAULT_ELEMENT_SPEC); if (getAutoReconnect() < 0) setAutoReconnect(AUTO_RECONNECT); if (getTimeout() < 0) // if getTimeout() > Integer.MAX_VALUE then // returned int is negative value setTimeout(TIMEOUT); // if (getSearchProfile() == null) // setSearchProfile(DEFAULT_SEARCH_PROFILE); if (getRecordSchema() == null) setRecordSchema(DEFAULT_RECORD_SCHEMA); try { if (getRecordSyntax() == null) setRecordSyntax(Config.convertSyntax(Config.getRecordSyntax(getRecordSchema()))); } catch (JaferException ex) { setRecordSyntax(DEFAULT_RECORD_SYNTAX); } logger.log(Level.FINER, "Java version: " + System.getProperty("java.version")); logger.log(Level.FINER, "Java home: " + System.getProperty("java.home")); logger.log(Level.FINER, "Classpath: " + System.getProperty("java.class.path")); logger.log(Level.FINER, "Operating system: " + System.getProperty("os.name")); logger.log(Level.FINER, "ZClient property dataCacheSize: " + getDataCacheSize()); logger.log(Level.FINER, "ZClient property fetchSize: " + getFetchSize()); logger.log(Level.FINER, "ZClient property fetchView: " + 
getFetchView()); logger.log(Level.FINER, "ZClient property elementSpec: " + getElementSpec()); logger.log(Level.FINER, "ZClient property recordSchema: " + getRecordSchema()); logger.log(Level.FINER, "ZClient property host: " + getHost()); logger.log(Level.FINER, "ZClient property port: " + getPort()); logger.log(Level.FINER, "ZClient property dataBases: " + getDatabaseNames()); }
From source file:com.ibm.jaggr.core.impl.AbstractAggregatorImpl.java
@Override public void destroy() { final String sourceMethod = "destroy"; //$NON-NLS-1$ boolean isTraceLogging = log.isLoggable(Level.FINER); if (isTraceLogging) { log.entering(AbstractAggregatorImpl.class.getName(), sourceMethod); }/* www . ja v a 2 s .c o m*/ shutdown(); super.destroy(); if (isTraceLogging) { log.exiting(AbstractAggregatorImpl.class.getName(), sourceMethod); } }
From source file:org.b3log.solo.util.Statistics.java
/** * Blog statistic view count +1.//w w w . j av a 2 s .c o m * * <p> * If it is a search engine bot made the specified request, will NOT increment blog statistic view count. * </p> * * <p> * There is a cron job (/console/stat/viewcnt) to flush the blog view count from cache to datastore. * </p> * * @param request the specified request * @param response the specified response * @throws RepositoryException repository exception * @throws JSONException json exception * @see Requests#searchEngineBotRequest(javax.servlet.http.HttpServletRequest) */ public void incBlogViewCount(final HttpServletRequest request, final HttpServletResponse response) throws RepositoryException, JSONException { if (Requests.searchEngineBotRequest(request)) { return; } if (Requests.hasBeenServed(request, response)) { return; } final JSONObject statistic = statisticRepository.get(Statistic.STATISTIC); if (null == statistic) { return; } LOGGER.log(Level.FINEST, "Before inc blog view count[statistic={0}]", statistic); int blogViewCnt = statistic.getInt(Statistic.STATISTIC_BLOG_VIEW_COUNT); ++blogViewCnt; statistic.put(Statistic.STATISTIC_BLOG_VIEW_COUNT, blogViewCnt); // Repository cache prefix, Refers to GAERepository#CACHE_KEY_PREFIX statisticRepository.getCache().putAsync(REPOSITORY_CACHE_KEY_PREFIX + Statistic.STATISTIC, statistic); LOGGER.log(Level.FINER, "Inced blog view count[statistic={0}]", statistic); }
From source file:org.apache.reef.runtime.azbatch.evaluator.EvaluatorShim.java
private void onEvaluatorLaunch(final String launchCommand, final String evaluatorConfigString, final String fileResourcesUrl) { LOG.log(Level.FINEST, "Entering EvaluatorShim.onEvaluatorLaunch()."); if (StringUtils.isNotBlank(fileResourcesUrl)) { LOG.log(Level.FINER, "Downloading evaluator resource file archive from {0}.", fileResourcesUrl); try {/*from www . j a va 2 s . c o m*/ File tmpFile = downloadFile(fileResourcesUrl); extractFiles(tmpFile); } catch (StorageException | IOException e) { LOG.log(Level.SEVERE, "Failed to download evaluator file resources: {0}. {1}", new Object[] { fileResourcesUrl, e }); throw new RuntimeException(e); } } else { LOG.log(Level.FINER, "No file resources URL given."); } File evaluatorConfigurationFile = new File(this.reefFileNames.getEvaluatorConfigurationPath()); LOG.log(Level.FINER, "Persisting evaluator config at: {0}", evaluatorConfigurationFile.getAbsolutePath()); try { boolean newFileCreated = evaluatorConfigurationFile.createNewFile(); LOG.log(Level.FINEST, newFileCreated ? "Created a new file for persisting evaluator configuration at {0}." : "Using existing file for persisting evaluator configuration at {0}.", evaluatorConfigurationFile.getAbsolutePath()); Configuration evaluatorConfiguration = this.configurationSerializer.fromString(evaluatorConfigString); this.configurationSerializer.toFile(evaluatorConfiguration, evaluatorConfigurationFile); } catch (final IOException | BindException e) { LOG.log(Level.SEVERE, "An unexpected exception occurred while attempting to deserialize and write " + "Evaluator configuration file. 
{0}", e); throw new RuntimeException("Unable to write configuration.", e); } LOG.log(Level.INFO, "Launching the evaluator by invoking the following command: " + launchCommand); try { final List<String> command = Arrays.asList(launchCommand.split(" ")); this.evaluatorProcess = new ProcessBuilder().command(command) .redirectError(new File(this.azureBatchFileNames.getEvaluatorStdErrFilename())) .redirectOutput(new File(this.azureBatchFileNames.getEvaluatorStdOutFilename())).start(); // This will block the current thread until the Evaluator process completes. this.evaluatorProcessExitValue = EvaluatorShim.this.evaluatorProcess.waitFor(); LOG.log(Level.INFO, "Evaluator process completed with exit value: {0}.", this.evaluatorProcessExitValue); } catch (IOException | InterruptedException e) { throw new RuntimeException(e); } LOG.log(Level.FINEST, "Exiting EvaluatorShim.onEvaluatorLaunch()."); }
From source file:org.geotools.data.complex.XmlMappingFeatureIterator.java
private void setValues(Object value, Attribute setterTarget, Pair parentPair, AttributeMapping attMapping, Feature target, StepList xpath, Expression idExpression) throws IOException { if (LOGGER.isLoggable(Level.FINER)) { LOGGER.finer("setting target=" + setterTarget.getName() + ", targetXpath=" + attMapping.getTargetXPath() + ", value=" + value); }// w w w . ja v a 2 s. c o m String featureId = getId(idExpression, parentPair, attMapping, ""); // Attribute att = xpathAttributeBuilder.set(target, // xpath, value, featureId, attMapping.getTargetNodeInstance(), false, attMapping // .getSourceExpression()); Attribute att = setAttributeValue(target, featureId, null, attMapping, value, xpath, null); setClientProperties(att, parentPair.getXpath(), attMapping.getClientProperties()); }
From source file:ComputeNode.java
/** * map task// www . j av a2s. c o m */ private void sort(MapTask t) { lg.log(Level.INFO, "sort: Enter - " + t.getTaskId()); t.setStartTaskTime(System.currentTimeMillis()); // This is to make sure compute node fails only during sorting. isExecutingSortTask = true; myNodeStats.getNoOfJobs().incrementAndGet(); try { Thread.sleep(2 * 1000); } catch (Exception e) { e.printStackTrace(); } Iterator<Integer> iterator = t.getData().iterator(); while (iterator.hasNext()) { lg.log(Level.FINER, "sort: Received integer -> " + iterator.next()); } try { Collections.sort(t.getData()); synchronized (t) { t.wait(10 * t.getData().size()); } server.aggregateMapTasks(t); } catch (Exception e) { lg.log(Level.SEVERE, "Sort:Failure"); e.printStackTrace(); System.exit(1); } lg.log(Level.SEVERE, "sort: Exit - " + t.getTaskId()); t.setEndTaskTime(System.currentTimeMillis()); // This is to make sure compute node fails only during sorting. isExecutingSortTask = false; }