List of usage examples for java.lang Boolean getBoolean
public static boolean getBoolean(String name)
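Boolean.getBoolean(name) returns true only when a system property with the given name exists and its value equals "true", ignoring case. It does not parse an arbitrary string; for that, use Boolean.parseBoolean(String). A minimal sketch of the lookup behavior (the property name demo.flag is made up for illustration):

public class GetBooleanDemo {
    public static void main(String[] args) {
        // Hypothetical property name used only in this sketch.
        String name = "demo.flag";

        // Prints false: the system property is not set at all.
        System.out.println(Boolean.getBoolean(name));

        // Prints true: the lookup is backed by System.getProperty(name)
        // and the comparison against "true" is case-insensitive.
        System.setProperty(name, "TRUE");
        System.out.println(Boolean.getBoolean(name));

        // Prints false: values such as "1" or "yes" do not count as true.
        System.setProperty(name, "1");
        System.out.println(Boolean.getBoolean(name));
    }
}

In practice the property is usually supplied at launch, e.g. java -Ddemo.flag=true ..., as the examples below illustrate.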
From source file:io.adeptj.runtime.server.Server.java
private Builder enableAJP(Builder undertowBuilder) {
    if (Boolean.getBoolean(SYS_PROP_ENABLE_AJP)) {
        Config ajpConf = Objects.requireNonNull(this.cfgReference.get()).getConfig(KEY_AJP);
        int ajpPort = ajpConf.getInt(KEY_PORT);
        undertowBuilder.addAjpListener(ajpPort, ajpConf.getString(KEY_HOST));
        LOGGER.info("AJP enabled @ port: [{}]", ajpPort);
    }
    return undertowBuilder;
}
From source file:org.eclipse.jdt.ls.core.internal.JavaLanguageServerPlugin.java
private static void redirectStandardStreams() throws FileNotFoundException {
    in = System.in;
    out = System.out;
    err = System.err;
    System.setIn(new ByteArrayInputStream(new byte[0]));
    boolean isDebug = Boolean.getBoolean("jdt.ls.debug");
    if (isDebug) {
        String id = "jdt.ls-" + new SimpleDateFormat("yyyyMMddHHmmss").format(new Date());
        IWorkspaceRoot root = ResourcesPlugin.getWorkspace().getRoot();
        File workspaceFile = root.getRawLocation().makeAbsolute().toFile();
        File rootFile = new File(workspaceFile, ".metadata");
        rootFile.mkdirs();
        File outFile = new File(rootFile, ".out-" + id + ".log");
        FileOutputStream stdFileOut = new FileOutputStream(outFile);
        System.setOut(new PrintStream(stdFileOut));
        File errFile = new File(rootFile, ".error-" + id + ".log");
        FileOutputStream stdFileErr = new FileOutputStream(errFile);
        System.setErr(new PrintStream(stdFileErr));
    } else {
        System.setOut(new PrintStream(new ByteArrayOutputStream()));
        System.setErr(new PrintStream(new ByteArrayOutputStream()));
    }
}
From source file:com.wavemaker.tools.data.ImportDB.java
private void checkImportMode() {
    if (this.properties.getProperty(IMPORT_DATABASE_SYSTEM_PROPERTY) != null) {
        setImportDatabase(Boolean.getBoolean(IMPORT_DATABASE_SYSTEM_PROPERTY));
    }
    if (this.properties.getProperty(GENERATE_SERVICE_CLASS_SYSTEM_PROPERTY) != null) {
        setGenerateServiceClass(Boolean.getBoolean(GENERATE_SERVICE_CLASS_SYSTEM_PROPERTY));
    }
}
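Note that the guard above checks this.properties for the key, while Boolean.getBoolean always consults the JVM system properties; the two agree only if the Properties instance mirrors System.getProperties(). If the intent were to parse the flag out of the local Properties object itself, a variant might look like the following (a hypothetical rewrite, not the WaveMaker source):

private void checkImportMode() {
    // Hypothetical variant: parse the flags from the local Properties
    // object instead of the JVM system properties.
    String importDb = this.properties.getProperty(IMPORT_DATABASE_SYSTEM_PROPERTY);
    if (importDb != null) {
        setImportDatabase(Boolean.parseBoolean(importDb));
    }
    String genServiceClass = this.properties.getProperty(GENERATE_SERVICE_CLASS_SYSTEM_PROPERTY);
    if (genServiceClass != null) {
        setGenerateServiceClass(Boolean.parseBoolean(genServiceClass));
    }
}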
From source file:org.geotools.data.wms.test.LocalGeoServerOnlineTest.java
/**
 * Check GetMap request functionality in the provided CRS.
 * <p>
 * An attempt is made to request the entire image.
 *
 * @param wms
 * @param layer
 * @param crs
 */
private void checkGetFeatureInfo(WebMapServer wms, Layer layer, CoordinateReferenceSystem crs)
        throws Exception {
    layer.clearCache();
    CRSEnvelope latLon = layer.getLatLonBoundingBox();
    GeneralEnvelope envelope = wms.getEnvelope(layer, crs);
    assertFalse(envelope.isEmpty() || envelope.isNull() || envelope.isInfinite());
    assertNotNull("Envelope " + CRS.toSRS(crs), envelope);
    GetMapRequest getMap = wms.createGetMapRequest();
    OperationType operationType = wms.getCapabilities().getRequest().getGetMap();
    getMap.addLayer(layer);
    String version = wms.getCapabilities().getVersion();
    String srs = CRS.toSRS(envelope.getCoordinateReferenceSystem());
    getMap.setBBox(envelope);
    String format = format(operationType, "jpeg");
    getMap.setFormat(format);
    getMap.setDimensions(500, 500);
    URL url = getMap.getFinalURL();
    GetFeatureInfoRequest getFeatureInfo = wms.createGetFeatureInfoRequest(getMap);
    getFeatureInfo.setInfoFormat("text/html");
    getFeatureInfo.setQueryLayers(Collections.singleton(layer));
    getFeatureInfo.setQueryPoint(75, 100);
    URL url2 = getFeatureInfo.getFinalURL();
    GetFeatureInfoResponse response = wms.issueRequest(getFeatureInfo);
    assertEquals("text/html", response.getContentType());
    InputStream stream = response.getInputStream();
    StringBuilderWriter writer = new StringBuilderWriter();
    IOUtils.copy(stream, writer);
    String info = writer.toString();
    assertTrue("response available", !info.isEmpty());
    assertTrue("html", info.contains("<html") || info.contains("<HTML"));
    boolean forceXY = Boolean.getBoolean(GeoTools.FORCE_LONGITUDE_FIRST_AXIS_ORDER);
    String context = "srs=" + srs + " forceXY=" + forceXY + " Version=" + version;
    if (!info.contains("tasmania_water_bodies.3")) {
        System.out.println("FAIL: " + context + ": GetFeatureInfo BBOX=" + envelope);
        System.out.println("GETMAP --> " + url);
        System.out.println("GETFEATUREINFO --> " + url2);
        fail(context + ": GetFeatureInfo BBOX=" + envelope);
    }
}
From source file:org.rhq.enterprise.server.storage.StorageClientManager.java
@TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED)
public void persistStorageProperty(String key, String value) {
    if (Boolean.getBoolean("running.itests-2")) {
        // When running itests-2, there is no server props file, so avoid logging a confusing exception
        return;
    }
    PropertiesFileUpdate updater = new PropertiesFileUpdate(getServerPropsFile().getAbsolutePath());
    try {
        updater.update(key, value);
    } catch (IOException e) {
        // TODO should we propagate the exception?
        LOG.warn("Failed to persist property " + key + " due to unexpected I/O error",
                ThrowableUtil.getRootCause(e));
    }
}
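Because Boolean.getBoolean reads a JVM system property, a guard like this is toggled from the command line rather than from code, for example (the jar name is illustrative):

java -Drunning.itests-2=true -jar app.jar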
From source file:org.apache.geode.cache.client.internal.PoolImpl.java
private void start() {
    if (this.startDisabled)
        return;
    final boolean isDebugEnabled = logger.isDebugEnabled();
    if (isDebugEnabled) {
        List locators = getLocators();
        if (!locators.isEmpty()) {
            logger.debug("PoolImpl - starting pool with locators: {}", locators);
        } else {
            logger.debug("PoolImpl - starting pool with servers: {}", getServers());
        }
    }
    final String timerName = "poolTimer-" + getName() + "-";
    backgroundProcessor = new ScheduledThreadPoolExecutorWithKeepAlive(BACKGROUND_TASK_POOL_SIZE,
            BACKGROUND_TASK_POOL_KEEP_ALIVE, TimeUnit.MILLISECONDS, new ThreadFactory() {
                AtomicInteger threadNum = new AtomicInteger();

                public Thread newThread(final Runnable r) {
                    Thread result = new Thread(r, timerName + threadNum.incrementAndGet());
                    result.setDaemon(true);
                    return result;
                }
            });
    ((ScheduledThreadPoolExecutorWithKeepAlive) backgroundProcessor)
            .setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
    ((ScheduledThreadPoolExecutorWithKeepAlive) backgroundProcessor)
            .setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
    source.start(this);
    connectionFactory.start(backgroundProcessor);
    endpointManager.addListener(new InstantiatorRecoveryListener(backgroundProcessor, this));
    endpointManager.addListener(new DataSerializerRecoveryListener(backgroundProcessor, this));
    if (Boolean.getBoolean(ON_DISCONNECT_CLEAR_PDXTYPEIDS)) {
        endpointManager.addListener(new PdxRegistryRecoveryListener(this));
    }
    endpointManager.addListener(new LiveServerPinger(this));
    manager.start(backgroundProcessor);
    if (queueManager != null) {
        if (isDebugEnabled) {
            logger.debug("starting queueManager");
        }
        queueManager.start(backgroundProcessor);
    }
    if (isDebugEnabled) {
        logger.debug("scheduling pings every {} milliseconds", pingInterval);
    }
    if (this.statisticInterval > 0 && this.dsys.getConfig().getStatisticSamplingEnabled()) {
        backgroundProcessor.scheduleWithFixedDelay(new PublishClientStatsTask(), statisticInterval,
                statisticInterval, TimeUnit.MILLISECONDS);
    }
    // LOG: changed from config to info
    logger.info(LocalizedMessage.create(
            LocalizedStrings.PoolImpl_POOL_0_STARTED_WITH_MULTIUSER_SECURE_MODE_ENABLED_1,
            new Object[] { this.name, this.multiuserSecureModeEnabled }));
}
From source file:fr.gael.dhus.database.DatabasePostInit.java
private void doforcePublic() {
    boolean force_public = Boolean.getBoolean("force.public");
    logger.info("Force public (force.public) requested by user (" + force_public + ")");
    if (!force_public)
        return;
    Thread t = new Thread(new Runnable() {
        @Override
        public void run() {
            Iterator<Collection> collections = collectionDao.getAllCollectons();
            while (collections.hasNext()) {
                Collection collection = collectionDao.read(collections.next().getId());
                List<User> authUsers = collectionDao.getAuthorizedUsers(collection);
                if (collection.getId() == collectionDao.getRootCollection().getId()) {
                    if (authUsers.contains(userDao.getPublicData())) {
                        authUsers.remove(userDao.getPublicData());
                    } else {
                        continue;
                    }
                } else {
                    if (!authUsers.contains(userDao.getPublicData())) {
                        authUsers.add(userDao.getPublicData());
                    } else {
                        continue;
                    }
                }
                collection.setAuthorizedUsers(new HashSet<User>(authUsers));
                collectionDao.update(collection);
            }
            logger.info("Force public (force.public) ended.");
        }
    });
    t.start();
}
From source file:org.apache.sqoop.mapreduce.hcat.SqoopHCatUtilities.java
public void configureHCat(final SqoopOptions opts, final Job job, final ConnManager connMgr,
        final String dbTable, final Configuration config) throws IOException {
    if (configured) {
        LOG.info("Ignoring configuration request for HCatalog info");
        return;
    }
    options = opts;
    checkHomeDirs(opts);
    connManager = connMgr;
    dbTableName = dbTable;
    configuration = config;
    hCatJob = job;
    hCatDatabaseName = options.getHCatDatabaseName() != null ? options.getHCatDatabaseName() : DEFHCATDB;
    hCatDatabaseName = hCatDatabaseName.toLowerCase();
    String optHCTabName = options.getHCatTableName();
    hCatTableName = optHCTabName.toLowerCase();
    if (!hCatTableName.equals(optHCTabName)) {
        LOG.warn("Provided HCatalog table name " + optHCTabName + " will be mapped to " + hCatTableName);
    }
    StringBuilder sb = new StringBuilder();
    sb.append(hCatDatabaseName);
    sb.append('.').append(hCatTableName);
    hCatQualifiedTableName = sb.toString();
    String principalID = System.getProperty(HCatConstants.HCAT_METASTORE_PRINCIPAL);
    if (principalID != null) {
        configuration.set(HCatConstants.HCAT_METASTORE_PRINCIPAL, principalID);
    }
    hCatStaticPartitionKeys = new ArrayList<String>();
    hCatStaticPartitionValues = new ArrayList<String>();
    String partKeysString = options.getHCatalogPartitionKeys();
    String partKeysVals = options.getHCatalogPartitionValues();
    // Already validated
    if (partKeysString != null) {
        String[] keys = partKeysString.split(",");
        for (int i = 0; i < keys.length; ++i) {
            String k = keys[i].trim();
            hCatStaticPartitionKeys.add(k);
        }
        String[] vals = partKeysVals.split(",");
        for (int i = 0; i < vals.length; ++i) {
            String v = vals[i].trim();
            hCatStaticPartitionValues.add(v);
        }
    } else {
        partKeysString = options.getHivePartitionKey();
        if (partKeysString != null) {
            hCatStaticPartitionKeys.add(partKeysString);
        }
        partKeysVals = options.getHivePartitionValue();
        hCatStaticPartitionValues.add(partKeysVals);
    }
    Properties userMapping = options.getMapColumnHive();
    userHiveMapping = new LCKeyMap<String>();
    for (Object o : userMapping.keySet()) {
        String v = (String) userMapping.get(o);
        userHiveMapping.put((String) o, v);
    }
    // Get the partition key filter if needed
    Map<String, String> filterMap = getHCatSPFilterMap();
    String filterStr = getHCatSPFilterStr();
    initDBColumnInfo();
    if (options.doCreateHCatalogTable()) {
        LOG.info("Creating HCatalog table " + hCatQualifiedTableName + " for import");
        createHCatTable(false);
    } else if (options.doDropAndCreateHCatalogTable()) {
        LOG.info("Dropping and Creating HCatalog table " + hCatQualifiedTableName + " for import");
        createHCatTable(true);
    }
    // For serializing the schema to conf
    HCatInputFormat hif = HCatInputFormat.setInput(hCatJob, hCatDatabaseName, hCatTableName);
    if (filterStr != null) {
        LOG.info("Setting hCatInputFormat filter to " + filterStr);
        hif.setFilter(filterStr);
    }
    hCatFullTableSchema = HCatInputFormat.getTableSchema(configuration);
    hCatFullTableSchemaFieldNames = hCatFullTableSchema.getFieldNames();
    LOG.info("HCatalog full table schema fields = "
            + Arrays.toString(hCatFullTableSchema.getFieldNames().toArray()));
    if (filterMap != null) {
        LOG.info("Setting hCatOutputFormat filter to " + filterStr);
    }
    HCatOutputFormat.setOutput(hCatJob, OutputJobInfo.create(hCatDatabaseName, hCatTableName, filterMap));
    hCatOutputSchema = HCatOutputFormat.getTableSchema(configuration);
    List<HCatFieldSchema> hCatPartitionSchemaFields = new ArrayList<HCatFieldSchema>();
    int totalFieldsCount = hCatFullTableSchema.size();
    int dataFieldsCount = hCatOutputSchema.size();
    if (totalFieldsCount > dataFieldsCount) {
        for (int i = dataFieldsCount; i < totalFieldsCount; ++i) {
            hCatPartitionSchemaFields.add(hCatFullTableSchema.get(i));
        }
    }
    hCatPartitionSchema = new HCatSchema(hCatPartitionSchemaFields);
    for (HCatFieldSchema hfs : hCatPartitionSchemaFields) {
        if (hfs.getType() != HCatFieldSchema.Type.STRING) {
            throw new IOException("The table provided " + getQualifiedHCatTableName()
                    + " uses unsupported partitioning key type for column " + hfs.getName() + " : "
                    + hfs.getTypeString() + ". Only string fields are allowed in partition columns in HCatalog");
        }
    }
    LOG.info("HCatalog table partitioning key fields = "
            + Arrays.toString(hCatPartitionSchema.getFieldNames().toArray()));
    List<HCatFieldSchema> outputFieldList = new ArrayList<HCatFieldSchema>();
    for (String col : dbColumnNames) {
        try {
            HCatFieldSchema hfs = hCatFullTableSchema.get(col);
            if (hfs == null) {
                throw new IOException("Database column " + col + " not found in hcatalog table.");
            }
        } catch (Exception e) {
            throw new IOException("Caught Exception checking database column " + col + " in hcatalog table.",
                    e);
        }
        boolean skip = false;
        if (hCatStaticPartitionKeys != null) {
            for (String key : hCatStaticPartitionKeys) {
                if (col.equals(key)) {
                    skip = true;
                    break;
                }
            }
        }
        if (skip) {
            continue;
        }
        outputFieldList.add(hCatFullTableSchema.get(col));
    }
    projectedSchema = new HCatSchema(outputFieldList);
    LOG.info("HCatalog projected schema fields = "
            + Arrays.toString(projectedSchema.getFieldNames().toArray()));
    validateStaticPartitionKey();
    validateHCatTableFieldTypes();
    HCatOutputFormat.setSchema(configuration, hCatFullTableSchema);
    addJars(hCatJob, options);
    config.setBoolean(DEBUG_HCAT_IMPORT_MAPPER_PROP, Boolean.getBoolean(DEBUG_HCAT_IMPORT_MAPPER_PROP));
    config.setBoolean(DEBUG_HCAT_EXPORT_MAPPER_PROP, Boolean.getBoolean(DEBUG_HCAT_EXPORT_MAPPER_PROP));
    configured = true;
}
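The final two setBoolean calls illustrate a common Hadoop pattern: Boolean.getBoolean reads the flag from the launching JVM's system properties, and the value is then copied into the job Configuration so that mapper tasks, which run in separate JVMs and do not inherit the client's system properties, can still see it. A minimal sketch of the same idea (the flag name example.debug.mapper is illustrative; Sqoop's real constants are the DEBUG_HCAT_*_MAPPER_PROP values above):

import org.apache.hadoop.conf.Configuration;

public class DebugFlagBridge {
    // Hypothetical flag name used only in this sketch.
    static final String DEBUG_FLAG = "example.debug.mapper";

    public static void propagate(Configuration conf) {
        // Reads -Dexample.debug.mapper=true from the launching JVM...
        boolean debug = Boolean.getBoolean(DEBUG_FLAG);
        // ...and stores it in the job Configuration, which is shipped
        // to the remote task JVMs along with the job.
        conf.setBoolean(DEBUG_FLAG, debug);
    }
}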
From source file:io.adeptj.runtime.server.Server.java
private int handlePortAvailability(Config httpConf) {
    Integer port = Integer.getInteger(SYS_PROP_SERVER_PORT);
    if (port == null) {
        LOGGER.warn("No port specified via system property: [{}], using default port: [{}]",
                SYS_PROP_SERVER_PORT, httpConf.getInt(KEY_PORT));
        port = httpConf.getInt(KEY_PORT);
    }
    // Note: Shall we do this ourselves or let the server do it later? A problem may arise during OSGi
    // Framework provisioning: the framework is already being started, and another server start (from the
    // same location) would start a new OSGi Framework that could interfere with the running one, since the
    // bundle deployment, heap dump, and OSGi configuration directories are shared. This is unknown at the
    // moment, but the check is done here to be on the safe side.
    if (Boolean.getBoolean(SYS_PROP_CHECK_PORT) && !isPortAvailable(port)) {
        LOGGER.error("Port: [{}] already used, shutting down JVM!!", port);
        // Let LOGBACK clean up its state.
        LogbackManager.INSTANCE.getLoggerContext().stop();
        System.exit(-1); // NOSONAR
    }
    return port;
}