Example usage for org.apache.commons.lang3 StringUtils isNumeric

Introduction

This page collects example usages of org.apache.commons.lang3.StringUtils.isNumeric taken from open-source projects.

Prototype

public static boolean isNumeric(final CharSequence cs) 

Document

Checks if the CharSequence contains only Unicode digits. A decimal point, a sign (+ or -), or whitespace is not a Unicode digit, so such inputs return false; null and the empty CharSequence also return false.
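
As a quick, self-contained sketch of that contract (the class name IsNumericDemo is only illustrative): digits, including non-Latin Unicode digits, pass the check, while signs, decimal points, whitespace, null and the empty string all fail it.

import org.apache.commons.lang3.StringUtils;

public class IsNumericDemo {
    public static void main(String[] args) {
        System.out.println(StringUtils.isNumeric("123"));                // true
        System.out.println(StringUtils.isNumeric("\u0967\u0968\u0969")); // true: Devanagari digits are Unicode digits
        System.out.println(StringUtils.isNumeric(""));                   // false in commons-lang3 (was true in commons-lang 2.x)
        System.out.println(StringUtils.isNumeric(null));                 // false
        System.out.println(StringUtils.isNumeric("12.3"));               // false: the decimal point is not a digit
        System.out.println(StringUtils.isNumeric("-123"));               // false: the sign is not a digit
        System.out.println(StringUtils.isNumeric("12 3"));               // false: whitespace is not a digit
    }
}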

Usage

From source file:org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.java

/**
 * Creates a FlinkKafkaProducer for a given topic. The sink produces its input to
 * the topic. It accepts a keyed {@link KeyedSerializationSchema} and possibly a custom {@link FlinkKafkaPartitioner}.
 *
 * <p>If a partitioner is not provided, written records will be partitioned by the attached key of each
 * record (as determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If written records do not
 * have a key (i.e., {@link KeyedSerializationSchema#serializeKey(Object)} returns {@code null}), they
 * will be distributed to Kafka partitions in a round-robin fashion.
 *
 * @param defaultTopicId The default topic to write data to
 * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers' is the only required argument.
 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
 *                          If a partitioner is not provided, records will be partitioned by the key of each record
 *                          (determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If the keys
 *                          are {@code null}, then records will be distributed to Kafka partitions in a
 *                          round-robin fashion.
 * @param semantic Defines semantic that will be used by this producer (see {@link FlinkKafkaProducer.Semantic}).
 * @param kafkaProducersPoolSize Overwrite default KafkaProducers pool size (see {@link FlinkKafkaProducer.Semantic#EXACTLY_ONCE}).
 */
public FlinkKafkaProducer(String defaultTopicId, KeyedSerializationSchema<IN> serializationSchema,
        Properties producerConfig, Optional<FlinkKafkaPartitioner<IN>> customPartitioner,
        FlinkKafkaProducer.Semantic semantic, int kafkaProducersPoolSize) {
    super(new FlinkKafkaProducer.TransactionStateSerializer(), new FlinkKafkaProducer.ContextStateSerializer());

    this.defaultTopicId = checkNotNull(defaultTopicId, "defaultTopicId is null");
    this.schema = checkNotNull(serializationSchema, "serializationSchema is null");
    this.producerConfig = checkNotNull(producerConfig, "producerConfig is null");
    this.flinkKafkaPartitioner = checkNotNull(customPartitioner, "customPartitioner is null").orElse(null);
    this.semantic = checkNotNull(semantic, "semantic is null");
    this.kafkaProducersPoolSize = kafkaProducersPoolSize;
    checkState(kafkaProducersPoolSize > 0, "kafkaProducersPoolSize must be non empty");

    ClosureCleaner.clean(this.flinkKafkaPartitioner, true);
    ClosureCleaner.ensureSerializable(serializationSchema);

    // set the producer configuration properties for kafka record key value serializers.
    if (!producerConfig.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) {
        this.producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                ByteArraySerializer.class.getName());
    } else {
        LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG);
    }

    if (!producerConfig.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
        this.producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                ByteArraySerializer.class.getName());
    } else {
        LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG);
    }

    // eagerly ensure that bootstrap servers are set.
    if (!this.producerConfig.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
        throw new IllegalArgumentException(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG
                + " must be supplied in the producer config properties.");
    }

    if (!producerConfig.containsKey(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG)) {
        long timeout = DEFAULT_KAFKA_TRANSACTION_TIMEOUT.toMilliseconds();
        checkState(timeout < Integer.MAX_VALUE && timeout > 0, "timeout does not fit into 32 bit integer");
        this.producerConfig.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, (int) timeout);
        LOG.warn("Property [{}] not specified. Setting it to {}", ProducerConfig.TRANSACTION_TIMEOUT_CONFIG,
                DEFAULT_KAFKA_TRANSACTION_TIMEOUT);
    }

    // Enable transactionTimeoutWarnings to avoid silent data loss
    // See KAFKA-6119 (affects versions 0.11.0.0 and 0.11.0.1):
    // The KafkaProducer may not throw an exception if the transaction failed to commit
    if (semantic == FlinkKafkaProducer.Semantic.EXACTLY_ONCE) {
        final Object object = this.producerConfig.get(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG);
        final long transactionTimeout;
        if (object instanceof String && StringUtils.isNumeric((String) object)) {
            transactionTimeout = Long.parseLong((String) object);
        } else if (object instanceof Number) {
            transactionTimeout = ((Number) object).longValue();
        } else {
            throw new IllegalArgumentException(
                    ProducerConfig.TRANSACTION_TIMEOUT_CONFIG + " must be numeric, was " + object);
        }
        super.setTransactionTimeout(transactionTimeout);
        super.enableTransactionTimeoutWarnings(0.8);
    }

    this.topicPartitionsMap = new HashMap<>();
}
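
The isNumeric guard in the EXACTLY_ONCE branch exists because producer properties are often loaded from files, so transaction.timeout.ms may arrive as a String rather than a Number. Below is a minimal standalone sketch of that coercion step; the class and method names are made up for illustration.

import org.apache.commons.lang3.StringUtils;

public final class TransactionTimeoutCoercion {
    // Mirrors the branch above: numeric strings are parsed, Numbers are converted, anything else is rejected.
    static long parseTransactionTimeout(Object value) {
        if (value instanceof String && StringUtils.isNumeric((String) value)) {
            return Long.parseLong((String) value);
        } else if (value instanceof Number) {
            return ((Number) value).longValue();
        }
        throw new IllegalArgumentException("transaction.timeout.ms must be numeric, was " + value);
    }

    public static void main(String[] args) {
        System.out.println(parseTransactionTimeout("900000")); // 900000: numeric String from a properties file
        System.out.println(parseTransactionTimeout(900000));   // 900000: Integer set programmatically
    }
}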

From source file:org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer011.java

/**
 * Creates a FlinkKafkaProducer for a given topic. The sink produces its input to
 * the topic. It accepts a keyed {@link KeyedSerializationSchema} and possibly a custom {@link FlinkKafkaPartitioner}.
 *
 * <p>If a partitioner is not provided, written records will be partitioned by the attached key of each
 * record (as determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If written records do not
 * have a key (i.e., {@link KeyedSerializationSchema#serializeKey(Object)} returns {@code null}), they
 * will be distributed to Kafka partitions in a round-robin fashion.
 *
 * @param defaultTopicId The default topic to write data to
 * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers' is the only required argument.
 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
 *                          If a partitioner is not provided, records will be partitioned by the key of each record
 *                          (determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If the keys
 *                          are {@code null}, then records will be distributed to Kafka partitions in a
 *                          round-robin fashion.
 * @param semantic Defines semantic that will be used by this producer (see {@link Semantic}).
 * @param kafkaProducersPoolSize Overwrite default KafkaProducers pool size (see {@link Semantic#EXACTLY_ONCE}).
 */
public FlinkKafkaProducer011(String defaultTopicId, KeyedSerializationSchema<IN> serializationSchema,
        Properties producerConfig, Optional<FlinkKafkaPartitioner<IN>> customPartitioner, Semantic semantic,
        int kafkaProducersPoolSize) {
    super(new TransactionStateSerializer(), new ContextStateSerializer());

    this.defaultTopicId = checkNotNull(defaultTopicId, "defaultTopicId is null");
    this.schema = checkNotNull(serializationSchema, "serializationSchema is null");
    this.producerConfig = checkNotNull(producerConfig, "producerConfig is null");
    this.flinkKafkaPartitioner = checkNotNull(customPartitioner, "customPartitioner is null").orElse(null);
    this.semantic = checkNotNull(semantic, "semantic is null");
    this.kafkaProducersPoolSize = kafkaProducersPoolSize;
    checkState(kafkaProducersPoolSize > 0, "kafkaProducersPoolSize must be non empty");

    ClosureCleaner.clean(this.flinkKafkaPartitioner, true);
    ClosureCleaner.ensureSerializable(serializationSchema);

    // set the producer configuration properties for kafka record key value serializers.
    if (!producerConfig.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) {
        this.producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                ByteArraySerializer.class.getName());
    } else {
        LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG);
    }

    if (!producerConfig.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
        this.producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                ByteArraySerializer.class.getName());
    } else {
        LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG);
    }

    // eagerly ensure that bootstrap servers are set.
    if (!this.producerConfig.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
        throw new IllegalArgumentException(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG
                + " must be supplied in the producer config properties.");
    }

    if (!producerConfig.containsKey(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG)) {
        long timeout = DEFAULT_KAFKA_TRANSACTION_TIMEOUT.toMilliseconds();
        checkState(timeout < Integer.MAX_VALUE && timeout > 0, "timeout does not fit into 32 bit integer");
        this.producerConfig.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, (int) timeout);
        LOG.warn("Property [{}] not specified. Setting it to {}", ProducerConfig.TRANSACTION_TIMEOUT_CONFIG,
                DEFAULT_KAFKA_TRANSACTION_TIMEOUT);
    }

    // Enable transactionTimeoutWarnings to avoid silent data loss
    // See KAFKA-6119 (affects versions 0.11.0.0 and 0.11.0.1):
    // The KafkaProducer may not throw an exception if the transaction failed to commit
    if (semantic == Semantic.EXACTLY_ONCE) {
        final Object object = this.producerConfig.get(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG);
        final long transactionTimeout;
        if (object instanceof String && StringUtils.isNumeric((String) object)) {
            transactionTimeout = Long.parseLong((String) object);
        } else if (object instanceof Number) {
            transactionTimeout = ((Number) object).longValue();
        } else {
            throw new IllegalArgumentException(
                    ProducerConfig.TRANSACTION_TIMEOUT_CONFIG + " must be numeric, was " + object);
        }
        super.setTransactionTimeout(transactionTimeout);
        super.enableTransactionTimeoutWarnings(0.8);
    }

    this.topicPartitionsMap = new HashMap<>();
}

From source file:org.apache.flink.table.runtime.functions.SqlFunctionUtils.java

public static boolean isDigit(Object obj) {
    if ((obj instanceof Long) || (obj instanceof Integer) || (obj instanceof Short) || (obj instanceof Byte)) {
        return true;
    }
    if (obj instanceof String) {
        String s = obj.toString();
        if (s.isEmpty()) {
            return false;
        }
        return StringUtils.isNumeric(s);
    } else {
        return false;
    }
}
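
A few illustrative calls against the method above (this assumes the Flink table-runtime classes are on the classpath; the expected results follow directly from the branches shown):

import org.apache.flink.table.runtime.functions.SqlFunctionUtils;

public class IsDigitDemo {
    public static void main(String[] args) {
        System.out.println(SqlFunctionUtils.isDigit(42));     // true: Integer
        System.out.println(SqlFunctionUtils.isDigit(7L));     // true: Long
        System.out.println(SqlFunctionUtils.isDigit("123"));  // true: numeric string
        System.out.println(SqlFunctionUtils.isDigit(""));     // false: empty string is rejected explicitly
        System.out.println(SqlFunctionUtils.isDigit("-123")); // false: isNumeric rejects the sign
        System.out.println(SqlFunctionUtils.isDigit(3.14));   // false: Double is not among the accepted types
    }
}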

From source file:org.apache.hadoop.hbase.net.Address.java

/**
 * If the hostname is a.b.c and the port is 123, return a:123 instead of a.b.c:123.
 * @return if the host looks like it is resolved -- not an IP -- then the string with the domain
 * portion stripped, otherwise the same as {@link #toString()}
 */
public String toStringWithoutDomain() {
    String hostname = getHostname();
    String[] parts = hostname.split("\\.");
    if (parts.length > 1) {
        for (String part : parts) {
            if (!StringUtils.isNumeric(part)) {
                return Address.fromParts(parts[0], getPort()).toString();
            }
        }
    }
    return toString();
}
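
Here isNumeric distinguishes dotted IPv4 addresses (every part numeric, so nothing is stripped) from resolved hostnames (at least one non-numeric part, so the domain is dropped). A rough sketch of the expected behavior, using made-up hosts and ports:

import org.apache.hadoop.hbase.net.Address;

public class AddressDemo {
    public static void main(String[] args) {
        // Resolved-looking hostname: "regionserver-1" is not numeric, so the domain is stripped.
        Address named = Address.fromParts("regionserver-1.example.com", 16020);
        System.out.println(named.toStringWithoutDomain()); // regionserver-1:16020

        // IPv4 address: every dot-separated part is numeric, so the full address is kept.
        Address ip = Address.fromParts("10.0.0.5", 16020);
        System.out.println(ip.toStringWithoutDomain()); // 10.0.0.5:16020
    }
}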

From source file:org.apache.jmeter.control.SwitchController.java

/**
 * @return the selection value as an int, with the value set to zero if it is out of range.
 */
private int getSelectionAsInt() {
    getProperty(SWITCH_VALUE).recoverRunningVersion(null);
    String sel = getSelection();
    if (StringUtils.isEmpty(sel)) {
        return 0;
    } else {
        try {
            if (StringUtils.isNumeric(sel)) {
                int ret = Integer.parseInt(sel);
                if (ret < 0 || ret >= getSubControllers().size()) {
                    // Out of range, return the first one
                    ret = 0;
                }
                return ret;
            }
        } catch (NumberFormatException e) {
            // it will be handled by code below
        }
        return scanControllerNames(sel);
    }
}
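
One detail worth noting: isNumeric only proves the string is made of digits, not that it fits an int, so the surrounding try/catch is still needed for overflow. A small sketch of that edge case:

import org.apache.commons.lang3.StringUtils;

public class DigitsButNotAnIntDemo {
    public static void main(String[] args) {
        String sel = "99999999999"; // eleven digits, larger than Integer.MAX_VALUE
        System.out.println(StringUtils.isNumeric(sel)); // true: digits only
        try {
            Integer.parseInt(sel);
        } catch (NumberFormatException e) {
            System.out.println("parseInt still fails: " + e.getMessage()); // overflow is caught here
        }
    }
}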

From source file:org.apache.jmeter.report.processor.ErrorsSummaryConsumer.java

/**
 * Determine if the HTTP status code is successful or not, i.e. in range 200
 * to 399 inclusive.
 *
 * @param codeAsString
 *            status code to check
 * @return whether in range 200-399 or not
 * 
 *         FIXME Duplicates HTTPSamplerBase#isSuccessCode but it's in http
 *         protocol
 */
protected boolean isSuccessCode(String codeAsString) {
    if (StringUtils.isNumeric(codeAsString)) {
        try {
            int code = Integer.parseInt(codeAsString);
            return (code >= 200 && code <= 399);
        } catch (NumberFormatException ex) {
            return false;
        }
    }
    return false;
}
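
Since the method is protected, the sketch below simply reproduces the same guard so the expected results can be run standalone:

import org.apache.commons.lang3.StringUtils;

public class SuccessCodeDemo {
    // Reproduction of the isSuccessCode logic above, for illustration only.
    static boolean isSuccessCode(String codeAsString) {
        if (StringUtils.isNumeric(codeAsString)) {
            try {
                int code = Integer.parseInt(codeAsString);
                return code >= 200 && code <= 399;
            } catch (NumberFormatException ex) {
                return false;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(isSuccessCode("200")); // true
        System.out.println(isSuccessCode("399")); // true
        System.out.println(isSuccessCode("404")); // false: outside the 200-399 range
        System.out.println(isSuccessCode("20x")); // false: fails the isNumeric guard
        System.out.println(isSuccessCode(null));  // false: isNumeric(null) is false
    }
}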

From source file:org.apache.myriad.health.HealthCheckUtils.java

private static String[] generateHostPortArray(String connectionString) {
    String[] split = connectionString.split(":");
    if (split.length != 2) {
        throw new IllegalArgumentException("The Connection String " + connectionString
                + " is invalid. It must be in <host>:<port> format");
    } else if (!StringUtils.isNumeric(split[1])) {
        throw new IllegalArgumentException(
                "The Connection String " + connectionString + " is invalid. The port must be an integer");
    } else {
        return split;
    }
}
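
The method is private, so the sketch below reproduces the same validation to show the expected outcomes; the hosts and ports are made up:

import org.apache.commons.lang3.StringUtils;

public class HostPortDemo {
    // Reproduction of generateHostPortArray above, for illustration only.
    static String[] hostPort(String connectionString) {
        String[] split = connectionString.split(":");
        if (split.length != 2) {
            throw new IllegalArgumentException("The Connection String " + connectionString
                    + " is invalid. It must be in <host>:<port> format");
        } else if (!StringUtils.isNumeric(split[1])) {
            throw new IllegalArgumentException(
                    "The Connection String " + connectionString + " is invalid. The port must be an integer");
        }
        return split;
    }

    public static void main(String[] args) {
        System.out.println(String.join(" / ", hostPort("zk-1.example.com:2181"))); // zk-1.example.com / 2181
        try {
            hostPort("zk-1.example.com:port"); // rejected: "port" fails the isNumeric check
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}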

From source file:org.apache.nifi.processors.kafka.pubsub.KafkaProcessorUtils.java

static void buildCommonKafkaProperties(final ProcessContext context, final Class<?> kafkaConfigClass,
        final Map<String, Object> mapToPopulate) {
    for (PropertyDescriptor propertyDescriptor : context.getProperties().keySet()) {
        if (propertyDescriptor.equals(SSL_CONTEXT_SERVICE)) {
            // Translate SSLContext Service configuration into Kafka properties
            final SSLContextService sslContextService = context.getProperty(SSL_CONTEXT_SERVICE)
                    .asControllerService(SSLContextService.class);
            if (sslContextService != null && sslContextService.isKeyStoreConfigured()) {
                mapToPopulate.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, sslContextService.getKeyStoreFile());
                mapToPopulate.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG,
                        sslContextService.getKeyStorePassword());
                final String keyPass = sslContextService.getKeyPassword() == null
                        ? sslContextService.getKeyStorePassword()
                        : sslContextService.getKeyPassword();
                mapToPopulate.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, keyPass);
                mapToPopulate.put(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, sslContextService.getKeyStoreType());
            }

            if (sslContextService != null && sslContextService.isTrustStoreConfigured()) {
                mapToPopulate.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG,
                        sslContextService.getTrustStoreFile());
                mapToPopulate.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG,
                        sslContextService.getTrustStorePassword());
                mapToPopulate.put(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, sslContextService.getTrustStoreType());
            }
        }

        String propertyName = propertyDescriptor.getName();
        String propertyValue = propertyDescriptor.isExpressionLanguageSupported()
                ? context.getProperty(propertyDescriptor).evaluateAttributeExpressions().getValue()
                : context.getProperty(propertyDescriptor).getValue();

        if (propertyValue != null) {
            // If the property name ends in ".ms" then it is a time period. We want to accept either an
            // integer number of milliseconds or a standard NiFi time period such as "5 secs".
            if (propertyName.endsWith(".ms") && !StringUtils.isNumeric(propertyValue.trim())) { // kafka standard time notation
                propertyValue = String
                        .valueOf(FormatUtils.getTimeDuration(propertyValue.trim(), TimeUnit.MILLISECONDS));
            }

            if (isStaticStringFieldNamePresent(propertyName, kafkaConfigClass, CommonClientConfigs.class,
                    SslConfigs.class, SaslConfigs.class)) {
                mapToPopulate.put(propertyName, propertyValue);
            }
        }
    }
}
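
The isNumeric check on ".ms" properties decides whether a value is already a millisecond count or a NiFi time-period expression that needs conversion. A rough sketch of that branch in isolation (assumes the NiFi utilities on the classpath; the property values are illustrative):

import java.util.concurrent.TimeUnit;

import org.apache.commons.lang3.StringUtils;
import org.apache.nifi.util.FormatUtils;

public class MsPropertyDemo {
    // Same idea as the ".ms" handling above: plain digits pass through,
    // anything else is treated as a NiFi time period and converted to milliseconds.
    static String normalizeMsProperty(String propertyValue) {
        if (!StringUtils.isNumeric(propertyValue.trim())) {
            return String.valueOf(FormatUtils.getTimeDuration(propertyValue.trim(), TimeUnit.MILLISECONDS));
        }
        return propertyValue;
    }

    public static void main(String[] args) {
        System.out.println(normalizeMsProperty("30000"));   // already numeric milliseconds, unchanged
        System.out.println(normalizeMsProperty("30 secs")); // converted by FormatUtils to a millisecond value
    }
}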

From source file:org.apache.openmeetings.web.user.calendar.CalendarPanel.java

@Override
protected void onInitialize() {
    final Form<Date> form = new Form<>("form");
    add(form);

    dialog = new AppointmentDialog("appointment", this, new CompoundPropertyModel<>(getDefault()));
    add(dialog);

    boolean isRtl = isRtl();
    Options options = new Options();
    options.set("isRTL", isRtl);
    options.set("height", Options.asString("parent"));
    options.set("header", isRtl
            ? "{left: 'agendaDay,agendaWeek,month', center: 'title', right: 'today nextYear,next,prev,prevYear'}"
            : "{left: 'prevYear,prev,next,nextYear today', center: 'title', right: 'month,agendaWeek,agendaDay'}");
    options.set("allDaySlot", false);
    options.set("axisFormat", Options.asString("H(:mm)"));
    options.set("defaultEventMinutes", 60);
    options.set("timeFormat", Options.asString("H(:mm)"));

    options.set("buttonText", new JSONObject().put("month", getString("801")).put("week", getString("800"))
            .put("day", getString("799")).put("today", getString("1555")).toString());

    options.set("locale", Options.asString(WebSession.get().getLocale().toLanguageTag()));

    calendar = new Calendar("calendar", new AppointmentModel(), options) {
        private static final long serialVersionUID = 1L;

        @Override
        protected void onInitialize() {
            super.onInitialize();
            add(new CalendarFunctionsBehavior(getMarkupId()));
        }

        @Override
        public boolean isSelectable() {
            return true;
        }

        @Override
        public boolean isDayClickEnabled() {
            return true;
        }

        @Override
        public boolean isEventClickEnabled() {
            return true;
        }

        @Override
        public boolean isEventDropEnabled() {
            return true;
        }

        @Override
        public boolean isEventResizeEnabled() {
            return true;
        }

        //no need to override onDayClick
        @Override
        public void onSelect(AjaxRequestTarget target, CalendarView view, LocalDateTime start,
                LocalDateTime end, boolean allDay) {
            Appointment a = getDefault();
            LocalDateTime s = start, e = end;
            if (CalendarView.month == view) {
                LocalDateTime now = ZonedDateTime.now(getZoneId()).toLocalDateTime();
                s = start.withHour(now.getHour()).withMinute(now.getMinute());
                e = s.plus(1, ChronoUnit.HOURS);
            }
            a.setStart(getDate(s));
            a.setEnd(getDate(e));
            dialog.setModelObjectWithAjaxTarget(a, target);

            dialog.open(target);
        }

        @Override
        public void onEventClick(AjaxRequestTarget target, CalendarView view, String eventId) {
            if (!StringUtils.isNumeric(eventId)) {
                return;
            }
            Appointment a = apptDao.get(Long.valueOf(eventId));
            dialog.setModelObjectWithAjaxTarget(a, target);

            dialog.open(target);
        }

        @Override
        public void onEventDrop(AjaxRequestTarget target, String eventId, long delta, boolean allDay) {
            if (!StringUtils.isNumeric(eventId)) {
                refresh(target);
                return;
            }
            Appointment a = apptDao.get(Long.valueOf(eventId));
            if (!AppointmentDialog.isOwner(a)) {
                return;
            }
            java.util.Calendar cal = WebSession.getCalendar();
            cal.setTime(a.getStart());
            cal.add(java.util.Calendar.MILLISECOND, (int) delta);
            a.setStart(cal.getTime());

            cal.setTime(a.getEnd());
            cal.add(java.util.Calendar.MILLISECOND, (int) delta);
            a.setEnd(cal.getTime());

            apptDao.update(a, getUserId());

            if (a.getCalendar() != null) {
                updatedeleteAppointment(target, CalendarDialog.DIALOG_TYPE.UPDATE_APPOINTMENT, a);
            }
        }

        @Override
        public void onEventResize(AjaxRequestTarget target, String eventId, long delta) {
            if (!StringUtils.isNumeric(eventId)) {
                refresh(target);
                return;
            }
            Appointment a = apptDao.get(Long.valueOf(eventId));
            if (!AppointmentDialog.isOwner(a)) {
                return;
            }
            java.util.Calendar cal = WebSession.getCalendar();
            cal.setTime(a.getEnd());
            cal.add(java.util.Calendar.MILLISECOND, (int) delta);
            a.setEnd(cal.getTime());

            apptDao.update(a, getUserId());

            if (a.getCalendar() != null) {
                updatedeleteAppointment(target, CalendarDialog.DIALOG_TYPE.UPDATE_APPOINTMENT, a);
            }
        }
    };

    form.add(calendar);

    populateGoogleCalendars();

    add(refreshTimer);
    add(syncTimer);

    calendarDialog = new CalendarDialog("calendarDialog", this,
            new CompoundPropertyModel<>(getDefaultCalendar()));

    add(calendarDialog);

    calendarListContainer.setOutputMarkupId(true);
    calendarListContainer
            .add(new ListView<OmCalendar>("items", new LoadableDetachableModel<List<OmCalendar>>() {
                private static final long serialVersionUID = 1L;

                @Override
                protected List<OmCalendar> load() {
                    List<OmCalendar> cals = new ArrayList<>(apptManager.getCalendars(getUserId()));
                    cals.addAll(apptManager.getGoogleCalendars(getUserId()));
                    return cals;
                }
            }) {
                private static final long serialVersionUID = 1L;

                @Override
                protected void populateItem(final ListItem<OmCalendar> item) {
                    item.setOutputMarkupId(true);
                    final OmCalendar cal = item.getModelObject();
                    item.add(new WebMarkupContainer("item").add(new Label("name", cal.getTitle())));
                    item.add(new AjaxEventBehavior(EVT_CLICK) {
                        private static final long serialVersionUID = 1L;

                        @Override
                        protected void onEvent(AjaxRequestTarget target) {
                            calendarDialog.open(target, CalendarDialog.DIALOG_TYPE.UPDATE_CALENDAR, cal);
                            target.add(calendarDialog);
                        }
                    });
                }
            });

    add(new Button("syncCalendarButton").add(new AjaxEventBehavior(EVT_CLICK) {
        private static final long serialVersionUID = 1L;

        @Override
        protected void onEvent(AjaxRequestTarget target) {
            syncCalendar(target);
        }
    }));

    add(new Button("submitCalendar").add(new AjaxEventBehavior(EVT_CLICK) {
        private static final long serialVersionUID = 1L;

        @Override
        protected void onEvent(AjaxRequestTarget target) {
            calendarDialog.open(target, CalendarDialog.DIALOG_TYPE.UPDATE_CALENDAR, getDefaultCalendar());
            target.add(calendarDialog);
        }
    }));

    add(calendarListContainer);

    super.onInitialize();
}

From source file:org.apache.syncope.client.cli.commands.install.InstallSetup.java

public void setup() throws FileNotFoundException, IllegalAccessException {
    installResultManager.printWelcome();

    System.out.println(
            "Path to config files of Syncope CLI client will be: " + InstallConfigFileTemplate.dirPath());

    if (!FileSystemUtils.exists(InstallConfigFileTemplate.dirPath())) {
        throw new FileNotFoundException(
                "Directory: " + InstallConfigFileTemplate.dirPath() + " does not exists!");
    }

    if (!FileSystemUtils.canWrite(InstallConfigFileTemplate.dirPath())) {
        throw new IllegalAccessException("Permission denied on " + InstallConfigFileTemplate.dirPath());
    }
    System.out.println("- File system permission checked");
    System.out.println("");

    try (Scanner scanIn = new Scanner(System.in)) {
        System.out.print("Syncope server schema [http/https]: ");
        String syncopeServerSchemaFromSystemIn = scanIn.nextLine();
        boolean schemaFound = false;
        while (!schemaFound) {
            if (("http".equalsIgnoreCase(syncopeServerSchemaFromSystemIn))
                    || ("https".equalsIgnoreCase(syncopeServerSchemaFromSystemIn))) {
                syncopeServerSchema = syncopeServerSchemaFromSystemIn;
                schemaFound = true;
            } else {
                System.out.println("Please use one of below values: ");
                System.out.println("   - http");
                System.out.println("   - https");
                syncopeServerSchemaFromSystemIn = scanIn.nextLine();
            }
        }

        System.out.print("Syncope server hostname [e.g. " + syncopeServerHostname + "]: ");
        String syncopeServerHostnameFromSystemIn = scanIn.nextLine();
        boolean syncopeServerHostnameFound = false;
        while (!syncopeServerHostnameFound) {
            if (StringUtils.isNotBlank(syncopeServerHostnameFromSystemIn)) {
                syncopeServerHostname = syncopeServerHostnameFromSystemIn;
                syncopeServerHostnameFound = true;
            } else {
                System.out.print("Syncope server hostname [e.g. " + syncopeServerHostname + "]: ");
                syncopeServerHostnameFromSystemIn = scanIn.nextLine();
            }
        }

        System.out.print("Syncope server port [e.g. " + syncopeServerPort + "]: ");
        String syncopeServerPortFromSystemIn = scanIn.nextLine();
        boolean syncopeServerPortFound = false;
        while (!syncopeServerPortFound) {
            // Accept the port only when it is both non-blank and numeric; non-numeric input
            // triggers the error message, while a blank line simply re-prompts below.
            if (StringUtils.isNotBlank(syncopeServerPortFromSystemIn)
                    && StringUtils.isNumeric(syncopeServerPortFromSystemIn)) {
                syncopeServerPort = syncopeServerPortFromSystemIn;
                syncopeServerPortFound = true;
            } else if (StringUtils.isNotBlank(syncopeServerPortFromSystemIn)) {
                System.err.println(syncopeServerPortFromSystemIn + " is not a numeric string, try again");
                syncopeServerPortFromSystemIn = scanIn.nextLine();
            } else {
                System.out.print("Syncope server port [e.g. " + syncopeServerPort + "]: ");
                syncopeServerPortFromSystemIn = scanIn.nextLine();
            }
        }

        System.out.print("Syncope server rest context [e.g. " + syncopeServerRestContext + "]: ");
        String syncopeServerRestContextFromSystemIn = scanIn.nextLine();
        boolean syncopeServerRestContextFound = false;
        while (!syncopeServerRestContextFound) {
            if (StringUtils.isNotBlank(syncopeServerRestContextFromSystemIn)) {
                syncopeServerRestContext = syncopeServerRestContextFromSystemIn;
                syncopeServerRestContextFound = true;
            } else {
                System.out.print("Syncope server port [e.g. " + syncopeServerRestContext + "]: ");
                syncopeServerRestContextFromSystemIn = scanIn.nextLine();
            }
        }

        System.out.print("Syncope admin user: ");
        String syncopeAdminUserFromSystemIn = scanIn.nextLine();
        boolean syncopeAdminUserFound = false;
        while (!syncopeAdminUserFound) {
            if (StringUtils.isNotBlank(syncopeAdminUserFromSystemIn)) {
                syncopeAdminUser = syncopeAdminUserFromSystemIn;
                syncopeAdminUserFound = true;
            } else {
                System.out.print("Syncope admin user: ");
                syncopeAdminUserFromSystemIn = scanIn.nextLine();
            }
        }

        char[] syncopeAdminPasswordFromSystemConsole = System.console()
                .readPassword("Syncope admin password: ");
        boolean syncopeAdminPasswordFound = false;
        while (!syncopeAdminPasswordFound) {
            if (syncopeAdminPasswordFromSystemConsole != null
                    && syncopeAdminPasswordFromSystemConsole.length > 0) {
                syncopeAdminPassword = new String(syncopeAdminPasswordFromSystemConsole);
                syncopeAdminPasswordFound = true;
            } else {
                syncopeAdminPasswordFromSystemConsole = System.console()
                        .readPassword("Syncope admin password: ");
            }
        }
    }

    final JasyptUtils jasyptUtils = JasyptUtils.get();
    try {

        final String contentCliPropertiesFile = InstallConfigFileTemplate.cliPropertiesFile(syncopeServerSchema,
                syncopeServerHostname, syncopeServerPort, syncopeServerRestContext, syncopeAdminUser,
                jasyptUtils.encrypt(syncopeAdminPassword));
        FileSystemUtils.createFileWith(InstallConfigFileTemplate.configurationFilePath(),
                contentCliPropertiesFile);
    } catch (final IOException ex) {
        System.out.println(ex.getMessage());
    }

    try {
        final SyncopeService syncopeService = SyncopeServices.get(SyncopeService.class);
        final String syncopeVersion = syncopeService.platform().getVersion();
        installResultManager.installationSuccessful(syncopeVersion);
    } catch (final ProcessingException ex) {
        LOG.error("Error installing CLI", ex);
        installResultManager.manageProcessingException(ex);
    } catch (final Exception e) {
        LOG.error("Error installing CLI", e);
        installResultManager.manageException(e);
    }
}