Example usage for java.lang.Integer.equals

Introduction

On this page you can find example usage for java.lang.Integer.equals(Object).

Prototype

public boolean equals(Object obj) 

Document

Compares this object to the specified object. The result is true if and only if the argument is not null and is an Integer object that contains the same int value as this object.
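
Before the project examples below, here is a minimal, self-contained sketch of these semantics (plain java.lang behavior, nothing project-specific):

public class IntegerEqualsDemo {
    public static void main(String[] args) {
        Integer a = Integer.valueOf(1000);
        Integer b = Integer.valueOf(1000);

        // equals compares the wrapped int values
        System.out.println(a.equals(b));      // true

        // == compares references; 1000 is outside the -128..127 cache,
        // so valueOf returns distinct objects here
        System.out.println(a == b);           // false

        // equals is type-sensitive: the argument must itself be an Integer
        System.out.println(a.equals(1000L));  // false (Long)
        System.out.println(a.equals("1000")); // false (String)
    }
}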

Usage

From source file:gr.abiss.calipso.CalipsoServiceImpl.java

/**
 * @param history
 * @param item
 */
public void runStateChangePlugins(History history, Item item, Integer state) {
    // assets to create based on Item info?
    AbstractItem abstractItem = history != null ? history : item;
    logger.info("RUNNING PLUGINS (" + state + "), item state: "
            + abstractItem.getSpace().getMetadata().getStatusValue(abstractItem.getStatus()));

    // run plugins

    Map<Integer, String> pluginsMap = abstractItem.getSpace().getMetadata().getStatesPluginMap();

    if (abstractItem.getStatus() != null && MapUtils.isNotEmpty(pluginsMap)) {
        String pluginClassNames = pluginsMap.get(abstractItem.getStatus());
        logger.info("pluginClassNames:" + pluginClassNames + ", status: "
                + (abstractItem != null ? abstractItem.getStatus() : null));
        logger.info("Running plugins for status: " + abstractItem.getStatus() + ", plugins: "
                + pluginsMap.get(abstractItem.getStatus()));
        if (pluginClassNames != null && pluginClassNames.length() > 0) {

            String[] pluginNames = pluginClassNames.split(" ");
            for (int i = 0; i < pluginNames.length; i++) {
                String pluginClassName = pluginNames[i];
                if (StringUtils.isNotBlank(pluginClassName)) {

                    logger.debug("Loading plugin class: " + pluginClassName);
                    // "clazz" is the class name to load
                    Class clazz = null;
                    try {
                        clazz = Class.forName(pluginClassName);
                        AbstractStatePlugin plugin = (AbstractStatePlugin) clazz.newInstance();
                        if (state.equals(AbstractStatePlugin.PRE_STATE_CHANGE)) {
                            plugin.executePreStateChange(this, item);
                        } else if (state.equals(AbstractStatePlugin.PRE_HISTORY_SAVE)) {
                            plugin.executePreHistoryChange(this, history);
                        } else if (state.equals(AbstractStatePlugin.POST_STATE_CHANGE)) {
                            plugin.executePostStateChange(this, history);
                        }

                    } catch (ClassNotFoundException e) {
                        logger.error("Cannot load State Plugin class: " + pluginClassName, e);
                        e.printStackTrace();
                    } catch (InstantiationException ie) {
                        logger.error("Cannot instantiate State Plugin class: " + pluginClassName, ie);
                        ie.printStackTrace();
                    } catch (IllegalAccessException iae) {
                        logger.error("Cannot load State Plugin class: " + pluginClassName, iae);
                        iae.printStackTrace();
                    }

                }
            }

        }
    }
}
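
The dispatch above compares the boxed state argument to the plugin constants with equals rather than ==. A minimal sketch of why that matters, using hypothetical stand-in values for the AbstractStatePlugin constants:

public class StateDispatchDemo {
    // Hypothetical stand-in for AbstractStatePlugin's state constants
    static final Integer PRE_STATE_CHANGE = Integer.valueOf(200);

    public static void main(String[] args) {
        Integer state = Integer.valueOf(200);

        // Value comparison: safe regardless of how the Integer was created
        System.out.println(state.equals(PRE_STATE_CHANGE)); // true

        // Reference comparison: 200 is outside the -128..127 cache,
        // so these are distinct objects and == is false
        System.out.println(state == PRE_STATE_CHANGE);      // false
    }
}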

From source file:io.druid.indexing.kafka.supervisor.KafkaSupervisor.java

private void discoverTasks() throws ExecutionException, InterruptedException {
    int taskCount = 0;
    List<String> futureTaskIds = Lists.newArrayList();
    List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
    List<Task> tasks = taskStorage.getActiveTasks();

    for (Task task : tasks) {
        if (!(task instanceof KafkaIndexTask) || !dataSource.equals(task.getDataSource())) {
            continue;
        }

        taskCount++;
        final KafkaIndexTask kafkaTask = (KafkaIndexTask) task;
        final String taskId = task.getId();

        // Determine which task group this task belongs to based on one of the partitions handled by this task. If we
        // later determine that this task is actively reading, we will make sure that it matches our current partition
        // allocation (getTaskGroupIdForPartition(partition) should return the same value for every partition being read
        // by this task) and kill it if it is not compatible. If the task is instead found to be in the publishing
        // state, we will permit it to complete even if it doesn't match our current partition allocation to support
        // seamless schema migration.

        Iterator<Integer> it = kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap().keySet()
                .iterator();
        final Integer taskGroupId = (it.hasNext() ? getTaskGroupIdForPartition(it.next()) : null);

        if (taskGroupId != null) {
            // check to see if we already know about this task, either in [taskGroups] or in [pendingCompletionTaskGroups]
            // and if not add it to taskGroups or pendingCompletionTaskGroups (if status = PUBLISHING)
            TaskGroup taskGroup = taskGroups.get(taskGroupId);
            if (!isTaskInPendingCompletionGroups(taskId)
                    && (taskGroup == null || !taskGroup.tasks.containsKey(taskId))) {

                futureTaskIds.add(taskId);
                futures.add(Futures.transform(taskClient.getStatusAsync(taskId),
                        new Function<KafkaIndexTask.Status, Boolean>() {
                            @Override
                            public Boolean apply(KafkaIndexTask.Status status) {
                                if (status == KafkaIndexTask.Status.PUBLISHING) {
                                    addDiscoveredTaskToPendingCompletionTaskGroups(taskGroupId, taskId,
                                            kafkaTask.getIOConfig().getStartPartitions()
                                                    .getPartitionOffsetMap());

                                    // update partitionGroups with the publishing task's offsets (if they are greater than what is
                                    // existing) so that the next tasks will start reading from where this task left off
                                    Map<Integer, Long> publishingTaskCurrentOffsets = taskClient
                                            .getCurrentOffsets(taskId, true);

                                    for (Map.Entry<Integer, Long> entry : publishingTaskCurrentOffsets
                                            .entrySet()) {
                                        Integer partition = entry.getKey();
                                        Long offset = entry.getValue();
                                        ConcurrentHashMap<Integer, Long> partitionOffsets = partitionGroups
                                                .get(getTaskGroupIdForPartition(partition));

                                        boolean succeeded;
                                        do {
                                            succeeded = true;
                                            Long previousOffset = partitionOffsets.putIfAbsent(partition,
                                                    offset);
                                            if (previousOffset != null && previousOffset < offset) {
                                                succeeded = partitionOffsets.replace(partition, previousOffset,
                                                        offset);
                                            }
                                        } while (!succeeded);
                                    }

                                } else {
                                    for (Integer partition : kafkaTask.getIOConfig().getStartPartitions()
                                            .getPartitionOffsetMap().keySet()) {
                                        if (!taskGroupId.equals(getTaskGroupIdForPartition(partition))) {
                                            log.warn(
                                                    "Stopping task [%s] which does not match the expected partition allocation",
                                                    taskId);
                                            try {
                                                stopTask(taskId, false).get();
                                            } catch (InterruptedException | ExecutionException e) {
                                                log.warn(e, "Exception while stopping task");
                                            }
                                            return false;
                                        }
                                    }

                                    if (taskGroups.putIfAbsent(taskGroupId,
                                            new TaskGroup(
                                                    ImmutableMap.copyOf(kafkaTask.getIOConfig()
                                                            .getStartPartitions().getPartitionOffsetMap()),
                                                    kafkaTask.getIOConfig().getMinimumMessageTime())) == null) {
                                        log.debug("Created new task group [%d]", taskGroupId);
                                    }

                                    if (!isTaskCurrent(taskGroupId, taskId)) {
                                        log.info(
                                                "Stopping task [%s] which does not match the expected parameters and ingestion spec",
                                                taskId);
                                        try {
                                            stopTask(taskId, false).get();
                                        } catch (InterruptedException | ExecutionException e) {
                                            log.warn(e, "Exception while stopping task");
                                        }
                                        return false;
                                    } else {
                                        taskGroups.get(taskGroupId).tasks.putIfAbsent(taskId, new TaskData());
                                    }
                                }
                                return true;
                            }
                        }, workerExec));
            }
        }
    }

    List<Boolean> results = Futures.successfulAsList(futures).get();
    for (int i = 0; i < results.size(); i++) {
        if (results.get(i) == null) {
            String taskId = futureTaskIds.get(i);
            log.warn("Task [%s] failed to return status, killing task", taskId);
            killTask(taskId);
        }
    }
    log.debug("Found [%d] Kafka indexing tasks for dataSource [%s]", taskCount, dataSource);
}
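
One detail worth noting above: taskGroupId.equals(getTaskGroupIdForPartition(partition)) relies on the looked-up id autoboxing to Integer. Because Integer.equals is type-sensitive, a lookup returning long would autobox to Long and the comparison would always be false. A short sketch with hypothetical lookup methods:

public class TypeSensitiveEqualsDemo {
    // Hypothetical group-id lookups returning different primitive types
    static int groupIdAsInt(int partition) { return partition % 4; }
    static long groupIdAsLong(int partition) { return partition % 4; }

    public static void main(String[] args) {
        Integer taskGroupId = 2;

        // int autoboxes to Integer, so equals compares values
        System.out.println(taskGroupId.equals(groupIdAsInt(6)));  // true

        // long autoboxes to Long, which is never an Integer
        System.out.println(taskGroupId.equals(groupIdAsLong(6))); // false
    }
}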

From source file:net.droidsolutions.droidcharts.core.plot.CategoryPlot.java

/**
 * A utility method that returns a list of datasets that are mapped to a
 * given range axis.
 * 
 * @param index
 *            the axis index.
 * 
 * @return A list of datasets.
 */
private List datasetsMappedToRangeAxis(int index) {
    Integer key = new Integer(index);
    List result = new ArrayList();
    for (int i = 0; i < this.datasets.size(); i++) {
        List mappedAxes = (List) this.datasetToRangeAxesMap.get(new Integer(i));
        if (mappedAxes == null) {
            if (key.equals(ZERO)) {
                result.add(this.datasets.get(i));
            }
        } else {
            if (mappedAxes.contains(key)) {
                result.add(this.datasets.get(i));
            }
        }
    }
    return result;
}
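
The method above wraps indexes with the new Integer(...) constructor (deprecated since Java 9), which always produces distinct objects, so equals rather than == is the only safe comparison. A sketch of the same keyed-lookup pattern using the idiomatic Integer.valueOf:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class AxisMappingDemo {
    public static void main(String[] args) {
        // dataset index -> mapped axis indexes, mirroring datasetToRangeAxesMap
        Map<Integer, List<Integer>> datasetToAxes = new HashMap<>();
        List<Integer> axes = new ArrayList<>();
        axes.add(Integer.valueOf(0));
        datasetToAxes.put(Integer.valueOf(1), axes);

        Integer key = Integer.valueOf(0); // the axis index being looked up

        // List.contains uses equals, so boxed keys compare by value
        List<Integer> mappedAxes = datasetToAxes.get(Integer.valueOf(1));
        System.out.println(mappedAxes != null && mappedAxes.contains(key)); // true
    }
}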

From source file:io.druid.indexing.jdbc.supervisor.JDBCSupervisor.java

private void discoverTasks() throws ExecutionException, InterruptedException, TimeoutException {
    int taskCount = 0;
    List<String> futureTaskIds = Lists.newArrayList();
    List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
    List<Task> tasks = taskStorage.getActiveTasks();

    log.info("TaskStorage ActiveTasks is [%d]", tasks.size());

    for (Task task : tasks) {
        if (!(task instanceof JDBCIndexTask) || !dataSource.equals(task.getDataSource())) {
            continue;
        }

        taskCount++;
        final JDBCIndexTask jdbcTask = (JDBCIndexTask) task;
        final String taskId = task.getId();

        // Determine which task group this task belongs to based on table handled by this task. If we
        // later determine that this task is actively reading, we will make sure that it matches our current partition
        // allocation (getTaskGroup(partition) should return the same value for every partition being read
        // by this task) and kill it if it is not compatible. If the task is instead found to be in the publishing
        // state, we will permit it to complete even if it doesn't match our current partition allocation to support
        // seamless schema migration.

        Iterator<Integer> it = jdbcTask.getIOConfig().getJdbcOffsets().getOffsetMaps().keySet().iterator();
        final Integer taskGroupId = (it.hasNext() ? getTaskGroup(it.next()) : null);

        log.info("taskGroupId is " + taskGroupId);

        if (taskGroupId != null) {
            // check to see if we already know about this task, either in [taskGroups] or in [pendingCompletionTaskGroups]
            // and if not add it to taskGroups or pendingCompletionTaskGroups (if status = PUBLISHING)
            TaskGroup taskGroup = taskGroups.get(taskGroupId);
            if (!isTaskInPendingCompletionGroups(taskId)
                    && (taskGroup == null || !taskGroup.tasks.containsKey(taskId))) {
                log.info("TaskGroup info details taskId [%s] in taskGroupId [%s]", taskId, taskGroupId);
                futureTaskIds.add(taskId);
                futures.add(Futures.transform(taskClient.getStatusAsync(taskId),
                        new Function<JDBCIndexTask.Status, Boolean>() {
                            @Override
                            public Boolean apply(JDBCIndexTask.Status status) {
                                if (status == JDBCIndexTask.Status.PUBLISHING) {
                                    addDiscoveredTaskToPendingCompletionTaskGroups(taskGroupId, taskId,
                                            jdbcTask.getIOConfig().getJdbcOffsets().getOffsetMaps());

                                    // update groups with the publishing task's offsets (if they are greater than what is
                                    // existing) so that the next tasks will start reading from where this task left off
                                    Map<Integer, Long> publishingTaskCurrentOffsets = taskClient
                                            .getCurrentOffsets(taskId, true);
                                    for (Map.Entry<Integer, Long> entry : publishingTaskCurrentOffsets
                                            .entrySet()) {
                                        Integer partition = entry.getKey();
                                        long endOffset = entry.getValue();
                                        log.info("Current offset is [%s]", endOffset);
                                        ConcurrentHashMap<Integer, Long> offsetsMap = (ConcurrentHashMap<Integer, Long>) groups
                                                .get(getTaskGroup(partition));
                                        boolean succeeded;
                                        do {
                                            succeeded = true;
                                            Long previousOffset = offsetsMap.putIfAbsent(partition, endOffset);
                                            if (previousOffset != null && previousOffset < endOffset) {
                                                succeeded = offsetsMap.replace(partition, previousOffset,
                                                        endOffset);
                                            }
                                        } while (!succeeded);
                                    }

                                } else {
                                    for (Integer partition : jdbcTask.getIOConfig().getJdbcOffsets()
                                            .getOffsetMaps().keySet()) {
                                        if (!taskGroupId.equals(getTaskGroup(partition))) {
                                            log.warn(
                                                    "Stopping task [%s] which does not match the expected partition allocation",
                                                    taskId);
                                            try {
                                                stopTask(taskId, false).get(futureTimeoutInSeconds,
                                                        TimeUnit.SECONDS);
                                            } catch (InterruptedException | ExecutionException
                                                    | TimeoutException e) {
                                                log.warn(e, "Exception while stopping task");
                                            }
                                            return false;
                                        }

                                        if (taskGroups.putIfAbsent(taskGroupId, new TaskGroup(
                                                ImmutableMap.copyOf(jdbcTask.getIOConfig().getJdbcOffsets()
                                                        .getOffsetMaps()),
                                                jdbcTask.getIOConfig().getMinimumMessageTime())) == null) {
                                            log.info("Created new task group [%d] from discoverTasks",
                                                    taskGroupId);
                                        }

                                        if (!isTaskCurrent(taskGroupId, taskId)) {
                                            log.info(
                                                    "Stopping task [%s] which does not match the expected parameters and ingestion spec",
                                                    taskId);
                                            try {
                                                stopTask(taskId, false).get(futureTimeoutInSeconds,
                                                        TimeUnit.SECONDS);
                                            } catch (InterruptedException | ExecutionException
                                                    | TimeoutException e) {
                                                log.warn(e, "Exception while stopping task");
                                            }
                                            return false;
                                        } else {
                                            log.info("taskGroup put IfAbsent by [%s]", taskId);
                                            taskGroups.get(taskGroupId).tasks.putIfAbsent(taskId,
                                                    new TaskData());
                                        }
                                    }
                                }
                                return true;
                            }
                        }, workerExec));
            }
        }
    }

    List<Boolean> results = Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
    for (int i = 0; i < results.size(); i++) {
        if (results.get(i) == null) {
            String taskId = futureTaskIds.get(i);
            log.warn("Task [%s] failed to return status, killing task", taskId);
            killTask(taskId);
        }
    }
    log.debug("Found [%d] JDBC indexing tasks for dataSource [%s]", taskCount, dataSource);
}

From source file:net.droidsolutions.droidcharts.core.plot.CategoryPlot.java

/**
 * Returns a list of the datasets that are mapped to the axis with the
 * specified index.
 * 
 * @param axisIndex
 *            the axis index.
 * 
 * @return The list (possibly empty, but never <code>null</code>).
 * 
 * @since 1.0.3
 */
private List datasetsMappedToDomainAxis(int axisIndex) {
    Integer key = new Integer(axisIndex);
    List result = new ArrayList();
    for (int i = 0; i < this.datasets.size(); i++) {
        List mappedAxes = (List) this.datasetToDomainAxesMap.get(new Integer(i));
        CategoryDataset dataset = (CategoryDataset) this.datasets.get(i);
        if (mappedAxes == null) {
            if (key.equals(ZERO)) {
                if (dataset != null) {
                    result.add(dataset);
                }
            }
        } else {
            if (mappedAxes.contains(key)) {
                if (dataset != null) {
                    result.add(dataset);
                }
            }
        }
    }
    return result;
}

From source file:edu.ku.brc.specify.tasks.subpane.qb.QueryBldrPane.java

/**
 * @param saveAs
 * @return
 */
protected boolean saveQuery(final boolean saveAs) {
    boolean result = false;
    if (!canSave(true)) {
        setSaveBtnEnabled(false);
        return false;
    }

    //if (!isExportMapping)
    //{
    if (!query.isNamed() || saveAs) {
        if (!getQueryNameFromUser(saveAs)) {
            return false;
        }
    }
    //} else
    //{
    //   if (!query.isNamed() || saveAs)
    //   {
    //      if (!getExportMappingQueryName())
    //      {
    //         return false;
    //      }
    //   }      
    //}

    UsageTracker.incrUsageCount("QB.SaveQuery." + query.getContextName());

    //This is necessary to indicate that a query has been changed when only field deletes have occurred.
    //If the query's timestampModified is not modified the schema export tool doesn't know the 
    //export schema needs to be rebuilt.
    if (!saveAs && query.getId() != null) {
        int origCount = BasicSQLUtils
                .getCountAsInt("select count(*) from spqueryfield where spqueryid=" + query.getId());
        if (origCount > query.getFields().size()) {
            query.setTimestampModified(new Timestamp(System.currentTimeMillis()));
        }
    }

    TableQRI tableQRI = (TableQRI) tableList.getSelectedValue();
    if (tableQRI != null) {
        short position = 0;

        Set<Integer> queryFldsWithoutPanels = new HashSet<Integer>();
        for (SpQueryField qf : query.getFields()) {
            //System.out.println(qf.getFieldName());
            queryFldsWithoutPanels.add(qf.getId());
        }

        for (QueryFieldPanel qfp : queryFieldItems) {
            if (qfp.getQueryField() != null) {
                SpQueryField qf = qfp.getQueryField();
                queryFldsWithoutPanels.remove(qf.getId());
                qf.setPosition(position);
                qfp.updateQueryField();

                position++;
            }
        }

        if (!checkCriteriaLengths(query)) {
            return false;
        }

        //Remove query fields for which panels could be created in order to prevent
        //repeat of missing fld message in getQueryFieldPanels() whenever this query is loaded.
        for (Integer qfId : queryFldsWithoutPanels) {
            //this is real lame but should hardly ever need to be executed
            for (SpQueryField qf : query.getFields()) {
                if (qfId != null && qf != null && qf.getId() != null && qfId.equals(qf.getId())) {
                    query.getFields().remove(qf);
                    break;
                }
            }
        }

        if (query.getSpQueryId() == null || saveAs) {
            if (query.getSpQueryId() != null && saveAs) {
                query = cloneTheQuery();
                if (schemaMapping != null) {
                    schemaMapping = cloneTheSchemaMapping(query);
                }
                queryNavBtn.setEnabled(true);
            }

            // false tells saveNewQuery to disable the nav button
            queryNavBtn = ((QueryTask) task).saveNewQuery(query, schemaMapping, false);

            query.setNamed(true); //XXX this isn't getting persisted!!!!!!!!!
            //                if (query.getSpQueryId() != null && saveAs)
            //                {
            //                    try 
            //                    {
            //                       this.setupUI();
            //                    } catch (QueryTask.QueryBuilderContextException e) {
            //                       //It can't happen here. 
            //                    }
            //                }   

            SubPaneMgr.getInstance().renamePane(this, query.getName());

            return true;
        }

        if (schemaMapping != null) {
            //result =  DataModelObjBase.saveWithError(true, query, schemaMapping);
            result = DataModelObjBase.saveWithError(true, schemaMapping, query);
        } else {
            result = DataModelObjBase.saveWithError(true, query);
        }
        if (result) {
            DataProviderSessionIFace session = DataProviderFactory.getInstance().createSession();
            try {
                query = session.get(SpQuery.class, query.getId());
                query.forceLoad(true);
                schemaMapping = query.getMapping();
                if (schemaMapping != null) {
                    schemaMapping.forceLoad();
                }
            } finally {
                session.close();
            }
        }
        return result;
    }
    //else
    {
        log.error("No Context selected!");
        return false;
    }
}

From source file:edu.ku.brc.specify.conversion.ConvertVerifier.java

/**
 * Verifies the converted CollectingEvent records by comparing old and new database result sets.
 */
private void verifyCEs() {
    newSQL = "SELECT c.CollectingEventID, c.StartTime, l.LocalityName, l.Latitude1, l.Longitude1, g.Name "
            + "FROM collectingevent c LEFT JOIN locality l ON c.LocalityID = l.LocalityID "
            + "LEFT JOIN geography g ON l.GeographyID = g.GeographyID ORDER BY c.CollectingEventID";

    oldSQL = "SELECT c.CollectingEventID, c.StartTime, l.LocalityName, l.Latitude1, l.Longitude1, g.ContinentOrOcean, g.Country, g.State,  g.County, g.IslandGroup, g.Island, g.WaterBody, g.Drainage "
            + "FROM collectingevent c LEFT JOIN locality l ON c.LocalityID = l.LocalityID "
            + "LEFT JOIN geography g ON l.GeographyID = g.GeographyID ORDER BY c.CollectingEventID";

    String newCntSQL = "SELECT count(*) "
            + "FROM collectingevent c LEFT JOIN locality l ON c.LocalityID = l.LocalityID "
            + "LEFT JOIN geography g ON l.GeographyID = g.GeographyID ORDER BY c.CollectingEventID";

    String oldCntSQL = "SELECT count(*) "
            + "FROM collectingevent c LEFT JOIN locality l ON c.LocalityID = l.LocalityID "
            + "LEFT JOIN geography g ON l.GeographyID = g.GeographyID ORDER BY c.CollectingEventID";

    log.info(newCntSQL);
    log.info(oldCntSQL);
    log.info(newSQL);
    log.info(oldSQL);

    Integer oldCnt = BasicSQLUtils.getCount(oldCntSQL);
    Integer newCnt = BasicSQLUtils.getCount(newCntSQL);
    String msg2 = "Record Counts [" + oldCnt + " / " + newCnt + "]";
    log.info(msg2);

    //tblWriter.logErrors("Record Counts", oldCnt + " / "  + newCnt);
    tblWriter.flush();

    try {
        getResultSets(oldSQL, newSQL);
        while (true) {

            boolean hasOldRec = oldDBRS.next();
            boolean hasNewRec = newDBRS.next();

            if (!hasOldRec || !hasNewRec) {
                break;
            }

            int col = 1;
            int newId = newDBRS.getInt(col++);
            Integer newStartTime = newDBRS.getInt(col++);
            String newLocalityName = newDBRS.getString(col++);

            Object bigDecObj = newDBRS.getObject(col);
            BigDecimal newLatitude = bigDecObj == null ? null : newDBRS.getBigDecimal(col);
            col++;

            bigDecObj = newDBRS.getObject(col);
            BigDecimal newLongitude = bigDecObj == null ? null : newDBRS.getBigDecimal(col);
            col++;

            String newGeoName = newDBRS.getString(col++);

            col = 1;
            int oldId = oldDBRS.getInt(col++);
            Integer oldStartTime = oldDBRS.getInt(col++);
            String oldLocalityName = oldDBRS.getString(col++);

            bigDecObj = oldDBRS.getObject(col); // check the old result set for null before reading
            Double oldLatitude = bigDecObj == null ? null : oldDBRS.getDouble(col);
            col++;

            bigDecObj = oldDBRS.getObject(col); // check the old result set for null before reading
            Double oldLongitude = bigDecObj == null ? null : oldDBRS.getDouble(col);
            col++;

            String oldNewIdStr = oldId + " / " + newId;

            if (newGeoName != null && !newGeoName.equals("Undefined")) {
                boolean fnd = false;
                for (int i = 6; i < 14; i++) {
                    //if (i == 7) System.out.println();
                    String name = oldDBRS.getString(i);
                    if (name != null) {
                        //System.out.println("["+name+"]");
                        if (name.equalsIgnoreCase(newGeoName)) {
                            fnd = true;
                            break;
                        }
                    }
                }

                if (!fnd) {
                    String msg = "No match found for new Geo [" + newGeoName + "] [" + oldId + " / " + newId
                            + "]";
                    log.error(msg);
                    tblWriter.logErrors(oldNewIdStr, msg);
                }
            }

            // StartTime
            if (oldStartTime == null && newStartTime != null) {
                String msg = "LocName[" + oldId + " / " + newId + "]  Old StartTime[" + oldStartTime
                        + "] is NULL   New StartTime[" + newStartTime + "] is not";
                log.error(msg);
                tblWriter.logErrors(oldNewIdStr, msg);

            } else if (oldStartTime != null && newStartTime == null) {
                String msg = "LocName[" + oldId + " / " + newId + "]  Old StartTime[" + oldStartTime
                        + "] is not null   New StartTime[" + newStartTime + "] is NULL";
                log.error(msg);
                tblWriter.logErrors(oldNewIdStr, msg);

            } else if (oldStartTime != null && newStartTime != null && !oldStartTime.equals(newStartTime)) {
                String msg = "LocName[" + oldId + " / " + newId + "]  Old StartTime[" + oldStartTime
                        + "] is NOT equals   New StartTime[" + newStartTime + "]";
                log.error(msg);
                tblWriter.logErrors(oldNewIdStr, msg);
            }

            // LocalityName
            if (oldLocalityName == null && newLocalityName != null) {
                String msg = "LocName[" + oldId + " / " + newId + "]  Old LocalityName[" + oldLocalityName
                        + "] is NULL   New LocalityName[" + newLocalityName + "] is not";
                log.error(msg);
                tblWriter.logErrors(oldNewIdStr, msg);
            } else if (oldLocalityName != null && newLocalityName == null) {
                String msg = "LocName[" + oldId + " / " + newId + "]  Old LocalityName[" + oldLocalityName
                        + "] is not null   New LocalityName[" + newLocalityName + "] is NULL";
                log.error(msg);
                tblWriter.logErrors(oldNewIdStr, msg);
            } else if (oldLocalityName != null && newLocalityName != null
                    && !oldLocalityName.equals(newLocalityName)) {
                String msg = "LocName[" + oldId + " / " + newId + "]  Old LocalityName[" + oldLocalityName
                        + "] is NOT equals   New LocalityName[" + newLocalityName + "]";
                log.error(msg);
                tblWriter.logErrors(oldNewIdStr, msg);
            }

            // Latitude
            if (oldLatitude == null && newLatitude != null) {
                String msg = "Latitude[" + oldId + " / " + newId + "]  Old Latitude[" + oldLatitude
                        + "] is NULL   New Latitude[" + newLatitude + "] is not";
                log.error(msg);
                tblWriter.logErrors(oldNewIdStr, msg);
            } else if (oldLatitude != null && newLatitude == null) {
                String msg = "Latitude[" + oldId + " / " + newId + "]  Old Latitude[" + oldLatitude
                        + "] is not null   New Latitude[" + newLatitude + "] is NULL";
                log.error(msg);
                tblWriter.logErrors(oldNewIdStr, msg);
            } else if (oldLatitude != null && newLatitude != null
                    && !oldLatitude.equals(newLatitude.doubleValue())) {
                String msg = "Latitude[" + oldId + " / " + newId + "]  Old Latitude[" + oldLatitude
                        + "] is NOT equals   New Latitude[" + newLatitude + "]";
                log.error(msg);
                tblWriter.logErrors(oldNewIdStr, msg);
            }

            // Longitude
            if (oldLongitude == null && newLongitude != null) {
                String msg = "Longitude[" + oldId + " / " + newId + "]  Old Longitude[" + oldLongitude
                        + "] is NULL   New Longitude[" + newLongitude + "] is not";
                log.error(msg);
                tblWriter.logErrors(oldNewIdStr, msg);
            } else if (oldLongitude != null && newLongitude == null) {
                String msg = "Longitude[" + oldId + " / " + newId + "]  Old Longitude[" + oldLongitude
                        + "] is not null   New Longitude[" + newLongitude + "] is NULL";
                log.error(msg);
                tblWriter.logErrors(oldNewIdStr, msg);
            } else if (oldLongitude != null && newLongitude != null
                    && !oldLongitude.equals(newLongitude.doubleValue())) {
                String msg = "Longitude[" + oldId + " / " + newId + "]  Old Longitude[" + oldLongitude
                        + "] is NOT equals   New Longitude[" + newLongitude + "]";
                log.error(msg);
                tblWriter.logErrors(oldNewIdStr, msg);
            }
        }

        oldDBRS.close();
        newDBRS.close();

    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
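
The latitude and longitude checks above convert the new BigDecimal with doubleValue() before calling equals, because equals across different wrapper types is always false. The same rule applies to Integer. A minimal sketch:

import java.math.BigDecimal;

public class CrossTypeEqualsDemo {
    public static void main(String[] args) {
        Double oldLatitude = 38.95;
        BigDecimal newLatitude = new BigDecimal("38.95");

        // Different wrapper types never compare equal, whatever the value
        System.out.println(oldLatitude.equals(newLatitude));               // false

        // Converting first compares the numeric values, as verifyCEs does
        System.out.println(oldLatitude.equals(newLatitude.doubleValue())); // true
    }
}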