Example usage for java.lang Long longValue

Introduction

On this page you can find usage examples for java.lang.Long.longValue().

Prototype

@HotSpotIntrinsicCandidate
public long longValue() 

Document

Returns the value of this Long as a long value.
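
The snippets below all use longValue() to unbox a java.lang.Long into a primitive long. Auto-unboxing compiles to the same call, so the explicit form matters mostly for readability and for null handling: auto-unboxing a null Long throws a NullPointerException. A minimal, self-contained sketch (names are illustrative, not taken from the examples below):

public class LongValueDemo {
    public static void main(String[] args) {
        Long boxed = Long.valueOf(42L);

        // Explicit unboxing
        long explicit = boxed.longValue();

        // Auto-unboxing; the compiler inserts the same longValue() call
        long implicit = boxed;

        System.out.println(explicit == implicit); // true

        // Null-safe default, the pattern used in several examples below;
        // auto-unboxing maybeNull directly would throw a NullPointerException
        Long maybeNull = null;
        long safe = (maybeNull != null) ? maybeNull.longValue() : 0L;
        System.out.println(safe); // 0
    }
}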

Usage

From source file:hoot.services.db.DbUtils.java

public static long insertUser(Connection conn) throws Exception {
    Long newId = -1L;
    NumberExpression<Long> expression = NumberTemplate.create(Long.class, "nextval('users_id_seq')");
    Configuration configuration = getConfiguration();

    SQLQuery query = new SQLQuery(conn, configuration);

    List<Long> ids = query.from().list(expression);

    if (ids != null && ids.size() > 0) {
        newId = ids.get(0);
        QUsers users = QUsers.users;

        new SQLInsertClause(conn, configuration, users).columns(users.id, users.displayName, users.email)
                .values(newId, "user-with-id-" + newId, "user-with-id-" + newId).execute();

    }
    return newId.longValue();
}
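
In this example the query result arrives as a boxed Long (via List&lt;Long&gt;), and longValue() unboxes it so the method can return a primitive long; the -1L default is returned when the sequence query yields no rows.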

From source file:de.saly.elasticsearch.importer.imap.mailsource.ParallelPollingIMAPMailSource.java

@SuppressWarnings({ "rawtypes", "unchecked" })
protected void fetch(final Folder folder) throws MessagingException, IOException {

    if ((folder.getType() & Folder.HOLDS_MESSAGES) == 0) {
        logger.warn("Folder {} cannot hold messages", folder.getFullName());
        return;

    }

    final int messageCount = folder.getMessageCount();

    final UIDFolder uidfolder = (UIDFolder) folder;
    final long servervalidity = uidfolder.getUIDValidity();
    final State riverState = stateManager.getRiverState(folder);
    final Long localvalidity = riverState.getUidValidity();

    logger.info("Fetch mails from folder {} ({})", folder.getURLName().toString(), messageCount);

    logger.debug("Server uid validity: {}, Local uid validity: {}", servervalidity, localvalidity);

    if (localvalidity == null || localvalidity.longValue() != servervalidity) {
        logger.debug("UIDValidity fail, full resync " + localvalidity + "!=" + servervalidity);

        if (localvalidity != null) {
            mailDestination.clearDataForFolder(folder);
        }

        final ProcessResult result = process(messageCount, 1, folder.getFullName());

        riverState.setLastCount(result.getProcessedCount());

        if (result.getProcessedCount() > 0) {
            riverState.setLastIndexed(new Date());
        }

        if (result.getProcessedCount() > 0) {
            riverState.setLastTook(result.getTook());
        }

        riverState.setLastSchedule(new Date());

        if (result.getProcessedCount() > 0 && result.getHighestUid() > 0) {
            riverState.setLastUid(result.getHighestUid());
        }

        riverState.setUidValidity(servervalidity);
        stateManager.setRiverState(riverState);

        logger.info("Initiailly processed {} mails for folder {}", result.getProcessedCount(),
                folder.getFullName());
        logger.debug("Processed result {}", result.toString());

    } else {

        if (messageCount == 0) {
            logger.debug("Folder {} is empty", folder.getFullName());
        } else {

            if (withFlagSync) {
                // detect flag change
                final Message[] flagMessages = folder.getMessages();
                folder.fetch(flagMessages, IMAPUtils.FETCH_PROFILE_FLAGS_UID);

                for (final Message message : flagMessages) {
                    try {

                        final long uid = ((UIDFolder) message.getFolder()).getUID(message);

                        final String id = uid + "::" + message.getFolder().getURLName();

                        final int storedHashcode = mailDestination.getFlaghashcode(id);

                        if (storedHashcode == -1) {
                            // New mail which is not indexed yet
                            continue;
                        }

                        final int flagHashcode = message.getFlags().hashCode();

                        if (flagHashcode != storedHashcode) {
                            // flags change for this message, must update
                            mailDestination.onMessage(message);

                            if (logger.isDebugEnabled()) {
                                logger.debug("Update " + id + " because of flag change");
                            }
                        }
                    } catch (final Exception e) {
                        logger.error("Error detecting flagchanges for message "
                                + ((MimeMessage) message).getMessageID(), e);
                        stateManager.onError("Error detecting flagchanges", message, e);
                    }
                }
            }

            long highestUID = riverState.getLastUid(); // this uid is
                                                       // already
                                                       // processed

            logger.debug("highestUID: {}", highestUID);

            if (highestUID < 1) {
                logger.error("highestUID: {} not valid, set it to 1", highestUID);
                highestUID = 1;
            }

            Message[] msgsnew = uidfolder.getMessagesByUID(highestUID, UIDFolder.LASTUID);

            if (msgsnew.length > 0) {

                System.out.println("lastuid: " + uidfolder.getUID(msgsnew[msgsnew.length - 1]));

                // msgsnew.length is always >= 1
                if (highestUID > 1 && uidfolder.getUID(msgsnew[msgsnew.length - 1]) <= highestUID) {
                    msgsnew = (Message[]) ArrayUtils.remove(msgsnew, msgsnew.length - 1);
                }

                if (msgsnew.length > 0) {

                    logger.info("{} new messages in folder {}", msgsnew.length, folder.getFullName());

                    final int start = msgsnew[0].getMessageNumber();

                    final ProcessResult result = process(messageCount, start, folder.getFullName());

                    riverState.setLastCount(result.getProcessedCount());

                    if (result.getProcessedCount() > 0) {
                        riverState.setLastIndexed(new Date());
                    }

                    if (result.getProcessedCount() > 0) {
                        riverState.setLastTook(result.getTook());
                    }

                    riverState.setLastSchedule(new Date());

                    if (result.getProcessedCount() > 0 && result.getHighestUid() > 0) {
                        riverState.setLastUid(result.getHighestUid());
                    }

                    riverState.setUidValidity(servervalidity);
                    stateManager.setRiverState(riverState);

                    logger.info("Not initiailly processed {} mails for folder {}", result.getProcessedCount(),
                            folder.getFullName());
                    logger.debug("Processed result {}", result.toString());
                } else {
                    logger.debug("no new messages");
                }
            } else {
                logger.debug("no new messages");
            }

        }
        // check for expunged/deleted messages
        final Set<Long> serverMailSet = new HashSet<Long>();

        final long oldmailUid = riverState.getLastUid();
        logger.debug("oldmailuid {}", oldmailUid);

        final Message[] msgsold = uidfolder.getMessagesByUID(1, oldmailUid);

        folder.fetch(msgsold, IMAPUtils.FETCH_PROFILE_UID);

        for (final Message m : msgsold) {
            try {
                final long uid = uidfolder.getUID(m);
                serverMailSet.add(uid);

            } catch (final Exception e) {
                stateManager.onError("Unable to handle old message ", m, e);
                logger.error("Unable to handle old message due to {}", e, e.toString());

                IMAPUtils.open(folder);
            }
        }

        if (deleteExpungedMessages) {

            final Set localMailSet = new HashSet(mailDestination.getCurrentlyStoredMessageUids(folder));

            logger.debug("Check now " + localMailSet.size() + " server mails for expunge");

            localMailSet.removeAll(serverMailSet);
            // localMailSet now contains the uids that are no longer on the server

            logger.info(localMailSet.size()
                    + " messages will be deleted locally because they were expunged on the server.");

            mailDestination.onMessageDeletes(localMailSet, folder);

        }

    }

}
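
The null check in this example is what makes the explicit longValue() call safe: localvalidity == null short-circuits the condition, so the Long is only unboxed when a local UID validity has actually been stored. Writing localvalidity != servervalidity instead would auto-unbox and throw a NullPointerException on the first sync.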

From source file:cn.code.notes.gtask.remote.GTaskManager.java

private void addLocalNode(Node node) throws NetworkFailureException {
    if (mCancelled) {
        return;
    }

    SqlNote sqlNote;
    if (node instanceof TaskList) {
        if (node.getName().equals(GTaskStringUtils.MIUI_FOLDER_PREFFIX + GTaskStringUtils.FOLDER_DEFAULT)) {
            sqlNote = new SqlNote(mContext, Notes.ID_ROOT_FOLDER);
        } else if (node.getName()
                .equals(GTaskStringUtils.MIUI_FOLDER_PREFFIX + GTaskStringUtils.FOLDER_CALL_NOTE)) {
            sqlNote = new SqlNote(mContext, Notes.ID_CALL_RECORD_FOLDER);
        } else {
            sqlNote = new SqlNote(mContext);
            sqlNote.setContent(node.getLocalJSONFromContent());
            sqlNote.setParentId(Notes.ID_ROOT_FOLDER);
        }
    } else {
        sqlNote = new SqlNote(mContext);
        JSONObject js = node.getLocalJSONFromContent();
        try {
            if (js.has(GTaskStringUtils.META_HEAD_NOTE)) {
                JSONObject note = js.getJSONObject(GTaskStringUtils.META_HEAD_NOTE);
                if (note.has(NoteColumns.ID)) {
                    long id = note.getLong(NoteColumns.ID);
                    if (DataUtils.existInNoteDatabase(mContentResolver, id)) {
                        // the id is not available, have to create a new one
                        note.remove(NoteColumns.ID);
                    }
                }
            }

            if (js.has(GTaskStringUtils.META_HEAD_DATA)) {
                JSONArray dataArray = js.getJSONArray(GTaskStringUtils.META_HEAD_DATA);
                for (int i = 0; i < dataArray.length(); i++) {
                    JSONObject data = dataArray.getJSONObject(i);
                    if (data.has(DataColumns.ID)) {
                        long dataId = data.getLong(DataColumns.ID);
                        if (DataUtils.existInDataDatabase(mContentResolver, dataId)) {
                            // the data id is not available, have to create
                            // a new one
                            data.remove(DataColumns.ID);
                        }
                    }
                }

            }
        } catch (JSONException e) {
            Log.w(TAG, e.toString());
            e.printStackTrace();
        }
        sqlNote.setContent(js);

        Long parentId = mGidToNid.get(((Task) node).getParent().getGid());
        if (parentId == null) {
            Log.e(TAG, "cannot find task's parent id locally");
            throw new ActionFailureException("cannot add local node");
        }
        sqlNote.setParentId(parentId.longValue());
    }

    // create the local node
    sqlNote.setGtaskId(node.getGid());
    sqlNote.commit(false);

    // update gid-nid mapping
    mGidToNid.put(node.getGid(), sqlNote.getId());
    mNidToGid.put(sqlNote.getId(), node.getGid());

    // update meta
    updateRemoteMeta(node.getGid(), sqlNote);
}

From source file:cn.code.notes.gtask.remote.GTaskManager.java

private void updateLocalNode(Node node, Cursor c) throws NetworkFailureException {
    if (mCancelled) {
        return;
    }

    SqlNote sqlNote;
    // update the note locally
    sqlNote = new SqlNote(mContext, c);
    sqlNote.setContent(node.getLocalJSONFromContent());

    Long parentId = (node instanceof Task) ? mGidToNid.get(((Task) node).getParent().getGid())
            : Long.valueOf(Notes.ID_ROOT_FOLDER);
    if (parentId == null) {
        Log.e(TAG, "cannot find task's parent id locally");
        throw new ActionFailureException("cannot update local node");
    }
    sqlNote.setParentId(parentId.longValue());
    sqlNote.commit(true);

    // update meta info
    updateRemoteMeta(node.getGid(), sqlNote);
}
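
Both GTaskManager examples share the same pattern: the boxed Long looked up in mGidToNid is tested against null (a missing gid-to-nid mapping) before longValue() unboxes it for setParentId. Passing the Long directly would auto-unbox and fail with a NullPointerException instead of the intended ActionFailureException.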

From source file:com.microsoft.tfs.core.clients.workitem.internal.metadata.Metadata.java

private long getCachestamp(final String tableName, final DBConnection connection) {
    if (connection.getDBSpecificOperations().tableExists(MAXCOUNT_TABLE_NAME)) {
        final Long maxCacheStamp = connection.createStatement("select " //$NON-NLS-1$
                + ROW_VERSION_COLUMN_NAME + " from " //$NON-NLS-1$
                + MAXCOUNT_TABLE_NAME + " where " //$NON-NLS-1$
                + TABLE_NAME_COLUMN_NAME + " = '" //$NON-NLS-1$
                + tableName + "'").executeLongQuery(); //$NON-NLS-1$

        /*
         * If there are no rows in the table, the query will return null.
         */

        return (maxCacheStamp != null ? maxCacheStamp.longValue() : 0);
    } else {
        return 0;
    }
}
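
The ternary here is the usual default-on-null idiom: longValue() is only evaluated when the query returned a row, and 0 is substituted otherwise. The explicit call also makes the unboxing point visible, which is easy to miss when a ternary mixes a Long and an int literal.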

From source file:com.sshtools.j2ssh.connection.ConnectionProtocol.java

private void onMsgChannelOpen(SshMsgChannelOpen msg) throws IOException {
    synchronized (activeChannels) {
        log.info("Request for " + msg.getChannelType() + " channel recieved");

        // Try to get the channel implementation from the allowed channels
        ChannelFactory cf = (ChannelFactory) allowedChannels.get(msg.getChannelType());
        if (cf == null) {
            sendChannelOpenFailure(msg.getSenderChannelId(), SshMsgChannelOpenFailure.SSH_OPEN_CONNECT_FAILED,
                    "The channel type is not supported", "");
            log.info("Request for channel type " + msg.getChannelType() + " refused");

            return;
        }

        try {
            log.info("Creating channel " + msg.getChannelType());

            Channel channel = cf.createChannel(msg.getChannelType(), msg.getChannelData());

            // Initialize the channel
            log.info("Initiating channel");

            Long channelId = getChannelId();
            channel.init(this, channelId.longValue(), msg.getSenderChannelId(), msg.getInitialWindowSize(),
                    msg.getMaximumPacketSize());

            activeChannels.put(channelId, channel);

            log.info("Sending channel open confirmation");

            // Send the confirmation message
            sendChannelOpenConfirmation(channel);

            // Open the channel for real
            channel.open();
        } catch (InvalidChannelException ice) {
            log.debug(ice);
            sendChannelOpenFailure(msg.getSenderChannelId(), SshMsgChannelOpenFailure.SSH_OPEN_CONNECT_FAILED,
                    ice.getMessage(), "");
        }
    }
}
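
Here the boxed Long does double duty: channelId stays boxed as the key into the activeChannels map, while longValue() unboxes it at the channel.init call site, which (judging from the call) takes a primitive long channel id.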

From source file:com.inkubator.hrm.service.impl.LoanNewApplicationServiceImpl.java

private String generateLoanNumber() {
    /** Generate the number from the codification for the loan module */
    TransactionCodefication transactionCodefication = transactionCodeficationDao
            .getEntityByModulCode(HRMConstant.LOAN);
    Long currentMaxId = loanNewApplicationDao.getCurrentMaxId();
    currentMaxId = currentMaxId != null ? currentMaxId : 0;
    String nomor = KodefikasiUtil.getKodefikasi(((int) currentMaxId.longValue()),
            transactionCodefication.getCode());
    return nomor;
}

From source file:com.inkubator.hrm.service.impl.LoanNewApplicationServiceImpl.java

private String generateLoanCancelationNumber() {
    /** Generate the number from the codification for the loan module */
    TransactionCodefication transactionCodefication = transactionCodeficationDao
            .getEntityByModulCode(HRMConstant.LOAN_CANCELLATION_KODE);
    Long currentMaxId = loanNewCancelationDao.getCurrentMaxId();
    currentMaxId = currentMaxId != null ? currentMaxId : 0;
    String nomor = KodefikasiUtil.getKodefikasi(((int) currentMaxId.longValue()),
            transactionCodefication.getCode());
    return nomor;
}
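
Note that both loan-number methods narrow the result with (int) currentMaxId.longValue(). That cast silently truncates once the underlying id exceeds Integer.MAX_VALUE, so the pattern is only safe while the sequence stays within int range.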

From source file:com.ushahidi.swiftriver.core.api.dao.impl.JpaDropDao.java

/**
 * Populate the rivers_droplets table
 * 
 * @param drops
 */
private void insertRiverDrops(final List<Drop> drops) {

    // Get a lock on rivers_droplets
    Sequence seq = sequenceDao.findById("rivers_droplets");

    // Mapping of drop id to list index position
    final Map<Long, Integer> dropIndex = new HashMap<Long, Integer>();

    // List of rivers in a drop
    Map<Long, Set<Long>> dropRiversMap = new HashMap<Long, Set<Long>>();
    Map<Long, Set<Long>> dropChannelsMap = new HashMap<Long, Set<Long>>();

    // Registry for all channels and rivers
    Set<Long> allChannelIds = new HashSet<Long>();

    int i = 0;
    for (Drop drop : drops) {
        if (drop.getRiverIds() == null || drop.getChannelIds() == null) {
            logger.debug("No rivers or channels for drop {}", drop.getId());
            continue;
        }

        Set<Long> rivers = new HashSet<Long>();
        Set<Long> channels = new HashSet<Long>();

        rivers.addAll(drop.getRiverIds());
        channels.addAll(drop.getChannelIds());

        dropRiversMap.put(drop.getId(), rivers);
        dropChannelsMap.put(drop.getId(), channels);

        allChannelIds.addAll(channels);

        dropIndex.put(drop.getId(), i++);
    }

    // No rivers found, exit
    if (dropIndex.size() == 0)
        return;

    // Find already existing rivers_droplets
    String sql = "SELECT droplet_id, river_id FROM rivers_droplets WHERE droplet_id in (:ids)";

    MapSqlParameterSource params = new MapSqlParameterSource();
    params.addValue("ids", dropIndex.keySet());

    List<Map<String, Object>> results = this.namedJdbcTemplate.queryForList(sql, params);

    logger.debug("Skipping {} entries from rivers_droplets", results.size());

    // Remove existing rivers_droplets entries from our Set
    for (Map<String, Object> row : results) {
        Long dropletId = ((Number) row.get("droplet_id")).longValue();
        Long riverId = ((Number) row.get("river_id")).longValue();

        Set<Long> riverSet = dropRiversMap.remove(dropletId);
        if (riverSet != null) {
            riverSet.remove(riverId);

            // Only add back the destination rivers if the set is non-empty
            if (!riverSet.isEmpty()) {
                dropRiversMap.put(dropletId, riverSet);
            }
        }
    }

    // If all drops are duplicates, return early
    if (dropRiversMap.isEmpty()) {
        logger.info("No drops to add to the rivers");
        return;
    }

    // Associate the channels with active rivers
    sql = "SELECT rc.id, rc.river_id " + "FROM river_channels rc "
            + "INNER JOIN rivers r ON (rc.river_id = r.id) " + "WHERE rc.id IN (:channelIds) "
            + "AND r.river_active = 1";
    MapSqlParameterSource channelParams = new MapSqlParameterSource();
    channelParams.addValue("channelIds", allChannelIds);

    Map<Long, Long> riverChannelsMap = new HashMap<Long, Long>();
    for (Map<String, Object> row : namedJdbcTemplate.queryForList(sql, channelParams)) {

        Long channelId = ((Number) row.get("id")).longValue();
        Long riverId = ((Number) row.get("river_id")).longValue();

        riverChannelsMap.put(channelId, riverId);
    }

    // Map to hold the association between a drop, river and channel
    // During the association, we verify that the river is in the drop's
    // destination river list
    final List<Map<String, Long>> riverDropChannelList = new ArrayList<Map<String, Long>>();
    Set<RiverDropKey> riverDropKeySet = new HashSet<JpaDropDao.RiverDropKey>();
    for (Long dropletId : dropChannelsMap.keySet()) {
        for (Long channelId : dropChannelsMap.get(dropletId)) {
            if (riverChannelsMap.containsKey(channelId)) {
                Long riverId = riverChannelsMap.get(channelId);

                // Does the river drop key already exist? 
                RiverDropKey riverDropKey = new RiverDropKey(riverId, dropletId);
                if (riverDropKeySet.contains(riverDropKey))
                    continue;

                // Does not exist. Add to the in-memory registry
                riverDropKeySet.add(riverDropKey);

                if (dropRiversMap.containsKey(dropletId) && dropRiversMap.get(dropletId).contains(riverId)) {
                    Map<String, Long> entry = new HashMap<String, Long>();
                    entry.put("dropletId", dropletId);
                    entry.put("channelId", channelId);
                    entry.put("riverId", riverId);
                    riverDropChannelList.add(entry);
                }
            }
        }
    }

    logger.debug("Posting drops to rivers");

    // Insert the remaining items in the set into the DB
    sql = "INSERT INTO `rivers_droplets` (`id`, `droplet_id`, `river_id`, "
            + "`river_channel_id`, `droplet_date_pub`) " + "VALUES (?, ?, ?, ?, ?)";

    final long startKey = sequenceDao.getIds(seq, riverDropChannelList.size());

    // Map to hold the number of drops created per channel
    final Map<Long, Long> channelDropCountMap = new HashMap<Long, Long>();

    // A map to hold the new max_drop_id and drop_count per river
    final Map<Long, long[]> riverDropsMap = new HashMap<Long, long[]>();
    jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
        public void setValues(PreparedStatement ps, int i) throws SQLException {
            Map<String, Long> dropEntry = riverDropChannelList.get(i);
            long id = startKey + i;

            Long dropletId = dropEntry.get("dropletId");
            Long riverId = dropEntry.get("riverId");
            Long channelId = dropEntry.get("channelId");
            Drop drop = drops.get(dropIndex.get(dropletId));

            ps.setLong(1, id);
            ps.setLong(2, dropletId);
            ps.setLong(3, riverId);
            ps.setLong(4, channelId);
            ps.setTimestamp(5, new java.sql.Timestamp(drop.getDatePublished().getTime()));

            // Get updated max_drop_id and drop_count for the rivers table
            long[] update = riverDropsMap.get(riverId);
            if (update == null) {
                long[] u = { id, 1 };
                riverDropsMap.put(riverId, u);
            } else {
                update[0] = Math.max(update[0], id);
                update[1] = update[1] + 1;
            }

            // Update the drop count for the channel
            Long channelDropCount = channelDropCountMap.remove(channelId);
            channelDropCount = (channelDropCount == null) ? 1L : Long.valueOf(channelDropCount.longValue() + 1);
            channelDropCountMap.put(channelId, channelDropCount);
        }

        public int getBatchSize() {
            return riverDropChannelList.size();
        }
    });
    logger.debug("Drops successfully posted to rivers");

    // Update river max_drop_id and drop_count
    logger.debug("Updating river drop counters");
    sql = "UPDATE rivers SET max_drop_id = ?, drop_count = drop_count + ? WHERE id = ?";
    final List<Entry<Long, long[]>> riverUpdate = new ArrayList<Entry<Long, long[]>>();
    riverUpdate.addAll(riverDropsMap.entrySet());

    this.jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
        public void setValues(PreparedStatement ps, int i) throws SQLException {
            Entry<Long, long[]> entry = riverUpdate.get(i);
            ps.setLong(1, entry.getValue()[0]);
            ps.setLong(2, entry.getValue()[1]);
            ps.setLong(3, entry.getKey());
        }

        public int getBatchSize() {
            return riverUpdate.size();
        }
    });
    logger.debug("{} rivers successfully updated", riverUpdate.size());

    // Update the drop_count in TABLE `river_channels`
    logger.debug("Updating river channel statistics");
    sql = "UPDATE river_channels SET drop_count = drop_count + ? WHERE id = ?";
    final List<Entry<Long, Long>> riverChannelUpdate = new ArrayList<Entry<Long, Long>>();
    riverChannelUpdate.addAll(channelDropCountMap.entrySet());

    this.jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
        public void setValues(PreparedStatement ps, int i) throws SQLException {
            Entry<Long, Long> entry = riverChannelUpdate.get(i);
            ps.setLong(1, entry.getValue());
            ps.setLong(2, entry.getKey());
        }

        @Override
        public int getBatchSize() {
            return riverChannelUpdate.size();
        }
    });
    logger.debug("{} channels updated", riverChannelUpdate.size());

    // Insert the trend data
    logger.debug("Updating trend statistics");
    try {
        insertRiverTagTrends(drops, dropIndex, riverDropChannelList);
    } catch (Exception e) {
        logger.error("An error occurred while inserting the trend data", e);
    }

}
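
This example calls longValue() on Number rather than on Long directly: queryForList returns untyped column values that may come back as Integer, Long, or BigInteger depending on the driver, and casting to Number before calling longValue() handles all of those uniformly.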

From source file:com.netflix.conductor.dao.dynomite.RedisExecutionDAO.java

@Override
public List<Workflow> getWorkflowsByType(String workflowName, Long startTime, Long endTime) {
    Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
    Preconditions.checkNotNull(startTime, "startTime cannot be null");
    Preconditions.checkNotNull(endTime, "endTime cannot be null");

    List<Workflow> workflows = new LinkedList<Workflow>();

    // Get all date strings between start and end
    List<String> dateStrs = dateStrBetweenDates(startTime, endTime);
    dateStrs.forEach(dateStr -> {
        String key = nsKey(WORKFLOW_DEF_TO_WORKFLOWS, workflowName, dateStr);
        dynoClient.smembers(key).forEach(wfId -> {

            try {

                Workflow wf = getWorkflow(wfId);
                if (wf.getCreateTime().longValue() >= startTime.longValue()
                        && wf.getCreateTime().longValue() <= endTime.longValue()) {
                    workflows.add(wf);
                }

            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            }
        });
    });

    return workflows;
}
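
The range check in this example unboxes both sides with longValue() before comparing with >= and <=. That is also the safe way to compare two boxed Longs for equality: == on Long tests reference identity, and outside the small autobox cache two equal values are usually distinct objects. A short sketch (illustrative values):

Long a = Long.valueOf(1000L);
Long b = Long.valueOf(1000L);

System.out.println(a == b);                         // usually false: reference comparison
System.out.println(a.longValue() == b.longValue()); // true: value comparison
System.out.println(a.equals(b));                    // true: value comparison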