List of usage examples for java.sql.Timestamp.getTime()
public long getTime()
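Returns the number of milliseconds since January 1, 1970, 00:00:00 GMT represented by this Timestamp object. Note that the returned value includes the whole-millisecond part of the fractional second stored in the nanos component. Before the collected examples, a minimal self-contained sketch of the contract (all values illustrative):

import java.sql.Timestamp;

public class TimestampGetTimeDemo {
    public static void main(String[] args) {
        long millis = System.currentTimeMillis();
        Timestamp ts = new Timestamp(millis);

        // getTime() round-trips the epoch millis used to construct the Timestamp
        System.out.println(ts.getTime() == millis); // true

        // The fractional second lives in the nanos field; getTime() reflects
        // only its whole-millisecond part
        ts.setNanos(123456789);                  // 0.123456789 s
        System.out.println(ts.getTime() % 1000); // 123
        System.out.println(ts.getNanos());       // 123456789
    }
}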
From source file:org.dcache.chimera.FsSqlDriver.java
/**
 * Creates an entry in the t_inodes table with initial values. For optimization,
 * the initial value of the reference count may be defined. For newly created
 * files the size is zero; for directories it is 512.
 *
 * @param id
 * @param type
 * @param uid
 * @param gid
 * @param mode
 * @param nlink
 * @param size
 */
Stat createInode(String id, int type, int uid, int gid, int mode, int nlink, long size) {
    Timestamp now = new Timestamp(System.currentTimeMillis());
    KeyHolder keyHolder = new GeneratedKeyHolder();
    _jdbc.update(con -> {
        PreparedStatement ps = con.prepareStatement(
                "INSERT INTO t_inodes (ipnfsid,itype,imode,inlink,iuid,igid,isize,iio,"
                        + "ictime,iatime,imtime,icrtime,igeneration) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?)",
                Statement.RETURN_GENERATED_KEYS);
        ps.setString(1, id);
        ps.setInt(2, type);
        ps.setInt(3, mode & UnixPermission.S_PERMS);
        ps.setInt(4, nlink);
        ps.setInt(5, uid);
        ps.setInt(6, gid);
        ps.setLong(7, size);
        ps.setInt(8, _ioMode);
        ps.setTimestamp(9, now);
        ps.setTimestamp(10, now);
        ps.setTimestamp(11, now);
        ps.setTimestamp(12, now);
        ps.setLong(13, 0);
        return ps;
    }, keyHolder);

    Stat stat = new Stat();
    stat.setIno((Long) keyHolder.getKeys().get("inumber"));
    stat.setId(id);
    stat.setCrTime(now.getTime());
    stat.setGeneration(0);
    stat.setSize(size);
    stat.setATime(now.getTime());
    stat.setCTime(now.getTime());
    stat.setMTime(now.getTime());
    stat.setUid(uid);
    stat.setGid(gid);
    stat.setMode(mode & UnixPermission.S_PERMS | type);
    stat.setNlink(nlink);
    stat.setDev(17);
    stat.setRdev(13);
    return stat;
}
From source file:org.cloudgraph.rdb.service.GraphDispatcher.java
/**
 * Attempts to lock the given datastore entity. If the given entity has no
 * locking data or the entity is already locked by the given user, the lock is
 * refreshed. Otherwise the lock is overwritten (slammed) if another user has
 * an expired lock. If another user has a current lock on the given datastore
 * entity, a LockedEntityException is thrown.
 *
 * @param entity the datastore entity
 * @param dataObject the value object
 * @param lockedDateProperty the last-locked-date property definition metadata
 * @param lockedByNameProperty the last-locked-by-name property definition metadata
 * @param snapshotDate the query snapshot date
 */
private void lock(PlasmaDataObject dataObject, Map<String, PropertyPair> entity,
        PlasmaProperty lockedDateProperty, PlasmaProperty lockedByNameProperty, Timestamp snapshotDate)
        throws IllegalAccessException, IllegalArgumentException, InvocationTargetException {
    if (lockedDateProperty != null && lockedByNameProperty != null) {
        PropertyPair lockedDatePair = entity.get(lockedDateProperty.getName());
        PropertyPair lockedByNamePair = entity.get(lockedByNameProperty.getName());
        String lockedByName = (String) lockedByNamePair.getValue();
        Date lockedDate = (Date) lockedDatePair.getValue();
        CoreHelper.unflagLocked(dataObject);
        // log.info("flag locked");
        if (lockedByName == null || username.equals(lockedByName)) { // no lock or same user
            if (log.isDebugEnabled()) {
                log.debug("locking " + entity.getClass().getSimpleName() + " ("
                        + dataObject.getUUIDAsString() + ")");
            }
            entity.put(lockedByNameProperty.getName(), new PropertyPair(lockedByNameProperty, username));
            entity.put(lockedDateProperty.getName(),
                    new PropertyPair(lockedDateProperty, this.snapshotMap.getSnapshotDate()));
        } else { // another user has an existing or expired lock
            long timeout = 300000L;
            DataAccessProvider providerConf = PlasmaRuntime.getInstance()
                    .getDataAccessProvider(DataAccessProviderName.JDBC);
            if (providerConf.getConcurrency() != null)
                if (providerConf.getConcurrency().getPessimisticLockTimeoutMillis() > 0)
                    timeout = providerConf.getConcurrency().getPessimisticLockTimeoutMillis();
            if (snapshotDate.getTime() - lockedDate.getTime() > timeout) { // existing lock expired
                if (log.isDebugEnabled()) {
                    log.debug("locking " + entity.getClass().getSimpleName() + " ("
                            + dataObject.getUUIDAsString() + ") - existing lock by '" + lockedByName
                            + "' expired");
                }
                entity.put(lockedByNameProperty.getName(), new PropertyPair(lockedByNameProperty, username));
                entity.put(lockedDateProperty.getName(),
                        new PropertyPair(lockedDateProperty, this.snapshotMap.getSnapshotDate()));
            } else {
                if (log.isWarnEnabled()) {
                    log.warn("could not issue lock for user '" + String.valueOf(username)
                            + "' for snapshot date " + String.valueOf(snapshotDate));
                }
                throw new LockedEntityException(entity.getClass().getSimpleName(), lockedByName, lockedDate);
            }
        }
    }
}
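The expiry test is plain millisecond arithmetic on the two getTime() values. A minimal sketch of that check in isolation (the 300000 ms default comes from the method above; the dates are illustrative):

import java.sql.Timestamp;

public class LockExpiryDemo {
    public static void main(String[] args) {
        long timeout = 300000L; // 5-minute default, as in the method above
        Timestamp lockedDate   = Timestamp.valueOf("2024-01-01 10:00:00");
        Timestamp snapshotDate = Timestamp.valueOf("2024-01-01 10:05:01");

        // 301,000 ms elapsed > 300,000 ms timeout -> the existing lock has expired
        boolean expired = snapshotDate.getTime() - lockedDate.getTime() > timeout;
        System.out.println(expired); // true
    }
}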
From source file:org.cloudgraph.cassandra.service.GraphDispatcher.java
/**
 * Attempts to lock the given datastore entity. If the given entity has no
 * locking data or the entity is already locked by the given user, the lock is
 * refreshed. Otherwise the lock is overwritten (slammed) if another user has
 * an expired lock. If another user has a current lock on the given datastore
 * entity, a LockedEntityException is thrown.
 *
 * @param entity the datastore entity
 * @param dataObject the value object
 * @param lockedDateProperty the last-locked-date property definition metadata
 * @param lockedByNameProperty the last-locked-by-name property definition metadata
 * @param snapshotDate the query snapshot date
 */
private void lock(PlasmaDataObject dataObject, Map<String, PropertyPair> entity,
        PlasmaProperty lockedDateProperty, PlasmaProperty lockedByNameProperty, Timestamp snapshotDate)
        throws IllegalAccessException, IllegalArgumentException, InvocationTargetException {
    if (lockedDateProperty != null && lockedByNameProperty != null) {
        PropertyPair lockedDatePair = entity.get(lockedDateProperty.getName());
        PropertyPair lockedByNamePair = entity.get(lockedByNameProperty.getName());
        String lockedByName = (String) lockedByNamePair.getValue();
        Date lockedDate = (Date) lockedDatePair.getValue();
        CoreHelper.unflagLocked(dataObject);
        // log.info("flag locked");
        if (lockedByName == null || username.equals(lockedByName)) { // no lock or same user
            if (log.isDebugEnabled()) {
                log.debug("locking " + entity.getClass().getSimpleName() + " ("
                        + dataObject.getUUIDAsString() + ")");
            }
            entity.put(lockedByNameProperty.getName(), new PropertyPair(lockedByNameProperty, username));
            entity.put(lockedDateProperty.getName(),
                    new PropertyPair(lockedDateProperty, this.snapshotMap.getSnapshotDate()));
        } else { // another user has an existing or expired lock
            long timeout = 300000L;
            DataAccessProvider providerConf = PlasmaConfig.getInstance()
                    .getDataAccessProvider(DataAccessProviderName.JDBC);
            if (providerConf.getConcurrency() != null)
                if (providerConf.getConcurrency().getPessimisticLockTimeoutMillis() > 0)
                    timeout = providerConf.getConcurrency().getPessimisticLockTimeoutMillis();
            if (snapshotDate.getTime() - lockedDate.getTime() > timeout) { // existing lock expired
                if (log.isDebugEnabled()) {
                    log.debug("locking " + entity.getClass().getSimpleName() + " ("
                            + dataObject.getUUIDAsString() + ") - existing lock by '" + lockedByName
                            + "' expired");
                }
                entity.put(lockedByNameProperty.getName(), new PropertyPair(lockedByNameProperty, username));
                entity.put(lockedDateProperty.getName(),
                        new PropertyPair(lockedDateProperty, this.snapshotMap.getSnapshotDate()));
            } else {
                if (log.isWarnEnabled()) {
                    log.warn("could not issue lock for user '" + String.valueOf(username)
                            + "' for snapshot date " + String.valueOf(snapshotDate));
                }
                throw new LockedEntityException(entity.getClass().getSimpleName(), lockedByName, lockedDate);
            }
        }
    }
}
From source file:org.kawanfw.sql.jdbc.ResultSetHttp.java
/**
 * Because ORACLE always formats a Date as a Timestamp, we must test the length
 * of the string and return a java.sql.Date built from either a
 * Timestamp.toString() or a java.sql.Date.toString() input.
 *
 * @param s the Timestamp.toString() or java.sql.Date.toString() input
 * @return the java.sql.Date corresponding to the input
 */
private java.sql.Date timestampOrDateValueOf(String s) {
    // An ORACLE DATE arrives formatted as a Timestamp, so test the string length
    if (s.length() > "yyyy-mm-dd".length()) {
        Timestamp ts = Timestamp.valueOf(s);
        java.sql.Date date = new java.sql.Date(ts.getTime());
        return date;
    } else {
        return java.sql.Date.valueOf(s);
    }
}
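A hypothetical call site (the input strings below are illustrative) exercises both branches: a full timestamp string is longer than the "yyyy-mm-dd" pattern and is converted through Timestamp.valueOf(...).getTime(), while a bare date string is parsed directly:

import java.sql.Timestamp;

public class TimestampOrDateDemo {
    public static void main(String[] args) {
        // Oracle-style DATE rendered as a timestamp string: longer than "yyyy-mm-dd",
        // so it is parsed as a Timestamp and converted via getTime()
        Timestamp ts = Timestamp.valueOf("2024-01-15 10:30:00.0");
        java.sql.Date fromTimestamp = new java.sql.Date(ts.getTime());
        System.out.println(fromTimestamp); // 2024-01-15

        // Plain date string: exactly "yyyy-mm-dd" length, parsed directly
        java.sql.Date fromDate = java.sql.Date.valueOf("2024-01-15");
        System.out.println(fromDate); // 2024-01-15
    }
}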
From source file:org.apache.geode.internal.InternalDataSerializer.java
private static void writeTimestamp(Timestamp o, DataOutput out) throws IOException {
    InternalDataSerializer.checkOut(out);

    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
        logger.trace(LogMarker.SERIALIZER, "Writing Timestamp: {}", o);
    }
    DataSerializer.writePrimitiveLong(o.getTime(), out);
}
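Geode serializes the Timestamp as a single primitive long here. A plausible read-side counterpart (a sketch, not the actual Geode method; it assumes DataSerializer.readPrimitiveLong, the pair of the writePrimitiveLong call above) would rebuild the Timestamp from that long. Note that any precision finer than a millisecond is lost, because getTime() carries only whole milliseconds of the nanos field:

import java.io.DataInput;
import java.io.IOException;
import java.sql.Timestamp;

import org.apache.geode.DataSerializer;

public class TimestampSerializationSketch {

    // Hypothetical read-side counterpart of writeTimestamp above
    public static Timestamp readTimestamp(DataInput in) throws IOException {
        long millis = DataSerializer.readPrimitiveLong(in);
        // getTime() on the write side truncated nanos to whole milliseconds,
        // so sub-millisecond precision does not survive the round trip
        return new Timestamp(millis);
    }
}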
From source file:org.isatools.isatab_v1.ISATABPersistenceTest.java
@SuppressWarnings("static-access")
@Test
public void testPersistence() throws Exception {
    out.println("\n\n_______________________ ISATAB Persistence Test _______________________\n\n");

    String baseDir = System.getProperty("basedir");
    String filesPath = baseDir + "/target/test-classes/test-data/isatab/isatab_v1_200810/griffin_gauguier_200810";
    ISATABLoader loader = new ISATABLoader(filesPath);
    FormatSetInstance isatabInstance = loader.load();

    BIIObjectStore store = new BIIObjectStore();
    ISATABMapper isatabMapper = new ISATABMapper(store, isatabInstance);
    isatabMapper.map();
    assertTrue("Oh no! No mapped object!", store.size() > 0);

    DotGraphGenerator dotter = new DotGraphGenerator(store.values(Processing.class));
    String dotPath = filesPath + "/graph.dot";
    // WILL NEVER WORK WITH THIS CAUSE IT ASSIGNS IDs!!!
    // dotter.createGraph ( dotPath );
    // out.println ( "Graph saved into " + dotPath );

    out.println("\n_____________ Persisting the objects:\n" + isatabMapper.report(store));

    // Test the repository too
    String repoPath = baseDir + "/target/bii_test_repo/meta_data";
    // File repoDir = new File ( repoPath );
    // if ( !repoDir.exists () ) FileUtils.forceMkdir ( repoDir );

    ISATABPersister persister = new ISATABPersister(store, DaoFactory.getInstance(entityManager));
    Timestamp ts = persister.persist(filesPath);
    transaction.commit();

    // TODO: close session, retrieve objects from DB, check they correspond to the submission

    Study study2 = store.getType(Study.class, "S:GG200810:2");
    String study2FileName = "study_" + DataLocationManager.getObfuscatedStudyFileName(study2);
    String submissionRepoPath2 = repoPath + "/" + study2FileName;
    assertTrue("Oh no! Submission directory not created in the submission repo: " + submissionRepoPath2 + "!",
            new File(submissionRepoPath2).exists());
    assertTrue("Oh no! Submission file investigation.csv didn't go to the submission repository "
            + submissionRepoPath2 + "!", new File(submissionRepoPath2 + "/investigation.csv").exists());
    assertTrue("Oh no! Submission file s-Study-Griffin.txt didn't go to the submission repository "
            + submissionRepoPath2 + "!", new File(submissionRepoPath2 + "/s-Study-Griffin.txt").exists());

    Study study1 = store.getType(Study.class, "S:GG200810:1");
    String study1FileName = "study_" + DataLocationManager.getObfuscatedStudyFileName(study1);
    String submissionRepoPath1 = repoPath + "/" + study1FileName;
    assertTrue("Oh no! Submission file a-S1.A3.txt didn't go to the submission repository "
            + submissionRepoPath1 + "!", new File(submissionRepoPath1 + "/a-S1.A3.txt").exists());

    String medaRepoPath = baseDir + "/target/bii_test_repo/meda",
            nonMedaRepoPath = baseDir + "/target/bii_test_repo/generic";
    assertTrue("Oh no! MEDA file repo wasn't created: " + medaRepoPath + "!", new File(medaRepoPath).exists());
    assertTrue("Oh no! non-MEDA file repo wasn't created: " + nonMedaRepoPath + "!",
            new File(nonMedaRepoPath).exists());
    assertTrue("Oh no! non-MEDA file clinchem.txt didn't go to its repository " + nonMedaRepoPath,
            new File(nonMedaRepoPath + "/" + study2FileName + "/raw_data/clinchem.txt").exists());

    boolean hasSomeAnn = false;
    for (Study study : store.valuesOfType(Study.class)) {
        for (Assay assay : study.getAssays()) {
            for (Xref xref : assay.getXrefs()) {
                ReferenceSource xsrc = xref.getSource();
                if (StringUtils.contains(xsrc.getDescription(), "Data Files Repository")) {
                    hasSomeAnn = true;
                    break;
                }
            }
        }
    }
    assertTrue("Ops! I didn't find any assay annotation about their linked data files!", hasSomeAnn);

    out.println("\n\n\n\n________________ Done, Submission TS: " + ts.getTime() + " (" + ts + " + "
            + ts.getNanos() + "ns)");
    out.println(" Results:\n" + store.toStringVerbose());
    out.println("\n\n___________________ /end: ISATAB Persistence Test ___________________\n\n");
}
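The final log line prints the same instant three ways, and the two accessors overlap: getTime() already includes the whole-millisecond part of the fractional second, while getNanos() returns the full fractional second in nanoseconds. A minimal sketch (values illustrative):

import java.sql.Timestamp;

public class TimeVsNanosDemo {
    public static void main(String[] args) {
        Timestamp ts = Timestamp.valueOf("2024-01-01 00:00:00");
        ts.setNanos(987654321); // fractional second = 0.987654321 s

        System.out.println(ts.getTime() % 1000); // 987 (millisecond part only)
        System.out.println(ts.getNanos());       // 987654321 (full fraction)
    }
}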
From source file:org.finra.dm.dao.impl.DmDaoImpl.java
private Timestamp subtractMinutes(Timestamp timestamp, int minutes) {
    long current = timestamp.getTime();
    long result = current - minutes * 60L * 1000L;
    return new Timestamp(result);
}
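A usage sketch of the same arithmetic; the 60L literal promotes the multiplication to long, so large minute values cannot overflow int before the subtraction:

import java.sql.Timestamp;

public class SubtractMinutesDemo {
    public static void main(String[] args) {
        Timestamp now = new Timestamp(System.currentTimeMillis());
        // 90 minutes earlier: 90 * 60L * 1000L = 5,400,000 ms
        Timestamp earlier = new Timestamp(now.getTime() - 90 * 60L * 1000L);
        System.out.println(now.getTime() - earlier.getTime()); // 5400000
    }
}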
From source file:com.mirth.connect.donkey.server.data.jdbc.JdbcDao.java
private ConnectorMessage getConnectorMessageFromResultSet(String channelId, ResultSet resultSet,
        boolean includeContent, boolean includeMetaDataMap) {
    try {
        ConnectorMessage connectorMessage = new ConnectorMessage();
        long messageId = resultSet.getLong("message_id");
        int metaDataId = resultSet.getInt("id");

        Calendar receivedDate = Calendar.getInstance();
        receivedDate.setTimeInMillis(resultSet.getTimestamp("received_date").getTime());

        Calendar sendDate = null;
        Timestamp sendDateTimestamp = resultSet.getTimestamp("send_date");
        if (sendDateTimestamp != null) {
            sendDate = Calendar.getInstance();
            sendDate.setTimeInMillis(sendDateTimestamp.getTime());
        }

        Calendar responseDate = null;
        Timestamp responseDateTimestamp = resultSet.getTimestamp("response_date");
        if (responseDateTimestamp != null) {
            responseDate = Calendar.getInstance();
            responseDate.setTimeInMillis(responseDateTimestamp.getTime());
        }

        connectorMessage.setChannelName(getDeployedChannelName(channelId));
        connectorMessage.setMessageId(messageId);
        connectorMessage.setMetaDataId(metaDataId);
        connectorMessage.setChannelId(channelId);
        connectorMessage.setServerId(resultSet.getString("server_id"));
        connectorMessage.setConnectorName(resultSet.getString("connector_name"));
        connectorMessage.setReceivedDate(receivedDate);
        connectorMessage.setStatus(Status.fromChar(resultSet.getString("status").charAt(0)));
        connectorMessage.setSendAttempts(resultSet.getInt("send_attempts"));
        connectorMessage.setSendDate(sendDate);
        connectorMessage.setResponseDate(responseDate);
        connectorMessage.setErrorCode(resultSet.getInt("error_code"));
        connectorMessage.setChainId(resultSet.getInt("chain_id"));
        connectorMessage.setOrderId(resultSet.getInt("order_id"));

        if (includeContent) {
            if (metaDataId > 0) {
                // For destination connectors, retrieve and load any content that is stored on the source connector
                loadMessageContent(connectorMessage,
                        getDestinationMessageContentFromSource(channelId, messageId, metaDataId));
            }

            // Retrieve all content for the connector and load it into the connector message
            loadMessageContent(connectorMessage, getMessageContent(channelId, messageId, metaDataId));
        }

        if (includeMetaDataMap) {
            connectorMessage.setMetaDataMap(getMetaDataMap(channelId, messageId, metaDataId));
        }

        return connectorMessage;
    } catch (SQLException e) {
        throw new DonkeyDaoException(e);
    }
}
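The null-safe Timestamp-to-Calendar conversion appears three times in the method above and again in the metadata readers below. Extracted as a standalone helper, the pattern is simply (a sketch; the Mirth source inlines it at each call site):

import java.sql.Timestamp;
import java.util.Calendar;

public class TimestampToCalendar {

    // Convert a possibly-NULL SQL timestamp to a Calendar via epoch milliseconds
    static Calendar toCalendar(Timestamp timestamp) {
        if (timestamp == null) {
            return null;
        }
        Calendar calendar = Calendar.getInstance();
        calendar.setTimeInMillis(timestamp.getTime());
        return calendar;
    }
}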
From source file:com.mirth.connect.donkey.server.data.jdbc.JdbcDao.java
private Map<String, Object> getMetaDataMap(String channelId, long messageId, int metaDataId) {
    PreparedStatement statement = null;
    ResultSet resultSet = null;

    try {
        Map<String, Object> values = new HashMap<String, Object>();
        values.put("localChannelId", getLocalChannelId(channelId));

        // do not cache this statement since metadata columns may be added/removed
        statement = connection.prepareStatement(querySource.getQuery("getMetaDataMap", values));
        statement.setLong(1, messageId);
        statement.setInt(2, metaDataId);

        Map<String, Object> metaDataMap = new HashMap<String, Object>();
        resultSet = statement.executeQuery();

        if (resultSet.next()) {
            ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
            int columnCount = resultSetMetaData.getColumnCount();

            for (int i = 1; i <= columnCount; i++) {
                MetaDataColumnType metaDataColumnType = MetaDataColumnType
                        .fromSqlType(resultSetMetaData.getColumnType(i));
                Object value = null;

                switch (metaDataColumnType) {
                case STRING:
                    value = resultSet.getString(i);
                    break;
                case NUMBER:
                    value = resultSet.getBigDecimal(i);
                    break;
                case BOOLEAN:
                    value = resultSet.getBoolean(i);
                    break;
                case TIMESTAMP:
                    Timestamp timestamp = resultSet.getTimestamp(i);
                    if (timestamp != null) {
                        value = Calendar.getInstance();
                        ((Calendar) value).setTimeInMillis(timestamp.getTime());
                    }
                    break;
                default:
                    throw new Exception("Unrecognized MetaDataColumnType");
                }

                metaDataMap.put(resultSetMetaData.getColumnName(i).toUpperCase(), value);
            }
        }

        return metaDataMap;
    } catch (Exception e) {
        throw new DonkeyDaoException(e);
    } finally {
        close(resultSet);
        close(statement);
    }
}
From source file:com.mirth.connect.donkey.server.data.jdbc.JdbcDao.java
private Map<Long, Map<Integer, Map<String, Object>>> getMetaDataMaps(String channelId, List<Long> messageIds) {
    if (messageIds.size() > 1000) {
        throw new DonkeyDaoException("Only up to 1000 message Ids at a time are supported.");
    }

    Map<Long, Map<Integer, Map<String, Object>>> metaDataMaps = new HashMap<Long, Map<Integer, Map<String, Object>>>();
    PreparedStatement statement = null;
    ResultSet resultSet = null;

    try {
        Map<String, Object> values = new HashMap<String, Object>();
        values.put("localChannelId", getLocalChannelId(channelId));
        values.put("messageIds", StringUtils.join(messageIds, ","));

        // do not cache this statement since metadata columns may be added/removed
        statement = connection.prepareStatement(querySource.getQuery("getMetaDataMapByMessageId", values));
        resultSet = statement.executeQuery();

        while (resultSet.next()) {
            Long messageId = resultSet.getLong("message_id");
            Integer metaDataId = resultSet.getInt("metadata_id");

            Map<Integer, Map<String, Object>> connectorMetaDataMap = metaDataMaps.get(messageId);
            if (connectorMetaDataMap == null) {
                connectorMetaDataMap = new HashMap<Integer, Map<String, Object>>();
                metaDataMaps.put(messageId, connectorMetaDataMap);
            }

            Map<String, Object> metaDataMap = connectorMetaDataMap.get(metaDataId);
            if (metaDataMap == null) {
                metaDataMap = new HashMap<String, Object>();
                connectorMetaDataMap.put(metaDataId, metaDataMap);
            }

            ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
            int columnCount = resultSetMetaData.getColumnCount();

            for (int i = 1; i <= columnCount; i++) {
                MetaDataColumnType metaDataColumnType = MetaDataColumnType
                        .fromSqlType(resultSetMetaData.getColumnType(i));
                Object value = null;

                switch (metaDataColumnType) {
                case STRING:
                    value = resultSet.getString(i);
                    break;
                case NUMBER:
                    value = resultSet.getBigDecimal(i);
                    break;
                case BOOLEAN:
                    value = resultSet.getBoolean(i);
                    break;
                case TIMESTAMP:
                    Timestamp timestamp = resultSet.getTimestamp(i);
                    if (timestamp != null) {
                        value = Calendar.getInstance();
                        ((Calendar) value).setTimeInMillis(timestamp.getTime());
                    }
                    break;
                default:
                    throw new Exception("Unrecognized MetaDataColumnType");
                }

                metaDataMap.put(resultSetMetaData.getColumnName(i).toUpperCase(), value);
            }
        }

        return metaDataMaps;
    } catch (Exception e) {
        throw new DonkeyDaoException(e);
    } finally {
        close(resultSet);
        close(statement);
    }
}