Example usage for java.sql Timestamp getNanos

List of usage examples for java.sql Timestamp getNanos

Introduction

On this page you can find example usages of java.sql.Timestamp.getNanos().

Prototype

public int getNanos() 

Document

Gets this Timestamp object's nanos value.
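
Before diving into the project examples below, here is a minimal standalone sketch of what getNanos() returns (the method name demoGetNanos and the sample timestamp value are made up for illustration): the call yields the Timestamp's full fractional-seconds component in nanoseconds, from 0 to 999,999,999, which overlaps with the millisecond part already included in getTime().

import java.sql.Timestamp;

public static void demoGetNanos() {
    // 0.001234567 s = 1,234,567 ns (1 ms plus 234,567 ns of sub-millisecond precision)
    Timestamp ts = Timestamp.valueOf("2020-01-01 12:00:00.001234567");

    // getNanos() returns the whole fractional second in nanoseconds
    System.out.println(ts.getNanos());             // prints 1234567

    // getTime() already carries the millisecond part, so the
    // sub-millisecond remainder is getNanos() % 1_000_000
    System.out.println(ts.getNanos() % 1_000_000); // prints 234567
}

Several of the examples below rely on exactly this split; for instance, the kx.c serializer adds getNanos() % 1000000 on top of the millisecond value obtained from getTime().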

Usage

From source file:com.twineworks.kettle.ruby.step.execmodels.SimpleExecutionModel.java

public RubyHash createRubyInputRow(RowMetaInterface rowMeta, Object[] r) throws KettleException {

    // create a hash for the row; hashes are not reused on purpose, so the scripting user can safely use them to store entire rows between invocations
    RubyHash rubyRow = new RubyHash(data.runtime);

    String[] fieldNames = rowMeta.getFieldNames();
    for (int i = 0; i < fieldNames.length; i++) {

        String field = fieldNames[i];
        // null values need no special treatment; they'll become nil
        if (r[i] == null) {
            rubyRow.put(field, null);
        } else {

            ValueMetaInterface vm = rowMeta.getValueMeta(i);

            switch (vm.getType()) {
            case ValueMetaInterface.TYPE_BOOLEAN:
                rubyRow.put(field, vm.getBoolean(r[i]));
                break;
            case ValueMetaInterface.TYPE_INTEGER:
                rubyRow.put(field, vm.getInteger(r[i]));
                break;
            case ValueMetaInterface.TYPE_STRING:
                rubyRow.put(field, vm.getString(r[i]));
                break;
            case ValueMetaInterface.TYPE_NUMBER:
                rubyRow.put(field, vm.getNumber(r[i]));
                break;
            case ValueMetaInterface.TYPE_NONE:
                rubyRow.put(field, r[i]);
                break;
            case ValueMetaInterface.TYPE_SERIALIZABLE:
                if (r[i] instanceof RubyStepMarshalledObject) {
                    Object restoredObject = getMarshal().callMethod(data.runtime.getCurrentContext(), "restore",
                            data.runtime.newString(r[i].toString()));
                    rubyRow.put(field, restoredObject);
                } else {
                    // try to put the object in there as-is; should create a nice adapter for the java object
                    rubyRow.put(field, r[i]);
                }
                break;
            case ValueMetaInterface.TYPE_BINARY:
                // put a ruby array with bytes in there, that is expensive and should probably be avoided
                rubyRow.put(fieldNames[i],
                        data.runtime.newArrayNoCopy(JavaUtil.convertJavaArrayToRuby(data.runtime,
                                ArrayUtils.toObject((byte[]) vm.getBinary(r[i])))));

                break;

            case ValueMetaInterface.TYPE_BIGNUMBER:
                IRubyObject bigDecimalObject = getBigDecimal().callMethod(data.runtime.getCurrentContext(),
                        "new", data.runtime.newString((vm.getBigNumber(r[i])).toString()));
                rubyRow.put(field, bigDecimalObject);
                break;

            case ValueMetaInterface.TYPE_DATE:
                rubyRow.put(field, data.runtime.newTime((vm.getDate(r[i])).getTime()));
                break;

            case ValueMetaInterface.TYPE_TIMESTAMP:
                ValueMetaTimestamp vmTimestamp = (ValueMetaTimestamp) vm;
                Timestamp ts = vmTimestamp.getTimestamp(r[i]);
                // truncate the epoch millis down to whole seconds; setNSec then carries the full fractional second from getNanos()
                RubyTime rubyTime = data.runtime.newTime(ts.getTime() / 1000 * 1000);
                rubyTime.setNSec(ts.getNanos());
                rubyRow.put(field, rubyTime);
                break;

            case ValueMetaInterface.TYPE_INET:
                ValueMetaInternetAddress vmInet = (ValueMetaInternetAddress) vm;
                InetAddress ip = vmInet.getInternetAddress(r[i]);
                IRubyObject ipObject = getIPAddr().callMethod(data.runtime.getCurrentContext(), "new",
                        data.runtime.newString(ip.getHostAddress()));
                rubyRow.put(field, ipObject);
                break;
            }

        }

    }

    return rubyRow;

}

From source file:TypeConversionHelper.java

/**
 * Formats a timestamp in JDBC timestamp escape format using the timezone
 * of the passed Calendar.
 * @param ts The timestamp to be formatted.
 * @param cal The Calendar
 * @return  A String in <tt>yyyy-mm-dd hh:mm:ss.fffffffff</tt> format.
 * @see java.sql.Timestamp
 */
public static String timestampToString(Timestamp ts, Calendar cal) {
    cal.setTime(ts);

    int year = cal.get(Calendar.YEAR);
    int month = cal.get(Calendar.MONTH) + 1; // Months are zero based in Calendar
    int day = cal.get(Calendar.DATE);
    int hour = cal.get(Calendar.HOUR_OF_DAY);
    int minute = cal.get(Calendar.MINUTE);
    int second = cal.get(Calendar.SECOND);

    String yearString = Integer.toString(year);
    String monthString = month < 10 ? "0" + month : Integer.toString(month);
    String dayString = day < 10 ? "0" + day : Integer.toString(day);
    String hourString = hour < 10 ? "0" + hour : Integer.toString(hour);
    String minuteString = minute < 10 ? "0" + minute : Integer.toString(minute);
    String secondString = second < 10 ? "0" + second : Integer.toString(second);
    String nanosString = Integer.toString(ts.getNanos());

    if (ts.getNanos() != 0) {
        // Add leading zeroes (ZEROES is a zero-padding String constant defined elsewhere in the class)
        nanosString = ZEROES.substring(0, ZEROES.length() - nanosString.length()) + nanosString;

        // Truncate trailing zeroes
        int truncIndex = nanosString.length() - 1;
        while (nanosString.charAt(truncIndex) == '0') {
            --truncIndex;
        }

        nanosString = nanosString.substring(0, truncIndex + 1);
    }

    return (yearString + "-" + monthString + "-" + dayString + " " + hourString + ":" + minuteString + ":"
            + secondString + "." + nanosString);
}

From source file:eu.trentorise.opendata.jackan.CkanClient.java

/**
 * Formats a timestamp according to {@link #CKAN_TIMESTAMP_PATTERN}, with
 * precision up to microseconds.
 *
 * @see #parseTimestamp(java.lang.String) for the inverse process.
 * @since 0.4.1
 */
@Nullable
public static String formatTimestamp(Timestamp timestamp) {
    if (timestamp == null) {
        throw new IllegalArgumentException("Found null timestamp!");
    }
    Timestamp ret = Timestamp.valueOf(timestamp.toString());
    ret.setNanos((timestamp.getNanos() / 1000) * 1000);
    return Strings.padEnd(ret.toString().replace(" ", "T"), "1970-01-01T01:00:00.000001".length(), '0');
}

From source file:com.antsdb.saltedfish.server.mysql.PacketEncoder.java

/**
  * From server to client. One packet for each row in the result set.
  * 
  * <pre>
  * Bytes                   Name
  * -----                   ----
  * n (Length Coded String) (column value)
  * ...
  * 
  * (column value):         The data in the column, as a character string.
  *                         If a column is defined as non-character, the
  *                         server converts the value into a character
  *                         before sending it. Since the value is a Length
  *                         Coded String, a NULL can be represented with a
  *                         single byte containing 251(see the description
  *                         of Length Coded Strings in section "Elements" above).
  * 
  * @see http://forge.mysql.com/wiki/MySQL_Internals_ClientServer_Protocol#Row_Data_Packet
  * </pre>
  * 
  * @param buffer
  * @param nColumns 
  * @param rowRec
  */
public void writeRowTextBody(ByteBuf buffer, long pRecord, int nColumns) {
    for (int i = 0; i < nColumns; i++) {
        Object fv = Record.getValue(pRecord, i);
        if (fv instanceof Boolean) {
            // MySQL has no boolean; it is actually tinyint
            fv = ((Boolean) fv) ? 1 : 0;
        }
        if (fv == null) {
            // null mark is 251
            buffer.writeByte((byte) 251);
        } else if (fv instanceof Duration) {
            Duration t = (Duration) fv;
            String text = DurationFormatUtils.formatDuration(t.toMillis(), "HH:mm:ss");
            BufferUtils.writeLenString(buffer, text, this.cs);
        } else if (fv instanceof Timestamp) {
            // @see ResultSetRow#getDateFast; the MySQL JDBC driver only accepts precision 19, 21, or 29 when the caller wants
            // to get a Date from a datetime column
            Timestamp ts = (Timestamp) fv;
            if (ts.getTime() == Long.MIN_VALUE) {
                // mysql '0000-00-00 00:00:00' is treated as null in jdbc
                buffer.writeByte((byte) 251);
            } else {
                String text;
                if (ts.getNanos() == 0) {
                    text = TIMESTAMP19_FORMAT.format(ts);
                } else {
                    text = TIMESTAMP29_FORMAT.format(ts);
                }
                BufferUtils.writeLenString(buffer, text, this.cs);
            }
        } else if (fv instanceof byte[]) {
            BufferUtils.writeWithLength(buffer, (byte[]) fv);
        } else if ((fv instanceof Date) && (((Date) fv).getTime() == Long.MIN_VALUE)) {
            // mysql '0000-00-00' is treated as null in jdbc
            buffer.writeByte((byte) 251);
        } else {
            String val = fv.toString();
            if (val.length() == 0) {
                // empty mark is 0
                buffer.writeByte((byte) 0);
            } else {
                BufferUtils.writeLenString(buffer, val, this.cs);
            }
        }
    }
}

From source file:DateUtils.java

/**
 * Copy constructor which will create exact copy of the timestamp including
 * the nanosecond portion.
 * 
 * @param original - original timestamp to copy
 */
public TimestampCopy(Timestamp original) {
    // Pass the time portion here
    super(original.getTime());
    // And now set the correct nanoseconds since it is required.
    setNanos(original.getNanos());
}

From source file:org.apache.taverna.prov.W3ProvenanceExport.java

protected Literal timestampToLiteral(Timestamp timestamp) {
    if (timestamp == null) {
        return null;
    }
    GregorianCalendar cal = new GregorianCalendar();
    cal.setTime(timestamp);
    XMLGregorianCalendar xmlCal = datatypeFactory.newXMLGregorianCalendar(cal);
    // Chop off the trailing zeros that carry no precision
    xmlCal.setFractionalSecond(BigDecimal.valueOf(timestamp.getNanos() / 1000000, NANOSCALE - 6));
    return provModel.model.createTypedLiteral(xmlCal.toXMLFormat(), XSDDatatype.XSDdateTime);
}

From source file:com.streamsets.pipeline.lib.jdbc.JdbcUtil.java

public Field resultToField(ResultSetMetaData md, ResultSet rs, int columnIndex, int maxClobSize,
        int maxBlobSize, DataType userSpecifiedType, UnknownTypeAction unknownTypeAction,
        boolean timestampToString) throws SQLException, IOException, StageException {
    Field field;
    if (userSpecifiedType != DataType.USE_COLUMN_TYPE) {
        // If user specifies the data type, overwrite the column type returned by database.
        field = Field.create(Field.Type.valueOf(userSpecifiedType.getLabel()), rs.getObject(columnIndex));
    } else {
        // All types as of JDBC 2.0 are here:
        // https://docs.oracle.com/javase/8/docs/api/constant-values.html#java.sql.Types.ARRAY
        // Good source of recommended mappings is here:
        // http://www.cs.mun.ca/java-api-1.5/guide/jdbc/getstart/mapping.html
        switch (md.getColumnType(columnIndex)) {
        case Types.BIGINT:
            field = Field.create(Field.Type.LONG, rs.getObject(columnIndex));
            break;
        case Types.BINARY:
        case Types.LONGVARBINARY:
        case Types.VARBINARY:
            field = Field.create(Field.Type.BYTE_ARRAY, rs.getBytes(columnIndex));
            break;
        case Types.BIT:
        case Types.BOOLEAN:
            field = Field.create(Field.Type.BOOLEAN, rs.getObject(columnIndex));
            break;
        case Types.CHAR:
        case Types.LONGNVARCHAR:
        case Types.LONGVARCHAR:
        case Types.NCHAR:
        case Types.NVARCHAR:
        case Types.VARCHAR:
            field = Field.create(Field.Type.STRING, rs.getObject(columnIndex));
            break;
        case Types.CLOB:
        case Types.NCLOB:
            field = Field.create(Field.Type.STRING, getClobString(rs.getClob(columnIndex), maxClobSize));
            break;
        case Types.BLOB:
            field = Field.create(Field.Type.BYTE_ARRAY, getBlobBytes(rs.getBlob(columnIndex), maxBlobSize));
            break;
        case Types.DATE:
            field = Field.create(Field.Type.DATE, rs.getDate(columnIndex));
            break;
        case Types.DECIMAL:
        case Types.NUMERIC:
            field = Field.create(Field.Type.DECIMAL, rs.getBigDecimal(columnIndex));
            field.setAttribute(HeaderAttributeConstants.ATTR_SCALE,
                    String.valueOf(rs.getMetaData().getScale(columnIndex)));
            field.setAttribute(HeaderAttributeConstants.ATTR_PRECISION,
                    String.valueOf(rs.getMetaData().getPrecision(columnIndex)));
            break;
        case Types.DOUBLE:
            field = Field.create(Field.Type.DOUBLE, rs.getObject(columnIndex));
            break;
        case Types.FLOAT:
        case Types.REAL:
            field = Field.create(Field.Type.FLOAT, rs.getObject(columnIndex));
            break;
        case Types.INTEGER:
            field = Field.create(Field.Type.INTEGER, rs.getObject(columnIndex));
            break;
        case Types.ROWID:
            field = Field.create(Field.Type.STRING, rs.getRowId(columnIndex).toString());
            break;
        case Types.SMALLINT:
        case Types.TINYINT:
            field = Field.create(Field.Type.SHORT, rs.getObject(columnIndex));
            break;
        case Types.TIME:
            field = Field.create(Field.Type.TIME, rs.getObject(columnIndex));
            break;
        case Types.TIMESTAMP:
            final Timestamp timestamp = rs.getTimestamp(columnIndex);
            if (timestampToString) {
                field = Field.create(Field.Type.STRING, timestamp == null ? null : timestamp.toString());
            } else {
                field = Field.create(Field.Type.DATETIME, timestamp);
                if (timestamp != null) {
                    final long actualNanos = timestamp.getNanos() % NANOS_TO_MILLIS_ADJUSTMENT;
                    if (actualNanos > 0) {
                        field.setAttribute(FIELD_ATTRIBUTE_NANOSECONDS, String.valueOf(actualNanos));
                    }
                }
            }
            break;
        // Ugly hack until we can support LocalTime, LocalDate, LocalDateTime, etc.
        case Types.TIME_WITH_TIMEZONE:
            OffsetTime offsetTime = rs.getObject(columnIndex, OffsetTime.class);
            field = Field.create(Field.Type.TIME, Date.from(offsetTime.atDate(LocalDate.MIN).toInstant()));
            break;
        case Types.TIMESTAMP_WITH_TIMEZONE:
            OffsetDateTime offsetDateTime = rs.getObject(columnIndex, OffsetDateTime.class);
            field = Field.create(Field.Type.ZONED_DATETIME, offsetDateTime.toZonedDateTime());
            break;
        //case Types.REF_CURSOR: // JDK8 only
        case Types.SQLXML:
        case Types.STRUCT:
        case Types.ARRAY:
        case Types.DATALINK:
        case Types.DISTINCT:
        case Types.JAVA_OBJECT:
        case Types.NULL:
        case Types.OTHER:
        case Types.REF:
        default:
            if (unknownTypeAction == null) {
                return null;
            }
            switch (unknownTypeAction) {
            case STOP_PIPELINE:
                throw new StageException(JdbcErrors.JDBC_37, md.getColumnType(columnIndex),
                        md.getColumnLabel(columnIndex));
            case CONVERT_TO_STRING:
                Object value = rs.getObject(columnIndex);
                if (value != null) {
                    field = Field.create(Field.Type.STRING, rs.getObject(columnIndex).toString());
                } else {
                    field = Field.create(Field.Type.STRING, null);
                }
                break;
            default:
                throw new IllegalStateException("Unknown action: " + unknownTypeAction);
            }
        }
    }

    return field;
}

From source file:org.epics.archiverappliance.retrieval.channelarchiver.ChannelArchiverReadOnlyPlugin.java

private List<Callable<EventStream>> getDataForPV(BasicContext context, String pvName, Timestamp startTime,
        Timestamp endTime, int archiveKey, PostProcessor postProcessor) throws IOException {
    try {
        // TODO the only thing that seems to get similar charts in ArchiveViewer for production data is using plot-binning.
        // This is hardcoded somewhere in the Data server or the ArchiveViewer code...
        // Need to figure out where it is and how to address it.
        String howStr = "3";
        String pvNameForCall = pvName;
        if (context.getPvNameFromRequest() != null) {
            logger.info("Using pvName from request " + context.getPvNameFromRequest()
                    + " when making a call to the ChannelArchiver for pv " + pvName);
            pvNameForCall = context.getPvNameFromRequest();
        }

        String archiveValuesStr = new String("<?xml version=\"1.0\"?>\n" + "<methodCall>\n"
                + "<methodName>archiver.values</methodName>\n" + "<params>\n" + "<param><value><i4>"
                + archiveKey + "</i4></value></param>\n" + "<param><value><array><data><value><string>"
                + pvNameForCall + "</string></value></data></array></value></param>\n" + "<param><value><i4>"
                + TimeUtils.convertToEpochSeconds(startTime) + "</i4></value></param>\n" + "<param><value><i4>"
                + startTime.getNanos() + "</i4></value></param>\n" + "<param><value><i4>"
                + TimeUtils.convertToEpochSeconds(endTime) + "</i4></value></param>\n" + "<param><value><i4>"
                + endTime.getNanos() + "</i4></value></param>\n" + "<param><value><i4>" + valuesRequested
                + "</i4></value></param>\n" + "<param><value><i4>" + howStr + "</i4></value></param>\n"
                + "</params>\n" + "</methodCall>\n");
        URI serverURI = new URI(serverURL);
        if (serverURI.getScheme().equals("file")) {
            logger.info("Using a file provider for Channel Archiver data - this better be a unit test.");
            // We use the file scheme for unit testing... Yeah, the extensions are hardcoded...
            InputStream is = new BufferedInputStream(
                    new FileInputStream(new File(serverURI.getPath() + File.separator + pvName + ".xml")));
            // ArchiverValuesHandler takes over the burden of closing the input stream.
            ArchiverValuesHandler handler = new ArchiverValuesHandler(pvName, is,
                    serverURL.toString() + "\n" + archiveValuesStr, context.getRetrievalExpectedDBRType());
            if (postProcessor != null) {
                return CallableEventStream.makeOneStreamCallableList(handler, postProcessor, true);
            } else {
                return CallableEventStream.makeOneStreamCallableList(handler);
            }
        } else {
            StringEntity archiverValues = new StringEntity(archiveValuesStr, ContentType.APPLICATION_XML);
            if (logger.isDebugEnabled()) {
                logger.debug(getDescription() + " making call to channel archiver with " + archiveValuesStr);
            }

            CloseableHttpClient httpclient = HttpClients.createDefault();
            HttpPost postMethod = new HttpPost(serverURL);
            postMethod.addHeader("Content-Type", "text/xml");
            postMethod.setEntity(archiverValues);
            if (logger.isDebugEnabled()) {
                logger.debug("About to make a POST with " + archiveValuesStr);
            }
            HttpResponse response = httpclient.execute(postMethod);
            int statusCode = response.getStatusLine().getStatusCode();
            if (statusCode >= 200 && statusCode <= 206) {
                HttpEntity entity = response.getEntity();
                if (entity != null) {
                    logger.debug("Obtained a HTTP entity of length " + entity.getContentLength());
                    // ArchiverValuesHandler takes over the burden of closing the input stream.
                    InputStream is = entity.getContent();
                    ArchiverValuesHandler handler = new ArchiverValuesHandler(pvName, is,
                            serverURL.toString() + "\n" + archiveValuesStr,
                            context.getRetrievalExpectedDBRType());
                    if (postProcessor != null) {
                        return CallableEventStream.makeOneStreamCallableList(handler, postProcessor, true);
                    } else {
                        return CallableEventStream.makeOneStreamCallableList(handler);
                    }
                } else {
                    throw new IOException("HTTP response did not have an entity associated with it");
                }
            } else {
                logger.error("Got an invalid status code " + statusCode + " from the server " + serverURL
                        + " for PV " + pvName + " so returning null");
                return null;
            }
        }
    } catch (UnsupportedEncodingException ex) {
        throw new IOException("Exception making call to Channel Archiver", ex);
    } catch (URISyntaxException e) {
        throw new IOException("Invalid URL " + serverURL, e);
    }
}

From source file:kx.c.java

void w(Timestamp p) {
    long j = p.getTime();
    if (vt < 1)
        throw new RuntimeException("Timestamp not valid pre kdb+2.6");
    // 1000000 * (lg(j) - k) rescales the adjusted millisecond value to nanoseconds relative to the kdb+ epoch
    // (k and lg are defined elsewhere in this class); getNanos() % 1000000 adds back the sub-millisecond
    // precision that getTime() cannot carry
    w(j == nj ? j : 1000000 * (lg(j) - k) + p.getNanos() % 1000000);
}

From source file:org.isatools.isatab_v1.ISATABPersistenceTest.java

@SuppressWarnings("static-access")
@Test
public void testPersistence() throws Exception {

    out.println("\n\n_______________________ ISATAB Persistence Test _______________________\n\n");

    String baseDir = System.getProperty("basedir");
    String filesPath = baseDir
            + "/target/test-classes/test-data/isatab/isatab_v1_200810/griffin_gauguier_200810";
    ISATABLoader loader = new ISATABLoader(filesPath);
    FormatSetInstance isatabInstance = loader.load();

    BIIObjectStore store = new BIIObjectStore();
    ISATABMapper isatabMapper = new ISATABMapper(store, isatabInstance);

    isatabMapper.map();

    assertTrue("Oh no! No mapped object! ", store.size() > 0);

    DotGraphGenerator dotter = new DotGraphGenerator(store.values(Processing.class));
    String dotPath = filesPath + "/graph.dot";
    // WILL NEVER WORK WITH THIS CAUSE IT ASSIGNS IDs!!!
    //      dotter.createGraph ( dotPath );
    //      out.println ( "Graph saved into " + dotPath );

    out.println("\n_____________ Persisting the objects:\n" + isatabMapper.report(store));

    // Test the repository too
    String repoPath = baseDir + "/target/bii_test_repo/meta_data";
    //      File repoDir = new File ( repoPath );
    //      if ( !repoDir.exists () )
    //         FileUtils.forceMkdir ( repoDir );

    ISATABPersister persister = new ISATABPersister(store, DaoFactory.getInstance(entityManager));
    Timestamp ts = persister.persist(filesPath);
    transaction.commit();

    // TODO: close session, retrieve objects from DB, check they correspond to the submission

    Study study2 = store.getType(Study.class, "S:GG200810:2");
    String study2FileName = "study_" + DataLocationManager.getObfuscatedStudyFileName(study2);

    String submissionRepoPath2 = repoPath + "/" + study2FileName;
    assertTrue("Oh no! Submission directory not created in the submission repo: " + submissionRepoPath2 + "!",
            new File(submissionRepoPath2).exists());
    assertTrue("Oh no! Submission file investigation.csv didn't go to the submission repository "
            + submissionRepoPath2 + "!", new File(submissionRepoPath2 + "/investigation.csv").exists());
    assertTrue(
            "Oh no! Submission file s-Study-Griffin.txt didn't go to the submission repository "
                    + submissionRepoPath2 + "!",
            new File(submissionRepoPath2 + "/s-Study-Griffin.txt").exists());

    Study study1 = store.getType(Study.class, "S:GG200810:1");
    String study1FileName = "study_" + DataLocationManager.getObfuscatedStudyFileName(study1);

    String submissionRepoPath1 = repoPath + "/" + study1FileName;
    assertTrue("Oh no! Submission file a-S1.A3.txt didn't go to the submission repository "
            + submissionRepoPath1 + "!", new File(submissionRepoPath1 + "/a-S1.A3.txt").exists());

    String medaRepoPath = baseDir + "/target/bii_test_repo/meda",
            nonMedaRepoPath = baseDir + "/target/bii_test_repo/generic";

    assertTrue("Oh no! MEDA file repo wasn't created: " + medaRepoPath + "!", new File(medaRepoPath).exists());
    assertTrue("Oh no! non-MEDA file repo wasn't created: " + nonMedaRepoPath + "!",
            new File(nonMedaRepoPath).exists());
    assertTrue("Oh no! non-MEDA file clinchem.txt didn't go to its repository " + nonMedaRepoPath,
            new File(nonMedaRepoPath + "/" + study2FileName + "/raw_data/clinchem.txt").exists());

    boolean hasSomeAnn = false;
    for (Study study : store.valuesOfType(Study.class)) {
        for (Assay assay : study.getAssays()) {
            for (Xref xref : assay.getXrefs()) {
                ReferenceSource xsrc = xref.getSource();
                if (StringUtils.contains(xsrc.getDescription(), "Data Files Repository")) {
                    hasSomeAnn = true;
                    break;
                }
            }
        }
    }
    assertTrue("Ops! I didn't find any assay annotation about their linked data files!", hasSomeAnn);

    out.println("\n\n\n\n________________ Done, Submission TS: " + ts.getTime() + " (" + ts + " + "
            + ts.getNanos() + "ns)");
    out.println("  Results:\n" + store.toStringVerbose());
    out.println("\n\n___________________ /end: ISATAB Persistence Test ___________________\n\n");
}