Example usage for java.util Calendar clear

Introduction

On this page you can find example usage for the java.util.Calendar.clear() method.

Prototype

public final void clear() 

Document

Sets all the calendar field values and the time value (millisecond offset from the Epoch) of this Calendar undefined.
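
Before the project examples under Usage, here is a minimal, self-contained sketch (not taken from any of the projects listed below; the class name is illustrative) showing why clear() is usually called right before set(): fields that set() does not touch would otherwise keep the current date and time, so the resulting timestamp would not be deterministic.

import java.util.Calendar;

public class CalendarClearExample {
    public static void main(String[] args) {
        Calendar cal = Calendar.getInstance();

        // clear() marks every calendar field and the time value as undefined;
        // any field that is not set afterwards falls back to its default
        // (January 1, 1970, 00:00:00 in the calendar's time zone).
        cal.clear();

        // Month is zero-based, so Calendar.FEBRUARY == 1.
        cal.set(2011, Calendar.FEBRUARY, 1);

        // Hour, minute, second and millisecond are all zero here because
        // they were cleared and never set again.
        System.out.println(cal.getTime());
        System.out.println(cal.getTimeInMillis());
    }
}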

Usage

From source file:org.kuali.kfs.coa.document.validation.impl.AccountRuleTest.java

public void testCheckAccountExpirationDateTodayOrEarlier_FutureDate() {

    MaintenanceDocument maintDoc = newMaintDoc(newAccount);
    AccountRule rule = (AccountRule) setupMaintDocRule(maintDoc, AccountRule.class);
    boolean result;
    Calendar testCalendar;
    Date testTimestamp;

    // get an arbitrarily late date - fail
    testCalendar = Calendar.getInstance();
    testCalendar.clear();
    testCalendar.set(2100, 1, 1);
    testTimestamp = new Date(testCalendar.getTimeInMillis());

    // future expiration date - should fail the today-or-earlier check
    newAccount.setAccountExpirationDate(testTimestamp);
    result = rule.checkAccountExpirationDateValidTodayOrEarlier(newAccount);
    assertEquals("Arbitrarily late date should pass.", false, result);
    assertFieldErrorExists("accountExpirationDate",
            KFSKeyConstants.ERROR_DOCUMENT_ACCMAINT_ACCT_CANNOT_BE_CLOSED_EXP_DATE_INVALID);
    assertGlobalMessageMapSize(1);

}

From source file:org.kuali.ole.coa.document.validation.impl.AccountRuleTest.java

@Test
public void testCheckAccountExpirationDateTodayOrEarlier_PastDate() {

    MaintenanceDocument maintDoc = newMaintDoc(newAccount);
    AccountRule rule = (AccountRule) setupMaintDocRule(maintDoc, AccountRule.class);
    boolean result;
    Calendar testCalendar;
    Date testTimestamp;

    // get an arbitrarily early date
    testCalendar = Calendar.getInstance();
    testCalendar.clear();
    testCalendar.set(1900, 1, 1);
    testTimestamp = new Date(testCalendar.getTimeInMillis());

    // past expiration date - pass
    newAccount.setAccountExpirationDate(testTimestamp);
    result = rule.checkAccountExpirationDateValidTodayOrEarlier(newAccount);
    assertEquals("Arbitrarily early date should fail.", true, result);
    assertGlobalMessageMapEmpty();

}

From source file:org.apache.flume.sink.customhdfs.TestHDFSEventSink.java

/**
 * Test that a close due to roll interval removes the bucketwriter from
 * sfWriters map.
 */
@Test
public void testCloseRemovesFromSFWriters()
        throws InterruptedException, LifecycleException, EventDeliveryException, IOException {

    LOG.debug("Starting...");
    final String fileName = "FlumeData";
    final long batchSize = 2;
    String newPath = testPath + "/singleBucket";
    int i = 1, j = 1;

    HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
    sink = new HDFSEventSink(badWriterFactory);

    // clear the test directory
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dirPath = new Path(newPath);
    fs.delete(dirPath, true);
    fs.mkdirs(dirPath);

    Context context = new Context();

    context.put("hdfs.path", newPath);
    context.put("hdfs.filePrefix", fileName);
    context.put("hdfs.rollCount", String.valueOf(0));
    context.put("hdfs.rollSize", String.valueOf(0));
    context.put("hdfs.rollInterval", String.valueOf(1));
    context.put("hdfs.batchSize", String.valueOf(batchSize));
    context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
    String expectedLookupPath = newPath + "/FlumeData";

    Configurables.configure(sink, context);

    MemoryChannel channel = new MemoryChannel();
    Configurables.configure(channel, new Context());

    sink.setChannel(channel);
    sink.start();

    Calendar eventDate = Calendar.getInstance();
    List<String> bodies = Lists.newArrayList();
    // push the event batches into channel
    channel.getTransaction().begin();
    try {
        for (j = 1; j <= 2 * batchSize; j++) {
            Event event = new SimpleEvent();
            eventDate.clear();
            eventDate.set(2011, i, i, i, 0); // year, month (0-based), day, hour, minute
            event.getHeaders().put("timestamp", String.valueOf(eventDate.getTimeInMillis()));
            event.getHeaders().put("hostname", "Host" + i);
            String body = "Test." + i + "." + j;
            event.setBody(body.getBytes());
            bodies.add(body);
            // inject fault
            event.getHeaders().put("count-check", "");
            channel.put(event);
        }
        channel.getTransaction().commit();
    } finally {
        channel.getTransaction().close();
    }
    LOG.info("execute sink to process the events: " + sink.process());
    Assert.assertTrue(sink.getSfWriters().containsKey(expectedLookupPath));
    // Make sure the first file gets rolled due to rollTimeout.
    Thread.sleep(2001);
    Assert.assertFalse(sink.getSfWriters().containsKey(expectedLookupPath));
    LOG.info("execute sink to process the events: " + sink.process());
    // A new bucket writer should have been created for this bucket. So
    // sfWriters map should not have the same key again.
    Assert.assertTrue(sink.getSfWriters().containsKey(expectedLookupPath));
    sink.stop();

    LOG.info("Total number of bucket writers opened: {}", badWriterFactory.openCount.get());
    verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}

From source file:org.kuali.ole.coa.document.validation.impl.AccountRuleTest.java

@Test
public void testCheckAccountExpirationDateTodayOrEarlier_FutureDate() {

    MaintenanceDocument maintDoc = newMaintDoc(newAccount);
    AccountRule rule = (AccountRule) setupMaintDocRule(maintDoc, AccountRule.class);
    boolean result;
    Calendar testCalendar;
    Date testTimestamp;

    // get an arbitrarily late date - fail
    testCalendar = Calendar.getInstance();
    testCalendar.clear();
    testCalendar.set(2100, 1, 1);
    testTimestamp = new Date(testCalendar.getTimeInMillis());

    // future expiration date - should fail the today-or-earlier check
    newAccount.setAccountExpirationDate(testTimestamp);
    result = rule.checkAccountExpirationDateValidTodayOrEarlier(newAccount);
    assertEquals("Arbitrarily late date should pass.", false, result);
    assertFieldErrorExists("accountExpirationDate",
            OLEKeyConstants.ERROR_DOCUMENT_ACCMAINT_ACCT_CANNOT_BE_CLOSED_EXP_DATE_INVALID);
    assertGlobalMessageMapSize(1);

}

From source file:org.apache.flume.sink.customhdfs.TestHDFSEventSink.java

@Test
public void testSlowAppendFailure()
        throws InterruptedException, LifecycleException, EventDeliveryException, IOException {

    LOG.debug("Starting...");
    final String fileName = "FlumeData";
    final long rollCount = 5;
    final long batchSize = 2;
    final int numBatches = 2;
    String newPath = testPath + "/singleBucket";
    int i = 1, j = 1;

    // clear the test directory
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dirPath = new Path(newPath);
    fs.delete(dirPath, true);
    fs.mkdirs(dirPath);

    // create HDFS sink with slow writer
    HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
    sink = new HDFSEventSink(badWriterFactory);

    Context context = new Context();
    context.put("hdfs.path", newPath);
    context.put("hdfs.filePrefix", fileName);
    context.put("hdfs.rollCount", String.valueOf(rollCount));
    context.put("hdfs.batchSize", String.valueOf(batchSize));
    context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
    context.put("hdfs.callTimeout", Long.toString(1000));
    Configurables.configure(sink, context);

    Channel channel = new MemoryChannel();
    Configurables.configure(channel, context);

    sink.setChannel(channel);
    sink.start();

    Calendar eventDate = Calendar.getInstance();

    // push the event batches into channel
    for (i = 0; i < numBatches; i++) {
        Transaction txn = channel.getTransaction();
        txn.begin();
        for (j = 1; j <= batchSize; j++) {
            Event event = new SimpleEvent();
            eventDate.clear();
            eventDate.set(2011, i, i, i, 0); // year, month (0-based), day, hour, minute
            event.getHeaders().put("timestamp", String.valueOf(eventDate.getTimeInMillis()));
            event.getHeaders().put("hostname", "Host" + i);
            event.getHeaders().put("slow", "1500");
            event.setBody(("Test." + i + "." + j).getBytes());
            channel.put(event);
        }
        txn.commit();
        txn.close();

        // execute sink to process the events
        Status status = sink.process();

        // verify that the append returned backoff due to timeout
        Assert.assertEquals(status, Status.BACKOFF);
    }

    sink.stop();
}

From source file:org.apache.flume.sink.customhdfs.TestHDFSEventSink.java

@Test
public void testAppend() throws InterruptedException, LifecycleException, EventDeliveryException, IOException {

    LOG.debug("Starting...");
    final long rollCount = 3;
    final long batchSize = 2;
    final String fileName = "FlumeData";

    // clear the test directory
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dirPath = new Path(testPath);
    fs.delete(dirPath, true);
    fs.mkdirs(dirPath);

    Context context = new Context();

    context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
    context.put("hdfs.timeZone", "UTC");
    context.put("hdfs.filePrefix", fileName);
    context.put("hdfs.rollCount", String.valueOf(rollCount));
    context.put("hdfs.batchSize", String.valueOf(batchSize));

    Configurables.configure(sink, context);

    Channel channel = new MemoryChannel();
    Configurables.configure(channel, context);

    sink.setChannel(channel);
    sink.start();

    Calendar eventDate = Calendar.getInstance();
    List<String> bodies = Lists.newArrayList();
    // push the event batches into channel
    for (int i = 1; i < 4; i++) {
        Transaction txn = channel.getTransaction();
        txn.begin();
        for (int j = 1; j <= batchSize; j++) {
            Event event = new SimpleEvent();
            eventDate.clear();
            eventDate.set(2011, i, i, i, 0); // year, month (0-based), day, hour, minute
            event.getHeaders().put("timestamp", String.valueOf(eventDate.getTimeInMillis()));
            event.getHeaders().put("hostname", "Host" + i);
            String body = "Test." + i + "." + j;
            event.setBody(body.getBytes());
            bodies.add(body);
            channel.put(event);
        }
        txn.commit();
        txn.close();

        // execute sink to process the events
        sink.process();
    }

    sink.stop();
    verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}

From source file:org.apache.flume.sink.customhdfs.TestHDFSEventSink.java

/**
 * Ensure that when a write throws an IOException we are
 * able to continue to progress in the next process() call.
 * This relies on Transactional rollback semantics for durability and
 * the behavior of the BucketWriter class of close()ing upon IOException.
 */
@Test
public void testCloseReopen()
        throws InterruptedException, LifecycleException, EventDeliveryException, IOException {

    LOG.debug("Starting...");
    final int numBatches = 4;
    final String fileName = "FlumeData";
    final long rollCount = 5;
    final long batchSize = 2;
    String newPath = testPath + "/singleBucket";
    int i = 1, j = 1;

    HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
    sink = new HDFSEventSink(badWriterFactory);

    // clear the test directory
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dirPath = new Path(newPath);
    fs.delete(dirPath, true);
    fs.mkdirs(dirPath);

    Context context = new Context();

    context.put("hdfs.path", newPath);
    context.put("hdfs.filePrefix", fileName);
    context.put("hdfs.rollCount", String.valueOf(rollCount));
    context.put("hdfs.batchSize", String.valueOf(batchSize));
    context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);

    Configurables.configure(sink, context);

    MemoryChannel channel = new MemoryChannel();
    Configurables.configure(channel, new Context());

    sink.setChannel(channel);
    sink.start();

    Calendar eventDate = Calendar.getInstance();
    List<String> bodies = Lists.newArrayList();
    // push the event batches into channel
    for (i = 1; i < numBatches; i++) {
        channel.getTransaction().begin();
        try {
            for (j = 1; j <= batchSize; j++) {
                Event event = new SimpleEvent();
                eventDate.clear();
                eventDate.set(2011, i, i, i, 0); // year, month (0-based), day, hour, minute
                event.getHeaders().put("timestamp", String.valueOf(eventDate.getTimeInMillis()));
                event.getHeaders().put("hostname", "Host" + i);
                String body = "Test." + i + "." + j;
                event.setBody(body.getBytes());
                bodies.add(body);
                // inject fault
                event.getHeaders().put("fault-until-reopen", "");
                channel.put(event);
            }
            channel.getTransaction().commit();
        } finally {
            channel.getTransaction().close();
        }
        LOG.info("execute sink to process the events: " + sink.process());
    }
    LOG.info("clear any events pending due to errors: " + sink.process());
    sink.stop();

    verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}

From source file:org.apache.flume.sink.customhdfs.TestHDFSEventSink.java

@Test
public void testTextAppend()
        throws InterruptedException, LifecycleException, EventDeliveryException, IOException {

    LOG.debug("Starting...");
    final long rollCount = 3;
    final long batchSize = 2;
    final String fileName = "FlumeData";
    String newPath = testPath + "/singleTextBucket";
    int totalEvents = 0;
    int i = 1, j = 1;

    // clear the test directory
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dirPath = new Path(newPath);
    fs.delete(dirPath, true);
    fs.mkdirs(dirPath);

    Context context = new Context();

    // context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
    context.put("hdfs.path", newPath);
    context.put("hdfs.filePrefix", fileName);
    context.put("hdfs.rollCount", String.valueOf(rollCount));
    context.put("hdfs.batchSize", String.valueOf(batchSize));
    context.put("hdfs.writeFormat", "Text");
    context.put("hdfs.fileType", "DataStream");

    Configurables.configure(sink, context);

    Channel channel = new MemoryChannel();
    Configurables.configure(channel, context);

    sink.setChannel(channel);
    sink.start();

    Calendar eventDate = Calendar.getInstance();
    List<String> bodies = Lists.newArrayList();

    // push the event batches into channel
    for (i = 1; i < 4; i++) {
        Transaction txn = channel.getTransaction();
        txn.begin();
        for (j = 1; j <= batchSize; j++) {
            Event event = new SimpleEvent();
            eventDate.clear();
            eventDate.set(2011, i, i, i, 0); // year, month (0-based), day, hour, minute
            event.getHeaders().put("timestamp", String.valueOf(eventDate.getTimeInMillis()));
            event.getHeaders().put("hostname", "Host" + i);
            String body = "Test." + i + "." + j;
            event.setBody(body.getBytes());
            bodies.add(body);
            channel.put(event);
            totalEvents++;
        }
        txn.commit();
        txn.close();

        // execute sink to process the events
        sink.process();
    }

    sink.stop();

    // loop through all the files generated and check their contents
    FileStatus[] dirStat = fs.listStatus(dirPath);
    Path[] fList = FileUtil.stat2Paths(dirStat);

    // check that the roll happened correctly for the given data
    long expectedFiles = totalEvents / rollCount;
    if (totalEvents % rollCount > 0)
        expectedFiles++;
    Assert.assertEquals("num files wrong, found: " + Lists.newArrayList(fList), expectedFiles, fList.length);
    verifyOutputTextFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}

From source file:org.apache.flume.sink.customhdfs.TestHDFSEventSink.java

@Test
public void testAvroAppend()
        throws InterruptedException, LifecycleException, EventDeliveryException, IOException {

    LOG.debug("Starting...");
    final long rollCount = 3;
    final long batchSize = 2;
    final String fileName = "FlumeData";
    String newPath = testPath + "/singleTextBucket";
    int totalEvents = 0;
    int i = 1, j = 1;

    // clear the test directory
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dirPath = new Path(newPath);
    fs.delete(dirPath, true);
    fs.mkdirs(dirPath);

    Context context = new Context();

    // context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
    context.put("hdfs.path", newPath);
    context.put("hdfs.filePrefix", fileName);
    context.put("hdfs.rollCount", String.valueOf(rollCount));
    context.put("hdfs.batchSize", String.valueOf(batchSize));
    context.put("hdfs.writeFormat", "Text");
    context.put("hdfs.fileType", "DataStream");
    context.put("serializer", "AVRO_EVENT");

    Configurables.configure(sink, context);

    Channel channel = new MemoryChannel();
    Configurables.configure(channel, context);

    sink.setChannel(channel);
    sink.start();

    Calendar eventDate = Calendar.getInstance();
    List<String> bodies = Lists.newArrayList();

    // push the event batches into channel
    for (i = 1; i < 4; i++) {
        Transaction txn = channel.getTransaction();
        txn.begin();
        for (j = 1; j <= batchSize; j++) {
            Event event = new SimpleEvent();
            eventDate.clear();
            eventDate.set(2011, i, i, i, 0); // year, month (0-based), day, hour, minute
            event.getHeaders().put("timestamp", String.valueOf(eventDate.getTimeInMillis()));
            event.getHeaders().put("hostname", "Host" + i);
            String body = "Test." + i + "." + j;
            event.setBody(body.getBytes());
            bodies.add(body);
            channel.put(event);
            totalEvents++;
        }
        txn.commit();
        txn.close();

        // execute sink to process the events
        sink.process();
    }

    sink.stop();

    // loop through all the files generated and check their contents
    FileStatus[] dirStat = fs.listStatus(dirPath);
    Path[] fList = FileUtil.stat2Paths(dirStat);

    // check that the roll happened correctly for the given data
    long expectedFiles = totalEvents / rollCount;
    if (totalEvents % rollCount > 0)
        expectedFiles++;
    Assert.assertEquals("num files wrong, found: " + Lists.newArrayList(fList), expectedFiles, fList.length);
    verifyOutputAvroFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}

From source file:org.apache.flume.sink.customhdfs.TestHDFSEventSink.java

/**
 * Test that the old bucket writer is closed at the end of rollInterval and
 * a new one is used for the next set of events.
 */
@Test
public void testCloseReopenOnRollTime()
        throws InterruptedException, LifecycleException, EventDeliveryException, IOException {

    LOG.debug("Starting...");
    final int numBatches = 4;
    final String fileName = "FlumeData";
    final long batchSize = 2;
    String newPath = testPath + "/singleBucket";
    int i = 1, j = 1;

    HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
    sink = new HDFSEventSink(badWriterFactory);

    // clear the test directory
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dirPath = new Path(newPath);
    fs.delete(dirPath, true);
    fs.mkdirs(dirPath);

    Context context = new Context();

    context.put("hdfs.path", newPath);
    context.put("hdfs.filePrefix", fileName);
    context.put("hdfs.rollCount", String.valueOf(0));
    context.put("hdfs.rollSize", String.valueOf(0));
    context.put("hdfs.rollInterval", String.valueOf(2));
    context.put("hdfs.batchSize", String.valueOf(batchSize));
    context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);

    Configurables.configure(sink, context);

    MemoryChannel channel = new MemoryChannel();
    Configurables.configure(channel, new Context());

    sink.setChannel(channel);
    sink.start();

    Calendar eventDate = Calendar.getInstance();
    List<String> bodies = Lists.newArrayList();
    // push the event batches into channel
    for (i = 1; i < numBatches; i++) {
        channel.getTransaction().begin();
        try {
            for (j = 1; j <= batchSize; j++) {
                Event event = new SimpleEvent();
                eventDate.clear();
                eventDate.set(2011, i, i, i, 0); // year, month (0-based), day, hour, minute
                event.getHeaders().put("timestamp", String.valueOf(eventDate.getTimeInMillis()));
                event.getHeaders().put("hostname", "Host" + i);
                String body = "Test." + i + "." + j;
                event.setBody(body.getBytes());
                bodies.add(body);
                // inject fault
                event.getHeaders().put("count-check", "");
                channel.put(event);
            }
            channel.getTransaction().commit();
        } finally {
            channel.getTransaction().close();
        }
        LOG.info("execute sink to process the events: " + sink.process());
        // Make sure the first file gets rolled due to rollTimeout.
        if (i == 1) {
            Thread.sleep(2001);
        }
    }
    LOG.info("clear any events pending due to errors: " + sink.process());
    sink.stop();

    Assert.assertTrue(badWriterFactory.openCount.get() >= 2);
    LOG.info("Total number of bucket writers opened: {}", badWriterFactory.openCount.get());
    verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}