Java tutorial: setting file access and modification times in HDFS (org.apache.hadoop.hdfs.TestSetTimes)
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import junit.framework.TestCase;
import java.io.*;
import java.util.Random;
import java.net.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileStatus;
import java.text.SimpleDateFormat;
import java.util.Date;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * This class tests the access time on files.
 */
public class TestSetTimes extends TestCase {
  public static final Log LOG = LogFactory.getLog(TestSetTimes.class);

  static final long seed = 0xDEADBEEFL;
  static final int blockSize = 8192;
  static final int fileSize = 16384;
  static final int numDatanodes = 1;

  static final SimpleDateFormat dateForm =
      new SimpleDateFormat("yyyy-MM-dd HH:mm");

  Random myrand = new Random();
  Path hostsFile;
  Path excludeFile;

  private FSDataOutputStream writeFile(FileSystem fileSys, Path name, int repl)
      throws IOException {
    FSDataOutputStream stm = fileSys.create(name, true,
        fileSys.getConf().getInt("io.file.buffer.size", 4096),
        (short) repl, (long) blockSize);
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    return stm;
  }

  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
    assertTrue(fileSys.exists(name));
    fileSys.delete(name, true);
    assertTrue(!fileSys.exists(name));
  }

  private void printDatanodeReport(DatanodeInfo[] info) {
    LOG.info("-------------------------------------------------");
    for (int i = 0; i < info.length; i++) {
      LOG.info(info[i].getDatanodeReport());
    }
  }

  /**
   * Tests mod & access time in DFS.
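   * Creates a file, records its atime/mtime, moves each timestamp into the
   * past with FileSystem#setTimes, and verifies both values survive a
   * cluster restart.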
   */
  public void testTimes() throws IOException {
    Configuration conf = new HdfsConfiguration();
    final int MAX_IDLE_TIME = 2000; // 2s
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    final int nnport = cluster.getNameNodePort();
    InetSocketAddress addr = new InetSocketAddress("localhost",
                                                   cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", numDatanodes, info.length);
    FileSystem fileSys = cluster.getFileSystem();
    int replicas = 1;
    assertTrue(fileSys instanceof DistributedFileSystem);

    try {
      //
      // create file and record atime/mtime
      //
      LOG.info("Creating testdir1 and testdir1/test1.dat.");
      Path dir1 = new Path("testdir1");
      Path file1 = new Path(dir1, "test1.dat");
      FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
      FileStatus stat = fileSys.getFileStatus(file1);
      long atimeBeforeClose = stat.getAccessTime();
      String adate = dateForm.format(new Date(atimeBeforeClose));
      LOG.info("atime on " + file1 + " before close is " +
               adate + " (" + atimeBeforeClose + ")");
      assertTrue(atimeBeforeClose != 0);
      stm.close();

      stat = fileSys.getFileStatus(file1);
      long atime1 = stat.getAccessTime();
      long mtime1 = stat.getModificationTime();
      adate = dateForm.format(new Date(atime1));
      String mdate = dateForm.format(new Date(mtime1));
      LOG.info("atime on " + file1 + " is " + adate + " (" + atime1 + ")");
      LOG.info("mtime on " + file1 + " is " + mdate + " (" + mtime1 + ")");
      assertTrue(atime1 != 0);

      //
      // record dir times
      //
      stat = fileSys.getFileStatus(dir1);
      long mdir1 = stat.getAccessTime();
      assertTrue(mdir1 == 0);

      // set the access time to be one day in the past
      long atime2 = atime1 - (24L * 3600L * 1000L);
      fileSys.setTimes(file1, -1, atime2);

      // check new access time on file
      stat = fileSys.getFileStatus(file1);
      long atime3 = stat.getAccessTime();
      String adate3 = dateForm.format(new Date(atime3));
      LOG.info("new atime on " + file1 + " is " +
               adate3 + " (" + atime3 + ")");
      assertTrue(atime2 == atime3);
      assertTrue(mtime1 == stat.getModificationTime());

      // set the modification time to be 1 hour in the past
      long mtime2 = mtime1 - (3600L * 1000L);
      fileSys.setTimes(file1, mtime2, -1);

      // check new modification time on file
      stat = fileSys.getFileStatus(file1);
      long mtime3 = stat.getModificationTime();
      String mdate3 = dateForm.format(new Date(mtime3));
      LOG.info("new mtime on " + file1 + " is " +
               mdate3 + " (" + mtime3 + ")");
      assertTrue(atime2 == stat.getAccessTime());
      assertTrue(mtime2 == mtime3);

      // shutdown cluster and restart
      cluster.shutdown();
      try {
        Thread.sleep(2 * MAX_IDLE_TIME);
      } catch (InterruptedException e) {
      }
      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
                                                .format(false)
                                                .build();
      cluster.waitActive();
      fileSys = cluster.getFileSystem();

      // verify that access times and modification times persist after a
      // cluster restart.
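      // (The NameNode records setTimes operations in its edit log, so the
      // values set above are replayed when the restarted NameNode reloads
      // its namespace.)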
LOG.info("Verifying times after cluster restart"); stat = fileSys.getFileStatus(file1); assertTrue(atime2 == stat.getAccessTime()); assertTrue(mtime3 == stat.getModificationTime()); cleanupFile(fileSys, file1); cleanupFile(fileSys, dir1); } catch (IOException e) { info = client.datanodeReport(DatanodeReportType.ALL); printDatanodeReport(info); throw e; } finally { fileSys.close(); cluster.shutdown(); } } /** * Tests mod time change at close in DFS. */ public void testTimesAtClose() throws IOException { Configuration conf = new HdfsConfiguration(); final int MAX_IDLE_TIME = 2000; // 2s int replicas = 1; // parameter initialization conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME); conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build(); cluster.waitActive(); InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort()); DFSClient client = new DFSClient(addr, conf); DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE); assertEquals("Number of Datanodes ", numDatanodes, info.length); FileSystem fileSys = cluster.getFileSystem(); assertTrue(fileSys instanceof DistributedFileSystem); try { // create a new file and write to it Path file1 = new Path("/simple.dat"); FSDataOutputStream stm = writeFile(fileSys, file1, replicas); LOG.info("Created and wrote file simple.dat"); FileStatus statBeforeClose = fileSys.getFileStatus(file1); long mtimeBeforeClose = statBeforeClose.getModificationTime(); String mdateBeforeClose = dateForm.format(new Date(mtimeBeforeClose)); LOG.info("mtime on " + file1 + " before close is " + mdateBeforeClose + " (" + mtimeBeforeClose + ")"); assertTrue(mtimeBeforeClose != 0); //close file after writing stm.close(); LOG.info("Closed file."); FileStatus statAfterClose = fileSys.getFileStatus(file1); long mtimeAfterClose = statAfterClose.getModificationTime(); String mdateAfterClose = dateForm.format(new Date(mtimeAfterClose)); LOG.info("mtime on " + file1 + " after close is " + mdateAfterClose + " (" + mtimeAfterClose + ")"); assertTrue(mtimeAfterClose != 0); assertTrue(mtimeBeforeClose != mtimeAfterClose); cleanupFile(fileSys, file1); } catch (IOException e) { info = client.datanodeReport(DatanodeReportType.ALL); printDatanodeReport(info); throw e; } finally { fileSys.close(); cluster.shutdown(); } } public static void main(String[] args) throws Exception { new TestSetTimes().testTimes(); } }