List of usage examples for java.util.Random.nextBytes
public void nextBytes(byte[] bytes)
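Before the longer examples below, here is a minimal, self-contained sketch of the call itself (the buffer sizes and the class name NextBytesDemo are illustrative assumptions, not taken from any source file on this page): nextBytes fills every element of the supplied array with pseudorandom bytes, so the array length controls how much random data is produced.

import java.security.SecureRandom;
import java.util.Random;

public class NextBytesDemo {
    public static void main(String[] args) {
        // Fill a 16-byte buffer with pseudorandom bytes from a seedable Random.
        byte[] buffer = new byte[16];
        Random random = new Random();
        random.nextBytes(buffer);

        // SecureRandom is a subclass of Random, so the same call works when
        // cryptographically strong bytes are required (tokens, passwords).
        byte[] token = new byte[32];
        new SecureRandom().nextBytes(token);
        System.out.println("filled " + buffer.length + " + " + token.length + " random bytes");
    }
}

The examples that follow use the same call for test-data generation, transport usernames, deliberately corrupting on-disk blocks in tests, and access-token and password generation.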
From source file:io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java
/** * Tests the flush() method only with Append operations. * Verifies both length-based and time-based flush triggers, as well as flushing rather large operations. */ @Test public void testFlushAppend() throws Exception { final WriterConfig config = DEFAULT_CONFIG; final int appendCount = config.getFlushThresholdBytes() * 10; @Cleanup TestContext context = new TestContext(config); context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join(); context.segmentAggregator.initialize(TIMEOUT, executorService()).join(); @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream(); AtomicLong outstandingSize = new AtomicLong(); // Number of bytes remaining to be flushed. SequenceNumberCalculator sequenceNumbers = new SequenceNumberCalculator(context, outstandingSize); // Part 1: flush triggered by accumulated size. for (int i = 0; i < appendCount; i++) { // Add another operation and record its length. StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context); outstandingSize.addAndGet(appendOp.getLength()); context.segmentAggregator.add(appendOp); getAppendData(appendOp, writtenData, context); sequenceNumbers.record(appendOp); boolean expectFlush = outstandingSize.get() >= config.getFlushThresholdBytes(); Assert.assertEquals("Unexpected value returned by mustFlush() (size threshold).", expectFlush, context.segmentAggregator.mustFlush()); Assert.assertEquals( "Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (size threshold).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber()); // Call flush() and inspect the result. FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join(); if (expectFlush) { AssertExtensions.assertGreaterThanOrEqual("Not enough bytes were flushed (size threshold).", config.getFlushThresholdBytes(), flushResult.getFlushedBytes()); outstandingSize.addAndGet(-flushResult.getFlushedBytes()); Assert.assertEquals( "Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (size threshold).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber()); } else { Assert.assertEquals(String.format("Not expecting a flush. OutstandingSize=%s, Threshold=%d", outstandingSize, config.getFlushThresholdBytes()), 0, flushResult.getFlushedBytes()); } Assert.assertFalse("Unexpected value returned by mustFlush() after flush (size threshold).", context.segmentAggregator.mustFlush()); Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes()); } // Part 2: flush triggered by time. for (int i = 0; i < appendCount; i++) { // Add another operation and record its length. StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context); outstandingSize.addAndGet(appendOp.getLength()); context.segmentAggregator.add(appendOp); getAppendData(appendOp, writtenData, context); sequenceNumbers.record(appendOp); // Call flush() and inspect the result. context.increaseTime(config.getFlushThresholdTime().toMillis() + 1); // Force a flush by incrementing the time by a lot.
Assert.assertTrue("Unexpected value returned by mustFlush() (time threshold).", context.segmentAggregator.mustFlush()); Assert.assertEquals( "Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (time threshold).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber()); FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join(); // We are always expecting a flush. AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0, flushResult.getFlushedBytes()); outstandingSize.addAndGet(-flushResult.getFlushedBytes()); Assert.assertFalse("Unexpected value returned by mustFlush() after flush (time threshold).", context.segmentAggregator.mustFlush()); Assert.assertEquals( "Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (time threshold).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber()); Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes()); } // Part 3: Transaction appends. This will force an internal loop inside flush() to do so repeatedly. final int transactionSize = 100; for (int i = 0; i < appendCount / 10; i++) { for (int j = 0; j < transactionSize; j++) { // Add another operation and record its length. StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context); outstandingSize.addAndGet(appendOp.getLength()); context.segmentAggregator.add(appendOp); getAppendData(appendOp, writtenData, context); sequenceNumbers.record(appendOp); Assert.assertEquals( "Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (Transaction appends).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber()); } // Call flush() and inspect the result. Assert.assertTrue("Unexpected value returned by mustFlush() (Transaction appends).", context.segmentAggregator.mustFlush()); FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join(); // We are always expecting a flush. AssertExtensions.assertGreaterThan("Not enough bytes were flushed (Transaction appends).", 0, flushResult.getFlushedBytes()); outstandingSize.addAndGet(-flushResult.getFlushedBytes()); Assert.assertFalse("Unexpected value returned by mustFlush() after flush (Transaction appends).", context.segmentAggregator.mustFlush()); Assert.assertEquals( "Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (Transaction appends).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber()); Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes()); } // Part 4: large appends (larger than MaxFlushSize). Random random = new Random(); for (int i = 0; i < appendCount; i++) { // Add another operation and record its length. byte[] largeAppendData = new byte[config.getMaxFlushSizeBytes() * 10 + 1]; random.nextBytes(largeAppendData); StorageOperation appendOp = generateAppendAndUpdateMetadata(SEGMENT_ID, largeAppendData, context); outstandingSize.addAndGet(appendOp.getLength()); context.segmentAggregator.add(appendOp); getAppendData(appendOp, writtenData, context); sequenceNumbers.record(appendOp); // Call flush() and inspect the result. context.increaseTime(config.getFlushThresholdTime().toMillis() + 1); // Force a flush by incrementing the time by a lot. 
Assert.assertTrue("Unexpected value returned by mustFlush() (large appends).", context.segmentAggregator.mustFlush()); Assert.assertEquals( "Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (large appends).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber()); FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join(); // We are always expecting a flush. AssertExtensions.assertGreaterThan("Not enough bytes were flushed (large appends).", 0, flushResult.getFlushedBytes()); outstandingSize.addAndGet(-flushResult.getFlushedBytes()); Assert.assertFalse("Unexpected value returned by mustFlush() after flush (time threshold).", context.segmentAggregator.mustFlush()); Assert.assertEquals( "Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (large appends).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber()); Assert.assertEquals("Not expecting any merged bytes in this test (large appends).", 0, flushResult.getMergedBytes()); } // Verify data. Assert.assertEquals("Not expecting leftover data not flushed.", 0, outstandingSize.get()); byte[] expectedData = writtenData.toByteArray(); byte[] actualData = new byte[expectedData.length]; long storageLength = context.storage .getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join() .getLength(); Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength); context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join(); Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData); }
From source file:ch.zhaw.ficore.p2abc.services.verification.VerificationService.java
private String generateAccessToken() { Random rand = new SecureRandom(); String prefix = "" + rand.nextInt() + "-"; byte[] bytes = new byte[16]; rand.nextBytes(bytes); return prefix + DigestUtils.sha1Hex(bytes); }
From source file:com.voxbone.kelpie.Session.java
private boolean sendTransportInfo(CallSession callSession) { Packet p; StreamElement session; StreamElement transport; p = conn.getDataFactory().createPacketNode(new NSI("iq", "jabber:server"), Packet.class); p.setFrom(callSession.jabberLocal); p.setTo(callSession.jabberRemote); p.setID(Long.toString(++this.idNum)); p.setAttributeValue("type", "set"); Random r = new Random(); byte[] bytes = new byte[4]; r.nextBytes(bytes); callSession.candidateUser = String.format("%02x%02x%02x%02x", bytes[0], bytes[1], bytes[2], bytes[3]); // Jingle Candidate Info if (clientJingle) { StreamElement jin = p.addElement(new NSI("jingle", "urn:xmpp:jingle:1")); jin.setAttributeValue("action", "transport-info"); jin.setAttributeValue("initiator", callSession.jabberInitiator); jin.setAttributeValue("sid", callSession.jabberSessionId); StreamElement content = jin.addElement("content"); content.setAttributeValue("name", "audio"); content.setAttributeValue("creator", "initiator"); StreamElement jin_transport = content .addElement(new NSI("transport", "http://www.google.com/transport/p2p")); StreamElement jin_candidate = jin_transport.addElement("candidate"); jin_candidate.setAttributeValue("name", "rtp"); jin_candidate.setAttributeValue("address", SipService.getLocalIP()); jin_candidate.setAttributeValue("port", Integer.toString(callSession.relay.getJabberPort())); jin_candidate.setAttributeValue("preference", "1"); jin_candidate.setAttributeValue("username", callSession.candidateUser); jin_candidate.setAttributeValue("password", callSession.candidateUser); jin_candidate.setAttributeValue("protocol", "udp"); jin_candidate.setAttributeValue("generation", "0"); jin_candidate.setAttributeValue("type", "local"); jin_candidate.setAttributeValue("network", "0"); } // Gingle Candidate Info session = p.addElement(new NSI("session", "http://www.google.com/session")); session.setAttributeValue("type", "transport-info"); session.setID(callSession.jabberSessionId); session.setAttributeValue("initiator", callSession.jabberInitiator); transport = session.addElement(new NSI("transport", "http://www.google.com/transport/p2p")); StreamElement candidate = transport.addElement("candidate"); candidate.setAttributeValue("name", "rtp"); candidate.setAttributeValue("address", SipService.getLocalIP()); candidate.setAttributeValue("port", Integer.toString(callSession.relay.getJabberPort())); candidate.setAttributeValue("preference", "1"); candidate.setAttributeValue("username", callSession.candidateUser); candidate.setAttributeValue("password", callSession.candidateUser); candidate.setAttributeValue("protocol", "udp"); candidate.setAttributeValue("generation", "0"); candidate.setAttributeValue("type", "local"); candidate.setAttributeValue("network", "0"); try { sendPacket(p); } catch (StreamException e) { logger.error("[[" + internalCallId + "]] Error while sending TransportInfo", e); return false; } return true; }
From source file:com.voxbone.kelpie.Session.java
private boolean sendTransportCandidates(CallSession callSession, StreamType type) { Packet p; StreamElement session; Random r = new Random(); byte[] bytes = new byte[4]; r.nextBytes(bytes); if (type == StreamType.RTP || type == StreamType.RTCP) { if (callSession.candidateUser == null) { callSession.candidateUser = String.format("%02x%02x%02x%02x", bytes[0], bytes[1], bytes[2], bytes[3]); } p = conn.getDataFactory().createPacketNode(new NSI("iq", "jabber:server"), Packet.class); p.setFrom(callSession.jabberLocal); p.setTo(callSession.jabberRemote); p.setID(Long.toString(++this.idNum)); p.setAttributeValue("type", "set"); // Jingle Candidate Transport if (clientJingle) { StreamElement jin = p.addElement(new NSI("jingle", "urn:xmpp:jingle:1")); jin.setAttributeValue("action", "transport-info"); jin.setAttributeValue("initiator", callSession.jabberInitiator); jin.setAttributeValue("sid", callSession.jabberSessionId); StreamElement content = jin.addElement("content"); content.setAttributeValue("name", "audio"); content.setAttributeValue("creator", "initiator"); StreamElement jin_transport = content .addElement(new NSI("transport", "http://www.google.com/transport/p2p")); StreamElement jin_candidate = jin_transport.addElement("candidate"); if (type == StreamType.RTP) { jin_candidate.setAttributeValue("name", "rtp"); jin_candidate.setAttributeValue("address", SipService.getLocalIP()); jin_candidate.setAttributeValue("port", Integer.toString(callSession.relay.getJabberPort())); } else { jin_candidate.setAttributeValue("name", "rtcp"); jin_candidate.setAttributeValue("address", SipService.getLocalIP()); jin_candidate.setAttributeValue("port", Integer.toString(callSession.relay.getJabberRtcpPort())); } jin_candidate.setAttributeValue("preference", "1"); jin_candidate.setAttributeValue("username", callSession.candidateUser); jin_candidate.setAttributeValue("password", callSession.candidateUser); jin_candidate.setAttributeValue("protocol", "udp"); jin_candidate.setAttributeValue("generation", "0"); jin_candidate.setAttributeValue("type", "local"); jin_candidate.setAttributeValue("network", "0"); } // Gingle Candidate Transport session = p.addElement(new NSI("session", "http://www.google.com/session")); session.setAttributeValue("type", "candidates"); session.setID(callSession.jabberSessionId); session.setAttributeValue("initiator", callSession.jabberInitiator); StreamElement candidate = session.addElement("candidate"); if (type == StreamType.RTP) { candidate.setAttributeValue("name", "rtp"); candidate.setAttributeValue("address", SipService.getLocalIP()); candidate.setAttributeValue("port", Integer.toString(callSession.relay.getJabberPort())); } else { candidate.setAttributeValue("name", "rtcp"); candidate.setAttributeValue("address", SipService.getLocalIP()); candidate.setAttributeValue("port", Integer.toString(callSession.relay.getJabberRtcpPort())); } candidate.setAttributeValue("preference", "1"); candidate.setAttributeValue("username", callSession.candidateUser); candidate.setAttributeValue("password", callSession.candidateUser); candidate.setAttributeValue("protocol", "udp"); candidate.setAttributeValue("generation", "0"); candidate.setAttributeValue("type", "local"); candidate.setAttributeValue("network", "0"); try { sendPacket(p); } catch (StreamException e) { logger.error("[[" + internalCallId + "]] Error while sending audio TransportCandidates", e); return false; } } else if (type == StreamType.VRTP || type == StreamType.VRTCP) { if (callSession.candidateVUser
== null) { callSession.candidateVUser = String.format("%02x%02x%02x%02x", bytes[0], bytes[1], bytes[2], bytes[3]); } p = conn.getDataFactory().createPacketNode(new NSI("iq", "jabber:server"), Packet.class); p.setFrom(callSession.jabberLocal); p.setTo(callSession.jabberRemote); p.setID(Long.toString(++this.idNum)); p.setAttributeValue("type", "set"); // Jingle Candidate Transport if (clientJingle) { StreamElement jin = p.addElement(new NSI("jingle", "urn:xmpp:jingle:1")); jin.setAttributeValue("action", "transport-info"); jin.setAttributeValue("initiator", callSession.jabberInitiator); jin.setAttributeValue("sid", callSession.jabberSessionId); StreamElement content = jin.addElement("content"); content.setAttributeValue("name", "video"); content.setAttributeValue("creator", "initiator"); StreamElement jin_transport = content .addElement(new NSI("transport", "http://www.google.com/transport/p2p")); StreamElement jin_candidate = jin_transport.addElement("candidate"); if (type == StreamType.VRTP) { jin_candidate.setAttributeValue("name", "video_rtp"); jin_candidate.setAttributeValue("address", SipService.getLocalIP()); jin_candidate.setAttributeValue("port", Integer.toString(callSession.vRelay.getJabberPort())); } else { jin_candidate.setAttributeValue("name", "video_rtcp"); jin_candidate.setAttributeValue("address", SipService.getLocalIP()); jin_candidate.setAttributeValue("port", Integer.toString(callSession.vRelay.getJabberRtcpPort())); } jin_candidate.setAttributeValue("preference", "1"); jin_candidate.setAttributeValue("username", callSession.candidateVUser); jin_candidate.setAttributeValue("password", callSession.candidateVUser); jin_candidate.setAttributeValue("protocol", "udp"); jin_candidate.setAttributeValue("generation", "0"); jin_candidate.setAttributeValue("type", "local"); jin_candidate.setAttributeValue("network", "0"); } // Gingle session = p.addElement(new NSI("session", "http://www.google.com/session")); session.setAttributeValue("type", "candidates"); session.setID(callSession.jabberSessionId); session.setAttributeValue("initiator", callSession.jabberInitiator); StreamElement candidate = session.addElement("candidate"); if (type == StreamType.VRTP) { candidate.setAttributeValue("name", "video_rtp"); candidate.setAttributeValue("address", SipService.getLocalIP()); candidate.setAttributeValue("port", Integer.toString(callSession.vRelay.getJabberPort())); } else { candidate.setAttributeValue("name", "video_rtcp"); candidate.setAttributeValue("address", SipService.getLocalIP()); candidate.setAttributeValue("port", Integer.toString(callSession.vRelay.getJabberRtcpPort())); } candidate.setAttributeValue("preference", "1"); candidate.setAttributeValue("username", callSession.candidateVUser); candidate.setAttributeValue("password", callSession.candidateVUser); candidate.setAttributeValue("protocol", "udp"); candidate.setAttributeValue("generation", "0"); candidate.setAttributeValue("type", "local"); candidate.setAttributeValue("network", "0"); try { sendPacket(p); } catch (StreamException e) { logger.error("[[" + internalCallId + "]] Error while sending video TransportCandidates", e); return false; } } return true; }
From source file:org.apache.flink.table.codegen.SortCodeGeneratorTest.java
private Object[] generateValues(InternalType type) { Random rnd = new Random(); int seedNum = RECORD_NUM / 5; Object[] seeds = new Object[seedNum]; seeds[0] = null; seeds[1] = value1(type, rnd); seeds[2] = value2(type, rnd); seeds[3] = value3(type, rnd); for (int i = 4; i < seeds.length; i++) { if (type.equals(InternalTypes.BOOLEAN)) { seeds[i] = rnd.nextBoolean(); } else if (type.equals(InternalTypes.BYTE)) { seeds[i] = (byte) rnd.nextLong(); } else if (type.equals(InternalTypes.SHORT)) { seeds[i] = (short) rnd.nextLong(); } else if (type.equals(InternalTypes.INT)) { seeds[i] = rnd.nextInt(); } else if (type.equals(InternalTypes.LONG)) { seeds[i] = rnd.nextLong(); } else if (type.equals(InternalTypes.FLOAT)) { seeds[i] = rnd.nextFloat() * rnd.nextLong(); } else if (type.equals(InternalTypes.DOUBLE)) { seeds[i] = rnd.nextDouble() * rnd.nextLong(); } else if (type.equals(InternalTypes.STRING)) { seeds[i] = BinaryString.fromString(RandomStringUtils.random(rnd.nextInt(20))); } else if (type instanceof DecimalType) { DecimalType decimalType = (DecimalType) type; BigDecimal decimal = new BigDecimal(rnd.nextInt()).divide( new BigDecimal(ThreadLocalRandom.current().nextInt(1, 256)), ThreadLocalRandom.current().nextInt(1, 30), BigDecimal.ROUND_HALF_EVEN); seeds[i] = Decimal.fromBigDecimal(decimal, decimalType.precision(), decimalType.scale()); } else if (type instanceof ArrayType || type.equals(InternalTypes.BINARY)) { byte[] bytes = new byte[rnd.nextInt(16) + 1]; rnd.nextBytes(bytes); seeds[i] = type.equals(InternalTypes.BINARY) ? bytes : BinaryArray.fromPrimitiveArray(bytes); } else if (type instanceof RowType) { RowType rowType = (RowType) type; if (rowType.getTypeAt(0).equals(InternalTypes.INT)) { seeds[i] = GenericRow.of(rnd.nextInt()); } else { seeds[i] = GenericRow.of(GenericRow.of(rnd.nextInt())); } } else if (type instanceof GenericType) { seeds[i] = new BinaryGeneric<>(rnd.nextInt(), IntSerializer.INSTANCE); } else { throw new RuntimeException("Not support!"); } } // result values Object[] results = new Object[RECORD_NUM]; for (int i = 0; i < RECORD_NUM; i++) { results[i] = seeds[rnd.nextInt(seedNum)]; } return results; }
From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java
/** * Test delete behavior and metrics for producer * @throws Exception */ public void runAllDeletesTest() throws Exception { final String topic = "alldeletestest"; createTestTopic(topic, 1, 1); final int ELEMENT_COUNT = 300; // ----------- Write some data into Kafka ------------------- StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort); env.setParallelism(1); env.getConfig().setRestartStrategy(RestartStrategies.noRestart()); env.getConfig().disableSysoutLogging(); DataStream<Tuple2<byte[], PojoValue>> kvStream = env .addSource(new SourceFunction<Tuple2<byte[], PojoValue>>() { @Override public void run(SourceContext<Tuple2<byte[], PojoValue>> ctx) throws Exception { Random rnd = new Random(1337); for (long i = 0; i < ELEMENT_COUNT; i++) { final byte[] key = new byte[200]; rnd.nextBytes(key); ctx.collect(new Tuple2<>(key, (PojoValue) null)); } } @Override public void cancel() { } }); TypeInformationKeyValueSerializationSchema<byte[], PojoValue> schema = new TypeInformationKeyValueSerializationSchema<>( byte[].class, PojoValue.class, env.getConfig()); Properties producerProperties = FlinkKafkaProducerBase.getPropertiesFromBrokerList(brokerConnectionStrings); producerProperties.setProperty("retries", "3"); producerProperties.putAll(secureProps); kafkaServer.produceIntoKafka(kvStream, topic, schema, producerProperties, null); env.execute("Write deletes to Kafka"); // ----------- Read the data again ------------------- env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort); env.setParallelism(1); env.getConfig().setRestartStrategy(RestartStrategies.noRestart()); env.getConfig().disableSysoutLogging(); Properties props = new Properties(); props.putAll(standardProps); props.putAll(secureProps); DataStream<Tuple2<byte[], PojoValue>> fromKafka = env .addSource(kafkaServer.getConsumer(topic, schema, props)); fromKafka.flatMap(new RichFlatMapFunction<Tuple2<byte[], PojoValue>, Object>() { long counter = 0; @Override public void flatMap(Tuple2<byte[], PojoValue> value, Collector<Object> out) throws Exception { // ensure that deleted messages are passed as nulls assertNull(value.f1); counter++; if (counter == ELEMENT_COUNT) { // we got the right number of elements throw new SuccessException(); } } }); tryExecute(env, "Read deletes from Kafka"); deleteTestTopic(topic); }
From source file:org.apache.hadoop.hdfs.server.namenode.TestListCorruptFileBlocks.java
/** check if nn.getCorruptFiles() returns a file that has corrupted blocks */ @Test(timeout = 300000) public void testListCorruptFilesCorruptedBlock() throws Exception { MiniDFSCluster cluster = null; Random random = new Random(); try { Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans directories conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports // Set short retry timeouts so this test runs faster conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10); cluster = new MiniDFSCluster.Builder(conf).build(); FileSystem fs = cluster.getFileSystem(); // create two files with one block each DFSTestUtil util = new DFSTestUtil.Builder().setName("testCorruptFilesCorruptedBlock").setNumFiles(2) .setMaxLevels(1).setMaxSize(512).build(); util.createFiles(fs, "/srcdat10"); // fetch bad file list from namenode. There should be none. final NameNode namenode = cluster.getNameNode(); Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.getNamesystem() .listCorruptFileBlocks("/", null); assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting None.", badFiles.size() == 0); // Now deliberately corrupt one block String bpid = cluster.getNamesystem().getBlockPoolId(); File storageDir = cluster.getInstanceStorageDir(0, 1); File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); assertTrue("data directory does not exist", data_dir.exists()); List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir); assertTrue("Data directory does not contain any blocks or there was an " + "IO error", metaFiles != null && !metaFiles.isEmpty()); File metaFile = metaFiles.get(0); RandomAccessFile file = new RandomAccessFile(metaFile, "rw"); FileChannel channel = file.getChannel(); long position = channel.size() - 2; int length = 2; byte[] buffer = new byte[length]; random.nextBytes(buffer); channel.write(ByteBuffer.wrap(buffer), position); file.close(); LOG.info("Deliberately corrupting file " + metaFile.getName() + " at offset " + position + " length " + length); // read all files to trigger detection of corrupted replica try { util.checkFiles(fs, "/srcdat10"); } catch (BlockMissingException e) { System.out.println("Received BlockMissingException as expected."); } catch (IOException e) { assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " + " but received IOException " + e, false); } // fetch bad file list from namenode. There should be one file. badFiles = namenode.getNamesystem().listCorruptFileBlocks("/", null); LOG.info("Namenode has bad files. " + badFiles.size()); assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.", badFiles.size() == 1); util.cleanup(fs, "/srcdat10"); } finally { if (cluster != null) { cluster.shutdown(); } } }
From source file:gr.abiss.calipso.CalipsoServiceImpl.java
/** * this has not been factored into the util package or a helper class * because it depends on the PasswordEncoder configured */ @Override public String generatePassword() { byte[] ab = new byte[1]; Random r = new Random(); r.nextBytes(ab); return passwordEncoder.encodePassword(new String(ab), null).substring(24); }
From source file:org.apache.hadoop.hdfs.server.namenode.TestListCorruptFileBlocks.java
/** * Check that listCorruptFileBlocks works while the namenode is still in safemode. */ @Test(timeout = 300000) public void testListCorruptFileBlocksInSafeMode() throws Exception { MiniDFSCluster cluster = null; Random random = new Random(); try { Configuration conf = new HdfsConfiguration(); // datanode scans directories conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode sends block reports conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // never leave safemode automatically conf.setFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1.5f); // start populating repl queues immediately conf.setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY, 0f); // Set short retry timeouts so this test runs faster conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10); cluster = new MiniDFSCluster.Builder(conf).waitSafeMode(false).build(); cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false); FileSystem fs = cluster.getFileSystem(); // create two files with one block each DFSTestUtil util = new DFSTestUtil.Builder().setName("testListCorruptFileBlocksInSafeMode") .setNumFiles(2).setMaxLevels(1).setMaxSize(512).build(); util.createFiles(fs, "/srcdat10"); // fetch bad file list from namenode. There should be none. Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = cluster.getNameNode().getNamesystem() .listCorruptFileBlocks("/", null); assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting None.", badFiles.size() == 0); // Now deliberately corrupt one block File storageDir = cluster.getInstanceStorageDir(0, 0); File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, cluster.getNamesystem().getBlockPoolId()); assertTrue("data directory does not exist", data_dir.exists()); List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir); assertTrue("Data directory does not contain any blocks or there was an " + "IO error", metaFiles != null && !metaFiles.isEmpty()); File metaFile = metaFiles.get(0); RandomAccessFile file = new RandomAccessFile(metaFile, "rw"); FileChannel channel = file.getChannel(); long position = channel.size() - 2; int length = 2; byte[] buffer = new byte[length]; random.nextBytes(buffer); channel.write(ByteBuffer.wrap(buffer), position); file.close(); LOG.info("Deliberately corrupting file " + metaFile.getName() + " at offset " + position + " length " + length); // read all files to trigger detection of corrupted replica try { util.checkFiles(fs, "/srcdat10"); } catch (BlockMissingException e) { System.out.println("Received BlockMissingException as expected."); } catch (IOException e) { assertTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException " + " but received IOException " + e, false); } // fetch bad file list from namenode. There should be one file. badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null); LOG.info("Namenode has bad files. " + badFiles.size()); assertTrue("Namenode has " + badFiles.size() + " bad files. 
Expecting 1.", badFiles.size() == 1); // restart namenode cluster.restartNameNode(0); fs = cluster.getFileSystem(); // wait until replication queues have been initialized while (!cluster.getNameNode().namesystem.isPopulatingReplQueues()) { try { LOG.info("waiting for replication queues"); Thread.sleep(1000); } catch (InterruptedException ignore) { } } // read all files to trigger detection of corrupted replica try { util.checkFiles(fs, "/srcdat10"); } catch (BlockMissingException e) { System.out.println("Received BlockMissingException as expected."); } catch (IOException e) { assertTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException " + " but received IOException " + e, false); } // fetch bad file list from namenode. There should be one file. badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null); LOG.info("Namenode has bad files. " + badFiles.size()); assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.", badFiles.size() == 1); // check that we are still in safe mode assertTrue("Namenode is not in safe mode", cluster.getNameNode().isInSafeMode()); // now leave safe mode so that we can clean up cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false); util.cleanup(fs, "/srcdat10"); } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); throw e; } finally { if (cluster != null) { cluster.shutdown(); } } }
From source file:org.apache.hadoop.hdfs.TestCrcCorruption.java
/** * check if DFS can handle corrupted CRC blocks */ private void thistest(Configuration conf, DFSTestUtil util) throws Exception { MiniDFSCluster cluster = null; int numDataNodes = 2; short replFactor = 2; Random random = new Random(); try { cluster = new MiniDFSCluster.Builder(conf).numNameNodes(1).numDataNodes(numDataNodes).build(); cluster.waitActive(); FileSystem fs = cluster.getFileSystem(); util.createFiles(fs, "/srcdat", replFactor); util.waitReplication(fs, "/srcdat", (short) 2); // Now deliberately remove/truncate meta blocks from the first // directory of the first datanode. The complete absence of a meta // file disallows this Datanode to send data to another datanode. // However, a client is allowed access to this block. // File storageDir = MiniDFSCluster.getStorageDir(0, 1); String bpid = cluster.getNamesystem().getBlockPoolId(); File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); assertTrue("data directory does not exist", data_dir.exists()); File[] blocks = data_dir.listFiles(); assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0)); int num = 0; for (int idx = 0; idx < blocks.length; idx++) { if (blocks[idx].getName().startsWith("blk_") && blocks[idx].getName().endsWith(".meta")) { num++; if (num % 3 == 0) { // // remove .meta file // LOG.info("Deliberately removing file " + blocks[idx].getName()); assertTrue("Cannot remove file.", blocks[idx].delete()); } else if (num % 3 == 1) { // // shorten .meta file // RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw"); FileChannel channel = file.getChannel(); int newsize = random.nextInt((int) channel.size() / 2); LOG.info("Deliberately truncating file " + blocks[idx].getName() + " to size " + newsize + " bytes."); channel.truncate(newsize); file.close(); } else { // // corrupt a few bytes of the metafile // RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw"); FileChannel channel = file.getChannel(); long position = 0; // // The very first time, corrupt the meta header at offset 0 // if (num != 2) { position = (long) random.nextInt((int) channel.size()); } int length = random.nextInt((int) (channel.size() - position + 1)); byte[] buffer = new byte[length]; random.nextBytes(buffer); channel.write(ByteBuffer.wrap(buffer), position); LOG.info("Deliberately corrupting file " + blocks[idx].getName() + " at offset " + position + " length " + length); file.close(); } } } // // Now deliberately corrupt all meta blocks from the second // directory of the first datanode // storageDir = MiniDFSCluster.getStorageDir(0, 1); data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); assertTrue("data directory does not exist", data_dir.exists()); blocks = data_dir.listFiles(); assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0)); int count = 0; File previous = null; for (int idx = 0; idx < blocks.length; idx++) { if (blocks[idx].getName().startsWith("blk_") && blocks[idx].getName().endsWith(".meta")) { // // Move the previous metafile into the current one. // count++; if (count % 2 == 0) { LOG.info("Deliberately inserting bad crc into files " + blocks[idx].getName() + " " + previous.getName()); assertTrue("Cannot remove file.", blocks[idx].delete()); assertTrue("Cannot corrupt meta file.", previous.renameTo(blocks[idx])); assertTrue("Cannot recreate empty meta file.", previous.createNewFile()); previous = null; } else { previous = blocks[idx]; } } } // // Only one replica is possibly corrupted. 
The other replica should still // be good. Verify. // assertTrue("Corrupted replicas not handled properly.", util.checkFiles(fs, "/srcdat")); LOG.info("All File still have a valid replica"); // // set replication factor back to 1. This causes only one replica of // of each block to remain in HDFS. The check is to make sure that // the corrupted replica generated above is the one that gets deleted. // This test is currently disabled until HADOOP-1557 is solved. // util.setReplication(fs, "/srcdat", (short) 1); //util.waitReplication(fs, "/srcdat", (short)1); //LOG.info("All Files done with removing replicas"); //assertTrue("Excess replicas deleted. Corrupted replicas found.", // util.checkFiles(fs, "/srcdat")); LOG.info("The excess-corrupted-replica test is disabled " + " pending HADOOP-1557"); util.cleanup(fs, "/srcdat"); } finally { if (cluster != null) { cluster.shutdown(); } } }