List of usage examples for java.util SortedMap keySet
Set<K> keySet();
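Per the SortedMap javadoc, keySet() returns a Set view of the keys contained in the map, and the set's iterator returns the keys in ascending order (natural ordering, or the map's comparator). A minimal sketch of that guarantee, using TreeMap with illustrative sample keys:

import java.util.SortedMap;
import java.util.TreeMap;

public class SortedMapKeySetDemo {
  public static void main(String[] args) {
    // TreeMap is the standard SortedMap implementation; keys are illustrative only
    SortedMap<String, Integer> map = new TreeMap<String, Integer>();
    map.put("banana", 2);
    map.put("apple", 1);
    map.put("cherry", 3);

    // keySet() iterates in ascending key order: apple, banana, cherry
    for (String key : map.keySet()) {
      System.out.println(key + " -> " + map.get(key));
    }
  }
}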
From source file:com.google.gwt.emultest.java.util.TreeMapTest.java
/**
 * Test method for 'java.util.SortedMap.firstKey()'.
 *
 * @see java.util.SortedMap#firstKey()
 */
public void testFirstKey() {
  K[] keys = getSortedKeys();
  V[] values = getSortedValues();
  SortedMap<K, V> map = createNavigableMap();

  // test with a single entry map
  map.put(keys[0], values[0]);
  assertEquals(keys[0], map.firstKey());
  // is it consistent with other methods
  assertEquals(map.keySet().toArray()[0], map.firstKey());
  assertEquals(keys[0], map.lastKey());
  assertEquals(map.lastKey(), map.firstKey());

  // test with a two entry map
  map.put(keys[1], values[1]);
  assertEquals(keys[0], map.firstKey());
  assertFalse(keys[1].equals(map.firstKey()));
  // is it consistent with other methods
  assertEquals(map.keySet().toArray()[0], map.firstKey());
  assertFalse(keys[0].equals(map.lastKey()));
  assertFalse(map.lastKey().equals(map.firstKey()));

  map.put(keys[2], values[2]);
  map.put(keys[3], values[3]);
  assertEquals(keys[0], map.firstKey());
}
From source file:com.google.gwt.emultest.java.util.TreeMapTest.java
/**
 * Test method for 'java.util.SortedMap.lastKey()'.
 *
 * @see java.util.SortedMap#lastKey()
 */
public void testLastKey() {
  K[] keys = getSortedKeys();
  V[] values = getSortedValues();
  SortedMap<K, V> map = createNavigableMap();

  // test with a single entry map
  map.put(keys[0], values[0]);
  assertEquals(keys[0], map.lastKey());
  // is it consistent with other methods
  assertEquals(map.keySet().toArray()[0], map.lastKey());
  assertEquals(keys[0], map.firstKey());
  assertEquals(map.firstKey(), map.lastKey());

  // test with a two entry map
  map.put(keys[1], values[1]);
  assertEquals(keys[1], map.lastKey());
  assertFalse(keys[0].equals(map.lastKey()));
  // is it consistent with other methods
  assertEquals(map.keySet().toArray()[1], map.lastKey());
  assertEquals(keys[0], map.firstKey());
  assertFalse(map.firstKey().equals(map.lastKey()));

  map.put(keys[2], values[2]);
  map.put(keys[3], values[3]);
  assertEquals(keys[0], map.headMap(keys[1]).lastKey());
  assertEquals(keys[keys.length - 1], map.tailMap(keys[2]).lastKey());
  assertEquals(keys[2], map.subMap(keys[1], keys[3]).lastKey());
}
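The last three assertions rely on headMap, tailMap, and subMap, whose views share the ordering guarantees of the backing map (headMap is exclusive of its bound, tailMap inclusive, subMap half-open). A quick standalone sketch with plain Integer keys (values illustrative only):

import java.util.SortedMap;
import java.util.TreeMap;

public class SubMapViewsDemo {
  public static void main(String[] args) {
    SortedMap<Integer, String> map = new TreeMap<Integer, String>();
    for (int i = 1; i <= 4; i++) {
      map.put(i, "v" + i);
    }
    System.out.println(map.headMap(2).lastKey());   // 1  (keys < 2)
    System.out.println(map.tailMap(3).lastKey());   // 4  (keys >= 3)
    System.out.println(map.subMap(2, 4).lastKey()); // 3  (2 <= keys < 4)
  }
}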
From source file:com.google.gwt.emultest.java.util.TreeMapTest.java
/**
 * Test method for 'java.util.Map.put(Object, Object)'. This test shows some
 * bad behavior of the TreeMap class before JDK 7. A mapping with a null key
 * can be put in, but several methods are unusable afterward.
 *
 * A SortedMap with natural ordering (no comparator) is supposed to throw a
 * NullPointerException if null keys are "not supported". For a naturally
 * ordered TreeMap before JDK 7, a NullPointerException is not thrown. But
 * the map is left in a state where any other key-based method results in a
 * NullPointerException.
 *
 * @see java.util.Map#put(Object, Object)
 */
public void testPut_nullKey() {
  K[] keys = getSortedKeys();
  V[] values = getSortedValues();
  SortedMap<K, V> sortedMap = createNavigableMap();

  if (useNullKey()) {
    assertNull(sortedMap.put(null, values[0]));
    assertTrue(sortedMap.containsValue(values[0]));

    // the map methods that continue to function
    sortedMap.containsValue(null);
    sortedMap.containsValue(values[0]);
    sortedMap.entrySet();
    sortedMap.equals(createMap());
    sortedMap.hashCode();
    sortedMap.isEmpty();
    sortedMap.keySet();
    sortedMap.putAll(createMap());
    sortedMap.size();
    sortedMap.values();

    // all of the sorted map methods still function
    sortedMap.comparator();
    sortedMap.firstKey();
    sortedMap.lastKey();
    sortedMap.subMap(getLessThanMinimumKey(), getGreaterThanMaximumKey());
    sortedMap.headMap(getLessThanMinimumKey());
    sortedMap.tailMap(getLessThanMinimumKey());
  } else if (TestUtils.getJdkVersion() > 6) {
    // nulls are rejected immediately and don't poison the map anymore
    try {
      assertNull(sortedMap.put(null, values[0]));
      fail("should have thrown");
    } catch (NullPointerException e) {
      // expected outcome
    }
    try {
      assertNull(sortedMap.put(null, values[1]));
      fail("expected exception adding second null");
    } catch (NullPointerException e) {
      // expected outcome
    }
    try {
      sortedMap.containsKey(null);
      fail("expected exception on containsKey(null)");
    } catch (NullPointerException e) {
      // expected outcome
    }
    sortedMap.containsKey(keys[0]);
    try {
      sortedMap.get(null);
      fail("expected exception on get(null)");
    } catch (NullPointerException e) {
      // expected outcome
    }
    sortedMap.get(keys[0]);
    try {
      sortedMap.remove(null);
    } catch (NullPointerException e) {
      // expected
    }
    sortedMap.remove(keys[0]);
  } else {
    // before JDK 7, nulls poisoned the map
    try {
      assertNull(sortedMap.put(null, values[0]));
      // note: the first null added is not required to throw NPE since no
      // comparisons are needed
    } catch (NullPointerException e) {
      // expected outcome
    }
    try {
      assertNull(sortedMap.put(null, values[1]));
      fail("expected exception adding second null");
    } catch (NullPointerException e) {
      // expected outcome
    }
    try {
      sortedMap.containsKey(null);
      fail("expected exception on containsKey(null)");
    } catch (NullPointerException e) {
      // expected outcome
    }
    try {
      sortedMap.containsKey(keys[0]);
      fail("expected exception on contains(key)");
    } catch (NullPointerException e) {
      // expected outcome
    }
    try {
      sortedMap.get(null);
      fail("expected exception on get(null)");
    } catch (NullPointerException e) {
      // expected outcome
    }
    try {
      sortedMap.get(keys[0]);
      fail("expected exception on get(key)");
    } catch (NullPointerException e) {
      // expected outcome
    }
    try {
      sortedMap.remove(null);
      fail("expected exception on remove(null)");
    } catch (NullPointerException e) {
      // expected outcome
    }
    try {
      sortedMap.remove(keys[0]);
      fail("expected exception on remove(key)");
    } catch (NullPointerException e) {
      // expected outcome
    }
  }
}
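A minimal standalone sketch of the null-key behavior the test above exercises, assuming a JDK 7+ runtime where a TreeMap with natural ordering rejects null keys immediately instead of poisoning the map:

import java.util.SortedMap;
import java.util.TreeMap;

public class NullKeyDemo {
  public static void main(String[] args) {
    // natural ordering, no comparator
    SortedMap<String, String> map = new TreeMap<String, String>();
    try {
      map.put(null, "value"); // throws NPE on JDK 7 and later
    } catch (NullPointerException expected) {
      System.out.println("null key rejected, map is still usable");
    }
    map.put("a", "value");
    System.out.println(map.keySet()); // [a]
  }
}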
From source file:fr.aliacom.obm.common.calendar.CalendarBindingImplTest.java
@Test
public void testBuildTreeMap() {
  DateTime eventDate = DateTime.now();
  List<Attendee> attendees = createOrganiserAndContactAttendees(Participation.accepted());
  Event firstException = createEventException(attendees, eventDate.plusDays(1).toDate());
  Event secondException = createEventException(attendees, eventDate.plusDays(2).toDate());
  Event thirdException = createEventException(attendees, eventDate.plusDays(3).toDate());

  CalendarBindingImpl calendarService = new CalendarBindingImpl(null, null, null, null, null, null, null,
      null, null, attendeeService);

  SortedMap<Event, Event> treeMap = calendarService
      .buildSortedMap(ImmutableSet.of(firstException, secondException, thirdException));

  assertThat(treeMap.keySet()).containsExactly(firstException, secondException, thirdException);
}
From source file:accumulo.balancer.GroupBalancer.java
@Override
public long balance(SortedMap<TServerInstance, TabletServerStatus> current, Set<KeyExtent> migrations,
    List<TabletMigration> migrationsOut) {

  // The terminology "extra" and "expected" is used in this code. Expected tablets is the number of
  // tablets a tserver must have for a given group and is numInGroup / numTservers. Extra tablets are any
  // tablets more than the number expected for a given group. If numInGroup % numTservers > 0, then a
  // tserver may have one extra tablet for a group.
  //
  // Assume we have 4 tservers and group A has 11 tablets.
  //   * expected tablets : group A is expected to have 2 tablets on each tserver
  //   * extra tablets    : group A may have an additional tablet on each tserver. Group A has a total of
  //                        3 extra tablets.
  //
  // This balancer also evens out the extra tablets across all groups. The terminology "extraExpected"
  // and "extraExtra" is used to describe these tablets. ExtraExpected is totalExtra / numTservers.
  // ExtraExtra is totalExtra % numTservers. Each tserver should have at least expectedExtra extra
  // tablets and at most one extraExtra tablet. All extra tablets on a tserver must be from different
  // groups.
  //
  // Assume we have 6 tservers and three groups (G1, G2, G3) with 9 tablets each. Each tserver is
  // expected to have one tablet from each group and could possibly have 2 tablets from a group. Below is
  // an illustration of an ideal balancing of extra tablets. To understand the illustration, the first
  // column shows tserver T1 with 2 tablets from G1, 1 tablet from G2, and 2 tablets from G3. EE means
  // empty; it is there so formatting would not mess up the table.
  //
  // T1 | T2 | T3 | T4 | T5 | T6
  // ---+----+----+----+----+-----
  // G3 | G2 | G3 | EE | EE | EE <-- extra extra tablets
  // G1 | G1 | G1 | G2 | G3 | G2 <-- extra expected tablets
  // G1 | G1 | G1 | G1 | G1 | G1 <-- expected tablets for group 1
  // G2 | G2 | G2 | G2 | G2 | G2 <-- expected tablets for group 2
  // G3 | G3 | G3 | G3 | G3 | G3 <-- expected tablets for group 3
  //
  // We do not want to balance the extra tablets like the following. There are two problems with this.
  // First, the extra tablets are not evenly spread: since there are a total of 9 extra tablets, every
  // tserver is expected to have at least one extra tablet. Second, tserver T1 has two extra tablets for
  // group G1. This violates the principle that a tserver can have only one extra tablet for a given
  // group.
  //
  // T1 | T2 | T3 | T4 | T5 | T6
  // ---+----+----+----+----+-----
  // G1 | EE | EE | EE | EE | EE <-- one extra tablet from group 1
  // G3 | G3 | G3 | EE | EE | EE <-- three extra tablets from group 3
  // G2 | G2 | G2 | EE | EE | EE <-- three extra tablets from group 2
  // G1 | G1 | EE | EE | EE | EE <-- two extra tablets from group 1
  // G1 | G1 | G1 | G1 | G1 | G1 <-- expected tablets for group 1
  // G2 | G2 | G2 | G2 | G2 | G2 <-- expected tablets for group 2
  // G3 | G3 | G3 | G3 | G3 | G3 <-- expected tablets for group 3

  if (!shouldBalance(current, migrations)) {
    return 5000;
  }

  if (System.currentTimeMillis() - lastRun < getWaitTime()) {
    return 5000;
  }

  MapCounter<String> groupCounts = new MapCounter<>();
  Map<TServerInstance, TserverGroupInfo> tservers = new HashMap<>();

  for (TServerInstance tsi : current.keySet()) {
    tservers.put(tsi, new TserverGroupInfo(tsi));
  }

  Function<KeyExtent, String> partitioner = getPartitioner();

  // collect stats about the current state
  for (Pair<KeyExtent, Location> entry : getLocationProvider()) {
    String group = partitioner.apply(entry.getFirst());
    Location loc = entry.getSecond();

    if (loc.equals(Location.NONE) || !tservers.containsKey(loc.getTserverInstance())) {
      return 5000;
    }

    groupCounts.increment(group, 1);
    TserverGroupInfo tgi = tservers.get(loc.getTserverInstance());
    tgi.addGroup(group);
  }

  Map<String, Integer> expectedCounts = new HashMap<>();

  int totalExtra = 0;
  for (String group : groupCounts.keySet()) {
    long groupCount = groupCounts.get(group);
    totalExtra += groupCount % current.size();
    expectedCounts.put(group, (int) (groupCount / current.size()));
  }

  // The number of extra tablets from all groups that each tserver must have.
  int expectedExtra = totalExtra / current.size();
  int maxExtraGroups = expectedExtra + 1;

  expectedCounts = Collections.unmodifiableMap(expectedCounts);
  tservers = Collections.unmodifiableMap(tservers);

  for (TserverGroupInfo tgi : tservers.values()) {
    tgi.finishedAdding(expectedCounts);
  }

  Moves moves = new Moves();

  // The order of the following steps is important, because as ordered each step should not move any
  // tablets moved by a previous step.
  balanceExpected(tservers, moves);
  if (moves.size() < getMaxMigrations()) {
    balanceExtraExpected(tservers, expectedExtra, moves);
    if (moves.size() < getMaxMigrations()) {
      boolean cont = balanceExtraMultiple(tservers, maxExtraGroups, moves);
      if (cont && moves.size() < getMaxMigrations()) {
        balanceExtraExtra(tservers, maxExtraGroups, moves);
      }
    }
  }

  populateMigrations(tservers.keySet(), migrationsOut, moves);

  lastRun = System.currentTimeMillis();

  return 5000;
}
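The expected/extra arithmetic described in the comment block above can be sketched in isolation. The numbers below reuse the 4-tserver, 11-tablet example from the comment; class and variable names are illustrative only, not part of GroupBalancer's API:

public class GroupMathDemo {
  public static void main(String[] args) {
    int numTservers = 4;
    int numInGroup = 11; // tablets in group A

    int expected = numInGroup / numTservers; // tablets each tserver must have for the group
    int extra = numInGroup % numTservers;    // extra tablets, spread one-per-tserver at most

    System.out.println("expected per tserver: " + expected); // 2
    System.out.println("extra for the group:  " + extra);    // 3
  }
}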
From source file:org.apache.accumulo.tserver.Tablet.java
private CompactionStats _majorCompact(MajorCompactionReason reason)
    throws IOException, CompactionCanceledException {

  long t1, t2, t3;

  // acquire file info outside of tablet lock
  CompactionStrategy strategy = Property.createInstanceFromPropertyName(acuTableConf,
      Property.TABLE_COMPACTION_STRATEGY, CompactionStrategy.class, new DefaultCompactionStrategy());
  strategy.init(Property.getCompactionStrategyOptions(acuTableConf));

  Map<FileRef, Pair<Key, Key>> firstAndLastKeys = null;
  if (reason == MajorCompactionReason.CHOP) {
    firstAndLastKeys = getFirstAndLastKeys(datafileManager.getDatafileSizes());
  } else if (reason != MajorCompactionReason.USER) {
    MajorCompactionRequest request = new MajorCompactionRequest(extent, reason, fs, acuTableConf);
    request.setFiles(datafileManager.getDatafileSizes());
    strategy.gatherInformation(request);
  }

  Map<FileRef, DataFileValue> filesToCompact;

  int maxFilesToCompact = acuTableConf.getCount(Property.TSERV_MAJC_THREAD_MAXOPEN);

  CompactionStats majCStats = new CompactionStats();
  CompactionPlan plan = null;

  boolean propogateDeletes = false;

  synchronized (this) {
    // plan all the work that needs to be done in the sync block... then do the actual work
    // outside the sync block
    t1 = System.currentTimeMillis();

    majorCompactionWaitingToStart = true;

    tabletMemory.waitForMinC();

    t2 = System.currentTimeMillis();

    majorCompactionWaitingToStart = false;
    notifyAll();

    if (extent.isRootTablet()) {
      // very important that we call this before doing major compaction,
      // otherwise deleted compacted files could possibly be brought back
      // at some point if the file they were compacted to was legitimately
      // removed by a major compaction
      cleanUpFiles(fs, fs.listStatus(this.location), false);
    }

    SortedMap<FileRef, DataFileValue> allFiles = datafileManager.getDatafileSizes();
    List<FileRef> inputFiles = new ArrayList<FileRef>();
    if (reason == MajorCompactionReason.CHOP) {
      // enforce rules: files with keys outside our range need to be compacted
      inputFiles.addAll(findChopFiles(extent, firstAndLastKeys, allFiles.keySet()));
    } else if (reason == MajorCompactionReason.USER) {
      inputFiles.addAll(allFiles.keySet());
    } else {
      MajorCompactionRequest request = new MajorCompactionRequest(extent, reason, fs, acuTableConf);
      request.setFiles(allFiles);
      plan = strategy.getCompactionPlan(request);
      if (plan != null)
        inputFiles.addAll(plan.inputFiles);
    }

    if (inputFiles.isEmpty()) {
      return majCStats;
    }

    // If no original files will exist at the end of the compaction, we do not have to propogate deletes
    Set<FileRef> droppedFiles = new HashSet<FileRef>();
    droppedFiles.addAll(inputFiles);
    if (plan != null)
      droppedFiles.addAll(plan.deleteFiles);
    propogateDeletes = !(droppedFiles.equals(allFiles.keySet()));
    log.debug("Major compaction plan: " + plan + " propogate deletes : " + propogateDeletes);
    filesToCompact = new HashMap<FileRef, DataFileValue>(allFiles);
    filesToCompact.keySet().retainAll(inputFiles);

    t3 = System.currentTimeMillis();

    datafileManager.reserveMajorCompactingFiles(filesToCompact.keySet());
  }

  try {
    log.debug(String.format("MajC initiate lock %.2f secs, wait %.2f secs", (t3 - t2) / 1000.0,
        (t2 - t1) / 1000.0));

    Pair<Long, List<IteratorSetting>> compactionId = null;
    if (!propogateDeletes) {
      // compacting everything, so update the compaction id in metadata
      try {
        compactionId = getCompactionID();
      } catch (NoNodeException e) {
        throw new RuntimeException(e);
      }
    }

    List<IteratorSetting> compactionIterators = new ArrayList<IteratorSetting>();
    if (compactionId != null) {
      if (reason == MajorCompactionReason.USER) {
        if (getCompactionCancelID() >= compactionId.getFirst()) {
          // compaction was canceled
          return majCStats;
        }

        synchronized (this) {
          if (lastCompactID >= compactionId.getFirst())
            // already compacted
            return majCStats;
        }
      }

      compactionIterators = compactionId.getSecond();
    }

    // need to handle case where only one file is being major compacted
    while (filesToCompact.size() > 0) {

      int numToCompact = maxFilesToCompact;

      if (filesToCompact.size() > maxFilesToCompact && filesToCompact.size() < 2 * maxFilesToCompact) {
        // on the second-to-last compaction pass, compact the minimum number of files possible
        numToCompact = filesToCompact.size() - maxFilesToCompact + 1;
      }

      Set<FileRef> smallestFiles = removeSmallest(filesToCompact, numToCompact);

      FileRef fileName = getNextMapFilename((filesToCompact.size() == 0 && !propogateDeletes) ? "A" : "C");
      FileRef compactTmpName = new FileRef(fileName.path().toString() + "_tmp");

      AccumuloConfiguration tableConf = createTableConfiguration(acuTableConf, plan);

      Span span = Trace.start("compactFiles");
      try {

        CompactionEnv cenv = new CompactionEnv() {
          @Override
          public boolean isCompactionEnabled() {
            return Tablet.this.isCompactionEnabled();
          }

          @Override
          public IteratorScope getIteratorScope() {
            return IteratorScope.majc;
          }
        };

        HashMap<FileRef, DataFileValue> copy = new HashMap<FileRef, DataFileValue>(
            datafileManager.getDatafileSizes());
        if (!copy.keySet().containsAll(smallestFiles))
          throw new IllegalStateException("Cannot find data file values for " + smallestFiles);

        copy.keySet().retainAll(smallestFiles);

        log.debug("Starting MajC " + extent + " (" + reason + ") " + copy.keySet() + " --> "
            + compactTmpName + " " + compactionIterators);

        // always propagate deletes, unless last batch
        boolean lastBatch = filesToCompact.isEmpty();
        Compactor compactor = new Compactor(conf, fs, copy, null, compactTmpName,
            lastBatch ? propogateDeletes : true, tableConf, extent, cenv, compactionIterators, reason);

        CompactionStats mcs = compactor.call();

        span.data("files", "" + smallestFiles.size());
        span.data("read", "" + mcs.getEntriesRead());
        span.data("written", "" + mcs.getEntriesWritten());
        majCStats.add(mcs);

        if (lastBatch && plan != null && plan.deleteFiles != null) {
          smallestFiles.addAll(plan.deleteFiles);
        }
        datafileManager.bringMajorCompactionOnline(smallestFiles, compactTmpName, fileName,
            filesToCompact.size() == 0 && compactionId != null ? compactionId.getFirst() : null,
            new DataFileValue(mcs.getFileSize(), mcs.getEntriesWritten()));

        // when major compaction produces a file w/ zero entries, it will be deleted... do not want
        // to add the deleted file
        if (filesToCompact.size() > 0 && mcs.getEntriesWritten() > 0) {
          filesToCompact.put(fileName, new DataFileValue(mcs.getFileSize(), mcs.getEntriesWritten()));
        }
      } finally {
        span.stop();
      }
    }

    return majCStats;
  } finally {
    synchronized (Tablet.this) {
      datafileManager.clearMajorCompactingFile();
    }
  }
}
From source file:freemarker.ext.dump.DumpDirectiveTest.java
@Test
public void dumpStringToObjectMap() {
  String varName = "stuff";
  Map<String, Object> dataModel = new HashMap<String, Object>();

  Map<String, Object> mixedMap = new HashMap<String, Object>();

  String myString = "apples";
  mixedMap.put("myString", myString);

  boolean myBool = true;
  mixedMap.put("myBoolean", myBool);

  int myInt = 4;
  mixedMap.put("myNumber", myInt);

  Date myDate = new Date();
  mixedMap.put("myDate", myDate);

  List<String> myList = new ArrayList<String>();
  myList.add("apples");
  myList.add("bananas");
  myList.add("oranges");
  mixedMap.put("myList", myList);

  Map<String, String> myMap = new HashMap<String, String>();
  myMap.put("Great Expectations", "Charles Dickens");
  myMap.put("Pride and Prejudice", "Jane Austen");
  myMap.put("Middlemarch", "George Eliot");
  myMap.put("Jude the Obscure", "Thomas Hardy");
  mixedMap.put("myMap", myMap);

  dataModel.put(varName, mixedMap);

  Map<String, Object> expectedDumpValue = new HashMap<String, Object>();
  expectedDumpValue.put(Key.TYPE.toString(), Type.HASH_EX);

  SortedMap<String, Object> mixedMapExpectedDump = new TreeMap<String, Object>();

  Map<String, Object> myStringExpectedDump = new HashMap<String, Object>();
  myStringExpectedDump.put(Key.TYPE.toString(), Type.STRING);
  myStringExpectedDump.put(Key.VALUE.toString(), myString);
  mixedMapExpectedDump.put("myString", myStringExpectedDump);

  Map<String, Object> myBooleanExpectedDump = new HashMap<String, Object>();
  myBooleanExpectedDump.put(Key.TYPE.toString(), Type.BOOLEAN);
  myBooleanExpectedDump.put(Key.VALUE.toString(), myBool);
  mixedMapExpectedDump.put("myBoolean", myBooleanExpectedDump);

  Map<String, Object> myIntExpectedDump = new HashMap<String, Object>();
  myIntExpectedDump.put(Key.TYPE.toString(), Type.NUMBER);
  myIntExpectedDump.put(Key.VALUE.toString(), myInt);
  mixedMapExpectedDump.put("myNumber", myIntExpectedDump);

  Map<String, Object> myDateExpectedDump = new HashMap<String, Object>();
  myDateExpectedDump.put(Key.TYPE.toString(), Type.DATE);
  myDateExpectedDump.put(Key.DATE_TYPE.toString(), DateType.UNKNOWN);
  myDateExpectedDump.put(Key.VALUE.toString(), myDate);
  mixedMapExpectedDump.put("myDate", myDateExpectedDump);

  Map<String, Object> myListExpectedDump = new HashMap<String, Object>();
  myListExpectedDump.put(Key.TYPE.toString(), Type.SEQUENCE);
  List<Map<String, Object>> myListItemsExpectedDump = new ArrayList<Map<String, Object>>(myList.size());
  for (String item : myList) {
    Map<String, Object> itemDump = new HashMap<String, Object>();
    itemDump.put(Key.TYPE.toString(), Type.STRING);
    itemDump.put(Key.VALUE.toString(), item);
    myListItemsExpectedDump.add(itemDump);
  }
  myListExpectedDump.put(Key.VALUE.toString(), myListItemsExpectedDump);
  mixedMapExpectedDump.put("myList", myListExpectedDump);

  Map<String, Object> myMapExpectedDump = new HashMap<String, Object>();
  myMapExpectedDump.put(Key.TYPE.toString(), Type.HASH_EX);
  SortedMap<String, Object> myMapItemsExpectedDump = new TreeMap<String, Object>();
  for (String key : myMap.keySet()) {
    Map<String, Object> itemDump = new HashMap<String, Object>();
    itemDump.put(Key.TYPE.toString(), Type.STRING);
    itemDump.put(Key.VALUE.toString(), myMap.get(key));
    myMapItemsExpectedDump.put(key, itemDump);
  }
  myMapExpectedDump.put(Key.VALUE.toString(), myMapItemsExpectedDump);
  mixedMapExpectedDump.put("myMap", myMapExpectedDump);

  expectedDumpValue.put(Key.VALUE.toString(), mixedMapExpectedDump);

  Map<String, Object> expectedDump = new HashMap<String, Object>();
  expectedDump.put(varName, expectedDumpValue);

  Map<String, Object> dump = getDump(varName, dataModel);
  assertEquals(expectedDump, dump);

  // Test the sorting of the outer map
  List<String> expectedDumpValueKeys = new ArrayList<String>(mixedMapExpectedDump.keySet());
  @SuppressWarnings("unchecked")
  Map<String, Object> actualDumpValue = (Map<String, Object>) dump.get(varName);
  @SuppressWarnings("unchecked")
  SortedMap<String, Object> mixedMapActualDump = (SortedMap<String, Object>) actualDumpValue
      .get(Key.VALUE.toString());
  List<String> actualDumpValueKeys = new ArrayList<String>(mixedMapActualDump.keySet());
  assertEquals(expectedDumpValueKeys, actualDumpValueKeys);

  // Test the sorting of the inner map
  List<String> myMapItemsExpectedDumpKeys = new ArrayList<String>(myMapItemsExpectedDump.keySet());
  @SuppressWarnings("unchecked")
  Map<String, Object> myMapActualDump = (Map<String, Object>) mixedMapActualDump.get("myMap");
  @SuppressWarnings("unchecked")
  SortedMap<String, Object> myMapItemsActualDump = (SortedMap<String, Object>) myMapActualDump
      .get(Key.VALUE.toString());
  List<String> myMapItemsActualDumpKeys = new ArrayList<String>(myMapItemsActualDump.keySet());
  assertEquals(myMapItemsExpectedDumpKeys, myMapItemsActualDumpKeys);
}
From source file:org.apache.accumulo.server.tabletserver.Tablet.java
public TreeMap<KeyExtent, SplitInfo> split(byte[] sp) throws IOException {

  if (sp != null && extent.getEndRow() != null && extent.getEndRow().equals(new Text(sp))) {
    throw new IllegalArgumentException();
  }

  if (extent.isRootTablet()) {
    String msg = "Cannot split root tablet";
    log.warn(msg);
    throw new RuntimeException(msg);
  }

  try {
    initiateClose(true, false, false);
  } catch (IllegalStateException ise) {
    log.debug("File " + extent + " not splitting : " + ise.getMessage());
    return null;
  }

  // obtain this info outside of synch block since it will involve opening
  // the map files... it is ok if the set of map files changes, because
  // this info is used for optimization... it is ok if map files are missing
  // from the set... can still query and insert into the tablet while this
  // map file operation is happening
  Map<FileRef, FileUtil.FileInfo> firstAndLastRows = FileUtil.tryToGetFirstAndLastRows(fs,
      tabletServer.getSystemConfiguration(), datafileManager.getFiles());

  synchronized (this) {
    // java needs tuples ...
    TreeMap<KeyExtent, SplitInfo> newTablets = new TreeMap<KeyExtent, SplitInfo>();

    long t1 = System.currentTimeMillis();

    // choose a split point
    SplitRowSpec splitPoint;
    if (sp == null)
      splitPoint = findSplitRow(datafileManager.getFiles());
    else {
      Text tsp = new Text(sp);
      splitPoint = new SplitRowSpec(
          FileUtil.estimatePercentageLTE(fs, tabletServer.getSystemConfiguration(),
              extent.getPrevEndRow(), extent.getEndRow(), datafileManager.getFiles(), tsp),
          tsp);
    }

    if (splitPoint == null || splitPoint.row == null) {
      log.info("had to abort split because splitRow was null");
      closing = false;
      return null;
    }

    closed = true;
    completeClose(true, false);

    Text midRow = splitPoint.row;
    double splitRatio = splitPoint.splitRatio;

    KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
    KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);

    String lowDirectory = TabletOperations.createTabletDirectory(fs, extent.getTableId().toString(),
        midRow);

    // write new tablet information to MetadataTable
    SortedMap<FileRef, DataFileValue> lowDatafileSizes = new TreeMap<FileRef, DataFileValue>();
    SortedMap<FileRef, DataFileValue> highDatafileSizes = new TreeMap<FileRef, DataFileValue>();
    List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();

    MetadataTableUtil.splitDatafiles(extent.getTableId(), midRow, splitRatio, firstAndLastRows,
        datafileManager.getDatafileSizes(), lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);

    log.debug("Files for low split " + low + " " + lowDatafileSizes.keySet());
    log.debug("Files for high split " + high + " " + highDatafileSizes.keySet());

    String time = tabletTime.getMetadataValue();

    // it is possible that some of the bulk loading flags will be deleted after being read below
    // because the bulk load finishes.... therefore split could propagate load flags for a finished
    // bulk load... there is a special iterator on the !METADATA table to clean up this type of
    // garbage
    Map<FileRef, Long> bulkLoadedFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(),
        extent);

    MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, SystemCredentials.get(),
        tabletServer.getLock());
    MetadataTableUtil.addNewTablet(low, lowDirectory, tabletServer.getTabletSession(), lowDatafileSizes,
        bulkLoadedFiles, SystemCredentials.get(), time, lastFlushID, lastCompactID,
        tabletServer.getLock());
    MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SystemCredentials.get(),
        tabletServer.getLock());

    log.log(TLevel.TABLET_HIST, extent + " split " + low + " " + high);

    newTablets.put(high, new SplitInfo(tabletDirectory, highDatafileSizes, time, lastFlushID,
        lastCompactID));
    newTablets.put(low, new SplitInfo(lowDirectory, lowDatafileSizes, time, lastFlushID, lastCompactID));

    long t2 = System.currentTimeMillis();

    log.debug(String.format("offline split time : %6.2f secs", (t2 - t1) / 1000.0));

    closeComplete = true;

    return newTablets;
  }
}
From source file:org.apache.accumulo.tserver.Tablet.java
public TreeMap<KeyExtent, SplitInfo> split(byte[] sp) throws IOException {

  if (sp != null && extent.getEndRow() != null && extent.getEndRow().equals(new Text(sp))) {
    throw new IllegalArgumentException();
  }

  if (extent.isRootTablet()) {
    String msg = "Cannot split root tablet";
    log.warn(msg);
    throw new RuntimeException(msg);
  }

  try {
    initiateClose(true, false, false);
  } catch (IllegalStateException ise) {
    log.debug("File " + extent + " not splitting : " + ise.getMessage());
    return null;
  }

  // obtain this info outside of synch block since it will involve opening
  // the map files... it is ok if the set of map files changes, because
  // this info is used for optimization... it is ok if map files are missing
  // from the set... can still query and insert into the tablet while this
  // map file operation is happening
  Map<FileRef, FileUtil.FileInfo> firstAndLastRows = FileUtil.tryToGetFirstAndLastRows(fs,
      tabletServer.getSystemConfiguration(), datafileManager.getFiles());

  synchronized (this) {
    // java needs tuples ...
    TreeMap<KeyExtent, SplitInfo> newTablets = new TreeMap<KeyExtent, SplitInfo>();

    long t1 = System.currentTimeMillis();

    // choose a split point
    SplitRowSpec splitPoint;
    if (sp == null)
      splitPoint = findSplitRow(datafileManager.getFiles());
    else {
      Text tsp = new Text(sp);
      splitPoint = new SplitRowSpec(
          FileUtil.estimatePercentageLTE(fs, tabletServer.getSystemConfiguration(),
              extent.getPrevEndRow(), extent.getEndRow(),
              FileUtil.toPathStrings(datafileManager.getFiles()), tsp),
          tsp);
    }

    if (splitPoint == null || splitPoint.row == null) {
      log.info("had to abort split because splitRow was null");
      closing = false;
      return null;
    }

    closed = true;
    completeClose(true, false);

    Text midRow = splitPoint.row;
    double splitRatio = splitPoint.splitRatio;

    KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
    KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);

    String lowDirectory = TabletOperations.createTabletDirectory(fs, extent.getTableId().toString(),
        midRow);

    // write new tablet information to MetadataTable
    SortedMap<FileRef, DataFileValue> lowDatafileSizes = new TreeMap<FileRef, DataFileValue>();
    SortedMap<FileRef, DataFileValue> highDatafileSizes = new TreeMap<FileRef, DataFileValue>();
    List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();

    MetadataTableUtil.splitDatafiles(extent.getTableId(), midRow, splitRatio, firstAndLastRows,
        datafileManager.getDatafileSizes(), lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);

    log.debug("Files for low split " + low + " " + lowDatafileSizes.keySet());
    log.debug("Files for high split " + high + " " + highDatafileSizes.keySet());

    String time = tabletTime.getMetadataValue();

    // it is possible that some of the bulk loading flags will be deleted after being read below
    // because the bulk load finishes.... therefore split could propagate load flags for a finished
    // bulk load... there is a special iterator on the metadata table to clean up this type of
    // garbage
    Map<FileRef, Long> bulkLoadedFiles = MetadataTableUtil.getBulkFilesLoaded(SystemCredentials.get(),
        extent);

    MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, SystemCredentials.get(),
        tabletServer.getLock());
    MasterMetadataUtil.addNewTablet(low, lowDirectory, tabletServer.getTabletSession(), lowDatafileSizes,
        bulkLoadedFiles, SystemCredentials.get(), time, lastFlushID, lastCompactID,
        tabletServer.getLock());
    MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SystemCredentials.get(),
        tabletServer.getLock());

    log.log(TLevel.TABLET_HIST, extent + " split " + low + " " + high);

    newTablets.put(high, new SplitInfo(tabletDirectory, highDatafileSizes, time, lastFlushID,
        lastCompactID, lastLocation));
    newTablets.put(low, new SplitInfo(lowDirectory, lowDatafileSizes, time, lastFlushID, lastCompactID,
        lastLocation));

    long t2 = System.currentTimeMillis();

    log.debug(String.format("offline split time : %6.2f secs", (t2 - t1) / 1000.0));

    closeComplete = true;

    return newTablets;
  }
}
From source file:org.apache.accumulo.server.tabletserver.Tablet.java
/**
 * yet another constructor - this one allows us to avoid costly lookups into the Metadata table if we
 * already know the files we need - as at split time
 */
private Tablet(final TabletServer tabletServer, final Text location, final KeyExtent extent,
    final TabletResourceManager trm, final Configuration conf, final VolumeManager fs,
    final List<LogEntry> logEntries, final SortedMap<FileRef, DataFileValue> datafiles, String time,
    final TServerInstance lastLocation, Set<FileRef> scanFiles, long initFlushID, long initCompactID)
    throws IOException {
  Path locationPath;
  if (location.find(":") >= 0) {
    locationPath = new Path(location.toString());
  } else {
    locationPath = fs.getFullPath(FileType.TABLE, extent.getTableId().toString() + location.toString());
  }

  this.location = locationPath.makeQualified(fs.getFileSystemByPath(locationPath));
  this.lastLocation = lastLocation;
  this.tabletDirectory = location.toString();
  this.conf = conf;
  this.acuTableConf = tabletServer.getTableConfiguration(extent);

  this.fs = fs;
  this.extent = extent;
  this.tabletResources = trm;

  this.lastFlushID = initFlushID;
  this.lastCompactID = initCompactID;

  if (extent.isRootTablet()) {
    long rtime = Long.MIN_VALUE;
    for (FileRef ref : datafiles.keySet()) {
      Path path = ref.path();
      FileSystem ns = fs.getFileSystemByPath(path);
      FileSKVIterator reader = FileOperations.getInstance().openReader(path.toString(), true, ns,
          ns.getConf(), tabletServer.getTableConfiguration(extent));
      long maxTime = -1;
      try {
        while (reader.hasTop()) {
          maxTime = Math.max(maxTime, reader.getTopKey().getTimestamp());
          reader.next();
        }
      } finally {
        reader.close();
      }

      if (maxTime > rtime) {
        time = TabletTime.LOGICAL_TIME_ID + "" + maxTime;
        rtime = maxTime;
      }
    }
  }

  this.tabletServer = tabletServer;
  this.logId = tabletServer.createLogId(extent);

  this.timer = new TabletStatsKeeper();

  setupDefaultSecurityLabels(extent);

  tabletMemory = new TabletMemory();
  tabletTime = TabletTime.getInstance(time);
  persistedTime = tabletTime.getTime();

  acuTableConf.addObserver(configObserver = new ConfigurationObserver() {

    private void reloadConstraints() {
      constraintChecker.set(new ConstraintChecker(getTableConfiguration()));
    }

    @Override
    public void propertiesChanged() {
      reloadConstraints();

      try {
        setupDefaultSecurityLabels(extent);
      } catch (Exception e) {
        log.error("Failed to reload default security labels for extent: " + extent.toString());
      }
    }

    @Override
    public void propertyChanged(String prop) {
      if (prop.startsWith(Property.TABLE_CONSTRAINT_PREFIX.getKey()))
        reloadConstraints();
      else if (prop.equals(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey())) {
        try {
          log.info("Default security labels changed for extent: " + extent.toString());
          setupDefaultSecurityLabels(extent);
        } catch (Exception e) {
          log.error("Failed to reload default security labels for extent: " + extent.toString());
        }
      }
    }

    @Override
    public void sessionExpired() {
      log.debug("Session expired, no longer updating per table props...");
    }

  });

  // Force a load of any per-table properties
  configObserver.propertiesChanged();

  tabletResources.setTablet(this, acuTableConf);

  if (!logEntries.isEmpty()) {
    log.info("Starting Write-Ahead Log recovery for " + this.extent);
    final long[] count = new long[2];
    final CommitSession commitSession = tabletMemory.getCommitSession();
    count[1] = Long.MIN_VALUE;
    try {
      Set<String> absPaths = new HashSet<String>();
      for (FileRef ref : datafiles.keySet())
        absPaths.add(ref.path().toString());

      tabletServer.recover(this.tabletServer.getFileSystem(), this, logEntries, absPaths,
          new MutationReceiver() {
            @Override
            public void receive(Mutation m) {
              // LogReader.printMutation(m);
              Collection<ColumnUpdate> muts = m.getUpdates();
              for (ColumnUpdate columnUpdate : muts) {
                if (!columnUpdate.hasTimestamp()) {
                  // if it is not a user set timestamp, it must have been set
                  // by the system
                  count[1] = Math.max(count[1], columnUpdate.getTimestamp());
                }
              }
              tabletMemory.mutate(commitSession, Collections.singletonList(m));
              count[0]++;
            }
          });

      if (count[1] != Long.MIN_VALUE) {
        tabletTime.useMaxTimeFromWALog(count[1]);
      }
      commitSession.updateMaxCommittedTime(tabletTime.getTime());

      tabletMemory.updateMemoryUsageStats();

      if (count[0] == 0) {
        MetadataTableUtil.removeUnusedWALEntries(extent, logEntries, tabletServer.getLock());
        logEntries.clear();
      }

    } catch (Throwable t) {
      if (acuTableConf.getBoolean(Property.TABLE_FAILURES_IGNORE)) {
        log.warn("Error recovering from log files: ", t);
      } else {
        throw new RuntimeException(t);
      }
    }
    // make some closed references that represent the recovered logs
    currentLogs = new HashSet<DfsLogger>();
    for (LogEntry logEntry : logEntries) {
      for (String log : logEntry.logSet) {
        String[] parts = log.split("/", 2);
        Path file = fs.getFullPath(FileType.WAL, parts[1]);
        currentLogs.add(new DfsLogger(tabletServer.getServerConfig(), logEntry.server, file));
      }
    }

    log.info("Write-Ahead Log recovery complete for " + this.extent + " (" + count[0]
        + " mutations applied, " + tabletMemory.getNumEntries() + " entries created)");
  }

  String contextName = acuTableConf.get(Property.TABLE_CLASSPATH);
  if (contextName != null && !contextName.equals("")) {
    // initialize context classloader, instead of possibly waiting for it to initialize for a scan
    // TODO this could hang, causing other tablets to fail to load - ACCUMULO-1292
    AccumuloVFSClassLoader.getContextManager().getClassLoader(contextName);
  }

  // do this last after tablet is completely setup because it
  // could cause major compaction to start
  datafileManager = new DatafileManager(datafiles);

  computeNumEntries();

  datafileManager.removeFilesAfterScan(scanFiles);

  // look for hints of a failure on the previous tablet server
  if (!logEntries.isEmpty() || needsMajorCompaction(MajorCompactionReason.NORMAL)) {
    // look for any temp files hanging around
    removeOldTemporaryFiles();
  }

  log.log(TLevel.TABLET_HIST, extent + " opened ");
}