Example usage for java.util HashSet clear

List of usage examples for java.util HashSet clear

Introduction

On this page you can find example usage of java.util.HashSet.clear().

Prototype

public void clear() 

Document

Removes all of the elements from this set.
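
Before the project examples, here is a minimal, self-contained sketch of the call itself (the class and variable names are illustrative):

import java.util.HashSet;

public class HashSetClearDemo {
    public static void main(String[] args) {
        HashSet<String> hosts = new HashSet<String>();
        hosts.add("node-1");
        hosts.add("node-2");
        System.out.println(hosts.size()); // prints 2

        // clear() removes every element; the (now empty) set remains usable
        hosts.clear();
        System.out.println(hosts.isEmpty()); // prints true
    }
}

A pattern that recurs in the examples below is reusing a single HashSet across loop iterations: calling clear() at the start (or end) of each pass avoids allocating a new set every time.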

Usage

From source file:StorageEngineClient.CombineFileInputFormat.java

private void getMoreSplits(JobConf job, Path[] paths1, long maxSize, long minSizeNode, long minSizeRack,
        List<CombineFileSplit> splits) throws IOException, NullGzFileException {
    if (paths1.length == 0) {
        return;
    }

    Path[] paths = paths1;
    ArrayList<Path> splitable = new ArrayList<Path>();
    ArrayList<Path> unsplitable = new ArrayList<Path>();
    for (int i = 0; i < paths1.length; i++) {
        if (isSplitable(paths1[i].getFileSystem(job), paths1[i])) {
            splitable.add(paths1[i]);
        } else {
            unsplitable.add(paths1[i]);
        }
    }
    if (unsplitable.size() != 0) {
        paths = new Path[splitable.size()];
        splitable.toArray(paths);
    }

    OneFileInfo[] files;

    HashMap<String, List<OneBlockInfo>> rackToBlocks = new HashMap<String, List<OneBlockInfo>>();

    HashMap<OneBlockInfo, String[]> blockToNodes = new HashMap<OneBlockInfo, String[]>();

    HashMap<String, List<OneBlockInfo>> nodeToBlocks = new HashMap<String, List<OneBlockInfo>>();

    files = new OneFileInfo[paths.length];

    long totLength = 0;
    for (int i = 0; i < paths.length; i++) {
        files[i] = new OneFileInfo(paths[i], job, rackToBlocks, blockToNodes, nodeToBlocks);
        totLength += files[i].getLength();
    }

    for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter = nodeToBlocks.entrySet().iterator(); iter
            .hasNext();) {

        Map.Entry<String, List<OneBlockInfo>> onenode = iter.next();
        this.processsplit(job, onenode, blockToNodes, maxSize, minSizeNode, minSizeRack, splits, "node");
    }

    for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter = rackToBlocks.entrySet().iterator(); iter
            .hasNext();) {

        Map.Entry<String, List<OneBlockInfo>> onerack = iter.next();
        this.processsplit(job, onerack, blockToNodes, maxSize, minSizeNode, minSizeRack, splits, "rack");
    }

    this.processsplit(job, null, blockToNodes, maxSize, minSizeNode, minSizeRack, splits, "all");

    int maxFileNumPerSplit = job.getInt("hive.merge.inputfiles.maxFileNumPerSplit", 1000);

    HashSet<OneBlockInfo> hs = new HashSet<OneBlockInfo>();
    while (blockToNodes.size() > 0) {
        ArrayList<OneBlockInfo> validBlocks = new ArrayList<OneBlockInfo>();
        List<String> nodes = new ArrayList<String>();
        int filenum = 0;
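        // empty the reused scratch set before collecting this split's blocks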
        hs.clear();
        for (OneBlockInfo blockInfo : blockToNodes.keySet()) {
            validBlocks.add(blockInfo);
            filenum++;
            for (String host : blockInfo.hosts) {
                nodes.add(host);
            }
            hs.add(blockInfo);
            if (filenum >= maxFileNumPerSplit) {
                break;
            }
        }
        for (OneBlockInfo blockInfo : hs) {
            blockToNodes.remove(blockInfo);
        }
        this.addCreatedSplit(job, splits, nodes, validBlocks);
    }

    if (unsplitable.size() != 0) {

        HashMap<OneBlockInfo, String[]> fileToNodes = new HashMap<OneBlockInfo, String[]>();

        for (Path path : unsplitable) {
            FileSystem fs = path.getFileSystem(job);
            FileStatus stat = fs.getFileStatus(path);
            long len = stat.getLen();
            BlockLocation[] locations = fs.getFileBlockLocations(stat, 0, len);
            if (locations.length == 0) {
                console.printError("The file " + path.toUri().toString() + " maybe is empty, please check it!");
                throw new NullGzFileException(
                        "The file " + path.toUri().toString() + " maybe is empty, please check it!");
            }

            LOG.info("unsplitable file:" + path.toUri().toString() + " length:" + len);

            OneBlockInfo oneblock = new OneBlockInfo(path, 0, len, locations[0].getHosts(),
                    locations[0].getTopologyPaths());
            fileToNodes.put(oneblock, locations[0].getHosts());
        }

        this.processsplitForUnsplit(job, null, fileToNodes, maxSize, minSizeNode, minSizeRack, splits, "all");
    }
}

From source file:StorageEngineClient.CombineFileInputFormat.java

private void getMoreSplitsWithStatus(JobConf job, Path[] paths1, Map<String, FileStatus> fileNameToStatus,
        long maxSize, long minSizeNode, long minSizeRack, List<CombineFileSplit> splits)
        throws IOException, NullGzFileException {
    if (paths1.length == 0) {
        return;
    }

    Path[] paths = paths1;
    ArrayList<Path> splitable = new ArrayList<Path>();
    ArrayList<Path> unsplitable = new ArrayList<Path>();
    for (int i = 0; i < paths1.length; i++) {
        if (isSplitable(paths1[i].getFileSystem(job), paths1[i])) {
            splitable.add(paths1[i]);
        } else {
            unsplitable.add(paths1[i]);
        }
    }
    if (unsplitable.size() != 0) {
        paths = new Path[splitable.size()];
        splitable.toArray(paths);
    }

    OneFileInfo[] files;

    HashMap<String, List<OneBlockInfo>> rackToBlocks = new HashMap<String, List<OneBlockInfo>>();

    HashMap<OneBlockInfo, String[]> blockToNodes = new HashMap<OneBlockInfo, String[]>();

    HashMap<String, List<OneBlockInfo>> nodeToBlocks = new HashMap<String, List<OneBlockInfo>>();

    files = new OneFileInfo[paths.length];

    long totLength = 0;
    for (int i = 0; i < paths.length; i++) {
        files[i] = new OneFileInfo(paths[i], fileNameToStatus.get(paths[i].toString()), job, rackToBlocks,
                blockToNodes, nodeToBlocks);
        totLength += files[i].getLength();
    }

    for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter = nodeToBlocks.entrySet().iterator(); iter
            .hasNext();) {

        Map.Entry<String, List<OneBlockInfo>> onenode = iter.next();
        this.processsplit(job, onenode, blockToNodes, maxSize, minSizeNode, minSizeRack, splits, "node");
    }

    for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter = rackToBlocks.entrySet().iterator(); iter
            .hasNext();) {

        Map.Entry<String, List<OneBlockInfo>> onerack = iter.next();
        this.processsplit(job, onerack, blockToNodes, maxSize, minSizeNode, minSizeRack, splits, "rack");
    }

    this.processsplit(job, null, blockToNodes, maxSize, minSizeNode, minSizeRack, splits, "all");

    int maxFileNumPerSplit = job.getInt("hive.merge.inputfiles.maxFileNumPerSplit", 1000);

    HashSet<OneBlockInfo> hs = new HashSet<OneBlockInfo>();
    while (blockToNodes.size() > 0) {
        ArrayList<OneBlockInfo> validBlocks = new ArrayList<OneBlockInfo>();
        List<String> nodes = new ArrayList<String>();
        int filenum = 0;
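        // as above, empty the reused scratch set before gathering the next split's blocks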
        hs.clear();
        for (OneBlockInfo blockInfo : blockToNodes.keySet()) {
            validBlocks.add(blockInfo);
            filenum++;
            for (String host : blockInfo.hosts) {
                nodes.add(host);
            }
            hs.add(blockInfo);
            if (filenum >= maxFileNumPerSplit) {
                break;
            }
        }
        for (OneBlockInfo blockInfo : hs) {
            blockToNodes.remove(blockInfo);
        }
        this.addCreatedSplit(job, splits, nodes, validBlocks);
    }

    if (unsplitable.size() != 0) {

        HashMap<OneBlockInfo, String[]> fileToNodes = new HashMap<OneBlockInfo, String[]>();

        for (Path path : unsplitable) {
            FileSystem fs = path.getFileSystem(job);
            FileStatus stat = fileNameToStatus.get(path.toString());//fs.getFileStatus(path);
            long len = stat.getLen();
            BlockLocation[] locations = fs.getFileBlockLocations(stat, 0, len);
            if (locations.length == 0) {
                console.printError("The file " + path.toUri().toString() + " maybe is empty, please check it!");
                throw new NullGzFileException(
                        "The file " + path.toUri().toString() + " maybe is empty, please check it!");
            }

            LOG.info("unsplitable file:" + path.toUri().toString() + " length:" + len);

            OneBlockInfo oneblock = new OneBlockInfo(path, 0, len, locations[0].getHosts(),
                    locations[0].getTopologyPaths());
            fileToNodes.put(oneblock, locations[0].getHosts());
        }

        this.processsplitForUnsplit(job, null, fileToNodes, maxSize, minSizeNode, minSizeRack, splits, "all");
    }
}

From source file:org.pentaho.platform.plugin.action.mondrian.MondrianModelComponent.java

public static String getInitialQuery(final Connection connection, final String cubeName) throws Throwable {

    String measuresMdx = null;
    String columnsMdx = null;
    String whereMdx = ""; //$NON-NLS-1$
    StringBuffer rowsMdx = new StringBuffer();

    // Get catalog info, if exists
    String catalog = connection.getCatalogName();
    MondrianCatalogComplementInfo catalogComplementInfo = MondrianCatalogHelper.getInstance()
            .getCatalogComplementInfoMap(catalog);

    try {

        Schema schema = connection.getSchema();
        if (schema == null) {
            Logger.error("MondrianModelComponent", Messages.getInstance()
                    .getErrorString("MondrianModel.ERROR_0002_INVALID_SCHEMA", connection.getConnectString())); //$NON-NLS-1$ //$NON-NLS-2$
            return null;
        }

        Cube[] cubes = schema.getCubes();
        if ((cubes == null) || (cubes.length == 0)) {
            Logger.error("MondrianModelComponent", Messages.getInstance()
                    .getErrorString("MondrianModel.ERROR_0003_NO_CUBES", connection.getConnectString())); //$NON-NLS-1$ //$NON-NLS-2$
            return null;
        }

        if ((cubes.length > 1) && (cubeName == null)) {
            Logger.error("MondrianModelComponent", Messages.getInstance().getErrorString(
                    "MondrianModel.ERROR_0004_CUBE_NOT_SPECIFIED", connection.getConnectString())); //$NON-NLS-1$ //$NON-NLS-2$
            return null;
        }

        Cube cube = null;
        if (cubes.length == 1) {
            cube = cubes[0];
        } else {
            for (Cube element : cubes) {
                if (element.getName().equals(cubeName)) {
                    cube = element;
                    break;
                }
            }
        }

        if (cube == null) {
            Logger.error("MondrianModelComponent", Messages.getInstance().getErrorString(
                    "MondrianModel.ERROR_0005_CUBE_NOT_FOUND", cubeName, connection.getConnectString())); //$NON-NLS-1$ //$NON-NLS-2$
            return null;
        }

        // If we have any whereConditions block, we need to find which hierarchies they are in
        // and not include them in the rows
        HashSet<Hierarchy> whereHierarchies = new HashSet<Hierarchy>();
        if (catalogComplementInfo != null && catalogComplementInfo.getWhereCondition(cube.getName()) != null
                && !catalogComplementInfo.getWhereCondition(cube.getName()).equals("")) { //$NON-NLS-1$

            final String rawString = catalogComplementInfo.getWhereCondition(cube.getName());

            // Caveat - It's possible that we have in the where condition a hierarchy that we don't have access
            // permissions; In this case, we'll ditch the where condition at all. Same for any error that
            // we find here

            try {

                // According to Julian, the better way to resolve the names is to build a query
                final String queryStr = "select " + rawString + " on columns, {} on rows from " //$NON-NLS-1$//$NON-NLS-2$
                        + cube.getName();
                final Query query = connection.parseQuery(queryStr);

                final Hierarchy[] hierarchies = query
                        .getMdxHierarchiesOnAxis(AxisOrdinal.StandardAxisOrdinal.COLUMNS);
                boolean isWhereValid = true;

                for (int i = 0; i < hierarchies.length && isWhereValid; i++) {
                    final Hierarchy hierarchy = hierarchies[i];
                    if (connection.getRole().canAccess(hierarchy)) {
                        whereHierarchies.add(hierarchy);
                    } else {
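                        // an inaccessible hierarchy invalidates the slicer: drop everything collected so far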
                        isWhereValid = false;
                        whereHierarchies.clear();
                    }
                }

                if (isWhereValid) {
                    whereMdx = " WHERE " + rawString; //$NON-NLS-1$
                }
            } catch (Exception e) {
                // We found an error in the where slicer, so we'll just act like it wasn't here
                whereHierarchies.clear();
            }
        }

        Dimension[] dimensions = cube.getDimensions();
        if ((dimensions == null) || (dimensions.length == 0)) {
            Logger.error("MondrianModelComponent", Messages.getInstance().getErrorString(
                    "MondrianModel.ERROR_0006_NO_DIMENSIONS", cubeName, connection.getConnectString())); //$NON-NLS-1$ //$NON-NLS-2$
            return null;
        }

        for (Dimension element : dimensions) {

            final Hierarchy hierarchy = element.getHierarchy();
            if (hierarchy == null) {
                Logger.error("MondrianModelComponent",
                        Messages.getInstance().getErrorString("MondrianModel.ERROR_0007_NO_HIERARCHIES",
                                element.getName(), cubeName, connection.getConnectString())); //$NON-NLS-1$ //$NON-NLS-2$
                return null;
            }

            if (!connection.getRole().canAccess(hierarchy)) {
                // We can't access this element
                continue;
            }

            if (whereHierarchies.contains(hierarchy)) {
                // We have it on the where condition - skip it
                continue;
            }

            Member member = Locus.execute((RolapConnection) connection, "Retrieving default members in plugin",
                    new Locus.Action<Member>() {
                        public Member execute() {
                            return connection.getSchemaReader().getHierarchyDefaultMember(hierarchy);
                        }
                    });

            if (member == null) {
                Logger.error("MondrianModelComponent",
                        Messages.getInstance().getErrorString("MondrianModel.ERROR_0008_NO_DEFAULT_MEMBER",
                                element.getName(), cubeName, connection.getConnectString())); //$NON-NLS-1$ //$NON-NLS-2$
                return null;
            }
            if (element.isMeasures()) {
                // measuresMdx = "with member "+ member.getUniqueName();
                // //$NON-NLS-1$
                measuresMdx = ""; //$NON-NLS-1$
                columnsMdx = " select NON EMPTY {" + member.getUniqueName() + "} ON columns, "; //$NON-NLS-1$ //$NON-NLS-2$
            } else {
                if (rowsMdx.length() > 0) {
                    rowsMdx.append(", "); //$NON-NLS-1$
                }
                rowsMdx.append(member.getUniqueName());
            }
        }
        if ((measuresMdx != null) && (columnsMdx != null) && (rowsMdx.length() > 0)) {
            StringBuffer result = new StringBuffer(
                    measuresMdx.length() + columnsMdx.length() + rowsMdx.length() + 50);
            result.append(measuresMdx).append(columnsMdx).append("NON EMPTY {(") //$NON-NLS-1$
                    .append(rowsMdx).append(")} ON rows ") //$NON-NLS-1$
                    .append("from [" + cube.getName() + "]") //$NON-NLS-1$ //$NON-NLS-2$
                    .append(whereMdx);

            return result.toString();

        }
        return null;
    } catch (Throwable t) {
        if (t instanceof MondrianException) {
            // pull the cause out, otherwise it never gets logged
            Throwable cause = ((MondrianException) t).getCause();
            if (cause != null) {
                throw cause;
            } else {
                throw t;
            }
        } else {
            throw t;
        }
    }
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.recovery.TestRMStateStore.java

void testRMAppStateStore(RMStateStoreHelper stateStoreHelper) throws Exception {
    long submitTime = System.currentTimeMillis();
    Configuration conf = new YarnConfiguration();
    RMStateStore store = stateStoreHelper.getRMStateStore();
    TestDispatcher dispatcher = new TestDispatcher();
    store.setRMDispatcher(dispatcher);

    AMRMTokenSecretManager appTokenMgr = new AMRMTokenSecretManager(conf);
    ClientToAMTokenSecretManagerInRM clientToAMTokenMgr = new ClientToAMTokenSecretManagerInRM();

    ApplicationAttemptId attemptId1 = ConverterUtils
            .toApplicationAttemptId("appattempt_1352994193343_0001_000001");
    ApplicationId appId1 = attemptId1.getApplicationId();
    storeApp(store, appId1, submitTime);

    // create application token and client token key for attempt1
    Token<AMRMTokenIdentifier> appAttemptToken1 = generateAMRMToken(attemptId1, appTokenMgr);
    HashSet<Token<?>> attemptTokenSet1 = new HashSet<Token<?>>();
    attemptTokenSet1.add(appAttemptToken1);
    SecretKey clientTokenKey1 = clientToAMTokenMgr.createMasterKey(attemptId1);

    ContainerId containerId1 = storeAttempt(store, attemptId1, "container_1352994193343_0001_01_000001",
            appAttemptToken1, clientTokenKey1, dispatcher);

    String appAttemptIdStr2 = "appattempt_1352994193343_0001_000002";
    ApplicationAttemptId attemptId2 = ConverterUtils.toApplicationAttemptId(appAttemptIdStr2);

    // create application token and client token key for attempt2
    Token<AMRMTokenIdentifier> appAttemptToken2 = generateAMRMToken(attemptId2, appTokenMgr);
    HashSet<Token<?>> attemptTokenSet2 = new HashSet<Token<?>>();
    attemptTokenSet2.add(appAttemptToken2);
    SecretKey clientTokenKey2 = clientToAMTokenMgr.createMasterKey(attemptId2);

    ContainerId containerId2 = storeAttempt(store, attemptId2, "container_1352994193343_0001_02_000001",
            appAttemptToken2, clientTokenKey2, dispatcher);

    ApplicationAttemptId attemptIdRemoved = ConverterUtils
            .toApplicationAttemptId("appattempt_1352994193343_0002_000001");
    ApplicationId appIdRemoved = attemptIdRemoved.getApplicationId();
    storeApp(store, appIdRemoved, submitTime);
    storeAttempt(store, attemptIdRemoved, "container_1352994193343_0002_01_000001", null, null, dispatcher);

    RMApp mockRemovedApp = mock(RMApp.class);
    HashMap<ApplicationAttemptId, RMAppAttempt> attempts = new HashMap<ApplicationAttemptId, RMAppAttempt>();
    ApplicationSubmissionContext context = new ApplicationSubmissionContextPBImpl();
    context.setApplicationId(appIdRemoved);
    when(mockRemovedApp.getSubmitTime()).thenReturn(submitTime);
    when(mockRemovedApp.getApplicationSubmissionContext()).thenReturn(context);
    when(mockRemovedApp.getAppAttempts()).thenReturn(attempts);
    RMAppAttempt mockRemovedAttempt = mock(RMAppAttempt.class);
    when(mockRemovedAttempt.getAppAttemptId()).thenReturn(attemptIdRemoved);
    attempts.put(attemptIdRemoved, mockRemovedAttempt);
    store.removeApplication(mockRemovedApp);

    // let things settle down
    Thread.sleep(1000);
    store.close();

    // load state
    store = stateStoreHelper.getRMStateStore();
    RMState state = store.loadState();
    Map<ApplicationId, ApplicationState> rmAppState = state.getApplicationState();

    ApplicationState appState = rmAppState.get(appId1);
    // app is loaded
    assertNotNull(appState);
    // app is loaded correctly
    assertEquals(submitTime, appState.getSubmitTime());
    // submission context is loaded correctly
    assertEquals(appId1, appState.getApplicationSubmissionContext().getApplicationId());
    ApplicationAttemptState attemptState = appState.getAttempt(attemptId1);
    // attempt1 is loaded correctly
    assertNotNull(attemptState);
    assertEquals(attemptId1, attemptState.getAttemptId());
    // attempt1 container is loaded correctly
    assertEquals(containerId1, attemptState.getMasterContainer().getId());
    // attempt1 applicationToken is loaded correctly
    HashSet<Token<?>> savedTokens = new HashSet<Token<?>>();
    savedTokens.addAll(attemptState.getAppAttemptCredentials().getAllTokens());
    assertEquals(attemptTokenSet1, savedTokens);
    // attempt1 client token master key is loaded correctly
    assertArrayEquals(clientTokenKey1.getEncoded(),
            attemptState.getAppAttemptCredentials().getSecretKey(RMStateStore.AM_CLIENT_TOKEN_MASTER_KEY_NAME));

    attemptState = appState.getAttempt(attemptId2);
    // attempt2 is loaded correctly
    assertNotNull(attemptState);
    assertEquals(attemptId2, attemptState.getAttemptId());
    // attempt2 container is loaded correctly
    assertEquals(containerId2, attemptState.getMasterContainer().getId());
    // attempt2 applicationToken is loaded correctly
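    // empty the reused set before collecting attempt2's tokens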
    savedTokens.clear();
    savedTokens.addAll(attemptState.getAppAttemptCredentials().getAllTokens());
    assertEquals(attemptTokenSet2, savedTokens);
    // attempt2 client token master key is loaded correctly
    assertArrayEquals(clientTokenKey2.getEncoded(),
            attemptState.getAppAttemptCredentials().getSecretKey(RMStateStore.AM_CLIENT_TOKEN_MASTER_KEY_NAME));

    // assert store is in expected state after everything is cleaned
    assertTrue(stateStoreHelper.isFinalStateValid());

    store.close();
}

From source file:com.mentor.questa.vrm.jenkins.QuestaVrmHostAction.java

private CategoryDataset buildDataSet(StaplerRequest req) {
    boolean showAction = Boolean.valueOf(req.getParameter("showActions")) || getActionCookie(req);
    DataSetBuilder<String, Long> dsb = new DataSetBuilder<String, Long>();

    PriorityQueue<Pair> pq = new PriorityQueue<Pair>();
    HashMap<String, Integer> hostCount = new HashMap<String, Integer>();
    for (TestResult temp : getRegressionResult().getActions()) {
        QuestaVrmAbstractResult action = (QuestaVrmAbstractResult) temp;
        if (showAction || action instanceof QuestaVrmTestResult) {
            if (action.getStartTime() == -1 || action.getDoneTime() == -1) {
                continue;
            }
            pq.add(new Pair(action.getStartTimeDate(), action.getHost(), 1));
            pq.add(new Pair(action.getDoneTimeDate(), action.getHost(), -1));
            hostCount.put(action.getHost(), 0);
        }
    }

    if (pq.isEmpty()) {
        return dsb.build();
    }

    long offset = getRegressionResult().getRegressionBegin().getTime();
    int noOfTests;
    HashSet<String> visited = new HashSet<String>();

    while (!pq.isEmpty()) {
        long currentKey = pq.peek().date.getTime();

        while (!pq.isEmpty() && pq.peek().date.getTime() == currentKey) {
            Pair current = pq.peek();
            noOfTests = hostCount.get(current.host);
            while (!pq.isEmpty() && pq.peek().compareTo(current) == 0) {
                noOfTests += pq.poll().diff;
            }
            dsb.add(noOfTests, current.host, (current.date.getTime() - offset) / 1000);
            hostCount.put(current.host, noOfTests);
            visited.add(current.host);

        }
        for (String host : hostCount.keySet()) {
            if (!visited.contains(host)) {
                dsb.add(hostCount.get(host), host, (currentKey - offset) / 1000);
            }
        }
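        // forget which hosts changed at this timestamp before moving to the next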
        visited.clear();

    }
    return dsb.build();

}

From source file:Business.InvertedIndex.java

public List searchForWholePhrase(List<DocInfo> docList, String[] queryTerms) {

    HashMap<DocInfo, Integer> documentPhraseScore = new HashMap<>();

    for (DocInfo doc : docList) {
        documentPhraseScore.put(doc, 0);
        int count = 0;
        // declared outside the word loop so matched positions carry over from one query term to the next
        HashSet<Integer> positions = new HashSet<>();
        for (String word1 : queryTerms) {
            String word2 = word1.replaceAll("[^\\w]", "");
            es.setCurrent(word2);
            es.stem();
            String queryTerm = es.getCurrent();

            if (doc.getTermsInADocument().containsKey(queryTerm)) {
                if (positions.isEmpty()) {
                    positions.addAll(doc.getTermsInADocument().get(queryTerm).getPositions());
                } else {
                    ArrayList<Integer> newPositions = doc.getTermsInADocument().get(queryTerm).getPositions();
                    ArrayList<Integer> tempList = new ArrayList<>();
                    boolean b = false;
                    for (int po : newPositions) {

                        if (positions.contains(po - 1)) {
                            tempList.add(po);
                            int currentScore = documentPhraseScore.get(doc);
                            int newScore = currentScore + count;
                            documentPhraseScore.put(doc, newScore);
                            b = true;
                        }
                    }
                    if (b) {
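                        // an adjacent match was found, so advance to the new set of positions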
                        positions.clear();
                        positions.addAll(tempList);
                    }
                }
            }
            if (count == 0) {
                count++;
            } else {
                count = count * 10;
            }
        }

    }

    Set<DocInfo> set = documentPhraseScore.keySet();
    List<DocInfo> sortedDocs = new ArrayList<>(set);

    Collections.sort(sortedDocs, (DocInfo s1, DocInfo s2) -> Double.compare(documentPhraseScore.get(s2),
            documentPhraseScore.get(s1)));
    return sortedDocs;
}

From source file:org.unitedinternet.cosmo.service.impl.StandardContentServiceTest.java

/**
 * Tests update collection with children.
 * @throws Exception if anything goes wrong during the update.
 */
@Test
public void testUpdateCollectionWithChildren() throws Exception {
    User user = testHelper.makeDummyUser();
    CollectionItem rootCollection = contentDao.createRootItem(user);

    CollectionItem dummyCollection = new MockCollectionItem();
    dummyCollection.setName("foo");
    dummyCollection.setOwner(user);

    ContentItem dummyContent1 = new MockNoteItem();
    dummyContent1.setName("bar1");
    dummyContent1.setOwner(user);

    ContentItem dummyContent2 = new MockNoteItem();
    dummyContent2.setName("bar2");
    dummyContent2.setOwner(user);

    HashSet<Item> children = new HashSet<Item>();
    children.add(dummyContent1);
    children.add(dummyContent2);

    dummyCollection = service.createCollection(rootCollection, dummyCollection, children);

    Assert.assertEquals(2, dummyCollection.getChildren().size());

    ContentItem bar1 = getContentItemFromSet(dummyCollection.getChildren(), "bar1");
    ContentItem bar2 = getContentItemFromSet(dummyCollection.getChildren(), "bar2");
    Assert.assertNotNull(bar1);
    Assert.assertNotNull(bar2);

    bar1.setIsActive(false);

    ContentItem bar3 = new MockNoteItem();
    bar3.setName("bar3");
    bar3.setOwner(user);

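    // empty the reused set and rebuild the collection's intended membership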
    children.clear();
    children.add(bar1);
    children.add(bar2);
    children.add(bar3);

    dummyCollection = service.updateCollection(dummyCollection, children);

    Assert.assertEquals(2, dummyCollection.getChildren().size());

    bar1 = getContentItemFromSet(dummyCollection.getChildren(), "bar1");
    bar2 = getContentItemFromSet(dummyCollection.getChildren(), "bar2");
    bar3 = getContentItemFromSet(dummyCollection.getChildren(), "bar3");

    Assert.assertNull(bar1);
    Assert.assertNotNull(bar2);
    Assert.assertNotNull(bar3);
}

From source file:com.nextdoor.bender.ipc.es.ElasticSearchTransport.java

@Override
public void checkResponse(HttpResponse resp, String responseString) throws TransportException {
    /*
     * Check the status code of the overall bulk call. The call can succeed but have
     * individual failures, which are checked later.
     */
    if (resp.getStatusLine().getStatusCode() != HttpStatus.SC_OK) {
        throw new TransportException("es call failed because " + resp.getStatusLine().getReasonPhrase());
    }

    /*
     * Short circuit deserializing the response by just looking if there are any errors
     */
    if (responseString.contains("\"errors\":false")) {
        return;
    }

    /*
     * Convert response text to a POJO. Only tested with ES 2.4.x and 5.x.
     */
    Gson gson = new GsonBuilder().create();
    EsResponse esResp = null;
    try {
        esResp = gson.fromJson(responseString, EsResponse.class);
    } catch (JsonSyntaxException e) {
        throw new TransportException("es call failed because " + resp.getStatusLine().getReasonPhrase(), e);
    }

    /*
     * Look for the errors per index request
     */
    int failures = 0;

    if (esResp.items == null) {
        throw new TransportException("es call failed because " + resp.getStatusLine().getReasonPhrase());
    }

    HashSet<String> errorTypes = new HashSet<String>();
    for (Item item : esResp.items) {
        Index index = item.index;

        if (index == null || index.error == null || index.error.reason == null) {
            continue;
        }

        /*
         * For now just allow 200's and 400's. Both are considered non-fatal errors from the lambda's
         * perspective.
         */
        switch (index.status) {
        case HttpStatus.SC_OK:
        case HttpStatus.SC_BAD_REQUEST:
            continue;
        default:
            failures++;

            if (index.error != null && index.error.reason != null && index.error.type != null) {
                if (!errorTypes.contains(index.error.type)) {
                    logger.error("Indexing Error Reason: " + index.error.reason);
                    if (index.error.caused_by != null) {
                        logger.error("Indexing Error Cause: " + index.error.caused_by.reason);
                    }
                    errorTypes.add(index.error.type);
                }
            }
        }
    }

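    // the error types were only needed to de-duplicate the log messages above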
    errorTypes.clear();
    if (failures != 0) {
        throw new TransportException("es index failure count is " + failures);
    }
}

From source file:ai.grakn.test.graql.analytics.DegreeTest.java

@Test
public void testDegreeIsCorrectAssertionAboutAssertion()
        throws GraknValidationException, ExecutionException, InterruptedException {
    // TODO: Fix on TinkerGraphComputer
    assumeFalse(usingTinker());

    // create a simple graph
    RoleType pet = graph.putRoleType("pet");
    RoleType owner = graph.putRoleType("owner");
    RelationType mansBestFriend = graph.putRelationType("mans-best-friend").hasRole(pet).hasRole(owner);
    RoleType target = graph.putRoleType("target");
    RoleType value = graph.putRoleType("value");
    RelationType hasName = graph.putRelationType("has-name").hasRole(value).hasRole(target);
    EntityType person = graph.putEntityType("person").playsRole(owner);
    EntityType animal = graph.putEntityType("animal").playsRole(pet).playsRole(target);
    ResourceType<String> name = graph.putResourceType("name", ResourceType.DataType.STRING).playsRole(value);
    ResourceType<String> altName = graph.putResourceType("alternate-name", ResourceType.DataType.STRING)
            .playsRole(value);
    RoleType ownership = graph.putRoleType("ownership");
    RoleType ownershipResource = graph.putRoleType("ownership-resource");
    RelationType hasOwnershipResource = graph.putRelationType("has-ownership-resource").hasRole(ownership)
            .hasRole(ownershipResource);
    ResourceType<String> startDate = graph.putResourceType("start-date", ResourceType.DataType.STRING)
            .playsRole(ownershipResource);
    mansBestFriend.playsRole(ownership);

    // add data to the graph
    Entity coco = animal.addEntity();
    Entity dave = person.addEntity();
    Resource coconut = name.putResource("coconut");
    Resource stinky = altName.putResource("stinky");
    Relation daveOwnsCoco = mansBestFriend.addRelation().putRolePlayer(owner, dave).putRolePlayer(pet, coco);
    hasName.addRelation().putRolePlayer(target, coco).putRolePlayer(value, coconut);
    hasName.addRelation().putRolePlayer(target, coco).putRolePlayer(value, stinky);
    Resource sd = startDate.putResource("01/01/01");
    Relation ownsFrom = hasOwnershipResource.addRelation().putRolePlayer(ownershipResource, sd)
            .putRolePlayer(ownership, daveOwnsCoco);

    // manually compute the degree
    Map<String, Long> referenceDegrees1 = new HashMap<>();
    referenceDegrees1.put(coco.getId(), 1L);
    referenceDegrees1.put(dave.getId(), 1L);
    referenceDegrees1.put(daveOwnsCoco.getId(), 3L);
    referenceDegrees1.put(sd.getId(), 1L);
    referenceDegrees1.put(ownsFrom.getId(), 2L);

    // manually compute degrees
    Map<String, Long> referenceDegrees2 = new HashMap<>();
    referenceDegrees2.put(coco.getId(), 1L);
    referenceDegrees2.put(dave.getId(), 1L);
    referenceDegrees2.put(daveOwnsCoco.getId(), 2L);

    graph.commit();

    // create a subgraph with assertion on assertion
    HashSet<String> ct = Sets.newHashSet("animal", "person", "mans-best-friend", "start-date",
            "has-ownership-resource");
    Map<Long, Set<String>> degrees = graph.graql().compute().degree().in(ct).execute();
    assertTrue(!degrees.isEmpty());
    degrees.entrySet().forEach(entry -> entry.getValue().forEach(id -> {
        assertTrue(referenceDegrees1.containsKey(id));
        assertEquals(referenceDegrees1.get(id), entry.getKey());
    }));

    // create subgraph without assertion on assertion
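    // empty the reused type set before defining the smaller subgraph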
    ct.clear();
    ct.add("animal");
    ct.add("person");
    ct.add("mans-best-friend");
    degrees = graph.graql().compute().degree().in(ct).execute();
    assertFalse(degrees.isEmpty());
    degrees.entrySet().forEach(entry -> entry.getValue().forEach(id -> {
        assertTrue(referenceDegrees2.containsKey(id));
        assertEquals(referenceDegrees2.get(id), entry.getKey());
    }));
}

From source file:org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicyImpl.java

@Override
public boolean isEnsembleAdheringToPlacementPolicy(List<BookieSocketAddress> ensembleList, int writeQuorumSize,
        int ackQuorumSize) {
    int ensembleSize = ensembleList.size();
    int minNumRacksPerWriteQuorumForThisEnsemble = Math.min(writeQuorumSize, minNumRacksPerWriteQuorum);
    HashSet<String> racksInQuorum = new HashSet<String>();
    BookieSocketAddress bookie;
    for (int i = 0; i < ensembleList.size(); i++) {
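        // empty the reused set before collecting this write quorum's racks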
        racksInQuorum.clear();
        for (int j = 0; j < writeQuorumSize; j++) {
            bookie = ensembleList.get((i + j) % ensembleSize);
            try {
                racksInQuorum.add(knownBookies.get(bookie).getNetworkLocation());
            } catch (Exception e) {
                /*
                 * any issue/exception in analyzing whether ensemble is
                 * strictly adhering to placement policy should be
                 * swallowed.
                 */
                LOG.warn("Received exception while trying to get network location of bookie: {}", bookie, e);
            }
        }
        if ((racksInQuorum.size() < minNumRacksPerWriteQuorumForThisEnsemble)
                || (enforceMinNumRacksPerWriteQuorum && racksInQuorum.contains(getDefaultRack()))) {
            return false;
        }
    }
    return true;
}