Example usage for java.util List clear

Introduction

This page collects usage examples for java.util List clear, gathered from open-source projects.

Prototype

void clear();

Document

Removes all of the elements from this list (optional operation).
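
Before the project samples, here is a minimal, self-contained sketch (the class and variable names are ours, not taken from any project below) showing both sides of the contract: clear() empties a modifiable list in place, while a fixed-size list such as the one returned by Arrays.asList rejects the optional operation with UnsupportedOperationException.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ListClearDemo {
    public static void main(String[] args) {
        // a modifiable list: clear() removes every element in place
        List<String> names = new ArrayList<>(Arrays.asList("a", "b", "c"));
        names.clear();
        System.out.println(names.isEmpty()); // prints: true

        // clear() is an optional operation: a fixed-size view rejects it
        List<String> fixedSize = Arrays.asList("a", "b", "c");
        try {
            fixedSize.clear();
        } catch (UnsupportedOperationException e) {
            System.out.println("clear() not supported on a fixed-size list");
        }
    }
}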

Usage

From source file:com.qwazr.database.TableServiceImpl.java

private final int flushBuffer(String table_name, List<Map<String, Object>> buffer)
        throws IOException, ServerException, DatabaseException {
    try {
        if (buffer == null || buffer.isEmpty())
            return 0;
        TableManager.INSTANCE.upsertRows(table_name, buffer);
        return buffer.size();
    } finally {
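        // clear the buffer even when upsertRows fails, so the caller can safely reuse it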
        buffer.clear();
    }
}

From source file:gov.nih.nci.ncicb.tcga.dcc.common.dao.jdbc.BAMFileQueriesJDBCImpl.java

/**
 * batchUpdate util method: executes the accumulated batch when it is full or a flush is forced.
 *
 * @param query     the SQL statement to execute in batch
 * @param tableName the target table name (used only for logging)
 * @param data      the accumulated rows of statement parameters
 * @param flush     if true, execute even when the batch is not yet full
 */
private void batchUpdate(final String query, final String tableName, final List<Object[]> data,
        final Boolean flush) {
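    // write only when the batch is full or a flush is forced, and skip empty batches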
    if ((data.size() >= BATCH_SIZE || flush) && data.size() > 0) {
        getSimpleJdbcTemplate().batchUpdate(query, data);
        logger.info("Merged " + data.size() + " records in table '" + tableName + "' ...");
        data.clear();
    }
}

From source file:com.tesora.dve.upgrade.versions.AddCollation.java

@Override
public void upgrade(DBHelper helper, InformationCallback stdout) throws PEException {
    super.upgrade(helper, stdout);

    try {
        List<Object> params = new ArrayList<Object>();

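        // prepare the insert statement once, then execute it repeatedly with fresh parameters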
        helper.prepare(
                "insert into collations (id, name, character_set_name, is_default, is_compiled, sortlen) values (?,?,?,?,?,?)");

        for (String collationName : MysqlNativeCollationCatalog.DEFAULT_CATALOG
                .getCollationsCatalogEntriesByName()) {
            NativeCollation nc = MysqlNativeCollationCatalog.DEFAULT_CATALOG.findCollationByName(collationName);
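            // reuse the same parameter list across rows; clear() resets it for each collation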
            params.clear();
            params.add(nc.getId());
            params.add(nc.getName());
            params.add(nc.getCharacterSetName());
            params.add(BooleanUtils.toInteger(nc.isDefault()));
            params.add(BooleanUtils.toInteger(nc.isCompiled()));
            params.add(nc.getSortLen());
            helper.executePrepared(params);
        }
    } catch (SQLException sqle) {
        throw new PEException("Unable to insert collation values: " + sqle.getMessage());
    }
}

From source file:org.beanio.spring.SpringTest.java

/**
 * Test BeanIO flat file writer for XML.
 */
@Test
@SuppressWarnings("unchecked")
public void testRestarbleXmlItemWriter() throws Exception {
    ExecutionContext ec = new ExecutionContext();

    File tempFile = File.createTempFile("beanio-", "xml");
    tempFile.deleteOnExit();

    BeanIOFlatFileItemWriter<Human> writer = (BeanIOFlatFileItemWriter<Human>) context
            .getBean("itemWriter-xml");
    writer.setResource(new FileSystemResource(tempFile));
    writer.open(ec);

    List<Human> list = new ArrayList<Human>();
    list.add(new Human(Human.FRIEND, "John", 'M'));
    writer.write(list);
    writer.update(ec);

    long position = ec.getLong("BeanIOFlatFileItemWriter.current.count");
    assertTrue(position > 0);

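    // reuse the same list instance for the next chunk of records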
    list.clear();
    list.add(new Human(Human.COWORKER, "Mike", 'M'));
    list.add(new Human(Human.NEIGHBOR, "Steve", 'M'));
    writer.write(list);
    writer.close();
    assertFileMatches("xout1.xml", tempFile);

    // open for restart
    writer = (BeanIOFlatFileItemWriter<Human>) context.getBean("itemWriter-xml");
    writer.setResource(new FileSystemResource(tempFile));
    writer.open(ec);

    list.clear();
    list.add(new Human(Human.FRIEND, "Jen", 'F'));
    writer.write(list);

    writer.update(ec);
    writer.close();
    assertFileMatches("xout2.xml", tempFile);
}

From source file:grakn.core.server.session.computer.GraknSparkExecutor.java

public static <M> JavaPairRDD<Object, ViewIncomingPayload<M>> executeVertexProgramIteration(
        final JavaPairRDD<Object, VertexWritable> graphRDD,
        final JavaPairRDD<Object, ViewIncomingPayload<M>> viewIncomingRDD, final GraknSparkMemory memory,
        final Configuration graphComputerConfiguration, // has the Graph/GraphComputer.configuration() information
        final Configuration vertexProgramConfiguration) { // has the VertexProgram.loadState() information

    boolean partitionedGraphRDD = graphRDD.partitioner().isPresent();

    if (partitionedGraphRDD && null != viewIncomingRDD) // the graphRDD and the viewRDD must have the same partitioner
    {
        assert graphRDD.partitioner().get().equals(viewIncomingRDD.partitioner().get());
    }
    final JavaPairRDD<Object, ViewOutgoingPayload<M>> viewOutgoingRDD = ((null == viewIncomingRDD)
            ? graphRDD.mapValues(
                    vertexWritable -> new Tuple2<>(vertexWritable, Optional.<ViewIncomingPayload<M>>absent()))
            : // first iteration will not have any views or messages
            graphRDD.leftOuterJoin(viewIncomingRDD)) // every other iteration may have views and messages
                    // for each partition of vertices emit a view and their outgoing messages
                    .mapPartitionsToPair(partitionIterator -> {
                        KryoShimServiceLoader.applyConfiguration(graphComputerConfiguration);

                        // if the partition is empty, return without starting a new VP iteration
                        if (!partitionIterator.hasNext()) {
                            return Collections.emptyIterator();
                        }

                        final VertexProgram<M> workerVertexProgram = VertexProgram.createVertexProgram(
                                HadoopGraph.open(graphComputerConfiguration), vertexProgramConfiguration); // each partition(Spark)/worker(TP3) has a local copy of the vertex program (a worker's task)
                        final String[] vertexComputeKeysArray = VertexProgramHelper
                                .vertexComputeKeysAsArray(workerVertexProgram.getVertexComputeKeys()); // the compute keys as an array
                        final SparkMessenger<M> messenger = new SparkMessenger<>();

                        workerVertexProgram.workerIterationStart(memory.asImmutable()); // start the worker
                        return IteratorUtils.map(partitionIterator, vertexViewIncoming -> {
                            final StarGraph.StarVertex vertex = vertexViewIncoming._2()._1().get(); // get the vertex from the vertex writable
                            final boolean hasViewAndMessages = vertexViewIncoming._2()._2().isPresent(); // if this is the first iteration, then there are no views or messages
                            final List<DetachedVertexProperty<Object>> previousView = hasViewAndMessages
                                    ? vertexViewIncoming._2()._2().get().getView()
                                    : memory.isInitialIteration() ? new ArrayList<>() : Collections.emptyList();
                            // revive compute properties if they already exist
                            if (memory.isInitialIteration() && vertexComputeKeysArray.length > 0) {
                                vertex.properties(vertexComputeKeysArray)
                                        .forEachRemaining(vertexProperty -> previousView
                                                .add(DetachedFactory.detach(vertexProperty, true)));
                            }
                            // drop any computed properties that are cached in memory
                            vertex.dropVertexProperties(vertexComputeKeysArray);
                            final List<M> incomingMessages = hasViewAndMessages
                                    ? vertexViewIncoming._2()._2().get().getIncomingMessages()
                                    : Collections.emptyList();
                            IteratorUtils.removeOnNext(previousView.iterator()).forEachRemaining(
                                    property -> property.attach(Attachable.Method.create(vertex))); // attach the view to the vertex
                            assert previousView.isEmpty();
                            // do the vertex's vertex program iteration
                            messenger.setVertexAndIncomingMessages(vertex, incomingMessages); // set the messenger with the incoming messages
                            workerVertexProgram.execute(
                                    ComputerGraph.vertexProgram(vertex, workerVertexProgram), messenger,
                                    memory); // execute the vertex program on this vertex for this iteration
                            // assert incomingMessages.isEmpty();  // maybe the program didn't read all the messages
                            incomingMessages.clear();
                            // detach the compute property view from the vertex
                            final List<DetachedVertexProperty<Object>> nextView = vertexComputeKeysArray.length == 0
                                    ? // not all vertex programs have compute keys
                            Collections.emptyList()
                                    : IteratorUtils.list(IteratorUtils.map(
                                            vertex.properties(vertexComputeKeysArray),
                                            vertexProperty -> DetachedFactory.detach(vertexProperty, true)));
                            // drop compute property view as it has now been detached from the vertex
                            vertex.dropVertexProperties(vertexComputeKeysArray);
                            final List<Tuple2<Object, M>> outgoingMessages = messenger.getOutgoingMessages(); // get the outgoing messages being sent by this vertex
                            if (!partitionIterator.hasNext()) {
                                workerVertexProgram.workerIterationEnd(memory.asImmutable()); // if no more vertices in the partition, end the worker's iteration
                            }
                            return (nextView.isEmpty() && outgoingMessages.isEmpty()) ? null : // if there is no view nor outgoing messages, emit nothing
                            new Tuple2<>(vertex.id(), new ViewOutgoingPayload<>(nextView, outgoingMessages)); // else, emit the vertex id, its view, and its outgoing messages
                        });
                    }, true) // true means that the partition is preserved
                    .filter(tuple -> null != tuple); // if there are no messages or views, then the tuple is null (memory optimization)
    // the graphRDD and the viewRDD must have the same partitioner
    if (partitionedGraphRDD) {
        assert graphRDD.partitioner().get().equals(viewOutgoingRDD.partitioner().get());
    }
    /////////////////////////////////////////////////////////////
    /////////////////////////////////////////////////////////////
    final PairFlatMapFunction<Tuple2<Object, ViewOutgoingPayload<M>>, Object, Payload> messageFunction = tuple -> IteratorUtils
            .concat(IteratorUtils.of(new Tuple2<>(tuple._1(), tuple._2().getView())), // emit the view payload
                    IteratorUtils.map(tuple._2().getOutgoingMessages().iterator(),
                            message -> new Tuple2<>(message._1(), new MessagePayload<>(message._2()))));
    final MessageCombiner<M> messageCombiner = VertexProgram
            .<VertexProgram<M>>createVertexProgram(HadoopGraph.open(vertexProgramConfiguration),
                    vertexProgramConfiguration)
            .getMessageCombiner().orElse(null);
    final Function2<Payload, Payload, Payload> reducerFunction = (a, b) -> { // reduce the view and outgoing messages into a single payload object representing the new view and incoming messages for a vertex
        if (a instanceof ViewIncomingPayload) {
            ((ViewIncomingPayload<M>) a).mergePayload(b, messageCombiner);
            return a;
        } else if (b instanceof ViewIncomingPayload) {
            ((ViewIncomingPayload<M>) b).mergePayload(a, messageCombiner);
            return b;
        } else {
            final ViewIncomingPayload<M> c = new ViewIncomingPayload<>(messageCombiner);
            c.mergePayload(a, messageCombiner);
            c.mergePayload(b, messageCombiner);
            return c;
        }
    };
    /////////////////////////////////////////////////////////////
    /////////////////////////////////////////////////////////////
    // "message pass" by reducing on the vertex object id of the view and message payloads
    final JavaPairRDD<Object, ViewIncomingPayload<M>> newViewIncomingRDD = (partitionedGraphRDD
            ? viewOutgoingRDD.flatMapToPair(messageFunction).reduceByKey(graphRDD.partitioner().get(),
                    reducerFunction)
            : viewOutgoingRDD.flatMapToPair(messageFunction).reduceByKey(reducerFunction))
                    .mapValues(payload -> { // handle various corner cases of when views don't exist, messages don't exist, or neither exists.
                        if (payload instanceof ViewIncomingPayload) {// this happens if there is a vertex view with incoming messages
                            return (ViewIncomingPayload<M>) payload;
                        } else if (payload instanceof ViewPayload) { // this happens if there is a vertex view with no incoming messages
                            return new ViewIncomingPayload<>((ViewPayload) payload);
                        } else { // this happens when there is a single message to a vertex that has no view or outgoing messages
                            return new ViewIncomingPayload<>((MessagePayload<M>) payload);
                        }
                    });
    // the graphRDD and the viewRDD must have the same partitioner
    if (partitionedGraphRDD) {
        assert graphRDD.partitioner().get().equals(newViewIncomingRDD.partitioner().get());
    }
    newViewIncomingRDD.foreachPartition(partitionIterator -> {
        KryoShimServiceLoader.applyConfiguration(graphComputerConfiguration);
    }); // need to complete a task so its BSP and the memory for this iteration is updated
    return newViewIncomingRDD;
}

From source file:au.org.ala.layers.dao.LayerDAOImpl.java

@Override
public List<Layer> getLayersByCriteria(String keywords) {
    logger.info("Getting a list of all enabled layers by criteria: " + keywords);
    String sql = "";
    sql += "select * from layers where ";
    sql += " enabled=true AND ( ";
    sql += "lower(keywords) like ? ";
    sql += " or lower(displayname) like ? ";
    //sql += " or lower(type) like ? ";
    sql += " or lower(name) like ? ";
    sql += " or lower(domain) like ? ";
    sql += ") order by displayname ";

    keywords = "%" + keywords.toLowerCase() + "%";

    //List list = hibernateTemplate.find(sql, new String[]{keywords, keywords, keywords}); // keywords,
    List<Layer> list = jdbcTemplate.query(sql, ParameterizedBeanPropertyRowMapper.newInstance(Layer.class),
            keywords, keywords, keywords, keywords);

    // remove duplicates, if any, while preserving order
    Set<Layer> setItems = new LinkedHashSet<>(list);
    list.clear();
    list.addAll(setItems);

    updateDisplayPaths(list);
    updateMetadataPaths(list);

    return list; // no duplicates now
    //        logger.info("Getting a list of all enabled layers");
    //        String sql = "select * from layers where enabled=true";
    //        List<Layer> l = jdbcTemplate.query(sql, ParameterizedBeanPropertyRowMapper.newInstance(Layer.class));
    //        return l;

}

From source file:ai.grakn.kb.internal.computer.GraknSparkExecutor.java

public static <M> JavaPairRDD<Object, ViewIncomingPayload<M>> executeVertexProgramIteration(
        final JavaPairRDD<Object, VertexWritable> graphRDD,
        final JavaPairRDD<Object, ViewIncomingPayload<M>> viewIncomingRDD, final GraknSparkMemory memory,
        final Configuration graphComputerConfiguration, // has the Graph/GraphComputer.configuration() information
        final Configuration vertexProgramConfiguration) { // has the VertexProgram.loadState() information

    boolean partitionedGraphRDD = graphRDD.partitioner().isPresent();

    if (partitionedGraphRDD && null != viewIncomingRDD) // the graphRDD and the viewRDD must have the same partitioner
    {
        assert graphRDD.partitioner().get().equals(viewIncomingRDD.partitioner().get());
    }
    final JavaPairRDD<Object, ViewOutgoingPayload<M>> viewOutgoingRDD = ((null == viewIncomingRDD)
            ? graphRDD.mapValues(
                    vertexWritable -> new Tuple2<>(vertexWritable, Optional.<ViewIncomingPayload<M>>absent()))
            : // first iteration will not have any views or messages
            graphRDD.leftOuterJoin(viewIncomingRDD)) // every other iteration may have views and messages
                    // for each partition of vertices emit a view and their outgoing messages
                    .mapPartitionsToPair(partitionIterator -> {
                        KryoShimServiceLoader.applyConfiguration(graphComputerConfiguration);

                        // if the partition is empty, return without starting a new VP iteration
                        if (!partitionIterator.hasNext()) {
                            return Collections.emptyList();
                        }

                        final VertexProgram<M> workerVertexProgram = VertexProgram.createVertexProgram(
                                HadoopGraph.open(graphComputerConfiguration), vertexProgramConfiguration); // each partition(Spark)/worker(TP3) has a local copy of the vertex program (a worker's task)
                        final String[] vertexComputeKeysArray = VertexProgramHelper
                                .vertexComputeKeysAsArray(workerVertexProgram.getVertexComputeKeys()); // the compute keys as an array
                        final SparkMessenger<M> messenger = new SparkMessenger<>();

                        workerVertexProgram.workerIterationStart(memory.asImmutable()); // start the worker
                        return () -> IteratorUtils.map(partitionIterator, vertexViewIncoming -> {
                            final StarGraph.StarVertex vertex = vertexViewIncoming._2()._1().get(); // get the vertex from the vertex writable
                            final boolean hasViewAndMessages = vertexViewIncoming._2()._2().isPresent(); // if this is the first iteration, then there are no views or messages
                            final List<DetachedVertexProperty<Object>> previousView = hasViewAndMessages
                                    ? vertexViewIncoming._2()._2().get().getView()
                                    : memory.isInitialIteration() ? new ArrayList<>() : Collections.emptyList();
                            // revive compute properties if they already exist
                            if (memory.isInitialIteration() && vertexComputeKeysArray.length > 0) {
                                vertex.properties(vertexComputeKeysArray)
                                        .forEachRemaining(vertexProperty -> previousView
                                                .add(DetachedFactory.detach(vertexProperty, true)));
                            }
                            // drop any computed properties that are cached in memory
                            vertex.dropVertexProperties(vertexComputeKeysArray);
                            final List<M> incomingMessages = hasViewAndMessages
                                    ? vertexViewIncoming._2()._2().get().getIncomingMessages()
                                    : Collections.emptyList();
                            IteratorUtils.removeOnNext(previousView.iterator()).forEachRemaining(
                                    property -> property.attach(Attachable.Method.create(vertex))); // attach the view to the vertex
                            assert previousView.isEmpty();
                            // do the vertex's vertex program iteration
                            messenger.setVertexAndIncomingMessages(vertex, incomingMessages); // set the messenger with the incoming messages
                            workerVertexProgram.execute(
                                    ComputerGraph.vertexProgram(vertex, workerVertexProgram), messenger,
                                    memory); // execute the vertex program on this vertex for this iteration
                            // assert incomingMessages.isEmpty();  // maybe the program didn't read all the messages
                            incomingMessages.clear();
                            // detach the compute property view from the vertex
                            final List<DetachedVertexProperty<Object>> nextView = vertexComputeKeysArray.length == 0
                                    ? // not all vertex programs have compute keys
                            Collections.emptyList()
                                    : IteratorUtils.list(IteratorUtils.map(
                                            vertex.properties(vertexComputeKeysArray),
                                            vertexProperty -> DetachedFactory.detach(vertexProperty, true)));
                            // drop compute property view as it has now been detached from the vertex
                            vertex.dropVertexProperties(vertexComputeKeysArray);
                            final List<Tuple2<Object, M>> outgoingMessages = messenger.getOutgoingMessages(); // get the outgoing messages being sent by this vertex
                            if (!partitionIterator.hasNext()) {
                                workerVertexProgram.workerIterationEnd(memory.asImmutable()); // if no more vertices in the partition, end the worker's iteration
                            }
                            return (nextView.isEmpty() && outgoingMessages.isEmpty()) ? null : // if there is no view nor outgoing messages, emit nothing
                            new Tuple2<>(vertex.id(), new ViewOutgoingPayload<>(nextView, outgoingMessages)); // else, emit the vertex id, its view, and its outgoing messages
                        });
                    }, true) // true means that the partition is preserved
                    .filter(tuple -> null != tuple); // if there are no messages or views, then the tuple is null (memory optimization)
    // the graphRDD and the viewRDD must have the same partitioner
    if (partitionedGraphRDD) {
        assert graphRDD.partitioner().get().equals(viewOutgoingRDD.partitioner().get());
    }
    /////////////////////////////////////////////////////////////
    /////////////////////////////////////////////////////////////
    final PairFlatMapFunction<Tuple2<Object, ViewOutgoingPayload<M>>, Object, Payload> messageFunction = tuple -> () -> IteratorUtils
            .concat(IteratorUtils.of(new Tuple2<>(tuple._1(), tuple._2().getView())), // emit the view payload
                    IteratorUtils.map(tuple._2().getOutgoingMessages().iterator(),
                            message -> new Tuple2<>(message._1(), new MessagePayload<>(message._2()))));
    final MessageCombiner<M> messageCombiner = VertexProgram
            .<VertexProgram<M>>createVertexProgram(HadoopGraph.open(vertexProgramConfiguration),
                    vertexProgramConfiguration)
            .getMessageCombiner().orElse(null);
    final Function2<Payload, Payload, Payload> reducerFunction = (a, b) -> { // reduce the view and outgoing messages into a single payload object representing the new view and incoming messages for a vertex
        if (a instanceof ViewIncomingPayload) {
            ((ViewIncomingPayload<M>) a).mergePayload(b, messageCombiner);
            return a;
        } else if (b instanceof ViewIncomingPayload) {
            ((ViewIncomingPayload<M>) b).mergePayload(a, messageCombiner);
            return b;
        } else {
            final ViewIncomingPayload<M> c = new ViewIncomingPayload<>(messageCombiner);
            c.mergePayload(a, messageCombiner);
            c.mergePayload(b, messageCombiner);
            return c;
        }
    };
    /////////////////////////////////////////////////////////////
    /////////////////////////////////////////////////////////////
    // "message pass" by reducing on the vertex object id of the view and message payloads
    final JavaPairRDD<Object, ViewIncomingPayload<M>> newViewIncomingRDD = (partitionedGraphRDD
            ? viewOutgoingRDD.flatMapToPair(messageFunction).reduceByKey(graphRDD.partitioner().get(),
                    reducerFunction)
            : viewOutgoingRDD.flatMapToPair(messageFunction).reduceByKey(reducerFunction))
                    .mapValues(payload -> { // handle various corner cases of when views don't exist, messages don't exist, or neither exists.
                        if (payload instanceof ViewIncomingPayload) {// this happens if there is a vertex view with incoming messages
                            return (ViewIncomingPayload<M>) payload;
                        } else if (payload instanceof ViewPayload) { // this happens if there is a vertex view with no incoming messages
                            return new ViewIncomingPayload<>((ViewPayload) payload);
                        } else { // this happens when there is a single message to a vertex that has no view or outgoing messages
                            return new ViewIncomingPayload<>((MessagePayload<M>) payload);
                        }
                    });
    // the graphRDD and the viewRDD must have the same partitioner
    if (partitionedGraphRDD) {
        assert graphRDD.partitioner().get().equals(newViewIncomingRDD.partitioner().get());
    }
    newViewIncomingRDD.foreachPartition(partitionIterator -> {
        KryoShimServiceLoader.applyConfiguration(graphComputerConfiguration);
    }); // need to complete a task so its BSP and the memory for this iteration is updated
    return newViewIncomingRDD;
}

From source file:eagle.log.entity.meta.IndexDefinition.java

/**
 * Check if the query is suitable to go through index. If true, then return the value of index fields in order. Otherwise return null.
 * TODO: currently index fields should be string type.
 *
 * @param query query expression after re-write
 * @param rowkeys if the query can go through the index, all rowkeys will be added into rowkeys.
 * @return true if the query can go through the index, otherwise return false
 */
public IndexType canGoThroughIndex(ORExpression query, List<byte[]> rowkeys) {
    if (query == null || query.getANDExprList() == null || query.getANDExprList().isEmpty())
        return IndexType.NON_CLUSTER_INDEX;
    if (rowkeys != null) {
        rowkeys.clear();
    }
    final Map<String, String> indexfieldMap = new HashMap<String, String>();
    for (ANDExpression andExpr : query.getANDExprList()) {
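        // reset the field map for each AND clause of the query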
        indexfieldMap.clear();
        for (AtomicExpression ae : andExpr.getAtomicExprList()) {
            // TODO temporarily ignore those fields which are not for attributes
            final String fieldName = parseEntityAttribute(ae.getKey());
            if (fieldName != null && ComparisonOperator.EQUAL.equals(ae.getOp())) {
                indexfieldMap.put(fieldName, ae.getValue());
            }
        }
        final String[] partitions = entityDef.getPartitions();
        int[] partitionValueHashs = null;
        if (partitions != null) {
            partitionValueHashs = new int[partitions.length];
            for (int i = 0; i < partitions.length; ++i) {
                final String value = indexfieldMap.get(partitions[i]);
                if (value == null) {
                    throw new IllegalArgumentException(
                            "Partition " + partitions[i] + " is not defined in the query: " + query.toString());
                }
                partitionValueHashs[i] = value.hashCode();
            }
        }
        final byte[][] indexFieldValues = new byte[columns.length][];
        for (int i = 0; i < columns.length; ++i) {
            final IndexColumn col = columns[i];
            if (!indexfieldMap.containsKey(col.getColumnName())) {
                // If we have to use scan anyway, there's no need to go through index
                return IndexType.NON_INDEX;
            }
            final String value = indexfieldMap.get(col.getColumnName());
            indexFieldValues[i] = value.getBytes();
        }
        final byte[] rowkey = generateUniqueIndexRowkey(indexFieldValues, partitionValueHashs, null);
        if (rowkeys != null) {
            rowkeys.add(rowkey);
        }
    }
    if (index.unique()) {
        return IndexType.UNIQUE_INDEX;
    }
    return IndexType.NON_CLUSTER_INDEX;
}

From source file:hivemall.xgboost.XGBoostPredictUDTF.java

private void predictAndFlush(final Booster model, final List<LabeledPointWithRowId> buf) throws HiveException {
    final DMatrix testData;
    final float[][] predicted;
    try {
        testData = createDMatrix(buf);
        predicted = model.predict(testData);
    } catch (XGBoostError e) {
        throw new HiveException(e);
    }
    forwardPredicted(buf, predicted);
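    // reset the row buffer so the caller can accumulate the next batch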
    buf.clear();
}