Usage examples for com.google.common.collect.Iterables.toArray
static <T> T[] toArray(Iterable<? extends T> iterable, Class<T> type)
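All of the examples below use this overload, which copies the elements of any Iterable into a freshly allocated array whose component type is given by the Class token. A minimal sketch (class and variable names are illustrative, not taken from any of the projects below):

    import com.google.common.collect.Iterables;

    import java.util.Arrays;
    import java.util.List;

    public class ToArrayDemo {
        public static void main(String[] args) {
            // Works for any Iterable, not just Collection; a List is used for brevity.
            List<String> names = Arrays.asList("alpha", "beta", "gamma");
            // The Class token tells Guava which component type to allocate.
            String[] array = Iterables.toArray(names, String.class);
            System.out.println(Arrays.toString(array)); // prints [alpha, beta, gamma]
        }
    }

The common thread in the real-world examples that follow is adapting a lazily produced Iterable (from Splitter, Iterables.concat, Collections2.transform, and the like) to an API that insists on a typed array.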
From source file:com.github.jcustenborder.kafka.connect.twitter.TwitterSourceConnectorConfig.java
public String[] filterKeywords() {
    List<String> keywordList = this.getList(FILTER_KEYWORDS_CONF);
    String[] keywords = Iterables.toArray(keywordList, String.class);
    return keywords;
}
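Since getList already returns a List, keywordList.toArray(new String[0]) would do the same job; the Guava form reads uniformly with the other call sites on this page and would keep working if the source were only an Iterable.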
From source file:org.polarsys.reqcycle.repository.ui.providers.DummyInputContentProvider.java
@Override
public Object[] getChildren(final Object object) {
    if (object instanceof DummyInput) {
        Object[] children = Collections2
                .transform(((DummyInput) object).getInput(), new Function<RequirementSource, DummyObject>() {
                    @Override
                    public DummyObject apply(RequirementSource reqSource) {
                        return new DummyObject(((DummyInput) object).getPredicate(), reqSource);
                    }
                }).toArray();
        return children;
    }
    if (object instanceof DummyObject) {
        final DummyObject dummyObject = (DummyObject) object;
        EObject obj = dummyObject.getEobj();
        Collection<AbstractElement> elements = Collections.emptyList();
        if (obj instanceof RequirementSource) {
            elements = ((RequirementSource) obj).getRequirements();
        }
        if (obj instanceof Section) {
            elements = ((Section) obj).getChildren();
        }
        Collection<DummyObject> transform = Collections2.transform(elements,
                new Function<EObject, DummyObject>() {
                    @Override
                    public DummyObject apply(EObject eObj) {
                        IPredicate predicate = dummyObject.getPredicate();
                        DummyObject dObj = new DummyObject(predicate, eObj);
                        if (dObj.getEobj() instanceof Section && !(dObj.getEobj() instanceof SimpleRequirement)) {
                            // Do not apply the predicate filter to sections that are not requirements.
                            return dObj;
                        }
                        if (predicate != null) {
                            return predicate.match(eObj) ? dObj : null;
                        } else {
                            return dObj;
                        }
                    }
                });
        Iterable<DummyObject> result = Iterables.filter(transform, Predicates.notNull());
        return Iterables.toArray(result, DummyObject.class);
    }
    return super.getChildren(object);
}
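Collections2.transform and Iterables.filter both return lazy views, so nothing in this chain is computed until something iterates it; the Iterables.toArray call at the end is what forces evaluation and materializes the surviving non-null DummyObject elements into a typed array.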
From source file:io.prestosql.plugin.accumulo.io.AccumuloRecordSet.java
/**
 * Gets the scanner authorizations to use for scanning tables.
 * <p>
 * In order of priority: session username authorizations, then table property, then the default connector auths.
 *
 * @param session Current session
 * @param split Accumulo split
 * @param connector Accumulo connector
 * @param username Accumulo username
 * @return Scan authorizations
 * @throws AccumuloException If a generic Accumulo error occurs
 * @throws AccumuloSecurityException If a security exception occurs
 */
private static Authorizations getScanAuthorizations(ConnectorSession session, AccumuloSplit split,
        Connector connector, String username) throws AccumuloException, AccumuloSecurityException {
    String sessionScanUser = AccumuloSessionProperties.getScanUsername(session);
    if (sessionScanUser != null) {
        Authorizations scanAuths = connector.securityOperations().getUserAuthorizations(sessionScanUser);
        LOG.debug("Using session scanner auths for user %s: %s", sessionScanUser, scanAuths);
        return scanAuths;
    }

    Optional<String> scanAuths = split.getScanAuthorizations();
    if (scanAuths.isPresent()) {
        Authorizations auths = new Authorizations(
                Iterables.toArray(COMMA_SPLITTER.split(scanAuths.get()), String.class));
        LOG.debug("scan_auths table property set: %s", auths);
        return auths;
    } else {
        Authorizations auths = connector.securityOperations().getUserAuthorizations(username);
        LOG.debug("scan_auths table property not set, using user auths: %s", auths);
        return auths;
    }
}
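COMMA_SPLITTER is presumably a Splitter.on(',') constant defined elsewhere in the class; Splitter produces an Iterable<String>, and Iterables.toArray adapts it to the String[] that the Authorizations constructor accepts.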
From source file:com.samskivert.depot.Ops.java
/**
 * Multiplies the supplied expressions together.
 */
public static <T extends Number> FluentExp<T> mul(Iterable<SQLExpression<T>> exprs) {
    return new Mul<T>(Iterables.toArray(exprs, SQLExpression.class));
}
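Note the raw class literal: Java has no class token for the parameterized type SQLExpression<T>, so SQLExpression.class is passed and the result is a raw SQLExpression[]. Handing that to the generic Mul<T> constructor relies on an unchecked conversion, which is the standard workaround when the array's element type is itself generic.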
From source file:co.cask.cdap.app.runtime.spark.submit.AbstractSparkSubmitter.java
@Override
public final <V> ListenableFuture<V> submit(final SparkRuntimeContext runtimeContext,
        final SparkExecutionContextFactory contextFactory, Map<String, String> configs,
        List<LocalizeResource> resources, File jobJar, final V result) {
    final SparkSpecification spec = runtimeContext.getSparkSpecification();
    final List<String> args = createSubmitArguments(spec, configs, resources, jobJar);

    // Spark submit is called from this executor.
    // Using an executor simplifies the logic needed to interrupt the running thread on stopping.
    final ExecutorService executor = Executors.newSingleThreadExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "spark-submitter-" + spec.getName() + "-" + runtimeContext.getRunId());
        }
    });

    // Latch for the Spark job completion
    final CountDownLatch completion = new CountDownLatch(1);
    final SparkJobFuture<V> resultFuture = new SparkJobFuture<V>(runtimeContext) {
        @Override
        protected void cancelTask() {
            // Try to shut down the running Spark job.
            triggerShutdown();
            // Wait for the Spark submit to return.
            Uninterruptibles.awaitUninterruptibly(completion);
        }
    };

    // Submit the Spark job
    executor.submit(new Runnable() {
        @Override
        public void run() {
            List<String> extraArgs = beforeSubmit();
            try {
                String[] submitArgs = Iterables.toArray(Iterables.concat(args, extraArgs), String.class);
                submit(runtimeContext, contextFactory, submitArgs);
                onCompleted(true);
                resultFuture.set(result);
            } catch (Throwable t) {
                onCompleted(false);
                resultFuture.setException(t);
            } finally {
                completion.countDown();
            }
        }
    });

    // Shut down the executor right after submit, since the thread is only used for one submission.
    executor.shutdown();
    return resultFuture;
}
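Iterables.concat produces a lazy view over the base arguments and the extra arguments without copying either list; toArray then flattens that view into the single String[] passed on to the underlying submit call.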
From source file:org.apache.james.utils.GuiceServerProbe.java
@Override
public String[] listUsers() throws Exception {
    return Iterables.toArray(ImmutableList.copyOf(usersRepository.list()), String.class);
}
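ImmutableList.copyOf does the adapting here: usersRepository.list() evidently yields an Iterator or similar one-shot source, and copyOf materializes it into an Iterable that toArray can consume.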
From source file:com.b2international.index.compat.Highlighting.java
public static String[] getSuffixes(final String queryExpression, final String label) {
    final Splitter tokenSplitter = Splitter.on(TextConstants.WHITESPACE_OR_DELIMITER_MATCHER)
            .omitEmptyStrings();
    final List<String> filterTokens = tokenSplitter.splitToList(queryExpression.toLowerCase());
    final boolean spaceAtTheEnd = !queryExpression.isEmpty()
            && Character.isWhitespace(queryExpression.charAt(queryExpression.length() - 1));
    final String lowerCaseLabel = label.toLowerCase();
    final Iterable<String> labelTokens = tokenSplitter.split(lowerCaseLabel);
    final List<String> elementSuffixes = Lists.newArrayList();

    for (final String labelToken : labelTokens) {
        final Iterator<String> itr = filterTokens.iterator();
        while (itr.hasNext()) {
            final String filterToken = itr.next();
            if (labelToken.startsWith(filterToken)) {
                // Last filter token? Also add the suffix, unless it is already present in the filter
                // and there is no whitespace at the end of it.
                if (!itr.hasNext() && !filterTokens.contains(labelToken) && !spaceAtTheEnd) {
                    elementSuffixes.add(labelToken.substring(filterToken.length()));
                }
            }
        }
        // If there is whitespace at the end, add complete word suggestions as well.
        if (shouldSuggest(filterTokens, labelToken) && spaceAtTheEnd) {
            elementSuffixes.add(labelToken);
        }
    }
    return Iterables.toArray(elementSuffixes, String.class);
}
From source file:com.sector91.wit.server.WebServer.java
public WebServer(ServerConfig config) {
    this.port = config.httpPort;
    this.threads = config.threads;
    this.responderFor404 = config.responderFor404;
    this.bus = config.bus;
    bus.register(this);
    if (config.threads == 0) {
        this.executor = Executors.newCachedThreadPool();
    } else {
        this.executor = Executors.newFixedThreadPool(config.threads);
    }
    for (WebAppEntry entry : config.apps) {
        try {
            installWebApp(entry.app.getClass().getSimpleName(),
                    entry.prefix.isPresent()
                            ? Iterables.toArray(
                                    Splitter.on('/').omitEmptyStrings().split(entry.prefix.get()), String.class)
                            : new String[0],
                    entry.app);
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt();
        }
    }
    Log.setLogger(config.logConfig);
}
From source file:org.apache.jackrabbit.oak.plugins.index.lucene.IndexTracker.java
private synchronized void diffAndUpdate(final NodeState root) {
    Map<String, IndexNode> original = indices;
    final Map<String, IndexNode> updates = newHashMap();

    List<Editor> editors = newArrayListWithCapacity(original.size());
    for (Map.Entry<String, IndexNode> entry : original.entrySet()) {
        final String path = entry.getKey();
        editors.add(new SubtreeEditor(new DefaultEditor() {
            @Override
            public void leave(NodeState before, NodeState after) {
                try {
                    long start = PERF_LOGGER.start();
                    IndexNode index = IndexNode.open(path, root, after, cloner);
                    PERF_LOGGER.end(start, -1, "[{}] Index found to be updated. Reopening the IndexNode", path);
                    updates.put(path, index); // index can be null
                } catch (IOException e) {
                    log.error("Failed to open Lucene index at " + path, e);
                }
            }
        }, Iterables.toArray(PathUtils.elements(path), String.class)));
    }

    EditorDiff.process(CompositeEditor.compose(editors), this.root, root);
    this.root = root;

    if (!updates.isEmpty()) {
        indices = ImmutableMap.<String, IndexNode>builder()
                .putAll(filterKeys(original, not(in(updates.keySet()))))
                .putAll(filterValues(updates, notNull()))
                .build();

        // Closing might take some time, as close needs to acquire the write lock,
        // which might be held by currently running searches. Given that the tracker
        // is now invoked from a BackgroundObserver, this is not a high concern.
        for (String path : updates.keySet()) {
            IndexNode index = original.get(path);
            try {
                index.close();
            } catch (IOException e) {
                log.error("Failed to close Lucene index at " + path, e);
            }
        }
    }
}
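PathUtils.elements(path) exposes the path's segments as an Iterable<String>, and toArray turns them into the String[] that the SubtreeEditor constructor takes, scoping the DefaultEditor to the subtree at that path.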
From source file:com.github.cassandra.jdbc.CassandraDriver.java
public DriverPropertyInfo[] getPropertyInfo(String url, Properties props) throws SQLException {
    List<DriverPropertyInfo> list = new ArrayList<DriverPropertyInfo>();
    for (Map.Entry<String, Object> entry : CassandraConfiguration.DEFAULT.toSortedMap().entrySet()) {
        String key = entry.getKey();
        list.add(createDriverPropertyInfo(key, entry.getValue()));
    }
    return Iterables.toArray(list, DriverPropertyInfo.class);
}
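The same shape as the first example: accumulate a List, then let Iterables.toArray allocate the DriverPropertyInfo[] that the JDBC API requires. In every snippet on this page, the Class token in the second argument is what allows a correctly typed array to be created at runtime, something erased generics cannot do on their own.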