Example usage for java.util Queue remove

List of usage examples for java.util Queue remove

Introduction

On this page you can find usage examples for java.util Queue remove().

Prototype

E remove();

Document

Retrieves and removes the head of this queue.
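
If the queue is empty, remove() throws a NoSuchElementException; the otherwise-identical poll() returns null instead. A minimal sketch of that contract (class name and queue contents are illustrative):

import java.util.LinkedList;
import java.util.NoSuchElementException;
import java.util.Queue;

public class QueueRemoveDemo {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<String>();
        queue.add("first");
        queue.add("second");

        // remove() retrieves and removes the head; LinkedList gives FIFO order.
        System.out.println(queue.remove()); // prints "first"
        System.out.println(queue.remove()); // prints "second"

        // On an empty queue, poll() returns null while remove() throws.
        System.out.println(queue.poll());   // prints "null"
        try {
            queue.remove();
        } catch (NoSuchElementException e) {
            System.out.println("remove() on an empty queue threw NoSuchElementException");
        }
    }
}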

Usage

From source file: edu.emory.cci.aiw.umls.UMLSDatabaseConnection.java

@Override
public int getDistBF(ConceptUID cui1, ConceptUID cui2, String rela, SAB sab, int maxR)
        throws UMLSQueryException {
    Queue<ConceptUID> cuiQue = new LinkedList<ConceptUID>();
    Set<ConceptUID> visited = new HashSet<ConceptUID>();
    Map<Integer, Integer> radiusIdx = new HashMap<Integer, Integer>();
    int queIdx = 0;
    int r = 0;
    radiusIdx.put(r, 0);

    if (maxR <= 0) {
        maxR = 3;
    }

    try {
        setupConn();
        cuiQue.add(cui1);
        visited.add(cui1);

        List<UMLSQuerySearchUID> params = new ArrayList<UMLSQuerySearchUID>();
        StringBuilder sql = new StringBuilder(
                "select distinct(CUI2) from MRREL where CUI1 = ? and (rel='PAR' or rel='CHD')");
        params.add(ConceptUID.EMPTY_CUI);
        if (sab != null) {
            sql.append(" and SAB = ?");
            params.add(sab);
        }
        if (rela != null && !rela.equals("")) {
            sql.append(" and RELA = ?");
            params.add(UMLSQueryStringValue.fromString(rela));
        }

        while (!cuiQue.isEmpty()) {
            ConceptUID node = cuiQue.remove();
            params.set(0, node);
            if (node.equals(cui2)) {
                return r;
            }

            List<ConceptUID> adjNodes = new ArrayList<ConceptUID>();

            ResultSet rs = executeAndLogQuery(substParams(sql.toString(), params));
            while (rs.next()) {
                ConceptUID c2 = ConceptUID.fromString(rs.getString(1));
                if (!visited.contains(c2)) {
                    adjNodes.add(c2);
                }
            }

            if (!radiusIdx.containsKey(r + 1)) {
                radiusIdx.put(r + 1, queIdx + cuiQue.size());
            }
            // Note: this put unconditionally overwrites the frontier index stored just
            // above with the neighbor count; the radius bookkeeping looks like a bug in
            // the original source and is kept here as-is.
            radiusIdx.put(r + 1, adjNodes.size());

            if (queIdx == radiusIdx.get(r)) {
                r++;
            }
            queIdx++;

            for (ConceptUID c : adjNodes) {
                visited.add(c);
                cuiQue.add(c);
            }
            if (r > maxR) {
                return r;
            }
        }
    } catch (SQLException sqle) {
        throw new UMLSQueryException(sqle);
    } catch (MalformedUMLSUniqueIdentifierException muuie) {
        throw new UMLSQueryException(muuie);
    } finally {
        tearDownConn();
    }

    log(Level.FINEST, "Returning -1");
    return -1;
}
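
The method above is a breadth-first search over the UMLS PAR/CHD graph: remove() pops the head of the FIFO frontier, and unvisited neighbors are appended at the tail. Stripped of the database specifics, the pattern reduces to the sketch below (a hypothetical distillation, not code from the project; tracking a depth per node also sidesteps the fragile radiusIdx bookkeeping noted above):

import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;

public class BfsSketch {
    /** Returns the BFS distance from start to goal, or -1 if unreachable within maxDepth. */
    static <N> int bfsDistance(N start, N goal, Map<N, List<N>> adjacency, int maxDepth) {
        Queue<N> frontier = new LinkedList<N>();
        Map<N, Integer> depth = new HashMap<N, Integer>(); // doubles as the visited set
        frontier.add(start);
        depth.put(start, 0);
        while (!frontier.isEmpty()) {
            N node = frontier.remove(); // head of the FIFO frontier
            int d = depth.get(node);
            if (node.equals(goal)) {
                return d;
            }
            if (d >= maxDepth) {
                continue; // do not expand past the radius limit
            }
            for (N next : adjacency.getOrDefault(node, Collections.<N>emptyList())) {
                if (!depth.containsKey(next)) {
                    depth.put(next, d + 1);
                    frontier.add(next);
                }
            }
        }
        return -1;
    }
}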

From source file: org.apache.hadoop.hive.ql.MultiDriver.java

private int createcachetable(ArrayList<Pair<String, Configuration>> multiCmds)
        throws CommandNeedRetryException {

    int ret;
    synchronized (compileMonitor) {
        ret = CreateTableCompile(multiCmds, true);
    }

    boolean requireLock = false;
    boolean ckLock = checkLockManager();

    if (ckLock) {
        boolean lockOnlyMapred = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_LOCK_MAPRED_ONLY);
        if (lockOnlyMapred) {
            Queue<Task<? extends Serializable>> taskQueue = new LinkedList<Task<? extends Serializable>>();
            taskQueue.addAll(plan.getRootTasks());
            while (taskQueue.peek() != null) {
                Task<? extends Serializable> tsk = taskQueue.remove();
                requireLock = requireLock || tsk.requireLock();
                if (requireLock) {
                    break;
                }
                if (tsk instanceof ConditionalTask) {
                    taskQueue.addAll(((ConditionalTask) tsk).getListTasks());
                }
                if (tsk.getChildTasks() != null) {
                    taskQueue.addAll(tsk.getChildTasks());
                }
                // Do not add the backup task here, because the backup task should be of
                // the same type as the original task.
            }
        } else {
            requireLock = true;
        }
    }

    if (requireLock) {
        ret = acquireReadWriteLocks();
        if (ret != 0) {
            releaseLocks(ctx.getHiveLocks());
            //  return new CommandProcessorResponse(ret, errorMessage, SQLState);
        }
    }

    ret = multiExecute();

    return ret;

}
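
The same peek()/remove() walk over the task DAG recurs in the next two MultiDriver examples. Condensed to its essentials it looks like the following sketch (HiveTask is a hypothetical stand-in for Hive's Task<? extends Serializable>; the ConditionalTask branch is omitted, and, like the original, tasks reachable through several parents are not deduplicated):

import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

public class LockCheckSketch {
    /** Hypothetical stand-in for Hive's Task<? extends Serializable>. */
    interface HiveTask {
        boolean requireLock();
        List<HiveTask> getChildTasks(); // may return null
    }

    static boolean anyTaskRequiresLock(Collection<? extends HiveTask> rootTasks) {
        Queue<HiveTask> taskQueue = new LinkedList<HiveTask>(rootTasks);
        while (taskQueue.peek() != null) {
            HiveTask tsk = taskQueue.remove(); // safe: peek() just confirmed a head exists
            if (tsk.requireLock()) {
                return true; // short-circuits, mirroring the break above
            }
            if (tsk.getChildTasks() != null) {
                taskQueue.addAll(tsk.getChildTasks());
            }
        }
        return false;
    }
}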

From source file: org.apache.hadoop.hive.ql.MultiDriver.java

private CommandProcessorResponse runInternal(ArrayList<Pair<String, Configuration>> multiCmds)
        throws CommandNeedRetryException {
    errorMessage = null;
    SQLState = null;
    downstreamError = null;

    if (!validateConfVariables()) {
        return new CommandProcessorResponse(12, errorMessage, SQLState);
    }

    // Reset the perf logger
    PerfLogger perfLogger = PerfLogger.getPerfLogger(true);
    perfLogger.PerfLogBegin(LOG, PerfLogger.MULTIDRIVER_RUN);
    perfLogger.PerfLogBegin(LOG, PerfLogger.TIME_TO_SUBMIT);
    //createcachetable(multiCmds);

    int ret;
    synchronized (compileMonitor) {
        ret = multiCompile(multiCmds);
    }

    if (ret != 0) {
        for (int key = 0; key < multiPctx.size(); key++) {
            Context ctx = multiPctx.get(key).getContext();
            releaseLocks(ctx.getHiveLocks());
        }
        return new CommandProcessorResponse(ret, errorMessage, SQLState);
    }

    boolean requireLock = false;
    boolean ckLock = checkLockManager();

    if (ckLock) {
        boolean lockOnlyMapred = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_LOCK_MAPRED_ONLY);
        if (lockOnlyMapred) {
            Queue<Task<? extends Serializable>> taskQueue = new LinkedList<Task<? extends Serializable>>();
            taskQueue.addAll(plan.getRootTasks());
            while (taskQueue.peek() != null) {
                Task<? extends Serializable> tsk = taskQueue.remove();
                requireLock = requireLock || tsk.requireLock();
                if (requireLock) {
                    break;
                }
                if (tsk instanceof ConditionalTask) {
                    taskQueue.addAll(((ConditionalTask) tsk).getListTasks());
                }
                if (tsk.getChildTasks() != null) {
                    taskQueue.addAll(tsk.getChildTasks());
                }
                // Do not add the backup task here, because the backup task should be of
                // the same type as the original task.
            }
        } else {
            requireLock = true;
        }
    }

    if (requireLock) {
        ret = acquireReadWriteLocks();
        if (ret != 0) {
            releaseLocks(ctx.getHiveLocks());
            //  return new CommandProcessorResponse(ret, errorMessage, SQLState);
        }
    }
    boolean isexplain = ctx.getExplain();
    // if(isexplain){
    // multiOutputexplain();
    // }else{
    // reserved function
    ret = multiExecute();

    if (ret != 0) {
        //if needRequireLock is false, the release here will do nothing because there is no lock
        releaseLocks(ctx.getHiveLocks());
        return new CommandProcessorResponse(ret, errorMessage, SQLState);
    }
    multiOutputResult();

    //if needRequireLock is false, the release here will do nothing because there is no lock
    releaseLocks(ctx.getHiveLocks());

    for (int key = 0; key < multiPctx.size(); key++) {
        Context ctx = multiPctx.get(key).getContext();
        releaseLocks(ctx.getHiveLocks());
    }

    multiPctx.clear();
    perfLogger.PerfLogEnd(LOG, PerfLogger.MULTIDRIVER_RUN);
    perfLogger.close(LOG, plan);

    return new CommandProcessorResponse(ret);
}

From source file: org.apache.hadoop.hive.ql.MultiDriver.java

private int multipreoptimizetest() throws CommandNeedRetryException {
    int i;
    PerfLogger perfLogger = PerfLogger.getPerfLogger();

    for (i = 0; i < cmds.size(); i++) {
        TaskFactory.resetId();
        ParseContext pCtx = multiPctx.get(i);
        //  conf=(HiveConf)confs.get(i);
        conf = pCtx.getConf();
        ctx = pCtx.getContext();

        LOG.info("Before  MultidoPhase2forTest Optree:\n" + Operator.toString(pCtx.getTopOps().values()));
        // do Optimizer  gen MR task
        SemanticAnalyzer sem;
        try {
            sem = new SemanticAnalyzer(conf);
            sem.MultidoPhase2forTest(pCtx);
            sem.validate();

            plan = new QueryPlan(cmds.get(i), sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN));

            if (false) { // dead code in the original: query-plan serialization is disabled
                String queryPlanFileName = ctx.getLocalScratchDir(true) + Path.SEPARATOR_CHAR + "queryplan.xml";
                LOG.info("query plan = " + queryPlanFileName);
                queryPlanFileName = new Path(queryPlanFileName).toUri().getPath();

                // serialize the queryPlan
                FileOutputStream fos = new FileOutputStream(queryPlanFileName);
                Utilities.serializeObject(plan, fos);
                fos.close();
            }

            // initialize FetchTask right here
            if (plan.getFetchTask() != null) {
                plan.getFetchTask().initialize(conf, plan, null);
            }

            // get the output schema
            schema = schemas.get(i);

        } catch (Exception e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }

        boolean requireLock = false;
        boolean ckLock = checkLockManager();

        if (ckLock) {
            boolean lockOnlyMapred = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_LOCK_MAPRED_ONLY);
            if (lockOnlyMapred) {
                Queue<Task<? extends Serializable>> taskQueue = new LinkedList<Task<? extends Serializable>>();
                taskQueue.addAll(plan.getRootTasks());
                while (taskQueue.peek() != null) {
                    Task<? extends Serializable> tsk = taskQueue.remove();
                    requireLock = requireLock || tsk.requireLock();
                    if (requireLock) {
                        break;
                    }
                    if (tsk instanceof ConditionalTask) {
                        taskQueue.addAll(((ConditionalTask) tsk).getListTasks());
                    }
                    if (tsk.getChildTasks() != null) {
                        taskQueue.addAll(tsk.getChildTasks());
                    }
                    // Do not add the backup task here, because the backup task should be
                    // of the same type as the original task.
                }
            } else {
                requireLock = true;
            }
        }
        int ret;
        if (requireLock) {
            ret = acquireReadWriteLocks();
            if (ret != 0) {
                releaseLocks(ctx.getHiveLocks());
                //  return new CommandProcessorResponse(ret, errorMessage, SQLState);
            }
        }

        ret = execute();
        if (ret != 0) {
            //if needRequireLock is false, the release here will do nothing because there is no lock
            releaseLocks(ctx.getHiveLocks());
            //  return new CommandProcessorResponse(ret, errorMessage, SQLState);
        }

        //if needRequireLock is false, the release here will do nothing because there is no lock
        releaseLocks(ctx.getHiveLocks());

        //test output
        SessionState ss = SessionState.get();
        PrintStream out = ss.out;
        ArrayList<String> res = new ArrayList<String>();
        LOG.info("Output the result of query ID(" + i + "):");
        printHeader(this, out);
        int counter = 0;
        try {
            while (this.getResults(res)) {
                for (String r : res) {
                    out.println(r);
                }
                counter += res.size();
                res.clear();
                if (out.checkError()) {
                    break;
                }
            }
        } catch (IOException e) {
            console.printError("Failed with exception " + e.getClass().getName() + ":" + e.getMessage(),
                    "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
            ret = 1;
        }

    }

    return 0;
}

From source file: co.paralleluniverse.galaxy.core.Cache.java

private void receiveShortCircuit() {
    Queue<Message> ms = this.shortCircuitMessage.get();
    if (ms != null) {
        while (!ms.isEmpty()) {
            Message m = ms.remove();
            receive1(m);
        }
    }
    this.shortCircuitMessage.remove();
}

From source file: org.springframework.data.redis.connection.DefaultStringRedisConnection.java

@SuppressWarnings({ "unchecked", "rawtypes" })
private List<Object> convertResults(List<Object> results, Queue<Converter> converters) {
    if (!deserializePipelineAndTxResults || results == null) {
        return results;
    }
    if (results.size() != converters.size()) {
        // Some of the commands were done directly on the delegate, don't attempt to convert
        log.warn("Delegate returned an unexpected number of results. Abandoning type conversion.");
        return results;
    }
    List<Object> convertedResults = new ArrayList<Object>();
    for (Object result : results) {
        convertedResults.add(converters.remove().convert(result));
    }
    return convertedResults;
}
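
Here remove() consumes one converter per result, relying on the two collections having the same length; the size check above guards that lockstep. A self-contained miniature of the pattern (the Converter interface below is a minimal stand-in for Spring's, and the sample converter is illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

public class LockstepConvertSketch {
    interface Converter<S, T> { T convert(S source); } // minimal stand-in for Spring's Converter

    static List<Object> convertResults(List<Object> results, Queue<Converter<Object, Object>> converters) {
        if (results.size() != converters.size()) {
            return results; // sizes diverged: skip conversion, as the example above does
        }
        List<Object> converted = new ArrayList<Object>();
        for (Object result : results) {
            // remove() pairs the i-th result with the i-th queued converter.
            converted.add(converters.remove().convert(result));
        }
        return converted;
    }

    public static void main(String[] args) {
        Queue<Converter<Object, Object>> converters = new LinkedList<Converter<Object, Object>>();
        converters.add(new Converter<Object, Object>() {
            public Object convert(Object source) { return String.valueOf(source).toUpperCase(); }
        });
        System.out.println(convertResults(Arrays.<Object>asList("ok"), converters)); // [OK]
    }
}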

From source file: com.intel.ssg.dcst.panthera.parse.SkinDriver.java

public int execute() throws CommandNeedRetryException {
    PerfLogger perfLogger = PerfLogger.getPerfLogger();
    perfLogger.PerfLogBegin(LOG, PerfLogger.DRIVER_EXECUTE);

    boolean noName = StringUtils.isEmpty(conf.getVar(HiveConf.ConfVars.HADOOPJOBNAME));
    int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);

    String queryId = plan.getQueryId();
    String queryStr = plan.getQueryStr();

    conf.setVar(HiveConf.ConfVars.HIVEQUERYID, queryId);
    conf.setVar(HiveConf.ConfVars.HIVEQUERYSTRING, queryStr);

    conf.set("mapreduce.workflow.id", "hive_" + queryId);
    conf.set("mapreduce.workflow.name", queryStr);

    maxthreads = HiveConf.getIntVar(conf, HiveConf.ConfVars.EXECPARALLETHREADNUMBER);

    try {
        LOG.info("Starting command: " + queryStr);

        plan.setStarted();

        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().startQuery(queryStr,
                    conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
            SessionState.get().getHiveHistory().logPlanProgress(plan);
        }
        resStream = null;

        HookContext hookContext = new HookContext(plan, conf, ctx.getPathToCS());
        hookContext.setHookType(HookContext.HookType.PRE_EXEC_HOOK);

        for (Hook peh : getHooks(HiveConf.ConfVars.PREEXECHOOKS)) {
            if (peh instanceof ExecuteWithHookContext) {
                perfLogger.PerfLogBegin(LOG, PerfLogger.PRE_HOOK + peh.getClass().getName());

                ((ExecuteWithHookContext) peh).run(hookContext);

                perfLogger.PerfLogEnd(LOG, PerfLogger.PRE_HOOK + peh.getClass().getName());
            } else if (peh instanceof PreExecute) {
                perfLogger.PerfLogBegin(LOG, PerfLogger.PRE_HOOK + peh.getClass().getName());

                ((PreExecute) peh).run(SessionState.get(), plan.getInputs(), plan.getOutputs(),
                        ShimLoader.getHadoopShims().getUGIForConf(conf));

                perfLogger.PerfLogEnd(LOG, PerfLogger.PRE_HOOK + peh.getClass().getName());
            }
        }

        int jobs = Utilities.getMRTasks(plan.getRootTasks()).size();
        if (jobs > 0) {
            console.printInfo("Total MapReduce jobs = " + jobs);
        }
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_NUM_TASKS,
                    String.valueOf(jobs));
            SessionState.get().getHiveHistory().setIdToTableMap(plan.getIdToTableNameMap());
        }
        String jobname = Utilities.abbreviate(queryStr, maxlen - 6);

        // A runtime that launches runnable tasks as separate Threads through
        // TaskRunners
        // As soon as a task isRunnable, it is put in a queue
        // At any time, at most maxthreads tasks can be running
        // The main thread polls the TaskRunners to check if they have finished.

        Queue<Task<? extends Serializable>> runnable = new ConcurrentLinkedQueue<Task<? extends Serializable>>();
        Map<TaskResult, TaskRunner> running = new HashMap<TaskResult, TaskRunner>();

        DriverContext driverCxt = new DriverContext(runnable, ctx);
        ctx.setHDFSCleanup(true);

        SessionState.get().setLastMapRedStatsList(new ArrayList<MapRedStats>());
        SessionState.get().setStackTraces(new HashMap<String, List<List<String>>>());
        SessionState.get().setLocalMapRedErrors(new HashMap<String, List<String>>());

        // Add root Tasks to runnable
        for (Task<? extends Serializable> tsk : plan.getRootTasks()) {
            // This should never happen, if it does, it's a bug with the potential to produce
            // incorrect results.
            assert tsk.getParentTasks() == null || tsk.getParentTasks().isEmpty();
            driverCxt.addToRunnable(tsk);
        }

        perfLogger.PerfLogEnd(LOG, PerfLogger.TIME_TO_SUBMIT);
        perfLogger.PerfLogBegin(LOG, PerfLogger.RUN_TASKS);
        // Loop while you either have tasks running, or tasks queued up
        while (running.size() != 0 || runnable.peek() != null) {
            // Launch up to maxthreads tasks
            while (runnable.peek() != null && running.size() < maxthreads) {
                Task<? extends Serializable> tsk = runnable.remove();
                perfLogger.PerfLogBegin(LOG, PerfLogger.TASK + tsk.getName() + "." + tsk.getId());
                launchTask(tsk, queryId, noName, running, jobname, jobs, driverCxt);
            }

            // poll the Tasks to see which one completed
            TaskResult tskRes = pollTasks(running.keySet());
            TaskRunner tskRun = running.remove(tskRes);
            Task<? extends Serializable> tsk = tskRun.getTask();
            perfLogger.PerfLogEnd(LOG, PerfLogger.TASK + tsk.getName() + "." + tsk.getId());
            hookContext.addCompleteTask(tskRun);

            int exitVal = tskRes.getExitVal();
            if (exitVal != 0) {
                if (tsk.ifRetryCmdWhenFail()) {
                    if (!running.isEmpty()) {
                        taskCleanup(running);
                    }
                    // in case we decided to run everything in local mode, restore
                    // the jobtracker setting to its initial value
                    ctx.restoreOriginalTracker();
                    throw new CommandNeedRetryException();
                }
                Task<? extends Serializable> backupTask = tsk.getAndInitBackupTask();
                if (backupTask != null) {
                    setErrorMsgAndDetail(exitVal, tskRes.getTaskError(), tsk);
                    console.printError(errorMessage);
                    errorMessage = "ATTEMPT: Execute BackupTask: " + backupTask.getClass().getName();
                    console.printError(errorMessage);

                    // add backup task to runnable
                    if (DriverContext.isLaunchable(backupTask)) {
                        driverCxt.addToRunnable(backupTask);
                    }
                    continue;

                } else {
                    hookContext.setHookType(HookContext.HookType.ON_FAILURE_HOOK);
                    // Get all the failure execution hooks and execute them.
                    for (Hook ofh : getHooks(HiveConf.ConfVars.ONFAILUREHOOKS)) {
                        perfLogger.PerfLogBegin(LOG, PerfLogger.FAILURE_HOOK + ofh.getClass().getName());

                        ((ExecuteWithHookContext) ofh).run(hookContext);

                        perfLogger.PerfLogEnd(LOG, PerfLogger.FAILURE_HOOK + ofh.getClass().getName());
                    }
                    setErrorMsgAndDetail(exitVal, tskRes.getTaskError(), tsk);
                    SQLState = "08S01";
                    console.printError(errorMessage);
                    if (!running.isEmpty()) {
                        taskCleanup(running);
                    }
                    // in case we decided to run everything in local mode, restore
                    // the jobtracker setting to its initial value
                    ctx.restoreOriginalTracker();
                    return exitVal;
                }
            }

            if (SessionState.get() != null) {
                SessionState.get().getHiveHistory().setTaskProperty(queryId, tsk.getId(), Keys.TASK_RET_CODE,
                        String.valueOf(exitVal));
                SessionState.get().getHiveHistory().endTask(queryId, tsk);
            }

            if (tsk.getChildTasks() != null) {
                for (Task<? extends Serializable> child : tsk.getChildTasks()) {
                    if (DriverContext.isLaunchable(child)) {
                        driverCxt.addToRunnable(child);
                    }
                }
            }
        }
        perfLogger.PerfLogEnd(LOG, PerfLogger.RUN_TASKS);

        // in case we decided to run everything in local mode, restore
        // the jobtracker setting to its initial value
        ctx.restoreOriginalTracker();

        // Remove incomplete outputs.
        // Some incomplete outputs may be added at the beginning, e.g. for dynamic
        // partitions; remove them.
        HashSet<WriteEntity> remOutputs = new HashSet<WriteEntity>();
        for (WriteEntity output : plan.getOutputs()) {
            if (!output.isComplete()) {
                remOutputs.add(output);
            }
        }

        for (WriteEntity output : remOutputs) {
            plan.getOutputs().remove(output);
        }

        hookContext.setHookType(HookContext.HookType.POST_EXEC_HOOK);
        // Get all the post execution hooks and execute them.
        for (Hook peh : getHooks(HiveConf.ConfVars.POSTEXECHOOKS)) {
            if (peh instanceof ExecuteWithHookContext) {
                perfLogger.PerfLogBegin(LOG, PerfLogger.POST_HOOK + peh.getClass().getName());

                ((ExecuteWithHookContext) peh).run(hookContext);

                perfLogger.PerfLogEnd(LOG, PerfLogger.POST_HOOK + peh.getClass().getName());
            } else if (peh instanceof PostExecute) {
                perfLogger.PerfLogBegin(LOG, PerfLogger.POST_HOOK + peh.getClass().getName());

                ((PostExecute) peh).run(SessionState.get(), plan.getInputs(), plan.getOutputs(),
                        (SessionState.get() != null ? SessionState.get().getLineageState().getLineageInfo()
                                : null),
                        ShimLoader.getHadoopShims().getUGIForConf(conf));

                perfLogger.PerfLogEnd(LOG, PerfLogger.POST_HOOK + peh.getClass().getName());
            }
        }

        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE,
                    String.valueOf(0));
            SessionState.get().getHiveHistory().printRowCount(queryId);
        }
    } catch (CommandNeedRetryException e) {
        throw e;
    } catch (Exception e) {
        ctx.restoreOriginalTracker();
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE,
                    String.valueOf(12));
        }
        // TODO: do better with handling types of Exception here
        errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
        SQLState = "08S01";
        downstreamError = e;
        console.printError(errorMessage + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return (12);
    } finally {
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().endQuery(queryId);
        }
        if (noName) {
            conf.setVar(HiveConf.ConfVars.HADOOPJOBNAME, "");
        }
        perfLogger.PerfLogEnd(LOG, PerfLogger.DRIVER_EXECUTE);

        if (SessionState.get().getLastMapRedStatsList() != null
                && SessionState.get().getLastMapRedStatsList().size() > 0) {
            long totalCpu = 0;
            console.printInfo("MapReduce Jobs Launched: ");
            for (int i = 0; i < SessionState.get().getLastMapRedStatsList().size(); i++) {
                console.printInfo("Job " + i + ": " + SessionState.get().getLastMapRedStatsList().get(i));
                totalCpu += SessionState.get().getLastMapRedStatsList().get(i).getCpuMSec();
            }
            console.printInfo("Total MapReduce CPU Time Spent: " + Utilities.formatMsecToStr(totalCpu));
        }
    }
    plan.setDone();

    if (SessionState.get() != null) {
        try {
            SessionState.get().getHiveHistory().logPlanProgress(plan);
        } catch (Exception e) {
        }
    }
    console.printInfo("OK");

    return (0);
}
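
The scheduling core of execute() is the nested loop above: while tasks are running or queued, remove() launches queued tasks until maxthreads are in flight, then the driver polls for a completion and enqueues any launchable children. A stripped-down, hypothetical sketch of that loop shape (the nested types and the launch/poll helpers are placeholders, not Hive API):

import java.util.HashSet;
import java.util.List;
import java.util.Queue;
import java.util.Set;

public class TaskLoopSketch {
    interface Task { List<Task> children(); }  // stand-in for Task<? extends Serializable>
    interface Runner { Task task(); }          // stand-in for TaskRunner
    interface Driver {
        Runner launch(Task t);                 // placeholder for launchTask(...)
        Runner pollCompleted();                // placeholder for pollTasks(...): blocks for one completion
    }

    static void runAll(Queue<Task> runnable, Driver driver, int maxThreads) {
        Set<Runner> running = new HashSet<Runner>();
        while (!running.isEmpty() || runnable.peek() != null) {
            // Launch queued tasks until maxThreads are in flight.
            while (runnable.peek() != null && running.size() < maxThreads) {
                running.add(driver.launch(runnable.remove()));
            }
            // Wait for one completion, then enqueue its launchable children.
            Runner done = driver.pollCompleted();
            running.remove(done);
            if (done.task().children() != null) {
                runnable.addAll(done.task().children());
            }
        }
    }
}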

From source file: it.geosolutions.geobatch.actions.ds2ds.geoserver.DSGeoServerAction.java

@Override
public Queue<EventObject> execute(Queue<EventObject> events) throws ActionException {

    listenerForwarder.started();

    // return object
    final Queue<EventObject> outputEvents = new LinkedList<EventObject>();

    //check global configurations
    //Geoserver config
    //----------------
    updateTask("Check GeoServer configuration");

    final String url = conf.getGeoserverURL();
    final String user = conf.getGeoserverUID();
    final String password = conf.getGeoserverPWD();
    GeoServerRESTManager gsMan = null;
    try {
        gsMan = new GeoServerRESTManager(new URL(url), user, password);
    } catch (MalformedURLException e) {
        failAction("Wrong GeoServer URL");

    } catch (IllegalArgumentException e) {
        failAction("Unable to create the GeoServer Manager using a null argument");

    }
    //TODO how to check if GS user/password are correct?
    listenerForwarder.progressing(5, "GeoServer configuration checked");

    //Check operation
    //---------------
    updateTask("Check operation");
    String op = conf.getOperation();
    if (op == null || !(op.equalsIgnoreCase("PUBLISH") || op.equalsIgnoreCase("REMOVE"))) {
        failAction("Bad operation: " + op + " in configuration");
    }
    listenerForwarder.progressing(10, "Operation checked");

    //Check WorkSpace
    //---------------
    updateTask("Check workspace configuration");
    String ws = conf.getDefaultNamespace();
    String wsUri = conf.getDefaultNamespaceUri();

    Boolean existWS = false;
    // Note: synchronizing on an autoboxed Boolean (a shared interned object that is
    // also reassigned inside the block) is unreliable; preserved as in the original.
    synchronized (existWS) {
        existWS = gsMan.getReader().getWorkspaceNames().contains(ws);

        if (!existWS) {

            boolean createWS = conf.getCreateNameSpace();
            if (createWS) {
                //try to create the workspace
                updateTask("Create workspace " + ws + " in GeoServer");
                boolean created = false;
                if (wsUri == null) {
                    created = gsMan.getPublisher().createWorkspace(ws);
                } else {
                    try {
                        created = gsMan.getPublisher().createWorkspace(ws, new URI(wsUri));
                    } catch (URISyntaxException e) {
                        failAction("Invalid NameSpace URI " + wsUri + " in configuration");
                    }
                }
                if (!created) {
                    failAction("FATAL: unable to create workspace " + ws + " in GeoServer");
                }
            } else {
                failAction("Bad workspace (namespace): " + ws + " in configuration");
            }
        }
    }

    listenerForwarder.progressing(25, "GeoServer workspace checked");

    //event-based business logic
    while (events.size() > 0) {
        final EventObject ev;
        try {
            if ((ev = events.remove()) != null) {

                updateTask("Working on incoming event: " + ev.getSource());

                updateTask("Check acceptable file");
                FileSystemEvent fileEvent = (FileSystemEvent) ev;

                //set FeatureConfiguration
                updateTask("Set Feature Configuration");
                this.createFeatureConfiguration(fileEvent);
                FeatureConfiguration featureConfig = conf.getFeatureConfiguration();

                //TODO check FeatureConfiguration
                updateTask("Check Feature Configuration");
                if (featureConfig.getTypeName() == null) {
                    failAction("feature typeName cannot be null");
                }

                //TODO check if the typeName already exists for the target workspace?

                //datastore check (and eventually creation)
                updateTask("Check datastore configuration");
                String ds = conf.getStoreName();

                Boolean existDS = false;
                // Note: same caveat as above about synchronizing on an autoboxed Boolean.
                synchronized (existDS) {

                    existDS = gsMan.getReader().getDatastores(ws).getNames().contains(ds);
                    if (!existDS) {
                        boolean createDS = conf.getCreateDataStore();
                        if (createDS) {

                            //create datastore
                            updateTask("Create datastore in GeoServer");
                            Map<String, Object> datastore = this.deserialize(featureConfig.getDataStore());

                            String dbType = (String) datastore.get("dbtype");

                            boolean created = false;
                            if (dbType.equalsIgnoreCase("postgis")) {
                                GSPostGISDatastoreEncoder encoder = new GSPostGISDatastoreEncoder(ds);
                                encoder.setName(ds);
                                encoder.setEnabled(true);
                                encoder.setHost((String) datastore.get("host"));
                                encoder.setPort(Integer.parseInt((String) datastore.get("port")));
                                encoder.setDatabase((String) datastore.get("database"));
                                encoder.setSchema((String) datastore.get("schema"));
                                encoder.setUser((String) datastore.get("user"));
                                encoder.setPassword((String) datastore.get("passwd"));

                                created = gsMan.getStoreManager().create(ws, encoder);
                                if (!created) {
                                    failAction("FATAL: unable to create PostGIS datastore " + ds
                                            + " in GeoServer");
                                }

                            } else if (dbType.equalsIgnoreCase("oracle")) {
                                String dbname = (String) datastore.get("database");
                                GSOracleNGDatastoreEncoder encoder = new GSOracleNGDatastoreEncoder(ds, dbname);
                                encoder.setName(ds);
                                encoder.setEnabled(true);
                                encoder.setHost((String) datastore.get("host"));
                                encoder.setPort(Integer.parseInt((String) datastore.get("port")));
                                encoder.setDatabase(dbname);
                                encoder.setSchema((String) datastore.get("schema"));
                                encoder.setUser((String) datastore.get("user"));
                                encoder.setPassword((String) datastore.get("passwd"));

                                created = gsMan.getStoreManager().create(ws, encoder);
                                if (!created) {
                                    failAction("FATAL: unable to create Oracle NG datastore " + ds
                                            + " in GeoServer");
                                }
                            } else {
                                failAction("The datastore type " + dbType + " is not supported");
                            }

                        } else {
                            failAction("Bad datastore:" + ds + " in configuration. Datastore " + ds
                                    + " doesn't exist in workspace (namespace) " + ws);
                        }
                    }
                }
                listenerForwarder.progressing(50, "Check GeoServer datastore");

                //feature type publication/removal
                boolean done = false;
                if (op.equalsIgnoreCase("PUBLISH")) {
                    if (!gsMan.getReader().getLayers().getNames().contains(featureConfig.getTypeName())) {

                        updateTask("Publish DBLayer " + featureConfig.getTypeName() + " in GeoServer");

                        //featuretype
                        final GSFeatureTypeEncoder fte = new GSFeatureTypeEncoder();
                        fte.setName(featureConfig.getTypeName());
                        fte.setTitle(featureConfig.getTypeName());
                        String crs = featureConfig.getCrs();
                        if (crs != null) {
                            fte.setSRS(featureConfig.getCrs());
                        } else {
                            fte.setSRS("EPSG:4326");
                        }
                        fte.setProjectionPolicy(ProjectionPolicy.FORCE_DECLARED);

                        //layer & styles
                        final GSLayerEncoder layerEncoder = new GSLayerEncoder();
                        layerEncoder.setDefaultStyle(this.defineLayerStyle(featureConfig, gsMan)); //default style

                        if (conf.getStyles() != null) {
                            //add available styles
                            for (String style : conf.getStyles()) {
                                layerEncoder.addStyle(style);
                            }
                        }

                        //publish
                        done = gsMan.getPublisher().publishDBLayer(ws, ds, fte, layerEncoder);
                        if (!done) {
                            failAction("Impossible to publish DBLayer " + featureConfig.getTypeName()
                                    + " in GeoServer");
                        }
                    }

                } else if (op.equalsIgnoreCase("REMOVE")) {
                    if (gsMan.getReader().getLayers().getNames().contains(featureConfig.getTypeName())) {

                        //remove
                        updateTask("Remove DBLayer " + featureConfig.getTypeName() + " from GeoServer");

                        done = gsMan.getPublisher().unpublishFeatureType(ws, ds, featureConfig.getTypeName());
                        if (!done) {
                            failAction("Impossible to remove DBLayer " + featureConfig.getTypeName()
                                    + " in GeoServer");
                        }
                    }
                }

                listenerForwarder.progressing(100F, "Successful Geoserver " + op + " operation");
                listenerForwarder.completed();
                outputEvents.add(ev);

            } else {
                if (LOGGER.isErrorEnabled()) {
                    LOGGER.error("Encountered a NULL event: SKIPPING...");
                }
                continue;
            }
        } catch (Exception ioe) {
            failAction("Unable to produce the output: " + ioe.getLocalizedMessage(), ioe);
        }
    }
    return outputEvents;

}

From source file: it.geosolutions.geobatch.geotiff.overview.GeotiffOverviewsEmbedder.java

public Queue<FileSystemEvent> execute(Queue<FileSystemEvent> events) throws ActionException {

    try {
        // looking for file
        if (events.size() == 0)
            throw new IllegalArgumentException(
                    "GeotiffOverviewsEmbedder::execute(): Wrong number of elements for this action: "
                            + events.size());

        listenerForwarder.setTask("config");
        listenerForwarder.started();

        // //
        //
        // data flow configuration and dataStore name must not be null.
        //
        // //
        if (configuration == null) {
            final String message = "GeotiffOverviewsEmbedder::execute(): DataFlowConfig is null.";
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message);
            throw new IllegalStateException(message);
        }

        // //
        //
        // check the configuration and prepare the overviews embedder
        //
        // //
        final int downsampleStep = configuration.getDownsampleStep();
        if (downsampleStep <= 0)
            throw new IllegalArgumentException(
                    "GeotiffOverviewsEmbedder::execute(): Illegal downsampleStep: " + downsampleStep);

        int numberOfSteps = configuration.getNumSteps();
        if (numberOfSteps <= 0)
            throw new IllegalArgumentException("Illegal numberOfSteps: " + numberOfSteps);

        final OverviewsEmbedder oe = new OverviewsEmbedder();
        oe.setDownsampleStep(downsampleStep);
        oe.setNumSteps(configuration.getNumSteps());
        // SG: this way we are sure we use the standard tile cache
        oe.setTileCache(JAI.getDefaultInstance().getTileCache());

        String scaleAlgorithm = configuration.getScaleAlgorithm();
        if (scaleAlgorithm == null) {
            LOGGER.warn("No scaleAlgorithm defined. Using " + SubsampleAlgorithm.Nearest + " as default");
            scaleAlgorithm = SubsampleAlgorithm.Nearest.name();
        } else {
            final SubsampleAlgorithm algorithm = SubsampleAlgorithm.valueOf(scaleAlgorithm);
            if (algorithm == null) {
                throw new IllegalStateException("Bad scaleAlgorithm defined [" + scaleAlgorithm + "]");
            }
        }
        oe.setScaleAlgorithm(scaleAlgorithm);
        oe.setTileHeight(configuration.getTileH());
        oe.setTileWidth(configuration.getTileW());

        /*
         * TODO check: this is formally wrong! This should be done in the
         * configuration.
         */
        // add logger/listener
        if (configuration.isLogNotification())
            oe.addProcessingEventListener(new ProcessingEventListener() {

                public void exceptionOccurred(ExceptionEvent event) {
                    if (LOGGER.isInfoEnabled())
                        LOGGER.info("GeotiffOverviewsEmbedder::execute(): " + event.getMessage(),
                                event.getException());
                }

                public void getNotification(ProcessingEvent event) {
                    if (LOGGER.isInfoEnabled())
                        LOGGER.info("GeotiffOverviewsEmbedder::execute(): " + event.getMessage());
                    listenerForwarder.progressing((float) event.getPercentage(), event.getMessage());
                }
            });

        // The return
        Queue<FileSystemEvent> ret = new LinkedList<FileSystemEvent>();

        while (events.size() > 0) {

            // run
            listenerForwarder.progressing(0, "Embedding overviews");

            final FileSystemEvent event = events.remove();

            final File eventFile = event.getSource();
            if (LOGGER.isDebugEnabled())
                LOGGER.debug("Processing file " + eventFile);

            if (eventFile.exists() && eventFile.canRead() && eventFile.canWrite()) {
                /*
                 * If here: we can start retiler actions on the incoming
                 * file event
                 */

                if (eventFile.isDirectory()) {

                    final FileFilter filter = new RegexFileFilter(".+\\.[tT][iI][fF]([fF]?)");
                    final Collector collector = new Collector(filter);
                    final List<File> fileList = collector.collect(eventFile);
                    int size = fileList.size();
                    for (int progress = 0; progress < size; progress++) {

                        final File inFile = fileList.get(progress);

                        try {
                            oe.setSourcePath(inFile.getAbsolutePath());
                            oe.run();
                        } catch (UnsupportedOperationException uoe) {
                            listenerForwarder.failed(uoe);
                            if (LOGGER.isWarnEnabled())
                                LOGGER.warn("GeotiffOverviewsEmbedder::execute(): " + uoe.getLocalizedMessage(),
                                        uoe);
                        } catch (IllegalArgumentException iae) {
                            listenerForwarder.failed(iae);
                            if (LOGGER.isWarnEnabled())
                                LOGGER.warn("GeotiffOverviewsEmbedder::execute(): " + iae.getLocalizedMessage(),
                                        iae);
                        } finally {
                            listenerForwarder.setProgress((progress * 100) / ((size != 0) ? size : 1));
                            listenerForwarder.progressing();
                        }
                    }
                } else {
                    // file is not a directory
                    try {
                        oe.setSourcePath(eventFile.getAbsolutePath());
                        oe.run();
                    } catch (UnsupportedOperationException uoe) {
                        listenerForwarder.failed(uoe);
                        if (LOGGER.isWarnEnabled())
                            LOGGER.warn("GeotiffOverviewsEmbedder::execute(): " + uoe.getLocalizedMessage(),
                                    uoe);
                    } catch (IllegalArgumentException iae) {
                        listenerForwarder.failed(iae);
                        if (LOGGER.isWarnEnabled())
                            LOGGER.warn("GeotiffOverviewsEmbedder::execute(): " + iae.getLocalizedMessage(),
                                    iae);
                    } finally {
                        listenerForwarder.setProgress(100 / ((events.size() != 0) ? events.size() : 1));
                    }
                }

                // add the directory to the return
                ret.add(event);
            } else {
                final String message = "GeotiffOverviewsEmbedder::execute(): The passed file event refers to a not existent "
                        + "or not readable/writeable file! File: " + eventFile.getAbsolutePath();
                if (LOGGER.isWarnEnabled())
                    LOGGER.warn(message);

                throw new ActionException(this, message);

            }
        } // endwhile
        listenerForwarder.completed();

        // return
        if (ret.size() > 0) {
            events.clear();
            return ret;
        } else {
            /*
             * If here: an error occurred and no files were set to be
             * returned, so the input queue is returned as-is.
             */
            return events;
        }
    } catch (Exception t) {
        final String message = "GeotiffOverviewsEmbedder::execute(): " + t.getLocalizedMessage();
        if (LOGGER.isErrorEnabled())
            LOGGER.error(message, t);
        final ActionException exc = new ActionException(this, message, t);
        listenerForwarder.failed(exc);
        throw exc;
    }
}

From source file: it.geosolutions.geobatch.geotiff.overview.GeotiffOverviewsEmbedderAction.java

@Override
public Queue<FileSystemEvent> execute(Queue<FileSystemEvent> events) throws ActionException {

    try {
        // looking for file
        if (events.size() == 0)
            throw new IllegalArgumentException(
                    "GeotiffOverviewsEmbedder::execute(): Wrong number of elements for this action: "
                            + events.size());

        listenerForwarder.setTask("config");
        listenerForwarder.started();

        // //
        //
        // data flow configuration and dataStore name must not be null.
        //
        // //
        if (configuration == null) {
            final String message = "GeotiffOverviewsEmbedder::execute(): DataFlowConfig is null.";
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message);
            throw new IllegalStateException(message);
        }

        // //
        //
        // check the configuration and prepare the overviews embedder
        //
        // //
        final int downsampleStep = configuration.getDownsampleStep();
        if (downsampleStep <= 0)
            throw new IllegalArgumentException(
                    "GeotiffOverviewsEmbedder::execute(): Illegal downsampleStep: " + downsampleStep);

        int numberOfSteps = configuration.getNumSteps();
        if (numberOfSteps <= 0)
            throw new IllegalArgumentException("Illegal numberOfSteps: " + numberOfSteps);

        final OverviewsEmbedder oe = new OverviewsEmbedder();
        oe.setDownsampleStep(downsampleStep);
        oe.setNumSteps(configuration.getNumSteps());
        // SG: this way we are sure we use the standard tile cache
        oe.setTileCache(JAI.getDefaultInstance().getTileCache());

        String scaleAlgorithm = configuration.getScaleAlgorithm();
        if (scaleAlgorithm == null) {
            LOGGER.warn("No scaleAlgorithm defined. Using " + SubsampleAlgorithm.Nearest + " as default");
            scaleAlgorithm = SubsampleAlgorithm.Nearest.name();
        } else {
            final SubsampleAlgorithm algorithm = SubsampleAlgorithm.valueOf(scaleAlgorithm);
            if (algorithm == null) {
                throw new IllegalStateException("Bad scaleAlgorithm defined [" + scaleAlgorithm + "]");
            }
        }
        oe.setScaleAlgorithm(scaleAlgorithm);
        oe.setTileHeight(configuration.getTileH());
        oe.setTileWidth(configuration.getTileW());

        /*
         * TODO check: this is formally wrong! This should be done in the
         * configuration.
         */
        // add logger/listener
        if (configuration.isLogNotification())
            oe.addProcessingEventListener(new ProcessingEventListener() {

                public void exceptionOccurred(ExceptionEvent event) {
                    if (LOGGER.isInfoEnabled())
                        LOGGER.info("GeotiffOverviewsEmbedder::execute(): " + event.getMessage(),
                                event.getException());
                }

                public void getNotification(ProcessingEvent event) {
                    if (LOGGER.isInfoEnabled())
                        LOGGER.info("GeotiffOverviewsEmbedder::execute(): " + event.getMessage());
                    listenerForwarder.progressing((float) event.getPercentage(), event.getMessage());
                }
            });

        // The return
        Queue<FileSystemEvent> ret = new LinkedList<FileSystemEvent>();

        while (events.size() > 0) {

            // run
            listenerForwarder.progressing(0, "Embedding overviews");

            final FileSystemEvent event = events.remove();

            final File eventFile = event.getSource();
            if (LOGGER.isDebugEnabled())
                LOGGER.debug("Processing file " + eventFile);

            if (eventFile.exists() && eventFile.canRead() && eventFile.canWrite()) {
                /*
                 * If here: we can start retiler actions on the incoming
                 * file event
                 */

                if (eventFile.isDirectory()) {

                    final FileFilter filter = new RegexFileFilter(".+\\.[tT][iI][fF]([fF]?)");
                    final Collector collector = new Collector(filter);
                    final List<File> fileList = collector.collect(eventFile);
                    int size = fileList.size();
                    for (int progress = 0; progress < size; progress++) {

                        final File inFile = fileList.get(progress);

                        try {
                            oe.setSourcePath(inFile.getAbsolutePath());
                            oe.run();
                        } catch (UnsupportedOperationException uoe) {
                            listenerForwarder.failed(uoe);
                            if (LOGGER.isWarnEnabled())
                                LOGGER.warn("GeotiffOverviewsEmbedder::execute(): " + uoe.getLocalizedMessage(),
                                        uoe);
                        } catch (IllegalArgumentException iae) {
                            listenerForwarder.failed(iae);
                            if (LOGGER.isWarnEnabled())
                                LOGGER.warn("GeotiffOverviewsEmbedder::execute(): " + iae.getLocalizedMessage(),
                                        iae);
                        } finally {
                            listenerForwarder.setProgress((progress * 100) / ((size != 0) ? size : 1));
                            listenerForwarder.progressing();
                        }
                    }
                } else {
                    // file is not a directory
                    try {
                        oe.setSourcePath(eventFile.getAbsolutePath());
                        oe.run();
                    } catch (UnsupportedOperationException uoe) {
                        listenerForwarder.failed(uoe);
                        if (LOGGER.isWarnEnabled())
                            LOGGER.warn("GeotiffOverviewsEmbedder::execute(): " + uoe.getLocalizedMessage(),
                                    uoe);
                    } catch (IllegalArgumentException iae) {
                        listenerForwarder.failed(iae);
                        if (LOGGER.isWarnEnabled())
                            LOGGER.warn("GeotiffOverviewsEmbedder::execute(): " + iae.getLocalizedMessage(),
                                    iae);
                    } finally {
                        listenerForwarder.setProgress(100 / ((events.size() != 0) ? events.size() : 1));
                    }
                }

                // add the directory to the return
                ret.add(event);
            } else {
                final String message = "GeotiffOverviewsEmbedder::execute(): The passed file event refers to a not existent "
                        + "or not readable/writeable file! File: " + eventFile.getAbsolutePath();
                if (LOGGER.isWarnEnabled())
                    LOGGER.warn(message);

                throw new ActionException(this, message);

            }
        } // endwhile
        listenerForwarder.completed();

        // return
        if (ret.size() > 0) {
            events.clear();
            return ret;
        } else {
            /*
             * If here: an error occurred and no files were set to be
             * returned, so the input queue is returned as-is.
             */
            return events;
        }
    } catch (Exception t) {
        final String message = "GeotiffOverviewsEmbedder::execute(): " + t.getLocalizedMessage();
        if (LOGGER.isErrorEnabled())
            LOGGER.error(message, t);
        final ActionException exc = new ActionException(this, message, t);
        listenerForwarder.failed(exc);
        throw exc;
    }
}