Example usage for java.util.concurrent ExecutorService execute

Introduction

This page lists example usages of java.util.concurrent ExecutorService execute, collected from open-source projects.

Prototype

void execute(Runnable command);

Document

Executes the given command at some time in the future.
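
The command may execute in a new thread, in a pooled thread, or in the calling thread, at the discretion of the Executor implementation; execute itself returns immediately. A minimal, self-contained sketch (the pool size and task body are illustrative, not taken from the sources below):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ExecuteDemo {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        // The Runnable is queued and run later on a worker thread.
        pool.execute(new Runnable() {
            public void run() {
                System.out.println("ran on " + Thread.currentThread().getName());
            }
        });
        pool.shutdown(); // already-submitted tasks still run to completion
    }
}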

Usage

From source file:com.globalsight.everest.workflowmanager.WorkflowManagerLocal.java

public void dispatch(Job p_job) throws RemoteException, WorkflowManagerException {
    JobImpl jobClone = null;
    Session session = HibernateUtil.getSession();
    Transaction transaction = null;
    JbpmContext ctx = null;

    try {
        transaction = HibernateUtil.getTransaction();
        jobClone = (JobImpl) session.get(JobImpl.class, new Long(p_job.getId()));
        if (jobClone != null) {
            // refresh job object in the session
            session.evict(jobClone);
            jobClone = (JobImpl) session.get(JobImpl.class, new Long(p_job.getId()));
        }
        Iterator it = jobClone.getWorkflows().iterator();
        // a Map containing task id as key and workflow as value.
        // This is used for possible creation of STF.
        HashMap<Long, Workflow> map = new HashMap<Long, Workflow>(1);
        HashMap<Long, String> etfMap = new HashMap<Long, String>(1);
        Date startDate = new Date();
        ExecutorService pool = Executors.newFixedThreadPool(MAX_THREAD);
        while (it.hasNext()) {
            Workflow wf = (Workflow) it.next();
            if (WF_READY.equals(wf.getState()) || Workflow.PENDING.equals(wf.getState())) {
                Workflow wfClone = (Workflow) session.get(WorkflowImpl.class, wf.getIdAsLong());
                TaskEmailInfo emailInfo = createTaskEmailInfo(jobClone, wfClone);

                ArrayList returnValue = dispatchWorkflow(wfClone, session, startDate, emailInfo);

                long taskId = ((Long) returnValue.get(0)).longValue();
                if (taskId != -1) {
                    Object actionType = returnValue.get(3);
                    if (actionType != null) {
                        etfMap.put(taskId, (String) actionType);
                    }

                    Task task = (Task) wfClone.getTasks().get(taskId);
                    long jobId = task.getJobId();
                    L10nProfile l10nProfile = ServerProxy.getJobHandler().getL10nProfileByJobId(jobId);
                    long wfStatePostId = l10nProfile.getWfStatePostId();
                    if (wfStatePostId != -1) {
                        WfStatePostThread myTask = new WfStatePostThread(task, null, true);
                        pool.execute(myTask);
                    }

                    // For sla issue
                    if (wfClone.isEstimatedTranslateCompletionDateOverrided()) {
                        updateEstimatedTranslateCompletionDate(wfClone.getId(),
                                wfClone.getEstimatedTranslateCompletionDate());
                    }

                    // prepare the map for possible creation of secondary
                    // target
                    // files
                    if (((Boolean) returnValue.get(1)).booleanValue()) {
                        map.put(new Long(taskId), wfClone);
                    }
                }
                session.saveOrUpdate(wfClone);
            }
        }
        pool.shutdown();

        jobClone.setState(WF_DISPATCHED);
        updatePageState(session, jobClone.getSourcePages(), PG_ACTIVE_JOB);
        session.saveOrUpdate(jobClone);

        HibernateUtil.commit(transaction);

        String pmId = p_job.getL10nProfile().getProject().getProjectManagerId();
        if (map.size() > 0) {
            Object[] keys = map.keySet().toArray();
            for (int i = 0; i < keys.length; i++) {
                Long stfTaskId = (Long) keys[i];
                Workflow wf = map.get(stfTaskId);
                exportForStfCreation(stfTaskId, wf, pmId);
            }
        }
        // GBS-3002
        if (etfMap.size() > 0) {
            Object[] keys = etfMap.keySet().toArray();
            for (int i = 0; i < keys.length; i++) {
                Long taskId = (Long) keys[i];
                String actionType = etfMap.get(taskId);
                SystemActionPerformer.perform(actionType, taskId, pmId);
            }
        }
    } catch (Exception e2) {
        HibernateUtil.rollback(transaction);
        s_logger.error("Failed to dispatch: " + p_job.getJobName(), e2);
        String[] args = new String[1];
        args[0] = new Long(p_job.getId()).toString();
        throw new WorkflowManagerException(WorkflowManagerException.MSG_FAILED_TO_DISPATCH_WORKFLOW, args, e2,
                WorkflowManagerException.PROPERTY_FILE_NAME);
    } finally {
        if (ctx != null) {
            ctx.close();
        }
    }

    runJobCreationScript(p_job);
}
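
Note that dispatch calls pool.shutdown() but never awaits termination, so the method can return while WfStatePostThread tasks are still executing; shutdown() only stops the pool from accepting new work. If the caller needed to block until those tasks finish, a sketch of the standard pattern (reusing the pool variable above; the 60-second timeout is an assumption) would be:

pool.shutdown();
try {
    // Wait for queued WfStatePostThread tasks; force-cancel on timeout.
    if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
        pool.shutdownNow();
    }
} catch (InterruptedException ie) {
    pool.shutdownNow();
    Thread.currentThread().interrupt();
}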

From source file:org.apache.geode.internal.cache.GemFireCacheImpl.java

public void shutDownAll() {
    if (LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER) {
        try {
            CacheObserverHolder.getInstance().beforeShutdownAll();
        } finally {
            LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
        }
    }
    if (!this.isShutDownAll.compareAndSet(false, true)) {
        // shutdown is already being performed by another thread
        try {
            this.shutDownAllFinished.await();
        } catch (InterruptedException e) {
            logger.debug("Shutdown all interrupted while waiting for another thread to do the shutDownAll");
            Thread.currentThread().interrupt();
        }
        return;
    }
    synchronized (GemFireCacheImpl.class) {
        try {
            boolean testIGE = Boolean.getBoolean("TestInternalGemFireError");

            if (testIGE) {
                InternalGemFireError assErr = new InternalGemFireError(
                        LocalizedStrings.GemFireCache_UNEXPECTED_EXCEPTION.toLocalizedString());
                throw assErr;
            }

            // bug 44031 requires that multithreaded shutdownAll be grouped
            // by root region. However, shutDownAllDuringRecovery.conf test revealed that
            // we have to close colocated child regions first.
            // Now check all the PR, if anyone has colocate-with attribute, sort all the
            // PRs by colocation relationship and close them sequentially, otherwise still
            // group them by root region.
            TreeMap<String, Map<String, PartitionedRegion>> prTrees = getPRTrees();
            if (prTrees.size() > 1 && shutdownAllPoolSize != 1) {
                ExecutorService es = getShutdownAllExecutorService(prTrees.size());
                for (final Map<String, PartitionedRegion> prSubMap : prTrees.values()) {
                    es.execute(new Runnable() {
                        public void run() {
                            ConnectionTable.threadWantsSharedResources();
                            shutdownSubTreeGracefully(prSubMap);
                        }
                    });
                } // for each root
                es.shutdown();
                try {
                    es.awaitTermination(Integer.MAX_VALUE, TimeUnit.SECONDS);
                } catch (InterruptedException e) {
                    logger.debug("Shutdown all interrupted while waiting for PRs to be shutdown gracefully.");
                }

            } else {
                for (final Map<String, PartitionedRegion> prSubMap : prTrees.values()) {
                    shutdownSubTreeGracefully(prSubMap);
                }
            }

            close("Shut down all members", null, false, true);
        } finally {
            this.shutDownAllFinished.countDown();
        }
    }
}
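
Here each root-region subtree is shut down on its own pool thread, and awaitTermination blocks until every submitted Runnable completes. On Java 8 or later, the anonymous Runnable could be written as a lambda; an equivalent sketch of the same submission:

es.execute(() -> {
    ConnectionTable.threadWantsSharedResources();
    shutdownSubTreeGracefully(prSubMap);
});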

From source file:org.apache.hadoop.hbase.regionserver.HLog.java

private static List<Path> splitLog(final Path rootDir, final FileStatus[] logfiles, final FileSystem fs,
        final HBaseConfiguration conf) throws IOException {
    final Map<byte[], WriterAndPath> logWriters = Collections
            .synchronizedMap(new TreeMap<byte[], WriterAndPath>(Bytes.BYTES_COMPARATOR));
    List<Path> splits = null;

    // Number of threads to use when log splitting to rewrite the logs.
    // More means faster but bigger mem consumption.
    int logWriterThreads = conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);

    // Number of logs to read concurrently when log splitting.
    // More means faster but bigger mem consumption.
    int concurrentLogReads = conf.getInt("hbase.regionserver.hlog.splitlog.reader.threads", 3);
    // Is append supported?
    boolean append = isAppend(conf);
    try {
        int maxSteps = Double.valueOf(Math.ceil((logfiles.length * 1.0) / concurrentLogReads)).intValue();
        for (int step = 0; step < maxSteps; step++) {
            final Map<byte[], LinkedList<HLogEntry>> logEntries = new TreeMap<byte[], LinkedList<HLogEntry>>(
                    Bytes.BYTES_COMPARATOR);
            // Stop at logfiles.length when it's the last step
            int endIndex = step == maxSteps - 1 ? logfiles.length
                    : step * concurrentLogReads + concurrentLogReads;
            for (int i = (step * concurrentLogReads); i < endIndex; i++) {
                // Check for possibly empty file. With appends, currently Hadoop 
                // reports a zero length even if the file has been sync'd. Revisit if
                // HADOOP-4751 is committed.
                long length = logfiles[i].getLen();
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Splitting hlog " + (i + 1) + " of " + logfiles.length + ": "
                            + logfiles[i].getPath() + ", length=" + logfiles[i].getLen());
                }
                recoverLog(fs, logfiles[i].getPath(), append);
                SequenceFile.Reader in = null;
                int count = 0;
                try {
                    in = new SequenceFile.Reader(fs, logfiles[i].getPath(), conf);
                    try {
                        HLogKey key = newKey(conf);
                        KeyValue val = new KeyValue();
                        while (in.next(key, val)) {
                            byte[] regionName = key.getRegionName();
                            LinkedList<HLogEntry> queue = logEntries.get(regionName);
                            if (queue == null) {
                                queue = new LinkedList<HLogEntry>();
                                LOG.debug("Adding queue for " + Bytes.toStringBinary(regionName));
                                logEntries.put(regionName, queue);
                            }
                            HLogEntry hle = new HLogEntry(val, key);
                            queue.push(hle);
                            count++;
                            // Make the key and value new each time; otherwise same instance
                            // is used over and over.
                            key = newKey(conf);
                            val = new KeyValue();
                        }
                        LOG.debug("Pushed=" + count + " entries from " + logfiles[i].getPath());
                    } catch (IOException e) {
                        LOG.debug("IOE Pushed=" + count + " entries from " + logfiles[i].getPath());
                        e = RemoteExceptionHandler.checkIOException(e);
                        if (!(e instanceof EOFException)) {
                            LOG.warn("Exception processing " + logfiles[i].getPath()
                                    + " -- continuing. Possible DATA LOSS!", e);
                        }
                    }
                } catch (IOException e) {
                    if (length <= 0) {
                        LOG.warn("Empty hlog, continuing: " + logfiles[i] + " count=" + count, e);
                        continue;
                    }
                    throw e;
                } finally {
                    try {
                        if (in != null) {
                            in.close();
                        }
                    } catch (IOException e) {
                        LOG.warn("Close in finally threw exception -- continuing", e);
                    }
                    // Delete the input file now so we do not replay edits. We could
                    // have gotten here because of an exception. If so, probably
                    // nothing we can do about it. Replaying it, it could work but we
                    // could be stuck replaying for ever. Just continue though we
                    // could have lost some edits.
                    fs.delete(logfiles[i].getPath(), true);
                }
            }
            ExecutorService threadPool = Executors.newFixedThreadPool(logWriterThreads);
            for (final byte[] key : logEntries.keySet()) {
                Thread thread = new Thread(Bytes.toStringBinary(key)) {
                    @Override
                    public void run() {
                        LinkedList<HLogEntry> entries = logEntries.get(key);
                        LOG.debug("Thread got " + entries.size() + " to process");
                        long threadTime = System.currentTimeMillis();
                        try {
                            int count = 0;
                            // Items were added to the linkedlist oldest first. Pull them
                            // out in that order.
                            for (ListIterator<HLogEntry> i = entries.listIterator(entries.size()); i
                                    .hasPrevious();) {
                                HLogEntry logEntry = i.previous();
                                WriterAndPath wap = logWriters.get(key);
                                if (wap == null) {
                                    Path logfile = new Path(
                                            HRegion.getRegionDir(
                                                    HTableDescriptor.getTableDir(rootDir,
                                                            logEntry.getKey().getTablename()),
                                                    HRegionInfo.encodeRegionName(key)),
                                            HREGION_OLDLOGFILE_NAME);
                                    Path oldlogfile = null;
                                    SequenceFile.Reader old = null;
                                    if (fs.exists(logfile)) {
                                        FileStatus stat = fs.getFileStatus(logfile);
                                        if (stat.getLen() <= 0) {
                                            LOG.warn("Old hlog file " + logfile + " is zero "
                                                    + "length. Deleting existing file");
                                            fs.delete(logfile, false);
                                        } else {
                                            LOG.warn("Old hlog file " + logfile + " already "
                                                    + "exists. Copying existing file to new file");
                                            oldlogfile = new Path(logfile.toString() + ".old");
                                            fs.rename(logfile, oldlogfile);
                                            old = new SequenceFile.Reader(fs, oldlogfile, conf);
                                        }
                                    }
                                    SequenceFile.Writer w = SequenceFile.createWriter(fs, conf, logfile,
                                            getKeyClass(conf), KeyValue.class, getCompressionType(conf));
                                    wap = new WriterAndPath(logfile, w);
                                    logWriters.put(key, wap);
                                    if (LOG.isDebugEnabled()) {
                                        LOG.debug("Creating new hlog file writer for path " + logfile
                                                + " and region " + Bytes.toStringBinary(key));
                                    }

                                    if (old != null) {
                                        // Copy from existing log file
                                        HLogKey oldkey = newKey(conf);
                                        KeyValue oldval = new KeyValue();
                                        for (; old.next(oldkey, oldval); count++) {
                                            if (LOG.isDebugEnabled() && count > 0 && count % 10000 == 0) {
                                                LOG.debug("Copied " + count + " edits");
                                            }
                                            w.append(oldkey, oldval);
                                        }
                                        old.close();
                                        fs.delete(oldlogfile, true);
                                    }
                                }
                                wap.w.append(logEntry.getKey(), logEntry.getEdit());
                                count++;
                            }
                            if (LOG.isDebugEnabled()) {
                                LOG.debug("Applied " + count + " total edits to " + Bytes.toStringBinary(key)
                                        + " in " + (System.currentTimeMillis() - threadTime) + "ms");
                            }
                        } catch (IOException e) {
                            e = RemoteExceptionHandler.checkIOException(e);
                            LOG.warn("Got while writing region " + Bytes.toStringBinary(key) + " log " + e);
                            e.printStackTrace();
                        }
                    }
                };
                threadPool.execute(thread);
            }
            threadPool.shutdown();
            // Wait for all threads to terminate
            try {
                for (int i = 0; !threadPool.awaitTermination(5, TimeUnit.SECONDS); i++) {
                    LOG.debug("Waiting for hlog writers to terminate, iteration #" + i);
                }
            } catch (InterruptedException ex) {
                LOG.warn("Hlog writers were interrupted, possible data loss!");
            }
        }
    } finally {
        splits = new ArrayList<Path>(logWriters.size());
        for (WriterAndPath wap : logWriters.values()) {
            wap.w.close();
            LOG.debug("Closed " + wap.p);
            splits.add(wap.p);
        }
    }
    return splits;
}
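
One subtlety in this example: each task is built as a Thread subclass, but it is handed to threadPool.execute(Runnable), so its run() method executes on a pool worker and the Thread object is never started as a thread of its own; Thread serves here only as a conveniently named Runnable. A plain Runnable submitted the same way would behave identically:

// Sketch: equivalent submission without subclassing Thread.
threadPool.execute(new Runnable() {
    public void run() {
        // ... same body as the Thread subclass above ...
    }
});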

From source file:org.apache.hadoop.raid.RaidShell.java

private long estimateSaving(final Codec codec, final List<Path> files, final int targetReplication,
        final int numThreads, final boolean isDebug) throws IOException {
    final AtomicLong totalSavingSize = new AtomicLong(0);
    ExecutorService executor = Executors.newFixedThreadPool(numThreads);
    LOG.info("Processing " + files.size() + " files/dirs for " + codec.id + " in " + numThreads + " threads");
    if (isDebug) {
        System.out.println("oldDiskSize | oldParitySize | newDiskSize | newParitySize"
                + "| savingSize | totalSavingSize | path ");
    }
    final AtomicInteger finishNum = new AtomicInteger(0);
    for (int i = 0; i < numThreads; i++) {
        final int startIdx = i;
        Runnable work = new Runnable() {
            public void run() {
                try {
                    for (int idx = startIdx; idx < files.size(); idx += numThreads) {
                        try {
                            Path p = files.get(idx);
                            FileSystem fs = FileSystem.get(conf);
                            p = fs.makeQualified(p);
                            FileStatus stat = null;
                            try {
                                stat = fs.getFileStatus(p);
                            } catch (FileNotFoundException e) {
                                LOG.warn("Path " + p + " does not exist", e);
                            }
                            if (stat == null) {
                                continue;
                            }
                            short repl = 0;
                            List<FileStatus> lfs = null;
                            if (codec.isDirRaid) {
                                if (!stat.isDir()) {
                                    continue;
                                }
                                lfs = RaidNode.listDirectoryRaidFileStatus(conf, fs, p);
                                if (lfs == null) {
                                    continue;
                                }
                                repl = DirectoryStripeReader.getReplication(lfs);
                            } else {
                                repl = stat.getReplication();
                            }

                            // If the file should not be raided, don't put it into the write list.
                            if (!RaidNode.shouldRaid(conf, fs, stat, codec, lfs)) {
                                LOG.info("Should not raid file: " + p);
                                continue;
                            }
                            // check the replication.
                            boolean add = false;
                            if (repl > targetReplication) {
                                add = true;
                            } else if (repl == targetReplication
                                    && !ParityFilePair.parityExists(stat, codec, conf)) {
                                add = true;
                            }
                            if (add) {
                                long oldDiskSize = 0L;
                                long newDiskSize = 0L;
                                long numBlocks = 0L;
                                long parityBlockSize = 0L;
                                if (codec.isDirRaid) {
                                    for (FileStatus fsStat : lfs) {
                                        oldDiskSize += fsStat.getLen() * (fsStat.getReplication());
                                        newDiskSize += fsStat.getLen() * targetReplication;
                                    }
                                    numBlocks = DirectoryStripeReader.getBlockNum(lfs);
                                    parityBlockSize = DirectoryStripeReader.getParityBlockSize(conf, lfs);
                                } else {
                                    oldDiskSize = stat.getLen() * stat.getReplication();
                                    newDiskSize = stat.getLen() * targetReplication;
                                    numBlocks = RaidNode.getNumBlocks(stat);
                                    parityBlockSize = stat.getBlockSize();
                                }

                                long numStripes = RaidNode.numStripes(numBlocks, codec.stripeLength);
                                long newParitySize = numStripes * codec.parityLength * parityBlockSize
                                        * targetReplication;
                                long oldParitySize = 0L;
                                for (Codec other : Codec.getCodecs()) {
                                    if (other.priority < codec.priority) {
                                        Path parityPath = new Path(other.parityDirectory,
                                                RaidNode.makeRelative(stat.getPath()));
                                        long logicalSize = 0;
                                        try {
                                            logicalSize = fs.getContentSummary(parityPath).getSpaceConsumed();
                                        } catch (IOException ioe) {
                                            // doesn't exist
                                            continue;
                                        }
                                        oldParitySize += logicalSize;
                                    }
                                }
                                long savingSize = oldDiskSize + oldParitySize - newDiskSize - newParitySize;
                                totalSavingSize.addAndGet(savingSize);
                                if (isDebug) {
                                    System.out.println(oldDiskSize + " " + oldParitySize + " " + newDiskSize
                                            + " " + newParitySize + " " + savingSize + " "
                                            + totalSavingSize.get() + " " + stat.getPath());
                                }
                            }
                        } catch (IOException ioe) {
                            LOG.warn("Get IOException", ioe);
                        }
                    }
                } finally {
                    finishNum.incrementAndGet();
                }
            }
        };
        if (executor != null) {
            executor.execute(work);
        }
    }
    if (executor != null) {
        try {
            while (finishNum.get() < numThreads) {
                try {
                    Thread.sleep(2000);
                } catch (InterruptedException ie) {
                    LOG.warn("EstimateSaving get exception ", ie);
                    throw new IOException(ie);
                }
            }
        } finally {
            executor.shutdown(); // Waits for submitted tasks to finish.
        }
    }
    return totalSavingSize.get();
}
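
This example detects completion by polling an AtomicInteger in a sleep loop. Since every worker is submitted to the pool, awaitTermination can express the same wait without hand-rolled polling; a sketch under the same assumptions (the 2-second interval mirrors the original sleep):

executor.shutdown(); // no new tasks; workers already submitted keep running
try {
    while (!executor.awaitTermination(2, TimeUnit.SECONDS)) {
        LOG.info("Waiting for estimation workers, finished " + finishNum.get() + "/" + numThreads);
    }
} catch (InterruptedException ie) {
    throw new IOException(ie);
}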

From source file:edu.lternet.pasta.datapackagemanager.DataPackageManagerResource.java

private void cleanTemporaryDir() {
    Cleaner cleaner = new Cleaner(this.tmpDir, this.ttl);
    ExecutorService executorService = Executors.newCachedThreadPool();
    executorService.execute(cleaner);
    executorService.shutdown();
}
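
The execute-then-shutdown pairing here is a fire-and-forget idiom: shutdown() stops the pool from accepting further tasks but lets the already-submitted Cleaner run to completion, after which the pool's threads exit. The remaining examples from this source file apply the same pattern to Evaluator, Archivor, Creator, and Updator tasks. For a single background task, a plain thread is an equally valid sketch (the daemon flag is an assumption about desired shutdown behavior):

Thread worker = new Thread(cleaner, "tmp-dir-cleaner");
worker.setDaemon(true); // don't keep the JVM alive just for cleanup work
worker.start();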

From source file:edu.lternet.pasta.datapackagemanager.DataPackageManagerResource.java

/**
 * <strong>Evaluate Data Package</strong> operation, specifying the EML
 * document describing the data package to be evaluated in the request
 * message body, and returning a <em>transaction identifier</em> in the
 * response message body as plain text; the <em>transaction identifier</em>
 * may be used in a subsequent call to <code>readDataPackageError</code> to
 * determine the operation status or to <code>readEvaluateReport</code> to
 * obtain the evaluate quality report.
 * 
 * <h4>Request:</h4>
 * <table border="1" cellspacing="0" cellpadding="3">
 * <tr>
 * <th><b>Message Body</b></th>
 * <th><b>MIME type</b></th>
 * <th><b>Sample Request</b></th>
 * </tr>
 * <tr>
 * <td align=center>EML document</td>
 * <td align=center><code>application/xml</code></td>
 * <td><code>curl -i -X POST -H "Content-Type: application/xml"
 * --data-binary @eml.xml
 * https://pasta.lternet.edu/package/evaluate/eml</code></td>
 * </tr>
 * </table>
 * 
 * <h4>Response:</h4>
 * <table border="1" cellspacing="0" cellpadding="3">
 * <tr>
 * <th><b>Status</b></th>
 * <th><b>Reason</b></th>
 * <th><b>Message Body</b></th>
 * <th><b>MIME type</b></th>
 * <th><b>Sample Message Body</b></th>
 * </tr>
 * <tr>
 * <td align=center>202 Accepted</td>
 * <td align=center>The evaluate request was accepted for processing</td>
 * <td align=center>A transaction identifier for use in subsequent
 * processing of the request (see <code>readDataPackageError</code> to
 * understand how the transaction identifier may be used to determine if an
 * error occurred during the operation)</td>
 * <td align=center><code>text/plain</code></td>
 * <td align=center><code>1364424858431</code></td>
 * </tr>
 * <tr>
 * <td align=center>401 Unauthorized</td>
 * <td align=center>The requesting user is not authorized to execute this
 * service method</td>
 * <td align=center>An error message</td>
 * <td align=center><code>text/plain</code></td>
 * <td align=center><code>Error message</code></td>
 * 
 * </tr>
 * <tr>
 * <td align=center>405 Method Not Allowed</td>
 * <td align=center>The specified HTTP method is not allowed for the
 * requested resource</td>
 * <td align=center>An error message</td>
 * <td align=center><code>text/plain</code></td>
 * <td align=center><code>Error message</code></td>
 * 
 * </tr>
 * </table>
 * 
 * @param emlFile
 *            An EML document file, as specified in the payload of the
 *            request.
 * 
 * @return a Response which, if successful, contains a transaction
 *         identifier in its message body
 */
@POST
@Path("/evaluate/eml")
@Consumes("application/xml")
@Produces({ "application/xml", "text/html" })
public Response evaluateDataPackage(@Context HttpHeaders headers, File emlFile) {
    ResponseBuilder responseBuilder = null;
    Response response = null;
    final String serviceMethodName = "evaluateDataPackage";
    Rule.Permission permission = Rule.Permission.write;
    AuthToken authToken = null;

    String transaction = generateTransactionID("evaluate", null, null, null);

    authToken = getAuthToken(headers);
    String userId = authToken.getUserId();

    // Is user authorized to run the 'evaluateDataPackage' service method?
    boolean serviceMethodAuthorized = isServiceMethodAuthorized(serviceMethodName, permission, authToken);
    if (!serviceMethodAuthorized) {
        throw new UnauthorizedException(
                "User " + userId + " is not authorized to execute service method " + serviceMethodName);
    }

    // Perform evaluateDataPackage in new thread
    Evaluator evaluator = new Evaluator(emlFile, userId, authToken, transaction);
    ExecutorService executorService = Executors.newCachedThreadPool();
    executorService.execute(evaluator);
    executorService.shutdown();

    responseBuilder = Response.status(Response.Status.ACCEPTED);
    responseBuilder.entity(transaction);
    response = responseBuilder.build();
    response = stampHeader(response);

    return response;

}

From source file:edu.lternet.pasta.datapackagemanager.DataPackageManagerResource.java

/**
 * <strong>Create Data Package Archive (Zip)</strong> operation, specifying
 * the scope, identifier, and revision of the data package to be Zipped in
 * the URI, and returning a <em>transaction identifier</em> in the response
 * message body as plain text; the <em>transaction identifier</em> may be
 * used in a subsequent call to <code>readDataPackageError</code> to
 * determine the operation status or to <code>readDataPackageArchive</code>
 * to obtain the Zip archive.
 * 
 * <h4>Request:</h4>
 * <table border="1" cellspacing="0" cellpadding="3">
 * <tr>
 * <th><b>Message Body</b></th>
 * <th><b>MIME type</b></th>
 * <th><b>Sample Request</b></th>
 * </tr>
 * <tr>
 * <td align=center>none</td>
 * <td align=center>none</td>
 * <td><code>curl -i -X POST
 * https://pasta.lternet.edu/package/archive/eml/knb-lter-lno/1/1</code></td>
 * </tr>
 * </table>
 * 
 * <h4>Response:</h4>
 * <table border="1" cellspacing="0" cellpadding="3">
 * <tr>
 * <th><b>Status</b></th>
 * <th><b>Reason</b></th>
 * <th><b>Message Body</b></th>
 * <th><b>MIME type</b></th>
 * <th><b>Sample Message Body</b></th>
 * </tr>
 * <tr>
 * <td align=center>202 Accepted</td>
 * <td align=center>The create request was accepted for processing</td>
 * <td align=center>A transaction identifier for use in subsequent
 * processing of the request (see <code>readDataPackageError</code> to
 * understand how the transaction identifier may be used to determine if an
 * error occurred during the operation)</td>
 * <td align=center><code>text/plain</code></td>
 * <td align=center><code>1364424858431</code></td>
 * </tr>
 * <tr>
 * <td align=center>401 Unauthorized</td>
 * <td align=center>The requesting user is not authorized to execute this
 * service method</td>
 * <td align=center>An error message</td>
 * <td align=center><code>text/plain</code></td>
 * <td align=center><code>Error message</code></td>
 * 
 * </tr>
 * <tr>
 * <td align=center>405 Method Not Allowed</td>
 * <td align=center>The specified HTTP method is not allowed for the
 * requested resource</td>
 * <td align=center>An error message</td>
 * <td align=center><code>text/plain</code></td>
 * <td align=center><code>Error message</code></td>
 * 
 * </tr>
 * </table>
 * 
 * @return a Response which, if successful, contains a transaction
 *         identifier in its message body
 */
@POST
@Path("/archive/eml/{scope}/{identifier}/{revision}")
@Produces("text/plain")
@Consumes("text/plain")
public Response createDataPackageArchive(@Context HttpHeaders headers, @PathParam("scope") String scope,
        @PathParam("identifier") Integer identifier, @PathParam("revision") Integer revision) {

    ResponseBuilder responseBuilder = null;
    Response response = null;
    final String serviceMethodName = "createDataPackageArchive";
    Rule.Permission permission = Rule.Permission.write;
    AuthToken authToken = null;

    String transaction = generateTransactionID("archive", scope, identifier, revision);

    authToken = getAuthToken(headers);
    String userId = authToken.getUserId();

    // Is user authorized to run the 'createDataPackageArchive' service method?
    boolean serviceMethodAuthorized = isServiceMethodAuthorized(serviceMethodName, permission, authToken);
    if (!serviceMethodAuthorized) {
        throw new UnauthorizedException(
                "User " + userId + " is not authorized to execute service method " + serviceMethodName);
    }

    // Perform createDataPackageArchive in new thread
    Archivor archivor = new Archivor(scope, identifier, revision, userId, authToken, transaction);
    ExecutorService executorService = Executors.newCachedThreadPool();
    executorService.execute(archivor);
    executorService.shutdown();

    responseBuilder = Response.status(Response.Status.ACCEPTED);
    responseBuilder.entity(transaction);
    response = responseBuilder.build();
    response = stampHeader(response);

    return response;

}

From source file:edu.lternet.pasta.datapackagemanager.DataPackageManagerResource.java

/**
 * <strong>Create Data Package</strong> operation, specifying the EML
 * document describing the data package to be created in the request message
 * body and returning a <em>transaction identifier</em> in the response
 * message body as plain text; the <em>transaction identifier</em> may be
 * used in a subsequent call to <code>readDataPackageError</code> to
 * determine the operation status; see <code>readDataPackage</code> to
 * obtain the data package resource map if the operation completed
 * successfully.
 * 
 * <h4>Request:</h4>
 * <table border="1" cellspacing="0" cellpadding="3">
 * <tr>
 * <th><b>Message Body</b></th>
 * <th><b>MIME type</b></th>
 * <th><b>Sample Request</b></th>
 * </tr>
 * <tr>
 * <td align=center>EML document</td>
 * <td align=center><code>application/xml</code></td>
 * <td>
 * <code>curl -i -u "uid=ucarroll,o=LTER,dc=ecoinformatics,dc=org:PASSWORD" 
 * -X POST -H "Content-Type: application/xml"
 * --data-binary @knb-lter-lno.1.1.xml https://pasta.lternet.edu/package/eml</code>
 * </td>
 * </tr>
 * </table>
 * 
 * <h4>Response:</h4>
 * <table border="1" cellspacing="0" cellpadding="3">
 * <tr>
 * <th><b>Status</b></th>
 * <th><b>Reason</b></th>
 * <th><b>Message Body</b></th>
 * <th><b>MIME type</b></th>
 * <th><b>Sample Message Body</b></th>
 * </tr>
 * <tr>
 * <td align=center>202 Accepted</td>
 * <td align=center>The <em>create data package</em> request was accepted
 * for processing</td>
 * <td align=center>A <em>transaction identifier</em> for use in subsequent
 * processing of the request (see <code>readDataPackageError</code> to
 * understand how the transaction identifier may be used to determine if an
 * error occurred during the operation)</td>
 * <td align=center><code>text/plain</code></td>
 * <td align=center><code>1364424858431</code></td>
 * </tr>
 * <tr>
 * <td align=center>401 Unauthorized</td>
 * <td align=center>The requesting user is not authorized to execute this
 * service method</td>
 * <td align=center>An error message</td>
 * <td align=center><code>text/plain</code></td>
 * <td align=center><code>Error message</code></td>
 * </tr>
 * <tr>
 * <td align=center>405 Method Not Allowed</td>
 * <td align=center>The specified HTTP method is not allowed for the
 * requested resource</td>
 * <td align=center>An error message</td>
 * <td align=center><code>text/plain</code></td>
 * <td align=center><code>Error message</code></td>
 * </tr>
 * </table>
 * 
 * @param emlFile
 *            An EML document file, as specified in the payload of the
 *            request.
 * 
 * @return a Response which, if successful, contains a transaction
 *         identifier in its message body
 */
@POST
@Path("/eml")
@Consumes("application/xml")
@Produces("text/plain")
public Response createDataPackage(@Context HttpHeaders headers, File emlFile) {
    ResponseBuilder responseBuilder = null;
    Response response = null;
    final String serviceMethodName = "createDataPackage";
    Rule.Permission permission = Rule.Permission.write;
    AuthToken authToken = null;

    try {
        authToken = getAuthToken(headers);
        String userId = authToken.getUserId();

        // Is user authorized to run the 'createDataPackage' service method?
        boolean serviceMethodAuthorized = isServiceMethodAuthorized(serviceMethodName, permission, authToken);

        if (!serviceMethodAuthorized) {
            throw new UnauthorizedException(String.format(
                    "User %s is not authorized to execute service method %s", userId, serviceMethodName));
        }

        String transaction = generateTransactionID("create", null, null, null);

        // Perform createDataPackage in new thread
        Creator creator = new Creator(emlFile, userId, authToken, transaction);
        ExecutorService executorService = Executors.newCachedThreadPool();
        executorService.execute(creator);
        executorService.shutdown();

        responseBuilder = Response.status(Response.Status.ACCEPTED);
        responseBuilder.entity(transaction);
        response = responseBuilder.build();
        response = stampHeader(response);
    } catch (UnauthorizedException e) {
        response = WebExceptionFactory.makeUnauthorized(e).getResponse();
    }

    return response;
}

From source file:edu.lternet.pasta.datapackagemanager.DataPackageManagerResource.java

/**
 * <strong>Update Data Package</strong> operation, specifying the scope and
 * identifier of the data package to be updated in the URI, along with the
 * EML document describing the data package to be created in the request
 * message body, and returning a <em>transaction identifier</em> in the
 * response message body as plain text; the <em>transaction identifier</em>
 * may be used in a subsequent call to <code>readDataPackageError</code> to
 * determine the operation status; see <code>readDataPackage</code> to
 * obtain the data package resource map if the operation completed
 * successfully.
 * 
 * 
 * <h4>Requests:</h4>
 * <table border="1" cellspacing="0" cellpadding="3">
 * <tr>
 * <th><b>Message Body</b></th>
 * <th><b>MIME type</b></th>
 * <th><b>Sample Request</b></th>
 * </tr>
 * <tr>
 * <td align=center>EML document</td>
 * <td align=center><code>application/xml</code></td>
 * <td>
 * <code>curl -i -u "uid=ucarroll,o=LTER,dc=ecoinformatics,dc=org:PASSWORD" 
 * -X PUT -H "Content-Type: application/xml"
 * --data-binary @knb-lter-lno.1.1.xml
 * https://pasta.lternet.edu/package/eml/knb-lter-lno/1</code></td>
 * </tr>
 * </table>
 * 
 * <h4>Responses:</h4>
 * <table border="1" cellspacing="0" cellpadding="3">
 * <tr>
 * <th><b>Status</b></th>
 * <th><b>Reason</b></th>
 * <th><b>Message Body</b></th>
 * <th><b>MIME type</b></th>
 * <th><b>Sample Message Body</b></th>
 * </tr>
 * <tr>
 * <td align=center>202 Accepted</td>
 * <td align=center>The <em>update data package</em> request was accepted
 * for processing</td>
 * <td align=center>A <em>transaction identifier</em> for use in subsequent
 * processing of the request (see <code>readDataPackageError</code> to
 * understand how the transaction identifier may be used to determine if an
 * error occurred during the operation)</td>
 * <td align=center><code>text/plain</code></td>
 * <td align=center><code>1364424858431</code></td>
 * </tr>
 * <tr>
 * <td align=center>401 Unauthorized</td>
 * <td align=center>The requesting user is not authorized to execute this
 * service method</td>
 * <td align=center>An error message</td>
 * <td align=center><code>text/plain</code></td>
 * <td align=center><code>Error message</code></td>
 * </tr>
 * <tr>
 * <td align=center>405 Method Not Allowed</td>
 * <td align=center>The specified HTTP method is not allowed for the
 * requested resource</td>
 * <td align=center>An error message</td>
 * <td align=center><code>text/plain</code></td>
 * <td align=center><code>Error message</code></td>
 * </tr>
 * </table>
 * 
 * @param emlFile
 *            An EML document file, as specified in the payload of the
 *            request.
 * 
 * @return a Response which, if successful, contains a transaction
 *         identifier in its message body
 */
@PUT
@Path("/eml/{scope}/{identifier}")
@Consumes("application/xml")
@Produces("text/plain")
public Response updateDataPackage(@Context HttpHeaders headers, @PathParam("scope") String scope,
        @PathParam("identifier") Integer identifier, File emlFile) {
    AuthToken authToken = null;
    ResponseBuilder responseBuilder = null;
    Response response = null;
    final String serviceMethodName = "updateDataPackage";
    Rule.Permission permission = Rule.Permission.write;

    try {
        authToken = getAuthToken(headers);
        String userId = authToken.getUserId();

        // Is user authorized to run the service method?
        boolean serviceMethodAuthorized = isServiceMethodAuthorized(serviceMethodName, permission, authToken);
        if (!serviceMethodAuthorized) {
            throw new UnauthorizedException(String.format(
                    "User %s is not authorized to execute service method %s", userId, serviceMethodName));
        }

        String transaction = generateTransactionID("update", scope, identifier, null);

        // Perform updateDataPackage in new thread
        Updator updator = new Updator(emlFile, scope, identifier, userId, authToken, transaction);
        ExecutorService executorService = Executors.newCachedThreadPool();
        executorService.execute(updator);
        executorService.shutdown();

        responseBuilder = Response.status(Response.Status.ACCEPTED);
        responseBuilder.entity(transaction);
        response = responseBuilder.build();
        response = stampHeader(response);
    } catch (UnauthorizedException e) {
        response = WebExceptionFactory.makeUnauthorized(e).getResponse();
    }

    return response;
}