Example usage for java.util ListIterator hasPrevious

List of usage examples for java.util ListIterator hasPrevious

Introduction

On this page you can find example usage for java.util.ListIterator.hasPrevious().

Prototype

boolean hasPrevious();

Document

Returns true if this list iterator has more elements when traversing the list in the reverse direction.
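
Most of the examples below follow the same pattern: obtain a ListIterator positioned just past the last element with list.listIterator(list.size()), then walk the list backwards while hasPrevious() returns true. A minimal, self-contained sketch of that pattern (class and variable names are illustrative only):

import java.util.Arrays;
import java.util.List;
import java.util.ListIterator;

public class ReverseIterationExample {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("alpha", "beta", "gamma");

        // Start the iterator past the end of the list, then move backwards.
        ListIterator<String> it = names.listIterator(names.size());
        while (it.hasPrevious()) {
            System.out.println(it.previous()); // prints gamma, beta, alpha
        }
    }
}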

Usage

From source file:org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.java

@Override
public boolean rollback(final Server server, final RegionServerServices services) throws IOException {
    this.server = server;
    this.rsServices = services;
    // Coprocessor callback
    if (this.parent.getCoprocessorHost() != null) {
        this.parent.getCoprocessorHost().preRollBackSplit();
    }

    boolean result = true;
    ListIterator<JournalEntry> iterator = this.journal.listIterator(this.journal.size());
    // Iterate in reverse.
    while (iterator.hasPrevious()) {
        JournalEntry je = iterator.previous();

        transition(je.getPhase(), true);

        switch (je.getPhase()) {

        case SET_SPLITTING:
            if (services != null && !services.reportRegionStateTransition(TransitionCode.SPLIT_REVERTED,
                    parent.getRegionInfo(), hri_a, hri_b)) {
                return false;
            }
            break;

        case CREATE_SPLIT_DIR:
            this.parent.writestate.writesEnabled = true;
            this.parent.getRegionFileSystem().cleanupSplitsDir();
            break;

        case CLOSED_PARENT_REGION:
            try {
                // So, this returns a seqid but if we just closed and then reopened, we
                // should be ok. On close, we flushed using sequenceid obtained from
                // hosting regionserver so no need to propagate the sequenceid returned
                // out of initialize below up into regionserver as we normally do.
                // TODO: Verify.
                this.parent.initialize();
            } catch (IOException e) {
                LOG.error("Failed rollbacking CLOSED_PARENT_REGION of region "
                        + parent.getRegionInfo().getRegionNameAsString(), e);
                throw new RuntimeException(e);
            }
            break;

        case STARTED_REGION_A_CREATION:
            this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_a);
            break;

        case STARTED_REGION_B_CREATION:
            this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_b);
            break;

        case OFFLINED_PARENT:
            if (services != null)
                services.addToOnlineRegions(this.parent);
            break;

        case PONR:
            // We got to the point-of-no-return so we need to just abort. Return
            // immediately.  Do not clean up created daughter regions.  They need
            // to be in place so we don't delete the parent region mistakenly.
            // See HBASE-3872.
            return false;

        // Informational only cases
        case STARTED:
        case PREPARED:
        case BEFORE_PRE_SPLIT_HOOK:
        case AFTER_PRE_SPLIT_HOOK:
        case BEFORE_POST_SPLIT_HOOK:
        case AFTER_POST_SPLIT_HOOK:
        case OPENED_REGION_A:
        case OPENED_REGION_B:
        case COMPLETED:
            break;

        default:
            throw new RuntimeException("Unhandled journal entry: " + je);
        }
    }
    // Coprocessor callback
    if (this.parent.getCoprocessorHost() != null) {
        this.parent.getCoprocessorHost().postRollBackSplit();
    }
    return result;
}

From source file:it.polito.tellmefirst.web.rest.clients.ClientEpub.java

private HashMap<String, String> parseEpub(File file) throws IOException, TMFVisibleException {

    LOG.debug("[parseEpub] - BEGIN");

    ZipFile fi = new ZipFile(file);

    for (Enumeration e = fi.entries(); e.hasMoreElements();) {
        ZipEntry entry = (ZipEntry) e.nextElement();
        if (entry.getName().endsWith("ncx")) {
            InputStream tocMaybeDirty = fi.getInputStream(entry);
            Scanner scanner = new Scanner(tocMaybeDirty, "UTF-8").useDelimiter("\\A");
            String theString = scanner.hasNext() ? scanner.next() : "";
            tocMaybeDirty.close();
            scanner.close();

            String res = theString.replaceAll(">[\\s]*?<", "><");

            InputStream toc = new ByteArrayInputStream(res.getBytes(StandardCharsets.UTF_8));

            try {
                DocumentBuilder dBuilder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
                Document doc = dBuilder.parse(toc);
                toc.close();

                if (doc.hasChildNodes()) {
                    findNavMap(doc.getChildNodes());
                }
            } catch (Exception ex) {
                LOG.error("Unable to navigate the TOC");
            }

            removeEmptyTOC(epub);

            //search anchors in links and split
            Set set = epub.entrySet();
            Iterator i = set.iterator();
            while (i.hasNext()) {
                Map.Entry me = (Map.Entry) i.next();
                if (me.getValue().toString().contains("#")) {
                    String[] parts = me.getValue().toString().split("#");
                    String anchor = parts[1];
                    epub.put(me.getKey().toString(), anchor);
                }
            }
        }
        if (entry.getName().endsWith("opf")) { //manage files because order is important
            InputStream content = fi.getInputStream(entry);

            Scanner scanner = new Scanner(content, "UTF-8").useDelimiter("\\A");
            String contentString = scanner.hasNext() ? scanner.next() : "";
            content.close();
            scanner.close();

            String filenameRegex = "href=\"(.*.htm(|l))\".*media-type=\"application/xhtml";
            Pattern pattern = Pattern.compile(filenameRegex);
            Matcher matcher = pattern.matcher(contentString);

            Integer count = 0;
            while (matcher.find()) {
                files.put(count, matcher.group(1));
                count++;
            }
        }
        if (entry.getName().endsWith("html") || entry.getName().endsWith("htm")
                || entry.getName().endsWith("xhtml")) {
            InputStream htmlFile = fi.getInputStream(entry);

            Scanner scanner = new Scanner(htmlFile, "UTF-8").useDelimiter("\\A");
            String htmlString = scanner.hasNext() ? scanner.next() : "";

            String regex1 = htmlString.replaceAll("^[^_]*?<body>", ""); //remove head
            String regex2 = regex1.replaceAll("</body>.*$", ""); //remove tail
            String htmlCleaned = regex2.replaceAll("<a.*?/>", ""); //anchor with one tag

            String[] bits = entry.getName().split("/");
            String fileName = bits[bits.length - 1];

            htmls.put(fileName, htmlCleaned);
        }
    }
    fi.close();
    Integer i;
    for (i = 0; i < files.size(); i++) {
        stringBuilder.append("<p id=\"" + files.get(i) + "\"></p>"); // "anchor" also the heads of each files
        stringBuilder.append(htmls.get(files.get(i)));
    }
    String htmlAll = stringBuilder.toString();

    /* We have all the needed files; start to split.
       For each link -> make a chunk.
       Start from the bottom. */
    Metadata metadata = new Metadata();
    Parser parser = new HtmlParser();
    ListIterator<Map.Entry<String, String>> iter = new ArrayList<>(epub.entrySet()).listIterator(epub.size());

    while (iter.hasPrevious()) {
        Map.Entry<String, String> me = iter.previous();
        try {
            ContentHandler contenthandler = new BodyContentHandler(10 * htmlAll.length());
            Scanner sc = new Scanner(htmlAll);
            sc.useDelimiter("id=\"" + me.getValue().toString() + "\">");
            htmlAll = sc.next();
            InputStream stream = new ByteArrayInputStream(sc.next().getBytes(StandardCharsets.UTF_8));
            parser.parse(stream, contenthandler, metadata, new ParseContext());
            String chapterText = contenthandler.toString().toLowerCase().replaceAll("\\d+.*", "");
            String chapterTextWithoutNo = chapterText.replaceAll("\\d+.*", "");
            // Remove the Project Gutenberg meta information from the text
            String chapterTextCleaned = chapterTextWithoutNo.split("end of the project gutenberg ebook")[0];
            epub.put(me.getKey().toString(), chapterTextCleaned);

        } catch (Exception ex) {
            LOG.error("Unable to parse content for index: " + me.getKey() + ", this chapter will be deleted");
            removeChapter(epub, me.getKey().toString());
        }
    }

    /* I remove the Project Gutenberg license chapter from the Map, because it is useless
      for classification and it triggers a Lucene exception when the document language is Italian
      (the license text is always in English).
            
      You can use this method in order to remove each chapter that is useless for classifying
      your Epub document. */
    removeChapter(epub, "A Word from Project Gutenberg");
    removeEmptyItems(epub);

    //If the Epub file has a bad structure, I try to use the basic Epub extractor of Tika.
    if (epub.size() == 0) {
        LOG.info("The Epub file has a bad structure. Try to use the Tika extractor");
        epub.put("All text", autoParseAll(file));
    }

    removeEmptyItems(epub);

    if (epub.size() == 0) {
        LOG.error("Unable to extract text from this Epub");
        throw new TMFVisibleException("Unable to extract any text from this Epub.");
    }

    removeDownloadedFile(TEMPORARY_PATH);

    LOG.debug("[parseEpub] - END");

    return epub;
}

From source file:com.projity.pm.criticalpath.CriticalPath.java

private void doPass(Task startTask, TaskSchedule.CalculationContext context) {
    if (startTask != null) {
        startTask.getSchedule(context.scheduleType).invalidate();
        startTask.setCalculationStateCount(getCalculationStateCount());
    }

    PredecessorTaskList.TaskReference taskReference;
    boolean forward = context.forward;
    ListIterator i = forward ? predecessorTaskList.listIterator() : predecessorTaskList.reverseIterator();
    Task task;
    TaskSchedule schedule;

    //      int count = 0;
    //      long z = System.currentTimeMillis();
    boolean projectForward = project.isForward();
    while (forward ? i.hasNext() : i.hasPrevious()) {
        taskReference = (PredecessorTaskList.TaskReference) (forward ? i.next() : i.previous());
        traceTask = task = taskReference.getTask();
        context.taskReferenceType = taskReference.getType();
        schedule = task.getSchedule(context.scheduleType);
        if (!forward)
            context.taskReferenceType = -taskReference.getType();

        if (task.isReverseScheduled()) {//  reverse scheduled must always be calculated
            schedule.invalidate();
            task.setCalculationStateCount(context.stateCount);
        }
        if (task.getCalculationStateCount() >= context.stateCount) {
            schedule.calcDates(context);
            if (context.assign && (projectForward || !task.isWbsParent())) { // in reverse scheduling, I see some parents have 0 or 1 as their dates. This is a workaround.
                if (schedule.getBegin() != 0L && !isSentinel(task))
                    earliestStart = Math.min(earliestStart, schedule.getStart());
                if (schedule.getEnd() != 0 && !isSentinel(task))
                    latestFinish = Math.max(latestFinish, schedule.getFinish());
            }

            //            schedule.dump();
        }
    }
    //      System.out.println("pass forward=" + forward + " tasks:" + count + " time " + (System.currentTimeMillis() -z) + " ms");
}

From source file:org.commonjava.maven.ext.core.impl.DependencyManipulator.java

/**
 * This will load the remote overrides. It will first try to load any overrides that might have
 * been prepopulated by the REST scanner, failing that it will load from a remote POM file.
 *
 * @return the loaded overrides
 * @throws ManipulationException if an error occurs.
 */
private Map<ArtifactRef, String> loadRemoteOverrides() throws ManipulationException {
    final DependencyState depState = session.getState(DependencyState.class);
    final RESTState restState = session.getState(RESTState.class);
    final List<ProjectVersionRef> gavs = depState.getRemoteBOMDepMgmt();

    // While in theory we are only mapping ProjectRef -> NewVersion, if we store the key as ProjectRef we can't then have
    // org.foo:foobar -> 1.2.0.redhat-2
    // org.foo:foobar -> 2.0.0.redhat-2
    // Which is useful for strictAlignment scenarios (although undefined for non-strict).
    Map<ArtifactRef, String> restOverrides = depState.getRemoteRESTOverrides();
    Map<ArtifactRef, String> bomOverrides = new LinkedHashMap<>();
    Map<ArtifactRef, String> mergedOverrides = new LinkedHashMap<>();

    if (gavs != null) {
        final ListIterator<ProjectVersionRef> iter = gavs.listIterator(gavs.size());
        // Iterate in reverse order so that the first GAV in the list overwrites the last
        while (iter.hasPrevious()) {
            final ProjectVersionRef ref = iter.previous();
            Map<ArtifactRef, String> rBom = effectiveModelBuilder.getRemoteDependencyVersionOverrides(ref);

            // We don't normalise the BOM list here as ::applyOverrides can handle multiple GA with different V
            // for strict override. However, it is undefined if strict is not enabled.
            bomOverrides.putAll(rBom);
        }
    }

    if (depState.getPrecedence() == DependencyPrecedence.BOM) {
        mergedOverrides = bomOverrides;
        if (mergedOverrides.isEmpty()) {
            String msg = restState.isEnabled() ? "dependencySource for restURL" : "dependencyManagement";

            logger.warn("No dependencies found for dependencySource {}. Has {} been configured? ",
                    depState.getPrecedence(), msg);
        }
    }
    if (depState.getPrecedence() == DependencyPrecedence.REST) {
        mergedOverrides = restOverrides;
        if (mergedOverrides.isEmpty()) {
            logger.warn("No dependencies found for dependencySource {}. Has restURL been configured? ",
                    depState.getPrecedence());
        }
    } else if (depState.getPrecedence() == DependencyPrecedence.RESTBOM) {
        mergedOverrides = bomOverrides;

        removeDuplicateArtifacts(mergedOverrides, restOverrides);
        mergedOverrides.putAll(restOverrides);
    } else if (depState.getPrecedence() == DependencyPrecedence.BOMREST) {
        mergedOverrides = restOverrides;
        removeDuplicateArtifacts(mergedOverrides, bomOverrides);
        mergedOverrides.putAll(bomOverrides);
    }
    logger.info("Remote precedence is {}", depState.getPrecedence());
    logger.debug("Final remote override list is {}", mergedOverrides);
    return mergedOverrides;
}

From source file:com.fastbootmobile.encore.app.fragments.PlaylistViewFragment.java

private void playNext() {
    // playNext adds elements after the currently playing one. If we want to play the playlist
    // in the proper order, we need to queue it backwards.
    ListIterator<String> it = mPlaylist.songsList().listIterator();
    while (it.hasNext()) {
        it.next();
    }

    final ProviderAggregator aggregator = ProviderAggregator.getDefault();
    while (it.hasPrevious()) {
        PlaybackProxy.playNext(aggregator.retrieveSong(it.previous(), mPlaylist.getProvider()));
    }
}

From source file:com.projity.pm.graphic.spreadsheet.common.CommonSpreadSheet.java

/**
 * Used by find dialog
 */
public boolean findNext(SearchContext context) {
    SpreadSheetSearchContext ctx = (SpreadSheetSearchContext) context;

    int row = this.getCurrentRow();
    // make sure in bounds
    if (row < 0)
        row = 0;
    if (row >= getCache().getSize())
        row = getCache().getSize() - 1;

    ListIterator i = getCache().getIterator(row);
    if (ctx.getRow() != -1) { // after the first search, need to move ahead or back
        if (ctx.isForward())
            if (i.hasNext())
                i.next();
            else if (i.hasPrevious())
                i.previous();
    }

    boolean found = false;
    GraphicNode gnode = null;
    Object obj;
    Node node;
    while (ctx.isForward() ? i.hasNext() : i.hasPrevious()) {
        gnode = (GraphicNode) (ctx.isForward() ? i.next() : i.previous());
        if (gnode.isVoid())
            continue;
        node = gnode.getNode();
        obj = node.getImpl();
        if (ctx.matches(obj)) {
            found = true;
            break;
        }
    }
    if (found) {
        int r = getCache().getRowAt(gnode);
        int col = getFieldArray().indexOf(ctx.getField()) - 1;
        this.changeSelection(r, col, false, false);
        ctx.setRow(r);
    }
    return found;
}

From source file:org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.java

/**
 * @param server Hosting server instance (May be null when testing).
 * @param services Services of regionserver, used to online regions.
 * @throws IOException If thrown, rollback failed. Take drastic action.
 * @return True if we successfully rolled back, false if we got to the point
 *         of no return and so now need to abort the server to minimize
 *         damage.
 */
@SuppressWarnings("deprecation")
public boolean rollback(final Server server, final RegionServerServices services) throws IOException {
    assert this.mergedRegionInfo != null;
    // Coprocessor callback
    if (rsCoprocessorHost != null) {
        rsCoprocessorHost.preRollBackMerge(this.region_a, this.region_b);
    }

    boolean result = true;
    ListIterator<JournalEntry> iterator = this.journal.listIterator(this.journal.size());
    // Iterate in reverse.
    while (iterator.hasPrevious()) {
        JournalEntry je = iterator.previous();
        switch (je) {

        case SET_MERGING_IN_ZK:
            if (server != null && server.getZooKeeper() != null) {
                cleanZK(server, this.mergedRegionInfo);
            }
            break;

        case CREATED_MERGE_DIR:
            this.region_a.writestate.writesEnabled = true;
            this.region_b.writestate.writesEnabled = true;
            this.region_a.getRegionFileSystem().cleanupMergesDir();
            break;

        case CLOSED_REGION_A:
            try {
                // So, this returns a seqid but if we just closed and then reopened,
                // we should be ok. On close, we flushed using sequenceid obtained
                // from hosting regionserver so no need to propagate the sequenceid
                // returned out of initialize below up into regionserver as we
                // normally do.
                this.region_a.initialize();
            } catch (IOException e) {
                LOG.error(
                        "Failed rollbacking CLOSED_REGION_A of region " + this.region_a.getRegionNameAsString(),
                        e);
                throw new RuntimeException(e);
            }
            break;

        case OFFLINED_REGION_A:
            if (services != null)
                services.addToOnlineRegions(this.region_a);
            break;

        case CLOSED_REGION_B:
            try {
                this.region_b.initialize();
            } catch (IOException e) {
                LOG.error(
                        "Failed rollbacking CLOSED_REGION_A of region " + this.region_b.getRegionNameAsString(),
                        e);
                throw new RuntimeException(e);
            }
            break;

        case OFFLINED_REGION_B:
            if (services != null)
                services.addToOnlineRegions(this.region_b);
            break;

        case STARTED_MERGED_REGION_CREATION:
            this.region_a.getRegionFileSystem().cleanupMergedRegion(this.mergedRegionInfo);
            break;

        case PONR:
            // We got to the point-of-no-return so we need to just abort. Return
            // immediately. Do not clean up created merged regions.
            return false;

        default:
            throw new RuntimeException("Unhandled journal entry: " + je);
        }
    }
    // Coprocessor callback
    if (rsCoprocessorHost != null) {
        rsCoprocessorHost.postRollBackMerge(this.region_a, this.region_b);
    }

    return result;
}

From source file:org.codehaus.mojo.webminifier.WebMinifierMojo.java

/**
 * Main entry point for the MOJO.
 * 
 * @throws MojoExecutionException if there's a problem in the normal course of execution.
 * @throws MojoFailureException if there's a problem with the MOJO itself.
 */
public void execute() throws MojoExecutionException, MojoFailureException {
    // Start off by copying all files over. We'll ultimately remove the js files that we don't need from there, and
    // create new ones in there (same goes for css files and anything else we minify).

    FileUtils.deleteQuietly(destinationFolder);
    try {
        FileUtils.copyDirectory(sourceFolder, destinationFolder);
    } catch (IOException e) {
        throw new MojoExecutionException("Cannot copy file to target folder", e);
    }

    // Process each HTML source file and concatenate into unminified output scripts
    int minifiedCounter = 0;

    // If a split point already exists on disk then we've been through the minification process. As
    // minification can be expensive, we would like to avoid performing it multiple times. Thus storing
    // a set of what we've previously minified enables us to skip repeat work.
    Set<File> existingConcatenatedJsResources = new HashSet<File>();
    Set<File> consumedJsResources = new HashSet<File>();

    for (String targetHTMLFile : getArrayOfTargetHTMLFiles()) {
        File targetHTML = new File(destinationFolder, targetHTMLFile);

        // Parse HTML file and locate SCRIPT elements
        DocumentResourceReplacer replacer;
        try {
            replacer = new DocumentResourceReplacer(targetHTML);
        } catch (SAXException e) {
            throw new MojoExecutionException("Problem reading html document", e);
        } catch (IOException e) {
            throw new MojoExecutionException("Problem opening html document", e);
        }

        List<File> jsResources = replacer.findJSResources();

        if (jsSplitPoints == null) {
            jsSplitPoints = new Properties();
        }

        File concatenatedJsResource = null;

        URI destinationFolderUri = destinationFolder.toURI();

        // Split the js resources into two lists: one containing all external dependencies, the other containing
        // project sources. We do this so that project sources can be minified without the dependencies (libraries
        // generally don't need to distribute the dependencies).
        int jsDependencyProjectResourcesIndex;

        if (splitDependencies) {
            List<File> jsDependencyResources = new ArrayList<File>(jsResources.size());
            List<File> jsProjectResources = new ArrayList<File>(jsResources.size());
            for (File jsResource : jsResources) {
                String jsResourceUri = destinationFolderUri.relativize(jsResource.toURI()).toString();
                File jsResourceFile = new File(projectSourceFolder, jsResourceUri);
                if (jsResourceFile.exists()) {
                    jsProjectResources.add(jsResource);
                } else {
                    jsDependencyResources.add(jsResource);
                }
            }

            // Re-constitute the js resource list from dependency resources + project resources and note the index
            // in the list that represents the start of project sources in the list. We need this information later.
            jsDependencyProjectResourcesIndex = jsDependencyResources.size();

            jsResources = jsDependencyResources;
            jsResources.addAll(jsProjectResources);
        } else {
            jsDependencyProjectResourcesIndex = 0;
        }

        // Walk backwards through the script declarations and note what files will map to what split point.
        Map<File, File> jsResourceTargetFiles = new LinkedHashMap<File, File>(jsResources.size());
        ListIterator<File> jsResourcesIter = jsResources.listIterator(jsResources.size());

        boolean splittingDependencies = false;

        while (jsResourcesIter.hasPrevious()) {
            int jsResourceIterIndex = jsResourcesIter.previousIndex();
            File jsResource = jsResourcesIter.previous();

            String candidateSplitPointNameUri = destinationFolderUri.relativize(jsResource.toURI()).toString();
            String splitPointName = (String) jsSplitPoints.get(candidateSplitPointNameUri);

            // If we do not have a split point name and the resource is a dependency of this project i.e. it is not
            // within our src/main folder then we give it a split name of "dependencies". Factoring out dependencies
            // into their own split point is a useful thing to do and will always be required when building
            // libraries.
            if (splitDependencies && splitPointName == null && !splittingDependencies) {
                if (jsResourceIterIndex < jsDependencyProjectResourcesIndex) {
                    splitPointName = Integer.valueOf(++minifiedCounter).toString();
                    splittingDependencies = true;
                }
            }

            // If we have no name and we've not been in here before, then assign an initial name based on a number.
            if (splitPointName == null && concatenatedJsResource == null) {
                splitPointName = Integer.valueOf(++minifiedCounter).toString();
            }

            // We have a new split name so use it for this file and upwards in the script statements until we
            // either hit another split point or there are no more script statements.
            if (splitPointName != null) {
                concatenatedJsResource = new File(destinationFolder, splitPointName + ".js");

                // Note that we've previously created this.
                if (concatenatedJsResource.exists()) {
                    existingConcatenatedJsResources.add(concatenatedJsResource);
                }
            }

            jsResourceTargetFiles.put(jsResource, concatenatedJsResource);
        }

        for (File jsResource : jsResources) {
            concatenatedJsResource = jsResourceTargetFiles.get(jsResource);
            if (!existingConcatenatedJsResources.contains(concatenatedJsResource)) {
                // Concatenate input file onto output resource file
                try {
                    concatenateFile(jsResource, concatenatedJsResource);
                } catch (IOException e) {
                    throw new MojoExecutionException("Problem concatenating JS files", e);
                }

                // Finally, remove the JS resource from the target folder as it is no longer required (we've
                // concatenated it).
                consumedJsResources.add(jsResource);
            }
        }

        // Reduce the list of js resource target files to a distinct set
        LinkedHashSet<File> concatenatedJsResourcesSet = new LinkedHashSet<File>(
                jsResourceTargetFiles.values());
        File[] concatenatedJsResourcesArray = new File[concatenatedJsResourcesSet.size()];
        concatenatedJsResourcesSet.toArray(concatenatedJsResourcesArray);
        List<File> concatenatedJsResources = Arrays.asList(concatenatedJsResourcesArray);

        // Minify the concatenated JS resource files

        if (jsCompressorType != JsCompressorType.NONE) {
            List<File> minifiedJSResources = new ArrayList<File>(concatenatedJsResources.size());

            ListIterator<File> concatenatedJsResourcesIter = concatenatedJsResources
                    .listIterator(concatenatedJsResources.size());
            while (concatenatedJsResourcesIter.hasPrevious()) {
                concatenatedJsResource = concatenatedJsResourcesIter.previous();

                File minifiedJSResource;
                try {
                    String uri = concatenatedJsResource.toURI().toString();
                    int i = uri.lastIndexOf(".js");
                    String minUri;
                    if (i > -1) {
                        minUri = uri.substring(0, i) + "-min.js";
                    } else {
                        minUri = uri;
                    }
                    minifiedJSResource = FileUtils.toFile(new URL(minUri));
                } catch (MalformedURLException e) {
                    throw new MojoExecutionException("Problem determining file URL", e);
                }

                minifiedJSResources.add(minifiedJSResource);

                // If we've not actually performed the minification before... then do so. This is the expensive bit
                // so we like to avoid it if we can.
                if (!existingConcatenatedJsResources.contains(concatenatedJsResource)) {
                    boolean warningsFound;
                    try {
                        warningsFound = minifyJSFile(concatenatedJsResource, minifiedJSResource);
                    } catch (IOException e) {
                        throw new MojoExecutionException("Problem reading/writing JS", e);
                    }

                    logCompressionRatio(minifiedJSResource.getName(), concatenatedJsResource.length(),
                            minifiedJSResource.length());

                    // If there were warnings then the user may want to manually invoke the compressor for further
                    // investigation.
                    if (warningsFound) {
                        getLog().warn("Warnings were found. " + concatenatedJsResource
                                + " is available for your further investigations.");
                    }
                }
            }

            // Update source references
            replacer.replaceJSResources(destinationFolder, targetHTML, minifiedJSResources);
        } else {
            List<File> unminifiedJSResources = new ArrayList<File>(concatenatedJsResources.size());

            ListIterator<File> concatenatedJsResourcesIter = concatenatedJsResources
                    .listIterator(concatenatedJsResources.size());
            while (concatenatedJsResourcesIter.hasPrevious()) {
                concatenatedJsResource = concatenatedJsResourcesIter.previous();
                unminifiedJSResources.add(concatenatedJsResource);
            }

            replacer.replaceJSResources(destinationFolder, targetHTML, unminifiedJSResources);
            getLog().info("Concatenated resources with no compression");
        }

        // Write HTML file to output dir
        try {
            replacer.writeHTML(targetHTML, encoding);
        } catch (TransformerException e) {
            throw new MojoExecutionException("Problem transforming html", e);
        } catch (IOException e) {
            throw new MojoExecutionException("Problem writing html", e);
        }

    }

    // Clean up including the destination folder recursively where directories have nothing left in them.
    for (File consumedJsResource : consumedJsResources) {
        consumedJsResource.delete();
    }
    removeEmptyFolders(destinationFolder);
}

From source file:org.apache.hadoop.hbase.regionserver.SplitTransaction.java

/**
 * @param server Hosting server instance (May be null when testing).
 * @param services
 * @throws IOException If thrown, rollback failed.  Take drastic action.
 * @return True if we successfully rolled back, false if we got to the point
 * of no return and so now need to abort the server to minimize damage.
 */
@SuppressWarnings("deprecation")
public boolean rollback(final Server server, final RegionServerServices services) throws IOException {
    // Coprocessor callback
    if (this.parent.getCoprocessorHost() != null) {
        this.parent.getCoprocessorHost().preRollBackSplit();
    }

    boolean result = true;
    ListIterator<JournalEntry> iterator = this.journal.listIterator(this.journal.size());
    // Iterate in reverse.
    while (iterator.hasPrevious()) {
        JournalEntry je = iterator.previous();
        switch (je) {

        case SET_SPLITTING_IN_ZK:
            if (server != null && server.getZooKeeper() != null) {
                cleanZK(server, this.parent.getRegionInfo());
            }
            break;

        case CREATE_SPLIT_DIR:
            this.parent.writestate.writesEnabled = true;
            this.parent.getRegionFileSystem().cleanupSplitsDir();
            break;

        case CLOSED_PARENT_REGION:
            try {
                // So, this returns a seqid but if we just closed and then reopened, we
                // should be ok. On close, we flushed using sequenceid obtained from
                // hosting regionserver so no need to propagate the sequenceid returned
                // out of initialize below up into regionserver as we normally do.
                // TODO: Verify.
                this.parent.initialize();
            } catch (IOException e) {
                LOG.error("Failed rollbacking CLOSED_PARENT_REGION of region "
                        + this.parent.getRegionNameAsString(), e);
                throw new RuntimeException(e);
            }
            break;

        case STARTED_REGION_A_CREATION:
            this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_a);
            break;

        case STARTED_REGION_B_CREATION:
            this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_b);
            break;

        case OFFLINED_PARENT:
            if (services != null)
                services.addToOnlineRegions(this.parent);
            break;

        case PONR:
            // We got to the point-of-no-return so we need to just abort. Return
            // immediately.  Do not clean up created daughter regions.  They need
            // to be in place so we don't delete the parent region mistakenly.
            // See HBASE-3872.
            return false;

        default:
            throw new RuntimeException("Unhandled journal entry: " + je);
        }
    }
    // Coprocessor callback
    if (this.parent.getCoprocessorHost() != null) {
        this.parent.getCoprocessorHost().postRollBackSplit();
    }
    return result;
}

From source file:org.apache.hadoop.hbase.regionserver.IndexSplitTransaction.java

/**
 * @param server Hosting server instance (May be null when testing).
 * @param services
 * @throws IOException If thrown, rollback failed.  Take drastic action.
 * @return True if we successfully rolled back, false if we got to the point
 * of no return and so now need to abort the server to minimize damage.
 */
@Override
@SuppressWarnings("deprecation")
public boolean rollback(final Server server, final RegionServerServices services) throws IOException {
    // Coprocessor callback
    if (this.parent.getCoprocessorHost() != null) {
        this.parent.getCoprocessorHost().preRollBackSplit();
    }

    boolean result = true;
    ListIterator<JournalEntry> iterator = this.journal.listIterator(this.journal.size());
    // Iterate in reverse.
    while (iterator.hasPrevious()) {
        JournalEntry je = iterator.previous();
        switch (je) {

        case SET_SPLITTING_IN_ZK:
            if (server != null && server.getZooKeeper() != null) {
                cleanZK(server, this.parent.getRegionInfo());
            }
            break;

        case CREATE_SPLIT_DIR:
            this.parent.writestate.writesEnabled = true;
            this.parent.getRegionFileSystem().cleanupSplitsDir();
            break;

        case CLOSED_PARENT_REGION:
            try {
                // So, this returns a seqid but if we just closed and then reopened, we
                // should be ok. On close, we flushed using sequenceid obtained from
                // hosting regionserver so no need to propagate the sequenceid returned
                // out of initialize below up into regionserver as we normally do.
                // TODO: Verify.
                this.parent.initialize();
            } catch (IOException e) {
                LOG.error("Failed rollbacking CLOSED_PARENT_REGION of region "
                        + this.parent.getRegionInfo().getRegionNameAsString(), e);
                throw new RuntimeException(e);
            }
            break;

        case STARTED_REGION_A_CREATION:
            this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_a);
            break;

        case STARTED_REGION_B_CREATION:
            this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_b);
            break;

        case OFFLINED_PARENT:
            if (services != null)
                services.addToOnlineRegions(this.parent);
            break;

        case PONR:
            // We got to the point-of-no-return so we need to just abort. Return
            // immediately.  Do not clean up created daughter regions.  They need
            // to be in place so we don't delete the parent region mistakenly.
            // See HBASE-3872.
            return false;

        default:
            throw new RuntimeException("Unhandled journal entry: " + je);
        }
    }
    // Coprocessor callback
    if (this.parent.getCoprocessorHost() != null) {
        this.parent.getCoprocessorHost().postRollBackSplit();
    }
    return result;
}