Example usage for java.util Queue peek

List of usage examples for java.util Queue peek

Introduction

On this page you can find example usage for java.util Queue peek.

Prototype

E peek();

Document

Retrieves, but does not remove, the head of this queue, or returns null if this queue is empty.
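
A short, self-contained sketch illustrates that contract (the class and variable names here are illustrative only): peek() returns the head without consuming it and yields null on an empty queue, whereas poll() also removes the head.

import java.util.ArrayDeque;
import java.util.Queue;

public class QueuePeekDemo {
    public static void main(String[] args) {
        Queue<String> queue = new ArrayDeque<String>();

        // peek() on an empty queue returns null instead of throwing
        System.out.println(queue.peek()); // null

        queue.add("first");
        queue.add("second");

        // peek() retrieves the head but does not remove it
        System.out.println(queue.peek()); // first
        System.out.println(queue.size()); // 2

        // poll() retrieves and removes the head
        System.out.println(queue.poll()); // first
        System.out.println(queue.peek()); // second
    }
}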

Usage

From source file: jef.database.DbUtils.java

/**
 * Executes the given tasks in parallel and waits for all of them to complete.
 * 
 * @param tasks
 * @throws SQLException
 */
public static void parallelExecute(List<DbTask> tasks) throws SQLException {
    CountDownLatch latch = new CountDownLatch(tasks.size());
    Queue<SQLException> exceptions = new ConcurrentLinkedQueue<SQLException>();
    Queue<Throwable> throwables = new ConcurrentLinkedQueue<Throwable>();
    for (DbTask task : tasks) {
        task.prepare(latch, exceptions, throwables);
        DbUtils.es.execute(task);
    }
    try {
        latch.await();
    } catch (InterruptedException e) {
        throw new SQLException(e);
    }
    if (!exceptions.isEmpty()) {
        throw DbUtils.wrapExceptions(exceptions);
    }
    if (!throwables.isEmpty()) {
        throw DbUtils.toRuntimeException(throwables.peek());
    }
}
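
In the parallelExecute example above, worker tasks record failures in concurrent queues and the coordinating thread uses peek() to surface the first captured Throwable without draining the queue. A minimal sketch of the same pattern, assuming a plain ExecutorService and hypothetical task bodies:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ParallelFailureDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        final Queue<Throwable> failures = new ConcurrentLinkedQueue<Throwable>();
        final CountDownLatch latch = new CountDownLatch(4);

        for (int i = 0; i < 4; i++) {
            final int id = i;
            pool.execute(new Runnable() {
                public void run() {
                    try {
                        if (id == 2) {
                            throw new IllegalStateException("task " + id + " failed");
                        }
                    } catch (Throwable t) {
                        failures.add(t); // ConcurrentLinkedQueue is safe to add to from many threads
                    } finally {
                        latch.countDown();
                    }
                }
            });
        }

        latch.await();
        pool.shutdown();

        // peek() exposes the first recorded failure without emptying the queue
        if (!failures.isEmpty()) {
            throw new RuntimeException(failures.peek());
        }
    }
}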

From source file: it.geosolutions.geobatch.postgres.shp2pg.Shp2pgAction.java

/**
 * Removes TemplateModelEvents from the queue and put
 */
public Queue<EventObject> execute(Queue<EventObject> events) throws ActionException {
    listenerForwarder.setTask("config");
    listenerForwarder.started();
    if (configuration == null) {
        throw new IllegalStateException("ActionConfig is null.");
    }
    File workingDir = Path.findLocation(configuration.getWorkingDirectory(),
            ((FileBaseCatalog) CatalogHolder.getCatalog()).getConfigDirectory());
    if (workingDir == null) {
        throw new IllegalStateException("Working directory is null.");
    }
    if (!workingDir.exists() || !workingDir.isDirectory()) {
        throw new IllegalStateException((new StringBuilder()).append("Working directory does not exist (")
                .append(workingDir.getAbsolutePath()).append(").").toString());
    }
    FileSystemEvent event = (FileSystemEvent) events.peek();
    // String shapeName = null;
    File shapefile = null;
    File zippedFile = null;
    File files[];
    if (events.size() == 1) {
        zippedFile = event.getSource();
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace((new StringBuilder()).append("Testing for compressed file: ")
                    .append(zippedFile.getAbsolutePath()).toString());
        }
        String tmpDirName = null;
        try {
            tmpDirName = Extract.extract(zippedFile.getAbsolutePath());
        } catch (Exception e) {
            final String message = "Shp2pgAction.execute(): Unable to read zip file: "
                    + e.getLocalizedMessage();
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message);
            throw new ActionException(this, message);
        }
        listenerForwarder.progressing(5F, "File extracted");
        File tmpDirFile = new File(tmpDirName);
        if (!tmpDirFile.isDirectory()) {
            throw new IllegalStateException("Not valid input: we need a zip file ");
        }
        Collector c = new Collector(null);
        List fileList = c.collect(tmpDirFile);
        if (fileList != null) {
            files = (File[]) fileList.toArray(new File[1]);
        } else {
            String message = "Input is neither a zipped file nor a valid collection of files";
            if (LOGGER.isErrorEnabled()) {
                LOGGER.error(message);
            }
            throw new IllegalStateException(message);
        }
    } else if (events.size() >= 3) {
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("Checking input collection...");
        }
        listenerForwarder.progressing(6F, "Checking input collection...");
        files = new File[events.size()];
        int i = 0;
        for (Iterator i$ = events.iterator(); i$.hasNext();) {
            FileSystemEvent ev = (FileSystemEvent) i$.next();
            files[i++] = ev.getSource();
        }

    } else {
        throw new IllegalStateException("Input is neither a zipped file nor a valid collection of files");
    }
    if ((shapefile = acceptable(files)) == null) {
        throw new IllegalStateException("The file list does not contain the mandatory files");
    }

    listenerForwarder.progressing(10F, "In progress");

    // At this moment i have the shape and a file list

    // connect to the shapefile
    final Map<String, Serializable> connect = new HashMap<String, Serializable>();
    connect.put("url", DataUtilities.fileToURL(shapefile));

    DataStore sourceDataStore = null;
    String typeName = null;
    SimpleFeatureType originalSchema = null;
    try {
        sourceDataStore = SHP_FACTORY.createDataStore(connect);
        String[] typeNames = sourceDataStore.getTypeNames();
        typeName = typeNames[0];

        if (LOGGER.isInfoEnabled()) {
            LOGGER.info("Reading content " + typeName);
        }

        originalSchema = sourceDataStore.getSchema(typeName);
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info("SCHEMA HEADER: " + DataUtilities.spec(originalSchema));
        }
    } catch (IOException e) {
        final String message = "Error creating the shapefile datastore: " + e.getLocalizedMessage();
        if (LOGGER.isErrorEnabled())
            LOGGER.error(message);
        if (sourceDataStore != null)
            sourceDataStore.dispose();
        throw new ActionException(this, message);
    }
    // prepare to open up a reader for the shapefile
    Query query = new Query();
    query.setTypeName(typeName);
    CoordinateReferenceSystem prj = originalSchema.getCoordinateReferenceSystem();
    query.setCoordinateSystem(prj);

    DataStore destinationDataSource = null;
    try {
        destinationDataSource = createPostgisDataStore(configuration);

        // check if the schema is present in postgis
        boolean schema = false;
        if (destinationDataSource.getTypeNames().length != 0) {
            for (String tableName : destinationDataSource.getTypeNames()) {
                if (tableName.equalsIgnoreCase(typeName)) {
                    schema = true;
                }
            }
        } else {
            schema = false;
        }
        if (!schema)
            destinationDataSource.createSchema(originalSchema);
        LOGGER.info("SCHEMA: " + schema);

    } catch (IOException e) {
        String message = "Error creating the PostGIS datastore";
        if (LOGGER.isErrorEnabled()) {
            LOGGER.error(message);
        }
        if (destinationDataSource != null)
            destinationDataSource.dispose();
        throw new IllegalStateException(message);
    }

    final Transaction transaction = new DefaultTransaction("create");
    FeatureWriter<SimpleFeatureType, SimpleFeature> fw = null;
    FeatureReader<SimpleFeatureType, SimpleFeature> fr = null;
    try {
        SimpleFeatureBuilder builder = new SimpleFeatureBuilder(destinationDataSource.getSchema(typeName));
        fw = destinationDataSource.getFeatureWriter(typeName, transaction);
        fr = sourceDataStore.getFeatureReader(query, transaction);
        SimpleFeatureType sourceSchema = sourceDataStore.getSchema(typeName);
        FeatureStore postgisStore = (FeatureStore) destinationDataSource.getFeatureSource(typeName);
        while (fr.hasNext()) {
            final SimpleFeature oldfeature = fr.next();

            for (AttributeDescriptor ad : sourceSchema.getAttributeDescriptors()) {
                String attribute = ad.getLocalName();
                builder.set(attribute, oldfeature.getAttribute(attribute));
            }
            postgisStore.addFeatures(DataUtilities.collection(builder.buildFeature(null)));

        }

        // close transaction
        transaction.commit();

    } catch (Throwable e) {
        try {
            transaction.rollback();
        } catch (IOException e1) {
            final String message = "Transaction rollback unsuccessful: " + e1.getLocalizedMessage();
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message);
            throw new ActionException(this, message);
        }
    } finally {
        try {
            transaction.close();
        } catch (IOException e) {
            final String message = "Transaction close unsuccessful: " + e.getLocalizedMessage();
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message);
            throw new ActionException(this, message);
        }
        if (fr != null) {
            try {
                fr.close();
            } catch (IOException e1) {
                final String message = "Feature reader IO exception: " + e1.getLocalizedMessage();
                if (LOGGER.isErrorEnabled())
                    LOGGER.error(message);
                throw new ActionException(this, message);
            }
        }
        if (fw != null) {
            try {
                fw.close();
            } catch (IOException e1) {
                final String message = "Feature writer IO exception: " + e1.getLocalizedMessage();
                if (LOGGER.isErrorEnabled())
                    LOGGER.error(message);
                throw new ActionException(this, message);
            }
        }
        if (sourceDataStore != null) {
            try {
                sourceDataStore.dispose();
            } catch (Throwable t) {
            }
        }
        if (destinationDataSource != null) {
            try {
                destinationDataSource.dispose();
            } catch (Throwable t) {
            }
        }
    }

    GeoServerRESTPublisher publisher = new GeoServerRESTPublisher(configuration.getGeoserverURL(),
            configuration.getGeoserverUID(), configuration.getGeoserverPWD());

    publisher.createWorkspace(configuration.getDefaultNamespace());

    GSPostGISDatastoreEncoder datastoreEncoder = new GSPostGISDatastoreEncoder();

    datastoreEncoder.setUser(configuration.getDbUID());
    datastoreEncoder.setDatabase(configuration.getDbName());
    datastoreEncoder.setPassword(configuration.getDbPWD());
    datastoreEncoder.setHost(configuration.getDbServerIp());
    datastoreEncoder.setPort(Integer.valueOf(configuration.getDbPort()));
    datastoreEncoder.setName(configuration.getDbName());

    publisher.createPostGISDatastore(configuration.getDefaultNamespace(), datastoreEncoder);
    String shapeFileName = FilenameUtils.getBaseName(shapefile.getName());

    if (LOGGER.isInfoEnabled()) {
        LOGGER.info("Layer postgis publishing xml-> ");
        LOGGER.info("datastoreEncoder xml: " + datastoreEncoder.toString());
    }

    if (publisher.publishDBLayer(configuration.getDefaultNamespace(), configuration.getDbName(), shapeFileName,
            configuration.getCrs(), configuration.getDefaultStyle())) {
        String message = "PostGis layer SUCCESSFULLY registered";
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info(message);
        }
        listenerForwarder.progressing(100F, message);
    } else {
        String message = "PostGis layer not registered";
        ActionException ae = new ActionException(this, message);
        if (LOGGER.isErrorEnabled()) {
            LOGGER.error(message, ae);
        }
        listenerForwarder.failed(ae);
    }
    events.clear();

    return events;
}

From source file: it.geosolutions.geobatch.geoserver.shapefile.ShapeFileAction.java

/**
 *
 */
public Queue<EventObject> execute(Queue<EventObject> events) throws ActionException {

    listenerForwarder.setTask("config");
    listenerForwarder.started();

    try {
        //
        // Initializing input variables
        //
        GeoServerActionConfiguration configuration = getConfiguration();
        if (configuration == null) {
            throw new IllegalStateException("ActionConfig is null.");
        }

        // how many files do we have?
        final int inputSize = events.size();

        // Fetch the first event in the queue.
        // We may have one in these 2 cases:
        // 1) a single event for a .zip file
        // 2) a list of events for a (.shp+.dbf+.shx) collection, plus some other optional files
        final EventObject event = events.peek();

        // the name of the shapefile
        String[] shapeNames;

        // the output (to send to the geoserver) file
        File zippedFile = null;

        // upload method to use
        it.geosolutions.geobatch.geoserver.UploadMethod transferMethod = it.geosolutions.geobatch.geoserver.UploadMethod
                .valueOf(configuration.getDataTransferMethod());
        if (transferMethod == null) {
            transferMethod = it.geosolutions.geobatch.geoserver.UploadMethod.getDefault(); // default one
        }

        // list of file to send to the GeoServer
        File[] files = null;
        File tmpDirFile = null;
        Integer epsgCode = null;
        GeometryDescriptor descriptor = null;
        CoordinateReferenceSystem crs = null;

        if (inputSize == 1) {
            //
            // SINGLE FILE, is a zip or throw error
            //
            zippedFile = toFile(event);
            if (LOGGER.isDebugEnabled())
                LOGGER.debug("Testing for compressed file: " + zippedFile);

            // try to extract
            tmpDirFile = Extract.extract(zippedFile, getTempDir(), false);
            listenerForwarder.progressing(5, "File extracted");

            // if the output of Extract is not a directory, the event was not a compressed
            // file, so we have to throw an error
            if (tmpDirFile == null) {
                throw new IllegalStateException("Not valid input: we need a zip file ");
            }

            if (!tmpDirFile.isDirectory()) {
                if (!tmpDirFile.isFile()) {
                    throw new IllegalStateException("Not valid input: we need a zip file ");
                } else {
                    tmpDirFile = tmpDirFile.getParentFile();
                }
            }

            // collect extracted files
            final Collector c = new Collector(
                    FileFilterUtils.notFileFilter(FileFilterUtils.nameFileFilter(tmpDirFile.getName()))); // no filter
            final List<File> fileList = c.collect(tmpDirFile);
            files = fileList.toArray(new File[1]);

            // Check if there is at least one shp there
            shapeNames = acceptable(files);

        } else {
            //
            // Multiple separated files, let's look for the right one
            //
            if (LOGGER.isTraceEnabled())
                LOGGER.trace("Checking input collection...");

            listenerForwarder.progressing(5, "Checking input collection...");

            // collect files
            files = new File[events.size()];
            int i = 0;
            for (EventObject ev : events) {
                files[i++] = toFile(ev);
            }

            // Get tmp dir from the absolute path of the first captured file
            tmpDirFile = new File(FilenameUtils.getFullPath(files[0].getAbsolutePath()));

            // Check for shapefile names
            shapeNames = acceptable(files);

            // zip to a single file if method is not external.
            // Will use the first shapeName as the zip name.
            if (transferMethod != it.geosolutions.geobatch.geoserver.UploadMethod.EXTERNAL) {
                zippedFile = Compressor.deflate(getTempDir(), shapeNames[0], files);
                if (zippedFile == null) {
                    throw new IllegalStateException("Unable to create the zip file");
                }
            }

        }

        // check that we actually found some shapefiles
        if (shapeNames == null) {
            final String message = "Input is neither a zipped file nor a valid collection of files";
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message);
            throw new IllegalStateException(message);
        }

        // do some additional checks and look for some auxiliary information
        for (String shape : shapeNames) {
            FileDataStore store = null;

            try {
                // create a shapefile datastore
                store = Utils.SHP_FACTORY.createDataStore(new File(tmpDirFile, shape + ".shp").toURI().toURL());

                // get the CRS
                crs = store.getSchema().getCoordinateReferenceSystem();
                epsgCode = crs != null ? CRS.lookupEpsgCode(crs, false) : null;

                // get the geometry
                descriptor = store.getSchema().getGeometryDescriptor();
            } finally {
                if (store != null) {
                    try {
                        store.dispose();
                    } catch (Exception e) {
                        if (LOGGER.isTraceEnabled()) {
                            LOGGER.trace(e.getLocalizedMessage(), e);
                        }
                    }
                }
            }
        }
        listenerForwarder.progressing(10, "In progress");

        GeoServerRESTReader reader = new GeoServerRESTReader(configuration.getGeoserverURL(),
                configuration.getGeoserverUID(), configuration.getGeoserverPWD());
        GeoServerRESTPublisher publisher = new GeoServerRESTPublisher(configuration.getGeoserverURL(),
                configuration.getGeoserverUID(), configuration.getGeoserverPWD());

        WorkspaceUtils.createWorkspace(reader, publisher, configuration.getDefaultNamespace(),
                configuration.getDefaultNamespaceUri());

        // TODO: check if a layer with the same name already exists in GS
        // TODO: Handle CRSs for multiple files
        // TODO: Handle styles for multiple files (see comment on #16)

        // decide CRS
        String nativeCRS = null;
        ProjectionPolicy projectionPolicy = ProjectionPolicy.NONE; // by default we do nothing
        final String defaultCRS = configuration.getCrs(); //do we have a default crs in the config
        String finalEPSGCode = defaultCRS; // this is the SRS for this shape

        // retain original CRS if the code is there
        if (epsgCode == null) {
            // we do not have a valid EPSG code in the input file, we do need one as per default
            if (finalEPSGCode == null) {
                final String message = "Input file has no CRS and the configuration does not provide a default one";
                final ActionException ae = new ActionException(this, message);
                if (LOGGER.isErrorEnabled())
                    LOGGER.error(message, ae);
                listenerForwarder.failed(ae);
                throw ae;
            }

            // we do have a default, let's choose the proper CRS management
            if (crs != null) {
                // we have a WKT native crs, let's use it
                nativeCRS = crs.toWKT();
                projectionPolicy = ProjectionPolicy.REPROJECT_TO_DECLARED;
            } else {
                projectionPolicy = ProjectionPolicy.FORCE_DECLARED;
            }

        } else {
            // we do have an EPSG code for the original CRS, do nothing
            finalEPSGCode = "EPSG:" + epsgCode;
            nativeCRS = finalEPSGCode;
        }

        // check style for this geometry
        String defaultStyle = configuration.getDefaultStyle();
        if (defaultStyle == null || defaultStyle.isEmpty()) {
            final GeometryType geometryType = descriptor.getType();
            Class clazz = geometryType.getBinding();
            if (clazz.isAssignableFrom(Point.class) || clazz.isAssignableFrom(MultiPoint.class)) {
                defaultStyle = Utils.DEFAULT_POINT_STYLE;
            } else if (clazz.isAssignableFrom(LineString.class)
                    || clazz.isAssignableFrom(MultiLineString.class)) {
                defaultStyle = Utils.DEFAULT_LINE_STYLE;
            } else if (clazz.isAssignableFrom(Polygon.class) || clazz.isAssignableFrom(MultiPolygon.class)) {
                defaultStyle = Utils.DEFAULT_POLYGON_STYLE;
            }
        }

        UploadMethod uMethod = null;
        switch (transferMethod) {
        case DIRECT:
            uMethod = UploadMethod.FILE;
            break;
        case EXTERNAL:
            uMethod = UploadMethod.EXTERNAL;
            break;
        default:
            throw new IllegalArgumentException(
                    "Unsupported transfer method: " + configuration.getDataTransferMethod());
        }

        // Get some common parameters
        String wsName = configuration.getDefaultNamespace();
        String dsName = configuration.getStoreName() == null ? shapeNames[0] : configuration.getStoreName();
        String lyrName = configuration.getLayerName() == null ? shapeNames[0] : configuration.getLayerName();
        String styleName = defaultStyle;

        //
        // SENDING data to GeoServer via REST protocol.
        //
        boolean success = false;

        // Either publish a single shapefile, or a collection of shapefiles
        if (shapeNames.length == 1) {
            success = publisher.publishShp(wsName, dsName, null, lyrName, uMethod, zippedFile.toURI(),
                    finalEPSGCode, nativeCRS, projectionPolicy, styleName);
        } else {
            success = publisher.publishShpCollection(wsName, dsName, zippedFile.toURI());
        }

        if (success) {
            final String message = "Shape file SUCCESSFULLY sent";
            if (LOGGER.isInfoEnabled())
                LOGGER.info(message);
            listenerForwarder.progressing(90, message);
        } else {
            final String message = "Shape file FAILED to be sent";
            final ActionException ae = new ActionException(this, message);
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message, ae);
            listenerForwarder.failed(ae);
            throw ae;
        }

        // If we have shape specific config, apply now
        if (configuration instanceof GeoServerShapeActionConfiguration) {
            // Log
            if (LOGGER.isInfoEnabled())
                LOGGER.info("Configuring shape datastore connection parameters");

            // Get config
            GeoServerShapeActionConfiguration shpConfig = (GeoServerShapeActionConfiguration) configuration;

            // Get managers from geoserver-manager
            GeoServerRESTManager manager = new GeoServerRESTManager(new URL(shpConfig.getGeoserverURL()),
                    shpConfig.getGeoserverUID(), shpConfig.getGeoserverPWD());
            GeoServerRESTStoreManager dsManager = manager.getStoreManager();

            // Read config from GS
            RESTDataStore dsRead = manager.getReader().getDatastore(wsName, dsName);
            GSShapefileDatastoreEncoder dsWrite = new GSShapefileDatastoreEncoder(dsRead);

            // Update store params
            if (shpConfig.getUrl() != null)
                dsWrite.setUrl(shpConfig.getUrl());
            if (shpConfig.getCharset() != null)
                dsWrite.setCharset(shpConfig.getCharset());
            if (shpConfig.getCreateSpatialIndex() != null)
                dsWrite.setCreateSpatialIndex(shpConfig.getCreateSpatialIndex());
            if (shpConfig.getMemoryMappedBuffer() != null)
                dsWrite.setMemoryMappedBuffer(shpConfig.getMemoryMappedBuffer());
            if (shpConfig.getCacheAndReuseMemoryMaps() != null)
                dsWrite.setCacheAndReuseMemoryMaps(shpConfig.getCacheAndReuseMemoryMaps());

            // Push changes to GS
            success = dsManager.update(wsName, dsWrite);

            // Success or die
            if (success) {
                String message = "Shape datastore SUCCESSFULLY configured";
                if (LOGGER.isInfoEnabled())
                    LOGGER.info(message);
                listenerForwarder.progressing(100, message);
            } else {
                String message = "Shape datastore FAILED to be configured";
                final ActionException ae = new ActionException(this, message);
                if (LOGGER.isErrorEnabled())
                    LOGGER.error(message, ae);
                listenerForwarder.failed(ae);
                throw ae;
            }
        }

        return events;

    } catch (Throwable t) {
        final ActionException ae = new ActionException(this, t.getMessage(), t);
        if (LOGGER.isErrorEnabled())
            LOGGER.error(ae.getLocalizedMessage(), ae);
        listenerForwarder.failed(ae); // fails the Action
        throw ae;
    }
}

From source file: cn.edu.bjtu.cit.recommender.Recommender.java

@SuppressWarnings("unchecked")
public int run(String[] args) throws Exception {
    if (args.length < 2) {
        System.err.println();
        System.err.println("Usage: " + this.getClass().getName()
                + " [generic options] input output [profiling] [estimation] [clustersize]");
        System.err.println();
        printUsage();
        GenericOptionsParser.printGenericCommandUsage(System.err);

        return 1;
    }
    OptionParser parser = new OptionParser(args);

    Pipeline pipeline = new MRPipeline(Recommender.class, getConf());

    if (parser.hasOption(CLUSTER_SIZE)) {
        pipeline.getConfiguration().setInt(ClusterOracle.CLUSTER_SIZE,
                Integer.parseInt(parser.getOption(CLUSTER_SIZE).getValue()));
    }

    if (parser.hasOption(PROFILING)) {
        pipeline.getConfiguration().setBoolean(Profiler.IS_PROFILE, true);
        this.profileFilePath = parser.getOption(PROFILING).getValue();

    }

    if (parser.hasOption(ESTIMATION)) {
        estFile = parser.getOption(ESTIMATION).getValue();
        est = new Estimator(estFile, clusterSize);
    }

    if (parser.hasOption(OPT_REDUCE)) {
        pipeline.getConfiguration().setBoolean(OPT_REDUCE, true);
    }

    if (parser.hasOption(OPT_MSCR)) {
        pipeline.getConfiguration().setBoolean(OPT_MSCR, true);
    }

    if (parser.hasOption(ACTIVE_THRESHOLD)) {
        threshold = Integer.parseInt(parser.getOption("at").getValue());
    }

    if (parser.hasOption(TOP)) {
        top = Integer.parseInt(parser.getOption("top").getValue());
    }

    profiler = new Profiler(pipeline);
    /*
     * input node
     */
    PCollection<String> lines = pipeline.readTextFile(args[0]);

    if (profiler.isProfiling() && lines.getSize() > 10 * 1024 * 1024) {
        lines = lines.sample(0.1);
    }

    /*
     * S0 + GBK
     */
    PGroupedTable<Long, Long> userWithPrefs = lines.parallelDo(new MapFn<String, Pair<Long, Long>>() {

        @Override
        public Pair<Long, Long> map(String input) {
            String[] split = input.split(Estimator.DELM);
            long userID = Long.parseLong(split[0]);
            long itemID = Long.parseLong(split[1]);
            return Pair.of(userID, itemID);
        }

        @Override
        public float scaleFactor() {
            return est.getScaleFactor("S0").sizeFactor;
        }

        @Override
        public float scaleFactorByRecord() {
            return est.getScaleFactor("S0").recsFactor;
        }
    }, Writables.tableOf(Writables.longs(), Writables.longs())).groupByKey(est.getClusterSize());

    /*
     * S1
     */
    PTable<Long, Vector> userVector = userWithPrefs
            .parallelDo(new MapFn<Pair<Long, Iterable<Long>>, Pair<Long, Vector>>() {
                @Override
                public Pair<Long, Vector> map(Pair<Long, Iterable<Long>> input) {
                    Vector userVector = new RandomAccessSparseVector(Integer.MAX_VALUE, 100);
                    for (long itemPref : input.second()) {
                        userVector.set((int) itemPref, 1.0f);
                    }
                    return Pair.of(input.first(), userVector);
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S1").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S1").recsFactor;
                }
            }, Writables.tableOf(Writables.longs(), Writables.vectors()));

    userVector = profiler.profile("S0-S1", pipeline, userVector, ProfileConverter.long_vector(),
            Writables.tableOf(Writables.longs(), Writables.vectors()));

    /*
     * S2
     */
    PTable<Long, Vector> filteredUserVector = userVector
            .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Long, Vector>>() {

                @Override
                public void process(Pair<Long, Vector> input, Emitter<Pair<Long, Vector>> emitter) {
                    if (input.second().getNumNondefaultElements() > threshold) {
                        emitter.emit(input);
                    }
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S2").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S2").recsFactor;
                }

            }, Writables.tableOf(Writables.longs(), Writables.vectors()));

    filteredUserVector = profiler.profile("S2", pipeline, filteredUserVector, ProfileConverter.long_vector(),
            Writables.tableOf(Writables.longs(), Writables.vectors()));

    /*
     * S3 + GBK
     */
    PGroupedTable<Integer, Integer> coOccurencePairs = filteredUserVector
            .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Integer, Integer>>() {
                @Override
                public void process(Pair<Long, Vector> input, Emitter<Pair<Integer, Integer>> emitter) {
                    Iterator<Vector.Element> it = input.second().iterateNonZero();
                    while (it.hasNext()) {
                        int index1 = it.next().index();
                        Iterator<Vector.Element> it2 = input.second().iterateNonZero();
                        while (it2.hasNext()) {
                            int index2 = it2.next().index();
                            emitter.emit(Pair.of(index1, index2));
                        }
                    }
                }

                @Override
                public float scaleFactor() {
                    float size = est.getScaleFactor("S3").sizeFactor;
                    return size;
                }

                @Override
                public float scaleFactorByRecord() {
                    float recs = est.getScaleFactor("S3").recsFactor;
                    return recs;
                }
            }, Writables.tableOf(Writables.ints(), Writables.ints())).groupByKey(est.getClusterSize());

    /*
     * S4
     */
    PTable<Integer, Vector> coOccurenceVector = coOccurencePairs
            .parallelDo(new MapFn<Pair<Integer, Iterable<Integer>>, Pair<Integer, Vector>>() {
                @Override
                public Pair<Integer, Vector> map(Pair<Integer, Iterable<Integer>> input) {
                    Vector cooccurrenceRow = new RandomAccessSparseVector(Integer.MAX_VALUE, 100);
                    for (int itemIndex2 : input.second()) {
                        cooccurrenceRow.set(itemIndex2, cooccurrenceRow.get(itemIndex2) + 1.0);
                    }
                    return Pair.of(input.first(), cooccurrenceRow);
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S4").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S4").recsFactor;
                }
            }, Writables.tableOf(Writables.ints(), Writables.vectors()));

    coOccurenceVector = profiler.profile("S3-S4", pipeline, coOccurenceVector, ProfileConverter.int_vector(),
            Writables.tableOf(Writables.ints(), Writables.vectors()));

    /*
     * S5 Wrapping co-occurrence columns
     */
    PTable<Integer, VectorOrPref> wrappedCooccurrence = coOccurenceVector
            .parallelDo(new MapFn<Pair<Integer, Vector>, Pair<Integer, VectorOrPref>>() {

                @Override
                public Pair<Integer, VectorOrPref> map(Pair<Integer, Vector> input) {
                    return Pair.of(input.first(), new VectorOrPref(input.second()));
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S5").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S5").recsFactor;
                }

            }, Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs()));

    wrappedCooccurrence = profiler.profile("S5", pipeline, wrappedCooccurrence, ProfileConverter.int_vopv(),
            Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs()));

    /*
     * S6 Splitting user vectors
     */
    PTable<Integer, VectorOrPref> userVectorSplit = filteredUserVector
            .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Integer, VectorOrPref>>() {

                @Override
                public void process(Pair<Long, Vector> input, Emitter<Pair<Integer, VectorOrPref>> emitter) {
                    long userID = input.first();
                    Vector userVector = input.second();
                    Iterator<Vector.Element> it = userVector.iterateNonZero();
                    while (it.hasNext()) {
                        Vector.Element e = it.next();
                        int itemIndex = e.index();
                        float preferenceValue = (float) e.get();
                        emitter.emit(Pair.of(itemIndex, new VectorOrPref(userID, preferenceValue)));
                    }
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S6").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S6").recsFactor;
                }
            }, Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs()));

    userVectorSplit = profiler.profile("S6", pipeline, userVectorSplit, ProfileConverter.int_vopp(),
            Writables.tableOf(Writables.ints(), VectorOrPref.vectorOrPrefs()));

    /*
     * S7 Combine VectorOrPrefs
     */
    PTable<Integer, VectorAndPrefs> combinedVectorOrPref = wrappedCooccurrence.union(userVectorSplit)
            .groupByKey(est.getClusterSize())
            .parallelDo(new DoFn<Pair<Integer, Iterable<VectorOrPref>>, Pair<Integer, VectorAndPrefs>>() {

                @Override
                public void process(Pair<Integer, Iterable<VectorOrPref>> input,
                        Emitter<Pair<Integer, VectorAndPrefs>> emitter) {
                    Vector vector = null;
                    List<Long> userIDs = Lists.newArrayList();
                    List<Float> values = Lists.newArrayList();
                    for (VectorOrPref vop : input.second()) {
                        if (vector == null) {
                            vector = vop.getVector();
                        }
                        long userID = vop.getUserID();
                        if (userID != Long.MIN_VALUE) {
                            userIDs.add(vop.getUserID());
                        }
                        float value = vop.getValue();
                        if (!Float.isNaN(value)) {
                            values.add(vop.getValue());
                        }
                    }
                    emitter.emit(Pair.of(input.first(), new VectorAndPrefs(vector, userIDs, values)));
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S7").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S7").recsFactor;
                }
            }, Writables.tableOf(Writables.ints(), VectorAndPrefs.vectorAndPrefs()));

    combinedVectorOrPref = profiler.profile("S5+S6-S7", pipeline, combinedVectorOrPref,
            ProfileConverter.int_vap(), Writables.tableOf(Writables.ints(), VectorAndPrefs.vectorAndPrefs()));
    /*
     * S8 Computing partial recommendation vectors
     */
    PTable<Long, Vector> partialMultiply = combinedVectorOrPref
            .parallelDo(new DoFn<Pair<Integer, VectorAndPrefs>, Pair<Long, Vector>>() {
                @Override
                public void process(Pair<Integer, VectorAndPrefs> input, Emitter<Pair<Long, Vector>> emitter) {
                    Vector cooccurrenceColumn = input.second().getVector();
                    List<Long> userIDs = input.second().getUserIDs();
                    List<Float> prefValues = input.second().getValues();
                    for (int i = 0; i < userIDs.size(); i++) {
                        long userID = userIDs.get(i);
                        if (userID != Long.MIN_VALUE) {
                            float prefValue = prefValues.get(i);
                            Vector partialProduct = cooccurrenceColumn.times(prefValue);
                            emitter.emit(Pair.of(userID, partialProduct));
                        }
                    }
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S8").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S8").recsFactor;
                }

            }, Writables.tableOf(Writables.longs(), Writables.vectors())).groupByKey(est.getClusterSize())
            .combineValues(new CombineFn<Long, Vector>() {

                @Override
                public void process(Pair<Long, Iterable<Vector>> input, Emitter<Pair<Long, Vector>> emitter) {
                    Vector partial = null;
                    for (Vector vector : input.second()) {
                        partial = partial == null ? vector : partial.plus(vector);
                    }
                    emitter.emit(Pair.of(input.first(), partial));
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("combine").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("combine").recsFactor;
                }
            });

    partialMultiply = profiler.profile("S8-combine", pipeline, partialMultiply, ProfileConverter.long_vector(),
            Writables.tableOf(Writables.longs(), Writables.vectors()));

    /*
     * S9 Producing recommendations from vectors
     */
    PTable<Long, RecommendedItems> recommendedItems = partialMultiply
            .parallelDo(new DoFn<Pair<Long, Vector>, Pair<Long, RecommendedItems>>() {

                @Override
                public void process(Pair<Long, Vector> input, Emitter<Pair<Long, RecommendedItems>> emitter) {
                    Queue<RecommendedItem> topItems = new PriorityQueue<RecommendedItem>(11,
                            Collections.reverseOrder(BY_PREFERENCE_VALUE));
                    Iterator<Vector.Element> recommendationVectorIterator = input.second().iterateNonZero();
                    while (recommendationVectorIterator.hasNext()) {
                        Vector.Element element = recommendationVectorIterator.next();
                        int index = element.index();
                        float value = (float) element.get();
                        if (topItems.size() < top) {
                            topItems.add(new GenericRecommendedItem(index, value));
                        } else if (value > topItems.peek().getValue()) {
                            topItems.add(new GenericRecommendedItem(index, value));
                            topItems.poll();
                        }
                    }
                    List<RecommendedItem> recommendations = new ArrayList<RecommendedItem>(topItems.size());
                    recommendations.addAll(topItems);
                    Collections.sort(recommendations, BY_PREFERENCE_VALUE);
                    emitter.emit(Pair.of(input.first(), new RecommendedItems(recommendations)));
                }

                @Override
                public float scaleFactor() {
                    return est.getScaleFactor("S9").sizeFactor;
                }

                @Override
                public float scaleFactorByRecord() {
                    return est.getScaleFactor("S9").recsFactor;
                }

            }, Writables.tableOf(Writables.longs(), RecommendedItems.recommendedItems()));

    recommendedItems = profiler.profile("S9", pipeline, recommendedItems, ProfileConverter.long_ri(),
            Writables.tableOf(Writables.longs(), RecommendedItems.recommendedItems()));

    /*
     * Profiling
     */
    if (profiler.isProfiling()) {
        profiler.writeResultToFile(profileFilePath);
        profiler.cleanup(pipeline.getConfiguration());
        return 0;
    }
    /*
     * asText
     */
    pipeline.writeTextFile(recommendedItems, args[1]);
    PipelineResult result = pipeline.done();
    return result.succeeded() ? 0 : 1;
}
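
In the Recommender job above, peek() drives a classic bounded top-N selection: topItems is ordered so that topItems.peek() is the weakest item currently kept, and each candidate is compared against it. A condensed sketch of that pattern with plain integers (class and method names are illustrative):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.PriorityQueue;
import java.util.Queue;

public class TopNWithPeek {
    public static List<Integer> topN(int[] values, int n) {
        // Natural ordering makes the queue min-oriented: peek() is the smallest value kept so far.
        Queue<Integer> topItems = new PriorityQueue<Integer>(n);
        for (int value : values) {
            if (topItems.size() < n) {
                topItems.add(value);
            } else if (value > topItems.peek()) {
                // The candidate beats the weakest kept item: add it, then drop the weakest.
                topItems.add(value);
                topItems.poll();
            }
        }
        List<Integer> result = new ArrayList<Integer>(topItems);
        Collections.sort(result, Collections.reverseOrder());
        return result;
    }

    public static void main(String[] args) {
        System.out.println(topN(new int[] { 5, 1, 9, 3, 7, 8 }, 3)); // prints [9, 8, 7]
    }
}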

From source file: com.intel.ssg.dcst.panthera.parse.SkinDriver.java

private CommandProcessorResponse runInternal(String command) throws CommandNeedRetryException {
    errorMessage = null;
    SQLState = null;
    downstreamError = null;

    if (!validateConfVariables()) {
        return new CommandProcessorResponse(12, errorMessage, SQLState);
    }

    HiveDriverRunHookContext hookContext = new HiveDriverRunHookContextImpl(conf, command);
    // Get all the driver run hooks and pre-execute them.
    List<HiveDriverRunHook> driverRunHooks;
    try {
        driverRunHooks = getHooks(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS, HiveDriverRunHook.class);
        for (HiveDriverRunHook driverRunHook : driverRunHooks) {
            driverRunHook.preDriverRun(hookContext);
        }
    } catch (Exception e) {
        errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
        SQLState = ErrorMsg.findSQLState(e.getMessage());
        downstreamError = e;
        console.printError(errorMessage + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return new CommandProcessorResponse(12, errorMessage, SQLState);
    }

    // Reset the perf logger
    PerfLogger perfLogger = PerfLogger.getPerfLogger(true);
    perfLogger.PerfLogBegin(LOG, PerfLogger.DRIVER_RUN);
    perfLogger.PerfLogBegin(LOG, PerfLogger.TIME_TO_SUBMIT);

    int ret;
    synchronized (compileMonitor) {
        ret = compile(command);
    }
    if (ret != 0) {
        releaseLocks(ctx.getHiveLocks());
        return new CommandProcessorResponse(ret, errorMessage, SQLState);
    }

    boolean requireLock = false;
    boolean ckLock = checkLockManager();

    if (ckLock) {
        boolean lockOnlyMapred = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_LOCK_MAPRED_ONLY);
        if (lockOnlyMapred) {
            Queue<Task<? extends Serializable>> taskQueue = new LinkedList<Task<? extends Serializable>>();
            taskQueue.addAll(plan.getRootTasks());
            while (taskQueue.peek() != null) {
                Task<? extends Serializable> tsk = taskQueue.remove();
                requireLock = requireLock || tsk.requireLock();
                if (requireLock) {
                    break;
                }
                if (tsk instanceof ConditionalTask) {
                    taskQueue.addAll(((ConditionalTask) tsk).getListTasks());
                }
                if (tsk.getChildTasks() != null) {
                    taskQueue.addAll(tsk.getChildTasks());
                }
                // a backup task is not added back here, because the backup task should be
                // of the same type as the original task
            }
        } else {
            requireLock = true;
        }
    }

    if (requireLock) {
        ret = acquireReadWriteLocks();
        if (ret != 0) {
            releaseLocks(ctx.getHiveLocks());
            return new CommandProcessorResponse(ret, errorMessage, SQLState);
        }
    }

    ret = execute();
    if (ret != 0) {
        // if requireLock is false, the release here will do nothing because there is no lock
        releaseLocks(ctx.getHiveLocks());
        return new CommandProcessorResponse(ret, errorMessage, SQLState);
    }

    // if requireLock is false, the release here will do nothing because there is no lock
    releaseLocks(ctx.getHiveLocks());

    perfLogger.PerfLogEnd(LOG, PerfLogger.DRIVER_RUN);
    perfLogger.close(LOG, plan);

    // Take all the driver run hooks and post-execute them.
    try {
        for (HiveDriverRunHook driverRunHook : driverRunHooks) {
            driverRunHook.postDriverRun(hookContext);
        }
    } catch (Exception e) {
        errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
        SQLState = ErrorMsg.findSQLState(e.getMessage());
        downstreamError = e;
        console.printError(errorMessage + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return new CommandProcessorResponse(12, errorMessage, SQLState);
    }

    return new CommandProcessorResponse(ret);
}

From source file: com.stehno.sanctuary.core.archive.DefaultFileArchiver.java

@Override
public MessageSet archiveChanges(final ChangeSet changeSet) {
    if (log.isDebugEnabled())
        log.debug("Archiving changes for " + changeSet.getRootDirectory() + "...");

    final MessageSet messageSet = new MessageSet(changeSet.getRootDirectory().getPath());

    final Queue<Future> futures = new LinkedList<Future>();

    int count = 0;
    for (final File file : changeSet.listFiles(FileStatus.NEW)) {
        futures.add(executor.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    remoteStore.addFile(changeSet.getRootDirectory(), file);
                    localStore.storeFile(file);
                    messageSet.addMessage(file.getPath(), "Added successfully");

                } catch (Exception ex) {
                    messageSet.addError(file.getPath(), "Add failed: " + ex.getMessage());
                }
            }
        }));
        count++;
    }
    if (log.isDebugEnabled())
        log.debug("Scheduled Adds: " + count);

    count = 0;
    for (final File file : changeSet.listFiles(FileStatus.MODIFIED)) {
        futures.add(executor.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    remoteStore.updateFile(changeSet.getRootDirectory(), file);
                    localStore.storeFile(file);
                    messageSet.addMessage(file.getPath(), "Updated successfully");

                } catch (Exception ex) {
                    messageSet.addError(file.getPath(), "Update failed: " + ex.getMessage());
                }

            }
        }));
        count++;
    }
    if (log.isDebugEnabled())
        log.debug("Scheduled Updates: " + count);

    count = 0;
    for (final File file : changeSet.listFiles(FileStatus.DELETED)) {
        futures.add(executor.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    remoteStore.deleteFile(changeSet.getRootDirectory(), file);
                    localStore.removeFile(file);
                    messageSet.addMessage(file.getPath(), "Deleted successfully");

                } catch (Exception ex) {
                    messageSet.addError(file.getPath(), "Delete failed: " + ex.getMessage());
                }

            }
        }));
        count++;
    }
    if (log.isDebugEnabled())
        log.debug("Scheduled Deletes: " + count);

    do {
        while (!futures.isEmpty()) {
            if (futures.peek().isDone()) {
                futures.poll();
            }
        }

        if (collectionWaitTime > 0) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException ie) {
            }
        }

    } while (!futures.isEmpty());

    return messageSet;
}
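
The archiver above waits for its scheduled work by repeatedly checking futures.peek().isDone() and removing completed entries with poll(). A compact sketch of that wait-for-completion pattern, assuming hypothetical task bodies (a short sleep is added here to avoid a hot spin while the head task is still running):

import java.util.LinkedList;
import java.util.Queue;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FutureQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        Queue<Future<String>> futures = new LinkedList<Future<String>>();

        for (int i = 0; i < 5; i++) {
            final int id = i;
            futures.add(executor.submit(new Callable<String>() {
                public String call() throws Exception {
                    Thread.sleep(50);
                    return "task " + id + " done";
                }
            }));
        }

        // Inspect the head with peek(); only poll() it off the queue once it has completed.
        while (!futures.isEmpty()) {
            if (futures.peek().isDone()) {
                futures.poll();
            } else {
                Thread.sleep(10);
            }
        }

        executor.shutdown();
        System.out.println("All scheduled work has completed");
    }
}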

From source file: com.intel.ssg.dcst.panthera.parse.SkinDriver.java

public int execute() throws CommandNeedRetryException {
    PerfLogger perfLogger = PerfLogger.getPerfLogger();
    perfLogger.PerfLogBegin(LOG, PerfLogger.DRIVER_EXECUTE);

    boolean noName = StringUtils.isEmpty(conf.getVar(HiveConf.ConfVars.HADOOPJOBNAME));
    int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);

    String queryId = plan.getQueryId();
    String queryStr = plan.getQueryStr();

    conf.setVar(HiveConf.ConfVars.HIVEQUERYID, queryId);
    conf.setVar(HiveConf.ConfVars.HIVEQUERYSTRING, queryStr);

    conf.set("mapreduce.workflow.id", "hive_" + queryId);
    conf.set("mapreduce.workflow.name", queryStr);

    maxthreads = HiveConf.getIntVar(conf, HiveConf.ConfVars.EXECPARALLETHREADNUMBER);

    try {
        LOG.info("Starting command: " + queryStr);

        plan.setStarted();

        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().startQuery(queryStr,
                    conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
            SessionState.get().getHiveHistory().logPlanProgress(plan);
        }
        resStream = null;

        HookContext hookContext = new HookContext(plan, conf, ctx.getPathToCS());
        hookContext.setHookType(HookContext.HookType.PRE_EXEC_HOOK);

        for (Hook peh : getHooks(HiveConf.ConfVars.PREEXECHOOKS)) {
            if (peh instanceof ExecuteWithHookContext) {
                perfLogger.PerfLogBegin(LOG, PerfLogger.PRE_HOOK + peh.getClass().getName());

                ((ExecuteWithHookContext) peh).run(hookContext);

                perfLogger.PerfLogEnd(LOG, PerfLogger.PRE_HOOK + peh.getClass().getName());
            } else if (peh instanceof PreExecute) {
                perfLogger.PerfLogBegin(LOG, PerfLogger.PRE_HOOK + peh.getClass().getName());

                ((PreExecute) peh).run(SessionState.get(), plan.getInputs(), plan.getOutputs(),
                        ShimLoader.getHadoopShims().getUGIForConf(conf));

                perfLogger.PerfLogEnd(LOG, PerfLogger.PRE_HOOK + peh.getClass().getName());
            }
        }

        int jobs = Utilities.getMRTasks(plan.getRootTasks()).size();
        if (jobs > 0) {
            console.printInfo("Total MapReduce jobs = " + jobs);
        }
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_NUM_TASKS,
                    String.valueOf(jobs));
            SessionState.get().getHiveHistory().setIdToTableMap(plan.getIdToTableNameMap());
        }
        String jobname = Utilities.abbreviate(queryStr, maxlen - 6);

        // A runtime that launches runnable tasks as separate Threads through
        // TaskRunners
        // As soon as a task isRunnable, it is put in a queue
        // At any time, at most maxthreads tasks can be running
        // The main thread polls the TaskRunners to check if they have finished.

        Queue<Task<? extends Serializable>> runnable = new ConcurrentLinkedQueue<Task<? extends Serializable>>();
        Map<TaskResult, TaskRunner> running = new HashMap<TaskResult, TaskRunner>();

        DriverContext driverCxt = new DriverContext(runnable, ctx);
        ctx.setHDFSCleanup(true);

        SessionState.get().setLastMapRedStatsList(new ArrayList<MapRedStats>());
        SessionState.get().setStackTraces(new HashMap<String, List<List<String>>>());
        SessionState.get().setLocalMapRedErrors(new HashMap<String, List<String>>());

        // Add root Tasks to runnable
        for (Task<? extends Serializable> tsk : plan.getRootTasks()) {
            // This should never happen, if it does, it's a bug with the potential to produce
            // incorrect results.
            assert tsk.getParentTasks() == null || tsk.getParentTasks().isEmpty();
            driverCxt.addToRunnable(tsk);
        }

        perfLogger.PerfLogEnd(LOG, PerfLogger.TIME_TO_SUBMIT);
        perfLogger.PerfLogBegin(LOG, PerfLogger.RUN_TASKS);
        // Loop while you either have tasks running, or tasks queued up
        while (running.size() != 0 || runnable.peek() != null) {
            // Launch up to maxthreads tasks
            while (runnable.peek() != null && running.size() < maxthreads) {
                Task<? extends Serializable> tsk = runnable.remove();
                perfLogger.PerfLogBegin(LOG, PerfLogger.TASK + tsk.getName() + "." + tsk.getId());
                launchTask(tsk, queryId, noName, running, jobname, jobs, driverCxt);
            }

            // poll the Tasks to see which one completed
            TaskResult tskRes = pollTasks(running.keySet());
            TaskRunner tskRun = running.remove(tskRes);
            Task<? extends Serializable> tsk = tskRun.getTask();
            perfLogger.PerfLogEnd(LOG, PerfLogger.TASK + tsk.getName() + "." + tsk.getId());
            hookContext.addCompleteTask(tskRun);

            int exitVal = tskRes.getExitVal();
            if (exitVal != 0) {
                if (tsk.ifRetryCmdWhenFail()) {
                    if (!running.isEmpty()) {
                        taskCleanup(running);
                    }
                    // in case we decided to run everything in local mode, restore
                    // the jobtracker setting to its initial value
                    ctx.restoreOriginalTracker();
                    throw new CommandNeedRetryException();
                }
                Task<? extends Serializable> backupTask = tsk.getAndInitBackupTask();
                if (backupTask != null) {
                    setErrorMsgAndDetail(exitVal, tskRes.getTaskError(), tsk);
                    console.printError(errorMessage);
                    errorMessage = "ATTEMPT: Execute BackupTask: " + backupTask.getClass().getName();
                    console.printError(errorMessage);

                    // add backup task to runnable
                    if (DriverContext.isLaunchable(backupTask)) {
                        driverCxt.addToRunnable(backupTask);
                    }
                    continue;

                } else {
                    hookContext.setHookType(HookContext.HookType.ON_FAILURE_HOOK);
                    // Get all the failure execution hooks and execute them.
                    for (Hook ofh : getHooks(HiveConf.ConfVars.ONFAILUREHOOKS)) {
                        perfLogger.PerfLogBegin(LOG, PerfLogger.FAILURE_HOOK + ofh.getClass().getName());

                        ((ExecuteWithHookContext) ofh).run(hookContext);

                        perfLogger.PerfLogEnd(LOG, PerfLogger.FAILURE_HOOK + ofh.getClass().getName());
                    }
                    setErrorMsgAndDetail(exitVal, tskRes.getTaskError(), tsk);
                    SQLState = "08S01";
                    console.printError(errorMessage);
                    if (!running.isEmpty()) {
                        taskCleanup(running);
                    }
                    // in case we decided to run everything in local mode, restore
                    // the jobtracker setting to its initial value
                    ctx.restoreOriginalTracker();
                    return exitVal;
                }
            }

            if (SessionState.get() != null) {
                SessionState.get().getHiveHistory().setTaskProperty(queryId, tsk.getId(), Keys.TASK_RET_CODE,
                        String.valueOf(exitVal));
                SessionState.get().getHiveHistory().endTask(queryId, tsk);
            }

            if (tsk.getChildTasks() != null) {
                for (Task<? extends Serializable> child : tsk.getChildTasks()) {
                    if (DriverContext.isLaunchable(child)) {
                        driverCxt.addToRunnable(child);
                    }
                }
            }
        }
        perfLogger.PerfLogEnd(LOG, PerfLogger.RUN_TASKS);

        // in case we decided to run everything in local mode, restore
        // the jobtracker setting to its initial value
        ctx.restoreOriginalTracker();

        // Remove incomplete outputs: some may have been added at the beginning,
        // e.g. for dynamic partitions, and must be dropped from the plan now.
        HashSet<WriteEntity> remOutputs = new HashSet<WriteEntity>();
        for (WriteEntity output : plan.getOutputs()) {
            if (!output.isComplete()) {
                remOutputs.add(output);
            }
        }

        for (WriteEntity output : remOutputs) {
            plan.getOutputs().remove(output);
        }

        hookContext.setHookType(HookContext.HookType.POST_EXEC_HOOK);
        // Get all the post execution hooks and execute them.
        for (Hook peh : getHooks(HiveConf.ConfVars.POSTEXECHOOKS)) {
            if (peh instanceof ExecuteWithHookContext) {
                perfLogger.PerfLogBegin(LOG, PerfLogger.POST_HOOK + peh.getClass().getName());

                ((ExecuteWithHookContext) peh).run(hookContext);

                perfLogger.PerfLogEnd(LOG, PerfLogger.POST_HOOK + peh.getClass().getName());
            } else if (peh instanceof PostExecute) {
                perfLogger.PerfLogBegin(LOG, PerfLogger.POST_HOOK + peh.getClass().getName());

                ((PostExecute) peh).run(SessionState.get(), plan.getInputs(), plan.getOutputs(),
                        (SessionState.get() != null ? SessionState.get().getLineageState().getLineageInfo()
                                : null),
                        ShimLoader.getHadoopShims().getUGIForConf(conf));

                perfLogger.PerfLogEnd(LOG, PerfLogger.POST_HOOK + peh.getClass().getName());
            }
        }

        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE,
                    String.valueOf(0));
            SessionState.get().getHiveHistory().printRowCount(queryId);
        }
    } catch (CommandNeedRetryException e) {
        throw e;
    } catch (Exception e) {
        ctx.restoreOriginalTracker();
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE,
                    String.valueOf(12));
        }
        // TODO: do better with handling types of Exception here
        errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
        SQLState = "08S01";
        downstreamError = e;
        console.printError(errorMessage + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return (12);
    } finally {
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().endQuery(queryId);
        }
        if (noName) {
            conf.setVar(HiveConf.ConfVars.HADOOPJOBNAME, "");
        }
        perfLogger.PerfLogEnd(LOG, PerfLogger.DRIVER_EXECUTE);

        if (SessionState.get() != null && SessionState.get().getLastMapRedStatsList() != null
                && SessionState.get().getLastMapRedStatsList().size() > 0) {
            long totalCpu = 0;
            console.printInfo("MapReduce Jobs Launched: ");
            for (int i = 0; i < SessionState.get().getLastMapRedStatsList().size(); i++) {
                console.printInfo("Job " + i + ": " + SessionState.get().getLastMapRedStatsList().get(i));
                totalCpu += SessionState.get().getLastMapRedStatsList().get(i).getCpuMSec();
            }
            console.printInfo("Total MapReduce CPU Time Spent: " + Utilities.formatMsecToStr(totalCpu));
        }
    }
    plan.setDone();

    if (SessionState.get() != null) {
        try {
            SessionState.get().getHiveHistory().logPlanProgress(plan);
        } catch (Exception e) {
            // ignore: logging plan progress to the Hive history is best-effort
        }
    }
    console.printInfo("OK");

    return (0);
}
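
The failure branch above retries through a backup task by putting it back on the runnable queue and continuing the loop. Below is a minimal, self-contained sketch of that re-queue-on-failure pattern with a plain java.util.Queue; the Task class, runTask helper, and field names are hypothetical stand-ins, not the Hive API.

import java.util.LinkedList;
import java.util.Queue;

// Minimal sketch of the "re-queue a backup task on failure" pattern used above.
// Task and runTask() are hypothetical stand-ins, not Hive types.
public class BackupTaskSketch {
    static class Task {
        final String name;
        final Task backup;
        Task(String name, Task backup) { this.name = name; this.backup = backup; }
    }

    // Pretend the primary task fails and every backup task succeeds.
    static int runTask(Task t) { return t.backup != null ? 1 : 0; }

    public static void main(String[] args) {
        Queue<Task> runnable = new LinkedList<>();
        runnable.add(new Task("primary", new Task("backup", null)));

        int exitVal = 0;
        // peek() looks at the head without removing it; remove() dequeues it.
        while (runnable.peek() != null) {
            Task tsk = runnable.remove();
            exitVal = runTask(tsk);
            if (exitVal != 0 && tsk.backup != null) {
                System.out.println("ATTEMPT: Execute BackupTask: " + tsk.backup.name);
                runnable.add(tsk.backup); // same idea as driverCxt.addToRunnable(backupTask)
            }
        }
        System.out.println("final exit value: " + exitVal);
    }
}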

From source file:org.apache.bookkeeper.client.BookieWriteLedgerTest.java

/**
 * LedgerHandleAdv out-of-order writes with ensemble changes.
 * Verify that an entry written to the old ensemble is also
 * written to the new ensemble after an ensemble change.
 *
 * @throws Exception
 */
@Test
public void testLedgerHandleAdvOutOfOrderWriteAndFrocedEnsembleChange() throws Exception {
    // Create a ledger
    long ledgerId = 0xABCDEF;
    SyncObj syncObj1 = new SyncObj();
    ByteBuffer entry;
    lh = bkc.createLedgerAdv(ledgerId, 3, 3, 3, digestType, ledgerPassword, null);
    entry = ByteBuffer.allocate(4);
    // Add entries 0-4
    for (int i = 0; i < 5; i++) {
        entry.rewind();
        entry.putInt(rng.nextInt(maxInt));
        lh.addEntry(i, entry.array());
    }

    // Add entry 10 asynchronously; it goes to the first ensemble
    ByteBuffer entry1 = ByteBuffer.allocate(4);
    entry1.putInt(rng.nextInt(maxInt));
    lh.asyncAddEntry(10, entry1.array(), 0, entry1.capacity(), this, syncObj1);

    // Make sure entry 10 reaches the bookies and gets a response.
    java.util.Queue<PendingAddOp> myPendingAddOps = Whitebox.getInternalState(lh, "pendingAddOps");
    PendingAddOp addOp = null;
    boolean pendingAddOpReceived = false;

    while (!pendingAddOpReceived) {
        addOp = myPendingAddOps.peek();
        if (addOp.entryId == 10 && addOp.completed) {
            pendingAddOpReceived = true;
        } else {
            Thread.sleep(1000);
        }
    }

    CountDownLatch sleepLatch1 = new CountDownLatch(1);
    List<BookieSocketAddress> ensemble;

    ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next().getValue();

    // Put all 3 bookies to sleep and start 3 new ones
    sleepBookie(ensemble.get(0), sleepLatch1);
    sleepBookie(ensemble.get(1), sleepLatch1);
    sleepBookie(ensemble.get(2), sleepLatch1);
    startNewBookie();
    startNewBookie();
    startNewBookie();

    // The original bookies are asleep and new bookies have been added.
    // Now add entries 5-9, which forces an ensemble change.
    // At this point entries 0-4 and 10 went to the first ensemble;
    // entries 5-9 will go to the new ensemble.
    for (int i = 5; i < 10; i++) {
        entry.rewind();
        entry.putInt(rng.nextInt(maxInt));
        lh.addEntry(i, entry.array());
    }

    // Wake up all 3 bookies that went to sleep
    sleepLatch1.countDown();

    // Wait for all entries to be acknowledged for the first ledger
    synchronized (syncObj1) {
        while (syncObj1.counter < 1) {
            syncObj1.wait();
        }
        assertEquals(BKException.Code.OK, syncObj1.rc);
    }

    // Close write handle
    lh.close();

    // Open read handle
    lh = bkc.openLedger(ledgerId, digestType, ledgerPassword);

    // Read back all 11 entries (ids 0 through 10).
    for (int i = 0; i < 11; i++) {
        lh.readEntries(i, i);
    }
    lh.close();
    bkc.deleteLedger(ledgerId);
}
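
The test above busy-waits on the head of the pending-op queue: it peeks at the head element, checks a condition, and sleeps before looking again, without ever removing anything. The following is a minimal, self-contained sketch of that peek-and-wait polling pattern; PendingOp and its fields are hypothetical stand-ins for the BookKeeper internals.

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

// Minimal sketch of polling a queue head with peek() until a condition holds.
// PendingOp is a hypothetical stand-in, completed asynchronously by another thread.
public class PeekPollSketch {
    static class PendingOp {
        final long entryId;
        volatile boolean completed;
        PendingOp(long entryId) { this.entryId = entryId; }
    }

    public static void main(String[] args) throws InterruptedException {
        Queue<PendingOp> pendingOps = new ConcurrentLinkedQueue<>();
        PendingOp op = new PendingOp(10);
        pendingOps.add(op);

        // Simulate an asynchronous completion after a short delay.
        new Thread(() -> {
            try { Thread.sleep(200); } catch (InterruptedException ignored) { }
            op.completed = true;
        }).start();

        // peek() inspects the head without removing it, so the op stays queued
        // until whoever owns the queue decides to dequeue it.
        while (true) {
            PendingOp head = pendingOps.peek();
            if (head != null && head.entryId == 10 && head.completed) {
                break;
            }
            Thread.sleep(50);
        }
        System.out.println("head op completed: entryId=" + pendingOps.peek().entryId);
    }
}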

From source file:org.apache.hadoop.hdfs.notifier.server.TestServerHistory.java

@Test
public void testBasicQueueNotification() throws Exception {
    // Starting without a ramp-up phase
    DummyServerCore core = new DummyServerCore();
    ServerHistory history = new ServerHistory(core, false);
    long historyLength = 100;
    history.setHistoryLength(historyLength);
    Queue<NamespaceNotification> historyNotifications;

    new Thread(history).start();

    // Step 1 - test with FILE_ADDED
    history.storeNotification(new NamespaceNotification("/a/b", EventType.FILE_ADDED.getByteValue(), 10));
    history.storeNotification(new NamespaceNotification("/a/c", EventType.FILE_ADDED.getByteValue(), 11));
    historyNotifications = new LinkedList<NamespaceNotification>();
    history.addNotificationsToQueue(new NamespaceEvent("/a", EventType.FILE_ADDED.getByteValue()), 10,
            historyNotifications);
    Assert.assertEquals(1, historyNotifications.size());
    Assert.assertEquals(11, historyNotifications.peek().txId);
    Assert.assertEquals("/a/c", historyNotifications.peek().path);

    // Step 2 - test with FILE_CLOSED
    history.storeNotification(new NamespaceNotification("/a/d", EventType.FILE_CLOSED.getByteValue(), 12));
    history.storeNotification(new NamespaceNotification("/a/e", EventType.FILE_CLOSED.getByteValue(), 13));
    historyNotifications = new LinkedList<NamespaceNotification>();
    history.addNotificationsToQueue(new NamespaceEvent("/a", EventType.FILE_CLOSED.getByteValue()), 12,
            historyNotifications);
    Assert.assertEquals(1, historyNotifications.size());
    Assert.assertEquals(13, historyNotifications.peek().txId);
    Assert.assertEquals("/a/e", historyNotifications.peek().path);

    // Step 3 - test the subdirectories
    historyNotifications = new LinkedList<NamespaceNotification>();
    history.addNotificationsToQueue(new NamespaceEvent("/", EventType.FILE_ADDED.getByteValue()), 10,
            historyNotifications);
    Assert.assertEquals(1, historyNotifications.size());
    history.addNotificationsToQueue(new NamespaceEvent("/", EventType.FILE_CLOSED.getByteValue()), 10,
            historyNotifications);
    Assert.assertEquals(3, historyNotifications.size());

    core.shutdown();
}
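
Both assertions above rely on peek() leaving the queue untouched: the same head element can be inspected twice (txId, then path) and the size still counts it. A short, self-contained illustration of that non-destructive read follows, using a plain String queue instead of the notification types.

import java.util.LinkedList;
import java.util.Queue;

// peek() is a non-destructive read: unlike poll(), it never changes the queue.
public class PeekVsPoll {
    public static void main(String[] args) {
        Queue<String> notifications = new LinkedList<>();
        notifications.add("/a/c");
        notifications.add("/a/e");

        // Two peeks in a row see the same head, and the size is unchanged.
        System.out.println(notifications.peek()); // /a/c
        System.out.println(notifications.peek()); // /a/c
        System.out.println(notifications.size()); // 2

        // poll() removes the head; the next peek() sees the following element.
        System.out.println(notifications.poll()); // /a/c
        System.out.println(notifications.peek()); // /a/e

        // On an empty queue both peek() and poll() return null instead of throwing.
        notifications.clear();
        System.out.println(notifications.peek()); // null
    }
}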

From source file:org.apache.hadoop.hive.ql.Driver.java

private boolean requiresLock() {
    if (!checkConcurrency()) {
        return false;
    }
    // Lock operations themselves don't require the lock.
    if (isExplicitLockOperation()) {
        return false;
    }
    if (!HiveConf.getBoolVar(conf, ConfVars.HIVE_LOCK_MAPRED_ONLY)) {
        return true;
    }
    Queue<Task<? extends Serializable>> taskQueue = new LinkedList<Task<? extends Serializable>>();
    taskQueue.addAll(plan.getRootTasks());
    while (taskQueue.peek() != null) {
        Task<? extends Serializable> tsk = taskQueue.remove();
        if (tsk.requireLock()) {
            return true;
        }
        if (tsk instanceof ConditionalTask) {
            taskQueue.addAll(((ConditionalTask) tsk).getListTasks());
        }
        if (tsk.getChildTasks() != null) {
            taskQueue.addAll(tsk.getChildTasks());
        }
        // Backup tasks are not added here, because a backup task should be of the
        // same type as its original task.
    }
    return false;
}
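
requiresLock walks the task DAG breadth-first and uses peek() != null as the loop guard, which avoids the NoSuchElementException that remove() would throw on an empty queue. Below is a generic, self-contained sketch of the same traversal idiom; Node and needsLock are hypothetical stand-ins for the Hive task types.

import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

// Breadth-first walk over a small tree using peek() != null as the loop guard.
// Node and needsLock stand in for Task<? extends Serializable> and Task.requireLock().
public class PeekLoopGuardSketch {
    static class Node {
        final boolean needsLock;
        final List<Node> children;
        Node(boolean needsLock, Node... children) {
            this.needsLock = needsLock;
            this.children = Arrays.asList(children);
        }
    }

    static boolean requiresLock(List<Node> roots) {
        Queue<Node> queue = new LinkedList<>(roots);
        // peek() returns null when the queue is empty, so no separate
        // isEmpty() check is needed before remove().
        while (queue.peek() != null) {
            Node n = queue.remove();
            if (n.needsLock) {
                return true;
            }
            queue.addAll(n.children);
        }
        return false;
    }

    public static void main(String[] args) {
        Node leafWithLock = new Node(true);
        Node root = new Node(false, new Node(false), new Node(false, leafWithLock));
        System.out.println(requiresLock(Arrays.asList(root)));            // true
        System.out.println(requiresLock(Arrays.asList(new Node(false)))); // false
    }
}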