Example usage for the java.io NotSerializableException NotSerializableException(String) constructor

Introduction

On this page you can find usage examples for the java.io NotSerializableException(String) constructor.

Prototype

public NotSerializableException(String classname) 

Document

Constructs a NotSerializableException object with a message string.
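
As a quick, hedged illustration (the helper method below is hypothetical and not taken from any of the sources listed), the message string passed to the constructor is conventionally the fully qualified name of the class that cannot be serialized:

import java.io.NotSerializableException;
import java.io.Serializable;

public class ConstructorSketch {

    // Hypothetical helper: fail fast when a value cannot be serialized,
    // reporting the offending class name in the exception message.
    static void requireSerializable(Object value) throws NotSerializableException {
        if (!(value instanceof Serializable)) {
            throw new NotSerializableException(value.getClass().getName());
        }
    }

    public static void main(String[] args) throws NotSerializableException {
        requireSerializable("strings are serializable"); // passes
        try {
            requireSerializable(new Object());            // java.lang.Object is not
        } catch (NotSerializableException e) {
            System.out.println("Not serializable: " + e.getMessage());
        }
    }
}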

Usage

From source file:eu.sathra.io.IO.java

@SuppressWarnings("unchecked")
public <T> T load(JSONObject jObj, Class<T> clazz) throws Exception {

    /*
     * If the clazz parameter is an interface or has subclasses, the caller
     * must explicitly declare which class should be deserialized. This is
     * done by adding a JSON element "class" whose value is the fully
     * qualified class name. Example: "my_object": { "class":"eu.sathra.MyClass", ... }
     */
    if (jObj.has(CLASS_PARAM)) {
        try {
            clazz = (Class<T>) Class.forName(jObj.getString(CLASS_PARAM));
        } catch (ClassNotFoundException e) {
            Log.error("ClassNotFoundException: " + jObj.getString(CLASS_PARAM));
        } catch (ClassCastException e) {
            Log.error("ClassCastException: " + jObj.getString(CLASS_PARAM));
        }
    }

    SerializeInfo info = getSerializeInfo(clazz);

    if (info != null) {
        Object[] params = parseParams(info.types, info.params, info.defaults, jObj);

        return (T) info.constructor.newInstance(params);
    } else {
        // Try default constructor
        Constructor<T> defaultConstructor = clazz.getDeclaredConstructor();
        if (defaultConstructor != null) {
            return defaultConstructor.newInstance();
        }
    }

    throw new NotSerializableException("A class of type " + clazz.getCanonicalName() + " is not serializable.");
}
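
A hedged caller sketch of that convention (the LoadSketch class is illustrative; how the IO instance is obtained is not shown in the listing, so it is passed in as a parameter):

import org.json.JSONObject;

import eu.sathra.io.IO;

// Hypothetical caller of IO.load(): the concrete type to deserialize is
// selected by the "class" JSON element, as described in the comment above.
class LoadSketch {
    static Object loadPolymorphic(IO io, String json) throws Exception {
        JSONObject jObj = new JSONObject(json); // e.g. {"class":"eu.sathra.MyClass", ...}
        return io.load(jObj, Object.class);
    }
}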

From source file:org.pssframework.cache.Cache.java

/**
 * Utility that checks that an object is serializable.
 */
static void checkSerializable(Object value) {
    if (!(value instanceof Serializable))
        throw new IllegalStateException(
                "Cannot cache a non-serializable value of type " + value.getClass().getName(),
                new NotSerializableException(value.getClass().getName()));
}
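
A short, hedged sketch of how a caller might apply this check before storing a value (the cache class and put method below are hypothetical):

import java.io.NotSerializableException;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;

// Hypothetical cache wrapper that applies the same fail-fast check as above.
public class SerializableOnlyCache {

    private final Map<String, Serializable> entries = new HashMap<>();

    public void put(String key, Object value) {
        // Same idea as checkSerializable(): wrap a NotSerializableException
        // (carrying the offending class name) as the cause of the failure.
        if (!(value instanceof Serializable)) {
            throw new IllegalStateException(
                    "Cannot cache a non-serializable value of type " + value.getClass().getName(),
                    new NotSerializableException(value.getClass().getName()));
        }
        entries.put(key, (Serializable) value);
    }
}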

From source file:org.pentaho.reporting.libraries.serializer.SerializerHelper.java

/**
 * Writes a serializable object description to the given object output stream. This method selects the best
 * serialize helper method for the given object.
 *
 * @param o   the object to be serialized.
 * @param out the output stream that should receive the object.
 * @throws IOException if an I/O error occurred.
 */
public synchronized void writeObject(final Object o, final ObjectOutputStream out) throws IOException {
    try {
        if (o == null) {
            out.writeByte(0);
            return;
        }
        if (o instanceof Serializable) {
            out.writeByte(1);
            out.writeObject(o);
            return;
        }

        final SerializeMethod m = getSerializer(o.getClass());
        if (m == null) {
            throw new NotSerializableException(o.getClass().getName());
        }
        out.writeByte(2);
        out.writeObject(m.getObjectClass());
        m.writeObject(o, out);
    } catch (NotSerializableException nse) {
        logger.warn("Unable to serialize object: " + o);
        throw nse;
    }
}

From source file:org.pentaho.reporting.libraries.serializer.SerializerHelper.java

/**
 * Reads the object from the object input stream. This method selects the best serializer to read the object.
 * <p/>
 * Make sure that you use the same configuration (library and class versions, registered methods in the
 * SerializerHelper) for reading as you used for writing.
 *
 * @param in the object input stream from which to read the serialized data.
 * @return the generated object.
 * @throws IOException            if reading the stream failed.
 * @throws ClassNotFoundException if the serialized object class cannot be found.
 */
public synchronized Object readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
    final int type = in.readByte();
    if (type == 0) {
        return null;
    }
    if (type == 1) {
        return in.readObject();
    }
    final Class c = (Class) in.readObject();
    final SerializeMethod m = getSerializer(c);
    if (m == null) {
        throw new NotSerializableException(c.getName());
    }
    return m.readObject(in);
}
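
Taken together, the writeObject and readObject methods above define a small tagged format: a leading byte of 0 for null, 1 for objects that are already Serializable, and 2 for objects handled by a registered SerializeMethod; a NotSerializableException is thrown when no serializer matches. A rough round-trip sketch, assuming an already-configured SerializerHelper instance (how that instance is obtained is not part of the listed code):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

import org.pentaho.reporting.libraries.serializer.SerializerHelper;

class SerializerHelperRoundTrip {

    // Write a value using the helper's tagged format.
    static byte[] serialize(SerializerHelper helper, Object value) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
            helper.writeObject(value, out);
        }
        return bytes.toByteArray();
    }

    // Read the value back with an identically configured helper.
    static Object deserialize(SerializerHelper helper, byte[] data) throws IOException, ClassNotFoundException {
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(data))) {
            return helper.readObject(in);
        }
    }
}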

From source file:org.codice.admin.router.SparkServlet.java

private void writeObject(ObjectOutputStream stream) throws IOException {
    throw new NotSerializableException(getClass().getName());
}

From source file:org.apache.tinkerpop.gremlin.giraph.process.computer.GiraphGraphComputer.java

@Override
public int run(final String[] args) {
    final Storage storage = FileSystemStorage.open(this.giraphConfiguration);
    storage.rm(this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION));
    this.giraphConfiguration.setBoolean(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT_HAS_EDGES,
            this.persist.equals(Persist.EDGES));
    try {
        // store vertex and edge filters (will propagate down to native InputFormat or else GiraphVertexInputFormat will process)
        final BaseConfiguration apacheConfiguration = new BaseConfiguration();
        apacheConfiguration.setDelimiterParsingDisabled(true);
        GraphFilterAware.storeGraphFilter(apacheConfiguration, this.giraphConfiguration, this.graphFilter);

        // it is possible to run graph computer without a vertex program (and thus, only map reduce jobs if they exist)
        if (null != this.vertexProgram) {
            // a way to verify in Giraph whether the traversal will go over the wire or not
            try {
                VertexProgram.createVertexProgram(this.hadoopGraph,
                        ConfUtil.makeApacheConfiguration(this.giraphConfiguration));
            } catch (final IllegalStateException e) {
                if (e.getCause() instanceof NumberFormatException)
                    throw new NotSerializableException(
                            "The provided traversal is not serializable and thus, can not be distributed across the cluster");
            }
            // remove historic combiners in configuration propagation (this occurs when job chaining)
            if (!this.vertexProgram.getMessageCombiner().isPresent())
                this.giraphConfiguration.unset(GiraphConstants.MESSAGE_COMBINER_CLASS.getKey());
            // split required workers across system (open map slots + max threads per machine = total amount of TinkerPop workers)
            if (!this.useWorkerThreadsInConfiguration) {
                final Cluster cluster = new Cluster(GiraphGraphComputer.this.giraphConfiguration);
                int totalMappers = cluster.getClusterStatus().getMapSlotCapacity() - 1; // 1 is needed for master
                cluster.close();
                if (this.workers <= totalMappers) {
                    this.giraphConfiguration.setWorkerConfiguration(this.workers, this.workers, 100.0F);
                    this.giraphConfiguration.setNumComputeThreads(1);
                } else {
                    if (totalMappers == 0)
                        totalMappers = 1; // happens in local mode
                    int threadsPerMapper = Long
                            .valueOf(Math.round((double) this.workers / (double) totalMappers)).intValue(); // TODO: need to find least common denominator
                    this.giraphConfiguration.setWorkerConfiguration(totalMappers, totalMappers, 100.0F);
                    this.giraphConfiguration.setNumComputeThreads(threadsPerMapper);
                }
            }
            // prepare the giraph vertex-centric computing job
            final GiraphJob job = new GiraphJob(this.giraphConfiguration,
                    Constants.GREMLIN_HADOOP_GIRAPH_JOB_PREFIX + this.vertexProgram);
            job.getInternalJob().setJarByClass(GiraphGraphComputer.class);
            this.logger.info(Constants.GREMLIN_HADOOP_GIRAPH_JOB_PREFIX + this.vertexProgram);
            // handle input paths (if any)
            String inputLocation = this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_INPUT_LOCATION, null);
            if (null != inputLocation && FileInputFormat.class.isAssignableFrom(this.giraphConfiguration
                    .getClass(Constants.GREMLIN_HADOOP_GRAPH_INPUT_FORMAT, InputFormat.class))) {
                inputLocation = Constants.getSearchGraphLocation(inputLocation, storage)
                        .orElse(this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_INPUT_LOCATION));
                FileInputFormat.setInputPaths(job.getInternalJob(), new Path(inputLocation));
            }
            // handle output paths (if any)
            String outputLocation = this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,
                    null);
            if (null != outputLocation && FileOutputFormat.class.isAssignableFrom(this.giraphConfiguration
                    .getClass(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT, OutputFormat.class))) {
                outputLocation = Constants.getGraphLocation(
                        this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION));
                FileOutputFormat.setOutputPath(job.getInternalJob(), new Path(outputLocation));
            }
            // execute the job and wait until it completes (if it fails, throw an exception)
            if (!job.run(true))
                throw new IllegalStateException(
                        "The GiraphGraphComputer job failed -- aborting all subsequent MapReduce jobs: "
                                + job.getInternalJob().getStatus().getFailureInfo());
            // add vertex program memory values to the return memory
            for (final MemoryComputeKey memoryComputeKey : this.vertexProgram.getMemoryComputeKeys()) {
                if (!memoryComputeKey.isTransient() && storage.exists(Constants.getMemoryLocation(
                        this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION),
                        memoryComputeKey.getKey()))) {
                    final ObjectWritableIterator iterator = new ObjectWritableIterator(this.giraphConfiguration,
                            new Path(Constants.getMemoryLocation(
                                    this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION),
                                    memoryComputeKey.getKey())));
                    if (iterator.hasNext()) {
                        this.memory.set(memoryComputeKey.getKey(), iterator.next().getValue());
                    }
                    // vertex program memory items are not stored on disk
                    storage.rm(Constants.getMemoryLocation(
                            this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION),
                            memoryComputeKey.getKey()));
                }
            }
            final Path path = new Path(Constants.getMemoryLocation(
                    this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION),
                    Constants.HIDDEN_ITERATION));
            this.memory.setIteration(
                    (Integer) new ObjectWritableIterator(this.giraphConfiguration, path).next().getValue());
            storage.rm(Constants.getMemoryLocation(
                    this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION),
                    Constants.HIDDEN_ITERATION));
        }
        // do map reduce jobs
        this.giraphConfiguration.setBoolean(Constants.GREMLIN_HADOOP_GRAPH_INPUT_FORMAT_HAS_EDGES,
                this.giraphConfiguration.getBoolean(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT_HAS_EDGES,
                        true));
        for (final MapReduce mapReduce : this.mapReducers) {
            this.memory.addMapReduceMemoryKey(mapReduce);
            MapReduceHelper.executeMapReduceJob(mapReduce, this.memory, this.giraphConfiguration);
        }

        // if no persistence, delete the graph and memory output
        if (this.persist.equals(Persist.NOTHING))
            storage.rm(this.giraphConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION));
    } catch (final Exception e) {
        throw new IllegalStateException(e.getMessage(), e);
    }
    return 0;
}

From source file:org.codice.admin.router.SparkServlet.java

private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException {
    throw new NotSerializableException(getClass().getName());
}
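
Together with the writeObject counterpart shown earlier from the same source file, this illustrates a common idiom: a class that should never travel through Java serialization defines private serialization hooks that throw unconditionally. A self-contained sketch of the same idiom (the class name below is illustrative):

import java.io.IOException;
import java.io.NotSerializableException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

// Marked Serializable (for example via an inherited interface) but actively
// refuses both serialization and deserialization.
public class NeverSerialized implements Serializable {

    private void writeObject(ObjectOutputStream stream) throws IOException {
        throw new NotSerializableException(getClass().getName());
    }

    private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException {
        throw new NotSerializableException(getClass().getName());
    }
}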

From source file:com.healthmarketscience.rmiio.DirectRemoteInputStream.java

/**
 * Serializes this object and all of the underlying stream's data directly
 * to the given ObjectOutputStream.
 *
 * @serialData the compression status of the stream, followed by the default
 *             chunk size for the serialized stream data (int), followed by
 *             chunks of the underlying stream. Each chunk has a chunk code
 *             which indicates how to handle its length (either default,
 *             explicit as int, or EOF), and then the specified number of
 *             bytes if not EOF.
 */
private void writeObject(ObjectOutputStream out) throws IOException {
    switch (_consumptionState) {
    case NONE:
        // this is the required state
        break;
    case LOCAL:
    case SERIAL:
        throw new NotSerializableException(getClass().getName()
                + " (underlying stream has already been consumed, type: " + _consumptionState + ")");
    default:
        throw new RuntimeException("unknown state " + _consumptionState);
    }

    out.defaultWriteObject();

    // once we start consuming the inputstream, we can't rewrite it
    _consumptionState = ConsumptionState.SERIAL;

    final int defaultChunkSize = RemoteInputStreamServer.DEFAULT_CHUNK_SIZE;

    // note, we create RemoteInputStreamServer instances, but we do not
    // actually export them.
    RemoteInputStreamServer server = null;
    try {
        if (_compress && (_tmpFile == null)) {
            // this is the first time the data is being read, and we need to
            // compress it as we read it.
            server = new GZIPRemoteInputStream(_in, _monitor, defaultChunkSize);
        } else {
            // we are re-serializing a previously serialized stream, so the data
            // is already compressed (if compression was desired)
            server = new SimpleRemoteInputStream(_in, _monitor, defaultChunkSize);
        }

        // record the default chunk size
        out.writeInt(defaultChunkSize);

        int packetId = RemoteStreamServer.INITIAL_VALID_SEQUENCE_ID;
        while (true) {

            byte[] packet = server.readPacket(packetId++);

            if (packet != null) {
                if (packet.length > 0) {
                    // we have a packet with data, write it to the output stream. if
                    // the packet is a different length, record the length.
                    if (packet.length == defaultChunkSize) {
                        out.write(DEFAULT_CHUNK_CODE);
                    } else {
                        out.write(CUSTOM_CHUNK_CODE);
                        out.writeInt(packet.length);
                    }
                    out.write(packet);
                }
            } else {
                // reached end of stream, indicate this
                out.write(EOF_CODE);
                break;
            }

        }

        // local stream is exhausted
        _gotEOF = true;

        // indicate successful read
        try {
            server.close(true);
        } catch (IOException e) {
            // log, but ignore failures here
            if (LOG.isDebugEnabled()) {
                LOG.debug("Failed closing server", e);
            }
        }

    } finally {
        RmiioUtil.closeQuietly(server);
        RmiioUtil.closeQuietly(this);
    }
}

From source file:com.healthmarketscience.rmiio.RemoteStreamServer.java

/**
 * Manages serialization for all remote stream instances by returning the
 * result of a call to {@link #export} on this instance as a Serializable
 * replacement for an instance of this class. While generally the developer
 * should be managing the call to export, implementing this method in a
 * useful way makes the simple things simple (passing a reference to a
 * server implementation in a remote method call will "do the right thing",
 * replacing the actual reference to this instance with a reference to an
 * automagically generated remote reference to this server instance).
 *
 * @return an exported remote stub for this instance
 * @throws NotSerializableException
 *            if the export attempt fails
 * @serialData the serialized data is the object returned by the {@link #export} method
 */
protected final Object writeReplace() throws ObjectStreamException {
    // note, we only want to do implicit export once. it's possible that a
    // remote invocation failed and needs to be re-attempted, in which case we
    // don't want to re-export this instance, cause that will fail.
    StreamType replacement = _writeReplacement;
    if (replacement == null) {
        try {
            replacement = export();
            _writeReplacement = replacement;
        } catch (RemoteException e) {
            throw (NotSerializableException) (new NotSerializableException(
                    getClass().getName() + ": Could not export stream server")).initCause(e);
        }
    }
    return replacement;
}