Example usage for com.google.gson JsonObject addProperty

List of usage examples for com.google.gson JsonObject addProperty

Introduction

In this page you can find the example usage for com.google.gson JsonObject addProperty.

Prototype

public void addProperty(String property, Character value) 

Source Link

Document

Convenience method to add a char member. (Note: the usage examples below mostly exercise the String- and Number-valued overloads of addProperty rather than the Character overload shown above.)

Usage

From source file:ch.ethz.coss.nervous.pulse.WriteJSON.java

License:Open Source License

/**
 * Answers a single HTTP request on {@code socket} with a GeoJSON "Feature"
 * built from the given {@link Visual} reading, then closes the socket.
 *
 * Fixes over the original: Content-Length is now the body's byte length
 * (not its char count), the media type is the standard application/json,
 * and the socket is closed even when reading/writing fails.
 *
 * @param socket accepted client connection (consumed and closed here)
 * @param o      expected to be a {@link Visual}; a ClassCastException
 *               propagates otherwise, as in the original
 */
public static void sendGeoJSON(Socket socket, Object o) {
    try {
        try {
            // Drain the HTTP request: read header lines until the blank separator.
            Scanner in = new Scanner(socket.getInputStream());
            while (!in.nextLine().isEmpty())
                ;

            // Write the response in UTF-8 so Content-Length matches the bytes sent.
            PrintWriter out = new PrintWriter(new OutputStreamWriter(socket.getOutputStream(),
                    java.nio.charset.StandardCharsets.UTF_8));

            Visual reading = (Visual) o;

            // Build {"type":"Feature","geometry":{...},"properties":{...}}.
            JsonObject feature = new JsonObject();
            try {
                feature.addProperty("type", "Feature");

                JsonObject point = new JsonObject();
                point.addProperty("type", "Point");
                JsonArray coord = new JsonArray();
                // NOTE(review): GeoJSON expects [longitude, latitude]; this emits
                // latnLong[0] then latnLong[1] — confirm the stored order matches.
                coord.add(new JsonPrimitive(reading.location.latnLong[0]));
                coord.add(new JsonPrimitive(reading.location.latnLong[1]));
                point.add("coordinates", coord);
                feature.add("geometry", point);

                // Properties depend on the concrete reading subtype (0/1/2).
                JsonObject properties = new JsonObject();
                if (reading.type == 0) {
                    properties.addProperty("readingType", "" + 0);
                    properties.addProperty("lightLevel", "" + ((LightReading) reading).lightVal);
                } else if (reading.type == 1) {
                    properties.addProperty("readingType", "" + 1);
                    properties.addProperty("noiseLevel", "" + ((NoiseReading) reading).soundVal);
                } else if (reading.type == 2) {
                    properties.addProperty("readingType", "" + 2);
                    properties.addProperty("message", "" + ((TextVisual) reading).textMsg);
                }
                // Unknown types get an empty properties object, as before.
                feature.add("properties", properties);
            } catch (JsonParseException e) {
                // Best effort: an unserializable reading still yields a (partial) feature.
            }

            String message = feature.toString();
            // Content-Length is defined in bytes, not chars.
            int contentLength = message.getBytes(java.nio.charset.StandardCharsets.UTF_8).length;

            out.println("HTTP/1.0 200 OK");
            out.println("Content-Type: application/json");
            out.printf("Content-Length: %d%n", contentLength);
            out.println("Access-Control-Allow-Origin: *");
            out.println();
            out.println(message);
            out.flush();
        } finally {
            // The original leaked the socket when an exception fired before close().
            socket.close();
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file:ch.ethz.inf.vs.hypermedia.corehal.block.CoREHalResourceFuture.java

License:Open Source License

/**
 * Creates a new future block of type {@code V} and pre-populates it with the
 * minimal state advertised by a CoRE web link: the link URI (stored under
 * "_self") and, when present, its "location" attribute.
 *
 * @param type factory producing an empty future block
 * @param item the discovered web link to seed the block from
 * @return the freshly created, partially loaded block
 */
public static <V extends CoREHalResourceFuture> V createFromWebLink(Supplier<V> type, WebLink item) {
    String location = Utils.getWebLinkAttribute(item, "location");
    V block = type.get();
    JsonObject seed = new JsonObject();
    seed.addProperty("_self", item.getURI());
    // "location" is optional on a web link; only copy it when advertised.
    if (location != null) {
        seed.addProperty("location", location);
    }
    block.loadPartial(seed);
    return block;
}

From source file:ch.ethz.inf.vs.hypermedia.corehal.block.LocationDescriptionFuture.java

License:Open Source License

/**
 * Submits a minimal partial description (self link + location) of the given
 * child location through this resource's "add-child" form.
 *
 * @param description the child location to register
 * @return a future for the resulting CoAP form request
 */
public CoapRequestFuture addChildLocation(LocationDescription description) {
    JsonObject partialDescription = new JsonObject();
    // NOTE(review): getSelf(getSelf("")) passes the self URI as its own fallback;
    // presumably getSelf(String) returns the argument when unset — confirm this
    // nesting is intentional and not meant to be a plain getSelf("").
    partialDescription.addProperty("_self", description.getSelf(description.getSelf("")));
    partialDescription.addProperty("location", description.getLocation());
    return getFormRequest("add-child", partialDescription);
}

From source file:ch.ethz.inf.vs.hypermedia.corehal.block.LocationDescriptionFuture.java

License:Open Source License

/**
 * Submits a minimal partial description (self link + location) of the given
 * thing through this resource's "add-thing" form.
 *
 * @param description the thing to attach to this location
 * @return a future for the resulting CoAP form request
 */
public CoapRequestFuture addThing(ThingDescription description) {
    JsonObject partialDescription = new JsonObject();
    // NOTE(review): getSelf(getSelf("")) nests the self URI as its own fallback
    // argument (same pattern as addChildLocation) — verify this is intentional.
    partialDescription.addProperty("_self", description.getSelf(description.getSelf("")));
    partialDescription.addProperty("location", description.getLocation());
    return getFormRequest("add-thing", partialDescription);
}

From source file:ch.ethz.inf.vs.hypermedia.corehal.block.LocationDescriptionFuture.java

License:Open Source License

/**
 * Submits a minimal partial description (self link + location) of the given
 * thing through this resource's "remove-thing" form.
 *
 * @param description the thing to detach from this location
 * @return a future for the resulting CoAP form request
 */
public CoapRequestFuture removeThing(ThingDescription description) {
    JsonObject partialDescription = new JsonObject();
    // NOTE(review): getSelf(getSelf("")) nests the self URI as its own fallback
    // argument (same pattern as addChildLocation) — verify this is intentional.
    partialDescription.addProperty("_self", description.getSelf(description.getSelf("")));
    partialDescription.addProperty("location", description.getLocation());
    return getFormRequest("remove-thing", partialDescription);
}

From source file:ch.gaps.slasher.views.main.MainController.java

License:Open Source License

/**
 * To save the state of the software, the tab, the servers and the databases.
 *///from   ww  w  .  jav a  2s .c  o  m
/**
 * Persists the application state — servers, their databases, and the open
 * editor tabs — to "save.json" as pretty-printed JSON.
 *
 * Fixes over the original: the writer is now always closed (the original
 * leaked the file handle), and the file is written in explicit UTF-8 rather
 * than the platform default charset.
 */
public void saveState() {
    try {
        os = new BufferedWriter(new OutputStreamWriter(new FileOutputStream("save.json"),
                java.nio.charset.StandardCharsets.UTF_8));
        try {
            Gson jsonEngine = new GsonBuilder().setPrettyPrinting().create();
            JsonArray mainArray = new JsonArray();

            for (Server s : servers) {
                JsonObject server = new JsonObject();

                server.addProperty("serverDescription", s.getDescription());
                server.addProperty("serverDriver", s.getDiverName());
                server.addProperty("serverHost", s.getHost());
                server.addProperty("serverPort", s.getPort());

                JsonArray databases = new JsonArray();

                for (Database db : s.getDatabases()) {
                    JsonObject database = new JsonObject();

                    // Key spelling ("databaseDescritpion") kept as-is for
                    // compatibility with the loader that reads this file;
                    // rename both sides together if ever fixed.
                    database.addProperty("databaseDescritpion", db.getDescritpion());
                    database.addProperty("databaseName", db.getName());
                    database.addProperty("databaseUsername", db.getUsername());

                    JsonArray tabsJson = new JsonArray();

                    // Only persist tabs that belong to this database (identity check).
                    tabs.forEach(editorTab -> {
                        if (editorTab.getDatabase() == db) {
                            JsonObject tabJson = new JsonObject();
                            // NOTE(review): "name" is a hard-coded placeholder —
                            // probably meant to be the tab's actual title; confirm
                            // against the state loader before changing the value.
                            tabJson.addProperty("tabName", "name");
                            tabJson.addProperty("moduleName", editorTab.getModuleName());
                            tabJson.addProperty("content", editorTab.getEditorController().getContent());
                            tabsJson.add(tabJson);
                        }
                    });

                    database.add("tabs", tabsJson);
                    databases.add(database);
                }
                server.add("databases", databases);
                mainArray.add(server);
            }

            os.write(jsonEngine.toJson(mainArray));
            os.flush();
        } finally {
            // The original never closed the stream, leaking the file handle.
            os.close();
        }
    } catch (IOException e) {
        addToUserCommunication(e.getMessage());
    }
}

From source file:ch.icclab.cyclops.consume.data.BillDeserializer.java

License:Open Source License

@Override
public void preDeserialize(Class<? extends T> clazz, JsonElement jsonElement, Gson gson) {

    // valid JSON object
    if (jsonElement != null && jsonElement.isJsonObject()) {
        JsonObject root = jsonElement.getAsJsonObject();

        // map data to string so it can be persisted as jsonb
        if (root.has(Bill.DATA_FIELD.getName())) {
            root.addProperty(Bill.DATA_FIELD.getName(), new Gson().toJson(root.get(Bill.DATA_FIELD.getName())));
        }//w  ww  .  j av a  2  s .  co  m
    }
}

From source file:ch.icclab.cyclops.consume.data.CDRDeserializer.java

License:Open Source License

@Override
public void preDeserialize(Class<? extends T> clazz, JsonElement jsonElement, Gson gson) {

    // valid JSON object
    if (jsonElement != null && jsonElement.isJsonObject()) {
        JsonObject root = jsonElement.getAsJsonObject();

        // map data to string so it can be persisted as jsonb
        if (root.has(CDR.DATA_FIELD.getName())) {
            root.addProperty(CDR.DATA_FIELD.getName(), new Gson().toJson(root.get(CDR.DATA_FIELD.getName())));
        }//from   ww w  . j  a  v  a  2  s  .  c om
    }
}

From source file:ch.icclab.cyclops.consume.data.UsageDeserializer.java

License:Open Source License

@Override
public void preDeserialize(Class<? extends T> clazz, JsonElement jsonElement, Gson gson) {

    // valid JSON object
    if (jsonElement != null && jsonElement.isJsonObject()) {
        JsonObject root = jsonElement.getAsJsonObject();

        // map data to string so it can be persisted as jsonb
        if (root.has(Usage.DATA_FIELD.getName())) {
            root.addProperty(Usage.DATA_FIELD.getName(),
                    new Gson().toJson(root.get(Usage.DATA_FIELD.getName())));
        }//from  w w w .jav  a 2 s  . co  m
    }
}

From source file:ch.iterate.openstack.swift.Client.java

License:Open Source License

/**
 * @param container          The name of the container
 * @param name               The name of the object
 * @param entity             The name of the request entity (make sure to set the Content-Type
 * @param metadata           The metadata for the object
 * @param md5sum             The 32 character hex encoded MD5 sum of the data
 * @param objectSize         The total size in bytes of the object to be stored
 * @param segmentSize        Optional size in bytes of the object segments to be stored (forces large object support) default 4G
 * @param dynamicLargeObject Optional setting to use dynamic large objects, False/null will use static large objects if required
 * @param segmentContainer   Optional name of container to store file segments, defaults to storing chunks in the same container as the file sill appear
 * @param segmentFolder      Optional name of folder for storing file segments, defaults to ".chunks/"
 * @param leaveSegments      Optional setting to leave segments of large objects in place when the manifest is overwrtten/changed
 * @return The ETAG if the save was successful, null otherwise
 * @throws GenericException There was a protocol level error talking to CloudFiles
 *///from  ww w  .j  a v  a  2  s . c o m
/**
 * Stores an object, transparently switching to large-object (segmented)
 * upload when the size or the caller's parameters require it.
 *
 * Fixes over the original: the single-thread write executor is now shut
 * down (it was leaked, keeping a non-daemon thread alive), the interrupt
 * status is restored when the writer thread is interrupted, and the
 * redundant printStackTrace() before a rethrow was removed.
 */
public String storeObject(Region region, String container, String name, HttpEntity entity,
        Map<String, String> metadata, String md5sum, Long objectSize, Long segmentSize,
        Boolean dynamicLargeObject, String segmentContainer, String segmentFolder, Boolean leaveSegments)
        throws IOException, InterruptedException {
    /*
     * Default values for large object support. We also use the defaults combined with the inputs
     * to determine whether to store as a large object.
     */

    /*
     * The maximum size of a single object (5GiB).
     */
    long singleObjectSizeLimit = (long) (5 * Math.pow(1024, 3));

    /*
     * The default minimum segment size (1MiB).
     */
    long minSegmentSize = 1024L * 1024L;

    /*
     * Set the segment size.
     *
     * Defaults to 4GiB segments, and will not permit smaller than 1MiB segments.
     */
    long actualSegmentSize = (segmentSize == null) ? (long) (4 * Math.pow(1024, 3))
            : Math.max(segmentSize, minSegmentSize);

    /*
     * Determines if we will store using large objects - we may do this for 3 reasons:
     *
     *  - A segmentSize has been specified and the object size is greater than the minimum segment size
     *  - If an objectSize is provided and is larger than the single object size limit of 5GiB
     *  - A segmentSize has been specified, but no objectSize given (we take this as a request for segmentation)
     *
     * The last case may fail if the user does not provide at least as much data as the minimum segment
     * size configured on the server, and will always produce a large object structure (even if only one
     * small segment is required).
     */
    objectSize = (objectSize == null) ? -1 : objectSize;
    boolean useLargeObject = ((segmentSize != null) && (objectSize > actualSegmentSize))
            || (objectSize > singleObjectSizeLimit) || ((segmentSize != null) && (objectSize == -1));

    if (!useLargeObject) {
        return storeObject(region, container, name, entity, metadata, md5sum);
    } else {
        /*
         * We need to upload a large object as defined by the method
         * parameters. For now this is done sequentially, but a parallel
         * version using appropriate random access to the underlying data
         * may be desirable.
         *
         * We make the assumption that the given file size will not be
         * greater than int.MAX_VALUE * segmentSize
         *
         */
        leaveSegments = (leaveSegments == null) ? Boolean.FALSE : leaveSegments;
        dynamicLargeObject = (dynamicLargeObject == null) ? Boolean.FALSE : dynamicLargeObject;
        segmentFolder = (segmentFolder == null) ? ".file-segments" : segmentFolder;
        segmentContainer = (segmentContainer == null) ? container : segmentContainer;

        /*
         * If we have chosen not to leave existing large object segments in place (default)
         * then we need to collect information about any existing file segments so that we can
         * deal with them after we complete the upload of the new manifest.
         *
         * We should only delete existing segments after a successful upload of a new manifest file
         * because this constitutes an object update and the older file should remain available
         * until the new file can be downloaded.
         */
        Map<String, List<StorageObject>> oldSegmentsToRemove = null;
        if (!leaveSegments) {
            oldSegmentsToRemove = listObjectSegments(region, container, name);
        }

        /*
         * Upload the new segments and manifest
         */
        int segmentNumber = 1;
        long timeStamp = System.currentTimeMillis() / 1000L;
        String segmentBase = String.format("%s/%d/%d", segmentFolder, timeStamp, objectSize);

        /*
         * Create subInputStream from the OutputStream we will pass to the
         * HttpEntity for writing content.
         */
        final PipedInputStream contentInStream = new PipedInputStream(64 * 1024);
        final PipedOutputStream contentOutStream = new PipedOutputStream(contentInStream);
        SubInputStream segmentStream = new SubInputStream(contentInStream, actualSegmentSize, false);

        /*
         * Fork the call to entity.writeTo() that allows us to grab any exceptions raised
         */
        final HttpEntity e = entity;

        final Callable<Boolean> writer = new Callable<Boolean>() {
            public Boolean call() throws Exception {
                e.writeTo(contentOutStream);
                return Boolean.TRUE;
            }
        };

        ExecutorService writeExecutor = Executors.newSingleThreadExecutor();
        final Future<Boolean> future = writeExecutor.submit(writer);
        /*
         * Check the future for exceptions after we've finished uploading segments
         */

        Map<String, List<StorageObject>> newSegmentsAdded = new HashMap<String, List<StorageObject>>();
        List<StorageObject> newSegments = new LinkedList<StorageObject>();
        JsonArray manifestSLO = new JsonArray();
        boolean finished = false;

        /*
         * Upload each segment of the file by reading sections of the content input stream
         * until the entire underlying stream is complete
         */
        while (!finished) {
            String segmentName = String.format("%s/%08d", segmentBase, segmentNumber);

            String etag;
            try {
                etag = storeObject(region, segmentContainer, segmentStream, "application/octet-stream",
                        segmentName, new HashMap<String, String>());
            } catch (IOException ex) {
                // Rethrown to the caller; the stack trace travels with the exception.
                throw ex;
            }
            String segmentPath = segmentContainer + "/" + segmentName;
            long bytesUploaded = segmentStream.getBytesProduced();

            /*
             * Create the appropriate manifest structure if we're making a static large
             * object.
             *
             *   ETAG returned by the simple upload
             *   total size of segment uploaded
             *   path of segment
             */
            if (!dynamicLargeObject) {
                JsonObject segmentJSON = new JsonObject();

                segmentJSON.addProperty("path", segmentPath);
                segmentJSON.addProperty("etag", etag);
                segmentJSON.addProperty("size_bytes", bytesUploaded);
                manifestSLO.add(segmentJSON);

                newSegments.add(new StorageObject(segmentName));
            }

            segmentNumber++;
            if (!finished) {
                finished = segmentStream.endSourceReached();
            }
            newSegmentsAdded.put(segmentContainer, newSegments);

            segmentStream.readMoreBytes(actualSegmentSize);
        }

        /*
         * Attempts to retrieve the return value from the write operation
         * Any exceptions raised can then be handled appropriately
         */
        try {
            future.get();
        } catch (InterruptedException ex) {
            /*
             * The write was interrupted... should we delete the segments?
             * For now we'll leave orphaned segments, but we should re-visit this later.
             * Restore the interrupt flag so callers can observe the interruption.
             */
            Thread.currentThread().interrupt();
        } catch (ExecutionException ex) {
            /*
             * This should always be an IOException or a RuntimeException
             * because the call to entity.writeTo() only throws IOException
             */
            Throwable t = ex.getCause();

            if (t instanceof IOException) {
                throw (IOException) t;
            } else {
                throw (RuntimeException) t;
            }
        } finally {
            // Release the writer thread; the original leaked the executor,
            // keeping a non-daemon thread alive after the upload finished.
            writeExecutor.shutdown();
        }

        /*
         * Create an appropriate manifest depending on our DLO/SLO choice
         */
        String manifestEtag;
        if (dynamicLargeObject) {
            /*
             * Empty manifest with header detailing the shared prefix of object segments
             */
            long manifestTimeStamp = System.currentTimeMillis() / 1000L;
            metadata.put(Constants.X_OBJECT_META + "mtime", String.format("%s", manifestTimeStamp));
            manifestEtag = createDLOManifestObject(region, container, entity.getContentType().getValue(), name,
                    segmentBase, metadata);
        } else {
            /*
             * Manifest containing json list specifying details of the object segments.
             */
            manifestEtag = createSLOManifestObject(region, container, entity.getContentType().getValue(), name,
                    manifestSLO.toString(), metadata);
        }

        /*
         * Delete stale segments of overwritten large object if requested.
         */
        if (!leaveSegments) {
            /*
             * Before deleting old segments, remove any objects from the delete list
             * that are also part of a new static large object that were updated during the upload.
             */
            if (!(oldSegmentsToRemove == null)) {
                for (String c : oldSegmentsToRemove.keySet()) {
                    List<StorageObject> rmv = oldSegmentsToRemove.get(c);
                    if (newSegmentsAdded.containsKey(c)) {
                        rmv.removeAll(newSegmentsAdded.get(c));
                    }
                    List<String> rmvNames = new LinkedList<String>();
                    for (StorageObject s : rmv) {
                        rmvNames.add(s.getName());
                    }
                    deleteObjects(region, c, rmvNames);
                }
            }
        }

        return manifestEtag;
    }
}