Example usage for java.util Queue add

List of usage examples for java.util Queue add

Introduction

On this page you can find example usage for java.util Queue add.

Prototype

boolean add(E e);

Document

Inserts the specified element into this queue if it is possible to do so immediately without violating capacity restrictions, returning true upon success and throwing an IllegalStateException if no space is currently available.
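
The contract matters most on capacity-restricted queues: add throws IllegalStateException when no space is available, while the closely related offer returns false instead. A minimal sketch of the difference, using the bounded java.util.concurrent.ArrayBlockingQueue purely for illustration:

import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

public class QueueAddDemo {
    public static void main(String[] args) {
        Queue<String> queue = new ArrayBlockingQueue<>(1); // capacity of one
        System.out.println(queue.add("first"));    // true: element inserted
        System.out.println(queue.offer("second")); // false: queue is full
        queue.add("third"); // throws IllegalStateException: Queue full
    }
}

On unbounded implementations such as LinkedList or ArrayDeque, add can never fail for capacity reasons, as the examples below show.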

Usage

From source file:org.wso2.carbon.andes.event.core.internal.subscription.registry.TopicManagerServiceImpl.java

/**
 * Adds subscriptions to a list using the resource path provided
 *
 * @param resourcePath  the topic name
 * @param subscriptions a list of subscriptions for the topic
 * @param pathsQueue    the topic folder
 * @param withChildren  whether to add subscriptions of children, i.e. subtopics
 * @throws EventBrokerException
 */
private void addSubscriptions(String resourcePath, List<Subscription> subscriptions, Queue<String> pathsQueue,
        boolean withChildren) throws EventBrokerException {

    try {
        UserRegistry userRegistry = this.registryService
                .getGovernanceSystemRegistry(EventBrokerHolder.getInstance().getTenantId());
        String subscriptionsPath = getSubscriptionsPath(resourcePath);

        // First, if there are subscriptions for this topic, add them; otherwise move on to the child folders.
        if (userRegistry.resourceExists(subscriptionsPath)) {
            Collection collection = (Collection) userRegistry.get(subscriptionsPath);
            for (String subscriptionPath : collection.getChildren()) {
                Resource subscriptionResource = userRegistry.get(subscriptionPath);
                Subscription subscription = JavaUtil.getSubscription(subscriptionResource);
                subscription.setTopicName(removeResourcePath(resourcePath));

                if (subscriptionPath.endsWith("/")) {
                    // trim the trailing slash before extracting the subscription ID
                    subscriptionPath = subscriptionPath.substring(0, subscriptionPath.lastIndexOf("/"));
                }
                subscription.setId(subscriptionPath.substring(subscriptionPath.lastIndexOf("/") + 1));
                subscriptions.add(subscription);
            }
        }

        // add child subscriptions only for resource collections
        if (withChildren) {
            Resource resource = userRegistry.get(resourcePath);
            if (resource instanceof Collection) {
                Collection childResources = (Collection) resource;
                for (String childResourcePath : childResources.getChildren()) {
                    if ((!EventBrokerConstants.EB_CONF_WS_SUBSCRIPTION_COLLECTION_NAME
                            .contains(childResourcePath))
                            && (!EventBrokerConstants.EB_CONF_JMS_SUBSCRIPTION_COLLECTION_NAME
                                    .contains(childResourcePath))) {
                        // i.e. this folder is a topic folder
                        pathsQueue.add(childResourcePath);
                    }
                }
            }
        }

    } catch (RegistryException e) {
        throw new EventBrokerException("Cannot access the registry", e);
    }
}
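
Here pathsQueue drives a breadth-first walk of the topic tree: each call enqueues subtopic folders for the caller to dequeue and process later. A self-contained sketch of the same pattern, with a hypothetical parent-to-children map standing in for the WSO2 registry:

import java.util.ArrayDeque;
import java.util.List;
import java.util.Map;
import java.util.Queue;

public class TopicTreeWalk {
    // Hypothetical topic hierarchy; the real code reads this from the registry.
    static final Map<String, List<String>> TREE = Map.of(
            "/topics", List.of("/topics/a", "/topics/b"),
            "/topics/a", List.of("/topics/a/x"));

    public static void main(String[] args) {
        Queue<String> pathsQueue = new ArrayDeque<>();
        pathsQueue.add("/topics");
        while (!pathsQueue.isEmpty()) {
            String path = pathsQueue.remove();
            System.out.println("visiting " + path);
            // enqueue the children so they are visited after this level
            for (String child : TREE.getOrDefault(path, List.of())) {
                pathsQueue.add(child);
            }
        }
    }
}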

From source file:it.geosolutions.geobatch.actions.commons.MoveAction.java

/**
 * Drains the incoming event queue, moves each source file to the
 * configured destination, and returns the resulting events.
 */
public Queue<EventObject> execute(Queue<EventObject> events) throws ActionException {

    listenerForwarder.started();
    listenerForwarder.setTask("build the output absolute file name");

    // the queue of events to return
    final Queue<EventObject> ret = new LinkedList<EventObject>();

    listenerForwarder.setTask("Building/getting the root data structure");

    boolean moveMultipleFile;
    final int size = events.size();
    if (size == 0) {
        throw new ActionException(this, "Empty file list");
    } else if (size > 1) {
        moveMultipleFile = true;
    } else {
        moveMultipleFile = false;
    }
    if (conf.getDestination() == null) {
        throw new IllegalArgumentException("Unable to work with a null dest dir");
    }
    if (!conf.getDestination().isAbsolute()) {
        conf.setDestination(new File(this.getConfigDir(), conf.getDestination().getPath()));
        if (LOGGER.isWarnEnabled()) {
            LOGGER.warn("Destination is not an absolute path. Absolutizing destination using temp dir: "
                    + conf.getDestination());
        }
    }

    boolean moveToDir;
    if (!conf.getDestination().isDirectory()) {
        // TODO LOG
        moveToDir = false;
        if (moveMultipleFile) {
            throw new ActionException(this,
                    "Unable to run on multiple file with an output file, use directory instead");
        }
    } else {
        moveToDir = true;
    }

    while (!events.isEmpty()) {
        listenerForwarder.setTask("Generating the output");

        final EventObject event = events.remove();
        if (event == null) {
            // TODO LOG
            continue;
        }
        if (event instanceof FileSystemEvent) {
            File source = ((FileSystemEvent) event).getSource();
            File dest;
            listenerForwarder.setTask("moving to destination");
            if (moveToDir) {
                dest = conf.getDestination();
                try {
                    FileUtils.moveFileToDirectory(source, dest, true);
                } catch (IOException e) {
                    throw new ActionException(this, e.getLocalizedMessage());
                }
            } else if (moveMultipleFile) {
                dest = new File(conf.getDestination(), source.getPath());
                try {
                    FileUtils.moveFile(source, dest);
                } catch (IOException e) {
                    throw new ActionException(this, e.getLocalizedMessage());
                }
            } else {
                // LOG continue
                continue;
            }

            // add the file to the return
            ret.add(new FileSystemEvent(dest, FileSystemEventType.FILE_ADDED));
        }
    }

    listenerForwarder.completed();
    return ret;
}
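
Both queues here are LinkedLists, which implement Queue without a capacity limit, so add behaves like offer and never throws IllegalStateException. The drain loop pairs isEmpty() with remove(); an equivalent idiom uses poll(), which returns null on an empty queue. A compact sketch of the drain-and-collect pattern:

import java.util.LinkedList;
import java.util.Queue;

public class DrainAndCollect {
    public static void main(String[] args) {
        Queue<String> events = new LinkedList<>(java.util.List.of("a.tif", "b.tif"));
        Queue<String> ret = new LinkedList<>();
        String event;
        while ((event = events.poll()) != null) { // poll: null when drained
            ret.add(event + " -> moved");         // collect the result
        }
        System.out.println(ret);
    }
}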

From source file:org.apache.streams.riak.http.RiakHttpPersistReader.java

@Override
public StreamsResultSet readAll() {

    Queue<StreamsDatum> readAllQueue = constructQueue();

    URIBuilder lk = null;

    try {

        lk = new URIBuilder(client.baseURI.toString());
        lk.setPath(client.baseURI.getPath().concat("/buckets/" + configuration.getDefaultBucket() + "/keys"));
        lk.setParameter("keys", "true");

    } catch (URISyntaxException e) {
        LOGGER.warn("URISyntaxException", e);
        return null;
    }

    HttpResponse lkResponse = null;
    try {
        HttpGet lkGet = new HttpGet(lk.build());
        lkResponse = client.client().execute(lkGet);
    } catch (IOException e) {
        LOGGER.warn("IOException", e);
        return null;
    } catch (URISyntaxException e) {
        LOGGER.warn("URISyntaxException", e);
        return null;
    }

    String lkEntityString = null;
    try {
        lkEntityString = EntityUtils.toString(lkResponse.getEntity());
    } catch (IOException e) {
        LOGGER.warn("IOException", e);
        return null;
    }

    JsonNode lkEntityNode = null;
    try {
        lkEntityNode = MAPPER.readValue(lkEntityString, JsonNode.class);
    } catch (IOException e) {
        LOGGER.warn("IOException", e);
        return null;
    }

    ArrayNode keysArray = (ArrayNode) lkEntityNode.get("keys");
    Iterator<JsonNode> keysIterator = keysArray.iterator();

    while (keysIterator.hasNext()) {
        JsonNode keyNode = keysIterator.next();
        String key = keyNode.asText();

        URIBuilder gk = null;

        try {

            gk = new URIBuilder(client.baseURI.toString());
            gk.setPath(client.baseURI.getPath()
                    .concat("/buckets/" + configuration.getDefaultBucket() + "/keys/" + key));

        } catch (URISyntaxException e) {
            LOGGER.warn("URISyntaxException", e);
            continue;
        }

        HttpResponse gkResponse = null;
        try {
            HttpGet gkGet = new HttpGet(gk.build());
            gkResponse = client.client().execute(gkGet);
        } catch (IOException e) {
            LOGGER.warn("IOException", e);
            continue;
        } catch (URISyntaxException e) {
            LOGGER.warn("URISyntaxException", e);
            continue;
        }

        String gkEntityString = null;
        try {
            gkEntityString = EntityUtils.toString(gkResponse.getEntity());
        } catch (IOException e) {
            LOGGER.warn("IOException", e);
            continue;
        }

        readAllQueue.add(new StreamsDatum(gkEntityString, key));
    }

    return new StreamsResultSet(readAllQueue);
}

From source file:com.clxcommunications.xms.ApiConnectionIT.java

/**
 * Verifies that the default HTTP client actually can handle multiple
 * simultaneous requests.
 * 
 * @throws Exception
 *             shouldn't happen
 */
@Test
public void canCancelBatchConcurrently() throws Exception {
    String spid = TestUtils.freshServicePlanId();

    // Set up the first request (the one that will be delayed).
    MtBatchSmsResult expected1 = MtBatchTextSmsResult.builder().sender("12345")
            .addRecipient("123456789", "987654321").body("Hello, world!").canceled(true)
            .id(TestUtils.freshBatchId()).createdAt(OffsetDateTime.now()).modifiedAt(OffsetDateTime.now())
            .build();

    String path1 = "/v1/" + spid + "/batches/" + expected1.id();
    byte[] response1 = json.writeValueAsBytes(expected1);

    wm.stubFor(delete(urlEqualTo(path1)).willReturn(aResponse().withFixedDelay(500) // Delay for a while.
            .withStatus(200).withHeader("Content-Type", "application/json; charset=UTF-8")
            .withBody(response1)));

    // Set up the second request.
    MtBatchSmsResult expected2 = MtBatchBinarySmsResult.builder().sender("12345")
            .addRecipient("123456789", "987654321").body("Hello, world!".getBytes()).udh((byte) 1)
            .canceled(true).id(TestUtils.freshBatchId()).createdAt(OffsetDateTime.now())
            .modifiedAt(OffsetDateTime.now()).build();

    String path2 = "/v1/" + spid + "/batches/" + expected2.id();

    stubDeleteResponse(expected2, path2);

    ApiConnection conn = ApiConnection.builder().servicePlanId(spid).token("tok")
            .endpoint("http://localhost:" + wm.port()).start();

    try {
        final Queue<MtBatchSmsResult> results = new ConcurrentArrayQueue<MtBatchSmsResult>();
        final CountDownLatch latch = new CountDownLatch(2);

        FutureCallback<MtBatchSmsResult> callback = new TestCallback<MtBatchSmsResult>() {

            @Override
            public void completed(MtBatchSmsResult result) {
                results.add(result);
                latch.countDown();
            }

        };

        conn.cancelBatchAsync(expected1.id(), callback);
        Thread.sleep(100);
        conn.cancelBatchAsync(expected2.id(), callback);

        // Wait for callback to be called.
        latch.await();

        // We expect the second message to be handled first.
        assertThat(results.size(), is(2));
        assertThat(results.poll(), is(expected2));
        assertThat(results.poll(), is(expected1));
    } finally {
        conn.close();
    }

    verifyDeleteRequest(path1);
    verifyDeleteRequest(path2);
}
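
Because two callbacks may call results.add concurrently, the test uses a thread-safe queue (ConcurrentArrayQueue is a third-party lock-free implementation). The same collect-then-await shape can be sketched with the JDK's ConcurrentLinkedQueue and a CountDownLatch, with hypothetical worker threads in place of the HTTP callbacks:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;

public class ConcurrentCollect {
    public static void main(String[] args) throws InterruptedException {
        final Queue<Integer> results = new ConcurrentLinkedQueue<>();
        final CountDownLatch latch = new CountDownLatch(2);
        for (int i = 0; i < 2; i++) {
            final int id = i;
            new Thread(() -> {
                results.add(id); // thread-safe add, no external locking
                latch.countDown();
            }).start();
        }
        latch.await(); // block until both workers have reported
        System.out.println(results.size()); // 2
    }
}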

From source file:com.oceans7.mobile.eagleswag.persistence.parsers.JsonDataFileParserStrategy.java

/**
 * This parsing method uses the convention over configuration paradigm,
 * where the name of the data file containing the questions data matches the
 * class name (simple class name) of the key provided, with a ".json" file
 * extension. For example, if GeneralQuestion.class is provided as the key,
 * the data file name expected is "GeneralQuestion.json". Likewise, the
 * object name for the array containing the questions data within the data
 * file is simple class name of the key provided. For example, if
 * GeneralQuestion.class is provided as a key, the format of the data file
 * would be:
 * 
 * <pre>
 * {
 *   "GeneralQuestion": [
 *     {
 *       "text": "General question 1",
 *       "yesValue": "10",
 *       "noValue": "0",
 *       "usedCount": "0"
 *     },
 *           .
 *           .
 *           .
 *     ]
 * }
 * </pre>
 * 
 * If no data file is found for the key provided, an empty queue is
 * returned.
 * <p/>
 * 
 * {@inheritDoc}
 * 
 * @see com.oceans7.mobile.eagleswag.persistence.DataFileParserStrategy#getQuestions(java.lang.Class)
 */
@Override
public <T extends Question> Queue<T> getQuestions(Class<T> key) {

    // The queue used to store the questions retrieved from the data file
    Queue<T> questions = new LinkedList<T>();

    // Set the object name for the data file and the name of the data file
    // to the simple name of the key. This is the implementation of the
    // "convention over configuration" mechanism for obtaining data.
    String id = key.getSimpleName();
    String dataFileName = key.getSimpleName() + EXTENSION;

    try {

        // The JSON parser to parse the data file
        JSONParser parser = new JSONParser();

        // Open the JSON file containing the questions
        InputStream dataFileInputStream = this.context.getAssets().open(DATA_FILE_ASSET_PATH + dataFileName);

        // Parse the JSON file
        JSONObject jsonObj = (JSONObject) parser.parse(new InputStreamReader(dataFileInputStream));
        dataFileInputStream.close();

        // Obtain a JSON array for the question type supplied (dependent on
        // the ID supplied)
        JSONArray questionsArray = (JSONArray) jsonObj.get(id);

        for (Object question : questionsArray) {
            // Loop through each of the questions found in the data file

            // Convert the object to a JSON object
            JSONObject jsonQuestion = (JSONObject) question;

            // --------------------------------------------------------------
            // Extract the JSON values. Note that the ID value is set to 0
            // because it will be supplied later by the data controller
            // (there is no ID associated with the questions in the
            // questions data file).
            // --------------------------------------------------------------
            int questionId = 0;
            String text = (String) jsonQuestion.get(QUESTION_TEXT_ID);
            int yesValue = Integer.parseInt((String) jsonQuestion.get(YES_VALUE_ID));
            int noValue = Integer.parseInt((String) jsonQuestion.get(NO_VALUE_ID));
            int usedCount = Integer.parseInt((String) jsonQuestion.get(USED_COUNT_ID));

            // Obtain the constructor for the supplied class
            Class<?>[] argTypes = new Class<?>[] { Integer.class, String.class, Integer.class, Integer.class,
                    Integer.class };
            Constructor<T> constructor = key.getDeclaredConstructor(argTypes);
            Object[] args = new Object[] { questionId, text, yesValue, noValue, usedCount };

            // Invoke the constructor to obtain the object
            T questionToAdd = constructor.newInstance(args);

            // Add the new general question (ignoring the ID)
            questions.add(questionToAdd);
        }
    } catch (FileNotFoundException e) {
        // A data file for the key provided cannot be found
        Log.e(this.getClass().getName(),
                "No data file named '" + dataFileName + "' found in '" + DATA_FILE_ASSET_PATH + "'");
    } catch (IOException e) {
        // IO exception occurred while accessing the JSON data file
        Log.e(this.getClass().getName(), "IO exception occurred while parsing " + dataFileName + ": " + e);
    } catch (ParseException e) {
        // A parse exception occurred while parsing a data file
        Log.e(this.getClass().getName(),
                "A parser exception occurred while parsing " + dataFileName + ": " + e);
    } catch (NoSuchMethodException e) {
        // A constructor with the specified format cannot be found for key
        Log.e(this.getClass().getName(), "Could not find the desired constructor for the " + id + ": " + e);
    } catch (IllegalArgumentException e) {
        // The arguments for the question constructor are incorrect
        Log.e(this.getClass().getName(),
                "Invalid constructor arguments while trying to create new " + id + ": " + e);
    } catch (InstantiationException e) {
        // The selected question object could not be instantiated
        Log.e(this.getClass().getName(), "Could not not instantiate an object for " + id + ": " + e);
    } catch (IllegalAccessException e) {
        // The selected question constructor could not be accessed
        Log.e(this.getClass().getName(), "Could not access the desired constructor for " + id + ": " + e);
    } catch (InvocationTargetException e) {
        // The constructor could not be invoked on the target object
        Log.e(this.getClass().getName(), "Could not invoke constructor on the target " + id + ": " + e);
    }

    // Return the queue containing the questions (which may be empty)
    return questions;
}
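
The questions queue is again an unbounded LinkedList, so every add succeeds; the interesting part is filling it via reflection. A stripped-down sketch of constructing objects reflectively and enqueuing them, with a hypothetical Question class in place of the app's model:

import java.lang.reflect.Constructor;
import java.util.LinkedList;
import java.util.Queue;

public class ReflectiveFill {
    public static class Question { // hypothetical stand-in for the model class
        private final String text;
        public Question(String text) { this.text = text; }
        @Override public String toString() { return text; }
    }

    public static void main(String[] args) throws Exception {
        Queue<Question> questions = new LinkedList<>();
        Constructor<Question> ctor = Question.class.getDeclaredConstructor(String.class);
        for (String text : new String[] { "Question 1", "Question 2" }) {
            questions.add(ctor.newInstance(text)); // construct, then enqueue
        }
        System.out.println(questions); // [Question 1, Question 2]
    }
}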

From source file:it.geosolutions.geobatch.migrationmonitor.monitor.MonitorAction.java

@Override
public Queue<FileSystemEvent> execute(Queue<FileSystemEvent> events) throws ActionException {

    // return object
    final Queue<FileSystemEvent> outputEvents = new LinkedList<FileSystemEvent>();

    try {
        // looking for file
        if (events.size() == 0)
            throw new IllegalArgumentException(
                    "MonitorAction::execute(): Wrong number of elements for this action: " + events.size());

        listenerForwarder.setTask("config");
        listenerForwarder.started();

        if (configuration == null) {
            final String message = "MonitorAction::execute(): DataFlowConfig is null.";
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message);
            throw new IllegalStateException(message);
        }

        // retrieve all the ELEMENTS table from the DB
        List<MigrationMonitor> migrationList = migrationMonitorDAO.findTablesToMigrate();
        DS2DSTokenResolver tknRes = null;

        // init some useful counters
        int failsCounter = 0;
        int iterCounter = 0;
        int totElem = migrationList.size();

        // Process all MigrationMonitors retrieved
        for (MigrationMonitor mm : migrationList) {
            iterCounter++;
            try {
                tknRes = new DS2DSTokenResolver(mm, getConfigDir());
            } catch (IOException e) {
                failsCounter++;
                LOGGER.error(e.getMessage(), e);
                LOGGER.error("error while processing MigrationMonitor " + mm.toString()
                        + " let's skip it! (the error happens while resolving tokens...)");
                LOGGER.error("fail number: " + failsCounter + " MigrationMonitor processed " + iterCounter + "/"
                        + totElem);
                continue;
            }

            String filename = getTempDir() + File.separator + mm.getTableName() + mm.getLayerId() + ".xml";
            Writer writer = null;
            try {
                writer = new FileWriter(filename);
                writer.append(tknRes.getOutputFileContent());
            } catch (IOException e) {
                failsCounter++;
                LOGGER.error("error while processing MigrationMonitor " + mm.toString()
                        + " let's skip it! (the error happens while writing on the output file)");
                LOGGER.error("fail number: " + failsCounter + " MigrationMonitor processed " + iterCounter + "/"
                        + totElem);
                continue;
            } finally {
                try {
                    if (writer != null) { // guard: the FileWriter constructor may have failed
                        writer.close();
                        FileSystemEvent fse = new FileSystemEvent(new File(filename),
                                FileSystemEventType.FILE_ADDED);
                        outputEvents.add(fse);
                    }
                } catch (IOException e) {
                    LOGGER.error(e.getMessage(), e);
                }
            }

            mm.setMigrationStatus(MigrationStatus.INPROGRESS.toString().toUpperCase());
            migrationMonitorDAO.merge(mm);
        }

    } catch (Exception t) {
        final String message = "MonitorAction::execute(): " + t.getLocalizedMessage();
        if (LOGGER.isErrorEnabled())
            LOGGER.error(message, t);
        final ActionException exc = new ActionException(this, message, t);
        listenerForwarder.failed(exc);
        throw exc;
    }

    return outputEvents;
}

From source file:de.uni_koblenz.jgralab.utilities.rsa2tg.Rsa2Tg.java

private void checkAttributes() {
    GraphClass graphClass = sg.getFirstGraphClass();
    Map<String, AttributedElementClass> definedAttributes = new HashMap<>();
    for (Attribute a : graphClass.get_attributes()) {
        if (definedAttributes.containsKey(a.get_name())) {
            throw new RuntimeException(
                    "Attribute " + a.get_name() + " at " + graphClass.get_qualifiedName() + " is duplicate.");
        }
        definedAttributes.put(a.get_name(), graphClass);
    }

    for (GraphElementClass gec : sg.getGraphElementClassVertices()) {
        boolean isVertexClass = gec.isInstanceOf(VertexClass.VC);
        definedAttributes = new HashMap<>();
        BooleanGraphMarker alreadyChecked = new BooleanGraphMarker(sg);
        Queue<GraphElementClass> queue = new LinkedList<>();
        queue.add(gec);
        while (!queue.isEmpty()) {
            GraphElementClass current = queue.poll();
            if (alreadyChecked.isMarked(current)) {
                continue;
            }
            for (Attribute att : current.get_attributes()) {
                if (definedAttributes.containsKey(att.get_name())) {
                    AttributedElementClass childClass = definedAttributes.get(att.get_name());
                    throw new RuntimeException("The name of the "
                            + ((childClass == gec) && (current != gec) ? "" : "inherited ") + "attribute "
                            + att.get_name() + " of " + (isVertexClass ? "VertexClass" : "EdgeClass") + " "
                            + childClass.get_qualifiedName()
                            + (current == gec ? " is duplicate"
                                    : (" is the same name as the inherited attribute of "
                                            + (isVertexClass ? "VertexClass" : "EdgeClass") + " "
                                            + current.get_qualifiedName()))
                            + ".");
                } else {
                    definedAttributes.put(att.get_name(), current);
                }
            }
            alreadyChecked.mark(current);
            for (Edge toSuperClass : current.incidences(
                    isVertexClass ? SpecializesVertexClass.EC : SpecializesEdgeClass.EC, EdgeDirection.OUT)) {
                GraphElementClass superClass = (GraphElementClass) toSuperClass.getThat();
                if (!alreadyChecked.isMarked(superClass)) {
                    queue.add(superClass);
                }
            }
        }
    }
}
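
queue.add paired with an "already checked" marker is the standard way to walk a class hierarchy without revisiting shared superclasses. A generic sketch with a HashSet playing the role of the BooleanGraphMarker:

import java.util.ArrayDeque;
import java.util.HashSet;
import java.util.Queue;
import java.util.Set;

public class HierarchyWalk {
    public static void main(String[] args) {
        Queue<Class<?>> queue = new ArrayDeque<>();
        Set<Class<?>> alreadyChecked = new HashSet<>();
        queue.add(java.util.LinkedList.class);
        while (!queue.isEmpty()) {
            Class<?> current = queue.poll();
            if (!alreadyChecked.add(current)) {
                continue; // already visited via another path
            }
            System.out.println(current.getName());
            if (current.getSuperclass() != null) {
                queue.add(current.getSuperclass());
            }
            for (Class<?> iface : current.getInterfaces()) {
                queue.add(iface); // may enqueue duplicates; the set filters them
            }
        }
    }
}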

From source file:co.paralleluniverse.galaxy.core.Cache.java

@Override
@SuppressWarnings({ "BoxedValueEquality" })
public void receive(Message message) {
    if (recursive.get() != Boolean.TRUE) {
        recursive.set(Boolean.TRUE);
        try {
            LOG.debug("Received: {}", message);
            receive1(message);
            receiveShortCircuit();
        } finally {
            recursive.remove();
        }
    } else { // short-circuit
        LOG.debug("Received short-circuit: {}", message);
        Queue<Message> ms = shortCircuitMessage.get();
        if (ms == null) {
            ms = new ArrayDeque<Message>();
            shortCircuitMessage.set(ms);
        }
        ms.add(message);
    }
}
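
shortCircuitMessage appears to be a ThreadLocal holding a per-thread buffer, created lazily on first use; ArrayDeque suits this because each queue is only ever touched by its own thread. A minimal sketch of the lazy per-thread queue (names are illustrative, not the Galaxy API):

import java.util.ArrayDeque;
import java.util.Queue;

public class PerThreadBuffer {
    // One queue per thread, so no synchronization is needed on the queue itself.
    private static final ThreadLocal<Queue<String>> BUFFER = new ThreadLocal<>();

    static void enqueue(String message) {
        Queue<String> ms = BUFFER.get();
        if (ms == null) {
            ms = new ArrayDeque<>(); // create lazily on first use
            BUFFER.set(ms);
        }
        ms.add(message);
    }

    public static void main(String[] args) {
        enqueue("hello");
        enqueue("world");
        System.out.println(BUFFER.get()); // [hello, world]
    }
}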

From source file:replicatorg.app.gcode.GCodeParser.java

Queue<DriverCommand> drawArc(Point5d center, Point5d endpoint, boolean clockwise) {
    // System.out.println("Arc from " + current.toString() + " to " +
    // endpoint.toString() + " with center " + center);

    Queue<DriverCommand> points = new LinkedList<DriverCommand>();

    // angle variables.
    double angleA;
    double angleB;
    double angle;
    double radius;
    double length;

    // delta variables.
    double aX;
    double aY;
    double bX;
    double bY;

    // figure out our deltas
    Point5d current = driver.getCurrentPosition(false);
    aX = current.x() - center.x();
    aY = current.y() - center.y();
    bX = endpoint.x() - center.x();
    bY = endpoint.y() - center.y();

    // Clockwise
    if (clockwise) {
        angleA = Math.atan2(bY, bX);
        angleB = Math.atan2(aY, aX);
    }
    // Counterclockwise
    else {
        angleA = Math.atan2(aY, aX);
        angleB = Math.atan2(bY, bX);
    }

    // Make sure angleB is always greater than angleA
    // and if not add 2PI so that it is (this also takes
    // care of the special case of angleA == angleB,
    // ie we want a complete circle)
    if (angleB <= angleA)
        angleB += 2 * Math.PI;
    angle = angleB - angleA;
    // calculate a couple useful things.
    radius = Math.sqrt(aX * aX + aY * aY);
    length = radius * angle;

    // for doing the actual move.
    int steps;
    int s;
    int step;

    // Maximum of either 2.4 times the angle in radians
    // or the length of the curve divided by the curve section constant
    steps = (int) Math.ceil(Math.max(angle * 2.4, length / curveSection));

    // this is the real draw action.
    Point5d newPoint = new Point5d(current);
    double arcStartZ = current.z();
    for (s = 1; s <= steps; s++) {
        // Forwards for CCW, backwards for CW
        if (!clockwise)
            step = s;
        else
            step = steps - s;

        // calculate our waypoint.
        newPoint.setX(center.x() + radius * Math.cos(angleA + angle * ((double) step / steps)));
        newPoint.setY(center.y() + radius * Math.sin(angleA + angle * ((double) step / steps)));
        newPoint.setZ(arcStartZ + (endpoint.z() - arcStartZ) * s / steps);

        // start the move
        points.add(new replicatorg.drivers.commands.QueuePoint(newPoint));
    }

    return points;
}
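
The method returns the interpolated waypoints as a Queue so the caller can feed them to the driver in drawing order. The same geometry can be exercised standalone; a small sketch that queues points along a quarter circle (plain double[] pairs instead of Point5d):

import java.util.LinkedList;
import java.util.Queue;

public class ArcPoints {
    public static void main(String[] args) {
        Queue<double[]> points = new LinkedList<>();
        double radius = 10.0;
        int steps = 8; // segments approximating the arc
        for (int s = 1; s <= steps; s++) {
            double angle = (Math.PI / 2) * s / steps; // quarter circle, CCW
            points.add(new double[] { radius * Math.cos(angle), radius * Math.sin(angle) });
        }
        for (double[] p : points) {
            System.out.printf("%.3f, %.3f%n", p[0], p[1]);
        }
    }
}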

From source file:org.stem.db.compaction.CompactionManager.java

private void performSinglePassCompaction(MountPoint mp) throws IOException {
    // TODO: lock?
    if (!exceedThreshold(mp))
        return;

    // Get FULL files ready for compaction
    Collection<FatFile> scanReadyFFs = mp.findReadyForCompaction();

    if (!exceedCandidatesThreshold(scanReadyFFs))
        return;

    Queue<FatFile> originalFFs = new LinkedList<FatFile>();

    FatFile temporaryFF = null;
    int iterated = 0;
    int omitted = 0;
    for (FatFile currentFF : scanReadyFFs) {
        FFScanner scanner = new FFScanner(currentFF);

        while (scanner.hasNext()) {
            iterated += 1;
            Blob blob = scanner.next();
            String blobKey = Hex.encodeHexString(blob.key());
            if (blob.deleted()) {
                omitted += 1;
                mp.getDataTracker().removeDeletes(blob.key(), blob.size(), currentFF.id);
                logger.info("key 0x{} omitted as deleted", Hex.encodeHexString(blob.key()));
                continue;
            }

            ExtendedBlobDescriptor localDescriptor = new ExtendedBlobDescriptor(blob.key(), blob.size(),
                    mp.uuid, blob.getDescriptor());
            ExtendedBlobDescriptor remoteDescriptor = client.readMeta(blob.key(), mp.uuid);
            if (null == remoteDescriptor) {
                omitted += 1;
                logger.info("key 0x{} omitted as no meta info", Hex.encodeHexString(blob.key()));
                continue;
            }
            // Since we are eventually consistent: if blob.hasInvalidOffset -> continue
            if (!descriptorsAreConsistent(localDescriptor, remoteDescriptor)) {
                logger.info("key 0x{} omitted as inconsistent meta", Hex.encodeHexString(blob.key()));
                continue;
            }

            if (null == temporaryFF) {
                temporaryFF = createTemporaryFF(currentFF.id);
            }

            if (temporaryFF.hasSpaceFor(blob)) {
                BlobDescriptor descriptor = temporaryFF.writeBlob(blob); // TODO: hold descriptors for a subsequent MetaStore updates
                logger.info("key 0x{} is written to temporaryFF", Hex.encodeHexString(blob.key()));
                continue;
            }

            // If we are here then we can't write blob to temporary file because the temporaryFF is full

            // mark temporaryFF FULL
            temporaryFF.writeIndex();
            temporaryFF.writeFullMarker();

            // Replace original FF with temporary FF
            FatFile originalFF = originalFFs.poll();
            replaceFF(originalFF, temporaryFF);
            updateMeta(originalFF, mp);
            markAllOriginalFFsAsBlank(originalFFs, mp);

            temporaryFF.close(); // TODO: this must be strictly synchronized
            FileUtils.forceDelete(new File(temporaryFF.getPath())); // remove file
            temporaryFF = null;

            // Once temporary file exceeded its capacity create another one
            temporaryFF = createTemporaryFF(currentFF.id);
            // And write blob to it
            BlobDescriptor descriptor = temporaryFF.writeBlob(blob); // TODO: hold descriptors for a subsequent MetaStore updates
        }

        originalFFs.add(currentFF); // when compaction finishes, this file will be marked as BLANK
    }

    // All candidates are iterated
    // Write the rest of TMP FatFile to StorageNode as usual and mark iterated FFs as BLANK
    if (null != temporaryFF) {
        FFScanner scanner = new FFScanner(temporaryFF);
        int restBlobs = 0;
        while (scanner.hasNext()) {

            restBlobs += 1;
            Blob blob = scanner.next();
            WriteBlobMessage message = new WriteBlobMessage(mp.uuid, blob.key(), blob.data());// TODO: direct access to fields?
            mp.getDataTracker().remove(blob.key(), blob.size());

            // TODO: too heterogeneous. Should be Blob.Descriptor or something like that
            StorageService.instance.write(message);
            logger.info("key 0x{} moved", Hex.encodeHexString(blob.key()));
        }
        temporaryFF.close();
        FileUtils.forceDelete(new File(temporaryFF.getPath())); // remove file
        temporaryFF = null;
    }

    // Mark the rest of files as BLANK
    markAllOriginalFFsAsBlank(originalFFs, mp);
    if (null != temporaryFF) {
        FileUtils.forceDelete(new File(temporaryFF.getPath())); // remove file
        temporaryFF = null;
    }

    // TODO: delete temporary file
}