Example usage for java.util Collections synchronizedSet

List of usage examples for java.util Collections synchronizedSet

Introduction

On this page you can find example usage for java.util.Collections.synchronizedSet.

Prototype

public static <T> Set<T> synchronizedSet(Set<T> s) 

Document

Returns a synchronized (thread-safe) set backed by the specified set.
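
A minimal sketch of the canonical usage pattern: individual operations on the returned wrapper are thread-safe, but iteration is not atomic, so the Javadoc requires manually synchronizing on the returned set while iterating.

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class SynchronizedSetDemo {
    public static void main(String[] args) {
        Set<String> tags = Collections.synchronizedSet(new HashSet<String>());

        // Individual calls like add/remove/contains are thread-safe as-is.
        tags.add("alpha");
        tags.add("beta");

        // Iteration is NOT atomic: hold the set's own monitor while iterating.
        synchronized (tags) {
            for (String tag : tags) {
                System.out.println(tag);
            }
        }
    }
}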

Usage

From source file:canreg.client.dataentry.Import.java

/**
 * Imports records from a CSV file into the CanReg database.
 *
 * @param task the progress task; may be null
 * @param doc
 * @param map the column-to-database-variable relations
 * @param file the CSV file to import
 * @param server the CanReg server to save the records to
 * @param io the import options
 * @return true if the import succeeded, false otherwise
 * @throws java.sql.SQLException
 * @throws java.rmi.RemoteException
 * @throws canreg.server.database.RecordLockedException
 */
public static boolean importFile(Task<Object, String> task, Document doc,
        List<canreg.client.dataentry.Relation> map, File file, CanRegServerInterface server, ImportOptions io)
        throws SQLException, RemoteException, SecurityException, RecordLockedException {
    //public static boolean importFile(canreg.client.gui.management.CanReg4MigrationInternalFrame.MigrationTask task, Document doc, List<canreg.client.dataentry.Relation> map, File file, CanRegServerInterface server, ImportOptions io) throws SQLException, RemoteException, SecurityException, RecordLockedException {
    boolean success = false;

    Set<String> noNeedToLookAtPatientVariables = new TreeSet<String>();

    noNeedToLookAtPatientVariables.add(io.getPatientIDVariableName());
    noNeedToLookAtPatientVariables.add(io.getPatientRecordIDVariableName());

    String firstNameVariableName = io.getFirstNameVariableName();
    String sexVariableName = io.getSexVariableName();

    CSVParser parser = null;
    CSVFormat format = CSVFormat.DEFAULT.withFirstRecordAsHeader().withDelimiter(io.getSeparator());

    int linesToRead = io.getMaxLines();

    HashMap mpCodes = new HashMap();

    int numberOfLinesRead = 0;

    Map<String, Integer> nameSexTable = server.getNameSexTables();

    try {
        //            FileInputStream fis = new FileInputStream(file);
        //           BufferedReader bsr = new BufferedReader(new InputStreamReader(fis, io.getFileCharset()));

        // Logger.getLogger(Import.class.getName()).log(Level.CONFIG, "Name of the character encoding {0}");
        int numberOfRecordsInFile = canreg.common.Tools.numberOfLinesInFile(file.getAbsolutePath());

        if (linesToRead > 0) {
            linesToRead = Math.min(numberOfRecordsInFile, linesToRead);
        } else {
            linesToRead = numberOfRecordsInFile;
        }

        parser = CSVParser.parse(file, io.getFileCharset(), format);

        for (CSVRecord csvRecord : parser) {
            numberOfLinesRead++;
            // We allow for null tasks...
            boolean needToSavePatientAgain = true;
            int patientDatabaseRecordID = -1;

            if (task != null) {
                task.firePropertyChange("progress", (numberOfLinesRead - 1) * 100 / linesToRead,
                        (numberOfLinesRead) * 100 / linesToRead);
            }

            // Build patient part
            Patient patient = new Patient();
            for (int i = 0; i < map.size(); i++) {
                Relation rel = map.get(i);
                if (rel.getDatabaseTableVariableID() >= 0
                        && rel.getDatabaseTableName().equalsIgnoreCase("patient")) {
                    if (rel.getFileColumnNumber() < csvRecord.size()) {
                        if (rel.getVariableType().equalsIgnoreCase("Number")) {
                            if (csvRecord.get(rel.getFileColumnNumber()).length() > 0) {
                                try {
                                    patient.setVariable(rel.getDatabaseVariableName(),
                                            Integer.parseInt(csvRecord.get(rel.getFileColumnNumber())));
                                } catch (NumberFormatException ex) {
                                    Logger.getLogger(Import.class.getName()).log(Level.SEVERE,
                                            "Number format error in line: " + (numberOfLinesRead + 1 + 1)
                                                    + ". ",
                                            ex);
                                    success = false;
                                }
                            }
                        } else {
                            patient.setVariable(rel.getDatabaseVariableName(),
                                    StringEscapeUtils.unescapeCsv(csvRecord.get(rel.getFileColumnNumber())));
                        }
                    } else {
                        Logger.getLogger(Import.class.getName()).log(Level.INFO,
                                "Something wrong with patient part of line " + numberOfLinesRead + ".",
                                new Exception("Error in line: " + numberOfLinesRead + ". Can't find field: "
                                        + rel.getDatabaseVariableName()));
                    }
                }
            }
            // debugOut(patient.toString());

            // Build tumour part
            Tumour tumour = new Tumour();
            for (canreg.client.dataentry.Relation rel : map) {
                if (rel.getDatabaseTableVariableID() >= 0
                        && rel.getDatabaseTableName().equalsIgnoreCase("tumour")) {
                    if (rel.getFileColumnNumber() < csvRecord.size()) {
                        if (rel.getVariableType().equalsIgnoreCase("Number")) {
                            if (csvRecord.get(rel.getFileColumnNumber()).length() > 0) {
                                try {
                                    tumour.setVariable(rel.getDatabaseVariableName(),
                                            Integer.parseInt(csvRecord.get(rel.getFileColumnNumber())));
                                } catch (NumberFormatException ex) {
                                    Logger.getLogger(Import.class.getName()).log(Level.SEVERE,
                                            "Number format error in line: " + (numberOfLinesRead + 1 + 1)
                                                    + ". ",
                                            ex);
                                    success = false;
                                }
                            }
                        } else {
                            tumour.setVariable(rel.getDatabaseVariableName(),
                                    StringEscapeUtils.unescapeCsv(csvRecord.get(rel.getFileColumnNumber())));
                        }
                    } else {
                        Logger.getLogger(Import.class.getName()).log(Level.INFO,
                                "Something wrong with tumour part of line " + numberOfLinesRead + ".",
                                new Exception("Error in line: " + numberOfLinesRead + ". Can't find field: "
                                        + rel.getDatabaseVariableName()));
                    }
                }
            }

            // Build source part
            Set<Source> sources = Collections.synchronizedSet(new LinkedHashSet<Source>());
            Source source = new Source();
            for (canreg.client.dataentry.Relation rel : map) {
                if (rel.getDatabaseTableVariableID() >= 0
                        && rel.getDatabaseTableName().equalsIgnoreCase(Globals.SOURCE_TABLE_NAME)) {
                    if (rel.getFileColumnNumber() < csvRecord.size()) {
                        if (rel.getVariableType().equalsIgnoreCase("Number")) {
                            if (csvRecord.get(rel.getFileColumnNumber()).length() > 0) {
                                try {
                                    source.setVariable(rel.getDatabaseVariableName(),
                                            Integer.parseInt(csvRecord.get(rel.getFileColumnNumber())));
                                } catch (NumberFormatException ex) {
                                    Logger.getLogger(Import.class.getName()).log(Level.SEVERE,
                                            "Number format error in line: " + (numberOfLinesRead + 1 + 1)
                                                    + ". ",
                                            ex);
                                    success = false;
                                }
                            }
                        } else {
                            source.setVariable(rel.getDatabaseVariableName(),
                                    StringEscapeUtils.unescapeCsv(csvRecord.get(rel.getFileColumnNumber())));
                        }
                    } else {
                        Logger.getLogger(Import.class.getName()).log(Level.INFO,
                                "Something wrong with source part of line " + numberOfLinesRead + ".",
                                new Exception("Error in line: " + numberOfLinesRead + ". Can't find field: "
                                        + rel.getDatabaseVariableName()));
                    }

                }
            }
            sources.add(source);
            tumour.setSources(sources);

            // debugOut(tumour.toString());
            // add patient to the database
            Object patientID = patient.getVariable(io.getPatientIDVariableName());
            Object patientRecordID = patient.getVariable(io.getPatientRecordIDVariableName());

            if (patientID == null) {
                // save the record to get the new patientID;
                patientDatabaseRecordID = server.savePatient(patient);
                patient = (Patient) server.getRecord(patientDatabaseRecordID, Globals.PATIENT_TABLE_NAME,
                        false);
                patientID = patient.getVariable(io.getPatientIDVariableName());
                patientRecordID = patient.getVariable(io.getPatientRecordIDVariableName());
            }

            if (io.isDataFromPreviousCanReg()) {
                // set update date for the patient the same as for the tumour
                Object updateDate = tumour.getVariable(io.getTumourUpdateDateVariableName());
                patient.setVariable(io.getPatientUpdateDateVariableName(), updateDate);

                // Set the patientID the same as the tumourID initially
                // Object tumourSequence = tumour.getVariable(io.getTumourSequenceVariableName());
                Object tumourSequence = "1";

                String tumourSequenceString = tumourSequence + "";
                while (tumourSequenceString.length() < Globals.ADDITIONAL_DIGITS_FOR_PATIENT_RECORD) {
                    tumourSequenceString = "0" + tumourSequenceString;
                }
                patientRecordID = patientID + "" + tumourSequenceString;

                // If this is a multiple primary tumour...
                String mpCodeString = (String) tumour.getVariable(io.getMultiplePrimaryVariableName());
                if (mpCodeString != null && mpCodeString.length() > 0) {
                    patientID = lookUpPatientID(mpCodeString, patientID, mpCodes);

                    // rebuild sequenceNumber
                    Tumour[] tumours = new Tumour[0];
                    try {
                        tumours = CanRegClientApp.getApplication()
                                .getTumourRecordsBasedOnPatientID(patientID + "", false);
                    } catch (DistributedTableDescriptionException | UnknownTableException ex) {
                        Logger.getLogger(Import.class.getName()).log(Level.SEVERE, null, ex);
                    }

                    tumourSequenceString = (tumours.length + 1) + "";
                    while (tumourSequenceString.length() < Globals.ADDITIONAL_DIGITS_FOR_PATIENT_RECORD) {
                        tumourSequenceString = "0" + tumourSequenceString;
                    }

                    patientRecordID = patientID + "" + tumourSequenceString;
                    Patient[] oldPatients = new Patient[0];
                    try {
                        oldPatients = CanRegClientApp.getApplication().getPatientRecordsByID((String) patientID,
                                false);
                    } catch (RemoteException | SecurityException | DistributedTableDescriptionException
                            | RecordLockedException | SQLException | UnknownTableException ex) {
                        Logger.getLogger(Import.class.getName()).log(Level.SEVERE, null, ex);
                    }
                    for (Patient oldPatient : oldPatients) {
                        if (!Tools.newRecordContainsNewInfo(patient, oldPatient,
                                noNeedToLookAtPatientVariables)) {
                            needToSavePatientAgain = false;
                            patient = oldPatient;
                            patientRecordID = oldPatient.getVariable(io.getPatientRecordIDVariableName());
                        }
                    }
                }

                Object tumourID = patientRecordID + "" + tumourSequenceString;
                //
                patient.setVariable(io.getPatientIDVariableName(), patientID);
                tumour.setVariable(io.getTumourIDVariablename(), tumourID);
                // And store the record ID

                patient.setVariable(io.getPatientRecordIDVariableName(), patientRecordID);

                // Set the patient ID number on the tumour
                tumour.setVariable(io.getPatientIDTumourTableVariableName(), patientID);
                tumour.setVariable(io.getPatientRecordIDTumourTableVariableName(), patientRecordID);

                // Set the deprecated flag to 0 - no obsolete records from CR4
                tumour.setVariable(io.getObsoleteTumourFlagVariableName(), "0");
                patient.setVariable(io.getObsoletePatientFlagVariableName(), "0");

            }

            // Set the name in the firstName database
            String sex = (String) patient.getVariable(sexVariableName);
            if (sex != null && sex.length() > 0) {
                Integer sexCode = Integer.parseInt(sex);
                String firstNames = (String) patient.getVariable(firstNameVariableName);
                if (firstNames != null) {
                    String[] firstNamesArray = firstNames.split(" ");
                    for (String firstName : firstNamesArray) {
                        if (firstName != null && firstName.trim().length() > 0) {
                            // here we use the locale specific toUpperCase
                            Integer registeredSexCode = nameSexTable.get(firstName);
                            if (registeredSexCode == null) {
                                NameSexRecord nsr = new NameSexRecord();
                                nsr.setName(firstName);
                                nsr.setSex(sexCode);

                                server.saveNameSexRecord(nsr, false);

                                nameSexTable.put(firstName, sexCode);
                            } else if (!registeredSexCode.equals(sexCode)) {
                                if (registeredSexCode != 9) {
                                    sexCode = 9;
                                    NameSexRecord nsr = new NameSexRecord();
                                    nsr.setName(firstName);
                                    nsr.setSex(sexCode);
                                    server.saveNameSexRecord(nsr, true);
                                    nameSexTable.remove(firstName);
                                    nameSexTable.put(firstName, sexCode);
                                }
                            }
                        }
                    }
                }
            }

            if (needToSavePatientAgain) {
                if (patientDatabaseRecordID > 0) {
                    server.editPatient(patient);
                } else {
                    patientDatabaseRecordID = server.savePatient(patient);
                }
            }
            if (patient != null && tumour != null) {
                String icd10 = (String) tumour.getVariable(io.getICD10VariableName());
                if (icd10 == null || icd10.trim().length() == 0) {
                    ConversionResult[] conversionResult = canreg.client.CanRegClientApp.getApplication()
                            .performConversions(Converter.ConversionName.ICDO3toICD10, patient, tumour);
                    tumour.setVariable(io.getICD10VariableName(), conversionResult[0].getValue());
                }
                String iccc = (String) tumour.getVariable(io.getICCCVariableName());
                if (iccc == null || iccc.trim().length() == 0) {
                    ConversionResult[] conversionResult = canreg.client.CanRegClientApp.getApplication()
                            .performConversions(Converter.ConversionName.ICDO3toICCC3, patient, tumour);
                    tumour.setVariable(io.getICCCVariableName(), conversionResult[0].getValue());
                }
            }
            if (tumour.getVariable(io.getPatientIDTumourTableVariableName()) == null) {
                tumour.setVariable(io.getPatientIDTumourTableVariableName(), patientID);
            }

            if (tumour.getVariable(io.getPatientRecordIDTumourTableVariableName()) == null) {
                tumour.setVariable(io.getPatientRecordIDTumourTableVariableName(), patientRecordID);
            }

            int tumourDatabaseIDNumber = server.saveTumour(tumour);

            if (Thread.interrupted()) {
                //We've been interrupted: no more importing.
                throw new InterruptedException();
            }
        }
        task.firePropertyChange("finished", null, null);
        success = true;
    } catch (IOException | NumberFormatException | IndexOutOfBoundsException | SQLException ex) {
        Logger.getLogger(Import.class.getName()).log(Level.SEVERE,
                "Error in line: " + (numberOfLinesRead + 1 + 1) + ". ", ex);
        success = false;
    } catch (InterruptedException ex) {
        Logger.getLogger(Import.class.getName()).log(Level.INFO,
                "Interrupted on line: " + (numberOfLinesRead + 1) + ". ", ex);
        success = true;
    } finally {
        if (parser != null) {
            try {
                parser.close();
            } catch (IOException ex) {
                Logger.getLogger(Import.class.getName()).log(Level.SEVERE, null, ex);
            }
        }
    }

    return success;
}

From source file:HSqlManager.java

private static void commonInitialize(int bps, Connection connection) throws SQLException, IOException {
    String base = new File("").getAbsolutePath();
    CSV.makeDirectory(new File(base + "/PhageData"));
    INSTANCE = ImportPhagelist.getInstance();
    INSTANCE.parseAllPhages(bps);
    written = true;
    Connection db = connection;
    db.setAutoCommit(false);
    Statement stat = db.createStatement();
    stat.execute("SET FILES LOG FALSE\n");
    PreparedStatement st = db.prepareStatement("Insert INTO Primerdb.Primers"
            + "(Bp,Sequence, CommonP, UniqueP, Picked, Strain, Cluster)" + " Values(?,?,true,false,false,?,?)");
    ResultSet call = stat.executeQuery("Select * From Primerdb.Phages;");
    List<String[]> phages = new ArrayList<>();
    while (call.next()) {
        String[] r = new String[3];
        r[0] = call.getString("Strain");
        r[1] = call.getString("Cluster");
        r[2] = call.getString("Name");
        phages.add(r);
    }
    // Iterate strains sequentially: the shared PreparedStatement and its batch are not thread-safe.
    phages.stream().map(x -> x[0]).collect(Collectors.toSet()).forEach(x -> {
        phages.stream().filter(y -> y[0].equals(x)).map(y -> y[1]).collect(Collectors.toSet()).forEach(z -> {
            try {
                List<String> clustphages = phages.stream().filter(a -> a[0].equals(x) && a[1].equals(z))
                        .map(a -> a[2]).collect(Collectors.toList());
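                // Synchronized wrapper: the parallel retainAll calls below mutate this set from multiple threads.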
                Set<String> primers = Collections.synchronizedSet(CSV
                        .readCSV(base + "/PhageData/" + Integer.toString(bps) + clustphages.get(0) + ".csv"));
                clustphages.remove(0);
                clustphages.parallelStream().forEach(phage -> {
                    primers.retainAll(
                            CSV.readCSV(base + "/PhageData/" + Integer.toString(bps) + phage + ".csv"));
                });
                int i = 0;
                for (CharSequence a : primers) {
                    try {
                        //finish update
                        st.setInt(1, bps);
                        st.setString(2, a.toString());
                        st.setString(3, x);
                        st.setString(4, z);
                        st.addBatch();
                    } catch (SQLException e) {
                        e.printStackTrace();
                        System.out.println("Error occurred at " + x + " " + z);
                    }
                    i++;
                    if (i == 1000) {
                        i = 0;
                        st.executeBatch();
                        db.commit();
                    }
                }
                if (i > 0) {
                    st.executeBatch();
                    db.commit();
                }
            } catch (SQLException e) {
                e.printStackTrace();
                System.out.println("Error occurred at " + x + " " + z);
            }
        });
    });
    stat.execute("SET FILES LOG TRUE\n");
    st.close();
    stat.close();
    System.out.println("Common Updated");
}

From source file:mitm.common.dlp.impl.MimeMessageTextExtractorImpl.java

public void setSkipHeaders(Collection<String> headersToSkip) {
    if (headersToSkip == null) {
        return;
    }

    skipHeaders = Collections.synchronizedSet(new HashSet<String>());

    for (String header : headersToSkip) {
        header = StringUtils.trimToNull(header);

        if (header == null) {
            continue;
        }

        skipHeaders.add(header.toLowerCase());
    }

    logger.info("Skip headers: " + StringUtils.join(skipHeaders, ","));
}

From source file:org.apache.hadoop.hbase.master.SplitLogManager.java

/**
 * It's OK to construct this object even when region-servers are not online. It
 * does look up the orphan tasks in ZK but does not block waiting for them
 * to be done.
 *
 * @param zkw the ZK watcher
 * @param conf the HBase configuration
 * @param stopper the stoppable in case anything is wrong
 * @param master the master services
 * @param serverName the master server name
 * @param tf task finisher
 */
public SplitLogManager(ZooKeeperWatcher zkw, Configuration conf, Stoppable stopper, MasterServices master,
        ServerName serverName, TaskFinisher tf) {
    super(zkw);
    this.taskFinisher = tf;
    this.conf = conf;
    this.stopper = stopper;
    this.master = master;
    this.zkretries = conf.getLong("hbase.splitlog.zk.retries", DEFAULT_ZK_RETRIES);
    this.resubmit_threshold = conf.getLong("hbase.splitlog.max.resubmit", DEFAULT_MAX_RESUBMIT);
    this.timeout = conf.getInt("hbase.splitlog.manager.timeout", DEFAULT_TIMEOUT);
    this.unassignedTimeout = conf.getInt("hbase.splitlog.manager.unassigned.timeout",
            DEFAULT_UNASSIGNED_TIMEOUT);
    this.distributedLogReplay = HLogSplitter.isDistributedLogReplay(conf);
    LOG.info("Timeout=" + timeout + ", unassigned timeout=" + unassignedTimeout + ", distributedLogReplay="
            + this.distributedLogReplay);

    this.serverName = serverName;
    this.timeoutMonitor = new TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 1000),
            stopper);

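    // Paths whose ZooKeeper deletion failed; entries may be added from multiple callback threads, hence the synchronized set.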
    this.failedDeletions = Collections.synchronizedSet(new HashSet<String>());

    Threads.setDaemonThreadRunning(timeoutMonitor.getThread(), serverName + ".splitLogManagerTimeoutMonitor");
    // Watcher can be null during tests with Mock'd servers.
    if (this.watcher != null) {
        this.watcher.registerListener(this);
        lookForOrphans();
    }
}

From source file:org.deri.iris.performance.IRISPerformanceTest.java

/**
 * Executes a set of datalog queries using the given configuration
 * @param queries The set of Datalog queries
 * @param config The configuration for the test suite
 * @return a list of IRISTestCase objects with the result of the test campaign
 */
public List<IRISTestCase> executeTests(final List<String> queries, final TestConfiguration config) {

    // Get the logger
    LOGGER = Logger.getLogger(IRISPerformanceTest.class.getName());

    // Construct a valid IRIS+- program using the queries and the configuration file
    String program = "";

    // add the query and its IRIS execution command to the program
    program += "/// Query ///\n";
    for (final String s : queries) {
        program += s + "\n";
        program += "?-" + s.substring(0, s.indexOf(":-")) + ".\n";
    }
    program += "\n";

    // If reasoning is enabled, add the TBOX to the program
    program += "/// TBox ///\n";
    if (config.getReasoning()) {
        String tboxPath = config.getTestHomePath() + "/" + config.getDataset() + "/tbox";
        if (config.getExpressiveness().compareTo("RDFS") == 0) {
            tboxPath += "/rdfs";
        }
        if (config.getExpressiveness().compareTo("OWL-QL") == 0) {
            tboxPath += "/owlql";
        }
        final String tbox = loadFile(tboxPath + "/" + config.getDataset() + ".dtg");
        program += tbox + "\n";
    } else {
        program += "/// EMPTY ///\n";
    }

    // Add the SBox
    program += "/// SBox ///\n";
    String sboxPath = config.getTestHomePath() + "/" + config.getDataset() + "/sbox";
    if (config.getExpressiveness().compareTo("RDFS") == 0) {
        sboxPath += "/rdfs";
    }
    if (config.getExpressiveness().compareTo("OWL-QL") == 0) {
        sboxPath += "/owlql";
    }
    final String sbox = loadFile(sboxPath + "/" + config.getDataset() + ".dtg");
    program += sbox + "\n\n";

    LOGGER.debug(program);

    // Get the parser
    final Parser parser = new Parser();

    // Parse the program
    try {
        parser.parse(program);
    } catch (final ParserException e) {
        e.printStackTrace();
    }

    // Get the TGDs from the set of rules
    final List<IRule> tgds = RewritingUtils.getTGDs(parser.getRules(), parser.getQueries());

    // Get the query bodies
    final List<IRule> bodies = new ArrayList<IRule>(parser.getRules());
    final List<IRule> datalogQueries = RewritingUtils.getQueries(bodies, parser.getQueries());

    // Get the constraints from the set of rules
    final Set<IRule> constraints = RewritingUtils.getConstraints(parser.getRules(), parser.getQueries());

    // Get the SBox rules from the set of rules
    final List<IRule> storageRules = RewritingUtils.getSBoxRules(parser.getRules(), parser.getQueries());

    // Check that the TBox is FO-reducible
    IRuleSafetyProcessor ruleProc = new LinearReducibleRuleSafetyProcessor();
    try {
        ruleProc.process(tgds);
    } catch (final RuleUnsafeException e) {
        e.printStackTrace();
    }

    // Check that the SBox rules are Safe Datalog
    ruleProc = new StandardRuleSafetyProcessor();
    try {
        ruleProc.process(storageRules);
    } catch (final RuleUnsafeException e) {
        e.printStackTrace();
    }

    // Connect to the storage
    StorageManager.getInstance();
    try {
        StorageManager.connect(config.getDBVendor(), config.getDBProtocol(), config.getDBHost(),
                config.getDBPort(), config.getDBName(), config.getSchemaName(), config.getDBUsername(),
                config.getDBPassword());
    } catch (final SQLException e) {
        e.printStackTrace();
    }

    // Evaluate the queries
    final List<IRISTestCase> output = new LinkedList<IRISTestCase>();
    for (final IQuery q : parser.getQueries()) {
        // Generate a new test-case
        final IRISTestCase currentTest = new IRISTestCase();
        int nTask = -10;

        // Get the Factories
        final IRelationFactory rf = new RelationFactory();

        // Get the Rewriter Engine
        final ParallelRewriter rewriter = new ParallelRewriter(DecompositionStrategy.DECOMPOSE,
                RewritingLanguage.UCQ, SubCheckStrategy.TAIL, NCCheck.TAIL);

        // Get and log the rule corresponding to the query
        final IRule ruleQuery = getRuleQuery(q, datalogQueries);
        currentTest.setQuery(ruleQuery);

        final Map<Pair<IPosition, IPosition>, Set<List<IRule>>> deps = DepGraphUtils
                .computePositionDependencyGraph(tgds);

        final Set<Expressivity> exprs = RewritingUtils.getExpressivity(tgds);

        // Compute and log the FO-Rewriting
        LOGGER.info("Computing TBox Rewriting");
        float duration = -System.nanoTime();
        final Set<IRule> rewriting = rewriter.getRewriting(ruleQuery, tgds, constraints, deps, exprs);
        duration = ((duration + System.nanoTime()) / 1000000);
        currentTest.getTasks()
                .add(new Task(nTask++, "TBox Rewriting", duration, 0, 0, "ms", rewriting.toString()));
        LOGGER.info("done.");
        int count = 0;
        for (final IRule r : rewriting) {
            LOGGER.debug("(Qr" + ++count + ")" + r);
        }

        // Produce the rewriting according to the Nyaya Data Model
        final IQueryRewriter ndmRewriter = new NDMRewriter(storageRules);

        // Create a buffer for the output
        final IRelation outRelation = rf.createRelation();

        // Get the SBox rewriting
        try {
            LOGGER.info("Computing SBox Rewriting");
            final Set<IRule> sboxRewriting = new LinkedHashSet<IRule>();
            duration = -System.nanoTime();
            for (final IRule pr : rewriting) {
                sboxRewriting.addAll(ndmRewriter.getRewriting(pr));
            }
            duration = ((duration + System.nanoTime()) / 1000000);
            currentTest.getTasks()
                    .add(new Task(nTask++, "SBox Rewriting", duration, 0, 0, "ms", sboxRewriting.toString()));
            LOGGER.info("done.");
            count = 0;
            for (final IRule n : sboxRewriting) {
                LOGGER.debug("(Qn" + ++count + ")" + n);
            }

            // Produce the SQL rewriting for each query in the program
            final SQLRewriter sqlRewriter = new SQLRewriter(sboxRewriting);

            // Get the SQL rewriting as Union of Conjunctive Queries (UCQ)
            LOGGER.info("Computing SQL Rewriting");
            duration = -System.nanoTime();
            final List<String> ucqSQLRewriting = new LinkedList<String>();
            ucqSQLRewriting.add(sqlRewriter.getUCQSQLRewriting("", 10000, 0));
            duration = ((duration + System.nanoTime()) / 1000000);
            currentTest.getTasks()
                    .add(new Task(nTask++, "SQL Rewriting", duration, 0, 0, "ms", ucqSQLRewriting.toString()));
            LOGGER.info("done.");
            count = 0;
            for (final String s : ucqSQLRewriting) {
                LOGGER.debug("(Qs" + ++count + ") " + s);
            }

            // Execute the UCQ
            LOGGER.info("Executing SQL");

            // float ansConstructOverall = 0;

            // The synchronized structure to store the output tuples
            final Set<ITuple> result = Collections.synchronizedSet(new HashSet<ITuple>());

            /*
             * Prepare a set of runnable objects representing each partial rewriting to be executed in parallel
             */
            final List<RunnableQuery> rql = new LinkedList<RunnableQuery>();
            for (final String cq : ucqSQLRewriting) {
                // Construct a Runnable Query
                rql.add(new RunnableQuery(cq, result, currentTest.getTasks()));
            }

            // Get an executor that allows a number of parallel threads equals to the number of available processors
            // ExecutorService queryExecutor =
            // Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors()*5);
            final ExecutorService queryExecutor = Executors.newSingleThreadScheduledExecutor();

            // Execute all the partial rewritings in parallel
            float ucqExecOverall = -System.nanoTime();
            for (final RunnableQuery rq : rql) {
                queryExecutor.execute(rq);
            }
            queryExecutor.shutdown();
            if (queryExecutor.awaitTermination(1, TimeUnit.DAYS)) {
                LOGGER.info("done.");
            } else {
                throw new InterruptedException("Timeout occurred");
            }
            ucqExecOverall = ((ucqExecOverall + System.nanoTime()) / 1000000);
            StorageManager.disconnect();

            // begin added section
            float minTime = System.nanoTime();
            float maxTime = 0;
            float avgTime = 0;
            int n = 0;
            for (final Task t : currentTest.getTasks()) {
                if (t.getName().contains("Execution")) {
                    avgTime += (t.getFinalTime() - t.getInitTime()) / 1000000;
                    n++;
                    if (t.getFinalTime() > maxTime) {
                        maxTime = t.getFinalTime();
                    }
                    if (t.getInitTime() < minTime) {
                        minTime = t.getInitTime();
                    }
                }
            }
            ucqExecOverall = (maxTime - minTime) / 1000000;
            // end added section

            currentTest.getTasks()
                    .add(new Task(nTask++, "UCQ Overall Execution Time", ucqExecOverall, 0, 0, "ms"));

            // begin added section
            avgTime = (avgTime / n);
            System.out.println(n);
            currentTest.getTasks().add(new Task(nTask++, "UCQ Average Execution Time", avgTime, 0, 0, "ms"));
            Collections.sort(currentTest.getTasks());
            // end added section

            for (final ITuple t : result) {
                outRelation.add(t);
            }

        } catch (final SQLException | EvaluationException | InterruptedException e) {
            e.printStackTrace();
        }
        currentTest.setAnswer(outRelation);
        output.add(currentTest);
    }
    return (output);
}

From source file:org.fao.geonet.kernel.SelectionManager.java

/**
 * <p> Gets selection for given element type. </p>
 *
 * @param type The type of selected element handled in session
 * @return Set<String>
 */
public Set<String> getSelection(String type) {
    Set<String> sel = selections.get(type);
    if (sel == null) {
        Set<String> MDSelection = Collections.synchronizedSet(new HashSet<String>(0));
        selections.put(type, MDSelection);
    }
    return selections.get(type);
}
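
A note on this pattern: the null check and the put form a check-then-act sequence, so two threads can race between the get and the put (the same lazy initialization appears in the ConfigurationLoader example below). If selections is a ConcurrentMap, which is an assumption since its declaration is not shown here, computeIfAbsent makes the initialization atomic. A minimal sketch:

public Set<String> getSelection(String type) {
    // Atomically create the synchronized set on first access
    // (assumes selections is a java.util.concurrent.ConcurrentMap).
    return selections.computeIfAbsent(type,
            t -> Collections.synchronizedSet(new HashSet<String>(0)));
}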

From source file:com.all.i18n.DefaultMessages.java

public DefaultMessages() {
    internationalizableSet = Collections.synchronizedSet(new HashSet<Internationalizable>());
}

From source file:com.pinterest.terrapin.client.TerrapinClient.java

/**
 * Issues a call for retrieving a response for multiple keys. Does appropriate batching.
 * Uses a 2-pass approach:
 * 1) Pass 1 - Batches the keys by partition, issues the requests to each relevant replica
 *             and tracks failed keys and replicas with failures.
 * 2) Pass 2 - Batches the failed keys from pass 1 by host, issues the request to
 *             relevant replicas, excluding the replicas with failures in pass 1.
 *
 * TODO(varun): Think about using speculative execution for the single key lookup use case.
 *              It could provide better latency.
 *
 * @param fileSet The file set against which the request should be issued.
 * @param keyList The list of keys.
 * @param retry Whether to perform a second try on the cluster.
 * @return A future wrapping a TerrapinResponse object.
 */
protected Future<TerrapinResponse> getManyHelper(final String fileSet, final Set<ByteBuffer> keyList,
        final boolean retry) {
    Pair<FileSetInfo, ViewInfo> pair = null;
    try {
        pair = fileSetViewManager.getFileSetViewInfo(fileSet);
    } catch (TerrapinGetException e) {
        return Future.exception(e);
    }
    final FileSetInfo info = pair.getLeft();
    final ViewInfo viewInfo = pair.getRight();

    // This runs in two passes. In the first pass, we send a query to all the hosts
    // containing the keys in @keyList. We collect the list of failed keys in the first
    // pass and also the set of hosts which had errors. We send out a second query
    // with the failed keys to the respective set of hosts and attempt to exclude
    // the initial set of failed hosts.
    final Set<String> failedHostsFirstPass = Collections.synchronizedSet(Sets.<String>newHashSet());
    final Map<ByteBuffer, TerrapinSingleResponse> failedKeysFirstPass = Collections
            .synchronizedMap(Maps.<ByteBuffer, TerrapinSingleResponse>newHashMap());
    Map<String, Future<TerrapinResponse>> responseFutureMapFirstPass = getManyHelper(fileSet,
            info.servingInfo.helixResource, info.servingInfo.numPartitions, viewInfo, keyList,
            info.servingInfo.partitionerType, failedHostsFirstPass, failedKeysFirstPass,
            (Set) Sets.newHashSet(), 1);
    List<Future<TerrapinResponse>> responseFutureListFirstPass = Lists
            .newArrayListWithCapacity(responseFutureMapFirstPass.size());
    for (Map.Entry<String, Future<TerrapinResponse>> entry : responseFutureMapFirstPass.entrySet()) {
        responseFutureListFirstPass.add(entry.getValue());
    }
    // For the failed keys.
    return Stats.timeFutureMillis(statsPrefix + fileSet + "-latency",
            Future.<TerrapinResponse>collect(responseFutureListFirstPass)
                    .flatMap(new Function<List<TerrapinResponse>, Future<TerrapinResponse>>() {
                        @Override
                        public Future<TerrapinResponse> apply(final List<TerrapinResponse> responseListPass1) {
                            // At this point, we have a black list of hosts and we also have a list of keys
                            // which did not succeed in the first run.
                            // If the first pass fully succeeded or we have explicitly disabled retries,
                            // then don't perform a retry.
                            if (failedKeysFirstPass.isEmpty() || !retry) {
                                TerrapinResponse aggResponse = new TerrapinResponse();
                                aggResponse
                                        .setResponseMap((Map) Maps.newHashMapWithExpectedSize(keyList.size()));
                                for (TerrapinResponse response : responseListPass1) {
                                    aggResponse.getResponseMap().putAll(response.getResponseMap());
                                }
                                aggResponse.getResponseMap().putAll(failedKeysFirstPass);
                                return Future.value(aggResponse);
                            }
                            // Otherwise, we fire off a second set of futures.
                            Map<String, Future<TerrapinResponse>> responseFutureMapSecondPass = getManyHelper(
                                    fileSet, info.servingInfo.helixResource, info.servingInfo.numPartitions,
                                    viewInfo, failedKeysFirstPass.keySet(), info.servingInfo.partitionerType,
                                    null, null, failedHostsFirstPass, 2);
                            List<Future<TerrapinResponse>> responseFutureListSecondPass = Lists
                                    .newArrayListWithCapacity(responseFutureMapSecondPass.size());
                            responseFutureListSecondPass.addAll(responseFutureMapSecondPass.values());
                            return Future.collect(responseFutureListSecondPass)
                                    .map(new Function<List<TerrapinResponse>, TerrapinResponse>() {
                                        @Override
                                        public TerrapinResponse apply(
                                                List<TerrapinResponse> responseListPass2) {
                                            // The two responses (first pass and second pass) should be disjoint
                                            // in the set of keys they return, so we can safely merge them.
                                            TerrapinResponse aggResponse = new TerrapinResponse();
                                            aggResponse.setResponseMap(
                                                    (Map) Maps.newHashMapWithExpectedSize(keyList.size()));
                                            for (TerrapinResponse response : responseListPass1) {
                                                aggResponse.getResponseMap().putAll(response.getResponseMap());
                                            }
                                            for (TerrapinResponse response : responseListPass2) {
                                                aggResponse.getResponseMap().putAll(response.getResponseMap());
                                            }
                                            return aggResponse;
                                        }
                                    });
                        }
                    }));
}

From source file:org.opennms.ng.services.capsd.SuspectEventProcessor.java

/**
 * Responsible for setting the Set used to track suspect scans that
 * are already enqueued for processing.  Should be called once by Capsd
 * at startup.
 *
 * @param queuedSuspectTracker a {@link java.util.Set} object.
 */
public synchronized void setQueuedSuspectsTracker(Set<String> queuedSuspectTracker) {
    m_queuedSuspectTracker = Collections.synchronizedSet(queuedSuspectTracker);
}
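
A hypothetical sketch (not from the Capsd source) of how an enqueue path might consult such a tracker: Set.add returns false when the element is already present, so on a synchronized set it acts as an atomic test-and-insert. The names ipAddress and scheduleSuspectScan below are made up for illustration.

// Hypothetical caller: enqueue a suspect scan only once per address.
String ipAddress = "192.0.2.10";
if (m_queuedSuspectTracker.add(ipAddress)) {
    // add() returned true, so this address was not already tracked.
    scheduleSuspectScan(ipAddress); // hypothetical helper
}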

From source file:org.demoiselle.internal.implementation.ConfigurationLoader.java

private Collection<Class<?>> getCache() {
    if (this.extractorCache == null) {
        this.extractorCache = Collections.synchronizedSet(new HashSet<>());
    }

    return this.extractorCache;
}