List of usage examples for java.io.File.canWrite()
public boolean canWrite()
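The examples below all follow the same validate-before-use pattern: confirm a path exists, is a directory, and reports as writable before touching it. Keep in mind that canWrite() returns false for nonexistent files, that on some platforms (notably older JVMs on Windows) it reflects only the basic read-only attribute rather than full ACLs, and that the result can go stale between the check and the actual write, so I/O code should still handle IOException. A minimal self-contained sketch of the pattern (the class name is invented, and java.io.tmpdir is used only as a conveniently writable target):

import java.io.File;

public class CanWriteDemo {

    public static void main(String[] args) {
        // Illustrative target: the JVM's temp directory (usually writable).
        File dir = new File(System.getProperty("java.io.tmpdir"));

        // The validate-before-use pattern seen throughout the examples below:
        // confirm the path is a directory and reports as writable.
        if (!dir.isDirectory()) {
            throw new IllegalArgumentException("Not a directory: " + dir);
        }
        if (!dir.canWrite()) {
            throw new IllegalArgumentException("Not writable: " + dir);
        }
        System.out.println("Safe to create files under " + dir);
    }
}

On Java 7 and later, java.nio.file.Files.isWritable(dir.toPath()) performs the same test through the NIO access-control machinery and is often the more reliable choice.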
From source file:net.pandoragames.far.ui.FARConfig.java
/**
 * Loads the base and the backup directory from a Properties object.
 * The property names are defined as constants of this class.
 * The property values are supposed to be valid paths, but both
 * directories are checked for existence. Inheriting classes may override
 * this method to load additional properties.
 * @param properties holding properties for base and backup directory.
 */
protected void loadFromProperties(Properties properties) {
    // base directory
    File baseDir = getValidDirectory(properties.getProperty(PPT_BASEDIR));
    if (baseDir != null) {
        setBaseDirectory(baseDir);
    } else {
        logger.info("Base directory " + properties.getProperty(PPT_BASEDIR)
                + " could not be restored. Using fall back: " + lastBaseDir.getPath());
    }
    // backup directory
    File bckpDir = getValidDirectory(properties.getProperty(PPT_BCKPDIR));
    if (bckpDir != null) {
        if (bckpDir.canWrite()) {
            if (bckpDir.equals(new File(System.getProperty("java.io.tmpdir")))) {
                bckpDir = makeDefaultBackupDir();
            }
            setBackupDirectory(bckpDir);
        } else {
            defaultBackupDirectory = makeDefaultBackupDir();
            logger.info("Backup directory " + properties.getProperty(PPT_BCKPDIR)
                    + " is not writable. Using fall back: " + defaultBackupDirectory.getPath());
        }
    } else {
        defaultBackupDirectory = makeDefaultBackupDir();
        if (!defaultBackupDirectory.getPath().equals(properties.getProperty(PPT_BCKPDIR))) {
            logger.info("Backup directory " + properties.getProperty(PPT_BCKPDIR)
                    + " could not be restored. Using fall back: " + defaultBackupDirectory.getPath());
        }
    }
    // list impex directory
    setFileListExportDirectory(getValidDirectory(properties.getProperty(PPT_LISTEXDIR)));
    // group reference indicator
    String grefind = properties.getProperty(PPT_GROUPREF);
    if (grefind == null || !(grefind.equals(GROUPREFINDICATORLIST[0])
            || grefind.equals(GROUPREFINDICATORLIST[1])
            || grefind.equals(GROUPREFINDICATORLIST[2]))) {
        grefind = GROUPREFINDICATORLIST[0];
    }
    groupReferenceIndicator = grefind.charAt(0);
    // application version
    String versionNumber = properties.getProperty(PPT_VERSION);
    String currentVersion = getCurrentApplicationVersion();
    if (currentVersion != null && !currentVersion.equals(versionNumber)) {
        versionHasChanged = true;
    }
    processBinary = Boolean.parseBoolean(properties.getProperty(PPT_BINARY));
    if (properties.containsKey(PPT_UNKNOWN_AS_BINARY)) {
        FileType.TREAT_UNKNOWN_AS_BINARY = Boolean.parseBoolean(properties.getProperty(PPT_UNKNOWN_AS_BINARY));
    }
}
From source file:com.baomidou.framework.upload.UploadMultipartRequest.java
@SuppressWarnings("unchecked")
public void upload() throws IOException {
    // Sanity check values
    if (request == null) {
        throw new IllegalArgumentException("request cannot be null");
    }
    if (saveDirectory == null) {
        throw new IllegalArgumentException("saveDirectory cannot be null");
    }
    if (maxPostSize <= 0) {
        throw new IllegalArgumentException("maxPostSize must be positive");
    }
    // Save the dir
    File dir = new File(saveDirectory);
    // Check saveDirectory is truly a directory
    if (!dir.isDirectory()) {
        throw new IllegalArgumentException("Not a directory: " + saveDirectory);
    }
    // Check saveDirectory is writable
    if (!dir.canWrite()) {
        throw new IllegalArgumentException("Not writable: " + saveDirectory);
    }
    // Parse the incoming multipart, storing files in the dir provided,
    // and populate the meta objects which describe what we found
    MultipartParser parser = new MultipartParser(request, maxPostSize, true, true, getCharset());
    // Collects the plain form parameters seen alongside the file parts
    HashMap<String, String> paramParts = new HashMap<String, String>();
    Part part;
    while ((part = parser.readNextPart()) != null) {
        String name = part.getName();
        if (name == null) {
            throw new IOException("Malformed input: parameter name missing (known Opera 7 bug)");
        }
        if (part.isFile()) {
            // It's a file part
            FilePart filePart = (FilePart) part;
            String fileName = filePart.getFileName();
            if (fileName != null) {
                //filePart.setRenamePolicy(policy); // null policy is OK
                // The part actually contained a file
                UploadFile cfi = writeTo(dir, fileName, getFileRenamePolicy(), filePart);
                cfi.setDir(dir.toString());
                cfi.setOriginal(fileName);
                cfi.setParamParts(paramParts);
                files.put(name, cfi);
            } else {
                // The field did not contain a file
                files.put(name, new UploadFile());
            }
        } else if (part.isParam()) {
            ParamPart paramPart = (ParamPart) part;
            paramParts.put(paramPart.getName(), paramPart.getStringValue());
        }
    }
}
From source file:edu.kit.dama.ui.admin.wizard.AccessPointCreation.java
@Override
public boolean validateSettings() {
    if (addWebDavAccessPoint.getValue()) {
        if (!UIUtils7.validate(getMainLayout())) {
            return false;
        }
        if (createAdminLogin.getValue()) {
            if (adminPassword.getValue().equals(adminPasswordCheck.getValue())) {
                adminPasswordCheck.setComponentError(null);
            } else {
                adminPasswordCheck.setComponentError(new UserError("Passwords are different."));
                return false;
            }
        }
        try {
            new URL(baseUrl.getValue());
            baseUrl.setComponentError(null);
        } catch (MalformedURLException ex) {
            baseUrl.setComponentError(new UserError("Not a valid URL."));
            return false;
        }
        File f = new File(basePath.getValue());
        if (!f.exists()) {
            basePath.setComponentError(new UserError("Base path does not exist."));
            return false;
        }
        if (!f.isDirectory()) {
            basePath.setComponentError(new UserError("Base path is not a directory."));
            return false;
        }
        if (!f.canRead() || !f.canWrite()) {
            basePath.setComponentError(new UserError("Base path is not readable and/or writeable."));
            return false;
        }
        File usersDir = new File(f, "USERS");
        if (!usersDir.exists()) {
            basePath.setComponentError(new UserError(
                    "Base path seems not to be the local WebDAV folder. No 'USERS' subdirectory found."));
            return false;
        }
    }
    return true;
}
From source file:azkaban.app.JobManager.java
public void deployJob(String jobName, String path, Props props) {
    File jobPath = new File(_jobDirs.get(0), path);
    if (jobPath.exists()) {
        if (!jobPath.isDirectory() || !jobPath.canWrite())
            throw new JobDeploymentException(
                    jobPath + " is not a directory or does not have write permission.");
    } else {
        logger.debug("Creating job directory " + jobPath);
        jobPath.mkdirs();
    }

    // @TODO validate and prevent addition of changes
    File jobFile = new File(jobPath, jobName + ".job");
    jobFile.delete();
    try {
        props.storeLocal(jobFile);
    } catch (IOException e) {
        throw new RuntimeException("Error deploying job " + jobName);
    }
    logger.info("Deployed job " + jobName + " to path " + path);
    updateFlowManager();
}
From source file:cz.etnetera.reesmo.writer.storage.FileSystemStorage.java
public FileSystemStorage(File baseDir) throws StorageException {
    if (baseDir == null)
        throw new StorageException("Base directory is null");
    if (!baseDir.exists()) {
        if (baseDir.mkdirs()) {
            getLogger().info("Base directory was created: " + baseDir);
        } else {
            throw new StorageException("Base directory does not exist and cannot be created: " + baseDir);
        }
    }
    if (!baseDir.canWrite())
        throw new StorageException("Base directory is not writeable: " + baseDir);
    this.baseDir = baseDir;
}
From source file:com.ikanow.infinit.e.application.handlers.polls.LogstashSourceDeletionPollHandler.java
@Override
public void performPoll() {
    boolean isSlave = false;

    if (null == LOGSTASH_CONFIG) { // (static memory not yet initialized)
        try {
            Thread.sleep(1000); // (extend the sleep time a bit)
        } catch (Exception e) {
        }
        return;
    }

    File logstashDirectory = new File(LOGSTASH_CONFIG);
    String slaveHostname = null;
    if (!logstashDirectory.isDirectory() || !logstashDirectory.canRead() || !logstashDirectory.canWrite()) {
        logstashDirectory = new File(LOGSTASH_CONFIG_DISTRIBUTED);
        isSlave = true;
        if (!logstashDirectory.isDirectory() || !logstashDirectory.canRead() || !logstashDirectory.canWrite()) {
            try {
                Thread.sleep(10000); // (extend the sleep time a bit)
            } catch (Exception e) {
            }
            return;
        }
        try {
            slaveHostname = java.net.InetAddress.getLocalHost().getHostName();
        } catch (Exception e) {
            // too complex if we don't have a hostname, just return
            return;
        }
    }

    // Deletion of distributed sources requires some co-ordination, we'll do it in master
    if (isSlave) {
        // register my existence
        BasicDBObject existence = new BasicDBObject("_id", slaveHostname);
        existence.put("ping", new Date());
        DbManager.getIngest().getLogHarvesterSlaves().save(existence);
    } //TESTED (by hand)
    else {
        // MASTER: clear out old slaves
        // (if it hasn't pinged for more than 30 minutes)
        long now = new Date().getTime();
        BasicDBObject deadSlaveQuery = new BasicDBObject("ping",
                new BasicDBObject(DbManager.lt_, new Date(now - 1000L * 1800L)));
        boolean found = false;
        DBCursor dbc = DbManager.getIngest().getLogHarvesterSlaves().find(deadSlaveQuery);
        while (dbc.hasNext()) {
            BasicDBObject deadSlave = (BasicDBObject) dbc.next();
            found = true;
            String hostname = deadSlave.getString("_id");
            if (null != hostname) {
                DbManager.getIngest().getLogHarvesterQ().remove(new BasicDBObject("forSlave", hostname));
                _logger.info("Removing unresponsive slave host=" + hostname);
            }
        }
        if (found) {
            DbManager.getIngest().getLogHarvesterSlaves().remove(deadSlaveQuery);
        }
    } //TESTED (by hand)

    // Read delete elements from the Q...
    if (null == _logHarvesterQ) {
        _logHarvesterQ = new MongoQueue(DbManager.getIngest().getLogHarvesterQ().getDB().getName(),
                DbManager.getIngest().getLogHarvesterQ().getName());
    }
    BasicDBObject queueQuery = new BasicDBObject("deleteOnlyCommunityId",
            new BasicDBObject(DbManager.exists_, true));
    if (!isSlave) { // only get master messages
        queueQuery.put("forSlave", new BasicDBObject(DbManager.exists_, false));
    } else { // only get messages intended for me
        queueQuery.put("forSlave", slaveHostname);
    }
    DBObject nextElement = _logHarvesterQ.pop(queueQuery);
    LinkedList<TestLogstashExtractorPojo> secondaryQueue = new LinkedList<TestLogstashExtractorPojo>();
    LinkedList<String> deleteAfterRestartQueue = new LinkedList<String>();
    boolean deletedSources = false;
    boolean deletedSinceDbs = false;

    while (nextElement != null) {
        //DEBUG
        //System.out.println("HOST: " + slaveHostname + ": RECEIVED: " + nextElement.toString() + " FROM " + queueQuery);
        _logger.info("host=" + slaveHostname + " received=" + nextElement.toString() + " from=" + queueQuery);

        TestLogstashExtractorPojo testInfo = TestLogstashExtractorPojo.fromDb(nextElement,
                TestLogstashExtractorPojo.class);
        if (null == testInfo.sourceKey) {
            // need a sourceKey parameter... fetch the next element before skipping,
            // otherwise the loop would spin on the same element forever
            nextElement = _logHarvesterQ.pop(queueQuery);
            continue;
        }
        if (!isSlave) { // slaves don't need to delete anything from the index, only files
            secondaryQueue.add(testInfo);
        } //(end if master)

        try {
            // First off - need to remove the conf file and restart logstash if we're actually deleting this...
            boolean deletedSource = false;
            if ((null == testInfo.deleteDocsOnly) || !testInfo.deleteDocsOnly) { // (default = delete entire source)
                deletedSources = true;
                deletedSource = true;

                String fileToDelete = new StringBuffer(LOGSTASH_CONFIG).append(testInfo._id.toString())
                        .append(LOGSTASH_CONFIG_EXTENSION).toString();

                boolean deleted = false;
                try {
                    deleted = new File(fileToDelete).delete();
                } catch (Exception e) {
                }
                //DEBUG
                //System.out.println("DELETED CONF FILE" + fileToDelete + " ? " + deleted);
                _logger.info("delete conf_file=" + fileToDelete + " success=" + deleted);
            } //TESTED (docs-only + source deletion)

            // If _not_ deleting the source, then do delete the sincedb file
            // (else let it get cleaned up separately - minimizes race conditions where the source starts ingesting again)
            String fileToDelete = new StringBuffer(LOGSTASH_WD).append(".sincedb_")
                    .append(testInfo._id.toString()).toString();
            if (!deletedSource) {
                boolean deleted = false;
                try {
                    deleted = new File(fileToDelete).delete();
                    deletedSinceDbs |= deleted;
                } catch (Exception e) {
                }
                //DEBUG
                //System.out.println("DELETED SINCEDB" + fileToDelete + " ? " + deleted);
                _logger.info("primary delete sincedb_file=" + fileToDelete + " success=" + deleted);
            } else {
                deleteAfterRestartQueue.add(fileToDelete);
            } //TESTED (primary + secondary deletes)
        } catch (Exception e) {
            //e.printStackTrace();
        } // probably just doesn't exist

        // Get next element and carry on
        nextElement = _logHarvesterQ.pop(queueQuery);
    } //TESTED (end first loop over elements to delete)

    if (deletedSources || deletedSinceDbs) {
        // this file actually existed - need to restart the logstash unfortunately
        _logger.info("Restarting logstash, and sleeping until logstash is restarted");
        try {
            new File(LOGSTASH_RESTART_FILE).createNewFile();
            for (int i = 0; i < 12; ++i) {
                Thread.sleep(10L * 1000L);
                if (!new File(LOGSTASH_RESTART_FILE).exists()) {
                    Thread.sleep(5L * 1000L); // (extra wait for it to shut down)
                    break; // (early exit)
                }
            }
        } catch (Exception e) {
        }
    } //TESTED (from doc deletion and from src deletion)

    for (String fileToDelete : deleteAfterRestartQueue) {
        boolean deleted = false;
        try {
            deleted = new File(fileToDelete).delete();
        } catch (Exception e) {
        }
        //DEBUG
        //System.out.println("DELETED SINCEDB" + fileToDelete + " ? " + deleted);
        _logger.info("secondary delete sincedb_file=" + fileToDelete + " success=" + deleted);
    } //TESTED (primary and secondary deletion)

    for (TestLogstashExtractorPojo testInfo : secondaryQueue) {
        String commIdStr = testInfo.deleteOnlyCommunityId.toString();

        // Get all the indexes that might need to be cleansed:
        ElasticSearchManager indexMgr = ElasticSearchManager.getIndex(DUMMY_INDEX);

        // Stashed index
        ArrayList<String> indices = new ArrayList<String>();

        String stashedIndex = "recs_" + commIdStr;
        ClusterStateResponse retVal = indexMgr.getRawClient().admin().cluster().prepareState()
                .setIndices(stashedIndex).setRoutingTable(false).setNodes(false).setListenerThreaded(false)
                .get();

        if (!retVal.getState().getMetaData().getIndices().isEmpty()) {
            indices.add(stashedIndex);
        } // (else doesn't exist...)

        // Live indexes:
        String indexPattern = new StringBuffer("recs_t_").append(commIdStr).append("*").toString();
        retVal = indexMgr.getRawClient().admin().cluster().prepareState().setIndices(indexPattern)
                .setRoutingTable(false).setNodes(false).setListenerThreaded(false).get();

        for (IndexMetaData indexMetadata : retVal.getState().getMetaData()) {
            //DEBUG
            //System.out.println("INDEX=" + indexMetadata.index());
            indices.add(indexMetadata.index());
        }
        deleteSourceKeyRecords(indexMgr, indices.toArray(new String[0]), testInfo.sourceKey);

        _logger.info("Deleted key=" + testInfo.sourceKey + " from indexes="
                + ArrayUtils.toString(indices.toArray()));

        // Now I've deleted, go and distribute the deletion messages to the slaves
        if ((null != testInfo.distributed) && testInfo.distributed) {
            // Copy into the slaves' queue
            DBCursor dbc = DbManager.getIngest().getLogHarvesterSlaves().find();
            while (dbc.hasNext()) {
                BasicDBObject slave = (BasicDBObject) dbc.next();
                testInfo.forSlave = slave.getString("_id");
                _logHarvesterQ.push(testInfo.toDb());
                testInfo.forSlave = null;
                //DEBUG
                //System.out.println("DISTRIBUTING DELETION MESSAGE TO " + slave.toString());
                _logger.info("distributing deletion message to host=" + slave.toString());
            }
        } //TESTED (by hand)
    } //(end loop over secondary queue, ie to actually delete the indexes)
}
From source file:gmgen.util.MiscUtilities.java
/**
 * Copy a file
 * @param from_file
 * @param to_file
 * @throws IOException
 */
public static void copy(File from_file, File to_file) throws IOException {
    // First make sure the source file exists, is a file, and is readable.
    if (!from_file.exists()) {
        throw new IOException("FileCopy: no such source file: " + from_file.getPath());
    }
    if (!from_file.isFile()) {
        throw new IOException("FileCopy: can't copy directory: " + from_file.getPath());
    }
    if (!from_file.canRead()) {
        throw new IOException("FileCopy: source file is unreadable: " + from_file.getPath());
    }

    // If the destination is a directory, use the source file name
    // as the destination file name
    if (to_file.isDirectory()) {
        to_file = new File(to_file, from_file.getName());
    }

    // If the destination exists, make sure it is a writeable file
    // and ask before overwriting it. If the destination doesn't
    // exist, make sure the directory exists and is writeable.
    if (to_file.exists()) {
        if (!to_file.canWrite()) {
            throw new IOException("FileCopy: destination file is unwriteable: " + to_file.getPath());
        }

        // Ask whether to overwrite it
        int choice = JOptionPane.showConfirmDialog(null, "Overwrite existing file " + to_file.getPath(),
                "File Exists", JOptionPane.YES_NO_OPTION, JOptionPane.QUESTION_MESSAGE);

        if (choice != JOptionPane.YES_OPTION) {
            throw new IOException("FileCopy: existing file was not overwritten.");
        }
    } else {
        // if file doesn't exist, check if directory exists and is writeable.
        // If getParent() returns null, then the directory is the current dir.
        // so look up the user.dir system property to find out what that is.
        String parent = to_file.getParent(); // Get the destination directory

        if (parent == null) {
            parent = Globals.getDefaultPath(); // or CWD
        }

        File dir = new File(parent); // Convert it to a file.

        if (!dir.exists()) {
            throw new IOException("FileCopy: destination directory doesn't exist: " + parent);
        }
        if (dir.isFile()) {
            throw new IOException("FileCopy: destination is not a directory: " + parent);
        }
        if (!dir.canWrite()) {
            throw new IOException("FileCopy: destination directory is unwriteable: " + parent);
        }
    }

    // If we've gotten this far, then everything is okay.
    // So we copy the file, a buffer of bytes at a time.
    FileInputStream from = null; // Stream to read from source
    FileOutputStream to = null; // Stream to write to destination

    try {
        from = new FileInputStream(from_file); // Create input stream
        to = new FileOutputStream(to_file); // Create output stream

        byte[] buffer = new byte[4096]; // A buffer to hold file contents
        int bytes_read; // How many bytes in buffer

        while ((bytes_read = from.read(buffer)) != -1) { // Read bytes until EOF
            to.write(buffer, 0, bytes_read); // write bytes
        }
    }
    // Always close the streams, even if exceptions were thrown
    finally {
        if (from != null) {
            try {
                from.close();
            } catch (IOException e) {
                //TODO: Should this really be ignored?
            }
        }
        if (to != null) {
            try {
                to.close();
            } catch (IOException e) {
                //TODO: Should this really be ignored?
            }
        }
    }
}
From source file:it.geosolutions.geobatch.geotiff.retile.GeotiffRetilerAction.java
@Override
public Queue<FileSystemEvent> execute(Queue<FileSystemEvent> events) throws ActionException {
    try {
        if (configuration == null) {
            final String message = "GeotiffRetiler::execute(): flow configuration is null.";
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message);
            throw new ActionException(this, message);
        }
        if (events.size() == 0) {
            throw new ActionException(this,
                    "GeotiffRetiler::execute(): Unable to process an empty events queue.");
        }

        if (LOGGER.isInfoEnabled())
            LOGGER.info("GeotiffRetiler::execute(): Starting with processing...");

        listenerForwarder.started();

        // The return
        final Queue<FileSystemEvent> ret = new LinkedList<FileSystemEvent>();

        while (events.size() > 0) {
            FileSystemEvent event = events.remove();
            File eventFile = event.getSource();
            FileSystemEventType eventType = event.getEventType();

            if (eventFile.exists() && eventFile.canRead() && eventFile.canWrite()) {
                /*
                 * If here: we can start retiler actions on the incoming file event
                 */
                if (eventFile.isDirectory()) {
                    File[] fileList = eventFile.listFiles();
                    int size = fileList.length;
                    for (int progress = 0; progress < size; progress++) {
                        File inFile = fileList[progress];
                        final String absolutePath = inFile.getAbsolutePath();
                        final String inputFileName = FilenameUtils.getName(absolutePath);

                        if (LOGGER.isInfoEnabled())
                            LOGGER.info("is going to retile: " + inputFileName);

                        try {
                            listenerForwarder.setTask("GeotiffRetiler");
                            GeoTiffRetilerUtils.reTile(inFile, configuration, getTempDir());
                            // set the output
                            /*
                             * COMMENTED OUT 21 Feb 2011: simone: If the event represents a Dir
                             * we have to return a Dir. Do not matter failing files.
                             *
                             * carlo: we may also want to check if a file is already tiled!
                             *
                             * File outputFile=reTile(inFile); if (outputFile!=null){ //TODO:
                             * here we use the same event for each file in the ret.add(new
                             * FileSystemEvent(outputFile, eventType)); }
                             */
                        } catch (UnsupportedOperationException uoe) {
                            listenerForwarder.failed(uoe);
                            if (LOGGER.isWarnEnabled())
                                LOGGER.warn(uoe.getLocalizedMessage(), uoe);
                            continue;
                        } catch (IOException ioe) {
                            listenerForwarder.failed(ioe);
                            if (LOGGER.isWarnEnabled())
                                LOGGER.warn(ioe.getLocalizedMessage(), ioe);
                            continue;
                        } catch (IllegalArgumentException iae) {
                            listenerForwarder.failed(iae);
                            if (LOGGER.isWarnEnabled())
                                LOGGER.warn(iae.getLocalizedMessage(), iae);
                            continue;
                        } finally {
                            listenerForwarder.setProgress((progress * 100) / ((size != 0) ? size : 1));
                            listenerForwarder.progressing();
                        }
                    }
                    if (LOGGER.isInfoEnabled())
                        LOGGER.info("SUCCESSFULLY completed work on: " + event.getSource());
                    // add the directory to the return
                    ret.add(event);
                } else {
                    // file is not a directory
                    try {
                        listenerForwarder.setTask("GeotiffRetiler");
                        final File outputFile = GeoTiffRetilerUtils.reTile(eventFile, configuration,
                                getTempDir());

                        if (LOGGER.isInfoEnabled())
                            LOGGER.info("SUCCESSFULLY completed work on: " + event.getSource());
                        listenerForwarder.setProgress(100);
                        ret.add(new FileSystemEvent(outputFile, eventType));
                    } catch (UnsupportedOperationException uoe) {
                        listenerForwarder.failed(uoe);
                        if (LOGGER.isWarnEnabled())
                            LOGGER.warn(uoe.getLocalizedMessage(), uoe);
                        continue;
                    } catch (IOException ioe) {
                        listenerForwarder.failed(ioe);
                        if (LOGGER.isWarnEnabled())
                            LOGGER.warn(ioe.getLocalizedMessage(), ioe);
                        continue;
                    } catch (IllegalArgumentException iae) {
                        listenerForwarder.failed(iae);
                        if (LOGGER.isWarnEnabled())
                            LOGGER.warn(iae.getLocalizedMessage(), iae);
                        continue;
                    } finally {
                        listenerForwarder.setProgress((100) / ((events.size() != 0) ? events.size() : 1));
                        listenerForwarder.progressing();
                    }
                }
            } else {
                final String message = "The passed file event refers to a not existent "
                        + "or not readable/writeable file! File: " + eventFile.getAbsolutePath();
                if (LOGGER.isWarnEnabled())
                    LOGGER.warn(message);
                final IllegalArgumentException iae = new IllegalArgumentException(message);
                listenerForwarder.failed(iae);
            }
        } // end while
        listenerForwarder.completed();

        // return
        if (ret.size() > 0) {
            events.clear();
            return ret;
        } else {
            /*
             * If here: we got an error, no files are set to be returned, and the input queue is
             * returned
             */
            return events;
        }
    } catch (Exception t) {
        if (LOGGER.isErrorEnabled())
            LOGGER.error(t.getLocalizedMessage(), t);
        final ActionException exc = new ActionException(this, t.getLocalizedMessage(), t);
        listenerForwarder.failed(exc);
        throw exc;
    }
}
From source file:org.eclipse.virgo.ide.runtime.internal.core.Server.java
public IPath getServerDeployDirectory() { String deployDir = getDeployDirectory(); IPath deployPath = new Path(deployDir); if (!deployPath.isAbsolute()) { IPath base = getRuntimeBaseDirectory(); deployPath = base.append(deployPath); }/* w w w. j a v a 2 s . c o m*/ // Make sure that stage directory is accessible and we can write into it; if we can't the stage will be in the // plugin's statelocation File deployPathFile = deployPath.toFile(); if (!deployPathFile.exists() && !deployPathFile.getParentFile().canWrite()) { deployPath = ServerCorePlugin.getDefault().getStateLocation().append(deployDir); } else if (deployPathFile.exists() && !deployPathFile.canWrite()) { deployPath = ServerCorePlugin.getDefault().getStateLocation().append(deployDir); } return deployPath; }
From source file:info.rmapproject.core.rmapservice.impl.openrdf.triplestore.SesameSailMemoryTriplestore.java
@Override protected Repository intitializeRepository() throws RepositoryException { if (repository == null) { do {//from w w w . ja v a 2 s .c o m if (dataDirectory == null || dataDirectory.length() == 0) { // not persisting repository = new SailRepository(new MemoryStore()); break; } File dataFile = new File(dataDirectory); if (!dataFile.exists()) { throw new RepositoryException("Directory " + dataDirectory + " does not exist"); } if (!dataFile.isDirectory()) { throw new RepositoryException("Directory " + dataDirectory + " is not a directory"); } if (!dataFile.canRead()) { throw new RepositoryException("Directory " + dataDirectory + " cannot be read"); } if (!dataFile.canWrite()) { throw new RepositoryException("Directory " + dataDirectory + " cannot be written to"); } repository = new SailRepository(new MemoryStore(dataFile)); } while (false); repository.initialize(); } return repository; }