List of usage examples for java.lang.String contentEquals
public boolean contentEquals(CharSequence cs)
From source file:edu.du.penrose.systems.fedoraApp.util.MetsBatchFileSplitter.java
/** * Split the batchIngest command file. If the batch contains additions put the results into the 'mets/new' directory. If updates put the * files into the 'mets/upates' directory. The ingest command (only one per batchfile!!) is saved in a comment prior to the <mets:mets> * element for each split file. This means the batch can only contain files of one type ie adds or updates. * <br><br>/*from w ww.ja va 2 s . c om*/ * If an error occurs we will try to remove any generated output file and then throw an exception. * <br> * One the ingest command line is found ie "<ingestControl command="A" type="pidInOBJID" />" ALL other command line are ignored!! AFter that * point we only look for <mets:mets> and </mets:mets> to split the file. * * NOTE Since this method may need to get fedora pids The following libraries are needed... * wsdl4j-1.5.1.jar * commons-discovery.jar * fcrepo-server-3.4-utilities-main.jar * jaxrpc.jar * logback-core-0.9.18.jar * logback-classic-0.9.18.jar * trippi-1.1.2-core.jar * fcrepo-common-3.4.jar * fcrepo-client-admin-3.4.jar * jdom.jar * * @param ingestOptions set batchIngestDate, batchDescription, * @param threadStatus can be null. * @param inFile batch file to split * @param metsNewDirectory * @param metsUpdatesDirectory * @param nameFileFromOBJID create xml file's that are named the same as it's OBJID element. This seems like a good idea but if the file * already exists you will get a error, killing the entire ingest. * @param fedoraUser used for a replyWithPid ingest. If null we will pull from the batchIngest.properties file. * @param fedoraPassword used for a replyWithPid ingest. If null we will pull from the batchIngest.properties file. * * @return IF the batch file is an add of type 'replyWithPid' return a map of OBJID and PIDs otherwise return null. NOTE: if the <mets:mets OBJID> element is * empty in the batch file, both the key and the value of the returned map will contain the pid. 
* * @throws Exception */ static public Map<String, String> splitMetsBatchFile_version_2(BatchIngestOptions ingestOptions, ThreadStatusMsg threadStatus, File inFile, String metsNewDirectory, String metsUpdatesDirectory, boolean nameFileFromOBJID, String fedoraHost, String fedoraPort, String fedoraUser, String fedoraPassword) throws Exception { Map<String, String> pidMap = null; FileInputStream batchFileInputStream; try { batchFileInputStream = new FileInputStream(inFile); } catch (FileNotFoundException e) { throw new FatalException(e.getMessage()); } DataInputStream batchFileDataInputStream = new DataInputStream(batchFileInputStream); BufferedReader batchFileBufferedReader = new BufferedReader( new InputStreamReader(batchFileDataInputStream)); String oneLine = null; String ingestControlLine = null; int fileCount = 0; String documentType = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"; String batchCreationDate = null; String batchDescription = null; String metsDirectory = null; // will get set to either the new directory or the updates directory. 
File outFile = null; FileOutputStream metsFileOutputStream = null; BufferedWriter metsBufferedWriter = null; boolean headerFoundLookOnlyForMetsNow = false; while (batchFileBufferedReader.ready()) { oneLine = batchFileBufferedReader.readLine(); if (!headerFoundLookOnlyForMetsNow) { if (oneLine.contains("<?xml version") && oneLine.trim().startsWith("<")) { documentType = oneLine; } // LOOK FOR BATCH DESCRIPTION if (oneLine.contains(BATCH_DESCRIPTION_ELEMENT_MARKER) && oneLine.indexOf("<!") == -1) { int tempLocation1 = oneLine.indexOf("batchCreationDate=" + QUOTE); if (tempLocation1 == -1) { oneLine.indexOf("batchCreationDate=" + APOST); } int tempLocation2 = oneLine.indexOf(QUOTE, tempLocation1 + 19); if (tempLocation2 == -1) { oneLine.indexOf(APOST, tempLocation1 + 19); } batchCreationDate = oneLine.substring(tempLocation1 + 19, tempLocation2); ingestOptions.setBatchIngestDate(batchCreationDate); oneLine = batchFileBufferedReader.readLine(); if (!oneLine.contains("<literal>")) { throw new FatalException("Invalid batchDescription"); } StringBuffer tempBatchDescription = new StringBuffer(); boolean endBatchDescription = false; do { tempBatchDescription.append(oneLine); if (oneLine.contains("</literal>")) { endBatchDescription = true; batchDescription = tempBatchDescription.toString(); batchDescription = batchDescription.replace("<literal>", ""); // it may all be on one line. 
batchDescription = batchDescription.replace("</literal>", ""); } oneLine = batchFileBufferedReader.readLine(); } while (!endBatchDescription); ingestOptions.setBatchDescription(batchDescription.trim()); } // look for batch command at the top of the file, prior to the first <mets:mets> if (oneLine.contains((INGEST_CONTROL_ELEMENT_MARKER)) && oneLine.indexOf("<!") == -1) { if ((!oneLine.contains("command")) || (!oneLine.contains("type"))) { throw new FatalException( "The batch control element must have both command and type attributes"); } ingestControlLine = oneLine.trim(); boolean validCommandLine = parseCommandLine(ingestOptions, ingestControlLine); if (!validCommandLine) { throw new Exception("ERROR: Invalid command line found in batch ingest file:" + inFile + " , " + ingestControlLine); } headerFoundLookOnlyForMetsNow = true; switch (ingestOptions.getIngestCommand()) { case UPDATE: metsDirectory = metsUpdatesDirectory; break; case ADD: metsDirectory = metsNewDirectory; break; default: throw new Exception("ERROR: Invalid ingest command"); } } // if line is ingestControl (command line) } else // if-else headerFoundLookOnlyForMetsNow { if (oneLine.contains((INGEST_CONTROL_ELEMENT_MARKER)) && oneLine.indexOf("<!") == -1) { logger.warn("More than one ingest control line found in batch file! 
extras will be ignored:" + inFile); } // look for <mets:mets> and get complete <mets:mets> element if (oneLine.contains("<mets:mets") && oneLine.indexOf("<!") == -1) { boolean haveEntireMetsLine = false; while (!haveEntireMetsLine) { StringBuffer tempBuffer = new StringBuffer(oneLine); String moreOfMetsLine = null; if (!oneLine.contains(">")) { moreOfMetsLine = batchFileBufferedReader.readLine(); tempBuffer.append(moreOfMetsLine); if (moreOfMetsLine.contains(">")) { haveEntireMetsLine = true; oneLine = tempBuffer.toString(); } else { oneLine = tempBuffer.toString(); } } else { haveEntireMetsLine = true; } } // process everything up to </mets:mets> String objID = MetsBatchFileSplitter.getObjID(oneLine); if (nameFileFromOBJID) { outFile = new File(metsDirectory + objID + ".xml"); logger.info("outputSplitFile METS file: " + metsDirectory + objID + ".xml"); if (outFile.exists()) { String errorMsg = "file already exists:" + outFile.getName(); System.out.println(errorMsg); logger.error(errorMsg); throw new FatalException(errorMsg); } } else { switch (ingestOptions.getIngestThreadType()) { case BACKGROUND: // TBD this is probably an error case MANUAL: outFile = new File(metsDirectory + edu.du.penrose.systems.util.FileUtil.getDateTimeMilliSecondEnsureUnique() + ".xml"); break; case REMOTE: outFile = new File(metsDirectory + edu.du.penrose.systems.util.FileUtil.getDateTimeMilliSecondEnsureUnique() + FedoraAppConstants.REMOTE_TASK_NAME_SUFFIX + ".xml"); break; } } // oneLine now contains the entire <mets:mets....> line logger.info("outputSplitFile METS file: " + outFile.toString() + "\n\n"); boolean errorOccurred = false; try { metsFileOutputStream = new FileOutputStream(outFile); metsBufferedWriter = new BufferedWriter( new OutputStreamWriter(metsFileOutputStream, "UTF-8")); metsBufferedWriter.write(documentType); metsBufferedWriter.newLine(); switch (ingestOptions.getIngestCommand()) { case ADD: switch (ingestOptions.getAddCommandType()) { case REPLY_WITH_PID: // we get 
one pid at a time, write it to the <mets:mets> line OBJID value and add it to the pidMap String[] tempPids = null; if (fedoraPassword == null || fedoraUser == null || fedoraHost == null || fedoraPort == null) { tempPids = FedoraAppUtil.getPIDs(ingestOptions.getInstitution(), new NonNegativeInteger("1")); } else { tempPids = FedoraAppUtil.getPIDs(fedoraHost, Integer.valueOf(fedoraPort), fedoraUser, fedoraPassword, ingestOptions.getInstitution(), new NonNegativeInteger("1")); } String reservedPid = tempPids[0]; metsBufferedWriter.write("<!--" + ingestControlLine + "-->\n"); if (pidMap == null) { pidMap = new LinkedHashMap<String, String>(); } oneLine = putPidInMetsLineOBJID(oneLine, reservedPid); if (objID.contentEquals("")) { pidMap.put(reservedPid, reservedPid); } else { pidMap.put(objID, reservedPid); } break; case PID_IN_OBJID: case NORMAL: default: metsBufferedWriter.write("<!--" + ingestControlLine + "-->\n"); } break; case UPDATE: metsBufferedWriter.write("<!--" + ingestControlLine + "-->\n"); break; case NOT_SET: default: throw new Exception("ERROR: Invalid ingest command"); } // read lines from batch file and write to the new mets file until </mets:mets> line while (!oneLine.contains("</mets:mets")) { // null pointer on premature end of file. metsBufferedWriter.write(oneLine); metsBufferedWriter.newLine(); oneLine = batchFileBufferedReader.readLine(); if (oneLine == null) { throw new FatalException("Error spliting batch file, missing </mets:mets>"); } } metsBufferedWriter.write(oneLine); metsBufferedWriter.newLine(); metsBufferedWriter.close(); } catch (Exception e) { errorOccurred = true; // for cleanup, see below. throw new Exception(e); } finally { metsBufferedWriter.close(); if (errorOccurred) { outFile.delete(); //clean up. } } fileCount++; if (threadStatus != null) { threadStatus.setStatus("Spliting XML file #: " + fileCount); } } // if <mets:mets> found (look for another mets section now). } // if-else ! 
headerFoundLookOnlyForMetsNow } // while return pidMap; // may be null }
From source file:org.candlepin.pinsetter.tasks.HypervisorUpdateJob.java
/** * {@inheritDoc}//from w w w .java2 s . co m * * Executes {@link ConsumerResource#create(org.candlepin.model.Consumer, org.candlepin.auth.Principal, * java.utl.String, java.utl.String, java.utl.String)} * Executes (@link ConusmerResource#performConsumerUpdates(java.utl.String, org.candlepin.model.Consumer)} * as a pinsetter job. * * @param context the job's execution context */ @Transactional @SuppressWarnings("checkstyle:indentation") public void toExecute(JobExecutionContext context) throws JobExecutionException { try { JobDataMap map = context.getMergedJobDataMap(); String ownerKey = map.getString(JobStatus.TARGET_ID); Boolean create = map.getBoolean(CREATE); Principal principal = (Principal) map.get(PRINCIPAL); String jobReporterId = map.getString(REPORTER_ID); HypervisorUpdateResult result = new HypervisorUpdateResult(); Owner owner = ownerCurator.lookupByKey(ownerKey); if (owner == null) { context.setResult("Nothing to do. Owner does not exist"); log.warn("Hypervisor update attempted against non-existent org id ''{0}''", ownerKey); return; } byte[] data = (byte[]) map.get(DATA); String json = decompress(data); HypervisorList hypervisors = (HypervisorList) Util.fromJson(json, HypervisorList.class); log.debug("Hypervisor consumers for create/update: {}", hypervisors.getHypervisors().size()); log.debug("Updating hypervisor consumers for org {0}", ownerKey); Set<String> hosts = new HashSet<String>(); Set<String> guests = new HashSet<String>(); Map<String, Consumer> incomingHosts = new HashMap<String, Consumer>(); parseHypervisorList(hypervisors, hosts, guests, incomingHosts); // Maps virt hypervisor ID to registered consumer for that hypervisor, should one exist: VirtConsumerMap hypervisorConsumersMap = consumerCurator.getHostConsumersMap(owner, hosts); // Maps virt guest ID to registered consumer for guest, if one exists: VirtConsumerMap guestConsumersMap = consumerCurator.getGuestConsumersMap(owner, guests); for (String hypervisorId : hosts) { Consumer 
knownHost = hypervisorConsumersMap.get(hypervisorId); Consumer incoming = incomingHosts.get(hypervisorId); Consumer reportedOnConsumer = null; List<GuestId> startGuests = new ArrayList<GuestId>(); if (knownHost == null) { if (!create) { result.failed(hypervisorId, "Unable to find hypervisor with id " + hypervisorId + " in org " + ownerKey); } else { log.debug("Registering new host consumer for hypervisor ID: {}", hypervisorId); Consumer newHost = createConsumerForHypervisorId(hypervisorId, owner, principal); consumerResource.performConsumerUpdates(incoming, newHost, guestConsumersMap, false); consumerResource.create(newHost, principal, null, owner.getKey(), null, false); hypervisorConsumersMap.add(hypervisorId, newHost); result.created(newHost); reportedOnConsumer = newHost; } } else { startGuests = knownHost.getGuestIds(); reportedOnConsumer = knownHost; if (jobReporterId != null && knownHost.getHypervisorId() != null && hypervisorId.equalsIgnoreCase(knownHost.getHypervisorId().getHypervisorId()) && knownHost.getHypervisorId().getReporterId() != null && !jobReporterId.equalsIgnoreCase(knownHost.getHypervisorId().getReporterId())) { log.debug("Reporter changed for Hypervisor {} of Owner {} from {} to {}", hypervisorId, ownerKey, knownHost.getHypervisorId().getReporterId(), jobReporterId); } if (consumerResource.performConsumerUpdates(incoming, knownHost, guestConsumersMap, false)) { consumerCurator.update(knownHost); result.updated(knownHost); } else { result.unchanged(knownHost); } } consumerResource.checkForGuestsMigration(knownHost, startGuests, knownHost == null ? 
null : knownHost.getGuestIds(), guestConsumersMap); // update reporter id if it changed if (jobReporterId != null && reportedOnConsumer != null && reportedOnConsumer.getHypervisorId() != null && (reportedOnConsumer.getHypervisorId().getReporterId() == null || !jobReporterId .contentEquals(reportedOnConsumer.getHypervisorId().getReporterId()))) { reportedOnConsumer.getHypervisorId().setReporterId(jobReporterId); } else if (jobReporterId == null) { log.debug("hypervisor checkin reported asynchronously without reporter id " + "for hypervisor:{} of owner:{}", hypervisorId, ownerKey); } } log.info("Summary for report from {} by principal {}\n {}", jobReporterId, principal, result); context.setResult(result); } catch (Exception e) { log.error("HypervisorUpdateJob encountered a problem.", e); context.setResult(e.getMessage()); throw new JobExecutionException(e.getMessage(), e, false); } }
From source file:controllers.Consumer.java
/** * 1. Customer buy offer as a User. Deployment needs CLOUD_ADMIN privilege. * Hence,require deploy user setup for the enterprise that consumer belongs * to.2 users - session user and deploy user . 2. Save the deployment * details such as user, vdc created, SC offer id , lease etc into database. * 3. Destroy date needs to be updated with the date when offer is * undeployed after lease has expired (in future releases). For now, its * null. 4. Refer portal-schema if needed. * /*from www. ja va2 s . c o m*/ * @param id_datacenter * The datacenter id to be used for deployment. * @param vdc_id_param * The id of virtual datacenter to be created. * @param sc_offer_id * The id of virtual appliance to be deployed. * @param va_param * The virtual appliance name. * @param lease_period */ @SuppressWarnings({ "null", "deprecation" }) public static void Deploy(final Integer id_datacenter, final Integer vdc_id_param, final Integer sc_offer_id, final String va_param, final String lease_period, @Nullable final String new_name, @Nullable final String new_lease_period, @Nullable final Integer spinner) { Logger.info("---------INSIDE CONSUMER DEPLOY()---------------"); Logger.info( " DEPLOY( INTEGER ID_DATACENTER:: " + id_datacenter + ", INTEGER VDC_ID_PARAM :: " + vdc_id_param + ", INTEGER SC_OFFER_ID :: " + sc_offer_id + " , String va_param:: " + va_param + ")"); String deploy_username = null; String deploy_password = null; Integer deploy_enterprise_id = null; String user = session.get("username"); String password = session.get("password"); AbiquoContext contextt = Context.getApiClient(user, password); if (contextt != null) { AbiquoUtils.setAbiquoUtilsContext(contextt); /* ---------------------------- */ /* * Retrieve the deploy username and password for current user's * Enterprise. 
*/ Enterprise current_enterprise = AbiquoUtils.getCurrentUserEnterprise(); Integer enterprise_id = current_enterprise.getId(); deploy_username = user; deploy_password = password; deploy_enterprise_id = current_enterprise.getId(); Logger.info(" DEPLOY ENTERPRISE ID + USERNAME + PASSWORD :" + deploy_enterprise_id + " " + deploy_username + " " + deploy_password); /* ---------------------------- */ /* Create context with deploy username and password for deployments */ AbiquoContext context = Context.getApiClient(deploy_username, deploy_password); VirtualDatacenter vdc_toDeploy = null; VirtualAppliance virtualapp_todeploy = null; VirtualMachine vm_todeploy = null; VirtualDatacenter virtualDC = null; String vdc_name = null; try { //AbiquoUtils.setAbiquoUtilsContext(context); Enterprise enterprise = AbiquoUtils.getEnterprise(deploy_enterprise_id); String useremail = session.get("email"); String vdc_user = session.get("username"); String vdcname = Helper.vdcNameGen(vdc_user); Logger.info("CURRENT USER EMAIL ID: " + useremail); Logger.info(" vdcname : " + vdcname); virtualDC = AbiquoUtils.getMarketplaceDetails(vdc_id_param); Logger.info(" VDC to deploy: ", virtualDC); vdc_name = virtualDC.getName(); HypervisorType hypervisor = virtualDC.getHypervisorType(); Logger.info(" Hypervisor to deploy: ", hypervisor); // get first datacenter allowed. For developement only will be one. // get first datacenter allowed. For developement only will be one. 
Datacenter datacenter = virtualDC.getDatacenter(); //enterprise.listAllowedDatacenters().get(0); Logger.info(" Datacenter to deploy: ", datacenter); PrivateNetwork network = PrivateNetwork.builder(context.getApiContext()).name("192.168.0.0") .gateway("192.168.0.1").address("192.168.0.0").mask(22).build(); Logger.info(" Network Built"); //create public ips /*if (spinner != null) { PublicNetwork publicNetwork = PublicNetwork.builder(context.getApiContext(), datacenter) .name("192.168.0.0").gateway("192.168.0.1") .address("192.168.0.0").mask(22).build(); }*/ vdc_toDeploy = VirtualDatacenter.builder(context.getApiContext(), datacenter, enterprise) .name(vdcname).cpuCountLimits(0, 0).hdLimitsInMb(0, 0).publicIpsLimits(0, 0).ramLimits(0, 0) .storageLimits(0, 0).vlansLimits(0, 0).hypervisorType(hypervisor).network(network).build(); Logger.info("VDC built "); vdc_toDeploy.save(); Logger.info(" 1. VDC CREATED "); virtualapp_todeploy = VirtualAppliance.builder(context.getApiContext(), vdc_toDeploy).name(va_param) .build(); virtualapp_todeploy.save(); Logger.info(" 2. 
VAPP CREATED "); /* Save the deploy info to the portal database : user, vdc etc */ final User userAbiquo = contextt.getAdministrationService().getCurrentUser(); final Integer idUser = userAbiquo.getId(); final OfferPurchased offerPurchased = new OfferPurchased(); UserPortal userToSave = UserPortal.findById(idUser); if (userToSave == null) { // Try to recover from jClouds final String nickUser = userAbiquo.getNick(); final String emailUser = userAbiquo.getEmail(); final Integer idEnterprise = userAbiquo.getEnterprise().getId(); userToSave = new UserPortal(idUser, nickUser, emailUser, idEnterprise); userToSave.save(); } offerPurchased.setUser(userToSave); Date current = new Date(); Calendar cal = Calendar.getInstance(); if (lease_period.contentEquals("30 days")) { Logger.info("case1 : 30 days lease "); cal.add(Calendar.DATE, 30); } else if (lease_period.contentEquals("60 days")) { Logger.info("case2 : 60 days lease"); cal.add(Calendar.DATE, 60); } else if (lease_period.contentEquals("90 days")) { Logger.info("case3 : 90 days lease "); cal.add(Calendar.DATE, 90); } Date expiration = null; if (new_lease_period != null) expiration = new Date(new_lease_period); Logger.info("--------------------"); offerPurchased.setStart(current); offerPurchased.setExpiration(expiration == null ? 
cal.getTime() : expiration); // user_consumption.setVdc_name(vdc_toDeploy.getName()); offerPurchased.setLeasePeriod(lease_period); offerPurchased.setIdVirtualDatacenterUser(vdc_toDeploy.getId()); offerPurchased.setIdVirtualApplianceUser(virtualapp_todeploy.getId()); final Offer offer = Offer.findById(sc_offer_id); //offer.setVirtualDatacenter(vdc_toDeploy.getId()); offerPurchased.setOffer(offer); Set<Deploy_Bundle> deploy_bundle_set = new HashSet<Deploy_Bundle>(); Deploy_Bundle deploy_Bundle = new Deploy_Bundle(); deploy_Bundle.setDeploy_datacenter(datacenter.getId()); deploy_Bundle.setDeploy_hypervisorType(hypervisor.toString()); deploy_Bundle.setDeploy_network(""); deploy_Bundle.setVapp_name(virtualapp_todeploy.getName()); deploy_Bundle.setVdc_name(vdc_toDeploy.getId()); deploy_Bundle.setOfferPurchased(offerPurchased); deploy_Bundle.setVapp_id(virtualapp_todeploy.getId()); deploy_bundle_set.add(deploy_Bundle); //EventHandler handler = new VmEventHandler(context,vm_todeploy); /* * String query = * "select p from sc_offer as p where p.sc_offer_id = ?1"; * JPAQuery result = sc_offer.find(query, sc_offer_id); */List<Offer> nodes = ProducerDAO.getOfferDetails(sc_offer_id); for (Offer node : nodes) { /////Set<Deploy_Bundle_Nodes> deploy_Bundle_Nodes_list = new HashSet<Deploy_Bundle_Nodes>(); /// Retrieve nodes from jClouds Set<Nodes> vmlist_todeploy = node.getNodes(); Set<Deploy_Bundle_Nodes> deploy_Bundle_Nodes_list = new HashSet<Deploy_Bundle_Nodes>(); for (Nodes aVM : vmlist_todeploy) { String vmName = aVM.getNode_name(); VirtualMachineTemplate vm_template_todeploy = virtualDC .getAvailableTemplate(aVM.getIdImage()); int cpu = aVM.getCpu(); int ram = aVM.getRam(); // String description = aVM.getDescription(); vm_todeploy = VirtualMachine .builder(context.getApiContext(), virtualapp_todeploy, vm_template_todeploy) .nameLabel(vmName).cpu(cpu).ram(ram).password("vmpassword").build(); vm_todeploy.save(); Logger.info(" 3. 
VM CREATED"); Deploy_Bundle_Nodes deploy_Bundle_Nodes = new Deploy_Bundle_Nodes(); deploy_Bundle_Nodes.setCpu(cpu); deploy_Bundle_Nodes.setNode_name(vmName); deploy_Bundle_Nodes.setNode_name(vm_todeploy.getNameLabel()); deploy_Bundle_Nodes.setNode_id(vm_todeploy.getId()); deploy_Bundle_Nodes.setRam(ram); deploy_Bundle_Nodes.setVdrp_password(""); deploy_Bundle_Nodes.setVdrpIP(""); deploy_Bundle_Nodes.setVdrpPort(0); deploy_Bundle_Nodes_list.add(deploy_Bundle_Nodes); // deploy_Bundle_Nodes.setResources(resources); List<HardDisk> hardDisk_toattach = new ArrayList<HardDisk>(); Set<Deploy_Nodes_Resources> deploy_Nodes_Resources_list = new HashSet<Deploy_Nodes_Resources>(); Set<Nodes_Resources> resources = aVM.getResources(); for (Nodes_Resources resource : resources) { Long size = resource.getValue(); HardDisk disk = HardDisk.builder(context.getApiContext(), vdc_toDeploy).sizeInMb(size) .build(); disk.save(); hardDisk_toattach.add(disk); Deploy_Nodes_Resources deploy_Nodes_Resources = new Deploy_Nodes_Resources(); deploy_Nodes_Resources.setResourceType(resource.getResourceType()); deploy_Nodes_Resources.setResourceType(resource.getSequence()); deploy_Nodes_Resources.setValue(resource.getValue()); deploy_Nodes_Resources_list.add(deploy_Nodes_Resources); } deploy_Bundle_Nodes.setResources(deploy_Nodes_Resources_list); HardDisk[] disks = new HardDisk[hardDisk_toattach.size()]; for (int i = 0; i < hardDisk_toattach.size(); i++) { disks[i] = hardDisk_toattach.get(i); } if (disks.length > 0) { vm_todeploy.attachHardDisks(disks); Logger.info(" 4. HARDDISKS ATTACHED "); } Logger.info(" Handler created :"); vm_todeploy.deploy(); Logger.info("STARTING MONITORING ......"); } Logger.info("SAVING DEPLOY INFORMATION ......"); deploy_Bundle.setNodes(deploy_Bundle_Nodes_list); offerPurchased.setNodes(deploy_bundle_set); offerPurchased.setServiceLevel(new_name == null ? 
offer.getDefaultServiceLevel() : new_name); offerPurchased.save(); // Register the handler so it starts to listen to events VirtualApplianceMonitor monitor = context.getMonitoringService().getVirtualApplianceMonitor(); VappEventHandler handler = new VappEventHandler(monitor); monitor.register(handler); // Monitor the task and call the callback when it completes monitor.monitorDeploy(virtualapp_todeploy); // The 'monitor' method will not block and the program execution will continue // normally. Events will be dispatched to handlers when monitor completes, fails // or reaches timeout. //Mails.sendEmail(vncPort, vncAddress, password, name, offerName, useremail, exp_date) Logger.info("DEPLOY INFO SAVED ......"); Logger.info("------------EXITING CONSUMER DEPLOY()--------------"); render(vdc_name, enterprise_id); } } catch (AuthorizationException ae) { Logger.warn(ae, "EXCEPTION OCCURED IN deploy()"); String message = "Deployment cannot proceed further. Please Check deploy user and password for your enterprise."; render("/errors/error.html", message); } catch (Exception ae) { Logger.warn(ae, "EXCEPTION OCCURED IN deploy()"); String message = "Deployment cannot proceed further. Please contact your System Administrator."; render("/errors/error.html", message); if (context != null) { context.close(); } } } else { flash.error("You are not connected.Please Login"); Login.login_page(); } }
From source file:com.moodmap.HomeActivity.java
private void registerLocationListeners() { mLocationManager = (LocationManager) getSystemService(Context.LOCATION_SERVICE); if (mLocationListener_Fine == null) { mLocationListener_Fine = new LocationListener() { // LocationListener @Override//w w w . j a v a 2 s. com public void onLocationChanged(Location location) { float currentLocationAccuracy = location.getAccuracy(); myLocationFixCnt_Fine++; if ((myLocationFixCnt_Fine >= Constants.kMaxGpsFixCnt) || ((location.hasAccuracy()) && (currentLocationAccuracy <= 60.0))) // tighter, // slower // location { // stop the fine location service mLocationManager.removeUpdates(this); // also stop the coarse location updates, if for some // reason it has not resolved yet if (mLocationListener_Coarse != null) { mLocationManager.removeUpdates(mLocationListener_Coarse); } } updateMyLocation(location); } @Override public void onProviderDisabled(String provider) { Log.v(Constants.LOGTAG, "Fine Provider Disabled: " + provider); } @Override public void onProviderEnabled(String provider) { Log.v(Constants.LOGTAG, "Fine Provider Enabled: " + provider); } @Override public void onStatusChanged(String provider, int status, Bundle extras) { myStatusChangeCnt_Fine++; if ((status == LocationProvider.OUT_OF_SERVICE)) // not sure if needed (myStatusChangeCnt_Fine >= // Constants.kMaxGpsFixCnt)) { // if cannot resolve the location, do not leave the gps // running mLocationManager.removeUpdates(mLocationListener_Fine); } Log.v(Constants.LOGTAG, "Fine Provider Status Change (OVER): " + provider + " status:" + status + " myStatusChangeCnt_Fine:" + myStatusChangeCnt_Fine); // LocationProvider.OUT_OF_SERVICE } }; } if (mLocationListener_Coarse == null) { mLocationListener_Coarse = new LocationListener() { // LocationListener @Override public void onLocationChanged(Location location) { float currentLocationAccuracy = location.getAccuracy(); myLocationFixCnt_Coarse++; if ((myLocationFixCnt_Coarse >= Constants.kMaxGpsFixCnt) || 
((location.hasAccuracy()) && (currentLocationAccuracy <= 1000.0))) // quick, // rough // location { // stop the coarse location service mLocationManager.removeUpdates(this); } updateMyLocation(location); } @Override public void onProviderDisabled(String provider) { Log.v(Constants.LOGTAG, "Provider Disabled: " + provider); } @Override public void onProviderEnabled(String provider) { Log.v(Constants.LOGTAG, "Provider Enabled: " + provider); } @Override public void onStatusChanged(String provider, int status, Bundle extras) { Log.v(Constants.LOGTAG, "Provider Status Change: " + provider + " status:" + status); // LocationProvider.OUT_OF_SERVICE } }; } // still in registerLocationListeners // String provider = null; Criteria crta = new Criteria(); crta.setAccuracy(Criteria.ACCURACY_FINE); crta.setAltitudeRequired(false); crta.setBearingRequired(false); crta.setCostAllowed(false); // Indicates whether the provider is allowed // to incur monetary cost. // crta.setPowerRequirement(Criteria.POWER_MEDIUM); // POWER_LOW); provider = mLocationManager.getBestProvider(crta, true); // provider = LocationManager.NETWORK_PROVIDER; // get the last, possibly very wrong location currentLocation = mLocationManager.getLastKnownLocation(provider); // updateMyLocation(location); // minTime (2nd) the minimum time interval for notifications, in // milliseconds. This field is only used as a hint to conserve power, // and actual time between location updates may be greater or lesser // than this value. 
// minDistance (3rd)the minimum distance interval for notifications, in // meters // should be ~ 10000, 100 // mLocationManager.requestLocationUpdates(provider, 3000, 50, // mLocationListener_Fine); mLocationManager.requestLocationUpdates(provider, 3000, 0, mLocationListener_Fine); // Add second quick location provider Criteria coarse = new Criteria(); coarse.setAccuracy(Criteria.ACCURACY_COARSE); coarse.setAltitudeRequired(false); coarse.setBearingRequired(false); // coarse.setCostAllowed(false); // crta.setPowerRequirement(Criteria.POWER_MEDIUM); // POWER_LOW); String coarseProvider = mLocationManager.getBestProvider(coarse, true); if ((provider != null) && (!provider.contentEquals(coarseProvider))) { // only add coarse location resolution if DIFFERENT than the fine // location provider mLocationManager.requestLocationUpdates(coarseProvider, 1000, 1000, mLocationListener_Coarse); } }
From source file:com.tremolosecurity.idp.providers.OpenIDConnectIdP.java
/**
 * OIDC IdP front-controller: dispatches on the servlet "action" attribute to one of the
 * discovery, JWKS, authorization, federation-completion, or userinfo endpoints.
 *
 * Actions handled:
 *  - ".well-known/openid-configuration": serves the discovery document as pretty-printed JSON.
 *  - "certs": serves the signing certificate as a JWKS (RS256, use=sig).
 *  - "auth": validates an authorization request (client_id, scope, redirect_uri) against the
 *    configured trust, stores an OpenIDConnectTransaction in the session, and either redirects
 *    to the final URL or sends the user into the configured authentication chain.
 *  - "completefed": delegates to completeFederation.
 *  - "userinfo": delegates to processUserInfoRequest.
 *
 * @param request  current request; IDP.ACTION_NAME and ProxyConstants.AUTOIDM_CFG attributes
 *                 must have been populated by the surrounding proxy framework
 * @param response response to write JSON or redirects to
 * @throws ServletException if the config holder is missing, key/JWT handling fails, or no
 *                          authentication chain is configured for the trust
 * @throws IOException      on response I/O failure
 */
public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException {
    String action = (String) request.getAttribute(IDP.ACTION_NAME);
    UrlHolder holder = (UrlHolder) request.getAttribute(ProxyConstants.AUTOIDM_CFG);
    if (holder == null) {
        // the proxy framework always sets this; a missing holder means a misrouted request
        throw new ServletException("Holder is null");
    }

    AuthController ac = ((AuthController) request.getSession().getAttribute(ProxyConstants.AUTH_CTL));

    if (action.equalsIgnoreCase(".well-known/openid-configuration")) {
        // OIDC discovery document, rebuilt per request from the current config
        Gson gson = new GsonBuilder().setPrettyPrinting().create();
        String json = gson.toJson(new OpenIDConnectConfig(this.idpName, request, mapper));
        response.setContentType("application/json");
        response.getWriter().print(json);

        AccessLog.log(AccessEvent.AzSuccess, holder.getApp(), (HttpServletRequest) request, ac.getAuthInfo(),
                "NONE");

        return;
    } else if (action.equalsIgnoreCase("certs")) {
        // JWKS endpoint: publish the public half of the JWT signing key
        try {
            X509Certificate cert = GlobalEntries.getGlobalEntries().getConfigManager()
                    .getCertificate(this.jwtSigningKeyName);

            JsonWebKey jwk = JsonWebKey.Factory.newJwk(cert.getPublicKey());

            String keyID = buildKID(cert);
            jwk.setKeyId(keyID);
            jwk.setUse("sig");
            jwk.setAlgorithm("RS256");
            response.setContentType("application/json");
            response.getWriter().print(new JsonWebKeySet(jwk).toJson());

            AccessLog.log(AccessEvent.AzSuccess, holder.getApp(), (HttpServletRequest) request, ac.getAuthInfo(),
                    "NONE");

            return;
        } catch (JoseException e) {
            throw new ServletException("Could not generate jwt", e);
        }

    } else if (action.equalsIgnoreCase("auth")) {
        // OIDC authorization endpoint
        String clientID = request.getParameter("client_id");
        String responseCode = request.getParameter("response_type");
        String scope = request.getParameter("scope");
        String redirectURI = request.getParameter("redirect_uri");
        String state = request.getParameter("state");
        String nonce = request.getParameter("nonce");

        // capture the request parameters in a session-scoped transaction object
        OpenIDConnectTransaction transaction = new OpenIDConnectTransaction();
        transaction.setClientID(clientID);
        transaction.setResponseCode(responseCode);
        transaction.setNonce(nonce);

        // scope is a space-separated list per the OAuth2 spec
        StringTokenizer toker = new StringTokenizer(scope, " ", false);
        while (toker.hasMoreTokens()) {
            String token = toker.nextToken();
            transaction.getScope().add(token);
        }

        transaction.setRedirectURI(redirectURI);
        transaction.setState(state);

        OpenIDConnectTrust trust = trusts.get(clientID);

        if (trust == null) {
            // unknown client: redirect back with the standard OAuth2 error code
            StringBuffer b = new StringBuffer();
            b.append(redirectURI).append("?error=unauthorized_client");
            logger.warn("Trust '" + clientID + "' not found");
            response.sendRedirect(b.toString());
            return;
        }

        if (trust.isVerifyRedirect()) {
            // strict mode: the presented redirect_uri must exactly match the configured one
            if (!trust.getRedirectURI().equals(redirectURI)) {
                StringBuffer b = new StringBuffer();
                b.append(trust.getRedirectURI()).append("?error=unauthorized_client");
                logger.warn("Invalid redirect");

                AccessLog.log(AccessEvent.AzFail, holder.getApp(), (HttpServletRequest) request, ac.getAuthInfo(),
                        "NONE");

                response.sendRedirect(b.toString());
                return;
            }

            transaction.setRedirectURI(trust.getRedirectURI());
        } else {
            transaction.setRedirectURI(redirectURI);
        }

        // OIDC requires "openid" to be the first scope
        if (transaction.getScope().size() == 0 || !transaction.getScope().get(0).equals("openid")) {
            StringBuffer b = new StringBuffer();
            b.append(transaction.getRedirectURI()).append("?error=invalid_scope");
            logger.warn("First scope not openid");
            AccessLog.log(AccessEvent.AzFail, holder.getApp(), (HttpServletRequest) request, ac.getAuthInfo(),
                    "NONE");
            response.sendRedirect(b.toString());
            return;
        } else {
            // we don't need the openid scope anymore
            transaction.getScope().remove(0);
        }

        String authChain = trust.getAuthChain();

        if (authChain == null) {
            // NOTE(review): "authenticaiton" is a typo in this runtime message; left as-is here
            // since this is a documentation-only pass
            StringBuffer b = new StringBuffer();
            b.append("IdP does not have an authenticaiton chain configured");
            throw new ServletException(b.toString());
        }

        HttpSession session = request.getSession();
        AuthInfo authData = ((AuthController) session.getAttribute(ProxyConstants.AUTH_CTL)).getAuthInfo();
        AuthChainType act = holder.getConfig().getAuthChains().get(authChain);
        session.setAttribute(OpenIDConnectIdP.TRANSACTION_DATA, transaction);

        // NOTE(review): && binds tighter than ||, so this parses as
        //   authData == null || (!authData.isAuthComplete() && !(authData.getAuthLevel() < act.getLevel()))
        // i.e. an incomplete auth at a *sufficient* level re-enters the chain, while an
        // incomplete auth at an insufficient level falls through to the step-up branch.
        // Confirm against the intended step-up semantics before changing.
        if (authData == null || !authData.isAuthComplete() && !(authData.getAuthLevel() < act.getLevel())) {
            nextAuth(request, response, session, false, act);
        } else {
            if (authData.getAuthLevel() < act.getLevel()) {
                // step up authentication, clear existing auth data
                session.removeAttribute(ProxyConstants.AUTH_CTL);
                holder.getConfig().createAnonUser(session);

                nextAuth(request, response, session, false, act);
            } else {
                // already authenticated at a sufficient level: send to the final URL
                StringBuffer b = genFinalURL(request);
                response.sendRedirect(b.toString());

                //TODO if session already exists extend the life of the id_token
            }
        }

    // NOTE(review): case-sensitive comparison, unlike every other branch which uses
    // equalsIgnoreCase — presumably intentional for an internal action name, but verify
    } else if (action.contentEquals("completefed")) {
        this.completeFederation(request, response);
    } else if (action.equalsIgnoreCase("userinfo")) {
        try {
            processUserInfoRequest(request, response);
        } catch (JoseException | InvalidJwtException e) {
            throw new ServletException("Could not process userinfo request", e);
        }
    }
}
From source file:de.intranda.goobi.plugins.CSICMixedImport.java
/**
 * Gets the corresponding image folders and if necessary constructs a suffix to the identifier
 * to distinguish this record from others with the same identifier. Returns a unique identifier
 * String for this record.
 *
 * Side effects: (re)populates the instance fields {@code imageDirs} and {@code pdfFiles}, and
 * sets {@code projectName} and {@code identifierSuffix} while scanning project folders.
 *
 * The record id is expected to be underscore-separated: an optional leading "M", the
 * identifier, optional pieceDesignation parts, and an optional trailing "V..." volume part
 * — assumption inferred from the parsing below; TODO confirm against the record producer.
 *
 * @param r the import record whose id is parsed and matched against export folders
 * @return the identifier (possibly suffixed with volume or pieceDesignation for uniqueness),
 *         or null if no matching image directories were found in any project folder
 */
private String parseImagefolder(Record r) {
    imageDirs = new ArrayList<File>();
    pdfFiles = new ArrayList<File>();

    // identifiers of metadata file
    String id = r.getId();
    String identifier = null;
    String pieceDesignation = null;
    String volumeIdentifier = null;

    String[] idParts = id.split("_");
    int first = 0;
    int last = idParts.length - 1;
    // NOTE(review): idParts.length was already read above, so the null half of this check can
    // never trigger (String.split never returns null anyway); only the length==0 half matters
    if (idParts == null || idParts.length == 0) {
        return null;
    }
    // a leading "M" part is skipped; the identifier is the first real part
    if (idParts[0].contentEquals("M") && idParts.length > 1) {
        first = 1;
    }
    identifier = idParts[first];
    if (idParts.length > first + 1) {
        // a trailing "V..." part is the volume identifier, not part of the pieceDesignation
        if (idParts[last].startsWith("V")) {
            volumeIdentifier = idParts[last];
            last--;
        }
        // everything between identifier and volume is joined back into the pieceDesignation
        for (int i = first + 1; i < last + 1; i++) {
            if (pieceDesignation == null) {
                pieceDesignation = idParts[i];
            } else if (idParts[i] != null && !idParts[i].isEmpty()) {
                pieceDesignation += ("_" + idParts[i]);
            }
        }
    }

    // getting matching image folders
    if (!exportFolder.isDirectory()) {
        logger.warn("export folder does not exist. Cannot copy image files");
        return identifier;
    }

    File[] projectFolders = exportFolder.listFiles();
    for (File folder : projectFolders) {
        // project folders are the ones named "00..." by convention — TODO confirm
        if (!folder.isDirectory() || !folder.getName().startsWith("00")) {
            continue;
        }
        projectName = folder.getName();
        identifierSuffix = null;
        logger.debug("Found project " + projectName);
        List<File> folders = Arrays.asList(folder.listFiles());
        // locate optional "tiff" and "pdf" subdirectories (case-insensitive)
        File tiffDir = null, pdfDir = null;
        for (File file : folders) {
            if (file.isDirectory() && file.getName().toLowerCase().contentEquals("tiff")) {
                logger.debug("found \"tiff\" directory in " + folder.getName());
                tiffDir = file;
            }
            if (file.isDirectory() && file.getName().toLowerCase().contentEquals("pdf")) {
                logger.debug("found \"pdf\" directory in " + folder.getName());
                pdfDir = file;
            }
        }

        List<File> processFolders = Arrays.asList(folder.listFiles());
        List<File> tiffFolders = null;
        if (tiffDir != null) {
            tiffFolders = Arrays.asList(tiffDir.listFiles());
        }
        ArrayList<File> idImageDirs = new ArrayList<File>();
        ArrayList<File> idRecordFiles = new ArrayList<File>();
        // Get image dirs matching the identifier
        for (File file : processFolders) {
            if (file.isDirectory() && file.getName().contains(identifier)) {
                idImageDirs.add(file);
                logger.debug("found export folder " + file.getName() + " in " + folder.getAbsolutePath());
            } else if (file.isFile() && file.getName().toLowerCase().endsWith(".xml")
                    && file.getName().contains(identifier)) {
                idRecordFiles.add(file);
            }
        }
        // also search inside the tiff directory, if present
        if (tiffFolders != null) {
            for (File file : tiffFolders) {
                if (file.isDirectory() && file.getName().contains(identifier)) {
                    idImageDirs.add(file);
                    logger.debug("found export folder " + file.getName() + " in " + tiffDir.getAbsolutePath());
                }
            }
        }
        // NOTE(review): idImageDirs is assigned just above and can never be null here
        if (idImageDirs == null || idImageDirs.isEmpty()) {
            continue;
        }
        // from these, get image dirs actually matching the metadata-file
        if (idImageDirs.size() == 1) {
            imageDirs.add(idImageDirs.get(0));
        } else if (idRecordFiles.size() == 1) {
            // there is only one record of this id, so get all matching imageDirs
            imageDirs = idImageDirs;
        } else {
            // several records share this id: narrow by pieceDesignation and volume
            for (File dir : idImageDirs) {
                if (pieceDesignation == null || dir.getName().contains("_" + pieceDesignation)) {
                    if (volumeIdentifier == null || dir.getName().contains("_" + volumeIdentifier)) {
                        imageDirs.add(dir);
                    }
                }
            }
        }

        // if necessary, attempt to get volumeNumber from image dir
        if (pieceDesignation != null && volumeIdentifier == null && imageDirs.size() == 1) {
            String imageDirName = imageDirs.get(0).getName();
            int pieceDesignationStartPos = imageDirName.lastIndexOf(pieceDesignation);
            // +1 skips the separator character after the pieceDesignation
            int pieceDesignationEndPos = pieceDesignationStartPos + pieceDesignation.length() + 1;
            if (pieceDesignationStartPos > 0 && pieceDesignationEndPos < imageDirName.length()) {
                String volumeString = imageDirName.substring(pieceDesignationEndPos);
                if (volumeString != null && !volumeString.isEmpty()) {
                    volumeIdentifier = volumeString;
                }
            }
        }

        // sort imageDirs by name
        if (imageDirs != null && !imageDirs.isEmpty()) {
            Collections.sort(imageDirs);
        }

        if (idImageDirs.size() > imageDirs.size()) {
            // there is at least one other process with the same identifier. Add volume or
            // pieceDesignation to this identifier ("V00" is treated as no real volume)
            if (volumeIdentifier != null && !volumeIdentifier.contentEquals("V00")) {
                identifierSuffix = volumeIdentifier;
            } else {
                identifierSuffix = pieceDesignation;
            }
        }

        String logString = "Found the following image directories:";
        if (imageDirs != null && !imageDirs.isEmpty()) {
            for (File file : imageDirs) {
                logString += ("\n" + file.getAbsolutePath());
            }
        } else {
            logString += "\nNONE";
        }
        logger.debug(logString);

        // get pdfFile if one exists
        if (pdfDir != null) {
            for (File file : pdfDir.listFiles(pdfFilter)) {
                if (file.getName().contains(identifier)) {
                    // pdf has the correct identifier, check if is matches the volume as well
                    if (idImageDirs.size() < 2 || imageDirs.size() == idImageDirs.size()
                            || (volumeIdentifier != null && file.getName().contains(volumeIdentifier))
                            || (pieceDesignation != null && file.getName().contains(pieceDesignation))) {
                        pdfFiles.add(file);
                        logger.debug("Found pdf-file " + file.getAbsolutePath());
                    }
                }
            }
        }

        // NOTE(review): returns inside the loop, so only the first matching project folder
        // is ever considered — presumably intentional, verify
        if (identifierSuffix != null) {
            return identifier + "_" + identifierSuffix;
        } else {
            return identifier;
        }
    }
    return null;
}
From source file:com.codename1.impl.android.AndroidImplementation.java
private void clearMediaDB(String lastId, String capturePath) { final String[] imageColumns = { MediaStore.Images.Media.DATA, MediaStore.Images.Media.DATE_TAKEN, MediaStore.Images.Media.SIZE, MediaStore.Images.Media._ID }; final String imageOrderBy = MediaStore.Images.Media._ID + " DESC"; final String imageWhere = MediaStore.Images.Media._ID + ">?"; final String[] imageArguments = { lastId }; Cursor imageCursor = getContext().getContentResolver().query(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, imageColumns, imageWhere, imageArguments, imageOrderBy); if (imageCursor.getCount() > 1) { while (imageCursor.moveToNext()) { int id = imageCursor.getInt(imageCursor.getColumnIndex(MediaStore.Images.Media._ID)); String path = imageCursor.getString(imageCursor.getColumnIndex(MediaStore.Images.Media.DATA)); Long takenTimeStamp = imageCursor .getLong(imageCursor.getColumnIndex(MediaStore.Images.Media.DATE_TAKEN)); Long size = imageCursor.getLong(imageCursor.getColumnIndex(MediaStore.Images.Media.SIZE)); if (path.contentEquals(capturePath)) { // Remove it ContentResolver cr = getContext().getContentResolver(); cr.delete(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, MediaStore.Images.Media._ID + "=?", new String[] { Long.toString(id) }); break; }/*from w w w . j a va 2s . c o m*/ } } imageCursor.close(); }