List of usage examples for org.apache.hadoop.conf.Configuration.setLong

public void setLong(String name, long value)

Sets the value of the name property to a long.
Parameters: name - the property name; value - the long value of the property.
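Before the sourced examples, a minimal sketch of the call pair (the property name my.example.timeout.ms is made up for illustration, not a real Hadoop key):

import org.apache.hadoop.conf.Configuration;

public class SetLongExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // setLong stores the value under the given name (internally as a string)
        conf.setLong("my.example.timeout.ms", 30000L); // hypothetical property name
        // getLong reads it back; the second argument is the default used when the key is absent
        long timeout = conf.getLong("my.example.timeout.ms", 10000L);
        System.out.println(timeout); // prints 30000
    }
}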
From source file: com.facebook.hiveio.output.FaultyCheckOutputSpecsTest.java
License: Apache License
@Test(expectedExceptions = IOException.class)
public void testExceptionAfterTooManyRetriesWhenTableDoesNotExist() throws Exception {
    // the metastore fails as many times as the retry budget allows, so the call never succeeds
    HiveMetastores.setTestClient(new FaultyThriftHiveMetastore(BackoffRetryTask.NUM_TRIES.getDefaultValue()));
    Configuration conf = new Configuration();
    // shorten the initial retry delay so the test runs quickly
    conf.setLong(BackoffRetryTask.INITIAL_RETRY_DELAY_MSEC.getKey(), 100);
    HiveOutputDescription outputDesc = new HiveOutputDescription();
    outputDesc.getTableDesc().setTableName("doesnt-exist");
    OutputConf outputConf = new OutputConf(conf, PROFILE_ID);
    outputConf.writeOutputDescription(outputDesc);
    HiveApiOutputFormat outputFormat = new HiveApiOutputFormat();
    outputFormat.setMyProfileId(PROFILE_ID);
    JobConf jobConf = new JobConf(conf);
    TaskAttemptContext taskContext = new HackTaskAttemptContext(jobConf, new TaskAttemptID());
    JobContext jobContext = new HackJobContext(jobConf, taskContext.getJobID());
    outputFormat.checkOutputSpecs(jobContext);
    fail(); // not reached: checkOutputSpecs should throw after exhausting its retries
}
From source file: com.facebook.hiveio.output.FaultyCheckOutputSpecsTest.java
License: Apache License
@Test
public void testRecoveredFromFailuresAfterRetries() throws Exception {
    // fail one time fewer than the retry budget, so the final attempt succeeds
    FaultyThriftHiveMetastore metastore = new FaultyThriftHiveMetastore(
            BackoffRetryTask.NUM_TRIES.getDefaultValue() - 1);
    Configuration conf = new Configuration();
    conf.setLong(BackoffRetryTask.INITIAL_RETRY_DELAY_MSEC.getKey(), 100);
    HiveOutputDescription outputDesc = new HiveOutputDescription();
    outputDesc.getTableDesc().setTableName("foo");
    OutputConf outputConf = new OutputConf(conf, PROFILE_ID);
    outputConf.writeOutputDescription(outputDesc);
    HiveApiOutputFormat outputFormat = new HiveApiOutputFormat();
    outputFormat.setMyProfileId(PROFILE_ID);
    JobConf jobConf = new JobConf(conf);
    TaskAttemptContext taskContext = new HackTaskAttemptContext(jobConf, new TaskAttemptID());
    JobContext jobContext = new HackJobContext(jobConf, taskContext.getJobID());
    HiveMetastores.setTestClient(metastore);
    outputFormat.checkOutputSpecs(jobContext);
    assertEquals(BackoffRetryTask.NUM_TRIES.getDefaultValue(), metastore.getNumCalls());
}
From source file: com.facebook.presto.hive.HdfsConfigurationUpdater.java
License: Apache License
public void updateConfiguration(Configuration config) {
    if (resourcePaths != null) {
        for (String resourcePath : resourcePaths) {
            config.addResource(new Path(resourcePath));
        }
    }

    // this is to prevent dfs client from doing reverse DNS lookups to determine whether nodes are rack local
    config.setClass("topology.node.switch.mapping.impl", NoOpDNSToSwitchMapping.class, DNSToSwitchMapping.class);

    if (socksProxy != null) {
        config.setClass("hadoop.rpc.socket.factory.class.default", SocksSocketFactory.class, SocketFactory.class);
        config.set("hadoop.socks.server", socksProxy.toString());
    }

    if (domainSocketPath != null) {
        config.setStrings("dfs.domain.socket.path", domainSocketPath);
    }

    // only enable short circuit reads if domain socket path is properly configured
    if (!config.get("dfs.domain.socket.path", "").trim().isEmpty()) {
        config.setBooleanIfUnset("dfs.client.read.shortcircuit", true);
    }

    config.setInt("dfs.socket.timeout", toIntExact(dfsTimeout.toMillis()));
    config.setInt("ipc.ping.interval", toIntExact(ipcPingInterval.toMillis()));
    config.setInt("ipc.client.connect.timeout", toIntExact(dfsConnectTimeout.toMillis()));
    config.setInt("ipc.client.connect.max.retries", dfsConnectMaxRetries);

    // re-map filesystem schemes to match Amazon Elastic MapReduce
    config.set("fs.s3.impl", PrestoS3FileSystem.class.getName());
    config.set("fs.s3a.impl", PrestoS3FileSystem.class.getName());
    config.set("fs.s3n.impl", PrestoS3FileSystem.class.getName());
    config.set("fs.s3bfs.impl", "org.apache.hadoop.fs.s3.S3FileSystem");

    // set AWS credentials for S3
    if (s3AwsAccessKey != null) {
        config.set(PrestoS3FileSystem.S3_ACCESS_KEY, s3AwsAccessKey);
        config.set("fs.s3bfs.awsAccessKeyId", s3AwsAccessKey);
    }
    if (s3AwsSecretKey != null) {
        config.set(PrestoS3FileSystem.S3_SECRET_KEY, s3AwsSecretKey);
        config.set("fs.s3bfs.awsSecretAccessKey", s3AwsSecretKey);
    }
    if (s3Endpoint != null) {
        config.set(PrestoS3FileSystem.S3_ENDPOINT, s3Endpoint);
        config.set("fs.s3bfs.Endpoint", s3Endpoint);
    }
    if (s3SignerType != null) {
        config.set(PrestoS3FileSystem.S3_SIGNER_TYPE, s3SignerType.getSignerType());
    }

    config.setInt("fs.cache.max-size", fileSystemMaxCacheSize);

    configureCompression(config, compressionCodec);

    // set config for S3
    config.setBoolean(PrestoS3FileSystem.S3_USE_INSTANCE_CREDENTIALS, s3UseInstanceCredentials);
    config.setBoolean(PrestoS3FileSystem.S3_SSL_ENABLED, s3SslEnabled);
    config.setBoolean(PrestoS3FileSystem.S3_SSE_ENABLED, s3SseEnabled);
    if (s3EncryptionMaterialsProvider != null) {
        config.set(PrestoS3FileSystem.S3_ENCRYPTION_MATERIALS_PROVIDER, s3EncryptionMaterialsProvider);
    }
    if (s3KmsKeyId != null) {
        config.set(PrestoS3FileSystem.S3_KMS_KEY_ID, s3KmsKeyId);
    }
    config.setInt(PrestoS3FileSystem.S3_MAX_CLIENT_RETRIES, s3MaxClientRetries);
    config.setInt(PrestoS3FileSystem.S3_MAX_ERROR_RETRIES, s3MaxErrorRetries);
    config.set(PrestoS3FileSystem.S3_MAX_BACKOFF_TIME, s3MaxBackoffTime.toString());
    config.set(PrestoS3FileSystem.S3_MAX_RETRY_TIME, s3MaxRetryTime.toString());
    config.set(PrestoS3FileSystem.S3_CONNECT_TIMEOUT, s3ConnectTimeout.toString());
    config.set(PrestoS3FileSystem.S3_SOCKET_TIMEOUT, s3SocketTimeout.toString());
    config.set(PrestoS3FileSystem.S3_STAGING_DIRECTORY, s3StagingDirectory.toString());
    config.setInt(PrestoS3FileSystem.S3_MAX_CONNECTIONS, s3MaxConnections);
    // multipart thresholds are sizes in bytes, hence setLong
    config.setLong(PrestoS3FileSystem.S3_MULTIPART_MIN_FILE_SIZE, s3MultipartMinFileSize.toBytes());
    config.setLong(PrestoS3FileSystem.S3_MULTIPART_MIN_PART_SIZE, s3MultipartMinPartSize.toBytes());
    config.setBoolean(PrestoS3FileSystem.S3_PIN_CLIENT_TO_CURRENT_REGION, pinS3ClientToCurrentRegion);
    config.set(PrestoS3FileSystem.S3_USER_AGENT_PREFIX, s3UserAgentPrefix);
}
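The two setLong calls above persist byte counts produced by DataSize.toBytes(). A minimal sketch of the same idea without the Airlift DataSize type; the property names are made up for illustration:

import org.apache.hadoop.conf.Configuration;

public class MultipartSizes {
    public static void apply(Configuration config) {
        // sizes are stored as raw byte counts, hence setLong rather than setInt
        config.setLong("example.s3.multipart.min-file-size", 16L * 1024 * 1024); // 16 MB, hypothetical key
        config.setLong("example.s3.multipart.min-part-size", 5L * 1024 * 1024);  // 5 MB, hypothetical key
    }
}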
From source file: com.facebook.presto.hive.s3.PrestoS3ConfigurationUpdater.java
License: Apache License
@Override
public void updateConfiguration(Configuration config) {
    // re-map filesystem schemes to match Amazon Elastic MapReduce
    config.set("fs.s3.impl", PrestoS3FileSystem.class.getName());
    config.set("fs.s3a.impl", PrestoS3FileSystem.class.getName());
    config.set("fs.s3n.impl", PrestoS3FileSystem.class.getName());

    if (awsAccessKey != null) {
        config.set(PrestoS3FileSystem.S3_ACCESS_KEY, awsAccessKey);
    }
    if (awsSecretKey != null) {
        config.set(PrestoS3FileSystem.S3_SECRET_KEY, awsSecretKey);
    }
    if (endpoint != null) {
        config.set(PrestoS3FileSystem.S3_ENDPOINT, endpoint);
    }
    if (signerType != null) {
        config.set(PrestoS3FileSystem.S3_SIGNER_TYPE, signerType.name());
    }
    config.setBoolean(PrestoS3FileSystem.S3_PATH_STYLE_ACCESS, pathStyleAccess);
    config.setBoolean(PrestoS3FileSystem.S3_USE_INSTANCE_CREDENTIALS, useInstanceCredentials);
    config.setBoolean(PrestoS3FileSystem.S3_SSL_ENABLED, sslEnabled);
    config.setBoolean(PrestoS3FileSystem.S3_SSE_ENABLED, sseEnabled);
    config.set(PrestoS3FileSystem.S3_SSE_TYPE, sseType.name());
    if (encryptionMaterialsProvider != null) {
        config.set(PrestoS3FileSystem.S3_ENCRYPTION_MATERIALS_PROVIDER, encryptionMaterialsProvider);
    }
    if (kmsKeyId != null) {
        config.set(PrestoS3FileSystem.S3_KMS_KEY_ID, kmsKeyId);
    }
    if (sseKmsKeyId != null) {
        config.set(PrestoS3FileSystem.S3_SSE_KMS_KEY_ID, sseKmsKeyId);
    }
    config.setInt(PrestoS3FileSystem.S3_MAX_CLIENT_RETRIES, maxClientRetries);
    config.setInt(PrestoS3FileSystem.S3_MAX_ERROR_RETRIES, maxErrorRetries);
    config.set(PrestoS3FileSystem.S3_MAX_BACKOFF_TIME, maxBackoffTime.toString());
    config.set(PrestoS3FileSystem.S3_MAX_RETRY_TIME, maxRetryTime.toString());
    config.set(PrestoS3FileSystem.S3_CONNECT_TIMEOUT, connectTimeout.toString());
    config.set(PrestoS3FileSystem.S3_SOCKET_TIMEOUT, socketTimeout.toString());
    config.set(PrestoS3FileSystem.S3_STAGING_DIRECTORY, stagingDirectory.toString());
    config.setInt(PrestoS3FileSystem.S3_MAX_CONNECTIONS, maxConnections);
    config.setLong(PrestoS3FileSystem.S3_MULTIPART_MIN_FILE_SIZE, multipartMinFileSize.toBytes());
    config.setLong(PrestoS3FileSystem.S3_MULTIPART_MIN_PART_SIZE, multipartMinPartSize.toBytes());
    config.setBoolean(PrestoS3FileSystem.S3_PIN_CLIENT_TO_CURRENT_REGION, pinClientToCurrentRegion);
    config.set(PrestoS3FileSystem.S3_USER_AGENT_PREFIX, userAgentPrefix);
}
From source file: com.flyhz.avengers.framework.application.AnalyzeApplication.java
License: Apache License
private void analyze() {
    LOG.info("initanalyze first ......");
    HConnection hConnection = null;
    HBaseAdmin hbaseAdmin = null;
    // HTable hVersion = null;
    HTable hPage = null;
    try {
        hConnection = HConnectionManager.createConnection(hbaseConf);
        hbaseAdmin = new HBaseAdmin(hConnection);
        Configuration configuration = HBaseConfiguration.create(hbaseConf);
        configuration.setLong("hbase.rpc.timeout", 600000);
        // scanner caching
        configuration.setLong("hbase.client.scanner.caching", 1000);
        // note: the longs above are set on a copy; hPage below is still built from hbaseConf
        hPage = new HTable(hbaseConf, AVTable.av_page.name());
        Scan scan = new Scan();
        scan.addColumn(Bytes.toBytes(AVFamily.i.name()), Bytes.toBytes(AVColumn.bid.name()));
        ResultScanner rs = hPage.getScanner(scan);
        for (Result result : rs) {
            for (Cell cell : result.rawCells()) {
                // note: the getXxxArray() accessors return whole backing arrays;
                // the cell's offsets/lengths are ignored here
                String url = Bytes.toString(cell.getRowArray());
                String family = Bytes.toString(cell.getFamilyArray());
                String column = Bytes.toString(cell.getQualifierArray());
                Long value = Bytes.toLong(cell.getValueArray());
                LOG.info("rowkey -> {},family -> {},column -> {},value ->{}", url, family, column, value);
                if ("preference".equals(family) && "batchId".equals(column) && value.equals(this.batchId)) {
                    urls.add(url);
                }
                if (urls.size() == 100) {
                    this.numTotalContainers = 100;
                    for (int i = 0; i < numTotalContainers; ++i) {
                        ContainerRequest containerAsk = setupContainerAskForRM();
                        amRMClient.addContainerRequest(containerAsk);
                    }
                    numRequestedContainers.set(numTotalContainers);
                    while (!done && (numCompletedContainers.get() != numTotalContainers)) {
                        try {
                            Thread.sleep(200);
                        } catch (InterruptedException ex) {
                        }
                    }
                }
            }
        }
    } catch (IOException e) {
        LOG.error("analyze", e);
    } catch (Throwable e) {
        LOG.error("analyze", e);
    } finally {
        if (hPage != null) {
            try { hPage.close(); } catch (IOException e) { LOG.error("", e); }
        }
        if (hbaseAdmin != null) {
            try { hbaseAdmin.close(); } catch (IOException e) { LOG.error("", e); }
        }
        if (hConnection != null) {
            try { hConnection.close(); } catch (IOException e) { LOG.error("", e); }
        }
    }
}
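As the comments note, the example tunes a Configuration copy that is never handed to the table. A minimal sketch of wiring the tuned copy through, reusing hbaseConf and AVTable from the example and assuming the same pre-1.0 HBase client API:

// assumes hbaseConf and AVTable as defined in the example above
Configuration tuned = HBaseConfiguration.create(hbaseConf);
tuned.setLong("hbase.rpc.timeout", 600000L);          // allow long-running scans (10 minutes)
tuned.setLong("hbase.client.scanner.caching", 1000L); // rows buffered per scanner RPC
HTable hPage = new HTable(tuned, AVTable.av_page.name()); // pass the tuned copy, not hbaseConf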
From source file: com.flyhz.avengers.framework.application.CrawlApplication.java
License: Apache License
@SuppressWarnings("unchecked") private void initHbase() { LOG.info("init hbase"); HConnection hConnection = null;// w ww . j av a 2 s. c o m HBaseAdmin hbaseAdmin = null; HTable hDomain = null; try { hConnection = HConnectionManager.createConnection(hbaseConf); hbaseAdmin = new HBaseAdmin(hConnection); // av_fetch if (!hbaseAdmin.tableExists(AVTable.av_fetch.name())) { LOG.info("table[av_fetch] create it"); HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(AVTable.av_fetch.name())); HColumnDescriptor info = new HColumnDescriptor(AVTable.AVFamily.i.name()); tableDesc.addFamily(info); hbaseAdmin.createTable(tableDesc); } // av_page if (!hbaseAdmin.tableExists(AVTable.av_page.name())) { LOG.info("table[av_page] create it"); HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(AVTable.av_page.name())); HColumnDescriptor info = new HColumnDescriptor(AVTable.AVFamily.i.name()); tableDesc.addFamily(info); hbaseAdmin.createTable(tableDesc); } // av_crawl if (hbaseAdmin.tableExists(AVTable.av_crawl.name())) { LOG.info("table[av_crawl] exist drop it"); if (hbaseAdmin.isTableEnabled(AVTable.av_crawl.name())) { hbaseAdmin.disableTable(AVTable.av_crawl.name()); } hbaseAdmin.deleteTable(Bytes.toBytes(AVTable.av_crawl.name())); HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(AVTable.av_crawl.name())); HColumnDescriptor info = new HColumnDescriptor(AVTable.AVFamily.i.name()); tableDesc.addFamily(info); LOG.info("table[av_crawl] dropped then create it"); hbaseAdmin.createTable(tableDesc); } else { LOG.info("table[av_crawl] create it"); HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(AVTable.av_crawl.name())); HColumnDescriptor info = new HColumnDescriptor(AVTable.AVFamily.i.name()); tableDesc.addFamily(info); hbaseAdmin.createTable(tableDesc); } if (!hbaseAdmin.tableExists(AVTable.av_domain.name())) { HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(AVTable.av_domain.name())); HColumnDescriptor info = new HColumnDescriptor(AVTable.AVFamily.i.name()); tableDesc.addFamily(info); hbaseAdmin.createTable(tableDesc); } if (!hbaseAdmin.tableExists(AVTable.av_color.name())) { HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(AVTable.av_color.name())); HColumnDescriptor info = new HColumnDescriptor(AVTable.AVFamily.i.name()); tableDesc.addFamily(info); hbaseAdmin.createTable(tableDesc); } Configuration configuration = HBaseConfiguration.create(hbaseConf); configuration.setLong("hbase.rpc.timeout", 600000); // Scan configuration.setLong("hbase.client.scanner.caching", 1000); Map<String, Object> context = XConfiguration.getAvengersContext(); hDomain = new HTable(hbaseConf, AVTable.av_domain.name()); for (String root : (Set<String>) context.get(XConfiguration.ROOTS)) { Map<String, Object> domainMap = (Map<String, Object>) context.get(root); Get hDomainGet = new Get(Bytes.toBytes(root)); hDomainGet.addColumn(Bytes.toBytes(AVFamily.i.name()), Bytes.toBytes(AVColumn.bid.name())); Result result = hDomain.get(hDomainGet); LOG.info("root[{}] result.isEmpty -> {}", root, result == null ? null : result.isEmpty()); if (result != null && !result.isEmpty()) { Cell cell = result.rawCells()[0]; Calendar calendar = Calendar.getInstance(); LOG.info("root[{}] av_domain.batchId -> ", root, Bytes.toLong(cell.getValueArray())); calendar.setTime(new Date(Bytes.toLong(cell.getValueArray()))); // ? 
Integer period = (Integer) domainMap.get(XConfiguration.CRAWL_PERIOD); calendar.add(Calendar.HOUR_OF_DAY, period.intValue()); LOG.info("root[{}] calendar -> {},current.version -> {}", calendar.getTimeInMillis(), batchId); if (calendar.after(new Date(batchId))) { continue; } } domainRootForCrawlSet.add(root); } } catch (IOException e) { LOG.error("", e); } catch (Throwable e) { LOG.error("", e); } finally { if (hbaseAdmin != null) { try { hbaseAdmin.close(); } catch (IOException e) { LOG.error("", e); } } if (hConnection != null) { try { hConnection.close(); } catch (IOException e) { LOG.error("", e); } } } }
From source file: com.flyhz.avengers.framework.application.FetchApplication.java
License: Apache License
private void fetch() {
    LOG.info("initFetch first ......");
    HConnection hConnection = null;
    HBaseAdmin hbaseAdmin = null;
    // HTable hVersion = null;
    HTable hPage = null;
    HTable hDomain = null;
    try {
        hConnection = HConnectionManager.createConnection(hbaseConf);
        hbaseAdmin = new HBaseAdmin(hConnection);
        Configuration configuration = HBaseConfiguration.create(hbaseConf);
        configuration.setLong("hbase.rpc.timeout", 1200000);
        // scanner caching
        configuration.setLong("hbase.client.scanner.caching", 1000);
        hPage = new HTable(hbaseConf, AVTable.av_page.name());
        Scan hPageScan = new Scan();
        hPageScan.addColumn(Bytes.toBytes(AVFamily.i.name()), Bytes.toBytes(AVColumn.bid.name()));
        ResultScanner hPageRs = hPage.getScanner(hPageScan);
        if (hPageRs != null) {
            Long startKey = 0L;
            Long endKey = 0L;
            for (Result result : hPageRs) {
                Long id = Bytes.toLong(result.getRow());
                Long batchId = Bytes.toLong(
                        result.getValue(Bytes.toBytes(AVFamily.i.name()), Bytes.toBytes(AVColumn.bid.name())));
                if (this.batchId == batchId) {
                    LOG.info("rowkey -> {},batchId ->{}", id, batchId);
                    startKey = id;
                    break;
                }
            }
            hDomain = new HTable(hbaseConf, AVTable.av_domain.name());
            Scan hDomainScan = new Scan();
            hDomainScan.addColumn(Bytes.toBytes(AVFamily.i.name()), Bytes.toBytes(AVColumn.maxid.name()));
            ResultScanner hDomainRs = hDomain.getScanner(hDomainScan);
            if (hDomainRs == null) {
                return;
            }
            for (Result result : hDomainRs) {
                Long value = Bytes.toLong(result.getValue(Bytes.toBytes(AVFamily.i.name()),
                        Bytes.toBytes(AVColumn.maxid.name())));
                if (endKey.compareTo(value) < 0) {
                    endKey = value;
                }
            }
            endKey = endKey + 1;
            LOG.info("startKey > {},endKey > {}", startKey, endKey);
            numTotalContainers = (Integer) XConfiguration.getAvengersContext()
                    .get(XConfiguration.NUM_FETCH_CONTAINERS);
            // split the [startKey, endKey] range evenly across the fetch containers
            Long size = (endKey - startKey + 1) / numTotalContainers;
            for (int i = 0; i < numTotalContainers; i++) {
                Long sk = startKey + size * i;
                Long ek = startKey + size * (i + 1);
                LOG.info("start > {} end > {}", sk, ek);
                XPairDto<Long, Long> pair = new XPairDto<Long, Long>(sk, ek);
                pairs.add(pair);
                ContainerRequest containerAsk = setupContainerAskForRM();
                amRMClient.addContainerRequest(containerAsk);
            }
            for (int i = 0; i < numTotalContainers; i++) {
                // ContainerRequest containerAsk = setupContainerAskForRM();
                // amRMClient.addContainerRequest(containerAsk);
            }
            numRequestedContainers.set(numTotalContainers);
            while (!done && (numCompletedContainers.get() != numTotalContainers)) {
                try {
                    Thread.sleep(200);
                } catch (InterruptedException ex) {
                }
            }
        }
    } catch (IOException e) {
        LOG.error("fetch", e);
    } catch (Throwable e) {
        LOG.error("fetch", e);
    } finally {
        if (hPage != null) {
            try { hPage.close(); } catch (IOException e) { LOG.error("", e); }
        }
        if (hbaseAdmin != null) {
            try { hbaseAdmin.close(); } catch (IOException e) { LOG.error("", e); }
        }
        if (hConnection != null) {
            try { hConnection.close(); } catch (IOException e) { LOG.error("", e); }
        }
    }
}
From source file: com.flyhz.avengers.framework.AvengersAppMaster.java
License: Apache License
private void initHbase() {
    LOG.info("init hbase");
    HConnection hConnection = null;
    HBaseAdmin hbaseAdmin = null;
    // HTable hVersion = null;
    HTable hPage = null;
    HTable hDomain = null;
    try {
        hConnection = HConnectionManager.createConnection(hbaseConf);
        hbaseAdmin = new HBaseAdmin(hConnection);
        if (hbaseAdmin.tableExists("av_page")) {
            hbaseAdmin.disableTable("av_page");
            hbaseAdmin.deleteTable(Bytes.toBytes("av_page"));
            HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf("av_page"));
            HColumnDescriptor info = new HColumnDescriptor("info");
            HColumnDescriptor preference = new HColumnDescriptor("preference");
            tableDesc.addFamily(info);
            tableDesc.addFamily(preference);
            hbaseAdmin.createTable(tableDesc);
        } else {
            HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf("av_page"));
            HColumnDescriptor info = new HColumnDescriptor("info");
            HColumnDescriptor preference = new HColumnDescriptor("preference");
            tableDesc.addFamily(info);
            tableDesc.addFamily(preference);
            hbaseAdmin.createTable(tableDesc);
        }
        if (!hbaseAdmin.tableExists("av_domain")) {
            HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf("av_domain"));
            HColumnDescriptor preference = new HColumnDescriptor("preference");
            tableDesc.addFamily(preference);
            hbaseAdmin.createTable(tableDesc);
        }
        if (!hbaseAdmin.tableExists("av_product")) {
            HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf("av_product"));
            HColumnDescriptor info = new HColumnDescriptor("info");
            HColumnDescriptor preference = new HColumnDescriptor("preference");
            tableDesc.addFamily(info);
            tableDesc.addFamily(preference);
            hbaseAdmin.createTable(tableDesc);
        }
        Configuration configuration = HBaseConfiguration.create(hbaseConf);
        configuration.setLong("hbase.rpc.timeout", 600000);
        // scanner caching
        configuration.setLong("hbase.client.scanner.caching", 1000);
        long version = System.currentTimeMillis();
        this.version = version;
        this.domainRootForCrawlSet.clear();
        Map<String, Object> context = XConfiguration.getAvengersContext();
        hDomain = new HTable(hbaseConf, "av_domain");
        for (String root : context.keySet()) {
            @SuppressWarnings("unchecked")
            Map<String, Object> domainMap = (Map<String, Object>) context.get(root);
            Get hDomainGet = new Get(Bytes.toBytes(root));
            hDomainGet.addColumn(Bytes.toBytes("preference"), Bytes.toBytes("batchId"));
            Result result = hDomain.get(hDomainGet);
            LOG.info("root[{}] result.isEmpty -> {}", root, result == null ? null : result.isEmpty());
            if (result != null && !result.isEmpty()) {
                Cell cell = result.rawCells()[0];
                Calendar calendar = Calendar.getInstance();
                LOG.info("root[{}] av_domain.version -> {}", root, Bytes.toLong(cell.getValueArray()));
                calendar.setTime(new Date(Bytes.toLong(cell.getValueArray())));
                Integer period = (Integer) domainMap.get(XConfiguration.CRAWL_PERIOD);
                calendar.add(Calendar.HOUR_OF_DAY, period.intValue());
                LOG.info("root[{}] calendar -> {},current.version -> {}", root, calendar.getTimeInMillis(), version);
                if (calendar.after(new Date(version))) {
                    continue;
                }
            }
            Put avDomainPut = new Put(Bytes.toBytes(root));
            // record the current version as this domain's batchId before crawling
            avDomainPut.add(Bytes.toBytes("preference"), Bytes.toBytes("batchId"), Bytes.toBytes(version));
            hDomain.put(avDomainPut);
            domainRootForCrawlSet.add(root);
        }
    } catch (IOException e) {
        LOG.error("", e);
    } catch (Throwable e) {
        LOG.error("", e);
    } finally {
        if (hPage != null) {
            try { hPage.close(); } catch (IOException e) { LOG.error("", e); }
        }
        if (hbaseAdmin != null) {
            try { hbaseAdmin.close(); } catch (IOException e) { LOG.error("", e); }
        }
        if (hConnection != null) {
            try { hConnection.close(); } catch (IOException e) { LOG.error("", e); }
        }
    }
}
From source file: com.flyhz.avengers.framework.AvengersAppMaster.java
License: Apache License
private void fetch() {
    LOG.info("initFetch first ......");
    initCommon();
    currentProcess = "fetch";
    HConnection hConnection = null;
    HBaseAdmin hbaseAdmin = null;
    // HTable hVersion = null;
    HTable hPage = null;
    try {
        hConnection = HConnectionManager.createConnection(hbaseConf);
        hbaseAdmin = new HBaseAdmin(hConnection);
        Configuration configuration = HBaseConfiguration.create(hbaseConf);
        configuration.setLong("hbase.rpc.timeout", 600000);
        // scanner caching
        configuration.setLong("hbase.client.scanner.caching", 1000);
        hPage = new HTable(hbaseConf, "av_page");
        Scan scan = new Scan();
        scan.addColumn(Bytes.toBytes("preference"), Bytes.toBytes("batchId"));
        ResultScanner rs = hPage.getScanner(scan);
        // Set<String> urls = new HashSet<String>();
        for (Result result : rs) {
            for (Cell cell : result.rawCells()) {
                String url = Bytes.toString(cell.getRowArray());
                String family = Bytes.toString(cell.getFamilyArray());
                String column = Bytes.toString(cell.getQualifierArray());
                Long value = Bytes.toLong(cell.getValueArray());
                LOG.info("rowkey -> {},family -> {},column -> {},value ->{}", url, family, column, value);
                if ("preference".equals(family) && "batchId".equals(column) && value.equals(this.version)) {
                    urls.add(url);
                }
                if (urls.size() == 100) {
                    this.numTotalContainers = 100;
                    for (int i = 0; i < numTotalContainers; ++i) {
                        ContainerRequest containerAsk = setupContainerAskForRM();
                        amRMClient.addContainerRequest(containerAsk);
                    }
                    numRequestedContainers.set(numTotalContainers);
                    while (!done && (numCompletedContainers.get() != numTotalContainers)) {
                        try {
                            Thread.sleep(200);
                        } catch (InterruptedException ex) {
                        }
                    }
                }
            }
        }
    } catch (IOException e) {
        LOG.error("fetch", e);
    } catch (Throwable e) {
        LOG.error("fetch", e);
    } finally {
        if (hPage != null) {
            try { hPage.close(); } catch (IOException e) { LOG.error("", e); }
        }
        if (hbaseAdmin != null) {
            try { hbaseAdmin.close(); } catch (IOException e) { LOG.error("", e); }
        }
        if (hConnection != null) {
            try { hConnection.close(); } catch (IOException e) { LOG.error("", e); }
        }
    }
}
From source file: com.github.sakserv.minicluster.impl.HbaseRestLocalCluster.java
License: Apache License
@Override
public void start() throws Exception {
    VersionInfo.logVersion();
    Configuration conf = builder.getHbaseConfiguration();
    conf.set("hbase.rest.port", hbaseRestPort.toString());
    conf.set("hbase.rest.readonly", (hbaseRestReadOnly == null) ? "true" : hbaseRestReadOnly.toString());
    conf.set("hbase.rest.info.port", (hbaseRestInfoPort == null) ? "8085" : hbaseRestInfoPort.toString());
    String hbaseRestHost = (this.hbaseRestHost == null) ? "0.0.0.0" : this.hbaseRestHost;
    Integer hbaseRestThreadMax = (this.hbaseRestThreadMax == null) ? 100 : this.hbaseRestThreadMax;
    Integer hbaseRestThreadMin = (this.hbaseRestThreadMin == null) ? 2 : this.hbaseRestThreadMin;

    UserProvider userProvider = UserProvider.instantiate(conf);
    Pair<FilterHolder, Class<? extends ServletContainer>> pair = loginServerPrincipal(userProvider, conf);
    FilterHolder authFilter = pair.getFirst();
    Class<? extends ServletContainer> containerClass = pair.getSecond();
    RESTServlet.getInstance(conf, userProvider);

    // set up the Jersey servlet container for Jetty
    ServletHolder sh = new ServletHolder(containerClass);
    sh.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
            ResourceConfig.class.getCanonicalName());
    sh.setInitParameter("com.sun.jersey.config.property.packages", "jetty");
    ServletHolder shPojoMap = new ServletHolder(containerClass);
    Map<String, String> shInitMap = sh.getInitParameters();
    for (Map.Entry<String, String> e : shInitMap.entrySet()) {
        shPojoMap.setInitParameter(e.getKey(), e.getValue());
    }
    shPojoMap.setInitParameter(JSONConfiguration.FEATURE_POJO_MAPPING, "true");

    // set up Jetty and run the embedded server
    server = new Server();
    Connector connector = new SelectChannelConnector();
    if (conf.getBoolean(RESTServer.REST_SSL_ENABLED, false)) {
        SslSelectChannelConnector sslConnector = new SslSelectChannelConnector();
        String keystore = conf.get(RESTServer.REST_SSL_KEYSTORE_STORE);
        String password = HBaseConfiguration.getPassword(conf, RESTServer.REST_SSL_KEYSTORE_PASSWORD, null);
        String keyPassword = HBaseConfiguration.getPassword(conf, RESTServer.REST_SSL_KEYSTORE_KEYPASSWORD,
                password);
        sslConnector.setKeystore(keystore);
        sslConnector.setPassword(password);
        sslConnector.setKeyPassword(keyPassword);
        connector = sslConnector;
    }
    connector.setPort(hbaseRestPort);
    connector.setHost(hbaseRestHost);
    connector.setHeaderBufferSize(8192);
    server.addConnector(connector);

    QueuedThreadPool threadPool = new QueuedThreadPool(hbaseRestThreadMax);
    threadPool.setMinThreads(hbaseRestThreadMin);
    server.setThreadPool(threadPool);
    server.setSendServerVersion(false);
    server.setSendDateHeader(false);
    server.setStopAtShutdown(true);

    // set up context
    Context context = new Context(server, "/", Context.SESSIONS);
    context.addServlet(shPojoMap, "/status/cluster");
    context.addServlet(sh, "/*");
    if (authFilter != null) {
        context.addFilter(authFilter, "/*", 1);
    }
    HttpServerUtil.constrainHttpMethods(context);

    // Put up info server.
    int port = (hbaseRestInfoPort == null) ? 8085 : hbaseRestInfoPort;
    if (port >= 0) {
        conf.setLong("startcode", System.currentTimeMillis());
        String a = hbaseRestHost;
        infoServer = new InfoServer("rest", a, port, false, conf);
        infoServer.setAttribute("hbase.conf", conf);
        infoServer.start();
    }

    // start server
    server.start();
}