
Java HTestConst Class Code Examples


This article collects typical usage examples of the Java class org.apache.hadoop.hbase.HTestConst. If you have been wondering what the HTestConst class does, how to use it, or where to find usage examples, the curated class code examples below may help.



The HTestConst class belongs to the org.apache.hadoop.hbase package. A total of 9 HTestConst code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
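
To orient the reader before the examples, the sketch below shows the HTestConst helpers that most of the examples lean on: the static makeNAscii(byte[] base, int n) method, which builds an array of n ASCII-suffixed keys from a base prefix, and the DEFAULT_* byte-array constants used as default row/qualifier/column-family names in HBase test code. This is a minimal, hypothetical sketch, assuming the HBase test artifacts that ship HTestConst are on the classpath; the exact generated key strings depend on the HTestConst defaults in your HBase version.

import org.apache.hadoop.hbase.HTestConst;
import org.apache.hadoop.hbase.util.Bytes;

public class HTestConstSketch {
  public static void main(String[] args) {
    // Build 5 row keys and 3 column qualifiers by appending ASCII suffixes
    // to the default prefixes defined in HTestConst.
    byte[][] rows = HTestConst.makeNAscii(HTestConst.DEFAULT_ROW_BYTES, 5);
    byte[][] qualifiers = HTestConst.makeNAscii(HTestConst.DEFAULT_QUALIFIER_BYTES, 3);

    // Print what was generated; exact values depend on HTestConst's defaults.
    for (byte[] row : rows) {
      System.out.println(Bytes.toString(row));
    }
    System.out.println(qualifiers.length + " qualifiers generated from prefix "
        + Bytes.toString(HTestConst.DEFAULT_QUALIFIER_BYTES));
  }
}

The test methods below follow the same pattern, except that they usually pass test-local constants such as ROW and QUALIFIER (defined in their test base classes) instead of the HTestConst defaults.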

Example 1: testSmallScan

import org.apache.hadoop.hbase.HTestConst; // import the required package/class
@Test
public void testSmallScan() throws Exception {
  TableName TABLE = TableName.valueOf("testSmallScan");

  int numRows = 10;
  byte[][] ROWS = HTestConst.makeNAscii(ROW, numRows);

  int numQualifiers = 10;
  byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, numQualifiers);

  Table ht = TEST_UTIL.createTable(TABLE, FAMILY);

  Put put;
  List<Put> puts = new ArrayList<Put>();
  for (int row = 0; row < ROWS.length; row++) {
    put = new Put(ROWS[row]);
    for (int qual = 0; qual < QUALIFIERS.length; qual++) {
      KeyValue kv = new KeyValue(ROWS[row], FAMILY, QUALIFIERS[qual], VALUE);
      put.add(kv);
    }
    puts.add(put);
  }
  ht.put(puts);

  int expectedRows = numRows;
  int expectedCols = numRows * numQualifiers;

  // Test normal and reversed
  testSmallScan(ht, true, expectedRows, expectedCols);
  testSmallScan(ht, false, expectedRows, expectedCols);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 32, Source: TestScannersFromClientSide.java


Example 2: testProcessBasedCluster

import org.apache.hadoop.hbase.HTestConst; // import the required package/class
public void testProcessBasedCluster() throws Exception {
  ProcessBasedLocalHBaseCluster cluster = new ProcessBasedLocalHBaseCluster(
      TEST_UTIL.getConfiguration(), 2, 3);
  cluster.startMiniDFS();
  cluster.startHBase();
  try {
    TEST_UTIL.createRandomTable(HTestConst.DEFAULT_TABLE_STR,
        HTestConst.DEFAULT_CF_STR_SET,
        HColumnDescriptor.DEFAULT_VERSIONS, COLS_PER_ROW, FLUSHES, NUM_REGIONS,
        ROWS_PER_FLUSH);
    Table table = new HTable(TEST_UTIL.getConfiguration(), HTestConst.DEFAULT_TABLE);
    ResultScanner scanner = table.getScanner(HTestConst.DEFAULT_CF_BYTES);
    Result result;
    int rows = 0;
    int cols = 0;
    while ((result = scanner.next()) != null) {
      ++rows;
      cols += result.getFamilyMap(HTestConst.DEFAULT_CF_BYTES).size();
    }
    LOG.info("Read " + rows + " rows, " + cols + " columns");
    scanner.close();
    table.close();

    // These numbers are deterministic, seeded by table name.
    assertEquals(19, rows);
    assertEquals(35, cols);
  } catch (Exception ex) {
    LOG.error(ex);
    throw ex;
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: grokcoder, Project: pbase, Lines: 34, Source: TestProcessBasedCluster.java


Example 3: testProcessBasedCluster

import org.apache.hadoop.hbase.HTestConst; // import the required package/class
public void testProcessBasedCluster() throws Exception {
  ProcessBasedLocalHBaseCluster cluster = new ProcessBasedLocalHBaseCluster(
      TEST_UTIL.getConfiguration(), 2, 3);
  cluster.startMiniDFS();
  cluster.startHBase();
  try {
    TEST_UTIL.createRandomTable(HTestConst.DEFAULT_TABLE_STR,
        HTestConst.DEFAULT_CF_STR_SET,
        HColumnDescriptor.DEFAULT_VERSIONS, COLS_PER_ROW, FLUSHES, NUM_REGIONS,
        ROWS_PER_FLUSH);
    HTable table = new HTable(TEST_UTIL.getConfiguration(), HTestConst.DEFAULT_TABLE_BYTES);
    ResultScanner scanner = table.getScanner(HTestConst.DEFAULT_CF_BYTES);
    Result result;
    int rows = 0;
    int cols = 0;
    while ((result = scanner.next()) != null) {
      ++rows;
      cols += result.getFamilyMap(HTestConst.DEFAULT_CF_BYTES).size();
    }
    LOG.info("Read " + rows + " rows, " + cols + " columns");
    scanner.close();
    table.close();

    // These numbers are deterministic, seeded by table name.
    assertEquals(19, rows);
    assertEquals(35, cols);
  } catch (Exception ex) {
    LOG.error(ex);
    throw ex;
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: tenggyut, Project: HIndex, Lines: 34, Source: TestProcessBasedCluster.java


Example 4: testSmallScan

import org.apache.hadoop.hbase.HTestConst; // import the required package/class
@Test
public void testSmallScan() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());

  int numRows = 10;
  byte[][] ROWS = HTestConst.makeNAscii(ROW, numRows);

  int numQualifiers = 10;
  byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, numQualifiers);

  Table ht = TEST_UTIL.createTable(tableName, FAMILY);

  Put put;
  List<Put> puts = new ArrayList<>();
  for (int row = 0; row < ROWS.length; row++) {
    put = new Put(ROWS[row]);
    for (int qual = 0; qual < QUALIFIERS.length; qual++) {
      KeyValue kv = new KeyValue(ROWS[row], FAMILY, QUALIFIERS[qual], VALUE);
      put.add(kv);
    }
    puts.add(put);
  }
  ht.put(puts);

  int expectedRows = numRows;
  int expectedCols = numRows * numQualifiers;

  // Test normal and reversed
  testSmallScan(ht, true, expectedRows, expectedCols);
  testSmallScan(ht, false, expectedRows, expectedCols);
}
 
Developer ID: apache, Project: hbase, Lines: 32, Source: TestScannersFromClientSide.java


Example 5: testMaxResultSizeIsSetToDefault

import org.apache.hadoop.hbase.HTestConst; // import the required package/class
@Test
public void testMaxResultSizeIsSetToDefault() throws Exception {
  TableName TABLE = TableName.valueOf("testMaxResultSizeIsSetToDefault");
  Table ht = TEST_UTIL.createTable(TABLE, FAMILY);

  // The max result size we expect the scan to use by default.
  long expectedMaxResultSize =
      TEST_UTIL.getConfiguration().getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
        HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);

  int numRows = 5;
  byte[][] ROWS = HTestConst.makeNAscii(ROW, numRows);

  int numQualifiers = 10;
  byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, numQualifiers);

  // Specify the cell size such that a single row will be larger than the default
  // value of maxResultSize. This means that Scan RPCs should return at most a single
  // result back to the client.
  int cellSize = (int) (expectedMaxResultSize / (numQualifiers - 1));
  byte[] cellValue = Bytes.createMaxByteArray(cellSize);

  Put put;
  List<Put> puts = new ArrayList<Put>();
  for (int row = 0; row < ROWS.length; row++) {
    put = new Put(ROWS[row]);
    for (int qual = 0; qual < QUALIFIERS.length; qual++) {
      KeyValue kv = new KeyValue(ROWS[row], FAMILY, QUALIFIERS[qual], cellValue);
      put.add(kv);
    }
    puts.add(put);
  }
  ht.put(puts);

  // Create a scan with the default configuration.
  Scan scan = new Scan();

  ResultScanner scanner = ht.getScanner(scan);
  assertTrue(scanner instanceof ClientScanner);
  ClientScanner clientScanner = (ClientScanner) scanner;

  // Call next to issue a single RPC to the server
  scanner.next();

  // The scanner should have, at most, a single result in its cache. If more results exist
  // in the cache, it means that more than the expected max result size was fetched.
  assertTrue("The cache contains: " + clientScanner.getCacheSize() + " results",
    clientScanner.getCacheSize() <= 1);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 50, Source: TestScannersFromClientSide.java


Example 6: testScanLimitAndOffset

import org.apache.hadoop.hbase.HTestConst; // import the required package/class
/**
 * Test from the client side for a scan with maxResultPerCF set
 *
 * @throws Exception
 */
@Test
public void testScanLimitAndOffset() throws Exception {
  //byte [] TABLE = HTestConst.DEFAULT_TABLE_BYTES;
  byte [][] ROWS = HTestConst.makeNAscii(HTestConst.DEFAULT_ROW_BYTES, 2);
  byte [][] FAMILIES = HTestConst.makeNAscii(HTestConst.DEFAULT_CF_BYTES, 3);
  byte [][] QUALIFIERS = HTestConst.makeNAscii(HTestConst.DEFAULT_QUALIFIER_BYTES, 10);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(HTestConst.DEFAULT_TABLE_BYTES));
  HRegionInfo info = new HRegionInfo(HTestConst.DEFAULT_TABLE, null, null, false);
  for (byte[] family : FAMILIES) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    htd.addFamily(hcd);
  }
  HRegion region =
      HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
  try {
    Put put;
    Scan scan;
    Result result;
    boolean toLog = true;

    List<Cell> kvListExp = new ArrayList<Cell>();

    int storeOffset = 1;
    int storeLimit = 3;
    for (int r = 0; r < ROWS.length; r++) {
      put = new Put(ROWS[r]);
      for (int c = 0; c < FAMILIES.length; c++) {
        for (int q = 0; q < QUALIFIERS.length; q++) {
          KeyValue kv = new KeyValue(ROWS[r], FAMILIES[c], QUALIFIERS[q], 1,
              HTestConst.DEFAULT_VALUE_BYTES);
          put.add(kv);
          if (storeOffset <= q && q < storeOffset + storeLimit) {
            kvListExp.add(kv);
          }
        }
      }
      region.put(put);
    }

    scan = new Scan();
    scan.setRowOffsetPerColumnFamily(storeOffset);
    scan.setMaxResultsPerColumnFamily(storeLimit);
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> kvListScan = new ArrayList<Cell>();
    List<Cell> results = new ArrayList<Cell>();
    while (scanner.next(results) || !results.isEmpty()) {
      kvListScan.addAll(results);
      results.clear();
    }
    result = Result.create(kvListScan);
    TestScannersFromClientSide.verifyResult(result, kvListExp, toLog,
        "Testing scan with storeOffset and storeLimit");
  } finally {
    region.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 63, Source: TestIntraRowPagination.java


Example 7: testMaxResultSizeIsSetToDefault

import org.apache.hadoop.hbase.HTestConst; // import the required package/class
@Test
public void testMaxResultSizeIsSetToDefault() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  Table ht = TEST_UTIL.createTable(tableName, FAMILY);

  // The max result size we expect the scan to use by default.
  long expectedMaxResultSize =
      TEST_UTIL.getConfiguration().getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
        HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);

  int numRows = 5;
  byte[][] ROWS = HTestConst.makeNAscii(ROW, numRows);

  int numQualifiers = 10;
  byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, numQualifiers);

  // Specify the cell size such that a single row will be larger than the default
  // value of maxResultSize. This means that Scan RPCs should return at most a single
  // result back to the client.
  int cellSize = (int) (expectedMaxResultSize / (numQualifiers - 1));
  byte[] cellValue = Bytes.createMaxByteArray(cellSize);

  Put put;
  List<Put> puts = new ArrayList<>();
  for (int row = 0; row < ROWS.length; row++) {
    put = new Put(ROWS[row]);
    for (int qual = 0; qual < QUALIFIERS.length; qual++) {
      KeyValue kv = new KeyValue(ROWS[row], FAMILY, QUALIFIERS[qual], cellValue);
      put.add(kv);
    }
    puts.add(put);
  }
  ht.put(puts);

  // Create a scan with the default configuration.
  Scan scan = new Scan();

  ResultScanner scanner = ht.getScanner(scan);
  assertTrue(scanner instanceof ClientScanner);
  ClientScanner clientScanner = (ClientScanner) scanner;

  // Call next to issue a single RPC to the server
  scanner.next();

  // The scanner should have, at most, a single result in its cache. If more results exist
  // in the cache, it means that more than the expected max result size was fetched.
  assertTrue("The cache contains: " + clientScanner.getCacheSize() + " results",
    clientScanner.getCacheSize() <= 1);
}
 
Developer ID: apache, Project: hbase, Lines: 50, Source: TestScannersFromClientSide.java


Example 8: testScanLimitAndOffset

import org.apache.hadoop.hbase.HTestConst; // import the required package/class
/**
 * Test from the client side for a scan with maxResultPerCF set
 *
 * @throws Exception
 */
@Test
public void testScanLimitAndOffset() throws Exception {
  //byte [] TABLE = HTestConst.DEFAULT_TABLE_BYTES;
  byte [][] ROWS = HTestConst.makeNAscii(HTestConst.DEFAULT_ROW_BYTES, 2);
  byte [][] FAMILIES = HTestConst.makeNAscii(HTestConst.DEFAULT_CF_BYTES, 3);
  byte [][] QUALIFIERS = HTestConst.makeNAscii(HTestConst.DEFAULT_QUALIFIER_BYTES, 10);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(HTestConst.DEFAULT_TABLE_BYTES));
  HRegionInfo info = new HRegionInfo(HTestConst.DEFAULT_TABLE, null, null, false);
  for (byte[] family : FAMILIES) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    htd.addFamily(hcd);
  }
  HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), htd);
  try {
    Put put;
    Scan scan;
    Result result;
    boolean toLog = true;

    List<Cell> kvListExp = new ArrayList<>();

    int storeOffset = 1;
    int storeLimit = 3;
    for (int r = 0; r < ROWS.length; r++) {
      put = new Put(ROWS[r]);
      for (int c = 0; c < FAMILIES.length; c++) {
        for (int q = 0; q < QUALIFIERS.length; q++) {
          KeyValue kv = new KeyValue(ROWS[r], FAMILIES[c], QUALIFIERS[q], 1,
              HTestConst.DEFAULT_VALUE_BYTES);
          put.add(kv);
          if (storeOffset <= q && q < storeOffset + storeLimit) {
            kvListExp.add(kv);
          }
        }
      }
      region.put(put);
    }

    scan = new Scan();
    scan.setRowOffsetPerColumnFamily(storeOffset);
    scan.setMaxResultsPerColumnFamily(storeLimit);
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> kvListScan = new ArrayList<>();
    List<Cell> results = new ArrayList<>();
    while (scanner.next(results) || !results.isEmpty()) {
      kvListScan.addAll(results);
      results.clear();
    }
    result = Result.create(kvListScan);
    TestScannersFromClientSide.verifyResult(result, kvListExp, toLog,
        "Testing scan with storeOffset and storeLimit");
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}
 
Developer ID: apache, Project: hbase, Lines: 63, Source: TestIntraRowPagination.java


Example 9: testScanLimitAndOffset

import org.apache.hadoop.hbase.HTestConst; // import the required package/class
/**
 * Test from the client side for a scan with maxResultPerCF set
 *
 * @throws Exception
 */
@Test
public void testScanLimitAndOffset() throws Exception {
  //byte [] TABLE = HTestConst.DEFAULT_TABLE_BYTES;
  byte [][] ROWS = HTestConst.makeNAscii(HTestConst.DEFAULT_ROW_BYTES, 2);
  byte [][] FAMILIES = HTestConst.makeNAscii(HTestConst.DEFAULT_CF_BYTES, 3);
  byte [][] QUALIFIERS = HTestConst.makeNAscii(HTestConst.DEFAULT_QUALIFIER_BYTES, 10);

  HTableDescriptor htd = new HTableDescriptor(HTestConst.DEFAULT_TABLE_BYTES);
  HRegionInfo info = new HRegionInfo(HTestConst.DEFAULT_TABLE_BYTES, null, null, false);
  for (byte[] family : FAMILIES) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    htd.addFamily(hcd);
  }
  HRegion region =
      HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
  try {
    Put put;
    Scan scan;
    Result result;
    boolean toLog = true;

    List<KeyValue> kvListExp = new ArrayList<KeyValue>();

    int storeOffset = 1;
    int storeLimit = 3;
    for (int r = 0; r < ROWS.length; r++) {
      put = new Put(ROWS[r]);
      for (int c = 0; c < FAMILIES.length; c++) {
        for (int q = 0; q < QUALIFIERS.length; q++) {
          KeyValue kv = new KeyValue(ROWS[r], FAMILIES[c], QUALIFIERS[q], 1,
              HTestConst.DEFAULT_VALUE_BYTES);
          put.add(kv);
          if (storeOffset <= q && q < storeOffset + storeLimit) {
            kvListExp.add(kv);
          }
        }
      }
      region.put(put);
    }

    scan = new Scan();
    scan.setRowOffsetPerColumnFamily(storeOffset);
    scan.setMaxResultsPerColumnFamily(storeLimit);
    RegionScanner scanner = region.getScanner(scan);
    List<KeyValue> kvListScan = new ArrayList<KeyValue>();
    List<KeyValue> results = new ArrayList<KeyValue>();
    while (scanner.next(results) || !results.isEmpty()) {
      kvListScan.addAll(results);
      results.clear();
    }
    result = new Result(kvListScan);
    TestScannersFromClientSide.verifyResult(result, kvListExp, toLog,
        "Testing scan with storeOffset and storeLimit");
  } finally {
    region.close();
  }
}
 
Developer ID: daidong, Project: DominoHBase, Lines: 63, Source: TestIntraRowPagination.java



Note: The org.apache.hadoop.hbase.HTestConst class examples in this article were collected from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with its original authors. Please follow the corresponding project's license when distributing or using the code, and do not repost without permission.

