
Java DeleteType Class Code Examples


This article collects typical usage examples of the Java type org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType. If you are wondering what DeleteType is for, how to use it, or are looking for concrete examples, the curated code samples below should help.



DeleteType is a protobuf-generated enum nested inside org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest. Twenty code examples of DeleteType are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code samples.
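
Before the examples, here is a minimal sketch of how a DeleteType value is set on a BulkDeleteRequest. It is not taken from any single example below, and the helper name buildRowDeleteRequest is made up for illustration. The builder calls (setScan, setDeleteType, setRowBatchSize, setTimestamp) and ProtobufUtil.toScan are the same ones used by the invokeBulkDeleteProtocol helpers in Examples 8, 11, and 15; the import paths assume the HBase 1.x-era API those tests target.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest;
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

// Hypothetical helper (not part of the examples below): builds a request that
// deletes whole rows in batches of 500. DeleteType selects the granularity:
// ROW (whole rows), FAMILY (a column family), COLUMN (a single column),
// VERSION (one cell version, optionally restricted to a given timestamp).
private BulkDeleteRequest buildRowDeleteRequest(Scan scan) throws IOException {
  BulkDeleteRequest.Builder builder = BulkDeleteRequest.newBuilder();
  builder.setScan(ProtobufUtil.toScan(scan));  // which rows/cells the delete will visit
  builder.setDeleteType(DeleteType.ROW);       // ROW, FAMILY, COLUMN or VERSION
  builder.setRowBatchSize(500);                // rows handled per batch on the region server
  // builder.setTimestamp(1234L);              // only meaningful for VERSION deletes
  return builder.build();                      // pass to BulkDeleteService.delete(...) via a coprocessor call
}

The built request is then sent with Table.coprocessorService(BulkDeleteService.class, startRow, stopRow, callable), as the invokeBulkDeleteProtocol helpers below demonstrate.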

Example 1: testBulkDeleteEndpoint

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteEndpoint() throws Throwable {
  TableName tableName = TableName.valueOf("testBulkDeleteEndpoint");
  Table ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    puts.add(createPut(rowkey, "v1"));
  }
  ht.put(puts);
  // Deleting all the rows.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, new Scan(), 5, DeleteType.ROW, null);
  assertEquals(100, noOfRowsDeleted);

  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(0, rows);
  ht.close();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 21 | Source: TestBulkDeleteProtocol.java


Example 2: testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion()
    throws Throwable {
  TableName tableName = TableName
      .valueOf("testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion");
  Table ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    puts.add(createPut(rowkey, "v1"));
  }
  ht.put(puts);
  // Deleting all the rows.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, new Scan(), 10, DeleteType.ROW, null);
  assertEquals(100, noOfRowsDeleted);

  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(0, rows);
  ht.close();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 23 | Source: TestBulkDeleteProtocol.java


Example 3: testBulkDeleteFamily

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteFamily() throws Throwable {
  TableName tableName = TableName.valueOf("testBulkDeleteFamily");
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(FAMILY1));
  htd.addFamily(new HColumnDescriptor(FAMILY2));
  TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5);
  Table ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    Put put = new Put(Bytes.toBytes(j));
    put.add(FAMILY1, QUALIFIER1, "v1".getBytes());
    put.add(FAMILY2, QUALIFIER2, "v2".getBytes());
    puts.add(put);
  }
  ht.put(puts);
  Scan scan = new Scan();
  scan.addFamily(FAMILY1);
  // Delete the column family cf1
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.FAMILY, null);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    assertTrue(result.getFamilyMap(FAMILY1).isEmpty());
    assertEquals(1, result.getColumnCells(FAMILY2, QUALIFIER2).size());
    rows++;
  }
  assertEquals(100, rows);
  ht.close();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 30 | Source: TestBulkDeleteProtocol.java


Example 4: testBulkDeleteEndpoint

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteEndpoint() throws Throwable {
  byte[] tableName = Bytes.toBytes("testBulkDeleteEndpoint");
  HTable ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    puts.add(createPut(rowkey, "v1"));
  }
  ht.put(puts);
  // Deleting all the rows.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, new Scan(), 5, DeleteType.ROW, null);
  assertEquals(100, noOfRowsDeleted);

  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(0, rows);
  ht.close();
}
 
Developer: tenggyut | Project: HIndex | Lines: 21 | Source: TestBulkDeleteProtocol.java


Example 5: testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion()
    throws Throwable {
  byte[] tableName = Bytes
      .toBytes("testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion");
  HTable ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    puts.add(createPut(rowkey, "v1"));
  }
  ht.put(puts);
  // Deleting all the rows.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, new Scan(), 10, DeleteType.ROW, null);
  assertEquals(100, noOfRowsDeleted);

  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(0, rows);
  ht.close();
}
 
Developer: tenggyut | Project: HIndex | Lines: 23 | Source: TestBulkDeleteProtocol.java


Example 6: testBulkDeleteEndpoint

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
@Test
public void testBulkDeleteEndpoint() throws Throwable {
  byte[] tableName = Bytes.toBytes("testBulkDeleteEndpoint");
  HTable ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    puts.add(createPut(rowkey, "v1"));
  }
  ht.put(puts);
  // Deleting all the rows.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, new Scan(), 5, DeleteType.ROW, null);
  assertEquals(100, noOfRowsDeleted);

  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(0, rows);
}
 
Developer: daidong | Project: DominoHBase | Lines: 21 | Source: TestBulkDeleteProtocol.java


Example 7: testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
@Test
public void testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion()
    throws Throwable {
  byte[] tableName = Bytes
      .toBytes("testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion");
  HTable ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    puts.add(createPut(rowkey, "v1"));
  }
  ht.put(puts);
  // Deleting all the rows.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, new Scan(), 10, DeleteType.ROW, null);
  assertEquals(100, noOfRowsDeleted);

  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(0, rows);
}
 
Developer: daidong | Project: DominoHBase | Lines: 23 | Source: TestBulkDeleteProtocol.java


Example 8: invokeBulkDeleteProtocol

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
private long invokeBulkDeleteProtocol(TableName tableName, final Scan scan, final int rowBatchSize,
    final DeleteType deleteType, final Long timeStamp) throws Throwable {
  Table ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
  long noOfDeletedRows = 0L;
  Batch.Call<BulkDeleteService, BulkDeleteResponse> callable =
    new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
    ServerRpcController controller = new ServerRpcController();
    BlockingRpcCallback<BulkDeleteResponse> rpcCallback =
      new BlockingRpcCallback<BulkDeleteResponse>();

    public BulkDeleteResponse call(BulkDeleteService service) throws IOException {
      Builder builder = BulkDeleteRequest.newBuilder();
      builder.setScan(ProtobufUtil.toScan(scan));
      builder.setDeleteType(deleteType);
      builder.setRowBatchSize(rowBatchSize);
      if (timeStamp != null) {
        builder.setTimestamp(timeStamp);
      }
      service.delete(controller, builder.build(), rpcCallback);
      return rpcCallback.get();
    }
  };
  Map<byte[], BulkDeleteResponse> result = ht.coprocessorService(BulkDeleteService.class, scan
      .getStartRow(), scan.getStopRow(), callable);
  for (BulkDeleteResponse response : result.values()) {
    noOfDeletedRows += response.getRowsDeleted();
  }
  ht.close();
  return noOfDeletedRows;
}
 
Developer: fengchen8086 | Project: ditb | Lines: 31 | Source: TestBulkDeleteProtocol.java


Example 9: testBulkDeleteWithConditionBasedDelete

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteWithConditionBasedDelete() throws Throwable {
  TableName tableName = TableName.valueOf("testBulkDeleteWithConditionBasedDelete");
  Table ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    String value = (j % 10 == 0) ? "v1" : "v2";
    puts.add(createPut(rowkey, value));
  }
  ht.put(puts);
  Scan scan = new Scan();
  FilterList fl = new FilterList(Operator.MUST_PASS_ALL);
  SingleColumnValueFilter scvf = new SingleColumnValueFilter(FAMILY1, QUALIFIER3,
      CompareOp.EQUAL, Bytes.toBytes("v1"));
  // fl.addFilter(new FirstKeyOnlyFilter());
  fl.addFilter(scvf);
  scan.setFilter(fl);
  // Deleting all the rows where cf1:c1=v1
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.ROW, null);
  assertEquals(10, noOfRowsDeleted);

  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(90, rows);
  ht.close();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 29 | Source: TestBulkDeleteProtocol.java


Example 10: testBulkDeleteColumn

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteColumn() throws Throwable {
  TableName tableName = TableName.valueOf("testBulkDeleteColumn");
  Table ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    String value = (j % 10 == 0) ? "v1" : "v2";
    puts.add(createPut(rowkey, value));
  }
  ht.put(puts);
  Scan scan = new Scan();
  scan.addColumn(FAMILY1, QUALIFIER2);
  // Delete the column cf1:col2
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.COLUMN, null);
  assertEquals(100, noOfRowsDeleted);

  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    assertEquals(2, result.getFamilyMap(FAMILY1).size());
    assertTrue(result.getColumnCells(FAMILY1, QUALIFIER2).isEmpty());
    assertEquals(1, result.getColumnCells(FAMILY1, QUALIFIER1).size());
    assertEquals(1, result.getColumnCells(FAMILY1, QUALIFIER3).size());
    rows++;
  }
  assertEquals(100, rows);
  ht.close();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 28 | Source: TestBulkDeleteProtocol.java


Example 11: invokeBulkDeleteProtocol

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
private long invokeBulkDeleteProtocol(byte[] tableName, final Scan scan, final int rowBatchSize,
    final DeleteType deleteType, final Long timeStamp) throws Throwable {
  HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
  long noOfDeletedRows = 0L;
  Batch.Call<BulkDeleteService, BulkDeleteResponse> callable =
    new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
    ServerRpcController controller = new ServerRpcController();
    BlockingRpcCallback<BulkDeleteResponse> rpcCallback =
      new BlockingRpcCallback<BulkDeleteResponse>();

    public BulkDeleteResponse call(BulkDeleteService service) throws IOException {
      Builder builder = BulkDeleteRequest.newBuilder();
      builder.setScan(ProtobufUtil.toScan(scan));
      builder.setDeleteType(deleteType);
      builder.setRowBatchSize(rowBatchSize);
      if (timeStamp != null) {
        builder.setTimestamp(timeStamp);
      }
      service.delete(controller, builder.build(), rpcCallback);
      return rpcCallback.get();
    }
  };
  Map<byte[], BulkDeleteResponse> result = ht.coprocessorService(BulkDeleteService.class, scan
      .getStartRow(), scan.getStopRow(), callable);
  for (BulkDeleteResponse response : result.values()) {
    noOfDeletedRows += response.getRowsDeleted();
  }
  ht.close();
  return noOfDeletedRows;
}
 
Developer: tenggyut | Project: HIndex | Lines: 31 | Source: TestBulkDeleteProtocol.java


Example 12: testBulkDeleteWithConditionBasedDelete

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteWithConditionBasedDelete() throws Throwable {
  byte[] tableName = Bytes.toBytes("testBulkDeleteWithConditionBasedDelete");
  HTable ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    String value = (j % 10 == 0) ? "v1" : "v2";
    puts.add(createPut(rowkey, value));
  }
  ht.put(puts);
  Scan scan = new Scan();
  FilterList fl = new FilterList(Operator.MUST_PASS_ALL);
  SingleColumnValueFilter scvf = new SingleColumnValueFilter(FAMILY1, QUALIFIER3,
      CompareOp.EQUAL, Bytes.toBytes("v1"));
  // fl.addFilter(new FirstKeyOnlyFilter());
  fl.addFilter(scvf);
  scan.setFilter(fl);
  // Deleting all the rows where cf1:c1=v1
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.ROW, null);
  assertEquals(10, noOfRowsDeleted);

  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(90, rows);
  ht.close();
}
 
Developer: tenggyut | Project: HIndex | Lines: 29 | Source: TestBulkDeleteProtocol.java


Example 13: testBulkDeleteColumn

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteColumn() throws Throwable {
  byte[] tableName = Bytes.toBytes("testBulkDeleteColumn");
  HTable ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    String value = (j % 10 == 0) ? "v1" : "v2";
    puts.add(createPut(rowkey, value));
  }
  ht.put(puts);
  Scan scan = new Scan();
  scan.addColumn(FAMILY1, QUALIFIER2);
  // Delete the column cf1:col2
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.COLUMN, null);
  assertEquals(100, noOfRowsDeleted);

  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    assertEquals(2, result.getFamilyMap(FAMILY1).size());
    assertTrue(result.getColumnCells(FAMILY1, QUALIFIER2).isEmpty());
    assertEquals(1, result.getColumnCells(FAMILY1, QUALIFIER1).size());
    assertEquals(1, result.getColumnCells(FAMILY1, QUALIFIER3).size());
    rows++;
  }
  assertEquals(100, rows);
  ht.close();
}
 
Developer: tenggyut | Project: HIndex | Lines: 28 | Source: TestBulkDeleteProtocol.java


Example 14: testBulkDeleteFamily

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteFamily() throws Throwable {
  byte[] tableName = Bytes.toBytes("testBulkDeleteFamily");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  htd.addFamily(new HColumnDescriptor(FAMILY1));
  htd.addFamily(new HColumnDescriptor(FAMILY2));
  TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5);
  HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    Put put = new Put(Bytes.toBytes(j));
    put.add(FAMILY1, QUALIFIER1, "v1".getBytes());
    put.add(FAMILY2, QUALIFIER2, "v2".getBytes());
    puts.add(put);
  }
  ht.put(puts);
  Scan scan = new Scan();
  scan.addFamily(FAMILY1);
  // Delete the column family cf1
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.FAMILY, null);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    assertTrue(result.getFamilyMap(FAMILY1).isEmpty());
    assertEquals(1, result.getColumnCells(FAMILY2, QUALIFIER2).size());
    rows++;
  }
  assertEquals(100, rows);
  ht.close();
}
 
Developer: tenggyut | Project: HIndex | Lines: 30 | Source: TestBulkDeleteProtocol.java


Example 15: invokeBulkDeleteProtocol

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
private long invokeBulkDeleteProtocol(byte[] tableName, final Scan scan, final int rowBatchSize,
    final DeleteType deleteType, final Long timeStamp) throws Throwable {
  HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
  long noOfDeletedRows = 0L;
  Batch.Call<BulkDeleteService, BulkDeleteResponse> callable = 
    new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
    ServerRpcController controller = new ServerRpcController();
    BlockingRpcCallback<BulkDeleteResponse> rpcCallback = 
      new BlockingRpcCallback<BulkDeleteResponse>();

    public BulkDeleteResponse call(BulkDeleteService service) throws IOException {
      Builder builder = BulkDeleteRequest.newBuilder();
      builder.setScan(ProtobufUtil.toScan(scan));
      builder.setDeleteType(deleteType);
      builder.setRowBatchSize(rowBatchSize);
      if (timeStamp != null) {
        builder.setTimestamp(timeStamp);
      }
      service.delete(controller, builder.build(), rpcCallback);
      return rpcCallback.get();
    }
  };
  Map<byte[], BulkDeleteResponse> result = ht.coprocessorService(BulkDeleteService.class, scan
      .getStartRow(), scan.getStopRow(), callable);
  for (BulkDeleteResponse response : result.values()) {
    noOfDeletedRows += response.getRowsDeleted();
  }
  return noOfDeletedRows;
}
 
Developer: daidong | Project: DominoHBase | Lines: 30 | Source: TestBulkDeleteProtocol.java


Example 16: testBulkDeleteWithConditionBasedDelete

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
@Test
public void testBulkDeleteWithConditionBasedDelete() throws Throwable {
  byte[] tableName = Bytes.toBytes("testBulkDeleteWithConditionBasedDelete");
  HTable ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    String value = (j % 10 == 0) ? "v1" : "v2";
    puts.add(createPut(rowkey, value));
  }
  ht.put(puts);
  Scan scan = new Scan();
  FilterList fl = new FilterList(Operator.MUST_PASS_ALL);
  SingleColumnValueFilter scvf = new SingleColumnValueFilter(FAMILY1, QUALIFIER3,
      CompareOp.EQUAL, Bytes.toBytes("v1"));
  // fl.addFilter(new FirstKeyOnlyFilter());
  fl.addFilter(scvf);
  scan.setFilter(fl);
  // Deleting all the rows where cf1:c1=v1
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.ROW, null);
  assertEquals(10, noOfRowsDeleted);

  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(90, rows);
}
 
Developer: daidong | Project: DominoHBase | Lines: 29 | Source: TestBulkDeleteProtocol.java


Example 17: testBulkDeleteColumn

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
@Test
public void testBulkDeleteColumn() throws Throwable {
  byte[] tableName = Bytes.toBytes("testBulkDeleteColumn");
  HTable ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    String value = (j % 10 == 0) ? "v1" : "v2";
    puts.add(createPut(rowkey, value));
  }
  ht.put(puts);
  Scan scan = new Scan();
  scan.addColumn(FAMILY1, QUALIFIER2);
  // Delete the column cf1:col2
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.COLUMN, null);
  assertEquals(100, noOfRowsDeleted);

  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    assertEquals(2, result.getFamilyMap(FAMILY1).size());
    assertTrue(result.getColumn(FAMILY1, QUALIFIER2).isEmpty());
    assertEquals(1, result.getColumn(FAMILY1, QUALIFIER1).size());
    assertEquals(1, result.getColumn(FAMILY1, QUALIFIER3).size());
    rows++;
  }
  assertEquals(100, rows);
}
 
Developer: daidong | Project: DominoHBase | Lines: 28 | Source: TestBulkDeleteProtocol.java


Example 18: testBulkDeleteFamily

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
@Test
public void testBulkDeleteFamily() throws Throwable {
  byte[] tableName = Bytes.toBytes("testBulkDeleteFamily");
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(FAMILY1));
  htd.addFamily(new HColumnDescriptor(FAMILY2));
  TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5);
  HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    Put put = new Put(Bytes.toBytes(j));
    put.add(FAMILY1, QUALIFIER1, "v1".getBytes());
    put.add(FAMILY2, QUALIFIER2, "v2".getBytes());
    puts.add(put);
  }
  ht.put(puts);
  Scan scan = new Scan();
  scan.addFamily(FAMILY1);
  // Delete the column family cf1
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.FAMILY, null);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    assertTrue(result.getFamilyMap(FAMILY1).isEmpty());
    assertEquals(1, result.getColumn(FAMILY2, QUALIFIER2).size());
    rows++;
  }
  assertEquals(100, rows);
}
 
Developer: daidong | Project: DominoHBase | Lines: 30 | Source: TestBulkDeleteProtocol.java


Example 19: testBulkDeleteColumnVersion

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteColumnVersion() throws Throwable {
  TableName tableName = TableName.valueOf("testBulkDeleteColumnVersion");
  Table ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    Put put = new Put(Bytes.toBytes(j));
    byte[] value = "v1".getBytes();
    put.add(FAMILY1, QUALIFIER1, 1234L, value);
    put.add(FAMILY1, QUALIFIER2, 1234L, value);
    put.add(FAMILY1, QUALIFIER3, 1234L, value);
    // Latest version values
    value = "v2".getBytes();
    put.add(FAMILY1, QUALIFIER1, value);
    put.add(FAMILY1, QUALIFIER2, value);
    put.add(FAMILY1, QUALIFIER3, value);
    put.add(FAMILY1, null, value);
    puts.add(put);
  }
  ht.put(puts);
  Scan scan = new Scan();
  scan.addFamily(FAMILY1);
  // Delete the latest version values of all the columns in family cf1.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.VERSION,
      HConstants.LATEST_TIMESTAMP);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  scan = new Scan();
  scan.setMaxVersions();
  for (Result result : ht.getScanner(scan)) {
    assertEquals(3, result.getFamilyMap(FAMILY1).size());
    List<Cell> column = result.getColumnCells(FAMILY1, QUALIFIER1);
    assertEquals(1, column.size());
    assertTrue(CellUtil.matchingValue(column.get(0), "v1".getBytes()));

    column = result.getColumnCells(FAMILY1, QUALIFIER2);
    assertEquals(1, column.size());
    assertTrue(CellUtil.matchingValue(column.get(0), "v1".getBytes()));

    column = result.getColumnCells(FAMILY1, QUALIFIER3);
    assertEquals(1, column.size());
    assertTrue(CellUtil.matchingValue(column.get(0), "v1".getBytes()));
    rows++;
  }
  assertEquals(100, rows);
  ht.close();
}
 
Developer: grokcoder | Project: pbase | Lines: 47 | Source: TestBulkDeleteProtocol.java


Example 20: testBulkDeleteColumnVersionBasedOnTS

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteColumnVersionBasedOnTS() throws Throwable {
  TableName tableName = TableName.valueOf("testBulkDeleteColumnVersionBasedOnTS");
  Table ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    Put put = new Put(Bytes.toBytes(j));
    // TS = 1000L
    byte[] value = "v1".getBytes();
    put.add(FAMILY1, QUALIFIER1, 1000L, value);
    put.add(FAMILY1, QUALIFIER2, 1000L, value);
    put.add(FAMILY1, QUALIFIER3, 1000L, value);
    // TS = 1234L
    value = "v2".getBytes();
    put.add(FAMILY1, QUALIFIER1, 1234L, value);
    put.add(FAMILY1, QUALIFIER2, 1234L, value);
    put.add(FAMILY1, QUALIFIER3, 1234L, value);
    // Latest version values
    value = "v3".getBytes();
    put.add(FAMILY1, QUALIFIER1, value);
    put.add(FAMILY1, QUALIFIER2, value);
    put.add(FAMILY1, QUALIFIER3, value);
    puts.add(put);
  }
  ht.put(puts);
  Scan scan = new Scan();
  scan.addColumn(FAMILY1, QUALIFIER3);
  // Delete the column cf1:c3's one version at TS=1234
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.VERSION, 1234L);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  scan = new Scan();
  scan.setMaxVersions();
  for (Result result : ht.getScanner(scan)) {
    assertEquals(3, result.getFamilyMap(FAMILY1).size());
    assertEquals(3, result.getColumnCells(FAMILY1, QUALIFIER1).size());
    assertEquals(3, result.getColumnCells(FAMILY1, QUALIFIER2).size());
    List<Cell> column = result.getColumnCells(FAMILY1, QUALIFIER3);
    assertEquals(2, column.size());
    assertTrue(CellUtil.matchingValue(column.get(0), "v3".getBytes()));
    assertTrue(CellUtil.matchingValue(column.get(1), "v1".getBytes()));
    rows++;
  }
  assertEquals(100, rows);
  ht.close();
}
 
Developer: grokcoder | Project: pbase | Lines: 46 | Source: TestBulkDeleteProtocol.java



Note: The org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their authors; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not republish without permission.

