本文整理汇总了Java中org.apache.hadoop.hbase.TableDescriptors类的典型用法代码示例。如果您正苦于以下问题:Java TableDescriptors类的具体用法?Java TableDescriptors怎么用?Java TableDescriptors使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
TableDescriptors类属于org.apache.hadoop.hbase包,在下文中一共展示了TableDescriptors类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: Context
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
@InterfaceAudience.Private
public Context(
    final Configuration conf,
    final FileSystem fs,
    final ReplicationPeerConfig peerConfig,
    final String peerId,
    final UUID clusterId,
    final ReplicationPeer replicationPeer,
    final MetricsSource metrics,
    final TableDescriptors tableDescriptors) {
  // Plain value-holder constructor: every collaborator is copied straight
  // into its corresponding field, no validation or transformation.
  this.conf = conf;
  this.fs = fs;
  this.peerConfig = peerConfig;
  this.peerId = peerId;
  this.clusterId = clusterId;
  this.replicationPeer = replicationPeer;
  this.metrics = metrics;
  this.tableDescriptors = tableDescriptors;
}
开发者ID:fengchen8086,项目名称:ditb,代码行数:20,代码来源:ReplicationEndpoint.java
示例2: RegionReplicaOutputSink
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
public RegionReplicaOutputSink(PipelineController controller, TableDescriptors tableDescriptors,
    EntryBuffers entryBuffers, ClusterConnection connection, ExecutorService pool,
    int numWriters, int operationTimeout) {
  super(controller, entryBuffers, numWriters);
  this.sinkWriter = new RegionReplicaSinkWriter(this, connection, pool, operationTimeout);
  this.tableDescriptors = tableDescriptors;

  // Cache of the per-table "memstore replication enabled" flag. Entries expire
  // (5s by default), so an ALTER TABLE that flips the flag may go unnoticed for
  // that window, but we avoid the slow TableDescriptor lookup/parse on every entry.
  int expiryMs = connection.getConfiguration().getInt(
      "hbase.region.replica.replication.cache.memstoreReplicationEnabled.expiryMs", 5000);
  this.memstoreReplicationEnabled = CacheBuilder.newBuilder()
      .initialCapacity(10)
      .maximumSize(1000)
      .expireAfterWrite(expiryMs, TimeUnit.MILLISECONDS)
      .build();
}
开发者ID:fengchen8086,项目名称:ditb,代码行数:20,代码来源:RegionReplicaReplicationEndpoint.java
示例3: testRegionOpenFailsDueToIOException
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
/**
 * When tableDescriptors.get() inside openRegion() fails with an IOException,
 * the region must not be left behind in the regionsInTransitionInRS map.
 * @throws Exception
 */
@Test
public void testRegionOpenFailsDueToIOException() throws Exception {
  HRegionInfo hri = new HRegionInfo(TableName.valueOf("t"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
  HRegionServer rs = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  // Swap in a mock TableDescriptors whose get() always throws.
  TableDescriptors failing = Mockito.mock(TableDescriptors.class);
  Mockito.doThrow(new IOException()).when(failing).get((TableName) Mockito.any());
  Object saved = Whitebox.getInternalState(rs, "tableDescriptors");
  Whitebox.setInternalState(rs, "tableDescriptors", failing);
  try {
    ProtobufUtil.openRegion(null, rs.getRSRpcServices(), rs.getServerName(), hri);
    fail("It should throw IOException ");
  } catch (IOException expected) {
    // expected: the open must fail
  }
  Whitebox.setInternalState(rs, "tableDescriptors", saved);
  assertFalse("Region should not be in RIT",
      rs.getRegionsInTransitionInRS().containsKey(hri.getEncodedNameAsBytes()));
}
开发者ID:fengchen8086,项目名称:ditb,代码行数:25,代码来源:TestZKBasedOpenCloseRegion.java
示例4: testRegionOpenFailsDueToIOException
/**
 * Verifies that a region whose open fails (tableDescriptors.get() throwing an
 * IOException inside openRegion()) does not linger in regionsInTransitionInRS.
 * @throws Exception
 */
@Test
public void testRegionOpenFailsDueToIOException() throws Exception {
  HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  HRegionInfo info = new HRegionInfo(TableName.valueOf("t"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
  // Replace the server's table descriptors with a mock that fails every get().
  TableDescriptors brokenDescriptors = Mockito.mock(TableDescriptors.class);
  Mockito.doThrow(new IOException()).when(brokenDescriptors).get((TableName) Mockito.any());
  Object previous = Whitebox.getInternalState(server, "tableDescriptors");
  Whitebox.setInternalState(server, "tableDescriptors", brokenDescriptors);
  try {
    ProtobufUtil.openRegion(server.getRSRpcServices(), server.getServerName(), info);
    fail("It should throw IOException ");
  } catch (IOException expected) {
    // the open is supposed to fail
  }
  Whitebox.setInternalState(server, "tableDescriptors", previous);
  assertFalse("Region should not be in RIT",
      server.getRegionsInTransitionInRS().containsKey(info.getEncodedNameAsBytes()));
}
开发者ID:grokcoder,项目名称:pbase,代码行数:25,代码来源:TestZKBasedOpenCloseRegion.java
示例5: testRegionOpenFailsDueToIOException
/**
 * A failed openRegion() (IOException from tableDescriptors.get()) must not
 * leave the region registered in the regionsInTransitionInRS map.
 * @throws Exception
 */
@Test
public void testRegionOpenFailsDueToIOException() throws Exception {
  HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf("t"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
  HRegionServer rs = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  // Mock descriptors that throw on every lookup, injected via reflection.
  TableDescriptors mockDescriptors = Mockito.mock(TableDescriptors.class);
  Mockito.doThrow(new IOException()).when(mockDescriptors).get((TableName) Mockito.any());
  Object original = Whitebox.getInternalState(rs, "tableDescriptors");
  Whitebox.setInternalState(rs, "tableDescriptors", mockDescriptors);
  try {
    ProtobufUtil.openRegion(rs, rs.getServerName(), regionInfo);
    fail("It should throw IOException ");
  } catch (IOException expected) {
    // expected failure path
  }
  Whitebox.setInternalState(rs, "tableDescriptors", original);
  assertFalse("Region should not be in RIT",
      rs.getRegionsInTransitionInRS().containsKey(regionInfo.getEncodedNameAsBytes()));
}
开发者ID:tenggyut,项目名称:HIndex,代码行数:24,代码来源:TestZKBasedOpenCloseRegion.java
示例6: Context
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
@InterfaceAudience.Private
public Context(
    final Configuration localConf,
    final Configuration conf,
    final FileSystem fs,
    final String peerId,
    final UUID clusterId,
    final ReplicationPeer replicationPeer,
    final MetricsSource metrics,
    final TableDescriptors tableDescriptors,
    final Abortable abortable) {
  // Pure data-carrier constructor: store each dependency on its field verbatim.
  this.localConf = localConf;
  this.conf = conf;
  this.fs = fs;
  this.peerId = peerId;
  this.clusterId = clusterId;
  this.replicationPeer = replicationPeer;
  this.metrics = metrics;
  this.tableDescriptors = tableDescriptors;
  this.abortable = abortable;
}
开发者ID:apache,项目名称:hbase,代码行数:22,代码来源:ReplicationEndpoint.java
示例7: fixTableStates
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
public static void fixTableStates(TableDescriptors tableDescriptors, Connection connection)
throws IOException {
final Map<String, TableDescriptor> allDescriptors =
tableDescriptors.getAllDescriptors();
final Map<String, TableState> states = new HashMap<>();
MetaTableAccessor.fullScanTables(connection, new MetaTableAccessor.Visitor() {
@Override
public boolean visit(Result r) throws IOException {
TableState state = MetaTableAccessor.getTableState(r);
if (state != null)
states.put(state.getTableName().getNameAsString(), state);
return true;
}
});
for (Map.Entry<String, TableDescriptor> entry : allDescriptors.entrySet()) {
String table = entry.getKey();
if (table.equals(TableName.META_TABLE_NAME.getNameAsString())) {
continue;
}
if (!states.containsKey(table)) {
LOG.warn(table + " has no state, assuming ENABLED");
MetaTableAccessor.updateTableState(connection, TableName.valueOf(table),
TableState.State.ENABLED);
}
}
}
开发者ID:apache,项目名称:hbase,代码行数:27,代码来源:TableStateManager.java
示例8: testRegionOpenFailsDueToIOException
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
/**
* If region open fails with IOException in openRegion() while doing tableDescriptors.get()
* the region should not add into regionsInTransitionInRS map
* @throws Exception
*/
@Test
public void testRegionOpenFailsDueToIOException() throws Exception {
HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("t"),
HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
TableDescriptors htd = Mockito.mock(TableDescriptors.class);
Object orizinalState = Whitebox.getInternalState(regionServer,"tableDescriptors");
Whitebox.setInternalState(regionServer, "tableDescriptors", htd);
Mockito.doThrow(new IOException()).when(htd).get((TableName) Mockito.any());
try {
ProtobufUtil.openRegion(regionServer, REGIONINFO);
fail("It should throw IOException ");
} catch (IOException e) {
}
Whitebox.setInternalState(regionServer, "tableDescriptors", orizinalState);
assertFalse("Region should not be in RIT",
regionServer.getRegionsInTransitionInRS().containsKey(REGIONINFO.getEncodedNameAsBytes()));
}
开发者ID:cloud-software-foundation,项目名称:c5,代码行数:24,代码来源:TestZKBasedOpenCloseRegion.java
示例9: testRegionOpenFailsDueToIOException
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
/**
* If region open fails with IOException in openRegion() while doing tableDescriptors.get()
* the region should not add into regionsInTransitionInRS map
* @throws Exception
*/
@Test
public void testRegionOpenFailsDueToIOException() throws Exception {
HRegionInfo REGIONINFO = new HRegionInfo(Bytes.toBytes("t"),
HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
TableDescriptors htd = Mockito.mock(TableDescriptors.class);
Object orizinalState = Whitebox.getInternalState(regionServer,"tableDescriptors");
Whitebox.setInternalState(regionServer, "tableDescriptors", htd);
Mockito.doThrow(new IOException()).when(htd).get((byte[]) Mockito.any());
try {
ProtobufUtil.openRegion(regionServer, REGIONINFO);
fail("It should throw IOException ");
} catch (IOException e) {
}
Whitebox.setInternalState(regionServer, "tableDescriptors", orizinalState);
assertFalse("Region should not be in RIT",
regionServer.getRegionsInTransitionInRS().containsKey(REGIONINFO.getEncodedNameAsBytes()));
}
开发者ID:daidong,项目名称:DominoHBase,代码行数:24,代码来源:TestZKBasedOpenCloseRegion.java
示例10: testRemoves
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
@Test
public void testRemoves() throws IOException {
  final String name = "testRemoves";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Per-test root dir so leftovers from earlier runs can't interfere.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors descriptors = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(name));
  descriptors.add(descriptor);
  // First remove hands back the descriptor; the second finds nothing.
  assertNotNull(descriptors.remove(descriptor.getTableName()));
  assertNull(descriptors.remove(descriptor.getTableName()));
}
开发者ID:fengchen8086,项目名称:ditb,代码行数:13,代码来源:TestFSTableDescriptors.java
示例11: testNoSuchTable
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
@Test
public void testNoSuchTable() throws IOException {
  final String name = "testNoSuchTable";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Fresh per-test root dir keeps old test artifacts out of the way.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors descriptors = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  // Looking up a table that was never added must yield null, not an error.
  assertNull("There shouldn't be any HTD for this table",
      descriptors.get(TableName.valueOf("NoSuchTable")));
}
开发者ID:fengchen8086,项目名称:ditb,代码行数:10,代码来源:TestFSTableDescriptors.java
示例12: testUpdates
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
@Test
public void testUpdates() throws IOException {
  final String name = "testUpdates";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Isolated per-test root dir; stale files from prior runs are irrelevant.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors descriptors = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(name));
  // Adding the same descriptor repeatedly must succeed (add acts as update).
  for (int i = 0; i < 3; i++) {
    descriptors.add(descriptor);
  }
}
开发者ID:fengchen8086,项目名称:ditb,代码行数:13,代码来源:TestFSTableDescriptors.java
示例13: testRemoves
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
@Test
public void testRemoves() throws IOException {
  final String name = "testRemoves";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Dedicated root dir per test; old artifacts cannot leak in.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
  HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(name));
  htds.add(tableDescriptor);
  // Removing twice: first call returns the descriptor, second returns null.
  assertNotNull(htds.remove(tableDescriptor.getTableName()));
  assertNull(htds.remove(tableDescriptor.getTableName()));
}
开发者ID:tenggyut,项目名称:HIndex,代码行数:13,代码来源:TestFSTableDescriptors.java
示例14: testNoSuchTable
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
@Test
public void testNoSuchTable() throws IOException {
  final String name = "testNoSuchTable";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // A test-specific root dir keeps runs independent of each other.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
  // An unknown table name must resolve to null rather than throwing.
  assertNull("There shouldn't be any HTD for this table",
      htds.get(TableName.valueOf("NoSuchTable")));
}
开发者ID:tenggyut,项目名称:HIndex,代码行数:11,代码来源:TestFSTableDescriptors.java
示例15: testUpdates
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
@Test
public void testUpdates() throws IOException {
  final String name = "testUpdates";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Work under a test-local root dir so stale state cannot interfere.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
  HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(name));
  // Repeated adds of the same descriptor are treated as updates and must not fail.
  for (int i = 0; i < 3; i++) {
    htds.add(tableDescriptor);
  }
}
开发者ID:tenggyut,项目名称:HIndex,代码行数:13,代码来源:TestFSTableDescriptors.java
示例16: initAndStartReplicationEndpoint
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
private void initAndStartReplicationEndpoint(ReplicationEndpoint replicationEndpoint)
throws IOException, TimeoutException {
TableDescriptors tableDescriptors = null;
if (server instanceof HRegionServer) {
tableDescriptors = ((HRegionServer) server).getTableDescriptors();
}
replicationEndpoint
.init(new ReplicationEndpoint.Context(conf, replicationPeer.getConfiguration(), fs, peerId,
clusterId, replicationPeer, metrics, tableDescriptors, server));
replicationEndpoint.start();
replicationEndpoint.awaitRunning(waitOnEndpointSeconds, TimeUnit.SECONDS);
}
开发者ID:apache,项目名称:hbase,代码行数:13,代码来源:ReplicationSource.java
示例17: chore
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION",
    justification="Intentional")
protected void chore() {
  try {
    TableDescriptors descriptors = master.getTableDescriptors();
    for (TableDescriptor table : descriptors.getAll().values()) {
      for (ColumnFamilyDescriptor family : table.getColumnFamilies()) {
        // Only mob-enabled families with no minimum-version retention are cleaned.
        if (!family.isMobEnabled() || family.getMinVersions() != 0) {
          continue;
        }
        // Take a shared table lock first to synchronize with MobFileCompactionChore.
        final LockManager.MasterLock lock = master.getLockManager().createMasterLock(
            MobUtils.getTableLockName(table.getTableName()), LockType.SHARED,
            this.getClass().getSimpleName() + ": Cleaning expired mob files");
        try {
          lock.acquire();
          cleaner.cleanExpiredMobFiles(table.getTableName().getNameAsString(), family);
        } finally {
          lock.release();
        }
      }
    }
  } catch (Exception e) {
    // Broad catch is intentional (see FindBugs suppression): a chore must not die.
    LOG.error("Fail to clean the expired mob files", e);
  }
}
开发者ID:apache,项目名称:hbase,代码行数:29,代码来源:ExpiredMobFileCleanerChore.java
示例18: chore
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
@Override
protected void chore() {
// Periodic master chore: run a mob compaction over every ENABLED table that has
// at least one mob-enabled column family. Start/end of the compaction is
// reported to the master exactly once per table via the 'reported' flag.
try {
TableDescriptors htds = master.getTableDescriptors();
Map<String, TableDescriptor> map = htds.getAll();
for (TableDescriptor htd : map.values()) {
// Skip tables that are not currently ENABLED.
if (!master.getTableStateManager().isTableState(htd.getTableName(),
TableState.State.ENABLED)) {
continue;
}
// Tracks whether reportMobCompactionStart() was called for this table,
// so the matching reportMobCompactionEnd() fires in the finally block.
boolean reported = false;
try {
// Exclusive table lock handed to doMobCompaction; NOTE(review): the lock is
// created but not acquired here — presumably doMobCompaction manages
// acquire/release internally; confirm against MobUtils.
final LockManager.MasterLock lock = master.getLockManager().createMasterLock(
MobUtils.getTableLockName(htd.getTableName()), LockType.EXCLUSIVE,
this.getClass().getName() + ": mob compaction");
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
if (!hcd.isMobEnabled()) {
continue;
}
// Report start lazily, only once we know the table really has a mob family.
if (!reported) {
master.reportMobCompactionStart(htd.getTableName());
reported = true;
}
MobUtils.doMobCompaction(master.getConfiguration(), master.getFileSystem(),
htd.getTableName(), hcd, pool, false, lock);
}
} finally {
// Balance the start report even if a family's compaction threw.
if (reported) {
master.reportMobCompactionEnd(htd.getTableName());
}
}
}
} catch (Exception e) {
// Chores must never propagate; log and wait for the next run.
LOG.error("Failed to compact mob files", e);
}
}
开发者ID:apache,项目名称:hbase,代码行数:37,代码来源:MobCompactionChore.java
示例19: testRemoves
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
@Test
public void testRemoves() throws IOException {
  final String name = this.name.getMethodName();
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Method-named root dir keeps this run isolated from earlier test debris.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors descriptors = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  TableDescriptor descriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
  descriptors.add(descriptor);
  // The first remove returns the stored descriptor; a repeat remove returns null.
  assertNotNull(descriptors.remove(descriptor.getTableName()));
  assertNull(descriptors.remove(descriptor.getTableName()));
}
开发者ID:apache,项目名称:hbase,代码行数:13,代码来源:TestFSTableDescriptors.java
示例20: testNoSuchTable
import org.apache.hadoop.hbase.TableDescriptors; //导入依赖的package包/类
@Test
public void testNoSuchTable() throws IOException {
  final String name = "testNoSuchTable";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Use a dedicated root dir so remnants of older runs are out of scope.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  TableDescriptors descriptors = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  // Asking for a never-registered table must return null instead of failing.
  assertNull("There shouldn't be any HTD for this table",
      descriptors.get(TableName.valueOf("NoSuchTable")));
}
开发者ID:apache,项目名称:hbase,代码行数:11,代码来源:TestFSTableDescriptors.java
注:本文中的org.apache.hadoop.hbase.TableDescriptors类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论