本文整理汇总了Java中org.apache.hadoop.metrics2.MetricsBuilder类的典型用法代码示例。如果您正苦于以下问题:Java MetricsBuilder类的具体用法?Java MetricsBuilder怎么用?Java MetricsBuilder使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
MetricsBuilder类属于org.apache.hadoop.metrics2包,在下文中一共展示了MetricsBuilder类的19个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Pushes the aggregated per-region metrics into the supplied builder. Despite
 * the "get" prefix, the Hadoop metrics framework expects this method to write
 * its output into {@code metricsBuilder} rather than return anything.
 *
 * @param metricsBuilder builder that receives the metrics record
 * @param all            whether to push all metrics or only changed ones
 */
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
  final MetricsRecordBuilder recordBuilder =
      metricsBuilder.addRecord(metricsName).setContext(metricsContext);
  if (regionSources != null) {
    // Snapshot every region source under the read lock so the collection
    // cannot change while we iterate.
    lock.readLock().lock();
    try {
      for (MetricsRegionSourceImpl regionSource : regionSources) {
        regionSource.snapshot(recordBuilder, all);
      }
    } finally {
      lock.readLock().unlock();
    }
  }
  metricsRegistry.snapshot(recordBuilder, all);
}
开发者ID:tenggyut,项目名称:HIndex,代码行数:28,代码来源:MetricsRegionAggregateSourceImpl.java
示例2: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Exports the RPC server queue-length and connection gauges into the given
 * builder, then snapshots the rest of the registry.
 *
 * @param metricsBuilder builder that receives the metrics record
 * @param all            whether to push all metrics or only changed ones
 */
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
  final MetricsRecordBuilder record =
      metricsBuilder.addRecord(metricsName).setContext(metricsContext);
  // The wrapper may not be wired up yet while the source is initializing.
  if (wrapper != null) {
    record.addGauge(QUEUE_SIZE_NAME, QUEUE_SIZE_DESC, wrapper.getTotalQueueSize());
    record.addGauge(GENERAL_QUEUE_NAME, GENERAL_QUEUE_DESC, wrapper.getGeneralQueueLength());
    record.addGauge(REPLICATION_QUEUE_NAME, REPLICATION_QUEUE_DESC,
        wrapper.getReplicationQueueLength());
    record.addGauge(PRIORITY_QUEUE_NAME, PRIORITY_QUEUE_DESC, wrapper.getPriorityQueueLength());
    record.addGauge(NUM_OPEN_CONNECTIONS_NAME, NUM_OPEN_CONNECTIONS_DESC,
        wrapper.getNumOpenConnections());
    record.addGauge(NUM_ACTIVE_HANDLER_NAME, NUM_ACTIVE_HANDLER_DESC,
        wrapper.getActiveRpcHandlerCount());
  }
  metricsRegistry.snapshot(record, all);
}
开发者ID:tenggyut,项目名称:HIndex,代码行数:19,代码来源:MetricsHBaseServerSourceImpl.java
示例3: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Exports the RPC server queue-length and connection gauges into the given
 * builder, then snapshots the rest of the registry.
 *
 * @param metricsBuilder builder that receives the metrics record
 * @param all            whether to push all metrics or only changed ones
 */
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
  final MetricsRecordBuilder record =
      metricsBuilder.addRecord(metricsName).setContext(metricsContext);
  // The wrapper may not be wired up yet while the source is initializing.
  if (wrapper != null) {
    record.addGauge(QUEUE_SIZE_NAME, QUEUE_SIZE_DESC, wrapper.getTotalQueueSize());
    record.addGauge(GENERAL_QUEUE_NAME, GENERAL_QUEUE_DESC, wrapper.getGeneralQueueLength());
    record.addGauge(REPLICATION_QUEUE_NAME, REPLICATION_QUEUE_DESC,
        wrapper.getReplicationQueueLength());
    record.addGauge(PRIORITY_QUEUE_NAME, PRIORITY_QUEUE_DESC, wrapper.getPriorityQueueLength());
    record.addGauge(NUM_OPEN_CONNECTIONS_NAME, NUM_OPEN_CONNECTIONS_DESC,
        wrapper.getNumOpenConnections());
  }
  metricsRegistry.snapshot(record, all);
}
开发者ID:cloud-software-foundation,项目名称:c5,代码行数:17,代码来源:MetricsHBaseServerSourceImpl.java
示例4: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Drives {@code source.getMetrics} against a mocked builder and hands back the
 * mocked record builder so callers can verify which metrics were emitted.
 *
 * @param source the metrics source under test
 * @return the mock record builder to run verifications against
 */
public static MetricsRecordBuilder getMetrics(MetricsSource source) {
  MetricsBuilder mb = mock(MetricsBuilder.class);
  // The mock logs every call at debug level and returns itself so the
  // source's chained builder calls keep working.
  final MetricsRecordBuilder rb = mock(MetricsRecordBuilder.class,
      new Answer<Object>() {
        @Override
        public Object answer(InvocationOnMock invocation) {
          StringBuilder joined = new StringBuilder();
          String separator = "";
          for (Object arg : invocation.getArguments()) {
            joined.append(separator).append(String.valueOf(arg));
            separator = ", ";
          }
          LOG.debug(invocation.getMethod().getName() +": "+ joined);
          return invocation.getMock();
        }
      });
  when(mb.addRecord(anyString())).thenReturn(rb);
  source.getMetrics(mb, true);
  return rb;
}
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:26,代码来源:MetricsAsserts.java
示例5: registerSystemSource
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Registers an internal metrics source that reports on the metrics system
 * itself (source/sink counts, sink queue stats, snapshot/publish/drop stats)
 * and starts it.
 */
private void registerSystemSource() {
sysSource = new MetricsSourceAdapter(prefix, MS_STATS_NAME, MS_STATS_DESC,
new MetricsSource() {
@Override
public void getMetrics(MetricsBuilder builder, boolean all) {
int numSources, numSinks;
// Read the source/sink counts under the system lock so they are a
// consistent pair.
synchronized(MetricsSystemImpl.this) {
numSources = sources.size();
numSinks = sinks.size();
}
MetricsRecordBuilder rb = builder.addRecord(MS_NAME)
.setContext(MS_CONTEXT)
.addGauge(NUM_SOURCES_KEY, NUM_SOURCES_DESC, numSources)
.addGauge(NUM_SINKS_KEY, NUM_SINKS_DESC, numSinks);
// Take the lock again (rather than holding it across the record-building
// above) before iterating the live sink map.
synchronized(MetricsSystemImpl.this) {
for (MetricsSinkAdapter sa : sinks.values()) {
sa.snapshot(rb, all);
}
}
// The system's own bookkeeping stats are appended outside the lock.
snapshotStat.snapshot(rb, all);
publishStat.snapshot(rb, all);
dropStat.snapshot(rb, all);
}
}, injectedTags, null, null, period);
sysSource.start();
}
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:27,代码来源:MetricsSystemImpl.java
示例6: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Publishes the FSNamesystem gauges (file/block counts, capacity in GB,
 * replication queue sizes) as a single "FSNamesystem" record in the "dfs"
 * context.
 *
 * @param builder builder that receives the record
 * @param all     whether to push all metrics or only changed ones
 */
@Override
public void getMetrics(MetricsBuilder builder, boolean all) {
  final MetricsRecordBuilder record =
      builder.addRecord("FSNamesystem").setContext("dfs");
  record.addGauge("FilesTotal", "", getFilesTotal());
  record.addGauge("BlocksTotal", "", getBlocksTotal());
  // Capacity figures are reported rounded to whole gigabytes.
  record.addGauge("CapacityTotalGB", "", roundBytesToGBytes(getCapacityTotal()));
  record.addGauge("CapacityUsedGB", "", roundBytesToGBytes(getCapacityUsed()));
  record.addGauge("CapacityRemainingGB", "", roundBytesToGBytes(getCapacityRemaining()));
  record.addGauge("TotalLoad", "", getTotalLoad());
  record.addGauge("CorruptBlocks", "", getCorruptReplicaBlocks());
  record.addGauge("ExcessBlocks", "", getExcessBlocks());
  record.addGauge("PendingDeletionBlocks", "", getPendingDeletionBlocks());
  record.addGauge("PendingReplicationBlocks", "", getPendingReplicationBlocks());
  record.addGauge("UnderReplicatedBlocks", "", getUnderReplicatedBlocks());
  record.addGauge("ScheduledReplicationBlocks", "", getScheduledReplicationBlocks());
  record.addGauge("MissingBlocks", "", getMissingBlocksCount());
  record.addGauge("BlockCapacity", "", getBlockCapacity());
}
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:23,代码来源:FSNamesystem.java
示例7: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Pushes the aggregated per-region metrics into the supplied builder. Despite
 * the "get" prefix, the Hadoop metrics framework expects this method to write
 * its output into {@code metricsBuilder} rather than return anything.
 *
 * @param metricsBuilder builder that receives the metrics record
 * @param all            whether to push all metrics or only changed ones
 */
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
  final MetricsRecordBuilder recordBuilder =
      metricsBuilder.addRecord(metricsName).setContext(metricsContext);
  if (regionSources != null) {
    for (MetricsRegionSourceImpl regionSource : regionSources) {
      regionSource.snapshot(recordBuilder, all);
    }
  }
  metricsRegistry.snapshot(recordBuilder, all);
}
开发者ID:daidong,项目名称:DominoHBase,代码行数:25,代码来源:MetricsRegionAggregateSourceImpl.java
示例8: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Exports every metric in this source's registry as one record named
 * {@code metricsName} in context {@code metricsContext}.
 *
 * @param metricsBuilder builder that receives the record
 * @param all            whether to push all metrics or only changed ones
 */
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
  final MetricsRecordBuilder record =
      metricsBuilder.addRecord(metricsName).setContext(metricsContext);
  metricsRegistry.snapshot(record, all);
}
开发者ID:tenggyut,项目名称:HIndex,代码行数:13,代码来源:BaseSourceImpl.java
示例9: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Exports the master's gauges (uptime, load, region-server counts) and
 * identity tags (server lists, quorum, cluster id) into the supplied builder,
 * then snapshots the rest of the registry.
 *
 * @param metricsBuilder builder that receives the record
 * @param all            whether to push all metrics or only changed ones
 */
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
  final MetricsRecordBuilder record =
      metricsBuilder.addRecord(metricsName).setContext(metricsContext);
  // masterWrapper can be null because this method is invoked during init.
  if (masterWrapper != null) {
    record.addGauge(MASTER_ACTIVE_TIME_NAME, MASTER_ACTIVE_TIME_DESC,
        masterWrapper.getActiveTime());
    record.addGauge(MASTER_START_TIME_NAME, MASTER_START_TIME_DESC,
        masterWrapper.getStartTime());
    record.addGauge(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC, masterWrapper.getAverageLoad());
    record.tag(LIVE_REGION_SERVERS_NAME, LIVE_REGION_SERVERS_DESC,
        masterWrapper.getRegionServers());
    record.addGauge(NUM_REGION_SERVERS_NAME, NUMBER_OF_REGION_SERVERS_DESC,
        masterWrapper.getNumRegionServers());
    record.tag(DEAD_REGION_SERVERS_NAME, DEAD_REGION_SERVERS_DESC,
        masterWrapper.getDeadRegionServers());
    record.addGauge(NUM_DEAD_REGION_SERVERS_NAME, NUMBER_OF_DEAD_REGION_SERVERS_DESC,
        masterWrapper.getNumDeadRegionServers());
    record.tag(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC,
        masterWrapper.getZookeeperQuorum());
    record.tag(SERVER_NAME_NAME, SERVER_NAME_DESC, masterWrapper.getServerName());
    record.tag(CLUSTER_ID_NAME, CLUSTER_ID_DESC, masterWrapper.getClusterId());
    record.tag(IS_ACTIVE_MASTER_NAME, IS_ACTIVE_MASTER_DESC,
        String.valueOf(masterWrapper.getIsActiveMaster()));
  }
  metricsRegistry.snapshot(record, all);
}
开发者ID:tenggyut,项目名称:HIndex,代码行数:40,代码来源:MetricsMasterSourceImpl.java
示例10: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Exports the master's gauges (uptime, load, region-server counts) and
 * identity tags (quorum, server name, cluster id) into the supplied builder,
 * then snapshots the rest of the registry.
 *
 * @param metricsBuilder Builder to accept metrics
 * @param all push all or only changed?
 */
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
MetricsRecordBuilder metricsRecordBuilder = metricsBuilder.addRecord(metricsName)
.setContext(metricsContext);
// masterWrapper can be null because this function is called inside of init.
if (masterWrapper != null) {
metricsRecordBuilder
.addGauge(MASTER_ACTIVE_TIME_NAME,
MASTER_ACTIVE_TIME_DESC, masterWrapper.getActiveTime())
.addGauge(MASTER_START_TIME_NAME,
MASTER_START_TIME_DESC, masterWrapper.getStartTime())
.addGauge(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC, masterWrapper.getAverageLoad())
// NOTE(review): this gauge is fed from getRegionServers(), while sibling
// implementations use a numeric getNumRegionServers() here and emit
// getRegionServers() as a tag — confirm the wrapper returns a number.
.addGauge(NUM_REGION_SERVERS_NAME,
NUMBER_OF_REGION_SERVERS_DESC, masterWrapper.getRegionServers())
.addGauge(NUM_DEAD_REGION_SERVERS_NAME,
NUMBER_OF_DEAD_REGION_SERVERS_DESC,
masterWrapper.getDeadRegionServers())
.tag(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC, masterWrapper.getZookeeperQuorum())
.tag(SERVER_NAME_NAME, SERVER_NAME_DESC, masterWrapper.getServerName())
.tag(CLUSTER_ID_NAME, CLUSTER_ID_DESC, masterWrapper.getClusterId())
.tag(IS_ACTIVE_MASTER_NAME,
IS_ACTIVE_MASTER_DESC,
String.valueOf(masterWrapper.getIsActiveMaster()));
}
metricsRegistry.snapshot(metricsRecordBuilder, all);
}
开发者ID:cloud-software-foundation,项目名称:c5,代码行数:36,代码来源:MetricsMasterSourceImpl.java
示例11: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Refreshes the running-task and slot gauges from the TaskTracker, then
 * snapshots the registry into a record named after the registry.
 *
 * @param builder builder that receives the record
 * @param all     whether to push all metrics or only changed ones
 */
@Override
public void getMetrics(MetricsBuilder builder, boolean all) {
  // Pull the current counts from the TaskTracker into the gauges first.
  mapsRunning.set(tt.mapTotal);
  redsRunning.set(tt.reduceTotal);
  mapSlots.set(tt.getMaxCurrentMapTasks());
  redSlots.set(tt.getMaxCurrentReduceTasks());
  final MetricsRecordBuilder record = builder.addRecord(registry.name());
  registry.snapshot(record, all);
}
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:9,代码来源:TaskTrackerMetricsSource.java
示例12: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Reports the shuffle handler busy percentage plus the registry's metrics.
 *
 * @param builder builder that receives the record
 * @param all     whether to push all metrics or only changed ones
 */
@Override
public void getMetrics(MetricsBuilder builder, boolean all) {
  final MetricsRecordBuilder record = builder.addRecord(registry.name());
  // Guard against division by zero when no worker threads are configured.
  final double busyPercent =
      ttWorkerThreads == 0 ? 0 : 100. * serverHandlerBusy / ttWorkerThreads;
  record.addGauge("shuffle_handler_busy_percent", "", busyPercent);
  registry.snapshot(record, all);
}
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:8,代码来源:ShuffleServerInstrumentation.java
示例13: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Reports the shuffle fetcher busy percentage plus the registry's metrics.
 *
 * @param builder builder that receives the record
 * @param all     whether to push all metrics or only changed ones
 */
@Override
public void getMetrics(MetricsBuilder builder, boolean all) {
  final MetricsRecordBuilder record = builder.addRecord(registry.name());
  // Guard against division by zero when there are no copier threads.
  final double busyPercent =
      numCopiers == 0 ? 0 : 100. * threadsBusy / numCopiers;
  record.addGauge("shuffle_fetchers_busy_percent", "", busyPercent);
  registry.snapshot(record, all);
}
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:8,代码来源:ReduceTask.java
示例14: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Builds the JVM metrics record (tagged with process name and session id) and
 * lets each helper append its group of metrics.
 *
 * @param builder builder that receives the record
 * @param all     whether to push all metrics or only changed ones
 */
@Override
public void getMetrics(MetricsBuilder builder, boolean all) {
  final MetricsRecordBuilder record = builder.addRecord(RECORD_NAME)
      .setContext(CONTEXT)
      .tag(PROCESSNAME_KEY, PROCESSNAME_DESC, processName)
      .tag(SESSIONID_KEY, SESSIONID_DESC, sessionId);
  // Each helper appends one family of JVM metrics to the record.
  getMemoryUsage(record);
  getGcUsage(record);
  getThreadUsage(record);
  getEventCounters(record);
}
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:12,代码来源:JvmMetricsSource.java
示例15: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Exports the region server's gauges, counters, and identity tags into the
 * supplied builder, then snapshots the rest of the registry. Despite the
 * "get" prefix, the Hadoop metrics framework expects this method to write its
 * output into {@code metricsBuilder} rather than return anything.
 *
 * @param metricsBuilder builder that receives the record
 * @param all            whether to push all metrics or only changed ones
 */
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
  final MetricsRecordBuilder record =
      metricsBuilder.addRecord(metricsName).setContext(metricsContext);
  // rsWrap can be null because this method is invoked during init.
  if (rsWrap != null) {
    // Storage and server-lifetime gauges.
    record.addGauge(REGION_COUNT, REGION_COUNT_DESC, rsWrap.getNumOnlineRegions());
    record.addGauge(STORE_COUNT, STORE_COUNT_DESC, rsWrap.getNumStores());
    record.addGauge(HLOGFILE_COUNT, HLOGFILE_COUNT_DESC, rsWrap.getNumHLogFiles());
    record.addGauge(HLOGFILE_SIZE, HLOGFILE_SIZE_DESC, rsWrap.getHLogFileSize());
    record.addGauge(STOREFILE_COUNT, STOREFILE_COUNT_DESC, rsWrap.getNumStoreFiles());
    record.addGauge(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC, rsWrap.getMemstoreSize());
    record.addGauge(STOREFILE_SIZE, STOREFILE_SIZE_DESC, rsWrap.getStoreFileSize());
    record.addGauge(RS_START_TIME_NAME, RS_START_TIME_DESC, rsWrap.getStartCode());
    // Request counters.
    record.addCounter(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC,
        rsWrap.getTotalRequestCount());
    record.addCounter(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC,
        rsWrap.getReadRequestsCount());
    record.addCounter(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC,
        rsWrap.getWriteRequestsCount());
    record.addCounter(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC,
        rsWrap.getCheckAndMutateChecksFailed());
    record.addCounter(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC,
        rsWrap.getCheckAndMutateChecksPassed());
    // Index/bloom and WAL-related gauges.
    record.addGauge(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC,
        rsWrap.getStoreFileIndexSize());
    record.addGauge(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC,
        rsWrap.getTotalStaticIndexSize());
    record.addGauge(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC,
        rsWrap.getTotalStaticBloomSize());
    record.addGauge(NUMBER_OF_MUTATIONS_WITHOUT_WAL, NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC,
        rsWrap.getNumMutationsWithoutWAL());
    record.addGauge(DATA_SIZE_WITHOUT_WAL, DATA_SIZE_WITHOUT_WAL_DESC,
        rsWrap.getDataInMemoryWithoutWAL());
    record.addGauge(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC,
        rsWrap.getPercentFileLocal());
    // Compaction and flush queue gauges (the three compaction gauges share
    // one description constant, as in the original).
    record.addGauge(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC,
        rsWrap.getCompactionQueueSize());
    record.addGauge(LARGE_COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC,
        rsWrap.getLargeCompactionQueueSize());
    record.addGauge(SMALL_COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC,
        rsWrap.getSmallCompactionQueueSize());
    record.addGauge(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC, rsWrap.getFlushQueueSize());
    // Block cache gauges and counters.
    record.addGauge(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC,
        rsWrap.getBlockCacheFreeSize());
    record.addGauge(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC, rsWrap.getBlockCacheCount());
    record.addGauge(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC, rsWrap.getBlockCacheSize());
    record.addCounter(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC,
        rsWrap.getBlockCacheHitCount());
    record.addCounter(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC,
        rsWrap.getBlockCacheMissCount());
    record.addCounter(BLOCK_CACHE_EVICTION_COUNT, BLOCK_CACHE_EVICTION_COUNT_DESC,
        rsWrap.getBlockCacheEvictedCount());
    record.addGauge(BLOCK_CACHE_HIT_PERCENT, BLOCK_CACHE_HIT_PERCENT_DESC,
        rsWrap.getBlockCacheHitPercent());
    record.addGauge(BLOCK_CACHE_EXPRESS_HIT_PERCENT, BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC,
        rsWrap.getBlockCacheHitCachingPercent());
    record.addCounter(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC,
        rsWrap.getUpdatesBlockedTime());
    // Identity tags.
    record.tag(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC, rsWrap.getZookeeperQuorum());
    record.tag(SERVER_NAME_NAME, SERVER_NAME_DESC, rsWrap.getServerName());
    record.tag(CLUSTER_ID_NAME, CLUSTER_ID_DESC, rsWrap.getClusterId());
  }
  metricsRegistry.snapshot(record, all);
}
开发者ID:tenggyut,项目名称:HIndex,代码行数:80,代码来源:MetricsRegionServerSourceImpl.java
示例16: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Exports the region server's gauges, counters, and identity tags into the
 * supplied builder, then snapshots the rest of the registry. Despite the
 * "get" prefix, the Hadoop metrics framework expects this method to write its
 * output into {@code metricsBuilder} rather than return anything.
 *
 * @param metricsBuilder Builder to accept metrics
 * @param all push all or only changed?
 */
@Override
public void getMetrics(MetricsBuilder metricsBuilder, boolean all) {
MetricsRecordBuilder mrb = metricsBuilder.addRecord(metricsName)
.setContext(metricsContext);
// rsWrap can be null because this function is called inside of init.
if (rsWrap != null) {
// Storage and server-lifetime gauges.
mrb.addGauge(REGION_COUNT, REGION_COUNT_DESC, rsWrap.getNumOnlineRegions())
.addGauge(STORE_COUNT, STORE_COUNT_DESC, rsWrap.getNumStores())
.addGauge(STOREFILE_COUNT, STOREFILE_COUNT_DESC, rsWrap.getNumStoreFiles())
.addGauge(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC, rsWrap.getMemstoreSize())
.addGauge(STOREFILE_SIZE, STOREFILE_SIZE_DESC, rsWrap.getStoreFileSize())
.addGauge(RS_START_TIME_NAME, RS_START_TIME_DESC, rsWrap.getStartCode())
// Request counters.
.addCounter(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC, rsWrap.getTotalRequestCount())
.addCounter(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC, rsWrap.getReadRequestsCount())
.addCounter(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC, rsWrap.getWriteRequestsCount())
.addCounter(CHECK_MUTATE_FAILED_COUNT,
CHECK_MUTATE_FAILED_COUNT_DESC,
rsWrap.getCheckAndMutateChecksFailed())
.addCounter(CHECK_MUTATE_PASSED_COUNT,
CHECK_MUTATE_PASSED_COUNT_DESC,
rsWrap.getCheckAndMutateChecksPassed())
// Index/bloom and WAL-related gauges.
.addGauge(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC, rsWrap.getStoreFileIndexSize())
.addGauge(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC, rsWrap.getTotalStaticIndexSize())
.addGauge(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC, rsWrap.getTotalStaticBloomSize())
.addGauge(NUMBER_OF_MUTATIONS_WITHOUT_WAL,
NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC,
rsWrap.getNumMutationsWithoutWAL())
.addGauge(DATA_SIZE_WITHOUT_WAL,
DATA_SIZE_WITHOUT_WAL_DESC,
rsWrap.getDataInMemoryWithoutWAL())
.addGauge(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC, rsWrap.getPercentFileLocal())
// Compaction and flush queue gauges; the three compaction gauges share
// one description constant.
.addGauge(COMPACTION_QUEUE_LENGTH,
COMPACTION_QUEUE_LENGTH_DESC,
rsWrap.getCompactionQueueSize())
.addGauge(LARGE_COMPACTION_QUEUE_LENGTH,
COMPACTION_QUEUE_LENGTH_DESC,
rsWrap.getLargeCompactionQueueSize())
.addGauge(SMALL_COMPACTION_QUEUE_LENGTH,
COMPACTION_QUEUE_LENGTH_DESC,
rsWrap.getSmallCompactionQueueSize())
.addGauge(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC, rsWrap.getFlushQueueSize())
// Block cache gauges and counters.
.addGauge(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC, rsWrap.getBlockCacheFreeSize())
.addGauge(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC, rsWrap.getBlockCacheCount())
.addGauge(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC, rsWrap.getBlockCacheSize())
.addCounter(BLOCK_CACHE_HIT_COUNT,
BLOCK_CACHE_HIT_COUNT_DESC,
rsWrap.getBlockCacheHitCount())
.addCounter(BLOCK_CACHE_MISS_COUNT,
BLOCK_COUNT_MISS_COUNT_DESC,
rsWrap.getBlockCacheMissCount())
.addCounter(BLOCK_CACHE_EVICTION_COUNT,
BLOCK_CACHE_EVICTION_COUNT_DESC,
rsWrap.getBlockCacheEvictedCount())
.addGauge(BLOCK_CACHE_HIT_PERCENT,
BLOCK_CACHE_HIT_PERCENT_DESC,
rsWrap.getBlockCacheHitPercent())
.addGauge(BLOCK_CACHE_EXPRESS_HIT_PERCENT,
BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC,
rsWrap.getBlockCacheHitCachingPercent())
.addCounter(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC, rsWrap.getUpdatesBlockedTime())
// Identity tags.
.tag(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC, rsWrap.getZookeeperQuorum())
.tag(SERVER_NAME_NAME, SERVER_NAME_DESC, rsWrap.getServerName())
.tag(CLUSTER_ID_NAME, CLUSTER_ID_DESC, rsWrap.getClusterId());
}
metricsRegistry.snapshot(mrb, all);
}
开发者ID:cloud-software-foundation,项目名称:c5,代码行数:78,代码来源:MetricsRegionServerSourceImpl.java
示例17: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Refreshes the running-time gauge, then snapshots the queue metrics registry
 * into a record named after the registry.
 *
 * @param builder builder that receives the record
 * @param all     whether to push all metrics or only changed ones
 */
@Override
public void getMetrics(MetricsBuilder builder, boolean all) {
  // Update the derived running-time metric before taking the snapshot.
  updateRunningTime();
  registry.snapshot(builder.addRecord(registry.name()), all);
}
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:5,代码来源:QueueMetrics.java
示例18: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Snapshots the JobTracker metrics registry into a record named after the
 * registry.
 *
 * @param builder builder that receives the record
 * @param all     whether to push all metrics or only changed ones
 */
@Override
public void getMetrics(MetricsBuilder builder, boolean all) {
  registry.snapshot(builder.addRecord(registry.name()), all);
}
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:4,代码来源:JobTrackerMetricsSource.java
示例19: getMetrics
import org.apache.hadoop.metrics2.MetricsBuilder; //导入依赖的package包/类
/**
 * Snapshots the UGI metrics registry into a record named after the registry.
 *
 * @param builder builder that receives the record
 * @param all     whether to push all metrics or only changed ones
 */
@Override
public void getMetrics(MetricsBuilder builder, boolean all) {
  final MetricsRecordBuilder record = builder.addRecord(registry.name());
  registry.snapshot(record, all);
}
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:5,代码来源:UgiInstrumentation.java
注:本文中的org.apache.hadoop.metrics2.MetricsBuilder类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论