This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.Triple. If you are wondering what the Triple class is for, how to use it, or what real-world usage looks like, the hand-picked code examples below should help.
The Triple class belongs to the org.apache.hadoop.hbase.util package. A total of 20 code examples of the class are shown below, sorted by popularity by default.
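Before diving into the examples, here is a minimal sketch of the Triple API itself, assuming only the constructors and accessors that appear in the examples below (a three-argument constructor, a no-arg constructor, getFirst/getSecond/getThird, setFirst/setSecond/setThird); the values used are purely illustrative.

import org.apache.hadoop.hbase.util.Triple;

public class TripleBasics {
  public static void main(String[] args) {
    // Construct a Triple with all three elements up front.
    Triple<String, Integer, Boolean> t =
        new Triple<String, Integer, Boolean>("region-a", 42, Boolean.TRUE);
    System.out.println(t.getFirst());   // region-a
    System.out.println(t.getSecond());  // 42
    System.out.println(t.getThird());   // true

    // Or start empty and fill it in with the setters (as Example 18 does).
    Triple<String, Integer, Boolean> u = new Triple<String, Integer, Boolean>();
    u.setFirst("region-b");
    u.setSecond(7);
    u.setThird(Boolean.FALSE);
  }
}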
Example 1: sendRegionOpen
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
/**
* Sends an OPEN RPC to the specified server to open the specified region.
* <p>
* Open should not fail but can if server just crashed.
* <p>
* @param server server to open a region
* @param regionOpenInfos info of a list of regions to open
* @return a list of region opening states
*/
public List<RegionOpeningState> sendRegionOpen(ServerName server,
List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos)
throws IOException {
AdminService.BlockingInterface admin = getRsAdmin(server);
if (admin == null) {
LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
" failed because no RPC connection found to this server");
return null;
}
OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server, regionOpenInfos,
(RecoveryMode.LOG_REPLAY == this.services.getMasterFileSystem().getLogRecoveryMode()));
try {
OpenRegionResponse response = admin.openRegion(null, request);
return ResponseConverter.getRegionOpeningStateList(response);
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
}
}
Developer: fengchen8086, Project: ditb, Lines: 29, Source: ServerManager.java
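For context, a hedged sketch of how a caller might assemble the regionOpenInfos argument that sendRegionOpen expects; the table name, znode version and favored-node list are made-up placeholders, not values taken from the original project.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Triple;

public class RegionOpenInfosSketch {
  static List<Triple<HRegionInfo, Integer, List<ServerName>>> buildRegionOpenInfos() {
    List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos =
        new ArrayList<Triple<HRegionInfo, Integer, List<ServerName>>>();
    HRegionInfo hri = new HRegionInfo(TableName.valueOf("demo_table")); // hypothetical table
    Integer versionOfOfflineNode = 3;                       // znode version; may be null
    List<ServerName> favoredNodes = new ArrayList<ServerName>(); // empty if favored nodes are unused
    regionOpenInfos.add(new Triple<HRegionInfo, Integer, List<ServerName>>(
        hri, versionOfOfflineNode, favoredNodes));
    // A master-side caller would then pass this list to
    // serverManager.sendRegionOpen(destinationServer, regionOpenInfos);
    return regionOpenInfos;
  }
}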
Example 2: testSecondaryAndTertiaryPlacementWithSingleRack
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
public void testSecondaryAndTertiaryPlacementWithSingleRack() {
// Test the case where there is a single rack and we need to choose
// Primary/Secondary/Tertiary from a single rack.
Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
rackToServerCount.put("rack1", 10);
// have lots of regions to test with
Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
// although we created lots of regions we should have no overlap on the
// primary/secondary/tertiary for any given region
for (HRegionInfo region : regions) {
ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
}
}
Developer: fengchen8086, Project: ditb, Lines: 24, Source: TestFavoredNodeAssignmentHelper.java
Example 3: testSecondaryAndTertiaryPlacementWithSingleServer
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
public void testSecondaryAndTertiaryPlacementWithSingleServer() {
// Test the case where we have a single node in the cluster. In this case
// the primary can be assigned but the secondary/tertiary would be null
Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
rackToServerCount.put("rack1", 1);
Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
// no secondary/tertiary placement in case of a single RegionServer
assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
}
Developer: fengchen8086, Project: ditb, Lines: 18, Source: TestFavoredNodeAssignmentHelper.java
Example 4: testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
// Test the case where we have two racks but with less than two servers in each
// We will not have enough machines to select secondary/tertiary
Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
rackToServerCount.put("rack1", 1);
rackToServerCount.put("rack2", 1);
Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
assertTrue(primaryRSMap.size() == 6);
Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
for (HRegionInfo region : regions) {
// not enough secondary/tertiary room to place the regions
assertTrue(secondaryAndTertiaryMap.get(region) == null);
}
}
Developer: fengchen8086, Project: ditb, Lines: 21, Source: TestFavoredNodeAssignmentHelper.java
Example 5: secondaryAndTertiaryRSPlacementHelper
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
private Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
secondaryAndTertiaryRSPlacementHelper(
int regionCount, Map<String, Integer> rackToServerCount) {
Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
List<ServerName> servers = getServersFromRack(rackToServerCount);
FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
Map<ServerName, List<HRegionInfo>> assignmentMap =
new HashMap<ServerName, List<HRegionInfo>>();
helper.initialize();
// create regions
List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionCount);
for (int i = 0; i < regionCount; i++) {
HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"),
Bytes.toBytes(i), Bytes.toBytes(i + 1));
regions.add(region);
}
// place the regions
helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
return new Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
(primaryRSMap, helper, regions);
}
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestFavoredNodeAssignmentHelper.java
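A small style note on the return statement above: the fully spelled-out type arguments on the Triple constructor can be shortened with the Java 7+ diamond operator, which is exactly what the newer Apache HBase version of this helper does in Example 15 below:

// Equivalent, shorter construction (Java 7+), as used in Example 15:
return new Triple<>(primaryRSMap, helper, regions);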
Example 6: buildOpenRegionRequest
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
/**
* Create a protocol buffer OpenRegionRequest to open a list of regions
*
* @param server the serverName for the RPC
* @param regionOpenInfos info of a list of regions to open
* @param openForReplay whether the regions are being opened for distributed log replay
* @return a protocol buffer OpenRegionRequest
*/
public static OpenRegionRequest
buildOpenRegionRequest(ServerName server, final List<Triple<HRegionInfo, Integer,
List<ServerName>>> regionOpenInfos, Boolean openForReplay) {
OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
for (Triple<HRegionInfo, Integer, List<ServerName>> regionOpenInfo: regionOpenInfos) {
Integer second = regionOpenInfo.getSecond();
int versionOfOfflineNode = second == null ? -1 : second.intValue();
builder.addOpenInfo(buildRegionOpenInfo(regionOpenInfo.getFirst(), versionOfOfflineNode,
regionOpenInfo.getThird(), openForReplay));
}
if (server != null) {
builder.setServerStartCode(server.getStartcode());
}
// send the master's wall clock time as well, so that the RS can refer to it
builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
return builder.build();
}
Developer: fengchen8086, Project: ditb, Lines: 26, Source: RequestConverter.java
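One detail worth noting in buildOpenRegionRequest: the second element of each Triple (the version of the offline znode) may be null, and the method maps null to -1. A minimal sketch of that conversion, with placeholder values not taken from the original project:

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Triple;

public class NullSecondElementSketch {
  public static void main(String[] args) {
    // Hypothetical region; no znode version and no favored nodes supplied.
    Triple<HRegionInfo, Integer, List<ServerName>> openInfo =
        new Triple<HRegionInfo, Integer, List<ServerName>>(
            new HRegionInfo(TableName.valueOf("demo_table")), null,
            Collections.<ServerName>emptyList());
    Integer second = openInfo.getSecond();
    int versionOfOfflineNode = second == null ? -1 : second.intValue();
    System.out.println(versionOfOfflineNode); // prints -1, matching the conversion above
  }
}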
Example 7: testSecondaryAndTertiaryPlacementWithSingleRack
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
@Test
public void testSecondaryAndTertiaryPlacementWithSingleRack() {
// Test the case where there is a single rack and we need to choose
// Primary/Secondary/Tertiary from a single rack.
Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
rackToServerCount.put("rack1", 10);
// have lots of regions to test with
Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
// although we created lots of regions we should have no overlap on the
// primary/secondary/tertiary for any given region
for (HRegionInfo region : regions) {
ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
}
}
Developer: grokcoder, Project: pbase, Lines: 24, Source: TestFavoredNodeAssignmentHelper.java
Example 8: testSecondaryAndTertiaryPlacementWithSingleServer
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
@Test
public void testSecondaryAndTertiaryPlacementWithSingleServer() {
// Test the case where we have a single node in the cluster. In this case
// the primary can be assigned but the secondary/tertiary would be null
Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
rackToServerCount.put("rack1", 1);
Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
// no secondary/tertiary placement in case of a single RegionServer
assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
}
Developer: grokcoder, Project: pbase, Lines: 18, Source: TestFavoredNodeAssignmentHelper.java
Example 9: testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
@Test
public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
// Test the case where we have two racks but with less than two servers in each
// We will not have enough machines to select secondary/tertiary
Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
rackToServerCount.put("rack1", 1);
rackToServerCount.put("rack2", 1);
Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
assertTrue(primaryRSMap.size() == 6);
Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
for (HRegionInfo region : regions) {
// not enough secondary/tertiary room to place the regions
assertTrue(secondaryAndTertiaryMap.get(region) == null);
}
}
Developer: grokcoder, Project: pbase, Lines: 21, Source: TestFavoredNodeAssignmentHelper.java
Example 10: buildOpenRegionRequest
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
/**
* Create a protocol buffer OpenRegionRequest to open a list of regions
*
* @param server the serverName for the RPC
* @param regionOpenInfos info of a list of regions to open
* @param openForReplay whether the regions are being opened for distributed log replay
* @return a protocol buffer OpenRegionRequest
*/
public static OpenRegionRequest
buildOpenRegionRequest(ServerName server, final List<Triple<HRegionInfo, Integer,
List<ServerName>>> regionOpenInfos, Boolean openForReplay) {
OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
for (Triple<HRegionInfo, Integer, List<ServerName>> regionOpenInfo: regionOpenInfos) {
Integer second = regionOpenInfo.getSecond();
int versionOfOfflineNode = second == null ? -1 : second.intValue();
builder.addOpenInfo(buildRegionOpenInfo(regionOpenInfo.getFirst(), versionOfOfflineNode,
regionOpenInfo.getThird(), openForReplay));
}
if (server != null) {
builder.setServerStartCode(server.getStartcode());
}
return builder.build();
}
Developer: grokcoder, Project: pbase, Lines: 24, Source: RequestConverter.java
Example 11: sendRegionOpen
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
/**
* Sends an OPEN RPC to the specified server to open the specified region.
* <p>
* Open should not fail but can if server just crashed.
* <p>
* @param server server to open a region
* @param regionOpenInfos info of a list of regions to open
* @return a list of region opening states
*/
public List<RegionOpeningState> sendRegionOpen(ServerName server,
List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos)
throws IOException {
AdminService.BlockingInterface admin = getRsAdmin(server);
if (admin == null) {
LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
" failed because no RPC connection found to this server");
return null;
}
OpenRegionRequest request =
RequestConverter.buildOpenRegionRequest(regionOpenInfos);
try {
OpenRegionResponse response = admin.openRegion(null, request);
return ResponseConverter.getRegionOpeningStateList(response);
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
}
}
Developer: tenggyut, Project: HIndex, Lines: 29, Source: ServerManager.java
Example 12: testSecondaryAndTertiaryPlacementWithSingleRack
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
@Test
public void testSecondaryAndTertiaryPlacementWithSingleRack() {
// Test the case where there is a single rack and we need to choose
// Primary/Secondary/Tertiary from a single rack.
Map<String,Integer> rackToServerCount = new HashMap<>();
rackToServerCount.put("rack1", 10);
// have lots of regions to test with
Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
// although we created lots of regions we should have no overlap on the
// primary/secondary/tertiary for any given region
for (RegionInfo region : regions) {
ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region);
assertNotNull(secondaryAndTertiaryServers);
assertTrue(primaryRSMap.containsKey(region));
assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region)));
assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region)));
assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1]));
}
}
Developer: apache, Project: hbase, Lines: 26, Source: TestFavoredNodeAssignmentHelper.java
Example 13: testSecondaryAndTertiaryPlacementWithSingleServer
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
@Test
public void testSecondaryAndTertiaryPlacementWithSingleServer() {
// Test the case where we have a single node in the cluster. In this case
// the primary can be assigned but the secondary/tertiary would be null
Map<String,Integer> rackToServerCount = new HashMap<>();
rackToServerCount.put("rack1", 1);
Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
// no secondary/tertiary placement in case of a single RegionServer
assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null);
}
Developer: apache, Project: hbase, Lines: 18, Source: TestFavoredNodeAssignmentHelper.java
Example 14: testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
@Test
public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
// Test the case where we have two racks but with less than two servers in each
// We will not have enough machines to select secondary/tertiary
Map<String,Integer> rackToServerCount = new HashMap<>();
rackToServerCount.put("rack1", 1);
rackToServerCount.put("rack2", 1);
Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
assertTrue(primaryRSMap.size() == 6);
Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
for (RegionInfo region : regions) {
// not enough secondary/tertiary room to place the regions
assertTrue(secondaryAndTertiaryMap.get(region) == null);
}
}
Developer: apache, Project: hbase, Lines: 21, Source: TestFavoredNodeAssignmentHelper.java
Example 15: secondaryAndTertiaryRSPlacementHelper
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
private Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>>
secondaryAndTertiaryRSPlacementHelper(
int regionCount, Map<String, Integer> rackToServerCount) {
Map<RegionInfo, ServerName> primaryRSMap = new HashMap<RegionInfo, ServerName>();
List<ServerName> servers = getServersFromRack(rackToServerCount);
FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
Map<ServerName, List<RegionInfo>> assignmentMap =
new HashMap<ServerName, List<RegionInfo>>();
helper.initialize();
// create regions
List<RegionInfo> regions = new ArrayList<>(regionCount);
for (int i = 0; i < regionCount; i++) {
regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(Bytes.toBytes(i))
.setEndKey(Bytes.toBytes(i + 1))
.build());
}
// place the regions
helper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
return new Triple<>(primaryRSMap, helper, regions);
}
Developer: apache, Project: hbase, Lines: 22, Source: TestFavoredNodeAssignmentHelper.java
Example 16: createSubmittableJob
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
/**
* Sets up the actual job.
*
* @param conf The current configuration.
* @param args The command line parameters.
* @return The newly created job.
* @throws IOException When setting up the job fails.
*/
public static Job createSubmittableJob(Configuration conf, String[] args)
throws IOException {
Triple<TableName, Scan, Path> arguments = ExportUtils.getArgumentsFromCommandLine(conf, args);
String tableName = arguments.getFirst().getNameAsString();
Path outputDir = arguments.getThird();
Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
job.setJobName(NAME + "_" + tableName);
job.setJarByClass(Export.class);
// Set optional scan parameters
Scan s = arguments.getSecond();
IdentityTableMapper.initJob(tableName, s, IdentityTableMapper.class, job);
// No reducers. Just write straight to output files.
job.setNumReduceTasks(0);
job.setOutputFormatClass(SequenceFileOutputFormat.class);
job.setOutputKeyClass(ImmutableBytesWritable.class);
job.setOutputValueClass(Result.class);
FileOutputFormat.setOutputPath(job, outputDir); // job conf doesn't contain the conf so doesn't have a default fs.
return job;
}
Developer: apache, Project: hbase, Lines: 28, Source: Export.java
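As a rough usage sketch (not from the original project), the job above is typically driven with a table name and an output directory, the same two arguments that ExportUtils parses from the command line; the table name and path below are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.Export;
import org.apache.hadoop.mapreduce.Job;

public class ExportDriverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Equivalent to the CLI form: hbase org.apache.hadoop.hbase.mapreduce.Export <tablename> <outputdir>
    String[] exportArgs = new String[] { "demo_table", "/tmp/demo_table_export" };
    Job job = Export.createSubmittableJob(conf, exportArgs);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}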
Example 17: testProtosInParamTypes
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
@Ignore @Test
public void testProtosInParamTypes() throws ClassNotFoundException, IOException, LinkageError {
Set<Class<?>> classes = findPublicClasses();
List<Triple<Class<?>, Method, Class<?>>> protosParamType = new ArrayList<>();
for (Class<?> clazz : classes) {
findProtoInParamType(clazz, protosParamType);
}
if (protosParamType.size() != 0) {
LOG.info("These are the methods that have Protos as the param type");
for (Triple<Class<?>, Method, Class<?>> pair : protosParamType) {
LOG.info(pair.getFirst().getName() + " " + pair.getSecond().getName() + " "
+ pair.getThird().getName());
}
}
Assert.assertEquals("Public exposed methods should not have protos in param type", 0,
protosParamType.size());
}
Developer: apache, Project: hbase, Lines: 20, Source: TestInterfaceAudienceAnnotations.java
Example 18: findProtoInParamType
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
private void findProtoInParamType(Class<?> clazz,
List<Triple<Class<?>, Method, Class<?>>> protosParamType) {
Method[] methods = clazz.getMethods();
for (Method method : methods) {
if (clazz.isInterface() || method.getModifiers() == Modifier.PUBLIC) {
if (!isInterfacePrivateMethod(method)) {
Class<?>[] parameters = method.getParameterTypes();
for (Class<?> param : parameters) {
if (param.getName().contains(HBASE_PROTOBUF)) {
// Create a fresh Triple for each match; reusing one instance across the loop
// would make every list entry alias the same object and report only the last match.
Triple<Class<?>, Method, Class<?>> paramType = new Triple<>();
paramType.setFirst(clazz);
paramType.setSecond(method);
paramType.setThird(param);
protosParamType.add(paramType);
break;
}
}
}
}
}
}
Developer: apache, Project: hbase, Lines: 22, Source: TestInterfaceAudienceAnnotations.java
Example 19: removeFirstDone
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
/**
* Wait for one of tasks to be done, and remove it from the list.
* @return the tasks done.
*/
private Triple<MultiAction<R>, HRegionLocation, Future<MultiResponse>>
removeFirstDone() throws InterruptedException {
while (true) {
synchronized (finishedTasks) {
if (!finishedTasks.isEmpty()) {
MultiAction<R> done = finishedTasks.remove(finishedTasks.size() - 1);
// We now need to remove it from the inProgress part.
Iterator<Triple<MultiAction<R>, HRegionLocation, Future<MultiResponse>>> it =
inProgress.iterator();
while (it.hasNext()) {
Triple<MultiAction<R>, HRegionLocation, Future<MultiResponse>> task = it.next();
if (task.getFirst() == done) { // We have the exact object. No java equals here.
it.remove();
return task;
}
}
LOG.error("Development error: We didn't see a task in the list. " +
done.getRegions());
}
finishedTasks.wait(10);
}
}
}
Developer: daidong, Project: DominoHBase, Lines: 29, Source: HConnectionManager.java
Example 20: testSecondaryAndTertiaryPlacementWithMultipleRacks
import org.apache.hadoop.hbase.util.Triple; // import the required package/class
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
public void testSecondaryAndTertiaryPlacementWithMultipleRacks() {
// Test the case where we have multiple racks and the region servers
// belong to multiple racks
Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
rackToServerCount.put("rack1", 10);
rackToServerCount.put("rack2", 10);
Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
assertTrue(primaryRSMap.size() == 60000);
Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
assertTrue(secondaryAndTertiaryMap.size() == 60000);
// for every region, the primary should be on one rack and the secondary/tertiary
// on another (we create a lot of regions just to increase probability of failure)
for (Map.Entry<HRegionInfo, ServerName[]> entry : secondaryAndTertiaryMap.entrySet()) {
ServerName[] allServersForRegion = entry.getValue();
String primaryRSRack = rackManager.getRack(primaryRSMap.get(entry.getKey()));
String secondaryRSRack = rackManager.getRack(allServersForRegion[0]);
String tertiaryRSRack = rackManager.getRack(allServersForRegion[1]);
assertTrue(!primaryRSRack.equals(secondaryRSRack));
assertTrue(secondaryRSRack.equals(tertiaryRSRack));
}
}
Developer: fengchen8086, Project: ditb, Lines: 29, Source: TestFavoredNodeAssignmentHelper.java
Note: The org.apache.hadoop.hbase.util.Triple examples in this article were collected from GitHub, MSDocs and other source-code and documentation platforms. The code snippets come from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution and use should follow each project's license. Do not republish without permission.