
Java DatanodeDescriptor Class Code Examples


This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor. If you have been wondering how the DatanodeDescriptor class is used in practice, or are looking for concrete examples of it, the hand-picked code samples below may help.



The DatanodeDescriptor class belongs to the org.apache.hadoop.hdfs.server.namenode package. A total of 13 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
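Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: start a MiniDFSCluster, fetch the live DatanodeDescriptor list from the NameNode's FSNamesystem, and read per-node statistics off each descriptor. This is a sketch only, written against the older Hadoop API used in these examples (where DatanodeDescriptor still lives in org.apache.hadoop.hdfs.server.namenode and FSNamesystem exposes DFSNodesStatus); newer Hadoop releases moved the class and changed these calls.

import java.io.IOException;
import java.util.ArrayList;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;

public class DatanodeDescriptorSketch {
  public static void main(String[] args) throws IOException {
    // Two-datanode in-process cluster, same constructor style as Example 1 below.
    MiniDFSCluster cluster = new MiniDFSCluster(new Configuration(), 2, true, null);
    try {
      cluster.waitActive();

      ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
      ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
      // Populate the live/dead node lists, as the test examples below do.
      cluster.getNameNode().namesystem.DFSNodesStatus(live, dead);

      for (DatanodeDescriptor dn : live) {
        // Per-descriptor statistics used throughout the examples.
        System.out.println(dn.getName()
            + " capacity=" + dn.getCapacity()
            + " remaining=" + dn.getRemaining()
            + " blocksScheduled=" + dn.getBlocksScheduled());
      }
    } finally {
      cluster.shutdown();
    }
  }
}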

Example 1: testBlocksScheduledCounter

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the required package/class
public void testBlocksScheduledCounter() throws IOException {
  
  MiniDFSCluster cluster = new MiniDFSCluster(new Configuration(), 1, 
                                              true, null);
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  
  //open a file and write a few bytes:
  FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"));
  for (int i=0; i<1024; i++) {
    out.write(i);
  }
  // flush to make sure a block is allocated.
  ((DFSOutputStream)(out.getWrappedStream())).sync();
  
  ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
  cluster.getNameNode().namesystem.DFSNodesStatus(dnList, dnList);
  DatanodeDescriptor dn = dnList.get(0);
  
  assertEquals(1, dn.getBlocksScheduled());
 
  // close the file and the counter should go to zero.
  out.close();   
  assertEquals(0, dn.getBlocksScheduled());
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 26, Source: TestBlocksScheduledCounter.java


Example 2: populateDatanodeMetrics

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the required package/class
/**
 * Pushes live/dead datanode metrics and returns the list of live nodes,
 * so it can be reused.
 */
private void populateDatanodeMetrics(ArrayList<DatanodeDescriptor> live,
    ArrayList<DatanodeDescriptor> dead) {

  DatanodeStatus status = FSNamesystemDatanodeHelper.getDatanodeStats(fsNameSystem,
      live, dead);

  // populate metrics
  numLiveNodes.set(status.numLive);
  numLiveExcludedNodes.set(status.numLiveExcluded);
  numLiveDecommissioningInProgressNodes.set(status.numLiveDecommissioningInProgress);
  numLiveDecommissioned.set(status.numLiveDecommissioned);

  numDeadNodes.set(status.numDead);
  numDeadExcludedNodes.set(status.numDeadExcluded);
  numDeadDecommissioningNotCompletedNodes.set(status.numDeadDecommissioningNotCompleted);
  numDeadDecommissioned.set(status.numDeadDecommissioned);
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 22, Source: FSNamesystemMetrics.java


Example 3: testBlocksScheduledCounter

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the required package/class
public void testBlocksScheduledCounter() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
                                             .build();

  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  
  //open a file and write a few bytes:
  FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"));
  for (int i=0; i<1024; i++) {
    out.write(i);
  }
  // flush to make sure a block is allocated.
  ((DFSOutputStream)(out.getWrappedStream())).hflush();
  
  ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
  cluster.getNamesystem().DFSNodesStatus(dnList, dnList);
  DatanodeDescriptor dn = dnList.get(0);
  
  assertEquals(1, dn.getBlocksScheduled());
 
  // close the file and the counter should go to zero.
  out.close();   
  assertEquals(0, dn.getBlocksScheduled());
}
 
Developer: cumulusyebl, Project: cumulus, Lines of code: 26, Source: TestBlocksScheduledCounter.java


Example 4: testPseudoSortByDistance

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the required package/class
public void testPseudoSortByDistance() throws Exception {
  DatanodeDescriptor[] testNodes = new DatanodeDescriptor[3];
  
  // array contains both local node & local rack node
  testNodes[0] = dataNodes[1];
  testNodes[1] = dataNodes[2];
  testNodes[2] = dataNodes[0];
  cluster.pseudoSortByDistance(dataNodes[0], testNodes);
  assertTrue(testNodes[0] == dataNodes[0]);
  assertTrue(testNodes[1] == dataNodes[1]);
  assertTrue(testNodes[2] == dataNodes[2]);

  // array contains local node
  testNodes[0] = dataNodes[1];
  testNodes[1] = dataNodes[3];
  testNodes[2] = dataNodes[0];
  cluster.pseudoSortByDistance(dataNodes[0], testNodes);
  assertTrue(testNodes[0] == dataNodes[0]);
  assertTrue(testNodes[1] == dataNodes[1]);
  assertTrue(testNodes[2] == dataNodes[3]);

  // array contains local rack node
  testNodes[0] = dataNodes[5];
  testNodes[1] = dataNodes[3];
  testNodes[2] = dataNodes[1];
  cluster.pseudoSortByDistance(dataNodes[0], testNodes);
  assertTrue(testNodes[0] == dataNodes[1]);
  assertTrue(testNodes[1] == dataNodes[3]);
  assertTrue(testNodes[2] == dataNodes[5]);
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 31, Source: TestNetworkTopology.java


Example 5: pickNodesAtRandom

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the required package/class
/**
 * This picks a large number of nodes at random in order to ensure coverage
 * 
 * @param numNodes the number of nodes
 * @param excludedScope the excluded scope
 * @return the frequency that nodes were chosen
 */
private Map<Node, Integer> pickNodesAtRandom(int numNodes,
    String excludedScope) {
  Map<Node, Integer> frequency = new HashMap<Node, Integer>();
  for (DatanodeDescriptor dnd : dataNodes) {
    frequency.put(dnd, 0);
  }

  for (int j = 0; j < numNodes; j++) {
    Node random = cluster.chooseRandom(excludedScope);
    frequency.put(random, frequency.get(random) + 1);
  }
  return frequency;
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 21, Source: TestNetworkTopology.java


Example 6: updateDatanodeMap

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the required package/class
/**
 * Does a lot of hacks to change namenode and datanode data structures to
 * identify datanodes by the machine name rather than the IP address. This is
 * done since we can give each datanode a different hostname in a unit test
 * but not a different IP address.
 * 
 * @param cluster
 *          the {@link MiniDFSCluster} to operate on
 * @throws Exception
 */
private static void updateDatanodeMap(MiniDFSCluster cluster)
    throws Exception {
  FSNamesystem namesystem = cluster.getNameNode().namesystem;
  for (DataNode node : cluster.getDataNodes()) {
    // Get old descriptor.
    DatanodeID dnId = createDataNodeID(node);
    DatanodeDescriptor dnDs = namesystem.getDatanode(dnId);

    // Create new id and descriptor.
    DatanodeID newId = new DatanodeID(node.getMachineName(),
        dnDs.getStorageID(), dnDs.getInfoPort(), dnDs.getIpcPort());
    DatanodeDescriptor newDS = new DatanodeDescriptor(newId,
        dnDs.getNetworkLocation(), dnDs.getHostName(), dnDs.getCapacity(),
        dnDs.getDfsUsed(), dnDs.getRemaining(), dnDs.getNamespaceUsed(),
        dnDs.getXceiverCount());
    
    newDS.isAlive = true;
    // Overwrite NN maps with new descriptor.
    namesystem.writeLock();
    namesystem.clusterMap.remove(dnDs);
    namesystem.resolveNetworkLocation(newDS);
    namesystem.unprotectedAddDatanode(newDS);
    namesystem.clusterMap.add(newDS);
    namesystem.writeUnlock();
    // Overwrite DN map with new registration.
    node.setRegistrationName(node.getMachineName());
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 39, Source: TestFavoredNodes.java


Example 7: checkDecommissionStatus

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the required package/class
private void checkDecommissionStatus(DatanodeDescriptor decommNode,
    int expectedUnderRep, int expectedDecommissionOnly,
    int expectedUnderRepInOpenFiles) {
  assertEquals(decommNode.decommissioningStatus.getUnderReplicatedBlocks(),
      expectedUnderRep);
  assertEquals(
      decommNode.decommissioningStatus.getDecommissionOnlyReplicas(),
      expectedDecommissionOnly);
  assertEquals(decommNode.decommissioningStatus
      .getUnderReplicatedInOpenFiles(), expectedUnderRepInOpenFiles);
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 12, Source: TestDecommissioningStatus.java


Example 8: getLiveDatanodeCapacity

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the required package/class
public static long getLiveDatanodeCapacity(FSNamesystem ns) {
  ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
  ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
  ns.DFSNodesStatus(live, dead);
  long capacity = 0;
  for (final DatanodeDescriptor dn : live) {
    capacity += dn.getCapacity();
  }
  return capacity;
}
 
Developer: cumulusyebl, Project: cumulus, Lines of code: 11, Source: DFSTestUtil.java


Example 9: waitForDatanodeStatus

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the required package/class
public static void waitForDatanodeStatus(FSNamesystem ns, int expectedLive, 
    int expectedDead, long expectedVolFails, long expectedTotalCapacity, 
    long timeout) throws InterruptedException, TimeoutException {
  ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
  ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
  final int ATTEMPTS = 10;
  int count = 0;
  long currTotalCapacity = 0;
  int volFails = 0;

  do {
    Thread.sleep(timeout);
    live.clear();
    dead.clear();
    ns.DFSNodesStatus(live, dead);
    currTotalCapacity = 0;
    volFails = 0;
    for (final DatanodeDescriptor dd : live) {
      currTotalCapacity += dd.getCapacity();
      volFails += dd.getVolumeFailures();
    }
    count++;
  } while ((expectedLive != live.size() ||
            expectedDead != dead.size() ||
            expectedTotalCapacity != currTotalCapacity ||
            expectedVolFails != volFails)
           && count < ATTEMPTS);

  if (count == ATTEMPTS) {
    throw new TimeoutException("Timed out waiting for capacity."
        + " Live = "+live.size()+" Expected = "+expectedLive
        + " Dead = "+dead.size()+" Expected = "+expectedDead
        + " Total capacity = "+currTotalCapacity
        + " Expected = "+expectedTotalCapacity
        + " Vol Fails = "+volFails+" Expected = "+expectedVolFails);
  }
}
 
Developer: cumulusyebl, Project: cumulus, Lines of code: 38, Source: DFSTestUtil.java


Example 10: testPipeline

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the required package/class
public void testPipeline() {
  DatanodeDescriptor[] testNodes = new DatanodeDescriptor[3];
  // array contains both local node & local rack node
  testNodes[0] = dataNodes[1];
  testNodes[1] = dataNodes[2];
  testNodes[2] = dataNodes[0];
  cluster.getPipeline(dataNodes[0], testNodes);
  assertTrue(testNodes[0] == dataNodes[0]);
  assertTrue(testNodes[1] == dataNodes[1]);
  assertTrue(testNodes[2] == dataNodes[2]);

  // array does not contain local node or local rack node
  testNodes[0] = dataNodes[5];
  testNodes[1] = dataNodes[3];
  testNodes[2] = dataNodes[2];
  cluster.getPipeline(dataNodes[0], testNodes);
  assertTrue(testNodes[0] == dataNodes[2] && testNodes[1] == dataNodes[3] ||
      testNodes[0] == dataNodes[3] && testNodes[1] == dataNodes[2]);
  assertTrue(testNodes[2] == dataNodes[5]);

  // array contains local rack node
  testNodes[0] = dataNodes[5];
  testNodes[1] = dataNodes[3];
  testNodes[2] = dataNodes[1];
  cluster.getPipeline(dataNodes[0], testNodes);
  assertTrue(testNodes[0] == dataNodes[1]);
  assertTrue(testNodes[1] == dataNodes[3]);
  assertTrue(testNodes[2] == dataNodes[5]);

  // two on a different rack, two in a different datacenter
  testNodes = new DatanodeDescriptor[5];
  testNodes[0] = dataNodes[5];
  testNodes[1] = dataNodes[3];
  testNodes[2] = dataNodes[6];
  testNodes[3] = dataNodes[0];
  testNodes[4] = dataNodes[2];
  cluster.getPipeline(dataNodes[0], testNodes);
  assertTrue(testNodes[0] == dataNodes[0]);
  assertTrue(testNodes[1] == dataNodes[2] && testNodes[2] == dataNodes[3] ||
      testNodes[1] == dataNodes[3] && testNodes[2] == dataNodes[2]);
  assertTrue(testNodes[3] == dataNodes[5] && testNodes[4] == dataNodes[6] ||
      testNodes[3] == dataNodes[6] && testNodes[4] == dataNodes[5]);
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 44, Source: TestNetworkTopology.java


Example 11: testDeadDatanodes

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the required package/class
@Test
public void testDeadDatanodes() throws Exception {
  DFSTestUtil util = new DFSTestUtil("testDeadDatanodes", 1, 1, MAX_FILE_SIZE);
  String topDir = "/testDeadDatanodes";
  util.createFiles(fs, topDir);
  FastCopy fastCopy = new FastCopy(conf);

  // Find the locations for the last block of the file.
  String filename = util.getFileNames(topDir)[0];
  LocatedBlocks lbks = cluster.getNameNode().getBlockLocations(filename, 0,
      Long.MAX_VALUE);
  assertNotNull(lbks);

  int namespaceID = cluster.getNameNode().getNamespaceID();
  DataNode dn = cluster.getDataNodes().get(0);
  DatanodeID dnId = dn.getDNRegistrationForNS(namespaceID);
  List<Block> deleteList = new ArrayList<Block>();
  for(LocatedBlock block : lbks.getLocatedBlocks()) {
    deleteList.add(block.getBlock());
  }

  assertEquals(lbks.locatedBlockCount(),
      dn.getFSDataset().getBlockReport(namespaceID).length);
  DatanodeDescriptor dnDs = cluster.getNameNode().namesystem.getDatanode(dnId);
  dnDs.addBlocksToBeInvalidated(deleteList);

  // Make sure all blocks are deleted.
  while(dn.getFSDataset().getBlockReport(namespaceID).length != 0) {
    Thread.sleep(1000);
  }

  // Now run FastCopy
  try {
    for (String fileName : util.getFileNames(topDir)) {
      fastCopy.copy(fileName, fileName + "dst", (DistributedFileSystem) fs,
          (DistributedFileSystem) fs);
    }
  } finally {
    fastCopy.shutdown();
  }

  // Make sure no errors are reported.
  Map<DatanodeInfo, Integer> dnErrors = fastCopy.getDatanodeErrors();
  assertEquals(0, dnErrors.size());
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 46, Source: TestFastCopyDeletedBlocks.java


Example 12: testDecommissionStatus

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the required package/class
/**
 * Tests Decommissioning Status in DFS.
 */

@Test
public void testDecommissionStatus() throws IOException, InterruptedException {
  InetSocketAddress addr = new InetSocketAddress("localhost", cluster
      .getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ", 2, info.length);
  FileSystem fileSys = cluster.getFileSystem();

  short replicas = 2;
  //
  // Decommission one node. Verify the decommission status
  // 
  Path file1 = new Path("decommission.dat");
  writeFile(fileSys, file1, replicas);

  Path file2 = new Path("decommission1.dat");
  FSDataOutputStream st1 = writeIncompleteFile(fileSys, file2, replicas);
  Thread.sleep(5000);

  FSNamesystem fsn = cluster.getNameNode().getNamesystem();
  for (int iteration = 0; iteration < numDatanodes; iteration++) {
    String downnode = decommissionNode(fsn, conf, client, localFileSys,
        iteration);
    decommissionedNodes.add(downnode);
    Thread.sleep(5000);
    ArrayList<DatanodeDescriptor> decommissioningNodes = fsn
        .getDecommissioningNodesList();
    if (iteration == 0) {
      assertEquals(decommissioningNodes.size(), 1);
      DatanodeDescriptor decommNode = decommissioningNodes.get(0);
      checkDecommissionStatus(decommNode, 4, 0, 2);
    } else {
      assertEquals(decommissioningNodes.size(), 2);
      DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
      DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
      checkDecommissionStatus(decommNode1, 4, 4, 2);
      checkDecommissionStatus(decommNode2, 4, 4, 2);
    }
  }
  // Call refreshNodes on FSNamesystem with empty exclude file.
  // This will remove the datanodes from decommissioning list and
  // make them available again.
  writeConfigFile(localFileSys, excludeFile, null);
  fsn.refreshNodes(conf);
  st1.close();
  cleanupFile(fileSys, file1);
  cleanupFile(fileSys, file2);
  cleanupFile(localFileSys, dir);
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 55, Source: TestDecommissioningStatus.java


Example 13: testNameNodeBehavior

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor; // import the required package/class
public void testNameNodeBehavior() 
    throws IOException, ClassNotFoundException, InterruptedException {
  
  setup(2, -1);
  final int fileLenBlocks = STRIPE_LENGTH;
  final int repl = 1;
  
  // Get set up with datanode references
  DatanodeInfo[] nodeInfos = namenode.getDatanodeReport(DatanodeReportType.ALL);
  DatanodeDescriptor[] nodes = new DatanodeDescriptor[nodeInfos.length];
  for (int i = 0; i < nodes.length; i++) {
    nodes[i] = namenode.namesystem.getDatanode(nodeInfos[i]);
    LOG.info("nodes[" + i + "]=" + nodes[i].getName());
  }
  
  // Create file with one block on nodes[1] and the rest on nodes[0]
  Path raidPath = new Path("/raidrs");
  Path filePath = new Path("/user/hadoop/testNameNodeBehavior/file");
  long[] crc = createRandomFileDispersed(filePath, fileLenBlocks, 
                                       nodes[0], nodes[1]);
  
  FileStatus file = fileSys.getFileStatus(filePath);
  
  // Raid the file; parity blocks go on nodes[0]
  BlockPlacementPolicyFakeData.lastInstance.overridingDatanode = nodes[0];
  
  RaidNode.doRaid(conf, file, raidPath, Codec.getCodec("rs"),
      new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE, 
      false, repl, repl);
  Thread.sleep(1000);
  printFileLocations(file);
  
  BlockPlacementPolicyFakeData.lastInstance.overridingDatanode = null;
  
  // Now decommission the second node
  ArrayList<String> decommissioned = new ArrayList<String>();
  decommissioned.add(nodes[1].getName());
  
  writeExcludesFileAndRefresh(decommissioned);
  
  // Wait for the BlockRegenerator to do its thing
  long now = System.currentTimeMillis();
  BlockIntegrityMonitor bf = raidnode.blockIntegrityMonitor;
  while ((bf.getNumFilesCopied() == 0) && (bf.getNumFileCopyFailures() == 0)
    && ((System.currentTimeMillis() - now) < 30000)) {
    LOG.info("Waiting for the BlockRegenerator to finish... ");
    Thread.sleep(1000);
  }
  
  // Validate result
  printFileLocations(file);
  assertEquals(0, bf.getNumFileCopyFailures());
  assertEquals(1, bf.getNumFilesCopied());
  
  // No corrupt block fixing should have happened
  assertEquals("corrupt block fixer unexpectedly performed fixing", 
      0, bf.getNumFilesFixed());
  assertEquals("corrupt block fixer unexpectedly attempted fixing", 
      0, bf.getNumFileFixFailures());
  
  validateFileCopy(fileSys, filePath, file.getLen(), crc, false);
  
  teardown();
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 65, Source: TestBlockCopier.java



Note: The org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The code snippets come from open-source projects contributed by their respective authors, and copyright remains with the original authors. Refer to each project's license before distributing or using the code; do not republish without permission.

