This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption. If you are wondering what StartupOption is for, how to use it, or where to find usage examples, the curated code samples below should help.
The StartupOption class belongs to the org.apache.hadoop.hdfs.server.common.HdfsConstants package. Twenty code examples are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code samples.
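Before the examples, here is a minimal, self-contained sketch of the idiom that recurs throughout them: matching a command-line flag against the enum's getName() value, which (as Examples 13 and 16 show) includes the leading dash. The cmd value here is a hypothetical input, not taken from any of the cited projects:

import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;

public class StartupOptionSketch {
  public static void main(String[] args) {
    String cmd = "-upgrade";                    // hypothetical command-line flag
    StartupOption opt = StartupOption.REGULAR;  // conventional default mode
    if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
      opt = StartupOption.UPGRADE;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      opt = StartupOption.ROLLBACK;
    }
    System.out.println("Startup option: " + opt.getName());
  }
}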
Example 1: MiniDFSCluster
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
public MiniDFSCluster(int nameNodePort,
Configuration conf,
int numDataNodes,
boolean format,
boolean manageNameDfsDirs,
boolean manageDataDfsDirs,
StartupOption operation,
String[] racks, String hosts[],
long[] simulatedCapacities,
boolean waitSafeMode,
boolean setupHostsFile,
int numNameNodes,
boolean federation) throws IOException {
this(nameNodePort, conf, numDataNodes, format, manageNameDfsDirs,
manageDataDfsDirs, operation, racks, hosts, simulatedCapacities,
waitSafeMode, setupHostsFile, numNameNodes, federation, true);
}
Author: rhli, Project: hadoop-EAR, Lines: 18, Source: MiniDFSCluster.java
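As a hedged usage sketch, the constructor above might be invoked as follows in a test. Every literal argument here (port 0 for a free port, a single DataNode, null racks/hosts/capacities) is an illustrative assumption, not code from the project:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;

Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster(
    0,                      // nameNodePort: 0 = pick any free port
    conf,
    1,                      // numDataNodes
    true,                   // format
    true, true,             // manageNameDfsDirs, manageDataDfsDirs
    StartupOption.REGULAR,  // operation
    null, null, null,       // racks, hosts, simulatedCapacities
    true,                   // waitSafeMode
    false,                  // setupHostsFile
    1,                      // numNameNodes
    false);                 // federation disabled
try {
  cluster.waitActive();
  // ... exercise the cluster ...
} finally {
  cluster.shutdown();
}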
Example 2: createFederatedNameNode
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
private void createFederatedNameNode(int nnIndex, Configuration conf,
int numDataNodes, boolean manageNameDfsDirs, boolean format,
StartupOption operation, String nameserviceId)
throws IOException {
conf.set(FSConstants.DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
NameNode nn = createNameNode(nnIndex, conf, numDataNodes, manageNameDfsDirs,
format, operation, nameserviceId);
DFSUtil.setGenericConf(conf, nameserviceId,
NameNode.NAMESERVICE_SPECIFIC_KEYS);
conf.set(DFSUtil.getNameServiceIdKey(
FSConstants.DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NetUtils.
toIpPort(nn.getHttpAddress()));
conf.set(DFSUtil.getNameServiceIdKey(
NameNode.DATANODE_PROTOCOL_ADDRESS, nameserviceId), NetUtils.
toIpPort(nn.getNameNodeDNAddress()));
nameNodes[nnIndex] = new NameNodeInfo(nn, new Configuration(conf));
}
Author: rhli, Project: hadoop-EAR, Lines: 18, Source: MiniDFSCluster.java
Example 3: checkNameNodeFiles
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
/**
* Start with the -importCheckpoint option and verify that the files are in separate directories and of the right length
* @throws IOException
*/
private void checkNameNodeFiles() throws IOException{
// start namenode with import option
LOG.info("-- about to start DFS cluster");
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster(0, config, 1, false, false, false, StartupOption.IMPORT, null, null, null);
cluster.waitActive();
LOG.info("--NN started with checkpoint option");
NameNode nn = cluster.getNameNode();
assertNotNull(nn);
// Verify that image file sizes did not change.
FSImage image = nn.getFSImage();
verifyDifferentDirs(image, this.fsimageLength, this.editsLength);
} finally {
if(cluster != null)
cluster.shutdown();
}
}
Author: rhli, Project: hadoop-EAR, Lines: 24, Source: TestStartup.java
Example 4: parseArguments
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
/**
* Parse and verify command line arguments and set configuration parameters.
*
* @return false if the passed arguments are incorrect
*/
private static boolean parseArguments(String args[],
Configuration conf) {
int argsLen = (args == null) ? 0 : args.length;
StartupOption startOpt = StartupOption.REGULAR;
for(int i=0; i < argsLen; i++) {
String cmd = args[i];
if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
LOG.error("-r, --rack arguments are not supported anymore. RackID " +
"resolution is handled by the NameNode.");
System.exit(-1);
} else if ("-rollback".equalsIgnoreCase(cmd)) {
startOpt = StartupOption.ROLLBACK;
} else if ("-regular".equalsIgnoreCase(cmd)) {
startOpt = StartupOption.REGULAR;
} else
return false;
}
setStartupOption(conf, startOpt);
return true;
}
Author: rhli, Project: hadoop-EAR, Lines: 26, Source: AvatarDataNode.java
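A short sketch of how this parser might be driven; the argument arrays are hypothetical command lines:

Configuration conf = new Configuration();
// "-rollback" is recognized and stored in conf via setStartupOption(...).
boolean ok = parseArguments(new String[] { "-rollback" }, conf);   // true
// An unrecognized flag aborts parsing so the caller can print usage.
boolean bad = parseArguments(new String[] { "-badflag" }, conf);   // false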
Example 5: testUpgradeFailureAfterSaveImage
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
/**
* This test simulates the scenario where the upgrade fails after saving the image
* and ensures that recovery on the journal nodes works correctly.
*/
@Test
public void testUpgradeFailureAfterSaveImage() throws Exception {
h.failAfterSaveImage = true;
long[] checksums = getChecksums();
// Upgrade the cluster.
MiniJournalCluster journalCluster = cluster.getJournalCluster();
// This upgrade will fail after saving the image.
try {
cluster = new MiniAvatarCluster.Builder(conf).numDataNodes(1)
.format(false).startOpt(StartupOption.UPGRADE)
.setJournalCluster(journalCluster).instantionRetries(1).build();
fail("Upgrade did not throw exception");
} catch (IOException ie) {
// ignore.
}
// This will correctly recover the upgrade directories.
cluster = new MiniAvatarCluster.Builder(conf).numDataNodes(1).format(false)
.setJournalCluster(cluster.getJournalCluster()).build();
verifyUpgrade(checksums, true);
}
Author: rhli, Project: hadoop-EAR, Lines: 29, Source: TestAvatarQJMUpgrade.java
Example 6: testRollback
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
/**
* This test verifies that we can roll back the upgrade for the journal nodes.
*/
@Test
public void testRollback() throws Exception {
// Upgrade the namenode.
long[] checksums = doUpgrade(false);
cluster.shutDownAvatarNodes();
cluster.shutDownDataNodes();
// Now rollback the cluster.
cluster = new MiniAvatarCluster.Builder(conf).numDataNodes(1).format(false)
.startOpt(StartupOption.ROLLBACK)
.setJournalCluster(cluster.getJournalCluster()).build();
assertNotNull(h.checksumsAfterRollback);
verifyRollback(checksums, true, h.checksumsAfterRollback);
}
Author: rhli, Project: hadoop-EAR, Lines: 20, Source: TestAvatarQJMUpgrade.java
Example 7: verifyDistributedUpgradeProgress
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
/**
* Verify that the distributed upgrade state is valid.
* @param startOpt the option the namenode was started with.
*/
void verifyDistributedUpgradeProgress(StartupOption startOpt
) throws IOException {
if(startOpt == StartupOption.ROLLBACK || startOpt == StartupOption.IMPORT)
return;
assert upgradeManager != null : "FSNameSystem.upgradeManager is null.";
if(startOpt != StartupOption.UPGRADE) {
if(upgradeManager.getUpgradeState())
throw new IOException(
"\n Previous distributed upgrade was not completed. "
+ "\n Please restart NameNode with -upgrade option.");
if(upgradeManager.getDistributedUpgrades() != null)
throw new IOException("\n Distributed upgrade for NameNode version "
+ upgradeManager.getUpgradeVersion()
+ " to current LV " + layoutVersion
+ " is required.\n Please restart NameNode"
+ " with -upgrade option.");
}
}
Author: rhli, Project: hadoop-EAR, Lines: 24, Source: NNStorage.java
Example 8: recoverStorageDirs
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
/**
* For each storage directory, performs recovery of incomplete transitions
* (e.g. upgrade, rollback, checkpoint) and inserts the directory's storage
* state into the dataDirStates map.
* @param dataDirStates output of storage directory states
* @return true if there is at least one valid formatted storage directory
*/
private boolean recoverStorageDirs(StartupOption startOpt,
Map<StorageDirectory, StorageState> dataDirStates) throws IOException {
boolean isFormatted = false;
for (Iterator<StorageDirectory> it =
storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
StorageState curState;
try {
curState = sd.analyzeStorage(startOpt);
isFormatted |= NNStorage.recoverDirectory(sd, startOpt, curState, true);
} catch (IOException ioe) {
sd.unlock();
throw ioe;
}
dataDirStates.put(sd,curState);
}
return isFormatted;
}
Author: rhli, Project: hadoop-EAR, Lines: 26, Source: FSImage.java
Example 9: transitionJournal
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
public void transitionJournal(NamespaceInfo nsInfo,
Transition transition, StartupOption startOpt) throws IOException {
switch (transition) {
case UPGRADE:
doUpgradeJournal(nsInfo);
break;
case COMPLETE_UPGRADE:
completeUpgradeJournal(nsInfo);
break;
case RECOVER:
recoverJournal(startOpt);
break;
case FORMAT:
formatJournal(nsInfo);
break;
case ROLLBACK:
rollbackJournal(nsInfo);
break;
case FINALIZE:
finalizeJournal();
break;
}
}
Author: rhli, Project: hadoop-EAR, Lines: 24, Source: Journal.java
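A hedged sketch of the calling side of this dispatch method. The Transition constants are those named in the switch above; the journal and nsInfo objects are assumed to be initialized elsewhere:

// Format a fresh journal; within this method only the RECOVER case
// consumes startOpt, so null is passed for FORMAT.
journal.transitionJournal(nsInfo, Transition.FORMAT, null);
// Recovery forwards the startup option through to recoverJournal(...).
journal.transitionJournal(nsInfo, Transition.RECOVER, StartupOption.REGULAR);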
Example 10: transitionImage
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
public void transitionImage(NamespaceInfo nsInfo,
Transition transition, StartupOption startOpt) throws IOException {
switch (transition) {
case UPGRADE:
doUpgradeImage(nsInfo);
break;
case COMPLETE_UPGRADE:
completeUpgradeImage(nsInfo);
break;
case RECOVER:
recoverImage(startOpt);
break;
case FORMAT:
formatImage(nsInfo);
break;
case ROLLBACK:
rollbackImage(nsInfo);
break;
case FINALIZE:
finalizeImage();
break;
}
}
Author: rhli, Project: hadoop-EAR, Lines: 24, Source: Journal.java
Example 11: recover
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
void recover(StartupOption startOpt) throws IOException {
LOG.info("Recovering journal " + sd + " with nsid: " + getNamespaceID());
// Unlock the directory before formatting, because we will
// re-analyze it after format(). The analyzeStorage() call
// below is responsible for re-locking it. This is a no-op
// if the storage is not currently locked.
unlockAll();
try {
StorageState curState = sd.analyzeStorage(startOpt);
NNStorage.recoverDirectory(sd, startOpt, curState, false);
} catch (IOException ioe) {
sd.unlock();
throw ioe;
}
}
Author: rhli, Project: hadoop-EAR, Lines: 17, Source: JNStorage.java
Example 12: testUpgradeFromImage
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
public void testUpgradeFromImage() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
System.setProperty("test.build.data", "build/test/data");
}
conf.setInt("dfs.datanode.scan.period.hours", -1); // block scanning off
cluster = new MiniDFSCluster(0, conf, numDataNodes, false, true,
StartupOption.UPGRADE, null);
cluster.waitActive();
DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
DFSClient dfsClient = dfs.dfs;
// Safemode will be off only after the upgrade is complete. Wait for it.
while ( dfsClient.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET) ) {
LOG.info("Waiting for SafeMode to be OFF.");
try {
Thread.sleep(1000);
} catch (InterruptedException ignored) {}
}
verifyFileSystem(dfs);
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
Author: Seagate, Project: hadoop-on-lustre, Lines: 27, Source: TestDFSUpgradeFromImage.java
Example 13: parseArguments
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
private static StartupOption parseArguments(String args[]) {
int argsLen = (args == null) ? 0 : args.length;
StartupOption startOpt = StartupOption.REGULAR;
for(int i=0; i < argsLen; i++) {
String cmd = args[i];
if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.FORMAT;
} else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.REGULAR;
} else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.UPGRADE;
} else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.ROLLBACK;
} else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.FINALIZE;
} else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.IMPORT;
} else
return null;
}
return startOpt;
}
Author: Seagate, Project: hadoop-on-lustre, Lines: 23, Source: NameNode.java
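Because this variant returns the parsed option (or null for an unrecognized flag) instead of a boolean, the caller can branch on the result. A hedged sketch with a hypothetical argument array:

StartupOption opt = parseArguments(new String[] { "-format" });
if (opt == null) {
  // Unknown flag: how to report usage is up to the caller.
  System.err.println("Unrecognized startup option");
} else if (opt == StartupOption.FORMAT) {
  // proceed to format the namespace
}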
Example 14: verifyDistributedUpgradeProgress
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
private void verifyDistributedUpgradeProgress(StartupOption startOpt
) throws IOException {
if(startOpt == StartupOption.ROLLBACK || startOpt == StartupOption.IMPORT)
return;
UpgradeManager um = FSNamesystem.getFSNamesystem().upgradeManager;
assert um != null : "FSNameSystem.upgradeManager is null.";
if(startOpt != StartupOption.UPGRADE) {
if(um.getUpgradeState())
throw new IOException(
"\n Previous distributed upgrade was not completed. "
+ "\n Please restart NameNode with -upgrade option.");
if(um.getDistributedUpgrades() != null)
throw new IOException("\n Distributed upgrade for NameNode version "
+ um.getUpgradeVersion() + " to current LV " + FSConstants.LAYOUT_VERSION
+ " is required.\n Please restart NameNode with -upgrade option.");
}
}
Author: Seagate, Project: hadoop-on-lustre, Lines: 18, Source: FSImage.java
Example 15: parseArguments
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
/**
* Parse and verify command line arguments and set configuration parameters.
*
* @return false if the passed arguments are incorrect
*/
private static boolean parseArguments(String args[],
Configuration conf) {
int argsLen = (args == null) ? 0 : args.length;
StartupOption startOpt = StartupOption.REGULAR;
for(int i=0; i < argsLen; i++) {
String cmd = args[i];
if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
LOG.error("-r, --rack arguments are not supported anymore. RackID " +
"resolution is handled by the NameNode.");
System.exit(-1);
} else if ("-rollback".equalsIgnoreCase(cmd)) {
startOpt = StartupOption.ROLLBACK;
} else if ("-regular".equalsIgnoreCase(cmd)) {
startOpt = StartupOption.REGULAR;
} else
return false;
}
setStartupOption(conf, startOpt);
return true;
}
Author: Seagate, Project: hadoop-on-lustre, Lines: 26, Source: DataNode.java
Example 16: parseArguments
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
private static StartupOption parseArguments(String args[]) {
int argsLen = (args == null) ? 0 : args.length;
StartupOption startOpt = StartupOption.REGULAR;
for(int i=0; i < argsLen; i++) {
String cmd = args[i];
if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.FORMAT;
} else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.REGULAR;
} else if (StartupOption.BACKUP.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.BACKUP;
} else if (StartupOption.CHECKPOINT.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.CHECKPOINT;
} else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.UPGRADE;
} else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.ROLLBACK;
} else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.FINALIZE;
} else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.IMPORT;
} else
return null;
}
return startOpt;
}
Author: cumulusyebl, Project: cumulus, Lines: 27, Source: NameNode.java
Example 17: verifyDistributedUpgradeProgress
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
private void verifyDistributedUpgradeProgress(StartupOption startOpt
) throws IOException {
if(startOpt == StartupOption.ROLLBACK || startOpt == StartupOption.IMPORT)
return;
UpgradeManager um = getFSNamesystem().upgradeManager;
assert um != null : "FSNameSystem.upgradeManager is null.";
if(startOpt != StartupOption.UPGRADE) {
if(um.getUpgradeState())
throw new IOException(
"\n Previous distributed upgrade was not completed. "
+ "\n Please restart NameNode with -upgrade option.");
if(um.getDistributedUpgrades() != null)
throw new IOException("\n Distributed upgrade for NameNode version "
+ um.getUpgradeVersion() + " to current LV " + FSConstants.LAYOUT_VERSION
+ " is required.\n Please restart NameNode with -upgrade option.");
}
}
Author: iVCE, Project: RDFS, Lines: 18, Source: FSImage.java
Example 18: startBackupNode
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
/**
* Start the BackupNode
*/
public BackupNode startBackupNode(Configuration conf) throws IOException {
String dataDir = getTestingDir();
// Set up testing environment directories
hdfsDir = new File(dataDir, "backupNode");
if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
}
File currDir = new File(hdfsDir, "name2");
File currDir2 = new File(currDir, "current");
File currDir3 = new File(currDir, "image");
assertTrue(currDir.mkdirs());
assertTrue(currDir2.mkdirs());
assertTrue(currDir3.mkdirs());
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
fileAsURI(new File(hdfsDir, "name2")).toString());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "${dfs.name.dir}");
// Start BackupNode
String[] args = new String [] { StartupOption.BACKUP.getName() };
BackupNode bu = (BackupNode)NameNode.createNameNode(args, conf);
return bu;
}
Author: cumulusyebl, Project: cumulus, Lines: 29, Source: TestHDFSServerPorts.java
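A hedged sketch of a caller. The shutdown call is an assumption: BackupNode extends NameNode in this codebase, so stop() is presumed to be the inherited lifecycle method; adjust to the project's actual API if it differs:

Configuration conf = new Configuration();
BackupNode backup = startBackupNode(conf);
try {
  // ... exercise checkpointing against the backup node ...
} finally {
  backup.stop(); // assumed shutdown method inherited from NameNode
}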
Example 19: createFederatedNameNode
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
private void createFederatedNameNode(int nnIndex, Configuration conf,
int numDataNodes, boolean manageNameDfsDirs, boolean format,
StartupOption operation, String nameserviceId)
throws IOException {
conf.set(FSConstants.DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
NameNode nn = createNameNode(nnIndex, conf, numDataNodes, manageNameDfsDirs,
format, operation, nameserviceId);
DFSUtil.setGenericConf(conf, nameserviceId,
NameNode.NAMESERVICE_SPECIFIC_KEYS);
conf.set(DFSUtil.getNameServiceIdKey(
FSConstants.DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NameNode
.getHostPortString(nn.getHttpAddress()));
conf.set(DFSUtil.getNameServiceIdKey(
NameNode.DATANODE_PROTOCOL_ADDRESS, nameserviceId), NameNode
.getHostPortString(nn.getNameNodeDNAddress()));
nameNodes[nnIndex] = new NameNodeInfo(nn, new Configuration(conf));
}
Author: iVCE, Project: RDFS, Lines: 18, Source: MiniDFSCluster.java
Example 20: startNameNodeShouldFail
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; // import the required package/class
/**
* Attempts to start a NameNode with the given operation. Starting
* the NameNode should throw an exception.
*/
void startNameNodeShouldFail(StartupOption operation) {
try {
cluster = new MiniDFSCluster(conf, 0, operation); // should fail
throw new AssertionError("NameNode should have failed to start");
} catch (Exception expected) {
// expected
}
}
Author: rhli, Project: hadoop-EAR, Lines: 13, Source: TestDFSUpgrade.java
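A hedged sketch of how a test might use this helper to assert that a startup mode is rejected when the on-disk state does not support it; which options actually fail depends on the state the test has arranged beforehand:

// e.g. rolling back with no previous upgrade present should fail:
startNameNodeShouldFail(StartupOption.ROLLBACK);
// and upgrading from an unsupported on-disk state should fail too:
startNameNodeShouldFail(StartupOption.UPGRADE);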
Note: The org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult each project's license before using or redistributing the code, and do not repost without permission.