This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure. If you have been wondering what ReplaceDatanodeOnFailure is for, how to use it, or where to find examples of it, the curated class examples below should help.
The ReplaceDatanodeOnFailure class belongs to the org.apache.hadoop.hdfs.protocol.datatransfer package. Five code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
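Before the examples, here is a minimal, self-contained sketch of how client code typically obtains and queries the policy. The two configuration key names are the standard HDFS client keys; the demo class itself and the literal values are illustrative assumptions, not taken from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;

public class ReplaceDatanodeOnFailureDemo {
  public static void main(String[] args) {
    final Configuration conf = new Configuration();
    // Standard HDFS client keys controlling datanode replacement on write failure.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");

    // Resolve the effective policy from the configuration, as DFSClient does in
    // Examples 2 and 4 below.
    final ReplaceDatanodeOnFailure policy = ReplaceDatanodeOnFailure.get(conf);

    // Ask whether a failed write pipeline should get a replacement datanode.
    // With no surviving datanodes (empty array) no policy asks for a replacement,
    // mirroring the noReplica case in the tests below.
    final boolean replace = policy.satisfy(
        (short) 3, new DatanodeInfo[0], false /*isAppend*/, false /*isHflushed*/);
    System.out.println("replace datanode? " + replace);
  }
}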
Example 1: testDefaultPolicy
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure; // import the required package/class
/** Test DEFAULT ReplaceDatanodeOnFailure policy. */
@Test
public void testDefaultPolicy() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final ReplaceDatanodeOnFailure p = ReplaceDatanodeOnFailure.get(conf);

  // datanodes[k] holds the first k datanodes, so existing-replica sets of
  // every size from 0 to 5 are exercised.
  final DatanodeInfo[] infos = new DatanodeInfo[5];
  final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
  datanodes[0] = new DatanodeInfo[0];
  for(int i = 0; i < infos.length; ) {
    infos[i] = DFSTestUtil.getLocalDatanodeInfo(50020 + i);
    i++;
    datanodes[i] = new DatanodeInfo[i];
    System.arraycopy(infos, 0, datanodes[i], 0, datanodes[i].length);
  }

  final boolean[] isAppend = {true, true, false, false};
  final boolean[] isHflushed = {true, false, true, false};
  for(short replication = 1; replication <= infos.length; replication++) {
    for(int nExistings = 0; nExistings < datanodes.length; nExistings++) {
      final DatanodeInfo[] existings = datanodes[nExistings];
      Assert.assertEquals(nExistings, existings.length);

      for(int i = 0; i < isAppend.length; i++) {
        for(int j = 0; j < isHflushed.length; j++) {
          final int half = replication/2;
          final boolean enoughReplica = replication <= nExistings;
          final boolean noReplica = nExistings == 0;
          final boolean replicationL3 = replication < 3;
          final boolean existingsLEhalf = nExistings <= half;
          final boolean isAH = isAppend[i] || isHflushed[j];

          // Expected DEFAULT behavior: never replace when no replica is
          // missing, none survive, or replication < 3; otherwise replace on
          // append/hflush or when at most half of the replicas remain.
          final boolean expected;
          if (enoughReplica || noReplica || replicationL3) {
            expected = false;
          } else {
            expected = isAH || existingsLEhalf;
          }

          final boolean computed = p.satisfy(
              replication, existings, isAppend[i], isHflushed[j]);
          try {
            Assert.assertEquals(expected, computed);
          } catch(AssertionError e) {
            final String s = "replication=" + replication
                + "\nnExistings =" + nExistings
                + "\nisAppend =" + isAppend[i]
                + "\nisHflushed =" + isHflushed[j];
            throw new RuntimeException(s, e);
          }
        }
      }
    }
  }
}
Developer: naver, Project: hadoop, Lines: 57, Source: TestReplaceDatanodeOnFailure.java
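In plain terms, the expected-value logic above encodes the DEFAULT policy: a replacement datanode is requested only when some replica is actually missing (0 < nExistings < replication), the replication factor is at least 3, and either the stream was appended or hflushed (already-visible data is at risk) or no more than half of the replicas remain. For example, with replication = 3 and a single surviving datanode, 1 <= 3/2 holds under integer division, so satisfy(...) must return true and the client asks for a replacement.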
Example 2: DFSClient
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure; // import the required package/class
/**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * Exactly one of nameNodeUri or rpcNamenode must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
    Configuration conf, FileSystem.Statistics stats)
    throws IOException {
  // Copy only the required DFSClient configuration
  this.dfsClientConf = new Conf(conf);
  this.shouldUseLegacyBlockReaderLocal =
      this.dfsClientConf.useLegacyBlockReaderLocal;
  if (this.dfsClientConf.useLegacyBlockReaderLocal) {
    LOG.debug("Using legacy short-circuit local reads.");
  }
  this.conf = conf;
  this.stats = stats;
  this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

  this.ugi = UserGroupInformation.getCurrentUser();
  this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
  this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" +
      DFSUtil.getRandom().nextInt() + "_" + Thread.currentThread().getId();

  if (rpcNamenode != null) {
    // This case is used for testing.
    Preconditions.checkArgument(nameNodeUri == null);
    this.namenode = rpcNamenode;
    dtService = null;
  } else {
    Preconditions.checkArgument(nameNodeUri != null,
        "null URI");
    NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo =
        NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class);
    this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  }

  // read directly from the block file if configured.
  this.domainSocketFactory = new DomainSocketFactory(dfsClientConf);

  String localInterfaces[] =
      conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
  localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
  if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
    LOG.debug("Using local interfaces [" +
        Joiner.on(',').join(localInterfaces)+ "] with addresses [" +
        Joiner.on(',').join(localInterfaceAddrs) + "]");
  }

  this.peerCache = PeerCache.getInstance(dfsClientConf.socketCacheCapacity,
      dfsClientConf.socketCacheExpiry);
}
Developer: ict-carch, Project: hadoop-plus, Lines: 56, Source: DFSClient.java
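As the constructor's contract states, exactly one of nameNodeUri and rpcNamenode must be null, and the ReplaceDatanodeOnFailure policy is resolved once from the configuration when the client is created. Application code normally does not instantiate DFSClient directly; a typical entry point is the FileSystem API, sketched below (the namenode address is a placeholder):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

// Obtain an HDFS client the usual way; DistributedFileSystem creates the
// DFSClient internally from the same configuration.
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);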
Example 3: testDefaultPolicy
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure; // import the required package/class
/** Test DEFAULT ReplaceDatanodeOnFailure policy. */
@Test
public void testDefaultPolicy() throws Exception {
  final ReplaceDatanodeOnFailure p = ReplaceDatanodeOnFailure.DEFAULT;
  final DatanodeInfo[] infos = new DatanodeInfo[5];
  final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
  datanodes[0] = new DatanodeInfo[0];
  for(int i = 0; i < infos.length; ) {
    infos[i] = DFSTestUtil.getLocalDatanodeInfo(50020 + i);
    i++;
    datanodes[i] = new DatanodeInfo[i];
    System.arraycopy(infos, 0, datanodes[i], 0, datanodes[i].length);
  }
  final boolean[] isAppend = {true, true, false, false};
  final boolean[] isHflushed = {true, false, true, false};
  for(short replication = 1; replication <= infos.length; replication++) {
    for(int nExistings = 0; nExistings < datanodes.length; nExistings++) {
      final DatanodeInfo[] existings = datanodes[nExistings];
      Assert.assertEquals(nExistings, existings.length);
      for(int i = 0; i < isAppend.length; i++) {
        for(int j = 0; j < isHflushed.length; j++) {
          final int half = replication/2;
          final boolean enoughReplica = replication <= nExistings;
          final boolean noReplica = nExistings == 0;
          final boolean replicationL3 = replication < 3;
          final boolean existingsLEhalf = nExistings <= half;
          final boolean isAH = isAppend[i] || isHflushed[j];
          final boolean expected;
          if (enoughReplica || noReplica || replicationL3) {
            expected = false;
          } else {
            expected = isAH || existingsLEhalf;
          }
          final boolean computed = p.satisfy(
              replication, existings, isAppend[i], isHflushed[j]);
          try {
            Assert.assertEquals(expected, computed);
          } catch(AssertionError e) {
            final String s = "replication=" + replication
                + "\nnExistings =" + nExistings
                + "\nisAppend =" + isAppend[i]
                + "\nisHflushed =" + isHflushed[j];
            throw new RuntimeException(s, e);
          }
        }
      }
    }
  }
}
Developer: ict-carch, Project: hadoop-plus, Lines: 56, Source: TestReplaceDatanodeOnFailure.java
Example 4: DFSClient
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure; // import the required package/class
/**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * Exactly one of nameNodeUri or rpcNamenode must be null.
 */
DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, Configuration conf,
    FileSystem.Statistics stats) throws IOException {
  // Copy only the required DFSClient configuration
  this.dfsClientConf = new Conf(conf);
  checkSmallFilesSupportConf(conf);
  this.conf = conf;
  this.stats = stats;
  this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

  // The hdfsTimeout is currently the same as the ipc timeout
  this.hdfsTimeout = Client.getTimeout(conf);
  this.ugi = UserGroupInformation.getCurrentUser();
  this.authority = nameNodeUri == null ? "null" : nameNodeUri.getAuthority();

  String clientNamePrefix = "";
  if (dfsClientConf.hdfsClientEmulationForSF) {
    clientNamePrefix = "DFSClient";
  } else {
    clientNamePrefix = "HopsFS_DFSClient";
  }
  this.clientName = clientNamePrefix + "_" + dfsClientConf.taskId + "_" +
      DFSUtil.getRandom().nextInt() + "_" + Thread.currentThread().getId();

  if (rpcNamenode != null) {
    // This case is used for testing.
    Preconditions.checkArgument(nameNodeUri == null);
    namenodeSelector = new NamenodeSelector(conf, rpcNamenode, this.ugi);
    dtService = null;
  } else {
    Preconditions.checkArgument(nameNodeUri != null, "null URI");
    NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo =
        NameNodeProxies.createProxy(conf, nameNodeUri, this.ugi,
            ClientProtocol.class);
    this.dtService = proxyInfo.getDelegationTokenService();
    namenodeSelector = new NamenodeSelector(conf, nameNodeUri, this.ugi);
  }

  // read directly from the block file if configured.
  this.shortCircuitLocalReads =
      conf.getBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
          DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Short circuit read is " + shortCircuitLocalReads);
  }

  String localInterfaces[] =
      conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
  localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
  if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
    LOG.debug("Using local interfaces [" +
        Joiner.on(',').join(localInterfaces) + "] with addresses [" +
        Joiner.on(',').join(localInterfaceAddrs) + "]");
  }

  this.socketCache = SocketCache
      .getInstance(dfsClientConf.socketCacheCapacity,
          dfsClientConf.socketCacheExpiry);
  this.MAX_RPC_RETRIES =
      conf.getInt(DFSConfigKeys.DFS_CLIENT_RETRIES_ON_FAILURE_KEY,
          DFSConfigKeys.DFS_CLIENT_RETRIES_ON_FAILURE_DEFAULT);
}
Developer: hopshadoop, Project: hops, Lines: 69, Source: DFSClient.java
Example 5: testDefaultPolicy
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure; // import the required package/class
/**
 * Test DEFAULT ReplaceDatanodeOnFailure policy.
 */
@Test
public void testDefaultPolicy() throws Exception {
  final ReplaceDatanodeOnFailure p = ReplaceDatanodeOnFailure.DEFAULT;
  final DatanodeInfo[] infos = new DatanodeInfo[5];
  final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
  datanodes[0] = new DatanodeInfo[0];
  for (int i = 0; i < infos.length; ) {
    infos[i] = DFSTestUtil.getLocalDatanodeInfo(50020 + i);
    i++;
    datanodes[i] = new DatanodeInfo[i];
    System.arraycopy(infos, 0, datanodes[i], 0, datanodes[i].length);
  }
  final boolean[] isAppend = {true, true, false, false};
  final boolean[] isHflushed = {true, false, true, false};
  for (short replication = 1; replication <= infos.length; replication++) {
    for (int nExistings = 0; nExistings < datanodes.length; nExistings++) {
      final DatanodeInfo[] existings = datanodes[nExistings];
      Assert.assertEquals(nExistings, existings.length);
      for (boolean anIsAppend : isAppend) {
        for (boolean anIsHflushed : isHflushed) {
          final int half = replication / 2;
          final boolean enoughReplica = replication <= nExistings;
          final boolean noReplica = nExistings == 0;
          final boolean replicationL3 = replication < 3;
          final boolean existingsLEhalf = nExistings <= half;
          final boolean isAH = anIsAppend || anIsHflushed;
          final boolean expected;
          if (enoughReplica || noReplica || replicationL3) {
            expected = false;
          } else {
            expected = isAH || existingsLEhalf;
          }
          final boolean computed =
              p.satisfy(replication, existings, anIsAppend, anIsHflushed);
          try {
            Assert.assertEquals(expected, computed);
          } catch (AssertionError e) {
            final String s =
                "replication=" + replication + "\nnExistings =" + nExistings +
                    "\nisAppend =" + anIsAppend + "\nisHflushed =" +
                    anIsHflushed;
            throw new RuntimeException(s, e);
          }
        }
      }
    }
  }
}
Developer: hopshadoop, Project: hops, Lines: 58, Source: TestReplaceDatanodeOnFailure.java
Note: the org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or use should follow the corresponding project's license. Do not repost without permission.