本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.EditLogInputStream类的典型用法代码示例。如果您正苦于以下问题:Java EditLogInputStream类的具体用法?Java EditLogInputStream怎么用?Java EditLogInputStream使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
EditLogInputStream类属于org.apache.hadoop.hdfs.server.namenode包,在下文中一共展示了EditLogInputStream类的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: testOneJNMissingSegments
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; //导入依赖的package包/类
/**
 * Regression test for HDFS-3725. One of the journal nodes is down
 * during the writing of one segment, then comes back up later to
 * take part in a later segment. Thus, its local edits are
 * not a contiguous sequence. This should be handled correctly.
 */
@Test
public void testOneJNMissingSegments() throws Exception {
  // Segment 1 (txids 1-3): every journal node participates.
  writeSegment(cluster, qjm, 1, 3, true);
  waitForAllPendingCalls(qjm.getLoggerSetForTests());
  // JN0 goes down and misses the second segment (txids 4-6).
  cluster.getJournalNode(0).stopAndJoin(0);
  writeSegment(cluster, qjm, 4, 3, true);
  waitForAllPendingCalls(qjm.getLoggerSetForTests());
  // JN0 comes back for the third segment (txids 7-9), so its local edit
  // history now has a hole (1-3, 7-9 but not 4-6).
  cluster.restartJournalNode(0);
  writeSegment(cluster, qjm, 7, 3, true);
  waitForAllPendingCalls(qjm.getLoggerSetForTests());
  // Stop JN1 so the reader cannot rely on it; presumably this forces the
  // read path to cope with JN0's non-contiguous edits -- TODO confirm.
  cluster.getJournalNode(1).stopAndJoin(0);
  // A fresh reader must still see the full contiguous range 1-9.
  QuorumJournalManager readerQjm = createSpyingQJM();
  List<EditLogInputStream> streams = Lists.newArrayList();
  try {
    readerQjm.selectInputStreams(streams, 1, false);
    verifyEdits(streams, 1, 9);
  } finally {
    // Close every selected stream and the reader regardless of outcome.
    IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
    readerQjm.close();
  }
}
开发者ID:naver,项目名称:hadoop,代码行数:29,代码来源:TestQuorumJournalManager.java
示例2: testSelectInputStreamsMajorityDown
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; //导入依赖的package包/类
/**
 * Regression test for HDFS-3891: selectInputStreams must fail with a
 * quorum exception once a majority of journal nodes are unreachable.
 */
@Test
public void testSelectInputStreamsMajorityDown() throws Exception {
  // Take every journal node offline so no quorum can possibly be reached.
  cluster.shutdown();
  List<EditLogInputStream> selected = Lists.newArrayList();
  try {
    qjm.selectInputStreams(selected, 0, false);
    fail("Did not throw IOE");
  } catch (QuorumException qe) {
    // The failure must surface the aggregated per-logger errors, and no
    // stream may have been handed back to the caller.
    GenericTestUtils.assertExceptionContains(
        "Got too many exceptions", qe);
    assertTrue(selected.isEmpty());
  }
}
开发者ID:naver,项目名称:hadoop,代码行数:20,代码来源:TestQuorumJournalManager.java
示例3: testSelectInputStreamsNotOnBoundary
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; //导入依赖的package包/类
/**
 * Reading may start in the middle of a finalized segment: after writing
 * five 10-transaction segments, selecting streams from txid 25 must make
 * edits 25 through 50 readable.
 */
@Test
public void testSelectInputStreamsNotOnBoundary() throws Exception {
  final int segmentSize = 10;
  final int segmentCount = 5;
  // Lay down five consecutive finalized segments: 1-10, 11-20, ..., 41-50.
  for (int seg = 0; seg < segmentCount; seg++) {
    writeSegment(cluster, qjm, seg * segmentSize + 1, segmentSize, true);
  }
  // Sanity-check the on-disk layout of the first journal node.
  File journalCurrent = cluster.getCurrentDir(0, JID);
  GenericTestUtils.assertGlobEquals(journalCurrent, "edits_.*",
      NNStorage.getFinalizedEditsFileName(1, 10),
      NNStorage.getFinalizedEditsFileName(11, 20),
      NNStorage.getFinalizedEditsFileName(21, 30),
      NNStorage.getFinalizedEditsFileName(31, 40),
      NNStorage.getFinalizedEditsFileName(41, 50));
  // txid 25 falls inside the 21-30 segment, i.e. not on a boundary.
  ArrayList<EditLogInputStream> selected =
      new ArrayList<EditLogInputStream>();
  qjm.selectInputStreams(selected, 25, false);
  verifyEdits(selected, 25, 50);
}
开发者ID:naver,项目名称:hadoop,代码行数:20,代码来源:TestQuorumJournalManager.java
示例4: recoverAndReturnLastTxn
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; //导入依赖的package包/类
/**
 * Runs unfinalized-segment recovery on the given journal manager and
 * returns the highest transaction id readable afterwards.
 *
 * Also asserts that the selected streams are strictly ordered: each
 * stream must begin past the previous stream's last transaction id.
 *
 * @param qjm the quorum journal manager to recover
 * @return the last recovered transaction id, or 0 if no streams exist
 * @throws IOException if recovery or stream selection fails
 */
public static long recoverAndReturnLastTxn(QuorumJournalManager qjm)
    throws IOException {
  qjm.recoverUnfinalizedSegments();
  long highestTxn = 0;
  List<EditLogInputStream> selected = Lists.newArrayList();
  try {
    qjm.selectInputStreams(selected, 0, false);
    for (EditLogInputStream stream : selected) {
      // Streams must not overlap: each one starts after the previous
      // stream's final transaction.
      assertTrue(stream.getFirstTxId() > highestTxn);
      highestTxn = stream.getLastTxId();
    }
  } finally {
    // Release every selected stream even if an assertion above failed.
    IOUtils.cleanup(null, selected.toArray(new Closeable[0]));
  }
  return highestTxn;
}
开发者ID:naver,项目名称:hadoop,代码行数:19,代码来源:QJMTestUtil.java
示例5: testOneJNMissingSegments
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; //导入依赖的package包/类
/**
 * Regression test for HDFS-3725. One of the journal nodes is down
 * during the writing of one segment, then comes back up later to
 * take part in a later segment. Thus, its local edits are
 * not a contiguous sequence. This should be handled correctly.
 */
@Test
public void testOneJNMissingSegments() throws Exception {
  // Segment 1 (txids 1-3): every journal node participates.
  writeSegment(cluster, qjm, 1, 3, true);
  waitForAllPendingCalls(qjm.getLoggerSetForTests());
  // JN0 goes down and misses the second segment (txids 4-6).
  cluster.getJournalNode(0).stopAndJoin(0);
  writeSegment(cluster, qjm, 4, 3, true);
  waitForAllPendingCalls(qjm.getLoggerSetForTests());
  // JN0 comes back for the third segment (txids 7-9), so its local edit
  // history now has a hole (1-3, 7-9 but not 4-6).
  cluster.restartJournalNode(0);
  writeSegment(cluster, qjm, 7, 3, true);
  waitForAllPendingCalls(qjm.getLoggerSetForTests());
  // Stop JN1 so the reader cannot rely on it; presumably this forces the
  // read path to cope with JN0's non-contiguous edits -- TODO confirm.
  cluster.getJournalNode(1).stopAndJoin(0);
  // A fresh reader must still see the full contiguous range 1-9.
  QuorumJournalManager readerQjm = createSpyingQJM();
  List<EditLogInputStream> streams = Lists.newArrayList();
  try {
    // 4-arg variant; the extra boolean's meaning is not visible here --
    // NOTE(review): confirm against this fork's selectInputStreams javadoc.
    readerQjm.selectInputStreams(streams, 1, false, true);
    verifyEdits(streams, 1, 9);
  } finally {
    // Close every selected stream and the reader regardless of outcome.
    IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
    readerQjm.close();
  }
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:29,代码来源:TestQuorumJournalManager.java
示例6: testSelectInputStreamsMajorityDown
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; //导入依赖的package包/类
/**
 * Regression test for HDFS-3891: selectInputStreams should throw
 * an exception when a majority of journalnodes have crashed.
 */
@Test
public void testSelectInputStreamsMajorityDown() throws Exception {
  // Shut down all of the JNs.
  cluster.shutdown();
  List<EditLogInputStream> streams = Lists.newArrayList();
  try {
    // With no journal nodes alive, selection must fail with a quorum error.
    qjm.selectInputStreams(streams, 0, false, true);
    fail("Did not throw IOE");
  } catch (QuorumException ioe) {
    // The failure must surface the aggregated per-logger errors, and no
    // stream may have been handed back to the caller.
    GenericTestUtils.assertExceptionContains(
        "Got too many exceptions", ioe);
    assertTrue(streams.isEmpty());
  }
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:20,代码来源:TestQuorumJournalManager.java
示例7: testRefreshOnlyForInprogress
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; //导入依赖的package包/类
/**
 * Ensure that refresh functionality does not work for finalized streams
 * (at startup): a stream selected over a finalized segment must reject
 * {@code refresh()} with an IOException.
 */
@Test
public void testRefreshOnlyForInprogress() throws Exception {
  // Start a new log segment beginning at txid 0.
  EditLogOutputStream stm = qjm.startLogSegment(0);
  // Write transactions 0..9, then finalize the segment at txid 9.
  QJMTestUtil.writeTxns(stm, 0, 10);
  qjm.finalizeLogSegment(0, 9);
  // Select input streams; the final boolean pair requests finalized
  // streams only (startup mode, in-progress segments excluded).
  List<EditLogInputStream> streams = Lists.newArrayList();
  qjm.selectInputStreams(streams, 0, false, false);
  try {
    // Refreshing a finalized stream must fail: in startup mode
    // in-progress segments are not allowed.
    streams.get(0).refresh(10, 0);
    // Fixed typo in the failure message ("shream" -> "stream").
    fail("The stream should not allow refreshing");
  } catch (IOException e) {
    LOG.info("Expected exception: ", e);
  }
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:27,代码来源:TestQuorumJournalManagerInputStream.java
示例8: getTailingJN
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; //导入依赖的package包/类
/**
 * Get the journal node we are tailing from, and indicate which stream
 * this is.
 *
 * @param str the stream under inspection; must be a
 *            RedundantEditLogInputStream
 * @param tailingStream single-element out-parameter receiving the
 *                      currently-active underlying URL stream
 * @return the journal node the active stream points at, or null if no
 *         journal node's HTTP port appears in the stream name
 */
private JournalNode getTailingJN(EditLogInputStream str,
    URLLogInputStream[] tailingStream) throws Exception {
  RedundantEditLogInputStream redundant = (RedundantEditLogInputStream) str;
  // The index of the active underlying stream is private; read it out
  // via reflection.
  Field indexField = RedundantEditLogInputStream.class
      .getDeclaredField("curIdx");
  indexField.setAccessible(true);
  int activeIdx = indexField.getInt(redundant);
  URLLogInputStream[] underlying = getStreams(redundant);
  URLLogInputStream active = underlying[activeIdx];
  // Match the active stream to a journal node by looking for the node's
  // bound HTTP port inside the stream's name.
  JournalNode match = null;
  for (JournalNode node : cluster.getJournalNodes()) {
    String port = Integer.toString(node.getBoundHttpAddress().getPort());
    if (active.getName().contains(port)) {
      match = node;
      break;
    }
  }
  tailingStream[0] = active;
  return match;
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:26,代码来源:TestQuorumJournalManagerInputStream.java
示例9: recoverAndReturnLastTxn
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; //导入依赖的package包/类
public static long recoverAndReturnLastTxn(QuorumJournalManager qjm)
throws IOException {
qjm.recoverUnfinalizedSegments();
long lastRecoveredTxn = 0;
List<EditLogInputStream> streams = Lists.newArrayList();
try {
qjm.selectInputStreams(streams, 0, false, true);
for (EditLogInputStream elis : streams) {
assertTrue(elis.getFirstTxId() > lastRecoveredTxn);
lastRecoveredTxn = elis.getLastTxId();
}
} finally {
IOUtils.cleanup(null, streams.toArray(new Closeable[0]));
}
return lastRecoveredTxn;
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:19,代码来源:QJMTestUtil.java
示例10: testGetInputStreamNoValidationNoCheckLastTxId
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; //导入依赖的package包/类
// Verifies that a BookKeeper-backed edit stream obtained without
// last-txid checking yields the same edits as a plain file-backed stream.
@Test
public void testGetInputStreamNoValidationNoCheckLastTxId() throws Exception {
  setupTest("test-get-input-stream-no-validation-no-check-last-txid");
  File tempEditsFile = FSEditLogTestUtil.createTempEditsFile(
      "test-get-input-stream-with-validation");
  try {
    // Open two parallel sinks: one BookKeeper segment, one local file.
    EditLogOutputStream bkeos = bkjm.startLogSegment(1);
    EditLogOutputStream elfos =
        new EditLogFileOutputStream(tempEditsFile, null);
    elfos.create();
    // Write the identical transactions (txids 1-100) to both sinks.
    FSEditLogTestUtil.populateStreams(1, 100, bkeos, elfos);
    // Re-open both as input streams, skipping last-txid validation on
    // the BookKeeper side.
    EditLogInputStream bkeis =
        getJournalInputStreamDontCheckLastTxId(bkjm, 1);
    EditLogInputStream elfis = new EditLogFileInputStream(tempEditsFile);
    // Compare the two streams transaction-by-transaction.
    Map<String, EditLogInputStream> streamByName =
        ImmutableMap.of("BookKeeper", bkeis, "File", elfis);
    FSEditLogTestUtil.assertStreamsAreEquivalent(100, streamByName);
  } finally {
    // Best-effort cleanup of the temporary edits file; only log on failure.
    if (!tempEditsFile.delete()) {
      LOG.warn("Unable to delete edits file: " +
          tempEditsFile.getAbsolutePath());
    }
  }
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:25,代码来源:TestBookKeeperJournalManager.java
示例11: getJournalInputStreamDontCheckLastTxId
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; //导入依赖的package包/类
/**
 * Selects an edit log input stream for the given transaction id without
 * validating the stream's last transaction id.
 *
 * If {@code txId} is {@code HdfsConstants.INVALID_TXID}, the earliest
 * available stream is returned; otherwise the stream whose first
 * transaction id equals {@code txId} is returned.
 *
 * @param jm journal manager to select streams from
 * @param txId requested starting transaction id
 * @return a matching edit log input stream
 * @throws IOException if no suitable stream is available
 */
static EditLogInputStream getJournalInputStreamDontCheckLastTxId(
    JournalManager jm, long txId) throws IOException {
  List<EditLogInputStream> candidates = new ArrayList<EditLogInputStream>();
  // Booleans presumably mean (inProgressOk=true, validateLastTxId=false),
  // matching this helper's name -- confirm against selectInputStreams.
  jm.selectInputStreams(candidates, txId, true, false);
  if (candidates.isEmpty()) {
    throw new IOException("Cannot obtain stream for txid: " + txId);
  }
  // Order the streams by their starting transaction id.
  Collections.sort(candidates, JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
  if (txId == HdfsConstants.INVALID_TXID) {
    // No specific txid requested: hand back the earliest stream.
    return candidates.get(0);
  }
  for (EditLogInputStream candidate : candidates) {
    if (candidate.getFirstTxId() == txId) {
      return candidate;
    }
  }
  throw new IOException("Cannot obtain stream for txid: " + txId);
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:21,代码来源:TestBookKeeperJournalManager.java
示例12: testSelectInputStreamsNotOnBoundary
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; //导入依赖的package包/类
// Reading may begin mid-segment: after writing five finalized 10-txn
// segments, selecting from txid 25 must return edits 25 through 50.
@Test
public void testSelectInputStreamsNotOnBoundary() throws Exception {
  final int txIdsPerSegment = 10;
  // Write five consecutive finalized segments: 1-10, 11-20, ..., 41-50.
  for (int txid = 1; txid <= 5 * txIdsPerSegment; txid += txIdsPerSegment) {
    writeSegment(cluster, qjm, txid, txIdsPerSegment, true);
  }
  // Confirm the expected finalized segment files exist on the first
  // journal node's disk.
  File curDir = cluster.getCurrentDir(0, JID);
  GenericTestUtils.assertGlobEquals(curDir, "edits_.*",
      NNStorage.getFinalizedEditsFileName(1, 10),
      NNStorage.getFinalizedEditsFileName(11, 20),
      NNStorage.getFinalizedEditsFileName(21, 30),
      NNStorage.getFinalizedEditsFileName(31, 40),
      NNStorage.getFinalizedEditsFileName(41, 50));
  // txid 25 falls inside the 21-30 segment, i.e. not on a boundary.
  ArrayList<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
  qjm.selectInputStreams(streams, 25, false, false);
  verifyEdits(streams, 25, 50);
}
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:20,代码来源:TestQuorumJournalManager.java
注:本文中的org.apache.hadoop.hdfs.server.namenode.EditLogInputStream类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论