本文整理汇总了Java中org.apache.hadoop.hdfs.qjournal.server.Journal类的典型用法代码示例。如果您正苦于以下问题:Java Journal类的具体用法?Java Journal怎么用?Java Journal使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
Journal类属于org.apache.hadoop.hdfs.qjournal.server包,在下文中一共展示了Journal类的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: assertEpochFilesCopied
import org.apache.hadoop.hdfs.qjournal.server.Journal; //导入依赖的package包/类
/**
 * Verifies that each journal node's epoch files survived an upgrade:
 * the value recorded in the "previous" directory must never exceed the
 * value now stored in "current".
 */
private static void assertEpochFilesCopied(MiniQJMHACluster jnCluster)
    throws IOException {
  for (int node = 0; node < 3; node++) {
    File jDir = jnCluster.getJournalCluster().getJournalDir(node, "ns1");
    File current = new File(jDir, "current");
    File previous = new File(jDir, "previous");
    String[] epochFiles =
        { Journal.LAST_PROMISED_FILENAME, Journal.LAST_WRITER_EPOCH };
    for (String fileName : epochFiles) {
      File prevFile = new File(previous, fileName);
      // The prev file may legitimately be absent, e.g. if there was
      // never a writer before the upgrade.
      if (!prevFile.exists()) {
        continue;
      }
      PersistentLongFile prevValue = new PersistentLongFile(prevFile, -10);
      PersistentLongFile currValue =
          new PersistentLongFile(new File(current, fileName), -11);
      assertTrue("Value in " + fileName + " has decreased on upgrade in "
          + jDir, prevValue.get() <= currValue.get());
    }
  }
}
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:TestDFSUpgradeWithHA.java
示例2: assertContentsForJournal
import org.apache.hadoop.hdfs.qjournal.server.Journal; //导入依赖的package包/类
/**
 * Assert contents for a single journal: the image uploaded for the given
 * txid must exist and be byte-for-byte identical to what was written.
 */
private void assertContentsForJournal(Journal journal, byte[] written,
    long txid) throws IOException {
  LOG.info("---- validating contents ---- for txid: " + txid);
  InputStream is = null;
  try {
    File uploaded = journal.getImageStorage().getCheckpointImageFile(txid);
    assertTrue(uploaded.exists());
    assertEquals(written.length, uploaded.length());
    // assert contents of the uploaded file. A single read() call is NOT
    // guaranteed to fill the buffer (InputStream contract), so loop
    // until the buffer is full or EOF is hit.
    is = new FileInputStream(uploaded);
    byte[] contents = new byte[written.length];
    int off = 0;
    while (off < contents.length) {
      int n = is.read(contents, off, contents.length - off);
      if (n < 0) {
        break; // premature EOF; the length assert below will fail
      }
      off += n;
    }
    assertEquals(written.length, off);
    assertTrue(Arrays.equals(written, contents));
  } finally {
    if (is != null) {
      is.close();
    }
  }
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:24,代码来源:TestImageUploadStream.java
示例3: testJournalLocking
import org.apache.hadoop.hdfs.qjournal.server.Journal; //导入依赖的package包/类
/**
 * format() (done in setup) must leave the journal storage locked:
 * constructing a second Journal over the same directory has to fail
 * until close() releases the lock.
 */
@Test
public void testJournalLocking() throws Exception {
  Assume.assumeTrue(journal.getJournalStorage().isLockSupported(0));
  StorageDirectory dir = journal.getJournalStorage().getStorageDir(0);
  File lockFile = new File(dir.getRoot(), STORAGE_FILE_LOCK);
  // The storage was formatted, which takes the lock.
  GenericTestUtils.assertExists(lockFile);
  journal.newEpoch(FAKE_NSINFO, 1);
  boolean lockRejected = false;
  try {
    new Journal(TEST_LOG_DIR, TEST_IMG_DIR, JID, mockErrorReporter, mockJournalNode);
  } catch (IOException ioe) {
    lockRejected = true;
    GenericTestUtils.assertExceptionContains("Cannot lock storage", ioe);
  }
  if (!lockRejected) {
    fail("Did not fail to create another journal in same dir");
  }
  journal.close();
  // close() releases the lock, so a new Journal over the same
  // directory must now succeed.
  Journal reopened = new Journal(TEST_LOG_DIR, TEST_IMG_DIR, JID,
      mockErrorReporter, mockJournalNode);
  reopened.newEpoch(FAKE_NSINFO, 2);
  reopened.close();
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:27,代码来源:TestJournal.java
示例4: testJournalLocking
import org.apache.hadoop.hdfs.qjournal.server.Journal; //导入依赖的package包/类
/**
 * format() (done in setup) must leave the journal storage locked:
 * constructing a second Journal over the same directory has to fail
 * until close() releases the lock.
 */
@Test (timeout = 10000)
public void testJournalLocking() throws Exception {
  Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
  StorageDirectory dir = journal.getStorage().getStorageDir(0);
  File lockFile = new File(dir.getRoot(), Storage.STORAGE_FILE_LOCK);
  // The storage was formatted, which takes the lock.
  GenericTestUtils.assertExists(lockFile);
  journal.newEpoch(FAKE_NSINFO, 1);
  boolean lockRejected = false;
  try {
    new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
  } catch (IOException ioe) {
    lockRejected = true;
    GenericTestUtils.assertExceptionContains(
        "Cannot lock storage", ioe);
  }
  if (!lockRejected) {
    fail("Did not fail to create another journal in same dir");
  }
  journal.close();
  // close() releases the lock, so a new Journal over the same
  // directory must now succeed.
  Journal reopened = new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
  reopened.newEpoch(FAKE_NSINFO, 2);
  reopened.close();
}
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:27,代码来源:TestJournal.java
示例5: getCommittedTxnIdValue
import org.apache.hadoop.hdfs.qjournal.server.Journal; //导入依赖的package包/类
/**
 * Reads the first journal node's internal committedTxnId via reflection.
 * Returns INVALID_TXID when the journal has not recorded one yet.
 */
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal jn = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  Object state = Whitebox.getInternalState(jn, "committedTxnId");
  if (state == null) {
    return HdfsConstants.INVALID_TXID;
  }
  return ((BestEffortLongFile) state).get();
}
开发者ID:naver,项目名称:hadoop,代码行数:10,代码来源:TestDFSUpgradeWithHA.java
示例6: getCommittedTxnIdValue
import org.apache.hadoop.hdfs.qjournal.server.Journal; //导入依赖的package包/类
/**
 * Reads the first journal node's internal committedTxnId via reflection.
 * Returns INVALID_TXID when the journal has not recorded one yet.
 */
private long getCommittedTxnIdValue(MiniQJMHACluster qjCluster)
    throws IOException {
  Journal jn = qjCluster.getJournalCluster().getJournalNode(0)
      .getOrCreateJournal(MiniQJMHACluster.NAMESERVICE);
  Object state = Whitebox.getInternalState(jn, "committedTxnId");
  if (state == null) {
    return HdfsServerConstants.INVALID_TXID;
  }
  return ((BestEffortLongFile) state).get();
}
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:10,代码来源:TestDFSUpgradeWithHA.java
示例7: assertContents
import org.apache.hadoop.hdfs.qjournal.server.Journal; //导入依赖的package包/类
/**
 * Assert contents and hash for every journal: nodes set up to fail are
 * skipped, the number of observed exceptions matches the number of
 * injected failures, and every successful upload hash equals the hash
 * of the written image.
 */
private void assertContents(MiniJournalCluster cluster, byte[] written,
    long txid, MD5Hash writtenHash, TestImageUploadStreamInjectionHandler h)
    throws IOException {
  int numJournals = cluster.getNumNodes();
  // every node that was not injected with a failure must hold the
  // exact bytes that were written
  for (int node = 0; node < numJournals; node++) {
    if (h.failOn[node] == null) {
      Journal journal =
          cluster.getJournalNode(node).getOrCreateJournal(JID.getBytes());
      assertContentsForJournal(journal, written, txid);
    }
  }
  // each injected failure should have surfaced exactly one exception
  int failureCount = 0;
  for (InjectionEventI event : h.failOn) {
    if (event != null) {
      failureCount++;
    }
  }
  assertEquals(failureCount, h.getExceptions());
  // every successful node reported a hash matching the written image
  assertEquals(numJournals - failureCount, h.uploadHashes.size());
  for (MD5Hash hash : h.uploadHashes) {
    assertEquals(writtenHash, hash);
  }
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:32,代码来源:TestImageUploadStream.java
示例8: setup
import org.apache.hadoop.hdfs.qjournal.server.Journal; //导入依赖的package包/类
/**
 * Per-test fixture: wipes the log and image directories so no state
 * leaks between tests, then creates a fresh Journal and formats both
 * its journal storage and its image storage.
 */
@Before
public void setup() throws Exception {
// start from empty directories
FileUtil.fullyDelete(TEST_LOG_DIR);
FileUtil.fullyDelete(TEST_IMG_DIR);
journal = new Journal(TEST_LOG_DIR, TEST_IMG_DIR, JID, mockErrorReporter,
mockJournalNode);
// format both storages so the journal is usable (and locked) for tests
journal.transitionJournal(FAKE_NSINFO, Transition.FORMAT, null);
journal.transitionImage(FAKE_NSINFO, Transition.FORMAT, null);
}
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:10,代码来源:TestJournal.java
示例9: setup
import org.apache.hadoop.hdfs.qjournal.server.Journal; //导入依赖的package包/类
/**
 * Per-test fixture: wipes the log directory so no state leaks between
 * tests, then creates a fresh Journal with a new Configuration and
 * formats it.
 */
@Before
public void setup() throws Exception {
// start from an empty directory
FileUtil.fullyDelete(TEST_LOG_DIR);
conf = new Configuration();
journal = new Journal(conf, TEST_LOG_DIR, JID,
mockErrorReporter);
// format so the journal is usable (and locked) for tests
journal.format(FAKE_NSINFO);
}
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:9,代码来源:TestJournal.java
注:本文中的org.apache.hadoop.hdfs.qjournal.server.Journal类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论