This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.HFileLink. If you have been wondering what the HFileLink class is for and how to use it, the curated code examples below should help.
The HFileLink class belongs to the org.apache.hadoop.hbase.io package. Twenty code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Java code examples.
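Before the numbered examples, here is a minimal sketch of the naming convention most of them rely on: an HFileLink file is named "table=region-hfile" and merely points at a store file belonging to another region (see Example 1, where "table=21212abcdc33-0906db948c48" refers to region 21212abcdc33 of table "table"). The path below is made up for illustration, and the sketch assumes an HBase version (0.98+/1.x, as in Examples 5, 14 and 20) in which getReferencedTableName returns a TableName; on 0.94 (Example 9) it returns a plain String.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.HFileLink;

public class HFileLinkNameSketch {
  public static void main(String[] args) {
    // Hypothetical link path; the file name follows the "table=region-hfile" pattern.
    Path link = new Path("/hbase/data/default/t1/2086db948c48/cf/t1=21212abcdc33-0906db948c48");

    if (HFileLink.isHFileLink(link)) {
      TableName table = HFileLink.getReferencedTableName(link.getName()); // t1
      String region = HFileLink.getReferencedRegionName(link.getName());  // 21212abcdc33
      String hfile = HFileLink.getReferencedHFileName(link.getName());    // 0906db948c48
      System.out.println("link " + link.getName() + " -> " + table + "/" + region + "/" + hfile);
    }
  }
}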
Example 1: testHFileLink
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
@Test
public void testHFileLink() throws Exception {
// pass a link, and verify that correct paths are returned.
Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
Path aFileLink = new Path(rootDir, "table/2086db948c48/cf/table=21212abcdc33-0906db948c48");
Path preNamespaceTablePath = new Path(rootDir, "table/21212abcdc33/cf/0906db948c48");
Path preNamespaceArchivePath =
new Path(rootDir, ".archive/table/21212abcdc33/cf/0906db948c48");
Path preNamespaceTempPath = new Path(rootDir, ".tmp/table/21212abcdc33/cf/0906db948c48");
boolean preNSTablePathExists = false;
boolean preNSArchivePathExists = false;
boolean preNSTempPathExists = false;
assertTrue(HFileLink.isHFileLink(aFileLink));
HFileLink hFileLink =
HFileLink.buildFromHFileLinkPattern(TEST_UTIL.getConfiguration(), aFileLink);
assertTrue(hFileLink.getArchivePath().toString().startsWith(rootDir.toString()));
HFileV1Detector t = new HFileV1Detector();
t.setConf(TEST_UTIL.getConfiguration());
FileLink fileLink = t.getFileLinkWithPreNSPath(aFileLink);
// assert it has 6 candidate paths to look at (2 NS, 2 pre-NS, and 2 .tmp)
assertTrue(fileLink.getLocations().length == 6);
for (Path p : fileLink.getLocations()) {
if (p.equals(preNamespaceArchivePath)) preNSArchivePathExists = true;
if (p.equals(preNamespaceTablePath)) preNSTablePathExists = true;
if (p.equals(preNamespaceTempPath)) preNSTempPathExists = true;
}
assertTrue(preNSArchivePathExists & preNSTablePathExists & preNSTempPathExists);
}
Developer: fengchen8086, Project: ditb, Lines: 30, Source: TestUpgradeTo96.java
Example 2: addStoreFile
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
/**
* Add the specified store file to the stats
* @param region the region the store file belongs to
* @param family the column family name
* @param storeFile the store file entry from the snapshot manifest
* @return the store file information
*/
FileInfo addStoreFile(final HRegionInfo region, final String family,
final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
HFileLink link = HFileLink.build(conf, snapshotTable, region.getEncodedName(),
family, storeFile.getName());
boolean isCorrupted = false;
boolean inArchive = false;
long size = -1;
try {
if ((inArchive = fs.exists(link.getArchivePath()))) {
size = fs.getFileStatus(link.getArchivePath()).getLen();
hfileArchiveSize.addAndGet(size);
hfileArchiveCount.incrementAndGet();
} else {
size = link.getFileStatus(fs).getLen();
hfileSize.addAndGet(size);
hfilesCount.incrementAndGet();
}
isCorrupted = (storeFile.hasFileSize() && storeFile.getFileSize() != size);
if (isCorrupted) hfilesCorrupted.incrementAndGet();
} catch (FileNotFoundException e) {
hfilesMissing.incrementAndGet();
}
return new FileInfo(inArchive, size, isCorrupted);
}
Developer: fengchen8086, Project: ditb, Lines: 32, Source: SnapshotInfo.java
Example 3: getStoreFileInfo
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
/**
* Returns a StoreFileInfo from the given FileStatus. Secondary replicas refer to the
* files of the primary region, so an HFileLink is used to construct the StoreFileInfo. This
* ensures that the secondary will be able to continue reading the store files even if
* they are moved to the archive after compaction.
* @throws IOException
*/
public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs,
HRegionInfo regionInfo, HRegionInfo regionInfoForFs, String familyName, Path path)
throws IOException {
// if this is a primary region, just return the StoreFileInfo constructed from path
if (regionInfo.equals(regionInfoForFs)) {
return new StoreFileInfo(conf, fs, path);
}
// else create a store file link. The link file does not actually exist on the filesystem, though.
HFileLink link = HFileLink.build(conf, regionInfoForFs.getTable(),
regionInfoForFs.getEncodedName(), familyName, path.getName());
if (StoreFileInfo.isReference(path)) {
Reference reference = Reference.read(fs, path);
return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference);
}
return new StoreFileInfo(conf, fs, link.getFileStatus(fs), link);
}
Developer: fengchen8086, Project: ditb, Lines: 28, Source: ServerRegionReplicaUtil.java
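Note that the link produced by HFileLink.build never has to exist as a file: it is an in-memory descriptor whose candidate locations cover both the live store directory and the archive, which is exactly why a secondary replica can keep reading after a compaction. Below is a minimal, hedged sketch of that behaviour, using the same build factory as above and in Example 2; the table, region and file names are made up.

import java.io.FileNotFoundException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.HFileLink;

public class ReplicaLinkSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);

    // Hypothetical primary-region coordinates; no link file is ever written for them.
    HFileLink link = HFileLink.build(conf, TableName.valueOf("t1"),
        "21212abcdc33", "cf", "0906db948c48");

    // The link resolves against the live store location first and the archive second,
    // so readers keep working even after compaction moves the file away.
    System.out.println("archive candidate: " + link.getArchivePath());
    try {
      System.out.println("resolved length: " + link.getFileStatus(fs).getLen());
    } catch (FileNotFoundException e) {
      System.out.println("neither location exists (expected for these made-up names)");
    }
  }
}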
Example 4: isValid
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
/**
* Return whether the specified file is a valid store file.
*
* @param fileStatus The {@link FileStatus} of the file
* @return <tt>true</tt> if the file is valid
*/
public static boolean isValid(final FileStatus fileStatus) throws IOException {
final Path p = fileStatus.getPath();
if (fileStatus.isDirectory()) return false;
// Check for empty hfile. Should never be the case but can happen
// after data loss in hdfs for whatever reason (upgrade, etc.): HBASE-646
// NOTE: an HFileLink is just a name, so the link file itself is empty.
if (!HFileLink.isHFileLink(p) && fileStatus.getLen() <= 0) {
LOG.warn("Skipping " + p + " because it is empty. HBASE-646 DATA LOSS?");
return false;
}
return validateStoreFileName(p.getName());
}
Developer: fengchen8086, Project: ditb, Lines: 22, Source: StoreFileInfo.java
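A typical caller applies this check while scanning a column-family directory. The following sketch is an assumption about usage rather than part of the HBase sources: the directory path is hypothetical, and only FileSystem.listStatus plus the static StoreFileInfo.isValid shown above are used.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;

public class ValidStoreFileScan {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical column-family directory of one region.
    Path familyDir = new Path("/hbase/data/default/t1/2086db948c48/cf");

    for (FileStatus status : fs.listStatus(familyDir)) {
      // Skips directories and zero-length non-link files (see the HBASE-646 note above).
      if (StoreFileInfo.isValid(status)) {
        System.out.println("valid store file: " + status.getPath().getName());
      }
    }
  }
}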
Example 5: verifyRestore
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
final HTableDescriptor htdClone) throws IOException {
List<String> files = SnapshotTestingUtils.listHFileNames(fs,
FSUtils.getTableDir(rootDir, htdClone.getTableName()));
assertEquals(12, files.size());
for (int i = 0; i < files.size(); i += 2) {
String linkFile = files.get(i);
String refFile = files.get(i+1);
assertTrue(linkFile + " should be a HFileLink", HFileLink.isHFileLink(linkFile));
assertTrue(refFile + " should be a Reference", StoreFileInfo.isReference(refFile));
assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(linkFile));
Path refPath = getReferredToFile(refFile);
LOG.debug("get reference name for file " + refFile + " = " + refPath);
assertTrue(refPath.getName() + " should be a HFileLink", HFileLink.isHFileLink(refPath.getName()));
assertEquals(linkFile, refPath.getName());
}
}
Developer: fengchen8086, Project: ditb, Lines: 18, Source: TestRestoreSnapshotHelper.java
Example 6: testEqualsWithLink
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
@Test
public void testEqualsWithLink() throws IOException {
Path origin = new Path("/origin");
Path tmp = new Path("/tmp");
Path archive = new Path("/archive");
HFileLink link1 = new HFileLink(new Path(origin, "f1"), new Path(tmp, "f1"),
new Path(archive, "f1"));
HFileLink link2 = new HFileLink(new Path(origin, "f1"), new Path(tmp, "f1"),
new Path(archive, "f1"));
StoreFileInfo info1 = new StoreFileInfo(TEST_UTIL.getConfiguration(),
TEST_UTIL.getTestFileSystem(), null, link1);
StoreFileInfo info2 = new StoreFileInfo(TEST_UTIL.getConfiguration(),
TEST_UTIL.getTestFileSystem(), null, link2);
assertEquals(info1, info2);
assertEquals(info1.hashCode(), info2.hashCode());
}
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestStoreFileInfo.java
Example 7: verifySnapshot
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
/**
* Verify the validity of the snapshot
*
* @param conf The current {@link Configuration} instance.
* @param fs {@link FileSystem}
* @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify
* @param snapshotDesc the {@link SnapshotDescription} of the snapshot to verify
* @throws CorruptedSnapshotException if the snapshot is corrupted
* @throws IOException if an error occurred while scanning the directory
*/
public static void verifySnapshot(final Configuration conf, final FileSystem fs,
final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException {
final String table = snapshotDesc.getTable();
visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() {
public void storeFile (final String region, final String family, final String hfile)
throws IOException {
HFileLink link = HFileLink.create(conf, table, region, family, hfile);
try {
link.getFileStatus(fs);
} catch (FileNotFoundException e) {
throw new CorruptedSnapshotException("Corrupted snapshot '" + snapshotDesc + "'", e);
}
}
});
}
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 26, Source: SnapshotReferenceUtil.java
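A caller normally resolves the completed snapshot directory and reads its descriptor before invoking verifySnapshot. The sketch below shows that wiring under a few assumptions: the snapshot name "snap1" is made up, the helpers (SnapshotDescriptionUtils, FSUtils) are the same ones that appear in Examples 1 and 13, and the SnapshotDescription import refers to the protobuf-generated class used by these pre-2.0 code bases.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
import org.apache.hadoop.hbase.util.FSUtils;

public class VerifySnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = FSUtils.getRootDir(conf);

    // Resolve the directory of a completed snapshot (the name is hypothetical).
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir("snap1", rootDir);
    SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);

    // Throws CorruptedSnapshotException if any referenced hfile is missing.
    SnapshotReferenceUtil.verifySnapshot(conf, fs, snapshotDir, desc);
  }
}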
Example 8: getHFileNames
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
/**
* Returns the store file names in the snapshot.
*
* @param fs {@link FileSystem}
* @param snapshotDir {@link Path} to the Snapshot directory
* @throws IOException if an error occurred while scanning the directory
* @return the names of hfiles in the specified snapshot
*/
public static Set<String> getHFileNames(final FileSystem fs, final Path snapshotDir)
throws IOException {
final Set<String> names = new HashSet<String>();
visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() {
public void storeFile (final String region, final String family, final String hfile)
throws IOException {
if (HFileLink.isHFileLink(hfile)) {
names.add(HFileLink.getReferencedHFileName(hfile));
} else {
names.add(hfile);
}
}
});
return names;
}
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 24, Source: SnapshotReferenceUtil.java
Example 9: getOutputPath
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
/**
* Returns the location where the inputPath will be copied.
* - hfiles are encoded as hfile links hfile-region-table
* - logs are encoded as serverName/logName
*/
private Path getOutputPath(final Path inputPath) throws IOException {
Path path;
if (HFileLink.isHFileLink(inputPath) || StoreFile.isReference(inputPath)) {
String family = inputPath.getParent().getName();
String table = HFileLink.getReferencedTableName(inputPath.getName());
String region = HFileLink.getReferencedRegionName(inputPath.getName());
String hfile = HFileLink.getReferencedHFileName(inputPath.getName());
path = new Path(table, new Path(region, new Path(family, hfile)));
} else if (isHLogLinkPath(inputPath)) {
String logName = inputPath.getName();
path = new Path(new Path(outputRoot, HConstants.HREGION_OLDLOGDIR_NAME), logName);
} else {
path = inputPath;
}
return new Path(outputArchive, path);
}
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 22, Source: ExportSnapshot.java
Example 10: getFileStatus
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
private FileStatus getFileStatus(final FileSystem fs, final Path path) {
try {
if (HFileLink.isHFileLink(path) || StoreFile.isReference(path)) {
HFileLink link = new HFileLink(inputRoot, inputArchive, path);
return link.getFileStatus(fs);
} else if (isHLogLinkPath(path)) {
String serverName = path.getParent().getName();
String logName = path.getName();
return new HLogLink(inputRoot, serverName, logName).getFileStatus(fs);
}
return fs.getFileStatus(path);
} catch (IOException e) {
LOG.warn("Unable to get the status for file=" + path);
return null;
}
}
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 17, Source: ExportSnapshot.java
Example 11: addStoreFile
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
/**
* Add the specified store file to the stats
* @param region region encoded Name
* @param family family name
* @param hfile store file name
* @return the store file information
*/
FileInfo addStoreFile(final String region, final String family, final String hfile)
throws IOException {
String table = this.snapshot.getTable();
HFileLink link = HFileLink.create(conf, table, region, family, hfile);
boolean inArchive = false;
long size = -1;
try {
if ((inArchive = fs.exists(link.getArchivePath()))) {
size = fs.getFileStatus(link.getArchivePath()).getLen();
hfileArchiveSize += size;
hfileArchiveCount++;
} else {
size = link.getFileStatus(fs).getLen();
hfileSize += size;
hfilesCount++;
}
} catch (FileNotFoundException e) {
hfilesMissing++;
}
return new FileInfo(inArchive, size);
}
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 29, Source: SnapshotInfo.java
Example 12: Reader
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
public Reader(FileSystem fs, Path path, HFileLink hfileLink, long size, CacheConfig cacheConf,
DataBlockEncoding preferredEncodingInCache, boolean closeIStream) throws IOException {
super(path);
pWinterPath = path;
FSDataInputStream in = hfileLink.open(fs);
FSDataInputStream inNoChecksum = in;
if (fs instanceof HFileSystem) {
FileSystem noChecksumFs = ((HFileSystem) fs).getNoChecksumFs();
inNoChecksum = hfileLink.open(noChecksumFs);
}
reader =
HFile.createReaderWithEncoding(fs, path, in, inNoChecksum, size, cacheConf,
preferredEncodingInCache, closeIStream);
bloomFilterType = BloomType.NONE;
}
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 17, Source: StoreFile.java
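The reader above goes through HFileLink.open(FileSystem), which returns a stream positioned on whichever location currently holds the hfile (table directory or archive). Here is a minimal sketch of reading through a link with the 0.94-style HFileLink.create factory used in Examples 7, 11 and 13; the table, region and file names are made up, so on a real cluster you would substitute existing ones.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.HFileLink;

public class LinkStreamSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);

    // 0.94-style factory: plain String table name; all coordinates here are hypothetical.
    HFileLink link = HFileLink.create(conf, "t1", "21212abcdc33", "cf", "0906db948c48");

    // open() hides whether the hfile still sits in the table dir or has been archived.
    try (FSDataInputStream in = link.open(fs)) {
      byte[] header = new byte[8];
      in.readFully(header);
      System.out.println("read " + header.length + " bytes through the link");
    }
  }
}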
Example 13: corruptSnapshot
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
/**
* Corrupt the specified snapshot by deleting some files.
*
* @param util {@link HBaseTestingUtility}
* @param snapshotName name of the snapshot to corrupt
* @return array of the corrupted HFiles
* @throws IOException on unexpected error reading the FS
*/
public static ArrayList corruptSnapshot(final HBaseTestingUtility util, final String snapshotName)
throws IOException {
final MasterFileSystem mfs = util.getHBaseCluster().getMaster().getMasterFileSystem();
final FileSystem fs = mfs.getFileSystem();
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName,
mfs.getRootDir());
SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
final String table = snapshotDesc.getTable();
final ArrayList corruptedFiles = new ArrayList();
SnapshotReferenceUtil.visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() {
public void storeFile (final String region, final String family, final String hfile)
throws IOException {
HFileLink link = HFileLink.create(util.getConfiguration(), table, region, family, hfile);
if (corruptedFiles.size() % 2 == 0) {
fs.delete(link.getAvailablePath(fs));
corruptedFiles.add(hfile);
}
}
});
assertTrue(corruptedFiles.size() > 0);
return corruptedFiles;
}
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 34, Source: SnapshotTestingUtils.java
Example 14: getOutputPath
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
/**
* Returns the location where the inputPath will be copied.
*/
private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException {
Path path = null;
switch (inputInfo.getType()) {
case HFILE:
Path inputPath = new Path(inputInfo.getHfile());
String family = inputPath.getParent().getName();
TableName table = HFileLink.getReferencedTableName(inputPath.getName());
String region = HFileLink.getReferencedRegionName(inputPath.getName());
String hfile = HFileLink.getReferencedHFileName(inputPath.getName());
path = new Path(FSUtils.getTableDir(new Path("./"), table),
new Path(region, new Path(family, hfile)));
break;
case WAL:
Path oldLogsDir = new Path(outputRoot, HConstants.HREGION_OLDLOGDIR_NAME);
path = new Path(oldLogsDir, inputInfo.getWalName());
break;
default:
throw new IOException("Invalid File Type: " + inputInfo.getType().toString());
}
return new Path(outputArchive, path);
}
Developer: grokcoder, Project: pbase, Lines: 25, Source: ExportSnapshot.java
Example 15: addStoreFile
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
/**
* Add the specified store file to the stats
* @param region the region the store file belongs to
* @param family the column family name
* @param storeFile the store file entry from the snapshot manifest
* @return the store file information
*/
FileInfo addStoreFile(final HRegionInfo region, final String family,
final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
HFileLink link = HFileLink.create(conf, snapshotTable, region.getEncodedName(),
family, storeFile.getName());
boolean isCorrupted = false;
boolean inArchive = false;
long size = -1;
try {
if ((inArchive = fs.exists(link.getArchivePath()))) {
size = fs.getFileStatus(link.getArchivePath()).getLen();
hfileArchiveSize.addAndGet(size);
hfileArchiveCount.incrementAndGet();
} else {
size = link.getFileStatus(fs).getLen();
hfileSize.addAndGet(size);
hfilesCount.incrementAndGet();
}
isCorrupted = (storeFile.hasFileSize() && storeFile.getFileSize() != size);
if (isCorrupted) hfilesCorrupted.incrementAndGet();
} catch (FileNotFoundException e) {
hfilesMissing.incrementAndGet();
}
return new FileInfo(inArchive, size, isCorrupted);
}
Developer: grokcoder, Project: pbase, Lines: 32, Source: SnapshotInfo.java
Example 16: getStoreFileInfo
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
/**
* Returns a StoreFileInfo from the given FileStatus. Secondary replicas refer to the
* files of the primary region, so an HFileLink is used to construct the StoreFileInfo. This
* ensures that the secondary will be able to continue reading the store files even if
* they are moved to the archive after compaction.
* @throws IOException
*/
public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs,
HRegionInfo regionInfo, HRegionInfo regionInfoForFs, String familyName, FileStatus status)
throws IOException {
// if this is a primary region, just return the StoreFileInfo constructed from path
if (regionInfo.equals(regionInfoForFs)) {
return new StoreFileInfo(conf, fs, status);
}
// else create a store file link. The link file does not actually exist on the filesystem, though.
HFileLink link = new HFileLink(conf,
HFileLink.createPath(regionInfoForFs.getTable(), regionInfoForFs.getEncodedName(),
familyName, status.getPath().getName()));
return new StoreFileInfo(conf, fs, status, link);
}
Developer: grokcoder, Project: pbase, Lines: 23, Source: ServerRegionReplicaUtil.java
Example 17: isValid
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
/**
* Return whether the specified file is a valid store file.
* @param fileStatus The {@link FileStatus} of the file
* @return <tt>true</tt> if the file is valid
*/
public static boolean isValid(final FileStatus fileStatus)
throws IOException {
final Path p = fileStatus.getPath();
if (fileStatus.isDirectory())
return false;
// Check for empty hfile. Should never be the case but can happen
// after data loss in hdfs for whatever reason (upgrade, etc.): HBASE-646
// NOTE: an HFileLink is just a name, so the link file itself is empty.
if (!HFileLink.isHFileLink(p) && fileStatus.getLen() <= 0) {
LOG.warn("Skipping " + p + " because it is empty. HBASE-646 DATA LOSS?");
return false;
}
return validateStoreFileName(p.getName());
}
Developer: grokcoder, Project: pbase, Lines: 23, Source: StoreFileInfo.java
Example 18: verifyRestore
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
final HTableDescriptor htdClone) throws IOException {
String[] files = SnapshotTestingUtils.listHFileNames(fs,
FSUtils.getTableDir(rootDir, htdClone.getTableName()));
assertEquals(12, files.length);
for (int i = 0; i < files.length; i += 2) {
String linkFile = files[i];
String refFile = files[i+1];
assertTrue(linkFile + " should be a HFileLink", HFileLink.isHFileLink(linkFile));
assertTrue(refFile + " should be a Reference", StoreFileInfo.isReference(refFile));
assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(linkFile));
Path refPath = getReferredToFile(refFile);
LOG.debug("get reference name for file " + refFile + " = " + refPath);
assertTrue(refPath.getName() + " should be a HFileLink", HFileLink.isHFileLink(refPath.getName()));
assertEquals(linkFile, refPath.getName());
}
}
Developer: grokcoder, Project: pbase, Lines: 18, Source: TestRestoreSnapshotHelper.java
Example 19: verifySnapshot
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
/**
* Verify the validity of the snapshot
*
* @param conf The current {@link Configuration} instance.
* @param fs {@link FileSystem}
* @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify
* @param snapshotDesc the {@link SnapshotDescription} of the snapshot to verify
* @throws CorruptedSnapshotException if the snapshot is corrupted
* @throws IOException if an error occurred while scanning the directory
*/
public static void verifySnapshot(final Configuration conf, final FileSystem fs,
final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException {
final TableName table = TableName.valueOf(snapshotDesc.getTable());
visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() {
public void storeFile (final String region, final String family, final String hfile)
throws IOException {
HFileLink link = HFileLink.create(conf, table, region, family, hfile);
try {
link.getFileStatus(fs);
} catch (FileNotFoundException e) {
throw new CorruptedSnapshotException("Corrupted snapshot '" + snapshotDesc + "'", e);
}
}
});
}
Developer: tenggyut, Project: HIndex, Lines: 26, Source: SnapshotReferenceUtil.java
Example 20: getOutputPath
import org.apache.hadoop.hbase.io.HFileLink; // import the required package/class
/**
* Returns the location where the inputPath will be copied.
* - hfiles are encoded as hfile links hfile-region-table
* - logs are encoded as serverName/logName
*/
private Path getOutputPath(final Path inputPath) throws IOException {
Path path;
if (HFileLink.isHFileLink(inputPath) || StoreFileInfo.isReference(inputPath)) {
String family = inputPath.getParent().getName();
TableName table =
HFileLink.getReferencedTableName(inputPath.getName());
String region = HFileLink.getReferencedRegionName(inputPath.getName());
String hfile = HFileLink.getReferencedHFileName(inputPath.getName());
path = new Path(FSUtils.getTableDir(new Path("./"), table),
new Path(region, new Path(family, hfile)));
} else if (isHLogLinkPath(inputPath)) {
String logName = inputPath.getName();
path = new Path(new Path(outputRoot, HConstants.HREGION_OLDLOGDIR_NAME), logName);
} else {
path = inputPath;
}
return new Path(outputArchive, path);
}
Developer: tenggyut, Project: HIndex, Lines: 24, Source: ExportSnapshot.java
Note: The org.apache.hadoop.hbase.io.HFileLink examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their developers; copyright of the source code remains with the original authors, and you should consult each project's license before redistributing or using it. Do not reproduce this article without permission.