This article collects typical usage examples of the Java class org.apache.hadoop.hive.ql.metadata.Partition. If you are wondering what the Partition class does, how to use it, or want real-world samples, the curated examples below should help.
The Partition class belongs to the org.apache.hadoop.hive.ql.metadata package. Twenty code examples are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps surface better Java samples.
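Before the examples, a minimal sketch of the pattern that recurs throughout them: wrapping a Thrift-level org.apache.hadoop.hive.metastore.api.Partition, together with its ql.metadata.Table, into a ql.metadata.Partition. The class and method names here are illustrative scaffolding only:

import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class PartitionWrapSketch {
  // Wrap a thrift-level partition (as returned by the metastore client) into the
  // richer ql.metadata.Partition used by query processing and the examples below.
  static Partition wrap(Table qlTable,
      org.apache.hadoop.hive.metastore.api.Partition apiPartition) throws HiveException {
    return new Partition(qlTable, apiPartition);
  }
}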
Example 1: onAddPartition
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
/**
 * Listener which fires when a partition is added.
 *
 * <p>For auditing purposes the read/write differential is the non-existence
 * and existence of the added partition respectively.</p>
 *
 * @param event The add partition event
 */
@Override
public void onAddPartition(AddPartitionEvent event) throws MetaException {
  try {
    Table table = new Table(event.getTable());
    Set<ReadEntity> readEntities = new HashSet<>();
    Set<WriteEntity> writeEntities = new HashSet<>();
    for (org.apache.hadoop.hive.metastore.api.Partition partition : event.getPartitions()) {
      writeEntities.add(new WriteEntity(new Partition(table, partition), WriteType.INSERT));
    }
    run(readEntities, writeEntities, HiveOperation.THRIFT_ADD_PARTITION);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Author: airbnb | Project: reair | Lines: 31 | Source: MetastoreAuditLogListener.java
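For context, and not shown in the reair excerpt: a metastore event listener like this one extends Hive's MetaStoreEventListener and is activated via the hive.metastore.event.listeners property. A minimal, hedged sketch of enabling it programmatically; the fully-qualified class name is a guess, not reair's actual package path:

import org.apache.hadoop.hive.conf.HiveConf;

public class ListenerConfigSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Equivalent to setting hive.metastore.event.listeners in hive-site.xml.
    // The class name below is illustrative; check reair for the real package.
    conf.setVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS,
        "com.airbnb.reair.MetastoreAuditLogListener");
  }
}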
Example 2: onDropPartition
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
/**
 * Listener which fires when a partition is dropped.
 *
 * <p>For auditing purposes the read/write differential is the existence and
 * non-existence of the dropped partition respectively.</p>
 *
 * @param event The drop partition event
 */
@Override
public void onDropPartition(DropPartitionEvent event) throws MetaException {
  try {
    Set<ReadEntity> readEntities = new HashSet<>();
    readEntities.add(
        new ReadEntity(new Partition(new Table(event.getTable()), event.getPartition())));
    Set<WriteEntity> writeEntities = new HashSet<>();
    run(readEntities, writeEntities, HiveOperation.THRIFT_DROP_PARTITION);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Author: airbnb | Project: reair | Lines: 27 | Source: MetastoreAuditLogListener.java
Example 3: populatePartSpec
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
private UpdatePeriod populatePartSpec(Partition p, Map<String, Date> timeSpec,
    Map<String, String> nonTimeSpec) throws HiveException {
  String timePartColsStr =
      p.getTable().getTTable().getParameters().get(MetastoreConstants.TIME_PART_COLUMNS);
  String upParam = p.getParameters().get(MetastoreConstants.PARTITION_UPDATE_PERIOD);
  UpdatePeriod period = UpdatePeriod.valueOf(upParam);
  Map<String, String> partSpec = new HashMap<>();
  partSpec.putAll(p.getSpec());
  if (timePartColsStr != null) {
    String[] timePartCols = StringUtils.split(timePartColsStr, ',');
    for (String partCol : timePartCols) {
      String dateStr = partSpec.get(partCol);
      Date date;
      try {
        date = period.parse(dateStr);
      } catch (Exception e) {
        continue;
      }
      partSpec.remove(partCol);
      timeSpec.put(partCol, date);
    }
  }
  if (!partSpec.isEmpty()) {
    nonTimeSpec.putAll(partSpec);
  }
  return period;
}
Author: apache | Project: lens | Lines: 27 | Source: CubeMetastoreServiceImpl.java
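A note on the contract above: time partition columns are moved out of the raw spec into timeSpec (parsed as dates), and whatever remains lands in nonTimeSpec. A self-contained rehearsal of that split with hypothetical values, using plain strings in place of a real Partition:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class PartSpecSplitSketch {
  public static void main(String[] args) {
    // Stand-ins for p.getSpec() and the table's TIME_PART_COLUMNS parameter.
    Map<String, String> partSpec = new HashMap<>();
    partSpec.put("dt", "2016-01-01");
    partSpec.put("region", "us");
    String timePartColsStr = "dt";

    Set<String> timeCols = new HashSet<>();
    Map<String, String> nonTimeSpec = new HashMap<>();
    if (timePartColsStr != null) {
      for (String partCol : timePartColsStr.split(",")) {
        if (partSpec.remove(partCol) != null) {
          timeCols.add(partCol); // a real run parses the value with period.parse()
        }
      }
    }
    nonTimeSpec.putAll(partSpec);
    System.out.println(timeCols);    // [dt]
    System.out.println(nonTimeSpec); // {region=us}
  }
}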
Example 4: run
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
@Override
public void run() throws Exception {
  Iterator<HiveDataset> iterator = this.datasetFinder.getDatasetsIterator();
  while (iterator.hasNext()) {
    ConvertibleHiveDataset hiveDataset = (ConvertibleHiveDataset) iterator.next();
    try (AutoReturnableObject<IMetaStoreClient> client = hiveDataset.getClientPool().getClient()) {
      Set<Partition> sourcePartitions = new HashSet<>(
          HiveUtils.getPartitions(client.get(), hiveDataset.getTable(), Optional.<String>absent()));
      sourcePartitions.parallelStream()
          .filter(partition -> isUnixTimeStamp(partition.getDataLocation().getName()))
          .forEach(partition -> {
            Arrays.stream(listFiles(partition.getDataLocation().getParent()))
                .filter(fileStatus -> !fileStatus.getPath().toString()
                    .equalsIgnoreCase(partition.getDataLocation().toString()))
                .forEach(fileStatus -> deletePath(fileStatus, this.graceTimeInMillis, true));
          });
    }
  }
}
Author: apache | Project: incubator-gobblin | Lines: 22 | Source: Avro2OrcStaleDatasetCleaner.java
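The isUnixTimeStamp, listFiles, and deletePath helpers are defined elsewhere in Avro2OrcStaleDatasetCleaner. A plausible shape for the first one, offered purely as an assumption about what the filter checks:

public class TimestampDirSketch {
  // Assumed helper: treat a directory name as a staging-run marker when it is a
  // 13-digit number, i.e. epoch milliseconds (covers the years 2001-2286).
  static boolean isUnixTimeStamp(String dirName) {
    return dirName != null && dirName.matches("\\d{13}");
  }

  public static void main(String[] args) {
    System.out.println(isUnixTimeStamp("1451606400000")); // true
    System.out.println(isUnixTimeStamp("hourly"));        // false
  }
}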
Example 5: setVersions
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
private void setVersions(final String name, final State state) throws IOException {
  try {
    UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
    loginUser.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws IOException {
        synchronized (lock) {
          List<Partition> partitions = null;
          for (String tableName : ComplianceRetentionJob.tableNamesList) {
            for (String pattern : patterns) {
              if (tableName.contains(pattern)) {
                partitions = getPartitions(tableName);
                addPartitionsToVersions(versions, name, partitions);
              }
            }
          }
        }
        return null;
      }
    });
  } catch (InterruptedException | IOException e) {
    throw new IOException(e);
  }
}
Author: apache | Project: incubator-gobblin | Lines: 27 | Source: HivePartitionVersionFinder.java
Example 6: getPartitions
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
private static List<Partition> getPartitions(String completeTableName) {
  List<String> tableList = At_SPLITTER.splitToList(completeTableName);
  if (tableList.size() != 2) {
    log.warn("Invalid table name " + completeTableName);
    return Collections.EMPTY_LIST;
  }
  try (AutoReturnableObject<IMetaStoreClient> client = ComplianceRetentionJob.pool.getClient()) {
    Table table = client.get().getTable(tableList.get(0), tableList.get(1));
    HiveDataset dataset = new HiveDataset(FileSystem.newInstance(new Configuration()),
        ComplianceRetentionJob.pool, new org.apache.hadoop.hive.ql.metadata.Table(table),
        new Properties());
    return dataset.getPartitionsFromDataset();
  } catch (IOException | TException e) {
    log.warn("Unable to get Partitions for table " + completeTableName + " " + e.getMessage());
  }
  return Collections.EMPTY_LIST;
}
Author: apache | Project: incubator-gobblin | Lines: 17 | Source: HivePartitionVersionFinder.java
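At_SPLITTER is referenced above but declared elsewhere. Since splitToList must yield exactly [dbName, tableName] from a complete table name, a likely definition (an assumption) is:

import com.google.common.base.Splitter;

public class SplitterSketch {
  // Splits a complete table name of the form "dbName@tableName".
  static final Splitter At_SPLITTER = Splitter.on('@').omitEmptyStrings().trimResults();

  public static void main(String[] args) {
    System.out.println(At_SPLITTER.splitToList("mydb@mytable")); // [mydb, mytable]
  }
}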
Example 7: findDatasets
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
/**
 * Will find all datasets according to whitelist, except the backup, trash and staging tables.
 */
@Override
public List<HivePartitionDataset> findDatasets() throws IOException {
  List<HivePartitionDataset> list = new ArrayList<>();
  for (HiveDataset hiveDataset : this.hiveDatasets) {
    for (Partition partition : hiveDataset.getPartitionsFromDataset()) {
      list.add(new HivePartitionDataset(partition));
    }
  }
  String selectionPolicyString = this.state.getProp(
      ComplianceConfigurationKeys.DATASET_SELECTION_POLICY_CLASS,
      ComplianceConfigurationKeys.DEFAULT_DATASET_SELECTION_POLICY_CLASS);
  Policy<HivePartitionDataset> selectionPolicy =
      GobblinConstructorUtils.invokeConstructor(Policy.class, selectionPolicyString);
  return selectionPolicy.selectedList(list);
}
Author: apache | Project: incubator-gobblin | Lines: 19 | Source: HivePartitionFinder.java
Example 8: getDatasetVersion
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
/**
 * Create a {@link TimestampedHiveDatasetVersion} from a {@link Partition} based on the
 * modified time of the underlying HDFS data location.
 *
 * @throws IllegalArgumentException when the partition argument is null
 * @throws IllegalArgumentException when the data location of the partition is null
 * @throws IllegalArgumentException when the data location of the partition doesn't exist
 * {@inheritDoc}
 */
@Override
protected TimestampedHiveDatasetVersion getDatasetVersion(Partition partition) {
  try {
    Preconditions.checkArgument(partition != null, "Argument to method ");
    Path dataLocation = partition.getDataLocation();
    Preconditions.checkArgument(dataLocation != null,
        "Data location is null for partition " + partition.getCompleteName());
    boolean exists = this.fs.exists(dataLocation);
    Preconditions.checkArgument(exists,
        "Data location doesn't exist for partition " + partition.getCompleteName());
    long modificationTS = this.fs.getFileStatus(dataLocation).getModificationTime();
    return new TimestampedHiveDatasetVersion(new DateTime(modificationTS), partition);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
Author: apache | Project: incubator-gobblin | Lines: 26 | Source: HdfsModifiedTimeHiveVersionFinder.java
Example 9: getDropPartitionsDDLInfo
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
/**
 * Parse the {@link #REPLACED_PARTITIONS_HIVE_METASTORE_KEY} from the partition parameters and
 * return DDL info for all the partitions to be dropped.
 *
 * @return A {@link List} of partitions to be dropped. Each element of the list is a {@link Map}
 * which maps a partition's key and value.
 */
public static List<Map<String, String>> getDropPartitionsDDLInfo(Partition hivePartition) {
  List<Map<String, String>> replacedPartitionsDDLInfo = Lists.newArrayList();
  List<FieldSchema> partitionKeys = hivePartition.getTable().getPartitionKeys();
  if (StringUtils.isNotBlank(hivePartition.getParameters().get(REPLACED_PARTITIONS_HIVE_METASTORE_KEY))) {
    // Partitions are separated by "|"
    for (String partitionsInfoString : Splitter.on("|").omitEmptyStrings()
        .split(hivePartition.getParameters().get(REPLACED_PARTITIONS_HIVE_METASTORE_KEY))) {
      // Values for a partition are separated by ","
      List<String> partitionValues =
          Splitter.on(",").omitEmptyStrings().trimResults().splitToList(partitionsInfoString);
      // Do not drop the partition currently being processed. Sometimes a partition may have
      // replaced another partition with the same values.
      if (!partitionValues.equals(hivePartition.getValues())) {
        ImmutableMap.Builder<String, String> partitionDDLInfoMap = ImmutableMap.builder();
        for (int i = 0; i < partitionKeys.size(); i++) {
          partitionDDLInfoMap.put(partitionKeys.get(i).getName(), partitionValues.get(i));
        }
        replacedPartitionsDDLInfo.add(partitionDDLInfoMap.build());
      }
    }
  }
  return replacedPartitionsDDLInfo;
}
Author: apache | Project: incubator-gobblin | Lines: 33 | Source: AbstractAvroToOrcConverter.java
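A runnable walk-through of the parsing contract above, with hypothetical keys and values: two replaced partitions are recorded in the parameter, and the entry matching the current partition's own values is skipped:

import com.google.common.base.Splitter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class DropPartitionsParseSketch {
  public static void main(String[] args) {
    List<String> partitionKeys = Arrays.asList("year", "month"); // from getPartitionKeys()
    List<String> currentValues = Arrays.asList("2015", "01");    // from getValues()
    String replaced = "2015,01|2015,02"; // REPLACED_PARTITIONS_HIVE_METASTORE_KEY value

    List<Map<String, String>> ddlInfo = new ArrayList<>();
    for (String one : Splitter.on("|").omitEmptyStrings().split(replaced)) {
      List<String> values = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(one);
      if (!values.equals(currentValues)) { // skip the partition being processed
        Map<String, String> spec = new LinkedHashMap<>();
        for (int i = 0; i < partitionKeys.size(); i++) {
          spec.put(partitionKeys.get(i), values.get(i));
        }
        ddlInfo.add(spec);
      }
    }
    System.out.println(ddlInfo); // [{year=2015, month=02}]
  }
}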
Example 10: getCreateTime
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
@VisibleForTesting
public static long getCreateTime(Partition partition) {
  // If create time is set, use it.
  // .. this is always set if HiveJDBC or the Hive metastore is used to create the partition.
  // .. it might not be set (i.e. equals 0) if a Thrift API call is used to create the partition.
  if (partition.getTPartition().getCreateTime() > 0) {
    return TimeUnit.MILLISECONDS.convert(partition.getTPartition().getCreateTime(), TimeUnit.SECONDS);
  }
  // Try to use the distcp-ng registration generation time if it is available
  else if (partition.getTPartition().isSetParameters()
      && partition.getTPartition().getParameters().containsKey(DISTCP_REGISTRATION_GENERATION_TIME_KEY)) {
    log.debug("Did not find createTime in Hive partition, used distcp registration generation time.");
    return Long.parseLong(partition.getTPartition().getParameters().get(DISTCP_REGISTRATION_GENERATION_TIME_KEY));
  } else {
    log.warn(String.format("Could not find create time for partition %s. Will return createTime as 0",
        partition.getCompleteName()));
    return 0;
  }
}
Author: apache | Project: incubator-gobblin | Lines: 20 | Source: HiveSource.java
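One detail worth making explicit: the metastore stores createTime in seconds, and the TimeUnit call above widens it to milliseconds. A quick self-contained check of the conversion direction:

import java.util.concurrent.TimeUnit;

public class CreateTimeUnitsSketch {
  public static void main(String[] args) {
    long createTimeSeconds = 1451606400L; // 2016-01-01T00:00:00Z, as the metastore stores it
    long millis = TimeUnit.MILLISECONDS.convert(createTimeSeconds, TimeUnit.SECONDS);
    System.out.println(millis); // 1451606400000
  }
}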
Example 11: createPartitionCopy
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
public HivePartitionFileSet createPartitionCopy(Path location, long registrationGenerationTime,
    boolean targetPartitionExists) {
  HivePartitionFileSet partitionCopy = Mockito.mock(HivePartitionFileSet.class);
  Partition partition = Mockito.mock(Partition.class);
  Mockito.doReturn(location).when(partition).getDataLocation();
  Mockito.doReturn(partition).when(partitionCopy).getPartition();
  if (targetPartitionExists) {
    Partition targetPartition = Mockito.mock(Partition.class);
    Map<String, String> parameters = Maps.newHashMap();
    parameters.put(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(registrationGenerationTime));
    Mockito.doReturn(parameters).when(targetPartition).getParameters();
    Mockito.doReturn(Optional.of(targetPartition)).when(partitionCopy).getExistingTargetPartition();
  } else {
    Mockito.doReturn(Optional.absent()).when(partitionCopy).getExistingTargetPartition();
  }
  return partitionCopy;
}
Author: apache | Project: incubator-gobblin | Lines: 25 | Source: RegistrationTimeSkipPredicateTest.java
Example 12: clean
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
@Override
public void clean() throws IOException {
  // Possible empty directories to clean for this partition (version)
  Set<Path> possiblyEmptyDirectories = new HashSet<>();
  try (AutoReturnableObject<IMetaStoreClient> client = cleanableHiveDataset.getClientPool().getClient()) {
    Partition partition = hiveDatasetVersion.getPartition();
    try {
      if (!cleanableHiveDataset.isSimulate()) {
        client.get().dropPartition(partition.getTable().getDbName(),
            partition.getTable().getTableName(), partition.getValues(), false);
        log.info("Successfully dropped partition " + partition.getCompleteName());
      } else {
        log.info("Simulating drop partition " + partition.getCompleteName());
      }
      if (cleanableHiveDataset.isShouldDeleteData()) {
        cleanableHiveDataset.getFsCleanableHelper().clean(hiveDatasetVersion, possiblyEmptyDirectories);
      }
    } catch (TException | IOException e) {
      log.warn(String.format("Failed to completely delete partition %s.", partition.getCompleteName()), e);
      throw new IOException(e);
    }
  }
  cleanableHiveDataset.getFsCleanableHelper().cleanEmptyDirectories(possiblyEmptyDirectories, cleanableHiveDataset);
}
Author: apache | Project: incubator-gobblin | Lines: 26 | Source: HiveDatasetVersionCleaner.java
Example 13: getPartitions
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
/**
 * Get a list of {@link Partition}s for the <code>table</code> that match an optional
 * <code>filter</code>.
 *
 * @param client an {@link IMetaStoreClient} for the correct metastore.
 * @param table the {@link Table} for which we should get partitions.
 * @param filter an optional filter for partitions as would be used in Hive. Can only filter
 * on String columns (e.g. "part = \"part1\"" or "date > \"2015\"").
 * @param hivePartitionExtendedFilterOptional an optional client-side filter applied to each
 * partition returned by the metastore.
 * @return a list of {@link Partition}s
 */
public static List<Partition> getPartitions(IMetaStoreClient client, Table table,
    Optional<String> filter, Optional<? extends HivePartitionExtendedFilter> hivePartitionExtendedFilterOptional)
    throws IOException {
  try {
    List<Partition> partitions = Lists.newArrayList();
    List<org.apache.hadoop.hive.metastore.api.Partition> partitionsList = filter.isPresent()
        ? client.listPartitionsByFilter(table.getDbName(), table.getTableName(), filter.get(), (short) -1)
        : client.listPartitions(table.getDbName(), table.getTableName(), (short) -1);
    for (org.apache.hadoop.hive.metastore.api.Partition p : partitionsList) {
      if (!hivePartitionExtendedFilterOptional.isPresent()
          || hivePartitionExtendedFilterOptional.get().accept(p)) {
        Partition partition = new Partition(table, p);
        partitions.add(partition);
      }
    }
    return partitions;
  } catch (TException | HiveException te) {
    throw new IOException("Hive Error", te);
  }
}
Author: apache | Project: incubator-gobblin | Lines: 30 | Source: HiveUtils.java
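A hedged usage sketch for the utility above, assuming an already-open IMetaStoreClient and a resolved Table (the import for HiveUtils itself is omitted since its package differs across Gobblin versions):

import com.google.common.base.Optional;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class GetPartitionsUsageSketch {
  static List<Partition> partitionsAfter2015(IMetaStoreClient client, Table table)
      throws IOException {
    // No extended filter: every partition passing the metastore filter is kept.
    // Only String partition columns may appear in the filter expression.
    return HiveUtils.getPartitions(client, table, Optional.of("date > \"2015\""),
        Optional.absent());
  }
}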
Example 14: generateCopyEntities
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
@Override
protected Collection<CopyEntity> generateCopyEntities() throws IOException {
  List<CopyEntity> deregisterCopyEntities = Lists.newArrayList();
  int priority = 1;
  for (Partition partition : partitionsToDeregister) {
    try {
      priority = this.helper.addPartitionDeregisterSteps(deregisterCopyEntities, getName(),
          priority, this.helper.getTargetTable(), partition);
    } catch (IOException ioe) {
      log.error("Could not create work unit to deregister partition " + partition.getCompleteName());
    }
  }
  return deregisterCopyEntities;
}
Author: apache | Project: incubator-gobblin | Lines: 17 | Source: HivePartitionsDeregisterFileSet.java
Example 15: testDefaults
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
@Test
public void testDefaults() throws Exception {
  DatePartitionHiveVersionFinder versionFinder = new DatePartitionHiveVersionFinder(this.fs, ConfigFactory.empty());
  String tableName = "VfTb1";
  Table tbl = this.hiveMetastoreTestUtils.createTestAvroTable(dbName, tableName, ImmutableList.of("datepartition"));
  org.apache.hadoop.hive.metastore.api.Partition tp = this.hiveMetastoreTestUtils
      .addTestPartition(tbl, ImmutableList.of("2016-01-01-20"), (int) System.currentTimeMillis());
  Partition partition = new Partition(new org.apache.hadoop.hive.ql.metadata.Table(tbl), tp);
  assertThat(partition.getName(), is("datepartition=2016-01-01-20"));
  TimestampedHiveDatasetVersion dv = versionFinder.getDatasetVersion(partition);
  Assert.assertEquals(dv.getDateTime(), formatter.parseDateTime("2016/01/01/20"));
}
Author: apache | Project: incubator-gobblin | Lines: 17 | Source: DatePartitionedHiveVersionFinderTest.java
Example 16: testUserDefinedDatePattern
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
@Test
public void testUserDefinedDatePattern() throws Exception {
  String tableName = "VfTb2";
  Config conf = ConfigFactory.parseMap(ImmutableMap.<String, String>of(
      DatePartitionHiveVersionFinder.PARTITION_KEY_NAME_KEY, "field1",
      DatePartitionHiveVersionFinder.PARTITION_VALUE_DATE_TIME_PATTERN_KEY, "yyyy/MM/dd/HH"));
  DatePartitionHiveVersionFinder versionFinder = new DatePartitionHiveVersionFinder(this.fs, conf);
  Table tbl = this.hiveMetastoreTestUtils.createTestAvroTable(dbName, tableName, ImmutableList.of("field1"));
  org.apache.hadoop.hive.metastore.api.Partition tp = this.hiveMetastoreTestUtils
      .addTestPartition(tbl, ImmutableList.of("2016/01/01/20"), (int) System.currentTimeMillis());
  Partition partition = new Partition(new org.apache.hadoop.hive.ql.metadata.Table(tbl), tp);
  Assert.assertEquals(URLDecoder.decode(partition.getName(), "UTF-8"), "field1=2016/01/01/20");
  TimestampedHiveDatasetVersion dv = versionFinder.getDatasetVersion(partition);
  Assert.assertEquals(dv.getDateTime(), formatter.parseDateTime("2016/01/01/20"));
}
Author: apache | Project: incubator-gobblin | Lines: 18 | Source: DatePartitionedHiveVersionFinderTest.java
Example 17: testWhitelist
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
@Test
public void testWhitelist() throws Exception {
  BackfillHiveSource backfillHiveSource = new BackfillHiveSource();
  SourceState state = new SourceState();
  // NOTE: the dbName@tableName prefixes below are placeholders; the originals were
  // mangled by the source page. Complete names follow dbName@tableName@partitionName.
  state.setProp(BackfillHiveSource.BACKFILL_SOURCE_PARTITION_WHITELIST_KEY,
      "db@table@datepartition=2016-08-04-00,db@table@datepartition=2016-08-05-00");
  backfillHiveSource.initBackfillHiveSource(state);
  Partition pass1 = Mockito.mock(Partition.class, Mockito.RETURNS_SMART_NULLS);
  Mockito.when(pass1.getCompleteName()).thenReturn("db@table@datepartition=2016-08-04-00");
  Partition pass2 = Mockito.mock(Partition.class, Mockito.RETURNS_SMART_NULLS);
  Mockito.when(pass2.getCompleteName()).thenReturn("db@table@datepartition=2016-08-05-00");
  Partition fail = Mockito.mock(Partition.class, Mockito.RETURNS_SMART_NULLS);
  Mockito.when(fail.getCompleteName()).thenReturn("db@table@datepartition=2016-08-06-00");
  Assert.assertTrue(backfillHiveSource.shouldCreateWorkunit(pass1, new LongWatermark(0)));
  Assert.assertTrue(backfillHiveSource.shouldCreateWorkunit(pass2, new LongWatermark(0)));
  Assert.assertFalse(backfillHiveSource.shouldCreateWorkunit(fail, new LongWatermark(0)));
}
Author: apache | Project: incubator-gobblin | Lines: 22 | Source: BackfillHiveSourceTest.java
Example 18: testGetPreviousHighWatermarkForPartition
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
@Test
public void testGetPreviousHighWatermarkForPartition() throws Exception {
  WorkUnitState previousWus = new WorkUnitState();
  // "db@" prefix reconstructed; the dataset URN follows dbName@tableName.
  previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "db@test_dataset_urn");
  previousWus.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
  previousWus.setActualHighWatermark(new MultiKeyValueLongWatermark(ImmutableMap.of("2015", 100L, "2016", 101L)));
  SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
  PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);
  Table table = mockTable("test_dataset_urn");
  Partition partition2015 = mockPartition(table, ImmutableList.of("2015"));
  Partition partition2016 = mockPartition(table, ImmutableList.of("2016"));
  Assert.assertEquals(watermarker.getPreviousHighWatermark(partition2015), new LongWatermark(100L));
  Assert.assertEquals(watermarker.getPreviousHighWatermark(partition2016), new LongWatermark(101L));
}
Author: apache | Project: incubator-gobblin | Lines: 18 | Source: PartitionLevelWatermarkerTest.java
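mockTable and mockPartition are fixtures defined elsewhere in PartitionLevelWatermarkerTest and reused in Examples 18 and 19. A sketch consistent with the assertions above; the "db@" complete-name prefix is an assumption:

import java.util.List;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.mockito.Mockito;

public class WatermarkerMockSketch {
  static Table mockTable(String name) {
    Table table = Mockito.mock(Table.class, Mockito.RETURNS_SMART_NULLS);
    // Complete names follow dbName@tableName; "db" is a placeholder here.
    Mockito.when(table.getCompleteName()).thenReturn("db@" + name);
    return table;
  }

  static Partition mockPartition(Table table, List<String> values) {
    Partition partition = Mockito.mock(Partition.class, Mockito.RETURNS_SMART_NULLS);
    Mockito.when(partition.getTable()).thenReturn(table);
    Mockito.when(partition.getValues()).thenReturn(values);
    return partition;
  }
}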
Example 19: testDroppedPartitions
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
@Test
public void testDroppedPartitions() throws Exception {
  WorkUnitState previousWus = new WorkUnitState();
  // "db@" prefix reconstructed; the dataset URN follows dbName@tableName.
  previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "db@test_dataset_urn");
  previousWus.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
  previousWus.setActualHighWatermark(
      new MultiKeyValueLongWatermark(ImmutableMap.of("2015-01", 100L, "2015-02", 101L)));
  SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
  PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);
  Table table = mockTable("test_dataset_urn");
  Mockito.when(table.getPartitionKeys()).thenReturn(ImmutableList.of(new FieldSchema("year", "string", "")));
  Partition partition2015 = mockPartition(table, ImmutableList.of("2015"));
  // partition 2015 replaces 2015-01 and 2015-02
  Mockito.when(partition2015.getParameters()).thenReturn(
      ImmutableMap.of(AbstractAvroToOrcConverter.REPLACED_PARTITIONS_HIVE_METASTORE_KEY, "2015-01|2015-02"));
  watermarker.onPartitionProcessBegin(partition2015, 0L, 0L);
  Assert.assertEquals(watermarker.getExpectedHighWatermarks().get("db@test_dataset_urn"),
      ImmutableMap.of("2015", 0L));
}
Author: apache | Project: incubator-gobblin | Lines: 24 | Source: PartitionLevelWatermarkerTest.java
Example 20: loadTimelinesFromAllPartitions
import org.apache.hadoop.hive.ql.metadata.Partition; // import the required package/class
private void loadTimelinesFromAllPartitions(String storageTableName, String timeLineKey)
    throws HiveException, LensException {
  // Then add all existing partitions for batch addition in respective timelines.
  Table storageTable = getTable(storageTableName);
  List<String> timeParts = getTimePartColNamesOfTable(storageTable);
  List<FieldSchema> partCols = storageTable.getPartCols();
  for (Partition partition : getPartitionsByFilter(storageTableName, null)) {
    UpdatePeriod period = deduceUpdatePeriod(partition);
    List<String> values = partition.getValues();
    if (values.contains(StorageConstants.LATEST_PARTITION_VALUE)) {
      log.info("dropping latest partition from fact storage table: {}. Spec: {}", storageTableName,
          partition.getSpec());
      getClient().dropPartition(storageTableName, values, false);
      continue;
    }
    for (int i = 0; i < partCols.size(); i++) {
      if (timeParts.contains(partCols.get(i).getName())) {
        addForBatchAddition(timeLineKey, storageTableName, period, partCols.get(i).getName(), values.get(i));
      }
    }
  }
}
Author: apache | Project: lens | Lines: 23 | Source: CubeMetastoreClient.java
Note: the org.apache.hadoop.hive.ql.metadata.Partition examples in this article were collected from source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before redistributing or using the code, and do not repost without permission.