本文整理汇总了Java中org.apache.kudu.Type类的典型用法代码示例。如果您正苦于以下问题:Java Type类的具体用法?Java Type怎么用?Java Type使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
Type类属于org.apache.kudu包,在下文中一共展示了Type类的18个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: getSqlTypeFromKuduType
import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Maps a Kudu column {@link Type} to the corresponding Calcite SQL type.
 *
 * <p>All Kudu integer widths (INT8/INT16/INT32/INT64) are collapsed to SQL
 * INTEGER, preserving this plugin's original mapping.
 *
 * @param typeFactory factory used to construct the SQL type instances
 * @param type        the Kudu column type to translate
 * @return the equivalent {@link RelDataType}
 * @throws UnsupportedOperationException if the Kudu type has no SQL mapping
 */
private RelDataType getSqlTypeFromKuduType(RelDataTypeFactory typeFactory, Type type) {
  switch (type) {
    case BOOL:
      return typeFactory.createSqlType(SqlTypeName.BOOLEAN);
    case DOUBLE:
      return typeFactory.createSqlType(SqlTypeName.DOUBLE);
    case FLOAT:
      return typeFactory.createSqlType(SqlTypeName.FLOAT);
    case INT16:
    case INT32:
    case INT64:
    case INT8:
      return typeFactory.createSqlType(SqlTypeName.INTEGER);
    case STRING:
      return typeFactory.createSqlType(SqlTypeName.VARCHAR);
    case UNIXTIME_MICROS:
      return typeFactory.createSqlType(SqlTypeName.TIMESTAMP);
    case BINARY:
      // Unbounded VARBINARY: Kudu binary columns have no declared length.
      return typeFactory.createSqlType(SqlTypeName.VARBINARY, Integer.MAX_VALUE);
    default:
      // Name the offending type so the failure is diagnosable from the message.
      throw new UnsupportedOperationException("Unsupported type: " + type);
  }
}
开发者ID:axbaretto,项目名称:drill,代码行数:24,代码来源:DrillKuduTable.java
示例2: convertFromKuduType
import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Convert from Kudu type to SDC Field type.
 *
 * @param kuduType the Kudu column type to translate
 * @return the matching {@link Field.Type}
 * @throws UnsupportedOperationException for Kudu types with no SDC equivalent
 */
public static Field.Type convertFromKuduType(Type kuduType){
  final Field.Type sdcType;
  switch (kuduType) {
    case BOOL:
      sdcType = Field.Type.BOOLEAN;
      break;
    case INT8:
      sdcType = Field.Type.BYTE;
      break;
    case INT16:
      sdcType = Field.Type.SHORT;
      break;
    case INT32:
      sdcType = Field.Type.INTEGER;
      break;
    case INT64:
      sdcType = Field.Type.LONG;
      break;
    case FLOAT:
      sdcType = Field.Type.FLOAT;
      break;
    case DOUBLE:
      sdcType = Field.Type.DOUBLE;
      break;
    case STRING:
      sdcType = Field.Type.STRING;
      break;
    case BINARY:
      sdcType = Field.Type.BYTE_ARRAY;
      break;
    case UNIXTIME_MICROS:
      sdcType = Field.Type.DATETIME;
      break;
    default:
      throw new UnsupportedOperationException("Unknown data type: " + kuduType.getName());
  }
  return sdcType;
}
开发者ID:streamsets,项目名称:datacollector,代码行数:22,代码来源:KuduUtils.java
示例3: testNullColumnWillEndInError
import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Ensure that if given field is null and column doesn't support that, the record will
 * end up in error stream rather then terminating whole pipeline execution.
 */
@Test
public void testNullColumnWillEndInError() throws Exception{
  TargetRunner runner = setTargetRunner(tableName, KuduOperationType.INSERT, UnsupportedOperationAction.SEND_TO_ERROR);
  runner.runInit();

  // Build a record whose "value" and "name" fields are explicitly null.
  LinkedHashMap<String, Field> fields = new LinkedHashMap<>();
  fields.put("key", Field.create(1));
  fields.put("value", Field.create(Field.Type.STRING, null));
  fields.put("name", Field.create(Field.Type.STRING, null));
  Record record = RecordCreator.create();
  record.set(Field.createListMap(fields));

  try {
    runner.runWrite(ImmutableList.of(record));
    // The offending record must land in the error stream, not abort the run.
    List<Record> errors = runner.getErrorRecords();
    Assert.assertEquals(1, errors.size());
  } finally {
    runner.runDestroy();
  }
}
开发者ID:streamsets,项目名称:datacollector,代码行数:26,代码来源:TestKuduTarget.java
示例4: init
import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Creates the Kudu client, creates the benchmark table (id key, value,
 * hash columns) range-partitioned on "id", and opens a write session.
 *
 * @throws KuduException if table creation or open fails
 */
public void init() throws KuduException {
  client = new KuduClient.KuduClientBuilder(KUDU_MASTER).build();
  // Three columns are added below; the original raw `new ArrayList(2)`
  // both used a raw type and under-sized the list.
  List<ColumnSchema> columns = new ArrayList<>(3);
  columns.add(new ColumnSchema.ColumnSchemaBuilder("id", Type.INT32).key(true).build());
  columns.add(new ColumnSchema.ColumnSchemaBuilder("value", Type.STRING).build());
  columns.add(new ColumnSchema.ColumnSchemaBuilder("hash", Type.INT32).build());
  Schema schema = new Schema(columns);
  List<String> rangeColumns = new ArrayList<>(1);
  rangeColumns.add("id");
  client.createTable(KUDU_TABLE_NAME, schema, new CreateTableOptions().setRangePartitionColumns(rangeColumns));
  table = client.openTable(KUDU_TABLE_NAME);
  session = client.newSession();
}
开发者ID:xausky,项目名称:big-data-store-benchmark,代码行数:14,代码来源:KuduKeyValueTest.java
示例5: getType
import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Translates a Drill {@link MajorType} to the Kudu column {@link Type}.
 * Repeated (array) types and unmapped minor types are rejected with a
 * data-write {@code UserException}.
 */
private Type getType(MajorType t) {
  // Kudu has no array column type, so repeated-mode fields cannot be written.
  if (t.getMode() == DataMode.REPEATED) {
    throw UserException
        .dataWriteError()
        .message("Kudu does not support array types.")
        .build(logger);
  }
  final MinorType minor = t.getMinorType();
  switch (minor) {
    case VARCHAR:
      return Type.STRING;
    case VARBINARY:
      return Type.BINARY;
    case BIT:
      return Type.BOOL;
    case INT:
      return Type.INT32;
    case BIGINT:
      return Type.INT64;
    case FLOAT4:
      return Type.FLOAT;
    case FLOAT8:
      return Type.DOUBLE;
    case TIMESTAMP:
      return Type.UNIXTIME_MICROS;
    default:
      throw UserException
          .dataWriteError()
          .message("Data type: '%s' not supported in Kudu.", minor.name())
          .build(logger);
  }
}
开发者ID:axbaretto,项目名称:drill,代码行数:34,代码来源:KuduRecordWriterImpl.java
示例6: createField
import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Create a field and assign a value off of RowResult.
 * @param result Result obtained from scan
 * @param fieldName Field name to create
 * @param type Kudu Type for the field
 * @return Generated field
 * @throws StageException
 */
public static Field createField(RowResult result, String fieldName, Type type) throws StageException {
  switch (type) {
    case BOOL:
      return Field.create(Field.Type.BOOLEAN, result.getBoolean(fieldName));
    case INT8:
      return Field.create(Field.Type.BYTE, result.getByte(fieldName));
    case INT16:
      return Field.create(Field.Type.SHORT, result.getShort(fieldName));
    case INT32:
      return Field.create(Field.Type.INTEGER, result.getInt(fieldName));
    case INT64:
      return Field.create(Field.Type.LONG, result.getLong(fieldName));
    case FLOAT:
      return Field.create(Field.Type.FLOAT, result.getFloat(fieldName));
    case DOUBLE:
      return Field.create(Field.Type.DOUBLE, result.getDouble(fieldName));
    case STRING:
      return Field.create(Field.Type.STRING, result.getString(fieldName));
    case BINARY:
      try {
        return Field.create(Field.Type.BYTE_ARRAY, result.getBinary(fieldName));
      } catch (IllegalArgumentException ex) {
        // Surface binary extraction failures as per-record errors.
        throw new OnRecordErrorException(Errors.KUDU_35, fieldName);
      }
    case UNIXTIME_MICROS:
      // UNIXTIME_MICROS is in microseconds; Date wants milliseconds.
      return Field.create(Field.Type.DATETIME, new Date(result.getLong(fieldName)/1000L));
    default:
      throw new StageException(Errors.KUDU_10, fieldName, type.getName());
  }
}
开发者ID:streamsets,项目名称:datacollector,代码行数:40,代码来源:KuduUtils.java
示例7: KuduRecordConverter
import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Creates a converter bound to a Kudu table schema.
 *
 * @param columnsToFieldTypes Kudu column name to SDC field type mapping
 * @param fieldsToColumns     record field path to Kudu column name mapping
 * @param schema              the Kudu table schema
 * @param converter           resolves field paths during conversion
 */
public KuduRecordConverter(Map<String, Field.Type> columnsToFieldTypes, Map<String, String> fieldsToColumns,
    Schema schema, FieldPathConverter converter) {
  this.schema = schema;
  this.fieldConverter = converter;
  // Defensive immutable copies so callers cannot mutate the mappings later.
  this.columnsToFieldTypes = ImmutableMap.copyOf(columnsToFieldTypes);
  this.fieldsToColumns = ImmutableMap.copyOf(fieldsToColumns);
}
开发者ID:streamsets,项目名称:datacollector,代码行数:8,代码来源:KuduRecordConverter.java
示例8: setup
import org.apache.kudu.Type; //导入依赖的package包/类
/** Builds a mock Kudu table/session environment for the lookup tests. */
@Before
public void setup() {
  // Sample table and schema. Three columns are added, so pre-size to 3;
  // the original `new ArrayList(2)` used a raw type and a wrong capacity.
  List<ColumnSchema> columns = new ArrayList<>(3);
  columns.add(new ColumnSchema.ColumnSchemaBuilder("key", Type.INT32).key(true).build());
  columns.add(new ColumnSchema.ColumnSchemaBuilder("value", Type.STRING).build());
  columns.add(new ColumnSchema.ColumnSchemaBuilder("name", Type.STRING).build());
  final Schema schema = new Schema(columns);
  // Mock KuduTable class
  KuduTable table = PowerMockito.spy(PowerMockito.mock(KuduTable.class));
  PowerMockito.suppress(PowerMockito.method(AsyncKuduClient.class, "getTablesList"));
  PowerMockito.when(table.getSchema()).thenReturn(schema);
  // Mock KuduSession class: suppress network-bound apply/flush calls.
  PowerMockito.suppress(PowerMockito.method(
      AsyncKuduSession.class,
      "apply",
      Operation.class
  ));
  PowerMockito.suppress(PowerMockito.method(
      AsyncKuduSession.class,
      "flush"
  ));
  PowerMockito.suppress(PowerMockito.method(
      KuduLookupProcessor.class,
      "destroy"
  ));
}
开发者ID:streamsets,项目名称:datacollector,代码行数:30,代码来源:TestKuduLookup.java
示例9: testUpdate
import org.apache.kudu.Type; //导入依赖的package包/类
@Test
public void testUpdate() throws Exception {
  // Populate the key, one value, and one explicitly-null nullable column.
  record.set("/short1", Field.create(Field.Type.SHORT, null));
  record.set("/long", Field.create(10L));
  record.set("/str", Field.create("val1"));
  kuduRecordConverter.convert(record, partialRow, KuduOperationType.UPDATE.code); // must not throw NPE
}
开发者ID:streamsets,项目名称:datacollector,代码行数:8,代码来源:TestKuduRecordConverter.java
示例10: testDelete
import org.apache.kudu.Type; //导入依赖的package包/类
@Test
public void testDelete() throws Exception {
  // Populate the key, one value, and one explicitly-null nullable column.
  record.set("/short1", Field.create(Field.Type.SHORT, null));
  record.set("/long", Field.create(10L));
  record.set("/str", Field.create("primary key"));
  kuduRecordConverter.convert(record, partialRow, KuduOperationType.DELETE.code); // must not throw NPE
  // The stringified row key should carry the primary key value we set.
  String rowKey = partialRow.stringifyRowKey();
  Assert.assertTrue(Utils.format("Message: {}", rowKey), rowKey.contains("primary key"));
}
开发者ID:streamsets,项目名称:datacollector,代码行数:9,代码来源:TestKuduRecordConverter.java
示例11: initCols
import org.apache.kudu.Type; //导入依赖的package包/类
// Builds the projected-column list for the scan: for every Kudu column with a
// supported Drill type, creates and allocates a value vector in the output
// mutator; unsupported columns are logged and skipped rather than failing.
private void initCols(Schema schema) throws SchemaChangeException {
ImmutableList.Builder<ProjectedColumnInfo> pciBuilder = ImmutableList.builder();
for (int i = 0; i < schema.getColumnCount(); i++) {
ColumnSchema col = schema.getColumnByIndex(i);
final String name = col.getName();
final Type kuduType = col.getType();
// TYPES maps Kudu types to Drill minor types; a null lookup means the
// column's type has no Drill equivalent and the column is skipped.
MinorType minorType = TYPES.get(kuduType);
if (minorType == null) {
logger.warn("Ignoring column that is unsupported.", UserException
.unsupportedError()
.message(
"A column you queried has a data type that is not currently supported by the Kudu storage plugin. "
+ "The column's name was %s and its Kudu data type was %s. ",
name, kuduType.toString())
.addContext("column Name", name)
.addContext("plugin", "kudu")
.build(logger));
continue;
}
// Kudu nullability maps to Drill OPTIONAL vs REQUIRED data mode.
MajorType majorType;
if (col.isNullable()) {
majorType = Types.optional(minorType);
} else {
majorType = Types.required(minorType);
}
MaterializedField field = MaterializedField.create(name, majorType);
final Class<? extends ValueVector> clazz = TypeHelper.getValueVectorClass(
minorType, majorType.getMode());
ValueVector vector = output.addField(field, clazz);
vector.allocateNew();
// Remember vector, source column, and its ordinal for the copy phase.
ProjectedColumnInfo pci = new ProjectedColumnInfo();
pci.vv = vector;
pci.kuduColumn = col;
pci.index = i;
pciBuilder.add(pci);
}
projectedCols = pciBuilder.build();
}
开发者ID:axbaretto,项目名称:drill,代码行数:44,代码来源:KuduRecordReader.java
示例12: createKuduTable
import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Creates (recreating if present) a range-partitioned Kudu test table,
 * inserts {@code rows} sample rows, then scans back the "float" column and
 * prints each result row.
 *
 * @param tableName name of the table to (re)create
 * @param tablets   number of range partitions (split rows at key = i * 1000)
 * @param replicas  replication factor for the table
 * @param rows      number of sample rows to insert
 * @throws Exception if any Kudu operation fails
 */
public static void createKuduTable(String tableName, int tablets, int replicas, int rows) throws Exception {
  try (KuduClient client = new KuduClient.KuduClientBuilder(KUDU_MASTER).build()) {
    // Drop any pre-existing table with the same name so the test starts clean.
    ListTablesResponse tables = client.getTablesList(tableName);
    if (!tables.getTablesList().isEmpty()) {
      client.deleteTable(tableName);
    }
    List<ColumnSchema> columns = new ArrayList<>(5);
    columns.add(new ColumnSchema.ColumnSchemaBuilder("key", Type.INT32).key(true).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("binary", Type.BINARY).nullable(false).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("boolean", Type.BOOL).nullable(true).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("float", Type.FLOAT).nullable(false).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("string", Type.STRING).nullable(true).build());
    Schema schema = new Schema(columns);
    CreateTableOptions builder = new CreateTableOptions();
    builder.setNumReplicas(replicas);
    builder.setRangePartitionColumns(Arrays.asList("key"));
    for (int i = 1; i < tablets; i++) {
      PartialRow splitRow = schema.newPartialRow();
      splitRow.addInt("key", i * 1000);
      builder.addSplitRow(splitRow);
    }
    client.createTable(tableName, schema, builder);
    KuduTable table = client.openTable(tableName);
    KuduSession session = client.newSession();
    session.setFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_SYNC);
    for (int i = 0; i < rows; i++) {
      Insert insert = table.newInsert();
      PartialRow row = insert.getRow();
      row.addInt(0, i);
      // Specify the charset explicitly; the no-arg getBytes() depends on the
      // platform default encoding and is not portable.
      row.addBinary(1, ("Row " + i).getBytes(java.nio.charset.StandardCharsets.UTF_8));
      row.addBoolean(2, i % 2 == 0);
      row.addFloat(3, i + 0.01f);
      row.addString(4, ("Row " + i));
      session.apply(insert);
    }
    List<String> projectColumns = new ArrayList<>(1);
    projectColumns.add("float");
    KuduScanner scanner = client.newScannerBuilder(table)
        .setProjectedColumnNames(projectColumns)
        .build();
    try {
      while (scanner.hasMoreRows()) {
        RowResultIterator results = scanner.nextRows();
        while (results.hasNext()) {
          RowResult result = results.next();
          System.out.println(result.toStringLongFormat());
        }
      }
    } finally {
      // Release server-side scanner resources (the original leaked it).
      scanner.close();
    }
  }
}
开发者ID:axbaretto,项目名称:drill,代码行数:59,代码来源:TestKuduConnect.java
示例13: testWriteShouldWriteToATableWithTheSameNameAsTheProperty
import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Verifies that when "kudu.table.field" is set, each record is routed to the
 * table named by that field (not the topic), and that records whose table
 * name matches "kudu.table.filter" are skipped entirely.
 */
@Test
public void testWriteShouldWriteToATableWithTheSameNameAsTheProperty() throws KuduException {
  org.apache.kudu.Schema kuduSchema = new org.apache.kudu.Schema(Arrays.asList(
      new ColumnSchema.ColumnSchemaBuilder("id", Type.STRING).build()
  ));
  Schema schema = SchemaBuilder.struct().name("record")
      .version(1)
      .field("id", STRING_SCHEMA)
      .field("_table", STRING_SCHEMA)
      .build();
  final String TABLE_1 = "table_1";
  final String TABLE_2 = "table_2";
  final String TABLE_3 = "table_3";
  Struct struct1 = new Struct(schema)
      .put("id", "a")
      .put("_table", TABLE_1);
  SinkRecord record1 = new SinkRecord("topic", 1, STRING_SCHEMA, "key", struct1.schema(), struct1, 1);
  Struct struct2 = new Struct(schema)
      .put("id", "a")
      .put("_table", TABLE_2);
  SinkRecord record2 = new SinkRecord("topic", 1, STRING_SCHEMA, "key", struct2.schema(), struct2, 2);
  Struct struct3 = new Struct(schema)
      .put("id", "b")
      .put("_table", TABLE_3);
  SinkRecord record3 = new SinkRecord("topic", 1, STRING_SCHEMA, "key", struct3.schema(), struct3, 3);
  when(client.newSession()).thenReturn(session);
  when(session.apply(upsert)).thenReturn(operationResponse);
  when(client.openTable(TABLE_1)).thenReturn(table);
  when(table.getSchema()).thenReturn(kuduSchema);
  when(table.newUpsert()).thenReturn(upsert);
  when(upsert.getRow()).thenReturn(row);
  // Diamond operator instead of the original explicit type arguments.
  Map<String, String> props = new HashMap<>();
  props.put("kudu.master", "0.0.0.0");
  props.put("kudu.table.field", "_table");
  // Filter on the last 3 chars of TABLE_2 ("e_2"); the original passed the
  // redundant end index to the two-arg substring.
  props.put("kudu.table.filter", TABLE_2.substring(TABLE_2.length() - 3));
  KuduSinkConfig config = new KuduSinkConfig(props);
  KuduWriter writer = new KuduWriter(config, client);
  writer.write(Arrays.asList(record1, record2, record3));
  // record2 is filtered out; records never route to the topic name.
  verify(client, times(1)).openTable(TABLE_1);
  verify(client, never()).openTable(TABLE_2);
  verify(client, times(1)).openTable(TABLE_3);
  verify(client, never()).openTable("topic");
  // The routing field itself must not be written as a column.
  verify(row, times(1)).addString("id", "a");
  verify(row, never()).addString("_table", TABLE_1);
  verify(session, times(1)).flush();
}
开发者ID:onfocusio,项目名称:kafka-connect-kudu,代码行数:59,代码来源:KuduWriterTest.java
示例14: testWriteShouldInsertKeyFields
import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Verifies that with "key.insert" enabled, the record key struct's fields
 * are written as columns alongside the value fields.
 */
@Test
public void testWriteShouldInsertKeyFields() throws KuduException {
  org.apache.kudu.Schema tableSchema = new org.apache.kudu.Schema(Arrays.asList(
      new ColumnSchema.ColumnSchemaBuilder("ks", Type.STRING).build(),
      new ColumnSchema.ColumnSchemaBuilder("ki", Type.INT32).build(),
      new ColumnSchema.ColumnSchemaBuilder("int_field", Type.INT32).build()
  ));
  Schema keySchema = SchemaBuilder.struct().name("record")
      .version(1)
      .field("ks", STRING_SCHEMA)
      .field("ki", INT32_SCHEMA)
      .build();
  Struct keyStruct = new Struct(keySchema)
      .put("ks", "z")
      .put("ki", 9);
  Schema valueSchema = SchemaBuilder.struct().name("record")
      .version(1)
      .field("int_field", INT32_SCHEMA)
      .build();
  Struct valueStruct = new Struct(valueSchema)
      .put("int_field", 1);
  SinkRecord sinkRecord = new SinkRecord("topic", 1, keyStruct.schema(), keyStruct, valueStruct.schema(), valueStruct, 1);
  when(client.newSession()).thenReturn(session);
  when(session.apply(upsert)).thenReturn(operationResponse);
  when(client.openTable("topic")).thenReturn(table);
  when(table.getSchema()).thenReturn(tableSchema);
  when(table.newUpsert()).thenReturn(upsert);
  when(upsert.getRow()).thenReturn(row);
  Map<String, String> props = new HashMap<>();
  props.put("kudu.master", "0.0.0.0");
  props.put("key.insert", "true");
  KuduWriter writer = new KuduWriter(new KuduSinkConfig(props), client);
  writer.write(Arrays.asList(sinkRecord));
  verify(client, times(1)).openTable("topic");
  // Key fields and value fields must all be written as columns.
  verify(row, times(1)).addString("ks", "z");
  verify(row, times(1)).addInt("ki", 9);
  verify(row, times(1)).addInt("int_field", 1);
  verify(session, times(1)).flush();
}
开发者ID:onfocusio,项目名称:kafka-connect-kudu,代码行数:49,代码来源:KuduWriterTest.java
示例15: testWriteShouldHandleArrays
import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Verifies that array-typed key/value fields are flattened to indexed
 * columns (k_1, k_2, ... / a_1, a_2, ...), with elements beyond the table's
 * column set silently dropped.
 */
@Test
public void testWriteShouldHandleArrays() throws KuduException {
  org.apache.kudu.Schema tableSchema = new org.apache.kudu.Schema(Arrays.asList(
      new ColumnSchema.ColumnSchemaBuilder("k_1", Type.STRING).build(),
      new ColumnSchema.ColumnSchemaBuilder("k_2", Type.STRING).build(),
      new ColumnSchema.ColumnSchemaBuilder("k_3", Type.STRING).build(),
      new ColumnSchema.ColumnSchemaBuilder("a_1", Type.STRING).build(),
      new ColumnSchema.ColumnSchemaBuilder("a_2", Type.STRING).build()
  ));
  Schema keySchema = SchemaBuilder.struct().name("record")
      .version(1)
      .field("k", SchemaBuilder.array(STRING_SCHEMA))
      .build();
  Struct key = new Struct(keySchema)
      .put("k", Arrays.asList(new String[] {"a", "b", "c", "d"}));
  Schema valueSchema = SchemaBuilder.struct().name("record")
      .version(1)
      .field("a", SchemaBuilder.array(STRING_SCHEMA))
      .build();
  Struct value = new Struct(valueSchema)
      .put("a", Arrays.asList(new String[] {"e", "f", "g"}));
  SinkRecord sinkRecord = new SinkRecord("topic", 1, key.schema(), key, value.schema(), value, 1);
  when(client.newSession()).thenReturn(session);
  when(session.apply(upsert)).thenReturn(operationResponse);
  when(client.openTable("topic")).thenReturn(table);
  when(table.getSchema()).thenReturn(tableSchema);
  when(table.newUpsert()).thenReturn(upsert);
  when(upsert.getRow()).thenReturn(row);
  Map<String, String> props = new HashMap<>();
  props.put("kudu.master", "0.0.0.0");
  props.put("key.insert", "true");
  KuduWriter writer = new KuduWriter(new KuduSinkConfig(props), client);
  writer.write(Arrays.asList(sinkRecord));
  verify(client, times(1)).openTable("topic");
  // Value array maps to a_1/a_2; the third element has no column and is dropped.
  verify(row, times(1)).addString("a_1", "e");
  verify(row, times(1)).addString("a_2", "f");
  verify(row, never()).addString("a_3", "g");
  // Key array maps to k_1..k_3; the fourth element has no column and is dropped.
  verify(row, times(1)).addString("k_1", "a");
  verify(row, times(1)).addString("k_2", "b");
  verify(row, times(1)).addString("k_3", "c");
  verify(row, never()).addString("k_4", "d");
  verify(session, times(1)).flush();
}
开发者ID:onfocusio,项目名称:kafka-connect-kudu,代码行数:56,代码来源:KuduWriterTest.java
示例16: extractGetterForColumn
import org.apache.kudu.Type; //导入依赖的package包/类
/***
 * Used to build a getter for the given schema column from the POJO field definition
 * and register it in {@code kuduColumnBasedGetters} keyed by column name.
 * @param columnSchema The Kudu column definition
 * @param fieldDefinition The POJO field definition
 * @throws UnsupportedOperationException if the column type has no getter mapping
 */
private void extractGetterForColumn(ColumnSchema columnSchema, Field fieldDefinition)
{
  Type columnType = columnSchema.getType();
  // Wildcard-parameterized Class instead of the original raw type.
  Class<?> pojoClass = getTuplePayloadClass();
  Object getter = null;
  switch ( columnType ) {
    case BINARY:
      getter = PojoUtils.createGetter(pojoClass, fieldDefinition.getName(), ByteBuffer.class);
      break;
    case STRING:
      getter = PojoUtils.createGetter(pojoClass, fieldDefinition.getName(), String.class);
      break;
    case BOOL:
      getter = PojoUtils.createGetterBoolean(pojoClass, fieldDefinition.getName());
      break;
    case DOUBLE:
      getter = PojoUtils.createGetterDouble(pojoClass, fieldDefinition.getName());
      break;
    case FLOAT:
      getter = PojoUtils.createGetterFloat(pojoClass, fieldDefinition.getName());
      break;
    case INT8:
      getter = PojoUtils.createGetterByte(pojoClass, fieldDefinition.getName());
      break;
    case INT16:
      getter = PojoUtils.createGetterShort(pojoClass, fieldDefinition.getName());
      break;
    case INT32:
      getter = PojoUtils.createGetterInt(pojoClass, fieldDefinition.getName());
      break;
    case INT64:
    case UNIXTIME_MICROS:
      // Both are represented as long in the POJO.
      getter = PojoUtils.createGetterLong(pojoClass, fieldDefinition.getName());
      break;
    default:
      LOG.error(fieldDefinition.getName() + " has a data type that is not yet supported");
      throw new UnsupportedOperationException(fieldDefinition.getName() + " does not have a compatible data type");
  }
  if (getter != null) {
    kuduColumnBasedGetters.put(columnSchema.getName(), getter);
  }
}
开发者ID:apache,项目名称:apex-malhar,代码行数:48,代码来源:AbstractKuduOutputOperator.java
示例17: recordToRow
import org.apache.kudu.Type; //导入依赖的package包/类
// Copies one record field into the given Kudu PartialRow column, converting by
// the configured SDC field type. A null value in a non-nullable column raises a
// per-record error; an absent field nulls the column only for INSERT.
private void recordToRow(Record record, PartialRow row, String fieldName, String column, int operation) throws OnRecordErrorException {
Field.Type type = columnsToFieldTypes.get(column);
ColumnSchema columnSchema = schema.getColumn(column);
if (record.has(fieldName)) {
Field field = record.get(fieldName);
if (field.getValue() == null) {
// Explicit null: allowed only if the Kudu column is nullable.
if (!columnSchema.isNullable()) {
throw new OnRecordErrorException(record, Errors.KUDU_06, column, fieldName);
}
row.setNull(column);
} else {
try {
switch (type) {
case BOOLEAN:
row.addBoolean(column, field.getValueAsBoolean());
break;
case BYTE:
row.addByte(column, field.getValueAsByte());
break;
case SHORT:
row.addShort(column, field.getValueAsShort());
break;
case INTEGER:
row.addInt(column, field.getValueAsInteger());
break;
case LONG:
if (columnSchema.getType() == Type.UNIXTIME_MICROS) {
// Convert millisecond to microsecond
row.addLong(column, field.getValueAsLong() * 1000);
} else {
row.addLong(column, field.getValueAsLong());
}
break;
case FLOAT:
row.addFloat(column, field.getValueAsFloat());
break;
case DOUBLE:
row.addDouble(column, field.getValueAsDouble());
break;
case STRING:
row.addString(column, field.getValueAsString());
break;
case BYTE_ARRAY:
row.addBinary(column, field.getValueAsByteArray());
break;
default:
// Field type has no Kudu mapping.
throw new OnRecordErrorException(record, Errors.KUDU_04, fieldName, type.name());
}
} catch (IllegalArgumentException e) {
// Value could not be converted to the column's type.
throw new OnRecordErrorException(record, Errors.KUDU_09, fieldName, type.name(), e.toString(), e);
}
}
} else {
// SDC-5816. do not null out columns in UPDATE or UPSERT mode.
// if the columns are not specified - they should not be changed.
if (operation == KuduOperationType.INSERT.code) {
if (!columnSchema.isNullable()) {
throw new OnRecordErrorException(record, Errors.KUDU_06, column, fieldName);
}
row.setNull(column);
}
}
}
开发者ID:streamsets,项目名称:datacollector,代码行数:64,代码来源:KuduRecordConverter.java
示例18: setup
import org.apache.kudu.Type; //导入依赖的package包/类
// Builds the shared fixtures: a record covering every supported field type,
// the column-to-field-type and field-path-to-column maps, the matching Kudu
// schema, a PartialRow, and the converter under test.
@Before
public void setup() throws Exception {
context = ContextInfoCreator.createSourceContext("i", false, OnRecordError.TO_ERROR, Collections.EMPTY_LIST);
record = context.createRecord("123");
// Root list-map, then one field per supported type.
record.set("/", Field.create(new LinkedHashMap<String, Field>()));
record.set("/byte", Field.create((byte)1));
record.set("/short", Field.create((short)123));
record.set("/int", Field.create(123));
record.set("/long", Field.create(123L));
record.set("/float", Field.create(123.0f));
record.set("/double", Field.create(123.0d));
record.set("/bytes", Field.create("ABC".getBytes(StandardCharsets.UTF_8)));
record.set("/str", Field.create("ABC"));
record.set("/bool", Field.create(true));
DateTime dt = new DateTime(2017, 8, 24, 9, 15, 30, DateTimeZone.UTC); // 2017/8/24 9:15:30
// Stored as microseconds since epoch, matching Kudu's UNIXTIME_MICROS.
record.set("/unixtime", Field.create(dt.getMillis() * 1000L));
// Kudu column name -> SDC field type.
Map<String, Field.Type> columnsToFieldTypes = ImmutableMap.<String, Field.Type>builder()
.put("byte1", Field.Type.BYTE)
.put("short1", Field.Type.SHORT)
.put("int1", Field.Type.INTEGER)
.put("long1", Field.Type.LONG)
.put("float1", Field.Type.FLOAT)
.put("double1", Field.Type.DOUBLE)
.put("bytes", Field.Type.BYTE_ARRAY)
.put("str", Field.Type.STRING)
.put("bool1", Field.Type.BOOLEAN)
.put("unixtime_micro", Field.Type.LONG)
.build();
// Record field path -> Kudu column name.
Map<String, String> fieldsToColumns = ImmutableMap.<String, String>builder()
.put("/byte", "byte1")
.put("/short", "short1")
.put("/int", "int1")
.put("/long", "long1")
.put("/float", "float1")
.put("/double", "double1")
.put("/bytes", "bytes")
.put("/str", "str")
.put("/bool", "bool1")
.put("/unixtime", "unixtime_micro")
.build();
// Kudu schema matching the maps above; "str" is the primary key and
// "short1" is the only nullable column.
Schema schema = new Schema(Arrays.asList(
new ColumnSchema.ColumnSchemaBuilder("str", Type.STRING).key(true).build(),
new ColumnSchema.ColumnSchemaBuilder("byte1", Type.INT8).build(),
new ColumnSchema.ColumnSchemaBuilder("short1", Type.INT16).nullable(true).build(),
new ColumnSchema.ColumnSchemaBuilder("int1", Type.INT32).build(),
new ColumnSchema.ColumnSchemaBuilder("long1", Type.INT64).build(),
new ColumnSchema.ColumnSchemaBuilder("float1", Type.FLOAT).build(),
new ColumnSchema.ColumnSchemaBuilder("double1", Type.DOUBLE).build(),
new ColumnSchema.ColumnSchemaBuilder("bytes", Type.BINARY).build(),
new ColumnSchema.ColumnSchemaBuilder("bool1", Type.BOOL).build(),
new ColumnSchema.ColumnSchemaBuilder("unixtime_micro", Type.UNIXTIME_MICROS).build()
));
partialRow = new PartialRow(schema);
kuduRecordConverter = new KuduRecordConverter(columnsToFieldTypes, fieldsToColumns, schema, null);
}
开发者ID:streamsets,项目名称:datacollector,代码行数:56,代码来源:TestKuduRecordConverter.java
注:本文中的org.apache.kudu.Type类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论